TOMOYO Linux Cross Reference
Linux/arch/i386/mach-voyager/voyager_smp.c


  1 /* -*- mode: c; c-basic-offset: 8 -*- */
  2 
  3 /* Copyright (C) 1999,2001
  4  *
  5  * Author: J.E.J.Bottomley@HansenPartnership.com
  6  *
  7  * linux/arch/i386/kernel/voyager_smp.c
  8  *
  9  * This file provides all the same external entries as smp.c but uses
 10  * the voyager hal to provide the functionality
 11  */
 12 #include <linux/config.h>
 13 #include <linux/mm.h>
 14 #include <linux/kernel_stat.h>
 15 #include <linux/delay.h>
 16 #include <linux/mc146818rtc.h>
 17 #include <linux/cache.h>
 18 #include <linux/interrupt.h>
 19 #include <linux/smp_lock.h>
 20 #include <linux/init.h>
 21 #include <linux/kernel.h>
 22 #include <linux/bootmem.h>
 23 #include <linux/completion.h>
 24 #include <asm/desc.h>
 25 #include <asm/voyager.h>
 26 #include <asm/vic.h>
 27 #include <asm/pgalloc.h>
 28 #include <asm/mtrr.h>
 30 #include <asm/tlbflush.h>
 32 #include <asm/arch_hooks.h>
 33 
 34 #include <linux/irq.h>
 35 
 36 int reboot_smp = 0;
 37 
 38 /* TLB state -- visible externally, indexed physically */
 39 struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};
 40 
 41 /* CPU IRQ affinity -- set to all ones initially */
 42 static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1]  = ~0UL };
 43 
 44 /* Set when the idlers are all forked -- set in main.c but not
 45  * actually used by any other part of the kernel */
 46 int smp_threads_ready = 0;
 47 
 48 /* per CPU data structure (for /proc/cpuinfo et al), visible externally
 49  * indexed physically */
 50 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 51 
 52 /* physical ID of the CPU used to boot the system */
 53 unsigned char boot_cpu_id;
 54 
 55 /* The memory line addresses for the Quad CPIs */
 56 struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;
 57 
 58 /* The masks for the Extended VIC processors, filled in by cat_init */
 59 __u32 voyager_extended_vic_processors = 0;
 60 
 61 /* Masks for the extended Quad processors which cannot be VIC booted */
 62 __u32 voyager_allowed_boot_processors = 0;
 63 
 64 /* The mask for the Quad Processors (both extended and non-extended) */
 65 __u32 voyager_quad_processors = 0;
 66 
 67 /* Total count of live CPUs, used in process.c to display
 68  * the CPU information and in irq.c for the per CPU irq
 69  * activity count.  Finally exported by i386_ksyms.c */
 70 static int voyager_extended_cpus = 1;
 71 
 72 /* Have we found an SMP box - used by time.c to do the profiling
 73    interrupt for timeslicing; do not set to 1 until the per CPU timer
 74    interrupt is active */
 75 int smp_found_config = 0;
 76 
 77 /* Used for the invalidate map that's also checked in the spinlock */
 78 static volatile unsigned long smp_invalidate_needed;
 79 
 80 /* Bitmask of currently online CPUs - used by setup.c for
 81    /proc/cpuinfo, visible externally but still physical */
 82 cpumask_t cpu_online_map = CPU_MASK_NONE;
 83 
 84 /* Bitmask of CPUs present in the system - exported by i386_ksyms.c, used
 85  * by scheduler but indexed physically */
 86 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 87 
 88 /* estimate of time used to flush the SMP-local cache - used in
 89  * processor affinity calculations */
 90 cycles_t cacheflush_time = 0;
 91 
 92 /* cache decay ticks for scheduler---a fairly useless quantity for the
 93    voyager system with its odd affinity and huge L3 cache */
 94 unsigned long cache_decay_ticks = 20;
 95 
 96 
 97 /* The internal functions */
 98 static void send_CPI(__u32 cpuset, __u8 cpi);
 99 static void ack_CPI(__u8 cpi);
100 static int ack_QIC_CPI(__u8 cpi);
101 static void ack_special_QIC_CPI(__u8 cpi);
102 static void ack_VIC_CPI(__u8 cpi);
103 static void send_CPI_allbutself(__u8 cpi);
104 static void enable_vic_irq(unsigned int irq);
105 static void disable_vic_irq(unsigned int irq);
106 static unsigned int startup_vic_irq(unsigned int irq);
107 static void enable_local_vic_irq(unsigned int irq);
108 static void disable_local_vic_irq(unsigned int irq);
109 static void before_handle_vic_irq(unsigned int irq);
110 static void after_handle_vic_irq(unsigned int irq);
111 static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
112 static void ack_vic_irq(unsigned int irq);
113 static void vic_enable_cpi(void);
114 static void do_boot_cpu(__u8 cpuid);
115 static void do_quad_bootstrap(void);
116 static inline void wrapper_smp_local_timer_interrupt(struct pt_regs *);
117 
118 int hard_smp_processor_id(void);
119 
120 /* Inline functions */
121 static inline void
122 send_one_QIC_CPI(__u8 cpu, __u8 cpi)
123 {
124         voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
125                 (smp_processor_id() << 16) + cpi;
126 }
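/* Worked example (illustrative, not part of the driver): the mailbox
 * word packs the sending CPU into the high 16 bits and the CPI number
 * into the low bits. */
#if 0
        /* from CPU 3, this stores (3 << 16) + 2 == 0x00030002 into
         * CPU 5's qic_cpi[2] mailbox */
        send_one_QIC_CPI(5, 2);
#endif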
127 
128 static inline void
129 send_QIC_CPI(__u32 cpuset, __u8 cpi)
130 {
131         int cpu;
132 
133         for_each_cpu(cpu, cpu_online_map) {
134                 if(cpuset & (1<<cpu)) {
135 #ifdef VOYAGER_DEBUG
136                         if(!cpu_isset(cpu, cpu_online_map))
137                                 VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu));
138 #endif
139                         send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
140                 }
141         }
142 }
143 
144 static inline void
145 send_one_CPI(__u8 cpu, __u8 cpi)
146 {
147         if(voyager_quad_processors & (1<<cpu))
148                 send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
149         else
150                 send_CPI(1<<cpu, cpi);
151 }
152 
153 static inline void
154 send_CPI_allbutself(__u8 cpi)
155 {
156         __u8 cpu = smp_processor_id();
157         __u32 mask = cpus_coerce(cpu_online_map) & ~(1 << cpu);
158         send_CPI(mask, cpi);
159 }
160 
161 static inline int
162 is_cpu_quad(void)
163 {
164         __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
165         return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
166 }
167 
168 static inline int
169 is_cpu_extended(void)
170 {
171         __u8 cpu = hard_smp_processor_id();
172 
173         return(voyager_extended_vic_processors & (1<<cpu));
174 }
175 
176 static inline int
177 is_cpu_vic_boot(void)
178 {
179         __u8 cpu = hard_smp_processor_id();
180 
181         return(voyager_extended_vic_processors
182                & voyager_allowed_boot_processors & (1<<cpu));
183 }
184 
185 
186 static inline void
187 ack_CPI(__u8 cpi)
188 {
189         switch(cpi) {
190         case VIC_CPU_BOOT_CPI:
191                 if(is_cpu_quad() && !is_cpu_vic_boot())
192                         ack_QIC_CPI(cpi);
193                 else
194                         ack_VIC_CPI(cpi);
195                 break;
196         case VIC_SYS_INT:
197         case VIC_CMN_INT: 
198                 /* These are slightly strange.  Even on the Quad card,
 199                  * they are vectored as VIC CPIs */
200                 if(is_cpu_quad())
201                         ack_special_QIC_CPI(cpi);
202                 else
203                         ack_VIC_CPI(cpi);
204                 break;
205         default:
206                 printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
207                 break;
208         }
209 }
210 
211 /* local variables */
212 
213 /* The VIC IRQ descriptors -- these look almost identical to the
214  * 8259 IRQs except that masks and things must be kept per processor
215  */
216 static struct hw_interrupt_type vic_irq_type = {
217         "VIC-level",
218         startup_vic_irq,        /* startup */
219         disable_vic_irq,        /* shutdown */
220         enable_vic_irq,         /* enable */
221         disable_vic_irq,        /* disable */
222         before_handle_vic_irq,  /* ack */
223         after_handle_vic_irq,   /* end */
224         set_vic_irq_affinity,   /* affinity */
225 };
226 
227 /* used to count up as CPUs are brought on line (starts at 0) */
228 static int cpucount = 0;
229 
230 /* steal a page from the bottom of memory for the trampoline and
231  * squirrel its address away here.  This will be in kernel virtual
232  * space */
233 static __u32 trampoline_base;
234 
235 /* The per cpu profile stuff - used in smp_local_timer_interrupt */
236 static DEFINE_PER_CPU(int, prof_multiplier) = 1;
237 static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
238 static DEFINE_PER_CPU(int, prof_counter) =  1;
239 
240 /* the map used to check if a CPU has booted */
241 static __u32 cpu_booted_map;
242 
243 /* the synchronize flag used to hold all secondary CPUs spinning in
244  * a tight loop until the boot sequence is ready for them */
245 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
246 
247 /* This is for the new dynamic CPU boot code */
248 cpumask_t cpu_callin_map = CPU_MASK_NONE;
249 cpumask_t cpu_callout_map = CPU_MASK_NONE;
250 
251 /* The per processor IRQ masks (these are usually kept in sync) */
252 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
253 
254 /* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
255 static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
256 
257 /* Lock for enable/disable of VIC interrupts */
258 static spinlock_t vic_irq_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
259 
260 /* The boot processor is correctly set up in PC mode when it 
261  * comes up, but the secondaries need their master/slave 8259
 262  * pairs initialized correctly */
263 
264 /* Interrupt counters (per cpu) and total - used to try to
265  * even up the interrupt handling routines */
266 static long vic_intr_total = 0;
267 static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
268 static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };
269 
270 /* Since we can only use CPI0, we fake all the other CPIs */
271 static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
272 
273 /* debugging routine to read the isr of the cpu's pic */
274 static inline __u16
275 vic_read_isr(void)
276 {
277         __u16 isr;
278 
279         outb(0x0b, 0xa0);
280         isr = inb(0xa0) << 8;
281         outb(0x0b, 0x20);
282         isr |= inb(0x20);
283 
284         return isr;
285 }
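/* Background note (standard 8259A behaviour, assumed rather than
 * documented here): writing OCW3 == 0x0b to a PIC command port selects
 * the In-Service Register for the next read, so the value returned
 * above has the slave PIC's ISR in the high byte and the master's in
 * the low byte. */
#if 0
        /* e.g. test whether CPI `cpi' (a hypothetical CPI number) is
         * currently being serviced */
        if (vic_read_isr() & (1 << (cpi & 7)))
                /* in service */ ;
#endif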
286 
287 static __init void
288 qic_setup(void)
289 {
290         if(!is_cpu_quad()) {
291                 /* not a quad, no setup */
292                 return;
293         }
294         outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
295         outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
296         
297         if(is_cpu_extended()) {
298                 /* the QIC duplicate of the VIC base register */
299                 outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
300                 outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);
301 
302                 /* FIXME: should set up the QIC timer and memory parity
303                  * error vectors here */
304         }
305 }
306 
307 static __init void
308 vic_setup_pic(void)
309 {
310         outb(1, VIC_REDIRECT_REGISTER_1);
311         /* clear the claim registers for dynamic routing */
312         outb(0, VIC_CLAIM_REGISTER_0);
313         outb(0, VIC_CLAIM_REGISTER_1);
314 
315         outb(0, VIC_PRIORITY_REGISTER);
316         /* Set the Primary and Secondary Microchannel vector
317          * bases to be the same as the ordinary interrupts
318          *
319          * FIXME: This would be more efficient using separate
320          * vectors. */
321         outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
322         outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
 323         /* Now initialise the master PIC belonging to this CPU by
324          * sending the four ICWs */
325 
326         /* ICW1: level triggered, ICW4 needed */
327         outb(0x19, 0x20);
328 
329         /* ICW2: vector base */
330         outb(FIRST_EXTERNAL_VECTOR, 0x21);
331 
332         /* ICW3: slave at line 2 */
333         outb(0x04, 0x21);
334 
335         /* ICW4: 8086 mode */
336         outb(0x01, 0x21);
337 
338         /* now the same for the slave PIC */
339 
340         /* ICW1: level trigger, ICW4 needed */
341         outb(0x19, 0xA0);
342 
343         /* ICW2: slave vector base */
344         outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);
345         
346         /* ICW3: slave ID */
347         outb(0x02, 0xA1);
348 
349         /* ICW4: 8086 mode */
350         outb(0x01, 0xA1);
351 }
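/* The sequence above is the standard four-ICW 8259A initialisation; a
 * parameterised sketch (hypothetical helper, shown only to summarise
 * the protocol) might read: */
#if 0
static void init_8259_pair(__u8 master_base, __u8 slave_base)
{
        outb(0x19, 0x20);               /* ICW1: level triggered, ICW4 needed */
        outb(master_base, 0x21);        /* ICW2: master vector base */
        outb(0x04, 0x21);               /* ICW3: slave on IR line 2 */
        outb(0x01, 0x21);               /* ICW4: 8086 mode */

        outb(0x19, 0xA0);               /* and the same for the slave */
        outb(slave_base, 0xA1);         /* ICW2: slave vector base */
        outb(0x02, 0xA1);               /* ICW3: slave ID */
        outb(0x01, 0xA1);               /* ICW4: 8086 mode */
}
#endif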
352 
353 static void
354 do_quad_bootstrap(void)
355 {
356         if(is_cpu_quad() && is_cpu_vic_boot()) {
357                 int i;
358                 unsigned long flags;
359                 __u8 cpuid = hard_smp_processor_id();
360 
361                 local_irq_save(flags);
362 
363                 for(i = 0; i<4; i++) {
364                         /* FIXME: this would be >>3 &0x7 on the 32 way */
365                         if(((cpuid >> 2) & 0x03) == i)
366                                 /* don't lower our own mask! */
367                                 continue;
368 
369                         /* masquerade as local Quad CPU */
370                         outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
371                         /* enable the startup CPI */
372                         outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
373                         /* restore cpu id */
374                         outb(0, QIC_PROCESSOR_ID);
375                 }
376                 local_irq_restore(flags);
377         }
378 }
379 
380 
381 /* Set up all the basic stuff: read the SMP config and make all the
382  * SMP information reflect only the boot cpu.  All others will be
383  * brought on-line later. */
384 void __init 
385 find_smp_config(void)
386 {
387         int i;
388 
389         boot_cpu_id = hard_smp_processor_id();
390 
391         printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
392 
393         /* initialize the CPU structures (moved from smp_boot_cpus) */
394         for(i=0; i<NR_CPUS; i++) {
395                 cpu_irq_affinity[i] = ~0;
396         }
397         cpu_online_map = cpumask_of_cpu(boot_cpu_id);
398 
399         /* The boot CPU must be extended */
400         voyager_extended_vic_processors = 1<<boot_cpu_id;
 401         /* initially, all of the first 8 CPUs can boot */
402         voyager_allowed_boot_processors = 0xff;
403         /* set up everything for just this CPU, we can alter
404          * this as we start the other CPUs later */
405         /* now get the CPU disposition from the extended CMOS */
406         phys_cpu_present_map = cpus_promote(voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK));
407         cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
408         cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
409         cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
410         printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_coerce(phys_cpu_present_map));
411         /* Here we set up the VIC to enable SMP */
412         /* enable the CPIs by writing the base vector to their register */
413         outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
414         outb(1, VIC_REDIRECT_REGISTER_1);
415         /* set the claim registers for static routing --- Boot CPU gets
 416          * all interrupts until all other CPUs are started */
417         outb(0xff, VIC_CLAIM_REGISTER_0);
418         outb(0xff, VIC_CLAIM_REGISTER_1);
419         /* Set the Primary and Secondary Microchannel vector
420          * bases to be the same as the ordinary interrupts
421          *
422          * FIXME: This would be more efficient using separate
423          * vectors. */
424         outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
425         outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
426 
427         /* Finally tell the firmware that we're driving */
428         outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
429              VOYAGER_SUS_IN_CONTROL_PORT);
430 
431         current_thread_info()->cpu = boot_cpu_id;
432 }
433 
434 /*
435  *      The bootstrap kernel entry code has set these up. Save them
436  *      for a given CPU, id is physical */
437 void __init
438 smp_store_cpu_info(int id)
439 {
440         struct cpuinfo_x86 *c=&cpu_data[id];
441 
442         *c = boot_cpu_data;
443 
444         identify_cpu(c);
445 }
446 
447 /* set up the trampoline and return the physical address of the code */
448 static __u32 __init
449 setup_trampoline(void)
450 {
451         /* these two are global symbols in trampoline.S */
452         extern __u8 trampoline_end[];
453         extern __u8 trampoline_data[];
454 
455         memcpy((__u8 *)trampoline_base, trampoline_data,
456                trampoline_end - trampoline_data);
457         return virt_to_phys((__u8 *)trampoline_base);
458 }
459 
460 /* Routine initially called when a non-boot CPU is brought online */
461 int __init
462 start_secondary(void *unused)
463 {
464         __u8 cpuid = hard_smp_processor_id();
465         /* external functions not defined in the headers */
466         extern void calibrate_delay(void);
467         extern int cpu_idle(void);
468 
469         cpu_init();
470 
471         /* OK, we're in the routine */
472         ack_CPI(VIC_CPU_BOOT_CPI);
473 
474         /* setup the 8259 master slave pair belonging to this CPU ---
475          * we won't actually receive any until the boot CPU
 476          * relinquishes its static routing mask */
477         vic_setup_pic();
478 
479         qic_setup();
480 
481         if(is_cpu_quad() && !is_cpu_vic_boot()) {
482                 /* clear the boot CPI */
483                 __u8 dummy;
484 
485                 dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
486                 printk("read dummy %d\n", dummy);
487         }
488 
489         /* lower the mask to receive CPIs */
490         vic_enable_cpi();
491 
492         VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));
493 
494         /* enable interrupts */
495         local_irq_enable();
496 
497         /* get our bogomips */
498         calibrate_delay();
499 
500         /* save our processor parameters */
501         smp_store_cpu_info(cpuid);
502 
503         /* if we're a quad, we may need to bootstrap other CPUs */
504         do_quad_bootstrap();
505 
506         /* FIXME: this is rather a poor hack to prevent the CPU
507          * activating softirqs while it's supposed to be waiting for
508          * permission to proceed.  Without this, the new per CPU stuff
509          * in the softirqs will fail */
510         local_irq_disable();
511         cpu_set(cpuid, cpu_callin_map);
512 
513         /* signal that we're done */
514         cpu_booted_map = 1;
515 
516         while (!cpu_isset(cpuid, smp_commenced_mask))
517                 rep_nop();
518         local_irq_enable();
519 
520         local_flush_tlb();
521 
522         cpu_set(cpuid, cpu_online_map);
523         wmb();
524         return cpu_idle();
525 }
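/* Boot handshake summary (an illustrative trace of the protocol
 * already implemented in this file, not additional logic):
 *
 *   boot CPU (do_boot_cpu)              secondary (start_secondary)
 *   ----------------------              ---------------------------
 *   cpu_booted_map = 0;
 *   send VIC_CPU_BOOT_CPI  -----------> ack_CPI(VIC_CPU_BOOT_CPI);
 *                                       cpu_set(cpuid, cpu_callin_map);
 *                                       cpu_booted_map = 1;
 *   poll cpu_booted_map (or time out);  spin until smp_commenced_mask
 *   cpu_set(cpu, cpu_callout_map);        contains this CPU
 *                                       cpu_set(cpuid, cpu_online_map);
 *                                       cpu_idle();
 */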
526 
527 static struct task_struct * __init
528 fork_by_hand(void)
529 {
530         struct pt_regs regs;
531         /* don't care about the eip and regs settings since we'll
532          * never reschedule the forked task. */
533         return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
534 }
535 
536 
537 /* Routine to kick start the given CPU and wait for it to report ready
538  * (or timeout in startup).  When this routine returns, the requested
539  * CPU is either fully running and configured or known to be dead.
540  *
541  * We call this routine sequentially 1 CPU at a time, so no need for
542  * locking */
543 
544 static void __init
545 do_boot_cpu(__u8 cpu)
546 {
547         struct task_struct *idle;
548         int timeout;
549         unsigned long flags;
550         int quad_boot = (1<<cpu) & voyager_quad_processors 
551                 & ~( voyager_extended_vic_processors
552                      & voyager_allowed_boot_processors);
553 
554         /* For the 486, we can't use the 4Mb page table trick, so
555          * must map a region of memory */
556 #ifdef CONFIG_M486
557         int i;
558         unsigned long *page_table_copies = (unsigned long *)
559                 __get_free_page(GFP_KERNEL);
560 #endif
561         pgd_t orig_swapper_pg_dir0;
562 
563         /* This is an area in head.S which was used to set up the
564          * initial kernel stack.  We need to alter this to give the
565          * booting CPU a new stack (taken from its idle process) */
566         extern struct {
567                 __u8 *esp;
568                 unsigned short ss;
569         } stack_start;
570         /* This is the format of the CPI IDT gate (in real mode) which
571          * we're hijacking to boot the CPU */
572         union   IDTFormat {
573                 struct seg {
574                         __u16   Offset;
575                         __u16   Segment;
576                 } idt;
577                 __u32 val;
578         } hijack_source;
579 
580         __u32 *hijack_vector;
581         __u32 start_phys_address = setup_trampoline();
582 
583         /* There's a clever trick to this: The linux trampoline is
584          * compiled to begin at absolute location zero, so make the
585          * address zero but have the data segment selector compensate
586          * for the actual address */
587         hijack_source.idt.Offset = start_phys_address & 0x000F;
588         hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
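        /* Worked example (illustrative): if setup_trampoline() returned
         * physical address 0x4000, Offset = 0x4000 & 0x000F = 0x0000 and
         * Segment = 0x4000 >> 4 = 0x0400, so the CPU begins real-mode
         * execution at 0400:0000 = 0x0400 * 16 + 0 = 0x4000. */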
589 
590         cpucount++;
591         idle = fork_by_hand();
592         if(IS_ERR(idle))
593                 panic("failed fork for CPU%d", cpu);
594 
595         wake_up_forked_process(idle);
596 
597         init_idle(idle, cpu);
598 
599         idle->thread.eip = (unsigned long) start_secondary;
600         unhash_process(idle);
601         /* init_tasks (in sched.c) is indexed logically */
602 #if 0
603         // for AC kernels
604         stack_start.esp = (THREAD_SIZE + (__u8 *)TSK_TO_KSTACK(idle));
605 #else
606         stack_start.esp = (void *) (1024 + PAGE_SIZE + (char *)idle->thread_info);
607 #endif
608         /* Note: Don't modify initial ss override */
609         VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, 
610                 (unsigned long)hijack_source.val, hijack_source.idt.Segment,
611                 hijack_source.idt.Offset, stack_start.esp));
612         /* set the original swapper_pg_dir[0] to map 0 to 4Mb transparently
 613          * (so that the booting CPU can find start_32) */
614         orig_swapper_pg_dir0 = swapper_pg_dir[0];
615 #ifdef CONFIG_M486
616         if(page_table_copies == NULL)
617                 panic("No free memory for 486 page tables\n");
618         for(i = 0; i < PAGE_SIZE/sizeof(unsigned long); i++)
619                 page_table_copies[i] = (i * PAGE_SIZE) 
620                         | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
621 
622         ((unsigned long *)swapper_pg_dir)[0] = 
623                 ((virt_to_phys(page_table_copies)) & PAGE_MASK)
624                 | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
625 #else
626         ((unsigned long *)swapper_pg_dir)[0] = 0x102007;
627 #endif
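        /* Note (an assumption from the i386 head.S layout of this era,
         * not stated in this file): 0x102007 looks like the physical
         * address of the kernel's low identity-mapping page table
         * (pg0 at 0x102000) or'd with _PAGE_PRESENT | _PAGE_RW |
         * _PAGE_USER (0x007), i.e. PDE 0 is pointed at an existing
         * 0-4Mb identity mapping. */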
628 
629         if(quad_boot) {
630                 printk("CPU %d: non extended Quad boot\n", cpu);
631                 hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4);
632                 *hijack_vector = hijack_source.val;
633         } else {
634                 printk("CPU%d: extended VIC boot\n", cpu);
635                 hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4);
636                 *hijack_vector = hijack_source.val;
637                 /* VIC errata, may also receive interrupt at this address */
638                 hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4);
639                 *hijack_vector = hijack_source.val;
640         }
641         /* All non-boot CPUs start with interrupts fully masked.  Need
642          * to lower the mask of the CPI we're about to send.  We do
643          * this in the VIC by masquerading as the processor we're
644          * about to boot and lowering its interrupt mask */
645         local_irq_save(flags);
646         if(quad_boot) {
647                 send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
648         } else {
649                 outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
650                 /* here we're altering registers belonging to `cpu' */
651                 
652                 outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
653                 /* now go back to our original identity */
654                 outb(boot_cpu_id, VIC_PROCESSOR_ID);
655 
656                 /* and boot the CPU */
657 
658                 send_CPI((1<<cpu), VIC_CPU_BOOT_CPI);
659         }
660         cpu_booted_map = 0;
661         local_irq_restore(flags);
662 
663         /* now wait for it to become ready (or timeout) */
664         for(timeout = 0; timeout < 50000; timeout++) {
665                 if(cpu_booted_map)
666                         break;
667                 udelay(100);
668         }
669         /* reset the page table */
670         swapper_pg_dir[0] = orig_swapper_pg_dir0;
671         local_flush_tlb();
672 #ifdef CONFIG_M486
673         free_page((unsigned long)page_table_copies);
674 #endif
675           
676         if (cpu_booted_map) {
677                 VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
678                         cpu, smp_processor_id()));
679         
680                 printk("CPU%d: ", cpu);
681                 print_cpu_info(&cpu_data[cpu]);
682                 wmb();
683                 cpu_set(cpu, cpu_callout_map);
684         }
685         else {
686                 printk("CPU%d FAILED TO BOOT: ", cpu);
687                 if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5)
688                         printk("Stuck.\n");
689                 else
690                         printk("Not responding.\n");
691                 
692                 cpucount--;
693         }
694 }
695 
696 void __init
697 smp_boot_cpus(void)
698 {
699         int i;
700 
701         /* CAT BUS initialisation must be done after the memory */
702         /* FIXME: The L4 has a catbus too, it just needs to be
703          * accessed in a totally different way */
704         if(voyager_level == 5) {
705                 voyager_cat_init();
706 
707                 /* now that the cat has probed the Voyager System Bus, sanity
708                  * check the cpu map */
709                 if( ((voyager_quad_processors | voyager_extended_vic_processors)
710                      & cpus_coerce(phys_cpu_present_map)) != cpus_coerce(phys_cpu_present_map)) {
711                         /* should panic */
712                         printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
713                 }
714         } else if(voyager_level == 4)
715                 voyager_extended_vic_processors = cpus_coerce(phys_cpu_present_map);
716 
717         /* this sets up the idle task to run on the current cpu */
718         voyager_extended_cpus = 1;
719         /* Remove the global_irq_holder setting, it triggers a BUG() on
720          * schedule at the moment */
721         //global_irq_holder = boot_cpu_id;
722 
723         /* FIXME: Need to do something about this but currently only works
724          * on CPUs with a tsc which none of mine have. 
725         smp_tune_scheduling();
726          */
727         smp_store_cpu_info(boot_cpu_id);
728         printk("CPU%d: ", boot_cpu_id);
729         print_cpu_info(&cpu_data[boot_cpu_id]);
730 
731         if(is_cpu_quad()) {
732                 /* booting on a Quad CPU */
733                 printk("VOYAGER SMP: Boot CPU is Quad\n");
734                 qic_setup();
735                 do_quad_bootstrap();
736         }
737 
738         /* enable our own CPIs */
739         vic_enable_cpi();
740 
741         cpu_set(boot_cpu_id, cpu_online_map);
742         cpu_set(boot_cpu_id, cpu_callout_map);
743         
744         /* loop over all the extended VIC CPUs and boot them.  The 
745          * Quad CPUs must be bootstrapped by their extended VIC cpu */
746         for(i = 0; i < NR_CPUS; i++) {
747                 if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
748                         continue;
749                 do_boot_cpu(i);
750                 /* This udelay seems to be needed for the Quad boots
751                  * don't remove unless you know what you're doing */
752                 udelay(1000);
753         }
 754         /* we could compute the total bogomips here, but why bother?
755          * Code added from smpboot.c */
756         {
757                 unsigned long bogosum = 0;
758                 for (i = 0; i < NR_CPUS; i++)
759                         if (cpu_isset(i, cpu_online_map))
760                                 bogosum += cpu_data[i].loops_per_jiffy;
761                 printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
762                         cpucount+1,
763                         bogosum/(500000/HZ),
764                         (bogosum/(5000/HZ))%100);
765         }
766         voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
767         printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n", voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus);
768         /* that's it, switch to symmetric mode */
769         outb(0, VIC_PRIORITY_REGISTER);
770         outb(0, VIC_CLAIM_REGISTER_0);
771         outb(0, VIC_CLAIM_REGISTER_1);
772         
773         VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
774 }
775 
 776 /* Reload the secondary CPU's task structure (this function does not
 777  * return) */
778 void __init 
779 initialize_secondary(void)
780 {
781 #if 0
782         // AC kernels only
783         set_current(hard_get_current());
784 #endif
785 
786         /*
787          * We don't actually need to load the full TSS,
788          * basically just the stack pointer and the eip.
789          */
790 
791         asm volatile(
792                 "movl %0,%%esp\n\t"
793                 "jmp *%1"
794                 :
795                 :"r" (current->thread.esp),"r" (current->thread.eip));
796 }
797 
798 /* handle a Voyager SYS_INT -- If we don't, the base board will
799  * panic the system.
800  *
801  * System interrupts occur because some problem was detected on the
802  * various busses.  To find out what you have to probe all the
803  * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
804 asmlinkage void
805 smp_vic_sys_interrupt(void)
806 {
807         ack_CPI(VIC_SYS_INT);
808         printk("Voyager SYSTEM INTERRUPT\n");
809 }
810 
811 /* Handle a voyager CMN_INT; These interrupts occur either because of
812  * a system status change or because a single bit memory error
813  * occurred.  FIXME: At the moment, ignore all this. */
814 asmlinkage void
815 smp_vic_cmn_interrupt(void)
816 {
817         static __u8 in_cmn_int = 0;
818         static spinlock_t cmn_int_lock = SPIN_LOCK_UNLOCKED;
819 
820         /* common ints are broadcast, so make sure we only do this once */
821         _raw_spin_lock(&cmn_int_lock);
822         if(in_cmn_int)
823                 goto unlock_end;
824 
825         in_cmn_int++;
826         _raw_spin_unlock(&cmn_int_lock);
827 
828         VDEBUG(("Voyager COMMON INTERRUPT\n"));
829 
830         if(voyager_level == 5)
831                 voyager_cat_do_common_interrupt();
832 
833         _raw_spin_lock(&cmn_int_lock);
834         in_cmn_int = 0;
835  unlock_end:
836         _raw_spin_unlock(&cmn_int_lock);
837         ack_CPI(VIC_CMN_INT);
838 }
839 
840 /*
841  * Reschedule call back. Nothing to do, all the work is done
842  * automatically when we return from the interrupt.  */
843 asmlinkage void
844 smp_reschedule_interrupt(void)
845 {
846         /* do nothing */
847 }
848 
849 static struct mm_struct * flush_mm;
850 static unsigned long flush_va;
851 static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
852 #define FLUSH_ALL       0xffffffff
853 
854 /*
855  * We cannot call mmdrop() because we are in interrupt context, 
856  * instead update mm->cpu_vm_mask.
857  *
858  * We need to reload %cr3 since the page tables may be going
859  * away from under us..
860  */
861 static inline void
862 leave_mm (unsigned long cpu)
863 {
864         if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
865                 BUG();
866         cpu_clear(cpu,  cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
867         load_cr3(swapper_pg_dir);
868 }
869 
870 
871 /*
872  * Invalidate call-back
873  */
874 asmlinkage void 
875 smp_invalidate_interrupt(void)
876 {
877         __u8 cpu = smp_processor_id();
878 
879         if (!test_bit(cpu, &smp_invalidate_needed))
880                 return;
881         /* This will flood messages.  Don't uncomment unless you see
 882          * problems with cross cpu invalidation
883         VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
884                 smp_processor_id()));
885         */
886 
887         if (flush_mm == cpu_tlbstate[cpu].active_mm) {
888                 if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
889                         if (flush_va == FLUSH_ALL)
890                                 local_flush_tlb();
891                         else
892                                 __flush_tlb_one(flush_va);
893                 } else
894                         leave_mm(cpu);
895         }
896         smp_mb__before_clear_bit();
897         clear_bit(cpu, &smp_invalidate_needed);
898         smp_mb__after_clear_bit();
899 }
900 
901 /* All the new flush operations for 2.4 */
902 
903 
904 /* This routine is called with a physical cpu mask */
905 static void
906 flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
907                                                 unsigned long va)
908 {
909         int stuck = 50000;
910 
911         if (!cpumask)
912                 BUG();
913         if ((cpumask & cpus_coerce(cpu_online_map)) != cpumask)
914                 BUG();
915         if (cpumask & (1 << smp_processor_id()))
916                 BUG();
917         if (!mm)
918                 BUG();
919 
920         spin_lock(&tlbstate_lock);
921         
922         flush_mm = mm;
923         flush_va = va;
924         atomic_set_mask(cpumask, &smp_invalidate_needed);
925         /*
926          * We have to send the CPI only to
927          * CPUs affected.
928          */
929         send_CPI(cpumask, VIC_INVALIDATE_CPI);
930 
931         while (smp_invalidate_needed) {
932                 mb();
933                 if(--stuck == 0) {
934                         printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id());
935                         break;
936                 }
937         }
938 
939         /* Uncomment only to debug invalidation problems
940         VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
941         */
942 
943         flush_mm = NULL;
944         flush_va = 0;
945         spin_unlock(&tlbstate_lock);
946 }
947 
948 void
949 flush_tlb_current_task(void)
950 {
951         struct mm_struct *mm = current->mm;
952         unsigned long cpu_mask;
953 
954         preempt_disable();
955 
956         cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
957         local_flush_tlb();
958         if (cpu_mask)
959                 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
960 
961         preempt_enable();
962 }
963 
964 
965 void
966 flush_tlb_mm (struct mm_struct * mm)
967 {
968         unsigned long cpu_mask;
969 
970         preempt_disable();
971 
972         cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
973 
974         if (current->active_mm == mm) {
975                 if (current->mm)
976                         local_flush_tlb();
977                 else
978                         leave_mm(smp_processor_id());
979         }
980         if (cpu_mask)
981                 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
982 
983         preempt_enable();
984 }
985 
986 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
987 {
988         struct mm_struct *mm = vma->vm_mm;
989         unsigned long cpu_mask;
990 
991         preempt_disable();
992 
993         cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
994         if (current->active_mm == mm) {
995                 if(current->mm)
996                         __flush_tlb_one(va);
997                  else
998                         leave_mm(smp_processor_id());
999         }
1000 
1001         if (cpu_mask)
1002                 flush_tlb_others(cpu_mask, mm, va);
1003 
1004         preempt_enable();
1005 }
1006 
1007 /* enable the requested IRQs */
1008 asmlinkage void
1009 smp_enable_irq_interrupt(void)
1010 {
1011         __u8 irq;
1012         __u8 cpu = get_cpu();
1013 
1014         VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
1015                vic_irq_enable_mask[cpu]));
1016 
1017         spin_lock(&vic_irq_lock);
1018         for(irq = 0; irq < 16; irq++) {
1019                 if(vic_irq_enable_mask[cpu] & (1<<irq))
1020                         enable_local_vic_irq(irq);
1021         }
1022         vic_irq_enable_mask[cpu] = 0;
1023         spin_unlock(&vic_irq_lock);
1024 
1025         put_cpu_no_resched();
1026 }
1027         
1028 /*
1029  *      CPU halt call-back
1030  */
1031 static void
1032 smp_stop_cpu_function(void *dummy)
1033 {
1034         VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
1035         cpu_clear(smp_processor_id(), cpu_online_map);
1036         local_irq_disable();
1037         for(;;)
1038                __asm__("hlt");
1039 }
1040 
1041 static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
1042 
1043 struct call_data_struct {
1044         void (*func) (void *info);
1045         void *info;
1046         volatile unsigned long started;
1047         volatile unsigned long finished;
1048         int wait;
1049 };
1050 
1051 static struct call_data_struct * call_data;
1052 
1053 /* execute a thread on a new CPU.  The function to be called must be
1054  * previously set up.  This is used to schedule a function for
1055  * execution on all CPUs - set up the function then broadcast a
1056  * function_interrupt CPI to come here on each CPU */
1057 asmlinkage void
1058 smp_call_function_interrupt(void)
1059 {
1060         void (*func) (void *info) = call_data->func;
1061         void *info = call_data->info;
1062         /* must take copy of wait because call_data may be replaced
1063          * unless the function is waiting for us to finish */
1064         int wait = call_data->wait;
1065         __u8 cpu = smp_processor_id();
1066 
1067         /*
1068          * Notify initiating CPU that I've grabbed the data and am
1069          * about to execute the function
1070          */
1071         mb();
1072         if(!test_and_clear_bit(cpu, &call_data->started)) {
1073                 /* If the bit wasn't set, this could be a replay */
1074                 printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function with no call pending\n", cpu);
1075                 return;
1076         }
1077         /*
1078          * At this point the info structure may be out of scope unless wait==1
1079          */
1080         irq_enter();
1081         (*func)(info);
1082         irq_exit();
1083         if (wait) {
1084                 mb();
1085                 clear_bit(cpu, &call_data->finished);
1086         }
1087 }
1088 
1089 /* Call this function on all CPUs using the function_interrupt above 
1090     <func> The function to run. This must be fast and non-blocking.
1091     <info> An arbitrary pointer to pass to the function.
1092     <retry> If true, keep retrying until ready.
1093     <wait> If true, wait until function has completed on other CPUs.
1094     [RETURNS] 0 on success, else a negative status code. Does not return until
1095     remote CPUs are nearly ready to execute <<func>> or have already executed it.
1096 */
1097 int
1098 smp_call_function (void (*func) (void *info), void *info, int retry,
1099                    int wait)
1100 {
1101         struct call_data_struct data;
1102         __u32 mask = cpus_coerce(cpu_online_map);
1103 
1104         mask &= ~(1<<smp_processor_id());
1105 
1106         if (!mask)
1107                 return 0;
1108 
1109         data.func = func;
1110         data.info = info;
1111         data.started = mask;
1112         data.wait = wait;
1113         if (wait)
1114                 data.finished = mask;
1115 
1116         spin_lock(&call_lock);
1117         call_data = &data;
1118         wmb();
1119         /* Send a message to all other CPUs and wait for them to respond */
1120         send_CPI_allbutself(VIC_CALL_FUNCTION_CPI);
1121 
1122         /* Wait for response */
1123         while (data.started)
1124                 barrier();
1125 
1126         if (wait)
1127                 while (data.finished)
1128                         barrier();
1129 
1130         spin_unlock(&call_lock);
1131 
1132         return 0;
1133 }
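/* Usage sketch (hypothetical caller, for illustration only): */
#if 0
static void say_hello(void *info)
{
        printk("CPU%d says hello\n", smp_processor_id());
}

static void greet_everyone(void)
{
        /* run on every other online CPU; don't retry, wait for completion */
        smp_call_function(say_hello, NULL, 0, 1);
}
#endif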
1134 
1135 /* Sorry about the name.  In an APIC based system, the APICs
1136  * themselves are programmed to send a timer interrupt.  This is used
1137  * by linux to reschedule the processor.  Voyager doesn't have this,
1138  * so we use the system clock to interrupt one processor, which in
1139  * turn, broadcasts a timer CPI to all the others --- we receive that
1140  * CPI here.  We don't actually use this for counting, so losing
1141  * ticks doesn't matter
1142  *
1143  * FIXME: For those CPU's which actually have a local APIC, we could
1144  * try to use it to trigger this interrupt instead of having to
1145  * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
1146  * no local APIC, so I can't do this
1147  *
1148  * This function is currently a placeholder and is unused in the code */
1149 asmlinkage void 
1150 smp_apic_timer_interrupt(struct pt_regs regs)
1151 {
1152         wrapper_smp_local_timer_interrupt(&regs);
1153 }
1154 
1155 /* All of the QUAD interrupt GATES */
1156 asmlinkage void
1157 smp_qic_timer_interrupt(struct pt_regs regs)
1158 {
1159         ack_QIC_CPI(QIC_TIMER_CPI);
1160         wrapper_smp_local_timer_interrupt(&regs);
1161 }
1162 
1163 asmlinkage void
1164 smp_qic_invalidate_interrupt(void)
1165 {
1166         ack_QIC_CPI(QIC_INVALIDATE_CPI);
1167         smp_invalidate_interrupt();
1168 }
1169 
1170 asmlinkage void
1171 smp_qic_reschedule_interrupt(void)
1172 {
1173         ack_QIC_CPI(QIC_RESCHEDULE_CPI);
1174         smp_reschedule_interrupt();
1175 }
1176 
1177 asmlinkage void
1178 smp_qic_enable_irq_interrupt(void)
1179 {
1180         ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
1181         smp_enable_irq_interrupt();
1182 }
1183 
1184 asmlinkage void
1185 smp_qic_call_function_interrupt(void)
1186 {
1187         ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
1188         smp_call_function_interrupt();
1189 }
1190 
1191 asmlinkage void
1192 smp_vic_cpi_interrupt(struct pt_regs regs)
1193 {
1194         __u8 cpu = smp_processor_id();
1195 
1196         if(is_cpu_quad())
1197                 ack_QIC_CPI(VIC_CPI_LEVEL0);
1198         else
1199                 ack_VIC_CPI(VIC_CPI_LEVEL0);
1200 
1201         if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
1202                 wrapper_smp_local_timer_interrupt(&regs);
1203         if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
1204                 smp_invalidate_interrupt();
1205         if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
1206                 smp_reschedule_interrupt();
1207         if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
1208                 smp_enable_irq_interrupt();
1209         if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
1210                 smp_call_function_interrupt();
1211 }
1212 
1213 static void
1214 do_flush_tlb_all(void* info)
1215 {
1216         unsigned long cpu = smp_processor_id();
1217 
1218         __flush_tlb_all();
1219         if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
1220                 leave_mm(cpu);
1221 }
1222 
1223 
1224 /* flush the TLB of every active CPU in the system */
1225 void
1226 flush_tlb_all(void)
1227 {
1228         on_each_cpu(do_flush_tlb_all, 0, 1, 1);
1229 }
1230 
1231 /* used to set up the trampoline for other CPUs when the memory manager
1232  * is sorted out */
1233 void __init
1234 smp_alloc_memory(void)
1235 {
1236         trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE);
1237         if(__pa(trampoline_base) >= 0x93000)
1238                 BUG();
1239 }
1240 
1241 /* send a reschedule CPI to one CPU by physical CPU number*/
1242 void
1243 smp_send_reschedule(int cpu)
1244 {
1245         send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
1246 }
1247 
1248 
1249 int
1250 hard_smp_processor_id(void)
1251 {
1252         __u8 i;
1253         __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
1254         if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
1255                 return cpumask & 0x1F;
1256 
1257         for(i = 0; i < 8; i++) {
1258                 if(cpumask & (1<<i))
1259                         return i;
1260         }
1261         printk("** WARNING ** Illegal cpuid returned by VIC: %d\n", cpumask);
1262         return 0;
1263 }
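/* Decoding examples (illustrative): on a Quad processor the register
 * carries the QUAD_IDENTIFIER bits plus the CPU number in the low five
 * bits, so the value is returned masked with 0x1F; on a VIC-booted
 * processor the register is a one-hot mask, so a reading of 0x08 (bit
 * 3 set) makes the loop above return CPU 3. */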
1264 
1265 /* broadcast a halt to all other CPUs */
1266 void
1267 smp_send_stop(void)
1268 {
1269         smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
1270 }
1271 
1272 /* this function is triggered in time.c when a clock tick fires
1273  * we need to re-broadcast the tick to all CPUs */
1274 void
1275 smp_vic_timer_interrupt(struct pt_regs *regs)
1276 {
1277         send_CPI_allbutself(VIC_TIMER_CPI);
1278         smp_local_timer_interrupt(regs);
1279 }
1280 
1281 static inline void
1282 wrapper_smp_local_timer_interrupt(struct pt_regs *regs)
1283 {
1284         irq_enter();
1285         smp_local_timer_interrupt(regs);
1286         irq_exit();
1287 }
1288 
1289 /* local (per CPU) timer interrupt.  It does both profiling and
1290  * process statistics/rescheduling.
1291  *
1292  * We do profiling in every local tick, statistics/rescheduling
1293  * happen only every 'profiling multiplier' ticks. The default
1294  * multiplier is 1 and it can be changed by writing the new multiplier
1295  * value into /proc/profile.
1296  */
1297 void
1298 smp_local_timer_interrupt(struct pt_regs * regs)
1299 {
1300         int cpu = smp_processor_id();
1301         long weight;
1302 
1303         x86_do_profile(regs);
1304 
1305         if (--per_cpu(prof_counter, cpu) <= 0) {
1306                 /*
1307                  * The multiplier may have changed since the last time we got
1308                  * to this point as a result of the user writing to
1309                  * /proc/profile. In this case we need to adjust the APIC
1310                  * timer accordingly.
1311                  *
1312                  * Interrupts are already masked off at this point.
1313                  */
1314                 per_cpu(prof_counter,cpu) = per_cpu(prof_multiplier, cpu);
1315                 if (per_cpu(prof_counter, cpu) !=
1316                                         per_cpu(prof_old_multiplier, cpu)) {
1317                         /* FIXME: need to update the vic timer tick here */
1318                         per_cpu(prof_old_multiplier, cpu) =
1319                                                 per_cpu(prof_counter, cpu);
1320                 }
1321 
1322                 update_process_times(user_mode(regs));
1323         }
1324 
1325         if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
1326                 /* only extended VIC processors participate in
1327                  * interrupt distribution */
1328                 return;
1329 
1330         /*
1331          * We take the 'long' return path, and there every subsystem
1332          * grabs the appropriate locks (kernel lock/ irq lock).
1333          *
1334          * we might want to decouple profiling from the 'long path',
1335          * and do the profiling totally in assembly.
1336          *
1337          * Currently this isn't too much of an issue (performance wise),
1338          * we can take more than 100K local irqs per second on a 100 MHz P5.
1339          */
1340 
1341         if((++vic_tick[cpu] & 0x7) != 0)
1342                 return;
1343         /* get here every 16 ticks (about every 1/6 of a second) */
1344 
1345         /* Change our priority to give someone else a chance at getting
1346          * the IRQ. The algorithm goes like this:
1347          *
1348          * In the VIC, the dynamically routed interrupt is always
1349          * handled by the lowest priority eligible (i.e. receiving
1350          * interrupts) CPU.  If >1 eligible CPUs are equal lowest, the
1351          * lowest processor number gets it.
1352          *
1353          * The priority of a CPU is controlled by a special per-CPU
1354          * VIC priority register which is 3 bits wide, 0 being lowest
1355          * and 7 highest priority.
1356          *
1357          * Therefore we subtract the average number of interrupts from
1358          * the number we've fielded.  If this number is negative, we
1359          * lower the activity count and if it is positive, we raise
1360          * it.
1361          *
1362          * I'm afraid this still leads to odd looking interrupt counts:
1363          * the totals are all roughly equal, but the individual ones
1364          * look rather skewed.
1365          *
1366          * FIXME: This algorithm is total crap when mixed with SMP
1367          * affinity code since we now try to even up the interrupt
1368          * counts when an affinity binding is keeping them on a
1369          * particular CPU*/
1370         weight = (vic_intr_count[cpu]*voyager_extended_cpus
1371                   - vic_intr_total) >> 4;
1372         weight += 4;
1373         if(weight > 7)
1374                 weight = 7;
1375         if(weight < 0)
1376                 weight = 0;
1377         
1378         outb((__u8)weight, VIC_PRIORITY_REGISTER);
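        /* Worked example (illustrative): with 4 extended CPUs, a CPU
         * that has fielded 1000 of 3600 total interrupts computes
         * (1000*4 - 3600) >> 4 = 25, which after the +4 offset clamps
         * to 7: it advertises the highest priority, so the VIC routes
         * new dynamic interrupts to the other (lower priority) CPUs. */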
1379 
1380 #ifdef VOYAGER_DEBUG
1381         if((vic_tick[cpu] & 0xFFF) == 0) {
1382                 /* print this message roughly every 25 secs */
1383                 printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
1384                        cpu, vic_tick[cpu], weight);
1385         }
1386 #endif
1387 }
1388 
1389 /* setup the profiling timer */
1390 int 
1391 setup_profiling_timer(unsigned int multiplier)
1392 {
1393         int i;
1394 
1395         if (!multiplier)
1396                 return -EINVAL;
1397 
1398         /* 
1399          * Set the new multiplier for each CPU. CPUs don't start using the
1400          * new values until the next timer interrupt in which they do process
1401          * accounting.
1402          */
1403         for (i = 0; i < NR_CPUS; ++i)
1404                 per_cpu(prof_multiplier, i) = multiplier;
1405 
1406         return 0;
1407 }
1408 
1409 
1410 /*  The CPIs are handled in the per cpu 8259s, so they must be
1411  *  enabled to be received: FIX: enabling the CPIs in the early
1412  *  boot sequence interferes with bug checking; enable them later
1413  *  on in smp_init */
1414 #define VIC_SET_GATE(cpi, vector) \
1415         set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
1416 #define QIC_SET_GATE(cpi, vector) \
1417         set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
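/* e.g. VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt) expands to
 * set_intr_gate(VIC_CPI_LEVEL0 + VIC_DEFAULT_CPI_BASE, vic_cpi_interrupt),
 * installing the handler at the CPI's real IDT vector */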
1418 
1419 void __init
1420 smp_intr_init(void)
1421 {
1422         int i;
1423 
1424         /* initialize the per cpu irq mask to all disabled */
1425         for(i = 0; i < NR_CPUS; i++)
1426                 vic_irq_mask[i] = 0xFFFF;
1427 
1428         VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
1429 
1430         VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
1431         VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);
1432 
1433         QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
1434         QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
1435         QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
1436         QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
1437         QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);
1438         
1439 
1440         /* now put the VIC descriptor into the first 48 IRQs 
1441          *
1442          * This is for later: first 16 correspond to PC IRQs; next 16
1443          * are Primary MC IRQs and final 16 are Secondary MC IRQs */
1444         for(i = 0; i < 48; i++)
1445                 irq_desc[i].handler = &vic_irq_type;
1446 }
1447 
1448 /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
1449  * processor to receive CPI */
1450 static void
1451 send_CPI(__u32 cpuset, __u8 cpi)
1452 {
1453         int cpu;
1454         __u32 quad_cpuset = (cpuset & voyager_quad_processors);
1455 
1456         if(cpi < VIC_START_FAKE_CPI) {
1457                 /* fake CPIs are only used for booting, so send to the
1458                  * extended quads as well---Quads must be VIC booted */
1459                 outb((__u8)(cpuset), VIC_CPI_Registers[cpi]);
1460                 return;
1461         }
1462         if(quad_cpuset)
1463                 send_QIC_CPI(quad_cpuset, cpi);
1464         cpuset &= ~quad_cpuset;
1465         cpuset &= 0xff;         /* only first 8 CPUs valid for VIC CPI */
1466         if(cpuset == 0)
1467                 return;
1468         for_each_cpu(cpu, cpu_online_map) {
1469                 if(cpuset & (1<<cpu))
1470                         set_bit(cpi, &vic_cpi_mailbox[cpu]);
1471         }
1472         if(cpuset)
1473                 outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
1474 }
1475 
1476 /* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
1477  * set the cache line to shared by reading it.
1478  *
1479  * DON'T make this inline otherwise the cache line read will be
1480  * optimised away
1481  * */
1482 static int
1483 ack_QIC_CPI(__u8 cpi) {
1484         __u8 cpu = hard_smp_processor_id();
1485 
1486         cpi &= 7;
1487 
1488         outb(1<<cpi, QIC_INTERRUPT_CLEAR1);
1489         return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
1490 }
1491 
1492 static void
1493 ack_special_QIC_CPI(__u8 cpi)
1494 {
1495         switch(cpi) {
1496         case VIC_CMN_INT:
1497                 outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
1498                 break;
1499         case VIC_SYS_INT:
1500                 outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
1501                 break;
1502         }
1503         /* also clear at the VIC, just in case (nop for non-extended proc) */
1504         ack_VIC_CPI(cpi);
1505 }
1506 
1507 /* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
1508 static void
1509 ack_VIC_CPI(__u8 cpi)
1510 {
1511 #ifdef VOYAGER_DEBUG
1512         unsigned long flags;
1513         __u16 isr;
1514         __u8 cpu = smp_processor_id();
1515 
1516         local_irq_save(flags);
1517         isr = vic_read_isr();
1518         if((isr & (1<<(cpi &7))) == 0) {
1519                 printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
1520         }
1521 #endif
1522         /* send specific EOI; the two system interrupts have
1523          * bit 4 set for a separate vector but behave as the
1524          * corresponding 3 bit intr */
1525         outb_p(0x60|(cpi & 7),0x20);
1526 
1527 #ifdef VOYAGER_DEBUG
1528         if((vic_read_isr() & (1<<(cpi &7))) != 0) {
1529                 printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
1530         }
1531         local_irq_restore(flags);
1532 #endif
1533 }
1534 
1535 /* cribbed with thanks from irq.c */
1536 #define __byte(x,y)     (((unsigned char *)&(y))[x])
1537 #define cached_21(cpu)  (__byte(0,vic_irq_mask[cpu]))
1538 #define cached_A1(cpu)  (__byte(1,vic_irq_mask[cpu]))
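
/* Illustrative expansion (not in the original source): on
 * little-endian i386 these macros pick out the master and slave 8259
 * halves of the 16-bit per-CPU mask.  For example, with
 * vic_irq_mask[cpu] == 0xFBFE:
 *
 *	cached_21(cpu) == 0xFE	(master PIC mask, IRQs 0-7)
 *	cached_A1(cpu) == 0xFB	(slave PIC mask, IRQs 8-15)
 */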
1539 
1540 static unsigned int
1541 startup_vic_irq(unsigned int irq)
1542 {
1543         enable_vic_irq(irq);
1544 
1545         return 0;
1546 }
1547 
1548 /* The enable and disable routines.  This is where we run into
1549  * conflicting architectural philosophy.  Fundamentally, the voyager
1550  * architecture does not expect to have to disable interrupts globally
1551  * (the IRQ controllers belong to each CPU).  The processor masquerade
1552  * which is used to start the system shouldn't be used in a running OS
1553  * since it will cause great confusion if two separate CPUs drive the
1554  * same IRQ controller (I know, I've tried it).
1555  *
1556  * The solution is a variant on the NCR lazy SPL design:
1557  *
1558  * 1) To disable an interrupt, do nothing (other than set the
1559  *    IRQ_DISABLED flag).  This dares the interrupt actually to arrive.
1560  *
1561  * 2) If the interrupt dares to come in, raise the local mask against
1562  *    it (this will result in all the CPU masks being raised
1563  *    eventually).
1564  *
1565  * 3) To enable the interrupt, lower the mask on the local CPU and
1566  *    broadcast an Interrupt enable CPI which causes all other CPUs to
1567  *    adjust their masks accordingly.  */
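
/* Trace of the lazy scheme above (illustrative sketch):
 *
 *	disable_irq(i)		core code sets IRQ_DISABLED; the VIC
 *				mask is left untouched (step 1)
 *	irq i fires on CPU n	before_handle_vic_irq() sees
 *				IRQ_DISABLED, masks it locally and
 *				acks it away (step 2)
 *	enable_irq(i)		enable_vic_irq() lowers the local mask
 *				and sends VIC_ENABLE_IRQ_CPI so the
 *				other CPUs lower theirs too (step 3)
 */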
1568 
1569 static void
1570 enable_vic_irq(unsigned int irq)
1571 {
1572         /* linux doesn't do processor-irq affinity, so enable on
1573          * all CPUs we know about */
1574         int cpu = smp_processor_id(), real_cpu;
1575         __u16 mask = (1<<irq);
1576         __u32 processorList = 0;
1577         unsigned long flags;
1578 
1579         VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n",
1580                 irq, cpu, cpu_irq_affinity[cpu]));
1581         spin_lock_irqsave(&vic_irq_lock, flags);
1582         for_each_cpu(real_cpu, cpu_online_map) {
1583                 if(!(voyager_extended_vic_processors & (1<<real_cpu)))
1584                         continue;
1585                 if(!(cpu_irq_affinity[real_cpu] & mask)) {
1586                         /* irq has no affinity for this CPU, ignore */
1587                         continue;
1588                 }
1589                 if(real_cpu == cpu) {
1590                         enable_local_vic_irq(irq);
1591                 }
1592                 else if(vic_irq_mask[real_cpu] & mask) {
1593                         vic_irq_enable_mask[real_cpu] |= mask;
1594                         processorList |= (1<<real_cpu);
1595                 }
1596         }
1597         spin_unlock_irqrestore(&vic_irq_lock, flags);
1598         if(processorList)
1599                 send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
1600 }
1601 
1602 static void
1603 disable_vic_irq(unsigned int irq)
1604 {
1605         /* lazy disable, do nothing */
1606 }
1607 
1608 static void
1609 enable_local_vic_irq(unsigned int irq)
1610 {
1611         __u8 cpu = smp_processor_id();
1612         __u16 mask = ~(1 << irq);
1613         __u16 old_mask = vic_irq_mask[cpu];
1614 
1615         vic_irq_mask[cpu] &= mask;
1616         if(vic_irq_mask[cpu] == old_mask)
1617                 return;
1618 
1619         VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
1620                 irq, cpu));
1621 
1622         if (irq & 8) {
1623                 outb_p(cached_A1(cpu),0xA1);
1624                 (void)inb_p(0xA1);
1625         }
1626         else {
1627                 outb_p(cached_21(cpu),0x21);
1628                 (void)inb_p(0x21);
1629         }
1630 }
1631 
1632 static void
1633 disable_local_vic_irq(unsigned int irq)
1634 {
1635         __u8 cpu = smp_processor_id();
1636         __u16 mask = (1 << irq);
1637         __u16 old_mask = vic_irq_mask[cpu];
1638 
1639         if(irq == 7)
1640                 return;
1641 
1642         vic_irq_mask[cpu] |= mask;
1643         if(old_mask == vic_irq_mask[cpu])
1644                 return;
1645 
1646         VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
1647                 irq, cpu));
1648 
1649         if (irq & 8) {
1650                 outb_p(cached_A1(cpu),0xA1);
1651                 (void)inb_p(0xA1);
1652         }
1653         else {
1654                 outb_p(cached_21(cpu),0x21);
1655                 (void)inb_p(0x21);
1656         }
1657 }
1658 
1659 /* The VIC is level triggered, so the ack can only be issued after the
1660  * interrupt completes.  However, we do Voyager lazy interrupt
1661  * handling here: It is an extremely expensive operation to mask an
1662  * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
1663  * this interrupt actually comes in, then we mask and ack here to push
1664  * the interrupt off to another CPU */
1665 static void
1666 before_handle_vic_irq(unsigned int irq)
1667 {
1668         irq_desc_t *desc = irq_desc + irq;
1669         __u8 cpu = smp_processor_id();
1670 
1671         _raw_spin_lock(&vic_irq_lock);
1672         vic_intr_total++;
1673         vic_intr_count[cpu]++;
1674 
1675         if(!(cpu_irq_affinity[cpu] & (1<<irq))) {
1676                 /* The irq is not in our affinity mask, push it off
1677                  * onto another CPU */
1678                 VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n",
1679                         irq, cpu));
1680                 disable_local_vic_irq(irq);
1681                 /* set IRQ_INPROGRESS to prevent the handler in irq.c from
1682                  * actually calling the interrupt routine */
1683                 desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
1684         } else if(desc->status & IRQ_DISABLED) {
1685                 /* Damn, the interrupt actually arrived, do the lazy
1686                  * disable thing. The interrupt routine in irq.c will
1687                  * not handle a IRQ_DISABLED interrupt, so nothing more
1688                  * need be done here */
1689                 VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
1690                         irq, cpu));
1691                 disable_local_vic_irq(irq);
1692                 desc->status |= IRQ_REPLAY;
1693         } else {
1694                 desc->status &= ~IRQ_REPLAY;
1695         }
1696 
1697         _raw_spin_unlock(&vic_irq_lock);
1698 }
1699 
1700 /* Finish the VIC interrupt: basically mask */
1701 static void
1702 after_handle_vic_irq(unsigned int irq)
1703 {
1704         irq_desc_t *desc = irq_desc + irq;
1705 
1706         _raw_spin_lock(&vic_irq_lock);
1707         {
1708                 unsigned int status = desc->status & ~IRQ_INPROGRESS;
1709 #ifdef VOYAGER_DEBUG
1710                 __u16 isr;
1711 #endif
1712 
1713                 desc->status = status;
1714                 if ((status & IRQ_DISABLED))
1715                         disable_local_vic_irq(irq);
1716 #ifdef VOYAGER_DEBUG
1717                 /* DEBUG: before we ack, check what's in progress */
1718                 isr = vic_read_isr();
1719                 if(!(isr & (1<<irq)) && !(status & IRQ_REPLAY)) {
1720                         /* ISR bit clear and no replay pending:
1721                          * the interrupt really was lost */
1722                         __u8 cpu = smp_processor_id();
1723                         __u8 real_cpu;
1724 
1725                         printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
1726                                cpu, irq);
1727                         for_each_cpu(real_cpu, cpu_online_map) {
1728 
1729                                 outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
1730                                      VIC_PROCESSOR_ID);
1731                                 isr = vic_read_isr();
1732                                 if(isr & (1<<irq)) {
1733                                         printk("VOYAGER SMP: CPU%d ack irq %d\n",
1734                                                real_cpu, irq);
1735                                         ack_vic_irq(irq);
1736                                 }
1737                                 outb(cpu, VIC_PROCESSOR_ID);
1738                         }
1739                 }
1740 #endif /* VOYAGER_DEBUG */
1741                 /* as soon as we ack, the interrupt is eligible for
1742                  * receipt by another CPU so everything must be in
1743                  * order here  */
1744                 ack_vic_irq(irq);
1745                 if(status & IRQ_REPLAY) {
1746                         /* replay is set if we disable the interrupt
1747                          * in the before_handle_vic_irq() routine, so
1748                          * clear the in progress bit here to allow the
1749                          * next CPU to handle this correctly */
1750                         desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
1751                 }
1752 #ifdef VOYAGER_DEBUG
1753                 isr = vic_read_isr();
1754                 if((isr & (1<<irq)) != 0)
1755                         printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n",
1756                                irq, isr);
1757 #endif /* VOYAGER_DEBUG */
1758         }
1759         _raw_spin_unlock(&vic_irq_lock);
1760 
1761         /* All code after this point is out of the main path - the IRQ
1762          * may be intercepted by another CPU if reasserted */
1763 }
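
/* Status-flag lifecycle for the two hooks above (summary sketch):
 *
 *	before_handle_vic_irq():  wrong affinity -> set IRQ_REPLAY and
 *				  IRQ_INPROGRESS (handler skipped) and
 *				  mask locally;
 *				  IRQ_DISABLED   -> set IRQ_REPLAY and
 *				  mask locally
 *	after_handle_vic_irq():   mask again if still IRQ_DISABLED,
 *				  ack at the VIC, then clear
 *				  IRQ_REPLAY|IRQ_INPROGRESS so another
 *				  CPU may take the next assertion
 */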
1764 
1765 
1766 /* Linux processor - interrupt affinity manipulations.
1767  *
1768  * For each processor, we maintain a 32 bit irq affinity mask.
1769  * Initially it is set to all 1's so every processor accepts every
1770  * interrupt.  In this call, we change the processor's affinity mask:
1771  *
1772  * Change from enable to disable:
1773  *
1774  * If the interrupt ever comes in to the processor, we will disable it
1775  * and ack it to push it off to another CPU, so just accept the mask here.
1776  *
1777  * Change from disable to enable:
1778  *
1779  * change the mask and then do an interrupt enable CPI to re-enable on
1780  * the selected processors */
1781 
1782 void
1783 set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
1784 {
1785         /* Only extended processors handle interrupts */
1786         unsigned long real_mask;
1787         unsigned long irq_mask = 1 << irq;
1788         int cpu;
1789 
1790         real_mask = cpus_coerce(mask) & voyager_extended_vic_processors;
1791         
1792         if(cpus_coerce(mask) == 0)
1793                 /* can't have no cpus to accept the interrupt -- extremely
1794                  * bad things will happen */
1795                 return;
1796 
1797         if(irq == 0)
1798                 /* can't change the affinity of the timer IRQ.  This
1799                  * is due to the constraint in the voyager
1800                  * architecture that the CPI also comes in on an IRQ
1801                  * line and we have chosen IRQ0 for this.  If you
1802                  * raise the mask on this interrupt, the processor
1803                  * will no longer be able to accept VIC CPIs */
1804                 return;
1805 
1806         if(irq >= 32) 
1807                 /* You can only have 32 interrupts in a voyager system
1808                  * (and 32 only if you have a secondary microchannel
1809                  * bus) */
1810                 return;
1811 
1812         for_each_cpu(cpu, cpu_online_map) {
1813                 unsigned long cpu_mask = 1 << cpu;
1814                 
1815                 if(cpu_mask & real_mask) {
1816                         /* enable the interrupt for this cpu */
1817                         cpu_irq_affinity[cpu] |= irq_mask;
1818                 } else {
1819                         /* disable the interrupt for this cpu */
1820                         cpu_irq_affinity[cpu] &= ~irq_mask;
1821                 }
1822         }
1823         /* this is magic: we now have the correct affinity maps, so
1824          * enable the interrupt.  This will send an enable CPI to
1825          * those cpus that need to enable it in their local masks,
1826          * causing them to correct for the new affinity.  If the
1827          * interrupt is currently globally disabled, it will simply be
1828          * disabled again as it comes in (voyager lazy disable).  If
1829          * the affinity map is tightened to disable the interrupt on a
1830          * cpu, it will be pushed off when it comes in */
1831         enable_vic_irq(irq);
1832 }
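
/* Illustrative usage (hypothetical caller): steer irq 14 to CPU 2
 * only.  cpumask_of_cpu() is assumed here purely for illustration.
 *
 *	set_vic_irq_affinity(14, cpumask_of_cpu(2));
 *
 * Per the guards above this would be a no-op for irq 0, irq >= 32,
 * or an empty mask.
 */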
1833 
1834 static void
1835 ack_vic_irq(unsigned int irq)
1836 {
1837         if (irq & 8) {
1838                 outb(0x62,0x20);        /* Specific EOI to cascade */
1839                 outb(0x60|(irq & 7),0xA0);
1840         } else {
1841                 outb(0x60 | (irq & 7),0x20);
1842         }
1843 }
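
/* Decode of the specific-EOI writes above (sketch): OCW2 0x60|level
 * clears exactly one in-service bit, so e.g.:
 *
 *	irq 3	outb(0x63, 0x20)	master only
 *	irq 10	outb(0x62, 0x20)	EOI the cascade (level 2) on
 *					the master
 *		outb(0x62, 0xA0)	then level 10 & 7 == 2 on the
 *					slave
 */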
1844 
1845 /* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
1846  * but are not vectored by it.  This means that the 8259 mask must be
1847  * lowered to receive them */
1848 static __init void
1849 vic_enable_cpi(void)
1850 {
1851         __u8 cpu = smp_processor_id();
1852         
1853         /* just take a copy of the current mask (nop for boot cpu) */
1854         vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];
1855 
1856         enable_local_vic_irq(VIC_CPI_LEVEL0);
1857         enable_local_vic_irq(VIC_CPI_LEVEL1);
1858         /* for sys int and cmn int */
1859         enable_local_vic_irq(7);
1860 
1861         if(is_cpu_quad()) {
1862                 outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
1863                 outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
1864                 VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
1865                         cpu, QIC_CPI_ENABLE));
1866         }
1867 
1868         VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
1869                 cpu, vic_irq_mask[cpu]));
1870 }
1871 
1872 void
1873 voyager_smp_dump(void)
1874 {
1875         int old_cpu = smp_processor_id(), cpu;
1876 
1877         /* dump the interrupt masks of each processor */
1878         for_each_cpu(cpu, cpu_online_map) {
1879                 __u16 imr, isr, irr;
1880                 unsigned long flags;
1881 
1882                 local_irq_save(flags);
1883                 outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
1884                 imr = (inb(0xa1) << 8) | inb(0x21); /* interrupt masks */
1885                 outb(0x0a, 0xa0);       /* OCW3: select IRR on slave */
1886                 irr = inb(0xa0) << 8;
1887                 outb(0x0a, 0x20);       /* OCW3: select IRR on master */
1888                 irr |= inb(0x20);
1889                 outb(0x0b, 0xa0);       /* OCW3: select ISR on slave */
1890                 isr = inb(0xa0) << 8;
1891                 outb(0x0b, 0x20);       /* OCW3: select ISR on master */
1892                 isr |= inb(0x20);
1893                 outb(old_cpu, VIC_PROCESSOR_ID);
1894                 local_irq_restore(flags);
1895                 printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
1896                        cpu, vic_irq_mask[cpu], imr, irr, isr);
1897 #if 0
1898                 /* These lines are put in to try to unstick an un-acked irq */
1899                 if(isr != 0) {
1900                         int irq;
1901                         for(irq=0; irq<16; irq++) {
1902                                 if(isr & (1<<irq)) {
1903                                         printk("\tCPU%d: ack irq %d\n",
1904                                                cpu, irq);
1905                                         local_irq_save(flags);
1906                                         outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
1907                                              VIC_PROCESSOR_ID);
1908                                         ack_vic_irq(irq);
1909                                         outb(old_cpu, VIC_PROCESSOR_ID);
1910                                         local_irq_restore(flags);
1911                                 }
1912                         }
1913                 }
1914 #endif
1915         }
1916 }
1917 
1918 void
1919 smp_voyager_power_off(void *dummy)
1920 {
1921         if(smp_processor_id() == boot_cpu_id) 
1922                 voyager_power_off();
1923         else
1924                 smp_stop_cpu_function(NULL);
1925 }
1926 
1927 void __init
1928 smp_prepare_cpus(unsigned int max_cpus)
1929 {
1930         /* FIXME: ignore max_cpus for now */
1931         smp_boot_cpus();
1932 }
1933 
1934 void __devinit smp_prepare_boot_cpu(void)
1935 {
1936         cpu_set(smp_processor_id(), cpu_online_map);
1937         cpu_set(smp_processor_id(), cpu_callout_map);
1938 }
1939 
1940 int __devinit
1941 __cpu_up(unsigned int cpu)
1942 {
1943         /* This only works at boot for x86.  See "rewrite" above. */
1944         if (cpu_isset(cpu, smp_commenced_mask))
1945                 return -ENOSYS;
1946 
1947         /* In case one didn't come up */
1948         if (!cpu_isset(cpu, cpu_callin_map))
1949                 return -EIO;
1950         /* Unleash the CPU! */
1951         cpu_set(cpu, smp_commenced_mask);
1952         while (!cpu_isset(cpu, cpu_online_map))
1953                 mb();
1954         return 0;
1955 }
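
/* Boot handshake summarised (explanatory sketch of the maps used
 * above and in smp_prepare_boot_cpu()):
 *
 *	cpu_callout_map		boot CPU permits an AP to start
 *	cpu_callin_map		the AP reports it is alive
 *	smp_commenced_mask	boot CPU releases the AP ("Unleash
 *				the CPU!")
 *	cpu_online_map		set once the AP is fully up;
 *				__cpu_up() spins on it with mb()
 */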
1956 
1957 void __init 
1958 smp_cpus_done(unsigned int max_cpus)
1959 {
1960         zap_low_mappings();
1961 }
1962 
