~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/sparc/kernel/sun4m_smp.c

Version: ~ [ linux-5.1-rc1 ] ~ [ linux-5.0.2 ] ~ [ linux-4.20.16 ] ~ [ linux-4.19.29 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.106 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.163 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.176 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.136 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.63 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.39.4 ] ~ [ linux-2.6.38.8 ] ~ [ linux-2.6.37.6 ] ~ [ linux-2.6.36.4 ] ~ [ linux-2.6.35.14 ] ~ [ linux-2.6.34.15 ] ~ [ linux-2.6.33.20 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* sun4m_smp.c: Sparc SUN4M SMP support.
  2  *
  3  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  4  */
  5 
  6 #include <asm/head.h>
  7 
  8 #include <linux/kernel.h>
  9 #include <linux/sched.h>
 10 #include <linux/threads.h>
 11 #include <linux/smp.h>
 12 #include <linux/smp_lock.h>
 13 #include <linux/interrupt.h>
 14 #include <linux/kernel_stat.h>
 15 #include <linux/init.h>
 16 #include <linux/spinlock.h>
 17 #include <linux/mm.h>
 18 
 19 #include <asm/ptrace.h>
 20 #include <asm/atomic.h>
 21 
 22 #include <asm/delay.h>
 23 #include <asm/irq.h>
 24 #include <asm/page.h>
 25 #include <asm/pgalloc.h>
 26 #include <asm/pgtable.h>
 27 #include <asm/oplib.h>
 28 #include <asm/hardirq.h>
 29 
 30 #define __KERNEL_SYSCALLS__
 31 #include <linux/unistd.h>
 32 
 33 #define IRQ_RESCHEDULE          13
 34 #define IRQ_STOP_CPU            14
 35 #define IRQ_CROSS_CALL          15
 36 
 37 extern ctxd_t *srmmu_ctx_table_phys;
 38 extern int linux_num_cpus;
 39 
 40 extern void calibrate_delay(void);
 41 
 42 extern volatile int smp_processors_ready;
 43 extern unsigned long cpu_present_map;
 44 extern int smp_num_cpus;
 45 extern int smp_threads_ready;
 46 extern unsigned char mid_xlate[NR_CPUS];
 47 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 48 extern unsigned long smp_proc_in_lock[NR_CPUS];
 49 extern struct cpuinfo_sparc cpu_data[NR_CPUS];
 50 extern unsigned long cpu_offset[NR_CPUS];
 51 extern unsigned char boot_cpu_id;
 52 extern int smp_activated;
 53 extern volatile int __cpu_number_map[NR_CPUS];
 54 extern volatile int __cpu_logical_map[NR_CPUS];
 55 extern volatile unsigned long ipi_count;
 56 extern volatile int smp_process_available;
 57 extern volatile int smp_commenced;
 58 extern int __smp4m_processor_id(void);
 59 
 60 /*#define SMP_DEBUG*/
 61 
 62 #ifdef SMP_DEBUG
 63 #define SMP_PRINTK(x)   printk x
 64 #else
 65 #define SMP_PRINTK(x)
 66 #endif
 67 
 68 static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
 69 {
 70         __asm__ __volatile__("swap [%1], %0\n\t" :
 71                              "=&r" (val), "=&r" (ptr) :
 72                              "" (val), "1" (ptr));
 73         return val;
 74 }
 75 
 76 static void smp_setup_percpu_timer(void);
 77 extern void cpu_probe(void);
 78 
/* Entry point for a secondary CPU once the trampoline has dropped it
 * into the kernel.  Sets up the local timer, publishes readiness via
 * cpu_callin_map[cpuid], then spins until the boot CPU sets
 * smp_commenced before enabling local interrupts.
 */
void __init smp4m_callin(void)
{
	int cpuid = hard_smp_processor_id();

	/* Start from a clean local cache/TLB state. */
	local_flush_cache_all();
	local_flush_tlb_all();

	/* Direct undirected interrupts at the boot CPU for now. */
	set_irq_udt(mid_xlate[boot_cpu_id]);

	/* Get our local ticker going. */
	smp_setup_percpu_timer();

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_flush_cache_all();
	local_flush_tlb_all();

	/*
	 * Unblock the master CPU _only_ when the scheduler state
	 * of all secondary CPUs will be up-to-date, so after
	 * the SMP initialization the master will be just allowed
	 * to call the scheduler code.
	 */
	init_idle();

	/* Allow master to continue.  swap() is an atomic exchange, so
	 * the boot CPU polling cpu_callin_map sees this immediately. */
	swap((unsigned long *)&cpu_callin_map[cpuid], 1);

	local_flush_cache_all();
	local_flush_tlb_all();
	
	cpu_probe();

	/* Fix idle thread fields. */
	__asm__ __volatile__("ld [%0], %%g6\n\t"
			     : : "r" (&current_set[cpuid])
			     : "memory" /* paranoid */);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Spin until the boot CPU releases all secondaries. */
	while(!smp_commenced)
		barrier();

	local_flush_cache_all();
	local_flush_tlb_all();

	local_irq_enable();
}
130 
131 extern int cpu_idle(void *unused);
132 extern void init_IRQ(void);
133 extern void cpu_panic(void);
134 extern int start_secondary(void *unused);
135 
136 /*
137  *      Cycle through the processors asking the PROM to start each one.
138  */
139  
140 extern struct prom_cpuinfo linux_cpus[NR_CPUS];
141 extern struct linux_prom_registers smp_penguin_ctable;
142 extern unsigned long trapbase_cpu1[];
143 extern unsigned long trapbase_cpu2[];
144 extern unsigned long trapbase_cpu3[];
145 
/* Boot-CPU side of SMP bring-up: build cpu_present_map from the PROM
 * CPU count, start each secondary through the trampoline, wait for its
 * callin, then construct the IRQ-rotation ring and free the trap
 * tables of CPUs that are not present.
 */
void __init smp4m_boot_cpus(void)
{
	int cpucount = 0;	/* secondaries successfully started */
	int i = 0;
	int first, prev;	/* for building the IRQ rotation ring */

	printk("Entering SMP Mode...\n");

	local_irq_enable();
	cpu_present_map = 0;

	/* One bit per CPU the PROM reported. */
	for(i=0; i < linux_num_cpus; i++)
		cpu_present_map |= (1<<i);

	/* Reset per-CPU offset and number/logical translation tables. */
	for(i=0; i < NR_CPUS; i++) {
		cpu_offset[i] = (char *)&cpu_data[i] - (char *)&cpu_data;
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}

	/* The boot CPU is always logical CPU 0. */
	mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
	__cpu_number_map[boot_cpu_id] = 0;
	__cpu_logical_map[0] = boot_cpu_id;
	current->cpu = boot_cpu_id;

	smp_store_cpu_info(boot_cpu_id);
	set_irq_udt(mid_xlate[boot_cpu_id]);
	smp_setup_percpu_timer();
	local_flush_cache_all();
	if(linux_num_cpus == 1)
		return;  /* Not an MP box. */
	for(i = 0; i < NR_CPUS; i++) {
		if(i == boot_cpu_id)
			continue;

		if(cpu_present_map & (1 << i)) {
			extern unsigned long sun4m_cpu_startup;
			unsigned long *entry = &sun4m_cpu_startup;
			struct task_struct *p;
			int timeout;

			/* Cook up an idler for this guy. */
			kernel_thread(start_secondary, NULL, CLONE_IDLETASK);

			cpucount++;

			/* The idler just forked is the most recent task. */
			p = prev_task(&init_task);

			p->cpu = i;

			current_set[i] = p;

			unhash_process(p);

			/* See trampoline.S for details... */
			entry += ((i-1) * 3);

			/*
			 * Initialize the contexts table
			 * Since the call to prom_startcpu() trashes the structure,
			 * we need to re-initialize it for each cpu
			 */
			smp_penguin_ctable.which_io = 0;
			smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
			smp_penguin_ctable.reg_size = 0;

			/* whirrr, whirrr, whirrrrrrrrr... */
			printk("Starting CPU %d at %p\n", i, entry);
			mid_xlate[i] = (linux_cpus[i].mid & ~8);
			local_flush_cache_all();
			prom_startcpu(linux_cpus[i].prom_node,
				      &smp_penguin_ctable, 0, (char *)entry);

			/* wheee... it's going...
			 * Poll for the callin: 10000 * 200us ~= 2 seconds. */
			for(timeout = 0; timeout < 10000; timeout++) {
				if(cpu_callin_map[i])
					break;
				udelay(200);
			}
			if(cpu_callin_map[i]) {
				/* Another "Red Snapper". */
				__cpu_number_map[i] = i;
				__cpu_logical_map[i] = i;
			} else {
				cpucount--;
				printk("Processor %d is stuck.\n", i);
			}
		}
		/* Drop any CPU that never checked in. */
		if(!(cpu_callin_map[i])) {
			cpu_present_map &= ~(1 << i);
			__cpu_number_map[i] = -1;
		}
	}
	local_flush_cache_all();
	if(cpucount == 0) {
		printk("Error: only one Processor found.\n");
		cpu_present_map = (1 << smp_processor_id());
	} else {
		/* Sum udelay_val across present CPUs for the banner. */
		unsigned long bogosum = 0;
		for(i = 0; i < NR_CPUS; i++) {
			if(cpu_present_map & (1 << i))
				bogosum += cpu_data[i].udelay_val;
		}
		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
		       cpucount + 1,
		       bogosum/(500000/HZ),
		       (bogosum/(5000/HZ))%100);
		smp_activated = 1;
		smp_num_cpus = cpucount + 1;
	}

	/* Setup CPU list for IRQ distribution scheme. */
	first = prev = -1;
	for(i = 0; i < NR_CPUS; i++) {
		if(cpu_present_map & (1 << i)) {
			if(first == -1)
				first = i;
			if(prev != -1)
				cpu_data[prev].next = i;
			cpu_data[i].mid = mid_xlate[i];
			prev = i;
		}
	}
	/* Close the ring: last present CPU points back at the first. */
	cpu_data[prev].next = first;
	
	/* Free unneeded trap tables */
	if (!(cpu_present_map & (1 << 1))) {
		ClearPageReserved(virt_to_page(trapbase_cpu1));
		set_page_count(virt_to_page(trapbase_cpu1), 1);
		free_page((unsigned long)trapbase_cpu1);
		totalram_pages++;
		num_physpages++;
	}
	if (!(cpu_present_map & (1 << 2))) {
		ClearPageReserved(virt_to_page(trapbase_cpu2));
		set_page_count(virt_to_page(trapbase_cpu2), 1);
		free_page((unsigned long)trapbase_cpu2);
		totalram_pages++;
		num_physpages++;
	}
	if (!(cpu_present_map & (1 << 3))) {
		ClearPageReserved(virt_to_page(trapbase_cpu3));
		set_page_count(virt_to_page(trapbase_cpu3), 1);
		free_page((unsigned long)trapbase_cpu3);
		totalram_pages++;
		num_physpages++;
	}

	/* Ok, they are spinning and ready to go. */
	smp_processors_ready = 1;
}
297 
298 /* At each hardware IRQ, we get this called to forward IRQ reception
299  * to the next processor.  The caller must disable the IRQ level being
300  * serviced globally so that there are no double interrupts received.
301  */
302 void smp4m_irq_rotate(int cpu)
303 {
304         if(smp_processors_ready)
305                 set_irq_udt(cpu_data[cpu_data[cpu].next].mid);
306 }
307 
308 /* Cross calls, in order to work efficiently and atomically do all
309  * the message passing work themselves, only stopcpu and reschedule
310  * messages come through here.
311  */
312 void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
313 {
314         static unsigned long smp_cpu_in_msg[NR_CPUS];
315         unsigned long mask;
316         int me = smp_processor_id();
317         int irq, i;
318 
319         if(msg == MSG_RESCHEDULE) {
320                 irq = IRQ_RESCHEDULE;
321 
322                 if(smp_cpu_in_msg[me])
323                         return;
324         } else if(msg == MSG_STOP_CPU) {
325                 irq = IRQ_STOP_CPU;
326         } else {
327                 goto barf;
328         }
329 
330         smp_cpu_in_msg[me]++;
331         if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
332                 mask = cpu_present_map;
333                 if(target == MSG_ALL_BUT_SELF)
334                         mask &= ~(1 << me);
335                 for(i = 0; i < 4; i++) {
336                         if(mask & (1 << i))
337                                 set_cpu_int(mid_xlate[i], irq);
338                 }
339         } else {
340                 set_cpu_int(mid_xlate[target], irq);
341         }
342         smp_cpu_in_msg[me]--;
343 
344         return;
345 barf:
346         printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
347         panic("Bogon SMP message pass.");
348 }
349 
/* Argument block for the pending cross call.  Written by the
 * initiating CPU in smp4m_cross_call() (under cross_call_lock), read
 * by every target CPU in smp4m_cross_call_irq().
 */
static struct smp_funcall {
	smpfunc_t func;		/* function each target CPU runs */
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
	unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info;

/* Serializes initiators' use of ccall_info. */
static spinlock_t cross_call_lock = SPIN_LOCK_UNLOCKED;
362 
/* Cross calls must be serialized, at least currently. */
/* Fire func(arg1..arg5) at every other present CPU via the
 * IRQ_CROSS_CALL IPI, then spin until each target has both entered
 * and exited its handler.  Runs with cross_call_lock held and local
 * interrupts disabled for the whole duration; a no-op before
 * smp_processors_ready is set.
 */
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		    unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
	if(smp_processors_ready) {
		register int ncpus = smp_num_cpus;
		unsigned long flags;

		spin_lock_irqsave(&cross_call_lock, flags);

		/* Init function glue. */
		ccall_info.func = func;
		ccall_info.arg1 = arg1;
		ccall_info.arg2 = arg2;
		ccall_info.arg3 = arg3;
		ccall_info.arg4 = arg4;
		ccall_info.arg5 = arg5;

		/* Init receive/complete mapping, plus fire the IPI's off. */
		{
			register unsigned long mask;
			register int i;

			/* Everybody except ourselves. */
			mask = (cpu_present_map & ~(1 << smp_processor_id()));
			for(i = 0; i < ncpus; i++) {
				if(mask & (1 << i)) {
					ccall_info.processors_in[i] = 0;
					ccall_info.processors_out[i] = 0;
					set_cpu_int(mid_xlate[i], IRQ_CROSS_CALL);
				} else {
					/* Pre-mark non-targets as done so the
					 * wait loops below skip over them. */
					ccall_info.processors_in[i] = 1;
					ccall_info.processors_out[i] = 1;
				}
			}
		}

		{
			register int i;

			/* First wait for every target to enter the handler... */
			i = 0;
			do {
				while(!ccall_info.processors_in[i])
					barrier();
			} while(++i < ncpus);

			/* ...then for every target to finish it. */
			i = 0;
			do {
				while(!ccall_info.processors_out[i])
					barrier();
			} while(++i < ncpus);
		}

		spin_unlock_irqrestore(&cross_call_lock, flags);
	}
}
418 
/* Running cross calls. */
/* Target-CPU side of smp4m_cross_call(): flag entry, run the requested
 * function, flag completion.  The in/out flags are exactly the
 * handshake the initiator spins on, so the three steps must stay in
 * this order.
 */
void smp4m_cross_call_irq(void)
{
	int i = smp_processor_id();

	ccall_info.processors_in[i] = 1;
	ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
			ccall_info.arg4, ccall_info.arg5);
	ccall_info.processors_out[i] = 1;
}
429 
430 extern unsigned int prof_multiplier[NR_CPUS];
431 extern unsigned int prof_counter[NR_CPUS];
432 
433 extern void sparc_do_profile(unsigned long pc, unsigned long o7);
434 
/* Per-CPU timer tick handler.  Acks the profile interrupt, samples the
 * PC for kernel profiling, and charges process time once every
 * prof_multiplier[cpu] ticks.
 */
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	/* Ack this CPU's profile/timer interrupt. */
	clear_profile_irq(mid_xlate[cpu]);

	/* Only profile time spent in the kernel. */
	if(!user_mode(regs))
		sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);

	/* Counter reaches zero every prof_multiplier[cpu] ticks. */
	if(!--prof_counter[cpu]) {
		int user = user_mode(regs);

		irq_enter();
		update_process_times(user);
		irq_exit();

		prof_counter[cpu] = prof_multiplier[cpu];
	}
}
454 
455 extern unsigned int lvl14_resolution;
456 
457 static void __init smp_setup_percpu_timer(void)
458 {
459         int cpu = smp_processor_id();
460 
461         prof_counter[cpu] = prof_multiplier[cpu] = 1;
462         load_profile_irq(mid_xlate[cpu], lvl14_resolution);
463 
464         if(cpu == boot_cpu_id)
465                 enable_pil_irq(14);
466 }
467 
/* BTFIXUP blackbox: patch three instructions at 'addr' so that
 * smp_processor_id() reads the CPU number from %tbr at runtime.
 * 'rd' is the destination-register field extracted from the template
 * instruction; 'rs1' shifts that same register into the rs1 field.
 */
void __init smp4m_blackbox_id(unsigned *addr)
{
	int rd = *addr & 0x3e000000;	/* rd field of the template insn */
	int rs1 = rd >> 11;		/* same register, rs1 position */
	
	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[1] = 0x8130200c | rd | rs1;	/* srl reg, 0xc, reg */
	addr[2] = 0x80082003 | rd | rs1;	/* and reg, 3, reg */
}
477 
/* BTFIXUP blackbox for load_current: like smp4m_blackbox_id but with a
 * different shift, and patching only slots 0, 2 and 4 — the
 * intervening instructions are left as already emitted at the patch
 * site (presumably delay/other slots; see the BTFIXUP template).
 */
void __init smp4m_blackbox_current(unsigned *addr)
{
	int rd = *addr & 0x3e000000;	/* rd field of the template insn */
	int rs1 = rd >> 11;		/* same register, rs1 position */
	
	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[2] = 0x8130200a | rd | rs1;	/* srl reg, 0xa, reg */
	addr[4] = 0x8008200c | rd | rs1;	/* and reg, 0xc, reg */
}
487 
/* Register the sun4m implementations of the generic sparc32 SMP entry
 * points through the BTFIXUP runtime-patching mechanism.
 */
void __init sun4m_init_smp(void)
{
	BTFIXUPSET_BLACKBOX(smp_processor_id, smp4m_blackbox_id);
	BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
	BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}
496 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp