TOMOYO Linux Cross Reference
Linux/arch/powerpc/platforms/cell/spu_base.c

/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <linux/syscore_ops.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/kexec.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition, spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
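
/*
 * Illustrative sketch of the rules above (not part of the original
 * file); create_spu() below follows the same pattern:
 *
 *	// modification: take both locks
 *	mutex_lock(&spu_full_list_mutex);
 *	spin_lock_irqsave(&spu_full_list_lock, flags);
 *	list_add(&spu->full_list, &spu_full_list);
 *	spin_unlock_irqrestore(&spu_full_list_lock, flags);
 *	mutex_unlock(&spu_full_list_mutex);
 *
 *	// iteration: either lock alone is sufficient
 *	mutex_lock(&spu_full_list_mutex);
 *	list_for_each_entry(spu, &spu_full_list, full_list)
 *		...;
 *	mutex_unlock(&spu_full_list_mutex);
 */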

struct spu_slb {
        u64 esid, vsid;
};

void spu_invalidate_slbs(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        unsigned long flags;

        spin_lock_irqsave(&spu->register_lock, flags);
        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
        spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
        struct spu *spu;
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                if (spu->mm == mm)
                        spu_invalidate_slbs(spu);
        }
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}
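
/*
 * Note: filling mm_cpumask() makes the mm look as if it were in use on
 * every CPU, so the hash-flush code has to fall back to a broadcast
 * tlbie instead of a CPU-local tlbiel; the SPEs share the hash table
 * but are not represented in the cpumask. The NR_CPUS + 1 case is
 * presumably meant to force a "non-local" mask even on UP builds.
 */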

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        spu->mm = mm;
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        if (mm)
                mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
        return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

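/*
 * Restart the MFC DMA queue of an SPE after a fault has been handled.
 * If a context switch is in progress, the restart is deferred: only
 * SPU_CONTEXT_FAULT_PENDING is recorded, and the context-switch code
 * is then expected to restart the DMA.
 */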
static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
        else {
                set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
                mb();
        }
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
                        __func__, slbe, slb->vsid, slb->esid);

        out_be64(&priv2->slb_index_W, slbe);
        /* set invalid before writing vsid */
        out_be64(&priv2->slb_esid_RW, 0);
        /* now it's safe to write the vsid */
        out_be64(&priv2->slb_vsid_RW, slb->vsid);
        /* setting the new esid makes the entry valid again */
        out_be64(&priv2->slb_esid_RW, slb->esid);
}

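/*
 * Handle an SPE segment fault by building and loading one SLB entry
 * for the faulting effective address. The MFC has eight SLB slots;
 * spu->slb_replace implements a simple round-robin replacement over
 * them.
 */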
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct mm_struct *mm = spu->mm;
        struct spu_slb slb;
        int psize;

        pr_debug("%s\n", __func__);

        slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
#else
                psize = mm->context.user_psize;
#endif
                slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_USER;
                break;
        case VMALLOC_REGION_ID:
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                break;
        case KERNEL_REGION_ID:
                psize = mmu_linear_psize;
                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                break;
        default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }
        slb.vsid |= mmu_psize_defs[psize].sllp;

        spu_load_slb(spu, spu->slb_replace, &slb);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);
        spu->stats.slb_flt++;
        return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        int ret;

        pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

        /*
         * Handle kernel space hash faults immediately. User hash
         * faults need to be deferred to process context.
         */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
            (REGION_ID(ea) != USER_REGION_ID)) {

                spin_unlock(&spu->register_lock);
                ret = hash_page(ea, _PAGE_PRESENT, 0x300);
                spin_lock(&spu->register_lock);

                if (!ret) {
                        spu_restart_dma(spu);
                        return 0;
                }
        }

        spu->class_1_dar = ea;
        spu->class_1_dsisr = dsisr;

        spu->stop_callback(spu, 1);

        spu->class_1_dar = 0;
        spu->class_1_dsisr = 0;

        return 0;
}

static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
        unsigned long ea = (unsigned long)addr;
        u64 llp;

        if (REGION_ID(ea) == KERNEL_REGION_ID)
                llp = mmu_psize_defs[mmu_linear_psize].sllp;
        else
                llp = mmu_psize_defs[mmu_virtual_psize].sllp;

        slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
                SLB_VSID_KERNEL | llp;
        slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
                void *new_addr)
{
        unsigned long ea = (unsigned long)new_addr;
        int i;

        for (i = 0; i < nr_slbs; i++)
                if (!((slbs[i].esid ^ ea) & ESID_MASK))
                        return 1;

        return 0;
}

/**
 * Set up the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                void *code, int code_size)
{
        struct spu_slb slbs[4];
        int i, nr_slbs = 0;
        /* start and end addresses of both mappings */
        void *addrs[] = {
                lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
                code, code + code_size - 1
        };

        /* check the set of addresses, and create a new entry in the slbs array
         * if there isn't already an SLB for that address */
        for (i = 0; i < ARRAY_SIZE(addrs); i++) {
                if (__slb_present(slbs, nr_slbs, addrs[i]))
                        continue;

                __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
                nr_slbs++;
        }

        spin_lock_irq(&spu->register_lock);
        /* Add the set of SLBs */
        for (i = 0; i < nr_slbs; i++)
                spu_load_slb(spu, i, &slbs[i]);
        spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

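/*
 * Class 0 interrupts signal SPU and MFC error conditions. Latch the
 * status and fault address for the stop callback, then acknowledge.
 */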
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask;

        spu = data;

        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0) & mask;

        spu->class_0_pending |= stat;
        spu->class_0_dar = spu_mfc_dar_get(spu);
        spu->stop_callback(spu, 0);
        spu->class_0_pending = 0;
        spu->class_0_dar = 0;

        spu_int_stat_clear(spu, 0, stat);
        spin_unlock(&spu->register_lock);

        return IRQ_HANDLED;
}

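/*
 * Class 1 interrupts are translation faults: a segment fault is fixed
 * up directly in __spu_trap_data_seg(), a storage (hash) fault goes
 * through __spu_trap_data_map().
 */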
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask  = spu_int_mask_get(spu, 1);
        stat  = spu_int_stat_get(spu, 1) & mask;
        dar   = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & CLASS1_STORAGE_FAULT_INTR)
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);

        pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
                        dar, dsisr);

        if (stat & CLASS1_SEGMENT_FAULT_INTR)
                __spu_trap_data_seg(spu, dar);

        if (stat & CLASS1_STORAGE_FAULT_INTR)
                __spu_trap_data_map(spu, dar, dsisr);

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
                ;       /* acknowledged above, intentionally ignored */

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                ;       /* acknowledged above, intentionally ignored */

        spu->class_1_dsisr = 0;
        spu->class_1_dar = 0;

        spin_unlock(&spu->register_lock);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

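/*
 * Class 2 interrupts cover mailbox traffic, SPU stop/halt and DMA tag
 * group completion. The level-triggered mailbox sources are masked
 * before being acknowledged; the callbacks are expected to re-enable
 * them once the mailbox has been drained.
 */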
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;
        const int mailbox_intrs =
                CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

        spu = data;
        spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;
        /* mailbox interrupts are level triggered. mask them now before
         * acknowledging */
        if (stat & mailbox_intrs)
                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        if (stat & CLASS2_MAILBOX_INTR)
                spu->ibox_callback(spu);

        if (stat & CLASS2_SPU_STOP_INTR)
                spu->stop_callback(spu, 2);

        if (stat & CLASS2_SPU_HALT_INTR)
                spu->stop_callback(spu, 2);

        if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
                spu->mfc_callback(spu);

        if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
                spu->wbox_callback(spu);

        spu->stats.class2_intr++;

        spin_unlock(&spu->register_lock);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

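/*
 * Request the up-to-three interrupt lines of an SPE. A missing line is
 * marked NO_IRQ and skipped; on failure, the lines requested so far
 * are freed again through the bail labels.
 */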
static int spu_request_irqs(struct spu *spu)
{
        int ret = 0;

        if (spu->irqs[0] != NO_IRQ) {
                snprintf(spu->irq_c0, sizeof(spu->irq_c0), "spe%02d.0",
                         spu->number);
                ret = request_irq(spu->irqs[0], spu_irq_class_0,
                                  0, spu->irq_c0, spu);
                if (ret)
                        goto bail0;
        }
        if (spu->irqs[1] != NO_IRQ) {
                snprintf(spu->irq_c1, sizeof(spu->irq_c1), "spe%02d.1",
                         spu->number);
                ret = request_irq(spu->irqs[1], spu_irq_class_1,
                                  0, spu->irq_c1, spu);
                if (ret)
                        goto bail1;
        }
        if (spu->irqs[2] != NO_IRQ) {
                snprintf(spu->irq_c2, sizeof(spu->irq_c2), "spe%02d.2",
                         spu->number);
                ret = request_irq(spu->irqs[2], spu_irq_class_2,
                                  0, spu->irq_c2, spu);
                if (ret)
                        goto bail2;
        }
        return 0;

bail2:
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
bail1:
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
bail0:
        return ret;
}

static void spu_free_irqs(struct spu *spu)
{
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
        if (spu->irqs[2] != NO_IRQ)
                free_irq(spu->irqs[2], spu);
}

void spu_init_channels(struct spu *spu)
{
        static const struct {
                unsigned channel;
                unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static struct bus_type spu_subsys = {
        .name = "spu",
        .dev_name = "spu",
};

int spu_add_dev_attr(struct device_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                device_create_file(&spu->dev, attr);
        mutex_unlock(&spu_full_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr);
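
/*
 * Minimal usage sketch (hypothetical attribute, not part of the
 * original file), mirroring how dev_attr_stat is registered at the
 * bottom of this file:
 *
 *	static ssize_t example_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct spu *spu = container_of(dev, struct spu, dev);
 *
 *		return sprintf(buf, "%d\n", spu->number);
 *	}
 *	static DEVICE_ATTR(example, 0444, example_show, NULL);
 *
 *	spu_add_dev_attr(&dev_attr_example);
 */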

int spu_add_dev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;
        int rc = 0;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                rc = sysfs_create_group(&spu->dev.kobj, attrs);

                /* we're in trouble here, but try unwinding anyway */
                if (rc) {
                        printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
                                        __func__, attrs->name);

                        list_for_each_entry_continue_reverse(spu,
                                        &spu_full_list, full_list)
                                sysfs_remove_group(&spu->dev.kobj, attrs);
                        break;
                }
        }

        mutex_unlock(&spu_full_list_mutex);

        return rc;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);

void spu_remove_dev_attr(struct device_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                device_remove_file(&spu->dev, attr);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr);

void spu_remove_dev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_remove_group(&spu->dev.kobj, attrs);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);

static int spu_create_dev(struct spu *spu)
{
        int ret;

        spu->dev.id = spu->number;
        spu->dev.bus = &spu_subsys;
        ret = device_register(&spu->dev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                                spu->number);
                return ret;
        }

        sysfs_add_device_to_node(&spu->dev, spu->node);

        return 0;
}

static int __init create_spu(void *data)
{
        struct spu *spu;
        int ret;
        static int number;
        unsigned long flags;
        struct timespec ts;

        ret = -ENOMEM;
        spu = kzalloc(sizeof(*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        spu->alloc_state = SPU_FREE;

        spin_lock_init(&spu->register_lock);
        spin_lock(&spu_lock);
        spu->number = number++;
        spin_unlock(&spu_lock);

        ret = spu_create_spu(spu, data);

        if (ret)
                goto out_free;

        spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_destroy;

        ret = spu_create_dev(spu);
        if (ret)
                goto out_free_irqs;

        mutex_lock(&cbe_spu_info[spu->node].list_mutex);
        list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
        cbe_spu_info[spu->node].n_spus++;
        mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

        mutex_lock(&spu_full_list_mutex);
        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        mutex_unlock(&spu_full_list_mutex);

        spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
        ktime_get_ts(&ts);
        spu->stats.tstamp = timespec_to_ns(&ts);

        INIT_LIST_HEAD(&spu->aff_list);

        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_destroy:
        spu_destroy_spu(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static const char *spu_state_names[] = {
        "user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
                enum spu_utilization_state state)
{
        struct timespec ts;
        unsigned long long time = spu->stats.times[state];

        /*
         * If the spu is idle or the context is stopped, utilization
         * statistics are not updated.  Apply the time delta from the
         * last recorded state of the spu.
         */
        if (spu->stats.util_state == state) {
                ktime_get_ts(&ts);
                time += timespec_to_ns(&ts) - spu->stats.tstamp;
        }

        return time / NSEC_PER_MSEC;
}

static ssize_t spu_stat_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct spu *spu = container_of(dev, struct spu, dev);

        return sprintf(buf, "%s %llu %llu %llu %llu "
                      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                spu_state_names[spu->stats.util_state],
                spu_acct_time(spu, SPU_UTIL_USER),
                spu_acct_time(spu, SPU_UTIL_SYSTEM),
                spu_acct_time(spu, SPU_UTIL_IOWAIT),
                spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
                spu->stats.vol_ctx_switch,
                spu->stats.invol_ctx_switch,
                spu->stats.slb_flt,
                spu->stats.hash_flt,
                spu->stats.min_flt,
                spu->stats.maj_flt,
                spu->stats.class2_intr,
                spu->stats.libassist);
}

static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);
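
/*
 * The "stat" attribute reports, in order: the utilization state name,
 * the four spu_acct_time() values (user, system, iowait, idle_loaded,
 * in milliseconds), voluntary and involuntary context switches, SLB,
 * hash, minor and major faults, class 2 interrupts, and the library
 * assist count.
 */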

#ifdef CONFIG_KEXEC

struct crash_spu_info {
        struct spu *spu;
        u32 saved_spu_runcntl_RW;
        u32 saved_spu_status_R;
        u32 saved_spu_npc_RW;
        u64 saved_mfc_sr1_RW;
        u64 saved_mfc_dar;
        u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS  16      /* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];

static void crash_kexec_stop_spus(void)
{
        struct spu *spu;
        int i;
        u64 tmp;

        for (i = 0; i < CRASH_NUM_SPUS; i++) {
                if (!crash_spu_info[i].spu)
                        continue;

                spu = crash_spu_info[i].spu;

                crash_spu_info[i].saved_spu_runcntl_RW =
                        in_be32(&spu->problem->spu_runcntl_RW);
                crash_spu_info[i].saved_spu_status_R =
                        in_be32(&spu->problem->spu_status_R);
                crash_spu_info[i].saved_spu_npc_RW =
                        in_be32(&spu->problem->spu_npc_RW);

                crash_spu_info[i].saved_mfc_dar    = spu_mfc_dar_get(spu);
                crash_spu_info[i].saved_mfc_dsisr  = spu_mfc_dsisr_get(spu);
                tmp = spu_mfc_sr1_get(spu);
                crash_spu_info[i].saved_mfc_sr1_RW = tmp;

                tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
                spu_mfc_sr1_set(spu, tmp);

                __delay(200);
        }
}

static void crash_register_spus(struct list_head *list)
{
        struct spu *spu;
        int ret;

        list_for_each_entry(spu, list, full_list) {
                if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
                        continue;

                crash_spu_info[spu->number].spu = spu;
        }

        ret = crash_shutdown_register(&crash_kexec_stop_spus);
        if (ret)
                printk(KERN_ERR "Could not register SPU crash handler\n");
}

#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

static void spu_shutdown(void)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                spu_free_irqs(spu);
                spu_destroy_spu(spu);
        }
        mutex_unlock(&spu_full_list_mutex);
}

static struct syscore_ops spu_syscore_ops = {
        .shutdown = spu_shutdown,
};

static int __init init_spu_base(void)
{
        int i, ret = 0;

        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&cbe_spu_info[i].list_mutex);
                INIT_LIST_HEAD(&cbe_spu_info[i].spus);
        }

        if (!spu_management_ops)
                goto out;

        /* create system subsystem for spus */
        ret = subsys_system_register(&spu_subsys, NULL);
        if (ret)
                goto out;

        ret = spu_enumerate_spus(create_spu);

        if (ret < 0) {
                printk(KERN_WARNING "%s: Error initializing spus\n",
                        __func__);
                goto out_unregister_subsys;
        }

        if (ret > 0)
                fb_append_extra_logo(&logo_spe_clut224, ret);

        mutex_lock(&spu_full_list_mutex);
        xmon_register_spus(&spu_full_list);
        crash_register_spus(&spu_full_list);
        mutex_unlock(&spu_full_list_mutex);
        spu_add_dev_attr(&dev_attr_stat);
        register_syscore_ops(&spu_syscore_ops);

        spu_init_affinity();

        return 0;

out_unregister_subsys:
        bus_unregister(&spu_subsys);
out:
        return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
