TOMOYO Linux Cross Reference
Linux/arch/powerpc/platforms/cell/iommu.c

/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/udbg.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "cell.h"
#include "interrupt.h"

/* Define CELL_IOMMU_REAL_UNMAP to actually unmap unused pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been applied.
 */
#define CELL_IOMMU_REAL_UNMAP

/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping functions
 */
#define CELL_IOMMU_STRICT_PROTECTION


#define NR_IOMMUS                       2

/* IOC mmap registers */
#define IOC_Reg_Size                    0x2000

#define IOC_IOPT_CacheInvd              0x908
#define IOC_IOPT_CacheInvd_NE_Mask      0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask   0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy         0x0000000000000001ul

#define IOC_IOST_Origin                 0x918
#define IOC_IOST_Origin_E               0x8000000000000000ul
#define IOC_IOST_Origin_HW              0x0000000000000800ul
#define IOC_IOST_Origin_HL              0x0000000000000400ul

#define IOC_IO_ExcpStat                 0x920
#define IOC_IO_ExcpStat_V               0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask        0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S           0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P           0x2000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask       0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask         0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask       0x00000000000007fful

#define IOC_IO_ExcpMask                 0x928
#define IOC_IO_ExcpMask_SFE             0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE             0x2000000000000000ul

#define IOC_IOCmd_Offset                0x1000

#define IOC_IOCmd_Cfg                   0xc00
#define IOC_IOCmd_Cfg_TE                0x0000800000000000ul


/* Segment table entries */
#define IOSTE_V                 0x8000000000000000ul /* valid */
#define IOSTE_H                 0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask  0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask         0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask           0x0000000000000007ul /* page size */
#define IOSTE_PS_4K             0x0000000000000001ul /*   - 4kB  */
#define IOSTE_PS_64K            0x0000000000000003ul /*   - 64kB */
#define IOSTE_PS_1M             0x0000000000000005ul /*   - 1MB  */
#define IOSTE_PS_16M            0x0000000000000007ul /*   - 16MB */
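
/* A worked sizing example for the fields above, for the 4kB I/O page case
 * used by the dynamic window (IO_SEGMENT_SHIFT = 28 below, so one segment
 * holds 2^16 4kB pages): the page table for a segment is
 * 2^16 * sizeof(unsigned long) = 512kB = 128 4kB pages, and the STE that
 * cell_iommu_alloc_ptab() builds for it is
 *
 *     IOSTE_V | ((128 - 1) << 5) | IOSTE_PS_4K
 *
 * i.e. 127 in the NPPT field (mask 0xfe0) and page size code 1.
 */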


/* IOMMU sizing */
#define IO_SEGMENT_SHIFT        28
#define IO_PAGENO_BITS(shift)   (IO_SEGMENT_SHIFT - (shift))

/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET       0x80000000ul

struct iommu_window {
        struct list_head list;
        struct cbe_iommu *iommu;
        unsigned long offset;
        unsigned long size;
        unsigned int ioid;
        struct iommu_table table;
};

#define NAMESIZE 8
struct cbe_iommu {
        int nid;
        char name[NAMESIZE];
        void __iomem *xlate_regs;
        void __iomem *cmd_regs;
        unsigned long *stab;
        unsigned long *ptab;
        void *pad_page;
        struct list_head windows;
};

/* Static array of iommus, one per node
 *   each contains a list of windows, keyed from dma_window property
 *   - on bus setup, look for a matching window, or create one
 *   - on dev setup, assign iommu_table ptr
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;

static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
                long n_ptes)
{
        u64 __iomem *reg;
        u64 val;
        long n;

        reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

        while (n_ptes > 0) {
                /* we can invalidate up to 1 << 11 PTEs at once */
                n = min(n_ptes, 1l << 11);
                val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
                        | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
                        | IOC_IOPT_CacheInvd_Busy;

                out_be64(reg, val);
                while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
                        ;

                n_ptes -= n;
                pte += n;
        }
}
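
/* A worked example of the invalidation loop above, with values inferred
 * from the register masks: invalidating 512 PTEs whose chunk sits at
 * physical address p would issue a single write of
 *
 *     (512l << 53)              = 0x4000000000000000  (NE count field)
 *   | (p & IOC_IOPT_CacheInvd_IOPTE_Mask)             (8-byte aligned)
 *   | IOC_IOPT_CacheInvd_Busy                         (polled until clear)
 *
 * while a request for 5000 PTEs is split across three writes
 * (2048 + 2048 + 904), since the NE field (bits 63-53) holds only
 * 11 bits of count.
 */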

static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
                unsigned long uaddr, enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        int i;
        unsigned long *io_pte, base_pte;
        struct iommu_window *window =
                container_of(tbl, struct iommu_window, table);

        /* implementing proper protection causes problems with the spidernet
         * driver - check mapping directions later, but allow read & write by
         * default for now. */
#ifdef CELL_IOMMU_STRICT_PROTECTION
        /* to avoid referencing a global, we use a trick here to set up the
         * protection bits. "prot" is set up to be 3 fields of 4 bits appended
         * together for each of the 3 supported direction values. It is then
         * shifted left so that the fields matching the desired direction
         * land on the appropriate bits, and other bits are masked out. A
         * worked decode follows this function.
         */
        const unsigned long prot = 0xc48;
        base_pte =
                ((prot << (52 + 4 * direction)) &
                 (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
                CBE_IOPTE_M | CBE_IOPTE_SO_RW |
                (window->ioid & CBE_IOPTE_IOID_Mask);
#else
        base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
                CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
        if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
                base_pte &= ~CBE_IOPTE_SO_RW;

        io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

        for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
                io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);

        mb();

        invalidate_tce_cache(window->iommu, io_pte, npages);

        pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
                 index, npages, direction, base_pte);
        return 0;
}
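
/* Worked decode of the 0xc48 trick above, assuming the usual cell-regs.h
 * bit assignments (CBE_IOPTE_PP_W = bit 63, CBE_IOPTE_PP_R = bit 62) and
 * the generic direction values DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1,
 * DMA_FROM_DEVICE = 2:
 *
 *     prot = 0xc48 = 1100 0100 1000 binary, one 4-bit field per direction
 *
 *     direction          shift    field bits reaching 63:62    protection
 *     DMA_BIDIRECTIONAL  << 52    11..                         W + R
 *     DMA_TO_DEVICE      << 56    01..                         R only
 *     DMA_FROM_DEVICE    << 60    10..                         W only
 *
 * Only the top two bits of the selected field land on the PP bits; the
 * rest are removed by the (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R) mask.
 */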

static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{
        int i;
        unsigned long *io_pte, pte;
        struct iommu_window *window =
                container_of(tbl, struct iommu_window, table);

        pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
        pte = 0;
#else
        /* spider bridge does PCI reads after freeing - insert a mapping
         * to a scratch page instead of an invalid entry */
        pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
                __pa(window->iommu->pad_page) |
                (window->ioid & CBE_IOPTE_IOID_Mask);
#endif

        io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

        for (i = 0; i < npages; i++)
                io_pte[i] = pte;

        mb();

        invalidate_tce_cache(window->iommu, io_pte, npages);
}

static irqreturn_t ioc_interrupt(int irq, void *data)
{
        unsigned long stat, spf;
        struct cbe_iommu *iommu = data;

        stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
        spf = stat & IOC_IO_ExcpStat_SPF_Mask;

        /* Might want to rate limit it */
        printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
        printk(KERN_ERR "  V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
               !!(stat & IOC_IO_ExcpStat_V),
               (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
               (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
               (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
               (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
        printk(KERN_ERR "  page=0x%016lx\n",
               stat & IOC_IO_ExcpStat_ADDR_Mask);

        /* clear interrupt */
        stat &= ~IOC_IO_ExcpStat_V;
        out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

        return IRQ_HANDLED;
}

static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
        struct device_node *np;
        struct resource r;

        *base = 0;

        /* First look for new style /be nodes */
        for_each_node_by_name(np, "ioc") {
                if (of_node_to_nid(np) != nid)
                        continue;
                if (of_address_to_resource(np, 0, &r)) {
                        printk(KERN_ERR "iommu: can't get address for %s\n",
                               np->full_name);
                        continue;
                }
                *base = r.start;
                of_node_put(np);
                return 0;
        }

        /* Ok, let's try the old way */
        for_each_node_by_type(np, "cpu") {
                const unsigned int *nidp;
                const unsigned long *tmp;

                nidp = of_get_property(np, "node-id", NULL);
                if (nidp && *nidp == nid) {
                        tmp = of_get_property(np, "ioc-translation", NULL);
                        if (tmp) {
                                *base = *tmp;
                                of_node_put(np);
                                return 0;
                        }
                }
        }

        return -ENODEV;
}

static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
                                unsigned long dbase, unsigned long dsize,
                                unsigned long fbase, unsigned long fsize)
{
        struct page *page;
        unsigned long segments, stab_size;

        segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;

        pr_debug("%s: iommu[%d]: segments: %lu\n",
                        __func__, iommu->nid, segments);

        /* set up the segment table */
        stab_size = segments * sizeof(unsigned long);
        page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
        BUG_ON(!page);
        iommu->stab = page_address(page);
        memset(iommu->stab, 0, stab_size);
}

static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
                unsigned long base, unsigned long size, unsigned long gap_base,
                unsigned long gap_size, unsigned long page_shift)
{
        struct page *page;
        int i;
        unsigned long reg, segments, pages_per_segment, ptab_size,
                      n_pte_pages, start_seg, *ptab;

        start_seg = base >> IO_SEGMENT_SHIFT;
        segments  = size >> IO_SEGMENT_SHIFT;
        pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
        /* PTEs for each segment must start on a 4K boundary */
        pages_per_segment = max(pages_per_segment,
                                (1 << 12) / sizeof(unsigned long));

        ptab_size = segments * pages_per_segment * sizeof(unsigned long);
        pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
                        iommu->nid, ptab_size, get_order(ptab_size));
        page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
        BUG_ON(!page);

        ptab = page_address(page);
        memset(ptab, 0, ptab_size);

        /* number of 4K pages needed for a page table */
        n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;

        pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
                        __func__, iommu->nid, iommu->stab, ptab,
                        n_pte_pages);

        /* initialise the STEs */
        reg = IOSTE_V | ((n_pte_pages - 1) << 5);

        switch (page_shift) {
        case 12: reg |= IOSTE_PS_4K;  break;
        case 16: reg |= IOSTE_PS_64K; break;
        case 20: reg |= IOSTE_PS_1M;  break;
        case 24: reg |= IOSTE_PS_16M; break;
        default: BUG();
        }

        gap_base = gap_base >> IO_SEGMENT_SHIFT;
        gap_size = gap_size >> IO_SEGMENT_SHIFT;

        pr_debug("Setting up IOMMU stab:\n");
        for (i = start_seg; i < (start_seg + segments); i++) {
                if (i >= gap_base && i < (gap_base + gap_size)) {
                        pr_debug("\toverlap at %d, skipping\n", i);
                        continue;
                }
                iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
                                        (i - start_seg));
                pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
        }

        return ptab;
}
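
/* A worked pass through cell_iommu_alloc_ptab() for the 16MB fixed-mapping
 * case (page_shift = 24): a 2^28 byte segment holds only 2^4 = 16 pages of
 * 16MB, but pages_per_segment is bumped to 4096 / 8 = 512 so that each
 * segment's PTEs start on a 4K boundary. n_pte_pages then becomes
 * (512 * 8) >> 12 = 1, i.e. each segment owns exactly one 4K chunk of the
 * page table, and consecutive segments' STEs point 4K apart.
 */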

static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
        int ret;
        unsigned long reg, xlate_base;
        unsigned int virq;

        if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
                panic("%s: missing IOC register mappings for node %d\n",
                      __func__, iommu->nid);

        iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
        iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

        /* ensure that the STEs have updated */
        mb();

        /* setup interrupts for the iommu. */
        reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
        out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
                        reg & ~IOC_IO_ExcpStat_V);
        out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
                        IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

        virq = irq_create_mapping(NULL,
                        IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
        BUG_ON(virq == NO_IRQ);

        ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
        BUG_ON(ret);

        /* set the IOC segment table origin register (and turn on the iommu) */
        reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
        out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
        in_be64(iommu->xlate_regs + IOC_IOST_Origin);

        /* turn on IO translation */
        reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
        out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}

static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
        unsigned long base, unsigned long size)
{
        cell_iommu_setup_stab(iommu, base, size, 0, 0);
        iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
                                            IOMMU_PAGE_SHIFT_4K);
        cell_iommu_enable_hardware(iommu);
}

#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
                unsigned long offset, unsigned long size)
{
        struct iommu_window *window;

        /* todo: check for overlapping (but not equal) windows */

        list_for_each_entry(window, &(iommu->windows), list) {
                if (window->offset == offset && window->size == size)
                        return window;
        }

        return NULL;
}
#endif

static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
        const u32 *ioid;

        ioid = of_get_property(np, "ioid", NULL);
        if (ioid == NULL) {
                printk(KERN_WARNING "iommu: missing ioid for %s, using 0\n",
                       np->full_name);
                return 0;
        }

        return *ioid;
}

static struct iommu_table_ops cell_iommu_ops = {
        .set = tce_build_cell,
        .clear = tce_free_cell
};

static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
                        unsigned long offset, unsigned long size,
                        unsigned long pte_offset)
{
        struct iommu_window *window;
        struct page *page;
        u32 ioid;

        ioid = cell_iommu_get_ioid(np);

        window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
        BUG_ON(window == NULL);

        window->offset = offset;
        window->size = size;
        window->ioid = ioid;
        window->iommu = iommu;

        window->table.it_blocksize = 16;
        window->table.it_base = (unsigned long)iommu->ptab;
        window->table.it_index = iommu->nid;
        window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
        window->table.it_offset =
                (offset >> window->table.it_page_shift) + pte_offset;
        window->table.it_size = size >> window->table.it_page_shift;
        window->table.it_ops = &cell_iommu_ops;

        iommu_init_table(&window->table, iommu->nid);

        pr_debug("\tioid      %d\n", window->ioid);
        pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
        pr_debug("\tbase      0x%016lx\n", window->table.it_base);
        pr_debug("\toffset    0x%lx\n", window->table.it_offset);
        pr_debug("\tsize      %ld\n", window->table.it_size);

        list_add(&window->list, &iommu->windows);

        if (offset != 0)
                return window;

        /* We need to map and reserve the first IOMMU page since it's used
         * by the spider workaround. In theory, we only need to do that when
         * running on spider but it doesn't really matter.
         *
         * This code also assumes that we have a window that starts at 0,
         * which is the case on all spider based blades.
         */
        page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
        BUG_ON(!page);
        iommu->pad_page = page_address(page);
        clear_page(iommu->pad_page);

        __set_bit(0, window->table.it_map);
        tce_build_cell(&window->table, window->table.it_offset, 1,
                       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);

        return window;
}
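
/* A worked example of the it_offset arithmetic above, for the spider case
 * set up by cell_iommu_init_one() further down: with a DMA window at
 * offset 0 of size 0x80000000 and pte_offset = SPIDER_DMA_OFFSET >> 12 =
 * 0x80000, the table spans TCE indices 0x80000..0xfffff. Mapping the
 * window's first IO page therefore yields bus address 0x80000000, which
 * keeps the high bit set as spider requires while still filling IO PTE 0.
 */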

static struct cbe_iommu *cell_iommu_for_node(int nid)
{
        int i;

        for (i = 0; i < cbe_nr_iommus; i++)
                if (iommus[i].nid == nid)
                        return &iommus[i];
        return NULL;
}

static unsigned long cell_dma_direct_offset;

static unsigned long dma_iommu_fixed_base;

/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
static int iommu_fixed_is_weak;

static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
        struct iommu_window *window;
        struct cbe_iommu *iommu;

        /* Current implementation uses the first window available in that
         * node's iommu. We -might- do something smarter later though it may
         * never be necessary
         */
        iommu = cell_iommu_for_node(dev_to_node(dev));
        if (iommu == NULL || list_empty(&iommu->windows)) {
                dev_err(dev, "iommu: missing iommu for %s (node %d)\n",
                       of_node_full_name(dev->of_node), dev_to_node(dev));
                return NULL;
        }
        window = list_entry(iommu->windows.next, struct iommu_window, list);

        return &window->table;
}

/* A coherent allocation implies strong ordering */

static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
                                      struct dma_attrs *attrs)
{
        if (iommu_fixed_is_weak)
                return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
                                            size, dma_handle,
                                            device_to_mask(dev), flag,
                                            dev_to_node(dev));
        else
                return dma_direct_ops.alloc(dev, size, dma_handle, flag,
                                            attrs);
}

static void dma_fixed_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
                                    struct dma_attrs *attrs)
{
        if (iommu_fixed_is_weak)
                iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
                                    dma_handle);
        else
                dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
}

static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
                return dma_direct_ops.map_page(dev, page, offset, size,
                                               direction, attrs);
        else
                return iommu_map_page(dev, cell_get_iommu_table(dev), page,
                                      offset, size, device_to_mask(dev),
                                      direction, attrs);
}
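
/* The (iommu_fixed_is_weak == dma_get_attr(...)) test above looks odd but
 * encodes a simple rule: the fixed 1:1 window was populated with a single
 * ordering (weak or strong), so a request may only take the direct path
 * when its ordering matches what the window provides:
 *
 *     fixed window    request            path taken
 *     weak            DMA_ATTR_WEAK      direct ops (fixed window)
 *     strong          default            direct ops (fixed window)
 *     weak            default            dynamic iommu window
 *     strong          DMA_ATTR_WEAK      dynamic iommu window
 *
 * The dynamic window can honour either ordering per PTE (tce_build_cell()
 * clears CBE_IOPTE_SO_RW for weakly ordered mappings), so mismatches fall
 * back to it. The same test appears in dma_fixed_map_page() above and in
 * the unmap/sg paths below.
 */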

static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction direction,
                                 struct dma_attrs *attrs)
{
        if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
                dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
                                          attrs);
        else
                iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
                                 direction, attrs);
}

static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
                           int nents, enum dma_data_direction direction,
                           struct dma_attrs *attrs)
{
        if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
                return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
        else
                return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
                                        nents, device_to_mask(dev),
                                        direction, attrs);
}

static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
                               int nents, enum dma_data_direction direction,
                               struct dma_attrs *attrs)
{
        if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
                dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
        else
                ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
                                   direction, attrs);
}

static int dma_fixed_dma_supported(struct device *dev, u64 mask)
{
        return mask == DMA_BIT_MASK(64);
}

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);

struct dma_map_ops dma_iommu_fixed_ops = {
        .alloc          = dma_fixed_alloc_coherent,
        .free           = dma_fixed_free_coherent,
        .map_sg         = dma_fixed_map_sg,
        .unmap_sg       = dma_fixed_unmap_sg,
        .dma_supported  = dma_fixed_dma_supported,
        .set_dma_mask   = dma_set_mask_and_switch,
        .map_page       = dma_fixed_map_page,
        .unmap_page     = dma_fixed_unmap_page,
};

static void cell_dma_dev_setup_fixed(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
        /* Order is important here, these are not mutually exclusive */
        if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
                cell_dma_dev_setup_fixed(dev);
        else if (get_pci_dma_ops() == &dma_iommu_ops)
                set_iommu_table_base(dev, cell_get_iommu_table(dev));
        else if (get_pci_dma_ops() == &dma_direct_ops)
                set_dma_offset(dev, cell_dma_direct_offset);
        else
                BUG();
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
        cell_dma_dev_setup(&dev->dev);
}

static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
                              void *data)
{
        struct device *dev = data;

        /* We are only interested in device addition */
        if (action != BUS_NOTIFY_ADD_DEVICE)
                return 0;

        /* We use the PCI DMA ops */
        dev->archdata.dma_ops = get_pci_dma_ops();

        cell_dma_dev_setup(dev);

        return 0;
}

static struct notifier_block cell_of_bus_notifier = {
        .notifier_call = cell_of_bus_notify
};

static int __init cell_iommu_get_window(struct device_node *np,
                                         unsigned long *base,
                                         unsigned long *size)
{
        const __be32 *dma_window;
        unsigned long index;

        /* Use ibm,dma-window if available, else hard-code it! */
        dma_window = of_get_property(np, "ibm,dma-window", NULL);
        if (dma_window == NULL) {
                *base = 0;
                *size = 0x80000000u;
                return -ENODEV;
        }

        of_parse_dma_window(np, dma_window, &index, base, size);
        return 0;
}

static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
        struct cbe_iommu *iommu;
        int nid, i;

        /* Get node ID */
        nid = of_node_to_nid(np);
        if (nid < 0) {
                printk(KERN_ERR "iommu: failed to get node for %s\n",
                       np->full_name);
                return NULL;
        }
        pr_debug("iommu: setting up iommu for node %d (%s)\n",
                 nid, np->full_name);

        /* XXX todo: If we can have multiple windows on the same IOMMU, which
         * isn't the case today, we probably want here to check whether the
         * iommu for that node is already set up.
         * However, there might be issues with getting the size right so let's
         * ignore that for now. We might want to completely get rid of the
         * multiple window support since the cell iommu supports per-page ioids
         */

        if (cbe_nr_iommus >= NR_IOMMUS) {
                printk(KERN_ERR "iommu: too many IOMMUs detected! (%s)\n",
                       np->full_name);
                return NULL;
        }

        /* Init base fields */
        i = cbe_nr_iommus++;
        iommu = &iommus[i];
        iommu->stab = NULL;
        iommu->nid = nid;
        snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
        INIT_LIST_HEAD(&iommu->windows);

        return iommu;
}

static void __init cell_iommu_init_one(struct device_node *np,
                                       unsigned long offset)
{
        struct cbe_iommu *iommu;
        unsigned long base, size;

        iommu = cell_iommu_alloc(np);
        if (!iommu)
                return;

        /* Obtain a window for it */
        cell_iommu_get_window(np, &base, &size);

        pr_debug("\ttranslating window 0x%lx...0x%lx\n",
                 base, base + size - 1);

        /* Initialize the hardware */
        cell_iommu_setup_hardware(iommu, base, size);

        /* Setup the iommu_table */
        cell_iommu_setup_window(iommu, np, base, size,
                                offset >> IOMMU_PAGE_SHIFT_4K);
}

static void __init cell_disable_iommus(void)
{
        int node;
        unsigned long base, val;
        void __iomem *xregs, *cregs;

        /* Make sure IOC translation is disabled on all nodes */
        for_each_online_node(node) {
                if (cell_iommu_find_ioc(node, &base))
                        continue;
                xregs = ioremap(base, IOC_Reg_Size);
                if (xregs == NULL)
                        continue;
                cregs = xregs + IOC_IOCmd_Offset;

                pr_debug("iommu: cleaning up iommu on node %d\n", node);

                out_be64(xregs + IOC_IOST_Origin, 0);
                (void)in_be64(xregs + IOC_IOST_Origin);
                val = in_be64(cregs + IOC_IOCmd_Cfg);
                val &= ~IOC_IOCmd_Cfg_TE;
                out_be64(cregs + IOC_IOCmd_Cfg, val);
                (void)in_be64(cregs + IOC_IOCmd_Cfg);

                iounmap(xregs);
        }
}

static int __init cell_iommu_init_disabled(void)
{
        struct device_node *np = NULL;
        unsigned long base = 0, size;

        /* When no iommu is present, we use direct DMA ops */
        set_pci_dma_ops(&dma_direct_ops);

        /* First make sure all IOC translation is turned off */
        cell_disable_iommus();

        /* If we have no Axon, we set up the spider DMA magic offset */
        if (of_find_node_by_name(NULL, "axon") == NULL)
                cell_dma_direct_offset = SPIDER_DMA_OFFSET;

        /* Now we need to check to see where the memory is mapped
         * in PCI space. We assume that all buses use the same dma
         * window which is always the case so far on Cell, thus we
         * pick up the first pci-internal node we can find and check
         * the DMA window from there.
         */
        for_each_node_by_name(np, "axon") {
                if (np->parent == NULL || np->parent->parent != NULL)
                        continue;
                if (cell_iommu_get_window(np, &base, &size) == 0)
                        break;
        }
        if (np == NULL) {
                for_each_node_by_name(np, "pci-internal") {
                        if (np->parent == NULL || np->parent->parent != NULL)
                                continue;
                        if (cell_iommu_get_window(np, &base, &size) == 0)
                                break;
                }
        }
        of_node_put(np);

        /* If we found a DMA window, we check if it's big enough to enclose
         * all of physical memory. If not, we force-enable the IOMMU
         */
        if (np && size < memblock_end_of_DRAM()) {
                printk(KERN_WARNING "iommu: force-enabled, dma window"
                       " (%ldMB) smaller than total memory (%lldMB)\n",
                       size >> 20, memblock_end_of_DRAM() >> 20);
                return -ENODEV;
        }

        cell_dma_direct_offset += base;

        if (cell_dma_direct_offset != 0)
                cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;

        printk("iommu: disabled, direct DMA offset is 0x%lx\n",
               cell_dma_direct_offset);

        return 0;
}

/*
 *  Fixed IOMMU mapping support
 *
 *  This code adds support for setting up a fixed IOMMU mapping on certain
 *  cell machines. For 64-bit devices this avoids the performance overhead of
 *  mapping and unmapping pages at runtime. 32-bit devices are unable to use
 *  the fixed mapping.
 *
 *  The fixed mapping is established at boot, and maps all of physical memory
 *  1:1 into device space at some offset. On machines with < 30 GB of memory
 *  we set up the fixed mapping immediately above the normal IOMMU window.
 *
 *  For example a machine with 4GB of memory would end up with the normal
 *  IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 *  this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 *  3GB, plus any offset required by firmware. The firmware offset is encoded
 *  in the "dma-ranges" property.
 *
 *  On machines with 30GB or more of memory, we are unable to place the fixed
 *  mapping above the normal IOMMU window as we would run out of address space.
 *  Instead we move the normal IOMMU window to coincide with the hash page
 *  table; this region does not need to be part of the fixed mapping as no
 *  device should ever be DMA'ing to it. We then set up the fixed mapping
 *  from 0 to 32GB.
 */
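
/* Tying the 4GB example above to the code below: on such a hypothetical
 * machine cell_iommu_fixed_mapping_init() computes
 * fbase = _ALIGN_UP(0x80000000, 1 << IO_SEGMENT_SHIFT) = 0x80000000 and
 * fsize = 4GB, so the fixed window spans bus addresses 2GB-6GB. A device
 * DMAing to physical 1GB then uses bus address dma_iommu_fixed_base +
 * 0x40000000 = 0xc0000000 (3GB), before the firmware dma-ranges offset
 * returned by cell_iommu_get_fixed_address() is folded in by
 * cell_dma_dev_setup_fixed().
 */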

static u64 cell_iommu_get_fixed_address(struct device *dev)
{
        u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR;
        struct device_node *np;
        const u32 *ranges = NULL;
        int i, len, best, naddr, nsize, pna, range_size;

        np = of_node_get(dev->of_node);
        while (1) {
                naddr = of_n_addr_cells(np);
                nsize = of_n_size_cells(np);
                np = of_get_next_parent(np);
                if (!np)
                        break;

                ranges = of_get_property(np, "dma-ranges", &len);

                /* Ignore empty ranges, they imply no translation required */
                if (ranges && len > 0)
                        break;
        }

        if (!ranges) {
                dev_dbg(dev, "iommu: no dma-ranges found\n");
                goto out;
        }

        len /= sizeof(u32);

        pna = of_n_addr_cells(np);
        range_size = naddr + nsize + pna;

        /* dma-ranges format:
         * child addr   : naddr cells
         * parent addr  : pna cells
         * size         : nsize cells
         */
        for (i = 0, best = -1, best_size = 0; i < len; i += range_size) {
                cpu_addr = of_translate_dma_address(np, ranges + i + naddr);
                size = of_read_number(ranges + i + naddr + pna, nsize);

                if (cpu_addr == 0 && size > best_size) {
                        best = i;
                        best_size = size;
                }
        }

        if (best >= 0)
                dev_addr = of_read_number(ranges + best, naddr);
        else
                dev_dbg(dev, "iommu: no suitable range found!\n");

out:
        of_node_put(np);

        return dev_addr;
}
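
/* A sketch of the dma-ranges walk above on hypothetical device tree data,
 * assuming two address cells and two size cells at both levels (naddr =
 * nsize = pna = 2, so range_size = 6) and no further translation above
 * the parent. Given a parent bridge node carrying:
 *
 *     dma-ranges = <0x0 0x80000000   0x0 0x0   0x1 0x0>;
 *
 * the single entry translates child (bus) address 0x80000000 to CPU
 * address 0 with a 4GB size. Since cpu_addr == 0 and the size beats
 * best_size, best selects it and the function returns 0x80000000: the
 * bus address at which firmware expects the fixed window to start.
 */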

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        if (dma_mask == DMA_BIT_MASK(64) &&
            cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
                dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
                set_dma_ops(dev, &dma_iommu_fixed_ops);
        } else {
                dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
                set_dma_ops(dev, get_pci_dma_ops());
        }

        cell_dma_dev_setup(dev);

        *dev->dma_mask = dma_mask;

        return 0;
}

static void cell_dma_dev_setup_fixed(struct device *dev)
{
        u64 addr;

        addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
        set_dma_offset(dev, addr);

        dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
}

static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
                           unsigned long base_pte)
{
        unsigned long segment, offset;

        segment = addr >> IO_SEGMENT_SHIFT;
        offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
        ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));

        pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
                  addr, ptab, segment, offset);

        ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}
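
/* A worked example of the index math above, for addr = 0x23000000 (16MB
 * page number 3 of segment 2): segment = addr >> 28 = 2, and with
 * IO_PAGENO_BITS(24) = 4 the offset is (addr >> 24) - (2 << 4) =
 * 0x23 - 0x20 = 3. Because each segment's PTEs were padded out to a full
 * 4K chunk in cell_iommu_alloc_ptab(), the ptab pointer advances by
 * segment * 4096 / 8 = 1024 longs rather than by 2 * 16.
 */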

static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
        struct device_node *np, unsigned long dbase, unsigned long dsize,
        unsigned long fbase, unsigned long fsize)
{
        unsigned long base_pte, uaddr, ioaddr, *ptab;

        ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

        dma_iommu_fixed_base = fbase;

        pr_debug("iommu: mapping 0x%lx bytes from 0x%lx\n", fsize, fbase);

        base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
                (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);

        if (iommu_fixed_is_weak)
                pr_info("IOMMU: Using weak ordering for fixed mapping\n");
        else {
                pr_info("IOMMU: Using strong ordering for fixed mapping\n");
                base_pte |= CBE_IOPTE_SO_RW;
        }

        for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
                /* Don't touch the dynamic region */
                ioaddr = uaddr + fbase;
                if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
                        pr_debug("iommu: fixed/dynamic overlap, skipping\n");
                        continue;
                }

                insert_16M_pte(uaddr, ptab, base_pte);
        }

        mb();
}

static int __init cell_iommu_fixed_mapping_init(void)
{
        unsigned long dbase, dsize, fbase, fsize, hbase, hend;
        struct cbe_iommu *iommu;
        struct device_node *np;

        /* The fixed mapping is only supported on axon machines */
        np = of_find_node_by_name(NULL, "axon");
        of_node_put(np);

        if (!np) {
                pr_debug("iommu: fixed mapping disabled, no axons found\n");
                return -1;
        }

        /* We must have dma-ranges properties for fixed mapping to work */
        np = of_find_node_with_property(NULL, "dma-ranges");
        of_node_put(np);

        if (!np) {
                pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
                return -1;
        }

        /* The default setup is to have the fixed mapping sit after the
         * dynamic region, so find the top of the largest IOMMU window
         * on any axon, then add the size of RAM and that's our max value.
         * If that is > 32GB we have to do other shenanigans.
         */
        fbase = 0;
        for_each_node_by_name(np, "axon") {
                cell_iommu_get_window(np, &dbase, &dsize);
                fbase = max(fbase, dbase + dsize);
        }

        fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
        fsize = memblock_phys_mem_size();

        if ((fbase + fsize) <= 0x800000000ul)
                hbase = 0; /* use the device tree window */
        else {
                /* If we're over 32 GB we need to cheat. We can't map all of
                 * RAM with the fixed mapping, and also fit the dynamic
                 * region. So try to place the dynamic region where the hash
                 * table sits; drivers never need to DMA to it, so we don't
                 * need a fixed mapping for that area.
                 */
                if (!htab_address) {
                        pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
                        return -1;
                }
                hbase = __pa(htab_address);
                hend  = hbase + htab_size_bytes;

                /* The window must start and end on a segment boundary */
                if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
                    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
                        pr_debug("iommu: hash window not segment aligned\n");
                        return -1;
                }

                /* Check the hash window fits inside the real DMA window */
                for_each_node_by_name(np, "axon") {
                        cell_iommu_get_window(np, &dbase, &dsize);

                        if (hbase < dbase || (hend > (dbase + dsize))) {
                                pr_debug("iommu: hash window doesn't fit in "
                                         "real DMA window\n");
                                return -1;
                        }
                }

                fbase = 0;
        }

        /* Set up the dynamic regions */
        for_each_node_by_name(np, "axon") {
                iommu = cell_iommu_alloc(np);
                BUG_ON(!iommu);

                if (hbase == 0)
                        cell_iommu_get_window(np, &dbase, &dsize);
                else {
                        dbase = hbase;
                        dsize = htab_size_bytes;
                }

                printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
                        "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
                         dbase + dsize, fbase, fbase + fsize);

                cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
                iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
                                                    IOMMU_PAGE_SHIFT_4K);
                cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
                                             fbase, fsize);
                cell_iommu_enable_hardware(iommu);
                cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
        }

        dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
        set_pci_dma_ops(&dma_iommu_ops);

        return 0;
}

static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
        struct device_node *pciep;

        if (strcmp(str, "off") == 0)
                iommu_fixed_disabled = 1;

        /* If we can find a pcie-endpoint in the device tree, assume that
         * we're on a triblade or a CAB, so by default the fixed mapping
         * should be set to be weakly ordered, but only if the boot
         * option WASN'T set for strong ordering
         */
        pciep = of_find_node_by_type(NULL, "pcie-endpoint");

        if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
                iommu_fixed_is_weak = 1;

        of_node_put(pciep);

        return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);
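
/* Usage sketch for the boot option parsed above (kernel command line):
 *
 *     iommu_fixed=off      never use the fixed 1:1 window
 *     iommu_fixed=weak     populate the fixed window with weak ordering
 *     iommu_fixed=strong   keep strong ordering even when a pcie-endpoint
 *                          node (triblade/CAB) would default it to weak
 *
 * Note that "off" still runs the pcie-endpoint probe: the two flags are
 * decided independently, so iommu_fixed=off on a triblade also leaves
 * iommu_fixed_is_weak set, which is harmless since the fixed path is
 * then never used.
 */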

static u64 cell_dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops;

        if (!dev->dma_mask)
                return 0;

        if (!iommu_fixed_disabled &&
                        cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
                return DMA_BIT_MASK(64);

        dma_ops = get_dma_ops(dev);
        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);

        return DMA_BIT_MASK(64);
}

static int __init cell_iommu_init(void)
{
        struct device_node *np;

        /* If the IOMMU is disabled, or we have little enough RAM to not need
         * to enable it, we set up a direct mapping.
         *
         * Note: should we make sure we have the IOMMU actually disabled?
         */
        if (iommu_is_off ||
            (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
                if (cell_iommu_init_disabled() == 0)
                        goto bail;

        /* Setup various callbacks */
        cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
        ppc_md.dma_get_required_mask = cell_dma_get_required_mask;

        if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
                goto bail;

        /* Create an iommu for each /axon node.  */
        for_each_node_by_name(np, "axon") {
                if (np->parent == NULL || np->parent->parent != NULL)
                        continue;
                cell_iommu_init_one(np, 0);
        }

        /* Create an iommu for each toplevel /pci-internal node for
         * old hardware/firmware
         */
        for_each_node_by_name(np, "pci-internal") {
                if (np->parent == NULL || np->parent->parent != NULL)
                        continue;
                cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
        }

        /* Setup default PCI iommu ops */
        set_pci_dma_ops(&dma_iommu_ops);

 bail:
        /* Register callbacks on OF platform device addition/removal
         * to handle linking them to the right DMA operations
         */
        bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier);

        return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
