TOMOYO Linux Cross Reference
Linux/arch/sparc/mm/iommu.c

  1 /* $Id: iommu.c,v 1.21 2001/02/13 01:16:43 davem Exp $
  2  * iommu.c:  IOMMU specific routines for memory management.
  3  *
  4  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
  5  * Copyright (C) 1995 Pete Zaitcev
  6  * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
  7  * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
  8  */
  9  
 10 #include <linux/config.h>
 11 #include <linux/kernel.h>
 12 #include <linux/init.h>
 13 #include <linux/mm.h>
 14 #include <linux/slab.h>
 15 #include <asm/scatterlist.h>
 16 #include <asm/pgalloc.h>
 17 #include <asm/pgtable.h>
 18 #include <asm/sbus.h>
 19 #include <asm/io.h>
 20 #include <asm/mxcc.h>
 21 #include <asm/mbus.h>
 22 
 23 /* srmmu.c */
 24 extern int viking_mxcc_present;
 25 BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
 26 #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
 27 extern int flush_page_for_dma_global;
 28 static int viking_flush;
 29 /* viking.S */
 30 extern void viking_flush_page(unsigned long page);
 31 extern void viking_mxcc_flush_page(unsigned long page);
 32 
 33 #define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
 34 #define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)
 35 
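IOPERM is the flag set applied to premapped entries: cacheable, writable and valid. MKIOPTE() builds a complete entry from a physical address by shifting it right four bits into the page-number field (a hypothetical low page at 0x00123000 becomes 0x00012300), masking it to that field, adding the IOPERM bits and clearing the write-as-zeros bit.
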
 36 static inline void iommu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
 37 {
 38         unsigned long kern_end = (unsigned long) high_memory;
 39         unsigned long first = PAGE_OFFSET;
 40         unsigned long last = kern_end;
 41         iopte_t *iopte = iommu->page_table;
 42 
 43         iopte += ((first - iommu->start) >> PAGE_SHIFT);
 44         while(first <= last) {
 45                 *iopte++ = __iopte(MKIOPTE(__pa(first)));
 46                 first += PAGE_SIZE;
 47         }
 48 }
 49 
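iommu_map_dvma_pages_for_iommu() pre-loads a one-to-one mapping of all kernel low memory, one IOPTE per page from PAGE_OFFSET up to high_memory, stored at slot (vaddr - iommu->start) >> PAGE_SHIFT. Because iommu_init() below derives iommu->start from the same PAGE_OFFSET, the bus address of a kernel buffer equals its virtual address: with the common PAGE_OFFSET of 0xf0000000, a buffer at 0xf0123000 sits in slot 0x123 and is handed to the device as 0xf0123000. That identity is what lets the get_scsi helpers further down simply return vaddr.
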
 50 void __init
 51 iommu_init(int iommund, struct sbus_bus *sbus)
 52 {
 53         unsigned int impl, vers, ptsize;
 54         unsigned long tmp;
 55         struct iommu_struct *iommu;
 56         struct linux_prom_registers iommu_promregs[PROMREG_MAX];
 57         struct resource r;
 58         int i;
 59 
 60         iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
 61         prom_getproperty(iommund, "reg", (void *) iommu_promregs,
 62                          sizeof(iommu_promregs));
 63         memset(&r, 0, sizeof(r));
 64         r.flags = iommu_promregs[0].which_io;
 65         r.start = iommu_promregs[0].phys_addr;
 66         iommu->regs = (struct iommu_regs *)
 67                 sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
 68         if(!iommu->regs)
 69                 panic("Cannot map IOMMU registers.");
 70         impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
 71         vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
 72         tmp = iommu->regs->control;
 73         tmp &= ~(IOMMU_CTRL_RNGE);
 74         switch(PAGE_OFFSET & 0xf0000000) {
 75         case 0xf0000000:
 76                 tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
 77                 iommu->plow = iommu->start = 0xf0000000;
 78                 break;
 79         case 0xe0000000:
 80                 tmp |= (IOMMU_RNGE_512MB | IOMMU_CTRL_ENAB);
 81                 iommu->plow = iommu->start = 0xe0000000;
 82                 break;
 83         case 0xd0000000:
 84         case 0xc0000000:
 85                 tmp |= (IOMMU_RNGE_1GB | IOMMU_CTRL_ENAB);
 86                 iommu->plow = iommu->start = 0xc0000000;
 87                 break;
 88         case 0xb0000000:
 89         case 0xa0000000:
 90         case 0x90000000:
 91         case 0x80000000:
 92                 tmp |= (IOMMU_RNGE_2GB | IOMMU_CTRL_ENAB);
 93                 iommu->plow = iommu->start = 0x80000000;
 94                 break;
 95         }
 96         iommu->regs->control = tmp;
 97         iommu_invalidate(iommu->regs);
 98         iommu->end = 0xffffffff;
 99 
100         /* Allocate IOMMU page table */
101         ptsize = iommu->end - iommu->start + 1;
102         ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);
103 
 104         /* Stupid alignment constraints give me a headache.
 105            We need a 256K, 512K, 1M or 2M area aligned to
 106            its size, and the current gfp allocator will
 107            fortunately give it to us. */
108         for (i = 6; i < 9; i++)
109                 if ((1 << (i + PAGE_SHIFT)) == ptsize)
110                         break;
111         tmp = __get_free_pages(GFP_DMA, i);
112         if (!tmp) {
113                 prom_printf("Could not allocate iopte of size 0x%08x\n", ptsize);
114                 prom_halt();
115         }
116         iommu->lowest = iommu->page_table = (iopte_t *)tmp;
117 
118         /* Initialize new table. */
119         flush_cache_all();
120         memset(iommu->page_table, 0, ptsize);
121         iommu_map_dvma_pages_for_iommu(iommu);
122         if(viking_mxcc_present) {
123                 unsigned long start = (unsigned long) iommu->page_table;
124                 unsigned long end = (start + ptsize);
125                 while(start < end) {
126                         viking_mxcc_flush_page(start);
127                         start += PAGE_SIZE;
128                 }
129         } else if (viking_flush) {
130                 unsigned long start = (unsigned long) iommu->page_table;
131                 unsigned long end = (start + ptsize);
132                 while(start < end) {
133                         viking_flush_page(start);
134                         start += PAGE_SIZE;
135                 }
136         }
137         flush_tlb_all();
138         iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
139         iommu_invalidate(iommu->regs);
140 
141         sbus->iommu = iommu;
142         printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n",
143                impl, vers, iommu->page_table, ptsize);
144 }
145 
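As a worked sizing example, assuming 4KB pages (PAGE_SHIFT == 12) and a 4-byte iopte_t: the 256MB window starting at 0xf0000000 needs (0x10000000 >> 12) = 65536 entries, a 256KB table, and the order loop stops at i = 6 because 1 << (6 + 12) == 0x40000. The 2GB window needs a 2MB table; in that case the loop runs off its end leaving i == 9, which happens to be the correct order as well.
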
146 static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
147 {
148         return (__u32)vaddr;
149 }
150 
151 static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
152 {
153         flush_page_for_dma(0);
154         return (__u32)vaddr;
155 }
156 
157 static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
158 {
159         unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
160 
161         while(page < ((unsigned long)(vaddr + len))) {
162                 flush_page_for_dma(page);
163                 page += PAGE_SIZE;
164         }
165         return (__u32)vaddr;
166 }
167 
168 static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
169 {
170         while (sz != 0) {
171                 sz--;
172                 sg[sz].dvma_address = (__u32) (sg[sz].address);
173                 sg[sz].dvma_length = (__u32) (sg[sz].length);
174         }
175 }
176 
177 static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
178 {
179         flush_page_for_dma(0);
180         while (sz != 0) {
181                 sz--;
182                 sg[sz].dvma_address = (__u32) (sg[sz].address);
183                 sg[sz].dvma_length = (__u32) (sg[sz].length);
184         }
185 }
186 
187 static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
188 {
189         unsigned long page, oldpage = 0;
190 
191         while(sz != 0) {
192                 sz--;
193                 page = ((unsigned long) sg[sz].address) & PAGE_MASK;
194                 if (oldpage == page)
195                         page += PAGE_SIZE; /* We flushed that page already */
196                 while(page < (unsigned long)(sg[sz].address + sg[sz].length)) {
197                         flush_page_for_dma(page);
198                         page += PAGE_SIZE;
199                 }
200                 sg[sz].dvma_address = (__u32) (sg[sz].address);
201                 sg[sz].dvma_length = (__u32) (sg[sz].length);
202                 oldpage = page - PAGE_SIZE;
203         }
204 }
205 
206 static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
207 {
208 }
209 
210 static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
211 {
212 }
213 
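The three get_scsi variants above differ only in how much CPU cache is flushed before a buffer is handed to the device: noflush for I/O-coherent CPUs, gflush when flush_page_for_dma() flushes everything anyway, and pflush when each page must be flushed individually; ld_mmu_iommu() at the bottom of the file selects one at boot. Below is a minimal caller sketch, assuming the mmu_get_scsi_one() / mmu_release_scsi_one() BTFIXUP call wrappers behind which these functions are installed; the function and buffer names are hypothetical.

static void example_scsi_dma_roundtrip(char *buf, unsigned long len,
                                       struct sbus_bus *sbus)
{
        /* Flushes whatever the selected variant requires and returns the
         * bus address, which with this IOMMU setup equals the kernel
         * virtual address of the buffer. */
        __u32 dvma = mmu_get_scsi_one(buf, len, sbus);

        /* ... program the device with dvma and let the transfer finish ... */

        /* The release hook is a no-op here, but callers still pair it with
         * the get so that other MMU variants can reclaim resources. */
        mmu_release_scsi_one(dvma, len, sbus);
}
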
214 #ifdef CONFIG_SBUS
215 static void iommu_map_dma_area(unsigned long va, __u32 addr, int len)
216 {
217         unsigned long page, end, ipte_cache;
218         pgprot_t dvma_prot;
219         struct iommu_struct *iommu = sbus_root->iommu;
220         iopte_t *iopte = iommu->page_table;
221         iopte_t *first;
222 
223         if(viking_mxcc_present || srmmu_modtype == HyperSparc) {
224                 dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
225                 ipte_cache = 1;
226         } else {
227                 dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
228                 ipte_cache = 0;
229         }
230 
231         iopte += ((addr - iommu->start) >> PAGE_SHIFT);
232         first = iopte;
233         end = PAGE_ALIGN((addr + len));
234         while(addr < end) {
235                 page = va;
236                 {
237                         pgd_t *pgdp;
238                         pmd_t *pmdp;
239                         pte_t *ptep;
240 
241                         if (viking_mxcc_present)
242                                 viking_mxcc_flush_page(page);
243                         else if (viking_flush)
244                                 viking_flush_page(page);
245                         else
246                                 __flush_page_to_ram(page);
247 
248                         pgdp = pgd_offset(&init_mm, addr);
249                         pmdp = pmd_offset(pgdp, addr);
250                         ptep = pte_offset(pmdp, addr);
251 
252                         set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
253                         if (ipte_cache != 0) {
254                                 iopte_val(*iopte++) = MKIOPTE(__pa(page));
255                         } else {
256                                 iopte_val(*iopte++) =
257                                         MKIOPTE(__pa(page)) & ~IOPTE_CACHE;
258                         }
259                 }
260                 addr += PAGE_SIZE;
261                 va += PAGE_SIZE;
262         }
263         /* P3: why do we need this?
264          *
265          * DAVEM: Because there are several aspects, none of which
266          *        are handled by a single interface.  Some cpus are
267          *        completely not I/O DMA coherent, and some have
268          *        virtually indexed caches.  The driver DMA flushing
269          *        methods handle the former case, but here during
270          *        IOMMU page table modifications, and usage of non-cacheable
271          *        cpu mappings of pages potentially in the cpu caches, we have
272          *        to handle the latter case as well.
273          */
274         flush_cache_all();
275         if(viking_mxcc_present) {
276                 unsigned long start = ((unsigned long) first) & PAGE_MASK;
277                 unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
278                 while(start < end) {
279                         viking_mxcc_flush_page(start);
280                         start += PAGE_SIZE;
281                 }
282         } else if(viking_flush) {
283                 unsigned long start = ((unsigned long) first) & PAGE_MASK;
284                 unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
285                 while(start < end) {
286                         viking_flush_page(start);
287                         start += PAGE_SIZE;
288                 }
289         }
290         flush_tlb_all();
291         iommu_invalidate(iommu->regs);
292 }
293 
294 static void iommu_unmap_dma_area(unsigned long busa, int len)
295 {
296         struct iommu_struct *iommu = sbus_root->iommu;
297         iopte_t *iopte = iommu->page_table;
298         unsigned long end;
299 
300         iopte += ((busa - iommu->start) >> PAGE_SHIFT);
301         end = PAGE_ALIGN((busa + len));
302         while (busa < end) {
303                 iopte_val(*iopte++) = 0;
304                 busa += PAGE_SIZE;
305         }
306         flush_tlb_all();        /* P3: Hmm... it would not hurt. */
307         iommu_invalidate(iommu->regs);
308 }
309 
310 static unsigned long iommu_translate_dvma(unsigned long busa)
311 {
312         struct iommu_struct *iommu = sbus_root->iommu;
313         iopte_t *iopte = iommu->page_table;
314         unsigned long pa;
315 
316         iopte += ((busa - iommu->start) >> PAGE_SHIFT);
317         pa = pte_val(*iopte);
 318         pa = (pa & 0xFFFFFFF0) << 4;            /* Lose higher bits of 36 */
319         return pa + PAGE_OFFSET;
320 }
321 #endif
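
iommu_translate_dvma() is simply the inverse of MKIOPTE(): the entry stores the physical address shifted right by four bits, so masking off the four flag bits and shifting back recovers it (truncated to the low 32 of the 36 physical bits, as the comment notes), and adding PAGE_OFFSET turns it back into the kernel virtual alias premapped at boot. A minimal sketch of that inverse, with a hypothetical helper name:

static inline unsigned long iopte_to_phys(iopte_t iopte)
{
        /* Drop the flag bits in [3:0] and undo the >> 4 done by MKIOPTE(). */
        return (iopte_val(iopte) & 0xFFFFFFF0UL) << 4;
}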
322 
323 static char *iommu_lockarea(char *vaddr, unsigned long len)
324 {
325         return vaddr;
326 }
327 
328 static void iommu_unlockarea(char *vaddr, unsigned long len)
329 {
330 }
331 
332 void __init ld_mmu_iommu(void)
333 {
334         viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
335         BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
336         BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);
337 
338         if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
339                 /* IO coherent chip */
340                 BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
341                 BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
342         } else if (flush_page_for_dma_global) {
 343                 /* flush_page_for_dma flushes everything, no matter which page it is */
344                 BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
345                 BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
346         } else {
347                 BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
348                 BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
349         }
350         BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NOP);
351         BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NOP);
352 
353 #ifdef CONFIG_SBUS
354         BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
355         BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
356         BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
357 #endif
358 }
359 
