
TOMOYO Linux Cross Reference
Linux/arch/arm/mm/ioremap.c


/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
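
/*
 * Illustrative sketch (not part of the original file): a typical driver
 * maps its registers and then touches them only through the MMIO
 * accessors on the returned cookie.  DEV_PHYS_BASE, CTRL_REG and
 * STATUS_REG below are hypothetical:
 *
 *      void __iomem *regs = ioremap(DEV_PHYS_BASE, SZ_4K);
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + CTRL_REG);           (kick the device)
 *      status = readl(regs + STATUS_REG);
 *      iounmap(regs);
 */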
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

/*
 * The address-ordered list of boot-time fixed mappings; see
 * find_static_vm_*() and add_static_vm_early() below.
 */
LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
                        size_t size, unsigned int mtype)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;
                if (!(vm->flags & VM_ARM_STATIC_MAPPING))
                        continue;
                if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
                        continue;

                if (vm->phys_addr > paddr ||
                        paddr + size - 1 > vm->phys_addr + vm->size - 1)
                        continue;

                return svm;
        }

        return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;

                /* static_vmlist is sorted in ascending address order */
                if (vm->addr > vaddr)
                        break;

                if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
                        return svm;
        }

        return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
        struct static_vm *curr_svm;
        struct vm_struct *vm;
        void *vaddr;

        vm = &svm->vm;
        vm_area_add_early(vm);
        vaddr = vm->addr;

        list_for_each_entry(curr_svm, &static_vmlist, list) {
                vm = &curr_svm->vm;

                if (vm->addr > vaddr)
                        break;
        }
        /*
         * Insert before the first entry with a higher address, keeping
         * static_vmlist sorted in ascending address order.
         */
        list_add_tail(&svm->list, &curr_svm->list);
}
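
/*
 * Illustrative sketch (assumption, not in the original file): platforms
 * normally reach add_static_vm_early() via iotable_init() in
 * arch/arm/mm/mmu.c, which builds a struct static_vm for each
 * struct map_desc the board supplies, roughly:
 *
 *      static struct map_desc board_io_desc[] __initdata = {
 *              {
 *                      .virtual = IO_VIRT_BASE,        (hypothetical)
 *                      .pfn     = __phys_to_pfn(IO_PHYS_BASE),
 *                      .length  = SZ_1M,
 *                      .type    = MT_DEVICE,
 *              },
 *      };
 *      iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 *
 * Later ioremap() calls that hit this physical range are then satisfied
 * from the static mapping by find_static_vm_paddr() instead of
 * allocating new virtual space.
 */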

int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

/*
 * Bring this mm's view of the vmalloc area's page directory up to date
 * with init_mm.  The copy is retried if init_mm's entries change while
 * we are copying; vmalloc_seq acts as a sequence counter.
 */
void __check_vmalloc_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.vmalloc_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.vmalloc_seq = seq;
        } while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmdp;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmdp = pmd_offset(pud, addr);
        do {
                pmd_t pmd = *pmdp;

                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the vmalloc sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.vmalloc_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                /*
                 * Each Linux PMD covers 2MB, i.e. a pair of 1MB hardware
                 * section entries, hence the stride of two.
                 */
                addr += PMD_SIZE;
                pmdp += 2;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
                __check_vmalloc_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                /*
                 * Write the pair of 1MB section descriptors that make up
                 * each 2MB Linux PMD.
                 */
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PMD_SIZE;
                pmd += 2;
        } while (addr < end);

        return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                unsigned long super_pmd_val, i;

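                /*
                 * Added note (not in the original file): a 16MB ARM
                 * supersection descriptor carries bits [35:32] of the
                 * physical address in descriptor bits [23:20].  With 4K
                 * pages, pfn >> (32 - PAGE_SHIFT) is paddr >> 32, so for
                 * paddr 0x2_4000_0000 (pfn 0x240000) the expression below
                 * ORs in (0x2 & 0xf) << 20 = 0x200000.  The same descriptor
                 * is then replicated into all 16 consecutive 1MB entries
                 * covering the supersection, as the architecture requires.
                 */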
                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PMD_SIZE;
                        pmd += 2;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif

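/*
 * Added summary (not in the original file): __arm_ioremap_pfn_caller()
 * tries, in order:
 *   1. to reuse a boot-time static mapping that already covers the
 *      requested physical range with the right memory type;
 *   2. on !SMP/!LPAE, to map physical addresses above 4GB with 16MB
 *      supersections (the only way to reach them), given CPU support
 *      and full supersection alignment;
 *   3. on !SMP/!LPAE, to use 1MB sections when address and size are
 *      section-aligned;
 *   4. otherwise, to fall back to ordinary 4K page mappings.
 * System RAM is refused outright: aliasing it with device attributes
 * is unpredictable on ARMv6 and later.
 */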
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;
        phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
                return NULL;
#endif

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        /*
         * Try to reuse one of the static mappings whenever possible.
         */
        if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
                struct static_vm *svm;

                svm = find_static_vm_paddr(paddr, size, mtype);
                if (svm) {
                        addr = (unsigned long)svm->vm.addr;
                        addr += paddr - svm->vm.phys_addr;
                        return (void __iomem *) (offset + addr);
                }
        }

        /*
         * Don't allow RAM to be mapped - this causes problems with ARMv6+
         */
        if (WARN_ON(pfn_valid(pfn)))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
               !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((paddr | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, paddr,
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        phys_addr_t last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
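
/*
 * Added note (not in the original file): the three variants above differ
 * only in memory type.  MT_DEVICE gives a device mapping suitable for
 * registers, MT_DEVICE_CACHED a cacheable one, and MT_DEVICE_WC a
 * write-combining one - e.g. a framebuffer driver would typically do
 * (FB_PHYS and fb_size being hypothetical):
 *
 *      void __iomem *fb = ioremap_wc(FB_PHYS, fb_size);
 *
 * so that pixel writes can be merged into bursts rather than issued as
 * individual device accesses.
 */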

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory, for example when reprogramming source
 * clocks that would otherwise affect normal memory. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
        unsigned int mtype;

        if (cached)
                mtype = MT_MEMORY_RWX;
        else
                mtype = MT_MEMORY_RWX_NONCACHED;

        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
}

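/*
 * Illustrative sketch (assumption, not in the original file): a typical
 * user is suspend or cpufreq code that must run from SRAM while SDRAM is
 * unavailable.  It maps the SRAM executable, copies the function there
 * with fncpy() from <asm/fncpy.h>, and calls the returned pointer
 * (SRAM_PHYS and sram_suspend_fn are hypothetical):
 *
 *      void __iomem *sram = __arm_ioremap_exec(SRAM_PHYS, SZ_4K, false);
 *      void (*fn)(void) = fncpy(sram, &sram_suspend_fn, SZ_4K);
 *      fn();
 */
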
void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct static_vm *svm;

        /* If this is a static mapping, we must leave it alone */
        svm = find_static_vm_vaddr(addr);
        if (svm)
                return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        {
                struct vm_struct *vm;

                vm = find_vm_area(addr);

                /*
                 * If this is a section based mapping we need to handle it
                 * specially as the VM subsystem does not know how to handle
                 * such a beast.
                 */
                if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
                        unmap_area_sections((unsigned long)vm->addr, vm->size);
        }
#endif

        vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
        arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
        pci_ioremap_mem_type = mem_type;
}

int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
        BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);

        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
                                  phys_addr,
                                  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
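
/*
 * Illustrative sketch (assumption, not in the original file): an ARM PCI
 * host bridge driver maps its 64K I/O window into the fixed
 * PCI_IO_VIRT_BASE region before port accessors like inb()/outb() can
 * reach it, roughly (PCIE_IO_PHYS being hypothetical):
 *
 *      ret = pci_ioremap_io(0, PCIE_IO_PHYS);
 *      if (ret)
 *              return ret;
 */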
#endif

