TOMOYO Linux Cross Reference
Linux/arch/powerpc/mm/mem.c

/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

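/*
 * Look up the kernel PTE that maps @vaddr by walking the kernel page
 * tables (pgd -> pud -> pmd -> pte).
 */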
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

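/*
 * Report whether the page frame @pfn is backed by RAM.  On 32-bit this
 * is a simple bounds check against max_pfn; on 64-bit, where memory may
 * be discontiguous, the memblock memory regions are walked instead.
 */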
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64    /* XXX for now */
        return pfn < max_pfn;
#else
        unsigned long paddr = (pfn << PAGE_SHIFT);
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                if (paddr >= reg->base && paddr < (reg->base + reg->size))
                        return 1;
        return 0;
#endif
}

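/*
 * Choose the page protection for a userspace mapping of physical
 * memory (e.g. via /dev/mem).  A platform may override this through
 * ppc_md.phys_mem_access_prot; by default, non-RAM ranges are mapped
 * non-cached.
 */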
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = pgprot_noncached(vma_prot);

        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

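/*
 * Weak fallbacks for platforms whose MMU code does not provide
 * mappings for hot-plugged memory.
 */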
int __weak create_section_mapping(unsigned long start, unsigned long end)
{
        return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
        return -ENODEV;
}

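/*
 * Hot-add @size bytes of memory starting at physical address @start:
 * map the range into the linear mapping, then hand the new pages to
 * the generic hotplug code via __add_pages().
 */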
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int rc;

        resize_hpt_for_hotplug(memblock_phys_mem_size());

        start = (unsigned long)__va(start);
        rc = create_section_mapping(start, start + size);
        if (rc) {
                pr_warning(
                        "Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
                        start, start + size, rc);
                return -EFAULT;
        }

        return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
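/*
 * Tear down a hot-removed range: release the pages through
 * __remove_pages(), then unmap the range from the linear mapping and
 * flush any stale aliases.
 */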
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct vmem_altmap *altmap;
        struct page *page;
        int ret;

        /*
         * If we have an altmap then we need to skip over any reserved PFNs
         * when querying the zone.
         */
        page = pfn_to_page(start_pfn);
        altmap = to_vmem_altmap((unsigned long) page);
        if (altmap)
                page += vmem_altmap_offset(altmap);

        ret = __remove_pages(page_zone(page), start_pfn, nr_pages);
        if (ret)
                return ret;

        /* Remove htab bolted mappings for this section of memory */
        start = (unsigned long)__va(start);
        ret = remove_section_mapping(start, start + size);

        /* Ensure all vmalloc mappings are flushed in case they also
         * hit that section of memory
         */
        vm_unmap_aliases();

        resize_hpt_for_hotplug(memblock_phys_mem_size());

        return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem.  Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes and invoke the callback
 * for contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        struct memblock_region *reg;
        unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long tstart, tend;
        int ret = -1;

        for_each_memblock(memory, reg) {
                tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
                tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
                if (tstart >= tend)
                        continue;
                ret = (*func)(tstart, tend - tstart, arg);
                if (ret)
                        break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifndef CONFIG_NEED_MULTIPLE_NODES
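/*
 * Initialise the pfn limits and sparsemem data for the flat
 * (single-node) memory layout.
 */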
void __init initmem_init(void)
{
        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /* Place all memblock_regions in the same node and merge contiguous
         * memblock_regions
         */
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);
        sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
        struct memblock_region *reg, *prev = NULL;

        for_each_memblock(memory, reg) {
                if (prev &&
                    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
                        register_nosave_region(memblock_region_memory_end_pfn(prev),
                                               memblock_region_memory_base_pfn(reg));
                prev = reg;
        }
        return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
        return 0;
}
#endif

static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
        [0            ... TOP_ZONE        ] = ~0UL,
        [TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
        int i;

        if (WARN_ON(zone_limits_final))
                return;

        for (i = zone; i >= 0; i--) {
                if (max_zone_pfns[i] > pfn_limit)
                        max_zone_pfns[i] = pfn_limit;
        }
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
        int i;

        for (i = TOP_ZONE; i >= 0; i--) {
                if (max_zone_pfns[i] <= pfn_limit)
                        return i;
        }

        return -EPERM;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long long total_ram = memblock_phys_mem_size();
        phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_kernel_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_kernel_page(PKMAP_BASE, 0, 0);      /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
               (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
        limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
        limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
        zone_limits_final = true;
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}

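/*
 * Release the remaining boot memory to the page allocator and, on
 * 32-bit, report the kernel virtual memory layout.
 */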
void __init mem_init(void)
{
        /*
         * book3s is limited to 16 page sizes due to encoding this in
         * a 4-bit field for slices.
         */
        BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
        swiotlb_init(0);
#endif

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        set_max_mapnr(max_pfn);
        free_all_bootmem();

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
                        struct page *page = pfn_to_page(pfn);
                        if (!memblock_is_reserved(paddr))
                                free_highmem_page(page);
                }
        }
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
        /*
         * If SMP is enabled, next_tlbcam_idx is initialized in the CPU
         * bring-up functions; do it here for the non-SMP case.
         */
        per_cpu(next_tlbcam_idx, smp_processor_id()) =
                (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

        mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
        pr_info("Kernel virtual memory layout:\n");
        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
        pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
                IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
                ioremap_bot, IOREMAP_TOP);
        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
                VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

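/*
 * Free the memory used by the kernel's init sections once boot is
 * complete.
 */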
void free_initmem(void)
{
        ppc_md.progress = ppc_printk_progress;
        mark_initmem_nx();
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

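/*
 * Write a page back from the data cache and invalidate it in the
 * instruction cache, mapping the page first where it may reside in
 * highmem.
 */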
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (PageCompound(page)) {
                flush_dcache_icache_hugepage(page);
                return;
        }
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page));
#else
        if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
                void *start = kmap_atomic(page);
                __flush_dcache_icache(start);
                kunmap_atomic(start);
        } else {
                __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
        }
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation; however,
         * there are two problems.  Firstly, a bug in some versions of
         * binutils meant PLT sections were not marked executable.
         * Secondly, the first word in the GOT section is blrl, used to
         * establish the GOT address.  Until recently the GOT was not
         * marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

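/*
 * Make the instruction cache coherent with a range of a user page,
 * e.g. after the kernel has written instructions into it.
 */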
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
        /*
         * We don't need to worry about _PAGE_PRESENT here because we are
         * called with either mm->page_table_lock held or the ptl lock held.
         */
        unsigned long access, trap;

        if (radix_enabled())
                return;

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text.  We have to test
         * for regs being NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */

        trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
        switch (trap) {
        case 0x300:
                access = 0UL;
                break;
        case 0x400:
                access = _PAGE_EXEC;
                break;
        default:
                return;
        }

        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
        && defined(CONFIG_HUGETLB_PAGE)
        if (is_vm_hugetlb_page(vma))
                book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                struct resource *res;
                unsigned long base = reg->base;
                unsigned long size = reg->size;

                res = kzalloc(sizeof(struct resource), GFP_KERNEL);
                WARN_ON(!res);

                if (res) {
                        res->name = "System RAM";
                        res->start = base;
                        res->end = base + size - 1;
                        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                        WARN_ON(request_resource(&iomem_resource, res) < 0);
                }
        }

        return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-RAM areas as well; these contain the
 * PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (page_is_rtas_user_buf(pfn))
                return 1;
        if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
