Linux/arch/mips/mm/init.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags); \
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
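
/*
 * Under SMTC, multiple thread contexts share one TLB, and
 * local_irq_save() only masks interrupts for the current thread
 * context.  dvpe()/evpe() additionally stop and restart the other
 * VPEs, so the hand-loaded TLB entries below cannot race with TLB
 * writes issued elsewhere on the core.
 */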

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since these pages are never written to after initialization,
 * we don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because it is used by IP27's special magic
 * initialization code.
 */
void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
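
/*
 * zero_page_mask holds the extra colour bits, so callers can pick
 * the zero page whose virtual colour matches a given user address;
 * asm/pgtable.h uses it roughly as
 *
 *   ZERO_PAGE(vaddr) = virt_to_page((void *)(empty_zero_page +
 *                          ((unsigned long)(vaddr) & zero_page_mask)))
 */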

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        inc_preempt_count();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
        idx += FIX_N_COLOURS * smp_processor_id() +
                (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
        idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
#else
        entrylo = pte_to_entrylo(pte_val(pte));
#endif

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
        /* preload TLB instead of local_flush_tlb_one() */
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        tlbidx = read_c0_index();
        mtc0_tlbw_hazard();
        if (tlbidx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
#else
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
#endif
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);

        return (void *)vaddr;
}
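
/*
 * kmap_coherent() intentionally bypasses the generic kmap machinery:
 * it picks a fixmap slot whose virtual colour matches the user
 * mapping and hand-loads a TLB entry for it, so kernel accesses see
 * exactly the cache lines the user sees.  On the non-SMTC path the
 * entry is made wired so that it survives until kunmap_coherent()
 * below retracts it.
 */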

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
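
/*
 * UNIQUE_ENTRYHI() produces, for each TLB index, a distinct EntryHi
 * value inside CKSEG0.  CKSEG0 is unmapped, so such an entry can
 * never match a translation; keeping the values distinct per index
 * avoids the machine check that duplicate TLB entries would raise.
 */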

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
        unsigned int wired;
        unsigned long flags, old_ctx;

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
#endif
        dec_preempt_count();
        preempt_check_resched();
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);
        if (cpu_has_dc_aliases &&
            page_mapped(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}
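
/*
 * With an aliasing D-cache, reading the source page through the
 * kernel's own mapping could miss dirty lines sitting at the user's
 * cache colour, hence the matching-colour kmap_coherent() mapping
 * above.  The destination is then flushed whenever the I-cache does
 * not fill from the D-cache, or when the kernel and user addresses
 * alias, since the freshly copied page may be mapped executable.
 */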

void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}
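
/*
 * copy_to_user_page() is reached via access_process_vm(), e.g. when
 * ptrace plants a breakpoint in another process.  Because the write
 * may deposit instructions, VM_EXEC mappings get an explicit cache
 * flush unless the I-cache fills from the D-cache.
 */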

void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}

void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}
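
/*
 * fixrange_init() only allocates the page-table pages backing the
 * fixmap/kmap range; the individual PTEs are filled in later, at
 * kmap time or by kmap_coherent_init() above.  On 32-bit MIPS the
 * pud and pmd levels are folded, which is why the casts in the loop
 * simply reinterpret the same slot at each level.
 */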

#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        /* not usable memory */
                        continue;
                }

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long lastpfn __maybe_unused;

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
        kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        lastpfn = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
                lastpfn = max_low_pfn;
        }
#endif

        free_area_init_nodes(max_zone_pfns);
}
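
/*
 * The clamping above reflects a real limitation: highmem pages must
 * be mapped with kmap()/kmap_atomic(), and with an aliasing D-cache
 * such temporary mappings may land at the wrong colour, so the
 * combination is refused rather than risk silent corruption.
 */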

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;
        unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
        max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        totalram_pages += free_all_bootmem();
        setup_zero_pages();     /* Setup zeroed pages.  */

        reservedpages = ram = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                if (page_is_ram(tmp) && pfn_valid(tmp)) {
                        ram++;
                        if (PageReserved(pfn_to_page(tmp)))
                                reservedpages++;
                }
        num_physpages = ram;

#ifdef CONFIG_HIGHMEM
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!page_is_ram(tmp)) {
                        SetPageReserved(page);
                        continue;
                }
                free_highmem_page(page);
        }
        num_physpages += totalhigh_pages;
#endif

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow.  */
                kclist_add(&kcore_kseg0, (void *) CKSEG0,
                                0x80000000 - 4, KCORE_TEXT);
#endif

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
               "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
               nr_free_pages() << (PAGE_SHIFT-10),
               ram << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_reserved_page(page);
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif

void __init_refok free_initmem(void)
{
        prom_free_prom_memory();
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * is officially retired.
 *
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and that would waste space.  So we place it in its own section
 * and align it there in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
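
/*
 * invalid_pmd_table and invalid_pte_table are the targets of empty
 * upper-level entries: a pgd/pmd slot with nothing behind it points
 * at these zero-filled tables rather than at NULL, so the TLB refill
 * handler can always walk the full depth without validity checks.
 */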