
TOMOYO Linux Cross Reference
Linux/arch/ppc64/mm/init.c


/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>          /* for initrd_* */
#endif

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/abs_addr.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/naca.h>
#include <asm/eeh.h>

#include <asm/ppcdebug.h>

#define PGTOKB(pages)   (((pages) * PAGE_SIZE) >> 10)

#ifdef CONFIG_PPC_ISERIES
#include <asm/iSeries/iSeries_dma.h>
#endif

struct mmu_context_queue_t mmu_context_queue;
int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;

static int boot_mapsize;
static unsigned long totalram_pages;

extern pgd_t swapper_pg_dir[];
extern char __init_begin, __init_end;
extern char __chrp_begin, __chrp_end;
extern char __openfirmware_begin, __openfirmware_end;
extern struct _of_tce_table of_tce_table[];
extern char _start[], _end[];
extern char _stext[], etext[];
extern struct task_struct *current_set[NR_CPUS];

extern pgd_t ioremap_dir[];
pgd_t *ioremap_pgd = (pgd_t *)&ioremap_dir;

static void map_io_page(unsigned long va, unsigned long pa, int flags);
extern void die_if_kernel(char *, struct pt_regs *, long);

unsigned long klimit = (unsigned long)_end;

HPTE *Hash = 0;
unsigned long Hash_size = 0;
unsigned long _SDR1 = 0;
unsigned long _ASR = 0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
mmu_gather_t     mmu_gathers[NR_CPUS];

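/*
 * Trim the page table quicklists: once more than 'high' pages are
 * cached, free pages from each list until at most 'low' remain.
 * Returns the number of pages freed.
 */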
int do_check_pgt_cache(int low, int high)
{
        int freed = 0;

        if (pgtable_cache_size > high) {
                do {
                        if (pgd_quicklist)
                                free_page((unsigned long)pgd_alloc_one_fast(0)), ++freed;
                        if (pmd_quicklist)
                                free_page((unsigned long)pmd_alloc_one_fast(0, 0)), ++freed;
                        if (pte_quicklist)
                                free_page((unsigned long)pte_alloc_one_fast(0, 0)), ++freed;
                } while (pgtable_cache_size > low);
        }
        return freed;
}

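/*
 * Print a summary of memory usage: free/reserved/shared/swap-cached
 * page counts plus the size of the page table cache.
 */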
void show_mem(void)
{
        int i, free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (!atomic_read(&mem_map[i].count))
                        free++;
                else
                        shared += atomic_read(&mem_map[i].count) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
        printk("%d pages in page table cache\n", (int)pgtable_cache_size);
        show_buffers();
}

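/* Fill in the memory fields reported by the sysinfo() syscall. */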
void si_meminfo(struct sysinfo *val)
{
        val->totalram = totalram_pages;
        val->sharedram = 0;
        val->freeram = nr_free_pages();
        val->bufferram = atomic_read(&buffermem_pages);
        val->totalhigh = 0;
        val->freehigh = 0;
        val->mem_unit = PAGE_SIZE;
}

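/*
 * Map a physical address range for MMIO access.  On iSeries all of I/O
 * space is premapped, so the address is used as-is; otherwise we create
 * an uncached, guarded mapping and, once the mm is up, let the EEH
 * layer substitute a token address.
 *
 * Illustrative use only (the device address below is made up):
 *
 *      void *regs = ioremap(0xf4000000, 0x1000);
 *      ...
 *      iounmap(regs);
 */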
void *
ioremap(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_PPC_ISERIES
        return (void *)addr;
#else
        void *ret = __ioremap(addr, size, _PAGE_NO_CACHE);
        if (mem_init_done)
                return eeh_ioremap(addr, ret);  /* may remap the addr */
        return ret;
#endif
}

extern struct vm_struct *get_im_area(unsigned long size);

void *
__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
{
        unsigned long pa, ea, i;

        /*
         * Choose an address to map it to.
         * Once the imalloc system is running, we use it.
         * Before that, we map using addresses going
         * up from ioremap_bot.  imalloc will use
         * the addresses from ioremap_bot through
         * IMALLOC_END (0xE000001fffffffff).
         */
        pa = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - pa;

        if (size == 0)
                return NULL;

        if (mem_init_done) {
                struct vm_struct *area;
                area = get_im_area(size);
                if (area == NULL)
                        return NULL;
                ea = (unsigned long)(area->addr);
        } else {
                ea = ioremap_bot;
                ioremap_bot += size;
        }

        if ((flags & _PAGE_PRESENT) == 0)
                flags |= pgprot_val(PAGE_KERNEL);
        if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
                flags |= _PAGE_GUARDED;

        for (i = 0; i < size; i += PAGE_SIZE) {
                map_io_page(ea+i, pa+i, flags);
        }

        return (void *) (ea + (addr & ~PAGE_MASK));
}

void iounmap(void *addr)
{
#ifdef CONFIG_PPC_ISERIES
        /* iSeries I/O remap is a no-op */
        return;
#else
        /* DRENG / PPPBBB todo */
        return;
#endif
}

/*
 * map_io_page is currently only called by __ioremap.  It adds an entry
 * to the ioremap page table and an entry to the HPT, possibly bolting
 * it.
 */
static void map_io_page(unsigned long ea, unsigned long pa, int flags)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        unsigned long vsid;

        if (mem_init_done) {
                spin_lock(&ioremap_mm.page_table_lock);
                pgdp = pgd_offset_i(ea);
                pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
                ptep = pte_alloc(&ioremap_mm, pmdp, ea);

                pa = absolute_to_phys(pa);
                set_pte(ptep, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
                spin_unlock(&ioremap_mm.page_table_lock);
        } else {
                /* If the mm subsystem is not fully up, we cannot create a
                 * Linux page table entry for this mapping.  Simply bolt an
                 * entry in the hardware page table.
                 */
                vsid = get_kernel_vsid(ea);
                make_pte(htab_data.htab,
                        (vsid << 28) | (ea & 0xFFFFFFF), /* va (NOT the ea) */
                        pa,
                        _PAGE_NO_CACHE | _PAGE_GUARDED | PP_RWXX,
                        htab_data.htab_hash_mask, 0);
        }
}

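/*
 * Remap a physical I/O range (given as an EEH token) into user space;
 * the token is translated back to a physical address first.
 */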
#ifndef CONFIG_PPC_ISERIES
int
io_remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot)
{
        return remap_page_range(from, eeh_token_to_phys(to), size, prot);
}
#endif

void
local_flush_tlb_all(void)
{
        /* Implemented to just flush the vmalloc area.
         * vmalloc is the only user of flush_tlb_all.
         */
#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
        local_flush_tlb_range(NULL, VMALLOC_START, SMALLOC_END);
#else
        local_flush_tlb_range(NULL, VMALLOC_START, VMALLOC_END);
#endif
}

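/*
 * Flush the hash table entries for every VMA in the given mm.  Takes
 * the mm's page_table_lock itself, so callers must not hold it already.
 */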
void
local_flush_tlb_mm(struct mm_struct *mm)
{
        spin_lock(&mm->page_table_lock);

        if (mm->map_count) {
                struct vm_area_struct *mp;
                for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
                        local_flush_tlb_range(mm, mp->vm_start, mp->vm_end);
        }

        spin_unlock(&mm->page_table_lock);
}

/*
 * Callers should hold the mm->page_table_lock
 */
void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        unsigned long context = 0;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *ptep;

        switch (REGION_ID(vmaddr)) {
        case VMALLOC_REGION_ID:
                pgd = pgd_offset_k(vmaddr);
                break;
        case IO_REGION_ID:
                pgd = pgd_offset_i(vmaddr);
                break;
        case USER_REGION_ID:
                pgd = pgd_offset(vma->vm_mm, vmaddr);
                context = vma->vm_mm->context;
                break;
        default:
                panic("local_flush_tlb_page: invalid region 0x%016lx", vmaddr);
        }

        if (!pgd_none(*pgd)) {
                pmd = pmd_offset(pgd, vmaddr);
                if (!pmd_none(*pmd)) {
                        ptep = pte_offset(pmd, vmaddr);
                        /* Check if HPTE might exist and flush it if so */
                        if (pte_val(*ptep) & _PAGE_HASHPTE)
                                flush_hash_page(context, vmaddr, ptep);
                }
        }
}

void
local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *ptep;
        unsigned long pgd_end, pmd_end;
        unsigned long context;

        if (start >= end)
                panic("flush_tlb_range: start (%016lx) greater than end (%016lx)\n", start, end);

        if (REGION_ID(start) != REGION_ID(end))
                panic("flush_tlb_range: start (%016lx) and end (%016lx) not in same region\n", start, end);

        context = 0;

        switch (REGION_ID(start)) {
        case VMALLOC_REGION_ID:
                pgd = pgd_offset_k(start);
                break;
        case IO_REGION_ID:
                pgd = pgd_offset_i(start);
                break;
        case USER_REGION_ID:
                pgd = pgd_offset(mm, start);
                context = mm->context;
                break;
        default:
                panic("flush_tlb_range: invalid region for start (%016lx) and end (%016lx)\n", start, end);
        }

        do {
                pgd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
                if (pgd_end > end)
                        pgd_end = end;
                if (!pgd_none(*pgd)) {
                        pmd = pmd_offset(pgd, start);
                        do {
                                pmd_end = (start + PMD_SIZE) & PMD_MASK;
                                if (pmd_end > end)
                                        pmd_end = end;
                                if (!pmd_none(*pmd)) {
                                        ptep = pte_offset(pmd, start);
                                        do {
                                                if (pte_val(*ptep) & _PAGE_HASHPTE)
                                                        flush_hash_page(context, start, ptep);
                                                start += PAGE_SIZE;
                                                ++ptep;
                                        } while (start < pmd_end);
                                } else
                                        start = pmd_end;
                                ++pmd;
                        } while (start < pgd_end);
                } else
                        start = pgd_end;
                ++pgd;
        } while (start < end);
}

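/*
 * Free the memory holding the __init sections back to the page
 * allocator once boot-time initialization has finished.
 */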
void __init free_initmem(void)
{
        unsigned long a;
        unsigned long num_freed_pages = 0;
#define FREESEC(START,END,CNT) do { \
        a = (unsigned long)(&START); \
        for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
                clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
                set_page_count(mem_map+MAP_NR(a), 1); \
                free_page(a); \
                CNT++; \
        } \
} while (0)

        FREESEC(__init_begin,__init_end,num_freed_pages);

        printk("Freeing unused kernel memory: %ldk init\n",
                PGTOKB(num_freed_pages));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        unsigned long xstart = start;
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(mem_map + MAP_NR(start));
                set_page_count(mem_map+MAP_NR(start), 1);
                free_page(start);
                totalram_pages++;
        }
        printk("Freeing initrd memory: %ldk freed\n", (end - xstart) >> 10);
}
#endif

/*
 * Do very early mm setup.
 */
void __init mm_init_ppc64(void)
{
        struct paca_struct *lpaca;
        unsigned long guard_page, index;

        ppc_md.progress("MM:init", 0);

        /* Reserve all contexts < FIRST_USER_CONTEXT for kernel use.
         * The range of contexts [FIRST_USER_CONTEXT, NUM_USER_CONTEXT)
         * is stored on a stack/queue for easy allocation and deallocation.
         */
        mmu_context_queue.lock = SPIN_LOCK_UNLOCKED;
        mmu_context_queue.head = 0;
        mmu_context_queue.tail = NUM_USER_CONTEXT-1;
        mmu_context_queue.size = NUM_USER_CONTEXT;
        for (index = 0; index < NUM_USER_CONTEXT; index++) {
                mmu_context_queue.elements[index] = index+FIRST_USER_CONTEXT;
        }

        /* Set up guard pages for the pacas */
        for (index = 0; index < NR_CPUS; index++) {
                lpaca = &paca[index];
                guard_page = ((unsigned long)lpaca) + 0x1000;
                ppc_md.hpte_updateboltedpp(PP_RXRX, guard_page);
        }

        ppc_md.progress("MM:exit", 0x211);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;

        PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: start\n");
        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size
         * of the bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = (unsigned long)__a2p(lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE));
        if (start == 0) {
                udbg_printf("do_init_bootmem: failed to allocate a bitmap.\n");
                udbg_printf("\tbootmap_pages = 0x%lx.\n", bootmap_pages);
                PPCDBG_ENTER_DEBUGGER();
        }

        PPCDBG(PPCDBG_MMINIT, "\tstart               = 0x%lx\n", start);
        PPCDBG(PPCDBG_MMINIT, "\tbootmap_pages       = 0x%lx\n", bootmap_pages);
        PPCDBG(PPCDBG_MMINIT, "\tphysicalMemorySize  = 0x%lx\n", systemcfg->physicalMemorySize);

        boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
        PPCDBG(PPCDBG_MMINIT, "\tboot_mapsize        = 0x%lx\n", boot_mapsize);

        /* add all physical memory to the bootmem map */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long physbase, size;
                unsigned long type = lmb.memory.region[i].type;

                if (type != LMB_MEMORY_AREA)
                        continue;

                physbase = lmb.memory.region[i].physbase;
                size = lmb.memory.region[i].size;
                free_bootmem(physbase, size);
        }
        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long physbase = lmb.reserved.region[i].physbase;
                unsigned long size = lmb.reserved.region[i].size;
#if 0 /* PPPBBB */
                if ((physbase == 0) && (size < (16<<20))) {
                        size = 16 << 20;
                }
#endif
                reserve_bootmem(physbase, size);
        }

        PPCDBG(PPCDBG_MMINIT, "do_init_bootmem: end\n");
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], i;

        /*
         * All pages are DMA-able so we put them all in the DMA zone.
         */
        zones_size[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
        for (i = 1; i < MAX_NR_ZONES; i++)
                zones_size[i] = 0;
        free_area_init(zones_size);
}

void initialize_paca_hardware_interrupt_stack(void);

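/*
 * Final memory accounting: release all bootmem pages to the buddy
 * allocator, reserve the System.map pages, count code/data/init pages
 * for the boot banner, and mark the mm subsystem fully initialized.
 */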
void __init mem_init(void)
{
        extern char *sysmap;
        extern unsigned long sysmap_size;
        unsigned long addr;
        int codepages = 0;
        int datapages = 0;
        int initpages = 0;
        unsigned long va_rtas_base = (unsigned long)__va(rtas.base);

        max_mapnr = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        num_physpages = max_mapnr;      /* RAM is assumed contiguous */

        totalram_pages += free_all_bootmem();

        ifppcdebug(PPCDBG_MMINIT) {
                udbg_printf("mem_init: totalram_pages = 0x%lx\n", totalram_pages);
                udbg_printf("mem_init: va_rtas_base   = 0x%lx\n", va_rtas_base);
                udbg_printf("mem_init: va_rtas_end    = 0x%lx\n", PAGE_ALIGN(va_rtas_base+rtas.size));
                udbg_printf("mem_init: pinned start   = 0x%lx\n", __va(0));
                udbg_printf("mem_init: pinned end     = 0x%lx\n", PAGE_ALIGN(klimit));
        }

        if (sysmap_size)
                for (addr = (unsigned long)sysmap;
                     addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size);
                     addr += PAGE_SIZE)
                        SetPageReserved(mem_map + MAP_NR(addr));

        for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
             addr += PAGE_SIZE) {
                if (!PageReserved(mem_map + MAP_NR(addr)))
                        continue;
                if (addr < (ulong) etext)
                        codepages++;
                else if (addr >= (unsigned long)&__init_begin
                         && addr < (unsigned long)&__init_end)
                        initpages++;
                else if (addr < klimit)
                        datapages++;
        }

        printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
               (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
               codepages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10),
               initpages << (PAGE_SHIFT-10),
               PAGE_OFFSET, (unsigned long)__va(lmb_end_of_DRAM()));
        mem_init_done = 1;

        /* set the last page of each hardware interrupt stack to be protected */
        initialize_paca_hardware_interrupt_stack();

#ifdef CONFIG_PPC_ISERIES
        create_virtual_bus_tce_table();
#endif
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        clear_bit(PG_arch_1, &page->flags);
}

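/*
 * Make the icache coherent for a page being mapped into user space:
 * flush only if PG_arch_1 says the page is not already clean, then
 * mark it clean.
 */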
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        if (page->mapping && !PageReserved(page)
            && !test_bit(PG_arch_1, &page->flags)) {
                __flush_dcache_icache(page_address(page));
                set_bit(PG_arch_1, &page->flags);
        }
}

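/*
 * Zero or copy a page destined for user space, then make the icache
 * coherent with the new contents.
 */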
void clear_user_page(void *page, unsigned long vaddr)
{
        clear_page(page);
        __flush_dcache_icache(page);
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr)
{
        copy_page(vto, vfrom);
        __flush_dcache_icache(vto);
}

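/*
 * Flush the icache for just the bytes [addr, addr+len) within the page,
 * e.g. after ptrace has written instructions into another process.
 */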
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
}

#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
static spinlock_t shared_malloc_lock = SPIN_LOCK_UNLOCKED;
struct vm_struct *shared_list = NULL;
static struct vm_struct *get_shared_area(unsigned long size,
                                         unsigned long flags);

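/*
 * Allocate 'size' bytes in the shared addressing region
 * [SMALLOC_START, SMALLOC_END): reserve an address range on
 * shared_list, then back it with pages via vmalloc_area_pages.
 */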
void *shared_malloc(unsigned long size)
{
        pgprot_t prot;
        struct vm_struct *area;
        unsigned long ea;

        spin_lock(&shared_malloc_lock);

        printk("shared_malloc1 (no _PAGE_USER): addr = 0x%lx, size = 0x%lx\n",
               SMALLOC_START, size);

        area = get_shared_area(size, 0);
        if (!area) {
                spin_unlock(&shared_malloc_lock);
                return NULL;
        }

        ea = (unsigned long) area->addr;

        prot = __pgprot(pgprot_val(PAGE_KERNEL));
        if (vmalloc_area_pages(VMALLOC_VMADDR(ea), size, GFP_KERNEL, prot)) {
                spin_unlock(&shared_malloc_lock);
                return NULL;
        }

        printk("shared_malloc: addr = 0x%lx, size = 0x%lx\n", ea, size);

        spin_unlock(&shared_malloc_lock);
        return (void *)ea;
}

void shared_free(void *addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk(KERN_ERR "Trying to shared_free() bad address (%p)\n",
                       addr);
                return;
        }
        spin_lock(&shared_malloc_lock);

        printk("shared_free: addr = 0x%p\n", addr);

        /* Scan the memory list for an entry matching
         * the address to be freed, get the size (in bytes)
         * and free the entry.  The list lock is not dropped
         * until the page table entries are removed.
         */
        for (p = &shared_list; (tmp = *p); p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        spin_unlock(&shared_malloc_lock);
                        kfree(tmp);
                        return;
                }
        }

        spin_unlock(&shared_malloc_lock);
        printk("shared_free: error\n");
}

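/*
 * First-fit search of the address-sorted shared_list for a free range.
 * A guard page is added to 'size', so adjacent areas never touch.
 */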
static struct vm_struct *get_shared_area(unsigned long size,
                                         unsigned long flags)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;

        size += PAGE_SIZE;
        if (!size) {
                kfree(area);
                return NULL;
        }

        addr = SMALLOC_START;
        for (p = &shared_list; (tmp = *p); p = &tmp->next) {
                if ((size + addr) < addr) {
                        kfree(area);
                        return NULL;
                }
                if (size + addr <= (unsigned long) tmp->addr)
                        break;
                addr = tmp->size + (unsigned long) tmp->addr;
                if (addr > SMALLOC_END-size) {
                        kfree(area);
                        return NULL;
                }
        }

        if (addr + size > SMALLOC_END) {
                kfree(area);
                return NULL;
        }
        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->next = *p;
        *p = area;
        return area;
}

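/* Mark or unmark the current task as a user of the shared region. */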
int shared_task_mark(void)
{
        current->thread.flags |= PPC_FLAG_SHARED;
        printk("current->thread.flags = 0x%lx\n", current->thread.flags);

        return 0;
}

int shared_task_unmark(void)
{
        if (current->thread.flags & PPC_FLAG_SHARED) {
                current->thread.flags &= ~PPC_FLAG_SHARED;
                return 0;
        } else {
                return -1;
        }
}
#endif

