TOMOYO Linux Cross Reference
Linux/arch/s390/mm/init.c

/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>

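/*
 * Top-level kernel page table; paging_init() points init_mm.pgd at it
 * and loads it into the kernel ASCE.
 */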
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

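/*
 * Allocate the block of zeroed pages that backs ZERO_PAGE().  Newer
 * machine types get a higher allocation order so that read accesses to
 * the shared zero page are spread over several pages; zero_page_mask is
 * later used to pick one of them based on the virtual address.
 */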
static void __init setup_zero_pages(void)
{
        struct cpuid cpu_id;
        unsigned int order;
        struct page *page;
        int i;

        get_cpu_id(&cpu_id);
        switch (cpu_id.machine) {
        case 0x9672:    /* g5 */
        case 0x2064:    /* z900 */
        case 0x2066:    /* z900 */
        case 0x2084:    /* z990 */
        case 0x2086:    /* z990 */
        case 0x2094:    /* z9-109 */
        case 0x2096:    /* z9-109 */
                order = 0;
                break;
        case 0x2097:    /* z10 */
        case 0x2098:    /* z10 */
        case 0x2817:    /* z196 */
        case 0x2818:    /* z196 */
                order = 2;
                break;
        case 0x2827:    /* zEC12 */
        case 0x2828:    /* zEC12 */
        default:
                order = 5;
                break;
        }
        /* Limit number of empty zero pages for small memory sizes */
        if (order > 2 && totalram_pages <= 16384)
                order = 2;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Out of memory in setup_zero_pages");

        page = virt_to_page((void *) empty_zero_page);
        split_page(page, order);
        for (i = 1 << order; i > 0; i--) {
                mark_page_reserved(page);
                page++;
        }

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
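
/*
 * For reference: the consumer of empty_zero_page and zero_page_mask is
 * the ZERO_PAGE() macro in arch/s390/include/asm/pgtable.h, which
 * (roughly, as an illustration) selects one of the zero pages by
 * masking the virtual address:
 *
 *      #define ZERO_PAGE(vaddr) (virt_to_page((void *)(empty_zero_page + \
 *                       (((unsigned long)(vaddr)) & zero_page_mask))))
 */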

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long pgd_type, asce_bits;

        init_mm.pgd = swapper_pg_dir;
#ifdef CONFIG_64BIT
        if (VMALLOC_END > (1UL << 42)) {
                asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION2_ENTRY_EMPTY;
        } else {
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION3_ENTRY_EMPTY;
        }
#else
        asce_bits = _ASCE_TABLE_LENGTH;
        pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
        S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
        clear_table((unsigned long *) init_mm.pgd, pgd_type,
                    sizeof(unsigned long)*2048);
        vmem_map_init();

        /* enable virtual mapping in kernel mode */
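        /*
         * CR1, CR7 and CR13 are loaded with the primary, secondary and
         * home space address space control elements; all three point at
         * the kernel page table here.  The system-mask restore below
         * (0x04 in the PSW mask byte) then sets the DAT bit so that
         * dynamic address translation takes effect.
         */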
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

        atomic_set(&init_mm.context.attach_count, 1);

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
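        /* On s390, ZONE_DMA covers the memory below MAX_DMA_ADDRESS (2 GB). */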
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
}

void __init mem_init(void)
{
        max_mapnr = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* Setup guest page hinting */
        cmma_init();

        /* this will put all low memory onto the freelists */
        free_all_bootmem();
        setup_zero_pages();     /* Setup zeroed pages. */

        mem_init_print_info(NULL);
        printk("Write protected kernel read-only data: %#lx - %#lx\n",
               (unsigned long)&_stext,
               PFN_ALIGN((unsigned long)&_eshared) - 1);
}

void free_initmem(void)
{
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long size_pages = PFN_DOWN(size);
        struct zone *zone;
        int rc;

        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;
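        /*
         * Distribute the new pages over the existing zones: any part of
         * the range that falls within a zone's current span is added to
         * that zone, and whatever is left over goes to ZONE_MOVABLE.
         */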
        for_each_zone(zone) {
                if (zone_idx(zone) != ZONE_MOVABLE) {
                        /* Add range within existing zone limits */
                        zone_start_pfn = zone->zone_start_pfn;
                        zone_end_pfn = zone->zone_start_pfn +
                                       zone->spanned_pages;
                } else {
                        /* Add remaining range to ZONE_MOVABLE */
                        zone_start_pfn = start_pfn;
                        zone_end_pfn = start_pfn + size_pages;
                }
                if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
                        continue;
                nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
                           zone_end_pfn - start_pfn : size_pages;
                rc = __add_pages(nid, zone, start_pfn, nr_pages);
                if (rc)
                        break;
                start_pfn += nr_pages;
                size_pages -= nr_pages;
                if (!size_pages)
                        break;
        }
        if (rc)
                vmem_remove_mapping(start, size);
        return rc;
}

unsigned long memory_block_size_bytes(void)
{
        /*
         * Make sure the memory block size is always greater than or
         * equal to the memory increment size.
         */
        return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm());
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        /*
         * There is no hardware or firmware interface which could trigger a
         * hot memory remove on s390. So there is nothing that needs to be
         * implemented.
         */
        return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
