TOMOYO Linux Cross Reference
Linux/arch/s390/mm/init.c

// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>

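/*
 * The kernel's top-level page table; paging_init() below points
 * init_mm.pgd at it and loads it into the hardware ASCE control
 * registers.
 */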
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

bool initmem_freed;

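/*
 * Allocate a physically contiguous block of zeroed pages and mark it
 * reserved. The s390 ZERO_PAGE() implementation uses empty_zero_page
 * and zero_page_mask to spread read-only zero mappings over the whole
 * block rather than a single page.
 */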
static void __init setup_zero_pages(void)
{
        unsigned int order;
        struct page *page;
        int i;

        /* Latest machines require a mapping granularity of 512KB */
        order = 7;

        /* Limit number of empty zero pages for small memory sizes */
        while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
                order--;
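        /*
         * With 4KB pages this reduces the order step by step below
         * 512MB of memory, down to the minimum of order 2 (four zero
         * pages) below 32MB.
         */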

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Out of memory in setup_zero_pages");

        page = virt_to_page((void *) empty_zero_page);
        split_page(page, order);
        for (i = 1 << order; i > 0; i--) {
                mark_page_reserved(page);
                page++;
        }

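        /*
         * For order 7 the mask is 0x7f000, i.e. ZERO_PAGE(vaddr) picks
         * one of the 128 zero pages based on bits 12-18 of the address.
         */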
        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long pgd_type, asce_bits;
        psw_t psw;

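        /*
         * Select the page table type for the kernel address space: a
         * vmalloc area ending above 4TB (_REGION2_SIZE) needs a
         * region-second table as top level (four translation levels,
         * 8PB); otherwise a region-third table (three levels, 4TB)
         * is sufficient.
         */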
        init_mm.pgd = swapper_pg_dir;
        if (VMALLOC_END > _REGION2_SIZE) {
                asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION2_ENTRY_EMPTY;
        } else {
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION3_ENTRY_EMPTY;
        }
        init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
        S390_lowcore.kernel_asce = init_mm.context.asce;
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;
        crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
        vmem_map_init();
        kasan_copy_shadow(init_mm.pgd);

        /* enable virtual mapping in kernel mode */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
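        /*
         * CR1, CR7 and CR13 hold the primary, secondary and home space
         * ASCEs; the kernel runs with DAT enabled in the home address
         * space, so switch the PSW accordingly below.
         */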
        psw.mask = __extract_psw();
        psw_bits(psw).dat = 1;
        psw_bits(psw).as = PSW_BITS_AS_HOME;
        __load_psw_mask(psw.mask);
        kasan_free_early_identity();

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
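        /*
         * ZONE_DMA covers the memory addressable with 31 bits, i.e. the
         * first 2GB (MAX_DMA_ADDRESS), for 31-bit capable I/O.
         */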
        zone_dma_bits = 31;
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
}

void mark_rodata_ro(void)
{
        unsigned long size = __end_ro_after_init - __start_ro_after_init;

        set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
        pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

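/*
 * In a protected virtualization guest the memory is inaccessible to the
 * host by default; pages used for I/O must be shared with (and later
 * unshared from) the ultravisor explicitly. These two helpers back the
 * generic set_memory_encrypted()/set_memory_decrypted() interface used
 * by the DMA layer (swiotlb, dma_alloc/dma_free).
 */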
int set_memory_encrypted(unsigned long addr, int numpages)
{
        int i;

        /* make specified pages unshared (swiotlb, dma_free) */
        for (i = 0; i < numpages; ++i) {
                uv_remove_shared(addr);
                addr += PAGE_SIZE;
        }
        return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
        int i;

        /* make specified pages shared (swiotlb, dma_alloc) */
        for (i = 0; i < numpages; ++i) {
                uv_set_shared(addr);
                addr += PAGE_SIZE;
        }
        return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
        return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
        if (!is_prot_virt_guest())
                return;

        /* make sure bounce buffers are shared */
        swiotlb_init(1);
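        /*
         * swiotlb_update_mem_attributes() applies set_memory_decrypted()
         * (uv_set_shared() above) to the bounce buffer pool just set up.
         */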
        swiotlb_update_mem_attributes();
        swiotlb_force = SWIOTLB_FORCE;
}

void __init mem_init(void)
{
        cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(0, mm_cpumask(&init_mm));

        set_max_mapnr(max_low_pfn);
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        pv_init();

        /* Setup guest page hinting */
        cmma_init();

        /* this will put all low memory onto the freelists */
        memblock_free_all();
        setup_zero_pages();     /* Setup zeroed pages. */

        cmma_init_nodat();

        mem_init_print_info(NULL);
}

void free_initmem(void)
{
        initmem_freed = true;
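        /*
         * Make the init text writable and non-executable again before
         * the pages are handed back to the page allocator.
         */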
        __set_memory((unsigned long)_sinittext,
                     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
                     SET_MEMORY_RW | SET_MEMORY_NX);
        free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
        /*
         * Make sure the memory block size is always greater than or
         * equal to the memory increment size, sclp.rzm, which is the
         * storage increment size reported by the SCLP firmware interface.
         */
        return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
        unsigned long start;
        unsigned long end;
};

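/*
 * cma_for_each_area() callback: return -EBUSY, which also stops the
 * walk, as soon as a CMA area intersects the range about to go offline.
 */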
static int s390_cma_check_range(struct cma *cma, void *data)
{
        struct s390_cma_mem_data *mem_data;
        unsigned long start, end;

        mem_data = data;
        start = cma_get_base(cma);
        end = start + cma_get_size(cma);
        if (end < mem_data->start)
                return 0;
        if (start >= mem_data->end)
                return 0;
        return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct s390_cma_mem_data mem_data;
        struct memory_notify *arg;
        int rc = 0;

        arg = data;
        mem_data.start = arg->start_pfn << PAGE_SHIFT;
        mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
        if (action == MEM_GOING_OFFLINE)
                rc = cma_for_each_area(s390_cma_check_range, &mem_data);
        return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
        .notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
        return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

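/*
 * Hot add: create the kernel mapping for the new range first, then let
 * the core add the struct pages; tear the mapping down again if that
 * fails.
 */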
int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
{
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long size_pages = PFN_DOWN(size);
        int rc;

        if (WARN_ON_ONCE(params->altmap))
                return -EINVAL;

        if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
                return -EINVAL;

        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;

        rc = __add_pages(nid, start_pfn, size_pages, params);
        if (rc)
                vmem_remove_mapping(start, size);
        return rc;
}

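/*
 * Hot remove: undo arch_add_memory() in reverse order.
 */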
void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
        vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
