
TOMOYO Linux Cross Reference
Linux/arch/tile/kernel/setup.c


  1 /*
  2  * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3  *
  4  *   This program is free software; you can redistribute it and/or
  5  *   modify it under the terms of the GNU General Public License
  6  *   as published by the Free Software Foundation, version 2.
  7  *
  8  *   This program is distributed in the hope that it will be useful, but
  9  *   WITHOUT ANY WARRANTY; without even the implied warranty of
 10  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 11  *   NON INFRINGEMENT.  See the GNU General Public License for
 12  *   more details.
 13  */
 14 
 15 #include <linux/sched.h>
 16 #include <linux/kernel.h>
 17 #include <linux/mmzone.h>
 18 #include <linux/bootmem.h>
 19 #include <linux/module.h>
 20 #include <linux/node.h>
 21 #include <linux/cpu.h>
 22 #include <linux/ioport.h>
 23 #include <linux/irq.h>
 24 #include <linux/kexec.h>
 25 #include <linux/pci.h>
 26 #include <linux/swiotlb.h>
 27 #include <linux/initrd.h>
 28 #include <linux/io.h>
 29 #include <linux/highmem.h>
 30 #include <linux/smp.h>
 31 #include <linux/timex.h>
 32 #include <linux/hugetlb.h>
 33 #include <linux/start_kernel.h>
 34 #include <linux/screen_info.h>
 35 #include <linux/tick.h>
 36 #include <asm/setup.h>
 37 #include <asm/sections.h>
 38 #include <asm/cacheflush.h>
 39 #include <asm/pgalloc.h>
 40 #include <asm/mmu_context.h>
 41 #include <hv/hypervisor.h>
 42 #include <arch/interrupts.h>
 43 
 44 /* <linux/smp.h> doesn't provide this definition. */
 45 #ifndef CONFIG_SMP
 46 #define setup_max_cpus 1
 47 #endif
 48 
 49 static inline int ABS(int x) { return x >= 0 ? x : -x; }
 50 
 51 /* Chip information */
 52 char chip_model[64] __write_once;
 53 
 54 #ifdef CONFIG_VT
 55 struct screen_info screen_info;
 56 #endif
 57 
 58 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 59 EXPORT_SYMBOL(node_data);
 60 
 61 /* Information on the NUMA nodes that we compute early */
 62 unsigned long node_start_pfn[MAX_NUMNODES];
 63 unsigned long node_end_pfn[MAX_NUMNODES];
 64 unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
 65 unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
 66 unsigned long __initdata node_free_pfn[MAX_NUMNODES];
 67 
 68 static unsigned long __initdata node_percpu[MAX_NUMNODES];
 69 
 70 /*
 71  * per-CPU stack and boot info.
 72  */
 73 DEFINE_PER_CPU(unsigned long, boot_sp) =
 74         (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA;
 75 
 76 #ifdef CONFIG_SMP
 77 DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
 78 #else
 79 /*
 80  * The variable must be __initdata since it references __init code.
 81  * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
 82  */
 83 unsigned long __initdata boot_pc = (unsigned long)start_kernel;
 84 #endif
 85 
 86 #ifdef CONFIG_HIGHMEM
 87 /* Page frame index of end of lowmem on each controller. */
 88 unsigned long node_lowmem_end_pfn[MAX_NUMNODES];
 89 
 90 /* Number of pages that can be mapped into lowmem. */
 91 static unsigned long __initdata mappable_physpages;
 92 #endif
 93 
 94 /* Data on which physical memory controller corresponds to which NUMA node */
 95 int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
 96 
 97 #ifdef CONFIG_HIGHMEM
 98 /* Map information from VAs to PAs */
 99 unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
100   __write_once __attribute__((aligned(L2_CACHE_BYTES)));
101 EXPORT_SYMBOL(pbase_map);
102 
103 /* Map information from PAs to VAs */
104 void *vbase_map[NR_PA_HIGHBIT_VALUES]
105   __write_once __attribute__((aligned(L2_CACHE_BYTES)));
106 EXPORT_SYMBOL(vbase_map);
107 #endif
108 
109 /* Node number as a function of the high PA bits */
110 int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
111 EXPORT_SYMBOL(highbits_to_node);
112 
113 static unsigned int __initdata maxmem_pfn = -1U;
114 static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
115         [0 ... MAX_NUMNODES-1] = -1U
116 };
117 static nodemask_t __initdata isolnodes;
118 
119 #if defined(CONFIG_PCI) && !defined(__tilegx__)
120 enum { DEFAULT_PCI_RESERVE_MB = 64 };
121 static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
122 unsigned long __initdata pci_reserve_start_pfn = -1U;
123 unsigned long __initdata pci_reserve_end_pfn = -1U;
124 #endif
125 
126 static int __init setup_maxmem(char *str)
127 {
128         unsigned long long maxmem;
129         if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
130                 return -EINVAL;
131 
132         maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
133         pr_info("Forcing RAM used to no more than %dMB\n",
134                 maxmem_pfn >> (20 - PAGE_SHIFT));
135         return 0;
136 }
137 early_param("maxmem", setup_maxmem);
138 
139 static int __init setup_maxnodemem(char *str)
140 {
141         char *endp;
142         unsigned long long maxnodemem;
143         long node;
144 
145         node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
146         if (node >= MAX_NUMNODES || *endp != ':')
147                 return -EINVAL;
148 
149         maxnodemem = memparse(endp+1, NULL);
150         maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
151                 (HPAGE_SHIFT - PAGE_SHIFT);
152         pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
153                 node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
154         return 0;
155 }
156 early_param("maxnodemem", setup_maxnodemem);
157 
158 struct memmap_entry {
159         u64 addr;       /* start of memory segment */
160         u64 size;       /* size of memory segment */
161 };
162 static struct memmap_entry memmap_map[64];
163 static int memmap_nr;
164 
165 static void add_memmap_region(u64 addr, u64 size)
166 {
167         if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
168                 pr_err("Ooops! Too many entries in the memory map!\n");
169                 return;
170         }
171         memmap_map[memmap_nr].addr = addr;
172         memmap_map[memmap_nr].size = size;
173         memmap_nr++;
174 }
175 
176 static int __init setup_memmap(char *p)
177 {
178         char *oldp;
179         u64 start_at, mem_size;
180 
181         if (!p)
182                 return -EINVAL;
183 
184         if (!strncmp(p, "exactmap", 8)) {
185                 pr_err("\"memmap=exactmap\" not valid on tile\n");
186                 return 0;
187         }
188 
189         oldp = p;
190         mem_size = memparse(p, &p);
191         if (p == oldp)
192                 return -EINVAL;
193 
194         if (*p == '@') {
195                 pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
196         } else if (*p == '#') {
197                 pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
198         } else if (*p == '$') {
199                 start_at = memparse(p+1, &p);
200                 add_memmap_region(start_at, mem_size);
201         } else {
202                 if (mem_size == 0)
203                         return -EINVAL;
204                 maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
205                         (HPAGE_SHIFT - PAGE_SHIFT);
206         }
207         return *p == '\0' ? 0 : -EINVAL;
208 }
209 early_param("memmap", setup_memmap);
210 
211 static int __init setup_mem(char *str)
212 {
213         return setup_maxmem(str);
214 }
215 early_param("mem", setup_mem);  /* compatibility with x86 */
216 
217 static int __init setup_isolnodes(char *str)
218 {
219         if (str == NULL || nodelist_parse(str, isolnodes) != 0)
220                 return -EINVAL;
221 
222         pr_info("Set isolnodes value to '%*pbl'\n",
223                 nodemask_pr_args(&isolnodes));
224         return 0;
225 }
226 early_param("isolnodes", setup_isolnodes);
227 
228 #if defined(CONFIG_PCI) && !defined(__tilegx__)
229 static int __init setup_pci_reserve(char* str)
230 {
231         if (str == NULL || kstrtouint(str, 0, &pci_reserve_mb) != 0 ||
232             pci_reserve_mb > 3 * 1024)
233                 return -EINVAL;
234 
235         pr_info("Reserving %dMB for PCIE root complex mappings\n",
236                 pci_reserve_mb);
237         return 0;
238 }
239 early_param("pci_reserve", setup_pci_reserve);
240 #endif
241 
242 #ifndef __tilegx__
243 /*
244  * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
245  * This can be used to increase (or decrease) the vmalloc area.
246  */
247 static int __init parse_vmalloc(char *arg)
248 {
249         if (!arg)
250                 return -EINVAL;
251 
252         VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;
253 
254         /* See validate_va() for more on this test. */
255         if ((long)_VMALLOC_START >= 0)
256                 early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
257                             VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);
258 
259         return 0;
260 }
261 early_param("vmalloc", parse_vmalloc);
262 #endif
263 
264 #ifdef CONFIG_HIGHMEM
265 /*
266  * Determine for each controller where its lowmem is mapped and how much of
267  * it is mapped there.  On controller zero, the first few megabytes are
268  * already mapped in as code at MEM_SV_START, so in principle we could
269  * start our data mappings higher up, but for now we don't bother, to avoid
270  * additional confusion.
271  *
 272  * One question is whether, on systems with more than 768 MB and
273  * controllers of different sizes, to map in a proportionate amount of
274  * each one, or to try to map the same amount from each controller.
275  * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
276  * respectively, do we map 256MB from each, or do we map 128 MB, 512
277  * MB, and 128 MB respectively?)  For now we use a proportionate
278  * solution like the latter.
279  *
280  * The VA/PA mapping demands that we align our decisions at 16 MB
281  * boundaries so that we can rapidly convert VA to PA.
282  */
283 static void *__init setup_pa_va_mapping(void)
284 {
285         unsigned long curr_pages = 0;
286         unsigned long vaddr = PAGE_OFFSET;
287         nodemask_t highonlynodes = isolnodes;
288         int i, j;
289 
290         memset(pbase_map, -1, sizeof(pbase_map));
291         memset(vbase_map, -1, sizeof(vbase_map));
292 
293         /* Node zero cannot be isolated for LOWMEM purposes. */
294         node_clear(0, highonlynodes);
295 
296         /* Count up the number of pages on non-highonlynodes controllers. */
297         mappable_physpages = 0;
298         for_each_online_node(i) {
299                 if (!node_isset(i, highonlynodes))
300                         mappable_physpages +=
301                                 node_end_pfn[i] - node_start_pfn[i];
302         }
303 
304         for_each_online_node(i) {
305                 unsigned long start = node_start_pfn[i];
306                 unsigned long end = node_end_pfn[i];
307                 unsigned long size = end - start;
308                 unsigned long vaddr_end;
309 
310                 if (node_isset(i, highonlynodes)) {
311                         /* Mark this controller as having no lowmem. */
312                         node_lowmem_end_pfn[i] = start;
313                         continue;
314                 }
315 
316                 curr_pages += size;
317                 if (mappable_physpages > MAXMEM_PFN) {
318                         vaddr_end = PAGE_OFFSET +
319                                 (((u64)curr_pages * MAXMEM_PFN /
320                                   mappable_physpages)
321                                  << PAGE_SHIFT);
322                 } else {
323                         vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
324                 }
325                 for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
326                         unsigned long this_pfn =
327                                 start + (j << HUGETLB_PAGE_ORDER);
328                         pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
329                         if (vbase_map[__pfn_to_highbits(this_pfn)] ==
330                             (void *)-1)
331                                 vbase_map[__pfn_to_highbits(this_pfn)] =
332                                         (void *)(vaddr & HPAGE_MASK);
333                 }
334                 node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
335                 BUG_ON(node_lowmem_end_pfn[i] > end);
336         }
337 
338         /* Return highest address of any mapped memory. */
339         return (void *)vaddr;
340 }
341 #endif /* CONFIG_HIGHMEM */
342 
343 /*
344  * Register our most important memory mappings with the debug stub.
345  *
346  * This is up to 4 mappings for lowmem, one mapping per memory
347  * controller, plus one for our text segment.
348  */
349 static void store_permanent_mappings(void)
350 {
351         int i;
352 
353         for_each_online_node(i) {
354                 HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
355 #ifdef CONFIG_HIGHMEM
356                 HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
357 #else
358                 HV_PhysAddr high_mapped_pa = node_end_pfn[i];
359 #endif
360 
361                 unsigned long pages = high_mapped_pa - node_start_pfn[i];
362                 HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
363                 hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
364         }
365 
366         hv_store_mapping((HV_VirtAddr)_text,
367                          (uint32_t)(_einittext - _text), 0);
368 }
369 
370 /*
371  * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
372  * and node_online_map, doing suitable sanity-checking.
373  * Also set min_low_pfn, max_low_pfn, and max_pfn.
374  */
375 static void __init setup_memory(void)
376 {
377         int i, j;
378         int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
379 #ifdef CONFIG_HIGHMEM
380         long highmem_pages;
381 #endif
382 #ifndef __tilegx__
383         int cap;
384 #endif
385 #if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
386         long lowmem_pages;
387 #endif
388         unsigned long physpages = 0;
389 
390         /* We are using a char to hold the cpu_2_node[] mapping */
391         BUILD_BUG_ON(MAX_NUMNODES > 127);
392 
393         /* Discover the ranges of memory available to us */
394         for (i = 0; ; ++i) {
395                 unsigned long start, size, end, highbits;
396                 HV_PhysAddrRange range = hv_inquire_physical(i);
397                 if (range.size == 0)
398                         break;
399 #ifdef CONFIG_FLATMEM
400                 if (i > 0) {
401                         pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
 402                                range.start, range.start + range.size);
403                         continue;
404                 }
405 #endif
406 #ifndef __tilegx__
407                 if ((unsigned long)range.start) {
408                         pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
409                                range.start, range.start + range.size);
410                         continue;
411                 }
412 #endif
413                 if ((range.start & (HPAGE_SIZE-1)) != 0 ||
414                     (range.size & (HPAGE_SIZE-1)) != 0) {
415                         unsigned long long start_pa = range.start;
416                         unsigned long long orig_size = range.size;
417                         range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
418                         range.size -= (range.start - start_pa);
419                         range.size &= HPAGE_MASK;
420                         pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
421                                start_pa, start_pa + orig_size,
422                                range.start, range.start + range.size);
423                 }
424                 highbits = __pa_to_highbits(range.start);
425                 if (highbits >= NR_PA_HIGHBIT_VALUES) {
426                         pr_err("PA high bits too high: %#llx..%#llx\n",
427                                range.start, range.start + range.size);
428                         continue;
429                 }
430                 if (highbits_seen[highbits]) {
431                         pr_err("Range overlaps in high bits: %#llx..%#llx\n",
432                                range.start, range.start + range.size);
433                         continue;
434                 }
435                 highbits_seen[highbits] = 1;
436                 if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
437                         int max_size = maxnodemem_pfn[i];
438                         if (max_size > 0) {
439                                 pr_err("Maxnodemem reduced node %d to %d pages\n",
440                                        i, max_size);
441                                 range.size = PFN_PHYS(max_size);
442                         } else {
443                                 pr_err("Maxnodemem disabled node %d\n", i);
444                                 continue;
445                         }
446                 }
447                 if (physpages + PFN_DOWN(range.size) > maxmem_pfn) {
448                         int max_size = maxmem_pfn - physpages;
449                         if (max_size > 0) {
450                                 pr_err("Maxmem reduced node %d to %d pages\n",
451                                        i, max_size);
452                                 range.size = PFN_PHYS(max_size);
453                         } else {
454                                 pr_err("Maxmem disabled node %d\n", i);
455                                 continue;
456                         }
457                 }
458                 if (i >= MAX_NUMNODES) {
459                         pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
 460                                i, range.start, range.start + range.size);
461                         continue;
462                 }
463 
464                 start = range.start >> PAGE_SHIFT;
465                 size = range.size >> PAGE_SHIFT;
466                 end = start + size;
467 
468 #ifndef __tilegx__
469                 if (((HV_PhysAddr)end << PAGE_SHIFT) !=
470                     (range.start + range.size)) {
471                         pr_err("PAs too high to represent: %#llx..%#llx\n",
472                                range.start, range.start + range.size);
473                         continue;
474                 }
475 #endif
476 #if defined(CONFIG_PCI) && !defined(__tilegx__)
477                 /*
478                  * Blocks that overlap the pci reserved region must
479                  * have enough space to hold the maximum percpu data
480                  * region at the top of the range.  If there isn't
481                  * enough space above the reserved region, just
482                  * truncate the node.
483                  */
484                 if (start <= pci_reserve_start_pfn &&
485                     end > pci_reserve_start_pfn) {
486                         unsigned int per_cpu_size =
487                                 __per_cpu_end - __per_cpu_start;
488                         unsigned int percpu_pages =
489                                 NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
490                         if (end < pci_reserve_end_pfn + percpu_pages) {
491                                 end = pci_reserve_start_pfn;
492                                 pr_err("PCI mapping region reduced node %d to %ld pages\n",
493                                        i, end - start);
494                         }
495                 }
496 #endif
497 
498                 for (j = __pfn_to_highbits(start);
499                      j <= __pfn_to_highbits(end - 1); j++)
500                         highbits_to_node[j] = i;
501 
502                 node_start_pfn[i] = start;
503                 node_end_pfn[i] = end;
504                 node_controller[i] = range.controller;
505                 physpages += size;
506                 max_pfn = end;
507 
508                 /* Mark node as online */
509                 node_set(i, node_online_map);
510                 node_set(i, node_possible_map);
511         }
512 
513 #ifndef __tilegx__
514         /*
515          * For 4KB pages, mem_map "struct page" data is 1% of the size
516          * of the physical memory, so can be quite big (640 MB for
517          * four 16G zones).  These structures must be mapped in
518          * lowmem, and since we currently cap out at about 768 MB,
519          * it's impractical to try to use this much address space.
520          * For now, arbitrarily cap the amount of physical memory
521          * we're willing to use at 8 million pages (32GB of 4KB pages).
522          */
523         cap = 8 * 1024 * 1024;  /* 8 million pages */
524         if (physpages > cap) {
525                 int num_nodes = num_online_nodes();
526                 int cap_each = cap / num_nodes;
527                 unsigned long dropped_pages = 0;
528                 for (i = 0; i < num_nodes; ++i) {
529                         int size = node_end_pfn[i] - node_start_pfn[i];
530                         if (size > cap_each) {
531                                 dropped_pages += (size - cap_each);
532                                 node_end_pfn[i] = node_start_pfn[i] + cap_each;
533                         }
534                 }
535                 physpages -= dropped_pages;
536                 pr_warn("Only using %ldMB memory - ignoring %ldMB\n",
537                         physpages >> (20 - PAGE_SHIFT),
538                         dropped_pages >> (20 - PAGE_SHIFT));
539                 pr_warn("Consider using a larger page size\n");
540         }
541 #endif
542 
543         /* Heap starts just above the last loaded address. */
544         min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);
545 
546 #ifdef CONFIG_HIGHMEM
547         /* Find where we map lowmem from each controller. */
548         high_memory = setup_pa_va_mapping();
549 
550         /* Set max_low_pfn based on what node 0 can directly address. */
551         max_low_pfn = node_lowmem_end_pfn[0];
552 
553         lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
554                 MAXMEM_PFN : mappable_physpages;
555         highmem_pages = (long) (physpages - lowmem_pages);
556 
557         pr_notice("%ldMB HIGHMEM available\n",
558                   pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
559         pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
560 #else
561         /* Set max_low_pfn based on what node 0 can directly address. */
562         max_low_pfn = node_end_pfn[0];
563 
564 #ifndef __tilegx__
565         if (node_end_pfn[0] > MAXMEM_PFN) {
566                 pr_warn("Only using %ldMB LOWMEM\n", MAXMEM >> 20);
567                 pr_warn("Use a HIGHMEM enabled kernel\n");
568                 max_low_pfn = MAXMEM_PFN;
569                 max_pfn = MAXMEM_PFN;
570                 node_end_pfn[0] = MAXMEM_PFN;
571         } else {
572                 pr_notice("%ldMB memory available\n",
573                           pages_to_mb(node_end_pfn[0]));
574         }
575         for (i = 1; i < MAX_NUMNODES; ++i) {
576                 node_start_pfn[i] = 0;
577                 node_end_pfn[i] = 0;
578         }
579         high_memory = __va(node_end_pfn[0]);
580 #else
581         lowmem_pages = 0;
582         for (i = 0; i < MAX_NUMNODES; ++i) {
583                 int pages = node_end_pfn[i] - node_start_pfn[i];
584                 lowmem_pages += pages;
585                 if (pages)
586                         high_memory = pfn_to_kaddr(node_end_pfn[i]);
587         }
588         pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
589 #endif
590 #endif
591 }
592 
593 /*
594  * On 32-bit machines, we only put bootmem on the low controller,
595  * since PAs > 4GB can't be used in bootmem.  In principle one could
596  * imagine, e.g., multiple 1 GB controllers all of which could support
597  * bootmem, but in practice using controllers this small isn't a
598  * particularly interesting scenario, so we just keep it simple and
599  * use only the first controller for bootmem on 32-bit machines.
600  */
601 static inline int node_has_bootmem(int nid)
602 {
603 #ifdef CONFIG_64BIT
604         return 1;
605 #else
606         return nid == 0;
607 #endif
608 }
609 
610 static inline unsigned long alloc_bootmem_pfn(int nid,
611                                               unsigned long size,
612                                               unsigned long goal)
613 {
614         void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
615                                          PAGE_SIZE, goal);
616         unsigned long pfn = kaddr_to_pfn(kva);
617         BUG_ON(goal && PFN_PHYS(pfn) != goal);
618         return pfn;
619 }
620 
621 static void __init setup_bootmem_allocator_node(int i)
622 {
623         unsigned long start, end, mapsize, mapstart;
624 
625         if (node_has_bootmem(i)) {
626                 NODE_DATA(i)->bdata = &bootmem_node_data[i];
627         } else {
628                 /* Share controller zero's bdata for now. */
629                 NODE_DATA(i)->bdata = &bootmem_node_data[0];
630                 return;
631         }
632 
633         /* Skip up to after the bss in node 0. */
634         start = (i == 0) ? min_low_pfn : node_start_pfn[i];
635 
636         /* Only lowmem, if we're a HIGHMEM build. */
637 #ifdef CONFIG_HIGHMEM
638         end = node_lowmem_end_pfn[i];
639 #else
640         end = node_end_pfn[i];
641 #endif
642 
643         /* No memory here. */
644         if (end == start)
645                 return;
646 
647         /* Figure out where the bootmem bitmap is located. */
648         mapsize = bootmem_bootmap_pages(end - start);
649         if (i == 0) {
650                 /* Use some space right before the heap on node 0. */
651                 mapstart = start;
652                 start += mapsize;
653         } else {
654                 /* Allocate bitmap on node 0 to avoid page table issues. */
655                 mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
656         }
657 
658         /* Initialize a node. */
659         init_bootmem_node(NODE_DATA(i), mapstart, start, end);
660 
661         /* Free all the space back into the allocator. */
662         free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
663 
664 #if defined(CONFIG_PCI) && !defined(__tilegx__)
665         /*
666          * Throw away any memory aliased by the PCI region.
667          */
668         if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
669                 start = max(pci_reserve_start_pfn, start);
670                 end = min(pci_reserve_end_pfn, end);
671                 reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
672                                 BOOTMEM_EXCLUSIVE);
673         }
674 #endif
675 }
676 
677 static void __init setup_bootmem_allocator(void)
678 {
679         int i;
680         for (i = 0; i < MAX_NUMNODES; ++i)
681                 setup_bootmem_allocator_node(i);
682 
683         /* Reserve any memory excluded by "memmap" arguments. */
684         for (i = 0; i < memmap_nr; ++i) {
685                 struct memmap_entry *m = &memmap_map[i];
686                 reserve_bootmem(m->addr, m->size, BOOTMEM_DEFAULT);
687         }
688 
689 #ifdef CONFIG_BLK_DEV_INITRD
690         if (initrd_start) {
691                 /* Make sure the initrd memory region is not modified. */
692                 if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
693                                     BOOTMEM_EXCLUSIVE)) {
694                         pr_crit("The initrd memory region has been polluted. Disabling it.\n");
695                         initrd_start = 0;
696                         initrd_end = 0;
697                 } else {
698                         /*
699                          * Translate initrd_start & initrd_end from PA to VA for
700                          * future access.
701                          */
702                         initrd_start += PAGE_OFFSET;
703                         initrd_end += PAGE_OFFSET;
704                 }
705         }
706 #endif
707 
708 #ifdef CONFIG_KEXEC
709         if (crashk_res.start != crashk_res.end)
710                 reserve_bootmem(crashk_res.start, resource_size(&crashk_res),
711                                 BOOTMEM_DEFAULT);
712 #endif
713 }
714 
715 void *__init alloc_remap(int nid, unsigned long size)
716 {
717         int pages = node_end_pfn[nid] - node_start_pfn[nid];
718         void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
719         BUG_ON(size != pages * sizeof(struct page));
720         memset(map, 0, size);
721         return map;
722 }
723 
724 static int __init percpu_size(void)
725 {
726         int size = __per_cpu_end - __per_cpu_start;
727         size += PERCPU_MODULE_RESERVE;
728         size += PERCPU_DYNAMIC_EARLY_SIZE;
729         if (size < PCPU_MIN_UNIT_SIZE)
730                 size = PCPU_MIN_UNIT_SIZE;
731         size = roundup(size, PAGE_SIZE);
732 
733         /* In several places we assume the per-cpu data fits on a huge page. */
734         BUG_ON(kdata_huge && size > HPAGE_SIZE);
735         return size;
736 }
737 
738 static void __init zone_sizes_init(void)
739 {
740         unsigned long zones_size[MAX_NR_ZONES] = { 0 };
741         int size = percpu_size();
742         int num_cpus = smp_height * smp_width;
743         const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));
744 
745         int i;
746 
747         for (i = 0; i < num_cpus; ++i)
748                 node_percpu[cpu_to_node(i)] += size;
749 
750         for_each_online_node(i) {
751                 unsigned long start = node_start_pfn[i];
752                 unsigned long end = node_end_pfn[i];
753 #ifdef CONFIG_HIGHMEM
754                 unsigned long lowmem_end = node_lowmem_end_pfn[i];
755 #else
756                 unsigned long lowmem_end = end;
757 #endif
758                 int memmap_size = (end - start) * sizeof(struct page);
759                 node_free_pfn[i] = start;
760 
761                 /*
762                  * Set aside pages for per-cpu data and the mem_map array.
763                  *
764                  * Since the per-cpu data requires special homecaching,
765                  * if we are in kdata_huge mode, we put it at the end of
766                  * the lowmem region.  If we're not in kdata_huge mode,
767                  * we take the per-cpu pages from the bottom of the
768                  * controller, since that avoids fragmenting a huge page
769                  * that users might want.  We always take the memmap
770                  * from the bottom of the controller, since with
771                  * kdata_huge that lets it be under a huge TLB entry.
772                  *
773                  * If the user has requested isolnodes for a controller,
774                  * though, there'll be no lowmem, so we just alloc_bootmem
775                  * the memmap.  There will be no percpu memory either.
776                  */
777                 if (i != 0 && node_isset(i, isolnodes)) {
778                         node_memmap_pfn[i] =
779                                 alloc_bootmem_pfn(0, memmap_size, 0);
780                         BUG_ON(node_percpu[i] != 0);
781                 } else if (node_has_bootmem(start)) {
782                         unsigned long goal = 0;
783                         node_memmap_pfn[i] =
784                                 alloc_bootmem_pfn(i, memmap_size, 0);
785                         if (kdata_huge)
786                                 goal = PFN_PHYS(lowmem_end) - node_percpu[i];
787                         if (node_percpu[i])
788                                 node_percpu_pfn[i] =
789                                         alloc_bootmem_pfn(i, node_percpu[i],
790                                                           goal);
791                 } else {
792                         /* In non-bootmem zones, just reserve some pages. */
793                         node_memmap_pfn[i] = node_free_pfn[i];
794                         node_free_pfn[i] += PFN_UP(memmap_size);
795                         if (!kdata_huge) {
796                                 node_percpu_pfn[i] = node_free_pfn[i];
797                                 node_free_pfn[i] += PFN_UP(node_percpu[i]);
798                         } else {
799                                 node_percpu_pfn[i] =
800                                         lowmem_end - PFN_UP(node_percpu[i]);
801                         }
802                 }
803 
804 #ifdef CONFIG_HIGHMEM
805                 if (start > lowmem_end) {
806                         zones_size[ZONE_NORMAL] = 0;
807                         zones_size[ZONE_HIGHMEM] = end - start;
808                 } else {
809                         zones_size[ZONE_NORMAL] = lowmem_end - start;
810                         zones_size[ZONE_HIGHMEM] = end - lowmem_end;
811                 }
812 #else
813                 zones_size[ZONE_NORMAL] = end - start;
814 #endif
815 
816                 if (start < dma_end) {
817                         zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
818                                                    dma_end - start);
819                         zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
820                 } else {
821                         zones_size[ZONE_DMA] = 0;
822                 }
823 
824                 /* Take zone metadata from controller 0 if we're isolnode. */
825                 if (node_isset(i, isolnodes))
826                         NODE_DATA(i)->bdata = &bootmem_node_data[0];
827 
828                 free_area_init_node(i, zones_size, start, NULL);
829                 printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
830                        PFN_UP(node_percpu[i]));
831 
832                 /* Track the type of memory on each node */
833                 if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
834                         node_set_state(i, N_NORMAL_MEMORY);
835 #ifdef CONFIG_HIGHMEM
836                 if (end != start)
837                         node_set_state(i, N_HIGH_MEMORY);
838 #endif
839 
840                 node_set_online(i);
841         }
842 }
843 
844 #ifdef CONFIG_NUMA
845 
846 /* which logical CPUs are on which nodes */
847 struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
848 EXPORT_SYMBOL(node_2_cpu_mask);
849 
850 /* which node each logical CPU is on */
851 char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
852 EXPORT_SYMBOL(cpu_2_node);
853 
854 /* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
855 static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus)
856 {
857         if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
858                 return -1;
859         else
860                 return cpu_to_node(cpu);
861 }
862 
863 /* Return number of immediately-adjacent tiles sharing the same NUMA node. */
864 static int __init node_neighbors(int node, int cpu,
865                                  struct cpumask *unbound_cpus)
866 {
867         int neighbors = 0;
868         int w = smp_width;
869         int h = smp_height;
870         int x = cpu % w;
871         int y = cpu / w;
872         if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
873                 ++neighbors;
874         if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
875                 ++neighbors;
876         if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
877                 ++neighbors;
878         if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
879                 ++neighbors;
880         return neighbors;
881 }
882 
883 static void __init setup_numa_mapping(void)
884 {
885         int distance[MAX_NUMNODES][NR_CPUS];
886         HV_Coord coord;
887         int cpu, node, cpus, i, x, y;
888         int num_nodes = num_online_nodes();
889         struct cpumask unbound_cpus;
890         nodemask_t default_nodes;
891 
892         cpumask_clear(&unbound_cpus);
893 
894         /* Get set of nodes we will use for defaults */
895         nodes_andnot(default_nodes, node_online_map, isolnodes);
896         if (nodes_empty(default_nodes)) {
897                 BUG_ON(!node_isset(0, node_online_map));
898                 pr_err("Forcing NUMA node zero available as a default node\n");
899                 node_set(0, default_nodes);
900         }
901 
902         /* Populate the distance[] array */
903         memset(distance, -1, sizeof(distance));
904         cpu = 0;
905         for (coord.y = 0; coord.y < smp_height; ++coord.y) {
906                 for (coord.x = 0; coord.x < smp_width;
907                      ++coord.x, ++cpu) {
908                         BUG_ON(cpu >= nr_cpu_ids);
909                         if (!cpu_possible(cpu)) {
910                                 cpu_2_node[cpu] = -1;
911                                 continue;
912                         }
913                         for_each_node_mask(node, default_nodes) {
914                                 HV_MemoryControllerInfo info =
915                                         hv_inquire_memory_controller(
916                                                 coord, node_controller[node]);
917                                 distance[node][cpu] =
918                                         ABS(info.coord.x) + ABS(info.coord.y);
919                         }
920                         cpumask_set_cpu(cpu, &unbound_cpus);
921                 }
922         }
923         cpus = cpu;
924 
925         /*
926          * Round-robin through the NUMA nodes until all the cpus are
927          * assigned.  We could be more clever here (e.g. create four
928          * sorted linked lists on the same set of cpu nodes, and pull
929          * off them in round-robin sequence, removing from all four
930          * lists each time) but given the relatively small numbers
 931  * involved, O(n^2) seems OK for a one-time cost.
932          */
933         node = first_node(default_nodes);
934         while (!cpumask_empty(&unbound_cpus)) {
935                 int best_cpu = -1;
936                 int best_distance = INT_MAX;
937                 for (cpu = 0; cpu < cpus; ++cpu) {
938                         if (cpumask_test_cpu(cpu, &unbound_cpus)) {
939                                 /*
940                                  * Compute metric, which is how much
941                                  * closer the cpu is to this memory
942                                  * controller than the others, shifted
943                                  * up, and then the number of
944                                  * neighbors already in the node as an
945                                  * epsilon adjustment to try to keep
946                                  * the nodes compact.
947                                  */
948                                 int d = distance[node][cpu] * num_nodes;
949                                 for_each_node_mask(i, default_nodes) {
950                                         if (i != node)
951                                                 d -= distance[i][cpu];
952                                 }
953                                 d *= 8;  /* allow space for epsilon */
954                                 d -= node_neighbors(node, cpu, &unbound_cpus);
955                                 if (d < best_distance) {
956                                         best_cpu = cpu;
957                                         best_distance = d;
958                                 }
959                         }
960                 }
961                 BUG_ON(best_cpu < 0);
962                 cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
963                 cpu_2_node[best_cpu] = node;
964                 cpumask_clear_cpu(best_cpu, &unbound_cpus);
965                 node = next_node(node, default_nodes);
966                 if (node == MAX_NUMNODES)
967                         node = first_node(default_nodes);
968         }
969 
970         /* Print out node assignments and set defaults for disabled cpus */
971         cpu = 0;
972         for (y = 0; y < smp_height; ++y) {
973                 printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
974                 for (x = 0; x < smp_width; ++x, ++cpu) {
975                         if (cpu_to_node(cpu) < 0) {
976                                 pr_cont(" -");
977                                 cpu_2_node[cpu] = first_node(default_nodes);
978                         } else {
979                                 pr_cont(" %d", cpu_to_node(cpu));
980                         }
981                 }
982                 pr_cont("\n");
983         }
984 }
985 
986 static struct cpu cpu_devices[NR_CPUS];
987 
988 static int __init topology_init(void)
989 {
990         int i;
991 
992         for_each_online_node(i)
993                 register_one_node(i);
994 
995         for (i = 0; i < smp_height * smp_width; ++i)
996                 register_cpu(&cpu_devices[i], i);
997 
998         return 0;
999 }
1000 
1001 subsys_initcall(topology_init);
1002 
1003 #else /* !CONFIG_NUMA */
1004 
1005 #define setup_numa_mapping() do { } while (0)
1006 
1007 #endif /* CONFIG_NUMA */
1008 
1009 /*
1010  * Initialize hugepage support on this cpu.  We do this on all cores
1011  * early in boot: before argument parsing for the boot cpu, and after
1012  * argument parsing but before the init functions run on the secondaries.
1013  * So the values we set up here in the hypervisor may be overridden on
1014  * the boot cpu as arguments are parsed.
1015  */
1016 static void init_super_pages(void)
1017 {
1018 #ifdef CONFIG_HUGETLB_SUPER_PAGES
1019         int i;
1020         for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
1021                 hv_set_pte_super_shift(i, huge_shift[i]);
1022 #endif
1023 }
1024 
1025 /**
1026  * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
1027  * @boot: Is this the boot cpu?
1028  *
1029  * Called from setup_arch() on the boot cpu, or online_secondary().
1030  */
1031 void setup_cpu(int boot)
1032 {
1033         /* The boot cpu sets up its permanent mappings much earlier. */
1034         if (!boot)
1035                 store_permanent_mappings();
1036 
1037         /* Allow asynchronous TLB interrupts. */
1038 #if CHIP_HAS_TILE_DMA()
1039         arch_local_irq_unmask(INT_DMATLB_MISS);
1040         arch_local_irq_unmask(INT_DMATLB_ACCESS);
1041 #endif
1042 #ifdef __tilegx__
1043         arch_local_irq_unmask(INT_SINGLE_STEP_K);
1044 #endif
1045 
1046         /*
1047          * Allow user access to many generic SPRs, like the cycle
1048          * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
1049          */
1050         __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);
1051 
1052 #if CHIP_HAS_SN()
1053         /* Static network is not restricted. */
1054         __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
1055 #endif
1056 
1057         /*
1058          * Set the MPL for interrupt control 0 & 1 to the corresponding
1059          * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
1060          * SPRs, as well as the interrupt mask.
1061          */
1062         __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
1063         __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
1064 
1065         /* Initialize IRQ support for this cpu. */
1066         setup_irq_regs();
1067 
1068 #ifdef CONFIG_HARDWALL
1069         /* Reset the network state on this cpu. */
1070         reset_network_state();
1071 #endif
1072 
1073         init_super_pages();
1074 }
1075 
1076 #ifdef CONFIG_BLK_DEV_INITRD
1077 
1078 static int __initdata set_initramfs_file;
1079 static char __initdata initramfs_file[128] = "initramfs";
1080 
1081 static int __init setup_initramfs_file(char *str)
1082 {
1083         if (str == NULL)
1084                 return -EINVAL;
1085         strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
1086         set_initramfs_file = 1;
1087 
1088         return 0;
1089 }
1090 early_param("initramfs_file", setup_initramfs_file);
1091 
1092 /*
1093  * We look for a file called "initramfs" in the hvfs.  If there is one, we
1094  * allocate some memory for it and it will be unpacked to the initramfs.
1095  * If it's compressed, the initrd code will uncompress it first.
1096  */
1097 static void __init load_hv_initrd(void)
1098 {
1099         HV_FS_StatInfo stat;
1100         int fd, rc;
1101         void *initrd;
1102 
1103         /* If initrd has already been set, skip initramfs file in hvfs. */
1104         if (initrd_start)
1105                 return;
1106 
1107         fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
1108         if (fd == HV_ENOENT) {
1109                 if (set_initramfs_file) {
1110                         pr_warn("No such hvfs initramfs file '%s'\n",
1111                                 initramfs_file);
1112                         return;
1113                 } else {
1114                         /* Try old backwards-compatible name. */
1115                         fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
1116                         if (fd == HV_ENOENT)
1117                                 return;
1118                 }
1119         }
1120         BUG_ON(fd < 0);
1121         stat = hv_fs_fstat(fd);
1122         BUG_ON(stat.size < 0);
1123         if (stat.flags & HV_FS_ISDIR) {
1124                 pr_warn("Ignoring hvfs file '%s': it's a directory\n",
1125                         initramfs_file);
1126                 return;
1127         }
1128         initrd = alloc_bootmem_pages(stat.size);
1129         rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
1130         if (rc != stat.size) {
1131                 pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
1132                        stat.size, initramfs_file, rc);
1133                 free_initrd_mem((unsigned long) initrd, stat.size);
1134                 return;
1135         }
1136         initrd_start = (unsigned long) initrd;
1137         initrd_end = initrd_start + stat.size;
1138 }
1139 
1140 void __init free_initrd_mem(unsigned long begin, unsigned long end)
1141 {
1142         free_bootmem_late(__pa(begin), end - begin);
1143 }
1144 
1145 static int __init setup_initrd(char *str)
1146 {
1147         char *endp;
1148         unsigned long initrd_size;
1149 
1150         initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
1151         if (initrd_size == 0 || *endp != '@')
1152                 return -EINVAL;
1153 
1154         initrd_start = simple_strtoul(endp+1, &endp, 0);
1155         if (initrd_start == 0)
1156                 return -EINVAL;
1157 
1158         initrd_end = initrd_start + initrd_size;
1159 
1160         return 0;
1161 }
1162 early_param("initrd", setup_initrd);
1163 
1164 #else
1165 static inline void load_hv_initrd(void) {}
1166 #endif /* CONFIG_BLK_DEV_INITRD */
1167 
1168 static void __init validate_hv(void)
1169 {
1170         /*
1171          * It may already be too late, but let's check our built-in
1172          * configuration against what the hypervisor is providing.
1173          */
1174         unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
1175         int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
1176         int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
1177         HV_ASIDRange asid_range;
1178 
1179 #ifndef CONFIG_SMP
1180         HV_Topology topology = hv_inquire_topology();
1181         BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
1182         if (topology.width != 1 || topology.height != 1) {
1183                 pr_warn("Warning: booting UP kernel on %dx%d grid; will ignore all but first tile\n",
1184                         topology.width, topology.height);
1185         }
1186 #endif
1187 
1188         if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
1189                 early_panic("Hypervisor glue size %ld is too big!\n",
1190                             glue_size);
1191         if (hv_page_size != PAGE_SIZE)
1192                 early_panic("Hypervisor page size %#x != our %#lx\n",
1193                             hv_page_size, PAGE_SIZE);
1194         if (hv_hpage_size != HPAGE_SIZE)
1195                 early_panic("Hypervisor huge page size %#x != our %#lx\n",
1196                             hv_hpage_size, HPAGE_SIZE);
1197 
1198 #ifdef CONFIG_SMP
1199         /*
1200          * Some hypervisor APIs take a pointer to a bitmap array
1201          * whose size is at least the number of cpus on the chip.
1202          * We use a struct cpumask for this, so it must be big enough.
1203          */
1204         if ((smp_height * smp_width) > nr_cpu_ids)
1205                 early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n",
1206                             smp_height, smp_width, nr_cpu_ids);
1207 #endif
1208 
1209         /*
1210          * Check that we're using allowed ASIDs, and initialize the
1211          * various asid variables to their appropriate initial states.
1212          */
1213         asid_range = hv_inquire_asid(0);
1214         min_asid = asid_range.start;
1215         __this_cpu_write(current_asid, min_asid);
1216         max_asid = asid_range.start + asid_range.size - 1;
1217 
1218         if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
1219                        sizeof(chip_model)) < 0) {
1220                 pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
1221                 strlcpy(chip_model, "unknown", sizeof(chip_model));
1222         }
1223 }
1224 
1225 static void __init validate_va(void)
1226 {
1227 #ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
1228         /*
1229          * Similarly, make sure we're only using allowed VAs.
1230          * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
1231          * and 0 .. KERNEL_HIGH_VADDR.
1232          * In addition, make sure we CAN'T use the end of memory, since
1233          * we use the last chunk of each pgd for the pgd_list.
1234          */
1235         int i, user_kernel_ok = 0;
1236         unsigned long max_va = 0;
1237         unsigned long list_va =
1238                 ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
1239 
1240         for (i = 0; ; ++i) {
1241                 HV_VirtAddrRange range = hv_inquire_virtual(i);
1242                 if (range.size == 0)
1243                         break;
1244                 if (range.start <= MEM_USER_INTRPT &&
1245                     range.start + range.size >= MEM_HV_START)
1246                         user_kernel_ok = 1;
1247                 if (range.start == 0)
1248                         max_va = range.size;
1249                 BUG_ON(range.start + range.size > list_va);
1250         }
1251         if (!user_kernel_ok)
1252                 early_panic("Hypervisor not configured for user/kernel VAs\n");
1253         if (max_va == 0)
1254                 early_panic("Hypervisor not configured for low VAs\n");
1255         if (max_va < KERNEL_HIGH_VADDR)
1256                 early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
1257                             max_va, KERNEL_HIGH_VADDR);
1258 
1259         /* Kernel PCs must have their high bit set; see intvec.S. */
1260         if ((long)VMALLOC_START >= 0)
1261                 early_panic("Linux VMALLOC region below the 2GB line (%#lx)!\n"
1262                             "Reconfigure the kernel with smaller VMALLOC_RESERVE\n",
1263                             VMALLOC_START);
1264 #endif
1265 }
1266 
1267 /*
1268  * cpu_lotar_map lists all the cpus that are valid for the supervisor
1269  * to cache data on at a page level, i.e. what cpus can be placed in
1270  * the LOTAR field of a PTE.  It is equivalent to the set of possible
1271  * cpus plus any other cpus that are willing to share their cache.
1272  * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
1273  */
1274 struct cpumask __write_once cpu_lotar_map;
1275 EXPORT_SYMBOL(cpu_lotar_map);
1276 
1277 /*
1278  * hash_for_home_map lists all the tiles that hash-for-home data
1279  * will be cached on.  Note that this may include tiles that are not
1280  * valid for this supervisor to use otherwise (e.g. if a hypervisor
1281  * device is being shared between multiple supervisors).
1282  * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
1283  */
1284 struct cpumask hash_for_home_map;
1285 EXPORT_SYMBOL(hash_for_home_map);
1286 
1287 /*
1288  * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
1289  * flush on our behalf.  It is set to cpu_possible_mask OR'ed with
1290  * hash_for_home_map, and it is what should be passed to
1291  * hv_flush_remote() to flush all caches.  Note that if there are
1292  * dedicated hypervisor driver tiles that have authorized use of their
1293  * cache, those tiles will only appear in cpu_lotar_map, NOT in
1294  * cpu_cacheable_map, as they are a special case.
1295  */
1296 struct cpumask __write_once cpu_cacheable_map;
1297 EXPORT_SYMBOL(cpu_cacheable_map);
1298 
1299 static __initdata struct cpumask disabled_map;
1300 
1301 static int __init disabled_cpus(char *str)
1302 {
1303         int boot_cpu = smp_processor_id();
1304 
1305         if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
1306                 return -EINVAL;
1307         if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
1308                 pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
1309                 cpumask_clear_cpu(boot_cpu, &disabled_map);
1310         }
1311         return 0;
1312 }
1313 
1314 early_param("disabled_cpus", disabled_cpus);
1315 
1316 void __init print_disabled_cpus(void)
1317 {
1318         if (!cpumask_empty(&disabled_map))
1319                 pr_info("CPUs not available for Linux: %*pbl\n",
1320                         cpumask_pr_args(&disabled_map));
1321 }
1322 
1323 static void __init setup_cpu_maps(void)
1324 {
1325         struct cpumask hv_disabled_map, cpu_possible_init;
1326         int boot_cpu = smp_processor_id();
1327         int cpus, i, rc;
1328 
1329         /* Learn which cpus are allowed by the hypervisor. */
1330         rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
1331                               (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
1332                               sizeof(cpu_cacheable_map));
1333         if (rc < 0)
1334                 early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
1335         if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
1336                 early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);
1337 
1338         /* Compute the cpus disabled by the hvconfig file. */
1339         cpumask_complement(&hv_disabled_map, &cpu_possible_init);
1340 
1341         /* Include them with the cpus disabled by "disabled_cpus". */
1342         cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);
1343 
1344         /*
1345          * Disable every cpu after "setup_max_cpus".  But don't mark
1346          * as disabled the cpus that are outside of our initial rectangle,
1347          * since that turns out to be confusing.
1348          */
1349         cpus = 1;                          /* this cpu */
1350         cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
1351         for (i = 0; cpus < setup_max_cpus; ++i)
1352                 if (!cpumask_test_cpu(i, &disabled_map))
1353                         ++cpus;
1354         for (; i < smp_height * smp_width; ++i)
1355                 cpumask_set_cpu(i, &disabled_map);
1356         cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
1357         for (i = smp_height * smp_width; i < NR_CPUS; ++i)
1358                 cpumask_clear_cpu(i, &disabled_map);
1359 
1360         /*
1361          * Set up the cpu_possible map as every cpu allocated to us, minus
1362          * the results of any "disabled_cpus" settings.
1363          */
1364         cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
1365         init_cpu_possible(&cpu_possible_init);
1366 
1367         /* Learn which cpus are valid for LOTAR caching. */
1368         rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
1369                               (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
1370                               sizeof(cpu_lotar_map));
1371         if (rc < 0) {
1372                 pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
1373                 cpu_lotar_map = *cpu_possible_mask;
1374         }
1375 
1376         /* Retrieve the set of cpus used for hash-for-home caching. */
1377         rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
1378                               (HV_VirtAddr) hash_for_home_map.bits,
1379                               sizeof(hash_for_home_map));
1380         if (rc < 0)
1381                 early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
1382         cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
1383 }
1384 
1385 
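/*
 * The "dataplane" boot argument is still accepted, but in this kernel it
 * only prints the warning below and otherwise has no effect.
 */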
1386 static int __init dataplane(char *str)
1387 {
1388         pr_warn("WARNING: dataplane support disabled in this kernel\n");
1389         return 0;
1390 }
1391 
1392 early_param("dataplane", dataplane);
1393 
1394 #ifdef CONFIG_NO_HZ_FULL
1395 /* Warn if hypervisor shared cpus are marked as nohz_full. */
1396 static int __init check_nohz_full_cpus(void)
1397 {
1398         struct cpumask shared;
1399         int cpu;
1400 
1401         if (hv_inquire_tiles(HV_INQ_TILES_SHARED,
1402                              (HV_VirtAddr) shared.bits, sizeof(shared)) < 0) {
1403                 pr_warn("WARNING: No support for inquiring hv shared tiles\n");
1404                 return 0;
1405         }
1406         for_each_cpu(cpu, &shared) {
1407                 if (tick_nohz_full_cpu(cpu))
1408                         pr_warn("WARNING: nohz_full cpu %d receives hypervisor interrupts!\n",
1409                                cpu);
1410         }
1411         return 0;
1412 }
1413 arch_initcall(check_nohz_full_cpus);
1414 #endif
1415 
1416 #ifdef CONFIG_CMDLINE_BOOL
1417 static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
1418 #endif
1419 
1420 void __init setup_arch(char **cmdline_p)
1421 {
1422         int len;
1423 
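        /*
         * Assemble boot_command_line: with CONFIG_CMDLINE_OVERRIDE the
         * built-in command line replaces whatever the hypervisor supplies;
         * otherwise any built-in command line is copied in first and the
         * hypervisor-supplied arguments are appended after it.
         */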
1424 #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
1425         len = hv_get_command_line((HV_VirtAddr) boot_command_line,
1426                                   COMMAND_LINE_SIZE);
1427         if (boot_command_line[0])
1428                 pr_warn("WARNING: ignoring dynamic command line \"%s\"\n",
1429                         boot_command_line);
1430         strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
1431 #else
1432         char *hv_cmdline;
1433 #if defined(CONFIG_CMDLINE_BOOL)
1434         if (builtin_cmdline[0]) {
1435                 int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
1436                                           COMMAND_LINE_SIZE);
1437                 if (builtin_len < COMMAND_LINE_SIZE-1)
1438                         boot_command_line[builtin_len++] = ' ';
1439                 hv_cmdline = &boot_command_line[builtin_len];
1440                 len = COMMAND_LINE_SIZE - builtin_len;
1441         } else
1442 #endif
1443         {
1444                 hv_cmdline = boot_command_line;
1445                 len = COMMAND_LINE_SIZE;
1446         }
1447         len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
1448         if (len < 0 || len > COMMAND_LINE_SIZE)
1449                 early_panic("hv_get_command_line failed: %d\n", len);
1450 #endif
1451 
1452         *cmdline_p = boot_command_line;
1453 
1454         /* Set disabled_map and setup_max_cpus very early */
1455         parse_early_param();
1456 
1457         /* Make sure the kernel is compatible with the hypervisor. */
1458         validate_hv();
1459         validate_va();
1460 
1461         setup_cpu_maps();
1462 
1463 
1464 #if defined(CONFIG_PCI) && !defined(__tilegx__)
1465         /*
1466          * Initialize the PCI structures.  This is done before memory
1467          * setup so that we know whether or not a pci_reserve region
1468          * is necessary.
1469          */
1470         if (tile_pci_init() == 0)
1471                 pci_reserve_mb = 0;
1472 
1473         /* PCI systems reserve a region just below 4GB for mapping iomem. */
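        /*
         * 1 << (32 - PAGE_SHIFT) is the pfn at the 4GB boundary, and
         * pci_reserve_mb << (20 - PAGE_SHIFT) converts megabytes to pages.
         */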
1474         pci_reserve_end_pfn  = (1 << (32 - PAGE_SHIFT));
1475         pci_reserve_start_pfn = pci_reserve_end_pfn -
1476                 (pci_reserve_mb << (20 - PAGE_SHIFT));
1477 #endif
1478 
1479         init_mm.start_code = (unsigned long) _text;
1480         init_mm.end_code = (unsigned long) _etext;
1481         init_mm.end_data = (unsigned long) _edata;
1482         init_mm.brk = (unsigned long) _end;
1483 
1484         setup_memory();
1485         store_permanent_mappings();
1486         setup_bootmem_allocator();
1487 
1488         /*
1489          * NOTE: before this point _nobody_ is allowed to allocate
1490          * any memory using the bootmem allocator.
1491          */
1492 
1493 #ifdef CONFIG_SWIOTLB
1494         swiotlb_init(0);
1495 #endif
1496 
1497         paging_init();
1498         setup_numa_mapping();
1499         zone_sizes_init();
1500         set_page_homes();
1501         setup_cpu(1);
1502         setup_clock();
1503         load_hv_initrd();
1504 }
1505 
1506 
1507 /*
1508  * Set up per-cpu memory.
1509  */
1510 
1511 unsigned long __per_cpu_offset[NR_CPUS] __write_once;
1512 EXPORT_SYMBOL(__per_cpu_offset);
1513 
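/*
 * pfn_offset[] tracks how many pages pcpu_fc_alloc() has handed out from
 * each node's reserved percpu region, and percpu_pfn[] records the first
 * pfn given to each cpu so setup_per_cpu_areas() can re-home those pages.
 */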
1514 static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
1515 static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };
1516 
1517 /*
1518  * As the percpu code allocates pages, we return the pages from the
1519  * end of the node for the specified cpu.
1520  */
1521 static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
1522 {
1523         int nid = cpu_to_node(cpu);
1524         unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];
1525 
1526         BUG_ON(size % PAGE_SIZE != 0);
1527         pfn_offset[nid] += size / PAGE_SIZE;
1528         BUG_ON(node_percpu[nid] < size);
1529         node_percpu[nid] -= size;
1530         if (percpu_pfn[cpu] == 0)
1531                 percpu_pfn[cpu] = pfn;
1532         return pfn_to_kaddr(pfn);
1533 }
1534 
1535 /*
1536  * Pages reserved for percpu memory are not freeable, and in any case we are
1537  * on a short path to panic() in setup_per_cpu_areas() at this point anyway.
1538  */
1539 static void __init pcpu_fc_free(void *ptr, size_t size)
1540 {
1541 }
1542 
1543 /*
1544  * Set up vmalloc page tables using bootmem for the percpu code.
1545  */
1546 static void __init pcpu_fc_populate_pte(unsigned long addr)
1547 {
1548         pgd_t *pgd;
1549         pud_t *pud;
1550         pmd_t *pmd;
1551         pte_t *pte;
1552 
1553         BUG_ON(pgd_addr_invalid(addr));
1554         if (addr < VMALLOC_START || addr >= VMALLOC_END)
1555                 panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
1556                       addr, VMALLOC_START, VMALLOC_END);
1557 
1558         pgd = swapper_pg_dir + pgd_index(addr);
1559         pud = pud_offset(pgd, addr);
1560         BUG_ON(!pud_present(*pud));
1561         pmd = pmd_offset(pud, addr);
1562         if (pmd_present(*pmd)) {
1563                 BUG_ON(pmd_huge_page(*pmd));
1564         } else {
1565                 pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
1566                                       HV_PAGE_TABLE_ALIGN, 0);
1567                 pmd_populate_kernel(&init_mm, pmd, pte);
1568         }
1569 }
1570 
1571 void __init setup_per_cpu_areas(void)
1572 {
1573         struct page *pg;
1574         unsigned long delta, pfn, lowmem_va;
1575         unsigned long size = percpu_size();
1576         char *ptr;
1577         int rc, cpu, i;
1578 
1579         rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
1580                                    pcpu_fc_free, pcpu_fc_populate_pte);
1581         if (rc < 0)
1582                 panic("Cannot initialize percpu area (err=%d)", rc);
1583 
1584         delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1585         for_each_possible_cpu(cpu) {
1586                 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1587 
1588                 /* finv the copy out of cache so we can change homecache */
1589                 ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
1590                 __finv_buffer(ptr, size);
1591                 pfn = percpu_pfn[cpu];
1592 
1593                 /* Rewrite the page tables to cache on that cpu */
1594                 pg = pfn_to_page(pfn);
1595                 for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
1596 
1597                         /* Update the vmalloc mapping and page home. */
1598                         unsigned long addr = (unsigned long)ptr + i;
1599                         pte_t *ptep = virt_to_kpte(addr);
1600                         pte_t pte = *ptep;
1601                         BUG_ON(pfn != pte_pfn(pte));
1602                         pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
1603                         pte = set_remote_cache_cpu(pte, cpu);
1604                         set_pte_at(&init_mm, addr, ptep, pte);
1605 
1606                         /* Update the lowmem mapping for consistency. */
1607                         lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
1608                         ptep = virt_to_kpte(lowmem_va);
1609                         if (pte_huge(*ptep)) {
1610                                 printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
1611                                        lowmem_va);
1612                                 shatter_pmd((pmd_t *)ptep);
1613                                 ptep = virt_to_kpte(lowmem_va);
1614                                 BUG_ON(pte_huge(*ptep));
1615                         }
1616                         BUG_ON(pfn != pte_pfn(*ptep));
1617                         set_pte_at(&init_mm, lowmem_va, ptep, pte);
1618                 }
1619         }
1620 
1621         /* Set our thread pointer appropriately. */
1622         set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);
1623 
1624         /* Make sure the finv's have completed. */
1625         mb_incoherent();
1626 
1627         /* Flush the TLB so we reference it properly from here on out. */
1628         local_flush_tlb_all();
1629 }
1630 
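/*
 * iomem resources describing the kernel image; their start/end fields are
 * filled in by request_standard_resources() below.
 */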
1631 static struct resource data_resource = {
1632         .name   = "Kernel data",
1633         .start  = 0,
1634         .end    = 0,
1635         .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
1636 };
1637 
1638 static struct resource code_resource = {
1639         .name   = "Kernel code",
1640         .start  = 0,
1641         .end    = 0,
1642         .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
1643 };
1644 
1645 /*
1646  * On TILEPro, we reserve all resources above 4GB so that PCI won't try to put
1647  * mappings above 4GB.
1648  */
1649 #if defined(CONFIG_PCI) && !defined(__tilegx__)
1650 static struct resource* __init
1651 insert_non_bus_resource(void)
1652 {
1653         struct resource *res =
1654                 kzalloc(sizeof(struct resource), GFP_ATOMIC);
1655         if (!res)
1656                 return NULL;
1657         res->name = "Non-Bus Physical Address Space";
1658         res->start = (1ULL << 32);
1659         res->end = -1LL;
1660         res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1661         if (insert_resource(&iomem_resource, res)) {
1662                 kfree(res);
1663                 return NULL;
1664         }
1665         return res;
1666 }
1667 #endif
1668 
1669 static struct resource* __init
1670 insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
1671 {
1672         struct resource *res =
1673                 kzalloc(sizeof(struct resource), GFP_ATOMIC);
1674         if (!res)
1675                 return NULL;
1676         res->name = reserved ? "Reserved" : "System RAM";
1677         res->start = start_pfn << PAGE_SHIFT;
1678         res->end = (end_pfn << PAGE_SHIFT) - 1;
1679         res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1680         if (insert_resource(&iomem_resource, res)) {
1681                 kfree(res);
1682                 return NULL;
1683         }
1684         return res;
1685 }
1686 
1687 /*
1688  * Request address space for all standard resources
1689  *
1690  * If the system includes PCI root complex drivers, we need to create
1691  * a window just below 4GB where PCI BARs can be mapped.
1692  */
1693 static int __init request_standard_resources(void)
1694 {
1695         int i;
1696         enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
1697 
1698 #if defined(CONFIG_PCI) && !defined(__tilegx__)
1699         insert_non_bus_resource();
1700 #endif
1701 
1702         for_each_online_node(i) {
1703                 u64 start_pfn = node_start_pfn[i];
1704                 u64 end_pfn = node_end_pfn[i];
1705 
1706 #if defined(CONFIG_PCI) && !defined(__tilegx__)
1707                 if (start_pfn <= pci_reserve_start_pfn &&
1708                     end_pfn > pci_reserve_start_pfn) {
1709                         if (end_pfn > pci_reserve_end_pfn)
1710                                 insert_ram_resource(pci_reserve_end_pfn,
1711                                                     end_pfn, 0);
1712                         end_pfn = pci_reserve_start_pfn;
1713                 }
1714 #endif
1715                 insert_ram_resource(start_pfn, end_pfn, 0);
1716         }
1717 
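        /*
         * The kernel text is mapped at MEM_SV_START; subtracting CODE_DELTA
         * converts its addresses back into the PAGE_OFFSET mapping so that
         * __pa() yields the right physical addresses.
         */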
1718         code_resource.start = __pa(_text - CODE_DELTA);
1719         code_resource.end = __pa(_etext - CODE_DELTA)-1;
1720         data_resource.start = __pa(_sdata);
1721         data_resource.end = __pa(_end)-1;
1722 
1723         insert_resource(&iomem_resource, &code_resource);
1724         insert_resource(&iomem_resource, &data_resource);
1725 
1726         /* Mark any "memmap" regions busy for the resource manager. */
1727         for (i = 0; i < memmap_nr; ++i) {
1728                 struct memmap_entry *m = &memmap_map[i];
1729                 insert_ram_resource(PFN_DOWN(m->addr),
1730                                     PFN_UP(m->addr + m->size - 1), 1);
1731         }
1732 
1733 #ifdef CONFIG_KEXEC
1734         insert_resource(&iomem_resource, &crashk_res);
1735 #endif
1736 
1737         return 0;
1738 }
1739 
1740 subsys_initcall(request_standard_resources);
1741 
