
TOMOYO Linux Cross Reference
Linux/arch/metag/kernel/setup.c


  1 /*
  2  * Copyright (C) 2005-2012 Imagination Technologies Ltd.
  3  *
  4  * This file contains the architecture-dependent parts of system setup.
  5  *
  6  */
  7 
  8 #include <linux/export.h>
  9 #include <linux/bootmem.h>
 10 #include <linux/console.h>
 11 #include <linux/cpu.h>
 12 #include <linux/delay.h>
 13 #include <linux/errno.h>
 14 #include <linux/fs.h>
 15 #include <linux/genhd.h>
 16 #include <linux/init.h>
 17 #include <linux/initrd.h>
 18 #include <linux/interrupt.h>
 19 #include <linux/kernel.h>
 20 #include <linux/memblock.h>
 21 #include <linux/mm.h>
 22 #include <linux/of_fdt.h>
 23 #include <linux/of_platform.h>
 24 #include <linux/pfn.h>
 25 #include <linux/root_dev.h>
 26 #include <linux/sched.h>
 27 #include <linux/seq_file.h>
 28 #include <linux/start_kernel.h>
 29 #include <linux/string.h>
 30 
 31 #include <asm/cachepart.h>
 32 #include <asm/clock.h>
 33 #include <asm/core_reg.h>
 34 #include <asm/cpu.h>
 35 #include <asm/da.h>
 36 #include <asm/highmem.h>
 37 #include <asm/hwthread.h>
 38 #include <asm/l2cache.h>
 39 #include <asm/mach/arch.h>
 40 #include <asm/metag_mem.h>
 41 #include <asm/metag_regs.h>
 42 #include <asm/mmu.h>
 43 #include <asm/mmzone.h>
 44 #include <asm/processor.h>
 45 #include <asm/prom.h>
 46 #include <asm/sections.h>
 47 #include <asm/setup.h>
 48 #include <asm/traps.h>
 49 
 50 /* Priv protect as many registers as possible. */
 51 #define DEFAULT_PRIV    (TXPRIVEXT_COPRO_BITS           | \
 52                          TXPRIVEXT_TXTRIGGER_BIT        | \
 53                          TXPRIVEXT_TXGBLCREG_BIT        | \
 54                          TXPRIVEXT_ILOCK_BIT            | \
 55                          TXPRIVEXT_TXITACCYC_BIT        | \
 56                          TXPRIVEXT_TXDIVTIME_BIT        | \
 57                          TXPRIVEXT_TXAMAREGX_BIT        | \
 58                          TXPRIVEXT_TXTIMERI_BIT         | \
 59                          TXPRIVEXT_TXSTATUS_BIT         | \
 60                          TXPRIVEXT_TXDISABLE_BIT)
 61 
 62 /* Meta2 specific bits. */
 63 #ifdef CONFIG_METAG_META12
 64 #define META2_PRIV      0
 65 #else
 66 #define META2_PRIV      (TXPRIVEXT_TXTIMER_BIT          | \
 67                          TXPRIVEXT_TRACE_BIT)
 68 #endif
 69 
 70 /* Unaligned access checking bits. */
 71 #ifdef CONFIG_METAG_UNALIGNED
 72 #define UNALIGNED_PRIV  TXPRIVEXT_ALIGNREW_BIT
 73 #else
 74 #define UNALIGNED_PRIV  0
 75 #endif
 76 
 77 #define PRIV_BITS       (DEFAULT_PRIV                   | \
 78                          META2_PRIV                     | \
 79                          UNALIGNED_PRIV)
 80 
 81 /*
 82  * Protect access to:
 83  * 0x06000000-0x07ffffff Direct mapped region
 84  * 0x05000000-0x05ffffff MMU table region (Meta1)
 85  * 0x04400000-0x047fffff Cache flush region
 86  * 0x84000000-0x87ffffff Core cache memory region (Meta2)
 87  *
 88  * Allow access to:
 89  * 0x80000000-0x81ffffff Core code memory region (Meta2)
 90  */
 91 #ifdef CONFIG_METAG_META12
 92 #define PRIVSYSR_BITS   TXPRIVSYSR_ALL_BITS
 93 #else
 94 #define PRIVSYSR_BITS   (TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT)
 95 #endif
 96 
 97 /* Protect all 0x02xxxxxx and 0x048xxxxx. */
 98 #define PIOREG_BITS     0xffffffff
 99 
100 /*
101  * Protect all 0x04000xx0 (system events)
102  * except write combiner flush and write fence (system events 4 and 5).
103  */
104 #define PSYREG_BITS     0xfffffffb
105 
106 
107 extern char _heap_start[];
108 
109 #ifdef CONFIG_METAG_BUILTIN_DTB
110 extern u32 __dtb_start[];
111 #endif
112 
113 #ifdef CONFIG_DA_CONSOLE
114 /* Our early channel based console driver */
115 extern struct console dash_console;
116 #endif
117 
118 struct machine_desc *machine_desc __initdata;
119 
120 /*
121  * Map a Linux CPU number to a hardware thread ID
122  * In SMP this will be setup with the correct mapping at startup; in UP this
123  * will map to the HW thread on which we are running.
124  */
125 u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
126         [0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
127 };
128 EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);
129 
130 /*
131  * Map a hardware thread ID to a Linux CPU number
132  * In SMP this will be fleshed out with the correct CPU ID for a particular
133  * hardware thread. In UP this will be initialised with the boot CPU ID.
134  */
135 u8 hwthread_id_2_cpu[4] __read_mostly = {
136         [0 ... 3] = BAD_CPU_ID
137 };
138 
139 /* The relative offset of the MMU mapped memory (from ldlk or bootloader)
140  * to the real physical memory.  This is needed as we have to use the
141  * physical addresses in the MMU tables (pte entries), and not the virtual
142  * addresses.
143  * This variable is used in the __pa() and __va() macros, and should
144  * probably only be used via them.
145  */
146 unsigned int meta_memoffset;
147 EXPORT_SYMBOL(meta_memoffset);
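
/*
 * Illustrative sketch only (not the actual <asm/page.h> definitions,
 * which may differ in detail): with meta_memoffset holding the
 * "virtual minus physical" delta computed in setup_arch() below, the
 * translation macros conceptually reduce to
 *
 *      __pa(v)  ~  (unsigned long)(v) - meta_memoffset
 *      __va(p)  ~  (void *)((unsigned long)(p) + meta_memoffset)
 */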
148 
149 static char __initdata *original_cmd_line;
150 
151 DEFINE_PER_CPU(PTBI, pTBI);
152 
153 /*
154  * Mapping are specified as "CPU_ID:HWTHREAD_ID", e.g.
155  *
156  *      "hwthread_map=0:1,1:2,2:3,3:0"
157  *
158  *      Linux CPU ID    HWTHREAD_ID
159  *      ---------------------------
160  *          0                 1
161  *          1                 2
162  *          2                 3
163  *          3                 0
164  */
165 static int __init parse_hwthread_map(char *p)
166 {
167         int cpu;
168 
169         while (*p) {
170                 cpu = (*p++) - '0';
171                 if (cpu < 0 || cpu > 9)
172                         goto err_cpu;
173 
174                 p++;            /* skip the ':' separator */
175                 cpu_2_hwthread_id[cpu] = (*p++) - '0';
176                 if (cpu_2_hwthread_id[cpu] >= 4)
177                         goto err_thread;
178                 hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;
179 
180                 if (*p == ',')
181                         p++;            /* skip comma */
182         }
183 
184         return 0;
185 err_cpu:
186         pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
187         return -EINVAL;
188 err_thread:
189         pr_err("%s: hwthread_map thread argument out of range\n", __func__);
190         return -EINVAL;
191 }
192 early_param("hwthread_map", parse_hwthread_map);
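
/*
 * Worked example, using the sample mapping from the comment above:
 * booting with "hwthread_map=0:1,1:2,2:3,3:0" leaves the tables as
 *
 *      cpu_2_hwthread_id[0..3] = { 1, 2, 3, 0 }
 *      hwthread_id_2_cpu[0..3] = { 3, 0, 1, 2 }
 *
 * i.e. Linux CPU 0 runs on hardware thread 1, and hardware thread 0
 * is used by Linux CPU 3.
 */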
193 
194 void __init dump_machine_table(void)
195 {
196         struct machine_desc *p;
197         const char **compat;
198 
199         pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
200         for_each_machine_desc(p) {
201                 pr_info("\t%s\t[", p->name);
202                 for (compat = p->dt_compat; compat && *compat; ++compat)
203                         printk(" '%s'", *compat);
204                 printk(" ]\n");
205         }
206 
207         pr_info("\nPlease check your kernel config and/or bootloader.\n");
208 
209         hard_processor_halt(HALT_PANIC);
210 }
211 
212 #ifdef CONFIG_METAG_HALT_ON_PANIC
213 static int metag_panic_event(struct notifier_block *this, unsigned long event,
214                              void *ptr)
215 {
216         hard_processor_halt(HALT_PANIC);
217         return NOTIFY_DONE;
218 }
219 
220 static struct notifier_block metag_panic_block = {
221         metag_panic_event,
222         NULL,
223         0
224 };
225 #endif
226 
227 void __init setup_arch(char **cmdline_p)
228 {
229         unsigned long start_pfn;
230         unsigned long text_start = (unsigned long)(&_stext);
231         unsigned long cpu = smp_processor_id();
232         unsigned long heap_start, heap_end;
233         unsigned long start_pte;
234         PTBI _pTBI;
235         PTBISEG p_heap;
236         int heap_id, i;
237 
238         metag_cache_probe();
239 
240         metag_da_probe();
241 #ifdef CONFIG_DA_CONSOLE
242         if (metag_da_enabled()) {
243                 /* An early channel based console driver */
244                 register_console(&dash_console);
245                 add_preferred_console("ttyDA", 1, NULL);
246         }
247 #endif
248 
249         /* try interpreting the argument as a device tree */
250         machine_desc = setup_machine_fdt(original_cmd_line);
251         /* if it doesn't look like a device tree it must be a command line */
252         if (!machine_desc) {
253 #ifdef CONFIG_METAG_BUILTIN_DTB
254                 /* try the embedded device tree */
255                 machine_desc = setup_machine_fdt(__dtb_start);
256                 if (!machine_desc)
257                         panic("Invalid embedded device tree.");
258 #else
259                 /* use the default machine description */
260                 machine_desc = default_machine_desc();
261 #endif
262 #ifndef CONFIG_CMDLINE_FORCE
263                 /* append the bootloader cmdline to any builtin fdt cmdline */
264                 if (boot_command_line[0] && original_cmd_line[0])
265                         strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
266                 strlcat(boot_command_line, original_cmd_line,
267                         COMMAND_LINE_SIZE);
268 #endif
269         }
270         setup_meta_clocks(machine_desc->clocks);
271 
272         *cmdline_p = boot_command_line;
273         parse_early_param();
274 
275         /*
276          * Make sure we don't alias in dcache or icache
277          */
278         check_for_cache_aliasing(cpu);
279 
280 
281 #ifdef CONFIG_METAG_HALT_ON_PANIC
282         atomic_notifier_chain_register(&panic_notifier_list,
283                                        &metag_panic_block);
284 #endif
285 
286 #ifdef CONFIG_DUMMY_CONSOLE
287         conswitchp = &dummy_con;
288 #endif
289 
290         if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
291                 panic("Privilege must be enabled for this thread.");
292 
293         _pTBI = __TBI(TBID_ISTAT_BIT);
294 
295         per_cpu(pTBI, cpu) = _pTBI;
296 
297         if (!per_cpu(pTBI, cpu))
298                 panic("No TBI found!");
299 
300         /*
301          * Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
302          * rather than the version from the bootloader. This makes call
303          * stacks easier to understand and may allow us to unmap the
304          * bootloader at some point.
305          *
306          * We need to keep the LWK handler that TBI installed in order to
307          * be able to do inter-thread comms.
308          */
309         for (i = 0; i <= TBID_SIGNUM_MAX; i++)
310                 if (i != TBID_SIGNUM_LWK)
311                         _pTBI->fnSigs[i] = __TBIUnExpXXX;
312 
313         /* A Meta requirement is that the kernel is loaded (virtually)
314          * at the PAGE_OFFSET.
315          */
316         if (PAGE_OFFSET != text_start)
317                 panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
318                       PAGE_OFFSET, text_start);
319 
320         start_pte = mmu_read_second_level_page(text_start);
321 
322         /*
323          * Kernel pages should have the PRIV bit set by the bootloader.
324          */
325         if (!(start_pte & _PAGE_KERNEL))
326                 panic("kernel pte does not have PRIV set");
327 
328         /*
329          * See __pa and __va in include/asm/page.h.
330          * This value is negative when running in local space but the
331          * calculations work anyway.
332          */
333         meta_memoffset = text_start - (start_pte & PAGE_MASK);
334 
335         /* Now lets look at the heap space */
336         heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
337                 + TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);
338 
339         p_heap = __TBIFindSeg(NULL, heap_id);
340 
341         if (!p_heap)
342                 panic("Could not find heap from TBI!");
343 
344         /* The heap begins at the first full page after the kernel data. */
345         heap_start = (unsigned long) &_heap_start;
346 
347         /* The heap ends at the end of the heap segment specified with
348          * ldlk.
349          */
350         if (is_global_space(text_start)) {
351                 pr_debug("WARNING: running in global space!\n");
352                 heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
353         } else {
354                 heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
355         }
356 
357         ROOT_DEV = Root_RAM0;
358 
359         /* init_mm is the mm struct used for the first task.  It is then
360          * cloned for all other tasks spawned from that task.
361          *
362          * Note - we are using the virtual addresses here.
363          */
364         init_mm.start_code = (unsigned long)(&_stext);
365         init_mm.end_code = (unsigned long)(&_etext);
366         init_mm.end_data = (unsigned long)(&_edata);
367         init_mm.brk = (unsigned long)heap_start;
368 
369         min_low_pfn = PFN_UP(__pa(text_start));
370         max_low_pfn = PFN_DOWN(__pa(heap_end));
371 
372         pfn_base = min_low_pfn;
373 
374         /* Round max_pfn up to a 4Mb boundary. The free_bootmem_node()
375          * call later makes sure to keep the rounded up pages marked reserved.
376          */
377         max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
378         max_pfn &= ~((1 << MAX_ORDER) - 1);
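
        /*
         * Worked example of the round-up above (values purely
         * illustrative): with MAX_ORDER == 10, (1 << MAX_ORDER) - 1 is
         * 0x3ff, so max_low_pfn == 0x1234 becomes 0x1234 + 0x3ff ==
         * 0x1633, and masking with ~0x3ff gives max_pfn == 0x1400, the
         * next MAX_ORDER-aligned pfn at or above max_low_pfn.
         */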
379 
380         start_pfn = PFN_UP(__pa(heap_start));
381 
382         if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
383                 /* Theoretically, we could expand the space that the
384                  * bootmem allocator covers - much as we do for the
385                  * 'high' address, and then tell the bootmem system
386                  * that the lowest chunk is 'not available'.  Right
387                  * now it is just much easier to constrain the
388                  * user to always MAX_ORDER align their kernel space.
389                  */
390 
391                 panic("Kernel must be %d byte aligned, currently at %#lx.",
392                       1 << (MAX_ORDER + PAGE_SHIFT),
393                       min_low_pfn << PAGE_SHIFT);
394         }
395 
396 #ifdef CONFIG_HIGHMEM
397         highstart_pfn = highend_pfn = max_pfn;
398         high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
399 #else
400         high_memory = (void *)__va(PFN_PHYS(max_pfn));
401 #endif
402 
403         paging_init(heap_end);
404 
405         setup_priv();
406 
407         /* Setup the boot cpu's mapping. The rest will be setup below. */
408         cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
409         hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();
410 
411         /* Copy device tree blob into non-init memory before unflattening */
412         copy_fdt();
413         unflatten_device_tree();
414 
415 #ifdef CONFIG_SMP
416         smp_init_cpus();
417 #endif
418 
419         if (machine_desc->init_early)
420                 machine_desc->init_early();
421 }
422 
423 static int __init customize_machine(void)
424 {
425         /* customizes platform devices, or adds new ones */
426         if (machine_desc->init_machine)
427                 machine_desc->init_machine();
428         else
429                 of_platform_populate(NULL, of_default_bus_match_table, NULL,
430                                      NULL);
431         return 0;
432 }
433 arch_initcall(customize_machine);
434 
435 static int __init init_machine_late(void)
436 {
437         if (machine_desc->init_late)
438                 machine_desc->init_late();
439         return 0;
440 }
441 late_initcall(init_machine_late);
442 
443 #ifdef CONFIG_PROC_FS
444 /*
445  *      Get CPU information for use by the procfs.
446  */
447 static const char *get_cpu_capabilities(unsigned int txenable)
448 {
449 #ifdef CONFIG_METAG_META21
450         /* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
451         int coreid = metag_in32(METAC_CORE_ID);
452         unsigned int dsp_type = (coreid >> 3) & 7;
453         unsigned int fpu_type = (coreid >> 7) & 3;
454 
455         switch (dsp_type | fpu_type << 3) {
456         case (0x00): return "EDSP";
457         case (0x01): return "DSP";
458         case (0x08): return "EDSP+LFPU";
459         case (0x09): return "DSP+LFPU";
460         case (0x10): return "EDSP+FPU";
461         case (0x11): return "DSP+FPU";
462         }
463         return "UNKNOWN";
464 
465 #else
466         if (!(txenable & TXENABLE_CLASS_BITS))
467                 return "DSP";
468         else
469                 return "";
470 #endif
471 }
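
/*
 * Illustrative decode of the CORE_ID fields read above (hypothetical
 * register value): if METAC_CORE_ID reads back with bits [5:3] == 0 and
 * bits [8:7] == 2, then dsp_type == 0 and fpu_type == 2, the switch key
 * is 0x00 | (2 << 3) == 0x10, and the reported capability string is
 * "EDSP+FPU".
 */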
472 
473 static int show_cpuinfo(struct seq_file *m, void *v)
474 {
475         const char *cpu;
476         unsigned int txenable, thread_id, major, minor;
477         unsigned long clockfreq = get_coreclock();
478 #ifdef CONFIG_SMP
479         int i;
480         unsigned long lpj;
481 #endif
482 
483         cpu = "META";
484 
485         txenable = __core_reg_get(TXENABLE);
486         major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
487         minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
488         thread_id = (txenable >> 8) & 0x3;
489 
490 #ifdef CONFIG_SMP
491         for_each_online_cpu(i) {
492                 lpj = per_cpu(cpu_data, i).loops_per_jiffy;
493                 txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
494                                                         cpu_2_hwthread_id[i]);
495 
496                 seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
497                               "Clocking:\t%lu.%1luMHz\n"
498                               "BogoMips:\t%lu.%02lu\n"
499                               "Calibration:\t%lu loops\n"
500                               "Capabilities:\t%s\n\n",
501                               cpu, major, minor, i,
502                               clockfreq / 1000000, (clockfreq / 100000) % 10,
503                               lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
504                               lpj,
505                               get_cpu_capabilities(txenable));
506         }
507 #else
508         seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
509                    "Clocking:\t%lu.%1luMHz\n"
510                    "BogoMips:\t%lu.%02lu\n"
511                    "Calibration:\t%lu loops\n"
512                    "Capabilities:\t%s\n",
513                    cpu, major, minor, thread_id,
514                    clockfreq / 1000000, (clockfreq / 100000) % 10,
515                    loops_per_jiffy / (500000 / HZ),
516                    (loops_per_jiffy / (5000 / HZ)) % 100,
517                    loops_per_jiffy,
518                    get_cpu_capabilities(txenable));
519 #endif /* CONFIG_SMP */
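
        /*
         * Illustrative arithmetic for the formatting above (values not
         * from real hardware): with clockfreq == 212500000 the clock
         * line reads "212.5MHz" (212500000 / 1000000 == 212 and
         * (212500000 / 100000) % 10 == 5), and with HZ == 100 and
         * loops_per_jiffy == 250000 the BogoMips line reads "50.00"
         * (250000 / (500000 / 100) == 50).
         */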
520 
521 #ifdef CONFIG_METAG_L2C
522         if (meta_l2c_is_present()) {
523                 seq_printf(m, "L2 cache:\t%s\n"
524                               "L2 cache size:\t%d KB\n",
525                               meta_l2c_is_enabled() ? "enabled" : "disabled",
526                               meta_l2c_size() >> 10);
527         }
528 #endif
529         return 0;
530 }
531 
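/*
 * Single-record seq_file iterator: c_start() returns a non-NULL token
 * only on the first call (*pos == 0), so show_cpuinfo() runs exactly
 * once per read of /proc/cpuinfo and c_next() then ends the sequence.
 */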
532 static void *c_start(struct seq_file *m, loff_t *pos)
533 {
534         return (void *)(*pos == 0);
535 }
536 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
537 {
538         return NULL;
539 }
540 static void c_stop(struct seq_file *m, void *v)
541 {
542 }
543 const struct seq_operations cpuinfo_op = {
544         .start = c_start,
545         .next  = c_next,
546         .stop  = c_stop,
547         .show  = show_cpuinfo,
548 };
549 #endif /* CONFIG_PROC_FS */
550 
551 void __init metag_start_kernel(char *args)
552 {
553         /* Zero the timer register so timestamps are from the point at
554          * which the kernel started running.
555          */
556         __core_reg_set(TXTIMER, 0);
557 
558         /* Clear the bss. */
559         memset(__bss_start, 0,
560                (unsigned long)__bss_stop - (unsigned long)__bss_start);
561 
562         /* Remember where these are for use in setup_arch */
563         original_cmd_line = args;
564 
565         current_thread_info()->cpu = hard_processor_id();
566 
567         start_kernel();
568 }
569 
570 /**
571  * setup_priv() - Set up privilege protection registers.
572  *
573  * Set up privilege protection registers such as TXPRIVEXT to prevent userland
574  * from touching our precious registers and sensitive memory areas.
575  */
576 void setup_priv(void)
577 {
578         unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S;
579 
580         __core_reg_set(TXPRIVEXT, PRIV_BITS);
581 
582         metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset);
583         metag_out32(PIOREG_BITS,   T0PIOREG   + offset);
584         metag_out32(PSYREG_BITS,   T0PSYREG   + offset);
585 }
586 
587 PTBI pTBI_get(unsigned int cpu)
588 {
589         return per_cpu(pTBI, cpu);
590 }
591 EXPORT_SYMBOL(pTBI_get);
592 
593 #if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
594 static char capabilities[] = "dsp fpu";
595 #elif defined(CONFIG_METAG_DSP)
596 static char capabilities[] = "dsp";
597 #elif defined(CONFIG_METAG_FPU)
598 static char capabilities[] = "fpu";
599 #else
600 static char capabilities[] = "";
601 #endif
602 
603 static struct ctl_table caps_kern_table[] = {
604         {
605                 .procname       = "capabilities",
606                 .data           = capabilities,
607                 .maxlen         = sizeof(capabilities),
608                 .mode           = 0444,
609                 .proc_handler   = proc_dostring,
610         },
611         {}
612 };
613 
614 static struct ctl_table caps_root_table[] = {
615         {
616                 .procname       = "kernel",
617                 .mode           = 0555,
618                 .child          = caps_kern_table,
619         },
620         {}
621 };
622 
623 static int __init capabilities_register_sysctl(void)
624 {
625         struct ctl_table_header *caps_table_header;
626 
627         caps_table_header = register_sysctl_table(caps_root_table);
628         if (!caps_table_header) {
629                 pr_err("Unable to register CAPABILITIES sysctl\n");
630                 return -ENOMEM;
631         }
632 
633         return 0;
634 }
635 
636 core_initcall(capabilities_register_sysctl);
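
/*
 * Usage sketch (illustrative, not part of this file): the tables above
 * publish a read-only string as the "kernel.capabilities" sysctl, so
 * from userspace it can be read with, for example,
 *
 *      cat /proc/sys/kernel/capabilities
 *
 * which prints "dsp fpu", "dsp", "fpu" or an empty string depending on
 * the configuration options selected above.
 */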
637 
