~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/cpu/mcheck/mce_amd.c

Version: ~ [ linux-5.4-rc7 ] ~ [ linux-5.3.11 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.84 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.154 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.201 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.201 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.77 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *  (c) 2005, 2006 Advanced Micro Devices, Inc.
  3  *  Your use of this code is subject to the terms and conditions of the
  4  *  GNU general public license version 2. See "COPYING" or
  5  *  http://www.gnu.org/licenses/gpl.html
  6  *
  7  *  Written by Jacob Shin - AMD, Inc.
  8  *
  9  *  Support : jacob.shin@amd.com
 10  *
 11  *  April 2006
 12  *     - added support for AMD Family 0x10 processors
 13  *
 14  *  All MC4_MISCi registers are shared between multi-cores
 15  */
 16 #include <linux/interrupt.h>
 17 #include <linux/notifier.h>
 18 #include <linux/kobject.h>
 19 #include <linux/percpu.h>
 20 #include <linux/sysdev.h>
 21 #include <linux/errno.h>
 22 #include <linux/sched.h>
 23 #include <linux/sysfs.h>
 24 #include <linux/init.h>
 25 #include <linux/cpu.h>
 26 #include <linux/smp.h>
 27 
 28 #include <asm/apic.h>
 29 #include <asm/idle.h>
 30 #include <asm/mce.h>
 31 #include <asm/msr.h>
 32 
 33 #define PFX               "mce_threshold: "
 34 #define VERSION           "version 1.1.1"
 35 #define NR_BANKS          6
 36 #define NR_BLOCKS         9
 37 #define THRESHOLD_MAX     0xFFF
 38 #define INT_TYPE_APIC     0x00020000
 39 #define MASK_VALID_HI     0x80000000
 40 #define MASK_CNTP_HI      0x40000000
 41 #define MASK_LOCKED_HI    0x20000000
 42 #define MASK_LVTOFF_HI    0x00F00000
 43 #define MASK_COUNT_EN_HI  0x00080000
 44 #define MASK_INT_TYPE_HI  0x00060000
 45 #define MASK_OVERFLOW_HI  0x00010000
 46 #define MASK_ERR_COUNT_HI 0x00000FFF
 47 #define MASK_BLKPTR_LO    0xFF000000
 48 #define MCG_XBLK_ADDR     0xC0000400
 49 
/*
 * Per-block sysfs/bookkeeping state.  One instance per valid MCx_MISC
 * threshold block; blocks of the same bank are chained through 'miscj'
 * off the bank's first block.
 */
struct threshold_block {
	unsigned int            block;              /* block index within the bank (0..NR_BLOCKS-1) */
	unsigned int            bank;               /* MCE bank this block belongs to */
	unsigned int            cpu;                /* CPU that owns the underlying MSR */
	u32                     address;            /* MSR address of this block's MISC register */
	u16                     interrupt_enable;   /* nonzero: APIC threshold interrupt enabled */
	u16                     threshold_limit;    /* error count that triggers the interrupt (<= THRESHOLD_MAX) */
	struct kobject          kobj;               /* embedded kobject backing the sysfs dir */
	struct list_head        miscj;              /* list of sibling blocks in the same bank */
};

/* defaults used early on boot, before sysfs blocks are allocated */
static struct threshold_block threshold_defaults = {
	.interrupt_enable       = 0,
	.threshold_limit        = THRESHOLD_MAX,
};
 66 
/*
 * Per-bank container: sysfs directory kobject, head of the block list,
 * and the set of CPUs sharing this bank (siblings are symlinked to the
 * owning core's directory).
 */
struct threshold_bank {
	struct kobject          *kobj;
	struct threshold_block  *blocks;    /* first block; others hang off blocks->miscj */
	cpumask_var_t           cpus;       /* CPUs that share this bank */
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);

#ifdef CONFIG_SMP
/* Nonzero entries mark banks shared between the cores of a package
 * (bank 4, the northbridge bank, on these CPUs). */
static unsigned char shared_bank[NR_BANKS] = {
	0, 0, 0, 0, 1
};
#endif

static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */

static void amd_threshold_interrupt(void);
 83 
 84 /*
 85  * CPU Initialization
 86  */
 87 
/*
 * Argument bundle for threshold_restart_bank(), which runs on the
 * owning CPU (directly or via smp_call_function_single()).
 */
struct thresh_restart {
	struct threshold_block  *b;         /* block whose MISC MSR is reprogrammed */
	int                     reset;      /* nonzero: zero the error count and overflow bit */
	u16                     old_limit;  /* previous limit; nonzero: adjust count w/o reset */
};
 93 
 94 /* must be called with correct cpu affinity */
 95 /* Called via smp_call_function_single() */
 96 static void threshold_restart_bank(void *_tr)
 97 {
 98         struct thresh_restart *tr = _tr;
 99         u32 mci_misc_hi, mci_misc_lo;
100 
101         rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
102 
103         if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
104                 tr->reset = 1;  /* limit cannot be lower than err count */
105 
106         if (tr->reset) {                /* reset err count and overflow bit */
107                 mci_misc_hi =
108                     (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
109                     (THRESHOLD_MAX - tr->b->threshold_limit);
110         } else if (tr->old_limit) {     /* change limit w/o reset */
111                 int new_count = (mci_misc_hi & THRESHOLD_MAX) +
112                     (tr->old_limit - tr->b->threshold_limit);
113 
114                 mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
115                     (new_count & THRESHOLD_MAX);
116         }
117 
118         tr->b->interrupt_enable ?
119             (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
120             (mci_misc_hi &= ~MASK_INT_TYPE_HI);
121 
122         mci_misc_hi |= MASK_COUNT_EN_HI;
123         wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
124 }
125 
126 /* cpu init entry point, called from mce.c with preempt off */
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct thresh_restart tr;
	u8 lvt_off;

	/*
	 * Walk every bank and, within each bank, every threshold block.
	 * Block 0 lives at MC0_MISC + bank*4; block 1's address comes from
	 * the block-pointer field of block 0; further blocks are contiguous.
	 */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				/* BLKPTR field of block 0; zero means no more blocks. */
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;

				address += MCG_XBLK_ADDR;
			} else
				++address;

			/* Non-existent MSR: give up on this bank. */
			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			/* Skip blocks without a counter or locked by the BIOS. */
			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/* Record that this bank has at least one usable block. */
			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
			/* Shared banks are owned by core 0; siblings skip setup. */
			if (shared_bank[bank] && c->cpu_core_id)
				break;
#endif
			/* Route the threshold interrupt through the APIC ELVT. */
			lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
						       APIC_EILVT_MSG_FIX, 0);

			high &= ~MASK_LVTOFF_HI;
			high |= lvt_off << 20;
			wrmsr(address, low, high);

			/* Program boot-time defaults into the block. */
			threshold_defaults.address = address;
			tr.b = &threshold_defaults;
			tr.reset = 0;
			tr.old_limit = 0;
			threshold_restart_bank(&tr);

			mce_threshold_vector = amd_threshold_interrupt;
		}
	}
}
181 
182 /*
183  * APIC Interrupt Handler
184  */
185 
186 /*
187  * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
188  * the interrupt goes off when error_count reaches threshold_limit.
189  * the handler will simply log mcelog w/ software defined bank number.
190  */
/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct mce m;

	mce_setup(&m);

	/* assume first bank caused it */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		/* Same block-address walk as mce_amd_feature_init(). */
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0) {
				address = MSR_IA32_MC0_MISC + bank * 4;
			} else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else {
				++address;
			}

			if (rdmsr_safe(address, &low, &high))
				break;

			/*
			 * Invalid block 0 means the whole bank has nothing;
			 * an invalid later block may still be followed by
			 * valid ones, so only skip it.
			 */
			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			machine_check_poll(MCP_TIMESTAMP,
					&__get_cpu_var(mce_poll_banks));

			/* First block with its overflow bit set is the culprit. */
			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
				       m.status);
				/* Software-defined bank number for mcelog. */
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				return;
			}
		}
	}
}
249 
250 /*
251  * Sysfs Interface
252  */
253 
/*
 * sysfs attribute of a threshold block: show/store operate on the
 * containing struct threshold_block (see the show()/store() dispatch
 * below).
 */
struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

/* Generate a trivial hex 'show' handler for one threshold_block field. */
#define SHOW_FIELDS(name)                                               \
static ssize_t show_ ## name(struct threshold_block *b, char *buf)      \
{                                                                       \
	return sprintf(buf, "%lx\n", (unsigned long) b->name);          \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
267 
268 static ssize_t
269 store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
270 {
271         struct thresh_restart tr;
272         unsigned long new;
273 
274         if (strict_strtoul(buf, 0, &new) < 0)
275                 return -EINVAL;
276 
277         b->interrupt_enable = !!new;
278 
279         tr.b            = b;
280         tr.reset        = 0;
281         tr.old_limit    = 0;
282 
283         smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
284 
285         return size;
286 }
287 
288 static ssize_t
289 store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
290 {
291         struct thresh_restart tr;
292         unsigned long new;
293 
294         if (strict_strtoul(buf, 0, &new) < 0)
295                 return -EINVAL;
296 
297         if (new > THRESHOLD_MAX)
298                 new = THRESHOLD_MAX;
299         if (new < 1)
300                 new = 1;
301 
302         tr.old_limit = b->threshold_limit;
303         b->threshold_limit = new;
304         tr.b = b;
305         tr.reset = 0;
306 
307         smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
308 
309         return size;
310 }
311 
312 struct threshold_block_cross_cpu {
313         struct threshold_block  *tb;
314         long                    retval;
315 };
316 
317 static void local_error_count_handler(void *_tbcc)
318 {
319         struct threshold_block_cross_cpu *tbcc = _tbcc;
320         struct threshold_block *b = tbcc->tb;
321         u32 low, high;
322 
323         rdmsr(b->address, low, high);
324         tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
325 }
326 
327 static ssize_t show_error_count(struct threshold_block *b, char *buf)
328 {
329         struct threshold_block_cross_cpu tbcc = { .tb = b, };
330 
331         smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
332         return sprintf(buf, "%lx\n", tbcc.retval);
333 }
334 
335 static ssize_t store_error_count(struct threshold_block *b,
336                                  const char *buf, size_t count)
337 {
338         struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
339 
340         smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
341         return 1;
342 }
343 
/* Define a 0644 threshold attribute wired to show_<val>/store_<val>. */
#define RW_ATTR(val)                                                    \
static struct threshold_attr val = {                                    \
	.attr   = {.name = __stringify(val), .mode = 0644 },            \
	.show   = show_## val,                                          \
	.store  = store_## val,                                         \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

/* Default attribute set for every threshold block's sysfs directory. */
static struct attribute *default_attrs[] = {
	&interrupt_enable.attr,
	&threshold_limit.attr,
	&error_count.attr,
	NULL
};
361 
362 #define to_block(k)     container_of(k, struct threshold_block, kobj)
363 #define to_attr(a)      container_of(a, struct threshold_attr, attr)
364 
365 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
366 {
367         struct threshold_block *b = to_block(kobj);
368         struct threshold_attr *a = to_attr(attr);
369         ssize_t ret;
370 
371         ret = a->show ? a->show(b, buf) : -EIO;
372 
373         return ret;
374 }
375 
376 static ssize_t store(struct kobject *kobj, struct attribute *attr,
377                      const char *buf, size_t count)
378 {
379         struct threshold_block *b = to_block(kobj);
380         struct threshold_attr *a = to_attr(attr);
381         ssize_t ret;
382 
383         ret = a->store ? a->store(b, buf, count) : -EIO;
384 
385         return ret;
386 }
387 
/* sysfs ops shared by all threshold-block attributes. */
static struct sysfs_ops threshold_ops = {
	.show                   = show,
	.store                  = store,
};

/* kobject type for threshold blocks; NOTE: no .release — blocks are
 * freed explicitly in deallocate_threshold_block()/error paths. */
static struct kobj_type threshold_ktype = {
	.sysfs_ops              = &threshold_ops,
	.default_attrs          = default_attrs,
};
397 
/*
 * Allocate and register the threshold_block for (cpu, bank, block) at
 * MSR 'address', then recurse to the next block of the bank.  Returns
 * 0 on success or when the block simply doesn't exist; a negative
 * errno aborts the chain.
 */
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
					       unsigned int bank,
					       unsigned int block,
					       u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	/* Recursion terminator: past the last bank/block index. */
	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	/*
	 * Invalid block 0 ends the bank; an invalid later block may be
	 * followed by valid ones, so keep walking without allocating.
	 */
	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	/* No counter present, or locked by the BIOS: skip this block. */
	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block                = block;
	b->bank                 = bank;
	b->cpu                  = cpu;
	b->address              = address;
	b->interrupt_enable     = 0;
	b->threshold_limit      = THRESHOLD_MAX;

	INIT_LIST_HEAD(&b->miscj);

	/* First block becomes the bank's list head; later ones chain off it. */
	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   "misc%i", block);
	if (err)
		goto out_free;
recurse:
	/* Compute the next block's MSR address (same scheme as init). */
	if (!block) {
		address = (low & MASK_BLKPTR_LO) >> 21;
		if (!address)
			return 0;
		address += MCG_XBLK_ADDR;
	} else {
		++address;
	}

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	/* Announce only after the whole chain registered successfully. */
	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		/* Safe even for the list head: miscj was self-initialized. */
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}
476 
477 static __cpuinit long
478 local_allocate_threshold_blocks(int cpu, unsigned int bank)
479 {
480         return allocate_threshold_blocks(cpu, bank, 0,
481                                          MSR_IA32_MC0_MISC + bank * 4);
482 }
483 
484 /* symlinks sibling shared banks to first core.  first core owns dir/files. */
/* symlinks sibling shared banks to first core.  first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	int i, err = 0;
	struct threshold_bank *b = NULL;
	char name[32];
#ifdef CONFIG_SMP
	struct cpuinfo_x86 *c = &cpu_data(cpu);
#endif

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	/* Non-owning sibling of a shared bank: just symlink to the owner. */
	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
		i = cpumask_first(c->llc_shared_map);

		/* first core not up yet */
		if (cpu_data(i).cpu_core_id)
			goto out;

		/* already linked */
		if (per_cpu(threshold_banks, cpu)[bank])
			goto out;

		b = per_cpu(threshold_banks, i)[bank];

		if (!b)
			goto out;

		err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj,
					b->kobj, name);
		if (err)
			goto out;

		cpumask_copy(b->cpus, c->llc_shared_map);
		per_cpu(threshold_banks, cpu)[bank] = b;

		goto out;
	}
#endif

	/* Owning core (or non-shared bank): create the real directory. */
	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}
	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
		kfree(b);
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj);
	if (!b->kobj)
		goto out_free;
	/* NOTE(review): err is still 0 on this path — kobject failure is
	 * not reported to the caller; confirm against upstream history. */

#ifndef CONFIG_SMP
	cpumask_setall(b->cpus);
#else
	cpumask_copy(b->cpus, c->llc_shared_map);
#endif

	per_cpu(threshold_banks, cpu)[bank] = b;

	err = local_allocate_threshold_blocks(cpu, bank);
	if (err)
		goto out_free;

	/* Symlink every sharing sibling back to this core's directory. */
	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		err = sysfs_create_link(&per_cpu(mce_dev, i).kobj,
					b->kobj, name);
		if (err)
			goto out;
		/* NOTE(review): failure here leaves earlier links and the
		 * bank itself in place — presumably cleaned up elsewhere;
		 * verify teardown path. */

		per_cpu(threshold_banks, i)[bank] = b;
	}

	goto out;

out_free:
	per_cpu(threshold_banks, cpu)[bank] = NULL;
	free_cpumask_var(b->cpus);
	kfree(b);
out:
	return err;
}
573 
574 /* create dir/files for all valid threshold banks */
575 static __cpuinit int threshold_create_device(unsigned int cpu)
576 {
577         unsigned int bank;
578         int err = 0;
579 
580         for (bank = 0; bank < NR_BANKS; ++bank) {
581                 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
582                         continue;
583                 err = threshold_create_bank(cpu, bank);
584                 if (err)
585                         goto out;
586         }
587 out:
588         return err;
589 }
590 
591 /*
592  * let's be hotplug friendly.
593  * in case of multiple core processors, the first core always takes ownership
594  *   of shared sysfs dir/files, and rest of the cores will be symlinked to it.
595  */
596 
/*
 * Free all threshold blocks of one (cpu, bank): first the siblings
 * chained off the head block, then the head itself.
 */
static void deallocate_threshold_block(unsigned int cpu,
						 unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	/* _safe variant: entries are freed while walking. */
	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	/* The head block is not on its own miscj list; free it last. */
	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
616 
/*
 * Tear down one (cpu, bank): drop a sibling's symlink, or — on the
 * owning core — remove all sibling symlinks, the blocks, and the bank
 * directory itself.
 */
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct threshold_bank *b;
	char name[32];
	int i = 0;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;
	if (!b->blocks)
		goto free_out;

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	/* sibling symlink */
	if (shared_bank[bank] && b->blocks->cpu != cpu) {
		/* This CPU only holds a symlink; the owner keeps the dir. */
		sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name);
		per_cpu(threshold_banks, cpu)[bank] = NULL;

		return;
	}
#endif

	/* remove all sibling symlinks before unregistering */
	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name);
		per_cpu(threshold_banks, i)[bank] = NULL;
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	free_cpumask_var(b->cpus);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}
659 
660 static void threshold_remove_device(unsigned int cpu)
661 {
662         unsigned int bank;
663 
664         for (bank = 0; bank < NR_BANKS; ++bank) {
665                 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
666                         continue;
667                 threshold_remove_bank(cpu, bank);
668         }
669 }
670 
671 /* get notified when a cpu comes on/off */
672 static void __cpuinit
673 amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
674 {
675         switch (action) {
676         case CPU_ONLINE:
677         case CPU_ONLINE_FROZEN:
678                 threshold_create_device(cpu);
679                 break;
680         case CPU_DEAD:
681         case CPU_DEAD_FROZEN:
682                 threshold_remove_device(cpu);
683                 break;
684         default:
685                 break;
686         }
687 }
688 
689 static __init int threshold_init_device(void)
690 {
691         unsigned lcpu = 0;
692 
693         /* to hit CPUs online before the notifier is up */
694         for_each_online_cpu(lcpu) {
695                 int err = threshold_create_device(lcpu);
696 
697                 if (err)
698                         return err;
699         }
700         threshold_cpu_callback = amd_64_threshold_cpu_callback;
701 
702         return 0;
703 }
704 device_initcall(threshold_init_device);
705 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp