Linux/arch/x86/kernel/cpu/intel_cacheinfo.c

/*
 *      Routines to identify caches on Intel CPU.
 *
 *      Changes:
 *      Venkatesh Pallipadi     : Adding cache identification through cpuid(4)
 *      Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *      Andi Kleen / Andreas Herrmann   : CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>

#define LVL_1_INST      1
#define LVL_1_DATA      2
#define LVL_2           3
#define LVL_3           4
#define LVL_TRACE       5

struct _cache_table {
        unsigned char descriptor;
        char cache_type;
        short size;
};

#define MB(x)   ((x) * 1024)
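/*
 * Note: the size field of cache_table below is in KB, so MB(x) only
 * converts megabytes to kilobytes, e.g. MB(2) == 2048.
 */
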
/* All the cache descriptor types we care about (no TLB entries) */

static const struct _cache_table cache_table[] =
{
        { 0x06, LVL_1_INST, 8 },        /* 4-way set assoc, 32 byte line size */
        { 0x08, LVL_1_INST, 16 },       /* 4-way set assoc, 32 byte line size */
        { 0x09, LVL_1_INST, 32 },       /* 4-way set assoc, 64 byte line size */
        { 0x0a, LVL_1_DATA, 8 },        /* 2-way set assoc, 32 byte line size */
        { 0x0c, LVL_1_DATA, 16 },       /* 4-way set assoc, 32 byte line size */
        { 0x0d, LVL_1_DATA, 16 },       /* 4-way set assoc, 64 byte line size */
        { 0x0e, LVL_1_DATA, 24 },       /* 6-way set assoc, 64 byte line size */
        { 0x21, LVL_2,      256 },      /* 8-way set assoc, 64 byte line size */
        { 0x22, LVL_3,      512 },      /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x23, LVL_3,      MB(1) },    /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x25, LVL_3,      MB(2) },    /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x29, LVL_3,      MB(4) },    /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x2c, LVL_1_DATA, 32 },       /* 8-way set assoc, 64 byte line size */
        { 0x30, LVL_1_INST, 32 },       /* 8-way set assoc, 64 byte line size */
        { 0x39, LVL_2,      128 },      /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3a, LVL_2,      192 },      /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3b, LVL_2,      128 },      /* 2-way set assoc, sectored cache, 64 byte line size */
        { 0x3c, LVL_2,      256 },      /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3d, LVL_2,      384 },      /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3e, LVL_2,      512 },      /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3f, LVL_2,      256 },      /* 2-way set assoc, 64 byte line size */
        { 0x41, LVL_2,      128 },      /* 4-way set assoc, 32 byte line size */
        { 0x42, LVL_2,      256 },      /* 4-way set assoc, 32 byte line size */
        { 0x43, LVL_2,      512 },      /* 4-way set assoc, 32 byte line size */
        { 0x44, LVL_2,      MB(1) },    /* 4-way set assoc, 32 byte line size */
        { 0x45, LVL_2,      MB(2) },    /* 4-way set assoc, 32 byte line size */
        { 0x46, LVL_3,      MB(4) },    /* 4-way set assoc, 64 byte line size */
        { 0x47, LVL_3,      MB(8) },    /* 8-way set assoc, 64 byte line size */
        { 0x48, LVL_2,      MB(3) },    /* 12-way set assoc, 64 byte line size */
        { 0x49, LVL_3,      MB(4) },    /* 16-way set assoc, 64 byte line size */
        { 0x4a, LVL_3,      MB(6) },    /* 12-way set assoc, 64 byte line size */
        { 0x4b, LVL_3,      MB(8) },    /* 16-way set assoc, 64 byte line size */
        { 0x4c, LVL_3,      MB(12) },   /* 12-way set assoc, 64 byte line size */
        { 0x4d, LVL_3,      MB(16) },   /* 16-way set assoc, 64 byte line size */
        { 0x4e, LVL_2,      MB(6) },    /* 24-way set assoc, 64 byte line size */
        { 0x60, LVL_1_DATA, 16 },       /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x66, LVL_1_DATA, 8 },        /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x67, LVL_1_DATA, 16 },       /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x68, LVL_1_DATA, 32 },       /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x70, LVL_TRACE,  12 },       /* 8-way set assoc */
        { 0x71, LVL_TRACE,  16 },       /* 8-way set assoc */
        { 0x72, LVL_TRACE,  32 },       /* 8-way set assoc */
        { 0x73, LVL_TRACE,  64 },       /* 8-way set assoc */
        { 0x78, LVL_2,      MB(1) },    /* 4-way set assoc, 64 byte line size */
        { 0x79, LVL_2,      128 },      /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7a, LVL_2,      256 },      /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7b, LVL_2,      512 },      /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7c, LVL_2,      MB(1) },    /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7d, LVL_2,      MB(2) },    /* 8-way set assoc, 64 byte line size */
        { 0x7f, LVL_2,      512 },      /* 2-way set assoc, 64 byte line size */
        { 0x80, LVL_2,      512 },      /* 8-way set assoc, 64 byte line size */
        { 0x82, LVL_2,      256 },      /* 8-way set assoc, 32 byte line size */
        { 0x83, LVL_2,      512 },      /* 8-way set assoc, 32 byte line size */
        { 0x84, LVL_2,      MB(1) },    /* 8-way set assoc, 32 byte line size */
        { 0x85, LVL_2,      MB(2) },    /* 8-way set assoc, 32 byte line size */
        { 0x86, LVL_2,      512 },      /* 4-way set assoc, 64 byte line size */
        { 0x87, LVL_2,      MB(1) },    /* 8-way set assoc, 64 byte line size */
        { 0xd0, LVL_3,      512 },      /* 4-way set assoc, 64 byte line size */
        { 0xd1, LVL_3,      MB(1) },    /* 4-way set assoc, 64 byte line size */
        { 0xd2, LVL_3,      MB(2) },    /* 4-way set assoc, 64 byte line size */
        { 0xd6, LVL_3,      MB(1) },    /* 8-way set assoc, 64 byte line size */
        { 0xd7, LVL_3,      MB(2) },    /* 8-way set assoc, 64 byte line size */
        { 0xd8, LVL_3,      MB(4) },    /* 12-way set assoc, 64 byte line size */
        { 0xdc, LVL_3,      MB(2) },    /* 12-way set assoc, 64 byte line size */
        { 0xdd, LVL_3,      MB(4) },    /* 12-way set assoc, 64 byte line size */
        { 0xde, LVL_3,      MB(8) },    /* 12-way set assoc, 64 byte line size */
        { 0xe2, LVL_3,      MB(2) },    /* 16-way set assoc, 64 byte line size */
        { 0xe3, LVL_3,      MB(4) },    /* 16-way set assoc, 64 byte line size */
        { 0xe4, LVL_3,      MB(8) },    /* 16-way set assoc, 64 byte line size */
        { 0xea, LVL_3,      MB(12) },   /* 24-way set assoc, 64 byte line size */
        { 0xeb, LVL_3,      MB(18) },   /* 24-way set assoc, 64 byte line size */
        { 0xec, LVL_3,      MB(24) },   /* 24-way set assoc, 64 byte line size */
        { 0x00, 0, 0}
};
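
/*
 * Worked example: descriptor 0x2c in the table above reports a 32 KB
 * L1 data cache (8-way set associative, 64 byte line size).  The
 * descriptor bytes come packed four to a register from cpuid(2), as
 * decoded in init_intel_cacheinfo() below.
 */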

enum _cache_type {
        CACHE_TYPE_NULL = 0,
        CACHE_TYPE_DATA = 1,
        CACHE_TYPE_INST = 2,
        CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
        struct {
                enum _cache_type        type:5;
                unsigned int            level:3;
                unsigned int            is_self_initializing:1;
                unsigned int            is_fully_associative:1;
                unsigned int            reserved:4;
                unsigned int            num_threads_sharing:12;
                unsigned int            num_cores_on_die:6;
        } split;
        u32 full;
};

union _cpuid4_leaf_ebx {
        struct {
                unsigned int            coherency_line_size:12;
                unsigned int            physical_line_partition:10;
                unsigned int            ways_of_associativity:10;
        } split;
        u32 full;
};

union _cpuid4_leaf_ecx {
        struct {
                unsigned int            number_of_sets:32;
        } split;
        u32 full;
};
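
/*
 * The three unions above mirror the CPUID leaf 4 ("deterministic
 * cache parameters") register layout.  Worked example with a
 * hypothetical EAX value of 0x1c004143:
 *   bits  4:0   type                 = 3 -> CACHE_TYPE_UNIFIED
 *   bits  7:5   level                = 2
 *   bit   8     is_self_initializing = 1
 *   bits 25:14  num_threads_sharing  = 1 -> shared by 2 threads
 *   bits 31:26  num_cores_on_die     = 7 -> 8 cores
 */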

struct _cpuid4_info_regs {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
        struct amd_northbridge *nb;
};

struct _cpuid4_info {
        struct _cpuid4_info_regs base;
        DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

unsigned short                  num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the
   machine: no shared L2, no SMT etc., which is currently true on AMD
   CPUs.

   In theory the TLBs could be reported as a fake cache type too
   (their data is in "dummy"). Maybe later. */
union l1_cache {
        struct {
                unsigned line_size:8;
                unsigned lines_per_tag:8;
                unsigned assoc:8;
                unsigned size_in_kb:8;
        };
        unsigned val;
};

union l2_cache {
        struct {
                unsigned line_size:8;
                unsigned lines_per_tag:4;
                unsigned assoc:4;
                unsigned size_in_kb:16;
        };
        unsigned val;
};

union l3_cache {
        struct {
                unsigned line_size:8;
                unsigned lines_per_tag:4;
                unsigned assoc:4;
                unsigned res:2;
                unsigned size_encoded:14;
        };
        unsigned val;
};

static const unsigned short assocs[] = {
        [1] = 1,
        [2] = 2,
        [4] = 4,
        [6] = 8,
        [8] = 16,
        [0xa] = 32,
        [0xb] = 48,
        [0xc] = 64,
        [0xd] = 96,
        [0xe] = 128,
        [0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };

static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
                     union _cpuid4_leaf_ebx *ebx,
                     union _cpuid4_leaf_ecx *ecx)
{
        unsigned dummy;
        unsigned line_size, lines_per_tag, assoc, size_in_kb;
        union l1_cache l1i, l1d;
        union l2_cache l2;
        union l3_cache l3;
        union l1_cache *l1 = &l1d;

        eax->full = 0;
        ebx->full = 0;
        ecx->full = 0;

        cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
        cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

        switch (leaf) {
        case 1:
                l1 = &l1i;
                /* fall through */
        case 0:
                if (!l1->val)
                        return;
                assoc = assocs[l1->assoc];
                line_size = l1->line_size;
                lines_per_tag = l1->lines_per_tag;
                size_in_kb = l1->size_in_kb;
                break;
        case 2:
                if (!l2.val)
                        return;
                assoc = assocs[l2.assoc];
                line_size = l2.line_size;
                lines_per_tag = l2.lines_per_tag;
                /* cpu_data has errata corrections for K7 applied */
                size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
                break;
        case 3:
                if (!l3.val)
                        return;
                assoc = assocs[l3.assoc];
                line_size = l3.line_size;
                lines_per_tag = l3.lines_per_tag;
                size_in_kb = l3.size_encoded * 512;
                if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
                        size_in_kb = size_in_kb >> 1;
                        assoc = assoc >> 1;
                }
                break;
        default:
                return;
        }

        eax->split.is_self_initializing = 1;
        eax->split.type = types[leaf];
        eax->split.level = levels[leaf];
        eax->split.num_threads_sharing = 0;
        eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

        if (assoc == 0xffff)
                eax->split.is_fully_associative = 1;
        ebx->split.coherency_line_size = line_size - 1;
        ebx->split.ways_of_associativity = assoc - 1;
        ebx->split.physical_line_partition = lines_per_tag - 1;
        ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
                (ebx->split.ways_of_associativity + 1) - 1;
}
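
/*
 * Worked example for the emulation above (cache geometry assumed for
 * illustration): a 512 KB, 16-way L2 with 64 byte lines and one line
 * per tag is encoded as
 *   coherency_line_size     =  64 - 1 = 63
 *   ways_of_associativity   =  16 - 1 = 15
 *   physical_line_partition =   1 - 1 =  0
 *   number_of_sets          = 512 * 1024 / 64 / 16 - 1 = 511
 * which matches what a native cpuid(4) would report.
 */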

struct _cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
        ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
                         unsigned int);
};

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
        struct amd_l3_cache *l3 = &nb->l3_cache;
        unsigned int sc0, sc1, sc2, sc3;
        u32 val = 0;

        pci_read_config_dword(nb->misc, 0x1C4, &val);

        /* calculate subcache sizes */
        l3->subcaches[0] = sc0 = !(val & BIT(0));
        l3->subcaches[1] = sc1 = !(val & BIT(4));

        if (boot_cpu_data.x86 == 0x15) {
                l3->subcaches[0] = sc0 += !(val & BIT(1));
                l3->subcaches[1] = sc1 += !(val & BIT(5));
        }

        l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
        l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

        l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
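
/*
 * Example (register contents assumed): if none of the probed bits in
 * the 0x1C4 config register are set, all four subcaches are enabled,
 * sc0..sc3 are all non-zero, and l3->indices = (1 << 10) - 1 = 1023
 * usable disable indices.
 */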

static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
        int node;

        /* only for L3, and not in virtualized environments */
        if (index < 3)
                return;

        node = amd_get_nb_id(smp_processor_id());
        this_leaf->nb = node_to_amd_nb(node);
        if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
                amd_calc_l3_indices(this_leaf->nb);
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @nb: northbridge descriptor that holds the L3 cache
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
        unsigned int reg = 0;

        pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

        /* check whether this slot is activated already */
        if (reg & (3UL << 30))
                return reg & 0xfff;

        return -1;
}
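
/*
 * Example (register value assumed): if the config register at
 * 0x1BC + slot * 4 reads 0x8000002a, bit 31 marks the slot as in
 * use, so the function returns the disabled index 0x2a; a value with
 * bits 31:30 clear means the slot is free and -1 is returned.
 */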

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
                                  unsigned int slot)
{
        int index;

        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

        index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
        if (index >= 0)
                return sprintf(buf, "%d\n", index);

        return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)                                        \
static ssize_t                                                          \
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,    \
                          unsigned int cpu)                             \
{                                                                       \
        return show_cache_disable(this_leaf, buf, slot);                \
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
                                 unsigned slot, unsigned long idx)
{
        int i;

        idx |= BIT(30);

        /*
         * disable index in all 4 subcaches
         */
        for (i = 0; i < 4; i++) {
                u32 reg = idx | (i << 20);

                if (!nb->l3_cache.subcaches[i])
                        continue;

                pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

                /*
                 * We need to WBINVD on a core on the node containing
                 * the L3 cache whose indices we disable, so a simple
                 * wbinvd() is not sufficient.
                 */
                wbinvd_on_cpu(cpu);

                reg |= BIT(31);
                pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
        }
}

/*
 * disable an L3 cache index by using a disable-slot
 *
 * @nb:    northbridge descriptor that holds the L3 cache
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
                            unsigned long index)
{
        int ret = 0;

        /* check if @slot is already used or the index is already disabled */
        ret = amd_get_l3_disable_slot(nb, slot);
        if (ret >= 0)
                return -EEXIST;

        if (index > nb->l3_cache.indices)
                return -EINVAL;

        /* check whether the other slot has disabled the same index already */
        if (index == amd_get_l3_disable_slot(nb, !slot))
                return -EEXIST;

        amd_l3_disable_index(nb, cpu, slot, index);

        return 0;
}
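
/*
 * The two slots are exposed through the sysfs attributes defined
 * below, so from user space an index can be disabled with e.g.
 *
 *   echo 42 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *
 * (the path follows from the "cache" kobject and "index%lu" names
 * used in cache_add_dev() further down).
 */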

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
                                   const char *buf, size_t count,
                                   unsigned int slot)
{
        unsigned long val = 0;
        int cpu, err = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

        cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

        if (strict_strtoul(buf, 10, &val) < 0)
                return -EINVAL;

        err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
        if (err) {
                if (err == -EEXIST)
                        pr_warning("L3 slot %d in use/index already disabled!\n",
                                   slot);
                return err;
        }
        return count;
}

#define STORE_CACHE_DISABLE(slot)                                       \
static ssize_t                                                          \
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,              \
                           const char *buf, size_t count,               \
                           unsigned int cpu)                            \
{                                                                       \
        return store_cache_disable(this_leaf, buf, count, slot);        \
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
                show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
                show_cache_disable_1, store_cache_disable_1);

static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return -EINVAL;

        return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
                unsigned int cpu)
{
        unsigned long val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return -EINVAL;

        if (strict_strtoul(buf, 16, &val) < 0)
                return -EINVAL;

        if (amd_set_subcaches(cpu, val))
                return -EINVAL;

        return count;
}

static struct _cache_attr subcaches =
        __ATTR(subcaches, 0644, show_subcaches, store_subcaches);

#else
#define amd_init_l3_cache(x, y)
#endif  /* CONFIG_AMD_NB && CONFIG_SYSFS */

static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{
        union _cpuid4_leaf_eax  eax;
        union _cpuid4_leaf_ebx  ebx;
        union _cpuid4_leaf_ecx  ecx;
        unsigned                edx;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
                if (cpu_has_topoext)
                        cpuid_count(0x8000001d, index, &eax.full,
                                    &ebx.full, &ecx.full, &edx);
                else
                        amd_cpuid4(index, &eax, &ebx, &ecx);
                amd_init_l3_cache(this_leaf, index);
        } else {
                cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
        }

        if (eax.split.type == CACHE_TYPE_NULL)
                return -EIO; /* better error ? */

        this_leaf->eax = eax;
        this_leaf->ebx = ebx;
        this_leaf->ecx = ecx;
        this_leaf->size = (ecx.split.number_of_sets          + 1) *
                          (ebx.split.coherency_line_size     + 1) *
                          (ebx.split.physical_line_partition + 1) *
                          (ebx.split.ways_of_associativity   + 1);
        return 0;
}
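
/*
 * Size arithmetic above, using the 512 KB L2 example values from the
 * amd_cpuid4() comment: (511 + 1) sets * (63 + 1) byte lines *
 * (0 + 1) partitions * (15 + 1) ways = 524288 bytes = 512 KB.
 */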

static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
        unsigned int            eax, ebx, ecx, edx, op;
        union _cpuid4_leaf_eax  cache_eax;
        int                     i = -1;

        if (c->x86_vendor == X86_VENDOR_AMD)
                op = 0x8000001d;
        else
                op = 4;

        do {
                ++i;
                /* Do cpuid(op) loop to find out num_cache_leaves */
                cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
                cache_eax.full = eax;
        } while (cache_eax.split.type != CACHE_TYPE_NULL);
        return i;
}

void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
        if (cpu_has_topoext) {
                num_cache_leaves = find_num_cache_leaves(c);
        } else if (c->extended_cpuid_level >= 0x80000006) {
                if (cpuid_edx(0x80000006) & 0xf000)
                        num_cache_leaves = 4;
                else
                        num_cache_leaves = 3;
        }
}
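
/*
 * The 0xf000 mask above tests EDX[15:12] of cpuid(0x80000006), the
 * L3 associativity field: a non-zero value means an L3 cache is
 * present, so there are four cache leaves (L1d, L1i, L2, L3) instead
 * of three.
 */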

unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
        unsigned int cpu = c->cpu_index;
#endif

        if (c->cpuid_level > 3) {
                static int is_initialized;

                if (is_initialized == 0) {
                        /* Init num_cache_leaves from boot CPU */
                        num_cache_leaves = find_num_cache_leaves(c);
                        is_initialized++;
                }

                /*
                 * Whenever possible use cpuid(4), the deterministic
                 * cache parameters leaf, to find the cache details
                 */
                for (i = 0; i < num_cache_leaves; i++) {
                        struct _cpuid4_info_regs this_leaf = {};
                        int retval;

                        retval = cpuid4_cache_lookup_regs(i, &this_leaf);
                        if (retval < 0)
                                continue;

                        switch (this_leaf.eax.split.level) {
                        case 1:
                                if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
                                        new_l1d = this_leaf.size/1024;
                                else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
                                        new_l1i = this_leaf.size/1024;
                                break;
                        case 2:
                                new_l2 = this_leaf.size/1024;
                                num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                index_msb = get_count_order(num_threads_sharing);
                                l2_id = c->apicid & ~((1 << index_msb) - 1);
                                break;
                        case 3:
                                new_l3 = this_leaf.size/1024;
                                num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                index_msb = get_count_order(num_threads_sharing);
                                l3_id = c->apicid & ~((1 << index_msb) - 1);
                                break;
                        default:
                                break;
                        }
                }
        }
        /*
         * Don't use cpuid(2) if cpuid(4) is supported. For P4, we use
         * cpuid(2) only for the trace cache.
         */
        if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
                /* supports eax=2  call */
                int j, n;
                unsigned int regs[4];
                unsigned char *dp = (unsigned char *)regs;
                int only_trace = 0;

                if (num_cache_leaves != 0 && c->x86 == 15)
                        only_trace = 1;

                /* Number of times to iterate */
                n = cpuid_eax(2) & 0xFF;

                for (i = 0 ; i < n ; i++) {
                        cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                        /* If bit 31 is set, this is an unknown format */
                        for (j = 0 ; j < 3 ; j++)
                                if (regs[j] & (1 << 31))
                                        regs[j] = 0;

                        /* Byte 0 is level count, not a descriptor */
                        for (j = 1 ; j < 16 ; j++) {
                                unsigned char des = dp[j];
                                unsigned char k = 0;

                                /* look up this descriptor in the table */
                                while (cache_table[k].descriptor != 0) {
                                        if (cache_table[k].descriptor == des) {
                                                if (only_trace && cache_table[k].cache_type != LVL_TRACE)
                                                        break;
                                                switch (cache_table[k].cache_type) {
                                                case LVL_1_INST:
                                                        l1i += cache_table[k].size;
                                                        break;
                                                case LVL_1_DATA:
                                                        l1d += cache_table[k].size;
                                                        break;
                                                case LVL_2:
                                                        l2 += cache_table[k].size;
                                                        break;
                                                case LVL_3:
                                                        l3 += cache_table[k].size;
                                                        break;
                                                case LVL_TRACE:
                                                        trace += cache_table[k].size;
                                                        break;
                                                }

                                                break;
                                        }

                                        k++;
                                }
                        }
                }
        }

        if (new_l1d)
                l1d = new_l1d;

        if (new_l1i)
                l1i = new_l1i;

        if (new_l2) {
                l2 = new_l2;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
        }

        if (new_l3) {
                l3 = new_l3;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
        }

        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

        return l2;
}
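
/*
 * cpuid(2) example (descriptor bytes assumed for illustration): a
 * return value of EAX = 0x665b5001 means AL = 0x01, i.e. one
 * iteration, and the remaining bytes 0x50, 0x5b and 0x66 are
 * descriptors: 0x66 is found in cache_table[] (8 KB L1 data cache),
 * while 0x50 and 0x5b are TLB descriptors and match no table entry.
 */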

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)   (&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP

static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf;
        int i, sibling;

        if (cpu_has_topoext) {
                unsigned int apicid, nshared, first, last;

                if (!per_cpu(ici_cpuid4_info, cpu))
                        return 0;

                this_leaf = CPUID4_INFO_IDX(cpu, index);
                nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
                apicid = cpu_data(cpu).apicid;
                first = apicid - (apicid % nshared);
                last = first + nshared - 1;

                for_each_online_cpu(i) {
                        apicid = cpu_data(i).apicid;
                        if ((apicid < first) || (apicid > last))
                                continue;
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        this_leaf = CPUID4_INFO_IDX(i, index);

                        for_each_online_cpu(sibling) {
                                apicid = cpu_data(sibling).apicid;
                                if ((apicid < first) || (apicid > last))
                                        continue;
                                set_bit(sibling, this_leaf->shared_cpu_map);
                        }
                }
        } else if (index == 3) {
                for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        this_leaf = CPUID4_INFO_IDX(i, index);
                        for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
                                if (!cpu_online(sibling))
                                        continue;
                                set_bit(sibling, this_leaf->shared_cpu_map);
                        }
                }
        } else
                return 0;

        return 1;
}
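
/*
 * APIC-ID windowing example for the topoext path above: with
 * nshared == 2 and a CPU whose apicid is 5, first = 5 - (5 % 2) = 4
 * and last = 5, so exactly the two siblings with APIC IDs 4 and 5
 * are set in each other's shared_cpu_map.
 */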

static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
        int index_msb, i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        if (c->x86_vendor == X86_VENDOR_AMD) {
                if (cache_shared_amd_cpu_map_setup(cpu, index))
                        return;
        }

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

        if (num_threads_sharing == 1)
                cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
        else {
                index_msb = get_count_order(num_threads_sharing);

                for_each_online_cpu(i) {
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
                                cpumask_set_cpu(i,
                                        to_cpumask(this_leaf->shared_cpu_map));
                                if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
                                        sibling_leaf =
                                                CPUID4_INFO_IDX(i, index);
                                        cpumask_set_cpu(cpu, to_cpumask(
                                                sibling_leaf->shared_cpu_map));
                                }
                        }
                }
        }
}

static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
        struct _cpuid4_info     *this_leaf, *sibling_leaf;
        int sibling;

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
                cpumask_clear_cpu(cpu,
                                  to_cpumask(sibling_leaf->shared_cpu_map));
        }
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void free_cache_attributes(unsigned int cpu)
{
        int i;

        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);

        kfree(per_cpu(ici_cpuid4_info, cpu));
        per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static void get_cpu_leaves(void *_retval)
{
        int j, *retval = _retval, cpu = smp_processor_id();

        /* Do cpuid and store the results */
        for (j = 0; j < num_cache_leaves; j++) {
                struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

                *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
                if (unlikely(*retval < 0)) {
                        int i;

                        for (i = 0; i < j; i++)
                                cache_remove_shared_cpu_map(cpu, i);
                        break;
                }
                cache_shared_cpu_map_setup(cpu, j);
        }
}

static int detect_cache_attributes(unsigned int cpu)
{
        int                     retval;

        if (num_cache_leaves == 0)
                return -ENOENT;

        per_cpu(ici_cpuid4_info, cpu) = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return -ENOMEM;

        smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
        if (retval) {
                kfree(per_cpu(ici_cpuid4_info, cpu));
                per_cpu(ici_cpuid4_info, cpu) = NULL;
        }

        return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
        struct kobject kobj;
        unsigned int cpu;
        unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)         (&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)                           \
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
                                unsigned int cpu)                       \
{                                                                       \
        return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
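
/*
 * The "+ 1" in the instantiations above undoes CPUID's minus-one
 * encoding: e.g. a ways_of_associativity field of 7 is shown to user
 * space as 8.  level is the only field reported verbatim (val == 0).
 */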

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
                         unsigned int cpu)
{
        return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
                                        int type, char *buf)
{
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
        int n = 0;

        if (len > 1) {
                const struct cpumask *mask;

                mask = to_cpumask(this_leaf->shared_cpu_map);
                n = type ?
                        cpulist_scnprintf(buf, len-2, mask) :
                        cpumask_scnprintf(buf, len-2, mask);
                buf[n++] = '\n';
                buf[n] = '\0';
        }
        return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
                                          unsigned int cpu)
{
        return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
                                           unsigned int cpu)
{
        return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
                         unsigned int cpu)
{
        switch (this_leaf->base.eax.split.type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return sprintf(buf, "Unknown\n");
        }
}

#define to_object(k)    container_of(k, struct _index_kobject, kobj)
#define to_attr(a)      container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
        __ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &physical_line_partition.attr,
        &ways_of_associativity.attr,
        &number_of_sets.attr,
        &size.attr,
        &shared_cpu_map.attr,
        &shared_cpu_list.attr,
        NULL
};

#ifdef CONFIG_AMD_NB
static struct attribute **amd_l3_attrs(void)
{
        static struct attribute **attrs;
        int n;

        if (attrs)
                return attrs;

        n = ARRAY_SIZE(default_attrs);

        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                n += 2;

        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                n += 1;

        attrs = kzalloc(n * sizeof(struct attribute *), GFP_KERNEL);
        if (attrs == NULL)
                return attrs = default_attrs;

        for (n = 0; default_attrs[n]; n++)
                attrs[n] = default_attrs[n];

        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
                attrs[n++] = &cache_disable_0.attr;
                attrs[n++] = &cache_disable_1.attr;
        }

        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                attrs[n++] = &subcaches.attr;

        return attrs;
}
#endif

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ?
                fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                        buf, this_leaf->cpu) :
                0;
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->store ?
                fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                        buf, count, this_leaf->cpu) :
                0;
        return ret;
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cache = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
        .sysfs_ops      = &sysfs_ops,
};

static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
        kfree(per_cpu(ici_cache_kobject, cpu));
        kfree(per_cpu(ici_index_kobject, cpu));
        per_cpu(ici_cache_kobject, cpu) = NULL;
        per_cpu(ici_index_kobject, cpu) = NULL;
        free_cache_attributes(cpu);
}

static int cpuid4_cache_sysfs_init(unsigned int cpu)
{
        int err;

        if (num_cache_leaves == 0)
                return -ENOENT;

        err = detect_cache_attributes(cpu);
        if (err)
                return err;

        /* Allocate all required memory */
        per_cpu(ici_cache_kobject, cpu) =
                kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
                goto err_out;

        per_cpu(ici_index_kobject, cpu) = kzalloc(
            sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpuid4_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int cache_add_dev(struct device *dev)
{
        unsigned int cpu = dev->id;
        unsigned long i, j;
        struct _index_kobject *this_object;
        struct _cpuid4_info   *this_leaf;
        int retval;

        retval = cpuid4_cache_sysfs_init(cpu);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
                                      &ktype_percpu_entry,
                                      &dev->kobj, "%s", "cache");
        if (retval < 0) {
                cpuid4_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < num_cache_leaves; i++) {
                this_object = INDEX_KOBJECT_PTR(cpu, i);
                this_object->cpu = cpu;
                this_object->index = i;

                this_leaf = CPUID4_INFO_IDX(cpu, i);

                ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
                if (this_leaf->base.nb)
                        ktype_cache.default_attrs = amd_l3_attrs();
#endif
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache,
                                              per_cpu(ici_cache_kobject, cpu),
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++)
                                kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
                        kobject_put(per_cpu(ici_cache_kobject, cpu));
                        cpuid4_cache_sysfs_exit(cpu);
                        return retval;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

        kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
        return 0;
}
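
/*
 * The resulting sysfs layout for each online CPU is one indexY
 * directory per cache leaf found by cpuid4_cache_lookup_regs(), e.g.
 *
 *   /sys/devices/system/cpu/cpu0/cache/index0/{type,level,size,...}
 *   /sys/devices/system/cpu/cpu0/cache/index1/...
 */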

static void cache_remove_dev(struct device *dev)
{
        unsigned int cpu = dev->id;
        unsigned long i;

        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return;
        if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                return;
        cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
        kobject_put(per_cpu(ici_cache_kobject, cpu));
        cpuid4_cache_sysfs_exit(cpu);
}

static int cacheinfo_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;

        dev = get_cpu_device(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block cacheinfo_cpu_notifier = {
        .notifier_call = cacheinfo_cpu_callback,
};

static int __init cache_sysfs_init(void)
{
        int i;

        if (num_cache_leaves == 0)
                return 0;

        for_each_online_cpu(i) {
                int err;
                struct device *dev = get_cpu_device(i);

                err = cache_add_dev(dev);
                if (err)
                        return err;
        }
        register_hotcpu_notifier(&cacheinfo_cpu_notifier);
        return 0;
}

device_initcall(cache_sysfs_init);

#endif
