TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/cpu/amd.c

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
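/*
 * Illustrative decode (assuming the BKDG field layout cited above): the
 * 3-bit NodesPerProcessor field holds (nodes - 1), so a two-node Fam15h
 * MCM reports 1 in Fn8000_001E_ECX[10:8] and nodes_per_socket becomes 2.
 */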
static u32 nodes_per_socket = 1;

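/*
 * A note on the gprs[] convention used below (a sketch of how
 * rdmsr_safe_regs()/wrmsr_safe_regs() consume the array; treat the exact
 * register order as an assumption): the eight slots map to
 * eax, ecx, edx, ebx, esp, ebp, esi, edi. Hence gprs[1] (ecx) carries the
 * MSR index, gprs[0]/gprs[2] (eax/edx) carry the low/high halves of the
 * 64-bit value, and gprs[7] (edi) carries 0x9c5a203a, the K8 vendor
 * passcode that unlocks these otherwise-protected MSRs.
 */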
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;

        err = rdmsr_safe_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return wrmsr_safe_regs(gprs);
}

/*
 *      B-stepping AMD K6 parts before B 9730xxxx have hardware bugs that
 *      can cause misexecution of code under Linux. Owners of such
 *      processors should contact AMD for precise details and a CPU swap.
 *
 *      See     http://www.multimania.com/poulot/k6bug.html
 *      and     section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *              (Publication # 21266  Issue Date: August 1998)
 *
 *      The following test is erm.. interesting. AMD neglected to bump
 *      the chip stepping when fixing the bug but they also tweaked some
 *      performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
        ".globl vide\n"
        ".type vide, @function\n"
        ".align 4\n"
        "vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: remove the unneeded alias.
 */
#define CBAR            (0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB        (0x80000000)
#define CBAR_KEY        (0x000000CB)
        if (c->x86_model == 9 || c->x86_model == 10) {
                if (inl(CBAR) & CBAR_ENB)
                        outl(0 | CBAR_KEY, CBAR);
        }
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;
        int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

        if (c->x86_model < 6) {
                /* Based on AMD doc 20734R - June 2000 */
                if (c->x86_model == 0) {
                        clear_cpu_cap(c, X86_FEATURE_APIC);
                        set_cpu_cap(c, X86_FEATURE_PGE);
                }
                return;
        }

        if (c->x86_model == 6 && c->x86_stepping == 1) {
                const int K6_BUG_LOOP = 1000000;
                int n;
                void (*f_vide)(void);
                u64 d, d2;

                pr_info("AMD K6 stepping B detected - ");

                /*
                 * It looks like AMD fixed the 2.6.2 bug and improved indirect
                 * calls at the same time.
                 */

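                /*
                 * Timing heuristic: time K6_BUG_LOOP (one million) calls to
                 * a bare "ret" via the TSC. The check below treats more than
                 * 20 cycles per call as a sign of an unfixed part; fixed
                 * steppings execute the indirect call far faster.
                 */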
                n = K6_BUG_LOOP;
                f_vide = vide;
                OPTIMIZER_HIDE_VAR(f_vide);
                d = rdtsc();
                while (n--)
                        f_vide();
                d2 = rdtsc();
                d = d2-d;

                if (d > 20*K6_BUG_LOOP)
                        pr_cont("system stability may be impaired when more than 32 MB are used.\n");
                else
                        pr_cont("probably OK (after B9730xxxx).\n");
        }

        /* K6 with old style WHCR */
        if (c->x86_model < 8 ||
           (c->x86_model == 8 && c->x86_stepping < 8)) {
                /* We can only write-allocate on the low 508 MB */
                if (mbytes > 508)
                        mbytes = 508;

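                /*
                 * Old-style WHCR (a sketch of the layout assumed here, as
                 * implied by the computation below): bit 0 enables write
                 * allocation and bits [7:1] hold the limit in 4 MB units,
                 * hence mbytes/4.
                 */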
                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0x0000FFFF) == 0) {
                        unsigned long flags;
                        l = (1<<0)|((mbytes/4)<<1);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling old style K6 write allocation for %d Mb\n",
                                mbytes);
                }
                return;
        }

        if ((c->x86_model == 8 && c->x86_stepping > 7) ||
             c->x86_model == 9 || c->x86_model == 13) {
                /* The more serious chips. */

                if (mbytes > 4092)
                        mbytes = 4092;

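                /*
                 * New-style WHCR (again an assumed layout, read off the
                 * computation below): bit 16 enables write allocation and
                 * bits [31:22] hold the limit in 4 MB units, hence
                 * (mbytes>>2)<<22.
                 */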
                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0xFFFF0000) == 0) {
                        unsigned long flags;
                        l = ((mbytes>>2)<<22)|(1<<16);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling new style K6 write allocation for %d Mb\n",
                                mbytes);
                }

                return;
        }

        if (c->x86_model == 10) {
                /* AMD Geode LX is model 10 */
                /* placeholder for any needed mods */
                return;
        }
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;

        /*
         * Bit 15 of the Athlon-specific MSR C001_0015 needs to be 0
         * to enable SSE on Palomino/Morgan/Barton CPUs.
         * If the BIOS didn't enable it already, enable it here.
         */
        if (c->x86_model >= 6 && c->x86_model <= 10) {
                if (!cpu_has(c, X86_FEATURE_XMM)) {
                        pr_info("Enabling disabled K7/SSE Support.\n");
                        msr_clear_bit(MSR_K7_HWCR, 15);
                        set_cpu_cap(c, X86_FEATURE_XMM);
                }
        }

        /*
         * It's been determined by AMD that Athlons since model 8 stepping 1
         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
         * as per AMD technical note 27212 0.2.
         */
        if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
                rdmsr(MSR_K7_CLK_CTL, l, h);
                if ((l & 0xfff00000) != 0x20000000) {
                        pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
                                l, ((l & 0x000fffff)|0x20000000));
                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
                }
        }

        /* Are we being called from identify_secondary_cpu()? */
        if (!c->cpu_index)
                return;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        /* Athlon 660/661 is valid. */
        if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
            (c->x86_stepping == 1)))
                return;

        /* Duron 670 is valid */
        if ((c->x86_model == 7) && (c->x86_stepping == 0))
                return;

        /*
         * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
         * bit. It's worth noting that the A5 stepping (662) of some
         * Athlon XPs have the MP bit set.
         * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
         * more.
         */
        if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
            ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
             (c->x86_model > 7))
                if (cpu_has(c, X86_FEATURE_MP))
                        return;

        /* If we get here, this is not a certified SMP capable AMD system. */

        /*
         * Don't taint if we are running an SMP kernel on a single non-MP
         * approved Athlon
         */
        WARN_ONCE(1, "WARNING: This combination of AMD"
                " processors is not suitable for SMP.\n");
        add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
        u32 cus_per_node;

        if (c->x86 >= 0x17)
                return;

        cus_per_node = c->x86_max_cores / nodes_per_socket;
        c->cpu_core_id %= cus_per_node;
}


static void amd_get_topology_early(struct cpuinfo_x86 *c)
{
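        /*
         * CPUID Fn8000_001E_EBX[15:8] is ThreadsPerCore - 1 (field position
         * per the APM; treat it as an assumption here), hence the +1 below.
         */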
        if (cpu_has(c, X86_FEATURE_TOPOEXT))
                smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
        u8 node_id;
        int cpu = smp_processor_id();

        /* get information required for multi-node processors */
        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                int err;
                u32 eax, ebx, ecx, edx;

                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

                node_id  = ecx & 0xff;

                if (c->x86 == 0x15)
                        c->cu_id = ebx & 0xff;

                if (c->x86 >= 0x17) {
                        c->cpu_core_id = ebx & 0xff;

                        if (smp_num_siblings > 1)
                                c->x86_max_cores /= smp_num_siblings;
                }

                /*
                 * If CPUID leaf 0xB is available, use it to derive
                 * topology information.
                 */
                err = detect_extended_topology(c);
                if (!err)
                        c->x86_coreid_bits = get_count_order(c->x86_max_cores);

                cacheinfo_amd_init_llc_id(c, cpu, node_id);

        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
                u64 value;

                rdmsrl(MSR_FAM10H_NODE_ID, value);
                node_id = value & 7;

                per_cpu(cpu_llc_id, cpu) = node_id;
        } else
                return;

        if (nodes_per_socket > 1) {
                set_cpu_cap(c, X86_FEATURE_AMD_DCM);
                legacy_fixup_core_id(c);
        }
}

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
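/*
 * Worked example (illustrative only): with x86_coreid_bits == 3, an initial
 * APIC ID of 0x1a (0b11010) decodes to cpu_core_id = 0b010 = 2 and
 * phys_proc_id = 0b11 = 3.
 */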
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
        unsigned bits;
        int cpu = smp_processor_id();

        bits = c->x86_coreid_bits;
        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
        c->phys_proc_id = c->initial_apicid >> bits;
        /* use socket ID also for last level cache */
        per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}

u16 amd_get_nb_id(int cpu)
{
        return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
        return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node;
        unsigned apicid = c->apicid;

        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
                node = per_cpu(cpu_llc_id, cpu);

        /*
         * On a multi-fabric platform (e.g. Numascale NumaChip) a
         * platform-specific handler needs to be called to fixup some
         * IDs of the CPU.
         */
        if (x86_cpuinit.fixup_cpu_id)
                x86_cpuinit.fixup_cpu_id(c, node);

        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 *
                 * - The CPU is missing memory and no node was created.  In
                 *   that case try picking one from a nearby CPU.
                 *
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in.  Assume
                 *   they are all increased by a constant offset, but in
                 *   the same order as the HT nodeids.  If that doesn't
                 *   result in a usable node fall back to the path for the
                 *   previous case.
                 *
                 * This workaround operates directly on the mapping between
                 * APIC ID and NUMA node, assuming a certain relationship
                 * between APIC ID, HT node ID and NUMA topology.  As going
                 * through the CPU mapping may alter the outcome, directly
                 * access __apicid_to_node[].
                 */
                int ht_nodeid = c->initial_apicid;

                if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = __apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits, ecx;

        /* Multi core CPU? */
        if (c->extended_cpuid_level < 0x80000008)
                return;

        ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
        if (c->x86 >= 0xf) {
                unsigned long long tseg;

                /*
                 * Split up direct mapping around the TSEG SMM area.
                 * Don't do it for gbpages because there seems very little
                 * benefit in doing so.
                 */
                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
                        unsigned long pfn = tseg >> PAGE_SHIFT;

                        pr_debug("tseg: %010llx\n", tseg);
                        if (pfn_range_is_mapped(pfn, pfn + 1))
                                set_memory_4k((unsigned long)__va(tseg), 1);
                }
        }
#endif

        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

                if (c->x86 > 0x10 ||
                    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
                        u64 val;

                        rdmsrl(MSR_K7_HWCR, val);
                        if (!(val & BIT(24)))
                                pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
                }
        }

        if (c->x86 == 0x15) {
                unsigned long upperbit;
                u32 cpuid, assoc;

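                /*
                 * CPUID Fn8000_0005_EDX describes the L1 instruction cache:
                 * bits [31:24] size in KB, bits [23:16] associativity (per
                 * the APM; treat the exact fields as an assumption).
                 * upperbit is thus the way size; e.g. a 64K 2-way Fam15h L1I
                 * gives upperbit = 32K, so the mask selects VA bits [14:12].
                 */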
                cpuid    = cpuid_edx(0x80000005);
                assoc    = cpuid >> 16 & 0xff;
                upperbit = ((cpuid >> 24) << 10) / assoc;

                va_align.mask     = (upperbit - 1) & PAGE_MASK;
                va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

                /* A random value per boot for bit slice [12:upper_bit) */
                va_align.bits = get_random_int() & va_align.mask;
        }

        if (cpu_has(c, X86_FEATURE_MWAITX))
                use_mwaitx_delay();

        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                u32 ecx;

                ecx = cpuid_ecx(0x8000001e);
                nodes_per_socket = ((ecx >> 8) & 7) + 1;
        } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                u64 value;

                rdmsrl(MSR_FAM10H_NODE_ID, value);
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }

        if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
            !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
            c->x86 >= 0x15 && c->x86 <= 0x17) {
                unsigned int bit;

                switch (c->x86) {
                case 0x15: bit = 54; break;
                case 0x16: bit = 33; break;
                case 0x17: bit = 10; break;
                default: return;
                }
                /*
                 * Try to cache the base value so further operations can
                 * avoid RMW. If that faults, do not enable SSBD.
                 */
                if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
                        setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
                        setup_force_cpu_cap(X86_FEATURE_SSBD);
                        x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
                }
        }
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
        u64 msr;

        /*
         * BIOS support is required for SME and SEV.
         *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
         *            the SME physical address space reduction value.
         *            If BIOS has not enabled SME then don't advertise the
         *            SME feature (set in scattered.c).
         *   For SEV: If BIOS has not enabled SEV then don't advertise the
         *            SEV feature (set in scattered.c).
         *
         *   In all cases, since support for SME and SEV requires long mode,
         *   don't advertise the feature under CONFIG_X86_32.
         */
        if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
                /* Check if memory encryption is enabled */
                rdmsrl(MSR_K8_SYSCFG, msr);
                if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                        goto clear_all;

                /*
                 * Always adjust physical address bits. Even though this
                 * will be a value above 32-bits this is still done for
                 * CONFIG_X86_32 so that accurate values are reported.
                 */
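                /*
                 * CPUID Fn8000_001F_EBX[11:6] holds the physical address bit
                 * reduction when SME is enabled (field position per the APM;
                 * noted here as an assumption).
                 */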
                c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

                if (IS_ENABLED(CONFIG_X86_32))
                        goto clear_all;

                rdmsrl(MSR_K7_HWCR, msr);
                if (!(msr & MSR_K7_HWCR_SMMLOCK))
                        goto clear_sev;

                return;

clear_all:
                clear_cpu_cap(c, X86_FEATURE_SME);
clear_sev:
                clear_cpu_cap(c, X86_FEATURE_SEV);
        }
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
        u64 value;
        u32 dummy;

        early_init_amd_mc(c);

#ifdef CONFIG_X86_32
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_K7);
#endif

        if (c->x86 >= 0xf)
                set_cpu_cap(c, X86_FEATURE_K8);

        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 indicates that the TSC runs
         * at a constant rate across P/T states and does not stop in deep
         * C-states.
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }

        /* Bit 12 of 8000_0007 edx is the accumulated power mechanism. */
        if (c->x86_power & BIT(12))
                set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
        /* Set MTRR capability flag if appropriate */
        if (c->x86 == 5)
                if (c->x86_model == 13 || c->x86_model == 9 ||
                    (c->x86_model == 8 && c->x86_stepping >= 8))
                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
        /*
         * The APIC ID can always be treated as an 8-bit value for AMD APIC
         * versions >= 0x10, but even old K8s came out of reset with version
         * 0x10. So, we can safely set X86_FEATURE_EXTD_APICID unconditionally
         * for families after 16h.
         */
        if (boot_cpu_has(X86_FEATURE_APIC)) {
                if (c->x86 > 0x16)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                else if (c->x86 >= 0xf) {
                        /* check CPU config space for extended APIC ID */
                        unsigned int val;

                        val = read_pci_config(0, 24, 0, 0x68);
                        if ((val >> 17 & 0x3) == 0x3)
                                set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                }
        }
#endif

        /*
         * This is only needed to tell the kernel whether to use VMCALL
         * or VMMCALL.  VMMCALL is never executed except under virt, so
         * we can set it unconditionally.
         */
        set_cpu_cap(c, X86_FEATURE_VMMCALL);

        /* F16h erratum 793, CVE-2013-6885 */
        if (c->x86 == 0x16 && c->x86_model <= 0xf)
                msr_set_bit(MSR_AMD64_LS_CFG, 15);

        /*
         * Check whether the machine is affected by erratum 400. This is
         * used to select the proper idle routine and to enable the check
         * whether the machine is affected in arch_post_acpi_init(), which
         * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
         */
        if (cpu_has_amd_erratum(c, amd_erratum_400))
                set_cpu_bug(c, X86_BUG_AMD_E400);

        early_detect_mem_encrypt(c);

        /* Re-enable TopologyExtensions if switched off by BIOS */
        if (c->x86 == 0x15 &&
            (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {

                if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
                        if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
                                pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }

        amd_get_topology_early(c);
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
        u32 level;
        u64 value;

        /* On C+ stepping K8, rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /*
         * Some BIOSes incorrectly force this feature, but only K8 revision D
         * (model = 0x14) and later actually support it.
         * (AMD Erratum #110, docId: 25759).
         */
        if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
                clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
                if (!rdmsrl_amd_safe(0xc001100d, &value)) {
                        value &= ~BIT_64(32);
                        wrmsrl_amd_safe(0xc001100d, value);
                }
        }

        if (!c->x86_model_id[0])
                strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
        /*
         * Disable the TLB flush filter by setting HWCR.FFDIS on K8
         * (bit 6 of MSR C001_0015).
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        msr_set_bit(MSR_K7_HWCR, 6);
#endif
        set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
        /* do this for boot cpu */
        if (c == &boot_cpu_data)
                check_enable_amd_mmconf_dmi();

        fam10h_check_enable_mmcfg();
#endif

        /*
         * Disable GART TLB Walk Errors on Fam10h. We do this here because this
         * is always needed when GART is enabled, even in a kernel which has no
         * MCE support built in. BIOS should disable GartTlbWlk Errors already.
         * If it doesn't, we do it here as suggested by the BKDG.
         *
         * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
         */
        msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

        /*
         * On family 10h BIOS may not have properly enabled WC+ support, causing
         * it to be converted to CD memtype. This may result in performance
         * degradation for certain nested-paging guests. Prevent this conversion
         * by clearing bit 24 in MSR_AMD64_BU_CFG2.
         *
         * NOTE: we want to use the _safe accessors so as not to #GP kvm
         * guests on older kvm hosts.
         */
        msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

        if (cpu_has_amd_erratum(c, amd_erratum_383))
                set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG        0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
        /*
         * Apply the erratum 665 fix unconditionally so machines without a
         * BIOS fix work.
         */
        msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "force"))
                rdrand_force = true;
        else
                return -EINVAL;

        return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
        /*
         * Saving of the MSR used to hide the RDRAND support during
         * suspend/resume is done by arch/x86/power/cpu.c, which is
         * dependent on CONFIG_PM_SLEEP.
         */
        if (!IS_ENABLED(CONFIG_PM_SLEEP))
                return;

        /*
         * The nordrand option can clear X86_FEATURE_RDRAND, so check for
         * RDRAND support using the CPUID function directly.
         */
        if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
                return;

        msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

        /*
         * Verify that the CPUID change has occurred in case the kernel is
         * running virtualized and the hypervisor doesn't support the MSR.
         */
        if (cpuid_ecx(1) & BIT(30)) {
                pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
                return;
        }

        clear_cpu_cap(c, X86_FEATURE_RDRAND);
        pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
        /*
         * Some BIOS implementations do not restore proper RDRAND support
         * across suspend and resume. Check on whether to hide the RDRAND
         * instruction support via CPUID.
         */
        clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
        u64 value;

        /*
         * The way-access filter has a performance penalty on some workloads.
         * Disable it on the affected CPUs.
         */
        if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
                if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
                        value |= 0x1E;
                        wrmsrl_safe(MSR_F15H_IC_CFG, value);
                }
        }

        /*
         * Some BIOS implementations do not restore proper RDRAND support
         * across suspend and resume. Check on whether to hide the RDRAND
         * instruction support via CPUID.
         */
        clear_rdrand_cpuid_bit(c);
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
        set_cpu_cap(c, X86_FEATURE_ZEN);

        /*
         * Fix erratum 1076: the CPB feature bit is not set in CPUID.
         * Always set it, except when running under a hypervisor.
         */
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
                set_cpu_cap(c, X86_FEATURE_CPB);
}

static void init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd(c);

        /*
         * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
         * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
         */
        clear_cpu_cap(c, 0*32+31);

        if (c->x86 >= 0x10)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* get apicid instead of initial apic id from cpuid */
        c->apicid = hard_smp_processor_id();

        /* K6s report MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
                clear_cpu_cap(c, X86_FEATURE_MCE);

        switch (c->x86) {
        case 4:    init_amd_k5(c); break;
        case 5:    init_amd_k6(c); break;
        case 6:    init_amd_k7(c); break;
        case 0xf:  init_amd_k8(c); break;
        case 0x10: init_amd_gh(c); break;
        case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
        case 0x16: init_amd_jg(c); break;
        case 0x17: init_amd_zn(c); break;
        }

        /*
         * Enable the workaround for the FXSAVE leak on CPUs
         * without the XSaveErPtr feature
         */
        if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

        cpu_detect_cache_sizes(c);

        amd_detect_cmp(c);
        amd_get_topology(c);
        srat_detect_node(c);

        init_amd_cacheinfo(c);

        if (cpu_has(c, X86_FEATURE_XMM2)) {
                unsigned long long val;
                int ret;

                /*
                 * A serializing LFENCE has less overhead than MFENCE, so
                 * use it for execution serialization.  On families which
                 * don't have that MSR, LFENCE is already serializing.
                 * msr_set_bit() uses the safe accessors, too, even if the MSR
                 * is not present.
                 */
                msr_set_bit(MSR_F10H_DECFG,
                            MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

                /*
                 * Verify that the MSR write was successful (could be running
                 * under a hypervisor) and only then assume that LFENCE is
                 * serializing.
                 */
                ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
                if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
                        /* A serializing LFENCE stops RDTSC speculation */
                        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
                } else {
                        /* MFENCE stops RDTSC speculation */
                        set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
                }
        }

        /*
         * Family 0x12 and above processors have an APIC timer
         * that keeps running in deep C-states.
         */
        if (c->x86 > 0x11)
                set_cpu_cap(c, X86_FEATURE_ARAT);

        /* 3DNow or LM implies PREFETCHW */
        if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
                if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
                        set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

        /* AMD CPUs don't reset SS attributes on SYSRET, but Xen does. */
        if (!cpu_has(c, X86_FEATURE_XENPV))
                set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* AMD erratum T13 (order #21922) */
        if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
                /* Tbird rev A1/A2 */
                if (c->x86_model == 4 &&
                        (c->x86_stepping == 0 || c->x86_stepping == 1))
                        size = 256;
        }
        return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
        u32 ebx, eax, ecx, edx;
        u16 mask = 0xfff;

        if (c->x86 < 0xf)
                return;

        if (c->extended_cpuid_level < 0x80000006)
                return;

        cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

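        /*
         * CPUID Fn8000_0006_EBX describes the L2 TLB for 4K pages: the entry
         * counts are 12-bit fields, DTLB in [27:16] and ITLB in [11:0] (per
         * the APM; an assumption here), which is why mask starts as 0xfff.
         */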
        tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
        tlb_lli_4k[ENTRIES] = ebx & mask;

        /*
         * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
         * characteristics from the CPUID function 0x80000005 instead.
         */
        if (c->x86 == 0xf) {
                cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                mask = 0xff;
        }

        /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!((eax >> 16) & mask))
                tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
        else
                tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

        /* a 4M entry uses two 2M entries */
        tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

        /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!(eax & mask)) {
                /* Erratum 658 */
                if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
                        tlb_lli_2m[ENTRIES] = 1024;
                } else {
                        cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                        tlb_lli_2m[ENTRIES] = eax & 0xff;
                }
        } else
                tlb_lli_2m[ENTRIES] = eax & mask;

        tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
        .c_vendor       = "AMD",
        .c_ident        = { "AuthenticAMD" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                          [3] = "486 DX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB",
                          [14] = "Am5x86-WT",
                          [15] = "Am5x86-WB"
                  }
                },
        },
        .legacy_cache_size = amd_size_cache,
#endif
        .c_early_init   = early_init_amd,
        .c_detect_tlb   = cpu_detect_tlb_amd,
        .c_bsp_init     = bsp_init_amd,
        .c_init         = init_amd,
        .c_x86_vendor   = X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *      AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *                         AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *                         AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)
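/*
 * Worked example of the packing (illustrative only):
 * AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2) evaluates to 0x10021042 --
 * family 0x10 in bits [31:24], start (model 0x2, stepping 0x1) as 0x021 in
 * bits [23:12], and end (model 0x4, stepping 0x2) as 0x042 in bits [11:0].
 * The (model << 4) | stepping encoding matches the "ms" value computed in
 * cpu_has_amd_erratum() below.
 */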

static const int amd_erratum_400[] =
        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));


static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;

        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
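                /*
                 * OSVW (OS Visible Workaround) lookup, roughly: the LENGTH
                 * MSR reports how many status bits are valid; the status
                 * bits live in a run of 64-bit MSRs starting at OSVW_STATUS,
                 * so bit (osvw_id & 0x3f) of MSR (OSVW_STATUS + (osvw_id >> 6))
                 * says whether this part needs the workaround.
                 */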
                u64 osvw_len;

                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
                if (osvw_id < osvw_len) {
                        u64 osvw_bits;

                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
                            osvw_bits);
                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
                }
        }

        /* OSVW unavailable or ID unknown, match family-model-stepping range */
        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
                    (ms <= AMD_MODEL_RANGE_END(range)))
                        return true;

        return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
        if (!boot_cpu_has(X86_FEATURE_BPEXT))
                return;

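        /*
         * DR0 has a dedicated mask MSR, while the DR1-DR3 mask MSRs sit at
         * consecutive addresses, so DR1's MSR plus (dr - 1) addresses the
         * right one for dr in 1..3.
         */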
        switch (dr) {
        case 0:
                wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
                break;
        case 1:
        case 2:
        case 3:
                wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
                break;
        default:
                break;
        }
}
