TOMOYO Linux Cross Reference
Linux/arch/arm64/kernel/cpufeature.c

  1 /*
  2  * Contains CPU feature definitions
  3  *
  4  * Copyright (C) 2015 ARM Ltd.
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License version 2 as
  8  * published by the Free Software Foundation.
  9  *
 10  * This program is distributed in the hope that it will be useful,
 11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13  * GNU General Public License for more details.
 14  *
 15  * You should have received a copy of the GNU General Public License
 16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 17  */
 18 
 19 #define pr_fmt(fmt) "CPU features: " fmt
 20 
 21 #include <linux/bsearch.h>
 22 #include <linux/cpumask.h>
 23 #include <linux/sort.h>
 24 #include <linux/stop_machine.h>
 25 #include <linux/types.h>
 26 #include <linux/mm.h>
 27 #include <asm/cpu.h>
 28 #include <asm/cpufeature.h>
 29 #include <asm/cpu_ops.h>
 30 #include <asm/fpsimd.h>
 31 #include <asm/mmu_context.h>
 32 #include <asm/processor.h>
 33 #include <asm/sysreg.h>
 34 #include <asm/traps.h>
 35 #include <asm/virt.h>
 36 
 37 unsigned long elf_hwcap __read_mostly;
 38 EXPORT_SYMBOL_GPL(elf_hwcap);
 39 
 40 #ifdef CONFIG_COMPAT
 41 #define COMPAT_ELF_HWCAP_DEFAULT        \
 42                                 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
 43                                  COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
 44                                  COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
 45                                  COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
 46                                  COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
 47                                  COMPAT_HWCAP_LPAE)
 48 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
 49 unsigned int compat_elf_hwcap2 __read_mostly;
 50 #endif
 51 
 52 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 53 EXPORT_SYMBOL(cpu_hwcaps);
 54 
 55 /*
 56  * Flag to indicate if we have computed the system-wide
 57  * capabilities based on the boot-time active CPUs. This
 58  * will be used to determine if a new booting CPU should
 59  * go through the verification process to make sure that it
 60  * supports the system capabilities, without using a hotplug
 61  * notifier.
 62  */
 63 static bool sys_caps_initialised;
 64 
 65 static inline void set_sys_caps_initialised(void)
 66 {
 67         sys_caps_initialised = true;
 68 }
 69 
 70 static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
 71 {
 72         /* file-wide pr_fmt adds "CPU features: " prefix */
 73         pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
 74         return 0;
 75 }
 76 
 77 static struct notifier_block cpu_hwcaps_notifier = {
 78         .notifier_call = dump_cpu_hwcaps
 79 };
 80 
 81 static int __init register_cpu_hwcaps_dumper(void)
 82 {
 83         atomic_notifier_chain_register(&panic_notifier_list,
 84                                        &cpu_hwcaps_notifier);
 85         return 0;
 86 }
 87 __initcall(register_cpu_hwcaps_dumper);
 88 
 89 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
 90 EXPORT_SYMBOL(cpu_hwcap_keys);
 91 
 92 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 93         {                                               \
 94                 .sign = SIGNED,                         \
 95                 .visible = VISIBLE,                     \
 96                 .strict = STRICT,                       \
 97                 .type = TYPE,                           \
 98                 .shift = SHIFT,                         \
 99                 .width = WIDTH,                         \
100                 .safe_val = SAFE_VAL,                   \
101         }
102 
103 /* Define a feature with unsigned values */
104 #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
105         __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
106 
107 /* Define a feature with a signed value */
108 #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
109         __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
110 
111 #define ARM64_FTR_END                                   \
112         {                                               \
113                 .width = 0,                             \
114         }
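
/*
 * Sentinel entry: a zero width terminates an arm64_ftr_bits table, which
 * is what the "ftrp->width" loop conditions in init_cpu_ftr_reg() and
 * update_cpu_ftr_reg() below rely on.
 */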
115 
116 /* meta feature for alternatives */
117 static bool __maybe_unused
118 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
119 
120 
121 /*
122  * NOTE: Any changes to the visibility of features should be kept in
123  * sync with the documentation of the CPU feature register ABI.
124  */
125 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
126         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
127         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
128         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
129         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
130         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
131         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
132         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
133         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
134         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
135         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
136         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
137         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
138         ARM64_FTR_END,
139 };
140 
141 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
142         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
143         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
144         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
145         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
146         ARM64_FTR_END,
147 };
148 
149 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
150         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
151         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
152         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
153         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
154                                    FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
155         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
156         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
157         S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
158         S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
159         /* Linux doesn't care about the EL3 */
160         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
161         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
162         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
163         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
164         ARM64_FTR_END,
165 };
166 
167 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
168         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
169         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
170         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
171         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
172         /* Linux shouldn't care about secure memory */
173         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
174         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
175         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
176         /*
177          * Differing PARange is fine as long as all peripherals and memory are mapped
178          * within the minimum PARange of all CPUs
179          */
180         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
181         ARM64_FTR_END,
182 };
183 
184 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
185         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
186         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
187         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
188         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
189         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
190         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
191         ARM64_FTR_END,
192 };
193 
194 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
195         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
196         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
197         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
198         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
199         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
200         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
201         ARM64_FTR_END,
202 };
203 
204 static const struct arm64_ftr_bits ftr_ctr[] = {
205         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
206         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
207         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
208         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
209         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
210         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
211         /*
212          * Linux can handle differing I-cache policies. Userspace JITs will
213          * make use of *minLine.
214          * If we have differing I-cache policies, report it as the weakest - VIPT.
215          */
216         ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),       /* L1Ip */
217         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
218         ARM64_FTR_END,
219 };
220 
221 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
222         .name           = "SYS_CTR_EL0",
223         .ftr_bits       = ftr_ctr
224 };
225 
226 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
227         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),   /* InnerShr */
228         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),       /* FCSE */
229         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),    /* AuxReg */
230         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),       /* TCM */
231         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),       /* ShareLvl */
232         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),    /* OuterShr */
233         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* PMSA */
234         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),        /* VMSA */
235         ARM64_FTR_END,
236 };
237 
238 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
239         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
240         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
241         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
242         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
243         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
244         /*
245          * We can instantiate multiple PMU instances with different levels
246          * of support.
247          */
248         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
249         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
250         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
251         ARM64_FTR_END,
252 };
253 
254 static const struct arm64_ftr_bits ftr_mvfr2[] = {
255         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* FPMisc */
256         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* SIMDMisc */
257         ARM64_FTR_END,
258 };
259 
260 static const struct arm64_ftr_bits ftr_dczid[] = {
261         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),            /* DZP */
262         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),       /* BS */
263         ARM64_FTR_END,
264 };
265 
266 
267 static const struct arm64_ftr_bits ftr_id_isar5[] = {
268         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
269         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
270         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
271         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
272         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
273         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
274         ARM64_FTR_END,
275 };
276 
277 static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
278         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* ac2 */
279         ARM64_FTR_END,
280 };
281 
282 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
283         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),               /* State3 */
284         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),                /* State2 */
285         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* State1 */
286         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* State0 */
287         ARM64_FTR_END,
288 };
289 
290 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
291         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
292         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),   /* PerfMon */
293         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
294         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
295         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
296         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
297         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
298         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
299         ARM64_FTR_END,
300 };
301 
302 static const struct arm64_ftr_bits ftr_zcr[] = {
303         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
304                 ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),        /* LEN */
305         ARM64_FTR_END,
306 };
307 
308 /*
309  * Common ftr bits for a 32bit register with all hidden, strict
310  * attributes, with 4bit feature fields and a default safe value of
311  * 0. Covers the following 32bit registers:
312  * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
313  */
314 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
315         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
316         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
317         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
318         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
319         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
320         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
321         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
322         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
323         ARM64_FTR_END,
324 };
325 
326 /* Table for a single 32bit feature value */
327 static const struct arm64_ftr_bits ftr_single32[] = {
328         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
329         ARM64_FTR_END,
330 };
331 
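/*
 * Empty table: with no arm64_ftr_bits entries, every bit of the register
 * is treated as RES0 for the system-wide value (see init_cpu_ftr_reg()).
 */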
332 static const struct arm64_ftr_bits ftr_raz[] = {
333         ARM64_FTR_END,
334 };
335 
336 #define ARM64_FTR_REG(id, table) {              \
337         .sys_id = id,                           \
338         .reg =  &(struct arm64_ftr_reg){        \
339                 .name = #id,                    \
340                 .ftr_bits = &((table)[0]),      \
341         }}
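
/*
 * For illustration, ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0) expands
 * roughly to:
 *
 *   { .sys_id = SYS_ID_PFR0_EL1,
 *     .reg = &(struct arm64_ftr_reg){
 *             .name     = "SYS_ID_PFR0_EL1",
 *             .ftr_bits = &ftr_id_pfr0[0],
 *     } }
 *
 * i.e. each entry pairs a sys_reg() encoding with an anonymous
 * arm64_ftr_reg describing that register's feature fields.
 */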
342 
343 static const struct __ftr_reg_entry {
344         u32                     sys_id;
345         struct arm64_ftr_reg    *reg;
346 } arm64_ftr_regs[] = {
347 
348         /* Op1 = 0, CRn = 0, CRm = 1 */
349         ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
350         ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
351         ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
352         ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
353         ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
354         ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
355         ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
356 
357         /* Op1 = 0, CRn = 0, CRm = 2 */
358         ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
359         ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
360         ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
361         ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
362         ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
363         ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
364         ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
365 
366         /* Op1 = 0, CRn = 0, CRm = 3 */
367         ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
368         ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
369         ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
370 
371         /* Op1 = 0, CRn = 0, CRm = 4 */
372         ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
373         ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
374         ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
375 
376         /* Op1 = 0, CRn = 0, CRm = 5 */
377         ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
378         ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
379 
380         /* Op1 = 0, CRn = 0, CRm = 6 */
381         ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
382         ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
383 
384         /* Op1 = 0, CRn = 0, CRm = 7 */
385         ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
386         ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
387         ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
388 
389         /* Op1 = 0, CRn = 1, CRm = 2 */
390         ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
391 
392         /* Op1 = 3, CRn = 0, CRm = 0 */
393         { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
394         ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
395 
396         /* Op1 = 3, CRn = 14, CRm = 0 */
397         ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
398 };
399 
400 static int search_cmp_ftr_reg(const void *id, const void *regp)
401 {
402         return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
403 }
404 
405 /*
406  * get_arm64_ftr_reg - Look up a feature register entry using its
407  * sys_reg() encoding. With the array arm64_ftr_regs sorted in
408  * ascending order of sys_id, we use a binary search to find a matching
409  * entry.
410  *
411  * returns - Upon success, the matching ftr_reg entry for id.
412  *         - NULL on failure. It is up to the caller to decide
413  *           the impact of a failure.
414  */
415 static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
416 {
417         const struct __ftr_reg_entry *ret;
418 
419         ret = bsearch((const void *)(unsigned long)sys_id,
420                         arm64_ftr_regs,
421                         ARRAY_SIZE(arm64_ftr_regs),
422                         sizeof(arm64_ftr_regs[0]),
423                         search_cmp_ftr_reg);
424         if (ret)
425                 return ret->reg;
426         return NULL;
427 }
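
/*
 * Typical use (illustrative): get_arm64_ftr_reg(SYS_CTR_EL0) returns
 * &arm64_ftr_reg_ctrel0, since SYS_CTR_EL0 has an entry in the
 * arm64_ftr_regs[] table above; an encoding without an entry yields NULL.
 */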
428 
429 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
430                                s64 ftr_val)
431 {
432         u64 mask = arm64_ftr_mask(ftrp);
433 
434         reg &= ~mask;
435         reg |= (ftr_val << ftrp->shift) & mask;
436         return reg;
437 }
438 
439 static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
440                                 s64 cur)
441 {
442         s64 ret = 0;
443 
444         switch (ftrp->type) {
445         case FTR_EXACT:
446                 ret = ftrp->safe_val;
447                 break;
448         case FTR_LOWER_SAFE:
449                 ret = new < cur ? new : cur;
450                 break;
451         case FTR_HIGHER_SAFE:
452                 ret = new > cur ? new : cur;
453                 break;
454         default:
455                 BUG();
456         }
457 
458         return ret;
459 }
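
/*
 * Worked example (hypothetical field values): for a FTR_LOWER_SAFE field
 * where the boot CPU reports 2 and a secondary CPU reports 1, the safe
 * system-wide value is 1. For a FTR_HIGHER_SAFE field (e.g. CTR_EL0 CWG)
 * the larger value is kept, and for FTR_EXACT any mismatch falls back to
 * the table's safe_val.
 */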
460 
461 static void __init sort_ftr_regs(void)
462 {
463         int i;
464 
465         /* Check that the array is sorted so that we can do the binary search */
466         for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
467                 BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
468 }
469 
470 /*
471  * Initialise the CPU feature register from the boot CPU's values.
472  * Also initialise the strict_mask for the register.
473  * Any bits that are not covered by an arm64_ftr_bits entry are considered
474  * RES0 for the system-wide value, and must strictly match.
475  */
476 static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
477 {
478         u64 val = 0;
479         u64 strict_mask = ~0x0ULL;
480         u64 user_mask = 0;
481         u64 valid_mask = 0;
482 
483         const struct arm64_ftr_bits *ftrp;
484         struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
485 
486         BUG_ON(!reg);
487 
488         for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
489                 u64 ftr_mask = arm64_ftr_mask(ftrp);
490                 s64 ftr_new = arm64_ftr_value(ftrp, new);
491 
492                 val = arm64_ftr_set_value(ftrp, val, ftr_new);
493 
494                 valid_mask |= ftr_mask;
495                 if (!ftrp->strict)
496                         strict_mask &= ~ftr_mask;
497                 if (ftrp->visible)
498                         user_mask |= ftr_mask;
499                 else
500                         reg->user_val = arm64_ftr_set_value(ftrp,
501                                                             reg->user_val,
502                                                             ftrp->safe_val);
503         }
504 
505         val &= valid_mask;
506 
507         reg->sys_val = val;
508         reg->strict_mask = strict_mask;
509         reg->user_mask = user_mask;
510 }
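
/*
 * Illustration (hypothetical register layout): for a register with one
 * visible, strict field and one hidden, non-strict field, the loop above
 * clears the non-strict field's bits from strict_mask, adds the visible
 * field's bits to user_mask, and writes safe_val into user_val for the
 * hidden field, so the value later reported to userspace (per the CPU
 * feature register ABI note above) carries real bits only for visible
 * fields.
 */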
511 
512 extern const struct arm64_cpu_capabilities arm64_errata[];
513 static void __init setup_boot_cpu_capabilities(void);
514 
515 void __init init_cpu_features(struct cpuinfo_arm64 *info)
516 {
517         /* Before we start using the tables, make sure they are sorted */
518         sort_ftr_regs();
519 
520         init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
521         init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
522         init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
523         init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
524         init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
525         init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
526         init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
527         init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
528         init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
529         init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
530         init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
531         init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
532         init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
533 
534         if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
535                 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
536                 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
537                 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
538                 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
539                 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
540                 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
541                 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
542                 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
543                 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
544                 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
545                 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
546                 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
547                 init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
548                 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
549                 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
550                 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
551         }
552 
553         if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
554                 init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
555                 sve_init_vq_map();
556         }
557 
558         /*
559          * Detect and enable early CPU capabilities based on the boot CPU,
560          * after we have initialised the CPU feature infrastructure.
561          */
562         setup_boot_cpu_capabilities();
563 }
564 
565 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
566 {
567         const struct arm64_ftr_bits *ftrp;
568 
569         for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
570                 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
571                 s64 ftr_new = arm64_ftr_value(ftrp, new);
572 
573                 if (ftr_cur == ftr_new)
574                         continue;
575                 /* Find a safe value */
576                 ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
577                 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
578         }
579 
580 }
581 
582 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
583 {
584         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
585 
586         BUG_ON(!regp);
587         update_cpu_ftr_reg(regp, val);
588         if ((boot & regp->strict_mask) == (val & regp->strict_mask))
589                 return 0;
590         pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
591                         regp->name, boot, cpu, val);
592         return 1;
593 }
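
/*
 * Returns 0 when all strict bits agree with the boot CPU and 1 otherwise;
 * update_cpu_features() below ORs these results together and taints the
 * kernel (TAINT_CPU_OUT_OF_SPEC) if any register showed an unexpected
 * variation.
 */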
594 
595 /*
596  * Update the system-wide CPU feature registers with the values from a
597  * non-boot CPU. Also performs SANITY checks to make sure that there
598  * aren't any insane variations from those of the boot CPU.
599  */
600 void update_cpu_features(int cpu,
601                          struct cpuinfo_arm64 *info,
602                          struct cpuinfo_arm64 *boot)
603 {
604         int taint = 0;
605 
606         /*
607          * The kernel can handle differing I-cache policies, but otherwise
608          * caches should look identical. Userspace JITs will make use of
609          * *minLine.
610          */
611         taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
612                                       info->reg_ctr, boot->reg_ctr);
613 
614         /*
615          * Userspace may perform DC ZVA instructions. Mismatched block sizes
616          * could result in too much or too little memory being zeroed if a
617          * process is preempted and migrated between CPUs.
618          */
619         taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
620                                       info->reg_dczid, boot->reg_dczid);
621 
622         /* If different, timekeeping will be broken (especially with KVM) */
623         taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
624                                       info->reg_cntfrq, boot->reg_cntfrq);
625 
626         /*
627          * The kernel uses self-hosted debug features and expects CPUs to
628          * support identical debug features. We presently need CTX_CMPs, WRPs,
629          * and BRPs to be identical.
630          * ID_AA64DFR1 is currently RES0.
631          */
632         taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
633                                       info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
634         taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
635                                       info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
636         /*
637          * Even in big.LITTLE, processors should be identical instruction-set
638          * wise.
639          */
640         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
641                                       info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
642         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
643                                       info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
644 
645         /*
646          * Differing PARange support is fine as long as all peripherals and
647          * memory are mapped within the minimum PARange of all CPUs.
648          * Linux should not care about secure memory.
649          */
650         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
651                                       info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
652         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
653                                       info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
654         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
655                                       info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
656 
657         /*
658          * EL3 is not our concern.
659          * ID_AA64PFR1 is currently RES0.
660          */
661         taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
662                                       info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
663         taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
664                                       info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
665 
666         taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
667                                       info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
668 
669         /*
670          * If we have AArch32, we care about 32-bit features for compat.
671          * If the system doesn't support AArch32, don't update them.
672          */
673         if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
674                 id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
675 
676                 taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
677                                         info->reg_id_dfr0, boot->reg_id_dfr0);
678                 taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
679                                         info->reg_id_isar0, boot->reg_id_isar0);
680                 taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
681                                         info->reg_id_isar1, boot->reg_id_isar1);
682                 taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
683                                         info->reg_id_isar2, boot->reg_id_isar2);
684                 taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
685                                         info->reg_id_isar3, boot->reg_id_isar3);
686                 taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
687                                         info->reg_id_isar4, boot->reg_id_isar4);
688                 taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
689                                         info->reg_id_isar5, boot->reg_id_isar5);
690 
691                 /*
692                  * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
693                  * ACTLR formats could differ across CPUs and therefore would have to
694                  * be trapped for virtualization anyway.
695                  */
696                 taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
697                                         info->reg_id_mmfr0, boot->reg_id_mmfr0);
698                 taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
699                                         info->reg_id_mmfr1, boot->reg_id_mmfr1);
700                 taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
701                                         info->reg_id_mmfr2, boot->reg_id_mmfr2);
702                 taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
703                                         info->reg_id_mmfr3, boot->reg_id_mmfr3);
704                 taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
705                                         info->reg_id_pfr0, boot->reg_id_pfr0);
706                 taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
707                                         info->reg_id_pfr1, boot->reg_id_pfr1);
708                 taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
709                                         info->reg_mvfr0, boot->reg_mvfr0);
710                 taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
711                                         info->reg_mvfr1, boot->reg_mvfr1);
712                 taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
713                                         info->reg_mvfr2, boot->reg_mvfr2);
714         }
715 
716         if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
717                 taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
718                                         info->reg_zcr, boot->reg_zcr);
719 
720                 /* Probe vector lengths, unless we already gave up on SVE */
721                 if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
722                     !sys_caps_initialised)
723                         sve_update_vq_map();
724         }
725 
726         /*
727          * Mismatched CPU features are a recipe for disaster. Don't even
728          * pretend to support them.
729          */
730         if (taint) {
731                 pr_warn_once("Unsupported CPU feature variation detected.\n");
732                 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
733         }
734 }
735 
736 u64 read_sanitised_ftr_reg(u32 id)
737 {
738         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
739 
740         /* We shouldn't get a request for an unsupported register */
741         BUG_ON(!regp);
742         return regp->sys_val;
743 }
744 
745 #define read_sysreg_case(r)     \
746         case r:         return read_sysreg_s(r)
747 
748 /*
749  * __read_sysreg_by_encoding() - Used by a STARTING CPU before cpuinfo is populated.
750  * Reads the system register on the current CPU.
751  */
752 static u64 __read_sysreg_by_encoding(u32 sys_id)
753 {
754         switch (sys_id) {
755         read_sysreg_case(SYS_ID_PFR0_EL1);
756         read_sysreg_case(SYS_ID_PFR1_EL1);
757         read_sysreg_case(SYS_ID_DFR0_EL1);
758         read_sysreg_case(SYS_ID_MMFR0_EL1);
759         read_sysreg_case(SYS_ID_MMFR1_EL1);
760         read_sysreg_case(SYS_ID_MMFR2_EL1);
761         read_sysreg_case(SYS_ID_MMFR3_EL1);
762         read_sysreg_case(SYS_ID_ISAR0_EL1);
763         read_sysreg_case(SYS_ID_ISAR1_EL1);
764         read_sysreg_case(SYS_ID_ISAR2_EL1);
765         read_sysreg_case(SYS_ID_ISAR3_EL1);
766         read_sysreg_case(SYS_ID_ISAR4_EL1);
767         read_sysreg_case(SYS_ID_ISAR5_EL1);
768         read_sysreg_case(SYS_MVFR0_EL1);
769         read_sysreg_case(SYS_MVFR1_EL1);
770         read_sysreg_case(SYS_MVFR2_EL1);
771 
772         read_sysreg_case(SYS_ID_AA64PFR0_EL1);
773         read_sysreg_case(SYS_ID_AA64PFR1_EL1);
774         read_sysreg_case(SYS_ID_AA64DFR0_EL1);
775         read_sysreg_case(SYS_ID_AA64DFR1_EL1);
776         read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
777         read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
778         read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
779         read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
780         read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
781 
782         read_sysreg_case(SYS_CNTFRQ_EL0);
783         read_sysreg_case(SYS_CTR_EL0);
784         read_sysreg_case(SYS_DCZID_EL0);
785 
786         default:
787                 BUG();
788                 return 0;
789         }
790 }
791 
792 #include <linux/irqchip/arm-gic-v3.h>
793 
794 static bool
795 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
796 {
797         int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
798 
799         return val >= entry->min_field_value;
800 }
801 
802 static bool
803 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
804 {
805         u64 val;
806 
807         WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
808         if (scope == SCOPE_SYSTEM)
809                 val = read_sanitised_ftr_reg(entry->sys_reg);
810         else
811                 val = __read_sysreg_by_encoding(entry->sys_reg);
812 
813         return feature_matches(val, entry);
814 }
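
/*
 * Example (see the arm64_features[] table below): the PAN capability uses
 * .sys_reg = SYS_ID_AA64MMFR1_EL1, .field_pos = ID_AA64MMFR1_PAN_SHIFT
 * and .min_field_value = 1, so has_cpuid_feature() extracts that field
 * from the sanitised (SCOPE_SYSTEM) or per-CPU (SCOPE_LOCAL_CPU) register
 * value and reports a match when the field value is at least 1.
 */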
815 
816 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
817 {
818         bool has_sre;
819 
820         if (!has_cpuid_feature(entry, scope))
821                 return false;
822 
823         has_sre = gic_enable_sre();
824         if (!has_sre)
825                 pr_warn_once("%s present but disabled by higher exception level\n",
826                              entry->desc);
827 
828         return has_sre;
829 }
830 
831 static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
832 {
833         u32 midr = read_cpuid_id();
834 
835         /* Cavium ThunderX pass 1.x and 2.x */
836         return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
837                 MIDR_CPU_VAR_REV(0, 0),
838                 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
839 }
840 
841 static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
842 {
843         u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
844 
845         return cpuid_feature_extract_signed_field(pfr0,
846                                         ID_AA64PFR0_FP_SHIFT) < 0;
847 }
848 
849 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
850                           int scope)
851 {
852         u64 ctr;
853 
854         if (scope == SCOPE_SYSTEM)
855                 ctr = arm64_ftr_reg_ctrel0.sys_val;
856         else
857                 ctr = read_cpuid_cachetype();
858 
859         return ctr & BIT(CTR_IDC_SHIFT);
860 }
861 
862 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
863                           int scope)
864 {
865         u64 ctr;
866 
867         if (scope == SCOPE_SYSTEM)
868                 ctr = arm64_ftr_reg_ctrel0.sys_val;
869         else
870                 ctr = read_cpuid_cachetype();
871 
872         return ctr & BIT(CTR_DIC_SHIFT);
873 }
874 
875 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
876 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
877 
878 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
879                                 int scope)
880 {
881         /* List of CPUs that are not vulnerable and don't need KPTI */
882         static const struct midr_range kpti_safe_list[] = {
883                 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
884                 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
885                 { /* sentinel */ }
886         };
887         char const *str = "command line option";
888 
889         /*
890          * For reasons that aren't entirely clear, enabling KPTI on Cavium
891          * ThunderX leads to apparent I-cache corruption of kernel text, which
892          * ends as well as you might imagine. Don't even try.
893          */
894         if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
895                 str = "ARM64_WORKAROUND_CAVIUM_27456";
896                 __kpti_forced = -1;
897         }
898 
899         /* Forced? */
900         if (__kpti_forced) {
901                 pr_info_once("kernel page table isolation forced %s by %s\n",
902                              __kpti_forced > 0 ? "ON" : "OFF", str);
903                 return __kpti_forced > 0;
904         }
905 
906         /* Useful for KASLR robustness */
907         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
908                 return true;
909 
910         /* Don't force KPTI for CPUs that are not vulnerable */
911         if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
912                 return false;
913 
914         /* Defer to CPU feature registers */
915         return !has_cpuid_feature(entry, scope);
916 }
917 
918 static void
919 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
920 {
921         typedef void (kpti_remap_fn)(int, int, phys_addr_t);
922         extern kpti_remap_fn idmap_kpti_install_ng_mappings;
923         kpti_remap_fn *remap_fn;
924 
925         static bool kpti_applied = false;
926         int cpu = smp_processor_id();
927 
928         if (kpti_applied)
929                 return;
930 
931         remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
932 
933         cpu_install_idmap();
934         remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
935         cpu_uninstall_idmap();
936 
937         if (!cpu)
938                 kpti_applied = true;
939 
940         return;
941 }
942 
943 static int __init parse_kpti(char *str)
944 {
945         bool enabled;
946         int ret = strtobool(str, &enabled);
947 
948         if (ret)
949                 return ret;
950 
951         __kpti_forced = enabled ? 1 : -1;
952         return 0;
953 }
954 early_param("kpti", parse_kpti);
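
/*
 * The "kpti" early parameter takes the boolean spellings accepted by
 * strtobool(), e.g. booting with "kpti=0" forces page table isolation
 * off and "kpti=1" forces it on, overriding the decision made in
 * unmap_kernel_at_el0() (unless the Cavium erratum workaround above has
 * already forced it off).
 */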
955 #endif  /* CONFIG_UNMAP_KERNEL_AT_EL0 */
956 
957 #ifdef CONFIG_ARM64_HW_AFDBM
958 static inline void __cpu_enable_hw_dbm(void)
959 {
960         u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
961 
962         write_sysreg(tcr, tcr_el1);
963         isb();
964 }
965 
966 static bool cpu_has_broken_dbm(void)
967 {
968         /* List of CPUs which have broken DBM support. */
969         static const struct midr_range cpus[] = {
970 #ifdef CONFIG_ARM64_ERRATUM_1024718
971                 MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 - r1p0
972 #endif
973                 {},
974         };
975 
976         return is_midr_in_range_list(read_cpuid_id(), cpus);
977 }
978 
979 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
980 {
981         return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
982                !cpu_has_broken_dbm();
983 }
984 
985 static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
986 {
987         if (cpu_can_use_dbm(cap))
988                 __cpu_enable_hw_dbm();
989 }
990 
991 static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
992                        int __unused)
993 {
994         static bool detected = false;
995         /*
 996          * DBM is a non-conflicting feature, i.e. the kernel can safely
 997          * run a mix of CPUs with and without the feature. So, we
 998          * unconditionally enable the capability to allow any late CPU
 999          * to use the feature. We only enable the control bits on a
1000          * CPU if it actually supports the feature.
1001          *
1002          * We have to make sure we print the "feature" detection only
1003          * when at least one CPU actually uses it. So check if this CPU
1004          * can actually use it and print the message exactly once.
1005          *
1006          * This is safe as all CPUs (including secondary CPUs - due to the
1007          * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
1008          * go through the "matches" check exactly once. Also, if a CPU
1009          * matches the criteria, it is guaranteed that the CPU will turn
1010          * the DBM on, as the capability is unconditionally enabled.
1011          */
1012         if (!detected && cpu_can_use_dbm(cap)) {
1013                 detected = true;
1014                 pr_info("detected: Hardware dirty bit management\n");
1015         }
1016 
1017         return true;
1018 }
1019 
1020 #endif
1021 
1022 #ifdef CONFIG_ARM64_VHE
1023 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
1024 {
1025         return is_kernel_in_hyp_mode();
1026 }
1027 
1028 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
1029 {
1030         /*
1031          * Copy register values that aren't redirected by hardware.
1032          *
1033          * Before code patching, we only set tpidr_el1, all CPUs need to copy
1034          * this value to tpidr_el2 before we patch the code. Once we've done
1035          * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
1036          * do anything here.
1037          */
1038         if (!alternatives_applied)
1039                 write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
1040 }
1041 #endif
1042 
1043 static const struct arm64_cpu_capabilities arm64_features[] = {
1044         {
1045                 .desc = "GIC system register CPU interface",
1046                 .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
1047                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1048                 .matches = has_useable_gicv3_cpuif,
1049                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1050                 .field_pos = ID_AA64PFR0_GIC_SHIFT,
1051                 .sign = FTR_UNSIGNED,
1052                 .min_field_value = 1,
1053         },
1054 #ifdef CONFIG_ARM64_PAN
1055         {
1056                 .desc = "Privileged Access Never",
1057                 .capability = ARM64_HAS_PAN,
1058                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1059                 .matches = has_cpuid_feature,
1060                 .sys_reg = SYS_ID_AA64MMFR1_EL1,
1061                 .field_pos = ID_AA64MMFR1_PAN_SHIFT,
1062                 .sign = FTR_UNSIGNED,
1063                 .min_field_value = 1,
1064                 .cpu_enable = cpu_enable_pan,
1065         },
1066 #endif /* CONFIG_ARM64_PAN */
1067 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
1068         {
1069                 .desc = "LSE atomic instructions",
1070                 .capability = ARM64_HAS_LSE_ATOMICS,
1071                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1072                 .matches = has_cpuid_feature,
1073                 .sys_reg = SYS_ID_AA64ISAR0_EL1,
1074                 .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
1075                 .sign = FTR_UNSIGNED,
1076                 .min_field_value = 2,
1077         },
1078 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
1079         {
1080                 .desc = "Software prefetching using PRFM",
1081                 .capability = ARM64_HAS_NO_HW_PREFETCH,
1082                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1083                 .matches = has_no_hw_prefetch,
1084         },
1085 #ifdef CONFIG_ARM64_UAO
1086         {
1087                 .desc = "User Access Override",
1088                 .capability = ARM64_HAS_UAO,
1089                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1090                 .matches = has_cpuid_feature,
1091                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1092                 .field_pos = ID_AA64MMFR2_UAO_SHIFT,
1093                 .min_field_value = 1,
1094                 /*
1095                  * We rely on stop_machine() calling uao_thread_switch() to set
1096                  * UAO immediately after patching.
1097                  */
1098         },
1099 #endif /* CONFIG_ARM64_UAO */
1100 #ifdef CONFIG_ARM64_PAN
1101         {
1102                 .capability = ARM64_ALT_PAN_NOT_UAO,
1103                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1104                 .matches = cpufeature_pan_not_uao,
1105         },
1106 #endif /* CONFIG_ARM64_PAN */
1107 #ifdef CONFIG_ARM64_VHE
1108         {
1109                 .desc = "Virtualization Host Extensions",
1110                 .capability = ARM64_HAS_VIRT_HOST_EXTN,
1111                 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1112                 .matches = runs_at_el2,
1113                 .cpu_enable = cpu_copy_el2regs,
1114         },
1115 #endif  /* CONFIG_ARM64_VHE */
1116         {
1117                 .desc = "32-bit EL0 Support",
1118                 .capability = ARM64_HAS_32BIT_EL0,
1119                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1120                 .matches = has_cpuid_feature,
1121                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1122                 .sign = FTR_UNSIGNED,
1123                 .field_pos = ID_AA64PFR0_EL0_SHIFT,
1124                 .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
1125         },
1126 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1127         {
1128                 .desc = "Kernel page table isolation (KPTI)",
1129                 .capability = ARM64_UNMAP_KERNEL_AT_EL0,
1130                 .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
1131                 /*
1132                  * The ID feature fields below are used to indicate that
1133                  * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
1134                  * more details.
1135                  */
1136                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1137                 .field_pos = ID_AA64PFR0_CSV3_SHIFT,
1138                 .min_field_value = 1,
1139                 .matches = unmap_kernel_at_el0,
1140                 .cpu_enable = kpti_install_ng_mappings,
1141         },
1142 #endif
1143         {
1144                 /* FP/SIMD is not implemented */
1145                 .capability = ARM64_HAS_NO_FPSIMD,
1146                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1147                 .min_field_value = 0,
1148                 .matches = has_no_fpsimd,
1149         },
1150 #ifdef CONFIG_ARM64_PMEM
1151         {
1152                 .desc = "Data cache clean to Point of Persistence",
1153                 .capability = ARM64_HAS_DCPOP,
1154                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1155                 .matches = has_cpuid_feature,
1156                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1157                 .field_pos = ID_AA64ISAR1_DPB_SHIFT,
1158                 .min_field_value = 1,
1159         },
1160 #endif
1161 #ifdef CONFIG_ARM64_SVE
1162         {
1163                 .desc = "Scalable Vector Extension",
1164                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1165                 .capability = ARM64_SVE,
1166                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1167                 .sign = FTR_UNSIGNED,
1168                 .field_pos = ID_AA64PFR0_SVE_SHIFT,
1169                 .min_field_value = ID_AA64PFR0_SVE,
1170                 .matches = has_cpuid_feature,
1171                 .cpu_enable = sve_kernel_enable,
1172         },
1173 #endif /* CONFIG_ARM64_SVE */
1174 #ifdef CONFIG_ARM64_RAS_EXTN
1175         {
1176                 .desc = "RAS Extension Support",
1177                 .capability = ARM64_HAS_RAS_EXTN,
1178                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1179                 .matches = has_cpuid_feature,
1180                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1181                 .sign = FTR_UNSIGNED,
1182                 .field_pos = ID_AA64PFR0_RAS_SHIFT,
1183                 .min_field_value = ID_AA64PFR0_RAS_V1,
1184                 .cpu_enable = cpu_clear_disr,
1185         },
1186 #endif /* CONFIG_ARM64_RAS_EXTN */
1187         {
1188                 .desc = "Data cache clean to the PoU not required for I/D coherence",
1189                 .capability = ARM64_HAS_CACHE_IDC,
1190                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1191                 .matches = has_cache_idc,
1192         },
1193         {
1194                 .desc = "Instruction cache invalidation not required for I/D coherence",
1195                 .capability = ARM64_HAS_CACHE_DIC,
1196                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1197                 .matches = has_cache_dic,
1198         },
1199 #ifdef CONFIG_ARM64_HW_AFDBM
1200         {
1201                 /*
1202                  * Since we always turn this on, we don't want the user to
1203                  * think that the feature is available when it may not be.
1204                  * So hide the description.
1205                  *
1206                  * .desc = "Hardware pagetable Dirty Bit Management",
1207                  *
1208                  */
1209                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1210                 .capability = ARM64_HW_DBM,
1211                 .sys_reg = SYS_ID_AA64MMFR1_EL1,
1212                 .sign = FTR_UNSIGNED,
1213                 .field_pos = ID_AA64MMFR1_HADBS_SHIFT,
1214                 .min_field_value = 2,
1215                 .matches = has_hw_dbm,
1216                 .cpu_enable = cpu_enable_hw_dbm,
1217         },
1218 #endif
1219         {},
1220 };
1221 
1222 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)      \
1223         {                                                       \
1224                 .desc = #cap,                                   \
1225                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,            \
1226                 .matches = has_cpuid_feature,                   \
1227                 .sys_reg = reg,                                 \
1228                 .field_pos = field,                             \
1229                 .sign = s,                                      \
1230                 .min_field_value = min_value,                   \
1231                 .hwcap_type = cap_type,                         \
1232                 .hwcap = cap,                                   \
1233         }
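/*
 * Editor's note -- illustrative expansion, not part of the original file.
 * The first entry of arm64_elf_hwcaps below,
 *
 *	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT,
 *		  FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL)
 *
 * expands to an entry with .desc = "HWCAP_PMULL",
 * .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature,
 * .sys_reg = SYS_ID_AA64ISAR0_EL1, .field_pos = ID_AA64ISAR0_AES_SHIFT,
 * .sign = FTR_UNSIGNED, .min_field_value = 2, .hwcap_type = CAP_HWCAP and
 * .hwcap = HWCAP_PMULL, i.e. HWCAP_PMULL is advertised when the sanitised
 * AES field of ID_AA64ISAR0_EL1 is at least 2.
 */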
1234 
1235 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
1236         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
1237         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
1238         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
1239         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
1240         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
1241         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
1242         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
1243         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
1244         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
1245         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
1246         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
1247         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
1248         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
1249         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
1250         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
1251         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
1252         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
1253         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
1254         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
1255         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
1256         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
1257         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
1258         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
1259         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
1260         HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
1261 #ifdef CONFIG_ARM64_SVE
1262         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
1263 #endif
1264         {},
1265 };
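/*
 * Editor's note -- userspace-side sketch, not part of the original file.
 * The bits set from the table above are what applications read back via
 * the ELF auxiliary vector, e.g. (glibc's <sys/auxv.h> and the uapi
 * <asm/hwcap.h> assumed; use_pmull_path() is a placeholder):
 *
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	if (getauxval(AT_HWCAP) & HWCAP_PMULL)
 *		use_pmull_path();
 */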
1266 
1267 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
1268 #ifdef CONFIG_COMPAT
1269         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
1270         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
1271         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
1272         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
1273         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
1274 #endif
1275         {},
1276 };
1277 
1278 static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1279 {
1280         switch (cap->hwcap_type) {
1281         case CAP_HWCAP:
1282                 elf_hwcap |= cap->hwcap;
1283                 break;
1284 #ifdef CONFIG_COMPAT
1285         case CAP_COMPAT_HWCAP:
1286                 compat_elf_hwcap |= (u32)cap->hwcap;
1287                 break;
1288         case CAP_COMPAT_HWCAP2:
1289                 compat_elf_hwcap2 |= (u32)cap->hwcap;
1290                 break;
1291 #endif
1292         default:
1293                 WARN_ON(1);
1294                 break;
1295         }
1296 }
1297 
1298 /* Check if we have a particular HWCAP enabled */
1299 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1300 {
1301         bool rc;
1302 
1303         switch (cap->hwcap_type) {
1304         case CAP_HWCAP:
1305                 rc = (elf_hwcap & cap->hwcap) != 0;
1306                 break;
1307 #ifdef CONFIG_COMPAT
1308         case CAP_COMPAT_HWCAP:
1309                 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
1310                 break;
1311         case CAP_COMPAT_HWCAP2:
1312                 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
1313                 break;
1314 #endif
1315         default:
1316                 WARN_ON(1);
1317                 rc = false;
1318         }
1319 
1320         return rc;
1321 }
1322 
1323 static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
1324 {
1325         /* We support emulation of accesses to CPU ID feature registers */
1326         elf_hwcap |= HWCAP_CPUID;
1327         for (; hwcaps->matches; hwcaps++)
1328                 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
1329                         cap_set_elf_hwcap(hwcaps);
1330 }
1331 
1332 /*
1333  * Check if the current CPU has a given feature capability.
1334  * Should be called from non-preemptible context.
1335  */
1336 static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
1337                                unsigned int cap)
1338 {
1339         const struct arm64_cpu_capabilities *caps;
1340 
1341         if (WARN_ON(preemptible()))
1342                 return false;
1343 
1344         for (caps = cap_array; caps->matches; caps++)
1345                 if (caps->capability == cap)
1346                         return caps->matches(caps, SCOPE_LOCAL_CPU);
1347 
1348         return false;
1349 }
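/*
 * Editor's note -- usage sketch, not part of the original file.  Since the
 * match runs with SCOPE_LOCAL_CPU and warns if called preemptibly, callers
 * of the public this_cpu_has_cap() wrapper (defined further down) should
 * stay pinned to the CPU, e.g. (handle_ras_on_this_cpu() is a placeholder):
 *
 *	preempt_disable();
 *	if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN))
 *		handle_ras_on_this_cpu();
 *	preempt_enable();
 */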
1350 
1351 static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1352                                       u16 scope_mask, const char *info)
1353 {
1354         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1355         for (; caps->matches; caps++) {
1356                 if (!(caps->type & scope_mask) ||
1357                     !caps->matches(caps, cpucap_default_scope(caps)))
1358                         continue;
1359 
1360                 if (!cpus_have_cap(caps->capability) && caps->desc)
1361                         pr_info("%s %s\n", info, caps->desc);
1362                 cpus_set_cap(caps->capability);
1363         }
1364 }
1365 
1366 static void update_cpu_capabilities(u16 scope_mask)
1367 {
1368         __update_cpu_capabilities(arm64_errata, scope_mask,
1369                                   "enabling workaround for");
1370         __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
1371 }
1372 
1373 static int __enable_cpu_capability(void *arg)
1374 {
1375         const struct arm64_cpu_capabilities *cap = arg;
1376 
1377         cap->cpu_enable(cap);
1378         return 0;
1379 }
1380 
1381 /*
1382  * Run through the enabled capabilities and call their cpu_enable()
1383  * callback on all active CPUs.
1384  */
1385 static void __init
1386 __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1387                           u16 scope_mask)
1388 {
1389         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1390         for (; caps->matches; caps++) {
1391                 unsigned int num = caps->capability;
1392 
1393                 if (!(caps->type & scope_mask) || !cpus_have_cap(num))
1394                         continue;
1395 
1396                 /* Ensure cpus_have_const_cap(num) works */
1397                 static_branch_enable(&cpu_hwcap_keys[num]);
1398 
1399                 if (caps->cpu_enable) {
1400                         /*
1401                          * Capabilities with SCOPE_BOOT_CPU scope are finalised
1402                          * before any secondary CPU boots. Thus, each secondary
1403                          * will enable the capability as appropriate via
1404                          * check_local_cpu_capabilities(). The only exception is
1405                          * the boot CPU, for which the capability must be
1406                          * enabled here. This approach avoids costly
1407                          * stop_machine() calls for this case.
1408                          *
1409                          * Otherwise, use stop_machine() as it schedules the
1410                          * work allowing us to modify PSTATE, instead of
1411                          * on_each_cpu() which uses an IPI, giving us a PSTATE
1412                          * that disappears when we return.
1413                          */
1414                         if (scope_mask & SCOPE_BOOT_CPU)
1415                                 caps->cpu_enable(caps);
1416                         else
1417                                 stop_machine(__enable_cpu_capability,
1418                                              (void *)caps, cpu_online_mask);
1419                 }
1420         }
1421 }
1422 
1423 static void __init enable_cpu_capabilities(u16 scope_mask)
1424 {
1425         __enable_cpu_capabilities(arm64_errata, scope_mask);
1426         __enable_cpu_capabilities(arm64_features, scope_mask);
1427 }
1428 
1429 /*
1430  * Run through the list of capabilities to check for conflicts.
1431  * If the system has already detected a capability, take necessary
1432  * action on this CPU.
1433  *
1434  * Returns "false" on conflicts.
1435  */
1436 static bool
1437 __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
1438                         u16 scope_mask)
1439 {
1440         bool cpu_has_cap, system_has_cap;
1441 
1442         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1443 
1444         for (; caps->matches; caps++) {
1445                 if (!(caps->type & scope_mask))
1446                         continue;
1447 
1448                 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
1449                 system_has_cap = cpus_have_cap(caps->capability);
1450 
1451                 if (system_has_cap) {
1452                         /*
1453                          * Check if the new CPU misses an advertised feature,
1454                          * which is not safe to miss.
1455                          */
1456                         if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
1457                                 break;
1458                         /*
1459                          * We have to issue cpu_enable() irrespective of
1460                          * whether the CPU has it or not, as it is enabled
1461                          * system wide. It is up to the callback to take
1462                          * appropriate action on this CPU.
1463                          */
1464                         if (caps->cpu_enable)
1465                                 caps->cpu_enable(caps);
1466                 } else {
1467                         /*
1468                          * The system doesn't have this capability; check
1469                          * whether it is safe for this CPU to have it anyway.
1470                          */
1471                         if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
1472                                 break;
1473                 }
1474         }
1475 
1476         if (caps->matches) {
1477                 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
1478                         smp_processor_id(), caps->capability,
1479                         caps->desc, system_has_cap, cpu_has_cap);
1480                 return false;
1481         }
1482 
1483         return true;
1484 }
1485 
1486 static bool verify_local_cpu_caps(u16 scope_mask)
1487 {
1488         return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
1489                __verify_local_cpu_caps(arm64_features, scope_mask);
1490 }
1491 
1492 /*
1493  * Check for CPU features that are used in early boot
1494  * based on the Boot CPU value.
1495  */
1496 static void check_early_cpu_features(void)
1497 {
1498         verify_cpu_asid_bits();
1499         /*
1500          * Early features are used by the kernel already. If there
1501          * is a conflict, we cannot proceed further.
1502          */
1503         if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
1504                 cpu_panic_kernel();
1505 }
1506 
1507 static void
1508 verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
1509 {
1510 
1511         for (; caps->matches; caps++)
1512                 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
1513                         pr_crit("CPU%d: missing HWCAP: %s\n",
1514                                         smp_processor_id(), caps->desc);
1515                         cpu_die_early();
1516                 }
1517 }
1518 
1519 static void verify_sve_features(void)
1520 {
1521         u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
1522         u64 zcr = read_zcr_features();
1523 
1524         unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
1525         unsigned int len = zcr & ZCR_ELx_LEN_MASK;
1526 
1527         if (len < safe_len || sve_verify_vq_map()) {
1528                 pr_crit("CPU%d: SVE: required vector length(s) missing\n",
1529                         smp_processor_id());
1530                 cpu_die_early();
1531         }
1532 
1533         /* Add checks on other ZCR bits here if necessary */
1534 }
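/*
 * Editor's note -- worked example, not part of the original file.  The LEN
 * field compared above encodes a vector length of (LEN + 1) * 128 bits, so
 * len == 0 corresponds to 128-bit vectors and len == 3 to 512-bit vectors.
 * A late CPU whose maximum len is below the system-wide safe value cannot
 * provide the vector lengths already in use, hence cpu_die_early() above.
 */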
1535 
1536 
1537 /*
1538  * Run through the enabled system capabilities and enable each of them on
1539  * this CPU. The capabilities were decided based on the CPUs available at
1540  * boot time. Any new CPU should match the system-wide status of the
1541  * capability. If the new CPU doesn't have a capability which the system
1542  * now has enabled, we cannot do anything to fix it up and it could cause
1543  * unexpected failures. So we park the CPU.
1544  */
1545 static void verify_local_cpu_capabilities(void)
1546 {
1547         /*
1548          * The capabilities with SCOPE_BOOT_CPU are checked from
1549          * check_early_cpu_features(), as they need to be verified
1550          * on all secondary CPUs.
1551          */
1552         if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
1553                 cpu_die_early();
1554 
1555         verify_local_elf_hwcaps(arm64_elf_hwcaps);
1556 
1557         if (system_supports_32bit_el0())
1558                 verify_local_elf_hwcaps(compat_elf_hwcaps);
1559 
1560         if (system_supports_sve())
1561                 verify_sve_features();
1562 }
1563 
1564 void check_local_cpu_capabilities(void)
1565 {
1566         /*
1567          * All secondary CPUs should conform to the early CPU features
1568          * in use by the kernel based on boot CPU.
1569          */
1570         check_early_cpu_features();
1571 
1572         /*
1573          * If we haven't finalised the system capabilities, this CPU gets
1574          * a chance to update the errata workarounds and local features.
1575          * Otherwise, this CPU should verify that it has all the
1576          * system-advertised capabilities.
1577          */
1578         if (!sys_caps_initialised)
1579                 update_cpu_capabilities(SCOPE_LOCAL_CPU);
1580         else
1581                 verify_local_cpu_capabilities();
1582 }
1583 
1584 static void __init setup_boot_cpu_capabilities(void)
1585 {
1586         /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
1587         update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
1588         /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
1589         enable_cpu_capabilities(SCOPE_BOOT_CPU);
1590 }
1591 
1592 DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
1593 EXPORT_SYMBOL(arm64_const_caps_ready);
1594 
1595 static void __init mark_const_caps_ready(void)
1596 {
1597         static_branch_enable(&arm64_const_caps_ready);
1598 }
1599 
1600 extern const struct arm64_cpu_capabilities arm64_errata[];
1601 
1602 bool this_cpu_has_cap(unsigned int cap)
1603 {
1604         return (__this_cpu_has_cap(arm64_features, cap) ||
1605                 __this_cpu_has_cap(arm64_errata, cap));
1606 }
1607 
1608 static void __init setup_system_capabilities(void)
1609 {
1610         /*
1611          * We have finalised the system-wide safe feature
1612          * registers; now finalise the capabilities that depend
1613          * on them. Also enable all the available capabilities
1614          * that are not already enabled.
1615          */
1616         update_cpu_capabilities(SCOPE_SYSTEM);
1617         enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
1618 }
1619 
1620 void __init setup_cpu_features(void)
1621 {
1622         u32 cwg;
1623 
1624         setup_system_capabilities();
1625         mark_const_caps_ready();
1626         setup_elf_hwcaps(arm64_elf_hwcaps);
1627 
1628         if (system_supports_32bit_el0())
1629                 setup_elf_hwcaps(compat_elf_hwcaps);
1630 
1631         if (system_uses_ttbr0_pan())
1632                 pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
1633 
1634         sve_setup();
1635         minsigstksz_setup();
1636 
1637         /* Advertise that we have computed the system capabilities */
1638         set_sys_caps_initialised();
1639 
1640         /*
1641          * Check for sane CTR_EL0.CWG value.
1642          */
1643         cwg = cache_type_cwg();
1644         if (!cwg)
1645                 pr_warn("No Cache Writeback Granule information, assuming %d\n",
1646                         ARCH_DMA_MINALIGN);
1647 }
1648 
1649 static bool __maybe_unused
1650 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
1651 {
1652         return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
1653 }
1654 
1655 /*
1656  * We emulate only the following system register space.
1657  * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
1658  * See Table C5-6 System instruction encodings for System register accesses,
1659  * ARMv8 ARM (ARM DDI 0487A.f) for more details.
1660  */
1661 static inline bool __attribute_const__ is_emulated(u32 id)
1662 {
1663         return (sys_reg_Op0(id) == 0x3 &&
1664                 sys_reg_CRn(id) == 0x0 &&
1665                 sys_reg_Op1(id) == 0x0 &&
1666                 (sys_reg_CRm(id) == 0 ||
1667                  ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
1668 }
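/*
 * Editor's note -- illustrative examples, not part of the original file.
 * Under this filter, ID registers such as MIDR_EL1 (Op0=3, Op1=0, CRn=0,
 * CRm=0, Op2=0) and ID_AA64ISAR0_EL1 (same, but CRm=6) are emulated,
 * whereas e.g. SCTLR_EL1 (CRn=1) is not, and emulate_sys_reg() below
 * rejects it with -EINVAL.
 */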
1669 
1670 /*
1671  * With CRm == 0, reg should be one of:
1672  * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
1673  */
1674 static inline int emulate_id_reg(u32 id, u64 *valp)
1675 {
1676         switch (id) {
1677         case SYS_MIDR_EL1:
1678                 *valp = read_cpuid_id();
1679                 break;
1680         case SYS_MPIDR_EL1:
1681                 *valp = SYS_MPIDR_SAFE_VAL;
1682                 break;
1683         case SYS_REVIDR_EL1:
1684                 /* IMPLEMENTATION DEFINED values are emulated with 0 */
1685                 *valp = 0;
1686                 break;
1687         default:
1688                 return -EINVAL;
1689         }
1690 
1691         return 0;
1692 }
1693 
1694 static int emulate_sys_reg(u32 id, u64 *valp)
1695 {
1696         struct arm64_ftr_reg *regp;
1697 
1698         if (!is_emulated(id))
1699                 return -EINVAL;
1700 
1701         if (sys_reg_CRm(id) == 0)
1702                 return emulate_id_reg(id, valp);
1703 
1704         regp = get_arm64_ftr_reg(id);
1705         if (regp)
1706                 *valp = arm64_ftr_reg_user_value(regp);
1707         else
1708                 /*
1709                  * The untracked registers are either IMPLEMENTATION DEFINED
1710                  * (e.g., ID_AFR0_EL1) or reserved RAZ.
1711                  */
1712                 *valp = 0;
1713         return 0;
1714 }
1715 
1716 static int emulate_mrs(struct pt_regs *regs, u32 insn)
1717 {
1718         int rc;
1719         u32 sys_reg, dst;
1720         u64 val;
1721 
1722         /*
1723          * sys_reg values are defined as used in mrs/msr instructions;
1724          * shift the immediate value to get the encoding.
1725          */
1726         sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
1727         rc = emulate_sys_reg(sys_reg, &val);
1728         if (!rc) {
1729                 dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
1730                 pt_regs_write_reg(regs, dst, val);
1731                 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
1732         }
1733 
1734         return rc;
1735 }
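/*
 * Editor's note -- worked example, not part of the original file.
 * "mrs x1, ID_AA64ISAR0_EL1" executed at EL0 encodes as 0xd5380601: the
 * 16-bit immediate field (instruction bits [20:5]) is 0xc030, which
 * shifted left by 5 gives 0x180600 == sys_reg(3, 0, 0, 6, 0), i.e.
 * SYS_ID_AA64ISAR0_EL1.  Rt is 1, so the sanitised value is written to x1
 * and the trapping instruction is skipped.
 */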
1736 
1737 static struct undef_hook mrs_hook = {
1738         .instr_mask = 0xfff00000,
1739         .instr_val  = 0xd5300000,
1740         .pstate_mask = COMPAT_PSR_MODE_MASK,
1741         .pstate_val = PSR_MODE_EL0t,
1742         .fn = emulate_mrs,
1743 };
1744 
1745 static int __init enable_mrs_emulation(void)
1746 {
1747         register_undef_hook(&mrs_hook);
1748         return 0;
1749 }
1750 
1751 core_initcall(enable_mrs_emulation);
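/*
 * Editor's note -- userspace-side sketch, not part of the original file.
 * With the hook above registered, EL0 code that sees HWCAP_CPUID can read
 * the sanitised ID registers directly (GCC/Clang inline asm assumed; older
 * assemblers may need the S3_0_C0_C6_0 spelling):
 *
 *	unsigned long isar0 = 0;
 *
 *	if (getauxval(AT_HWCAP) & HWCAP_CPUID)
 *		asm("mrs %0, ID_AA64ISAR0_EL1" : "=r" (isar0));
 */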
1752 
1753 void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
1754 {
1755         /* Firmware may have left a deferred SError in this register. */
1756         write_sysreg_s(0, SYS_DISR_EL1);
1757 }
1758 
