| author | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
| commit | 7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch) | |
| tree | 879f18ccbe274122f2d4f095b43cbc7f953e0ada /arch/x86/kernel/cpu/common.c | |
| parent | 48e315618dc4dc8904182cd221e3d395d5d97005 (diff) | |
| parent | 9ffc59d57228d74809700be6f7ecb1db10292f05 (diff) | |
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
| -rw-r--r-- | arch/x86/kernel/cpu/common.c | 46 |
|---|---|---|

1 file changed, 44 insertions(+), 2 deletions(-)
```diff
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 38276f58d3bf..0df7151cfef4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -66,6 +66,13 @@ cpumask_var_t cpu_callin_mask;
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* Number of siblings per CPU package */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* Last level cache ID of each logical CPU */
+DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
+
 /* correctly size the local cpu masks */
 void __init setup_cpu_local_masks(void)
 {
@@ -577,6 +584,19 @@ static void get_model_name(struct cpuinfo_x86 *c)
 	*(s + 1) = '\0';
 }
 
+void detect_num_cpu_cores(struct cpuinfo_x86 *c)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	c->x86_max_cores = 1;
+	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
+		return;
+
+	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
+	if (eax & 0x1f)
+		c->x86_max_cores = (eax >> 26) + 1;
+}
+
 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ebx, ecx, edx, l2size;
@@ -783,6 +803,12 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
+
+	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
+	}
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -972,7 +998,8 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
 	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	   !(ia32_cap & ARCH_CAP_SSB_NO))
+	   !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	if (x86_match_cpu(cpu_no_meltdown))
@@ -1044,6 +1071,21 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	setup_clear_cpu_cap(X86_FEATURE_PCID);
 #endif
+
+	/*
+	 * Later in the boot process pgtable_l5_enabled() relies on
+	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
+	 * enabled by this point we need to clear the feature bit to avoid
+	 * false-positives at the later stage.
+	 *
+	 * pgtable_l5_enabled() can be false here for several reasons:
+	 *  - 5-level paging is disabled compile-time;
+	 *  - it's 32-bit kernel;
+	 *  - machine doesn't support 5-level paging;
+	 *  - user specified 'no5lvl' in kernel command line.
+	 */
+	if (!pgtable_l5_enabled())
+		setup_clear_cpu_cap(X86_FEATURE_LA57);
 }
 
 void __init early_cpu_init(void)
@@ -1557,7 +1599,7 @@ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
 	(unsigned long)&init_thread_union + THREAD_SIZE;
 EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
```
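The detect_num_cpu_cores() hunk above reads CPUID leaf 4 (deterministic cache parameters): EAX bits 31:26 report the maximum number of addressable core IDs per package minus one, while a zero cache-type field in EAX bits 4:0 marks the leaf as invalid, in which case the default of one core stands. A minimal user-space sketch of the same calculation, assuming an x86 GCC/Clang toolchain (it substitutes the compiler's __get_cpuid_count() helper from <cpuid.h> for the kernel's cpuid_count(); an illustration, not the kernel code):

```c
/*
 * Illustrative user-space rework of the detect_num_cpu_cores() logic
 * added in the hunk above. Assumes x86 and GCC/Clang's <cpuid.h>.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int cores = 1;	/* same default as the kernel */

	/* __get_cpuid_count() returns 0 if leaf 4 is unsupported */
	if (__get_cpuid_count(4, 0, &eax, &ebx, &ecx, &edx) &&
	    (eax & 0x1f))	/* cache type 0 => invalid leaf */
		cores = (eax >> 26) + 1;	/* EAX[31:26] + 1 */

	printf("max cores per package: %u\n", cores);
	return 0;
}
```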
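The early_identify_cpu() hunk makes a related distinction: hardware LA57 support (advertised in CPUID leaf 7, subleaf 0, ECX bit 16) is not the same as 5-level paging actually being enabled, which is why X86_FEATURE_LA57 is cleared whenever pgtable_l5_enabled() is false. A hedged sketch of the hardware-support check alone, under the same <cpuid.h> assumptions as above (what the kernel actually runs with can still differ, e.g. when booted with 'no5lvl'):

```c
/*
 * Illustrative check for LA57 (5-level paging) hardware support.
 * This mirrors only the CPUID side; whether the kernel enables
 * 5-level paging is a separate, boot-time decision.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		printf("LA57 in hardware: %s\n",
		       (ecx & (1u << 16)) ? "yes" : "no");	/* ECX[16] */
	else
		printf("CPUID leaf 7 not available\n");
	return 0;
}
```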
