diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2023-05-19 12:21:02 +0200 | 
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2023-06-05 21:11:05 +0200 | 
| commit | 24ee7607b286b44a5112ced38652df14cd80d5e2 (patch) | |
| tree | 4c91f31da9f727240a191af66cc90077bca3e648 /drivers/clocksource/arm_arch_timer.c | |
| parent | c1d26c0f0295953d35307f9ee07f3e5295741315 (diff) | |
arm64/arch_timer: Provide noinstr sched_clock_read() functions
With the intent to provide local_clock_noinstr(), a variant of
local_clock() that's safe to be called from noinstr code (with the
assumption that any such code will already be non-preemptible),
prepare for things by providing a noinstr sched_clock_read() function.
Specifically, preempt_enable_*() calls out to schedule(), which upsets
noinstr validation efforts.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>  # Hyper-V
Link: https://lore.kernel.org/r/20230519102715.435618812@infradead.org
Diffstat (limited to 'drivers/clocksource/arm_arch_timer.c')
| -rw-r--r-- | drivers/clocksource/arm_arch_timer.c | 54 | 
1 file changed, 40 insertions, 14 deletions
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index e09d4427f604..b23d23b033cc 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -191,22 +191,40 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,  	return val;  } -static notrace u64 arch_counter_get_cntpct_stable(void) +static noinstr u64 raw_counter_get_cntpct_stable(void)  {  	return __arch_counter_get_cntpct_stable();  } -static notrace u64 arch_counter_get_cntpct(void) +static notrace u64 arch_counter_get_cntpct_stable(void) +{ +	u64 val; +	preempt_disable_notrace(); +	val = __arch_counter_get_cntpct_stable(); +	preempt_enable_notrace(); +	return val; +} + +static noinstr u64 arch_counter_get_cntpct(void)  {  	return __arch_counter_get_cntpct();  } -static notrace u64 arch_counter_get_cntvct_stable(void) +static noinstr u64 raw_counter_get_cntvct_stable(void)  {  	return __arch_counter_get_cntvct_stable();  } -static notrace u64 arch_counter_get_cntvct(void) +static notrace u64 arch_counter_get_cntvct_stable(void) +{ +	u64 val; +	preempt_disable_notrace(); +	val = __arch_counter_get_cntvct_stable(); +	preempt_enable_notrace(); +	return val; +} + +static noinstr u64 arch_counter_get_cntvct(void)  {  	return __arch_counter_get_cntvct();  } @@ -753,14 +771,14 @@ static int arch_timer_set_next_event_phys(unsigned long evt,  	return 0;  } -static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo) +static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)  {  	u32 cnt_lo, cnt_hi, tmp_hi;  	do { -		cnt_hi = readl_relaxed(t->base + offset_lo + 4); -		cnt_lo = readl_relaxed(t->base + offset_lo); -		tmp_hi = readl_relaxed(t->base + offset_lo + 4); +		cnt_hi = __raw_readl(t->base + offset_lo + 4); +		cnt_lo = __raw_readl(t->base + offset_lo); +		tmp_hi = __raw_readl(t->base + offset_lo + 4);  	} while (cnt_hi != tmp_hi);  	return ((u64) cnt_hi << 32) | cnt_lo; @@ -1060,7 +1078,7 @@ 
bool arch_timer_evtstrm_available(void)  	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);  } -static u64 arch_counter_get_cntvct_mem(void) +static noinstr u64 arch_counter_get_cntvct_mem(void)  {  	return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);  } @@ -1074,6 +1092,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)  static void __init arch_counter_register(unsigned type)  { +	u64 (*scr)(void);  	u64 start_count;  	int width; @@ -1083,21 +1102,28 @@ static void __init arch_counter_register(unsigned type)  		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||  		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { -			if (arch_timer_counter_has_wa()) +			if (arch_timer_counter_has_wa()) {  				rd = arch_counter_get_cntvct_stable; -			else +				scr = raw_counter_get_cntvct_stable; +			} else {  				rd = arch_counter_get_cntvct; +				scr = arch_counter_get_cntvct; +			}  		} else { -			if (arch_timer_counter_has_wa()) +			if (arch_timer_counter_has_wa()) {  				rd = arch_counter_get_cntpct_stable; -			else +				scr = raw_counter_get_cntpct_stable; +			} else {  				rd = arch_counter_get_cntpct; +				scr = arch_counter_get_cntpct; +			}  		}  		arch_timer_read_counter = rd;  		clocksource_counter.vdso_clock_mode = vdso_default;  	} else {  		arch_timer_read_counter = arch_counter_get_cntvct_mem; +		scr = arch_counter_get_cntvct_mem;  	}  	width = arch_counter_get_width(); @@ -1113,7 +1139,7 @@ static void __init arch_counter_register(unsigned type)  	timecounter_init(&arch_timer_kvm_info.timecounter,  			 &cyclecounter, start_count); -	sched_clock_register(arch_timer_read_counter, width, arch_timer_rate); +	sched_clock_register(scr, width, arch_timer_rate);  }  static void arch_timer_stop(struct clock_event_device *clk)  | 
