Diffstat (limited to 'arch/x86/kernel/tsc_sync.c')
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 18
1 file changed, 9 insertions, 9 deletions
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index fc25e60a5884..adfdf56a3714 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -25,24 +25,24 @@
  * Entry/exit counters that make sure that both CPUs
  * run the measurement code at once:
  */
-static __cpuinitdata atomic_t start_count;
-static __cpuinitdata atomic_t stop_count;
+static atomic_t start_count;
+static atomic_t stop_count;
 
 /*
  * We use a raw spinlock in this exceptional case, because
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
-static __cpuinitdata cycles_t last_tsc;
-static __cpuinitdata cycles_t max_warp;
-static __cpuinitdata int nr_warps;
+static cycles_t last_tsc;
+static cycles_t max_warp;
+static int nr_warps;
 
 /*
  * TSC-warp measurement loop running on both CPUs:
  */
-static __cpuinit void check_tsc_warp(unsigned int timeout)
+static void check_tsc_warp(unsigned int timeout)
 {
 	cycles_t start, now, prev, end;
 	int i;
@@ -121,7 +121,7 @@ static inline unsigned int loop_timeout(int cpu)
  * Source CPU calls into this - it waits for the freshly booted
  * target CPU to arrive and then starts the measurement:
  */
-void __cpuinit check_tsc_sync_source(int cpu)
+void check_tsc_sync_source(int cpu)
 {
 	int cpus = 2;
 
@@ -187,7 +187,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
 /*
  * Freshly booted CPUs call into this:
  */
-void __cpuinit check_tsc_sync_target(void)
+void check_tsc_sync_target(void)
 {
 	int cpus = 2;
 
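For readers unfamiliar with the check that the comments in the diff describe, below is a minimal userspace sketch of the same warp-detection idea: two threads read the TSC under a shared spinlock and flag any reading that goes backwards relative to the last value seen by either thread. This is an illustration under assumptions only; the names (warp_check, the iteration count) are hypothetical, and it does not reproduce the kernel's actual check_tsc_warp() loop, the source/target handshake, or the timeout handling shown above.

/*
 * Illustrative sketch only, not the kernel code path: two threads take
 * turns reading the TSC under a shared lock and record any observation
 * that goes backwards relative to the last reading ("a warp").
 * Pinning the threads to different CPUs (pthread_setaffinity_np) is
 * omitted for brevity, but is what makes the check meaningful.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>          /* __rdtsc() */

static pthread_spinlock_t sync_lock;
static uint64_t last_tsc;       /* most recent TSC value seen by either thread */
static uint64_t max_warp;       /* largest backwards jump observed */
static int nr_warps;

static void *warp_check(void *arg)
{
        for (long i = 0; i < 1000000; i++) {
                /* Read the TSC and update the shared last value atomically. */
                pthread_spin_lock(&sync_lock);
                uint64_t now  = __rdtsc();
                uint64_t prev = last_tsc;
                last_tsc = now;
                pthread_spin_unlock(&sync_lock);

                if (now < prev) {
                        /* Time appeared to go backwards: record the warp. */
                        pthread_spin_lock(&sync_lock);
                        nr_warps++;
                        if (prev - now > max_warp)
                                max_warp = prev - now;
                        pthread_spin_unlock(&sync_lock);
                }
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_spin_init(&sync_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_create(&a, NULL, warp_check, NULL);
        pthread_create(&b, NULL, warp_check, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        printf("warps: %d, max warp: %llu cycles\n",
               nr_warps, (unsigned long long)max_warp);
        return 0;
}

Build with something like `cc -O2 -o warp warp.c -lpthread`; on a system with synchronized TSCs the warp count should stay at zero.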
