From bd624d75db21ea5402f9ecf4450b311794d80352 Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Fri, 13 Feb 2015 08:54:56 +0800
Subject: clockevents: Introduce mode specific callbacks

It is not possible for the clockevents core to know which modes (other
than those with a corresponding feature flag) are supported by a
particular implementation, and drivers are expected to handle the
transition to all modes gracefully, as ->set_mode() is issued for them
unconditionally.

Now, adding support for a new mode complicates things a bit if we want to
keep using the legacy ->set_mode() callback: we need to closely review all
clockevents drivers to see whether they would break when a new mode is
added, and such reviews have shown that most of the drivers would need
non-trivial changes [1].

Introduce mode-specific set_mode_*() callbacks, which drivers may or may
not implement. A missing callback clearly conveys that the corresponding
mode isn't supported.

A driver may still choose to keep supporting the legacy ->set_mode()
callback, but ->set_mode() will not support any new modes beyond RESUME.
If a driver wants to benefit from a new mode, it has to migrate to the
mode-specific callbacks.

The legacy ->set_mode() callback and the newly introduced mode-specific
callbacks are mutually exclusive: a driver should supply only one of them.
A sanity check is done at registration time to distinguish between
optional and required callbacks and to keep error recovery and handling
simple. If the legacy ->set_mode() callback is provided, all mode-specific
ones are ignored by the core, but a warning is emitted if any of them are
present.

Call sites that call ->set_mode() directly are also updated to use
__clockevents_set_mode() instead, as ->set_mode() may no longer be
available for some drivers.
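For illustration only, a minimal driver wired up to the new interface could
look like the sketch below. The foo_* register layout, the foo_base mapping
and the reload value are invented for the example; a real driver would also
provide an interrupt handler and register the device with
clockevents_config_and_register(). Leaving out set_mode_oneshot() is exactly
how such a driver tells the core that oneshot mode is not supported:

#include <linux/clockchips.h>
#include <linux/io.h>

#define FOO_LOAD		0x00	/* made-up register offsets */
#define FOO_CTRL		0x04
#define FOO_CTRL_ENABLE		0x01
#define FOO_CTRL_PERIODIC	0x02

static void __iomem *foo_base;		/* ioremap()ed at probe time */
static unsigned long foo_reload;	/* ticks per period, computed at probe time */

/* Required: a driver using the new callbacks must always be able to shut down. */
static int foo_timer_shutdown(struct clock_event_device *evt)
{
	writel(0, foo_base + FOO_CTRL);
	return 0;
}

/* Required here because CLOCK_EVT_FEAT_PERIODIC is advertised. */
static int foo_timer_set_periodic(struct clock_event_device *evt)
{
	writel(foo_reload, foo_base + FOO_LOAD);
	writel(FOO_CTRL_ENABLE | FOO_CTRL_PERIODIC, foo_base + FOO_CTRL);
	return 0;
}

static struct clock_event_device foo_clockevent = {
	.name			= "foo-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 200,
	/* No ->set_mode(): legacy and mode-specific callbacks are mutually exclusive. */
	.set_mode_shutdown	= foo_timer_shutdown,
	.set_mode_periodic	= foo_timer_set_periodic,
	/* set_mode_oneshot()/set_mode_resume() omitted: those modes are unsupported. */
};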
[1] https://lkml.org/lkml/2014/12/9/605 [2] https://lkml.org/lkml/2015/1/23/255 Suggested-by: Thomas Gleixner [2] Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Daniel Lezcano Cc: Frederic Weisbecker Cc: John Stultz Cc: Kevin Hilman Cc: Linus Torvalds Cc: Preeti U Murthy Cc: linaro-kernel@lists.linaro.org Cc: linaro-networking@linaro.org Link: http://lkml.kernel.org/r/792d59a40423f0acffc9bb0bec9de1341a06fa02.1423788565.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/time/timer_list.c | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) (limited to 'kernel/time/timer_list.c') diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 61ed862cdd37..2cfd19485824 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -228,9 +228,35 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) print_name_offset(m, dev->set_next_event); SEQ_printf(m, "\n"); - SEQ_printf(m, " set_mode: "); - print_name_offset(m, dev->set_mode); - SEQ_printf(m, "\n"); + if (dev->set_mode) { + SEQ_printf(m, " set_mode: "); + print_name_offset(m, dev->set_mode); + SEQ_printf(m, "\n"); + } else { + if (dev->set_mode_shutdown) { + SEQ_printf(m, " shutdown: "); + print_name_offset(m, dev->set_mode_shutdown); + SEQ_printf(m, "\n"); + } + + if (dev->set_mode_periodic) { + SEQ_printf(m, " periodic: "); + print_name_offset(m, dev->set_mode_periodic); + SEQ_printf(m, "\n"); + } + + if (dev->set_mode_oneshot) { + SEQ_printf(m, " oneshot: "); + print_name_offset(m, dev->set_mode_oneshot); + SEQ_printf(m, "\n"); + } + + if (dev->set_mode_resume) { + SEQ_printf(m, " resume: "); + print_name_offset(m, dev->set_mode_resume); + SEQ_printf(m, "\n"); + } + } SEQ_printf(m, " event_handler: "); print_name_offset(m, dev->event_handler); -- cgit v1.2.3 From 554ef3876c6acdff1331feab10275e9e9e0adb84 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 27 Feb 2015 17:21:32 +0530 Subject: clockevents: Handle tick device's resume separately Upcoming patch will redefine possible states of a clockevent device. The RESUME mode is a special case only for tick's clockevent devices. In future it can be replaced by ->resume() callback already available for clockevent devices. Lets handle it separately so that clockevents_set_mode() only handles states valid across all devices. This also renames set_mode_resume() to tick_resume() to make it more explicit. 
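In short, resume becomes a dedicated core helper rather than another
clockevents_set_mode() case: legacy devices are still resumed through
->set_mode(CLOCK_EVT_MODE_RESUME), new-style devices through the (still
optional) ->tick_resume() callback. Annotated, the helper added by this
patch (see the clockevents.c hunk below) reads:

int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->set_mode)		/* legacy driver: resume via ->set_mode() */
		dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
	else if (dev->tick_resume)	/* new-style driver: optional callback */
		ret = dev->tick_resume(dev);

	if (likely(!ret))
		dev->mode = CLOCK_EVT_MODE_RESUME;

	return ret;
}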
Signed-off-by: Viresh Kumar Acked-by: Peter Zijlstra Cc: Daniel Lezcano Cc: Frederic Weisbecker Cc: Kevin Hilman Cc: Peter Zijlstra Cc: Preeti U Murthy Cc: linaro-kernel@lists.linaro.org Cc: linaro-networking@linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/c1b0112410870f49e7bf06958e1483eac6c15e20.1425037853.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- include/linux/clockchips.h | 4 ++-- kernel/time/clockevents.c | 30 +++++++++++++++++++++--------- kernel/time/tick-broadcast.c | 2 +- kernel/time/tick-common.c | 2 +- kernel/time/tick-internal.h | 1 + kernel/time/timer_list.c | 4 ++-- 6 files changed, 28 insertions(+), 15 deletions(-) (limited to 'kernel/time/timer_list.c') diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 59af26b54d15..a41749543d48 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -87,7 +87,7 @@ enum clock_event_mode { * @set_mode_periodic: switch mode to periodic, if !set_mode * @set_mode_oneshot: switch mode to oneshot, if !set_mode * @set_mode_shutdown: switch mode to shutdown, if !set_mode - * @set_mode_resume: resume clkevt device, if !set_mode + * @tick_resume: resume clkevt device, if !set_mode * @broadcast: function to broadcast events * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration @@ -125,7 +125,7 @@ struct clock_event_device { int (*set_mode_periodic)(struct clock_event_device *); int (*set_mode_oneshot)(struct clock_event_device *); int (*set_mode_shutdown)(struct clock_event_device *); - int (*set_mode_resume)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); void (*broadcast)(const struct cpumask *mask); void (*suspend)(struct clock_event_device *); diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 489642b08d64..1b0ea63de69c 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -100,7 +100,7 @@ static int __clockevents_set_mode(struct clock_event_device *dev, /* Transition with legacy set_mode() callback */ if (dev->set_mode) { /* Legacy callback doesn't support new modes */ - if (mode > CLOCK_EVT_MODE_RESUME) + if (mode > CLOCK_EVT_MODE_ONESHOT) return -ENOSYS; dev->set_mode(mode, dev); return 0; @@ -133,13 +133,6 @@ static int __clockevents_set_mode(struct clock_event_device *dev, return -ENOSYS; return dev->set_mode_oneshot(dev); - case CLOCK_EVT_MODE_RESUME: - /* Optional callback */ - if (dev->set_mode_resume) - return dev->set_mode_resume(dev); - else - return 0; - default: return -ENOSYS; } @@ -184,6 +177,25 @@ void clockevents_shutdown(struct clock_event_device *dev) dev->next_event.tv64 = KTIME_MAX; } +/** + * clockevents_tick_resume - Resume the tick device before using it again + * @dev: device to resume + */ +int clockevents_tick_resume(struct clock_event_device *dev) +{ + int ret = 0; + + if (dev->set_mode) + dev->set_mode(CLOCK_EVT_MODE_RESUME, dev); + else if (dev->tick_resume) + ret = dev->tick_resume(dev); + + if (likely(!ret)) + dev->mode = CLOCK_EVT_MODE_RESUME; + + return ret; +} + #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST /* Limit min_delta to a jiffie */ @@ -433,7 +445,7 @@ static int clockevents_sanity_check(struct clock_event_device *dev) if (dev->set_mode) { /* We shouldn't be supporting new modes now */ WARN_ON(dev->set_mode_periodic || dev->set_mode_oneshot || - dev->set_mode_shutdown || dev->set_mode_resume); + dev->set_mode_shutdown || dev->tick_resume); 
return 0; } diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 066f0ec05e48..542d5bb5c13d 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -464,7 +464,7 @@ int tick_resume_broadcast(void) bc = tick_broadcast_device.evtdev; if (bc) { - clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME); + clockevents_tick_resume(bc); switch (tick_broadcast_device.mode) { case TICKDEV_MODE_PERIODIC: diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index f7c515595b42..5c50664c21d7 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -384,7 +384,7 @@ void tick_resume(void) struct tick_device *td = this_cpu_ptr(&tick_cpu_device); int broadcast = tick_resume_broadcast(); - clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); + clockevents_tick_resume(td->evtdev); if (!broadcast) { if (td->mode == TICKDEV_MODE_PERIODIC) diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 366aeb4f2c66..98700e4a2000 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -32,6 +32,7 @@ extern bool tick_check_replacement(struct clock_event_device *curdev, extern void tick_install_replacement(struct clock_event_device *dev); extern void clockevents_shutdown(struct clock_event_device *dev); +extern int clockevents_tick_resume(struct clock_event_device *dev); extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt); diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 2cfd19485824..2b3e9393034d 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -251,9 +251,9 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) SEQ_printf(m, "\n"); } - if (dev->set_mode_resume) { + if (dev->tick_resume) { SEQ_printf(m, " resume: "); - print_name_offset(m, dev->set_mode_resume); + print_name_offset(m, dev->tick_resume); SEQ_printf(m, "\n"); } } -- cgit v1.2.3 From 77e32c89a7117614ab3d66d20c1088de721abfaa Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 27 Feb 2015 17:21:33 +0530 Subject: clockevents: Manage device's state separately for the core 'enum clock_event_mode' is used for two purposes today: - to pass mode to the driver of clockevent device::set_mode(). - for managing state of the device for clockevents core. For supporting new modes/states we have moved away from the legacy set_mode() callback to new per-mode/state callbacks. New modes/states shouldn't be exposed to the legacy (now OBSOLOTE) callbacks and so we shouldn't add new states to 'enum clock_event_mode'. Lets have separate enums for the two use cases mentioned above. Keep using the earlier enum for legacy set_mode() callback and mark it OBSOLETE. And add another enum to clearly specify the possible states of a clockevent device. This also renames the newly added per-mode callbacks to reflect state changes. We haven't got rid of 'mode' member of 'struct clock_event_device' as it is used by some of the clockevent drivers and it would automatically die down once we migrate those drivers to the new interface. It ('mode') is only updated now for the drivers using the legacy interface. 
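Because 'enum clock_event_state' mirrors 'enum clock_event_mode'
value-for-value up to ONESHOT, the core can keep driving legacy ->set_mode()
drivers with a plain cast. The following is a condensed sketch of that branch
(the helper name is invented; in the patch this logic lives inside
__clockevents_set_state(), shown in full in the clockevents.c hunk below):

static int set_state_via_legacy_set_mode(struct clock_event_device *dev,
					 enum clock_event_state state)
{
	/* The legacy callback knows nothing newer than ONESHOT. */
	if (state > CLOCK_EVT_STATE_ONESHOT)
		return -ENOSYS;

	/*
	 * clock_event_state and clock_event_mode have a 1-to-1 mapping up to
	 * ONESHOT, so a simple cast is the only translation needed.
	 */
	dev->set_mode((enum clock_event_mode)state, dev);

	/* 'mode' is kept, but now only updated for drivers still using ->set_mode(). */
	dev->mode = (enum clock_event_mode)state;
	return 0;
}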
Suggested-by: Peter Zijlstra Suggested-by: Ingo Molnar Signed-off-by: Viresh Kumar Acked-by: Peter Zijlstra Cc: Daniel Lezcano Cc: Frederic Weisbecker Cc: Kevin Hilman Cc: Preeti U Murthy Cc: linaro-kernel@lists.linaro.org Cc: linaro-networking@linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/b6b0143a8a57bd58352ad35e08c25424c879c0cb.1425037853.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- arch/arm/common/bL_switcher.c | 8 ++-- include/linux/clockchips.h | 44 +++++++++++++------ kernel/time/clockevents.c | 99 +++++++++++++++++++++++-------------------- kernel/time/tick-broadcast.c | 20 ++++----- kernel/time/tick-common.c | 7 +-- kernel/time/tick-oneshot.c | 6 +-- kernel/time/timer_list.c | 12 +++--- 7 files changed, 111 insertions(+), 85 deletions(-) (limited to 'kernel/time/timer_list.c') diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c index 6eaddc47c43d..d4f970a4d255 100644 --- a/arch/arm/common/bL_switcher.c +++ b/arch/arm/common/bL_switcher.c @@ -152,7 +152,7 @@ static int bL_switch_to(unsigned int new_cluster_id) unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster; struct completion inbound_alive; struct tick_device *tdev; - enum clock_event_mode tdev_mode; + enum clock_event_state tdev_state; long volatile *handshake_ptr; int ipi_nr, ret; @@ -223,8 +223,8 @@ static int bL_switch_to(unsigned int new_cluster_id) if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu))) tdev = NULL; if (tdev) { - tdev_mode = tdev->evtdev->mode; - clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN); + tdev_state = tdev->evtdev->state; + clockevents_set_state(tdev->evtdev, CLOCK_EVT_STATE_SHUTDOWN); } ret = cpu_pm_enter(); @@ -252,7 +252,7 @@ static int bL_switch_to(unsigned int new_cluster_id) ret = cpu_pm_exit(); if (tdev) { - clockevents_set_mode(tdev->evtdev, tdev_mode); + clockevents_set_state(tdev->evtdev, tdev_state); clockevents_program_event(tdev->evtdev, tdev->evtdev->next_event, 1); } diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index a41749543d48..e20232c3320a 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -32,15 +32,31 @@ enum clock_event_nofitiers { struct clock_event_device; struct module; -/* Clock event mode commands */ +/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */ enum clock_event_mode { CLOCK_EVT_MODE_UNUSED = 0, CLOCK_EVT_MODE_SHUTDOWN, CLOCK_EVT_MODE_PERIODIC, CLOCK_EVT_MODE_ONESHOT, CLOCK_EVT_MODE_RESUME, +}; - /* Legacy ->set_mode() callback doesn't support below modes */ +/* + * Possible states of a clock event device. + * + * DETACHED: Device is not used by clockevents core. Initial state or can be + * reached from SHUTDOWN. + * SHUTDOWN: Device is powered-off. Can be reached from PERIODIC or ONESHOT. + * PERIODIC: Device is programmed to generate events periodically. Can be + * reached from DETACHED or SHUTDOWN. + * ONESHOT: Device is programmed to generate event only once. Can be reached + * from DETACHED or SHUTDOWN. 
+ */ +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED = 0, + CLOCK_EVT_STATE_SHUTDOWN, + CLOCK_EVT_STATE_PERIODIC, + CLOCK_EVT_STATE_ONESHOT, }; /* @@ -80,13 +96,14 @@ enum clock_event_mode { * @min_delta_ns: minimum delta value in ns * @mult: nanosecond to cycles multiplier * @shift: nanoseconds to cycles divisor (power of two) - * @mode: operating mode assigned by the management code + * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE + * @state: current state of the device, assigned by the core code * @features: features * @retries: number of forced programming retries * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME. - * @set_mode_periodic: switch mode to periodic, if !set_mode - * @set_mode_oneshot: switch mode to oneshot, if !set_mode - * @set_mode_shutdown: switch mode to shutdown, if !set_mode + * @set_state_periodic: switch state to periodic, if !set_mode + * @set_state_oneshot: switch state to oneshot, if !set_mode + * @set_state_shutdown: switch state to shutdown, if !set_mode * @tick_resume: resume clkevt device, if !set_mode * @broadcast: function to broadcast events * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration @@ -111,20 +128,21 @@ struct clock_event_device { u32 mult; u32 shift; enum clock_event_mode mode; + enum clock_event_state state; unsigned int features; unsigned long retries; /* - * Mode transition callback(s): Only one of the two groups should be + * State transition callback(s): Only one of the two groups should be * defined: * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME. - * - set_mode_{shutdown|periodic|oneshot|resume}(). + * - set_state_{shutdown|periodic|oneshot}(), tick_resume(). */ void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); - int (*set_mode_periodic)(struct clock_event_device *); - int (*set_mode_oneshot)(struct clock_event_device *); - int (*set_mode_shutdown)(struct clock_event_device *); + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); int (*tick_resume)(struct clock_event_device *); void (*broadcast)(const struct cpumask *mask); @@ -177,8 +195,8 @@ extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); extern void clockevents_exchange_device(struct clock_event_device *old, struct clock_event_device *new); -extern void clockevents_set_mode(struct clock_event_device *dev, - enum clock_event_mode mode); +extern void clockevents_set_state(struct clock_event_device *dev, + enum clock_event_state state); extern int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, bool force); diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 1b0ea63de69c..6e53e9a0c2e8 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -94,44 +94,49 @@ u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) } EXPORT_SYMBOL_GPL(clockevent_delta2ns); -static int __clockevents_set_mode(struct clock_event_device *dev, - enum clock_event_mode mode) +static int __clockevents_set_state(struct clock_event_device *dev, + enum clock_event_state state) { /* Transition with legacy set_mode() callback */ if (dev->set_mode) { /* Legacy callback doesn't support new modes */ - if (mode > CLOCK_EVT_MODE_ONESHOT) + if (state > CLOCK_EVT_STATE_ONESHOT) return -ENOSYS; - dev->set_mode(mode, dev); + /* + * 'clock_event_state' and 'clock_event_mode' have 1-to-1 + 
* mapping until *_ONESHOT, and so a simple cast will work. + */ + dev->set_mode((enum clock_event_mode)state, dev); + dev->mode = (enum clock_event_mode)state; return 0; } if (dev->features & CLOCK_EVT_FEAT_DUMMY) return 0; - /* Transition with new mode-specific callbacks */ - switch (mode) { - case CLOCK_EVT_MODE_UNUSED: + /* Transition with new state-specific callbacks */ + switch (state) { + case CLOCK_EVT_STATE_DETACHED: /* * This is an internal state, which is guaranteed to go from - * SHUTDOWN to UNUSED. No driver interaction required. + * SHUTDOWN to DETACHED. No driver interaction required. */ return 0; - case CLOCK_EVT_MODE_SHUTDOWN: - return dev->set_mode_shutdown(dev); + case CLOCK_EVT_STATE_SHUTDOWN: + return dev->set_state_shutdown(dev); - case CLOCK_EVT_MODE_PERIODIC: + case CLOCK_EVT_STATE_PERIODIC: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) return -ENOSYS; - return dev->set_mode_periodic(dev); + return dev->set_state_periodic(dev); - case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_STATE_ONESHOT: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return -ENOSYS; - return dev->set_mode_oneshot(dev); + return dev->set_state_oneshot(dev); default: return -ENOSYS; @@ -139,26 +144,26 @@ static int __clockevents_set_mode(struct clock_event_device *dev, } /** - * clockevents_set_mode - set the operating mode of a clock event device + * clockevents_set_state - set the operating state of a clock event device * @dev: device to modify - * @mode: new mode + * @state: new state * * Must be called with interrupts disabled ! */ -void clockevents_set_mode(struct clock_event_device *dev, - enum clock_event_mode mode) +void clockevents_set_state(struct clock_event_device *dev, + enum clock_event_state state) { - if (dev->mode != mode) { - if (__clockevents_set_mode(dev, mode)) + if (dev->state != state) { + if (__clockevents_set_state(dev, state)) return; - dev->mode = mode; + dev->state = state; /* * A nsec2cyc multiplicator of 0 is invalid and we'd crash * on it, so fix it up and emit a warning: */ - if (mode == CLOCK_EVT_MODE_ONESHOT) { + if (state == CLOCK_EVT_STATE_ONESHOT) { if (unlikely(!dev->mult)) { dev->mult = 1; WARN_ON(1); @@ -173,7 +178,7 @@ void clockevents_set_mode(struct clock_event_device *dev, */ void clockevents_shutdown(struct clock_event_device *dev) { - clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); + clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN); dev->next_event.tv64 = KTIME_MAX; } @@ -185,13 +190,12 @@ int clockevents_tick_resume(struct clock_event_device *dev) { int ret = 0; - if (dev->set_mode) + if (dev->set_mode) { dev->set_mode(CLOCK_EVT_MODE_RESUME, dev); - else if (dev->tick_resume) - ret = dev->tick_resume(dev); - - if (likely(!ret)) dev->mode = CLOCK_EVT_MODE_RESUME; + } else if (dev->tick_resume) { + ret = dev->tick_resume(dev); + } return ret; } @@ -248,7 +252,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev) delta = dev->min_delta_ns; dev->next_event = ktime_add_ns(ktime_get(), delta); - if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) + if (dev->state == CLOCK_EVT_STATE_SHUTDOWN) return 0; dev->retries++; @@ -285,7 +289,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev) delta = dev->min_delta_ns; dev->next_event = ktime_add_ns(ktime_get(), delta); - if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) + if (dev->state == CLOCK_EVT_STATE_SHUTDOWN) return 0; dev->retries++; @@ -317,7 +321,7 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t 
expires, dev->next_event = expires; - if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) + if (dev->state == CLOCK_EVT_STATE_SHUTDOWN) return 0; /* Shortcut for clockevent devices that can deal with ktime. */ @@ -362,7 +366,7 @@ static int clockevents_replace(struct clock_event_device *ced) struct clock_event_device *dev, *newdev = NULL; list_for_each_entry(dev, &clockevent_devices, list) { - if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED) + if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED) continue; if (!tick_check_replacement(newdev, dev)) @@ -388,7 +392,7 @@ static int clockevents_replace(struct clock_event_device *ced) static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu) { /* Fast track. Device is unused */ - if (ced->mode == CLOCK_EVT_MODE_UNUSED) { + if (ced->state == CLOCK_EVT_STATE_DETACHED) { list_del_init(&ced->list); return 0; } @@ -438,30 +442,30 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu) } EXPORT_SYMBOL_GPL(clockevents_unbind); -/* Sanity check of mode transition callbacks */ +/* Sanity check of state transition callbacks */ static int clockevents_sanity_check(struct clock_event_device *dev) { /* Legacy set_mode() callback */ if (dev->set_mode) { /* We shouldn't be supporting new modes now */ - WARN_ON(dev->set_mode_periodic || dev->set_mode_oneshot || - dev->set_mode_shutdown || dev->tick_resume); + WARN_ON(dev->set_state_periodic || dev->set_state_oneshot || + dev->set_state_shutdown || dev->tick_resume); return 0; } if (dev->features & CLOCK_EVT_FEAT_DUMMY) return 0; - /* New mode-specific callbacks */ - if (!dev->set_mode_shutdown) + /* New state-specific callbacks */ + if (!dev->set_state_shutdown) return -EINVAL; if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && - !dev->set_mode_periodic) + !dev->set_state_periodic) return -EINVAL; if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) && - !dev->set_mode_oneshot) + !dev->set_state_oneshot) return -EINVAL; return 0; @@ -478,6 +482,9 @@ void clockevents_register_device(struct clock_event_device *dev) BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); BUG_ON(clockevents_sanity_check(dev)); + /* Initialize state to DETACHED */ + dev->state = CLOCK_EVT_STATE_DETACHED; + if (!dev->cpumask) { WARN_ON(num_possible_cpus() > 1); dev->cpumask = cpumask_of(smp_processor_id()); @@ -541,11 +548,11 @@ int __clockevents_update_freq(struct clock_event_device *dev, u32 freq) { clockevents_config(dev, freq); - if (dev->mode == CLOCK_EVT_MODE_ONESHOT) + if (dev->state == CLOCK_EVT_STATE_ONESHOT) return clockevents_program_event(dev, dev->next_event, false); - if (dev->mode == CLOCK_EVT_MODE_PERIODIC) - return __clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); + if (dev->state == CLOCK_EVT_STATE_PERIODIC) + return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC); return 0; } @@ -601,13 +608,13 @@ void clockevents_exchange_device(struct clock_event_device *old, */ if (old) { module_put(old->owner); - clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); + clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED); list_del(&old->list); list_add(&old->list, &clockevents_released); } if (new) { - BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); + BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED); clockevents_shutdown(new); } local_irq_restore(flags); @@ -693,7 +700,7 @@ int clockevents_notify(unsigned long reason, void *arg) if (cpumask_test_cpu(cpu, dev->cpumask) && cpumask_weight(dev->cpumask) == 1 && !tick_is_broadcast_device(dev)) { - BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); + BUG_ON(dev->state != 
CLOCK_EVT_STATE_DETACHED); list_del(&dev->list); } } diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 542d5bb5c13d..f0f8ee9dbc28 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -303,7 +303,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) /* * The device is in periodic mode. No reprogramming necessary: */ - if (dev->mode == CLOCK_EVT_MODE_PERIODIC) + if (dev->state == CLOCK_EVT_STATE_PERIODIC) goto unlock; /* @@ -532,8 +532,8 @@ static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu, { int ret; - if (bc->mode != CLOCK_EVT_MODE_ONESHOT) - clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); + if (bc->state != CLOCK_EVT_STATE_ONESHOT) + clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT); ret = clockevents_program_event(bc, expires, force); if (!ret) @@ -543,7 +543,7 @@ static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu, int tick_resume_broadcast_oneshot(struct clock_event_device *bc) { - clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT); return 0; } @@ -562,8 +562,8 @@ void tick_check_oneshot_broadcast_this_cpu(void) * switched over, leave the device alone. */ if (td->mode == TICKDEV_MODE_ONESHOT) { - clockevents_set_mode(td->evtdev, - CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(td->evtdev, + CLOCK_EVT_STATE_ONESHOT); } } } @@ -666,7 +666,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc, if (dev->next_event.tv64 < bc->next_event.tv64) return; } - clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); + clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN); } static void broadcast_move_bc(int deadcpu) @@ -741,7 +741,7 @@ int tick_broadcast_oneshot_control(unsigned long reason) cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); } else { if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { - clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT); /* * The cpu which was handling the broadcast * timer marked this cpu in the broadcast @@ -842,7 +842,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) /* Set it up only once ! 
*/ if (bc->event_handler != tick_handle_oneshot_broadcast) { - int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; + int was_periodic = bc->state == CLOCK_EVT_STATE_PERIODIC; bc->event_handler = tick_handle_oneshot_broadcast; @@ -858,7 +858,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) tick_broadcast_oneshot_mask, tmpmask); if (was_periodic && !cpumask_empty(tmpmask)) { - clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT); tick_broadcast_init_next_event(tmpmask, tick_next_period); tick_broadcast_set_event(bc, cpu, tick_next_period, 1); diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 5c50664c21d7..a5b877130ae9 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -102,7 +102,7 @@ void tick_handle_periodic(struct clock_event_device *dev) tick_periodic(cpu); - if (dev->mode != CLOCK_EVT_MODE_ONESHOT) + if (dev->state != CLOCK_EVT_STATE_ONESHOT) return; for (;;) { /* @@ -140,7 +140,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && !tick_broadcast_oneshot_active()) { - clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); + clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC); } else { unsigned long seq; ktime_t next; @@ -150,7 +150,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) next = tick_next_period; } while (read_seqretry(&jiffies_lock, seq)); - clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT); for (;;) { if (!clockevents_program_event(dev, next, false)) @@ -365,6 +365,7 @@ void tick_shutdown(unsigned int *cpup) * Prevent that the clock events layer tries to call * the set mode function! 
*/ + dev->state = CLOCK_EVT_STATE_DETACHED; dev->mode = CLOCK_EVT_MODE_UNUSED; clockevents_exchange_device(dev, NULL); dev->event_handler = clockevents_handle_noop; diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 7ce740e78e1b..67a64b1670bf 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c @@ -38,7 +38,7 @@ void tick_resume_oneshot(void) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); - clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT); clockevents_program_event(dev, ktime_get(), true); } @@ -50,7 +50,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev, ktime_t next_event) { newdev->event_handler = handler; - clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(newdev, CLOCK_EVT_STATE_ONESHOT); clockevents_program_event(newdev, next_event, true); } @@ -81,7 +81,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) td->mode = TICKDEV_MODE_ONESHOT; dev->event_handler = handler; - clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT); tick_broadcast_switch_to_oneshot(); return 0; } diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 2b3e9393034d..05aa5590106a 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -233,21 +233,21 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) print_name_offset(m, dev->set_mode); SEQ_printf(m, "\n"); } else { - if (dev->set_mode_shutdown) { + if (dev->set_state_shutdown) { SEQ_printf(m, " shutdown: "); - print_name_offset(m, dev->set_mode_shutdown); + print_name_offset(m, dev->set_state_shutdown); SEQ_printf(m, "\n"); } - if (dev->set_mode_periodic) { + if (dev->set_state_periodic) { SEQ_printf(m, " periodic: "); - print_name_offset(m, dev->set_mode_periodic); + print_name_offset(m, dev->set_state_periodic); SEQ_printf(m, "\n"); } - if (dev->set_mode_oneshot) { + if (dev->set_state_oneshot) { SEQ_printf(m, " oneshot: "); - print_name_offset(m, dev->set_mode_oneshot); + print_name_offset(m, dev->set_state_oneshot); SEQ_printf(m, "\n"); } -- cgit v1.2.3 From c1797baf6880174f899ce3960d0598f5bbeeb7ff Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 25 Mar 2015 13:07:37 +0100 Subject: tick: Move core only declarations and functions to core No point to expose everything to the world. People just believe such functions can be abused for whatever purposes. Sigh. Signed-off-by: Thomas Gleixner [ Rebased on top of 4.0-rc5 ] Signed-off-by: Rafael J. 
Wysocki Cc: Nicolas Pitre Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/28017337.VbCUc39Gme@vostro.rjw.lan [ Merged to latest timers/core ] Signed-off-by: Ingo Molnar --- include/linux/clockchips.h | 15 ++--- include/linux/tick.h | 134 ++++++-------------------------------------- kernel/time/clocksource.c | 2 +- kernel/time/hrtimer.c | 2 +- kernel/time/tick-internal.h | 15 +++++ kernel/time/tick-sched.c | 7 ++- kernel/time/tick-sched.h | 64 +++++++++++++++++++++ kernel/time/timer_list.c | 2 +- 8 files changed, 112 insertions(+), 129 deletions(-) create mode 100644 kernel/time/tick-sched.h (limited to 'kernel/time/timer_list.c') diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 57975131dac1..bc3af821350b 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -193,15 +193,6 @@ extern void clockevents_config_and_register(struct clock_event_device *dev, extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); -extern void clockevents_exchange_device(struct clock_event_device *old, - struct clock_event_device *new); -extern void clockevents_set_state(struct clock_event_device *dev, - enum clock_event_state state); -extern int clockevents_program_event(struct clock_event_device *dev, - ktime_t expires, bool force); - -extern void clockevents_handle_noop(struct clock_event_device *dev); - static inline void clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) { @@ -209,6 +200,12 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) freq, minsec); } +/* Should be core only, but is abused by arm bl_switcher */ +extern void clockevents_set_state(struct clock_event_device *dev, + enum clock_event_state state); +extern int clockevents_program_event(struct clock_event_device *dev, + ktime_t expires, bool force); + extern void clockevents_suspend(void); extern void clockevents_resume(void); diff --git a/include/linux/tick.h b/include/linux/tick.h index 9c085dc12ae9..f9a2d2687a46 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -1,7 +1,5 @@ -/* linux/include/linux/tick.h - * - * This file contains the structure definitions for tick related functions - * +/* + * Tick related global functions */ #ifndef _LINUX_TICK_H #define _LINUX_TICK_H @@ -9,13 +7,12 @@ #include #include #include -#include #include #include #include +/* ARM BL switcher abuse support */ #ifdef CONFIG_GENERIC_CLOCKEVENTS - enum tick_device_mode { TICKDEV_MODE_PERIODIC, TICKDEV_MODE_ONESHOT, @@ -25,133 +22,38 @@ struct tick_device { struct clock_event_device *evtdev; enum tick_device_mode mode; }; - -enum tick_nohz_mode { - NOHZ_MODE_INACTIVE, - NOHZ_MODE_LOWRES, - NOHZ_MODE_HIGHRES, -}; - -/** - * struct tick_sched - sched tick emulation and no idle tick control/stats - * @sched_timer: hrtimer to schedule the periodic tick in high - * resolution mode - * @last_tick: Store the last tick expiry time when the tick - * timer is modified for nohz sleeps. This is necessary - * to resume the tick timer operation in the timeline - * when the CPU returns from nohz sleep. 
- * @tick_stopped: Indicator that the idle tick has been stopped - * @idle_jiffies: jiffies at the entry to idle for idle time accounting - * @idle_calls: Total number of idle calls - * @idle_sleeps: Number of idle calls, where the sched tick was stopped - * @idle_entrytime: Time when the idle call was entered - * @idle_waketime: Time when the idle was interrupted - * @idle_exittime: Time when the idle state was left - * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped - * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding - * @sleep_length: Duration of the current idle sleep - * @do_timer_lst: CPU was the last one doing do_timer before going idle - */ -struct tick_sched { - struct hrtimer sched_timer; - unsigned long check_clocks; - enum tick_nohz_mode nohz_mode; - ktime_t last_tick; - int inidle; - int tick_stopped; - unsigned long idle_jiffies; - unsigned long idle_calls; - unsigned long idle_sleeps; - int idle_active; - ktime_t idle_entrytime; - ktime_t idle_waketime; - ktime_t idle_exittime; - ktime_t idle_sleeptime; - ktime_t iowait_sleeptime; - ktime_t sleep_length; - unsigned long last_jiffies; - unsigned long next_jiffies; - ktime_t idle_expires; - int do_timer_last; -}; - -extern void __init tick_init(void); -extern int tick_is_oneshot_available(void); extern struct tick_device *tick_get_device(int cpu); +#endif +#ifdef CONFIG_GENERIC_CLOCKEVENTS +extern void __init tick_init(void); extern void tick_freeze(void); extern void tick_unfreeze(void); +#else /* CONFIG_GENERIC_CLOCKEVENTS */ +static inline void tick_init(void) { } +static inline void tick_freeze(void) { } +static inline void tick_unfreeze(void) { } +#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ -# ifdef CONFIG_HIGH_RES_TIMERS -extern int tick_init_highres(void); -extern int tick_program_event(ktime_t expires, int force); -extern void tick_setup_sched_timer(void); -# endif - -# if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS -extern void tick_cancel_sched_timer(int cpu); -# else -static inline void tick_cancel_sched_timer(int cpu) { } -# endif - -# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST -extern struct tick_device *tick_get_broadcast_device(void); -extern struct cpumask *tick_get_broadcast_mask(void); - -# ifdef CONFIG_TICK_ONESHOT -extern struct cpumask *tick_get_broadcast_oneshot_mask(void); -# endif - -# endif /* BROADCAST */ - -# ifdef CONFIG_TICK_ONESHOT -extern void tick_clock_notify(void); -extern int tick_check_oneshot_change(int allow_nohz); -extern struct tick_sched *tick_get_tick_sched(int cpu); +#ifdef CONFIG_TICK_ONESHOT extern void tick_irq_enter(void); -extern int tick_oneshot_mode_active(void); # ifndef arch_needs_cpu # define arch_needs_cpu() (0) # endif # else -static inline void tick_clock_notify(void) { } -static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } -static inline void tick_irq_enter(void) { } -static inline int tick_oneshot_mode_active(void) { return 0; } -# endif - -#else /* CONFIG_GENERIC_CLOCKEVENTS */ -static inline void tick_init(void) { } -static inline void tick_freeze(void) { } -static inline void tick_unfreeze(void) { } -static inline void tick_cancel_sched_timer(int cpu) { } -static inline void tick_clock_notify(void) { } -static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } static inline void tick_irq_enter(void) { } -static inline int tick_oneshot_mode_active(void) { return 0; } -#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ - -# ifdef CONFIG_NO_HZ_COMMON 
-DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched); - -static inline int tick_nohz_tick_stopped(void) -{ - return __this_cpu_read(tick_cpu_sched.tick_stopped); -} +#endif +#ifdef CONFIG_NO_HZ_COMMON +extern int tick_nohz_tick_stopped(void); extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); extern ktime_t tick_nohz_get_sleep_length(void); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); - -# else /* !CONFIG_NO_HZ_COMMON */ -static inline int tick_nohz_tick_stopped(void) -{ - return 0; -} - +#else /* !CONFIG_NO_HZ_COMMON */ +static inline int tick_nohz_tick_stopped(void) { return 0; } static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } @@ -163,7 +65,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void) } static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } -# endif /* !CONFIG_NO_HZ_COMMON */ +#endif /* !CONFIG_NO_HZ_COMMON */ #ifdef CONFIG_NO_HZ_FULL extern bool tick_nohz_full_running; diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 8b4010f0b1b4..c3be3c71bbad 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -31,7 +31,7 @@ #include #include -#include "timekeeping.h" +#include "tick-internal.h" #include "timekeeping_internal.h" /** diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index bee0c1f78091..721d29b99d10 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -54,7 +54,7 @@ #include -#include "timekeeping.h" +#include "tick-internal.h" /* * The timer bases: diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index d86eb8d485e9..dd2c45d057b9 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -5,6 +5,7 @@ #include #include "timekeeping.h" +#include "tick-sched.h" #ifdef CONFIG_GENERIC_CLOCKEVENTS @@ -26,6 +27,7 @@ extern void tick_resume(void); extern bool tick_check_replacement(struct clock_event_device *curdev, struct clock_event_device *newdev); extern void tick_install_replacement(struct clock_event_device *dev); +extern int tick_is_oneshot_available(void); extern int clockevents_tick_resume(struct clock_event_device *dev); /* Check, if the device is functional or a dummy for broadcast */ @@ -35,6 +37,9 @@ static inline int tick_device_is_functional(struct clock_event_device *dev) } extern void clockevents_shutdown(struct clock_event_device *dev); +extern void clockevents_exchange_device(struct clock_event_device *old, + struct clock_event_device *new); +extern void clockevents_handle_noop(struct clock_event_device *dev); extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq); extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt); #endif /* GENERIC_CLOCKEVENTS */ @@ -49,6 +54,10 @@ extern void tick_oneshot_notify(void); extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); extern void tick_resume_oneshot(void); static inline bool tick_oneshot_possible(void) { return true; } +extern int tick_oneshot_mode_active(void); +extern void tick_clock_notify(void); +extern int tick_check_oneshot_change(int allow_nohz); +extern int tick_init_highres(void); #else /* !ONESHOT */ static inline void tick_setup_oneshot(struct clock_event_device *newdev, @@ -58,6 +67,9 @@ static inline void tick_resume_oneshot(void) { 
BUG(); } static inline int tick_program_event(ktime_t expires, int force) { return 0; } static inline void tick_oneshot_notify(void) { } static inline bool tick_oneshot_possible(void) { return false; } +static inline int tick_oneshot_mode_active(void) { return 0; } +static inline void tick_clock_notify(void) { } +static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } #endif /* !TICK_ONESHOT */ /* Broadcasting support */ @@ -72,6 +84,8 @@ extern int tick_resume_broadcast(void); extern void tick_broadcast_init(void); extern void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); extern int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq); +extern struct tick_device *tick_get_broadcast_device(void); +extern struct cpumask *tick_get_broadcast_mask(void); #else /* !BROADCAST */ static inline void tick_install_broadcast_device(struct clock_event_device *dev) { } static inline int tick_is_broadcast_device(struct clock_event_device *dev) { return 0; } @@ -101,6 +115,7 @@ extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); extern int tick_broadcast_oneshot_active(void); extern void tick_check_oneshot_broadcast_this_cpu(void); bool tick_broadcast_oneshot_available(void); +extern struct cpumask *tick_get_broadcast_oneshot_mask(void); #else /* BROADCAST && ONESHOT */ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index a4c4edac4528..914259128145 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -34,7 +34,7 @@ /* * Per cpu nohz control structure */ -DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); +static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); /* * The time, when the last jiffy update happened. Protected by jiffies_lock. @@ -416,6 +416,11 @@ static int __init setup_tick_nohz(char *str) __setup("nohz=", setup_tick_nohz); +int tick_nohz_tick_stopped(void) +{ + return __this_cpu_read(tick_cpu_sched.tick_stopped); +} + /** * tick_nohz_update_jiffies - update jiffies when idle was interrupted * diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h new file mode 100644 index 000000000000..930743249127 --- /dev/null +++ b/kernel/time/tick-sched.h @@ -0,0 +1,64 @@ +#ifndef _TICK_SCHED_H +#define _TICK_SCHED_H + +#include + +enum tick_nohz_mode { + NOHZ_MODE_INACTIVE, + NOHZ_MODE_LOWRES, + NOHZ_MODE_HIGHRES, +}; + +/** + * struct tick_sched - sched tick emulation and no idle tick control/stats + * @sched_timer: hrtimer to schedule the periodic tick in high + * resolution mode + * @last_tick: Store the last tick expiry time when the tick + * timer is modified for nohz sleeps. This is necessary + * to resume the tick timer operation in the timeline + * when the CPU returns from nohz sleep. 
+ * @tick_stopped: Indicator that the idle tick has been stopped + * @idle_jiffies: jiffies at the entry to idle for idle time accounting + * @idle_calls: Total number of idle calls + * @idle_sleeps: Number of idle calls, where the sched tick was stopped + * @idle_entrytime: Time when the idle call was entered + * @idle_waketime: Time when the idle was interrupted + * @idle_exittime: Time when the idle state was left + * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped + * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding + * @sleep_length: Duration of the current idle sleep + * @do_timer_lst: CPU was the last one doing do_timer before going idle + */ +struct tick_sched { + struct hrtimer sched_timer; + unsigned long check_clocks; + enum tick_nohz_mode nohz_mode; + ktime_t last_tick; + int inidle; + int tick_stopped; + unsigned long idle_jiffies; + unsigned long idle_calls; + unsigned long idle_sleeps; + int idle_active; + ktime_t idle_entrytime; + ktime_t idle_waketime; + ktime_t idle_exittime; + ktime_t idle_sleeptime; + ktime_t iowait_sleeptime; + ktime_t sleep_length; + unsigned long last_jiffies; + unsigned long next_jiffies; + ktime_t idle_expires; + int do_timer_last; +}; + +extern struct tick_sched *tick_get_tick_sched(int cpu); + +extern void tick_setup_sched_timer(void); +#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS +extern void tick_cancel_sched_timer(int cpu); +#else +static inline void tick_cancel_sched_timer(int cpu) { } +#endif + +#endif diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 05aa5590106a..e878c2e0ba45 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -16,10 +16,10 @@ #include #include #include -#include #include +#include "tick-internal.h" struct timer_list_iter { int cpu; -- cgit v1.2.3