From 3eb0f5193b497083391aa05d35210d5645211eef Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman"
Date: Tue, 17 Apr 2018 15:26:37 -0500
Subject: signal: Ensure every siginfo we send has all bits initialized

Call clear_siginfo to ensure every stack-allocated siginfo is properly
initialized before being passed to the signal sending functions.

Note: It is not safe to depend on C initializers to initialize struct
siginfo on the stack because C is allowed to skip holes when
initializing a structure.

The initialization of struct siginfo in tracehook_report_syscall_exit
was moved from the helper user_single_step_siginfo into
tracehook_report_syscall_exit itself, to make it clear that the local
variable siginfo gets fully initialized.

In a few cases the scope of struct siginfo has been reduced to make it
clear that siginfo is not used on other paths in the function in which
it is declared.

Instances of using memset to initialize siginfo have been replaced
with calls to clear_siginfo for clarity.

Signed-off-by: "Eric W. Biederman"
---
 arch/powerpc/kernel/process.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1237f13fed51..26ea9793d290 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -632,6 +632,7 @@ void do_break (struct pt_regs *regs, unsigned long address,
 	hw_breakpoint_disable();
 
 	/* Deliver the signal to userspace */
+	clear_siginfo(&info);
 	info.si_signo = SIGTRAP;
 	info.si_errno = 0;
 	info.si_code = TRAP_HWBKPT;
--
cgit v1.2.3
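For reference, the complete pattern this hunk enforces in do_break() looks roughly like the sketch below. Only the clear_siginfo() line is from the patch; the wrapper function name and the si_addr/force_sig_info() tail are assumed from surrounding code the hunk does not show.

static void deliver_hwbkpt_signal(unsigned long address)
{
	siginfo_t info;

	/*
	 * Zero the whole structure first, including any padding holes
	 * that a C designated initializer would be allowed to skip.
	 */
	clear_siginfo(&info);

	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = (void __user *)address;

	force_sig_info(SIGTRAP, &info, current);
}

Because clear_siginfo() is a full memset of the structure, any field the explicit assignments do not cover reaches userspace as zero rather than as stack garbage.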
From d1c7211281c5e1799f00b2228157530e0f7a671c Mon Sep 17 00:00:00 2001
From: Simon Guo
Date: Wed, 23 May 2018 15:01:44 +0800
Subject: powerpc: Export msr_check_and_set() to modules

PR KVM will need to reuse msr_check_and_set(). This patch exports the
API for that reuse.

Signed-off-by: Simon Guo
Reviewed-by: Paul Mackerras
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/process.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1237f13fed51..25db000fa5b3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -154,6 +154,7 @@ unsigned long msr_check_and_set(unsigned long bits)
 
 	return newmsr;
 }
+EXPORT_SYMBOL_GPL(msr_check_and_set);
 
 void __msr_check_and_clear(unsigned long bits)
 {
--
cgit v1.2.3

From 36d632ea831fd2fa3cb62599a465825f59076f64 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Sat, 5 May 2018 03:19:29 +1000
Subject: powerpc/64: remove start_tb and accum_tb from thread_struct

These fields are only written to.

Signed-off-by: Nicholas Piggin
Signed-off-by: Michael Ellerman
---
 arch/powerpc/include/asm/processor.h | 4 ----
 arch/powerpc/kernel/process.c        | 6 +-----
 2 files changed, 1 insertion(+), 9 deletions(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index c4b36a494a63..eff269adfa71 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -264,10 +264,6 @@ struct thread_struct {
 	struct thread_fp_state	*fp_save_area;
 	int		fpexc_mode;	/* floating-point exception mode */
 	unsigned int	align_ctl;	/* alignment handling control */
-#ifdef CONFIG_PPC64
-	unsigned long	start_tb;	/* Start purr when proc switched in */
-	unsigned long	accum_tb;	/* Total accumulated purr for process */
-#endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	struct perf_event *ptrace_bps[HBP_NUM];
 	/*
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 25db000fa5b3..f4e5291584c5 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1188,11 +1188,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
 		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
-		long unsigned start_tb, current_tb;
-		start_tb = old_thread->start_tb;
-		cu->current_tb = current_tb = mfspr(SPRN_PURR);
-		old_thread->accum_tb += (current_tb - start_tb);
-		new_thread->start_tb = current_tb;
+		cu->current_tb = mfspr(SPRN_PURR);
 	}
 #endif /* CONFIG_PPC64 */
 
--
cgit v1.2.3

From 3d3a6021ddcbe9c31520e4e7b65e5ce5dc58274d Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Sat, 5 May 2018 03:19:30 +1000
Subject: powerpc/pseries: lparcfg calculate PURR on demand

For SPLPAR, lparcfg provides a sum of PURR registers for all CPUs.
Currently this is done by reading PURR in context switch and timer
interrupt, and storing that into a per-CPU variable. These are summed
to provide the value.

This does not work with all timer schemes (e.g., NO_HZ_FULL), and it
is sub-optimal for performance because it reads the PURR register on
every context switch, although that's been difficult to distinguish
from noise in the context_switch microbenchmark.

This patch implements the sum by calling a function on each CPU, to
read and add PURR values of each CPU.
Signed-off-by: Nicholas Piggin
Signed-off-by: Michael Ellerman
---
 arch/powerpc/include/asm/time.h          |  8 --------
 arch/powerpc/kernel/process.c            | 14 --------------
 arch/powerpc/kernel/time.c               |  8 --------
 arch/powerpc/platforms/pseries/lparcfg.c | 18 ++++++++++--------
 4 files changed, 10 insertions(+), 38 deletions(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index db546c034905..c965c79765c4 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -196,14 +196,6 @@ extern u64 mulhdu(u64, u64);
 extern void div128_by_32(u64 dividend_high, u64 dividend_low,
 			 unsigned divisor, struct div_result *dr);
 
-/* Used to store Processor Utilization register (purr) values */
-
-struct cpu_usage {
-	u64 current_tb;		/* Holds the current purr register values */
-};
-
-DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
-
 extern void secondary_cpu_time_init(void);
 extern void __init time_init(void);
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f4e5291584c5..2a7fa5000cce 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -846,10 +846,6 @@ bool ppc_breakpoint_available(void)
 }
 EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
 
-#ifdef CONFIG_PPC64
-DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
-#endif
-
 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
 			      struct arch_hw_breakpoint *b)
 {
@@ -1182,16 +1178,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 	WARN_ON(!irqs_disabled());
 
-#ifdef CONFIG_PPC64
-	/*
-	 * Collect processor utilization data per process
-	 */
-	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
-		cu->current_tb = mfspr(SPRN_PURR);
-	}
-#endif /* CONFIG_PPC64 */
-
 #ifdef CONFIG_PPC_BOOK3S_64
 	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	if (batch->active) {
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e7e8611e8863..1fe6a24357e7 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -597,14 +597,6 @@ static void __timer_interrupt(void)
 		__this_cpu_inc(irq_stat.timer_irqs_others);
 	}
 
-#ifdef CONFIG_PPC64
-	/* collect purr register values often, for accurate calculations */
-	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
-		cu->current_tb = mfspr(SPRN_PURR);
-	}
-#endif
-
 	trace_timer_interrupt_exit(regs);
 }
 
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index c508c938dc71..7c872dc01bdb 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -52,18 +52,20 @@
  * Track sum of all purrs across all processors. This is used to further
  * calculate usage values by different applications
  */
+static void cpu_get_purr(void *arg)
+{
+	atomic64_t *sum = arg;
+
+	atomic64_add(mfspr(SPRN_PURR), sum);
+}
+
 static unsigned long get_purr(void)
 {
-	unsigned long sum_purr = 0;
-	int cpu;
+	atomic64_t purr = ATOMIC64_INIT(0);
 
-	for_each_possible_cpu(cpu) {
-		struct cpu_usage *cu;
+	on_each_cpu(cpu_get_purr, &purr, 1);
 
-		cu = &per_cpu(cpu_usage_array, cpu);
-		sum_purr += cu->current_tb;
-	}
-	return sum_purr;
+	return atomic64_read(&purr);
 }
 
 /*
--
cgit v1.2.3
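The cpu_get_purr()/get_purr() pair above is a reusable recipe for summing a per-CPU SPR on demand: hand an atomic64_t accumulator to on_each_cpu() and let every CPU add its own register value. A minimal sketch of the same recipe applied to a different register follows; the sum_spurr() name and the choice of SPRN_SPURR are illustrative assumptions, not part of the patch.

/* Runs on every online CPU; the final argument of on_each_cpu() is 1,
 * so the caller waits until all CPUs have contributed. */
static void cpu_get_spurr(void *arg)
{
	atomic64_t *sum = arg;

	atomic64_add(mfspr(SPRN_SPURR), sum);
}

static unsigned long sum_spurr(void)
{
	atomic64_t spurr = ATOMIC64_INIT(0);

	on_each_cpu(cpu_get_spurr, &spurr, 1);

	return atomic64_read(&spurr);
}

The atomic accumulator matters because the callbacks run concurrently on all CPUs; a plain unsigned long sum would race.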
From 3130a7bb6eb595f2d963976a4d3e57db77bcf06f Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Thu, 10 May 2018 11:04:24 +1000
Subject: powerpc/64: change softe to irqmask in show_regs and xmon

When the soft enabled flag was changed to a soft disable mask, the xmon
and register dump code was not updated to reflect that, which is
confusing: 'SOFTE: 1' previously meant interrupts were soft-enabled,
but it now means the opposite, that the general interrupt type has been
disabled.

Fix this by using the name irqmask, and printing it in hex.

Signed-off-by: Nicholas Piggin
Acked-by: Balbir Singh
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/process.c | 2 +-
 arch/powerpc/xmon/xmon.c      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2a7fa5000cce..8f35b30956f4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1420,7 +1420,7 @@ void show_regs(struct pt_regs * regs)
 		pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
 #endif
 #ifdef CONFIG_PPC64
-	pr_cont("SOFTE: %ld ", regs->softe);
+	pr_cont("IRQMASK: %lx ", regs->softe);
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (MSR_TM_ACTIVE(regs->msr))
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index d94a41254b11..0561c14b276b 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1637,7 +1637,7 @@ static void excprint(struct pt_regs *fp)
 
 	printf("    current = 0x%px\n", current);
 #ifdef CONFIG_PPC64
-	printf("    paca    = 0x%px\t softe: %d\t irq_happened: 0x%02x\n",
+	printf("    paca    = 0x%px\t irqmask: 0x%02x\t irq_happened: 0x%02x\n",
 	       local_paca, local_paca->irq_soft_mask, local_paca->irq_happened);
 #endif
 	if (current) {
--
cgit v1.2.3
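Since the printed value is now a mask rather than a boolean, reading an IRQMASK line means knowing the soft-mask bits. The sketch below shows one way the value could be decoded, assuming the IRQS_ENABLED/IRQS_DISABLED/IRQS_PMI_DISABLED constants from arch/powerpc/include/asm/hw_irq.h of this era; the decode_irqmask() helper is illustrative and not part of the kernel.

/* Interpret the hex IRQMASK value printed by show_regs()/xmon. */
static void decode_irqmask(unsigned long irqmask)
{
	if (irqmask == IRQS_ENABLED)		/* 0: nothing masked */
		pr_info("interrupts soft-enabled\n");
	if (irqmask & IRQS_DISABLED)		/* general interrupts masked */
		pr_info("general interrupts soft-disabled\n");
	if (irqmask & IRQS_PMI_DISABLED)	/* performance monitor interrupts masked */
		pr_info("PMIs soft-disabled\n");
}

So 'IRQMASK: 1' in a register dump means general interrupts were soft-disabled at the time, the opposite of what 'SOFTE: 1' used to indicate.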
From 3449f191ca9be1a6ac9757b8ab55f239092362e5 Mon Sep 17 00:00:00 2001
From: Alastair D'Silva
Date: Fri, 11 May 2018 16:12:58 +1000
Subject: powerpc: Use TIDR CPU feature to control TIDR allocation

Switch the use of TIDR to depend on its CPU feature, rather than
assuming it is available based on architecture.

Signed-off-by: Alastair D'Silva
Reviewed-by: Frederic Barrat
Reviewed-by: Andrew Donnellan
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/process.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8f35b30956f4..e8b1d3c30669 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1151,7 +1151,7 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 			mtspr(SPRN_TAR, new_thread->tar);
 	}
 
-	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
 	    old_thread->tidr != new_thread->tidr)
 		mtspr(SPRN_TIDR, new_thread->tidr);
 #endif
@@ -1553,7 +1553,7 @@ void clear_thread_tidr(struct task_struct *t)
 	if (!t->thread.tidr)
 		return;
 
-	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+	if (!cpu_has_feature(CPU_FTR_P9_TIDR)) {
 		WARN_ON_ONCE(1);
 		return;
 	}
@@ -1576,7 +1576,7 @@ int set_thread_tidr(struct task_struct *t)
 {
 	int rc;
 
-	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
 		return -EINVAL;
 
 	if (t != current)
--
cgit v1.2.3

From 71cc64a85d8d99936f6851709a07f18c87a0adab Mon Sep 17 00:00:00 2001
From: Alastair D'Silva
Date: Fri, 11 May 2018 16:12:59 +1000
Subject: powerpc: use task_pid_nr() for TID allocation

The current implementation of TID allocation, using a global IDR, may
result in an errant process starving the system of available TIDs.
Instead, use task_pid_nr(), as mentioned by the original author. The
scenario described that prevented its use is no longer applicable, as
set_thread_tidr() can only be called after the task struct has been
populated.

In the unlikely event that 2 threads share the TID and are waiting,
all potential outcomes have been determined safe.

Signed-off-by: Alastair D'Silva
Reviewed-by: Frederic Barrat
Reviewed-by: Andrew Donnellan
Signed-off-by: Michael Ellerman
---
 arch/powerpc/include/asm/switch_to.h |   1 -
 arch/powerpc/kernel/process.c        | 122 ++++++++----------------------
 2 files changed, 28 insertions(+), 95 deletions(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index be8c9fa23983..5b03d8a82409 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -94,6 +94,5 @@ static inline void clear_task_ebb(struct task_struct *t)
 extern int set_thread_uses_vas(void);
 
 extern int set_thread_tidr(struct task_struct *t);
-extern void clear_thread_tidr(struct task_struct *t);
 
 #endif /* _ASM_POWERPC_SWITCH_TO_H */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e8b1d3c30669..ebcd3956f2be 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1479,103 +1479,41 @@ int set_thread_uses_vas(void)
 }
 
 #ifdef CONFIG_PPC64
-static DEFINE_SPINLOCK(vas_thread_id_lock);
-static DEFINE_IDA(vas_thread_ida);
-
-/*
- * We need to assign a unique thread id to each thread in a process.
+/**
+ * Assign a TIDR (thread ID) for task @t and set it in the thread
+ * structure. For now, we only support setting TIDR for 'current' task.
  *
- * This thread id, referred to as TIDR, and separate from the Linux's tgid,
- * is intended to be used to direct an ASB_Notify from the hardware to the
- * thread, when a suitable event occurs in the system.
+ * Since the TID value is a truncated form of it PID, it is possible
+ * (but unlikely) for 2 threads to have the same TID. In the unlikely event
+ * that 2 threads share the same TID and are waiting, one of the following
+ * cases will happen:
  *
- * One such event is a "paste" instruction in the context of Fast Thread
- * Wakeup (aka Core-to-core wake up in the Virtual Accelerator Switchboard
- * (VAS) in POWER9.
+ * 1. The correct thread is running, the wrong thread is not
+ * In this situation, the correct thread is woken and proceeds to pass it's
+ * condition check.
  *
- * To get a unique TIDR per process we could simply reuse task_pid_nr() but
- * the problem is that task_pid_nr() is not yet available copy_thread() is
- * called. Fixing that would require changing more intrusive arch-neutral
- * code in code path in copy_process()?.
+ * 2. Neither threads are running
+ * In this situation, neither thread will be woken. When scheduled, the waiting
+ * threads will execute either a wait, which will return immediately, followed
+ * by a condition check, which will pass for the correct thread and fail
+ * for the wrong thread, or they will execute the condition check immediately.
  *
- * Further, to assign unique TIDRs within each process, we need an atomic
- * field (or an IDR) in task_struct, which again intrudes into the arch-
- * neutral code. So try to assign globally unique TIDRs for now.
+ * 3. The wrong thread is running, the correct thread is not
+ * The wrong thread will be woken, but will fail it's condition check and
+ * re-execute wait. The correct thread, when scheduled, will execute either
+ * it's condition check (which will pass), or wait, which returns immediately
+ * when called the first time after the thread is scheduled, followed by it's
+ * condition check (which will pass).
  *
- * NOTE: TIDR 0 indicates that the thread does not need a TIDR value.
- *	 For now, only threads that expect to be notified by the VAS
- *	 hardware need a TIDR value and we assign values > 0 for those.
- */
-#define MAX_THREAD_CONTEXT	((1 << 16) - 1)
-static int assign_thread_tidr(void)
-{
-	int index;
-	int err;
-	unsigned long flags;
-
-again:
-	if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL))
-		return -ENOMEM;
-
-	spin_lock_irqsave(&vas_thread_id_lock, flags);
-	err = ida_get_new_above(&vas_thread_ida, 1, &index);
-	spin_unlock_irqrestore(&vas_thread_id_lock, flags);
-
-	if (err == -EAGAIN)
-		goto again;
-	else if (err)
-		return err;
-
-	if (index > MAX_THREAD_CONTEXT) {
-		spin_lock_irqsave(&vas_thread_id_lock, flags);
-		ida_remove(&vas_thread_ida, index);
-		spin_unlock_irqrestore(&vas_thread_id_lock, flags);
-		return -ENOMEM;
-	}
-
-	return index;
-}
-
-static void free_thread_tidr(int id)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&vas_thread_id_lock, flags);
-	ida_remove(&vas_thread_ida, id);
-	spin_unlock_irqrestore(&vas_thread_id_lock, flags);
-}
-
-/*
- * Clear any TIDR value assigned to this thread.
- */
-void clear_thread_tidr(struct task_struct *t)
-{
-	if (!t->thread.tidr)
-		return;
-
-	if (!cpu_has_feature(CPU_FTR_P9_TIDR)) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
-	mtspr(SPRN_TIDR, 0);
-	free_thread_tidr(t->thread.tidr);
-	t->thread.tidr = 0;
-}
-
-void arch_release_task_struct(struct task_struct *t)
-{
-	clear_thread_tidr(t);
-}
-
-/*
- * Assign a unique TIDR (thread id) for task @t and set it in the thread
- * structure. For now, we only support setting TIDR for 'current' task.
+ * 4. Both threads are running
+ * Both threads will be woken. The wrong thread will fail it's condition check
+ * and execute another wait, while the correct thread will pass it's condition
+ * check.
+ *
+ * @t: the task to set the thread ID for
  */
 int set_thread_tidr(struct task_struct *t)
 {
-	int rc;
-
 	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
 		return -EINVAL;
 
@@ -1585,11 +1523,7 @@ int set_thread_tidr(struct task_struct *t)
 	if (t->thread.tidr)
 		return 0;
 
-	rc = assign_thread_tidr();
-	if (rc < 0)
-		return rc;
-
-	t->thread.tidr = rc;
+	t->thread.tidr = (u16)task_pid_nr(t);
 	mtspr(SPRN_TIDR, t->thread.tidr);
 
 	return 0;
--
cgit v1.2.3
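A note on the truncation in the final hunk: the TIDR value is 16 bits wide (the removed code capped it at MAX_THREAD_CONTEXT, (1 << 16) - 1), so the new assignment keeps only the low 16 bits of the PID. Collisions therefore become possible once kernel.pid_max is raised above 65536, which is exactly the "two threads share the TID" case the commit message argues is safe. A small standalone illustration, with made-up PID values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Two hypothetical PIDs that differ only above bit 15 ... */
	uint32_t pid_a = 0x00001234;
	uint32_t pid_b = 0x00011234;

	/* ... collapse to the same TIDR after the (u16) truncation. */
	uint16_t tidr_a = (uint16_t)pid_a;
	uint16_t tidr_b = (uint16_t)pid_b;

	printf("tidr_a = %hu, tidr_b = %hu\n", tidr_a, tidr_b); /* both 4660 */
	return 0;
}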