author     Rusty Russell <rusty@rustcorp.com.au>           2003-07-06 23:01:50 -0700
committer  Steve French <cifs.adm@hostme.bitkeeper.com>    2003-07-06 23:01:50 -0700
commit     8a6879c603dc4ea40f89fb1bda8f2b5039e19396 (patch)
tree       c0d883a41eff495d708bba2b95b2839065e8ab90 /include
parent     b993be7e4517f328fd6bd8bcea2f038c894a292e (diff)
[PATCH] switch_mm and enter_lazy_tlb: remove cpu arg
switch_mm and enter_lazy_tlb take a CPU arg, which is always smp_processor_id(). This is misleading, and pointless if they use per-cpu variables or other optimizations. gcc will eliminate redundant smp_processor_id() calls (in inline functions) anyway. This removes that arg from all the architectures.
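
The pattern the patch applies is identical everywhere: delete the trailing cpu parameter and, in bodies that still need a CPU number, fetch it locally with smp_processor_id(). A minimal standalone sketch of the before/after shape — the stub types and the fixed-zero smp_processor_id() stand in for the real kernel ones, and the cpu_vm_mask bit twiddling is illustrative shorthand for the kernel's clear_bit/set_bit, not the actual implementation:

#include <stdio.h>

struct mm_struct { unsigned long cpu_vm_mask; };
struct task_struct { struct mm_struct *mm; };

static unsigned smp_processor_id(void) { return 0; } /* stub: always CPU 0 */

/* Old style: every caller passed smp_processor_id() explicitly. */
static void switch_mm_old(struct mm_struct *prev, struct mm_struct *next,
                          struct task_struct *tsk, unsigned cpu)
{
	(void)tsk;
	if (prev != next) {
		prev->cpu_vm_mask &= ~(1UL << cpu); /* cf. clear_bit(cpu, &prev->cpu_vm_mask) */
		next->cpu_vm_mask |= 1UL << cpu;    /* cf. set_bit(cpu, &next->cpu_vm_mask) */
	}
}

/* New style: the CPU number is derived where it is used. */
static void switch_mm_new(struct mm_struct *prev, struct mm_struct *next,
                          struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	(void)tsk;
	if (prev != next) {
		prev->cpu_vm_mask &= ~(1UL << cpu);
		next->cpu_vm_mask |= 1UL << cpu;
	}
}

int main(void)
{
	struct mm_struct a = { 1UL << 0 }, b = { 0 };

	switch_mm_old(&a, &b, NULL, smp_processor_id());
	switch_mm_new(&b, &a, NULL);
	printf("a=%lx b=%lx\n", a.cpu_vm_mask, b.cpu_vm_mask); /* a=1 b=0 */
	return 0;
}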
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/machvec.h          |  2
-rw-r--r--  include/asm-alpha/mmu_context.h      | 15
-rw-r--r--  include/asm-arm/mmu_context.h        |  4
-rw-r--r--  include/asm-arm26/mmu_context.h      |  4
-rw-r--r--  include/asm-cris/mmu_context.h       |  6
-rw-r--r--  include/asm-h8300/mmu_context.h      |  4
-rw-r--r--  include/asm-i386/mmu_context.h       | 11
-rw-r--r--  include/asm-ia64/mmu_context.h       |  4
-rw-r--r--  include/asm-m68k/mmu_context.h       |  6
-rw-r--r--  include/asm-m68knommu/mmu_context.h  |  4
-rw-r--r--  include/asm-mips/mmu_context.h       |  5
-rw-r--r--  include/asm-mips64/mmu_context.h     |  2
-rw-r--r--  include/asm-parisc/mmu_context.h     |  6
-rw-r--r--  include/asm-ppc/mmu_context.h        |  4
-rw-r--r--  include/asm-ppc64/mmu_context.h      |  8
-rw-r--r--  include/asm-s390/mmu_context.h       |  8
-rw-r--r--  include/asm-sh/mmu_context.h         | 10
-rw-r--r--  include/asm-sparc/mmu_context.h      |  8
-rw-r--r--  include/asm-sparc64/mmu_context.h    |  6
-rw-r--r--  include/asm-um/mmu_context.h         |  6
-rw-r--r--  include/asm-v850/mmu_context.h       |  4
-rw-r--r--  include/asm-x86_64/mmu_context.h     |  9
22 files changed, 73 insertions(+), 63 deletions(-)
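
The first hunk below changes a function-pointer field: on Alpha, CONFIG_ALPHA_GENERIC kernels dispatch switch_mm through the machine vector, so the mv_switch_mm type has to drop the fourth argument in lockstep with ev4_switch_mm/ev5_switch_mm. A standalone sketch of that dispatch shape (stub types and a hypothetical body, illustrative only):

#include <stdio.h>

struct mm_struct { int dummy; };
struct task_struct { int dummy; };

/* After the patch, the pointer type and the implementations agree on
   the three-argument signature; a mismatch would be a compile error. */
struct alpha_machine_vector {
	void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
	                     struct task_struct *);
};

/* Hypothetical stand-in for the real ev5_switch_mm body. */
static void ev5_switch_mm(struct mm_struct *prev, struct mm_struct *next,
                          struct task_struct *tsk)
{
	(void)prev; (void)next; (void)tsk;
	printf("ev5 switch_mm called\n");
}

int main(void)
{
	struct alpha_machine_vector alpha_mv = { ev5_switch_mm };
	struct mm_struct a, b;

	/* Generic kernels expand switch_mm(a,b,c) to this indirect call,
	   cf. the CONFIG_ALPHA_GENERIC macro in the mmu_context.h hunk. */
	alpha_mv.mv_switch_mm(&a, &b, NULL);
	return 0;
}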
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
index 6d1b408e3ed5..f09f71909aa6 100644
--- a/include/asm-alpha/machvec.h
+++ b/include/asm-alpha/machvec.h
@@ -68,7 +68,7 @@ struct alpha_machine_vector
int (*mv_is_ioaddr)(unsigned long);
void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
- struct task_struct *, long);
+ struct task_struct *);
void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
void (*mv_flush_tlb_current)(struct mm_struct *);
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index a087254ea38a..3ae6408acaed 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -130,11 +130,12 @@ __get_new_mm_context(struct mm_struct *mm, long cpu)
__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *next, long cpu)
+ struct task_struct *next)
{
/* Check if our ASN is of an older version, and thus invalid. */
unsigned long asn;
unsigned long mmc;
+ long cpu = smp_processor_id();
#ifdef CONFIG_SMP
cpu_data[cpu].asn_lock = 1;
@@ -159,7 +160,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *next, long cpu)
+ struct task_struct *next)
{
/* As described, ASN's are broken for TLB usage. But we can
optimize for switching between threads -- if the mm is
@@ -174,7 +175,7 @@ ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
/* Do continue to allocate ASNs, because we can still use them
to avoid flushing the icache. */
- ev5_switch_mm(prev_mm, next_mm, next, cpu);
+ ev5_switch_mm(prev_mm, next_mm, next);
}
extern void __load_new_mm_context(struct mm_struct *);
@@ -212,14 +213,14 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
#define deactivate_mm(tsk,mm) do { } while (0)
#ifdef CONFIG_ALPHA_GENERIC
-# define switch_mm(a,b,c,d) alpha_mv.mv_switch_mm((a),(b),(c),(d))
+# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
-# define switch_mm(a,b,c,d) ev4_switch_mm((a),(b),(c),(d))
+# define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c))
# define activate_mm(x,y) ev4_activate_mm((x),(y))
# else
-# define switch_mm(a,b,c,d) ev5_switch_mm((a),(b),(c),(d))
+# define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
# define activate_mm(x,y) ev5_activate_mm((x),(y))
# endif
#endif
@@ -245,7 +246,7 @@ destroy_context(struct mm_struct *mm)
}
static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
tsk->thread_info->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index 3b3b473c668f..e0340f5fbf32 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -28,7 +28,7 @@
* tsk->mm will be NULL
*/
static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -40,7 +40,7 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
*/
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk, unsigned int cpu)
+ struct task_struct *tsk)
{
if (prev != next) {
cpu_switch_mm(next->pgd, next);
diff --git a/include/asm-arm26/mmu_context.h b/include/asm-arm26/mmu_context.h
index 88b7b4f8f210..1a929bfe5c3a 100644
--- a/include/asm-arm26/mmu_context.h
+++ b/include/asm-arm26/mmu_context.h
@@ -26,7 +26,7 @@
* tsk->mm will be NULL
*/
static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -36,7 +36,7 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
*/
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk, unsigned int cpu)
+ struct task_struct *tsk)
{
cpu_switch_mm(next->pgd, next);
}
diff --git a/include/asm-cris/mmu_context.h b/include/asm-cris/mmu_context.h
index 6a6ea71a85cd..f9308c5bbd99 100644
--- a/include/asm-cris/mmu_context.h
+++ b/include/asm-cris/mmu_context.h
@@ -5,11 +5,11 @@ extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void get_mmu_context(struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk, int cpu);
+ struct task_struct *tsk);
#define deactivate_mm(tsk,mm) do { } while (0)
-#define activate_mm(prev,next) switch_mm((prev),(next),NULL,smp_processor_id())
+#define activate_mm(prev,next) switch_mm((prev),(next),NULL)
/* current active pgd - this is similar to other processors pgd
* registers like cr3 on the i386
@@ -17,7 +17,7 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
extern volatile pgd_t *current_pgd; /* defined in arch/cris/mm/fault.c */
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
diff --git a/include/asm-h8300/mmu_context.h b/include/asm-h8300/mmu_context.h
index ffdf723191cf..23b555b7b4b9 100644
--- a/include/asm-h8300/mmu_context.h
+++ b/include/asm-h8300/mmu_context.h
@@ -6,7 +6,7 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -20,7 +20,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
#define destroy_context(mm) do { } while(0)
#define deactivate_mm(tsk,mm) do { } while(0)
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
}
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 14b1fa1ebef8..938fc1364344 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -14,16 +14,21 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
+ unsigned cpu = smp_processor_id();
if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
#endif
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
{
+ int cpu = smp_processor_id();
+
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
clear_bit(cpu, &prev->cpu_vm_mask);
@@ -62,6 +67,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
#define activate_mm(prev, next) \
- switch_mm((prev),(next),NULL,smp_processor_id())
+ switch_mm((prev),(next),NULL)
#endif
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index a614a1dbbb61..dee1cd007f5a 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -71,7 +71,7 @@ DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
extern void wrap_mmu_context (struct mm_struct *mm);
static inline void
-enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -198,7 +198,7 @@ activate_mm (struct mm_struct *prev, struct mm_struct *next)
activate_context(next);
}
-#define switch_mm(prev_mm,next_mm,next_task,cpu) activate_mm(prev_mm, next_mm)
+#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm)
# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */
diff --git a/include/asm-m68k/mmu_context.h b/include/asm-m68k/mmu_context.h
index 04cfa101eb70..4983fb7b6a0c 100644
--- a/include/asm-m68k/mmu_context.h
+++ b/include/asm-m68k/mmu_context.h
@@ -3,7 +3,7 @@
#include <linux/config.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -79,7 +79,7 @@ extern inline void switch_mm_0460(struct mm_struct *mm)
asm volatile (".chip 68k");
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
if (prev != next) {
if (CPU_IS_020_OR_030)
@@ -137,7 +137,7 @@ static inline void activate_context(struct mm_struct *mm)
sun3_put_context(mm->context);
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
activate_context(tsk->mm);
}
diff --git a/include/asm-m68knommu/mmu_context.h b/include/asm-m68knommu/mmu_context.h
index a4286176513c..9bc0fd49b8aa 100644
--- a/include/asm-m68knommu/mmu_context.h
+++ b/include/asm-m68knommu/mmu_context.h
@@ -6,7 +6,7 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -19,7 +19,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
#define destroy_context(mm) do { } while(0)
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
}
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index cf19cd768f95..8d2153f323ef 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -49,7 +49,7 @@ extern unsigned long pgd_current[];
#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -92,9 +92,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk, unsigned cpu)
+ struct task_struct *tsk)
{
unsigned long flags;
+ unsigned cpu = smp_processor_id();
local_irq_save(flags);
diff --git a/include/asm-mips64/mmu_context.h b/include/asm-mips64/mmu_context.h
index 107e2459ccd0..b7ca8c085452 100644
--- a/include/asm-mips64/mmu_context.h
+++ b/include/asm-mips64/mmu_context.h
@@ -40,7 +40,7 @@ extern unsigned long pgd_current[];
#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
diff --git a/include/asm-parisc/mmu_context.h b/include/asm-parisc/mmu_context.h
index 1ff9e5f0582c..6327156282f2 100644
--- a/include/asm-parisc/mmu_context.h
+++ b/include/asm-parisc/mmu_context.h
@@ -6,7 +6,7 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -43,7 +43,7 @@ static inline void load_context(mm_context_t context)
#endif
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
if (prev != next) {
@@ -69,6 +69,6 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
if (next->context == 0)
next->context = alloc_sid();
- switch_mm(prev,next,current,0);
+ switch_mm(prev,next,current);
}
#endif
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index 131269863309..1e8176b2d122 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -48,7 +48,7 @@
-- Dan
*/
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -153,7 +153,7 @@ static inline void destroy_context(struct mm_struct *mm)
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk, int cpu)
+ struct task_struct *tsk)
{
tsk->thread.pgdir = next->pgd;
get_mmu_context(next);
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
index 1014f086e5e7..a84ee5812e9f 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-ppc64/mmu_context.h
@@ -56,7 +56,7 @@ struct mmu_context_queue_t {
extern struct mmu_context_queue_t mmu_context_queue;
static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -140,10 +140,10 @@ extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
*/
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk, int cpu)
+ struct task_struct *tsk)
{
flush_stab(tsk, next);
- set_bit(cpu, &next->cpu_vm_mask);
+ set_bit(smp_processor_id(), &next->cpu_vm_mask);
}
#define deactivate_mm(tsk,mm) do { } while (0)
@@ -153,7 +153,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
* the context for the new mm so we see the new mappings.
*/
#define activate_mm(active_mm, mm) \
- switch_mm(active_mm, mm, current, smp_processor_id());
+ switch_mm(active_mm, mm, current);
#define VSID_RANDOMIZER 42470972311
#define VSID_MASK 0xfffffffff
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 87be0aab7028..0e63fd521893 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -17,12 +17,12 @@
#define destroy_context(mm) flush_tlb_mm(mm)
static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *tsk, unsigned cpu)
+ struct task_struct *tsk)
{
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk, unsigned cpu)
+ struct task_struct *tsk)
{
unsigned long pgd;
@@ -42,7 +42,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
: : "m" (pgd) );
#endif /* __s390x__ */
}
- set_bit(cpu, &next->cpu_vm_mask);
+ set_bit(smp_processor_id(), &next->cpu_vm_mask);
}
#define deactivate_mm(tsk,mm) do { } while (0)
@@ -50,7 +50,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
extern inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
- switch_mm(prev, next, current, smp_processor_id());
+ switch_mm(prev, next, current);
}
#endif
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 17775400498b..c956ddea556b 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -129,7 +129,7 @@ static __inline__ void activate_context(struct mm_struct *mm)
(Currently not used) */
static __inline__ void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
- struct task_struct *tsk, unsigned int cpu)
+ struct task_struct *tsk)
{
if (likely(prev != next)) {
unsigned long __pgdir = (unsigned long)next->pgd;
@@ -144,10 +144,10 @@ static __inline__ void switch_mm(struct mm_struct *prev,
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev, next) \
- switch_mm((prev),(next),NULL,smp_processor_id())
+ switch_mm((prev),(next),NULL)
static __inline__ void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
#else /* !CONFIG_MMU */
@@ -157,10 +157,10 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
#define set_asid(asid) do { } while (0)
#define get_asid() (0)
#define activate_context(mm) do { } while (0)
-#define switch_mm(prev,next,tsk,cpu) do { } while (0)
+#define switch_mm(prev,next,tsk) do { } while (0)
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) do { } while (0)
-#define enter_lazy_tlb(mm,tsk,cpu) do { } while (0)
+#define enter_lazy_tlb(mm,tsk) do { } while (0)
#endif /* CONFIG_MMU */
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
diff --git a/include/asm-sparc/mmu_context.h b/include/asm-sparc/mmu_context.h
index f386a8f4bbe8..ed1e01d04d21 100644
--- a/include/asm-sparc/mmu_context.h
+++ b/include/asm-sparc/mmu_context.h
@@ -5,7 +5,7 @@
#ifndef __ASSEMBLY__
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -26,14 +26,14 @@ BTFIXUPDEF_CALL(void, destroy_context, struct mm_struct *)
#define destroy_context(mm) BTFIXUP_CALL(destroy_context)(mm)
/* Switch the current MM context. */
-BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *, int)
+BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *)
-#define switch_mm(old_mm, mm, tsk, cpu) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk, cpu)
+#define switch_mm(old_mm, mm, tsk) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk)
#define deactivate_mm(tsk,mm) do { } while (0)
/* Activate a new MM instance for the current task. */
-#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL, smp_processor_id())
+#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)
#endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index ab9eedf22860..292757aa3176 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -27,7 +27,7 @@
#include <asm/system.h>
#include <asm/spitfire.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
@@ -106,7 +106,7 @@ do { \
extern void __flush_tlb_mm(unsigned long, unsigned long);
/* Switch the current MM context. */
-static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
+static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long ctx_valid;
@@ -125,7 +125,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
}
{
- unsigned long vm_mask = (1UL << cpu);
+ unsigned long vm_mask = (1UL << smp_processor_id());
/* Even if (mm == old_mm) we _must_ check
* the cpu_vm_mask. If we do not we could
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
index 14ca8b2a4628..4ddffc1a7832 100644
--- a/include/asm-um/mmu_context.h
+++ b/include/asm-um/mmu_context.h
@@ -21,8 +21,10 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
extern void switch_mm_skas(int mm_fd);
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk, unsigned cpu)
+ struct task_struct *tsk)
{
+ unsigned cpu = smp_processor_id();
+
if(prev != next){
clear_bit(cpu, &prev->cpu_vm_mask);
set_bit(cpu, &next->cpu_vm_mask);
@@ -33,7 +35,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *tsk, unsigned cpu)
+ struct task_struct *tsk)
{
}
diff --git a/include/asm-v850/mmu_context.h b/include/asm-v850/mmu_context.h
index 24301a46a92e..f521c8050d3c 100644
--- a/include/asm-v850/mmu_context.h
+++ b/include/asm-v850/mmu_context.h
@@ -3,9 +3,9 @@
#define destroy_context(mm) ((void)0)
#define init_new_context(tsk,mm) 0
-#define switch_mm(prev,next,tsk,cpu) ((void)0)
+#define switch_mm(prev,next,tsk) ((void)0)
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) ((void)0)
-#define enter_lazy_tlb(mm,tsk,cpu) ((void)0)
+#define enter_lazy_tlb(mm,tsk) ((void)0)
#endif /* __V850_MMU_CONTEXT_H__ */
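
The next hunk (asm-x86_64) is the cleanest illustration of the commit message's "per-cpu variables" point: enter_lazy_tlb never needed a cpu index at all, because mmu_state lives in the per-CPU PDA that read_pda/write_pda reach directly. A standalone sketch of that shape, with a __thread variable mocking the PDA (an assumption for illustration; the real kernel uses %gs-relative PDA accessors, not thread-local storage):

#include <stdio.h>

enum { TLBSTATE_OK = 1, TLBSTATE_LAZY = 2 };

/* Mock per-CPU datum: __thread gives each thread (standing in for a
   CPU here) its own copy, so no cpu index is needed to reach it. */
static __thread int mmu_state = TLBSTATE_OK;

static void enter_lazy_tlb(void) /* mm/tsk omitted in the sketch */
{
	if (mmu_state == TLBSTATE_OK)  /* cf. read_pda(mmu_state)            */
		mmu_state = TLBSTATE_LAZY; /* cf. write_pda(mmu_state, ...LAZY)  */
}

int main(void)
{
	enter_lazy_tlb();
	printf("mmu_state=%d\n", mmu_state); /* prints 2 (TLBSTATE_LAZY) */
	return 0;
}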
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index 41fc9820ee94..8f80f157035e 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -17,20 +17,21 @@ void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SMP
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
}
#else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
#endif
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk, unsigned cpu)
+ struct task_struct *tsk)
{
+ unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
clear_bit(cpu, &prev->cpu_vm_mask);
@@ -68,7 +69,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
} while(0)
#define activate_mm(prev, next) \
- switch_mm((prev),(next),NULL,smp_processor_id())
+ switch_mm((prev),(next),NULL)
#endif