author	Rusty Russell <rusty@rustcorp.com.au>	2003-07-06 23:01:50 -0700
committer	Steve French <cifs.adm@hostme.bitkeeper.com>	2003-07-06 23:01:50 -0700
commit	8a6879c603dc4ea40f89fb1bda8f2b5039e19396 (patch)
tree	c0d883a41eff495d708bba2b95b2839065e8ab90 /include/asm-alpha
parent	b993be7e4517f328fd6bd8bcea2f038c894a292e (diff)
[PATCH] switch_mm and enter_lazy_tlb: remove cpu arg
switch_mm and enter_lazy_tlb take a CPU arg, which is always smp_processor_id(). This is misleading, and pointless if they use per-cpu variables or other optimizations. gcc will eliminate redundant smp_processor_id() (in inline functions) anyway. This removes that arg from all the architectures.
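A rough sketch of the pattern the patch applies (hypothetical helper and per_cpu_state table, not code from the patch): the callee derives the CPU itself with smp_processor_id() instead of every caller threading it through as an argument; when the function is inlined, gcc folds the repeated smp_processor_id() calls, so nothing is lost.

	/* Before: callers must pass cpu == smp_processor_id() explicitly. */
	static inline void mark_cpu_lazy_old(struct mm_struct *mm,
					     struct task_struct *tsk, unsigned cpu)
	{
		per_cpu_state[cpu].lazy_mm = mm;	/* hypothetical per-cpu table */
	}

	/* After: the callee looks the cpu up itself. */
	static inline void mark_cpu_lazy_new(struct mm_struct *mm,
					     struct task_struct *tsk)
	{
		unsigned cpu = smp_processor_id();
		per_cpu_state[cpu].lazy_mm = mm;	/* hypothetical per-cpu table */
	}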
Diffstat (limited to 'include/asm-alpha')
-rw-r--r--	include/asm-alpha/machvec.h	2
-rw-r--r--	include/asm-alpha/mmu_context.h	15
2 files changed, 9 insertions, 8 deletions
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
index 6d1b408e3ed5..f09f71909aa6 100644
--- a/include/asm-alpha/machvec.h
+++ b/include/asm-alpha/machvec.h
@@ -68,7 +68,7 @@ struct alpha_machine_vector
int (*mv_is_ioaddr)(unsigned long);
void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
- struct task_struct *, long);
+ struct task_struct *);
void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
void (*mv_flush_tlb_current)(struct mm_struct *);
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index a087254ea38a..3ae6408acaed 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -130,11 +130,12 @@ __get_new_mm_context(struct mm_struct *mm, long cpu)
__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *next, long cpu)
+ struct task_struct *next)
{
/* Check if our ASN is of an older version, and thus invalid. */
unsigned long asn;
unsigned long mmc;
+ long cpu = smp_processor_id();
#ifdef CONFIG_SMP
cpu_data[cpu].asn_lock = 1;
@@ -159,7 +160,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *next, long cpu)
+ struct task_struct *next)
{
/* As described, ASN's are broken for TLB usage. But we can
optimize for switching between threads -- if the mm is
@@ -174,7 +175,7 @@ ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
/* Do continue to allocate ASNs, because we can still use them
to avoid flushing the icache. */
- ev5_switch_mm(prev_mm, next_mm, next, cpu);
+ ev5_switch_mm(prev_mm, next_mm, next);
}
extern void __load_new_mm_context(struct mm_struct *);
@@ -212,14 +213,14 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
#define deactivate_mm(tsk,mm) do { } while (0)
#ifdef CONFIG_ALPHA_GENERIC
-# define switch_mm(a,b,c,d) alpha_mv.mv_switch_mm((a),(b),(c),(d))
+# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
-# define switch_mm(a,b,c,d) ev4_switch_mm((a),(b),(c),(d))
+# define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c))
# define activate_mm(x,y) ev4_activate_mm((x),(y))
# else
-# define switch_mm(a,b,c,d) ev5_switch_mm((a),(b),(c),(d))
+# define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
# define activate_mm(x,y) ev5_activate_mm((x),(y))
# endif
#endif
@@ -245,7 +246,7 @@ destroy_context(struct mm_struct *mm)
}
static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
tsk->thread_info->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;