author    Anton Blanchard <anton@samba.org>    2002-08-14 17:06:32 +1000
committer Anton Blanchard <anton@samba.org>    2002-08-14 17:06:32 +1000
commit    949341959ef8295f3683a991452338f6c5c35335 (patch)
tree      cc194e3be036dbba884d41141c51a663eef52c55 /include
parent    53405a4e5637f879bace6ce8985f8a6754f5a809 (diff)
ppc64: Disable irqs in init_new_context, destroy_context
Diffstat (limited to 'include')
-rw-r--r--    include/asm-ppc64/mmu_context.h    27
1 files changed, 14 insertions, 13 deletions
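
The patch converts both context-queue paths from spin_lock()/spin_unlock() to spin_lock_irqsave()/spin_unlock_irqrestore(), so local interrupts are disabled for the duration of the critical section and restored to their previous state on unlock. A minimal sketch of that pattern, modelled on the patched code but using illustrative names (ctx_queue, ctx_alloc and ctx_free are not from the kernel tree):

/*
 * Sketch only, not the actual kernel source.  spin_lock_irqsave()
 * disables local interrupts and records their previous state in
 * `flags`; spin_unlock_irqrestore() re-enables them exactly as they
 * were, so the queue lock is never held with interrupts enabled.
 */
#include <linux/spinlock.h>
#include <linux/errno.h>

#define MAX_CTX 16

static struct {
	spinlock_t lock;
	long size;
	long head, tail;
	long elements[MAX_CTX];
} ctx_queue;

static int ctx_alloc(long *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx_queue.lock, flags);
	if (ctx_queue.size <= 0) {
		spin_unlock_irqrestore(&ctx_queue.lock, flags);
		return -ENOMEM;
	}
	*ctx = ctx_queue.elements[ctx_queue.head];
	ctx_queue.head = (ctx_queue.head + 1) % MAX_CTX;
	ctx_queue.size--;
	spin_unlock_irqrestore(&ctx_queue.lock, flags);
	return 0;
}

static void ctx_free(long ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx_queue.lock, flags);
	ctx_queue.tail = (ctx_queue.tail + 1) % MAX_CTX;
	ctx_queue.elements[ctx_queue.tail] = ctx;
	ctx_queue.size++;
	spin_unlock_irqrestore(&ctx_queue.lock, flags);
}
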
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
index 1688129a2f23..08d8bd20976a 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-ppc64/mmu_context.h
@@ -72,19 +72,19 @@ mmu_context_underflow(void)
panic("mmu_context_underflow");
}
-
/*
* Set up the context for a new address space.
*/
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
- long head, size;
+ long head;
+ unsigned long flags;
- spin_lock( &mmu_context_queue.lock );
+ spin_lock_irqsave(&mmu_context_queue.lock, flags);
- if ( (size = mmu_context_queue.size) <= 0 ) {
- spin_unlock( &mmu_context_queue.lock );
+ if (mmu_context_queue.size <= 0) {
+ spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
return -ENOMEM;
}
@@ -93,9 +93,9 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
mmu_context_queue.head = head;
- mmu_context_queue.size = size-1;
+ mmu_context_queue.size--;
- spin_unlock( &mmu_context_queue.lock );
+ spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
return 0;
}
@@ -106,12 +106,13 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static inline void
destroy_context(struct mm_struct *mm)
{
- long index, size = mmu_context_queue.size;
+ long index;
+ unsigned long flags;
- spin_lock( &mmu_context_queue.lock );
+ spin_lock_irqsave(&mmu_context_queue.lock, flags);
- if ( (size = mmu_context_queue.size) >= NUM_USER_CONTEXT ) {
- spin_unlock( &mmu_context_queue.lock );
+ if (mmu_context_queue.size >= NUM_USER_CONTEXT) {
+ spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
mmu_context_underflow();
}
@@ -125,10 +126,10 @@ destroy_context(struct mm_struct *mm)
mmu_context_queue.tail = index;
#endif
- mmu_context_queue.size = size+1;
+ mmu_context_queue.size++;
mmu_context_queue.elements[index] = mm->context;
- spin_unlock( &mmu_context_queue.lock );
+ spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
}
extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);