author     Andrew Morton <akpm@osdl.org>             2003-09-03 11:12:43 -0700
committer  Linus Torvalds <torvalds@home.osdl.org>   2003-09-03 11:12:43 -0700
commit     5eebb6f205162c0a7b0075e8d86453f81994aa2f (patch)
tree       439671fe1bdc0c1a80ceab3bf8bfdab36792821c
parent     55308a20138c3ea39a0882265f255c5b0f931f20 (diff)
[PATCH] might_sleep() improvements
From: Mitchell Blank Jr <mitch@sfgoth.com>

This patch makes the following improvements to might_sleep():

 o Add a "might_sleep_if()" macro for when we might sleep only if some
   condition is met.  It's a bit tidier, and has an unlikely() in it.

 o Add might_sleep checks to skb_share_check() and skb_unshare(), which
   sometimes need to allocate memory.

 o Make all architectures call might_sleep() in both down() and
   down_interruptible().  Previously only ppc, ppc64, and i386 did this
   check.  (sh did the check in down() but not down_interruptible().)
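For illustration only (not part of the patch): a minimal sketch of how the new might_sleep_if() macro is intended to be used. The helper my_alloc_buffer() below is hypothetical; might_sleep_if(), kmalloc(), and the __GFP_WAIT flag are the interfaces the patch itself relies on.

    #include <linux/kernel.h>   /* might_sleep_if(), added by this patch */
    #include <linux/gfp.h>      /* __GFP_WAIT */
    #include <linux/slab.h>     /* kmalloc() */

    /*
     * Hypothetical helper: it may sleep only when the caller's gfp mask
     * allows blocking, so the debug check is made conditional rather
     * than unconditional.
     */
    static void *my_alloc_buffer(size_t len, int gfp_flags)
    {
            might_sleep_if(gfp_flags & __GFP_WAIT);
            return kmalloc(len, gfp_flags);
    }

Callers pass GFP_KERNEL from process context and GFP_ATOMIC from interrupt context; in the atomic case the condition is false and the check costs nothing, which is the same pattern the mm/ and skbuff changes below are converted to.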
-rw-r--r--  arch/sparc64/kernel/semaphore.c    |  2
-rw-r--r--  include/asm-alpha/semaphore.h      |  8
-rw-r--r--  include/asm-arm/semaphore.h        |  4
-rw-r--r--  include/asm-arm26/semaphore.h      |  4
-rw-r--r--  include/asm-cris/semaphore.h       |  2
-rw-r--r--  include/asm-h8300/semaphore.h      |  2
-rw-r--r--  include/asm-ia64/semaphore.h       |  2
-rw-r--r--  include/asm-m68k/semaphore.h       |  4
-rw-r--r--  include/asm-m68knommu/semaphore.h  |  4
-rw-r--r--  include/asm-mips/semaphore.h       |  2
-rw-r--r--  include/asm-parisc/semaphore.h     |  4
-rw-r--r--  include/asm-s390/semaphore.h       |  2
-rw-r--r--  include/asm-sh/semaphore.h         |  1
-rw-r--r--  include/asm-sparc/semaphore.h      |  2
-rw-r--r--  include/asm-v850/semaphore.h       |  2
-rw-r--r--  include/asm-x86_64/semaphore.h     |  2
-rw-r--r--  include/linux/kernel.h             |  2
-rw-r--r--  include/linux/skbuff.h             |  2
-rw-r--r--  mm/page_alloc.c                    |  3
-rw-r--r--  mm/rmap.c                          |  3
-rw-r--r--  mm/slab.c                          |  3
21 files changed, 42 insertions, 18 deletions
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
index 4ce2f9369019..a9e66d666ceb 100644
--- a/arch/sparc64/kernel/semaphore.c
+++ b/arch/sparc64/kernel/semaphore.c
@@ -110,6 +110,7 @@ static void __down(struct semaphore * sem)
void down(struct semaphore *sem)
{
+ might_sleep();
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
@@ -219,6 +220,7 @@ int down_interruptible(struct semaphore *sem)
{
int ret = 0;
+ might_sleep();
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index ab26cfbaddbf..a363f018a35f 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -88,14 +88,18 @@ extern void __up_wakeup(struct semaphore *);
static inline void __down(struct semaphore *sem)
{
- long count = atomic_dec_return(&sem->count);
+ long count;
+ might_sleep();
+ count = atomic_dec_return(&sem->count);
if (unlikely(count < 0))
__down_failed(sem);
}
static inline int __down_interruptible(struct semaphore *sem)
{
- long count = atomic_dec_return(&sem->count);
+ long count;
+ might_sleep();
+ count = atomic_dec_return(&sem->count);
if (unlikely(count < 0))
return __down_failed_interruptible(sem);
return 0;
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 656120edc11b..76284ff21f49 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -88,7 +88,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__down_op(sem, __down_failed);
}
@@ -101,7 +101,7 @@ static inline int down_interruptible (struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
return __down_op_ret(sem, __down_interruptible_failed);
}
diff --git a/include/asm-arm26/semaphore.h b/include/asm-arm26/semaphore.h
index a7d94526309d..49946274a67b 100644
--- a/include/asm-arm26/semaphore.h
+++ b/include/asm-arm26/semaphore.h
@@ -84,7 +84,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__down_op(sem, __down_failed);
}
@@ -97,7 +97,7 @@ static inline int down_interruptible (struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
return __down_op_ret(sem, __down_interruptible_failed);
}
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
index 011a3bdc9ca7..d0821f6cef7f 100644
--- a/include/asm-cris/semaphore.h
+++ b/include/asm-cris/semaphore.h
@@ -79,6 +79,7 @@ extern inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
/* atomically decrement the semaphores count, and if its negative, we wait */
local_save_flags(flags);
@@ -104,6 +105,7 @@ extern inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
/* atomically decrement the semaphores count, and if its negative, we wait */
local_save_flags(flags);
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
index 962f5eb32d16..62e6e9ce669e 100644
--- a/include/asm-h8300/semaphore.h
+++ b/include/asm-h8300/semaphore.h
@@ -90,6 +90,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
count = &(sem->count);
__asm__ __volatile__(
@@ -117,6 +118,7 @@ static inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
count = &(sem->count);
__asm__ __volatile__(
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index dd0eade35495..44165a637add 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -73,6 +73,7 @@ down (struct semaphore *sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
@@ -89,6 +90,7 @@ down_interruptible (struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h
index 8581f361c52d..cc844ef36a83 100644
--- a/include/asm-m68k/semaphore.h
+++ b/include/asm-m68k/semaphore.h
@@ -89,7 +89,7 @@ extern inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"| atomic down operation\n\t"
"subql #1,%0@\n\t"
@@ -112,7 +112,7 @@ extern inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"| atomic interruptible down operation\n\t"
"subql #1,%1@\n\t"
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h
index 4720a09e6191..f85d311bc2d8 100644
--- a/include/asm-m68knommu/semaphore.h
+++ b/include/asm-m68knommu/semaphore.h
@@ -88,7 +88,7 @@ extern inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"| atomic down operation\n\t"
"movel %0, %%a1\n\t"
@@ -108,7 +108,7 @@ extern inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"| atomic down operation\n\t"
"movel %1, %%a1\n\t"
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index 34fc00d60460..a867bb1014be 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -88,6 +88,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
@@ -103,6 +104,7 @@ static inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
index 80686a7ab250..f01554398f1a 100644
--- a/include/asm-parisc/semaphore.h
+++ b/include/asm-parisc/semaphore.h
@@ -84,7 +84,7 @@ extern __inline__ void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
spin_lock_irq(&sem->sentry);
if (sem->count > 0) {
sem->count--;
@@ -100,7 +100,7 @@ extern __inline__ int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
spin_lock_irq(&sem->sentry);
if (sem->count > 0) {
sem->count--;
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 474abcd85b34..5720dea2459f 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -60,6 +60,7 @@ asmlinkage void __up(struct semaphore * sem);
static inline void down(struct semaphore * sem)
{
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
@@ -68,6 +69,7 @@ static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
index 2ba0930035f6..dc8955e3448c 100644
--- a/include/asm-sh/semaphore.h
+++ b/include/asm-sh/semaphore.h
@@ -107,6 +107,7 @@ static inline int down_interruptible(struct semaphore * sem)
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
index cf3135b928a0..0e6122ae3058 100644
--- a/include/asm-sparc/semaphore.h
+++ b/include/asm-sparc/semaphore.h
@@ -71,6 +71,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
ptr = &(sem->count.counter);
increment = 1;
@@ -107,6 +108,7 @@ static inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
ptr = &(sem->count.counter);
increment = 1;
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h
index 0d6560f3ba98..c514062bb69e 100644
--- a/include/asm-v850/semaphore.h
+++ b/include/asm-v850/semaphore.h
@@ -57,6 +57,7 @@ extern void __up (struct semaphore * sem);
extern inline void down (struct semaphore * sem)
{
+ might_sleep();
if (atomic_dec_return (&sem->count) < 0)
__down (sem);
}
@@ -64,6 +65,7 @@ extern inline void down (struct semaphore * sem)
extern inline int down_interruptible (struct semaphore * sem)
{
int ret = 0;
+ might_sleep();
if (atomic_dec_return (&sem->count) < 0)
ret = __down_interruptible (sem);
return ret;
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
index 6f42c7af790b..5fe25482facc 100644
--- a/include/asm-x86_64/semaphore.h
+++ b/include/asm-x86_64/semaphore.h
@@ -118,6 +118,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
__asm__ __volatile__(
"# atomic down operation\n\t"
@@ -144,6 +145,7 @@ static inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index b0aebfeda888..b2d17ea0dac0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -52,8 +52,10 @@ struct completion;
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line);
#define might_sleep() __might_sleep(__FILE__, __LINE__)
+#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)
#else
#define might_sleep() do {} while(0)
+#define might_sleep_if(cond) do {} while (0)
#endif
extern struct notifier_block *panic_notifier_list;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a372186cb6d6..4b5f057f8bfb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -389,6 +389,7 @@ static inline int skb_shared(struct sk_buff *skb)
*/
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
+ might_sleep_if(pri & __GFP_WAIT);
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, pri);
kfree_skb(skb);
@@ -419,6 +420,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
*/
static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
+ might_sleep_if(pri & __GFP_WAIT);
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri);
kfree_skb(skb); /* Free our shared copy */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ac25b6c141f..4051a19b0ab5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -543,8 +543,7 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
int do_retry;
struct reclaim_state reclaim_state;
- if (wait)
- might_sleep();
+ might_sleep_if(wait);
cold = 0;
if (gfp_mask & __GFP_COLD)
diff --git a/mm/rmap.c b/mm/rmap.c
index 06377cb9c907..c4434f218e4d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -503,8 +503,7 @@ struct pte_chain *pte_chain_alloc(int gfp_flags)
struct pte_chain *ret;
struct pte_chain **pte_chainp;
- if (gfp_flags & __GFP_WAIT)
- might_sleep();
+ might_sleep_if(gfp_flags & __GFP_WAIT);
pte_chainp = &get_cpu_var(local_pte_chain);
if (*pte_chainp) {
diff --git a/mm/slab.c b/mm/slab.c
index f54d4e83c7f3..34b62ca48429 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1814,8 +1814,7 @@ alloc_done:
static inline void
cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags)
{
- if (flags & __GFP_WAIT)
- might_sleep();
+ might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
kmem_flagcheck(cachep, flags);
#endif