| field | value | date |
|---|---|---|
| author | Andrew Morton <akpm@digeo.com> | 2002-09-07 22:22:03 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-09-07 22:22:03 -0700 |
| commit | 4b19c9405c4bf7e26cd173bd4cae93b1485bfacb | |
| tree | a257aaf979f8ccffe49af681024c822dc37f99d3 /include | |
| parent | 5f607d6ecf03d5fc3512d2c0a2fc3d447ccd4174 | |
[PATCH] atomic copy_*_user infrastructure
This patch implements the atomic copy_*_user() infrastructure.
If the kernel takes a pagefault while running copy_*_user() in an
atomic region, copy_*_user() fails: it performs a short copy and
returns the number of bytes that were left uncopied.
With this patch, holding an atomic kmap() puts the CPU into such an
atomic region.
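
To make the intended calling pattern concrete, here is a minimal sketch (not part of the patch): a caller copies user data into a highmem page under an atomic kmap and falls back to a sleeping kmap() when the atomic copy comes up short. The helper name copy_user_into_page() and its signature are hypothetical; kmap_atomic(), kunmap_atomic(), kmap(), kunmap() and copy_from_user() are the real interfaces involved.

```c
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

/* Hypothetical caller, for illustration only. */
static int copy_user_into_page(struct page *page, const char *ubuf,
			       unsigned long offset, unsigned long bytes)
{
	char *kaddr;
	unsigned long left;

	kaddr = kmap_atomic(page, KM_USER0);	/* enters an atomic region */
	left = copy_from_user(kaddr + offset, ubuf, bytes);
	kunmap_atomic(kaddr, KM_USER0);		/* leaves the atomic region */

	if (left) {
		/*
		 * The user page was not resident, so the in-atomic copy
		 * was refused.  Retry with a sleeping kmap() so the
		 * fault can be serviced normally.
		 */
		kaddr = kmap(page);
		left = copy_from_user(kaddr + offset, ubuf, bytes);
		kunmap(page);
	}
	return left ? -EFAULT : 0;
}
```
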
- Increment preempt_count() in kmap_atomic() regardless of the
  setting of CONFIG_PREEMPT. The pagefault handler recognises this as
  an atomic region and refuses to service the fault, so copy_*_user()
  returns a non-zero (uncopied byte) count. (See the fault-handler
  sketch after this list.)
- Attempt to propagate the in_atomic() predicate to all the other
  highmem-capable architectures' pagefault handlers, although the code
  has only been tested on x86.
- Fixed a PPC bug in kunmap_atomic(): it forgot to reenable
preemption if HIGHMEM_DEBUG is turned on.
- Fixed a sparc bug in kunmap_atomic(): it forgot to reenable
  preemption in all cases, including for non-fixmap pages.
- Fixed an error in <linux/highmem.h>: in the CONFIG_HIGHMEM=n case,
  kunmap_atomic() takes an address, not a page *.
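
As referenced in the first item above, here is roughly what the fault-handler side looks like, modelled on the i386 do_page_fault() of this era. The function name sketch_do_page_fault() is illustrative and the normal-path handling is elided; this is a sketch of the in_atomic() check, not code from this diff (which touches only headers).

```c
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/hardirq.h>	/* in_atomic() */
#include <asm/ptrace.h>

/* Declared by the arch code of this era; repeated here for completeness. */
extern unsigned long search_exception_table(unsigned long addr);

/* Illustrative sketch of the in_atomic() check in a pagefault handler. */
void sketch_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct mm_struct *mm = current->mm;
	unsigned long fixup;

	/*
	 * We must not take mmap_sem or sleep if the fault happened in
	 * an interrupt, inside an atomic kmap (preempt_count() raised
	 * by kmap_atomic()), or with no user context at all.
	 */
	if (in_atomic() || !mm) {
		fixup = search_exception_table(regs->eip);
		if (fixup) {
			/* Jump to the fixup: copy_*_user() returns a short count. */
			regs->eip = fixup;
			return;
		}
		/* No fixup entry: a genuine kernel fault, oops here. */
		return;
	}

	/* ... normal path: down_read(&mm->mmap_sem), handle_mm_fault(), ... */
}
```
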
Diffstat (limited to 'include')

| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | include/asm-i386/highmem.h | 6 |
| -rw-r--r-- | include/asm-ppc/hardirq.h | 2 |
| -rw-r--r-- | include/asm-ppc/highmem.h | 6 |
| -rw-r--r-- | include/asm-sparc/hardirq.h | 6 |
| -rw-r--r-- | include/asm-sparc/highmem.h | 6 |
| -rw-r--r-- | include/linux/highmem.h | 4 |

6 files changed, 23 insertions, 7 deletions
diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h
index 1cba7fc45882..0316b53f868f 100644
--- a/include/asm-i386/highmem.h
+++ b/include/asm-i386/highmem.h
@@ -81,7 +81,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	preempt_disable();
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
 
@@ -104,7 +104,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < FIXADDR_START) { // FIXME
-		preempt_enable();
+		dec_preempt_count();
 		return;
 	}
 
@@ -119,7 +119,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	__flush_tlb_one(vaddr);
 #endif
 
-	preempt_enable();
+	dec_preempt_count();
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-ppc/hardirq.h b/include/asm-ppc/hardirq.h
index d56152a03ccc..547f2491000f 100644
--- a/include/asm-ppc/hardirq.h
+++ b/include/asm-ppc/hardirq.h
@@ -85,8 +85,10 @@ typedef struct {
 #define irq_enter()	(preempt_count() += HARDIRQ_OFFSET)
 
 #if CONFIG_PREEMPT
+# define in_atomic()	(preempt_count() != kernel_locked())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
+# define in_atomic()	(preempt_count() != 0)
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 #define irq_exit()	\
diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h
index 5a630083d014..472482ca3f36 100644
--- a/include/asm-ppc/highmem.h
+++ b/include/asm-ppc/highmem.h
@@ -88,6 +88,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned int idx;
 	unsigned long vaddr;
 
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
 
@@ -109,8 +110,10 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr < KMAP_FIX_BEGIN) // FIXME
+	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
+		dec_preempt_count();
 		return;
+	}
 
 	if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
 		BUG();
@@ -122,6 +125,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	pte_clear(kmap_pte+idx);
 	flush_tlb_page(0, vaddr);
 #endif
+	dec_preempt_count();
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h
index a80212dc3a2a..f77ee7e415cf 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -113,6 +113,12 @@ do { \
 #define irq_exit()	br_read_unlock(BR_GLOBALIRQ_LOCK)
 #endif
 
+#if CONFIG_PREEMPT
+# define in_atomic()	(preempt_count() != kernel_locked())
+#else
+# define in_atomic()	(preempt_count() != 0)
+#endif
+
 #ifndef CONFIG_SMP
 
 #define synchronize_irq()	barrier()
diff --git a/include/asm-sparc/highmem.h b/include/asm-sparc/highmem.h
index bb2fc2331b5b..2ba438ea6111 100644
--- a/include/asm-sparc/highmem.h
+++ b/include/asm-sparc/highmem.h
@@ -83,6 +83,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long idx;
 	unsigned long vaddr;
 
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
 
@@ -116,8 +117,10 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr;
 	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr < FIX_KMAP_BEGIN) // FIXME
+	if (vaddr < FIX_KMAP_BEGIN) { // FIXME
+		dec_preempt_count();
 		return;
+	}
 
 	if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
 		BUG();
@@ -142,6 +145,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	flush_tlb_all();
 #endif
 #endif
+	dec_preempt_count();
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index b389a75be5d7..370177037315 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -24,8 +24,8 @@ static inline void *kmap(struct page *page) { return page_address(page); }
 
 #define kunmap(page) do { (void) (page); } while (0)
 
-#define kmap_atomic(page,idx)		kmap(page)
-#define kunmap_atomic(page,idx)	kunmap(page)
+#define kmap_atomic(page, idx)		page_address(page)
+#define kunmap_atomic(addr, idx)	do { } while (0)
 
 #endif /* CONFIG_HIGHMEM */
