author     Andrew Morton <akpm@digeo.com>              2002-10-04 20:35:48 -0700
committer  Russell King <rmk@flint.arm.linux.org.uk>   2002-10-04 20:35:48 -0700
commit     a27efcaff9ffd5ad05f4e111751da41a8820f7ab
tree       cc525c035c0e53f1edaadd4a9c572a52ef702bc1 /include
parent     343893e647d27c96bf07e3f549b77b89aa9581ce
[PATCH] remove page->virtual
The patch removes page->virtual for all architectures which do not
define WANT_PAGE_VIRTUAL.  Hash for it instead.

Possibly we could define WANT_PAGE_VIRTUAL for CONFIG_HIGHMEM4G, but
it seems unlikely.

A lot of the pressure went off kmap() and page_address() as a result
of the move to kmap_atomic().  That should be the preferred way to
address CPU load in the set_page_address() and page_address() hashing
and locking.

If kmap_atomic() is not usable then the next best approach is for
users to cache the result of kmap() in a local rather than calling
page_address() repeatedly.

One heavy user of kmap() and page_address() is the ext2 directory
code.

On a 7G Quad PIII, running four concurrent instances of

	while true
	do
		find /usr/src/linux > /dev/null
	done

on ext2 with everything cached, profiling shows that the new hashed
set_page_address() and page_address() implementations consume 0.4%
and 1.3% of CPU time respectively.  I think that's OK.
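For illustration, the "cache the mapping in a local" pattern the
message recommends looks roughly like the sketch below.  This is a
hypothetical example, not code from this patch: sum_page_bytes() is
an invented helper, and it assumes the 2.5-era kmap_atomic(page, type)
interface from <linux/highmem.h>.

#include <linux/highmem.h>

/* Checksum a (possibly highmem) page.  The mapping is established
 * once and held in a local, so the per-byte loop never goes through
 * page_address() and its hash lookup. */
static unsigned long sum_page_bytes(struct page *page)
{
	unsigned char *addr = kmap_atomic(page, KM_USER0);
	unsigned long sum = 0;
	int i;

	for (i = 0; i < PAGE_SIZE; i++)
		sum += addr[i];
	kunmap_atomic(addr, KM_USER0);
	return sum;
}

The same shape applies with plain kmap()/kunmap() where sleeping is
allowed; the point is one lookup per page, not one per access.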
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mm.h  48
1 file changed, 22 insertions(+), 26 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4ae8eb10dcb2..a5107b5043f7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -176,7 +176,7 @@ struct page {
 	 * Architectures with slow multiplication can define
 	 * WANT_PAGE_VIRTUAL in asm/page.h
 	 */
-#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+#if defined(WANT_PAGE_VIRTUAL)
 	void *virtual;			/* Kernel virtual address (NULL if
 					   not kmapped, ie. highmem) */
 #endif /* CONFIG_HIGMEM || WANT_PAGE_VIRTUAL */
@@ -289,38 +289,34 @@ static inline void set_page_zone(struct page *page, unsigned long zone_num)
 	page->flags |= zone_num << ZONE_SHIFT;
 }
 
-/*
- * In order to avoid #ifdefs within C code itself, we define
- * set_page_address to a noop for non-highmem machines, where
- * the field isn't useful.
- * The same is true for page_address() in arch-dependent code.
- */
-#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+#define lowmem_page_address(page)				\
+	__va( ( ((page) - page_zone(page)->zone_mem_map)	\
+		+ page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
+
+#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
+#define HASHED_PAGE_VIRTUAL
+#endif
 
+#if defined(WANT_PAGE_VIRTUAL)
+#define page_address(page) ((page)->virtual)
 #define set_page_address(page, address)			\
 	do {						\
 		(page)->virtual = (address);		\
 	} while(0)
+#define page_address_init()  do { } while(0)
+#endif
 
-#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-#define set_page_address(page, address)  do { } while(0)
-#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
-
-#define page_address(page) ((page)->virtual)
-
-#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-
-#define page_address(page)					\
-	__va( ( ((page) - page_zone(page)->zone_mem_map)	\
-		+ page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
+#if defined(HASHED_PAGE_VIRTUAL)
+void *page_address(struct page *page);
+void set_page_address(struct page *page, void *virtual);
+void page_address_init(void);
+#endif
 
-#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
+#define page_address(page) lowmem_page_address(page)
+#define set_page_address(page, address)  do { } while(0)
+#define page_address_init()  do { } while(0)
+#endif
 
 /*
  * Return true if this page is mapped into pagetables. Subtle: test pte.direct
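The hashed page_address()/set_page_address()/page_address_init()
declared above are implemented by this patch in mm/highmem.c.  As a
rough, user-space sketch of the idea (invented names, an arbitrary
bucket count, and none of the locking the kernel version needs):

#include <stdint.h>
#include <stdlib.h>

struct page;			/* opaque stand-in for the kernel's struct page */

/* One page -> kernel-virtual-address binding, chained per bucket. */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct page_address_map *next;
};

#define PA_HASH_ORDER	7	/* 128 buckets; arbitrary for the sketch */
#define PA_HASH_SIZE	(1UL << PA_HASH_ORDER)

static struct page_address_map *pa_htable[PA_HASH_SIZE];

/* Mix the pointer bits: struct pages sit in a dense array, so the
 * low bits alone would collide badly. */
static unsigned long pa_hash(struct page *page)
{
	uintptr_t val = (uintptr_t)page;

	val ^= val >> PA_HASH_ORDER;
	return (val / sizeof(void *)) & (PA_HASH_SIZE - 1);
}

/* NULL means "not currently kmapped", matching the convention of the
 * old page->virtual field. */
void *page_address(struct page *page)
{
	struct page_address_map *pam;

	for (pam = pa_htable[pa_hash(page)]; pam; pam = pam->next)
		if (pam->page == page)
			return pam->virtual;
	return NULL;
}

/* A non-NULL address inserts or updates a binding; NULL removes it.
 * Allocation-failure handling is omitted for brevity. */
void set_page_address(struct page *page, void *address)
{
	unsigned long h = pa_hash(page);
	struct page_address_map **link = &pa_htable[h];

	while (*link && (*link)->page != page)
		link = &(*link)->next;

	if (address) {
		if (*link) {
			(*link)->virtual = address;
		} else {
			struct page_address_map *pam = malloc(sizeof(*pam));

			pam->page = page;
			pam->virtual = address;
			pam->next = pa_htable[h];
			pa_htable[h] = pam;
		}
	} else if (*link) {
		struct page_address_map *pam = *link;

		*link = pam->next;
		free(pam);
	}
}

The kernel implementation keeps the same lookup structure but takes a
per-bucket spinlock and draws its entries from a preallocated pool,
since set_page_address() runs in contexts where it cannot allocate.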