author	Andrew Morton <akpm@zip.com.au>	2002-08-27 21:03:50 -0700
committer	Linus Torvalds <torvalds@penguin.transmeta.com>	2002-08-27 21:03:50 -0700
commit	a8382cf1153689a1caac0e707e951e7869bb92e1 (patch)
tree	71e2722fd8fd5e08fb7862171f8fdb1443ce31c6 /include
parent	e6f0e61d9ed94134f57bcf6c72b81848b9d3c2fe (diff)
[PATCH] per-zone LRU locking
Now the LRUs are per-zone, make their lock per-zone as well.

In this patch the per-zone lock shares a cacheline with the zone's buddy list lock, which is very bad.  Some groundwork is needed to fix this well.

This change is expected to be a significant win on NUMA, where most page allocation comes from the local node's zones.

For NUMA the `struct zone' itself should really be placed in that node's memory, which is something the platform owners should look at.  However the internode cache will help here.  Per-node kswapd would make heaps of sense too.
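For illustration only (not part of this patch), here is a minimal sketch of what the locking change means for callers: code that used to manipulate the LRU lists under the single global pagemap_lru_lock now looks up the page's owning zone and takes that zone's lru_lock instead, so CPUs working on pages from different zones no longer contend on one lock.  The page_zone() lookup and the activate-style body below are assumptions for the sketch, not code taken from this diff.

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

/*
 * Sketch: move a page to the head of its zone's active list.
 * Before this patch the critical section would have been guarded
 * by the global pagemap_lru_lock; with per-zone locking, only the
 * lock of the zone that owns the page is taken.
 */
static void sketch_activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);	/* assumed page-to-zone helper */

	spin_lock(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		list_del(&page->lru);
		list_add(&page->lru, &zone->active_list);
		SetPageActive(page);
	}
	spin_unlock(&zone->lru_lock);
}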
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mm.h	2
-rw-r--r--	include/linux/mmzone.h	1
-rw-r--r--	include/linux/page-flags.h	2
-rw-r--r--	include/linux/swap.h	2
4 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7416dae6b550..045a861e4024 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -157,7 +157,7 @@ struct page {
 	struct address_space *mapping;	/* The inode (or ...) we belong to. */
 	unsigned long index;		/* Our offset within mapping. */
 	struct list_head lru;		/* Pageout list, eg. active_list;
-					   protected by pagemap_lru_lock !! */
+					   protected by zone->lru_lock !! */
 	union {
 		struct pte_chain * chain;	/* Reverse pte mapping pointer.
						 * protected by PG_chainlock */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 928000348e6b..f62e36b902a2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -44,6 +44,7 @@ struct zone {
 	unsigned long		pages_min, pages_low, pages_high;
 	int			need_balance;
 
+	spinlock_t		lru_lock;
 	struct list_head	active_list;
 	struct list_head	inactive_list;
 	atomic_t		refill_counter;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index cc74c699f3ad..5a49020e728b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -28,7 +28,7 @@
  *
  * Note that the referenced bit, the page->lru list_head and the active,
  * inactive_dirty and inactive_clean lists are protected by the
- * pagemap_lru_lock, and *NOT* by the usual PG_locked bit!
+ * zone->lru_lock, and *NOT* by the usual PG_locked bit!
  *
  * PG_error is set to indicate that an I/O error occurred on this page.
  *
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d9a4a9dc00ce..affa89d77eb1 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -209,8 +209,6 @@ extern struct swap_list_t swap_list;
 asmlinkage long sys_swapoff(const char *);
 asmlinkage long sys_swapon(const char *, int);
 
-extern spinlock_t _pagemap_lru_lock;
-
 extern void FASTCALL(mark_page_accessed(struct page *));
 
 extern spinlock_t swaplock;