author	Andrew Morton <akpm@digeo.com>	2003-03-22 07:34:59 -0800
committer	David S. Miller <davem@kernel.bkbits.net>	2003-03-22 07:34:59 -0800
commit	34f2047de1ac6c5a34791b782138cccfcce6cd2e (patch)
tree	4f1958bea3bbaf72b6c182f9d5e371a9086708cc /include/linux
parent	ff0cbc7881e74bd5a7f6ee864d6474a3fd0213bb (diff)
[PATCH] pagecache accounting speedup
From: Alex Tomas <bzzz@tmi.comex.ru>

This is the second half of the vm_enough_memory() speedup.

When overcommit_memory != 1, vm_enough_memory() calls get_page_state() to
calculate the amount of used pagecache.  It does this on every call to
sys_brk().  get_page_state() is really expensive on SMP.

So the patch arranges for pagecache accounting to be kept in a global
atomic_t, with per-cpu batching and approximate accounting to amortise the
cost of the global atomic.

The nr_pagecache field of /proc/vmstat is removed.
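For illustration only (not part of the patch; the names, the threshold value
and the userspace setting are invented for this sketch), the batching scheme
can be modelled as a small standalone C program: each writer accumulates a
thread-local delta and folds it into the shared atomic counter only once the
delta's magnitude crosses a threshold, so the global count stays approximately
right while the cost of touching the shared counter is amortised.

/*
 * Standalone model of the approximate accounting idea described above.
 * These names (pagecache_acct_model, ACCT_THRESHOLD, ...) are hypothetical
 * and are not the kernel interfaces added by this patch.
 */
#include <stdatomic.h>
#include <stdio.h>

#define ACCT_THRESHOLD	16

static atomic_long nr_pagecache_global;		/* shared, approximate count */
static _Thread_local long nr_pagecache_local;	/* per-thread batch */

static void pagecache_acct_model(long count)
{
	nr_pagecache_local += count;
	/* Spill into the shared counter only when the local batch grows large. */
	if (nr_pagecache_local > ACCT_THRESHOLD ||
	    nr_pagecache_local < -ACCT_THRESHOLD) {
		atomic_fetch_add(&nr_pagecache_global, nr_pagecache_local);
		nr_pagecache_local = 0;
	}
}

static long page_cache_size_model(void)
{
	/* Cheap read; up to ACCT_THRESHOLD pages per thread may still be local. */
	return atomic_load(&nr_pagecache_global);
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		pagecache_acct_model(+1);	/* page added to the cache */
	for (int i = 0; i < 200; i++)
		pagecache_acct_model(-1);	/* page removed from the cache */

	printf("approximate pagecache pages: %ld\n", page_cache_size_model());
	return 0;
}

The kernel code in the diff below applies the same spill-on-threshold logic,
using a per-cpu variable instead of thread-local storage and relying on
mapping->page_lock to keep each update on a single CPU.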
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/mm.h	1
-rw-r--r--	include/linux/page-flags.h	1
-rw-r--r--	include/linux/pagemap.h	44
3 files changed, 43 insertions, 3 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8a174fe6c25e..637c7569c68e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -594,7 +594,6 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
-extern unsigned long get_page_cache_size(void);
extern unsigned int nr_used_zone_pages(void);
#ifdef CONFIG_MMU
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5c3bded564d8..efe51537c564 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -82,7 +82,6 @@
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
unsigned long nr_writeback; /* Pages under writeback */
- unsigned long nr_pagecache; /* Pages in pagecache */
unsigned long nr_page_table_pages;/* Pages used for pagetables */
unsigned long nr_reverse_maps; /* includes PageDirect */
unsigned long nr_mapped; /* mapped into pagetables */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 96ce1b4acd27..3be40da2e1aa 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -74,6 +74,48 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
+extern atomic_t nr_pagecache;
+
+#ifdef CONFIG_SMP
+
+#define PAGECACHE_ACCT_THRESHOLD max(16, NR_CPUS * 2)
+DECLARE_PER_CPU(long, nr_pagecache_local);
+
+/*
+ * pagecache_acct implements approximate accounting for pagecache.
+ * vm_enough_memory() does not need high accuracy. Writers will keep
+ * an offset in their per-cpu arena and will spill that into the
+ * global count whenever the absolute value of the local count
+ * exceeds the counter's threshold.
+ *
+ * MUST be protected from preemption.
+ * current protection is mapping->page_lock.
+ */
+static inline void pagecache_acct(int count)
+{
+ long *local;
+
+ local = &__get_cpu_var(nr_pagecache_local);
+ *local += count;
+ if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
+ atomic_add(*local, &nr_pagecache);
+ *local = 0;
+ }
+}
+
+#else
+
+static inline void pagecache_acct(int count)
+{
+ atomic_add(count, &nr_pagecache);
+}
+#endif
+
+static inline unsigned long get_page_cache_size(void)
+{
+ return atomic_read(&nr_pagecache);
+}
+
static inline void ___add_to_page_cache(struct page *page,
struct address_space *mapping, unsigned long index)
{
@@ -82,7 +124,7 @@ static inline void ___add_to_page_cache(struct page *page,
page->index = index;
mapping->nrpages++;
- inc_page_state(nr_pagecache);
+ pagecache_acct(1);
}
extern void FASTCALL(__lock_page(struct page *page));