Diffstat (limited to 'include')
-rw-r--r--  include/linux/buffer_head.h |  1
-rw-r--r--  include/linux/dcache.h      | 11
-rw-r--r--  include/linux/iobuf.h       | 88
-rw-r--r--  include/linux/mm.h          | 23
4 files changed, 23 insertions(+), 100 deletions(-)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 71732e1216fc..0760d97cd6f9 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -32,7 +32,6 @@ enum bh_state_bits {
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
struct page;
-struct kiobuf;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 0abaaaa2c96d..71708edafce9 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -180,17 +180,6 @@ extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_anon(struct list_head *);
extern int d_invalidate(struct dentry *);
-/* dcache memory management */
-extern int shrink_dcache_memory(int, unsigned int);
-extern void prune_dcache(int);
-
-/* icache memory management (defined in linux/fs/inode.c) */
-extern int shrink_icache_memory(int, unsigned int);
-extern void prune_icache(int);
-
-/* quota cache memory management (defined in linux/fs/dquot.c) */
-extern int shrink_dqcache_memory(int, unsigned int);
-
/* only used at mount-time */
extern struct dentry * d_alloc_root(struct inode *);
diff --git a/include/linux/iobuf.h b/include/linux/iobuf.h
deleted file mode 100644
index fb147b5c48a7..000000000000
--- a/include/linux/iobuf.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * iobuf.h
- *
- * Defines the structures used to track abstract kernel-space io buffers.
- *
- */
-
-#ifndef __LINUX_IOBUF_H
-#define __LINUX_IOBUF_H
-
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <asm/atomic.h>
-
-/*
- * The kiobuf structure describes a physical set of pages reserved
- * locked for IO. The reference counts on each page will have been
- * incremented, and the flags field will indicate whether or not we have
- * pre-locked all of the pages for IO.
- *
- * kiobufs may be passed in arrays to form a kiovec, but we must
- * preserve the property that no page is present more than once over the
- * entire iovec.
- */
-
-#define KIO_MAX_ATOMIC_IO 512 /* in kb */
-#define KIO_STATIC_PAGES (KIO_MAX_ATOMIC_IO / (PAGE_SIZE >> 10) + 1)
-#define KIO_MAX_SECTORS (KIO_MAX_ATOMIC_IO * 2)
-
-/* The main kiobuf struct */
-
-struct kiobuf
-{
- int nr_pages; /* Pages actually referenced */
- int array_len; /* Space in the allocated lists */
- int offset; /* Offset to start of valid data */
- int length; /* Number of valid bytes of data */
-
- /* Keep separate track of the physical addresses and page
- * structs involved. If we do IO to a memory-mapped device
- * region, there won't necessarily be page structs defined for
- * every address. */
-
- struct page ** maplist;
-
- unsigned int locked : 1; /* If set, pages has been locked */
-
- /* Always embed enough struct pages for atomic IO */
- struct page * map_array[KIO_STATIC_PAGES];
- sector_t blocks[KIO_MAX_SECTORS];
-
- /* Dynamic state for IO completion: */
- atomic_t io_count; /* IOs still in progress */
- int errno; /* Status of completed IO */
- void (*end_io) (struct kiobuf *); /* Completion callback */
- wait_queue_head_t wait_queue;
-};
-
-
-/* mm/memory.c */
-
-int map_user_kiobuf(int rw, struct kiobuf *, unsigned long va, size_t len);
-void unmap_kiobuf(struct kiobuf *iobuf);
-int lock_kiovec(int nr, struct kiobuf *iovec[], int wait);
-int unlock_kiovec(int nr, struct kiobuf *iovec[]);
-void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes);
-
-/* fs/iobuf.c */
-
-int end_kio_request(struct kiobuf *, int);
-void simple_wakeup_kiobuf(struct kiobuf *);
-int alloc_kiovec(int nr, struct kiobuf **);
-void free_kiovec(int nr, struct kiobuf **);
-int expand_kiobuf(struct kiobuf *, int);
-void kiobuf_wait_for_io(struct kiobuf *);
-extern int alloc_kiobuf_bhs(struct kiobuf *);
-extern void free_kiobuf_bhs(struct kiobuf *);
-
-/* fs/buffer.c */
-
-int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
- struct block_device *bdev, sector_t [], int size);
-
-/* fs/bio.c */
-void ll_rw_kio(int rw, struct kiobuf *kio, struct block_device *bdev, sector_t block);
-
-#endif /* __LINUX_IOBUF_H */
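
For context on what is being removed: a rough sketch, assuming the 2.4-era kiobuf calling sequence (compare drivers/char/raw.c), of how the functions declared in the deleted header were typically driven. The helper name, the fixed 512-byte block size, and the error handling are illustrative, not taken from this commit.

#include <linux/fs.h>		/* READ */
#include <linux/iobuf.h>	/* the header this commit deletes */

/* Illustrative helper: read `len' bytes from `bdev', starting at sector
 * `blocknr', directly into the user buffer at `uaddr' via a pinned kiobuf. */
static int kiobuf_read_sketch(struct block_device *bdev, sector_t blocknr,
			      unsigned long uaddr, size_t len)
{
	struct kiobuf *iobuf;
	int i, err;

	err = alloc_kiovec(1, &iobuf);		/* a kiovec of one kiobuf */
	if (err)
		return err;

	/* Pin and reference the user pages so the device can do IO on them. */
	err = map_user_kiobuf(READ, iobuf, uaddr, len);
	if (err)
		goto out_free;

	/* One sector number per 512-byte block of the transfer. */
	for (i = 0; i < len / 512; i++)
		iobuf->blocks[i] = blocknr + i;

	/* Returns bytes transferred on success, negative errno on failure. */
	err = brw_kiovec(READ, 1, &iobuf, bdev, iobuf->blocks, 512);

	unmap_kiobuf(iobuf);
out_free:
	free_kiovec(1, &iobuf);
	return err < 0 ? err : 0;
}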
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a5107b5043f7..a6c66cc418ee 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -392,6 +392,29 @@ extern int free_hugepages(struct vm_area_struct *);
/*
+ * Prototype to add a shrinker callback for ageable caches.
+ *
+ * These functions are passed a count `nr_to_scan' and a gfpmask. They should
+ * scan `nr_to_scan' objects, attempting to free them.
+ *
+ * The callback must return the number of objects which remain in the cache.
+ *
+ * The callback will be passed nr_to_scan == 0 when the VM is querying the
+ * cache size, so a fastpath for that case is appropriate.
+ */
+typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask);
+
+/*
+ * Add an aging callback. The int is the number of 'seeks' it takes
+ * to recreate one of the objects that these functions age.
+ */
+
+#define DEFAULT_SEEKS 2
+struct shrinker;
+extern struct shrinker *set_shrinker(int, shrinker_t);
+extern void remove_shrinker(struct shrinker *shrinker);
+
+/*
* If the mapping doesn't provide a set_page_dirty a_op, then
* just fall through and assume that it wants buffer_heads.
* FIXME: make the method unconditional.
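
A minimal sketch of the shrinker interface added to mm.h above: the cache name, object counter, and reclaim bookkeeping are hypothetical; only set_shrinker(), remove_shrinker(), DEFAULT_SEEKS and the shrinker_t signature come from this commit.

#include <linux/kernel.h>	/* min() */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mm.h>		/* set_shrinker(), remove_shrinker() */

/* Hypothetical object count for some private, ageable cache. */
static int my_cache_objects;

static int my_cache_shrink(int nr_to_scan, unsigned int gfp_mask)
{
	if (nr_to_scan) {
		/* Reclaim path: free up to nr_to_scan objects, honouring
		 * gfp_mask (e.g. skip anything that could recurse into the
		 * fs unless __GFP_FS is set).  A real cache would walk its
		 * LRU here; this sketch only adjusts the counter. */
		int freed = min(nr_to_scan, my_cache_objects);
		my_cache_objects -= freed;
	}
	/* nr_to_scan == 0: the VM is only querying the cache size,
	 * so return quickly. */
	return my_cache_objects;
}

static struct shrinker *my_shrinker;

static int __init my_cache_init(void)
{
	/* DEFAULT_SEEKS: recreating one cached object costs about 2 seeks. */
	my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
	return my_shrinker ? 0 : -ENOMEM;
}

static void __exit my_cache_exit(void)
{
	remove_shrinker(my_shrinker);
}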