summary refs log tree commit diff
path: root/include
diff options
context:
space:
mode:
authorAndrew Morton <akpm@zip.com.au>2002-04-29 23:51:50 -0700
committerLinus Torvalds <torvalds@home.transmeta.com>2002-04-29 23:51:50 -0700
commit00d6555e3c1568842beef2085045baaae59d347c (patch)
tree2a8bd17b14f41e5f58527e8d36f7aa445fba112b /include
parentd878155c293e65354c2acf653aef60011c4114bb (diff)
[PATCH] readahead fix
Changes the way in which the readahead code locates the readahead setting for the underlying device. - struct block_device and struct address_space gain a *pointer* to the current readahead tunable. - The tunable lives in the request queue and is altered with the traditional ioctl. - The value gets *copied* into the struct file at open() time. So a fcntl() mode to modify it per-fd is simple. - Filesystems which are not request_queue-backed get the address of the global `default_ra_pages'. If we want, this can become a tunable. - Filesystems are at liberty to alter address_space.ra_pages to point at some other fs-private default at new_inode/read_inode/alloc_inode time. - The ra_pages pointer can become a structure pointer if, at some time in the future, high-level code needs more detailed information about device characteristics. In fact, it'll need to become a struct pointer for use by writeback: my current writeback code has the problem that multiple pdflush threads can get stuck on the same request queue. That's a waste of resources. I currently have a silly flag in the superblock to try to avoid this. The proper way to get this exclusion is for the high-level writeback code to be able to do a test-and-set against a per-request_queue flag. That flag can live in a structure alongside ra_pages, conveniently accessible at the pagemap level. One thing still to-be-done is going into all callers of blk_init_queue and blk_queue_make_request and making sure that they're setting up a sensible default. ATA wants 248 sectors, and floppy drives don't want 128kbytes, I suspect. Later.
Diffstat (limited to 'include')
-rw-r--r--include/linux/blkdev.h7
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/mm.h1
3 files changed, 6 insertions, 4 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5cb20016dd99..01497e05a70a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -150,9 +150,9 @@ struct request_queue
/*
* The VM-level readahead tunable for this device. In
- * units of 512-byte sectors.
+ * units of PAGE_CACHE_SIZE pages.
*/
- unsigned ra_sectors;
+ unsigned long ra_pages;
/*
* The queue owner gets to use this for whatever they like.
@@ -310,8 +310,7 @@ extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
extern void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn);
-extern int blk_set_readahead(struct block_device *bdev, unsigned sectors);
-extern unsigned blk_get_readahead(struct block_device *bdev);
+extern unsigned long *blk_get_ra_pages(kdev_t kdev);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 31e255227683..f0d997aeecb4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -398,6 +398,7 @@ struct address_space {
list_t i_mmap_shared; /* list of private mappings */
spinlock_t i_shared_lock; /* and spinlock protecting it */
int gfp_mask; /* how to allocate the pages */
+ unsigned long *ra_pages; /* device readahead */
};
struct char_device {
@@ -513,6 +514,7 @@ struct file_ra_state {
unsigned long prev_page; /* Cache last read() position */
unsigned long ahead_start; /* Ahead window */
unsigned long ahead_size;
+ unsigned long ra_pages; /* Maximum readahead window */
};
struct file {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 56b5f325e0df..5f1c731ddde1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -504,6 +504,7 @@ void do_page_cache_readahead(struct file *file,
void page_cache_readahead(struct file *file, unsigned long offset);
void page_cache_readaround(struct file *file, unsigned long offset);
void handle_ra_thrashing(struct file *file);
+extern unsigned long default_ra_pages;
/* vma is the first one with address < vma->vm_end,
* and even address < vma->vm_start. Have to extend vma. */