| author | Steven Pratt <slpratt@austin.ibm.com> | 2005-01-03 04:14:55 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-01-03 04:14:55 -0800 |
| commit | 6f734a1af323ab4690610ecd575198ae219b6fe8 | |
| tree | 2f9ff1edd8ccca18f57c939d856bd354a3b31e09 /include/linux | |
| parent | d4cf10128caffbe419a483894261ca8d2f72c1eb | |
[PATCH] Simplified readahead
With Ram Pai <linuxram@us.ibm.com>
- Request size is now passed into page_cache_readahead. This allows the
  removal of the size-averaging code in the current readahead logic.
- Readahead ramp-up is now faster (especially for larger request sizes).
- No more "slow read path": readahead is turned off at the first random
  access and turned back on at the first sequential access.
- The code now handles thrashing, slowly reducing the readahead window until
  thrashing stops or the minimum size is reached (a standalone sketch of this
  behavior follows the list).
- Returned to the old behavior where the first access is assumed sequential
  only if it is at offset 0.
- Designed to handle larger (1M or above) window sizes efficiently.
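To make the ramp-up, shut-off, and thrash-shrink policy above concrete, here is a minimal self-contained C model. It is not the kernel implementation: RA_MIN_PAGES, RA_MAX_PAGES, model_readahead, and the thrashed flag are invented for illustration, and only the overall policy follows the description in this changelog.

```c
/*
 * Standalone model of the readahead policy described above.
 * Not kernel code: every name here is invented for illustration.
 */
#include <stdio.h>

#define RA_MIN_PAGES  4   /* assumed floor, in the spirit of VM_MIN_READAHEAD */
#define RA_MAX_PAGES 32   /* assumed ceiling, in the spirit of VM_MAX_READAHEAD */

static unsigned long window;  /* current readahead window in pages */
static int ra_off;            /* readahead switched off after a random access? */

/* Called for each read of req_size pages at page 'offset'; prev is the last offset read. */
static void model_readahead(unsigned long offset, unsigned long prev,
			    unsigned long req_size, int thrashed)
{
	/* prev starts at ~0UL, so a first access counts as sequential only at offset 0 */
	int sequential = (offset == prev + 1);

	if (!sequential) {
		ra_off = 1;           /* first random access: turn readahead off */
		window = 0;
		return;
	}

	if (ra_off) {
		ra_off = 0;           /* first sequential access: turn it back on */
		window = req_size > RA_MIN_PAGES ? req_size : RA_MIN_PAGES;
	} else if (thrashed) {
		window /= 2;          /* shrink until thrashing stops or the minimum is hit */
		if (window < RA_MIN_PAGES)
			window = RA_MIN_PAGES;
	} else {
		window += req_size;   /* ramp up quickly, scaled by the request size */
		if (window > RA_MAX_PAGES)
			window = RA_MAX_PAGES;
	}
}

int main(void)
{
	unsigned long pattern[] = { 0, 1, 2, 3, 100, 101, 102, 103 };
	unsigned long prev = ~0UL;

	for (unsigned int i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++) {
		model_readahead(pattern[i], prev, 4, 0);
		prev = pattern[i];
		printf("page %3lu -> window %2lu pages%s\n",
		       pattern[i], window, ra_off ? " (readahead off)" : "");
	}
	return 0;
}
```

Running the model on the access pattern above shows the window growing across pages 0-3, collapsing to zero at the random jump to page 100, and ramping up again once the accesses become sequential.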
Benchmark results:
Machine 1: 8-way Pentium IV, 1GB memory, tests run against a 36GB SCSI disk.
(Similar results were seen on a 1-way 866MHz box with an IDE disk.)
TioBench:
tiobench.pl --dir /mnt/tmp --block 4096 --size 4000 --numruns 2 --threads 1(4,16,64)
4k request size sequential read results in MB/sec
Threads 2.6.9 w/patches %diff diff
Diffstat (limited to 'include/linux')
| | file | changes |
|---|---|---|
| -rw-r--r-- | include/linux/fs.h | 7 |
| -rw-r--r-- | include/linux/mm.h | 7 |
2 files changed, 9 insertions, 5 deletions
```diff
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2d369c688cd2..3d1cc5dd9a94 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -563,16 +563,17 @@ struct fown_struct {
 struct file_ra_state {
 	unsigned long start;		/* Current window */
 	unsigned long size;
-	unsigned long next_size;	/* Next window size */
+	unsigned long flags;		/* ra flags RA_FLAG_xxx*/
+	unsigned long cache_hit;	/* cache hit count*/
 	unsigned long prev_page;	/* Cache last read() position */
 	unsigned long ahead_start;	/* Ahead window */
 	unsigned long ahead_size;
-	unsigned long currnt_wnd_hit;	/* locality in the current window */
-	unsigned long average;		/* size of next current window */
 	unsigned long ra_pages;		/* Maximum readahead window */
 	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
 	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
 };
+#define RA_FLAG_MISS 0x01	/* a cache miss occured against this file */
+#define RA_FLAG_INCACHE 0x02	/* file is already in cache */
 
 struct file {
 	struct list_head	f_list;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ec79c15139fe..7c11cf54fb96 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -732,15 +732,18 @@ int write_one_page(struct page *page, int wait);
 /* readahead.c */
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
+#define VM_MAX_CACHE_HIT	256	/* max pages in a row in cache before
+					 * turning readahead off */
 
 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			unsigned long offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			unsigned long offset, unsigned long nr_to_read);
-void page_cache_readahead(struct address_space *mapping,
+unsigned long page_cache_readahead(struct address_space *mapping,
 			  struct file_ra_state *ra,
 			  struct file *filp,
-			  unsigned long offset);
+			  unsigned long offset,
+			  unsigned long size);
 void handle_ra_miss(struct address_space *mapping,
 		    struct file_ra_state *ra, pgoff_t offset);
 unsigned long max_sane_readahead(unsigned long nr);
```
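The new flags and cache_hit fields replace the old window-averaging state. The sketch below is a hedged illustration of how those bits and the VM_MAX_CACHE_HIT threshold could fit together, based only on the comments in the patch: the file_ra_state_demo structure and the ra_demo_* helpers are invented for this example and are not the kernel functions.

```c
/*
 * Illustration of the intended use of the new flags/cache_hit fields.
 * A trimmed copy of the structure is used; only the RA_FLAG_* values and
 * VM_MAX_CACHE_HIT come from the patch, everything else is hypothetical.
 */
#include <stdio.h>

#define RA_FLAG_MISS    0x01  /* a cache miss occurred against this file */
#define RA_FLAG_INCACHE 0x02  /* file is already in cache */

#define VM_MAX_CACHE_HIT 256  /* pages in a row in cache before readahead is turned off */

struct file_ra_state_demo {
	unsigned long flags;      /* RA_FLAG_xxx bits, replacing next_size */
	unsigned long cache_hit;  /* consecutive pages found already cached */
};

/* Record that the requested page was already in the page cache. */
static void ra_demo_hit(struct file_ra_state_demo *ra)
{
	if (++ra->cache_hit >= VM_MAX_CACHE_HIT)
		ra->flags |= RA_FLAG_INCACHE;   /* stop issuing readahead for this file */
}

/* Record that a readahead page was gone before it was used (assumed reset of the hit count). */
static void ra_demo_miss(struct file_ra_state_demo *ra)
{
	ra->flags |= RA_FLAG_MISS;
	ra->cache_hit = 0;
}

int main(void)
{
	struct file_ra_state_demo ra = { 0, 0 };

	for (int i = 0; i < 300; i++)
		ra_demo_hit(&ra);
	printf("INCACHE set: %s\n", ra.flags & RA_FLAG_INCACHE ? "yes" : "no");

	ra_demo_miss(&ra);
	printf("MISS set:    %s\n", ra.flags & RA_FLAG_MISS ? "yes" : "no");
	return 0;
}
```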
