author    Linus Torvalds <torvalds@penguin.transmeta.com>  2002-10-16 21:58:53 -0700
committer Linus Torvalds <torvalds@penguin.transmeta.com>  2002-10-16 21:58:53 -0700
commit    60db5f20c33788d10edcd93f9a8ebaae05836efa
tree      63335ea1dddf2d76c87a140c481404dbed4f40d5
parent    b5a6c077bd73df8c25dd8df5ca9259b56529822d
parent    9de0520543f1585ff2bcae7ce15eb9b55e742ad9

Merge penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/read-ahead
into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
-rw-r--r--  include/linux/fs.h  |  15
-rw-r--r--  include/linux/mm.h  |  15
-rw-r--r--  kernel/ksyms.c      |   2
-rw-r--r--  mm/filemap.c        |  29
-rw-r--r--  mm/madvise.c        |   2
-rw-r--r--  mm/readahead.c      |  46
6 files changed, 68 insertions(+), 41 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bca164f4265a..e5ced4dc214a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1251,7 +1251,8 @@ extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, lof
ssize_t generic_file_write_nolock(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *ppos);
extern ssize_t generic_file_sendfile(struct file *, struct file *, loff_t *, size_t);
-extern void do_generic_file_read(struct file *, loff_t *, read_descriptor_t *, read_actor_t);
+extern void do_generic_mapping_read(struct address_space *, struct file_ra_state *, struct file *,
+ loff_t *, read_descriptor_t *, read_actor_t);
extern ssize_t generic_file_direct_IO(int rw, struct file *file,
const struct iovec *iov, loff_t offset, unsigned long nr_segs);
extern int generic_direct_IO(int rw, struct inode *inode, const struct iovec
@@ -1268,6 +1269,18 @@ extern int generic_file_open(struct inode * inode, struct file * filp);
extern int generic_vm_writeback(struct page *page,
struct writeback_control *wbc);
+static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
+ read_descriptor_t * desc,
+ read_actor_t actor)
+{
+ do_generic_mapping_read(filp->f_dentry->d_inode->i_mapping,
+ &filp->f_ra,
+ filp,
+ ppos,
+ desc,
+ actor);
+}
+
extern struct file_operations generic_ro_fops;
extern int vfs_readlink(struct dentry *, char *, int, const char *);
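
The fs.h hunk above splits the old do_generic_file_read() into a mapping-based core, do_generic_mapping_read(), plus a static inline wrapper that recovers the mapping and readahead state from the struct file. As a hedged sketch (not part of this commit), a caller that already holds an address_space and keeps its own file_ra_state could now drive the read directly; the structure and helper names below are made-up illustrations of that case, not code from the patch:

	/*
	 * Illustrative only: "my_object" and "my_mapping_read" are invented
	 * names.  The readahead state lives with the object rather than in
	 * file->f_ra, which is exactly what the new entry point permits.
	 * (Kernel context: the types come from linux/fs.h as changed above.)
	 */
	struct my_object {
		struct address_space *mapping;
		struct file_ra_state ra;	/* private readahead state */
	};

	static void my_mapping_read(struct my_object *obj, struct file *filp,
				    loff_t *ppos, read_descriptor_t *desc,
				    read_actor_t actor)
	{
		/* filp is only passed through for ->readpage, as the comment
		 * added in mm/filemap.c notes */
		do_generic_mapping_read(obj->mapping, &obj->ra, filp,
					ppos, desc, actor);
	}
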
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d0b9bdb97523..cab2c4342047 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -512,11 +512,18 @@ int write_one_page(struct page *page, int wait);
/* readahead.c */
#define VM_MAX_READAHEAD 128 /* kbytes */
#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
-int do_page_cache_readahead(struct file *file,
+int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
unsigned long offset, unsigned long nr_to_read);
-void page_cache_readahead(struct file *file, unsigned long offset);
-void page_cache_readaround(struct file *file, unsigned long offset);
-void handle_ra_miss(struct file *file);
+void page_cache_readahead(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct file *filp,
+ unsigned long offset);
+void page_cache_readaround(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct file *filp,
+ unsigned long offset);
+void handle_ra_miss(struct address_space *mapping,
+ struct file_ra_state *ra);
/* Do stack extension */
extern int expand_stack(struct vm_area_struct * vma, unsigned long address);
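
For orientation, the two readahead limits above are given in kilobytes and converted to pages at use time (see get_min_readahead() in mm/readahead.c further down). A worked conversion, assuming 4 KiB pages (PAGE_CACHE_SIZE == 4096), which is an assumption about the architecture rather than anything fixed by this patch:

	/*
	 * VM_MAX_READAHEAD = 128 kbytes -> (128 * 1024) / 4096 = 32 pages
	 * VM_MIN_READAHEAD =  16 kbytes -> ( 16 * 1024) / 4096 =  4 pages
	 */
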
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index d3eddedb7468..941d5f9eec9d 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -229,7 +229,7 @@ EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_file_read);
EXPORT_SYMBOL(generic_file_sendfile);
-EXPORT_SYMBOL(do_generic_file_read);
+EXPORT_SYMBOL(do_generic_mapping_read);
EXPORT_SYMBOL(generic_file_write);
EXPORT_SYMBOL(generic_file_write_nolock);
EXPORT_SYMBOL(generic_file_mmap);
diff --git a/mm/filemap.c b/mm/filemap.c
index 65efd20d29d4..67d03d5d7732 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -570,10 +570,15 @@ void mark_page_accessed(struct page *page)
*
* This is really ugly. But the goto's actually try to clarify some
* of the logic when it comes to error handling etc.
+ * - note the struct file * is only passed for the use of readpage
*/
-void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
+void do_generic_mapping_read(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct file * filp,
+ loff_t *ppos,
+ read_descriptor_t * desc,
+ read_actor_t actor)
{
- struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
struct inode *inode = mapping->host;
unsigned long index, offset;
struct page *cached_page;
@@ -598,7 +603,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
break;
}
- page_cache_readahead(filp, index);
+ page_cache_readahead(mapping, ra, filp, index);
nr = nr - offset;
@@ -610,7 +615,7 @@ find_page:
page = radix_tree_lookup(&mapping->page_tree, index);
if (!page) {
read_unlock(&mapping->page_lock);
- handle_ra_miss(filp);
+ handle_ra_miss(mapping,ra);
goto no_cached_page;
}
page_cache_get(page);
@@ -947,9 +952,9 @@ ssize_t generic_file_sendfile(struct file *out_file, struct file *in_file,
}
static ssize_t
-do_readahead(struct file *file, unsigned long index, unsigned long nr)
+do_readahead(struct address_space *mapping, struct file *filp,
+ unsigned long index, unsigned long nr)
{
- struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
unsigned long max;
unsigned long active;
unsigned long inactive;
@@ -963,7 +968,7 @@ do_readahead(struct file *file, unsigned long index, unsigned long nr)
if (nr > max)
nr = max;
- do_page_cache_readahead(file, index, nr);
+ do_page_cache_readahead(mapping, filp, index, nr);
return 0;
}
@@ -976,10 +981,11 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
file = fget(fd);
if (file) {
if (file->f_mode & FMODE_READ) {
+ struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
unsigned long start = offset >> PAGE_CACHE_SHIFT;
unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
unsigned long len = end - start + 1;
- ret = do_readahead(file, start, len);
+ ret = do_readahead(mapping, file, start, len);
}
fput(file);
}
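
The start/end/len computation in sys_readahead() converts the byte range into an inclusive range of page-cache pages before handing it to do_readahead(). A worked example, again assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12); the input values are illustrative only:

	/*
	 *   offset = 5000, count = 10000
	 *   start  = 5000 >> 12                = 1
	 *   end    = (5000 + 10000 - 1) >> 12  = 14999 >> 12 = 3
	 *   len    = end - start + 1           = 3 pages passed to do_readahead()
	 */
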
@@ -1000,6 +1006,7 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
int error;
struct file *file = area->vm_file;
struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+ struct file_ra_state *ra = &file->f_ra;
struct inode *inode = mapping->host;
struct page *page;
unsigned long size, pgoff, endoff;
@@ -1032,7 +1039,7 @@ retry_all:
*/
if (VM_SequentialReadHint(area)) {
did_readahead = 1;
- page_cache_readahead(area->vm_file, pgoff);
+ page_cache_readahead(mapping, ra, file, pgoff);
}
/*
@@ -1041,7 +1048,7 @@ retry_all:
*/
if ((pgoff < size) && !VM_RandomReadHint(area)) {
did_readahead = 1;
- page_cache_readaround(file, pgoff);
+ page_cache_readaround(mapping, ra, file, pgoff);
}
/*
@@ -1051,7 +1058,7 @@ retry_find:
page = find_get_page(mapping, pgoff);
if (!page) {
if (did_readahead) {
- handle_ra_miss(file);
+ handle_ra_miss(mapping,ra);
did_readahead = 0;
}
goto no_cached_page;
diff --git a/mm/madvise.c b/mm/madvise.c
index 4a232cead910..ac845fe3553a 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -80,7 +80,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
if ((vma->vm_mm->rss + (end - start)) > rlim_rss)
return error;
- do_page_cache_readahead(file, start, end - start);
+ do_page_cache_readahead(file->f_dentry->d_inode->i_mapping, file, start, end - start);
return 0;
}
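
Taken together, the filemap.c and madvise.c hunks keep the user-visible behaviour driven by the usual hints; only the in-kernel plumbing now passes the mapping and readahead state explicitly. A hedged user-space illustration (not part of the patch) of which path each hint reaches:

	#include <sys/mman.h>

	/*
	 * Illustrative only:
	 *   MADV_WILLNEED   -> madvise_willneed() -> do_page_cache_readahead()
	 *   MADV_SEQUENTIAL -> VM_SequentialReadHint -> page_cache_readahead() on fault
	 *   default         -> page_cache_readaround() on fault
	 */
	void prefetch_then_stream(void *addr, size_t len)
	{
		madvise(addr, len, MADV_WILLNEED);	/* start readahead now */
		madvise(addr, len, MADV_SEQUENTIAL);	/* favour readahead on faults */
	}
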
diff --git a/mm/readahead.c b/mm/readahead.c
index e1e68fc006dc..63528f6eed98 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -22,18 +22,18 @@ struct backing_dev_info default_backing_dev_info = {
/*
* Return max readahead size for this inode in number-of-pages.
*/
-static inline unsigned long get_max_readahead(struct file *file)
+static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
- return file->f_ra.ra_pages;
+ return ra->ra_pages;
}
-static inline unsigned long get_min_readahead(struct file *file)
+static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}
static int
-read_pages(struct file *file, struct address_space *mapping,
+read_pages(struct address_space *mapping, struct file *filp,
struct list_head *pages, unsigned nr_pages)
{
unsigned page_idx;
@@ -48,7 +48,7 @@ read_pages(struct file *file, struct address_space *mapping,
struct page *page = list_entry(pages->prev, struct page, list);
list_del(&page->list);
if (!add_to_page_cache(page, mapping, page->index)) {
- mapping->a_ops->readpage(file, page);
+ mapping->a_ops->readpage(filp, page);
if (!pagevec_add(&lru_pvec, page))
__pagevec_lru_add(&lru_pvec);
} else {
@@ -134,10 +134,11 @@ read_pages(struct file *file, struct address_space *mapping,
*
* Returns the number of pages which actually had IO started against them.
*/
-int do_page_cache_readahead(struct file *file,
- unsigned long offset, unsigned long nr_to_read)
+int do_page_cache_readahead(struct address_space *mapping,
+ struct file *filp,
+ unsigned long offset,
+ unsigned long nr_to_read)
{
- struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
struct inode *inode = mapping->host;
struct page *page;
unsigned long end_index; /* The last page we want to read */
@@ -181,7 +182,7 @@ int do_page_cache_readahead(struct file *file,
* will then handle the error.
*/
if (ret) {
- read_pages(file, mapping, &page_pool, ret);
+ read_pages(mapping, filp, &page_pool, ret);
blk_run_queues();
}
BUG_ON(!list_empty(&page_pool));
@@ -216,9 +217,9 @@ check_ra_success(struct file_ra_state *ra, pgoff_t attempt,
* page_cache_readahead is the main function. It performs the adaptive
* readahead window size management and submits the readahead I/O.
*/
-void page_cache_readahead(struct file *file, unsigned long offset)
+void page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
+ struct file *filp, unsigned long offset)
{
- struct file_ra_state *ra = &file->f_ra;
unsigned max;
unsigned min;
unsigned orig_next_size;
@@ -239,11 +240,11 @@ void page_cache_readahead(struct file *file, unsigned long offset)
if (ra->next_size == -1UL)
goto out; /* Maximally shrunk */
- max = get_max_readahead(file);
+ max = get_max_readahead(ra);
if (max == 0)
goto out; /* No readahead */
- min = get_min_readahead(file);
+ min = get_min_readahead(ra);
orig_next_size = ra->next_size;
if (ra->next_size == 0 && offset == 0) {
@@ -316,7 +317,8 @@ do_io:
ra->ahead_start = 0; /* Invalidate these */
ra->ahead_size = 0;
- actual = do_page_cache_readahead(file, offset, ra->size);
+ actual = do_page_cache_readahead(mapping, filp, offset,
+ ra->size);
check_ra_success(ra, ra->size, actual, orig_next_size);
} else {
/*
@@ -327,7 +329,7 @@ do_io:
if (ra->ahead_start == 0) {
ra->ahead_start = ra->start + ra->size;
ra->ahead_size = ra->next_size;
- actual = do_page_cache_readahead(file,
+ actual = do_page_cache_readahead(mapping, filp,
ra->ahead_start, ra->ahead_size);
check_ra_success(ra, ra->ahead_size,
actual, orig_next_size);
@@ -342,12 +344,11 @@ out:
* but somewhat ascending. So readaround favours pages beyond the target one.
* We also boost the window size, as it can easily shrink due to misses.
*/
-void page_cache_readaround(struct file *file, unsigned long offset)
+void page_cache_readaround(struct address_space *mapping, struct file_ra_state *ra,
+ struct file *filp, unsigned long offset)
{
- struct file_ra_state *ra = &file->f_ra;
-
if (ra->next_size != -1UL) {
- const unsigned long min = get_min_readahead(file) * 2;
+ const unsigned long min = get_min_readahead(ra) * 2;
unsigned long target;
unsigned long backward;
@@ -365,7 +366,7 @@ void page_cache_readaround(struct file *file, unsigned long offset)
target = 0;
else
target -= backward;
- page_cache_readahead(file, target);
+ page_cache_readahead(mapping, ra, filp, target);
}
}
@@ -383,10 +384,9 @@ void page_cache_readaround(struct file *file, unsigned long offset)
* that the readahead window size will stabilise around the maximum level at
* which there is no thrashing.
*/
-void handle_ra_miss(struct file *file)
+void handle_ra_miss(struct address_space *mapping, struct file_ra_state *ra)
{
- struct file_ra_state *ra = &file->f_ra;
- const unsigned long min = get_min_readahead(file);
+ const unsigned long min = get_min_readahead(ra);
if (ra->next_size == -1UL) {
ra->next_size = min;