| | | |
|---|---|---|
| author | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 |
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 |
| commit | 7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch) | |
| tree | 879f18ccbe274122f2d4f095b43cbc7f953e0ada /mm/readahead.c | |
| parent | 48e315618dc4dc8904182cd221e3d395d5d97005 (diff) | |
| parent | 9ffc59d57228d74809700be6f7ecb1db10292f05 (diff) | |
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'mm/readahead.c')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | mm/readahead.c | 39 |

1 file changed, 22 insertions(+), 17 deletions(-)
```diff
diff --git a/mm/readahead.c b/mm/readahead.c
index 539bbb6c1fad..e273f0de3376 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -140,23 +140,23 @@ out:
 }
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
- * the pages first, then submits them all for I/O. This avoids the very bad
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
+ * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
  */
-int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read,
-			unsigned long lookahead_size)
+unsigned int __do_page_cache_readahead(struct address_space *mapping,
+		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
+		unsigned long lookahead_size)
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
 	unsigned long end_index;	/* The last page we want to read */
 	LIST_HEAD(page_pool);
 	int page_idx;
-	int ret = 0;
+	unsigned int nr_pages = 0;
 	loff_t isize = i_size_read(inode);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 
@@ -177,8 +177,18 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		rcu_read_lock();
 		page = radix_tree_lookup(&mapping->i_pages, page_offset);
 		rcu_read_unlock();
-		if (page && !radix_tree_exceptional_entry(page))
+		if (page && !radix_tree_exceptional_entry(page)) {
+			/*
+			 * Page already present?  Kick off the current batch of
+			 * contiguous pages before continuing with the next
+			 * batch.
+			 */
+			if (nr_pages)
+				read_pages(mapping, filp, &page_pool, nr_pages,
+						gfp_mask);
+			nr_pages = 0;
 			continue;
+		}
 
 		page = __page_cache_alloc(gfp_mask);
 		if (!page)
@@ -187,7 +197,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		list_add(&page->lru, &page_pool);
 		if (page_idx == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
-		ret++;
+		nr_pages++;
 	}
 
 	/*
@@ -195,11 +205,11 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	 * uptodate then the caller will launch readpage again, and
 	 * will then handle the error.
 	 */
-	if (ret)
-		read_pages(mapping, filp, &page_pool, ret, gfp_mask);
+	if (nr_pages)
+		read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
 	BUG_ON(!list_empty(&page_pool));
 out:
-	return ret;
+	return nr_pages;
 }
 
 /*
@@ -223,16 +233,11 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
 	nr_to_read = min(nr_to_read, max_pages);
 	while (nr_to_read) {
-		int err;
-
 		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
 
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
-		err = __do_page_cache_readahead(mapping, filp,
-						offset, this_chunk, 0);
-		if (err < 0)
-			return err;
+		__do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);
 
 		offset += this_chunk;
 		nr_to_read -= this_chunk;
```
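For illustration only, a minimal userspace sketch of the batching pattern the readahead hunks above introduce: a page already present in the cache ends the current contiguous run, so the run accumulated so far is submitted before the scan continues, and the function reports how many pages it set up rather than an error code. The helpers `is_cached()` and `submit_batch()` are hypothetical stand-ins for the radix-tree lookup and `read_pages()`; this is not the kernel implementation.

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 16

/* Stand-in for "page already present in the page cache". */
static bool is_cached(unsigned long idx)
{
	return idx == 5 || idx == 6 || idx == 11;
}

/* Stand-in for read_pages(): submit one contiguous run as a single I/O. */
static void submit_batch(unsigned long start, unsigned int nr_pages)
{
	printf("submit I/O for pages [%lu, %lu] (%u pages)\n",
	       start, start + nr_pages - 1, nr_pages);
}

static unsigned int do_readahead(unsigned long offset, unsigned long nr_to_read)
{
	unsigned long batch_start = offset;
	unsigned int nr_pages = 0;	/* pages accumulated in the current run */
	unsigned int total = 0;

	for (unsigned long idx = 0; idx < nr_to_read; idx++) {
		unsigned long page_offset = offset + idx;

		if (is_cached(page_offset)) {
			/* A cached page breaks contiguity: flush the run. */
			if (nr_pages)
				submit_batch(batch_start, nr_pages);
			nr_pages = 0;
			batch_start = page_offset + 1;
			continue;
		}

		/* "Allocate" the page and add it to the pending run. */
		nr_pages++;
		total++;
	}

	/* Submit whatever is left in the final run. */
	if (nr_pages)
		submit_batch(batch_start, nr_pages);

	/* Like the patched function: report pages set up, never an error. */
	return total;
}

int main(void)
{
	unsigned int n = do_readahead(0, NR_PAGES);

	printf("readahead set up %u pages\n", n);
	return 0;
}
```

In the same spirit as the patch, the caller in the sketch ignores "errors" entirely, which mirrors why `force_page_cache_readahead()` can drop its `err` handling once `__do_page_cache_readahead()` returns an unsigned page count.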
