| author | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
| commit | 7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch) | |
| tree | 879f18ccbe274122f2d4f095b43cbc7f953e0ada /fs/btrfs/extent_io.c | |
| parent | 48e315618dc4dc8904182cd221e3d395d5d97005 (diff) | |
| parent | 9ffc59d57228d74809700be6f7ecb1db10292f05 (diff) | |
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'fs/btrfs/extent_io.c')
| -rw-r--r-- | fs/btrfs/extent_io.c | 91 | 
1 file changed, 22 insertions(+), 69 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e99b329002cf..cce6087d6880 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -26,7 +26,7 @@
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
-static struct bio_set *btrfs_bioset;
+static struct bio_set btrfs_bioset;
 
 static inline bool extent_state_in_tree(const struct extent_state *state)
 {
@@ -162,20 +162,18 @@ int __init extent_io_init(void)
 	if (!extent_buffer_cache)
 		goto free_state_cache;
 
-	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
-				     offsetof(struct btrfs_io_bio, bio),
-				     BIOSET_NEED_BVECS);
-	if (!btrfs_bioset)
+	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
+			offsetof(struct btrfs_io_bio, bio),
+			BIOSET_NEED_BVECS))
 		goto free_buffer_cache;
 
-	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
+	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
 		goto free_bioset;
 
 	return 0;
 
 free_bioset:
-	bioset_free(btrfs_bioset);
-	btrfs_bioset = NULL;
+	bioset_exit(&btrfs_bioset);
 
 free_buffer_cache:
 	kmem_cache_destroy(extent_buffer_cache);
@@ -198,8 +196,7 @@ void __cold extent_io_exit(void)
 	rcu_barrier();
 	kmem_cache_destroy(extent_state_cache);
 	kmem_cache_destroy(extent_buffer_cache);
-	if (btrfs_bioset)
-		bioset_free(btrfs_bioset);
+	bioset_exit(&btrfs_bioset);
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
@@ -2679,7 +2676,7 @@ struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
 {
 	struct bio *bio;
 
-	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset);
+	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
 	bio_set_dev(bio, bdev);
 	bio->bi_iter.bi_sector = first_byte >> 9;
 	btrfs_io_bio_init(btrfs_io_bio(bio));
@@ -2692,7 +2689,7 @@ struct bio *btrfs_bio_clone(struct bio *bio)
 	struct bio *new;
 
 	/* Bio allocation backed by a bioset does not fail */
-	new = bio_clone_fast(bio, GFP_NOFS, btrfs_bioset);
+	new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
 	btrfs_bio = btrfs_io_bio(new);
 	btrfs_io_bio_init(btrfs_bio);
 	btrfs_bio->iter = bio->bi_iter;
@@ -2704,7 +2701,7 @@ struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
 	struct bio *bio;
 
 	/* Bio allocation backed by a bioset does not fail */
-	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, btrfs_bioset);
+	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
 	btrfs_io_bio_init(btrfs_io_bio(bio));
 	return bio;
 }
@@ -2715,7 +2712,7 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
 	struct btrfs_io_bio *btrfs_bio;
 
 	/* this will never fail when it's backed by a bioset */
-	bio = bio_clone_fast(orig, GFP_NOFS, btrfs_bioset);
+	bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
 	ASSERT(bio);
 
 	btrfs_bio = btrfs_io_bio(bio);
@@ -4109,14 +4106,13 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
 	return ret;
 }
 
-int extent_writepages(struct extent_io_tree *tree,
-		      struct address_space *mapping,
+int extent_writepages(struct address_space *mapping,
 		      struct writeback_control *wbc)
 {
 	int ret = 0;
 	struct extent_page_data epd = {
 		.bio = NULL,
-		.tree = tree,
+		.tree = &BTRFS_I(mapping->host)->io_tree,
 		.extent_locked = 0,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
@@ -4126,9 +4122,8 @@ int extent_writepages(struct extent_io_tree *tree,
 	return ret;
 }
 
-int extent_readpages(struct extent_io_tree *tree,
-		     struct address_space *mapping,
-		     struct list_head *pages, unsigned nr_pages)
+int extent_readpages(struct address_space *mapping, struct list_head *pages,
+		     unsigned nr_pages)
 {
 	struct bio *bio = NULL;
 	unsigned page_idx;
@@ -4136,6 +4131,7 @@ int extent_readpages(struct extent_io_tree *tree,
 	struct page *pagepool[16];
 	struct page *page;
 	struct extent_map *em_cached = NULL;
+	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
 	int nr = 0;
 	u64 prev_em_start = (u64)-1;
 
@@ -4202,8 +4198,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
  * are locked or under IO and drops the related state bits if it is safe
  * to drop the page.
  */
-static int try_release_extent_state(struct extent_map_tree *map,
-				    struct extent_io_tree *tree,
+static int try_release_extent_state(struct extent_io_tree *tree,
 				    struct page *page, gfp_t mask)
 {
 	u64 start = page_offset(page);
@@ -4238,13 +4233,13 @@ static int try_release_extent_state(struct extent_map_tree *map,
  * in the range corresponding to the page, both state records and extent
  * map records are removed
  */
-int try_release_extent_mapping(struct extent_map_tree *map,
-			       struct extent_io_tree *tree, struct page *page,
-			       gfp_t mask)
+int try_release_extent_mapping(struct page *page, gfp_t mask)
 {
 	struct extent_map *em;
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_SIZE - 1;
+	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
+	struct extent_map_tree *map = &BTRFS_I(page->mapping->host)->extent_tree;
 
 	if (gfpflags_allow_blocking(mask) &&
 	    page->mapping->host->i_size > SZ_16M) {
@@ -4278,7 +4273,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 			free_extent_map(em);
 		}
 	}
-	return try_release_extent_state(map, tree, page, mask);
+	return try_release_extent_state(tree, page, mask);
 }
 
 /*
@@ -4547,7 +4542,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			offset_in_extent = em_start - em->start;
 		em_end = extent_map_end(em);
 		em_len = em_end - em_start;
-		disko = 0;
+		disko = em->block_start + offset_in_extent;
 		flags = 0;
 
 		/*
@@ -4570,8 +4565,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			u64 bytenr = em->block_start -
 				(em->start - em->orig_start);
 
-			disko = em->block_start + offset_in_extent;
-
 			/*
 			 * As btrfs supports shared space, this information
 			 * can be exported to userspace tools via
@@ -5620,46 +5613,6 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 	}
 }
 
-void le_bitmap_set(u8 *map, unsigned int start, int len)
-{
-	u8 *p = map + BIT_BYTE(start);
-	const unsigned int size = start + len;
-	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
-	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);
-
-	while (len - bits_to_set >= 0) {
-		*p |= mask_to_set;
-		len -= bits_to_set;
-		bits_to_set = BITS_PER_BYTE;
-		mask_to_set = ~0;
-		p++;
-	}
-	if (len) {
-		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
-		*p |= mask_to_set;
-	}
-}
-
-void le_bitmap_clear(u8 *map, unsigned int start, int len)
-{
-	u8 *p = map + BIT_BYTE(start);
-	const unsigned int size = start + len;
-	int bits_to_clear = BITS_PER_BYTE - (start % BITS_PER_BYTE);
-	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(start);
-
-	while (len - bits_to_clear >= 0) {
-		*p &= ~mask_to_clear;
-		len -= bits_to_clear;
-		bits_to_clear = BITS_PER_BYTE;
-		mask_to_clear = ~0;
-		p++;
-	}
-	if (len) {
-		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
-		*p &= ~mask_to_clear;
-	}
-}
-
 /*
  * eb_bitmap_offset() - calculate the page and offset of the byte containing the
  * given bit number
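Note on the bioset hunks above: the diff converts `btrfs_bioset` from a heap-allocated `struct bio_set *` (set up with `bioset_create()` and torn down with `bioset_free()`) into an embedded `struct bio_set` managed with `bioset_init()`/`bioset_exit()`, which is why the NULL check before teardown disappears and all users pass `&btrfs_bioset`. The sketch below is a minimal illustration of that pattern, not btrfs code: the `demo_*` names are invented for the example, and `bio_alloc_bioset()` is used with the three-argument signature that appears in this diff.

```c
/*
 * Minimal sketch of the embedded bio_set pattern this diff converts to.
 * All demo_* identifiers are illustrative only; bioset_init(),
 * bioset_exit(), bio_alloc_bioset(), BIO_POOL_SIZE and BIOSET_NEED_BVECS
 * are the kernel APIs used in the hunks above.
 */
#include <linux/bio.h>
#include <linux/init.h>

static struct bio_set demo_bioset;	/* embedded object, not a pointer */

static int __init demo_init(void)
{
	/* bioset_init() returns 0 on success, non-zero on failure */
	if (bioset_init(&demo_bioset, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	/* Release the pools held by the embedded set */
	bioset_exit(&demo_bioset);
}

static struct bio *demo_alloc_bio(unsigned int nr_iovecs)
{
	/* Bio allocation backed by a bioset does not fail */
	return bio_alloc_bioset(GFP_NOFS, nr_iovecs, &demo_bioset);
}
```

Because the set is embedded rather than allocated, callers only track whether `bioset_init()` succeeded; there is no separate pointer whose allocation can fail or that needs to be reset to NULL on the error path.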
