From 5a302308ef86f0030628fd9244e8495007ce9e70 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sun, 2 Jun 2002 03:23:07 -0700 Subject: [PATCH] rename block_symlink() to page_symlink() block_symlink() is not a "block" function at all. It is a pure pagecache/address_space function. Seeing driverfs calling it was the last straw. The patch renames it to `page_symlink()' and moves it into fs/namei.c --- include/linux/buffer_head.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux/buffer_head.h') diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 9bcb2b34df84..38e533d62fa9 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -192,7 +192,6 @@ void FASTCALL(unlock_buffer(struct buffer_head *bh)); */ int try_to_release_page(struct page * page, int gfp_mask); int block_flushpage(struct page *page, unsigned long offset); -int block_symlink(struct inode *, const char *, int); int block_write_full_page(struct page*, get_block_t*); int block_read_full_page(struct page*, get_block_t*); int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*); -- cgit v1.2.3 From 53b478c60dd9914ce1668128ba466188ef42c918 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sun, 2 Jun 2002 03:24:28 -0700 Subject: [PATCH] rename flushpage to invalidatepage Fixes a pet peeve: the identifier "flushpage" implies "flush the page to disk". Which is very much not what the flushpage functions actually do. The patch renames block_flushpage and the flushpage address_space_operation to "invalidatepage". It also fixes a buglet in invalidate_this_page2(), which was calling block_flushpage() directly - it needs to call do_flushpage() (now do_invalidatepage()) so that the filesystem's ->flushpage (now ->invalidatepage) a_op gets a chance to relinquish any interest which it has in the page's buffers. --- Documentation/filesystems/Locking | 10 +++++----- fs/buffer.c | 22 +++++++++++----------- fs/ext3/inode.c | 8 ++++---- fs/jbd/journal.c | 2 +- fs/jbd/transaction.c | 6 +++--- fs/jfs/jfs_metapage.c | 6 +++--- include/linux/buffer_head.h | 2 +- include/linux/fs.h | 2 +- include/linux/jbd.h | 3 ++- mm/filemap.c | 22 +++++++++++----------- 10 files changed, 42 insertions(+), 41 deletions(-) (limited to 'include/linux/buffer_head.h') diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 0bb774bb0c3b..1b73f2a9ea79 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -138,7 +138,7 @@ prototypes: int (*prepare_write)(struct file *, struct page *, unsigned, unsigned); int (*commit_write)(struct file *, struct page *, unsigned, unsigned); int (*bmap)(struct address_space *, long); - int (*flushpage) (struct page *, unsigned long); + int (*invalidatepage) (struct page *, unsigned long); int (*releasepage) (struct page *, int); int (*direct_IO)(int, struct inode *, struct kiobuf *, unsigned long, int); @@ -156,7 +156,7 @@ set_page_dirty no no prepare_write: no yes commit_write: no yes bmap: yes -flushpage: no yes +invalidatepage: no yes releasepage: no yes ->prepare_write(), ->commit_write(), ->sync_page() and ->readpage() @@ -206,10 +206,10 @@ filesystems and by the swapper. The latter will eventually go away. All instances do not actually need the BKL. Please, keep it that way and don't breed new callers. 
- ->flushpage() is called when the filesystem must attempt to drop + ->invalidatepage() is called when the filesystem must attempt to drop some or all of the buffers from the page when it is being truncated. It -returns zero on success. If ->flushpage is zero, the kernel uses -block_flushpage() instead. +returns zero on success. If ->invalidatepage is zero, the kernel uses +block_invalidatepage() instead. ->releasepage() is called when the kernel is about to try to drop the buffers from the page in preparation for freeing it. It returns zero to diff --git a/fs/buffer.c b/fs/buffer.c index 80b01a96ee35..dd21d3ed1ad1 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1341,22 +1341,21 @@ int try_to_release_page(struct page *page, int gfp_mask) } /** - * block_flushpage - invalidate part of all of a buffer-backed page + * block_invalidatepage - invalidate part of all of a buffer-backed page * * @page: the page which is affected * @offset: the index of the truncation point * - * block_flushpage() should be called block_invalidatepage(). It is - * called when all or part of the page has become invalidatedby a truncate - * operation. + * block_invalidatepage() is called when all or part of the page has become + * invalidatedby a truncate operation. * - * block_flushpage() does not have to release all buffers, but it must + * block_invalidatepage() does not have to release all buffers, but it must * ensure that no dirty buffer is left outside @offset and that no I/O * is underway against any of the blocks which are outside the truncation * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ -int block_flushpage(struct page *page, unsigned long offset) +int block_invalidatepage(struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; @@ -1393,7 +1392,7 @@ int block_flushpage(struct page *page, unsigned long offset) return 1; } -EXPORT_SYMBOL(block_flushpage); +EXPORT_SYMBOL(block_invalidatepage); /* * We attach and possibly dirty the buffers atomically wrt @@ -2276,10 +2275,11 @@ int brw_kiovec(int rw, int nr, struct kiobuf *iovec[], * some of the bmap kludges and interface ugliness here. * * NOTE: unlike file pages, swap pages are locked while under writeout. - * This is to avoid a deadlock which occurs when free_swap_and_cache() - * calls block_flushpage() under spinlock and hits a locked buffer, and - * schedules under spinlock. Another approach would be to teach - * find_trylock_page() to also trylock the page's writeback flags. + * This is to throttle processes which reuse their swapcache pages while + * they are under writeout, and to ensure that there is no I/O going on + * when the page has been successfully locked. Functions such as + * free_swap_and_cache() need to guarantee that there is no I/O in progress + * because they will be freeing up swap blocks, which may then be reused. * * Swap pages are also marked PageWriteback when they are being written * so that memory allocators will throttle on them. 
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index aad5acd5d381..b339c253628e 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1364,10 +1364,10 @@ ext3_readpages(struct address_space *mapping, return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); } -static int ext3_flushpage(struct page *page, unsigned long offset) +static int ext3_invalidatepage(struct page *page, unsigned long offset) { journal_t *journal = EXT3_JOURNAL(page->mapping->host); - return journal_flushpage(journal, page, offset); + return journal_invalidatepage(journal, page, offset); } static int ext3_releasepage(struct page *page, int wait) @@ -1385,7 +1385,7 @@ struct address_space_operations ext3_aops = { prepare_write: ext3_prepare_write, /* BKL not held. We take it */ commit_write: ext3_commit_write, /* BKL not held. We take it */ bmap: ext3_bmap, /* BKL held */ - flushpage: ext3_flushpage, /* BKL not held. Don't need */ + invalidatepage: ext3_invalidatepage, /* BKL not held. Don't need */ releasepage: ext3_releasepage, /* BKL not held. Don't need */ }; @@ -1413,7 +1413,7 @@ struct address_space_operations ext3_writeback_aops = { prepare_write: ext3_prepare_write, /* BKL not held. We take it */ commit_write: ext3_commit_write, /* BKL not held. We take it */ bmap: ext3_bmap, /* BKL held */ - flushpage: ext3_flushpage, /* BKL not held. Don't need */ + invalidatepage: ext3_invalidatepage, /* BKL not held. Don't need */ releasepage: ext3_releasepage, /* BKL not held. Don't need */ }; diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index f20e9f4e8c35..052dd4ef3f01 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -78,7 +78,7 @@ EXPORT_SYMBOL(log_wait_commit); EXPORT_SYMBOL(log_start_commit); EXPORT_SYMBOL(journal_wipe); EXPORT_SYMBOL(journal_blocks_per_page); -EXPORT_SYMBOL(journal_flushpage); +EXPORT_SYMBOL(journal_invalidatepage); EXPORT_SYMBOL(journal_try_to_free_buffers); EXPORT_SYMBOL(journal_bmap); EXPORT_SYMBOL(journal_force_commit); diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 82886e98f26f..89c625bf9fa8 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -1749,13 +1749,13 @@ static int dispose_buffer(struct journal_head *jh, } /* - * journal_flushpage + * journal_invalidatepage * * This code is tricky. It has a number of cases to deal with. * * There are two invariants which this code relies on: * - * i_size must be updated on disk before we start calling flushpage on the + * i_size must be updated on disk before we start calling invalidatepage on the * data. 
* * This is done in ext3 by defining an ext3_setattr method which @@ -1891,7 +1891,7 @@ zap_buffer: /* * Return non-zero if the page's buffers were successfully reaped */ -int journal_flushpage(journal_t *journal, +int journal_invalidatepage(journal_t *journal, struct page *page, unsigned long offset) { diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 7d0e39c683b7..37aaec2e67f7 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -537,7 +537,7 @@ void release_metapage(metapage_t * mp) if (test_bit(META_discard, &mp->flag)) { lock_page(mp->page); - block_flushpage(mp->page, 0); + block_invalidatepage(mp->page, 0); unlock_page(mp->page); } @@ -587,13 +587,13 @@ void invalidate_metapages(struct inode *ip, unsigned long addr, set_bit(META_discard, &mp->flag); spin_unlock(&meta_lock); lock_page(mp->page); - block_flushpage(mp->page, 0); + block_invalidatepage(mp->page, 0); unlock_page(mp->page); } else { spin_unlock(&meta_lock); page = find_lock_page(mapping, lblock>>l2BlocksPerPage); if (page) { - block_flushpage(page, 0); + block_invalidatepage(page, 0); unlock_page(page); } } diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 38e533d62fa9..90767fc78617 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -191,7 +191,7 @@ void FASTCALL(unlock_buffer(struct buffer_head *bh)); * address_spaces. */ int try_to_release_page(struct page * page, int gfp_mask); -int block_flushpage(struct page *page, unsigned long offset); +int block_invalidatepage(struct page *page, unsigned long offset); int block_write_full_page(struct page*, get_block_t*); int block_read_full_page(struct page*, get_block_t*); int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*); diff --git a/include/linux/fs.h b/include/linux/fs.h index f4ae9af76b69..839dfbd712e2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -306,7 +306,7 @@ struct address_space_operations { int (*commit_write)(struct file *, struct page *, unsigned, unsigned); /* Unfortunately this kludge is needed for FIBMAP. 
Don't use it */ int (*bmap)(struct address_space *, long); - int (*flushpage) (struct page *, unsigned long); + int (*invalidatepage) (struct page *, unsigned long); int (*releasepage) (struct page *, int); #define KERNEL_HAS_O_DIRECT /* this is for modules out of the kernel */ int (*direct_IO)(int, struct inode *, struct kiobuf *, unsigned long, int); diff --git a/include/linux/jbd.h b/include/linux/jbd.h index d0c52537a274..835d38c9dbfc 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -641,7 +641,8 @@ extern int journal_dirty_metadata (handle_t *, struct buffer_head *); extern void journal_release_buffer (handle_t *, struct buffer_head *); extern void journal_forget (handle_t *, struct buffer_head *); extern void journal_sync_buffer (struct buffer_head *); -extern int journal_flushpage(journal_t *, struct page *, unsigned long); +extern int journal_invalidatepage(journal_t *, + struct page *, unsigned long); extern int journal_try_to_free_buffers(journal_t *, struct page *, int); extern int journal_stop(handle_t *); extern int journal_flush (journal_t *); diff --git a/mm/filemap.c b/mm/filemap.c index bf7be669af45..a8283c49fcd0 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -23,7 +23,7 @@ /* * This is needed for the following functions: * - try_to_release_page - * - block_flushpage + * - block_invalidatepage * - page_has_buffers * - generic_osync_inode * @@ -154,30 +154,30 @@ unlock: spin_unlock(&pagemap_lru_lock); } -static int do_flushpage(struct page *page, unsigned long offset) +static int do_invalidatepage(struct page *page, unsigned long offset) { - int (*flushpage) (struct page *, unsigned long); - flushpage = page->mapping->a_ops->flushpage; - if (flushpage) - return (*flushpage)(page, offset); - return block_flushpage(page, offset); + int (*invalidatepage)(struct page *, unsigned long); + invalidatepage = page->mapping->a_ops->invalidatepage; + if (invalidatepage) + return (*invalidatepage)(page, offset); + return block_invalidatepage(page, offset); } static inline void truncate_partial_page(struct page *page, unsigned partial) { memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial); if (PagePrivate(page)) - do_flushpage(page, partial); + do_invalidatepage(page, partial); } /* * AKPM: the PagePrivate test here seems a bit bogus. It bypasses the - * mapping's ->flushpage, which may still want to be called. + * mapping's ->invalidatepage, which may still want to be called. */ static void truncate_complete_page(struct page *page) { /* Leave it on the LRU if it gets converted into anonymous buffers */ - if (!PagePrivate(page) || do_flushpage(page, 0)) + if (!PagePrivate(page) || do_invalidatepage(page, 0)) lru_cache_del(page); ClearPageDirty(page); ClearPageUptodate(page); @@ -339,7 +339,7 @@ static inline int invalidate_this_page2(struct address_space * mapping, page_cache_get(page); write_unlock(&mapping->page_lock); - block_flushpage(page, 0); + do_invalidatepage(page, 0); } else unlocked = 0; -- cgit v1.2.3
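
For reference, a minimal sketch of how a filesystem would hook the renamed
operation once both patches are applied. The "examplefs" names below are
invented for illustration; only the ->invalidatepage wiring and the NULL
fallback follow from the patches above (see do_invalidatepage() in
mm/filemap.c). The renamed symlink helper is assumed to keep block_symlink()'s
old prototype — only its removal from buffer_head.h is shown in the first diff,
so its new declaration site is an assumption, not part of the patch.

	/*
	 * Renamed helper from the first patch.  Signature assumed identical to
	 * the block_symlink() prototype removed from buffer_head.h; the
	 * implementation now lives in fs/namei.c.
	 */
	int page_symlink(struct inode *inode, const char *symname, int len);

	/*
	 * Hypothetical filesystem wiring the renamed address_space operation.
	 * Other a_ops entries are omitted for brevity.
	 */
	static int examplefs_invalidatepage(struct page *page, unsigned long offset)
	{
		/*
		 * Drop this filesystem's own interest in buffers beyond
		 * @offset, then let the generic buffer-layer helper
		 * invalidate them.
		 */
		return block_invalidatepage(page, offset);
	}

	static struct address_space_operations examplefs_aops = {
		invalidatepage:	examplefs_invalidatepage,	/* BKL not held */
		/*
		 * If this field is left NULL, do_invalidatepage() in
		 * mm/filemap.c falls back to block_invalidatepage().
		 */
	};

A filesystem with per-page journalling state does real work here rather than
calling block_invalidatepage() directly: ext3's ext3_invalidatepage(), shown
above, calls journal_invalidatepage() so the journal can relinquish its own
references to the page's buffers before they are freed.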