| author | Dave Kleikamp <shaggy@kleikamp.austin.ibm.com> | 2002-04-09 21:23:02 -0500 |
|---|---|---|
| committer | Dave Kleikamp <shaggy@kleikamp.austin.ibm.com> | 2002-04-09 21:23:02 -0500 |
| commit | 0ec6ef22403a1b8bfa4e726c15060d469e6fb548 | |
| tree | 5f227f8a1740471146063696272f063e363d9b12 | |
| parent | 22e962f9b7a7abbc2d17ceaf3917bb8e67b68a8f | |
Export discard_bh_page and use block_flushpage in JFS
Note: discard_bh_page is already exported in 2.4.
The 2.5 version of JFS currently works around block_flushpage not being
available to modules by abusing generic_buffer_fdatasync instead.
Exporting discard_bh_page, as 2.4 already does, solves this.
Submitted by Christoph Hellwig
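
For context, block_flushpage() in kernels of this vintage is a thin wrapper around discard_bh_page(), which is why exporting the latter is enough to let a module use the former. The sketch below is illustrative only and not part of the patch: the quoted macro is the 2.4-style definition (an assumption about this 2.5 tree), and jfs_discard_page is a hypothetical helper name showing the single discard path that module and built-in builds can now share.

```c
/*
 * Sketch only, not part of the patch.  Assumption: as in 2.4,
 * block_flushpage() expands to discard_bh_page(), roughly
 *
 *	#define block_flushpage(page, offset)  discard_bh_page(page, offset, 1)
 *
 * so EXPORT_SYMBOL(discard_bh_page) is all a modular JFS needs.  With that,
 * the #ifdef MODULE fallback to generic_buffer_fdatasync() (which writes the
 * stale buffers out instead of discarding them) can go away:
 */
static inline void jfs_discard_page(struct page *page)	/* hypothetical name */
{
	lock_page(page);
	block_flushpage(page, 0);	/* drop any buffer_heads on the page */
	UnlockPage(page);
}
```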
| -rw-r--r-- | fs/jfs/jfs_metapage.c | 50 |
| -rw-r--r-- | kernel/ksyms.c | 1 |

2 files changed, 7 insertions, 44 deletions
```diff
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index dba6e88fc67b..29a7c34c0031 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -91,32 +91,6 @@ static inline void lock_metapage(struct metapage *mp)
 		__lock_metapage(mp);
 }
 
-/* We're currently re-evaluating the method we use to write metadata
- * pages. Currently, we have to make sure there no dirty buffer_heads
- * hanging around after we free the metadata page, since the same
- * physical disk blocks may be used in a different address space and we
- * can't write old data over the good data.
- *
- * The best way to do this now is with block_invalidate_page. However,
- * this is only available in the newer kernels and is not exported
- * to modules. block_flushpage is the next best, but it too is not exported
- * to modules.
- *
- * In a module, about the best we have is generic_buffer_fdatasync. This
- * synchronously writes any dirty buffers. This is not optimal, but it will
- * keep old dirty buffers from overwriting newer data.
- */
-static inline void invalidate_page(metapage_t *mp)
-{
-#ifdef MODULE
-	generic_buffer_fdatasync(mp->mapping->host, mp->index, mp->index + 1);
-#else
-	lock_page(mp->page);
-	block_flushpage(mp->page, 0);
-	UnlockPage(mp->page);
-#endif
-}
-
 int __init metapage_init(void)
 {
 	int i;
@@ -559,8 +533,11 @@ void release_metapage(metapage_t * mp)
 		clear_bit(META_sync, &mp->flag);
 	}
 
-	if (test_bit(META_discard, &mp->flag))
-		invalidate_page(mp);
+	if (test_bit(META_discard, &mp->flag)) {
+		lock_page(mp->page);
+		block_flushpage(mp->page, 0);
+		UnlockPage(mp->page);
+	}
 
 	page_cache_release(mp->page);
 	INCREMENT(mpStat.pagefree);
@@ -593,9 +570,7 @@ void invalidate_metapages(struct inode *ip, unsigned long addr,
 	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_sb->s_blocksize_bits;
 	struct address_space *mapping = ip->i_mapping;
 	metapage_t *mp;
-#ifndef MODULE
 	struct page *page;
-#endif
 
 	/*
 	 * First, mark metapages to discard. They will eventually be
@@ -612,27 +587,14 @@ void invalidate_metapages(struct inode *ip, unsigned long addr,
 			/*
 			 * If in the metapage cache, we've got the page locked
 			 */
-#ifdef MODULE
-			UnlockPage(mp->page);
-			generic_buffer_fdatasync(mp->mapping->host, mp->index,
-						 mp->index+1);
-			lock_page(mp->page);
-#else
 			block_flushpage(mp->page, 0);
-#endif
 		} else {
 			spin_unlock(&meta_lock);
-#ifdef MODULE
-			generic_buffer_fdatasync(ip, lblock << l2BlocksPerPage,
-						 (lblock + 1) << l2BlocksPerPage);
-#else
-			page = find_lock_page(mapping,
-					      lblock >> l2BlocksPerPage);
+			page = find_lock_page(mapping, lblock>>l2BlocksPerPage);
 			if (page) {
 				block_flushpage(page, 0);
 				UnlockPage(page);
 			}
-#endif
 		}
 	}
 }
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 519a500fb547..5c70afbe2d28 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -209,6 +209,7 @@ EXPORT_SYMBOL(unlock_buffer);
 EXPORT_SYMBOL(__wait_on_buffer);
 EXPORT_SYMBOL(___wait_on_page);
 EXPORT_SYMBOL(generic_direct_IO);
+EXPORT_SYMBOL(discard_bh_page);
 EXPORT_SYMBOL(block_write_full_page);
 EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_prepare_write);
```
