| author | Andrew Morton <akpm@zip.com.au> | 2002-04-29 23:52:37 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-04-29 23:52:37 -0700 |
| commit | aa78091f0f2860df00a4ccc5d4609afd9d831dd0 | |
| tree | fe91202ae1fed74c403bf6f7cc9caf5a3f2ca5da /mm | |
| parent | 68872e78c90ddfe683bf1c10ee93323f6d640251 | |
[PATCH] cleanup page flags
page->flags cleanup.
Moves the definitions of the page->flags bits and all the PageFoo
macros into linux/page-flags.h. That file is currently included from
mm.h, but the stage is set to remove that and include page-flags.h
directly in all .c files which require it (120 of them).
The patch also makes all the page flag macros and functions consistent:
For PG_foo, the following functions are defined:
SetPageFoo
ClearPageFoo
TestSetPageFoo
TestClearPageFoo
PageFoo
and that's it.
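For reference, here is a hedged sketch of what this convention expands to, built on the kernel's atomic bitops (test_bit() and friends). PG_foo/PageFoo are placeholders, and this is not the verbatim linux/page-flags.h text:

```c
/*
 * Illustrative sketch only. Each PG_foo bit in page->flags gets
 * exactly this five-accessor family, wrapping the kernel's atomic
 * bitop primitives:
 */
#define PageFoo(page)		test_bit(PG_foo, &(page)->flags)
#define SetPageFoo(page)	set_bit(PG_foo, &(page)->flags)
#define ClearPageFoo(page)	clear_bit(PG_foo, &(page)->flags)
#define TestSetPageFoo(page)	test_and_set_bit(PG_foo, &(page)->flags)
#define TestClearPageFoo(page)	test_and_clear_bit(PG_foo, &(page)->flags)
```

The Test* variants return the old bit value atomically, which is what makes TestSetPageLocked usable as a try-lock.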
- Page_Uptodate is renamed to PageUptodate
- LockPage is removed. All users updated to use SetPageLocked
- UnlockPage is removed. All callers updated to use unlock_page().
It's a real function - there's no need to hide that fact.
- PageTestandClearReferenced renamed to TestClearPageReferenced
- PageSetSlab renamed to SetPageSlab
- __SetPageReserved is removed. It's an infinitesimally small
microoptimisation, and is inconsistent.
- TryLockPage is renamed to TestSetPageLocked
- PageSwapCache() is renamed to page_swap_cache(), so it doesn't
pretend to be a page->flags bit test.
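For callers the conversions are mechanical. An illustrative fragment (assumed for exposition, not taken from the patch) showing the common try-lock idiom after renaming:

```c
/* Hypothetical caller; old names shown in comments. */
if (TestSetPageLocked(page))	/* was: TryLockPage(page) */
	return 0;		/* somebody else holds PG_locked */
if (!PageUptodate(page))	/* was: Page_Uptodate(page) */
	read_page_in(page);	/* placeholder for the real I/O path */
unlock_page(page);		/* was: UnlockPage(page) */
```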
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/filemap.c | 83 |
| -rw-r--r-- | mm/memory.c | 10 |
| -rw-r--r-- | mm/mincore.c | 2 |
| -rw-r--r-- | mm/page-writeback.c | 2 |
| -rw-r--r-- | mm/page_alloc.c | 2 |
| -rw-r--r-- | mm/page_io.c | 4 |
| -rw-r--r-- | mm/shmem.c | 16 |
| -rw-r--r-- | mm/slab.c | 4 |
| -rw-r--r-- | mm/swap_state.c | 6 |
| -rw-r--r-- | mm/swapfile.c | 4 |
| -rw-r--r-- | mm/vmscan.c | 25 |
11 files changed, 84 insertions, 74 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index d95ba0691800..80211d1aa33e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -120,7 +120,7 @@ void invalidate_inode_pages(struct inode * inode)
 			continue;
 		/* ..or locked */
-		if (TryLockPage(page))
+		if (TestSetPageLocked(page))
 			continue;
 		if (PagePrivate(page) && !try_to_release_page(page, 0))
@@ -131,11 +131,11 @@ void invalidate_inode_pages(struct inode * inode)
 		__lru_cache_del(page);
 		__remove_inode_page(page);
-		UnlockPage(page);
+		unlock_page(page);
 		page_cache_release(page);
 		continue;
 unlock:
-		UnlockPage(page);
+		unlock_page(page);
 		continue;
 	}
@@ -201,7 +201,7 @@ static int truncate_list_pages(struct address_space *mapping,
 		int failed;
 		page_cache_get(page);
-		failed = TryLockPage(page);
+		failed = TestSetPageLocked(page);
 		list_del(head);
 		if (!failed)
@@ -221,7 +221,7 @@ static int truncate_list_pages(struct address_space *mapping,
 			} else
 				truncate_complete_page(page);
-			UnlockPage(page);
+			unlock_page(page);
 		} else
 			wait_on_page(page);
@@ -321,11 +321,11 @@ static int invalidate_list_pages2(struct address_space * mapping,
 	while (curr != head) {
 		page = list_entry(curr, struct page, list);
-		if (!TryLockPage(page)) {
+		if (!TestSetPageLocked(page)) {
 			int __unlocked;
 			__unlocked = invalidate_this_page2(mapping, page, curr, head);
-			UnlockPage(page);
+			unlock_page(page);
 			unlocked |= __unlocked;
 			if (!__unlocked) {
 				curr = curr->prev;
@@ -405,7 +405,7 @@ static int do_buffer_fdatasync(struct address_space *mapping,
 		if (page_has_buffers(page))
 			retval |= fn(page);
-		UnlockPage(page);
+		unlock_page(page);
 		write_lock(&mapping->page_lock);
 		curr = page->list.next;
 		page_cache_release(page);
@@ -470,7 +470,7 @@ int fail_writepage(struct page *page)
 	/* Set the page dirty again, unlock */
 	SetPageDirty(page);
-	UnlockPage(page);
+	unlock_page(page);
 	return 0;
 }
@@ -659,6 +659,11 @@ void ___wait_on_page(struct page *page)
 /*
  * Unlock the page and wake up sleepers in ___wait_on_page.
+ *
+ * The first mb is necessary to safely close the critical section opened by the
+ * TryLockPage(), the second mb is necessary to enforce ordering between
+ * the clear_bit and the read of the waitqueue (to avoid SMP races with a
+ * parallel wait_on_page).
  */
 void unlock_page(struct page *page)
 {
@@ -689,7 +694,7 @@ static void __lock_page(struct page *page)
 			sync_page(page);
 			schedule();
 		}
-		if (!TryLockPage(page))
+		if (!TestSetPageLocked(page))
 			break;
 	}
 	__set_task_state(tsk, TASK_RUNNING);
@@ -708,7 +713,7 @@ EXPORT_SYMBOL(wake_up_page);
  */
 void lock_page(struct page *page)
 {
-	if (TryLockPage(page))
+	if (TestSetPageLocked(page))
 		__lock_page(page);
 }
@@ -741,7 +746,7 @@ struct page *find_trylock_page(struct address_space *mapping, unsigned long offs
 	read_lock(&mapping->page_lock);
 	page = radix_tree_lookup(&mapping->page_tree, offset);
-	if (page && TryLockPage(page))
+	if (page && TestSetPageLocked(page))
 		page = NULL;
 	read_unlock(&mapping->page_lock);
 	return page;
@@ -765,14 +770,14 @@ repeat:
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page) {
 		page_cache_get(page);
-		if (TryLockPage(page)) {
+		if (TestSetPageLocked(page)) {
 			write_unlock(&mapping->page_lock);
 			lock_page(page);
 			write_lock(&mapping->page_lock);
 			/* Has the page been truncated while we slept? */
 			if (page->mapping != mapping || page->index != offset) {
-				UnlockPage(page);
+				unlock_page(page);
 				page_cache_release(page);
 				goto repeat;
 			}
@@ -881,12 +886,12 @@ struct page *grab_cache_page_nowait(struct address_space *mapping, unsigned long
 	page = find_get_page(mapping, index);
 	if ( page ) {
-		if ( !TryLockPage(page) ) {
+		if ( !TestSetPageLocked(page) ) {
 			/* Page found and locked */
 			/* This test is overly paranoid, but what the heck... */
 			if ( unlikely(page->mapping != mapping || page->index != index) ) {
 				/* Someone reallocated this page under us. */
-				UnlockPage(page);
+				unlock_page(page);
 				page_cache_release(page);
 				return NULL;
 			} else {
@@ -990,7 +995,7 @@ found_page:
 		page_cache_get(page);
 		write_unlock(&mapping->page_lock);
-		if (!Page_Uptodate(page))
+		if (!PageUptodate(page))
 			goto page_not_up_to_date;
 page_ok:
 		/* If users can be writing to this page using arbitrary
@@ -1027,7 +1032,7 @@ page_ok:
 			break;
 page_not_up_to_date:
-		if (Page_Uptodate(page))
+		if (PageUptodate(page))
 			goto page_ok;
 		/* Get exclusive access to the page ... */
@@ -1035,14 +1040,14 @@ page_not_up_to_date:
 		/* Did it get unhashed before we got the lock? */
 		if (!page->mapping) {
-			UnlockPage(page);
+			unlock_page(page);
 			page_cache_release(page);
 			continue;
 		}
 		/* Did somebody else fill it already? */
-		if (Page_Uptodate(page)) {
-			UnlockPage(page);
+		if (PageUptodate(page)) {
+			unlock_page(page);
 			goto page_ok;
 		}
@@ -1051,10 +1056,10 @@ readpage:
 		error = mapping->a_ops->readpage(filp, page);
 		if (!error) {
-			if (Page_Uptodate(page))
+			if (PageUptodate(page))
 				goto page_ok;
 			wait_on_page(page);
-			if (Page_Uptodate(page))
+			if (PageUptodate(page))
 				goto page_ok;
 			error = -EIO;
 		}
@@ -1518,7 +1523,7 @@ retry_find:
 	 * Ok, found a page in the page cache, now we need to check
 	 * that it's up-to-date.
 	 */
-	if (!Page_Uptodate(page))
+	if (!PageUptodate(page))
 		goto page_not_uptodate;
 success:
@@ -1559,20 +1564,20 @@ page_not_uptodate:
 	/* Did it get unhashed while we waited for it? */
 	if (!page->mapping) {
-		UnlockPage(page);
+		unlock_page(page);
 		page_cache_release(page);
 		goto retry_all;
 	}
 	/* Did somebody else get it up-to-date? */
-	if (Page_Uptodate(page)) {
-		UnlockPage(page);
+	if (PageUptodate(page)) {
+		unlock_page(page);
 		goto success;
 	}
 	if (!mapping->a_ops->readpage(file, page)) {
 		wait_on_page(page);
-		if (Page_Uptodate(page))
+		if (PageUptodate(page))
 			goto success;
 	}
@@ -1586,20 +1591,20 @@ page_not_uptodate:
 	/* Somebody truncated the page on us? */
 	if (!page->mapping) {
-		UnlockPage(page);
+		unlock_page(page);
 		page_cache_release(page);
 		goto retry_all;
 	}
 	/* Somebody else successfully read it in? */
-	if (Page_Uptodate(page)) {
-		UnlockPage(page);
+	if (PageUptodate(page)) {
+		unlock_page(page);
 		goto success;
 	}
 	ClearPageError(page);
 	if (!mapping->a_ops->readpage(file, page)) {
 		wait_on_page(page);
-		if (Page_Uptodate(page))
+		if (PageUptodate(page))
 			goto success;
 	}
@@ -2001,7 +2006,7 @@ repeat:
 /*
  * Read into the page cache. If a page already exists,
- * and Page_Uptodate() is not set, try to fill the page.
+ * and PageUptodate() is not set, try to fill the page.
  */
 struct page *read_cache_page(struct address_space *mapping, unsigned long index,
@@ -2016,17 +2021,17 @@ retry:
 	if (IS_ERR(page))
 		goto out;
 	mark_page_accessed(page);
-	if (Page_Uptodate(page))
+	if (PageUptodate(page))
 		goto out;
 	lock_page(page);
 	if (!page->mapping) {
-		UnlockPage(page);
+		unlock_page(page);
 		page_cache_release(page);
 		goto retry;
 	}
-	if (Page_Uptodate(page)) {
-		UnlockPage(page);
+	if (PageUptodate(page)) {
+		unlock_page(page);
 		goto out;
 	}
 	err = filler(data, page);
@@ -2271,7 +2276,7 @@ unlock:
 		kunmap(page);
 		/* Mark it unlocked again and drop the page.. */
 		SetPageReferenced(page);
-		UnlockPage(page);
+		unlock_page(page);
 		page_cache_release(page);
 		if (status < 0)
@@ -2307,7 +2312,7 @@ sync_failure:
 	 * few blocks outside i_size. Trim these off again.
 	 */
 	kunmap(page);
-	UnlockPage(page);
+	unlock_page(page);
 	page_cache_release(page);
 	if (pos + bytes > inode->i_size)
 		vmtruncate(inode, inode->i_size);
diff --git a/mm/memory.c b/mm/memory.c
index 0bad3cf9636c..b3158d2574ae 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -653,7 +653,7 @@ void unmap_kiobuf (struct kiobuf *iobuf)
 		map = iobuf->maplist[i];
 		if (map) {
 			if (iobuf->locked)
-				UnlockPage(map);
+				unlock_page(map);
 			/* FIXME: cache flush missing for rw==READ
 			 * FIXME: call the correct reference counting function
 			 */
@@ -698,11 +698,11 @@ int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
 			if (!page)
 				continue;
-			if (TryLockPage(page)) {
+			if (TestSetPageLocked(page)) {
 				while (j--) {
 					struct page *tmp = *--ppage;
 					if (tmp)
-						UnlockPage(tmp);
+						unlock_page(tmp);
 				}
 				goto retry;
 			}
@@ -768,7 +768,7 @@ int unlock_kiovec(int nr, struct kiobuf *iovec[])
 			page = *ppage;
 			if (!page)
 				continue;
-			UnlockPage(page);
+			unlock_page(page);
 		}
 	}
 	return 0;
@@ -982,7 +982,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 	if (!VALID_PAGE(old_page))
 		goto bad_wp_page;
-	if (!TryLockPage(old_page)) {
+	if (!TestSetPageLocked(old_page)) {
 		int reuse = can_share_swap_page(old_page);
 		unlock_page(old_page);
 		if (reuse) {
diff --git a/mm/mincore.c b/mm/mincore.c
index 21ce8614da80..1fd140582547 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -31,7 +31,7 @@ static unsigned char mincore_page(struct vm_area_struct * vma,
 	page = find_get_page(as, pgoff);
 	if (page) {
-		present = Page_Uptodate(page);
+		present = PageUptodate(page);
 		page_cache_release(page);
 	}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 521971726ada..940f2e673693 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -355,7 +355,7 @@ int generic_writeback_mapping(struct address_space *mapping, int *nr_to_write)
 				done = 1;
 			}
 		} else
-			UnlockPage(page);
+			unlock_page(page);
 		page_cache_release(page);
 		write_lock(&mapping->page_lock);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 041aa8b944a1..b4d6cb34d7e1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -879,7 +879,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 			struct page *page = mem_map + offset + i;
 			set_page_zone(page, nid * MAX_NR_ZONES + j);
 			set_page_count(page, 0);
-			__SetPageReserved(page);
+			SetPageReserved(page);
 			memlist_init(&page->list);
 			if (j != ZONE_HIGHMEM)
 				set_page_address(page, __va(zone_start_paddr));
diff --git a/mm/page_io.c b/mm/page_io.c
index cacb3952178f..cc615a809b51 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -99,7 +99,7 @@ void rw_swap_page(int rw, struct page *page)
 	if (page->mapping != &swapper_space)
 		PAGE_BUG(page);
 	if (!rw_swap_page_base(rw, entry, page))
-		UnlockPage(page);
+		unlock_page(page);
 }
 /*
@@ -120,7 +120,7 @@ void
 rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf)
 	/* needs sync_page to wait I/O completation */
 	page->mapping = &swapper_space;
 	if (!rw_swap_page_base(rw, entry, page))
-		UnlockPage(page);
+		unlock_page(page);
 	wait_on_page(page);
 	page->mapping = NULL;
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 31a19c4584be..fdabed5509a8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -466,7 +466,7 @@ static int shmem_writepage(struct page * page)
 	spin_unlock(&info->lock);
 	SetPageUptodate(page);
 	set_page_dirty(page);
-	UnlockPage(page);
+	unlock_page(page);
 	return 0;
 }
@@ -512,7 +512,7 @@ repeat:
 	page = find_get_page(mapping, idx);
 	if (page) {
-		if (TryLockPage(page))
+		if (TestSetPageLocked(page))
 			goto wait_retry;
 		spin_unlock (&info->lock);
 		return page;
@@ -533,7 +533,7 @@ repeat:
 			return ERR_PTR(-ENOMEM);
 		}
 		wait_on_page(page);
-		if (!Page_Uptodate(page) && entry->val == swap.val) {
+		if (!PageUptodate(page) && entry->val == swap.val) {
 			page_cache_release(page);
 			return ERR_PTR(-EIO);
 		}
@@ -545,12 +545,12 @@ repeat:
 		}
 		/* We have to do this with page locked to prevent races */
-		if (TryLockPage(page))
+		if (TestSetPageLocked(page))
 			goto wait_retry;
 		error = move_from_swap_cache(page, idx, mapping);
 		if (error < 0) {
-			UnlockPage(page);
+			unlock_page(page);
 			return ERR_PTR(error);
 		}
@@ -614,7 +614,7 @@ static int shmem_getpage(struct inode * inode, unsigned long idx, struct page **
 	if (IS_ERR (*ptr))
 		goto failed;
-	UnlockPage(*ptr);
+	unlock_page(*ptr);
 	up (&info->sem);
 	return 0;
 failed:
@@ -864,7 +864,7 @@ shmem_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 		}
 unlock:
 		/* Mark it unlocked again and drop the page.. */
-		UnlockPage(page);
+		unlock_page(page);
 		page_cache_release(page);
 		if (status < 0)
@@ -1140,7 +1140,7 @@ static int shmem_symlink(struct inode * dir, struct dentry *dentry, const char *
 		memcpy(kaddr, symname, len);
 		kunmap(page);
 		SetPageDirty(page);
-		UnlockPage(page);
+		unlock_page(page);
 		page_cache_release(page);
 		up(&info->sem);
 		inode->i_op = &shmem_symlink_inode_operations;
diff --git a/mm/slab.c b/mm/slab.c
index 109777f4a79c..d5e2817db02d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -544,7 +544,7 @@ static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
 	 * vm_scan(). Shouldn't be a worry.
 	 */
 	while (i--) {
-		PageClearSlab(page);
+		ClearPageSlab(page);
 		page++;
 	}
 	free_pages((unsigned long)addr, cachep->gfporder);
@@ -1198,7 +1198,7 @@ static int kmem_cache_grow (kmem_cache_t * cachep, int flags)
 	do {
 		SET_PAGE_CACHE(page, cachep);
 		SET_PAGE_SLAB(page, slabp);
-		PageSetSlab(page);
+		SetPageSlab(page);
 		page++;
 	} while (--i);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 6e918948a4de..85002f16a6fa 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -24,7 +24,7 @@
 static int swap_writepage(struct page *page)
 {
 	if (remove_exclusive_swap_page(page)) {
-		UnlockPage(page);
+		unlock_page(page);
 		return 0;
 	}
 	rw_swap_page(WRITE, page);
@@ -244,9 +244,9 @@ void free_page_and_swap_cache(struct page *page)
 	 * exclusive_swap_page() _with_ the lock.
 	 * - Marcelo
 	 */
-	if (PageSwapCache(page) && !TryLockPage(page)) {
+	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
 		remove_exclusive_swap_page(page);
-		UnlockPage(page);
+		unlock_page(page);
 	}
 	page_cache_release(page);
 }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f93135a3f2d2..4037406ce132 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -348,7 +348,7 @@ void free_swap_and_cache(swp_entry_t entry)
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
-		UnlockPage(page);
+		unlock_page(page);
 		page_cache_release(page);
 	}
 }
@@ -689,7 +689,7 @@ static int try_to_unuse(unsigned int type)
 		 * mark page dirty so try_to_swap_out will preserve it.
 		 */
 		SetPageDirty(page);
-		UnlockPage(page);
+		unlock_page(page);
 		page_cache_release(page);
 		/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 91effe15b29b..f097d65537dc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -35,6 +35,11 @@
  */
 #define DEF_PRIORITY (6)
 
+static inline int is_page_cache_freeable(struct page * page)
+{
+	return page_count(page) - !!PagePrivate(page) == 1;
+}
+
 /*
  * On the swap_out path, the radix-tree node allocations are performing
  * GFP_ATOMIC allocations under PF_MEMALLOC. They can completely
@@ -87,7 +92,7 @@ static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct*
 	if (!memclass(page_zone(page), classzone))
 		return 0;
-	if (TryLockPage(page))
+	if (TestSetPageLocked(page))
 		return 0;
 	/* From this point on, the odds are that we're going to
@@ -114,7 +119,7 @@ set_swap_pte:
 		set_pte(page_table, swp_entry_to_pte(entry));
 drop_pte:
 		mm->rss--;
-		UnlockPage(page);
+		unlock_page(page);
 		{
 			int freeable = page_count(page) - !!PagePrivate(page) <= 2;
@@ -181,7 +186,7 @@ drop_pte:
 	/* No swap space left */
 preserve:
 	set_pte(page_table, pte);
-	UnlockPage(page);
+	unlock_page(page);
 	return 0;
 }
@@ -416,7 +421,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
 		 * The page is locked. IO in progress?
 		 * Move it to the back of the list.
 		 */
-		if (unlikely(TryLockPage(page))) {
+		if (unlikely(TestSetPageLocked(page))) {
 			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
 				page_cache_get(page);
 				spin_unlock(&pagemap_lru_lock);
@@ -490,7 +495,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
 				 * taking the lru lock
 				 */
 				spin_lock(&pagemap_lru_lock);
-				UnlockPage(page);
+				unlock_page(page);
 				__lru_cache_del(page);
 				/* effectively free the page here */
@@ -511,7 +516,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
 				}
 			} else {
 				/* failed to drop the buffers so stop here */
-				UnlockPage(page);
+				unlock_page(page);
 				page_cache_release(page);
 				spin_lock(&pagemap_lru_lock);
@@ -528,7 +533,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
 					goto page_freeable;
 				write_unlock(&mapping->page_lock);
 			}
-			UnlockPage(page);
+			unlock_page(page);
 page_mapped:
 			if (--max_mapped >= 0)
 				continue;
@@ -548,7 +553,7 @@ page_freeable:
 		 */
 		if (PageDirty(page)) {
 			write_unlock(&mapping->page_lock);
-			UnlockPage(page);
+			unlock_page(page);
 			continue;
 		}
@@ -565,7 +570,7 @@ page_freeable:
 		}
 		__lru_cache_del(page);
-		UnlockPage(page);
+		unlock_page(page);
 		/* effectively free the page here */
 		page_cache_release(page);
@@ -597,7 +602,7 @@ static void refill_inactive(int nr_pages)
 		page = list_entry(entry, struct page, lru);
 		entry = entry->prev;
-		if (PageTestandClearReferenced(page)) {
+		if (TestClearPageReferenced(page)) {
 			list_del(&page->lru);
 			list_add(&page->lru, &active_list);
 			continue;
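The comment block this patch adds to unlock_page() (in the mm/filemap.c hunk above) compresses a subtle ordering argument. A hedged sketch of the protocol it describes, with hypothetical names (unlock_page_sketch, PG_locked_bit, and page_waitqueue() here are placeholders, not the kernel's verbatim code):

```c
void unlock_page_sketch(struct page *page)
{
	/* mb #1: make the critical section's stores visible before the
	 * lock bit clears (closes what TestSetPageLocked opened). */
	smp_mb__before_clear_bit();
	clear_bit(PG_locked_bit, &page->flags);
	/* mb #2: order the clear_bit against the waitqueue read below,
	 * so a racing wait_on_page() cannot sleep and miss the wakeup. */
	smp_mb();
	if (waitqueue_active(page_waitqueue(page)))
		wake_up(page_waitqueue(page));
}
```

Without the second barrier, the waitqueue_active() read could be reordered before the clear_bit on an SMP machine, letting a concurrent waiter observe the page still locked, go to sleep, and never be woken.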
