Diffstat:
 Makefile                  |   4
 arch/i386/kernel/pci-pc.c |  10
 drivers/net/ppp_generic.c |  11
 drivers/net/pppoe.c       |   6
 drivers/usb/usb-uhci.c    |   5
 fs/block_dev.c            |  30
 fs/buffer.c               | 111
 include/linux/fs.h        |   5
 include/linux/list.h      |   1
 include/linux/mm.h        |  13
 include/linux/slab.h      |   2
 mm/filemap.c              |  74
 mm/memory.c               |   4
 mm/page_alloc.c           |   2
14 files changed, 103 insertions, 175 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
-SUBLEVEL = 10
-EXTRAVERSION =
+SUBLEVEL = 11
+EXTRAVERSION =-pre1
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
diff --git a/arch/i386/kernel/pci-pc.c b/arch/i386/kernel/pci-pc.c
index a0aa4f46b887..1ff3e1805d4b 100644
--- a/arch/i386/kernel/pci-pc.c
+++ b/arch/i386/kernel/pci-pc.c
@@ -261,18 +261,14 @@ static int pci_conf2_read_config_word(struct pci_dev *dev, int where, u16 *value)
     u32 data;
     result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
                 PCI_FUNC(dev->devfn), where, 2, &data);
-    *value = (u8)data;
+    *value = (u16)data;
     return result;
 }
 
 static int pci_conf2_read_config_dword(struct pci_dev *dev, int where, u32 *value)
 {
-    int result;
-    u32 data;
-    result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
-            PCI_FUNC(dev->devfn), where, 4, &data);
-    *value = (u8)data;
-    return result;
+    return pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
+            PCI_FUNC(dev->devfn), where, 4, value);
 }
 
 static int pci_conf2_write_config_byte(struct pci_dev *dev, int where, u8 value)
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index fd1a5d852472..c74dac6ca6b9 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2105,13 +2105,12 @@ ppp_register_compressor(struct compressor *cp)
 {
     struct compressor_entry *ce;
     int ret;
-
     spin_lock(&compressor_list_lock);
     ret = -EEXIST;
     if (find_comp_entry(cp->compress_proto) != 0)
         goto out;
     ret = -ENOMEM;
-    ce = kmalloc(sizeof(struct compressor_entry), GFP_KERNEL);
+    ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
     if (ce == 0)
         goto out;
     ret = 0;
@@ -2216,11 +2215,11 @@ ppp_create_interface(int unit, int *retp)
 
     /* Create a new ppp structure and link it before `list'. */
     ret = -ENOMEM;
-    ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+    ppp = kmalloc(sizeof(struct ppp), GFP_ATOMIC);
     if (ppp == 0)
         goto out;
     memset(ppp, 0, sizeof(struct ppp));
-    dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+    dev = kmalloc(sizeof(struct net_device), GFP_ATOMIC);
     if (dev == 0) {
         kfree(ppp);
         goto out;
@@ -2285,6 +2284,7 @@ init_ppp_file(struct ppp_file *pf, int kind)
 static void ppp_destroy_interface(struct ppp *ppp)
 {
     struct net_device *dev;
+    int n_channels ;
 
     spin_lock(&all_ppp_lock);
     list_del(&ppp->file.list);
@@ -2314,6 +2314,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
 #endif /* CONFIG_PPP_FILTER */
     dev = ppp->dev;
     ppp->dev = 0;
+    n_channels = ppp->n_channels ;
     ppp_unlock(ppp);
 
     if (dev) {
@@ -2329,7 +2330,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
      * ppp structure. Otherwise we leave it around until the
      * last channel disconnects from it.
      */
-    if (ppp->n_channels == 0)
+    if (n_channels == 0)
         kfree(ppp);
 
     spin_unlock(&all_ppp_lock);
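All three kmalloc() calls changed above sit inside spin_lock()-protected sections (compressor_list_lock, all_ppp_lock). A GFP_KERNEL allocation is allowed to sleep, which must never happen while a spinlock is held; GFP_ATOMIC never sleeps but may fail, so the existing error paths stay load-bearing. A user-space sketch of the same rule, with pthread spinlocks standing in for the kernel's and purely illustrative names (register_entry, list_lock):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_spinlock_t list_lock;    /* stands in for compressor_list_lock */

static int register_entry(void)
{
    int ret = -1;

    pthread_spin_lock(&list_lock);
    /* While a spinlock is held we must not block, so the allocation has
     * to be the may-fail kind (kmalloc(..., GFP_ATOMIC) in the patch). */
    void *ce = malloc(64);
    if (ce == NULL)
        goto out;                       /* the failure path must stay intact */
    /* ...link ce into the protected list here... */
    free(ce);                           /* demo only: keep the sketch leak-free */
    ret = 0;
out:
    pthread_spin_unlock(&list_lock);
    return ret;
}

int main(void)
{
    pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
    printf("register_entry() -> %d\n", register_entry());
    pthread_spin_destroy(&list_lock);
    return 0;
}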
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index f40814662fd9..c333e6180b20 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -541,12 +541,16 @@ int pppoe_release(struct socket *sock)
     sk->state = PPPOX_DEAD;
 
     po = sk->protinfo.pppox;
-    if (po->pppoe_pa.sid)
+    if (po->pppoe_pa.sid) {
         delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+        po->pppoe_pa.sid = 0 ;
+    }
 
     if (po->pppoe_dev)
         dev_put(po->pppoe_dev);
 
+    po->pppoe_dev = NULL ;
+
     sock_orphan(sk);
     sock->sk = NULL;
diff --git a/drivers/usb/usb-uhci.c b/drivers/usb/usb-uhci.c
index 6bb4b6c926e2..65929d24d7da 100644
--- a/drivers/usb/usb-uhci.c
+++ b/drivers/usb/usb-uhci.c
@@ -2528,7 +2528,7 @@ _static int process_iso (uhci_t *s, urb_t *urb, int mode)
     int i;
     int ret = 0;
     urb_priv_t *urb_priv = urb->hcpriv;
-    struct list_head *p = urb_priv->desc_list.next;
+    struct list_head *p = urb_priv->desc_list.next, *p_tmp;
     uhci_desc_t *desc = list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list);
 
     dbg("urb contains iso request");
@@ -2578,8 +2578,9 @@ _static int process_iso (uhci_t *s, urb_t *urb, int mode)
             dbg("process_iso: %i: len:%d %08x status:%x",
                 i, urb->iso_frame_desc[i].actual_length,
                 le32_to_cpu(desc->hw.td.status), urb->iso_frame_desc[i].status);
 
-        list_del (p);
+        p_tmp = p;
         p = p->next;
+        list_del (p_tmp);
         delete_desc (s, desc);
     }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b731a11a09a7..a56d71023901 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -67,6 +67,17 @@ static unsigned int max_block(kdev_t dev)
     return retval;
 }
 
+static loff_t blkdev_size(kdev_t dev)
+{
+    unsigned int blocks = ~0U;
+    int major = MAJOR(dev);
+
+    if (blk_size[major]) {
+        int minor = MINOR(dev);
+        blocks = blk_size[major][minor];
+    }
+    return (loff_t) blocks << BLOCK_SIZE_BITS;
+}
+
 static inline int blkdev_get_block(struct inode * inode, long iblock, struct buffer_head * bh_result)
 {
@@ -308,7 +319,6 @@ static int __blkdev_commit_write(struct inode *inode, struct page *page,
             set_bit(BH_Uptodate, &bh->b_state);
             if (!atomic_set_buffer_dirty(bh)) {
                 __mark_dirty(bh);
-                buffer_insert_inode_data_queue(bh, inode);
                 need_balance_dirty = 1;
             }
         }
@@ -404,6 +414,7 @@ static struct super_block *bd_read_super(struct super_block *sb, void *data, int silent)
     root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
     root->i_uid = root->i_gid = 0;
     root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
+    sb->s_maxbytes = ~0ULL;
     sb->s_blocksize = 1024;
     sb->s_blocksize_bits = 10;
     sb->s_magic = 0x62646576;
@@ -521,9 +532,11 @@ struct block_device *bdget(dev_t dev)
         new_bdev->bd_dev = dev;
         new_bdev->bd_op = NULL;
         new_bdev->bd_inode = inode;
+        inode->i_size = blkdev_size(dev);
         inode->i_rdev = to_kdev_t(dev);
         inode->i_bdev = new_bdev;
         inode->i_data.a_ops = &def_blk_aops;
+        inode->i_data.gfp_mask = GFP_USER;
         spin_lock(&bdev_lock);
         bdev = bdfind(dev, head);
         if (!bdev) {
@@ -810,22 +823,7 @@ int blkdev_put(struct block_device *bdev, int kind)
     down(&bdev->bd_sem);
     lock_kernel();
     if (kind == BDEV_FILE) {
-        struct super_block * sb;
-
         __block_fsync(bd_inode);
-
-        /* Janitorianism: this shit must go away */
-        sb = get_super(bd_inode->i_rdev);
-        if (sb) {
-            if (sb->s_flags & MS_RDONLY) {
-                shrink_dcache_sb(sb);
-                invalidate_inodes(sb);
-                invalidate_buffers(bd_inode->i_rdev);
-            }
-            lock_super(sb);
-            unlock_super(sb);
-            drop_super(sb);
-        }
     } else if (kind == BDEV_FS)
         fsync_no_super(rdev);
     if (!--bdev->bd_openers) {
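The new blkdev_size() widens the 32-bit block count to loff_t before shifting by BLOCK_SIZE_BITS; shifting first would truncate the size of any device of 4 GiB or more. A small stand-alone illustration of that ordering (BLOCK_SIZE_BITS taken as 10, i.e. 1 KiB blocks, as in the 2.4 block layer; the block count is a made-up example):

#include <stdio.h>
#include <stdint.h>

#define BLOCK_SIZE_BITS 10   /* 1 KiB blocks */

int main(void)
{
    uint32_t blocks = 5u * 1024 * 1024;  /* 5M blocks = a 5 GiB device */

    uint32_t truncated = blocks << BLOCK_SIZE_BITS;          /* wraps mod 2^32 */
    uint64_t widened = (uint64_t)blocks << BLOCK_SIZE_BITS;  /* cast, then shift */

    printf("shift first: %u bytes (wrong)\n", truncated);
    printf("widen first: %llu bytes\n", (unsigned long long)widened);
    return 0;
}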
diff --git a/fs/buffer.c b/fs/buffer.c
index 6a2433203dee..c70bda8fed5d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -52,22 +52,13 @@
 #include <asm/bitops.h>
 #include <asm/mmu_context.h>
 
-#define NR_SIZES 7
-static char buffersize_index[65] =
-{-1,  0,  1, -1,  2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
-  4, -1, -1, -1, -1, -1, -1, -1, -1,-1, -1, -1, -1, -1, -1, -1,
-  5, -1, -1, -1, -1, -1, -1, -1, -1,-1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1,-1, -1, -1, -1, -1, -1, -1,
-  6};
-
-#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
 #define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
 #define NR_RESERVED (10*MAX_BUF_PER_PAGE)
 #define MAX_UNUSED_BUFFERS NR_RESERVED+20 /* don't ever have more than this
                                              number of unused buffer heads */
 
 /* Anti-deadlock ordering:
- *    lru_list_lock > hash_table_lock > free_list_lock > unused_list_lock
+ *    lru_list_lock > hash_table_lock > unused_list_lock
  */
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_inode_buffers)
@@ -80,6 +71,11 @@ static unsigned int bh_hash_shift;
 static struct buffer_head **hash_table;
 static rwlock_t hash_table_lock = RW_LOCK_UNLOCKED;
 
+#define BUF_CLEAN   0
+#define BUF_LOCKED  1   /* Buffers scheduled for write */
+#define BUF_DIRTY   2   /* Dirty buffers, not yet scheduled for write */
+#define NR_LIST     3
+
 static struct buffer_head *lru_list[NR_LIST];
 static spinlock_t lru_list_lock = SPIN_LOCK_UNLOCKED;
 static int nr_buffers_type[NR_LIST];
@@ -90,14 +86,8 @@ static int nr_unused_buffer_heads;
 static spinlock_t unused_list_lock = SPIN_LOCK_UNLOCKED;
 static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
 
-struct bh_free_head {
-    struct buffer_head *list;
-    spinlock_t lock;
-};
-static struct bh_free_head free_list[NR_SIZES];
-
 static void truncate_buffers(kdev_t dev);
-static int grow_buffers(kdev_t dev, int block, int size);
+static int grow_buffers(kdev_t dev, unsigned long block, int size);
 static void __refile_buffer(struct buffer_head *);
 
 /* This is used by some architectures to estimate available memory. */
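For context on the deletions above: buffer.c used to keep a free list per buffer size, indexed by BUFSIZE_INDEX, which mapped a power-of-two size between 512 and 32768 bytes onto 0..6 (NR_SIZES was 7) via the 65-entry buffersize_index table; this patch removes the per-size free lists entirely. A user-space sketch of what the deleted macro computed, with a loop in place of the table (illustrative only):

#include <stdio.h>

static int bufsize_index(unsigned int size)
{
    int idx = 0;
    /* size must be a power of two in [512, 32768] */
    for (unsigned int s = size >> 9; s > 1; s >>= 1)
        idx++;
    return idx;    /* 512 -> 0, 1024 -> 1, ..., 32768 -> 6 */
}

int main(void)
{
    for (unsigned int size = 512; size <= 32768; size <<= 1)
        printf("BUFSIZE_INDEX(%5u) = %d\n", size, bufsize_index(size));
    return 0;
}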
@@ -482,12 +472,16 @@ out:
              ((block) << (bh_hash_shift - 12))))
 #define hash(dev,block) hash_table[(_hashfn(HASHDEV(dev),block) & bh_hash_mask)]
 
-static __inline__ void __hash_link(struct buffer_head *bh, struct buffer_head **head)
+static inline void __insert_into_hash_list(struct buffer_head *bh)
 {
-    if ((bh->b_next = *head) != NULL)
-        bh->b_next->b_pprev = &bh->b_next;
+    struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
+    struct buffer_head *next = *head;
+
     *head = bh;
     bh->b_pprev = head;
+    bh->b_next = next;
+    if (next != NULL)
+        next->b_pprev = &bh->b_next;
 }
 
 static __inline__ void __hash_unlink(struct buffer_head *bh)
@@ -504,6 +498,8 @@ static void __insert_into_lru_list(struct buffer_head * bh, int blist)
 {
     struct buffer_head **bhp = &lru_list[blist];
 
+    if (bh->b_prev_free || bh->b_next_free) BUG();
+
     if(!*bhp) {
         *bhp = bh;
         bh->b_prev_free = bh;
@@ -531,19 +527,6 @@ static void __remove_from_lru_list(struct buffer_head * bh, int blist)
     }
 }
 
-static void __remove_from_free_list(struct buffer_head * bh, int index)
-{
-    if(bh->b_next_free == bh)
-         free_list[index].list = NULL;
-    else {
-        bh->b_prev_free->b_next_free = bh->b_next_free;
-        bh->b_next_free->b_prev_free = bh->b_prev_free;
-        if (free_list[index].list == bh)
-             free_list[index].list = bh->b_next_free;
-    }
-    bh->b_next_free = bh->b_prev_free = NULL;
-}
-
 /* must be called with both the hash_table_lock and the
    lru_list_lock held */
 static void __remove_from_queues(struct buffer_head *bh)
@@ -552,14 +535,6 @@ static void __remove_from_queues(struct buffer_head *bh)
     __remove_from_lru_list(bh, bh->b_list);
 }
 
-static void __insert_into_queues(struct buffer_head *bh)
-{
-    struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
-
-    __hash_link(bh, head);
-    __insert_into_lru_list(bh, bh->b_list);
-}
-
 struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
 {
     struct buffer_head *bh, **p = &hash(dev, block);
@@ -1214,6 +1189,7 @@ static __inline__ void __put_unused_buffer_head(struct buffer_head * bh)
     if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
         kmem_cache_free(bh_cachep, bh);
     } else {
+        bh->b_dev = B_FREE;
         bh->b_blocknr = -1;
         bh->b_this_page = NULL;
@@ -1320,7 +1296,7 @@ try_again:
         if (!bh)
             goto no_grow;
 
-        bh->b_dev = B_FREE;  /* Flag as unused */
+        bh->b_dev = NODEV;
         bh->b_this_page = head;
         head = bh;
@@ -1376,15 +1352,18 @@ no_grow:
 
 /*
  * Called when truncating a buffer on a page completely.
- *
- * We can avoid IO by marking it clean.
- * FIXME!! FIXME!! FIXME!! We need to unmap it too,
- * so that the filesystem won't write to it. There's
- * some bug somewhere..
  */
 static void discard_buffer(struct buffer_head * bh)
 {
-    mark_buffer_clean(bh);
+    if (buffer_mapped(bh)) {
+        mark_buffer_clean(bh);
+        lock_buffer(bh);
+        clear_bit(BH_Uptodate, &bh->b_state);
+        clear_bit(BH_Mapped, &bh->b_state);
+        clear_bit(BH_Req, &bh->b_state);
+        clear_bit(BH_New, &bh->b_state);
+        unlock_buffer(bh);
+    }
 }
 
 /*
@@ -2120,7 +2099,6 @@ int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
             }
             tmp = bhs[bhind++];
 
-            tmp->b_dev = B_FREE;
             tmp->b_size = size;
             set_bh_page(tmp, map, offset);
             tmp->b_this_page = tmp;
@@ -2304,7 +2282,6 @@ static void hash_page_buffers(struct page *page, kdev_t dev, int block, int size)
     if (Page_Uptodate(page))
         uptodate |= 1 << BH_Uptodate;
 
-    spin_lock(&lru_list_lock);
     write_lock(&hash_table_lock);
     do {
         if (!(bh->b_state & (1 << BH_Mapped))) {
@@ -2314,23 +2291,21 @@ static void hash_page_buffers(struct page *page, kdev_t dev, int block, int size)
             bh->b_state = uptodate;
         }
 
-        /* Insert the buffer into the regular lists */
-        if (!bh->b_pprev) {
-            __insert_into_queues(bh);
-        }
+        /* Insert the buffer into the hash lists if necessary */
+        if (!bh->b_pprev)
+            __insert_into_hash_list(bh);
 
         block++;
         bh = bh->b_this_page;
     } while (bh != head);
     write_unlock(&hash_table_lock);
-    spin_unlock(&lru_list_lock);
 }
 
 /*
  * Try to increase the number of buffers available: the size argument
  * is used to determine what kind of buffers we want.
  */
-static int grow_buffers(kdev_t dev, int block, int size)
+static int grow_buffers(kdev_t dev, unsigned long block, int size)
 {
     struct page * page;
     struct block_device *bdev;
@@ -2389,7 +2364,7 @@ static int sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
             ll_rw_block(WRITE, 1, &p);
             tryagain = 0;
         } else if (buffer_locked(p)) {
-            if (gfp_mask & __GFP_WAIT) {
+            if (gfp_mask & __GFP_WAITBUF) {
                 wait_on_buffer(p);
                 tryagain = 1;
             } else
@@ -2424,12 +2399,10 @@ static int sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
 int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
 {
     struct buffer_head * tmp, * bh = page->buffers;
-    int index = BUFSIZE_INDEX(bh->b_size);
 
 cleaned_buffers_try_again:
     spin_lock(&lru_list_lock);
     write_lock(&hash_table_lock);
-    spin_lock(&free_list[index].lock);
     tmp = bh;
     do {
         if (buffer_busy(tmp))
@@ -2443,14 +2416,10 @@ cleaned_buffers_try_again:
         struct buffer_head * p = tmp;
         tmp = tmp->b_this_page;
 
-        /* The buffer can be either on the regular
-         * queues or on the free list..
-         */
-        if (p->b_dev != B_FREE) {
-            remove_inode_queue(p);
-            __remove_from_queues(p);
-        } else
-            __remove_from_free_list(p, index);
+        if (p->b_dev == B_FREE) BUG();
+
+        remove_inode_queue(p);
+        __remove_from_queues(p);
         __put_unused_buffer_head(p);
     } while (tmp != bh);
     spin_unlock(&unused_list_lock);
@@ -2461,14 +2430,12 @@ cleaned_buffers_try_again:
     /* And free the page */
     page->buffers = NULL;
     page_cache_release(page);
-    spin_unlock(&free_list[index].lock);
     write_unlock(&hash_table_lock);
     spin_unlock(&lru_list_lock);
     return 1;
 
 busy_buffer_page:
     /* Uhhuh, start writeback so that we don't end up with all dirty pages */
-    spin_unlock(&free_list[index].lock);
     write_unlock(&hash_table_lock);
     spin_unlock(&lru_list_lock);
     if (gfp_mask & __GFP_IO) {
@@ -2581,12 +2548,6 @@ void __init buffer_init(unsigned long mempages)
     for(i = 0; i < nr_hash; i++)
         hash_table[i] = NULL;
 
-    /* Setup free lists. */
-    for(i = 0; i < NR_SIZES; i++) {
-        free_list[i].list = NULL;
-        free_list[i].lock = SPIN_LOCK_UNLOCKED;
-    }
-
     /* Setup lru lists. */
     for(i = 0; i < NR_LIST; i++)
         lru_list[i] = NULL;
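discard_buffer() now does real work: besides marking the buffer clean, it clears the Uptodate, Mapped, Req and New bits so the filesystem stops treating the buffer as backed by an on-disk block, which is what the deleted FIXME had been asking for. A user-space mimic of that state transition (bit positions are illustrative, not the kernel's exact values):

#include <stdio.h>

enum { BH_Uptodate, BH_Dirty, BH_Lock, BH_Req, BH_Mapped, BH_New };

static void clear_state_bit(int nr, unsigned long *state)
{
    *state &= ~(1UL << nr);
}

int main(void)
{
    unsigned long b_state = (1UL << BH_Uptodate) | (1UL << BH_Dirty) |
                            (1UL << BH_Mapped);

    if (b_state & (1UL << BH_Mapped)) {          /* buffer_mapped(bh) */
        clear_state_bit(BH_Dirty, &b_state);     /* mark_buffer_clean(bh) */
        clear_state_bit(BH_Uptodate, &b_state);
        clear_state_bit(BH_Mapped, &b_state);
        clear_state_bit(BH_Req, &b_state);
        clear_state_bit(BH_New, &b_state);
    }
    printf("b_state after discard: %#lx\n", b_state);   /* prints 0 */
    return 0;
}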
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 686fb7160c25..6d6066c6c228 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1086,11 +1086,6 @@ extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
 /* reiserfs_writepage needs this */
 extern void set_buffer_async_io(struct buffer_head *bh) ;
 
-#define BUF_CLEAN   0
-#define BUF_LOCKED  1   /* Buffers scheduled for write */
-#define BUF_DIRTY   2   /* Dirty buffers, not yet scheduled for write */
-#define NR_LIST     3
-
 static inline void get_bh(struct buffer_head * bh)
 {
     atomic_inc(&(bh)->b_count);
diff --git a/include/linux/list.h b/include/linux/list.h
index 81c9170a0f3a..0d04422e81d9 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -92,7 +92,6 @@ static __inline__ void __list_del(struct list_head * prev,
 static __inline__ void list_del(struct list_head *entry)
 {
     __list_del(entry->prev, entry->next);
-    entry->next = entry->prev = 0;
 }
 
 /**
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 494d025d143d..81190ca01383 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -550,16 +550,17 @@ extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
 #define __GFP_IO      0x40   /* Can start low memory physical IO? */
 #define __GFP_HIGHIO  0x80   /* Can start high mem physical IO? */
 #define __GFP_FS      0x100  /* Can call down to low-level FS? */
+#define __GFP_WAITBUF 0x200  /* Can we wait for buffers to complete? */
 
 #define GFP_NOHIGHIO  (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
 #define GFP_NOIO      (__GFP_HIGH | __GFP_WAIT)
-#define GFP_NOFS      (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
+#define GFP_NOFS      (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF)
 #define GFP_ATOMIC    (__GFP_HIGH)
-#define GFP_USER      (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_HIGHUSER  (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
-#define GFP_KERNEL    (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_NFS       (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_KSWAPD    (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_USER      (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
+#define GFP_HIGHUSER  (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_KERNEL    (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
+#define GFP_NFS       (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
+#define GFP_KSWAPD    (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
 
 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    platforms, used as appropriate on others */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index efa8638d612b..ae2cffd45ba0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,7 +24,7 @@ typedef struct kmem_cache_s kmem_cache_t;
 #define SLAB_NFS        GFP_NFS
 #define SLAB_DMA        GFP_DMA
 
-#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
+#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_WAITBUF|__GFP_FS)
 #define SLAB_NO_GROW    0x00001000UL    /* don't grow a cache */
 
 /* flags to pass to kmem_cache_create().
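The new __GFP_WAITBUF bit decouples "may sleep" (__GFP_WAIT) from "may wait on buffer writeback": sync_page_buffers() now tests __GFP_WAITBUF, GFP_NOIO deliberately omits it, and GFP_KERNEL and friends carry it. A stand-alone check of the resulting masks (the 0x10/0x20 values for __GFP_WAIT/__GFP_HIGH are assumed from the 2.4 headers, since this diff does not show them):

#include <stdio.h>

#define __GFP_WAIT    0x10u   /* assumed, not shown in this diff */
#define __GFP_HIGH    0x20u   /* assumed likewise */
#define __GFP_IO      0x40u
#define __GFP_HIGHIO  0x80u
#define __GFP_FS      0x100u
#define __GFP_WAITBUF 0x200u

#define GFP_NOIO   (__GFP_HIGH | __GFP_WAIT)
#define GFP_NOFS   (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF)
#define GFP_KERNEL (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)

int main(void)
{
    /* What sync_page_buffers() now checks before wait_on_buffer(): */
    printf("GFP_NOIO   may sleep: %d, may wait on buffers: %d\n",
           !!(GFP_NOIO & __GFP_WAIT), !!(GFP_NOIO & __GFP_WAITBUF));
    printf("GFP_NOFS   may sleep: %d, may wait on buffers: %d\n",
           !!(GFP_NOFS & __GFP_WAIT), !!(GFP_NOFS & __GFP_WAITBUF));
    printf("GFP_KERNEL may sleep: %d, may wait on buffers: %d\n",
           !!(GFP_KERNEL & __GFP_WAIT), !!(GFP_KERNEL & __GFP_WAITBUF));
    return 0;
}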
diff --git a/mm/filemap.c b/mm/filemap.c
index 42cc4bfd73ed..b8d1e90ada58 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -873,6 +873,13 @@ struct page * find_or_create_page(struct address_space *mapping, unsigned long index
     return page;
 }
 
+/*
+ * Returns locked page at given index in given cache, creating it if needed.
+ */
+struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
+{
+    return find_or_create_page(mapping, index, mapping->gfp_mask);
+}
 
 #if 0
@@ -1005,24 +1012,6 @@ static inline int get_max_readahead(struct inode * inode)
     return max_readahead[MAJOR(inode->i_dev)][MINOR(inode->i_dev)];
 }
 
-static inline unsigned long calc_end_index(struct inode * inode)
-{
-    unsigned long end_index;
-
-    end_index = inode->i_size >> PAGE_CACHE_SHIFT;
-
-    return end_index;
-}
-
-static inline loff_t calc_rsize(struct inode * inode)
-{
-    loff_t rsize;
-
-    rsize = inode->i_size;
-
-    return rsize;
-}
-
 static void generic_file_readahead(int reada_ok,
     struct file * filp, struct inode * inode,
     struct page * page)
@@ -1033,7 +1022,7 @@ static void generic_file_readahead(int reada_ok,
     unsigned long raend;
     int max_readahead = get_max_readahead(inode);
 
-    end_index = calc_end_index(inode);
+    end_index = inode->i_size >> PAGE_CACHE_SHIFT;
 
     raend = filp->f_raend;
     max_ahead = 0;
@@ -1157,8 +1146,8 @@ void mark_page_accessed(struct page *page)
  */
 void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
 {
-    struct inode *inode = filp->f_dentry->d_inode;
-    struct address_space *mapping = inode->i_mapping;
+    struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
+    struct inode *inode = mapping->host;
     unsigned long index, offset;
     struct page *cached_page;
     int reada_ok;
@@ -1212,13 +1201,13 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc
         struct page *page, **hash;
         unsigned long end_index, nr, ret;
 
-        end_index = calc_end_index(inode);
+        end_index = inode->i_size >> PAGE_CACHE_SHIFT;
 
         if (index > end_index)
             break;
         nr = PAGE_CACHE_SIZE;
         if (index == end_index) {
-            nr = calc_rsize(inode) & ~PAGE_CACHE_MASK;
+            nr = inode->i_size & ~PAGE_CACHE_MASK;
             if (nr <= offset)
                 break;
         }
@@ -1595,7 +1584,6 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
     struct address_space *mapping = inode->i_mapping;
     struct page *page, **hash, *old_page;
     unsigned long size, pgoff;
-    loff_t rsize;
 
     pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
 
@@ -1604,8 +1592,7 @@ retry_all:
      * An external ptracer can access pages that normally aren't
      * accessible..
      */
-    rsize = calc_rsize(inode);
-    size = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+    size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
     if ((pgoff >= size) && (area->vm_mm == current->mm))
         return NULL;
 
@@ -2104,14 +2091,13 @@ static long madvise_willneed(struct vm_area_struct * vma,
     long error = -EBADF;
     struct file * file;
     unsigned long size, rlim_rss;
-    loff_t rsize;
 
     /* Doesn't work if there's no mapped file. */
     if (!vma->vm_file)
         return error;
     file = vma->vm_file;
-    rsize = calc_rsize(file->f_dentry->d_inode);
-    size = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+    size = (file->f_dentry->d_inode->i_size + PAGE_CACHE_SIZE - 1) >>
+                            PAGE_CACHE_SHIFT;
 
     start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
     if (end > vma->vm_end)
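The deleted calc_end_index()/calc_rsize() helpers are inlined back into plain expressions on i_size: the index of the final page is i_size >> PAGE_CACHE_SHIFT, and the number of valid bytes in that page is i_size & ~PAGE_CACHE_MASK. A small stand-alone demonstration of that split (4 KiB pages and a made-up file size):

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12                       /* 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

int main(void)
{
    uint64_t i_size = 10000;                          /* made-up file size */
    uint64_t end_index = i_size >> PAGE_CACHE_SHIFT;  /* index of final page */
    uint64_t nr = i_size & ~PAGE_CACHE_MASK;          /* valid bytes in it */

    printf("file of %llu bytes: last page index %llu, %llu bytes used in it\n",
           (unsigned long long)i_size,
           (unsigned long long)end_index,
           (unsigned long long)nr);
    return 0;
}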
@@ -2549,19 +2535,6 @@ repeat:
     return page;
 }
 
-/*
- * Returns locked page at given index in given cache, creating it if needed.
- */
-struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
-{
-    struct page *cached_page = NULL;
-    struct page *page = __grab_cache_page(mapping,index,&cached_page);
-    if (cached_page)
-        page_cache_release(cached_page);
-    return page;
-}
-
 inline void remove_suid(struct inode *inode)
 {
     unsigned int mode;
@@ -2595,8 +2568,8 @@ inline void remove_suid(struct inode *inode)
 ssize_t
 generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
 {
-    struct inode    *inode = file->f_dentry->d_inode;
-    struct address_space *mapping = inode->i_mapping;
+    struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+    struct inode    *inode = mapping->host;
     unsigned long   limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
     loff_t          pos;
     struct page     *page, *cached_page;
@@ -2628,8 +2601,7 @@ generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
 
     written = 0;
 
-    /* FIXME: this is for backwards compatibility with 2.4 */
-    if (!S_ISBLK(inode->i_mode) && file->f_flags & O_APPEND)
+    if (file->f_flags & O_APPEND)
         pos = inode->i_size;
 
     /*
@@ -2690,17 +2662,15 @@ generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
             err = -EPERM;
             goto out;
         }
-        if (pos >= calc_rsize(inode)) {
-            if (count || pos > calc_rsize(inode)) {
-                /* FIXME: this is for backwards compatibility with 2.4 */
+        if (pos >= inode->i_size) {
+            if (count || pos > inode->i_size) {
                 err = -ENOSPC;
                 goto out;
             }
-            /* zero-length writes at blkdev end are OK */
         }
-        if (pos + count > calc_rsize(inode))
-            count = calc_rsize(inode) - pos;
+        if (pos + count > inode->i_size)
+            count = inode->i_size - pos;
     }
 
     err = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 440ed1127de1..371a023043d7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -319,7 +319,9 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address
         if (pte_none(pte))
             continue;
         if (pte_present(pte)) {
-            freed ++;
+            struct page *page = pte_page(pte);
+            if (VALID_PAGE(page) && !PageReserved(page))
+                freed ++;
             /* This will eventually call __free_pte on the pte. */
             tlb_remove_page(tlb, ptep, address + offset);
         } else {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0762ef388704..cd9b971d4692 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -480,7 +480,7 @@ unsigned int nr_free_buffer_pages (void)
     zone_t **zonep, *zone;
 
     do {
-        zonelist = pgdat->node_zonelists + __GFP_HIGHMEM;
+        zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
         zonep = zonelist->zones;
 
         for (zone = *zonep++; zone; zone = *zonep++)
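Two of the changes above interact: list_del() in include/linux/list.h no longer poisons entry->next/prev, and the usb-uhci loop is reordered to read p->next before unlinking and freeing the node, since reading it afterwards would be a use-after-free once the descriptor is deleted. A user-space sketch of the unpoisoned list_del() plus the safe iterate-and-free pattern (minimal list implementation, illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

static void list_del(struct list_head *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
    /* 2.4.11-pre1 drops the "entry->next = entry->prev = 0;" line here */
}

struct desc { struct list_head desc_list; int id; };   /* list head first */

int main(void)
{
    struct list_head head;
    struct list_head *p, *p_tmp;

    list_init(&head);
    for (int i = 0; i < 3; i++) {
        struct desc *d = malloc(sizeof(*d));
        d->id = i;
        list_add_tail(&d->desc_list, &head);
    }

    p = head.next;
    while (p != &head) {
        p_tmp = p;
        p = p->next;                        /* read the successor first... */
        list_del(p_tmp);                    /* ...then unlink... */
        struct desc *d = (struct desc *)p_tmp;
        printf("freeing desc %d\n", d->id);
        free(d);                            /* ...then free */
    }
    return 0;
}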
