| Field | Value | Date |
|---|---|---|
| author | Andrew Morton <akpm@osdl.org> | 2003-09-23 09:43:45 -0700 |
| committer | Linus Torvalds <torvalds@home.osdl.org> | 2003-09-23 09:43:45 -0700 |
| commit | 41cffedc07d938ed655a95ef0309d79f203d8add | |
| tree | fb8e4a5e96adc9e0615e7f1824cf1953d2a72f25 | |
| parent | 257e2ce7256a7a5a93a79d65e1e64818f940c11a | |
[PATCH] Hugetlb FS quota accounting problem
From: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
We found a problem in hugetlbfs filesystem quota accounting when huge pages
are used via mmap. The mmap method in hugetlbfs_file_operations charges
quota on every mmap, even for pages that are already allocated on that
inode. This taxes the same huge page multiple times and causes mmap to fail
on an existing file when the quota mistakenly runs out.
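The patch below instead charges quota at the point where a huge page is actually allocated into the file, and releases it on allocation failure or truncation. As a rough illustration of that accounting model, here is a standalone userspace sketch (not kernel code; the names sb_quota, get_quota, put_quota, and fault_in_page are invented for this example):

```c
#include <stdio.h>

struct sb_quota {
        long free_blocks;       /* -1 means "no limit", mirroring sbinfo->free_blocks */
};

/* Charge one block; returns 0 on success, -1 if the quota is exhausted. */
static int get_quota(struct sb_quota *q)
{
        if (q->free_blocks < 0)
                return 0;       /* no size limit configured */
        if (q->free_blocks == 0)
                return -1;
        q->free_blocks--;
        return 0;
}

/* Release one block (allocation failed, or the page was truncated away). */
static void put_quota(struct sb_quota *q)
{
        if (q->free_blocks >= 0)
                q->free_blocks++;
}

/* Toy "page cache": pages[idx] != 0 means the huge page already exists. */
static int fault_in_page(struct sb_quota *q, int *pages, int idx)
{
        if (pages[idx])
                return 0;       /* already allocated: no new charge */
        if (get_quota(q))
                return -1;      /* out of quota */
        pages[idx] = 1;         /* "allocate" the huge page */
        return 0;
}

int main(void)
{
        struct sb_quota q = { .free_blocks = 2 };
        int pages[2] = { 0, 0 };

        /* First mmap touches both pages: two blocks are charged. */
        fault_in_page(&q, pages, 0);
        fault_in_page(&q, pages, 1);

        /* Remapping the same file charges nothing more, so it cannot fail. */
        fault_in_page(&q, pages, 0);
        fault_in_page(&q, pages, 1);
        printf("free_blocks after remap: %ld\n", q.free_blocks);        /* 0 */

        /* Truncating one page returns its block to the quota. */
        pages[1] = 0;
        put_quota(&q);
        printf("free_blocks after truncate: %ld\n", q.free_blocks);     /* 1 */
        return 0;
}
```

In this model, remapping an already-populated file leaves free_blocks untouched, which is exactly why the fix charges per allocated page rather than per mmap.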
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/i386/mm/hugetlbpage.c | 7 |
| -rw-r--r-- | arch/ia64/mm/hugetlbpage.c | 7 |
| -rw-r--r-- | fs/hugetlbfs/inode.c | 56 |
| -rw-r--r-- | include/linux/hugetlb.h | 2 |

4 files changed, 45 insertions, 27 deletions
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index f4e073471958..d14cbbeaf864 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -355,14 +355,21 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
                         + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                 page = find_get_page(mapping, idx);
                 if (!page) {
+                        /* charge the fs quota first */
+                        if (hugetlb_get_quota(mapping)) {
+                                ret = -ENOMEM;
+                                goto out;
+                        }
                         page = alloc_hugetlb_page();
                         if (!page) {
+                                hugetlb_put_quota(mapping);
                                 ret = -ENOMEM;
                                 goto out;
                         }
                         ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                         unlock_page(page);
                         if (ret) {
+                                hugetlb_put_quota(mapping);
                                 free_huge_page(page);
                                 goto out;
                         }
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 20426c58c63b..6a2e5a23f87d 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -297,14 +297,21 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
                         + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                 page = find_get_page(mapping, idx);
                 if (!page) {
+                        /* charge the fs quota first */
+                        if (hugetlb_get_quota(mapping)) {
+                                ret = -ENOMEM;
+                                goto out;
+                        }
                         page = alloc_hugetlb_page();
                         if (!page) {
+                                hugetlb_put_quota(mapping);
                                 ret = -ENOMEM;
                                 goto out;
                         }
                         ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                         unlock_page(page);
                         if (ret) {
+                                hugetlb_put_quota(mapping);
                                 free_huge_page(page);
                                 goto out;
                         }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index effaa5bc5a96..86803c7262a5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -47,7 +47,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
         struct inode *inode = file->f_dentry->d_inode;
         struct address_space *mapping = inode->i_mapping;
-        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(inode->i_sb);
         loff_t len, vma_len;
         int ret;
 
@@ -61,19 +60,8 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
                 return -EINVAL;
 
         vma_len = (loff_t)(vma->vm_end - vma->vm_start);
-        if (sbinfo->free_blocks >= 0) { /* Check if there is any size limit. */
-                spin_lock(&sbinfo->stat_lock);
-                if ((vma_len >> HPAGE_SHIFT) <= sbinfo->free_blocks) {
-                        sbinfo->free_blocks -= (vma_len >> HPAGE_SHIFT);
-                        spin_unlock(&sbinfo->stat_lock);
-                } else {
-                        spin_unlock(&sbinfo->stat_lock);
-                        return -ENOMEM;
-                }
-        }
 
         down(&inode->i_sem);
-        update_atime(inode);
         vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
         vma->vm_ops = &hugetlb_vm_ops;
@@ -83,15 +71,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
                 inode->i_size = len;
         up(&inode->i_sem);
 
-        /*
-         * If the huge page allocation has failed then increment free_blocks.
-         */
-        if ((ret != 0) && (sbinfo->free_blocks >= 0)) {
-                spin_lock(&sbinfo->stat_lock);
-                sbinfo->free_blocks += (vma_len >> HPAGE_SHIFT);
-                spin_unlock(&sbinfo->stat_lock);
-        }
-
         return ret;
 }
 
@@ -178,7 +157,6 @@ void truncate_huge_page(struct page *page)
 
 void truncate_hugepages(struct address_space *mapping, loff_t lstart)
 {
-        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
         const pgoff_t start = lstart >> HPAGE_SHIFT;
         struct pagevec pvec;
         pgoff_t next;
@@ -203,11 +181,7 @@ void truncate_hugepages(struct address_space *mapping, loff_t lstart)
                         ++next;
                         truncate_huge_page(page);
                         unlock_page(page);
-                        if (sbinfo->free_blocks >= 0) {
-                                spin_lock(&sbinfo->stat_lock);
-                                sbinfo->free_blocks++;
-                                spin_unlock(&sbinfo->stat_lock);
-                        }
+                        hugetlb_put_quota(mapping);
                 }
                 huge_pagevec_release(&pvec);
         }
@@ -712,6 +686,34 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
         return 0;
 }
 
+int hugetlb_get_quota(struct address_space *mapping)
+{
+        int ret = 0;
+        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+
+        if (sbinfo->free_blocks > -1) {
+                spin_lock(&sbinfo->stat_lock);
+                if (sbinfo->free_blocks > 0)
+                        sbinfo->free_blocks--;
+                else
+                        ret = -ENOMEM;
+                spin_unlock(&sbinfo->stat_lock);
+        }
+
+        return ret;
+}
+
+void hugetlb_put_quota(struct address_space *mapping)
+{
+        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+
+        if (sbinfo->free_blocks > -1) {
+                spin_lock(&sbinfo->stat_lock);
+                sbinfo->free_blocks++;
+                spin_unlock(&sbinfo->stat_lock);
+        }
+}
+
 static struct super_block *hugetlbfs_get_sb(struct file_system_type *fs_type,
         int flags, const char *dev_name, void *data)
 {
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edc512ee628f..b3dd00de8dc3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -98,6 +98,8 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 extern struct file_operations hugetlbfs_file_operations;
 extern struct vm_operations_struct hugetlb_vm_ops;
 struct file *hugetlb_zero_setup(size_t);
+int hugetlb_get_quota(struct address_space *mapping);
+void hugetlb_put_quota(struct address_space *mapping);
 
 static inline int is_file_hugepages(struct file *file)
 {
