From 9bba8dd6763f4584df03f9e41f27adbf66d1416d Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 10 Feb 2003 07:35:35 -0800 Subject: [PATCH] uninline get_jiffies_64() for 32-bit architectures uninline get_jiffies_64() for 32-bit architectures --- include/linux/jiffies.h | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 0a60a4f52077..bb1e0128228c 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -15,24 +15,14 @@ extern u64 jiffies_64; extern unsigned long volatile jiffies; +#if (BITS_PER_LONG < 64) +u64 get_jiffies_64(void); +#else static inline u64 get_jiffies_64(void) { -#if BITS_PER_LONG < 64 - extern seqlock_t xtime_lock; - unsigned long seq; - u64 tmp; - - do { - seq = read_seqbegin(&xtime_lock); - tmp = jiffies_64; - } while (read_seqretry(&xtime_lock, seq)); - - return tmp; -#else return (u64)jiffies; -#endif } - +#endif /* * These inlines deal with timer wrapping correctly. You are -- cgit v1.2.3 From 2ac28750c7866b6f75692558cce95a0ab399fe97 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 10 Feb 2003 07:36:00 -0800 Subject: [PATCH] ext3: Remove journal_try_start() journal_try_start() is a function which nonblockingly attempts to open a JBD transaction handle. It was added a long time ago when there were concerns that ext3_writepage() could block kswapd for too long. It was never clearly necessary. So the patch throws it all away and just calls the blocking journal_start() from ext3_writepage(). --- fs/ext3/inode.c | 5 +- fs/jbd/journal.c | 1 - fs/jbd/transaction.c | 117 ++--------------------------------------------- include/linux/ext3_jbd.h | 8 ---- include/linux/jbd.h | 1 - 5 files changed, 4 insertions(+), 128 deletions(-) (limited to 'include') diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 24897acf33da..ca17eb33b07d 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1317,10 +1317,7 @@ static int ext3_writepage(struct page *page, struct writeback_control *wbc) goto out_fail; needed = ext3_writepage_trans_blocks(inode); - if (wbc->for_reclaim) - handle = ext3_journal_try_start(inode, needed); - else - handle = ext3_journal_start(inode, needed); + handle = ext3_journal_start(inode, needed); if (IS_ERR(handle)) { ret = PTR_ERR(handle); diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index dd5e16f8c865..3a8be07f8c7e 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -38,7 +38,6 @@ #include EXPORT_SYMBOL(journal_start); -EXPORT_SYMBOL(journal_try_start); EXPORT_SYMBOL(journal_restart); EXPORT_SYMBOL(journal_extend); EXPORT_SYMBOL(journal_stop); diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index b8333d8afdfe..e5c9b88cb493 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -266,113 +266,6 @@ handle_t *journal_start(journal_t *journal, int nblocks) return handle; } -/* - * Return zero on success - */ -static int try_start_this_handle(journal_t *journal, handle_t *handle) -{ - transaction_t *transaction; - int needed; - int nblocks = handle->h_buffer_credits; - int ret = 0; - - jbd_debug(3, "New handle %p maybe going live.\n", handle); - - lock_journal(journal); - - if (is_journal_aborted(journal) || - (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) { - ret = -EROFS; - goto fail_unlock; - } - - if (journal->j_barrier_count) - goto fail_unlock; - - if (!journal->j_running_transaction && get_transaction(journal, 1) == 0) - goto fail_unlock; - - transaction = journal->j_running_transaction; 
- if (transaction->t_state == T_LOCKED) - goto fail_unlock; - - needed = transaction->t_outstanding_credits + nblocks; - /* We could run log_start_commit here */ - if (needed > journal->j_max_transaction_buffers) - goto fail_unlock; - - needed = journal->j_max_transaction_buffers; - if (journal->j_committing_transaction) - needed += journal->j_committing_transaction-> - t_outstanding_credits; - - if (log_space_left(journal) < needed) - goto fail_unlock; - - handle->h_transaction = transaction; - transaction->t_outstanding_credits += nblocks; - transaction->t_updates++; - jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n", - handle, nblocks, transaction->t_outstanding_credits, - log_space_left(journal)); - unlock_journal(journal); - return 0; - -fail_unlock: - unlock_journal(journal); - if (ret >= 0) - ret = -1; - return ret; -} - -/** - * handle_t *journal_try_start() - Don't block, but try and get a handle - * @journal: Journal to start transaction on. - * @nblocks: number of block buffer we might modify - * - * Try to start a handle, but non-blockingly. If we weren't able - * to, return an ERR_PTR value. - */ -handle_t *journal_try_start(journal_t *journal, int nblocks) -{ - handle_t *handle = journal_current_handle(); - int err; - - if (!journal) - return ERR_PTR(-EROFS); - - if (handle) { - jbd_debug(4, "h_ref %d -> %d\n", - handle->h_ref, - handle->h_ref + 1); - J_ASSERT(handle->h_transaction->t_journal == journal); - if (is_handle_aborted(handle)) - return ERR_PTR(-EIO); - handle->h_ref++; - return handle; - } else { - jbd_debug(4, "no current transaction\n"); - } - - if (is_journal_aborted(journal)) - return ERR_PTR(-EIO); - - handle = new_handle(nblocks); - if (!handle) - return ERR_PTR(-ENOMEM); - - current->journal_info = handle; - - err = try_start_this_handle(journal, handle); - if (err < 0) { - kfree(handle); - current->journal_info = NULL; - return ERR_PTR(err); - } - - return handle; -} - /** * int journal_extend() - extend buffer credits. * @handle: handle to 'extend' @@ -1726,13 +1619,6 @@ out: * to be called. We do this if the page is releasable by try_to_free_buffers(). * We also do it if the page has locked or dirty buffers and the caller wants * us to perform sync or async writeout. - */ -int journal_try_to_free_buffers(journal_t *journal, - struct page *page, int unused_gfp_mask) -{ -/* - * journal_try_to_free_buffers(). Try to remove all this page's buffers - * from the journal. * * This complicates JBD locking somewhat. We aren't protected by the * BKL here. We wish to remove the buffer from its committing or @@ -1752,6 +1638,9 @@ int journal_try_to_free_buffers(journal_t *journal, * cannot happen because we never reallocate freed data as metadata * while the data is part of a transaction. Yes? 
*/ +int journal_try_to_free_buffers(journal_t *journal, + struct page *page, int unused_gfp_mask) +{ struct buffer_head *head; struct buffer_head *bh; int ret = 0; diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h index 7ac910d15863..2c75f9f6dab9 100644 --- a/include/linux/ext3_jbd.h +++ b/include/linux/ext3_jbd.h @@ -210,14 +210,6 @@ static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks) return journal_start(journal, nblocks); } -static inline handle_t * -ext3_journal_try_start(struct inode *inode, int nblocks) -{ - if (inode->i_sb->s_flags & MS_RDONLY) - return ERR_PTR(-EROFS); - return journal_try_start(EXT3_JOURNAL(inode), nblocks); -} - /* * The only special thing we need to do here is to make sure that all * journal_stop calls result in the superblock being marked dirty, so diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 2236641f5593..71bec2befafa 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -726,7 +726,6 @@ static inline handle_t *journal_current_handle(void) */ extern handle_t *journal_start(journal_t *, int nblocks); -extern handle_t *journal_try_start(journal_t *, int nblocks); extern int journal_restart (handle_t *, int nblocks); extern int journal_extend (handle_t *, int nblocks); extern int journal_get_write_access (handle_t *, struct buffer_head *); -- cgit v1.2.3 From 01c42c0c7d77157f44bb7429755fdc7198fb30c5 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 10 Feb 2003 07:36:32 -0800 Subject: [PATCH] hugetlbpage MAP_FIXED fix We need to validate that the address and length of a MAP_FIXED request are suitable for hugetlb pages. --- arch/i386/mm/hugetlbpage.c | 12 ++++++++++++ arch/ia64/mm/hugetlbpage.c | 12 ++++++++++++ arch/sparc64/mm/hugetlbpage.c | 12 ++++++++++++ arch/x86_64/mm/hugetlbpage.c | 12 ++++++++++++ include/linux/hugetlb.h | 2 ++ mm/mmap.c | 13 +++++++++++-- 6 files changed, 61 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c index 243d844a1a79..749cadb2e5a3 100644 --- a/arch/i386/mm/hugetlbpage.c +++ b/arch/i386/mm/hugetlbpage.c @@ -88,6 +88,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc set_pte(page_table, entry); } +/* + * This function checks for proper alignment of input addr and len parameters. + */ +int is_aligned_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) { diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index a08a64c1d39d..c71ed65b5a2d 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -96,6 +96,18 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma, return; } +/* + * This function checks for proper alignment of input addr and len parameters. 
+ */ +int is_aligned_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) { diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index c137cb8c9d56..63895ce0202f 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c @@ -232,6 +232,18 @@ out_error: return -1; } +/* + * This function checks for proper alignment of input addr and len parameters. + */ +int is_aligned_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) { diff --git a/arch/x86_64/mm/hugetlbpage.c b/arch/x86_64/mm/hugetlbpage.c index f8e146193dc6..e1c31afb196e 100644 --- a/arch/x86_64/mm/hugetlbpage.c +++ b/arch/x86_64/mm/hugetlbpage.c @@ -86,6 +86,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc set_pte(page_table, entry); } +/* + * This function checks for proper alignment of input addr and len parameters. + */ +int is_aligned_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 370411eaaba2..7c31efc0b61b 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -26,6 +26,7 @@ struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long address); struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write); +int is_aligned_hugepage_range(unsigned long addr, unsigned long len); int pmd_huge(pmd_t pmd); extern int htlbpage_max; @@ -56,6 +57,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) #define hugepage_vma(mm, addr) 0 #define mark_mm_hugetlb(mm, vma) do { } while (0) #define follow_huge_pmd(mm, addr, pmd, write) 0 +#define is_aligned_hugepage_range(addr, len) 0 #define pmd_huge(x) 0 #ifndef HPAGE_MASK diff --git a/mm/mmap.c b/mm/mmap.c index 07e2417185ff..7696c40185bd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -801,6 +801,13 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, return -ENOMEM; if (addr & ~PAGE_MASK) return -EINVAL; + if (is_file_hugepages(file)) { + unsigned long ret; + + ret = is_aligned_hugepage_range(addr, len); + if (ret) + return ret; + } return addr; } @@ -1224,8 +1231,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) /* we have start < mpnt->vm_end */ if (is_vm_hugetlb_page(mpnt)) { - if ((start & ~HPAGE_MASK) || (len & ~HPAGE_MASK)) - return -EINVAL; + int ret = is_aligned_hugepage_range(start, len); + + if (ret) + return ret; } /* if it doesn't overlap, we have nothing.. */ -- cgit v1.2.3 From b789ebfca657a1fc77f6bd4dd648c0d5e96057c8 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 10 Feb 2003 07:37:12 -0800 Subject: [PATCH] fix current->user->processes leak Patch from: Eric Lammerts Every time you do a loop mount, a kernel thread is started (those processes are called "loop0", "loop1", etc.). The problem is that when it starts, it's counted as one of your processes. 
Then, it's changed to be a root-owned process without correcting that count. Patch below fixes the problem. It moves the bookkeeping of changing current->user to a new function switch_uid() (which is now also used by exec_usermodehelper() in kmod.c). The patch is tested. --- include/linux/sched.h | 1 + kernel/exit.c | 2 +- kernel/kmod.c | 10 +--------- kernel/sys.c | 13 ++----------- kernel/user.c | 17 +++++++++++++++++ 5 files changed, 22 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 16864532fcd9..b99d2eb57a82 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -509,6 +509,7 @@ extern void __set_special_pids(pid_t session, pid_t pgrp); /* per-UID process charging. */ extern struct user_struct * alloc_uid(uid_t); extern void free_uid(struct user_struct *); +extern void switch_uid(struct user_struct *); #include diff --git a/kernel/exit.c b/kernel/exit.c index 729e93bff8e4..de34ed9091f5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -249,7 +249,7 @@ void reparent_to_init(void) /* signals? */ security_task_reparent_to_init(current); memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim))); - current->user = INIT_USER; + switch_uid(INIT_USER); write_unlock_irq(&tasklist_lock); } diff --git a/kernel/kmod.c b/kernel/kmod.c index 1930367d3736..257634f94652 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -121,15 +121,7 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[]) if (curtask->files->fd[i]) close(i); } - /* Drop the "current user" thing */ - { - struct user_struct *user = curtask->user; - curtask->user = INIT_USER; - atomic_inc(&INIT_USER->__count); - atomic_inc(&INIT_USER->processes); - atomic_dec(&user->processes); - free_uid(user); - } + switch_uid(INIT_USER); /* Give kmod all effective privileges.. */ curtask->euid = curtask->fsuid = 0; diff --git a/kernel/sys.c b/kernel/sys.c index 9404304eba74..dffb67035c78 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -561,19 +561,12 @@ asmlinkage long sys_setgid(gid_t gid) static int set_user(uid_t new_ruid, int dumpclear) { - struct user_struct *new_user, *old_user; + struct user_struct *new_user; - /* What if a process setreuid()'s and this brings the - * new uid over his NPROC rlimit? We can check this now - * cheaply with the new uid cache, so if it matters - * we should be checking for it. -DaveM - */ new_user = alloc_uid(new_ruid); if (!new_user) return -EAGAIN; - old_user = current->user; - atomic_dec(&old_user->processes); - atomic_inc(&new_user->processes); + switch_uid(new_user); if(dumpclear) { @@ -581,8 +574,6 @@ static int set_user(uid_t new_ruid, int dumpclear) wmb(); } current->uid = new_ruid; - current->user = new_user; - free_uid(old_user); return 0; } diff --git a/kernel/user.c b/kernel/user.c index 0704b2aad9c5..592680d8cc68 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -116,6 +116,23 @@ struct user_struct * alloc_uid(uid_t uid) return up; } +void switch_uid(struct user_struct *new_user) +{ + struct user_struct *old_user; + + /* What if a process setreuid()'s and this brings the + * new uid over his NPROC rlimit? We can check this now + * cheaply with the new uid cache, so if it matters + * we should be checking for it. -DaveM + */ + old_user = current->user; + atomic_inc(&new_user->__count); + atomic_inc(&new_user->processes); + atomic_dec(&old_user->processes); + current->user = new_user; + free_uid(old_user); +} + static int __init uid_cache_init(void) { -- cgit v1.2.3
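
A note on the get_jiffies_64() patch at the top of this log: because the log is limited to 'include', only the new out-of-line declaration is visible; the matching definition lands in a kernel source file outside the include tree and is not shown here. A minimal sketch of what that definition would look like, reconstructed from the inline body the patch deletes (the file placement and the two #include lines are assumptions, not part of the patch), is:

#include <linux/jiffies.h>
#include <linux/seqlock.h>

#if (BITS_PER_LONG < 64)
/*
 * Sketch only: the body is reconstructed from the inline version that the
 * patch removes from include/linux/jiffies.h.
 */
u64 get_jiffies_64(void)
{
	extern seqlock_t xtime_lock;
	unsigned long seq;
	u64 tmp;

	/*
	 * A 32-bit CPU cannot read the 64-bit jiffies_64 atomically, so
	 * retry the read until it was not torn by a concurrent timer tick.
	 */
	do {
		seq = read_seqbegin(&xtime_lock);
		tmp = jiffies_64;
	} while (read_seqretry(&xtime_lock, seq));

	return tmp;
}
#endif

Keeping the seqlock retry loop out of line means a 32-bit caller no longer expands the loop (and the xtime_lock reference) at every site that reads the 64-bit jiffies value, while the 64-bit case stays a trivial inline cast of jiffies.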
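
A note on the hugetlbpage MAP_FIXED patch: the new is_aligned_hugepage_range() helper simply requires both the start address and the length of a request to be huge-page aligned. The stand-alone illustration below lifts that logic into user space so the arithmetic can be shown; the 4 MB huge-page size is a hypothetical stand-in, since HPAGE_SIZE and HPAGE_MASK are really per-architecture constants.

#include <stdio.h>
#include <errno.h>

#define HPAGE_SHIFT	22UL				/* assumed: 4 MB huge pages */
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

/* Same logic as the helper added by the patch, copied for illustration. */
static int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)		/* length must be a multiple of HPAGE_SIZE */
		return -EINVAL;
	if (addr & ~HPAGE_MASK)		/* start must sit on a huge-page boundary */
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* 4 MB boundary, 8 MB long: accepted */
	printf("%d\n", is_aligned_hugepage_range(0x400000, 0x800000));	/* 0 */
	/* length is not a huge-page multiple: rejected */
	printf("%d\n", is_aligned_hugepage_range(0x400000, 0x500000));	/* -EINVAL */
	/* start address is not huge-page aligned: rejected */
	printf("%d\n", is_aligned_hugepage_range(0x401000, 0x800000));	/* -EINVAL */
	return 0;
}

Under those assumptions, a MAP_FIXED request at 0x400000 for 8 MB passes, while a 0x500000-byte length or a 0x401000 start address is rejected with -EINVAL, which is what get_unmapped_area() and do_munmap() now return for misaligned hugetlb requests.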