From f8248793e4eadc827c9a0aed77d2bfd84dce3aa2 Mon Sep 17 00:00:00 2001 From: Coywolf Qi Hunt Date: Wed, 9 Mar 2005 16:42:01 -0800 Subject: [PATCH] mnt_init() cleanup At the very beginning in 2.4 days, in mnt_init(), mount_hashtable allocation page order was determined at runtime. Later the page order got fixed to 0. This patch cleans it up. Signed-off-by: Coywolf Qi Hunt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/namespace.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/fs/namespace.c b/fs/namespace.c index 755b081c0bd2..57882f9b81c0 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1392,16 +1392,14 @@ static void __init init_mount_tree(void) void __init mnt_init(unsigned long mempages) { struct list_head *d; - unsigned long order; unsigned int nr_hash; int i; mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); - order = 0; mount_hashtable = (struct list_head *) - __get_free_pages(GFP_ATOMIC, order); + __get_free_page(GFP_ATOMIC); if (!mount_hashtable) panic("Failed to allocate mount hash table\n"); @@ -1411,7 +1409,7 @@ void __init mnt_init(unsigned long mempages) * We don't guarantee that "sizeof(struct list_head)" is necessarily * a power-of-two. */ - nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct list_head); + nr_hash = PAGE_SIZE / sizeof(struct list_head); hash_bits = 0; do { hash_bits++; @@ -1425,8 +1423,7 @@ void __init mnt_init(unsigned long mempages) nr_hash = 1UL << hash_bits; hash_mask = nr_hash-1; - printk("Mount-cache hash table entries: %d (order: %ld, %ld bytes)\n", - nr_hash, order, (PAGE_SIZE << order)); + printk("Mount-cache hash table entries: %d\n", nr_hash); /* And initialize the newly allocated array */ d = mount_hashtable; -- cgit v1.2.3