summary refs log tree commit diff
path: root/include/linux
diff options
context:
space:
mode:
authorTrond Myklebust <trond.myklebust@fys.uio.no>2004-08-23 10:13:19 -0400
committerTrond Myklebust <trond.myklebust@fys.uio.no>2004-08-23 10:13:19 -0400
commit252470cdb5c1189c5a4b4423a810358f8847a24a (patch)
treeebc09c1b15284f1137ecd345c633a73a0a4bb971 /include/linux
parent395f639e04b8c885fcc52c90748835e57b4b2f69 (diff)
NFS: Break the nfs_wreq_lock into per-mount locks.

This helps prevent a heavy read and write workload on one mount point from interfering with workloads on other mount points. Note that there is still some serialization due to the big kernel lock. Signed-off-by: Chuck Lever <cel@netapp.com> Signed-off-by: Trond Myklebust <trond.myklebust@fys.uio.no>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_page.h8
2 files changed, 4 insertions, 5 deletions
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index e50205d31df3..1b9154d67afa 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -147,6 +147,7 @@ struct nfs_inode {
/*
* This is the list of dirty unwritten pages.
*/
+ spinlock_t req_lock;
struct list_head dirty;
struct list_head commit;
struct radix_tree_root nfs_page_tree;
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 454587123f33..12a6758cc859 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -40,8 +40,8 @@ struct nfs_page {
unsigned long wb_index; /* Offset >> PAGE_CACHE_SHIFT */
unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
wb_pgbase, /* Start of page data */
- wb_bytes, /* Length of request */
- wb_count; /* reference count */
+ wb_bytes; /* Length of request */
+ atomic_t wb_count; /* reference count */
unsigned long wb_flags;
struct nfs_writeverf wb_verf; /* Commit cookie */
};
@@ -65,8 +65,6 @@ extern int nfs_coalesce_requests(struct list_head *, struct list_head *,
unsigned int);
extern int nfs_wait_on_request(struct nfs_page *);
-extern spinlock_t nfs_wreq_lock;
-
/*
* Lock the page of an asynchronous request without incrementing the wb_count
*/
@@ -86,7 +84,7 @@ nfs_lock_request(struct nfs_page *req)
{
if (test_and_set_bit(PG_BUSY, &req->wb_flags))
return 0;
- req->wb_count++;
+ atomic_inc(&req->wb_count);
return 1;
}