diff options
| author | Trond Myklebust <trond.myklebust@fys.uio.no> | 2004-05-20 07:47:06 -0400 |
|---|---|---|
| committer | Trond Myklebust <trond.myklebust@fys.uio.no> | 2004-05-20 07:47:06 -0400 |
| commit | 6da1abab69dc2d6b12123fba230f539028743b0f (patch) | |
| tree | 0028ed9483ad16d1b3d76cfcbe176691f6c9451a /include/linux | |
| parent | 015d2aecf070aa2016f2140dcf09e837e35ec233 (diff) | |
Following a suggestion by Jamie Lokier
RPC: Make "major" timeouts be of fixed length "timeo<<retrans"
rather than counting the number of retransmissions. The
clock starts at the first attempt to send each request.
RPC: Ensure that we "slow start" the RTT estimation after a
major timeout has occurred.
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/sunrpc/xprt.h | 10 |
1 file changed, 5 insertions, 5 deletions
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 5222a8f4c580..298b18486729 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -69,8 +69,7 @@ extern unsigned int xprt_tcp_slot_table_entries;
  * This describes a timeout strategy
  */
 struct rpc_timeout {
-	unsigned long		to_current,		/* current timeout */
-				to_initval,		/* initial timeout */
+	unsigned long		to_initval,		/* initial timeout */
 				to_maxval,		/* max timeout */
 				to_increment;		/* if !exponential */
 	unsigned int		to_retries;		/* max # of retries */
@@ -85,7 +84,6 @@ struct rpc_rqst {
 	 * This is the user-visible part
 	 */
 	struct rpc_xprt *	rq_xprt;		/* RPC client */
-	struct rpc_timeout	rq_timeout;		/* timeout parms */
 	struct xdr_buf		rq_snd_buf;		/* send buffer */
 	struct xdr_buf		rq_rcv_buf;		/* recv buffer */
@@ -103,6 +101,9 @@ struct rpc_rqst {
 	struct xdr_buf		rq_private_buf;		/* The receive buffer
 							 * used in the softirq.
 							 */
+	unsigned long		rq_majortimeo;		/* major timeout alarm */
+	unsigned long		rq_timeout;		/* Current timeout value */
+	unsigned int		rq_retries;		/* # of retries */
 	/*
 	 * For authentication (e.g. auth_des)
 	 */
@@ -115,7 +116,6 @@ struct rpc_rqst {
 	u32			rq_bytes_sent;		/* Bytes we have sent */
 	unsigned long		rq_xtime;		/* when transmitted */
-	int			rq_ntimeo;
 	int			rq_ntrans;
 };
 #define rq_svec			rq_snd_buf.head
@@ -210,7 +210,7 @@ void	xprt_reserve(struct rpc_task *);
 int	xprt_prepare_transmit(struct rpc_task *);
 void	xprt_transmit(struct rpc_task *);
 void	xprt_receive(struct rpc_task *);
-int	xprt_adjust_timeout(struct rpc_timeout *);
+int	xprt_adjust_timeout(struct rpc_rqst *req);
 void	xprt_release(struct rpc_task *);
 void	xprt_connect(struct rpc_task *);
 int	xprt_clear_backlog(struct rpc_xprt *);
