diff options
| author | Trond Myklebust <trond.myklebust@fys.uio.no> | 2003-10-07 07:30:00 -0400 |
|---|---|---|
| committer | Trond Myklebust <trond.myklebust@fys.uio.no> | 2003-10-07 07:30:00 -0400 |
| commit | 5d371cbc375aca3d47b839de5b90db5a60dd2833 (patch) | |
| tree | 6c9b8080fc65032edc3a855e46fdb3d70926a915 | |
| parent | 9b80ac47e771874c3a87f5be2b044415cd14c4a4 (diff) | |
UDP round trip timer fix. Modify Karn's algorithm so that
we inherit timeouts from previous requests.
This means that we lengthen the window of time during which
we accept updates to the RTO estimate if we see an update.
Scheme proposed by Brian Mancuso, but it is standard for TCP
congestion control implementations.
| -rw-r--r-- | include/linux/sunrpc/timer.h | 15 | ||||
| -rw-r--r-- | net/sunrpc/xprt.c | 13 |
2 files changed, 22 insertions, 6 deletions
diff --git a/include/linux/sunrpc/timer.h b/include/linux/sunrpc/timer.h index f2f2ffc4f2cd..1d0d3a0d64ec 100644 --- a/include/linux/sunrpc/timer.h +++ b/include/linux/sunrpc/timer.h @@ -15,6 +15,7 @@ struct rpc_rtt { unsigned long timeo; /* default timeout value */ unsigned long srtt[5]; /* smoothed round trip time << 3 */ unsigned long sdrtt[5]; /* smoothed medium deviation of RTT */ + int ntimeouts[5]; /* Number of timeouts for the last request */ }; @@ -22,4 +23,18 @@ extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo); extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m); extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer); +static inline void rpc_set_timeo(struct rpc_rtt *rt, int timer, int ntimeo) +{ + if (!timer) + return; + rt->ntimeouts[timer-1] = ntimeo; +} + +static inline int rpc_ntimeo(struct rpc_rtt *rt, int timer) +{ + if (!timer) + return 0; + return rt->ntimeouts[timer-1]; +} + #endif /* _LINUX_SUNRPC_TIMER_H */ diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 61f4bfbecdca..ef616fe9c561 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -579,14 +579,14 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) /* Adjust congestion window */ if (!xprt->nocong) { + unsigned timer = task->tk_msg.rpc_proc->p_timer; xprt_adjust_cwnd(xprt, copied); __xprt_put_cong(xprt, req); - if (req->rq_ntrans == 1) { - unsigned timer = - task->tk_msg.rpc_proc->p_timer; - if (timer) + if (timer) { + if (req->rq_ntrans == 1) rpc_update_rtt(&clnt->cl_rtt, timer, (long)jiffies - req->rq_xtime); + rpc_set_timeo(&clnt->cl_rtt, timer, req->rq_ntrans - 1); } } @@ -1223,8 +1223,9 @@ xprt_transmit(struct rpc_task *task) /* Set the task's receive timeout value */ spin_lock_bh(&xprt->sock_lock); if (!xprt->nocong) { - task->tk_timeout = rpc_calc_rto(&clnt->cl_rtt, - task->tk_msg.rpc_proc->p_timer); + int timer = task->tk_msg.rpc_proc->p_timer; + task->tk_timeout = rpc_calc_rto(&clnt->cl_rtt, timer); + task->tk_timeout <<= rpc_ntimeo(&clnt->cl_rtt, timer); task->tk_timeout <<= clnt->cl_timeout.to_retries - req->rq_timeout.to_retries; if (task->tk_timeout > req->rq_timeout.to_maxval)
