author    Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 20:10:26 -0800
committer Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 20:10:26 -0800
commit    bb5b6e6c4dea4242f6ff75fa7adecea4f34935f1 (patch)
tree      dbdda6fdd7f0bfadb4c10a997bfb15660f3223c0 /include/net
parent    0b9ded43ee424791d9283cee2a33dcb4a97da57d (diff)
v2.4.8.4 -> v2.4.9
- David Miller: sparc updates, FAT fs fixes, btaudio build fix
- David Gibson: Orinoco driver update
- Kevin Fleming: more disks the HPT controller doesn't like
- David Miller: "min()/max()" cleanups. Understands signs and sizes.
- Ben LaHaise: make vma merging more generous, help Mozilla /proc/<>/maps
- Jens Axboe: CD updates
- Trond Myklebust: save away NFS credentials in inode, so that mmap can writeout.
- Mark Hemment: HIGHMEM ops cleanups
- Jes Sorensen: use "unsigned long" for flags in various drivers
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/sock.h  22
-rw-r--r--  include/net/tcp.h   34
2 files changed, 21 insertions(+), 35 deletions(-)
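
The "min()/max()" cleanup item above is what this diff implements: the old
sock.h inline helpers are removed in favour of three-argument macros that
take an explicit type, so the comparison happens in a type the caller chose
rather than silently in "unsigned int". A minimal sketch of the replacement
macros, along the lines of what include/linux/kernel.h gained in this
release (paraphrased, not a verbatim quote of that file):

/* Compare x and y after converting both to the caller-supplied type,
 * evaluating each argument exactly once. */
#define min(type, x, y) \
	({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })
#define max(type, x, y) \
	({ type __x = (x); type __y = (y); __x > __y ? __x : __y; })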
diff --git a/include/net/sock.h b/include/net/sock.h
index bc2a1192e50b..403e842f6d78 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -803,26 +803,6 @@ do { spin_lock_bh(&((__sk)->lock.slock)); \
#define bh_lock_sock(__sk) spin_lock(&((__sk)->lock.slock))
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->lock.slock))
-/*
- * This might not be the most appropriate place for this two
- * but since they are used by a lot of the net related code
- * at least they get declared on a include that is common to all
- */
-
-static __inline__ int min(unsigned int a, unsigned int b)
-{
- if (a > b)
- a = b;
- return a;
-}
-
-static __inline__ int max(unsigned int a, unsigned int b)
-{
- if (a < b)
- a = b;
- return a;
-}
-
extern struct sock * sk_alloc(int family, int priority, int zero_it);
extern void sk_free(struct sock *sk);
@@ -1265,7 +1245,7 @@ static inline long sock_sndtimeo(struct sock *sk, int noblock)
static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
{
- return (waitall ? len : min(sk->rcvlowat, len)) ? : 1;
+ return (waitall ? len : min(int, sk->rcvlowat, len)) ? : 1;
}
/* Alas, with timeout socket operations are not restartable.
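
Why the removed helpers had to go: they converted every argument to
"unsigned int" and handed the result back as "int", so negative and 64-bit
inputs were silently mangled. A small userspace illustration (a hypothetical
test program, not code from the tree) using the old definition's semantics:

#include <stdio.h>

/* The old sock.h helper: unsigned compare, int result. */
static inline int old_min(unsigned int a, unsigned int b)
{
	if (a > b)
		a = b;
	return a;
}

int main(void)
{
	/* -1 converts to UINT_MAX, so the "minimum" of -1 and 5 is 5. */
	printf("%d\n", old_min(-1, 5));	/* prints 5, not -1 */
	return 0;
}

With the typed macro the caller states the intent: min(int, -1, 5) yields
-1, while min(u32, a, b) compares as 32-bit unsigned. That is why
sock_rcvlowat above now spells out min(int, sk->rcvlowat, len).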
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e7f459819865..8edcd920facd 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -919,11 +919,14 @@ static __inline__ unsigned int tcp_current_mss(struct sock *sk)
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
- int hint = min(tp->advmss, tp->mss_cache);
+ unsigned int hint = min(unsigned int, tp->advmss, tp->mss_cache);
- hint = min(hint, tp->rcv_wnd/2);
+ hint = min(unsigned int, hint, tp->rcv_wnd/2);
- tp->ack.rcv_mss = max(min(hint, TCP_MIN_RCVMSS), TCP_MIN_MSS);
+ tp->ack.rcv_mss = max(unsigned int,
+ min(unsigned int,
+ hint, TCP_MIN_RCVMSS),
+ TCP_MIN_MSS);
}
static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
@@ -1074,7 +1077,7 @@ static __inline__ int tcp_packets_in_flight(struct tcp_opt *tp)
*/
static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{
- return max(tp->snd_cwnd>>1, 2);
+ return max(u32, tp->snd_cwnd >> 1, 2);
}
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
@@ -1086,7 +1089,9 @@ static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
return tp->snd_ssthresh;
else
- return max(tp->snd_ssthresh, (tp->snd_cwnd>>1)+(tp->snd_cwnd>>2));
+ return max(u32, tp->snd_ssthresh,
+ ((tp->snd_cwnd >> 1) +
+ (tp->snd_cwnd >> 2)));
}
static inline void tcp_sync_left_out(struct tcp_opt *tp)
@@ -1121,7 +1126,8 @@ static inline void __tcp_enter_cwr(struct tcp_opt *tp)
{
tp->undo_marker = 0;
tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
- tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+ tp->snd_cwnd = min(u32, tp->snd_cwnd,
+ tcp_packets_in_flight(tp) + 1);
tp->snd_cwnd_cnt = 0;
tp->high_seq = tp->snd_nxt;
tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -1486,12 +1492,12 @@ static inline void tcp_select_initial_window(int space, __u32 mss,
{
/* If no clamp set the clamp to the max possible scaled window */
if (*window_clamp == 0)
- (*window_clamp) = (65535<<14);
- space = min(*window_clamp,space);
+ (*window_clamp) = (65535 << 14);
+ space = min(u32, *window_clamp, space);
/* Quantize space offering to a multiple of mss if possible. */
if (space > mss)
- space = (space/mss)*mss;
+ space = (space / mss) * mss;
/* NOTE: offering an initial window larger than 32767
* will break some buggy TCP stacks. We try to be nice.
@@ -1499,7 +1505,7 @@ static inline void tcp_select_initial_window(int space, __u32 mss,
* our initial window offering to 32k. There should also
* be a sysctl option to stop being nice.
*/
- (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
+ (*rcv_wnd) = min(int, space, MAX_TCP_WINDOW);
(*rcv_wscale) = 0;
if (wscale_ok) {
/* See RFC1323 for an explanation of the limit to 14 */
@@ -1508,7 +1514,7 @@ static inline void tcp_select_initial_window(int space, __u32 mss,
(*rcv_wscale)++;
}
if (*rcv_wscale && sysctl_tcp_app_win && space>=mss &&
- space - max((space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
+ space - max(unsigned int, (space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
(*rcv_wscale)--;
}
@@ -1526,7 +1532,7 @@ static inline void tcp_select_initial_window(int space, __u32 mss,
*rcv_wnd = init_cwnd*mss;
}
/* Set the clamp no higher than max representable value */
- (*window_clamp) = min(65535<<(*rcv_wscale),*window_clamp);
+ (*window_clamp) = min(u32, 65535 << (*rcv_wscale), *window_clamp);
}
static inline int tcp_win_from_space(int space)
@@ -1692,8 +1698,8 @@ static inline void tcp_enter_memory_pressure(void)
static inline void tcp_moderate_sndbuf(struct sock *sk)
{
if (!(sk->userlocks&SOCK_SNDBUF_LOCK)) {
- sk->sndbuf = min(sk->sndbuf, sk->wmem_queued/2);
- sk->sndbuf = max(sk->sndbuf, SOCK_MIN_SNDBUF);
+ sk->sndbuf = min(int, sk->sndbuf, sk->wmem_queued/2);
+ sk->sndbuf = max(int, sk->sndbuf, SOCK_MIN_SNDBUF);
}
}
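
A note on the type arguments chosen throughout the tcp.h hunks: the type
names the domain of the comparison, so window and cwnd arithmetic uses u32
while buffer sizes, which are plain ints in struct sock, use int. A short
usage sketch of the tcp_recalc_ssthresh rule above, with made-up values
(an illustration, not code from this diff):

/* Slow start threshold: half the congestion window, floored at
 * two segments. */
u32 snd_cwnd = 3;
u32 ssthresh = max(u32, snd_cwnd >> 1, 2);	/* 3 >> 1 == 1, so ssthresh == 2 */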