author     Anton Altaparmakov <aia21@cantab.net>    2004-06-24 10:02:08 +0100
committer  Anton Altaparmakov <aia21@cantab.net>    2004-06-24 10:02:08 +0100
commit     702fdfcae9a47ec4976d82d3d0b4b4a41bd72a52 (patch)
tree       ae4354c39b18bcba1a4dd0ed89322988be78388d /include/net
parent     8e2552a8f2addafc142626cd11d4bad9101c41c4 (diff)
parent     5e1c40de84c8e0514e8fe7e5db5087355dc00d5c (diff)
Merge cantab.net:/home/src/bklinux-2.6
into cantab.net:/home/src/ntfs-2.6
Diffstat (limited to 'include/net')
-rw-r--r--    include/net/esp.h          5
-rw-r--r--    include/net/pkt_act.h    286
-rw-r--r--    include/net/pkt_cls.h     32
-rw-r--r--    include/net/pkt_sched.h   85
-rw-r--r--    include/net/sock.h        44
-rw-r--r--    include/net/tcp.h         49
6 files changed, 454 insertions, 47 deletions
diff --git a/include/net/esp.h b/include/net/esp.h
index a513d14522ee..90cd94fad7d9 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -2,9 +2,14 @@
#define _NET_ESP_H
#include <net/xfrm.h>
+#include <asm/scatterlist.h>
+
+#define ESP_NUM_FAST_SG 4
struct esp_data
{
+ struct scatterlist sgbuf[ESP_NUM_FAST_SG];
+
/* Confidentiality */
struct {
u8 *key; /* Key */
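The esp.h hunk embeds a small scatterlist array directly in struct esp_data so that packets mapping to at most ESP_NUM_FAST_SG fragments can skip a per-packet allocation in the transform path. A minimal sketch of that fast-path/fallback pattern, assuming a fragment count nfrags and an error label (illustrative only, not the esp_output() code in this tree):

    /* illustrative fast-path/heap-fallback pattern enabled by sgbuf[] */
    struct scatterlist *sg = esp->sgbuf;

    if (nfrags > ESP_NUM_FAST_SG) {
        /* heavily fragmented skb: fall back to a heap allocation */
        sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
        if (sg == NULL)
            goto error;
    }
    /* ... fill the scatterlist and run the cipher over it ... */
    if (sg != esp->sgbuf)
        kfree(sg);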
diff --git a/include/net/pkt_act.h b/include/net/pkt_act.h
new file mode 100644
index 000000000000..9f37ac40f1a0
--- /dev/null
+++ b/include/net/pkt_act.h
@@ -0,0 +1,286 @@
+#ifndef __NET_PKT_ACT_H
+#define __NET_PKT_ACT_H
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+
+#define tca_st(val) (struct tcf_##val *)
+#define PRIV(a,name) ( tca_st(name) (a)->priv)
+
+#if 0 /* control */
+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define DPRINTK(format,args...)
+#endif
+
+#if 0 /* data */
+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
+#else
+#define D2PRINTK(format,args...)
+#endif
+
+static __inline__ unsigned
+tcf_hash(u32 index)
+{
+ return index & MY_TAB_MASK;
+}
+
+/* probably move this from being inline
+ * and put into act_generic
+*/
+static inline void
+tcf_hash_destroy(struct tcf_st *p)
+{
+ unsigned h = tcf_hash(p->index);
+ struct tcf_st **p1p;
+
+ for (p1p = &tcf_ht[h]; *p1p; p1p = &(*p1p)->next) {
+ if (*p1p == p) {
+ write_lock_bh(&tcf_t_lock);
+ *p1p = p->next;
+ write_unlock_bh(&tcf_t_lock);
+#ifdef CONFIG_NET_ESTIMATOR
+ qdisc_kill_estimator(&p->stats);
+#endif
+ kfree(p);
+ return;
+ }
+ }
+ BUG_TRAP(0);
+}
+
+static inline int
+tcf_hash_release(struct tcf_st *p, int bind )
+{
+ int ret = 0;
+ if (p) {
+ if (bind) {
+ p->bindcnt--;
+ }
+ p->refcnt--;
+ if(p->bindcnt <=0 && p->refcnt <= 0) {
+ tcf_hash_destroy(p);
+ ret = 1;
+ }
+ }
+ return ret;
+}
+
+static __inline__ int
+tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tc_action *a)
+{
+ struct tcf_st *p;
+ int err =0, index = -1,i= 0, s_i = 0, n_i = 0;
+ struct rtattr *r ;
+
+ read_lock(&tcf_t_lock);
+
+ s_i = cb->args[0];
+
+ for (i = 0; i < MY_TAB_SIZE; i++) {
+ p = tcf_ht[tcf_hash(i)];
+
+ for (; p; p = p->next) {
+ index++;
+ if (index < s_i)
+ continue;
+ a->priv = p;
+ a->order = n_i;
+ r = (struct rtattr*) skb->tail;
+ RTA_PUT(skb, a->order, 0, NULL);
+ err = tcf_action_dump_1(skb, a, 0, 0);
+ if (0 > err) {
+ index--;
+ skb_trim(skb, (u8*)r - skb->data);
+ goto done;
+ }
+ r->rta_len = skb->tail - (u8*)r;
+ n_i++;
+ if (n_i >= TCA_ACT_MAX_PRIO) {
+ goto done;
+ }
+ }
+ }
+done:
+ read_unlock(&tcf_t_lock);
+ if (n_i)
+ cb->args[0] += n_i;
+ return n_i;
+
+rtattr_failure:
+ skb_trim(skb, (u8*)r - skb->data);
+ goto done;
+}
+
+static __inline__ int
+tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
+{
+ struct tcf_st *p, *s_p;
+ struct rtattr *r ;
+ int i= 0, n_i = 0;
+
+ r = (struct rtattr*) skb->tail;
+ RTA_PUT(skb, a->order, 0, NULL);
+ RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
+ for (i = 0; i < MY_TAB_SIZE; i++) {
+ p = tcf_ht[tcf_hash(i)];
+
+ while (p != NULL) {
+ s_p = p->next;
+ if (ACT_P_DELETED == tcf_hash_release(p, 0)) {
+ module_put(a->ops->owner);
+ }
+ n_i++;
+ p = s_p;
+ }
+ }
+ RTA_PUT(skb, TCA_FCNT, 4, &n_i);
+ r->rta_len = skb->tail - (u8*)r;
+
+ return n_i;
+rtattr_failure:
+ skb_trim(skb, (u8*)r - skb->data);
+ return -EINVAL;
+}
+
+static __inline__ int
+tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type,
+ struct tc_action *a)
+{
+ if (type == RTM_DELACTION) {
+ return tcf_del_walker(skb,a);
+ } else if (type == RTM_GETACTION) {
+ return tcf_dump_walker(skb,cb,a);
+ } else {
+ printk("tcf_generic_walker: unknown action %d\n",type);
+ return -EINVAL;
+ }
+}
+
+static __inline__ struct tcf_st *
+tcf_hash_lookup(u32 index)
+{
+ struct tcf_st *p;
+
+ read_lock(&tcf_t_lock);
+ for (p = tcf_ht[tcf_hash(index)]; p; p = p->next) {
+ if (p->index == index)
+ break;
+ }
+ read_unlock(&tcf_t_lock);
+ return p;
+}
+
+static __inline__ u32
+tcf_hash_new_index(void)
+{
+ do {
+ if (++idx_gen == 0)
+ idx_gen = 1;
+ } while (tcf_hash_lookup(idx_gen));
+
+ return idx_gen;
+}
+
+
+static inline int
+tcf_hash_search(struct tc_action *a, u32 index)
+{
+ struct tcf_st *p = tcf_hash_lookup(index);
+
+ if (p != NULL) {
+ a->priv = p;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+#ifdef CONFIG_NET_ACT_INIT
+static inline struct tcf_st *
+tcf_hash_check(struct tc_st *parm, struct tc_action *a, int ovr, int bind)
+{
+ struct tcf_st *p = NULL;
+ if (parm->index && (p = tcf_hash_lookup(parm->index)) != NULL) {
+ spin_lock(&p->lock);
+ if (bind) {
+ p->bindcnt++;
+ p->refcnt++;
+ }
+ spin_unlock(&p->lock);
+ a->priv = (void *) p;
+ }
+ return p;
+}
+
+static inline struct tcf_st *
+tcf_hash_create(struct tc_st *parm, struct rtattr *est, struct tc_action *a, int size, int ovr, int bind)
+{
+ unsigned h;
+ struct tcf_st *p = NULL;
+
+ p = kmalloc(size, GFP_KERNEL);
+ if (p == NULL)
+ return p;
+
+ memset(p, 0, size);
+ p->refcnt = 1;
+
+ if (bind) {
+ p->bindcnt = 1;
+ }
+
+ spin_lock_init(&p->lock);
+ p->stats.lock = &p->lock;
+ p->index = parm->index ? : tcf_hash_new_index();
+ p->tm.install = jiffies;
+ p->tm.lastuse = jiffies;
+#ifdef CONFIG_NET_ESTIMATOR
+ if (est) {
+ qdisc_new_estimator(&p->stats, est);
+ }
+#endif
+ h = tcf_hash(p->index);
+ write_lock_bh(&tcf_t_lock);
+ p->next = tcf_ht[h];
+ tcf_ht[h] = p;
+ write_unlock_bh(&tcf_t_lock);
+
+ a->priv = (void *) p;
+ return p;
+}
+
+static inline struct tcf_st *
+tcf_hash_init(struct tc_st *parm, struct rtattr *est, struct tc_action *a, int size, int ovr, int bind)
+{
+	struct tcf_st *p = tcf_hash_check(parm, a, ovr, bind);
+
+	if (p == NULL)
+		p = tcf_hash_create(parm, est, a, size, ovr, bind);
+	return p;
+}
+
+#endif
+
+#endif
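pkt_act.h supplies the shared plumbing -- hash lookup, index allocation, reference/bind counting and the netlink walkers -- that individual action modules are expected to include and build on. A hedged sketch of how a hypothetical module's ->init and ->cleanup callbacks would sit on top of tcf_hash_check()/tcf_hash_create()/tcf_hash_release(); every "myact" name below is illustrative, not code from this tree:

    /* hypothetical action private data; tca_gen() pulls in the common
     * index/refcnt/bindcnt/stats/lock fields declared in pkt_sched.h */
    struct tcf_myact {
        tca_gen(myact);
        int my_parm;
    };

    static int tcf_myact_init(struct rtattr *rta, struct rtattr *est,
                              struct tc_action *a, int ovr, int bind)
    {
        struct tc_st parm = { .index = 0 };   /* normally parsed out of rta */
        struct tcf_myact *p;

        p = (struct tcf_myact *)tcf_hash_check(&parm, a, ovr, bind);
        if (p != NULL)
            return 0;   /* existing entry; check bumped the counts when bind is set */

        p = (struct tcf_myact *)tcf_hash_create(&parm, est, a,
                                                sizeof(*p), ovr, bind);
        if (p == NULL)
            return -ENOMEM;
        return ACT_P_CREATED;
    }

    static int tcf_myact_cleanup(struct tc_action *a, int bind)
    {
        struct tcf_myact *p = PRIV(a, myact);

        return p ? tcf_hash_release((struct tcf_st *)p, bind) : 0;
    }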
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ca07367721df..4de3ea73ca5d 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -71,12 +71,38 @@ static inline int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, struct
{
int err = 0;
u32 protocol = skb->protocol;
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_proto *otp = tp;
+reclassify:
+#endif
+ protocol = skb->protocol;
for ( ; tp; tp = tp->next) {
if ((tp->protocol == protocol ||
- tp->protocol == __constant_htons(ETH_P_ALL)) &&
- (err = tp->classify(skb, tp, res)) >= 0)
+ tp->protocol == __constant_htons(ETH_P_ALL)) &&
+ (err = tp->classify(skb, tp, res)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ if ( TC_ACT_RECLASSIFY == err) {
+ __u32 verd = (__u32) G_TC_VERD(skb->tc_verd);
+ tp = otp;
+
+ if (MAX_REC_LOOP < verd++) {
+ printk("rule prio %d protocol %02x reclassify is buggy packet dropped\n",tp->prio&0xffff, ntohs(tp->protocol));
+ return TC_ACT_SHOT;
+ }
+ skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd);
+ goto reclassify;
+ } else {
+ if (skb->tc_verd)
+ skb->tc_verd = SET_TC_VERD(skb->tc_verd,0);
+ return err;
+ }
+#else
+
return err;
+#endif
+ }
+
}
return -1;
}
@@ -90,6 +116,8 @@ static inline void tcf_destroy(struct tcf_proto *tp)
extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+extern int ing_filter(struct sk_buff *skb);
+
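With CONFIG_NET_CLS_ACT, tc_classify() can now restart the filter walk from the first filter when a classifier returns TC_ACT_RECLASSIFY, carrying the round count in skb->tc_verd and giving up with TC_ACT_SHOT once MAX_REC_LOOP is exceeded. A sketch of how a qdisc enqueue path might consume the extended return values (the drop policy and q->filter_list are assumptions, not code from this patch):

    struct tcf_result res;
    int result = tc_classify(skb, q->filter_list, &res);

    if (result < 0)
        goto no_match;                  /* no filter matched */
#ifdef CONFIG_NET_CLS_ACT
    if (result == TC_ACT_SHOT) {        /* policed, or reclassify loop hit MAX_REC_LOOP */
        kfree_skb(skb);
        return NET_XMIT_DROP;           /* illustrative; real qdiscs differ */
    }
#endif
    /* otherwise res.classid selects the class as before */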
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d8960a01df42..221f1dac7853 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -13,11 +13,14 @@
#include <linux/pkt_sched.h>
#include <linux/rcupdate.h>
#include <net/pkt_cls.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
#ifdef CONFIG_X86_TSC
#include <asm/msr.h>
#endif
+
struct rtattr;
struct Qdisc;
@@ -392,14 +395,15 @@ struct tcf_police
{
struct tcf_police *next;
int refcnt;
+#ifdef CONFIG_NET_CLS_ACT
+ int bindcnt;
+#endif
u32 index;
-
int action;
int result;
u32 ewma_rate;
u32 burst;
u32 mtu;
-
u32 toks;
u32 ptoks;
psched_time_t t_c;
@@ -410,16 +414,89 @@ struct tcf_police
struct tc_stats stats;
};
+#ifdef CONFIG_NET_CLS_ACT
+
+#define ACT_P_CREATED 1
+#define ACT_P_DELETED 1
+#define tca_gen(name) \
+struct tcf_##name *next; \
+ u32 index; \
+ int refcnt; \
+ int bindcnt; \
+ u32 capab; \
+ int action; \
+ struct tcf_t tm; \
+ struct tc_stats stats; \
+ spinlock_t lock
+
+
+struct tc_action
+{
+ void *priv;
+ struct tc_action_ops *ops;
+ __u32 type; /* for backward compat(TCA_OLD_COMPAT) */
+ __u32 order;
+ struct tc_action *next;
+};
+
+#define TCA_CAP_NONE 0
+struct tc_action_ops
+{
+ struct tc_action_ops *next;
+ char kind[IFNAMSIZ];
+ __u32 type; /* TBD to match kind */
+ __u32 capab; /* capabilities includes 4 bit version */
+ struct module *owner;
+ int (*act)(struct sk_buff **, struct tc_action *);
+ int (*get_stats)(struct sk_buff *, struct tc_action *);
+ int (*dump)(struct sk_buff *, struct tc_action *,int , int);
+ int (*cleanup)(struct tc_action *, int bind);
+ int (*lookup)(struct tc_action *, u32 );
+ int (*init)(struct rtattr *,struct rtattr *,struct tc_action *, int , int );
+ int (*walk)(struct sk_buff *, struct netlink_callback *, int , struct tc_action *);
+};
+
+extern int tcf_register_action(struct tc_action_ops *a);
+extern int tcf_unregister_action(struct tc_action_ops *a);
+extern void tcf_action_destroy(struct tc_action *a, int bind);
+extern int tcf_action_exec(struct sk_buff *skb, struct tc_action *a);
+extern int tcf_action_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,char *n, int ovr, int bind);
+extern int tcf_action_init_1(struct rtattr *rta, struct rtattr *est, struct tc_action *a,char *n, int ovr, int bind);
+extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
+extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
+extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+extern int tcf_action_copy_stats (struct sk_buff *,struct tc_action *);
+extern int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,struct tc_action *,int , int );
+extern int tcf_act_police_dump(struct sk_buff *, struct tc_action *, int, int);
+extern int tcf_act_police(struct sk_buff **skb, struct tc_action *a);
+#endif
+
+extern int tcf_police(struct sk_buff *skb, struct tcf_police *p);
extern int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st);
extern void tcf_police_destroy(struct tcf_police *p);
extern struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est);
extern int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p);
-extern int tcf_police(struct sk_buff *skb, struct tcf_police *p);
-static inline void tcf_police_release(struct tcf_police *p)
+static inline int tcf_police_release(struct tcf_police *p, int bind)
{
+ int ret = 0;
+#ifdef CONFIG_NET_CLS_ACT
+ if (p) {
+ if (bind) {
+ p->bindcnt--;
+ }
+ p->refcnt--;
+ if (p->refcnt <= 0 && !p->bindcnt) {
+ tcf_police_destroy(p);
+ ret = 1;
+ }
+ }
+#else
if (p && --p->refcnt == 0)
tcf_police_destroy(p);
+
+#endif
+ return ret;
}
extern struct Qdisc noop_qdisc;
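The new tc_action / tc_action_ops pair defines the interface an action module exposes to the core. A hedged sketch of registering such a module, reusing the generic pkt_act.h helpers where the signatures line up; all "myact" symbols are hypothetical, and tcf_myact_act/tcf_myact_dump are assumed to be defined elsewhere in the module:

    static struct tc_action_ops act_myact_ops = {
        .kind     = "myact",
        .type     = 0,                  /* TCA_OLD_COMPAT type, illustrative */
        .capab    = TCA_CAP_NONE,
        .owner    = THIS_MODULE,
        .act      = tcf_myact_act,      /* int (*)(struct sk_buff **, struct tc_action *) */
        .dump     = tcf_myact_dump,
        .cleanup  = tcf_myact_cleanup,
        .lookup   = tcf_hash_search,    /* the generic pkt_act.h helpers fit these slots */
        .init     = tcf_myact_init,
        .walk     = tcf_generic_walker,
    };

    static int __init myact_init_module(void)
    {
        return tcf_register_action(&act_myact_ops);
    }

    static void __exit myact_cleanup_module(void)
    {
        tcf_unregister_action(&act_myact_ops);
    }

    module_init(myact_init_module);
    module_exit(myact_cleanup_module);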
diff --git a/include/net/sock.h b/include/net/sock.h
index 0ee6ccddca40..0398823e18ed 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -167,6 +167,9 @@ struct sock_common {
* @sk_socket - Identd and reporting IO signals
* @sk_user_data - RPC layer private data
* @sk_owner - module that owns this socket
+ * @sk_send_head - front of stuff to transmit
+ * @sk_write_pending - a write to stream socket waits to start
+ * @sk_queue_shrunk - write queue has been shrunk recently
* @sk_state_change - callback to indicate change in the state of the sock
* @sk_data_ready - callback to indicate there is data to be processed
* @sk_write_space - callback to indicate there is bf sending space available
@@ -246,8 +249,12 @@ struct sock {
struct timeval sk_stamp;
struct socket *sk_socket;
void *sk_user_data;
+ struct sk_buff *sk_send_head;
struct module *sk_owner;
+ int sk_write_pending;
void *sk_security;
+ __u8 sk_queue_shrunk;
+ /* three bytes hole, try to pack */
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk, int bytes);
void (*sk_write_space)(struct sock *sk);
@@ -434,6 +441,24 @@ static inline int sk_stream_memory_free(struct sock *sk)
return sk->sk_wmem_queued < sk->sk_sndbuf;
}
+extern void sk_stream_rfree(struct sk_buff *skb);
+
+static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+ skb->sk = sk;
+ skb->destructor = sk_stream_rfree;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ sk->sk_forward_alloc -= skb->truesize;
+}
+
+static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+ sk->sk_queue_shrunk = 1;
+ sk->sk_wmem_queued -= skb->truesize;
+ sk->sk_forward_alloc += skb->truesize;
+ __kfree_skb(skb);
+}
+
/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb) \
do { if (!(__sk)->sk_backlog.tail) { \
@@ -458,6 +483,11 @@ do { if (!(__sk)->sk_backlog.tail) { \
rc; \
})
+extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
+extern int sk_stream_error(struct sock *sk, int flags, int err);
+
extern int sk_wait_data(struct sock *sk, long *timeo);
/* IP protocol blocks we attach to sockets.
@@ -1067,6 +1097,20 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256
+static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+{
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
+ sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
+ sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
+ }
+}
+
+#define sk_stream_for_retrans_queue(skb, sk) \
+ for (skb = (sk)->sk_write_queue.next; \
+ (skb != (sk)->sk_send_head) && \
+ (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
+ skb = skb->next)
+
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
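sk_send_head and the sk_stream_for_retrans_queue() macro move the "first unsent packet" marker from the TCP-private tcp_opt into struct sock, so generic stream code can walk the sent-but-unacked part of the write queue. A minimal usage sketch (the byte counting is illustrative):

    struct sk_buff *skb;
    unsigned int outstanding = 0;

    sk_stream_for_retrans_queue(skb, sk) {
        /* every skb between the queue head and sk_send_head has been
         * transmitted but not yet fully acknowledged */
        outstanding += skb->len;
    }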
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3a323cd1e79f..22a62d58c24d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1186,13 +1186,6 @@ struct tcp_skb_cb {
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
-#define for_retrans_queue(skb, sk, tp) \
- for (skb = (sk)->sk_write_queue.next; \
- (skb != (tp)->send_head) && \
- (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
- skb=skb->next)
-
-
#include <net/tcp_ecn.h>
/* This determines how many packets are "in the network" to the best
@@ -1400,7 +1393,7 @@ tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int n
tcp_minshall_check(tp))));
}
-/* This checks if the data bearing packet SKB (usually tp->send_head)
+/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
* should be put on the wire right now.
*/
static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
@@ -1457,7 +1450,7 @@ static __inline__ void __tcp_push_pending_frames(struct sock *sk,
unsigned cur_mss,
int nonagle)
{
- struct sk_buff *skb = tp->send_head;
+ struct sk_buff *skb = sk->sk_send_head;
if (skb) {
if (!tcp_skb_is_last(sk, skb))
@@ -1477,7 +1470,7 @@ static __inline__ void tcp_push_pending_frames(struct sock *sk,
static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
{
- struct sk_buff *skb = tp->send_head;
+ struct sk_buff *skb = sk->sk_send_head;
return (skb &&
tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
@@ -1876,14 +1869,6 @@ static __inline__ void tcp_openreq_init(struct open_request *req,
#define TCP_MEM_QUANTUM ((int)PAGE_SIZE)
-static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
-{
- tcp_sk(sk)->queue_shrunk = 1;
- sk->sk_wmem_queued -= skb->truesize;
- sk->sk_forward_alloc += skb->truesize;
- __kfree_skb(skb);
-}
-
extern void __tcp_mem_reclaim(struct sock *sk);
extern int tcp_mem_schedule(struct sock *sk, int size, int kind);
@@ -1901,14 +1886,6 @@ static inline void tcp_enter_memory_pressure(void)
}
}
-static inline void tcp_moderate_sndbuf(struct sock *sk)
-{
- if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
- sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
- sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
- }
-}
-
static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
{
struct sk_buff *skb = alloc_skb(size+MAX_TCP_HEADER, gfp);
@@ -1923,7 +1900,7 @@ static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem,
__kfree_skb(skb);
} else {
tcp_enter_memory_pressure();
- tcp_moderate_sndbuf(sk);
+ sk_stream_moderate_sndbuf(sk);
}
return NULL;
}
@@ -1942,7 +1919,7 @@ static inline struct page * tcp_alloc_page(struct sock *sk)
return page;
}
tcp_enter_memory_pressure();
- tcp_moderate_sndbuf(sk);
+ sk_stream_moderate_sndbuf(sk);
return NULL;
}
@@ -1951,20 +1928,10 @@ static inline void tcp_writequeue_purge(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
- tcp_free_skb(sk, skb);
+ sk_stream_free_skb(sk, skb);
tcp_mem_reclaim(sk);
}
-extern void tcp_rfree(struct sk_buff *skb);
-
-static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
- skb->sk = sk;
- skb->destructor = tcp_rfree;
- atomic_add(skb->truesize, &sk->sk_rmem_alloc);
- sk->sk_forward_alloc -= skb->truesize;
-}
-
extern void tcp_listen_wlock(void);
/* - We may sleep inside this lock.
@@ -2049,8 +2016,8 @@ static inline int tcp_use_frto(const struct sock *sk)
* unsent new data, and the advertised window should allow
* sending it.
*/
- return (sysctl_tcp_frto && tp->send_head &&
- !after(TCP_SKB_CB(tp->send_head)->end_seq,
+ return (sysctl_tcp_frto && sk->sk_send_head &&
+ !after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
tp->snd_una + tp->snd_wnd));
}