| author | Jon Grimm <jgrimm@touki.austin.ibm.com> | 2003-07-03 11:17:11 -0500 |
|---|---|---|
| committer | Jon Grimm <jgrimm@touki.austin.ibm.com> | 2003-07-03 11:17:11 -0500 |
| commit | 1a7500029c3fd7a559352b61ece4ee5c5c5527f2 (patch) | |
| tree | 28d131737934e185d16754bf3acbd0618190bdb4 /net/sctp/ulpqueue.c | |
| parent | 876118222569b81d3fc45dacd101de57d60ebada (diff) | |
[SCTP] Move rwnd accounting and I/O redrive off of the skb destructor.
When an skb was shared with Ethereal, Ethereal was sometimes the last
user, so the destructor could run on another CPU that knew nothing
about our sock_lock. Move our rwnd updates and I/O redrive out of the
skb destructor.
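A minimal sketch of the problem and the pattern the fix adopts, with hypothetical helper names (only sctp_ulpevent_kfree_skb appears in the diff below). An skb destructor runs on whichever CPU drops the last reference, so when a sniffer such as Ethereal holds the final reference, the destructor cannot assume our sock_lock is held. Freeing event skbs through an explicit helper keeps the rwnd accounting on code paths that already own the lock:

```c
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical stand-in for the per-association rwnd credit; the
 * real accounting lives elsewhere in the SCTP stack.
 */
static void example_rwnd_credit(struct sock *sk, unsigned int len)
{
	/* Safe to touch per-socket receive-window state here: every
	 * caller holds the socket lock, unlike skb->destructor, which
	 * may fire on another CPU in an unrelated context.
	 */
}

/* Free an event skb explicitly instead of relying on a destructor. */
static void example_free_event_skb(struct sock *sk, struct sk_buff *skb)
{
	example_rwnd_credit(sk, skb->len);
	kfree_skb(skb);		/* plain free, no destructor side effects */
}
```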
Also, if unable to allocate an skb for our transmission packet,
walk the packet's chunks and free the control chunks.
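The transmit-side cleanup described here lives outside this file, so the sketch below is an assumption: example_chunk_is_data(), and walking the packet's queued chunk skbs with the skb_queue_walk_safe() helper, are illustrative only. The point is the ownership rule: once queued on the packet, control chunks are freed by nobody else, while data chunks still belong to the outqueue for retransmission and must be left alone:

```c
#include <linux/skbuff.h>

/* Hypothetical predicate: does this queued chunk skb carry user
 * data?  The real code would inspect the chunk header.
 */
static int example_chunk_is_data(const struct sk_buff *chunk_skb)
{
	return 0;	/* placeholder */
}

/* On failure to allocate the transmission skb, walk the packet's
 * chunk queue and free only the control chunks; data chunks remain
 * owned by the outqueue and will be retransmitted later.
 */
static void example_free_control_chunks(struct sk_buff_head *chunks)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(chunks, skb, tmp) {
		if (!example_chunk_is_data(skb)) {
			__skb_unlink(skb, chunks);
			kfree_skb(skb);
		}
	}
}
```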
Also change list_dels to list_del_init. Fix for real later, but this
prevents us from doing damage if we list_del twice.
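The reason a double delete does damage: list_del() leaves the entry's next/prev pointers poisoned (dangling, on older kernels), so deleting the same entry twice writes through garbage pointers and corrupts neighboring memory. list_del_init() re-points the entry at itself, so a repeated deletion degenerates into a harmless self-unlink. A minimal illustration of the difference:

```c
#include <linux/list.h>

struct example_item {
	struct list_head node;
	int value;
};

/* After list_del_init(&item->node), node->next == node->prev ==
 * &item->node, so deleting again just rewrites those two pointers
 * with the values they already hold.  After a bare list_del(), the
 * pointers are poison values and a second delete dereferences them.
 */
static void example_remove_twice(struct example_item *item)
{
	list_del_init(&item->node);	/* unlink and re-initialize */
	list_del_init(&item->node);	/* harmless self-unlink */
}
```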
Diffstat (limited to 'net/sctp/ulpqueue.c')
| -rw-r--r-- | net/sctp/ulpqueue.c | 17 |
1 file changed, 8 insertions, 9 deletions
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 8a11bc28bd38..f32d8de7f41d 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -99,12 +99,12 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 
 	while ((skb = __skb_dequeue(&ulpq->lobby))) {
 		event = sctp_skb2event(skb);
-		sctp_ulpevent_free(event);
+		sctp_ulpevent_kfree_skb(skb);
 	}
 
 	while ((skb = __skb_dequeue(&ulpq->reasm))) {
 		event = sctp_skb2event(skb);
-		sctp_ulpevent_free(event);
+		sctp_ulpevent_kfree_skb(skb);
 	}
 
 }
@@ -235,9 +235,9 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 
 out_free:
 	if (sctp_event2skb(event)->list)
-		skb_queue_purge(sctp_event2skb(event)->list);
+		sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
 	else
-		kfree_skb(sctp_event2skb(event));
+		sctp_ulpevent_kfree_skb(sctp_event2skb(event));
 	return 0;
 }
 
@@ -289,7 +289,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
 	struct sctp_ulpevent *event;
@@ -325,11 +325,10 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
 
 		/* Remove the fragment from the reassembly queue. */
 		__skb_unlink(pos, pos->list);
-
+
 		/* Break if we have reached the last fragment. */
 		if (pos == l_frag)
 			break;
-
 		pos->next = pnext;
 		pos = pnext;
 	};
@@ -697,7 +696,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
 		event = sctp_skb2event(skb);
 		tsn = event->sndrcvinfo.sinfo_tsn;
 
-		sctp_ulpevent_free(event);
+		sctp_ulpevent_kfree_skb(skb);
 		sctp_tsnmap_renege(tsnmap, tsn);
 		if (freed >= needed)
 			return freed;
@@ -723,7 +722,7 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
 		event = sctp_skb2event(skb);
 		tsn = event->sndrcvinfo.sinfo_tsn;
 
-		sctp_ulpevent_free(event);
+		sctp_ulpevent_kfree_skb(skb);
 		sctp_tsnmap_renege(tsnmap, tsn);
 		if (freed >= needed)
 			return freed;
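The diff calls a new helper, sctp_queue_purge_ulpevents(), whose definition is outside this file. Conceptually it replaces skb_queue_purge(), which frees each skb with a bare kfree_skb(), with a loop that sends every queued event skb through the event-aware free path; the body below is a plausible sketch, not part of this commit:

```c
#include <linux/skbuff.h>

void sctp_ulpevent_kfree_skb(struct sk_buff *skb);	/* ulpevent.c */

/* Assumed shape: like skb_queue_purge(), but each skb goes through
 * the SCTP event free helper so per-event cleanup happens exactly
 * once instead of being skipped by a raw kfree_skb().
 */
void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL)
		sctp_ulpevent_kfree_skb(skb);
}
```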
