From 46b6a3a9c686b451f5de52982fdbc743b34b59bb Mon Sep 17 00:00:00 2001 From: Jon Grimm Date: Thu, 6 Feb 2003 01:46:09 -0600 Subject: [SCTP] Minor surgery on ulpevent & related cleanups. sndrcvinfo.sinfo_cumtsn is a new field added by the latest (05) API I-D. Remove unused fields in ulpevent, minimally to make room for storing this new field. But I'll clear out even more so I can make room for impending partial data delivery work. See changes in comments for ulpqueue.c. Many naming and typedef removal cleanups. --- net/sctp/ulpqueue.c | 65 ++++++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 31 deletions(-) (limited to 'net/sctp/ulpqueue.c') diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 04c47bffb8e9..b854e77b9ba0 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -142,7 +142,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk, if (event) { /* Create a temporary list to collect chunks on. */ skb_queue_head_init(&temp); - skb_queue_tail(&temp, event->parent); + skb_queue_tail(&temp, sctp_event2skb(event)); event = sctp_ulpq_order(ulpq, event); } @@ -172,19 +172,20 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) /* If we are harvesting multiple skbs they will be * collected on a list. */ - if (event->parent->list) - sctp_skb_list_tail(event->parent->list, &sk->receive_queue); + if (sctp_event2skb(event)->list) + sctp_skb_list_tail(sctp_event2skb(event)->list, + &sk->receive_queue); else - skb_queue_tail(&sk->receive_queue, event->parent); + skb_queue_tail(&sk->receive_queue, sctp_event2skb(event)); wake_up_interruptible(sk->sleep); return 1; out_free: - if (event->parent->list) - skb_queue_purge(event->parent->list); + if (sctp_event2skb(event)->list) + skb_queue_purge(sctp_event2skb(event)->list); else - kfree_skb(event->parent); + kfree_skb(sctp_event2skb(event)); return 0; } @@ -202,7 +203,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, /* Find the right place in this list. We store them by TSN. */ sctp_skb_for_each(pos, &ulpq->reasm, tmp) { - cevent = (struct sctp_ulpevent *)pos->cb; + cevent = sctp_skb2event(pos); ctsn = cevent->sndrcvinfo.sinfo_tsn; if (TSN_lt(tsn, ctsn)) @@ -211,9 +212,10 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, /* If the queue is empty, we have a different function to call. */ if (skb_peek(&ulpq->reasm)) - __skb_insert(event->parent, pos->prev, pos, &ulpq->reasm); + __skb_insert(sctp_event2skb(event), pos->prev, pos, + &ulpq->reasm); else - __skb_queue_tail(&ulpq->reasm, event->parent); + __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); } /* Helper function to return an event corresponding to the reassembled @@ -264,7 +266,7 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff * pos = pnext; } while (1); - event = (sctp_ulpevent_t *) f_frag->cb; + event = (struct sctp_ulpevent *) f_frag->cb; return event; } @@ -272,13 +274,13 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff * /* Helper function to check if an incoming chunk has filled up the last * missing fragment in a SCTP datagram and return the corresponding event.
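The pattern above, replacing every event->parent dereference with sctp_event2skb()/sctp_skb2event(), works because a ulpevent and its sk_buff are two views of one allocation. A minimal sketch of the idea, assuming the event struct is overlaid on the skb's control buffer (skb->cb) as later kernels define it; the struct and helper names below are illustrative stand-ins, not this tree's exact definitions:

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Illustrative stand-in for the real struct sctp_ulpevent. */
struct ulpevent_sketch {
	__u32 tsn;
	int msg_flags;
};

/* The event lives inside skb->cb, so both directions are pointer math. */
static inline struct ulpevent_sketch *sketch_skb2event(struct sk_buff *skb)
{
	return (struct ulpevent_sketch *)skb->cb;
}

static inline struct sk_buff *sketch_event2skb(struct ulpevent_sketch *ev)
{
	/* Recover the enclosing sk_buff from the address of its cb[]. */
	return container_of((void *)ev, struct sk_buff, cb);
}

Keeping the mapping behind helpers is what lets this commit shrink the event structure without having to revisit every call site again later.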
*/ -static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq) +static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq) { struct sk_buff *pos, *tmp; - sctp_ulpevent_t *cevent; + struct sctp_ulpevent *cevent; struct sk_buff *first_frag = NULL; __u32 ctsn, next_tsn; - sctp_ulpevent_t *retval = NULL; + struct sctp_ulpevent *retval = NULL; /* Initialized to 0 just to avoid compiler warning message. Will * never be used with this value. It is referenced only after it @@ -296,10 +298,10 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq * * start the next pass when we find another first fragment. */ sctp_skb_for_each(pos, &ulpq->reasm, tmp) { - cevent = (sctp_ulpevent_t *) pos->cb; + cevent = (struct sctp_ulpevent *) pos->cb; ctsn = cevent->sndrcvinfo.sinfo_tsn; - switch (cevent->chunk_flags & SCTP_DATA_FRAG_MASK) { + switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { case SCTP_DATA_FIRST_FRAG: first_frag = pos; next_tsn = ctsn + 1; @@ -334,10 +336,10 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq * /* Helper function to reassemble chunks. Hold chunks on the reasm queue that * need reassembling. */ -static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, - sctp_ulpevent_t *event) +static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, + struct sctp_ulpevent *event) { - sctp_ulpevent_t *retval = NULL; + struct sctp_ulpevent *retval = NULL; /* FIXME: We should be using some new chunk structure here * instead of carrying chunk fields in the event structure. @@ -346,7 +348,7 @@ static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, */ /* Check if this is part of a fragmented message. */ - if (SCTP_DATA_NOT_FRAG == (event->chunk_flags & SCTP_DATA_FRAG_MASK)) + if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) return event; sctp_ulpq_store_reasm(ulpq, event); @@ -359,7 +361,7 @@ static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, * ordered by an an incoming chunk. */ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, - sctp_ulpevent_t *event) + struct sctp_ulpevent *event) { struct sk_buff *pos, *tmp; struct sctp_ulpevent *cevent; @@ -373,7 +375,7 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, /* We are holding the chunks by stream, by SSN. */ sctp_skb_for_each(pos, &ulpq->lobby, tmp) { - cevent = (sctp_ulpevent_t *) pos->cb; + cevent = (struct sctp_ulpevent *) pos->cb; csid = cevent->sndrcvinfo.sinfo_stream; cssn = cevent->sndrcvinfo.sinfo_ssn; @@ -394,16 +396,16 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, __skb_unlink(pos, pos->list); /* Attach all gathered skbs to the event. */ - __skb_queue_tail(event->parent->list, pos); + __skb_queue_tail(sctp_event2skb(event)->list, pos); } } /* Helper function to store chunks needing ordering. */ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, - sctp_ulpevent_t *event) + struct sctp_ulpevent *event) { struct sk_buff *pos, *tmp; - sctp_ulpevent_t *cevent; + struct sctp_ulpevent *cevent; __u16 sid, csid; __u16 ssn, cssn; @@ -415,7 +417,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, * stream ID and then by SSN. 
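Both the reasm and lobby queues are kept sorted with TSN_lt()-style comparisons. TSNs are 32-bit sequence numbers that wrap, so a plain < misorders entries near the wrap point. A sketch of the usual serial-number comparison, on the assumption that the tree's TSN_lt/TSN_lte macros reduce to this:

#include <linux/types.h>

/* Serial-number "less than": subtract modulo 2^32, then reinterpret
 * the difference as signed, which makes the test wrap-safe.
 */
#define TSN_LT_SKETCH(a, b)	((__s32)((__u32)(a) - (__u32)(b)) < 0)
#define TSN_LTE_SKETCH(a, b)	((__s32)((__u32)(a) - (__u32)(b)) <= 0)

/* Example: TSN_LT_SKETCH(0xffffffffu, 2) is true, even though a plain
 * numeric comparison would say 0xffffffff > 2.
 */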
*/ sctp_skb_for_each(pos, &ulpq->lobby, tmp) { - cevent = (sctp_ulpevent_t *) pos->cb; + cevent = (struct sctp_ulpevent *) pos->cb; csid = cevent->sndrcvinfo.sinfo_stream; cssn = cevent->sndrcvinfo.sinfo_ssn; @@ -427,13 +429,14 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, /* If the queue is empty, we have a different function to call. */ if (skb_peek(&ulpq->lobby)) - __skb_insert(event->parent, pos->prev, pos, &ulpq->lobby); + __skb_insert(sctp_event2skb(event), pos->prev, pos, + &ulpq->lobby); else - __skb_queue_tail(&ulpq->lobby, event->parent); + __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); } -static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq, - sctp_ulpevent_t *event) +static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, + struct sctp_ulpevent *event) { __u16 sid, ssn; struct sctp_stream *in; @@ -445,7 +448,7 @@ static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq, */ /* Check if this message needs ordering. */ - if (SCTP_DATA_UNORDERED & event->chunk_flags) + if (SCTP_DATA_UNORDERED & event->msg_flags) return event; /* Note: The stream ID must be verified before this routine. */ -- cgit v1.2.3 From 62603506c4716dfaf4d6947af066f67b1164b823 Mon Sep 17 00:00:00 2001 From: Jon Grimm Date: Tue, 11 Feb 2003 13:54:48 -0600 Subject: [SCTP] Partial Data Delivery Support pushing a partial record up to the application when we are under pressure on rwnd. The most common case is that the sender is sending a record larger than our rwnd. We send as much up the receive queue in hopes that a read will open up room in rwnd. Other associations on the socket need to be held off until the partial delivery condition is finally fulfilled (or ABORTed). Additionally, one must be careful to "do the right thing" with regards to associations peeled off to new sockets, properly preserving or clearing the partial delivery state. --- include/net/sctp/command.h | 1 + include/net/sctp/structs.h | 13 +- include/net/sctp/ulpevent.h | 17 ++- include/net/sctp/ulpqueue.h | 15 ++- include/net/sctp/user.h | 3 +- net/sctp/associola.c | 14 +- net/sctp/endpointola.c | 2 +- net/sctp/sm_sideeffect.c | 18 ++- net/sctp/sm_statefuns.c | 50 +++++-- net/sctp/socket.c | 83 +++++++++--- net/sctp/ulpevent.c | 70 +++++++++- net/sctp/ulpqueue.c | 319 +++++++++++++++++++++++++++++++++++++------- 12 files changed, 494 insertions(+), 111 deletions(-) (limited to 'net/sctp/ulpqueue.c') diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 40aa3ab279f0..03ab4422b683 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -86,6 +86,7 @@ typedef enum { SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */ SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */ SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */ + SCTP_CMD_CHUNK_PD, /* Partial data delivery considerations.
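The new SCTP_CMD_CHUNK_PD verb slots into the existing side-effect machinery: a state function appends (verb, argument) pairs to a command sequence, and sctp_cmd_interpreter() executes them afterwards. A compressed sketch of that pattern with simplified, assumed types (the real sctp_cmd_seq_t and argument union carry much more):

enum sketch_verb { SKETCH_CMD_CHUNK_ULP, SKETCH_CMD_CHUNK_PD, SKETCH_CMD_LAST };

struct sketch_cmd {
	enum sketch_verb verb;
	void *arg;
};

struct sketch_cmd_seq {
	struct sketch_cmd cmds[16];
	int next;
};

/* State functions queue side effects instead of acting directly... */
static void sketch_add_cmd(struct sketch_cmd_seq *seq, enum sketch_verb verb,
			   void *arg)
{
	seq->cmds[seq->next].verb = verb;
	seq->cmds[seq->next].arg = arg;
	seq->next++;
}

/* ...and the interpreter runs them after the state function returns. */
static void sketch_interpret(struct sketch_cmd_seq *seq)
{
	int i;

	for (i = 0; i < seq->next; i++) {
		switch (seq->cmds[i].verb) {
		case SKETCH_CMD_CHUNK_PD:
			/* the real code calls sctp_ulpq_partial_delivery() */
			break;
		default:
			break;
		}
	}
}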
*/ SCTP_CMD_LAST } sctp_verb_t; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 8c1e4b2d0a3a..97c51580bce1 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -124,7 +124,6 @@ typedef struct sctp_association sctp_association_t; typedef struct sctp_packet sctp_packet_t; typedef struct sctp_chunk sctp_chunk_t; typedef struct sctp_bind_addr sctp_bind_addr_t; -typedef struct sctp_opt sctp_opt_t; typedef struct sctp_endpoint_common sctp_endpoint_common_t; #include @@ -249,10 +248,10 @@ struct sctp_af { int optname, char *optval, int *optlen); - struct dst_entry *(*get_dst) (sctp_association_t *asoc, + struct dst_entry *(*get_dst) (struct sctp_association *asoc, union sctp_addr *daddr, union sctp_addr *saddr); - void (*get_saddr) (sctp_association_t *asoc, + void (*get_saddr) (struct sctp_association *asoc, struct dst_entry *dst, union sctp_addr *daddr, union sctp_addr *saddr); @@ -310,6 +309,9 @@ struct sctp_opt { /* What kind of a socket is this? */ sctp_socket_type_t type; + /* PF_ family specific functions. */ + struct sctp_pf *pf; + /* What is our base endpointer? */ sctp_endpoint_t *ep; @@ -323,7 +325,10 @@ struct sctp_opt { __u32 autoclose; __u8 nodelay; __u8 disable_fragments; - struct sctp_pf *pf; + __u8 pd_mode; + + /* Receive to here while partial delivery is in effect. */ + struct sk_buff_head pd_lobby; }; diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index b21bc99e0f97..8d0edaf22025 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -103,6 +103,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( __u16 flags, int priority); +struct sctp_ulpevent *sctp_ulpevent_make_pdapi( + const struct sctp_association *asoc, + __u32 indication, int priority); + struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, struct sctp_chunk *chunk, int priority); @@ -111,19 +115,24 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *); __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event); - +/* Is this event type enabled? */ +static inline int sctp_ulpevent_type_enabled(__u16 sn_type, + struct sctp_event_subscribe *mask) +{ + char *amask = (char *) mask; + return amask[sn_type - SCTP_SN_TYPE_BASE]; +} /* Given an event subscription, is this event enabled? */ static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event, - const struct sctp_event_subscribe *mask) + struct sctp_event_subscribe *mask) { - const char *amask = (const char *) mask; __u16 sn_type; int enabled = 1; if (sctp_ulpevent_is_notification(event)) { sn_type = sctp_ulpevent_get_notification_type(event); - enabled = amask[sn_type - SCTP_SN_TYPE_BASE]; + enabled = sctp_ulpevent_type_enabled(sn_type, mask); } return enabled; } diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h index 689abb810eb2..dd7823b0a737 100644 --- a/include/net/sctp/ulpqueue.h +++ b/include/net/sctp/ulpqueue.h @@ -48,7 +48,8 @@ /* A structure to carry information to the ULP (e.g. Sockets API) */ struct sctp_ulpq { - int malloced; + char malloced; + char pd_mode; sctp_association_t *asoc; struct sk_buff_head reasm; struct sk_buff_head lobby; @@ -60,13 +61,19 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, sctp_association_t *); void sctp_ulpq_free(struct sctp_ulpq *); /* Add a new DATA chunk for processing. 
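sctp_ulpevent_type_enabled() above works by treating struct sctp_event_subscribe as a flat byte array indexed by sn_type - SCTP_SN_TYPE_BASE, which is also why the user.h hunk further down reorders SCTP_SEND_FAILED and SCTP_REMOTE_ERROR: the enum order must mirror the subscribe struct's field order. A sketch of the correspondence; field names loosely follow the API draft, and only a subset is shown:

/* Subscription flags as an indexable byte array, one byte per event
 * type, in the same order as the sn_type enum.
 */
struct subscribe_sketch {
	unsigned char data_io_event;		/* index 0: not a notification */
	unsigned char association_event;	/* SCTP_ASSOC_CHANGE */
	unsigned char address_event;		/* SCTP_PEER_ADDR_CHANGE */
	unsigned char send_failure_event;	/* SCTP_SEND_FAILED */
	unsigned char peer_error_event;		/* SCTP_REMOTE_ERROR */
	unsigned char shutdown_event;		/* SCTP_SHUTDOWN_EVENT */
	unsigned char partial_delivery_event;	/* SCTP_PARTIAL_DELIVERY_EVENT */
};

static int sketch_type_enabled(unsigned short sn_type,
			       struct subscribe_sketch *mask)
{
	unsigned char *amask = (unsigned char *)mask;

	return amask[sn_type - (1 << 15)];	/* SCTP_SN_TYPE_BASE */
}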
*/ -int sctp_ulpq_tail_data(struct sctp_ulpq *, sctp_chunk_t *chunk, int priority); +int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, int); /* Add a new event for propogation to the ULP. */ int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev); -/* Is the ulpqueue empty. */ -int sctp_ulpqueue_is_empty(struct sctp_ulpq *); +/* Perform partial delivery. */ +void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, int); + +/* Abort the partial delivery. */ +void sctp_ulpq_abort_pd(struct sctp_ulpq *, int); + +/* Clear the partial data delivery condition on this socket. */ +int sctp_clear_pd(struct sock *sk); #endif /* __sctp_ulpqueue_h__ */ diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index c13ef35ab345..69e241b1a88a 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -368,6 +368,7 @@ struct sctp_rcv_pdapi_event { sctp_assoc_t pdapi_assoc_id; }; +enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, }; /* * Described in Section 7.3 @@ -415,8 +416,8 @@ enum sctp_sn_type { SCTP_SN_TYPE_BASE = (1<<15), SCTP_ASSOC_CHANGE, SCTP_PEER_ADDR_CHANGE, - SCTP_REMOTE_ERROR, SCTP_SEND_FAILED, + SCTP_REMOTE_ERROR, SCTP_SHUTDOWN_EVENT, SCTP_PARTIAL_DELIVERY_EVENT, SCTP_ADAPTION_INDICATION, diff --git a/net/sctp/associola.c b/net/sctp/associola.c index e771d1ec8974..522f238f2a2f 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -95,7 +95,7 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc, sctp_scope_t scope, int priority) { - sctp_opt_t *sp; + struct sctp_opt *sp; int i; /* Retrieve the SCTP per socket area. */ @@ -368,7 +368,7 @@ struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *asoc, int priority) { struct sctp_transport *peer; - sctp_opt_t *sp; + struct sctp_opt *sp; unsigned short port; /* AF_INET and AF_INET6 share common port field. */ @@ -819,7 +819,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc) /* This routine moves an association from its old sk to a new sk. */ void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk) { - sctp_opt_t *newsp = sctp_sk(newsk); + struct sctp_opt *newsp = sctp_sk(newsk); /* Delete the association from the old endpoint's list of * associations. @@ -996,7 +996,7 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len) /* Send a window update SACK if the rwnd has increased by at least the * minimum of the association's PMTU and half of the receive buffer. - * The algorithm used is similar to the one described in + * The algorithm used is similar to the one described in * Section 4.2.3.3 of RFC 1122. */ if ((asoc->state == SCTP_STATE_ESTABLISHED) && @@ -1006,9 +1006,9 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len) SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p " "rwnd: %u a_rwnd: %u\n", __FUNCTION__, asoc, asoc->rwnd, asoc->a_rwnd); - sack = sctp_make_sack(asoc); + sack = sctp_make_sack(asoc); if (!sack) - return; + return; /* Update the last advertised rwnd value. */ asoc->a_rwnd = asoc->rwnd; @@ -1022,7 +1022,7 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len) timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; if (timer_pending(timer) && del_timer(timer)) sctp_association_put(asoc); - } + } } /* Decrease asoc's rwnd by len. 
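The window-update rule in the sctp_assoc_rwnd_increase() hunk above is receiver-side silly-window avoidance: freshly regained receive window is only advertised once the gain is worth announcing. A sketch of the predicate with assumed parameter names, following the comment's min(PMTU, rcvbuf/2) threshold:

/* Decide whether an rwnd gain deserves a window-update SACK. */
static int sketch_want_window_update(unsigned int rwnd, unsigned int a_rwnd,
				     unsigned int pmtu, unsigned int rcvbuf)
{
	unsigned int gain = rwnd - a_rwnd;
	unsigned int threshold = pmtu < (rcvbuf >> 1) ? pmtu : (rcvbuf >> 1);

	/* Advertise only when the regained space is at least
	 * min(PMTU, rcvbuf/2); smaller gains are not worth a SACK.
	 */
	return rwnd > a_rwnd && gain >= threshold;
}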
*/ diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 6707af53f51e..8413811a8f98 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -92,7 +92,7 @@ fail: sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep, sctp_protocol_t *proto, struct sock *sk, int priority) { - sctp_opt_t *sp = sctp_sk(sk); + struct sctp_opt *sp = sctp_sk(sk); memset(ep, 0, sizeof(sctp_endpoint_t)); /* Initialize the base structure. */ diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 910afe8c2b9b..ba179309a2ad 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -598,6 +598,13 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, t->rto_pending = 1; break; + case SCTP_CMD_CHUNK_PD: + /* Send a chunk to the sockets layer. */ + sctp_ulpq_partial_delivery(&asoc->ulpq, + command->obj.ptr, + GFP_ATOMIC); + break; + default: printk(KERN_WARNING "Impossible command: %u, %p\n", command->verb, command->obj.ptr); @@ -1061,12 +1068,11 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, break; } - event = sctp_ulpevent_make_assoc_change(asoc, - 0, - SCTP_COMM_LOST, - error, 0, 0, - GFP_ATOMIC); + /* Cancel any partial delivery in progress. */ + sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); + event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, + error, 0, 0, GFP_ATOMIC); if (event) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event)); @@ -1141,7 +1147,7 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, if (del_timer(&t->hb_timer)) sctp_transport_put(t); } -} +} /* Helper function to update the heartbeat timer. */ static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 52d2c71cc155..64b6aaadde15 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -264,7 +264,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep, if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0) goto nomem_ack; - + repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); if (!repl) goto nomem_ack; @@ -1540,11 +1540,11 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep, SCTP_ULPEVENT(ev)); } sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); - + repl = sctp_make_cookie_ack(new_asoc, chunk); if (!repl) goto nomem; - + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); @@ -2241,6 +2241,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep, sctp_datahdr_t *data_hdr; sctp_chunk_t *err; size_t datalen; + sctp_verb_t deliver; int tmp; __u32 tsn; @@ -2307,10 +2308,32 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep, datalen = ntohs(chunk->chunk_hdr->length); datalen -= sizeof(sctp_data_chunk_t); + deliver = SCTP_CMD_CHUNK_ULP; + + /* Think about partial delivery. */ + if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { + + /* Even if we don't accept this chunk there is + * memory pressure. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_CHUNK_PD, SCTP_NULL()); + } + if (asoc->rwnd_over || (datalen > asoc->rwnd + asoc->frag_point)) { - SCTP_DEBUG_PRINTK("Discarding tsn: %u datalen: %Zd, " - "rwnd: %d\n", tsn, datalen, asoc->rwnd); - goto discard_force; + + + /* There is absolutely no room, but this is the most + * important tsn that we are waiting on, try to + * to partial deliver or renege to make room. 
+ */ + if ((sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) { + deliver = SCTP_CMD_CHUNK_PD; + } else { + SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, " + "rwnd: %d\n", tsn, datalen, + asoc->rwnd); + goto discard_force; + } } /* @@ -2335,10 +2358,11 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep, return SCTP_DISPOSITION_CONSUME; } - /* We are accepting this DATA chunk. */ - - /* Record the fact that we have received this TSN. */ - sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); + /* If definately accepting the DATA chunk, record its TSN, otherwise + * wait for renege processing. + */ + if (deliver != SCTP_CMD_CHUNK_PD) + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number * @@ -2352,10 +2376,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep, err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM, &data_hdr->stream, sizeof(data_hdr->stream)); - if (err) { + if (err) sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(err)); - } goto discard_noforce; } @@ -2363,7 +2386,8 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep, * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK * chunk needs the updated rwnd. */ - sctp_add_cmd_sf(commands, SCTP_CMD_CHUNK_ULP, SCTP_CHUNK(chunk)); + sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk)); + if (asoc->autoclose) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index cb508d2d4632..a1b5e4feff00 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -81,13 +81,13 @@ /* Forward declarations for internal helper functions. */ static int sctp_writeable(struct sock *sk); -static inline int sctp_wspace(sctp_association_t *asoc); +static inline int sctp_wspace(struct sctp_association *asoc); static inline void sctp_set_owner_w(sctp_chunk_t *chunk); static void sctp_wfree(struct sk_buff *skb); -static int sctp_wait_for_sndbuf(sctp_association_t *asoc, long *timeo_p, +static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, int msg_len); static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p); -static int sctp_wait_for_connect(sctp_association_t *asoc, long *timeo_p); +static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static inline int sctp_verify_addr(struct sock *, union sctp_addr *, int); static int sctp_bindx_add(struct sock *, struct sockaddr_storage *, int); static int sctp_bindx_rem(struct sock *, struct sockaddr_storage *, int); @@ -158,7 +158,7 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt, /* Bind a local address either to an endpoint or to an association. */ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { - sctp_opt_t *sp = sctp_sk(sk); + struct sctp_opt *sp = sctp_sk(sk); sctp_endpoint_t *ep = sp->ep; sctp_bind_addr_t *bp = &ep->base.bind_addr; struct sctp_af *af; @@ -454,7 +454,7 @@ err_bindx_add: */ int sctp_bindx_rem(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt) { - sctp_opt_t *sp = sctp_sk(sk); + struct sctp_opt *sp = sctp_sk(sk); sctp_endpoint_t *ep = sp->ep; int cnt; sctp_bind_addr_t *bp = &ep->base.bind_addr; @@ -662,6 +662,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) /* Clean up any skbs sitting on the receive queue. */ skb_queue_purge(&sk->receive_queue); + skb_queue_purge(&sctp_sk(sk)->pd_lobby); /* This will run the backlog queue. 
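The rwnd-pressure handling that sctp_sf_eat_data_6_2() gains in this commit is easier to see condensed into one decision routine. The sketch below restates the diff's logic with simplified types; names ending in _sketch are mine, the decisions are the patch's:

/* Returns 0 for normal delivery, 1 for partial delivery, -1 for
 * discard.
 */
static int sketch_classify_data(unsigned int datalen, unsigned int rwnd,
				unsigned int rwnd_over,
				unsigned int frag_point, int pd_mode,
				unsigned int ctsn, unsigned int tsn)
{
	if (datalen >= rwnd && !pd_mode) {
		/* Memory pressure even if this chunk is refused; the
		 * real code queues SCTP_CMD_CHUNK_PD with SCTP_NULL()
		 * here to consider flushing a partial message.
		 */
	}

	if (rwnd_over || datalen > rwnd + frag_point) {
		/* No room left: keep the chunk only if it is the very
		 * next TSN we are waiting on and push it out through
		 * partial delivery; otherwise drop it.
		 */
		return (ctsn + 1 == tsn) ? 1 : -1;
	}
	return 0;
}

Note that the TSN is reported (SCTP_CMD_REPORT_TSN) only on the normal path; the partial-delivery path defers that until renege processing, exactly as the comment in the diff says.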
*/ sctp_release_sock(sk); @@ -714,7 +715,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, int msg_len) { - sctp_opt_t *sp; + struct sctp_opt *sp; sctp_endpoint_t *ep; sctp_association_t *new_asoc=NULL, *asoc=NULL; struct sctp_transport *transport; @@ -1117,7 +1118,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr int len, int noblock, int flags, int *addr_len) { struct sctp_ulpevent *event = NULL; - sctp_opt_t *sp = sctp_sk(sk); + struct sctp_opt *sp = sctp_sk(sk); struct sk_buff *skb; int copied; int err = 0; @@ -1176,7 +1177,6 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr /* If skb's length exceeds the user's buffer, update the skb and * push it back to the receive_queue so that the next call to * recvmsg() will return the remaining data. Don't set MSG_EOR. - * Otherwise, set MSG_EOR indicating the end of a message. */ if (skb_len > copied) { msg->msg_flags &= ~MSG_EOR; @@ -1184,6 +1184,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr goto out_free; sctp_skb_pull(skb, copied); skb_queue_head(&sk->receive_queue, skb); + /* When only partial message is copied to the user, increase * rwnd by that amount. If all the data in the skb is read, * rwnd is updated when the skb's destructor is called via @@ -1191,9 +1192,11 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr */ sctp_assoc_rwnd_increase(event->asoc, copied); goto out; - } else { - msg->msg_flags |= MSG_EOR; - } + } else if ((event->msg_flags & MSG_NOTIFICATION) || + (event->msg_flags & MSG_EOR)) + msg->msg_flags |= MSG_EOR; + else + msg->msg_flags &= ~MSG_EOR; out_free: sctp_ulpevent_free(event); /* Free the skb. */ @@ -1231,7 +1234,7 @@ static inline int sctp_setsockopt_set_events(struct sock *sk, char *optval, static inline int sctp_setsockopt_autoclose(struct sock *sk, char *optval, int optlen) { - sctp_opt_t *sp = sctp_sk(sk); + struct sctp_opt *sp = sctp_sk(sk); /* Applicable to UDP-style socket only */ if (SCTP_SOCKET_TCP == sp->type) @@ -1353,7 +1356,7 @@ static inline int sctp_setsockopt_set_default_send_param(struct sock *sk, asoc->defaults.timetolive = info.sinfo_timetolive; return 0; } - + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -1481,7 +1484,7 @@ out_nounlock: SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { - sctp_opt_t *sp; + struct sctp_opt *sp; sctp_endpoint_t *ep; sctp_association_t *asoc; struct sctp_transport *transport; @@ -1603,7 +1606,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) { sctp_endpoint_t *ep; sctp_protocol_t *proto; - sctp_opt_t *sp; + struct sctp_opt *sp; SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk); @@ -1632,7 +1635,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) /* Initialize default RTO related parameters. These parameters can * be modified for with the SCTP_RTOINFO socket option. - * FIXME: This are not used yet. + * FIXME: These are not used yet. */ sp->rtoinfo.srto_initial = proto->rto_initial; sp->rtoinfo.srto_max = proto->rto_max; @@ -1669,6 +1672,11 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) */ sp->autoclose = 0; sp->pf = sctp_get_pf_specific(sk->family); + + /* Control variables for partial data delivery. */ + sp->pd_mode = 0; + skb_queue_head_init(&sp->pd_lobby); + /* Create a per socket endpoint structure. 
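On the application side, partial delivery means recvmsg() can now return a slice of a record with MSG_EOR clear, and the reader must accumulate until MSG_EOR is set. A userspace sketch; error paths and MSG_NOTIFICATION filtering are trimmed, and the buffer management is deliberately naive:

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

/* Read one logical SCTP record, reassembling partial deliveries. */
static ssize_t read_record(int fd, char *rec, size_t cap)
{
	struct msghdr msg;
	struct iovec iov;
	size_t off = 0;
	ssize_t n;

	do {
		iov.iov_base = rec + off;
		iov.iov_len = cap - off;
		memset(&msg, 0, sizeof(msg));
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;

		n = recvmsg(fd, &msg, 0);
		if (n <= 0)
			return n;	/* error or EOF */
		off += n;
	} while (!(msg.msg_flags & MSG_EOR) && off < cap);

	return off;
}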
Even if we * change the data structure relationships, this may still * be useful for storing pre-connect address information. */ @@ -1823,8 +1831,8 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso struct sock *newsk; struct socket *tmpsock; sctp_endpoint_t *newep; - sctp_opt_t *oldsp = sctp_sk(oldsk); - sctp_opt_t *newsp; + struct sctp_opt *oldsp = sctp_sk(oldsk); + struct sctp_opt *newsp; struct sk_buff *skb, *tmp; struct sctp_ulpevent *event; int err = 0; @@ -1867,6 +1875,43 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso } } + /* Clean up any messages pending delivery due to partial + * delivery. Three cases: + * 1) No partial delivery; no work. + * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. + * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. + */ + skb_queue_head_init(&newsp->pd_lobby); + sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode; + + if (sctp_sk(oldsk)->pd_mode) { + struct sk_buff_head *queue; + + /* Decide which queue to move pd_lobby skbs to. */ + if (assoc->ulpq.pd_mode) { + queue = &newsp->pd_lobby; + } else + queue = &newsk->receive_queue; + + /* Walk through the pd_lobby, looking for skbs that + * need to be moved to the new socket. + */ + sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { + event = sctp_skb2event(skb); + if (event->asoc == assoc) { + __skb_unlink(skb, skb->list); + __skb_queue_tail(queue, skb); + } + } + + /* Clean up any skbs waiting for the partial + * delivery to finish. + */ + if (assoc->ulpq.pd_mode) + sctp_clear_pd(oldsk); + + } + /* Set the type of socket to indicate that it is peeled off from the * original socket. */ @@ -2438,7 +2483,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum) */ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog) { - sctp_opt_t *sp = sctp_sk(sk); + struct sctp_opt *sp = sctp_sk(sk); sctp_endpoint_t *ep = sp->ep; /* Only UDP style sockets that are not peeled off are allowed to diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 51c9fe104b1c..30e96c68d10f 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -628,14 +628,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc, if (!event) goto fail_init; - for (list = skb_shinfo(skb)->frag_list; list; list = list->next) { - /* Note: Not clearing the entire event struct as - * this is just a fragment of the real event. However, - * we still need to do rwnd accounting. - */ + /* Note: Not clearing the entire event struct as + * this is just a fragment of the real event. However, + * we still need to do rwnd accounting. + */ + for (list = skb_shinfo(skb)->frag_list; list; list = list->next) sctp_ulpevent_set_owner_r(list, asoc); - - info = (struct sctp_sndrcvinfo *) &event->sndrcvinfo; @@ -733,6 +731,64 @@ fail: return NULL; } +/* Create a partial delivery related event. + * + * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT + * + * When a receiver is engaged in a partial delivery of a + * message this notification will be used to indicate + * various events.
+ */ +struct sctp_ulpevent *sctp_ulpevent_make_pdapi( + const sctp_association_t *asoc, __u32 indication, int priority) +{ + struct sctp_ulpevent *event; + struct sctp_rcv_pdapi_event *pd; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), + MSG_NOTIFICATION, priority); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + pd = (struct sctp_rcv_pdapi_event *) + skb_put(skb, sizeof(struct sctp_rcv_pdapi_event)); + + /* pdapi_type + * It should be SCTP_PARTIAL_DELIVERY_EVENT + * + * pdapi_flags: 16 bits (unsigned integer) + * Currently unused. + */ + pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; + pd->pdapi_flags = 0; + + /* pdapi_length: 32 bits (unsigned integer) + * + * This field is the total length of the notification data, including + * the notification header. It will generally be sizeof (struct + * sctp_rcv_pdapi_event). + */ + pd->pdapi_length = sizeof(struct sctp_rcv_pdapi_event); + + /* pdapi_indication: 32 bits (unsigned integer) + * + * This field holds the indication being sent to the application. + */ + pd->pdapi_indication = indication; + + /* pdapi_assoc_id: sizeof (sctp_assoc_t) + * + * The association id field, holds the identifier for the association. + */ + pd->pdapi_assoc_id = sctp_assoc2id(asoc); + + return event; +fail: + return NULL; +} + /* Return the notification type, assuming this is a notification * event. */ diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index b854e77b9ba0..078e6ec079dd 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -84,6 +84,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq, ulpq->asoc = asoc; skb_queue_head_init(&ulpq->reasm); skb_queue_head_init(&ulpq->lobby); + ulpq->pd_mode = 0; ulpq->malloced = 0; return ulpq; @@ -96,15 +97,16 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq) struct sk_buff *skb; struct sctp_ulpevent *event; - while ((skb = skb_dequeue(&ulpq->lobby))) { - event = (struct sctp_ulpevent *) skb->cb; + while ((skb = __skb_dequeue(&ulpq->lobby))) { + event = sctp_skb2event(skb); sctp_ulpevent_free(event); } - while ((skb = skb_dequeue(&ulpq->reasm))) { - event = (struct sctp_ulpevent *) skb->cb; + while ((skb = __skb_dequeue(&ulpq->reasm))) { + event = sctp_skb2event(skb); sctp_ulpevent_free(event); } + } /* Dispose of a ulpqueue. */ @@ -117,7 +119,7 @@ void sctp_ulpq_free(struct sctp_ulpq *ulpq) /* Process an incoming DATA chunk. */ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk, - int priority) + int priority) { struct sk_buff_head temp; sctp_data_chunk_t *hdr; @@ -125,12 +127,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk, hdr = (sctp_data_chunk_t *) chunk->chunk_hdr; - /* FIXME: Instead of event being the skb clone, we really should - * have a new skb based chunk structure that we can convert to - * an event. Temporarily, I'm carrying a few chunk fields in - * the event to allow reassembly. Its too painful to change - * everything at once. --jgrimm - */ + /* Create an event from the incoming chunk. */ event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, priority); if (!event) return -ENOMEM; @@ -139,10 +136,10 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk, event = sctp_ulpq_reasm(ulpq, event); /* Do ordering if needed. */ - if (event) { + if ((event) && (event->msg_flags & MSG_EOR)){ /* Create a temporary list to collect chunks on. 
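When the SCTP_PARTIAL_DELIVERY_EVENT subscription is enabled, an aborted partial delivery reaches the application as a notification whose pdapi_indication is SCTP_PARTIAL_DELIVERY_ABORTED. A userspace sketch of recognizing it; the struct layout is redeclared locally from this patch's user.h, and the constant values mirror the patch (both are assumptions of this sketch, not includes):

#define MSG_NOTIFICATION_SKETCH	0x8000		/* lksctp's MSG_NOTIFICATION */
#define PD_EVENT_SKETCH		((1 << 15) + 6)	/* SCTP_PARTIAL_DELIVERY_EVENT */
#define PD_ABORTED_SKETCH	0		/* SCTP_PARTIAL_DELIVERY_ABORTED */

struct pdapi_event_sketch {
	unsigned short pdapi_type;
	unsigned short pdapi_flags;
	unsigned int pdapi_length;
	unsigned int pdapi_indication;
	int pdapi_assoc_id;
};

/* Did this recvmsg() result announce an aborted partial delivery? */
static int sketch_pd_aborted(int msg_flags, const void *buf)
{
	const struct pdapi_event_sketch *pd = buf;

	if (!(msg_flags & MSG_NOTIFICATION_SKETCH))
		return 0;
	return pd->pdapi_type == PD_EVENT_SKETCH &&
	       pd->pdapi_indication == PD_ABORTED_SKETCH;
}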
*/ skb_queue_head_init(&temp); - skb_queue_tail(&temp, sctp_event2skb(event)); + __skb_queue_tail(&temp, sctp_event2skb(event)); event = sctp_ulpq_order(ulpq, event); } @@ -154,10 +151,40 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk, return 0; } +/* Clear the partial delivery mode for this socket. Note: This + * assumes that no association is currently in partial delivery mode. + */ +int sctp_clear_pd(struct sock *sk) +{ + struct sctp_opt *sp; + sp = sctp_sk(sk); + + sp->pd_mode = 0; + if (!skb_queue_empty(&sp->pd_lobby)) { + struct list_head *list; + sctp_skb_list_tail(&sp->pd_lobby, &sk->receive_queue); + list = (struct list_head *)&sctp_sk(sk)->pd_lobby; + INIT_LIST_HEAD(list); + return 1; + } + return 0; +} + +/* Clear the pd_mode and restart any pending messages waiting for delivery. */ +static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) +{ + ulpq->pd_mode = 0; + return sctp_clear_pd(ulpq->asoc->base.sk); +} + + + /* Add a new event for propogation to the ULP. */ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sock *sk = ulpq->asoc->base.sk; + struct sk_buff_head *queue; + int clear_pd = 0; /* If the socket is just going to throw this away, do not * even try to deliver it. @@ -169,16 +196,41 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) goto out_free; + /* If we are in partial delivery mode, post to the lobby until + * partial delivery is cleared, unless, of course _this_ is + * the association the cause of the partial delivery. + */ + + if (!sctp_sk(sk)->pd_mode) { + queue = &sk->receive_queue; + } else if (ulpq->pd_mode) { + if (event->msg_flags & MSG_NOTIFICATION) + queue = &sctp_sk(sk)->pd_lobby; + else { + clear_pd = event->msg_flags & MSG_EOR; + queue = &sk->receive_queue; + } + } else + queue = &sctp_sk(sk)->pd_lobby; + + /* If we are harvesting multiple skbs they will be * collected on a list. */ if (sctp_event2skb(event)->list) - sctp_skb_list_tail(sctp_event2skb(event)->list, - &sk->receive_queue); + sctp_skb_list_tail(sctp_event2skb(event)->list, queue); else - skb_queue_tail(&sk->receive_queue, sctp_event2skb(event)); + skb_queue_tail(queue, sctp_event2skb(event)); - wake_up_interruptible(sk->sleep); + /* Did we just complete partial delivery and need to get + * rolling again? Move pending data to the receive + * queue. + */ + if (clear_pd) + sctp_ulpq_clear_pd(ulpq); + + if (queue == &sk->receive_queue) + wake_up_interruptible(sk->sleep); return 1; out_free: @@ -192,7 +244,7 @@ out_free: /* 2nd Level Abstractions */ /* Helper function to store chunks that need to be reassembled. */ -static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, +static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sk_buff *pos, *tmp; @@ -212,7 +264,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, /* If the queue is empty, we have a different function to call. 
*/ if (skb_peek(&ulpq->reasm)) - __skb_insert(sctp_event2skb(event), pos->prev, pos, + __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm); else __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); @@ -233,7 +285,10 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff * struct sk_buff *list = skb_shinfo(f_frag)->frag_list; /* Store the pointer to the 2nd skb */ - pos = f_frag->next; + if (f_frag == l_frag) + pos = NULL; + else + pos = f_frag->next; /* Get the last skb in the f_frag's frag_list if present. */ for (last = list; list; last = list, list = list->next); @@ -248,7 +303,8 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff * /* Remove the first fragment from the reassembly queue. */ __skb_unlink(f_frag, f_frag->list); - do { + while (pos) { + pnext = pos->next; /* Update the len and data_len fields of the first fragment. */ @@ -264,13 +320,14 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff * pos->next = pnext; pos = pnext; - } while (1); + }; - event = (struct sctp_ulpevent *) f_frag->cb; + event = sctp_skb2event(f_frag); return event; } + /* Helper function to check if an incoming chunk has filled up the last * missing fragment in a SCTP datagram and return the corresponding event. */ @@ -282,7 +339,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u __u32 ctsn, next_tsn; struct sctp_ulpevent *retval = NULL; - /* Initialized to 0 just to avoid compiler warning message. Will + /* Initialized to 0 just to avoid compiler warning message. Will * never be used with this value. It is referenced only after it * is set when we find the first fragment of a message. */ @@ -298,7 +355,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u * start the next pass when we find another first fragment. */ sctp_skb_for_each(pos, &ulpq->reasm, tmp) { - cevent = (struct sctp_ulpevent *) pos->cb; + cevent = sctp_skb2event(pos); ctsn = cevent->sndrcvinfo.sinfo_tsn; switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { @@ -315,7 +372,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u break; case SCTP_DATA_LAST_FRAG: - if ((first_frag) && (ctsn == next_tsn)) + if (first_frag && (ctsn == next_tsn)) retval = sctp_make_reassembled_event( first_frag, pos); else @@ -326,14 +383,78 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u /* We have the reassembled event. There is no need to look * further. */ - if (retval) + if (retval) { + retval->msg_flags |= MSG_EOR; + break; + } + } + + return retval; +} + +/* Retrieve the next set of fragments of a partial message. */ +static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq) +{ + struct sk_buff *pos, *tmp, *last_frag, *first_frag; + struct sctp_ulpevent *cevent; + __u32 ctsn, next_tsn; + int is_last; + struct sctp_ulpevent *retval; + + /* The chunks are held in the reasm queue sorted by TSN. + * Walk through the queue sequentially and look for the first + * sequence of fragmented chunks. 
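sctp_make_reassembled_event() collapses a run of fragments into the first skb by splicing the followers onto its frag_list and growing len/data_len; the new f_frag == l_frag guard covers a message that is a single fragment. A reduced sketch of the per-fragment splice step, using the standard skb_shinfo() frag_list convention:

#include <linux/skbuff.h>

/* Splice one follower fragment onto the head skb's frag_list and
 * account for its bytes; the head ends up describing the whole
 * datagram.
 */
static void sketch_fold_fragment(struct sk_buff *head, struct sk_buff *frag)
{
	struct sk_buff *list = skb_shinfo(head)->frag_list;

	if (!list) {
		skb_shinfo(head)->frag_list = frag;
	} else {
		while (list->next)
			list = list->next;
		list->next = frag;
	}

	frag->next = NULL;
	head->len += frag->len;
	head->data_len += frag->len;
}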
+ */ + + if (skb_queue_empty(&ulpq->reasm)) + return NULL; + + last_frag = first_frag = NULL; + retval = NULL; + next_tsn = 0; + is_last = 0; + + sctp_skb_for_each(pos, &ulpq->reasm, tmp) { + cevent = sctp_skb2event(pos); + ctsn = cevent->sndrcvinfo.sinfo_tsn; + + switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { + case SCTP_DATA_MIDDLE_FRAG: + if (!first_frag) { + first_frag = pos; + next_tsn = ctsn + 1; + last_frag = pos; + } else if (next_tsn == ctsn) + next_tsn++; + else + goto done; break; + case SCTP_DATA_LAST_FRAG: + if (!first_frag) + first_frag = pos; + else if (ctsn != next_tsn) + goto done; + last_frag = pos; + is_last = 1; + goto done; + default: + return NULL; + }; } + /* We have the reassembled event. There is no need to look + * further. + */ +done: + retval = sctp_make_reassembled_event(first_frag, last_frag); + if (is_last) + retval->msg_flags |= MSG_EOR; + return retval; } -/* Helper function to reassemble chunks. Hold chunks on the reasm queue that + +/* Helper function to reassemble chunks. Hold chunks on the reasm queue that * need reassembling. */ static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, @@ -341,22 +462,86 @@ static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, { struct sctp_ulpevent *retval = NULL; - /* FIXME: We should be using some new chunk structure here - * instead of carrying chunk fields in the event structure. - * This is temporary as it is too painful to change everything - * at once. - */ - /* Check if this is part of a fragmented message. */ - if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) + if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { + event->msg_flags |= MSG_EOR; return event; + } sctp_ulpq_store_reasm(ulpq, event); - retval = sctp_ulpq_retrieve_reassembled(ulpq); + if (!ulpq->pd_mode) + retval = sctp_ulpq_retrieve_reassembled(ulpq); + else { + __u32 ctsn, ctsnap; + + /* Do not even bother unless this is the next tsn to + * be delivered. + */ + ctsn = event->sndrcvinfo.sinfo_tsn; + ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map); + if (TSN_lte(ctsn, ctsnap)) + retval = sctp_ulpq_retrieve_partial(ulpq); + } return retval; } +/* Retrieve the first part (sequential fragments) for partial delivery. */ +static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq) +{ + struct sk_buff *pos, *tmp, *last_frag, *first_frag; + struct sctp_ulpevent *cevent; + __u32 ctsn, next_tsn; + struct sctp_ulpevent *retval; + + /* The chunks are held in the reasm queue sorted by TSN. + * Walk through the queue sequentially and look for a sequence of + * fragmented chunks that start a datagram. + */ + + if (skb_queue_empty(&ulpq->reasm)) + return NULL; + + last_frag = first_frag = NULL; + retval = NULL; + next_tsn = 0; + + sctp_skb_for_each(pos, &ulpq->reasm, tmp) { + cevent = sctp_skb2event(pos); + ctsn = cevent->sndrcvinfo.sinfo_tsn; + + switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { + case SCTP_DATA_FIRST_FRAG: + if (!first_frag) { + first_frag = pos; + next_tsn = ctsn + 1; + last_frag = pos; + } else + goto done; + break; + + case SCTP_DATA_MIDDLE_FRAG: + if (!first_frag) + return NULL; + if (ctsn == next_tsn) { + next_tsn++; + last_frag = pos; + } else + goto done; + break; + default: + return NULL; + }; + } + + /* We have the reassembled event. There is no need to look + * further. 
+ */ +done: + retval = sctp_make_reassembled_event(first_frag, last_frag); + return retval; +} + /* Helper function to gather skbs that have possibly become * ordered by an an incoming chunk. */ @@ -392,7 +577,7 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, /* Found it, so mark in the ssnmap. */ sctp_ssn_next(in, sid); - + __skb_unlink(pos, pos->list); /* Attach all gathered skbs to the event. */ @@ -412,7 +597,6 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, sid = event->sndrcvinfo.sinfo_stream; ssn = event->sndrcvinfo.sinfo_ssn; - /* Find the right place in this list. We store them by * stream ID and then by SSN. */ @@ -429,7 +613,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, /* If the queue is empty, we have a different function to call. */ if (skb_peek(&ulpq->lobby)) - __skb_insert(sctp_event2skb(event), pos->prev, pos, + __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby); else __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); @@ -441,12 +625,6 @@ static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, __u16 sid, ssn; struct sctp_stream *in; - /* FIXME: We should be using some new chunk structure here - * instead of carrying chunk fields in the event structure. - * This is temporary as it is too painful to change everything - * at once. - */ - /* Check if this message needs ordering. */ if (SCTP_DATA_UNORDERED & event->msg_flags) return event; @@ -475,3 +653,54 @@ static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, return event; } + +/* Partial deliver the first message as there is pressure on rwnd. */ +void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, + struct sctp_chunk *chunk, int priority) +{ + struct sctp_ulpevent *event; + + /* Are we already in partial delivery mode? */ + if (!sctp_sk(ulpq->asoc->base.sk)->pd_mode) { + + /* Is partial delivery possible? */ + event = sctp_ulpq_retrieve_first(ulpq); + /* Send event to the ULP. */ + if (event) { + sctp_ulpq_tail_event(ulpq, event); + sctp_sk(ulpq->asoc->base.sk)->pd_mode = 1; + ulpq->pd_mode = 1; + return; + } + } + + /* Assert: Either already in partial delivery mode or partial + * delivery wasn't possible, so now the only recourse is + * to renege. FIXME: Add renege support starts here. + */ +} + +/* Notify the application if an association is aborted and in + * partial delivery mode. Send up any pending received messages. + */ +void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int priority) +{ + struct sctp_ulpevent *ev = NULL; + struct sock *sk; + + if (!ulpq->pd_mode) + return; + + sk = ulpq->asoc->base.sk; + if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT, + &sctp_sk(sk)->subscribe)) + ev = sctp_ulpevent_make_pdapi(ulpq->asoc, + SCTP_PARTIAL_DELIVERY_ABORTED, + priority); + if (ev) + skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev)); + + /* If there is data waiting, send it up the socket now. */ + if (sctp_ulpq_clear_pd(ulpq) || ev) + wake_up_interruptible(sk->sleep); +} -- cgit v1.2.3 From f2b48f2eb41188c13d865ca9bc48591e772ed6af Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Tue, 11 Feb 2003 18:46:31 -0800 Subject: [SCTP] sctp mib statistics update/display support. 
--- include/net/sctp/sctp.h | 1 + net/sctp/Makefile | 2 +- net/sctp/associola.c | 2 + net/sctp/endpointola.c | 2 + net/sctp/input.c | 7 ++- net/sctp/ipv6.c | 2 + net/sctp/outqueue.c | 8 ++- net/sctp/proc.c | 128 +++++++++++++++++++++++++++++++++++++++++++++++ net/sctp/protocol.c | 19 ++++++- net/sctp/sm_make_chunk.c | 3 ++ net/sctp/sm_statefuns.c | 58 +++++++++++++++++++++ net/sctp/ulpqueue.c | 2 + 12 files changed, 229 insertions(+), 5 deletions(-) create mode 100644 net/sctp/proc.c (limited to 'net/sctp/ulpqueue.c') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 4ce62633f2e5..b2e19ebde563 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -214,6 +214,7 @@ DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics); #define SCTP_INC_STATS(field) SNMP_INC_STATS(sctp_statistics, field) #define SCTP_INC_STATS_BH(field) SNMP_INC_STATS_BH(sctp_statistics, field) #define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field) +#define SCTP_DEC_STATS(field) SNMP_DEC_STATS(sctp_statistics, field) /* Determine if this is a valid kernel address. */ static inline int sctp_is_valid_kaddr(unsigned long addr) diff --git a/net/sctp/Makefile b/net/sctp/Makefile index 162f9b11086f..545fad836084 100644 --- a/net/sctp/Makefile +++ b/net/sctp/Makefile @@ -10,7 +10,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ inqueue.o outqueue.o ulpqueue.o command.o \ tsnmap.o bind_addr.o socket.o primitive.o \ output.o input.o hashdriver.o sla1.o \ - debug.o ssnmap.o + debug.o ssnmap.o proc.o ifeq ($(CONFIG_SCTP_ADLER32), y) sctp-y += adler32.o diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 1f8e0b094a73..cc6c951a3356 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -795,6 +795,8 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc) */ if (sctp_chunk_is_data(chunk)) asoc->peer.last_data_from = chunk->transport; + else + SCTP_INC_STATS(SctpInCtrlChunks); if (chunk->transport) chunk->transport->last_time_heard = jiffies; diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 4932831903aa..5a902a3b7e7c 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -369,6 +369,8 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep) */ if (asoc && sctp_chunk_is_data(chunk)) asoc->peer.last_data_from = chunk->transport; + else + SCTP_INC_STATS(SctpInCtrlChunks); if (chunk->transport) chunk->transport->last_time_heard = jiffies; diff --git a/net/sctp/input.c b/net/sctp/input.c index a6aabd3d36a4..895ea6f1694f 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -90,6 +90,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb) if (val != cmp) { /* CRC failure, dump it. */ + SCTP_INC_STATS_BH(SctpChecksumErrors); return -1; } return 0; @@ -115,6 +116,8 @@ int sctp_rcv(struct sk_buff *skb) if (skb->pkt_type!=PACKET_HOST) goto discard_it; + SCTP_INC_STATS_BH(SctpInSCTPPacks); + sh = (struct sctphdr *) skb->h.raw; /* Pull up the IP and SCTP headers. */ @@ -160,8 +163,10 @@ int sctp_rcv(struct sk_buff *skb) */ if (!asoc) { ep = __sctp_rcv_lookup_endpoint(&dest); - if (sctp_rcv_ootb(skb)) + if (sctp_rcv_ootb(skb)) { + SCTP_INC_STATS_BH(SctpOutOfBlues); goto discard_release; + } } /* Retrieve the common input handling substructure. 
*/ diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index afd577b71bdc..2de6ed9811b3 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -132,6 +132,8 @@ static inline int sctp_v6_xmit(struct sk_buff *skb, __FUNCTION__, skb, skb->len, NIP6(fl.fl6_src), NIP6(fl.fl6_dst)); + SCTP_INC_STATS(SctpOutSCTPPacks); + return ip6_xmit(sk, skb, &fl, np->opt); } diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index b1bfe69e61ed..a5019f6d7bdd 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -193,11 +193,17 @@ int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk) : "Illegal Chunk"); skb_queue_tail(&q->out, (struct sk_buff *) chunk); + if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) + SCTP_INC_STATS(SctpOutUnorderChunks); + else + SCTP_INC_STATS(SctpOutOrderChunks); q->empty = 0; break; }; - } else + } else { skb_queue_tail(&q->control, (struct sk_buff *) chunk); + SCTP_INC_STATS(SctpOutCtrlChunks); + } if (error < 0) return error; diff --git a/net/sctp/proc.c b/net/sctp/proc.c new file mode 100644 index 000000000000..9c7d9e489b22 --- /dev/null +++ b/net/sctp/proc.c @@ -0,0 +1,128 @@ +/* SCTP kernel reference Implementation + * Copyright (c) 2003 International Business Machines, Corp. + * + * This file is part of the SCTP kernel reference Implementation + * + * The SCTP reference implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * The SCTP reference implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, write to + * the Free Software Foundation, 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers + * + * Or submit a bug report through the following website: + * http://www.sf.net/projects/lksctp + * + * Written or modified by: + * Sridhar Samudrala + * + * Any bugs reported given to us we will try to fix... any fixes shared will + * be incorporated into the next SCTP release. + */ + +#include +#include +#include + +static char *sctp_snmp_list[] = { +#define SCTP_SNMP_ENTRY(x) #x + SCTP_SNMP_ENTRY(SctpCurrEstab), + SCTP_SNMP_ENTRY(SctpActiveEstabs), + SCTP_SNMP_ENTRY(SctpPassiveEstabs), + SCTP_SNMP_ENTRY(SctpAborteds), + SCTP_SNMP_ENTRY(SctpShutdowns), + SCTP_SNMP_ENTRY(SctpOutOfBlues), + SCTP_SNMP_ENTRY(SctpChecksumErrors), + SCTP_SNMP_ENTRY(SctpOutCtrlChunks), + SCTP_SNMP_ENTRY(SctpOutOrderChunks), + SCTP_SNMP_ENTRY(SctpOutUnorderChunks), + SCTP_SNMP_ENTRY(SctpInCtrlChunks), + SCTP_SNMP_ENTRY(SctpInOrderChunks), + SCTP_SNMP_ENTRY(SctpInUnorderChunks), + SCTP_SNMP_ENTRY(SctpFragUsrMsgs), + SCTP_SNMP_ENTRY(SctpReasmUsrMsgs), + SCTP_SNMP_ENTRY(SctpOutSCTPPacks), + SCTP_SNMP_ENTRY(SctpInSCTPPacks), +#undef SCTP_SNMP_ENTRY +}; + +/* Return the current value of a particular entry in the mib by adding its + * per cpu counters. 
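The MIB behind these counters is kept as two per-CPU halves, one incremented from softirq context and one from process context, which is why fold_field() sums both mib[0] and mib[1] for every possible CPU. A sketch of the increment side; the macro shape is assumed from the net/snmp.h of this era and is not part of this patch:

#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Pick the softirq half (mib[0]) or the process-context half (mib[1])
 * and bump the field on the local CPU; no locking is needed because
 * each (context, cpu) pair owns its own copy.
 */
#define SNMP_INC_STATS_SKETCH(mib, field) \
	(per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field++)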
+ */ +static unsigned long +fold_field(void *mib[], int nr) +{ + unsigned long res = 0; + int i; + + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_possible(i)) + continue; + res += + *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + + sizeof (unsigned long) * nr)); + res += + *((unsigned long *) (((void *) per_cpu_ptr(mib[1], i)) + + sizeof (unsigned long) * nr)); + } + return res; +} + +/* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */ +static int sctp_snmp_seq_show(struct seq_file *seq, void *v) +{ + int i; + + for (i = 0; i < sizeof(sctp_snmp_list) / sizeof(char *); i++) + seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i], + fold_field((void **)sctp_statistics, i)); + + return 0; +} + +/* Initialize the seq file operations for 'snmp' object. */ +static int sctp_snmp_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, sctp_snmp_seq_show, NULL); +} + +static struct file_operations sctp_snmp_seq_fops = { + .open = sctp_snmp_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* Set up the proc fs entry for 'snmp' object. */ +int __init sctp_snmp_proc_init(void) +{ + struct proc_dir_entry *p; + + p = create_proc_entry("snmp", S_IRUGO, proc_net_sctp); + if (!p) + return -ENOMEM; + + p->proc_fops = &sctp_snmp_seq_fops; + + return 0; +} + +/* Cleanup the proc fs entry for 'snmp' object. */ +void sctp_snmp_proc_exit(void) +{ + remove_proc_entry("snmp", proc_net_sctp); +} diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index cd8e3b1adb34..e97b4d443b56 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -75,6 +75,9 @@ static struct sctp_af *sctp_af_v6_specific; extern struct net_proto_family inet_family_ops; +extern int sctp_snmp_proc_init(void); +extern int sctp_snmp_proc_exit(void); + /* Return the address of the control sock. */ struct sock *sctp_get_ctl_sock(void) { @@ -82,21 +85,32 @@ struct sock *sctp_get_ctl_sock(void) } /* Set up the proc fs entry for the SCTP protocol. */ -__init void sctp_proc_init(void) +__init int sctp_proc_init(void) { + int rc = 0; + if (!proc_net_sctp) { struct proc_dir_entry *ent; ent = proc_mkdir("net/sctp", 0); if (ent) { ent->owner = THIS_MODULE; proc_net_sctp = ent; - } + } else + rc = -ENOMEM; } + + if (sctp_snmp_proc_init()) + rc = -ENOMEM; + + return rc; } /* Clean up the proc fs entry for the SCTP protocol. */ void sctp_proc_exit(void) { + + sctp_snmp_proc_exit(); + if (proc_net_sctp) { proc_net_sctp = NULL; remove_proc_entry("net/sctp", 0); @@ -628,6 +642,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, NIPQUAD(((struct rtable *)skb->dst)->rt_src), NIPQUAD(((struct rtable *)skb->dst)->rt_dst)); + SCTP_INC_STATS(SctpOutSCTPPacks); return ip_queue_xmit(skb, ipfragok); } diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 487221d46a65..cf752d4d2711 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1180,6 +1180,9 @@ int sctp_datachunks_from_user(sctp_association_t *asoc, over = msg_len % max; offset = 0; + if (whole && over) + SCTP_INC_STATS_USER(SctpFragUsrMsgs); + /* Create chunks for all the full sized DATA chunks. 
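The SctpFragUsrMsgs test above (whole && over) counts a user message as fragmented only when it produces both full-sized chunks and a remainder; a message that is an exact multiple of the chunk size gives over == 0 and is not counted by this test. A quick worked example with illustrative numbers:

/* Worked example for the test above (numbers illustrative):
 *   max = 1452, msg_len = 4500:  whole = 3, over = 144  -> counted
 *   max = 1452, msg_len = 1000:  whole = 0, over = 1000 -> not counted
 *   max = 1452, msg_len = 2904:  whole = 2, over = 0    -> not counted
 */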
*/ for (i=0, len=first_len; i < whole; i++) { frag = SCTP_DATA_MIDDLE_FRAG; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 848a48229f17..47a22b9daf11 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -145,6 +145,9 @@ sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); + + SCTP_INC_STATS(SctpShutdowns); + SCTP_DEC_STATS(SctpCurrEstab); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); @@ -223,6 +226,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep, if (packet) { sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); + SCTP_INC_STATS(SctpOutCtrlChunks); return SCTP_DISPOSITION_CONSUME; } else { return SCTP_DISPOSITION_NOMEM; @@ -379,6 +383,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep, sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); + SCTP_INC_STATS(SctpAborteds); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); return SCTP_DISPOSITION_DELETE_TCB; } @@ -388,6 +393,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep, if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, &err_chunk)) { + + SCTP_INC_STATS(SctpAborteds); + /* This chunk contains fatal error. It is to be discarded. * Send an ABORT, with causes if there is any. */ @@ -403,6 +411,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep, if (packet) { sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); + SCTP_INC_STATS(SctpOutCtrlChunks); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, @@ -557,6 +566,8 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); + SCTP_INC_STATS(SctpCurrEstab); + SCTP_INC_STATS(SctpPassiveEstabs); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); if (new_asoc->autoclose) @@ -648,6 +659,8 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); + SCTP_INC_STATS(SctpCurrEstab); + SCTP_INC_STATS(SctpActiveEstabs); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); if (asoc->autoclose) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, @@ -719,6 +732,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep, if (asoc->overall_error_count >= asoc->overall_error_threshold) { /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL()); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); return SCTP_DISPOSITION_DELETE_TCB; } @@ -929,6 +944,8 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa, goto out; sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt)); + SCTP_INC_STATS(SctpOutCtrlChunks); + /* Discard the rest of the inbound packet. 
 	sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
 
@@ -1125,6 +1142,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 	if (packet) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 				SCTP_PACKET(packet));
+		SCTP_INC_STATS(SctpOutCtrlChunks);
 		retval = SCTP_DISPOSITION_CONSUME;
 	} else {
 		retval = SCTP_DISPOSITION_NOMEM;
@@ -1436,6 +1454,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_ESTABLISHED));
+	SCTP_INC_STATS(SctpCurrEstab);
 	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 
 	repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1519,6 +1538,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
 		sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 				SCTP_STATE(SCTP_STATE_ESTABLISHED));
+		SCTP_INC_STATS(SctpCurrEstab);
 		sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
 				SCTP_NULL());
 
@@ -1925,6 +1945,8 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const sctp_endpoint_t *ep,
 
 	/* ASSOC_FAILED will DELETE_TCB. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+	SCTP_INC_STATS(SctpAborteds);
+	SCTP_DEC_STATS(SctpCurrEstab);
 
 	/* BUG?  This does not look complete... */
 	return SCTP_DISPOSITION_ABORT;
@@ -1948,6 +1970,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const sctp_endpoint_t *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
+	SCTP_INC_STATS(SctpAborteds);
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
@@ -2332,6 +2355,8 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+		SCTP_INC_STATS(SctpAborteds);
+		SCTP_DEC_STATS(SctpCurrEstab);
 		return SCTP_DISPOSITION_CONSUME;
 	}
 
@@ -2340,6 +2365,11 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 	/* Record the fact that we have received this TSN.  */
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
+	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+		SCTP_INC_STATS(SctpInUnorderChunks);
+	else
+		SCTP_INC_STATS(SctpInOrderChunks);
+
 	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
 	 *
 	 * If an endpoint receive a DATA chunk with an invalid stream
@@ -2536,6 +2566,8 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+		SCTP_INC_STATS(SctpAborteds);
+		SCTP_DEC_STATS(SctpCurrEstab);
 		return SCTP_DISPOSITION_CONSUME;
 	}
 
@@ -2544,6 +2576,11 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
 	/* Record the fact that we have received this TSN.  */
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
+	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+		SCTP_INC_STATS(SctpInUnorderChunks);
+	else
+		SCTP_INC_STATS(SctpInOrderChunks);
+
 	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
 	 *
 	 * If an endpoint receive a DATA chunk with an invalid stream
@@ -2705,6 +2742,8 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const sctp_endpoint_t *ep,
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet));
+	SCTP_INC_STATS(SctpOutCtrlChunks);
+
 	return SCTP_DISPOSITION_CONSUME;
 }
 
@@ -2794,6 +2833,8 @@ sctp_disposition_t sctp_sf_do_9_2_final(const sctp_endpoint_t *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
+	SCTP_INC_STATS(SctpShutdowns);
+	SCTP_DEC_STATS(SctpCurrEstab);
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
 
 	/* ...and remove all record of the association. */
@@ -2834,6 +2875,8 @@ sctp_disposition_t sctp_sf_ootb(const sctp_endpoint_t *ep,
 	__u8 *ch_end;
 	int ootb_shut_ack = 0;
 
+	SCTP_INC_STATS(SctpOutOfBlues);
+
 	ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
 	do {
 		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
@@ -2901,6 +2944,8 @@ sctp_disposition_t sctp_sf_shut_8_4_5(const sctp_endpoint_t *ep,
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet));
+	SCTP_INC_STATS(SctpOutCtrlChunks);
+
 	return SCTP_DISPOSITION_CONSUME;
 }
 
@@ -3472,6 +3517,10 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(const sctp_endpoint_t *ep,
 
 	/* Delete the established association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+
+	SCTP_INC_STATS(SctpAborteds);
+	SCTP_DEC_STATS(SctpCurrEstab);
+
 	return retval;
 }
 
@@ -3527,6 +3576,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
+	SCTP_INC_STATS(SctpShutdowns);
+
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
 	return SCTP_DISPOSITION_DELETE_TCB;
@@ -3597,6 +3648,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(const sctp_endpoint_t *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
+	SCTP_INC_STATS(SctpAborteds);
+
 	/* Even if we can't send the ABORT due to low memory delete the
 	 * TCB.  This is a departure from our typical NOMEM handling.
 	 */
@@ -3929,6 +3982,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const sctp_endpoint_t *ep,
 	if (asoc->overall_error_count >= asoc->overall_error_threshold) {
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+		SCTP_INC_STATS(SctpAborteds);
+		SCTP_DEC_STATS(SctpCurrEstab);
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
 
@@ -4096,6 +4151,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const sctp_endpoint_t *ep,
 	if (asoc->overall_error_count >= asoc->overall_error_threshold) {
 		/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+		SCTP_INC_STATS(SctpAborteds);
+		SCTP_DEC_STATS(SctpCurrEstab);
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
 
@@ -4401,6 +4458,7 @@ void sctp_send_stale_cookie_err(const sctp_endpoint_t *ep,
 			sctp_packet_append_chunk(packet, err_chunk);
 			sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 					SCTP_PACKET(packet));
+			SCTP_INC_STATS(SctpOutCtrlChunks);
 		} else
 			sctp_free_chunk (err_chunk);
 	}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 04c47bffb8e9..098f86ef2b4f 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -266,6 +266,8 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
 
 	event = (sctp_ulpevent_t *) f_frag->cb;
 
+	SCTP_INC_STATS(SctpReasmUsrMsgs);
+
 	return event;
 }
-- 
cgit v1.2.3
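
A note on the counter layout folded by fold_field() above: each MIB keeps two
blocks of per-CPU counters, historically one updated from softirq context
(SCTP_INC_STATS) and one from user/process context (SCTP_INC_STATS_USER), so
a /proc read must sum slot `nr` across both blocks and every possible CPU.
The following is a minimal userspace sketch of that fold, not kernel code:
plain arrays stand in for the kernel's per_cpu_ptr() areas, and NR_CPUS and
SCTP_MIB_MAX here are made-up sizes, not the kernel's.

/*
 * Userspace sketch of the fold done by fold_field() above.
 * mib[ctx][cpu][field]: ctx 0 ~ softirq counters, ctx 1 ~ user counters.
 */
#include <stdio.h>

#define NR_CPUS      4	/* stand-in; the kernel iterates cpu_possible() CPUs */
#define SCTP_MIB_MAX 3	/* stand-in for the number of MIB fields */

static unsigned long mib[2][NR_CPUS][SCTP_MIB_MAX];

static unsigned long fold_field(int nr)
{
	unsigned long res = 0;
	int i;

	/* Sum field `nr` from both contexts on every CPU. */
	for (i = 0; i < NR_CPUS; i++) {
		res += mib[0][i][nr];
		res += mib[1][i][nr];
	}
	return res;
}

int main(void)
{
	/* Simulate increments from different CPUs and contexts. */
	mib[0][0][0]++;		/* like SCTP_INC_STATS() in softirq on CPU 0 */
	mib[1][2][0]++;		/* like SCTP_INC_STATS_USER() on CPU 2 */

	printf("folded value for field 0: %lu\n", fold_field(0));
	return 0;
}

Keeping the counters per-CPU and per-context means the fast paths need no
locking or atomics; the cost is paid only on the rare /proc read, which walks
all CPUs.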
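
The file created by this patch can be read with cat or parsed trivially. The
/proc/net/sctp/snmp path and the "%-32s\t%ld\n" record format come from
sctp_snmp_seq_show() above; the reader below is only an illustrative sketch
(the name buffer size is arbitrary, and the open simply fails if the patched
SCTP code is not loaded).

/* Minimal userspace reader for /proc/net/sctp/snmp. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/sctp/snmp", "r");
	char name[64];
	long value;

	if (!f) {
		perror("fopen /proc/net/sctp/snmp");
		return 1;
	}

	/* Each record is a counter name followed by its folded value. */
	while (fscanf(f, "%63s %ld", name, &value) == 2)
		printf("%s = %ld\n", name, value);

	fclose(f);
	return 0;
}

With the patch applied, this prints counters such as SctpCurrEstab,
SctpAborteds and SctpOutSCTPPacks, one per line.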