-rw-r--r--  include/net/sctp/command.h   |   5
-rw-r--r--  include/net/sctp/tsnmap.h    |  31
-rw-r--r--  include/net/sctp/ulpqueue.h  |   3
-rw-r--r--  net/sctp/associola.c         |   4
-rw-r--r--  net/sctp/sm_sideeffect.c     | 132
-rw-r--r--  net/sctp/sm_statefuns.c      |  48
-rw-r--r--  net/sctp/socket.c            |  49
-rw-r--r--  net/sctp/tsnmap.c            |  20
-rw-r--r--  net/sctp/ulpqueue.c          |  99
9 files changed, 272 insertions, 119 deletions
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 03ab4422b683..6603202d91f7 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -68,7 +68,6 @@ typedef enum {
SCTP_CMD_INIT_RESTART, /* High level, do init timer work. */
SCTP_CMD_INIT_FAILED, /* High level, do init failure work. */
SCTP_CMD_REPORT_DUP, /* Report a duplicate TSN. */
- SCTP_CMD_REPORT_BIGGAP, /* Narc on a TSN (it was too high). */
SCTP_CMD_STRIKE, /* Mark a strike against a transport. */
SCTP_CMD_TRANSMIT, /* Transmit the outqueue. */
SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
@@ -86,8 +85,8 @@ typedef enum {
SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
- SCTP_CMD_CHUNK_PD, /* Partial data delivery considerations. */
-
+ SCTP_CMD_PART_DELIVER, /* Partial data delivery considerations. */
+ SCTP_CMD_RENEGE, /* Renege data on an association. */
SCTP_CMD_LAST
} sctp_verb_t;
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
index 70bea4ce1a98..61e26a90abf4 100644
--- a/include/net/sctp/tsnmap.h
+++ b/include/net/sctp/tsnmap.h
@@ -6,13 +6,13 @@
* These are the definitions needed for the tsnmap type. The tsnmap is used
* to track out of order TSNs received.
*
- * The SCTP reference implementation is free software;
+ * The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
- * the SCTP reference implementation is distributed in the hope that it
+ * The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -23,12 +23,17 @@
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
- * Please send any bug reports or fixes you make to one of the
- * following email addresses:
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
*
- * Jon Grimm <jgrimm@us.ibm.com>
- * La Monte H.P. Yarroll <piggy@acm.org>
- * Karl Knutson <karl@athena.chicago.il.us>
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
@@ -153,15 +158,18 @@ static inline __u32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
return map->dup_tsns;
}
-/* Mark a duplicate TSN. Note: we limit how many we are willing to
- * store and consequently report.
+/* Mark a duplicate TSN. Note: limit the storage of duplicate TSN
+ * information.
*/
static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)
{
if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS)
- map->dup_tsns[map->num_dup_tsns++] = tsn;
+ map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);
}
+/* Renege a TSN that was seen. */
+void sctp_tsnmap_renege(struct sctp_tsnmap *, __u32 tsn);
+
/* Is there a gap in the TSN map? */
int sctp_tsnmap_has_gap(const struct sctp_tsnmap *);
@@ -176,6 +184,3 @@ int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *,
struct sctp_tsnmap_iter *,__u16 *start, __u16 *end);
#endif /* __sctp_tsnmap_h__ */
-
-
-
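
Note on the tsnmap.h hunk above: the htonl() change means duplicate TSNs are now stored in the map already in network byte order, presumably so that sctp_make_sack() can copy the dup_tsns array into the SACK chunk without converting each entry; the matching ntohl() is dropped from the SCTP_CMD_REPORT_DUP handler later in this patch. A minimal userspace sketch of that bookkeeping (illustrative names and constants, not kernel code):

/*
 * Standalone model (not kernel code) of the dup-TSN bookkeeping:
 * duplicates are stored already converted with htonl() so a SACK
 * builder can copy the array into the wire format without a second
 * conversion pass.  MAX_DUP_TSNS is an illustrative stand-in.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_DUP_TSNS 16 /* stand-in for SCTP_MAX_DUP_TSNS */

struct dup_map {
    uint16_t num_dup_tsns;
    uint32_t dup_tsns[MAX_DUP_TSNS]; /* network byte order */
};

static void mark_dup(struct dup_map *map, uint32_t tsn)
{
    if (map->num_dup_tsns < MAX_DUP_TSNS)
        map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);
}

int main(void)
{
    struct dup_map map = { 0 };
    uint32_t wire[MAX_DUP_TSNS];

    mark_dup(&map, 0x01020304);
    /* A SACK builder could copy the array verbatim. */
    memcpy(wire, map.dup_tsns, map.num_dup_tsns * sizeof(uint32_t));
    printf("stored %u dup TSN(s), first on the wire: 0x%08x\n",
           (unsigned)map.num_dup_tsns, (unsigned)wire[0]);
    return 0;
}
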
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index dd7823b0a737..7b8ad3ab1d4a 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -66,6 +66,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, int);
/* Add a new event for propagation to the ULP. */
int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
+/* Renege previously received chunks. */
+void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, int);
+
/* Perform partial delivery. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, int);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 916c11d97ac1..230b5602004d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1014,8 +1014,8 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
((asoc->rwnd - asoc->a_rwnd) >=
min_t(__u32, (asoc->base.sk->rcvbuf >> 1), asoc->pmtu))) {
SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
- "rwnd: %u a_rwnd: %u\n",
- __FUNCTION__, asoc, asoc->rwnd, asoc->a_rwnd);
+ "rwnd: %u a_rwnd: %u\n", __FUNCTION__,
+ asoc, asoc->rwnd, asoc->a_rwnd);
sack = sctp_make_sack(asoc);
if (!sack)
return;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9fe550bbed78..1228f55dfdfb 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -253,7 +253,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
{
int error = 0;
int force;
- sctp_cmd_t *command;
+ sctp_cmd_t *cmd;
sctp_chunk_t *new_obj;
sctp_chunk_t *chunk = NULL;
sctp_packet_t *packet;
@@ -273,22 +273,22 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
* cmd->handle(x, y, z)
* --jgrimm
*/
- while (NULL != (command = sctp_next_cmd(commands))) {
- switch (command->verb) {
+ while (NULL != (cmd = sctp_next_cmd(commands))) {
+ switch (cmd->verb) {
case SCTP_CMD_NOP:
/* Do nothing. */
break;
case SCTP_CMD_NEW_ASOC:
/* Register a new association. */
- asoc = command->obj.ptr;
+ asoc = cmd->obj.ptr;
/* Register with the endpoint. */
sctp_endpoint_add_asoc(ep, asoc);
sctp_hash_established(asoc);
break;
case SCTP_CMD_UPDATE_ASSOC:
- sctp_assoc_update(asoc, command->obj.ptr);
+ sctp_assoc_update(asoc, cmd->obj.ptr);
break;
case SCTP_CMD_PURGE_OUTQUEUE:
@@ -304,13 +304,12 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_NEW_STATE:
/* Enter a new state. */
- sctp_cmd_new_state(commands, asoc, command->obj.state);
+ sctp_cmd_new_state(commands, asoc, cmd->obj.state);
break;
case SCTP_CMD_REPORT_TSN:
/* Record the arrival of a TSN. */
- sctp_tsnmap_mark(&asoc->peer.tsn_map,
- command->obj.u32);
+ sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
break;
case SCTP_CMD_GEN_SACK:
@@ -319,14 +318,14 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
* the packet and MAYBE generate a SACK, or
* force a SACK out.
*/
- force = command->obj.i32;
+ force = cmd->obj.i32;
error = sctp_gen_sack(asoc, force, commands);
break;
case SCTP_CMD_PROCESS_SACK:
/* Process an inbound SACK. */
error = sctp_cmd_process_sack(commands, asoc,
- command->obj.ptr);
+ cmd->obj.ptr);
break;
case SCTP_CMD_GEN_INIT_ACK:
@@ -347,16 +346,15 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
* layer which will bail.
*/
error = sctp_cmd_process_init(commands, asoc, chunk,
- command->obj.ptr,
- priority);
+ cmd->obj.ptr, priority);
break;
case SCTP_CMD_GEN_COOKIE_ECHO:
/* Generate a COOKIE ECHO chunk. */
new_obj = sctp_make_cookie_echo(asoc, chunk);
if (!new_obj) {
- if (command->obj.ptr)
- sctp_free_chunk(command->obj.ptr);
+ if (cmd->obj.ptr)
+ sctp_free_chunk(cmd->obj.ptr);
goto nomem;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
@@ -365,9 +363,9 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
/* If there is an ERROR chunk to be sent along with
* the COOKIE_ECHO, send it, too.
*/
- if (command->obj.ptr)
+ if (cmd->obj.ptr)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(command->obj.ptr));
+ SCTP_CHUNK(cmd->obj.ptr));
break;
case SCTP_CMD_GEN_SHUTDOWN:
@@ -387,43 +385,36 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_CHUNK_ULP:
/* Send a chunk to the sockets layer. */
SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
- "chunk_up:",
- command->obj.ptr,
- "ulpq:",
- &asoc->ulpq);
- sctp_ulpq_tail_data(&asoc->ulpq,
- command->obj.ptr,
+ "chunk_up:", cmd->obj.ptr,
+ "ulpq:", &asoc->ulpq);
+ sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
GFP_ATOMIC);
break;
case SCTP_CMD_EVENT_ULP:
/* Send a notification to the sockets layer. */
SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
- "event_up:",
- command->obj.ptr,
- "ulpq:",
- &asoc->ulpq);
- sctp_ulpq_tail_event(&asoc->ulpq,
- command->obj.ptr);
+ "event_up:",cmd->obj.ptr,
+ "ulpq:",&asoc->ulpq);
+ sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
break;
case SCTP_CMD_REPLY:
/* Send a chunk to our peer. */
error = sctp_outq_tail(&asoc->outqueue,
- command->obj.ptr);
+ cmd->obj.ptr);
break;
case SCTP_CMD_SEND_PKT:
/* Send a full packet to our peer. */
- packet = command->obj.ptr;
+ packet = cmd->obj.ptr;
sctp_packet_transmit(packet);
sctp_ootb_pkt_free(packet);
break;
case SCTP_CMD_RETRAN:
/* Mark a transport for retransmission. */
- sctp_retransmit(&asoc->outqueue,
- command->obj.transport,
+ sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
SCTP_RETRANSMIT_T3_RTX);
break;
@@ -434,32 +425,30 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_ECN_CE:
/* Do delayed CE processing. */
- sctp_do_ecn_ce_work(asoc, command->obj.u32);
+ sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
break;
case SCTP_CMD_ECN_ECNE:
/* Do delayed ECNE processing. */
- new_obj = sctp_do_ecn_ecne_work(asoc,
- command->obj.u32,
+ new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
chunk);
- if (new_obj) {
+ if (new_obj)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
- }
break;
case SCTP_CMD_ECN_CWR:
/* Do delayed CWR processing. */
- sctp_do_ecn_cwr_work(asoc, command->obj.u32);
+ sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
break;
case SCTP_CMD_SETUP_T2:
- sctp_cmd_setup_t2(commands, asoc, command->obj.ptr);
+ sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
break;
case SCTP_CMD_TIMER_START:
- timer = &asoc->timers[command->obj.to];
- timeout = asoc->timeouts[command->obj.to];
+ timer = &asoc->timers[cmd->obj.to];
+ timeout = asoc->timeouts[cmd->obj.to];
if (!timeout)
BUG();
@@ -469,29 +458,28 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_TIMER_RESTART:
- timer = &asoc->timers[command->obj.to];
- timeout = asoc->timeouts[command->obj.to];
+ timer = &asoc->timers[cmd->obj.to];
+ timeout = asoc->timeouts[cmd->obj.to];
if (!mod_timer(timer, jiffies + timeout))
sctp_association_hold(asoc);
break;
case SCTP_CMD_TIMER_STOP:
- timer = &asoc->timers[command->obj.to];
+ timer = &asoc->timers[cmd->obj.to];
if (timer_pending(timer) && del_timer(timer))
sctp_association_put(asoc);
break;
case SCTP_CMD_INIT_RESTART:
-
/* Do the needed accounting and updates
* associated with restarting an initialization
* timer.
*/
asoc->counters[SCTP_COUNTER_INIT_ERROR]++;
- asoc->timeouts[command->obj.to] *= 2;
- if (asoc->timeouts[command->obj.to] >
+ asoc->timeouts[cmd->obj.to] *= 2;
+ if (asoc->timeouts[cmd->obj.to] >
asoc->max_init_timeo) {
- asoc->timeouts[command->obj.to] =
+ asoc->timeouts[cmd->obj.to] =
asoc->max_init_timeo;
}
@@ -506,7 +494,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_add_cmd_sf(commands,
SCTP_CMD_TIMER_RESTART,
- SCTP_TO(command->obj.to));
+ SCTP_TO(cmd->obj.to));
break;
case SCTP_CMD_INIT_FAILED:
@@ -519,23 +507,16 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_COUNTER_INC:
- asoc->counters[command->obj.counter]++;
+ asoc->counters[cmd->obj.counter]++;
break;
case SCTP_CMD_COUNTER_RESET:
- asoc->counters[command->obj.counter] = 0;
+ asoc->counters[cmd->obj.counter] = 0;
break;
case SCTP_CMD_REPORT_DUP:
- sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
- ntohl(command->obj.u32));
- break;
-
- case SCTP_CMD_REPORT_BIGGAP:
- SCTP_DEBUG_PRINTK("Big gap: %x to %x\n",
- sctp_tsnmap_get_ctsn(
- &asoc->peer.tsn_map),
- command->obj.u32);
+ sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
+ cmd->obj.u32);
break;
case SCTP_CMD_REPORT_BAD_TAG:
@@ -544,17 +525,16 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_STRIKE:
/* Mark one strike against a transport. */
- sctp_do_8_2_transport_strike(asoc,
- command->obj.transport);
+ sctp_do_8_2_transport_strike(asoc, cmd->obj.transport);
break;
case SCTP_CMD_TRANSPORT_RESET:
- t = command->obj.transport;
+ t = cmd->obj.transport;
sctp_cmd_transport_reset(commands, asoc, t);
break;
case SCTP_CMD_TRANSPORT_ON:
- t = command->obj.transport;
+ t = cmd->obj.transport;
sctp_cmd_transport_on(commands, asoc, t, chunk);
break;
@@ -563,7 +543,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_HB_TIMER_UPDATE:
- t = command->obj.transport;
+ t = cmd->obj.transport;
sctp_cmd_hb_timer_update(commands, asoc, t);
break;
@@ -572,17 +552,16 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_REPORT_ERROR:
- error = command->obj.error;
+ error = cmd->obj.error;
break;
case SCTP_CMD_PROCESS_CTSN:
/* Dummy up a SACK for processing. */
- sackh.cum_tsn_ack = command->obj.u32;
+ sackh.cum_tsn_ack = cmd->obj.u32;
sackh.a_rwnd = 0;
sackh.num_gap_ack_blocks = 0;
sackh.num_dup_tsns = 0;
- sctp_add_cmd_sf(commands,
- SCTP_CMD_PROCESS_SACK,
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
SCTP_SACKH(&sackh));
break;
@@ -592,20 +571,23 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_RTO_PENDING:
- t = command->obj.transport;
+ t = cmd->obj.transport;
t->rto_pending = 1;
break;
- case SCTP_CMD_CHUNK_PD:
- /* Send a chunk to the sockets layer. */
- sctp_ulpq_partial_delivery(&asoc->ulpq,
- command->obj.ptr,
+ case SCTP_CMD_PART_DELIVER:
+ sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
GFP_ATOMIC);
break;
+ case SCTP_CMD_RENEGE:
+ sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
+ GFP_ATOMIC);
+ break;
+
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
- command->verb, command->obj.ptr);
+ cmd->verb, cmd->obj.ptr);
break;
};
if (error)
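
Most of the sm_sideeffect.c hunk is the command/cmd rename; the functional part is the two new verbs routed to sctp_ulpq_partial_delivery() and sctp_ulpq_renege(). A stripped-down userspace model of the interpreter pattern these hunks rely on (the verb names mirror the patch, everything else is illustrative, not kernel code):

/*
 * Standalone sketch of the command-interpreter pattern: state
 * functions queue (verb, argument) pairs and one loop dispatches
 * them.  Only the two verbs added by this patch are modeled.
 */
#include <stdio.h>

enum verb {
    CMD_NOP,
    CMD_PART_DELIVER, /* partial data delivery considerations */
    CMD_RENEGE,       /* renege data on an association */
};

struct cmd {
    enum verb verb;
    void *arg;
};

static void run_cmds(const struct cmd *cmds, int n)
{
    for (int i = 0; i < n; i++) {
        switch (cmds[i].verb) {
        case CMD_PART_DELIVER:
            printf("would call sctp_ulpq_partial_delivery()\n");
            break;
        case CMD_RENEGE:
            printf("would call sctp_ulpq_renege()\n");
            break;
        default:
            printf("no-op or unknown verb %d\n", cmds[i].verb);
            break;
        }
    }
}

int main(void)
{
    struct cmd cmds[] = {
        { CMD_PART_DELIVER, NULL },
        { CMD_RENEGE, NULL },
    };

    run_cmds(cmds, 2);
    return 0;
}
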
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 4b1090828f69..12889a5d350f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -145,7 +145,7 @@ sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
-
+
SCTP_INC_STATS(SctpShutdowns);
SCTP_DEC_STATS(SctpCurrEstab);
@@ -682,7 +682,6 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
return SCTP_DISPOSITION_CONSUME;
-
nomem:
return SCTP_DISPOSITION_NOMEM;
}
@@ -2274,7 +2273,6 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
* that the value in the Verification Tag field of the
* received SCTP packet matches its own Tag.
*/
-
if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
@@ -2339,21 +2337,29 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
/* Even if we don't accept this chunk there is
* memory pressure.
*/
- sctp_add_cmd_sf(commands, SCTP_CMD_CHUNK_PD, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
}
+ /* Spill over rwnd a little bit. Note: While allowed, this spill over
+ * seems a bit troublesome in that frag_point varies based on
+ * PMTU. In cases such as loopback, this might be a rather
+ * large spill over.
+ */
if (asoc->rwnd_over || (datalen > asoc->rwnd + asoc->frag_point)) {
-
- /* There is absolutely no room, but this is the most
- * important tsn that we are waiting on, try to
- * to partial deliver or renege to make room.
+ /* If this is the next TSN, consider reneging to make
+ * room. Note: Playing nice with a confused sender. A
+ * malicious sender can still eat up all our buffer
+ * space and in the future we may want to detect and
+ * do more drastic reneging.
*/
- if ((sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
- deliver = SCTP_CMD_CHUNK_PD;
+ if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
+ (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+ SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
+ deliver = SCTP_CMD_RENEGE;
} else {
SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
- "rwnd: %d\n", tsn, datalen,
+ "rwnd: %d\n", tsn, datalen,
asoc->rwnd);
goto discard_force;
}
@@ -2379,21 +2385,23 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
- SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_CONSUME;
}
/* If definitely accepting the DATA chunk, record its TSN, otherwise
- * wait for renege processing.
+ * wait for renege processing.
*/
- if (deliver != SCTP_CMD_CHUNK_PD) {
+ if (SCTP_CMD_CHUNK_ULP == deliver)
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(SctpInUnorderChunks);
- else
- SCTP_INC_STATS(SctpInOrderChunks);
- }
+ /* Note: Some chunks may get overcounted: a dropped chunk is still
+ * counted here, and a reneged chunk is counted a second time when it
+ * arrives again.
+ */
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(SctpInUnorderChunks);
+ else
+ SCTP_INC_STATS(SctpInOrderChunks);
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
@@ -2592,7 +2600,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
- SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_CONSUME;
}
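
The sm_statefuns.c changes implement the decision in sctp_sf_eat_data_6_2: a chunk that overshoots rwnd is kept only when it is the next expected TSN and there is out-of-order data behind it that can be reneged; otherwise it is force-discarded. A standalone model of just that branch (plain integers instead of the kernel's serial-number TSN comparison; names and values are illustrative, not kernel code):

/*
 * Standalone model of the accept/renege/discard decision.  TSN
 * comparison is simplified to plain unsigned arithmetic.
 */
#include <stdbool.h>
#include <stdio.h>

enum action { DELIVER, RENEGE, DISCARD };

static enum action eat_data_decision(unsigned int rwnd, bool rwnd_over,
                                     unsigned int datalen,
                                     unsigned int frag_point,
                                     unsigned int ctsn, unsigned int tsn,
                                     bool map_has_gap)
{
    /* A small spill over rwnd (up to frag_point) is tolerated. */
    if (!rwnd_over && datalen <= rwnd + frag_point)
        return DELIVER;

    /* Out of room: renege only for the next expected TSN, and only if
     * there is out-of-order data behind it that can be given back.
     */
    if (map_has_gap && ctsn + 1 == tsn)
        return RENEGE;

    return DISCARD;
}

int main(void)
{
    printf("%d\n", eat_data_decision(0, true, 100, 1452, 9, 10, true));      /* RENEGE  */
    printf("%d\n", eat_data_decision(0, true, 100, 1452, 9, 12, true));      /* DISCARD */
    printf("%d\n", eat_data_decision(8000, false, 100, 1452, 9, 12, false)); /* DELIVER */
    return 0;
}
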
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index cda5d5aab53c..85fcc4fa6ee9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2183,6 +2183,50 @@ static inline int sctp_getsockopt_get_local_addrs(struct sock *sk, int len,
return 0;
}
+/*
+ *
+ * 7.1.15 Set default send parameters (SET_DEFAULT_SEND_PARAM)
+ *
+ * Applications that wish to use the sendto() system call may wish to
+ * specify a default set of parameters that would normally be supplied
+ * through the inclusion of ancillary data. This socket option allows
+ * such an application to set the default sctp_sndrcvinfo structure.
+ * The application that wishes to use this socket option simply passes
+ * in to this call the sctp_sndrcvinfo structure defined in Section
+ * 5.2.2). The input parameters accepted by this call include
+ * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
+ * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
+ * to this call if the caller is using the UDP model.
+ *
+ * For getsockopt, it gets the default sctp_sndrcvinfo structure.
+ */
+static inline int sctp_getsockopt_set_default_send_param(struct sock *sk,
+ int len, char *optval, int *optlen)
+{
+ struct sctp_sndrcvinfo info;
+ sctp_association_t *asoc;
+
+ if (len != sizeof(struct sctp_sndrcvinfo))
+ return -EINVAL;
+ if (copy_from_user(&info, optval, sizeof(struct sctp_sndrcvinfo)))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ info.sinfo_stream = asoc->defaults.stream;
+ info.sinfo_flags = asoc->defaults.flags;
+ info.sinfo_ppid = asoc->defaults.ppid;
+ info.sinfo_context = asoc->defaults.context;
+ info.sinfo_timetolive = asoc->defaults.timetolive;
+
+ if (copy_to_user(optval, &info, sizeof(struct sctp_sndrcvinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
char *optval, int *optlen)
{
@@ -2260,6 +2304,11 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
optlen);
break;
+ case SCTP_SET_DEFAULT_SEND_PARAM:
+ retval = sctp_getsockopt_set_default_send_param(sk, len,
+ optval, optlen);
+ break;
+
default:
retval = -ENOPROTOOPT;
break;
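
The socket.c hunk adds the getsockopt() side of SCTP_SET_DEFAULT_SEND_PARAM; the kernel copies the passed-in structure first, so the caller must fill sinfo_assoc_id before the call. A userspace sketch of how an application might query it, assuming the lksctp-tools <netinet/sctp.h> header; the option name follows this tree, and newer headers spell it SCTP_DEFAULT_SEND_PARAM, hence the fallback define:

/* Userspace sketch, not part of this patch. */
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SCTP_SET_DEFAULT_SEND_PARAM
#define SCTP_SET_DEFAULT_SEND_PARAM SCTP_DEFAULT_SEND_PARAM /* newer header name */
#endif

int get_default_send_param(int sd, sctp_assoc_t assoc_id)
{
    struct sctp_sndrcvinfo info;
    socklen_t len = sizeof(info);

    memset(&info, 0, sizeof(info));
    info.sinfo_assoc_id = assoc_id; /* required for the UDP-style model */

    if (getsockopt(sd, IPPROTO_SCTP, SCTP_SET_DEFAULT_SEND_PARAM,
                   &info, &len) < 0) {
        perror("getsockopt(SCTP_SET_DEFAULT_SEND_PARAM)");
        return -1;
    }

    printf("stream %u  ppid %u  ttl %u\n",
           (unsigned)info.sinfo_stream, (unsigned)info.sinfo_ppid,
           (unsigned)info.sinfo_timetolive);
    return 0;
}
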
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 284a6b56a4f5..8773a7ee3ead 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -385,3 +385,23 @@ static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
}
}
}
+
+/* Renege a TSN that we have seen. */
+void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn)
+{
+ __s32 gap;
+
+ if (TSN_lt(tsn, map->base_tsn))
+ return;
+ if (!TSN_lt(tsn, map->base_tsn + map->len + map->len))
+ return;
+
+ /* Assert: TSN is in range. */
+ gap = tsn - map->base_tsn;
+
+ /* Pretend we never saw the TSN. */
+ if (gap < map->len)
+ map->tsn_map[gap] = 0;
+ else
+ map->overflow_map[gap - map->len] = 0;
+}
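
sctp_tsnmap_renege() clears the "seen" mark for a TSN inside the window [base_tsn, base_tsn + 2*len), indexing either the main map or the overflow map. A standalone model of that index arithmetic (plain unsigned comparison in place of TSN_lt(), illustrative sizes, not kernel code):

/*
 * Standalone model of the renege index arithmetic: the map covers
 * [base_tsn, base_tsn + 2*MAP_LEN), split into a main map and an
 * overflow map of MAP_LEN entries each.
 */
#include <stdint.h>
#include <stdio.h>

#define MAP_LEN 8 /* illustrative; the kernel sizes this differently */

struct toy_tsnmap {
    uint32_t base_tsn;
    uint8_t tsn_map[MAP_LEN];
    uint8_t overflow_map[MAP_LEN];
};

static void toy_renege(struct toy_tsnmap *map, uint32_t tsn)
{
    uint32_t gap;

    if (tsn < map->base_tsn)                 /* below the window */
        return;
    if (tsn >= map->base_tsn + 2 * MAP_LEN)  /* beyond the overflow map */
        return;

    gap = tsn - map->base_tsn;
    if (gap < MAP_LEN)
        map->tsn_map[gap] = 0;               /* pretend we never saw it */
    else
        map->overflow_map[gap - MAP_LEN] = 0;
}

int main(void)
{
    struct toy_tsnmap map = { .base_tsn = 100 };

    map.tsn_map[5] = 1;      /* TSN 105 was seen */
    map.overflow_map[2] = 1; /* TSN 110 was seen */
    toy_renege(&map, 105);
    toy_renege(&map, 110);
    printf("105 seen: %u, 110 seen: %u\n",
           (unsigned)map.tsn_map[5], (unsigned)map.overflow_map[2]);
    return 0;
}

In this model, toy_renege(&map, 120) would fall outside both maps and be ignored, mirroring the early returns above.
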
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ff1a6415c72d..9f8d3b7826ee 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -655,32 +655,119 @@ static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
return event;
}
+/* Renege 'needed' bytes from the ordering queue. */
+static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+{
+ __u16 freed = 0;
+ __u32 tsn;
+ struct sk_buff *skb;
+ struct sctp_ulpevent *event;
+ struct sctp_tsnmap *tsnmap;
+
+ tsnmap = &ulpq->asoc->peer.tsn_map;
+
+ while ((skb = __skb_dequeue_tail(&ulpq->lobby))) {
+ freed += skb_headlen(skb);
+ event = sctp_skb2event(skb);
+ tsn = event->sndrcvinfo.sinfo_tsn;
+
+ sctp_ulpevent_free(event);
+ sctp_tsnmap_renege(tsnmap, tsn);
+ if (freed >= needed)
+ return freed;
+ }
+
+ return freed;
+}
+
+/* Renege 'needed' bytes from the reassembly queue. */
+static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
+{
+ __u16 freed = 0;
+ __u32 tsn;
+ struct sk_buff *skb;
+ struct sctp_ulpevent *event;
+ struct sctp_tsnmap *tsnmap;
+
+ tsnmap = &ulpq->asoc->peer.tsn_map;
+
+ /* Walk backwards through the list, reneging the newest TSNs. */
+ while ((skb = __skb_dequeue_tail(&ulpq->reasm))) {
+ freed += skb_headlen(skb);
+ event = sctp_skb2event(skb);
+ tsn = event->sndrcvinfo.sinfo_tsn;
+
+ sctp_ulpevent_free(event);
+ sctp_tsnmap_renege(tsnmap, tsn);
+ if (freed >= needed)
+ return freed;
+ }
+
+ return freed;
+}
+
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk, int priority)
{
struct sctp_ulpevent *event;
+ struct sctp_association *asoc;
+
+ asoc = ulpq->asoc;
/* Are we already in partial delivery mode? */
- if (!sctp_sk(ulpq->asoc->base.sk)->pd_mode) {
+ if (!sctp_sk(asoc->base.sk)->pd_mode) {
/* Is partial delivery possible? */
event = sctp_ulpq_retrieve_first(ulpq);
/* Send event to the ULP. */
if (event) {
sctp_ulpq_tail_event(ulpq, event);
- sctp_sk(ulpq->asoc->base.sk)->pd_mode = 1;
+ sctp_sk(asoc->base.sk)->pd_mode = 1;
ulpq->pd_mode = 1;
return;
}
}
+}
- /* Assert: Either already in partial delivery mode or partial
- * delivery wasn't possible, so now the only recourse is
- * to renege. FIXME: Add renege support starts here.
- */
+/* Renege some packets to make room for an incoming chunk. */
+void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+ int priority)
+{
+ struct sctp_association *asoc;
+ __u16 needed, freed;
+
+ asoc = ulpq->asoc;
+
+ if (chunk) {
+ needed = ntohs(chunk->chunk_hdr->length);
+ needed -= sizeof(sctp_data_chunk_t);
+ } else
+ needed = SCTP_DEFAULT_MAXWINDOW;
+
+ freed = 0;
+
+ if (skb_queue_empty(&asoc->base.sk->receive_queue)) {
+ freed = sctp_ulpq_renege_order(ulpq, needed);
+ if (freed < needed) {
+ freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
+ }
+ }
+ /* If able to free enough room, accept this chunk. */
+ if (chunk && (freed >= needed)) {
+ __u32 tsn;
+ tsn = ntohl(chunk->subh.data_hdr->tsn);
+ sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
+ sctp_ulpq_tail_data(ulpq, chunk, priority);
+
+ sctp_ulpq_partial_delivery(ulpq, chunk, priority);
+ }
+
+ return;
}
+
+
/* Notify the application if an association is aborted and in
* partial delivery mode. Send up any pending received messages.
*/
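
The core of the new renege path in sctp_ulpq_renege() is byte accounting: compute how many bytes the incoming chunk needs, reclaim from the ordering queue (lobby) first and then the reassembly queue, and accept the chunk only if enough was freed. A standalone model of that accounting, with the queues reduced to lists of event sizes (illustrative values, not kernel code):

/*
 * Standalone model of the renege byte accounting: drop newest entries
 * first, ordering queue before reassembly queue, until 'needed' bytes
 * are reclaimed or the queues run dry.
 */
#include <stdio.h>

static unsigned int renege_from(unsigned int *sizes, unsigned int *count,
                                unsigned int needed)
{
    unsigned int freed = 0;

    /* Drop newest entries first, as the kernel walks the tail. */
    while (*count && freed < needed)
        freed += sizes[--(*count)];
    return freed;
}

int main(void)
{
    unsigned int lobby[] = { 200, 300 }; /* ordering queue */
    unsigned int reasm[] = { 500, 100 }; /* reassembly queue */
    unsigned int nlobby = 2, nreasm = 2;
    unsigned int needed = 900, freed;

    freed = renege_from(lobby, &nlobby, needed);
    if (freed < needed)
        freed += renege_from(reasm, &nreasm, needed - freed);

    printf("needed %u, freed %u -> %s the chunk\n", needed, freed,
           freed >= needed ? "accept" : "drop");
    return 0;
}
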