summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSridhar Samudrala <sri@us.ibm.com>2003-02-17 18:09:28 -0800
committerSridhar Samudrala <sri@us.ibm.com>2003-02-17 18:09:28 -0800
commit9967b51fc01aae6d822ad62ff031ff2d656d5b10 (patch)
treee956f79d598e25a7a42faab23d0794ec992c20fa
parent611f4c044cbd8595d40d09a34981e57aa8aa5f30 (diff)
parent13970d8e1ff3451967050309ce14ae2b6160bfd5 (diff)
Merge us.ibm.com:/home/sridhar/BK/linux-2.5.62
into us.ibm.com:/home/sridhar/BK/lksctp-2.5.62
-rw-r--r--include/net/sctp/command.h5
-rw-r--r--include/net/sctp/sctp.h1
-rw-r--r--include/net/sctp/structs.h85
-rw-r--r--include/net/sctp/tsnmap.h96
-rw-r--r--include/net/sctp/ulpevent.h162
-rw-r--r--include/net/sctp/ulpqueue.h15
-rw-r--r--include/net/sctp/user.h4
-rw-r--r--net/sctp/Makefile2
-rw-r--r--net/sctp/associola.c73
-rw-r--r--net/sctp/endpointola.c14
-rw-r--r--net/sctp/input.c11
-rw-r--r--net/sctp/inqueue.c26
-rw-r--r--net/sctp/ipv6.c4
-rw-r--r--net/sctp/output.c1
-rw-r--r--net/sctp/outqueue.c13
-rw-r--r--net/sctp/proc.c128
-rw-r--r--net/sctp/protocol.c23
-rw-r--r--net/sctp/sm_make_chunk.c36
-rw-r--r--net/sctp/sm_sideeffect.c29
-rw-r--r--net/sctp/sm_statefuns.c126
-rw-r--r--net/sctp/socket.c184
-rw-r--r--net/sctp/tsnmap.c116
-rw-r--r--net/sctp/ulpevent.c305
-rw-r--r--net/sctp/ulpqueue.c367
24 files changed, 1254 insertions, 572 deletions
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 9cb1b7087423..03ab4422b683 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -86,6 +86,7 @@ typedef enum {
SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
+ SCTP_CMD_CHUNK_PD, /* Partial data delivery considerations. */
SCTP_CMD_LAST
} sctp_verb_t;
@@ -115,7 +116,7 @@ typedef union {
struct sctp_transport *transport;
sctp_bind_addr_t *bp;
sctp_init_chunk_t *init;
- sctp_ulpevent_t *ulpevent;
+ struct sctp_ulpevent *ulpevent;
sctp_packet_t *packet;
sctp_sackhdr_t *sackh;
} sctp_arg_t;
@@ -163,7 +164,7 @@ SCTP_ARG_CONSTRUCTOR(ASOC, sctp_association_t *, asoc)
SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport)
SCTP_ARG_CONSTRUCTOR(BA, sctp_bind_addr_t *, bp)
SCTP_ARG_CONSTRUCTOR(PEER_INIT, sctp_init_chunk_t *, init)
-SCTP_ARG_CONSTRUCTOR(ULPEVENT, sctp_ulpevent_t *, ulpevent)
+SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent)
SCTP_ARG_CONSTRUCTOR(PACKET, sctp_packet_t *, packet)
SCTP_ARG_CONSTRUCTOR(SACKH, sctp_sackhdr_t *, sackh)
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 4ce62633f2e5..b2e19ebde563 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -214,6 +214,7 @@ DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
#define SCTP_INC_STATS(field) SNMP_INC_STATS(sctp_statistics, field)
#define SCTP_INC_STATS_BH(field) SNMP_INC_STATS_BH(sctp_statistics, field)
#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
+#define SCTP_DEC_STATS(field) SNMP_DEC_STATS(sctp_statistics, field)
/* Determine if this is a valid kernel address. */
static inline int sctp_is_valid_kaddr(unsigned long addr)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 45bd3fc4df77..d136122af892 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -2,7 +2,7 @@
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
- * Copyright (c) 2001-2002 International Business Machines Corp.
+ * Copyright (c) 2001-2003 International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
@@ -105,27 +105,25 @@ union sctp_addr {
/* Forward declarations for data structures. */
struct sctp_protocol;
-struct SCTP_endpoint;
-struct SCTP_association;
+struct sctp_endpoint;
+struct sctp_association;
struct sctp_transport;
-struct SCTP_packet;
-struct SCTP_chunk;
-struct SCTP_inqueue;
+struct sctp_packet;
+struct sctp_chunk;
+struct sctp_inq;
struct sctp_outq;
-struct SCTP_bind_addr;
+struct sctp_bind_addr;
struct sctp_ulpq;
struct sctp_opt;
struct sctp_endpoint_common;
struct sctp_ssnmap;
typedef struct sctp_protocol sctp_protocol_t;
-typedef struct SCTP_endpoint sctp_endpoint_t;
-typedef struct SCTP_association sctp_association_t;
-typedef struct SCTP_packet sctp_packet_t;
-typedef struct SCTP_chunk sctp_chunk_t;
-typedef struct SCTP_inqueue sctp_inqueue_t;
-typedef struct SCTP_bind_addr sctp_bind_addr_t;
-typedef struct sctp_opt sctp_opt_t;
+typedef struct sctp_endpoint sctp_endpoint_t;
+typedef struct sctp_association sctp_association_t;
+typedef struct sctp_packet sctp_packet_t;
+typedef struct sctp_chunk sctp_chunk_t;
+typedef struct sctp_bind_addr sctp_bind_addr_t;
typedef struct sctp_endpoint_common sctp_endpoint_common_t;
#include <net/sctp/tsnmap.h>
@@ -250,10 +248,10 @@ struct sctp_af {
int optname,
char *optval,
int *optlen);
- struct dst_entry *(*get_dst) (sctp_association_t *asoc,
+ struct dst_entry *(*get_dst) (struct sctp_association *asoc,
union sctp_addr *daddr,
union sctp_addr *saddr);
- void (*get_saddr) (sctp_association_t *asoc,
+ void (*get_saddr) (struct sctp_association *asoc,
struct dst_entry *dst,
union sctp_addr *daddr,
union sctp_addr *saddr);
@@ -289,7 +287,7 @@ int sctp_register_af(struct sctp_af *);
/* Protocol family functions. */
struct sctp_pf {
- void (*event_msgname)(sctp_ulpevent_t *, char *, int *);
+ void (*event_msgname)(struct sctp_ulpevent *, char *, int *);
void (*skb_msgname) (struct sk_buff *, char *, int *);
int (*af_supported) (sa_family_t);
int (*cmp_addr) (const union sctp_addr *,
@@ -311,6 +309,9 @@ struct sctp_opt {
/* What kind of a socket is this? */
sctp_socket_type_t type;
+ /* PF_ family specific functions. */
+ struct sctp_pf *pf;
+
/* What is our base endpointer? */
sctp_endpoint_t *ep;
@@ -324,7 +325,10 @@ struct sctp_opt {
__u32 autoclose;
__u8 nodelay;
__u8 disable_fragments;
- struct sctp_pf *pf;
+ __u8 pd_mode;
+
+ /* Receive to here while partial delivery is in effect. */
+ struct sk_buff_head pd_lobby;
};
@@ -484,7 +488,7 @@ static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
* As a matter of convenience, we remember the SCTP common header for
* each chunk as well as a few other header pointers...
*/
-struct SCTP_chunk {
+struct sctp_chunk {
/* These first three elements MUST PRECISELY match the first
* three elements of struct sk_buff. This allows us to reuse
* all the skb_* queue management functions.
@@ -594,7 +598,7 @@ typedef sctp_chunk_t *(sctp_packet_phandler_t)(sctp_association_t *);
/* This structure holds lists of chunks as we are assembling for
* transmission.
*/
-struct SCTP_packet {
+struct sctp_packet {
/* These are the SCTP header values (host order) for the packet. */
__u16 source_port;
__u16 destination_port;
@@ -846,8 +850,8 @@ unsigned long sctp_transport_timeout(struct sctp_transport *);
/* This is the structure we use to queue packets as they come into
* SCTP. We write packets to it and read chunks from it.
*/
-struct SCTP_inqueue {
- /* This is actually a queue of sctp_chunk_t each
+struct sctp_inq {
+ /* This is actually a queue of sctp_chunk each
* containing a partially decoded packet.
*/
struct sk_buff_head in;
@@ -864,13 +868,12 @@ struct SCTP_inqueue {
int malloced; /* Is this structure kfree()able? */
};
-sctp_inqueue_t *sctp_inqueue_new(void);
-void sctp_inqueue_init(sctp_inqueue_t *);
-void sctp_inqueue_free(sctp_inqueue_t *);
-void sctp_push_inqueue(sctp_inqueue_t *, sctp_chunk_t *packet);
-sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *);
-void sctp_inqueue_set_th_handler(sctp_inqueue_t *,
- void (*)(void *), void *);
+struct sctp_inq *sctp_inq_new(void);
+void sctp_inq_init(struct sctp_inq *);
+void sctp_inq_free(struct sctp_inq *);
+void sctp_inq_push(struct sctp_inq *, sctp_chunk_t *packet);
+struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
+void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
/* This is the structure we use to hold outbound chunks. You push
* chunks in and they automatically pop out the other end as bundled
@@ -954,7 +957,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
/* These bind address data fields common between endpoints and associations */
-struct SCTP_bind_addr {
+struct sctp_bind_addr {
/* RFC 2960 12.1 Parameters necessary for the SCTP instance
*
@@ -1043,7 +1046,7 @@ struct sctp_endpoint_common {
struct sock *sk;
/* This is where we receive inbound chunks. */
- sctp_inqueue_t inqueue;
+ struct sctp_inq inqueue;
/* This substructure includes the defining parameters of the
* endpoint:
@@ -1076,7 +1079,7 @@ struct sctp_endpoint_common {
* off one of these.
*/
-struct SCTP_endpoint {
+struct sctp_endpoint {
/* Common substructure for endpoint and association. */
sctp_endpoint_common_t base;
@@ -1172,7 +1175,7 @@ __u32 sctp_generate_tsn(const sctp_endpoint_t *ep);
/* Here we have information about each individual association. */
-struct SCTP_association {
+struct sctp_association {
/* A base structure common to endpoint and association.
* In this context, it represents the associations's view
@@ -1288,19 +1291,11 @@ struct SCTP_association {
* used in the bulk of the text. This value is hidden
* in tsn_map--we get it by calling sctp_tsnmap_get_ctsn().
*/
- sctp_tsnmap_t tsn_map;
+ struct sctp_tsnmap tsn_map;
__u8 _map[sctp_tsnmap_storage_size(SCTP_TSN_MAP_SIZE)];
- /* We record duplicate TSNs here. We clear this after
- * every SACK.
- * FIXME: We should move this into the tsnmap? --jgrimm
- */
- sctp_dup_tsn_t dup_tsns[SCTP_MAX_DUP_TSNS];
- int next_dup_tsn;
-
/* Do we need to sack the peer? */
- uint8_t sack_needed;
-
+ __u8 sack_needed;
/* These are capabilities which our peer advertised. */
__u8 ecn_capable; /* Can peer do ECN? */
__u8 ipv4_address; /* Peer understands IPv4 addresses? */
@@ -1457,7 +1452,10 @@ struct SCTP_association {
struct {
__u16 stream;
+ __u16 flags;
__u32 ppid;
+ __u32 context;
+ __u32 timetolive;
} defaults;
/* This tracks outbound ssn for a given stream. */
@@ -1615,6 +1613,7 @@ void sctp_association_put(sctp_association_t *);
void sctp_association_hold(sctp_association_t *);
struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *);
+void sctp_assoc_update_retran_path(sctp_association_t *);
struct sctp_transport *sctp_assoc_lookup_paddr(const sctp_association_t *,
const union sctp_addr *);
struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *,
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
index af775264a785..70bea4ce1a98 100644
--- a/include/net/sctp/tsnmap.h
+++ b/include/net/sctp/tsnmap.h
@@ -1,35 +1,35 @@
/* SCTP kernel reference Implementation Copyright (C) 1999-2001
* Cisco, Motorola, Intel, and International Business Machines Corp.
- *
+ *
* This file is part of the SCTP kernel reference Implementation
- *
+ *
* These are the definitions needed for the tsnmap type. The tsnmap is used
* to track out of order TSNs received.
- *
- * The SCTP reference implementation is free software;
- * you can redistribute it and/or modify it under the terms of
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
- *
- * the SCTP reference implementation is distributed in the hope that it
+ *
+ * the SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
- *
+ * Boston, MA 02111-1307, USA.
+ *
* Please send any bug reports or fixes you make to one of the
* following email addresses:
- *
+ *
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
- *
+ *
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
@@ -38,8 +38,6 @@
#ifndef __sctp_tsnmap_h__
#define __sctp_tsnmap_h__
-
-
/* RFC 2960 12.2 Parameters necessary per association (i.e. the TCB)
* Mapping An array of bits or bytes indicating which out of
* Array order TSN's have been received (relative to the
@@ -48,9 +46,7 @@
* will be set to all zero. This structure may be
* in the form of a circular buffer or bit array.
*/
-typedef struct sctp_tsnmap {
-
-
+struct sctp_tsnmap {
/* This array counts the number of chunks with each TSN.
* It points at one of the two buffers with which we will
* ping-pong between.
@@ -93,25 +89,30 @@ typedef struct sctp_tsnmap {
/* This is the highest TSN we've marked. */
__u32 max_tsn_seen;
- /* No. of data chunks pending receipt. used by SCTP_STATUS sockopt */
+ /* Data chunks pending receipt. used by SCTP_STATUS sockopt */
__u16 pending_data;
+ /* We record duplicate TSNs here. We clear this after
+ * every SACK. Store up to SCTP_MAX_DUP_TSNS worth of
+ * information.
+ */
+ __u32 dup_tsns[SCTP_MAX_DUP_TSNS];
+ __u16 num_dup_tsns;
+
int malloced;
__u8 raw_map[0];
-} sctp_tsnmap_t;
+};
-typedef struct sctp_tsnmap_iter {
+struct sctp_tsnmap_iter {
__u32 start;
-} sctp_tsnmap_iter_t;
-
+};
/* Create a new tsnmap. */
-sctp_tsnmap_t *sctp_tsnmap_new(__u16 len, __u32 initial_tsn,
- int priority);
+struct sctp_tsnmap *sctp_tsnmap_new(__u16 len, __u32 init_tsn, int priority);
/* Dispose of a tsnmap. */
-void sctp_tsnmap_free(sctp_tsnmap_t *map);
+void sctp_tsnmap_free(struct sctp_tsnmap *);
/* This macro assists in creation of external storage for variable length
* internal buffers. We double allocate so the overflow map works.
@@ -119,9 +120,8 @@ void sctp_tsnmap_free(sctp_tsnmap_t *map);
#define sctp_tsnmap_storage_size(count) (sizeof(__u8) * (count) * 2)
/* Initialize a block of memory as a tsnmap. */
-sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn);
-
-
+struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *, __u16 len,
+ __u32 initial_tsn);
/* Test the tracking state of this TSN.
* Returns:
@@ -129,29 +129,51 @@ sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn
* >0 if the TSN has been seen (duplicate)
* <0 if the TSN is invalid (too large to track)
*/
-int sctp_tsnmap_check(const sctp_tsnmap_t *map, __u32 tsn);
+int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
/* Mark this TSN as seen. */
-void sctp_tsnmap_mark(sctp_tsnmap_t *map, __u32 tsn);
+void sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
/* Retrieve the Cumulative TSN ACK Point. */
-__u32 sctp_tsnmap_get_ctsn(const sctp_tsnmap_t *map);
+__u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *);
/* Retrieve the highest TSN we've seen. */
-__u32 sctp_tsnmap_get_max_tsn_seen(const sctp_tsnmap_t *map);
+__u32 sctp_tsnmap_get_max_tsn_seen(const struct sctp_tsnmap *);
+
+/* How many Duplicate TSNs are stored? */
+static inline __u16 sctp_tsnmap_num_dups(struct sctp_tsnmap *map)
+{
+ return map->num_dup_tsns;
+}
+
+/* Return pointer to duplicate tsn array as needed by SACK. */
+static inline __u32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
+{
+ map->num_dup_tsns = 0;
+ return map->dup_tsns;
+}
+
+/* Mark a duplicate TSN. Note: we limit how many we are willing to
+ * store and consequently report.
+ */
+static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)
+{
+ if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS)
+ map->dup_tsns[map->num_dup_tsns++] = tsn;
+}
/* Is there a gap in the TSN map? */
-int sctp_tsnmap_has_gap(const sctp_tsnmap_t *map);
+int sctp_tsnmap_has_gap(const struct sctp_tsnmap *);
/* Initialize a gap ack block interator from user-provided memory. */
-void sctp_tsnmap_iter_init(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter);
+void sctp_tsnmap_iter_init(const struct sctp_tsnmap *,
+ struct sctp_tsnmap_iter *);
/* Get the next gap ack blocks. We return 0 if there are no more
* gap ack blocks.
*/
-int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
- __u16 *start, __u16 *end);
-
+int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *,
+ struct sctp_tsnmap_iter *,__u16 *start, __u16 *end);
#endif /* __sctp_tsnmap_h__ */
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 6e90b83a7013..8d0edaf22025 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -6,34 +6,34 @@
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
- * These are the definitions needed for the sctp_ulpevent type. The
+ * These are the definitions needed for the sctp_ulpevent type. The
* sctp_ulpevent type is used to carry information from the state machine
- * upwards to the ULP.
- *
- * The SCTP reference implementation is free software;
- * you can redistribute it and/or modify it under the terms of
+ * upwards to the ULP.
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
- *
- * the SCTP reference implementation is distributed in the hope that it
+ *
+ * the SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
- *
+ * Boston, MA 02111-1307, USA.
+ *
* Please send any bug reports or fixes you make to one of the
* following email addresses:
- *
+ *
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
- *
+ *
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
@@ -46,85 +46,97 @@
/* Warning: This sits inside an skb.cb[] area. Be very careful of
* growing this structure as it is at the maximum limit now.
*/
-typedef struct sctp_ulpevent {
- int malloced;
- sctp_association_t *asoc;
- struct sk_buff *parent;
+struct sctp_ulpevent {
+ struct sctp_association *asoc;
struct sctp_sndrcvinfo sndrcvinfo;
- int chunk_flags; /* Temp. until we get a new chunk_t */
int msg_flags;
-} sctp_ulpevent_t;
-
-
-sctp_ulpevent_t *sctp_ulpevent_new(int size, int msg_flags, int priority);
-
-sctp_ulpevent_t *sctp_ulpevent_init(sctp_ulpevent_t *event, struct sk_buff *skb, int msg_flags);
-
-void sctp_ulpevent_free(sctp_ulpevent_t *event);
-
-int sctp_ulpevent_is_notification(const sctp_ulpevent_t *event);
-
-sctp_ulpevent_t *sctp_ulpevent_make_assoc_change(
- const struct SCTP_association *asoc,
- __u16 flags,
- __u16 state,
- __u16 error,
- __u16 outbound,
- __u16 inbound,
- int priority);
-
-sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
- const struct SCTP_association *asoc,
- const struct sockaddr_storage *aaddr,
- int flags,
- int state,
- int error,
- int priority);
-
-sctp_ulpevent_t *sctp_ulpevent_make_remote_error(
- const struct SCTP_association *asoc,
- struct SCTP_chunk *chunk,
- __u16 flags,
- int priority);
-sctp_ulpevent_t *sctp_ulpevent_make_send_failed(
- const struct SCTP_association *asoc,
- struct SCTP_chunk *chunk,
- __u16 flags,
- __u32 error,
- int priority);
-
-sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
- const struct SCTP_association *asoc,
- __u16 flags,
- int priority);
-
-sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(struct SCTP_association *asoc,
- struct SCTP_chunk *chunk,
- int priority);
-
-void sctp_ulpevent_read_sndrcvinfo(const sctp_ulpevent_t *event,
- struct msghdr *msghdr);
-
-__u16 sctp_ulpevent_get_notification_type(const sctp_ulpevent_t *event);
+};
+/* Retrieve the skb this event sits inside of. */
+static inline struct sk_buff *sctp_event2skb(struct sctp_ulpevent *ev)
+{
+ return container_of((void *)ev, struct sk_buff, cb);
+}
+/* Retrieve & cast the event sitting inside the skb. */
+static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
+{
+ return (struct sctp_ulpevent *)skb->cb;
+}
+
+struct sctp_ulpevent *sctp_ulpevent_new(int size, int flags, int priority);
+struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *, int flags);
+void sctp_ulpevent_free(struct sctp_ulpevent *);
+int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
+
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
+ const struct sctp_association *asoc,
+ __u16 flags,
+ __u16 state,
+ __u16 error,
+ __u16 outbound,
+ __u16 inbound,
+ int priority);
+
+struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
+ const struct sctp_association *asoc,
+ const struct sockaddr_storage *aaddr,
+ int flags,
+ int state,
+ int error,
+ int priority);
+
+struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ int priority);
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ __u32 error,
+ int priority);
+
+struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
+ const struct sctp_association *asoc,
+ __u16 flags,
+ int priority);
+
+struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
+ const struct sctp_association *asoc,
+ __u32 indication, int priority);
+
+struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ int priority);
+
+void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *);
+__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+
+/* Is this event type enabled? */
+static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
+ struct sctp_event_subscribe *mask)
+{
+ char *amask = (char *) mask;
+ return amask[sn_type - SCTP_SN_TYPE_BASE];
+}
/* Given an event subscription, is this event enabled? */
-static inline int sctp_ulpevent_is_enabled(const sctp_ulpevent_t *event,
- const struct sctp_event_subscribe *mask)
+static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
+ struct sctp_event_subscribe *mask)
{
- const char *amask = (const char *) mask;
__u16 sn_type;
int enabled = 1;
if (sctp_ulpevent_is_notification(event)) {
sn_type = sctp_ulpevent_get_notification_type(event);
- enabled = amask[sn_type - SCTP_SN_TYPE_BASE];
+ enabled = sctp_ulpevent_type_enabled(sn_type, mask);
}
return enabled;
}
-
#endif /* __sctp_ulpevent_h__ */
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index 689abb810eb2..dd7823b0a737 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -48,7 +48,8 @@
/* A structure to carry information to the ULP (e.g. Sockets API) */
struct sctp_ulpq {
- int malloced;
+ char malloced;
+ char pd_mode;
sctp_association_t *asoc;
struct sk_buff_head reasm;
struct sk_buff_head lobby;
@@ -60,13 +61,19 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, sctp_association_t *);
void sctp_ulpq_free(struct sctp_ulpq *);
/* Add a new DATA chunk for processing. */
-int sctp_ulpq_tail_data(struct sctp_ulpq *, sctp_chunk_t *chunk, int priority);
+int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, int);
/* Add a new event for propogation to the ULP. */
int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
-/* Is the ulpqueue empty. */
-int sctp_ulpqueue_is_empty(struct sctp_ulpq *);
+/* Perform partial delivery. */
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, int);
+
+/* Abort the partial delivery. */
+void sctp_ulpq_abort_pd(struct sctp_ulpq *, int);
+
+/* Clear the partial data delivery condition on this socket. */
+int sctp_clear_pd(struct sock *sk);
#endif /* __sctp_ulpqueue_h__ */
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index e95ef92ff9b9..69e241b1a88a 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -166,6 +166,7 @@ struct sctp_sndrcvinfo {
__u32 sinfo_context;
__u32 sinfo_timetolive;
__u32 sinfo_tsn;
+ __u32 sinfo_cumtsn;
sctp_assoc_t sinfo_assoc_id;
};
@@ -367,6 +368,7 @@ struct sctp_rcv_pdapi_event {
sctp_assoc_t pdapi_assoc_id;
};
+enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
/*
* Described in Section 7.3
@@ -414,8 +416,8 @@ enum sctp_sn_type {
SCTP_SN_TYPE_BASE = (1<<15),
SCTP_ASSOC_CHANGE,
SCTP_PEER_ADDR_CHANGE,
- SCTP_REMOTE_ERROR,
SCTP_SEND_FAILED,
+ SCTP_REMOTE_ERROR,
SCTP_SHUTDOWN_EVENT,
SCTP_PARTIAL_DELIVERY_EVENT,
SCTP_ADAPTION_INDICATION,
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 162f9b11086f..545fad836084 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -10,7 +10,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
inqueue.o outqueue.o ulpqueue.o command.o \
tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o hashdriver.o sla1.o \
- debug.o ssnmap.o
+ debug.o ssnmap.o proc.o
ifeq ($(CONFIG_SCTP_ADLER32), y)
sctp-y += adler32.o
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 1f8e0b094a73..916c11d97ac1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -95,7 +95,7 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
sctp_scope_t scope,
int priority)
{
- sctp_opt_t *sp;
+ struct sctp_opt *sp;
int i;
/* Retrieve the SCTP per socket area. */
@@ -241,8 +241,8 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
asoc->peer.sack_needed = 1;
/* Create an input queue. */
- sctp_inqueue_init(&asoc->base.inqueue);
- sctp_inqueue_set_th_handler(&asoc->base.inqueue,
+ sctp_inq_init(&asoc->base.inqueue);
+ sctp_inq_set_th_handler(&asoc->base.inqueue,
(void (*)(void *))sctp_assoc_bh_rcv,
asoc);
@@ -260,7 +260,6 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
/* Set up the tsn tracking. */
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, 0);
- asoc->peer.next_dup_tsn = 0;
skb_queue_head_init(&asoc->addip_chunks);
@@ -311,7 +310,7 @@ void sctp_association_free(sctp_association_t *asoc)
sctp_ulpq_free(&asoc->ulpq);
/* Dispose of any pending chunks on the inqueue. */
- sctp_inqueue_free(&asoc->base.inqueue);
+ sctp_inq_free(&asoc->base.inqueue);
/* Free ssnmap storage. */
sctp_ssnmap_free(asoc->ssnmap);
@@ -368,7 +367,7 @@ struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *asoc,
int priority)
{
struct sctp_transport *peer;
- sctp_opt_t *sp;
+ struct sctp_opt *sp;
unsigned short port;
/* AF_INET and AF_INET6 share common port field. */
@@ -505,7 +504,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
struct sctp_transport *t = NULL;
struct sctp_transport *first;
struct sctp_transport *second;
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
struct list_head *pos;
int spc_state = 0;
@@ -776,7 +775,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
sctp_endpoint_t *ep;
sctp_chunk_t *chunk;
struct sock *sk;
- sctp_inqueue_t *inqueue;
+ struct sctp_inq *inqueue;
int state, subtype;
sctp_assoc_t associd = sctp_assoc2id(asoc);
int error = 0;
@@ -786,7 +785,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
sk = asoc->base.sk;
inqueue = &asoc->base.inqueue;
- while (NULL != (chunk = sctp_pop_inqueue(inqueue))) {
+ while (NULL != (chunk = sctp_inq_pop(inqueue))) {
state = asoc->state;
subtype = chunk->chunk_hdr->type;
@@ -795,6 +794,8 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
*/
if (sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
+ else
+ SCTP_INC_STATS(SctpInCtrlChunks);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
@@ -819,7 +820,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk)
{
- sctp_opt_t *newsp = sctp_sk(newsk);
+ struct sctp_opt *newsp = sctp_sk(newsk);
/* Delete the association from the old endpoint's list of
* associations.
@@ -848,7 +849,6 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
/* Copy in new parameters of peer. */
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
- asoc->peer.next_dup_tsn = new->peer.next_dup_tsn;
asoc->peer.sack_needed = new->peer.sack_needed;
asoc->peer.i = new->peer.i;
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE,
@@ -887,26 +887,19 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
}
-/* Choose the transport for sending a shutdown packet.
+/* Update the retran path for sending a retransmitted packet.
* Round-robin through the active transports, else round-robin
* through the inactive transports as this is the next best thing
* we can try.
*/
-struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *asoc)
+void sctp_assoc_update_retran_path(sctp_association_t *asoc)
{
struct sctp_transport *t, *next;
struct list_head *head = &asoc->peer.transport_addr_list;
struct list_head *pos;
- /* If this is the first time SHUTDOWN is sent, use the active
- * path.
- */
- if (!asoc->shutdown_last_sent_to)
- return asoc->peer.active_path;
-
- /* Otherwise, find the next transport in a round-robin fashion. */
-
- t = asoc->shutdown_last_sent_to;
+ /* Find the next transport in a round-robin fashion. */
+ t = asoc->peer.retran_path;
pos = &t->transports;
next = NULL;
@@ -935,13 +928,30 @@ struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *
* other active transports. If so, use the next
* transport.
*/
- if (t == asoc->shutdown_last_sent_to) {
+ if (t == asoc->peer.retran_path) {
t = next;
break;
}
}
- return t;
+ asoc->peer.retran_path = t;
+}
+
+/* Choose the transport for sending a SHUTDOWN packet. */
+struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *asoc)
+{
+ /* If this is the first time SHUTDOWN is sent, use the active path,
+ * else use the retran path. If the last SHUTDOWN was sent over the
+ * retran path, update the retran path and use it.
+ */
+ if (!asoc->shutdown_last_sent_to)
+ return asoc->peer.active_path;
+ else {
+ if (asoc->shutdown_last_sent_to == asoc->peer.retran_path)
+ sctp_assoc_update_retran_path(asoc);
+ return asoc->peer.retran_path;
+ }
+
}
/* Update the association's pmtu and frag_point by going through all the
@@ -990,13 +1000,13 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
asoc->rwnd += len;
}
- SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) - %u\n",
- __FUNCTION__, asoc, len, asoc->rwnd, asoc->rwnd_over,
- asoc->a_rwnd);
+ SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
+ "- %u\n", __FUNCTION__, asoc, len, asoc->rwnd,
+ asoc->rwnd_over, asoc->a_rwnd);
/* Send a window update SACK if the rwnd has increased by at least the
* minimum of the association's PMTU and half of the receive buffer.
- * The algorithm used is similar to the one described in
+ * The algorithm used is similar to the one described in
* Section 4.2.3.3 of RFC 1122.
*/
if ((asoc->state == SCTP_STATE_ESTABLISHED) &&
@@ -1006,15 +1016,14 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
"rwnd: %u a_rwnd: %u\n",
__FUNCTION__, asoc, asoc->rwnd, asoc->a_rwnd);
- sack = sctp_make_sack(asoc);
+ sack = sctp_make_sack(asoc);
if (!sack)
- return;
+ return;
/* Update the last advertised rwnd value. */
asoc->a_rwnd = asoc->rwnd;
asoc->peer.sack_needed = 0;
- asoc->peer.next_dup_tsn = 0;
sctp_outq_tail(&asoc->outqueue, sack);
@@ -1022,7 +1031,7 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
if (timer_pending(timer) && del_timer(timer))
sctp_association_put(asoc);
- }
+ }
}
/* Decrease asoc's rwnd by len. */
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 4932831903aa..8efbd4af013e 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -92,7 +92,7 @@ fail:
sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep, sctp_protocol_t *proto,
struct sock *sk, int priority)
{
- sctp_opt_t *sp = sctp_sk(sk);
+ struct sctp_opt *sp = sctp_sk(sk);
memset(ep, 0, sizeof(sctp_endpoint_t));
/* Initialize the base structure. */
@@ -105,10 +105,10 @@ sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep, sctp_protocol_t *proto,
ep->base.malloced = 1;
/* Create an input queue. */
- sctp_inqueue_init(&ep->base.inqueue);
+ sctp_inq_init(&ep->base.inqueue);
/* Set its top-half handler */
- sctp_inqueue_set_th_handler(&ep->base.inqueue,
+ sctp_inq_set_th_handler(&ep->base.inqueue,
(void (*)(void *))sctp_endpoint_bh_rcv,
ep);
@@ -198,7 +198,7 @@ void sctp_endpoint_destroy(sctp_endpoint_t *ep)
sctp_unhash_endpoint(ep);
/* Cleanup the inqueue. */
- sctp_inqueue_free(&ep->base.inqueue);
+ sctp_inq_free(&ep->base.inqueue);
sctp_bind_addr_free(&ep->base.bind_addr);
@@ -333,7 +333,7 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
struct sock *sk;
struct sctp_transport *transport;
sctp_chunk_t *chunk;
- sctp_inqueue_t *inqueue;
+ struct sctp_inq *inqueue;
sctp_subtype_t subtype;
sctp_state_t state;
int error = 0;
@@ -345,7 +345,7 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
inqueue = &ep->base.inqueue;
sk = ep->base.sk;
- while (NULL != (chunk = sctp_pop_inqueue(inqueue))) {
+ while (NULL != (chunk = sctp_inq_pop(inqueue))) {
subtype.chunk = chunk->chunk_hdr->type;
/* We might have grown an association since last we
@@ -369,6 +369,8 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
*/
if (asoc && sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
+ else
+ SCTP_INC_STATS(SctpInCtrlChunks);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a6aabd3d36a4..8e67351f419d 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -90,6 +90,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
if (val != cmp) {
/* CRC failure, dump it. */
+ SCTP_INC_STATS_BH(SctpChecksumErrors);
return -1;
}
return 0;
@@ -115,6 +116,8 @@ int sctp_rcv(struct sk_buff *skb)
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
+ SCTP_INC_STATS_BH(SctpInSCTPPacks);
+
sh = (struct sctphdr *) skb->h.raw;
/* Pull up the IP and SCTP headers. */
@@ -160,8 +163,10 @@ int sctp_rcv(struct sk_buff *skb)
*/
if (!asoc) {
ep = __sctp_rcv_lookup_endpoint(&dest);
- if (sctp_rcv_ootb(skb))
+ if (sctp_rcv_ootb(skb)) {
+ SCTP_INC_STATS_BH(SctpOutOfBlues);
goto discard_release;
+ }
}
/* Retrieve the common input handling substructure. */
@@ -248,7 +253,7 @@ discard_release:
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
sctp_chunk_t *chunk;
- sctp_inqueue_t *inqueue;
+ struct sctp_inq *inqueue;
/* One day chunk will live inside the skb, but for
* now this works.
@@ -256,7 +261,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
chunk = (sctp_chunk_t *) skb;
inqueue = &chunk->rcvr->inqueue;
- sctp_push_inqueue(inqueue, chunk);
+ sctp_inq_push(inqueue, chunk);
return 0;
}
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 4284fd4cc5d1..41586238927a 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -47,8 +47,8 @@
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
-/* Initialize an SCTP_inqueue. */
-void sctp_inqueue_init(sctp_inqueue_t *queue)
+/* Initialize an SCTP inqueue. */
+void sctp_inq_init(struct sctp_inq *queue)
{
skb_queue_head_init(&queue->in);
queue->in_progress = NULL;
@@ -59,21 +59,21 @@ void sctp_inqueue_init(sctp_inqueue_t *queue)
queue->malloced = 0;
}
-/* Create an initialized SCTP_inqueue. */
-sctp_inqueue_t *sctp_inqueue_new(void)
+/* Create an initialized sctp_inq. */
+struct sctp_inq *sctp_inq_new(void)
{
- sctp_inqueue_t *retval;
+ struct sctp_inq *retval;
- retval = t_new(sctp_inqueue_t, GFP_ATOMIC);
+ retval = t_new(struct sctp_inq, GFP_ATOMIC);
if (retval) {
- sctp_inqueue_init(retval);
+ sctp_inq_init(retval);
retval->malloced = 1;
}
return retval;
}
/* Release the memory associated with an SCTP inqueue. */
-void sctp_inqueue_free(sctp_inqueue_t *queue)
+void sctp_inq_free(struct sctp_inq *queue)
{
sctp_chunk_t *chunk;
@@ -96,7 +96,7 @@ void sctp_inqueue_free(sctp_inqueue_t *queue)
/* Put a new packet in an SCTP inqueue.
* We assume that packet->sctp_hdr is set and in host byte order.
*/
-void sctp_push_inqueue(sctp_inqueue_t *q, sctp_chunk_t *packet)
+void sctp_inq_push(struct sctp_inq *q, sctp_chunk_t *packet)
{
/* Directly call the packet handling routine. */
@@ -114,7 +114,7 @@ void sctp_push_inqueue(sctp_inqueue_t *q, sctp_chunk_t *packet)
* WARNING: If you need to put the chunk on another queue, you need to
* make a shallow copy (clone) of it.
*/
-sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *queue)
+sctp_chunk_t *sctp_inq_pop(struct sctp_inq *queue)
{
sctp_chunk_t *chunk;
sctp_chunkhdr_t *ch = NULL;
@@ -172,7 +172,7 @@ sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *queue)
chunk->end_of_packet = 1;
}
- SCTP_DEBUG_PRINTK("+++sctp_pop_inqueue+++ chunk %p[%s],"
+ SCTP_DEBUG_PRINTK("+++sctp_inq_pop+++ chunk %p[%s],"
" length %d, skb->len %d\n",chunk,
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
ntohs(chunk->chunk_hdr->length), chunk->skb->len);
@@ -182,12 +182,12 @@ sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *queue)
/* Set a top-half handler.
*
* Originally, we the top-half handler was scheduled as a BH. We now
- * call the handler directly in sctp_push_inqueue() at a time that
+ * call the handler directly in sctp_inq_push() at a time that
* we know we are lock safe.
* The intent is that this routine will pull stuff out of the
* inqueue and process it.
*/
-void sctp_inqueue_set_th_handler(sctp_inqueue_t *q,
+void sctp_inq_set_th_handler(struct sctp_inq *q,
void (*callback)(void *), void *arg)
{
INIT_WORK(&q->immediate, callback, arg);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index afd577b71bdc..db2c10135190 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -132,6 +132,8 @@ static inline int sctp_v6_xmit(struct sk_buff *skb,
__FUNCTION__, skb, skb->len, NIP6(fl.fl6_src),
NIP6(fl.fl6_dst));
+ SCTP_INC_STATS(SctpOutSCTPPacks);
+
return ip6_xmit(sk, skb, &fl, np->opt);
}
@@ -444,7 +446,7 @@ static void sctp_inet6_msgname(char *msgname, int *addr_len)
}
/* Initialize a PF_INET msgname from a ulpevent. */
-static void sctp_inet6_event_msgname(sctp_ulpevent_t *event, char *msgname,
+static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, char *msgname,
int *addrlen)
{
struct sockaddr_in6 *sin6, *sin6from;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 9dcbc6d38a1f..d7826c2216e6 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -419,6 +419,7 @@ int sctp_packet_transmit(sctp_packet_t *packet)
dst = transport->dst;
/* The 'obsolete' field of dst is set to 2 when a dst is freed. */
if (!dst || (dst->obsolete > 1)) {
+ dst_release(dst);
sctp_transport_route(transport, NULL, sctp_sk(sk));
sctp_assoc_sync_pmtu(asoc);
}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index b1bfe69e61ed..1171984c3e20 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -193,11 +193,17 @@ int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
: "Illegal Chunk");
skb_queue_tail(&q->out, (struct sk_buff *) chunk);
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(SctpOutUnorderChunks);
+ else
+ SCTP_INC_STATS(SctpOutOrderChunks);
q->empty = 0;
break;
};
- } else
+ } else {
skb_queue_tail(&q->control, (struct sk_buff *) chunk);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
+ }
if (error < 0)
return error;
@@ -315,6 +321,11 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
switch(reason) {
case SCTP_RETRANSMIT_T3_RTX:
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
+ /* Update the retran path if the T3-rtx timer has expired for
+ * the current retran path.
+ */
+ if (transport == transport->asoc->peer.retran_path)
+ sctp_assoc_update_retran_path(transport->asoc);
break;
case SCTP_RETRANSMIT_FAST_RTX:
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
new file mode 100644
index 000000000000..9c7d9e489b22
--- /dev/null
+++ b/net/sctp/proc.c
@@ -0,0 +1,128 @@
+/* SCTP kernel reference Implementation
+ * Copyright (c) 2003 International Business Machines, Corp.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * Sridhar Samudrala <sri@us.ibm.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+
+#include <linux/types.h>
+#include <linux/seq_file.h>
+#include <net/sctp/sctp.h>
+
+static char *sctp_snmp_list[] = {
+#define SCTP_SNMP_ENTRY(x) #x
+ SCTP_SNMP_ENTRY(SctpCurrEstab),
+ SCTP_SNMP_ENTRY(SctpActiveEstabs),
+ SCTP_SNMP_ENTRY(SctpPassiveEstabs),
+ SCTP_SNMP_ENTRY(SctpAborteds),
+ SCTP_SNMP_ENTRY(SctpShutdowns),
+ SCTP_SNMP_ENTRY(SctpOutOfBlues),
+ SCTP_SNMP_ENTRY(SctpChecksumErrors),
+ SCTP_SNMP_ENTRY(SctpOutCtrlChunks),
+ SCTP_SNMP_ENTRY(SctpOutOrderChunks),
+ SCTP_SNMP_ENTRY(SctpOutUnorderChunks),
+ SCTP_SNMP_ENTRY(SctpInCtrlChunks),
+ SCTP_SNMP_ENTRY(SctpInOrderChunks),
+ SCTP_SNMP_ENTRY(SctpInUnorderChunks),
+ SCTP_SNMP_ENTRY(SctpFragUsrMsgs),
+ SCTP_SNMP_ENTRY(SctpReasmUsrMsgs),
+ SCTP_SNMP_ENTRY(SctpOutSCTPPacks),
+ SCTP_SNMP_ENTRY(SctpInSCTPPacks),
+#undef SCTP_SNMP_ENTRY
+};
+
+/* Return the current value of a particular entry in the mib by adding its
+ * per cpu counters.
+ */
+static unsigned long
+fold_field(void *mib[], int nr)
+{
+ unsigned long res = 0;
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_possible(i))
+ continue;
+ res +=
+ *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
+ sizeof (unsigned long) * nr));
+ res +=
+ *((unsigned long *) (((void *) per_cpu_ptr(mib[1], i)) +
+ sizeof (unsigned long) * nr));
+ }
+ return res;
+}
+
+/* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
+static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
+{
+ int i;
+
+ for (i = 0; i < sizeof(sctp_snmp_list) / sizeof(char *); i++)
+ seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i],
+ fold_field((void **)sctp_statistics, i));
+
+ return 0;
+}
+
+/* Initialize the seq file operations for 'snmp' object. */
+static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sctp_snmp_seq_show, NULL);
+}
+
+static struct file_operations sctp_snmp_seq_fops = {
+ .open = sctp_snmp_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Set up the proc fs entry for 'snmp' object. */
+int __init sctp_snmp_proc_init(void)
+{
+ struct proc_dir_entry *p;
+
+ p = create_proc_entry("snmp", S_IRUGO, proc_net_sctp);
+ if (!p)
+ return -ENOMEM;
+
+ p->proc_fops = &sctp_snmp_seq_fops;
+
+ return 0;
+}
+
+/* Cleanup the proc fs entry for 'snmp' object. */
+void sctp_snmp_proc_exit(void)
+{
+ remove_proc_entry("snmp", proc_net_sctp);
+}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index cd8e3b1adb34..85a5a2941af5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -75,6 +75,9 @@ static struct sctp_af *sctp_af_v6_specific;
extern struct net_proto_family inet_family_ops;
+extern int sctp_snmp_proc_init(void);
+extern int sctp_snmp_proc_exit(void);
+
/* Return the address of the control sock. */
struct sock *sctp_get_ctl_sock(void)
{
@@ -82,21 +85,32 @@ struct sock *sctp_get_ctl_sock(void)
}
/* Set up the proc fs entry for the SCTP protocol. */
-__init void sctp_proc_init(void)
+__init int sctp_proc_init(void)
{
+ int rc = 0;
+
if (!proc_net_sctp) {
struct proc_dir_entry *ent;
ent = proc_mkdir("net/sctp", 0);
if (ent) {
ent->owner = THIS_MODULE;
proc_net_sctp = ent;
- }
+ } else
+ rc = -ENOMEM;
}
+
+ if (sctp_snmp_proc_init())
+ rc = -ENOMEM;
+
+ return rc;
}
/* Clean up the proc fs entry for the SCTP protocol. */
void sctp_proc_exit(void)
{
+
+ sctp_snmp_proc_exit();
+
if (proc_net_sctp) {
proc_net_sctp = NULL;
remove_proc_entry("net/sctp", 0);
@@ -124,7 +138,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
/* Add the address to the local list. */
addr = t_new(struct sockaddr_storage_list, GFP_ATOMIC);
if (addr) {
- INIT_LIST_HEAD(&addr->list);
addr->a.v4.sin_family = AF_INET;
addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
@@ -436,7 +449,6 @@ struct dst_entry *sctp_v4_get_dst(sctp_association_t *asoc,
if (AF_INET == laddr->a.sa.sa_family) {
fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
- dst = sctp_v4_get_dst(asoc, daddr, &laddr->a);
if (!ip_route_output_key(&rt, &fl)) {
dst = &rt->u.dst;
goto out_unlock;
@@ -557,7 +569,7 @@ static void sctp_inet_msgname(char *msgname, int *addr_len)
}
/* Copy the primary address of the peer primary address as the msg_name. */
-static void sctp_inet_event_msgname(sctp_ulpevent_t *event, char *msgname,
+static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
int *addr_len)
{
struct sockaddr_in *sin, *sinfrom;
@@ -628,6 +640,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
NIPQUAD(((struct rtable *)skb->dst)->rt_src),
NIPQUAD(((struct rtable *)skb->dst)->rt_dst));
+ SCTP_INC_STATS(SctpOutSCTPPacks);
return ip_queue_xmit(skb, ipfragok);
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 487221d46a65..0677dbbbd802 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -222,9 +222,7 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &sat_param);
sctp_addto_chunk(retval, sizeof(sat_addr_types), sat_addr_types);
-
sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
-
nodata:
if (addrs.v)
kfree(addrs.v);
@@ -245,7 +243,8 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
retval = NULL;
- addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, priority);
+ addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len,
+ priority);
if (!addrs.v)
goto nomem_rawaddr;
@@ -586,14 +585,12 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
sctp_gap_ack_block_t gab;
int length;
__u32 ctsn;
- sctp_tsnmap_iter_t iter;
- __u16 num_gabs;
- __u16 num_dup_tsns = asoc->peer.next_dup_tsn;
- const sctp_tsnmap_t *map = &asoc->peer.tsn_map;
+ struct sctp_tsnmap_iter iter;
+ __u16 num_gabs, num_dup_tsns;
+ struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
ctsn = sctp_tsnmap_get_ctsn(map);
- SCTP_DEBUG_PRINTK("make_sack: sackCTSNAck sent is 0x%x.\n",
- ctsn);
+ SCTP_DEBUG_PRINTK("sackCTSNAck sent is 0x%x.\n", ctsn);
/* Count the number of Gap Ack Blocks. */
sctp_tsnmap_iter_init(map, &iter);
@@ -603,15 +600,17 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
/* Do nothing. */
}
+ num_dup_tsns = sctp_tsnmap_num_dups(map);
+
/* Initialize the SACK header. */
sack.cum_tsn_ack = htonl(ctsn);
sack.a_rwnd = htonl(asoc->rwnd);
sack.num_gap_ack_blocks = htons(num_gabs);
- sack.num_dup_tsns = htons(num_dup_tsns);
+ sack.num_dup_tsns = htons(num_dup_tsns);
length = sizeof(sack)
+ sizeof(sctp_gap_ack_block_t) * num_gabs
- + sizeof(sctp_dup_tsn_t) * num_dup_tsns;
+ + sizeof(__u32) * num_dup_tsns;
/* Create the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_SACK, 0, length);
@@ -658,21 +657,18 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
while(sctp_tsnmap_next_gap_ack(map, &iter, &gab.start, &gab.end)) {
gab.start = htons(gab.start);
gab.end = htons(gab.end);
- sctp_addto_chunk(retval,
- sizeof(sctp_gap_ack_block_t),
- &gab);
+ sctp_addto_chunk(retval, sizeof(sctp_gap_ack_block_t), &gab);
}
/* Register the duplicates. */
- sctp_addto_chunk(retval,
- sizeof(sctp_dup_tsn_t) * num_dup_tsns,
- &asoc->peer.dup_tsns);
+ sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
+ sctp_tsnmap_get_dups(map));
nodata:
return retval;
}
-/* FIXME: Comments. */
+/* Make a SHUTDOWN chunk. */
sctp_chunk_t *sctp_make_shutdown(const sctp_association_t *asoc)
{
sctp_chunk_t *retval;
@@ -689,7 +685,6 @@ sctp_chunk_t *sctp_make_shutdown(const sctp_association_t *asoc)
retval->subh.shutdown_hdr =
sctp_addto_chunk(retval, sizeof(shut), &shut);
-
nodata:
return retval;
}
@@ -1180,6 +1175,9 @@ int sctp_datachunks_from_user(sctp_association_t *asoc,
over = msg_len % max;
offset = 0;
+ if (whole && over)
+ SCTP_INC_STATS_USER(SctpFragUsrMsgs);
+
/* Create chunks for all the full sized DATA chunks. */
for (i=0, len=first_len; i < whole; i++) {
frag = SCTP_DATA_MIDDLE_FRAG;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 7a79105576de..9fe550bbed78 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -527,10 +527,8 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_REPORT_DUP:
- if (asoc->peer.next_dup_tsn < SCTP_MAX_DUP_TSNS) {
- asoc->peer.dup_tsns[asoc->peer.next_dup_tsn++] =
- ntohl(command->obj.u32);
- }
+ sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
+ ntohl(command->obj.u32));
break;
case SCTP_CMD_REPORT_BIGGAP:
@@ -598,6 +596,13 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
t->rto_pending = 1;
break;
+ case SCTP_CMD_CHUNK_PD:
+ /* Send a chunk to the sockets layer. */
+ sctp_ulpq_partial_delivery(&asoc->ulpq,
+ command->obj.ptr,
+ GFP_ATOMIC);
+ break;
+
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
command->verb, command->obj.ptr);
@@ -737,7 +742,6 @@ int sctp_gen_sack(sctp_association_t *asoc, int force, sctp_cmd_seq_t *commands)
asoc->a_rwnd = asoc->rwnd;
asoc->peer.sack_needed = 0;
- asoc->peer.next_dup_tsn = 0;
error = sctp_outq_tail(&asoc->outqueue, sack);
@@ -1014,7 +1018,7 @@ static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
sctp_association_t *asoc)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
event = sctp_ulpevent_make_assoc_change(asoc,
0,
@@ -1041,7 +1045,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
sctp_subtype_t subtype,
sctp_chunk_t *chunk)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
__u16 error = 0;
switch(event_type) {
@@ -1061,12 +1065,11 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
break;
}
- event = sctp_ulpevent_make_assoc_change(asoc,
- 0,
- SCTP_COMM_LOST,
- error, 0, 0,
- GFP_ATOMIC);
+ /* Cancel any partial delivery in progress. */
+ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+ event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
+ error, 0, 0, GFP_ATOMIC);
if (event)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(event));
@@ -1141,7 +1144,7 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
if (del_timer(&t->hb_timer))
sctp_transport_put(t);
}
-}
+}
/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 848a48229f17..4b1090828f69 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -102,7 +102,7 @@ sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_chunk_t *chunk = arg;
- sctp_ulpevent_t *ev;
+ struct sctp_ulpevent *ev;
/* RFC 2960 6.10 Bundling
*
@@ -145,6 +145,9 @@ sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
+
+ SCTP_INC_STATS(SctpShutdowns);
+ SCTP_DEC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
@@ -223,6 +226,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
+ SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
@@ -264,7 +268,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
goto nomem_ack;
-
+
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem_ack;
@@ -379,6 +383,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
+ SCTP_INC_STATS(SctpAborteds);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
@@ -388,6 +393,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
+
+ SCTP_INC_STATS(SctpAborteds);
+
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
@@ -403,6 +411,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
+ SCTP_INC_STATS(SctpOutCtrlChunks);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
@@ -504,7 +513,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
sctp_association_t *new_asoc;
sctp_init_chunk_t *peer_init;
sctp_chunk_t *repl;
- sctp_ulpevent_t *ev;
+ struct sctp_ulpevent *ev;
int error = 0;
sctp_chunk_t *err_chk_p;
@@ -557,6 +566,8 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SctpPassiveEstabs);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (new_asoc->autoclose)
@@ -636,7 +647,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
- sctp_ulpevent_t *ev;
+ struct sctp_ulpevent *ev;
/* RFC 2960 5.1 Normal Establishment of an Association
*
@@ -648,6 +659,8 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SctpActiveEstabs);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (asoc->autoclose)
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
@@ -719,6 +732,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
if (asoc->overall_error_count >= asoc->overall_error_threshold) {
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
@@ -929,6 +944,8 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
goto out;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
+ SCTP_INC_STATS(SctpOutCtrlChunks);
+
/* Discard the rest of the inbound packet. */
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
@@ -1125,6 +1142,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
+ SCTP_INC_STATS(SctpOutCtrlChunks);
retval = SCTP_DISPOSITION_CONSUME;
} else {
retval = SCTP_DISPOSITION_NOMEM;
@@ -1355,7 +1373,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
sctp_association_t *new_asoc)
{
sctp_init_chunk_t *peer_init;
- sctp_ulpevent_t *ev;
+ struct sctp_ulpevent *ev;
sctp_chunk_t *repl;
/* new_asoc is a brand-new association, so these are not yet
@@ -1421,7 +1439,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
sctp_association_t *new_asoc)
{
sctp_init_chunk_t *peer_init;
- sctp_ulpevent_t *ev;
+ struct sctp_ulpevent *ev;
sctp_chunk_t *repl;
/* new_asoc is a brand-new association, so these are not yet
@@ -1436,6 +1454,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ SCTP_INC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1503,7 +1522,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands,
sctp_association_t *new_asoc)
{
- sctp_ulpevent_t *ev = NULL;
+ struct sctp_ulpevent *ev = NULL;
sctp_chunk_t *repl;
/* Clarification from Implementor's Guide:
@@ -1519,6 +1538,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ SCTP_INC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
SCTP_NULL());
@@ -1540,11 +1560,11 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
SCTP_ULPEVENT(ev));
}
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
-
+
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
-
+
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
@@ -1925,6 +1945,8 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const sctp_endpoint_t *ep,
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
/* BUG? This does not look complete... */
return SCTP_DISPOSITION_ABORT;
@@ -1948,6 +1970,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
+ SCTP_INC_STATS(SctpAborteds);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
@@ -2241,6 +2264,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
sctp_datahdr_t *data_hdr;
sctp_chunk_t *err;
size_t datalen;
+ sctp_verb_t deliver;
int tmp;
__u32 tsn;
@@ -2307,10 +2331,32 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
datalen = ntohs(chunk->chunk_hdr->length);
datalen -= sizeof(sctp_data_chunk_t);
+ deliver = SCTP_CMD_CHUNK_ULP;
+
+ /* Think about partial delivery. */
+ if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
+
+ /* Even if we don't accept this chunk there is
+ * memory pressure.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_CHUNK_PD, SCTP_NULL());
+ }
+
if (asoc->rwnd_over || (datalen > asoc->rwnd + asoc->frag_point)) {
- SCTP_DEBUG_PRINTK("Discarding tsn: %u datalen: %Zd, "
- "rwnd: %d\n", tsn, datalen, asoc->rwnd);
- goto discard_force;
+
+
+ /* There is absolutely no room, but this is the most
+ * important tsn that we are waiting on, try to
+	 * partial deliver or renege to make room.
+ */
+ if ((sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+ deliver = SCTP_CMD_CHUNK_PD;
+ } else {
+ SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
+ "rwnd: %d\n", tsn, datalen,
+ asoc->rwnd);
+ goto discard_force;
+ }
}
/*
@@ -2332,13 +2378,22 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_INC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_CONSUME;
}
- /* We are accepting this DATA chunk. */
+	/* If definitely accepting the DATA chunk, record its TSN, otherwise
+ * wait for renege processing.
+ */
+ if (deliver != SCTP_CMD_CHUNK_PD) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
- /* Record the fact that we have received this TSN. */
- sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(SctpInUnorderChunks);
+ else
+ SCTP_INC_STATS(SctpInOrderChunks);
+ }
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
@@ -2352,10 +2407,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
&data_hdr->stream,
sizeof(data_hdr->stream));
- if (err) {
+ if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
- }
goto discard_noforce;
}
@@ -2363,7 +2417,8 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
* SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
* chunk needs the updated rwnd.
*/
- sctp_add_cmd_sf(commands, SCTP_CMD_CHUNK_ULP, SCTP_CHUNK(chunk));
+ sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
+
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -2536,6 +2591,8 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_INC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_CONSUME;
}
@@ -2544,6 +2601,11 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
/* Record the fact that we have received this TSN. */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(SctpInUnorderChunks);
+ else
+ SCTP_INC_STATS(SctpInOrderChunks);
+
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* If an endpoint receive a DATA chunk with an invalid stream
@@ -2705,6 +2767,8 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
+ SCTP_INC_STATS(SctpOutCtrlChunks);
+
return SCTP_DISPOSITION_CONSUME;
}
@@ -2726,7 +2790,7 @@ sctp_disposition_t sctp_sf_operr_notify(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_chunk_t *chunk = arg;
- sctp_ulpevent_t *ev;
+ struct sctp_ulpevent *ev;
while (chunk->chunk_end > chunk->skb->data) {
ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
@@ -2764,7 +2828,7 @@ sctp_disposition_t sctp_sf_do_9_2_final(const sctp_endpoint_t *ep,
{
sctp_chunk_t *chunk = arg;
sctp_chunk_t *reply;
- sctp_ulpevent_t *ev;
+ struct sctp_ulpevent *ev;
/* 10.2 H) SHUTDOWN COMPLETE notification
*
@@ -2794,6 +2858,8 @@ sctp_disposition_t sctp_sf_do_9_2_final(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
+ SCTP_INC_STATS(SctpShutdowns);
+ SCTP_DEC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
/* ...and remove all record of the association. */
@@ -2834,6 +2900,8 @@ sctp_disposition_t sctp_sf_ootb(const sctp_endpoint_t *ep,
__u8 *ch_end;
int ootb_shut_ack = 0;
+ SCTP_INC_STATS(SctpOutOfBlues);
+
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
@@ -2901,6 +2969,8 @@ sctp_disposition_t sctp_sf_shut_8_4_5(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
+ SCTP_INC_STATS(SctpOutCtrlChunks);
+
return SCTP_DISPOSITION_CONSUME;
}
@@ -3472,6 +3542,10 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(const sctp_endpoint_t *ep,
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
+
return retval;
}
@@ -3527,6 +3601,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
+ SCTP_INC_STATS(SctpShutdowns);
+
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
@@ -3597,6 +3673,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
+ SCTP_INC_STATS(SctpAborteds);
+
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
*/
@@ -3929,6 +4007,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const sctp_endpoint_t *ep,
if (asoc->overall_error_count >= asoc->overall_error_threshold) {
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
@@ -4096,6 +4176,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const sctp_endpoint_t *ep,
if (asoc->overall_error_count >= asoc->overall_error_threshold) {
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
@@ -4271,6 +4353,9 @@ sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *chunk)
__u16 num_blocks;
__u16 num_dup_tsns;
+ /* FIXME: Protect ourselves from reading too far into
+ * the skb from a bogus sender.
+ */
sack = (sctp_sackhdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_sackhdr_t));
@@ -4401,6 +4486,7 @@ void sctp_send_stale_cookie_err(const sctp_endpoint_t *ep,
sctp_packet_append_chunk(packet, err_chunk);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
+ SCTP_INC_STATS(SctpOutCtrlChunks);
} else
sctp_free_chunk (err_chunk);
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fd6c36e0415c..cda5d5aab53c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -81,13 +81,13 @@
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
-static inline int sctp_wspace(sctp_association_t *asoc);
+static inline int sctp_wspace(struct sctp_association *asoc);
static inline void sctp_set_owner_w(sctp_chunk_t *chunk);
static void sctp_wfree(struct sk_buff *skb);
-static int sctp_wait_for_sndbuf(sctp_association_t *asoc, long *timeo_p,
+static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
int msg_len);
static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p);
-static int sctp_wait_for_connect(sctp_association_t *asoc, long *timeo_p);
+static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static inline int sctp_verify_addr(struct sock *, union sctp_addr *, int);
static int sctp_bindx_add(struct sock *, struct sockaddr_storage *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr_storage *, int);
@@ -158,7 +158,7 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt,
/* Bind a local address either to an endpoint or to an association. */
SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
- sctp_opt_t *sp = sctp_sk(sk);
+ struct sctp_opt *sp = sctp_sk(sk);
sctp_endpoint_t *ep = sp->ep;
sctp_bind_addr_t *bp = &ep->base.bind_addr;
struct sctp_af *af;
@@ -454,7 +454,7 @@ err_bindx_add:
*/
int sctp_bindx_rem(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
{
- sctp_opt_t *sp = sctp_sk(sk);
+ struct sctp_opt *sp = sctp_sk(sk);
sctp_endpoint_t *ep = sp->ep;
int cnt;
sctp_bind_addr_t *bp = &ep->base.bind_addr;
@@ -662,6 +662,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
/* Clean up any skbs sitting on the receive queue. */
skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sctp_sk(sk)->pd_lobby);
/* This will run the backlog queue. */
sctp_release_sock(sk);
@@ -714,7 +715,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, int msg_len)
{
- sctp_opt_t *sp;
+ struct sctp_opt *sp;
sctp_endpoint_t *ep;
sctp_association_t *new_asoc=NULL, *asoc=NULL;
struct sctp_transport *transport;
@@ -939,6 +940,19 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
/* ASSERT: we have a valid association at this point. */
SCTP_DEBUG_PRINTK("We have a valid association.\n");
+ if (!sinfo) {
+ /* If the user didn't specify SNDRCVINFO, make up one with
+ * some defaults.
+ */
+ default_sinfo.sinfo_stream = asoc->defaults.stream;
+ default_sinfo.sinfo_flags = asoc->defaults.flags;
+ default_sinfo.sinfo_ppid = asoc->defaults.ppid;
+ default_sinfo.sinfo_context = asoc->defaults.context;
+ default_sinfo.sinfo_timetolive = asoc->defaults.timetolive;
+ default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
+ sinfo = &default_sinfo;
+ }
+
/* API 7.1.7, the sndbuf size per association bounds the
* maximum size of data that can be sent in a single send call.
*/
@@ -963,13 +977,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
err = -EINVAL;
goto out_free;
}
- } else {
- /* If the user didn't specify SNDRCVINFO, make up one with
- * some defaults.
- */
- default_sinfo.sinfo_stream = asoc->defaults.stream;
- default_sinfo.sinfo_ppid = asoc->defaults.ppid;
- sinfo = &default_sinfo;
}
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
@@ -979,21 +986,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_free;
}
-#if 0
- /* FIXME: This looks wrong so I'll comment out.
- * We should be able to use this same technique for
- * primary address override! --jgrimm
- */
- /* If the user gave us an address, copy it in. */
- if (msg->msg_name) {
- chunk->transport = sctp_assoc_lookup_paddr(asoc, &to);
- if (!chunk->transport) {
- err = -EINVAL;
- goto out_free;
- }
- }
-#endif /* 0 */
-
/* Break the message into multiple chunks of maximum size. */
skb_queue_head_init(&chunks);
err = sctp_datachunks_from_user(asoc, sinfo, msg, msg_len, &chunks);
@@ -1013,6 +1005,23 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
/* Do accounting for the write space. */
sctp_set_owner_w(chunk);
+
+ /* This flag, in the UDP model, requests the SCTP stack to
+ * override the primary destination address with the
+ * address found with the sendto/sendmsg call.
+ */
+ if (sinfo_flags & MSG_ADDR_OVER) {
+ if (!msg->msg_name) {
+ err = -EINVAL;
+ goto out_free;
+ }
+ chunk->transport = sctp_assoc_lookup_paddr(asoc, &to);
+ if (!chunk->transport) {
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+
/* Send it to the lower layers. */
sctp_primitive_SEND(asoc, chunk);
SCTP_DEBUG_PRINTK("We sent primitively.\n");
@@ -1110,8 +1119,8 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int len, int noblock, int flags, int *addr_len)
{
- sctp_ulpevent_t *event = NULL;
- sctp_opt_t *sp = sctp_sk(sk);
+ struct sctp_ulpevent *event = NULL;
+ struct sctp_opt *sp = sctp_sk(sk);
struct sk_buff *skb;
int copied;
int err = 0;
@@ -1143,7 +1152,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- event = (sctp_ulpevent_t *) skb->cb;
+ event = sctp_skb2event(skb);
if (err)
goto out_free;
@@ -1170,7 +1179,6 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
/* If skb's length exceeds the user's buffer, update the skb and
* push it back to the receive_queue so that the next call to
* recvmsg() will return the remaining data. Don't set MSG_EOR.
- * Otherwise, set MSG_EOR indicating the end of a message.
*/
if (skb_len > copied) {
msg->msg_flags &= ~MSG_EOR;
@@ -1178,6 +1186,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
goto out_free;
sctp_skb_pull(skb, copied);
skb_queue_head(&sk->receive_queue, skb);
+
/* When only partial message is copied to the user, increase
* rwnd by that amount. If all the data in the skb is read,
* rwnd is updated when the skb's destructor is called via
@@ -1185,9 +1194,11 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
*/
sctp_assoc_rwnd_increase(event->asoc, copied);
goto out;
- } else {
- msg->msg_flags |= MSG_EOR;
- }
+ } else if ((event->msg_flags & MSG_NOTIFICATION) ||
+ (event->msg_flags & MSG_EOR))
+ msg->msg_flags |= MSG_EOR;
+ else
+ msg->msg_flags &= ~MSG_EOR;
out_free:
sctp_ulpevent_free(event); /* Free the skb. */
@@ -1225,7 +1236,7 @@ static inline int sctp_setsockopt_set_events(struct sock *sk, char *optval,
static inline int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
int optlen)
{
- sctp_opt_t *sp = sctp_sk(sk);
+ struct sctp_opt *sp = sctp_sk(sk);
/* Applicable to UDP-style socket only */
if (SCTP_SOCKET_TCP == sp->type)
@@ -1310,6 +1321,44 @@ static inline int sctp_setsockopt_initmsg(struct sock *sk, char *optval,
return 0;
}
+/*
+ *
+ * 7.1.15 Set default send parameters (SET_DEFAULT_SEND_PARAM)
+ *
+ * Applications that wish to use the sendto() system call may wish to
+ * specify a default set of parameters that would normally be supplied
+ * through the inclusion of ancillary data. This socket option allows
+ * such an application to set the default sctp_sndrcvinfo structure.
+ * The application that wishes to use this socket option simply passes
+ * in to this call the sctp_sndrcvinfo structure defined in Section
+ * 5.2.2) The input parameters accepted by this call include
+ * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
+ * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
+ * to this call if the caller is using the UDP model.
+ */
+static inline int sctp_setsockopt_set_default_send_param(struct sock *sk,
+ char *optval, int optlen)
+{
+ struct sctp_sndrcvinfo info;
+ sctp_association_t *asoc;
+
+ if (optlen != sizeof(struct sctp_sndrcvinfo))
+ return -EINVAL;
+ if (copy_from_user(&info, optval, optlen))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ asoc->defaults.stream = info.sinfo_stream;
+ asoc->defaults.flags = info.sinfo_flags;
+ asoc->defaults.ppid = info.sinfo_ppid;
+ asoc->defaults.context = info.sinfo_context;
+ asoc->defaults.timetolive = info.sinfo_timetolive;
+ return 0;
+}
+
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -1401,6 +1450,11 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
retval = sctp_setsockopt_initmsg(sk, optval, optlen);
break;
+ case SCTP_SET_DEFAULT_SEND_PARAM:
+ retval = sctp_setsockopt_set_default_send_param(sk,
+ optval, optlen);
+ break;
+
default:
retval = -ENOPROTOOPT;
break;
@@ -1432,7 +1486,7 @@ out_nounlock:
SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
- sctp_opt_t *sp;
+ struct sctp_opt *sp;
sctp_endpoint_t *ep;
sctp_association_t *asoc;
struct sctp_transport *transport;
@@ -1554,7 +1608,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
{
sctp_endpoint_t *ep;
sctp_protocol_t *proto;
- sctp_opt_t *sp;
+ struct sctp_opt *sp;
SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk);
@@ -1583,7 +1637,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
/* Initialize default RTO related parameters. These parameters can
* be modified for with the SCTP_RTOINFO socket option.
- * FIXME: This are not used yet.
+ * FIXME: These are not used yet.
*/
sp->rtoinfo.srto_initial = proto->rto_initial;
sp->rtoinfo.srto_max = proto->rto_max;
@@ -1620,6 +1674,11 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
*/
sp->autoclose = 0;
sp->pf = sctp_get_pf_specific(sk->family);
+
+ /* Control variables for partial data delivery. */
+ sp->pd_mode = 0;
+ skb_queue_head_init(&sp->pd_lobby);
+
/* Create a per socket endpoint structure. Even if we
* change the data structure relationships, this may still
* be useful for storing pre-connect address information.
@@ -1774,10 +1833,10 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso
struct sock *newsk;
struct socket *tmpsock;
sctp_endpoint_t *newep;
- sctp_opt_t *oldsp = sctp_sk(oldsk);
- sctp_opt_t *newsp;
+ struct sctp_opt *oldsp = sctp_sk(oldsk);
+ struct sctp_opt *newsp;
struct sk_buff *skb, *tmp;
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
int err = 0;
/* An association cannot be branched off from an already peeled-off
@@ -1811,13 +1870,50 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso
* peeled off association to the new socket's receive queue.
*/
sctp_skb_for_each(skb, &oldsk->receive_queue, tmp) {
- event = (sctp_ulpevent_t *)skb->cb;
+ event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, skb->list);
__skb_queue_tail(&newsk->receive_queue, skb);
}
}
+	/* Clean up any messages pending delivery due to partial
+ * delivery. Three cases:
+	 * 1) No partial delivery;  no work.
+ * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
+	 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
+ */
+ skb_queue_head_init(&newsp->pd_lobby);
+ sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode;;
+
+ if (sctp_sk(oldsk)->pd_mode) {
+ struct sk_buff_head *queue;
+
+ /* Decide which queue to move pd_lobby skbs to. */
+ if (assoc->ulpq.pd_mode) {
+ queue = &newsp->pd_lobby;
+ } else
+ queue = &newsk->receive_queue;
+
+ /* Walk through the pd_lobby, looking for skbs that
+		 * need to be moved to the new socket.
+ */
+ sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
+ event = sctp_skb2event(skb);
+ if (event->asoc == assoc) {
+ __skb_unlink(skb, skb->list);
+ __skb_queue_tail(queue, skb);
+ }
+ }
+
+ /* Clear up any skbs waiting for the partial
+ * delivery to finish.
+ */
+ if (assoc->ulpq.pd_mode)
+ sctp_clear_pd(oldsk);
+
+ }
+
/* Set the type of socket to indicate that it is peeled off from the
* original socket.
*/
@@ -2389,7 +2485,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
*/
SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
{
- sctp_opt_t *sp = sctp_sk(sk);
+ struct sctp_opt *sp = sctp_sk(sk);
sctp_endpoint_t *ep = sp->ep;
/* Only UDP style sockets that are not peeled off are allowed to
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 286fd1869725..284a6b56a4f5 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -3,40 +3,40 @@
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
- *
+ *
* This file is part of the SCTP kernel reference Implementation
- *
+ *
* These functions manipulate sctp tsn mapping array.
- *
- * The SCTP reference implementation is free software;
- * you can redistribute it and/or modify it under the terms of
+ *
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
- *
- * The SCTP reference implementation is distributed in the hope that it
+ *
+ * The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
- *
+ * Boston, MA 02111-1307, USA.
+ *
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
+ *
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
- * Written or modified by:
+ * Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Jon Grimm <jgrimm@us.ibm.com>
* Karl Knutson <karl@athena.chicago.il.us>
- *
+ *
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
@@ -45,21 +45,21 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-static void _sctp_tsnmap_update(sctp_tsnmap_t *map);
-static void _sctp_tsnmap_update_pending_data(sctp_tsnmap_t *map);
-static void _sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
- __u16 len, __u16 base,
- int *started, __u16 *start,
- int *ended, __u16 *end);
+static void sctp_tsnmap_update(struct sctp_tsnmap *map);
+static void sctp_tsnmap_update_pending_data(struct sctp_tsnmap *map);
+static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
+ __u16 len, __u16 base,
+ int *started, __u16 *start,
+ int *ended, __u16 *end);
/* Create a new sctp_tsnmap.
* Allocate room to store at least 'len' contiguous TSNs.
*/
-sctp_tsnmap_t *sctp_tsnmap_new(__u16 len, __u32 initial_tsn, int priority)
+struct sctp_tsnmap *sctp_tsnmap_new(__u16 len, __u32 initial_tsn, int priority)
{
- sctp_tsnmap_t *retval;
+ struct sctp_tsnmap *retval;
- retval = kmalloc(sizeof(sctp_tsnmap_t) +
+ retval = kmalloc(sizeof(struct sctp_tsnmap) +
sctp_tsnmap_storage_size(len),
priority);
if (!retval)
@@ -72,13 +72,13 @@ sctp_tsnmap_t *sctp_tsnmap_new(__u16 len, __u32 initial_tsn, int priority)
fail_map:
kfree(retval);
-
fail:
return NULL;
}
/* Initialize a block of memory as a tsnmap. */
-sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn)
+struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
+ __u32 initial_tsn)
{
map->tsn_map = map->raw_map;
map->overflow_map = map->tsn_map + len;
@@ -94,6 +94,7 @@ sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn
map->max_tsn_seen = map->cumulative_tsn_ack_point;
map->malloced = 0;
map->pending_data = 0;
+ map->num_dup_tsns = 0;
return map;
}
@@ -104,7 +105,7 @@ sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn
* >0 if the TSN has been seen (duplicate)
* <0 if the TSN is invalid (too large to track)
*/
-int sctp_tsnmap_check(const sctp_tsnmap_t *map, __u32 tsn)
+int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
{
__s32 gap;
int dup;
@@ -136,7 +137,7 @@ out:
}
/* Is there a gap in the TSN map? */
-int sctp_tsnmap_has_gap(const sctp_tsnmap_t *map)
+int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map)
{
int has_gap;
@@ -145,7 +146,7 @@ int sctp_tsnmap_has_gap(const sctp_tsnmap_t *map)
}
/* Mark this TSN as seen. */
-void sctp_tsnmap_mark(sctp_tsnmap_t *map, __u32 tsn)
+void sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
{
__s32 gap;
@@ -173,40 +174,45 @@ void sctp_tsnmap_mark(sctp_tsnmap_t *map, __u32 tsn)
/* Go fixup any internal TSN mapping variables including
* cumulative_tsn_ack_point.
*/
- _sctp_tsnmap_update(map);
+ sctp_tsnmap_update(map);
+}
+
+void sctp_tsnmap_report_dup(struct sctp_tsnmap *map, __u32 tsn)
+{
}
/* Retrieve the Cumulative TSN Ack Point. */
-__u32 sctp_tsnmap_get_ctsn(const sctp_tsnmap_t *map)
+__u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *map)
{
return map->cumulative_tsn_ack_point;
}
/* Retrieve the highest TSN we've seen. */
-__u32 sctp_tsnmap_get_max_tsn_seen(const sctp_tsnmap_t *map)
+__u32 sctp_tsnmap_get_max_tsn_seen(const struct sctp_tsnmap *map)
{
return map->max_tsn_seen;
}
/* Dispose of a tsnmap. */
-void sctp_tsnmap_free(sctp_tsnmap_t *map)
+void sctp_tsnmap_free(struct sctp_tsnmap *map)
{
if (map->malloced)
kfree(map);
}
/* Initialize a Gap Ack Block iterator from memory being provided. */
-void sctp_tsnmap_iter_init(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter)
+void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map,
+ struct sctp_tsnmap_iter *iter)
{
/* Only start looking one past the Cumulative TSN Ack Point. */
iter->start = map->cumulative_tsn_ack_point + 1;
}
-/* Get the next Gap Ack Blocks. Returns 0 if there was not
- * another block to get.
+/* Get the next Gap Ack Blocks. Returns 0 if there was not another block
+ * to get.
*/
-int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
- __u16 *start, __u16 *end)
+int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
+ struct sctp_tsnmap_iter *iter, __u16 *start, __u16 *end)
{
int started, ended;
__u16 _start, _end, offset;
@@ -216,12 +222,10 @@ int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
/* Search the first mapping array. */
if (iter->start - map->base_tsn < map->len) {
+
offset = iter->start - map->base_tsn;
- _sctp_tsnmap_find_gap_ack(map->tsn_map,
- offset,
- map->len, 0,
- &started, &_start,
- &ended, &_end);
+ sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len, 0,
+ &started, &_start, &ended, &_end);
}
/* Do we need to check the overflow map? */
@@ -235,12 +239,12 @@ int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
offset = iter->start - map->base_tsn - map->len;
/* Search the overflow map. */
- _sctp_tsnmap_find_gap_ack(map->overflow_map,
- offset,
- map->len,
- map->len,
- &started, &_start,
- &ended, &_end);
+ sctp_tsnmap_find_gap_ack(map->overflow_map,
+ offset,
+ map->len,
+ map->len,
+ &started, &_start,
+ &ended, &_end);
}
/* The Gap Ack Block happens to end at the end of the
@@ -278,7 +282,7 @@ int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
/* This private helper function updates the tsnmap buffers and
* the Cumulative TSN Ack Point.
*/
-static void _sctp_tsnmap_update(sctp_tsnmap_t *map)
+static void sctp_tsnmap_update(struct sctp_tsnmap *map)
{
__u32 ctsn;
@@ -301,10 +305,10 @@ static void _sctp_tsnmap_update(sctp_tsnmap_t *map)
} while (map->tsn_map[ctsn - map->base_tsn]);
map->cumulative_tsn_ack_point = ctsn - 1; /* Back up one. */
- _sctp_tsnmap_update_pending_data(map);
+ sctp_tsnmap_update_pending_data(map);
}
-static void _sctp_tsnmap_update_pending_data(sctp_tsnmap_t *map)
+static void sctp_tsnmap_update_pending_data(struct sctp_tsnmap *map)
{
__u32 cum_tsn = map->cumulative_tsn_ack_point;
__u32 max_tsn = map->max_tsn_seen;
@@ -324,7 +328,7 @@ static void _sctp_tsnmap_update_pending_data(sctp_tsnmap_t *map)
for (i = start; i < end; i++) {
if (map->tsn_map[i])
pending_data--;
- }
+ }
if (gap >= map->len) {
start = 0;
@@ -345,14 +349,14 @@ out:
* The flags "started" and "ended" tell is if we found the beginning
* or (respectively) the end of a Gap Ack Block.
*/
-static void _sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
- __u16 len, __u16 base,
- int *started, __u16 *start,
- int *ended, __u16 *end)
+static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
+ __u16 len, __u16 base,
+ int *started, __u16 *start,
+ int *ended, __u16 *end)
{
int i = off;
- /* Let's look through the entire array, but break out
+ /* Look through the entire array, but break out
* early if we have found the end of the Gap Ack Block.
*/
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 4a368eb47073..30e96c68d10f 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -5,37 +5,37 @@
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
- *
- * These functions manipulate an sctp event. The sctp_ulpevent_t is used
- * to carry notifications and data to the ULP (sockets).
- * The SCTP reference implementation is free software;
- * you can redistribute it and/or modify it under the terms of
+ *
+ * These functions manipulate an sctp event. The struct ulpevent is used
+ * to carry notifications and data to the ULP (sockets).
+ * The SCTP reference implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
- *
- * The SCTP reference implementation is distributed in the hope that it
+ *
+ * The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
- *
+ * Boston, MA 02111-1307, USA.
+ *
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
+ *
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
- * Written or modified by:
+ * Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
- *
+ *
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
@@ -47,58 +47,51 @@
#include <net/sctp/sm.h>
static void sctp_ulpevent_set_owner_r(struct sk_buff *skb,
- sctp_association_t *asoc);
-static void
-sctp_ulpevent_set_owner(struct sk_buff *skb, const sctp_association_t *asoc);
+ struct sctp_association *asoc);
+static void sctp_ulpevent_set_owner(struct sk_buff *skb,
+ const struct sctp_association *asoc);
/* Create a new sctp_ulpevent. */
-sctp_ulpevent_t *sctp_ulpevent_new(int size, int msg_flags, int priority)
+struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, int priority)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
struct sk_buff *skb;
skb = alloc_skb(size, priority);
if (!skb)
goto fail;
- event = (sctp_ulpevent_t *) skb->cb;
- event = sctp_ulpevent_init(event, skb, msg_flags);
+ event = sctp_skb2event(skb);
+ event = sctp_ulpevent_init(event, msg_flags);
if (!event)
goto fail_init;
-
- event->malloced = 1;
return event;
fail_init:
- kfree_skb(event->parent);
-
+ kfree_skb(skb);
fail:
return NULL;
}
/* Initialize an ULP event from an given skb. */
-sctp_ulpevent_t *sctp_ulpevent_init(sctp_ulpevent_t *event,
- struct sk_buff *parent,
- int msg_flags)
+struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *event,
+ int msg_flags)
{
- memset(event, sizeof(sctp_ulpevent_t), 0x00);
+ memset(event, sizeof(struct sctp_ulpevent), 0x00);
event->msg_flags = msg_flags;
- event->parent = parent;
- event->malloced = 0;
return event;
}
/* Dispose of an event. */
-void sctp_ulpevent_free(sctp_ulpevent_t *event)
+void sctp_ulpevent_free(struct sctp_ulpevent *event)
{
- if (event->malloced)
- kfree_skb(event->parent);
+ kfree_skb(sctp_event2skb(event));
}
/* Is this a MSG_NOTIFICATION? */
-int sctp_ulpevent_is_notification(const sctp_ulpevent_t *event)
+int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event)
{
- return event->msg_flags & MSG_NOTIFICATION;
+ return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION);
}
/* Create and initialize an SCTP_ASSOC_CHANGE event.
@@ -112,24 +105,22 @@ int sctp_ulpevent_is_notification(const sctp_ulpevent_t *event)
* Note: There is no field checking here. If a field is unused it will be
* zero'd out.
*/
-sctp_ulpevent_t *sctp_ulpevent_make_assoc_change(const sctp_association_t *asoc,
- __u16 flags,
- __u16 state,
- __u16 error,
- __u16 outbound,
- __u16 inbound,
- int priority)
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
+ const sctp_association_t *asoc,
+ __u16 flags, __u16 state, __u16 error, __u16 outbound,
+ __u16 inbound, int priority)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
struct sctp_assoc_change *sac;
+ struct sk_buff *skb;
event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
MSG_NOTIFICATION, priority);
if (!event)
goto fail;
-
+ skb = sctp_event2skb(event);
sac = (struct sctp_assoc_change *)
- skb_put(event->parent, sizeof(struct sctp_assoc_change));
+ skb_put(skb, sizeof(struct sctp_assoc_change));
/* Socket Extensions for SCTP
* 5.3.1.1 SCTP_ASSOC_CHANGE
@@ -198,13 +189,13 @@ sctp_ulpevent_t *sctp_ulpevent_make_assoc_change(const sctp_association_t *asoc,
* All notifications for a given association have the same association
* identifier. For TCP style socket, this field is ignored.
*/
- sctp_ulpevent_set_owner(event->parent, asoc);
+ sctp_ulpevent_set_owner(skb, asoc);
sac->sac_assoc_id = sctp_assoc2id(asoc);
return event;
fail:
- return NULL;
+ return NULL;
}
/* Create and initialize an SCTP_PEER_ADDR_CHANGE event.
@@ -215,24 +206,22 @@ fail:
* When a destination address on a multi-homed peer encounters a change
* an interface details event is sent.
*/
-sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
- const sctp_association_t *asoc,
- const struct sockaddr_storage *aaddr,
- int flags,
- int state,
- int error,
- int priority)
+struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
+ const sctp_association_t *asoc, const struct sockaddr_storage *aaddr,
+ int flags, int state, int error, int priority)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
struct sctp_paddr_change *spc;
+ struct sk_buff *skb;
event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change),
MSG_NOTIFICATION, priority);
if (!event)
goto fail;
+ skb = sctp_event2skb(event);
spc = (struct sctp_paddr_change *)
- skb_put(event->parent, sizeof(struct sctp_paddr_change));
+ skb_put(skb, sizeof(struct sctp_paddr_change));
/* Sockets API Extensions for SCTP
* Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
@@ -265,7 +254,7 @@ sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
* Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
*
* spc_state: 32 bits (signed integer)
- *
+ *
* This field holds one of a number of values that communicate the
* event that happened to the address.
*/
@@ -291,7 +280,7 @@ sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
* All notifications for a given association have the same association
* identifier. For TCP style socket, this field is ignored.
*/
- sctp_ulpevent_set_owner(event->parent, asoc);
+ sctp_ulpevent_set_owner(skb, asoc);
spc->spc_assoc_id = sctp_assoc2id(asoc);
/* Sockets API Extensions for SCTP
@@ -325,12 +314,11 @@ fail:
* specification [SCTP] and any extensions for a list of possible
* error formats.
*/
-sctp_ulpevent_t *sctp_ulpevent_make_remote_error(const sctp_association_t *asoc,
- sctp_chunk_t *chunk,
- __u16 flags,
- int priority)
+struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ const sctp_association_t *asoc, sctp_chunk_t *chunk,
+ __u16 flags, int priority)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
struct sctp_remote_error *sre;
struct sk_buff *skb;
sctp_errhdr_t *ch;
@@ -358,13 +346,12 @@ sctp_ulpevent_t *sctp_ulpevent_make_remote_error(const sctp_association_t *asoc,
goto fail;
/* Embed the event fields inside the cloned skb. */
- event = (sctp_ulpevent_t *) skb->cb;
- event = sctp_ulpevent_init(event, skb, MSG_NOTIFICATION);
+ event = sctp_skb2event(skb);
+ event = sctp_ulpevent_init(event, MSG_NOTIFICATION);
if (!event)
goto fail;
- event->malloced = 1;
sre = (struct sctp_remote_error *)
skb_push(skb, sizeof(struct sctp_remote_error));
@@ -416,7 +403,8 @@ sctp_ulpevent_t *sctp_ulpevent_make_remote_error(const sctp_association_t *asoc,
* All notifications for a given association have the same association
* identifier. For TCP style socket, this field is ignored.
*/
- sctp_ulpevent_set_owner(event->parent, asoc);
+ skb = sctp_event2skb(event);
+ sctp_ulpevent_set_owner(skb, asoc);
sre->sre_assoc_id = sctp_assoc2id(asoc);
return event;
@@ -430,13 +418,11 @@ fail:
* Socket Extensions for SCTP - draft-01
* 5.3.1.4 SCTP_SEND_FAILED
*/
-sctp_ulpevent_t *sctp_ulpevent_make_send_failed(const sctp_association_t *asoc,
- sctp_chunk_t *chunk,
- __u16 flags,
- __u32 error,
- int priority)
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
+ const sctp_association_t *asoc, sctp_chunk_t *chunk,
+ __u16 flags, __u32 error, int priority)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
struct sctp_send_failed *ssf;
struct sk_buff *skb;
@@ -452,16 +438,11 @@ sctp_ulpevent_t *sctp_ulpevent_make_send_failed(const sctp_association_t *asoc,
skb_pull(skb, sizeof(sctp_data_chunk_t));
/* Embed the event fields inside the cloned skb. */
- event = (sctp_ulpevent_t *) skb->cb;
- event = sctp_ulpevent_init(event, skb, MSG_NOTIFICATION);
+ event = sctp_skb2event(skb);
+ event = sctp_ulpevent_init(event, MSG_NOTIFICATION);
if (!event)
goto fail;
- /* Mark as malloced, even though the constructor was not
- * called.
- */
- event->malloced = 1;
-
ssf = (struct sctp_send_failed *)
skb_push(skb, sizeof(struct sctp_send_failed));
@@ -525,7 +506,8 @@ sctp_ulpevent_t *sctp_ulpevent_make_send_failed(const sctp_association_t *asoc,
* same association identifier. For TCP style socket, this field is
* ignored.
*/
- sctp_ulpevent_set_owner(event->parent, asoc);
+ skb = sctp_event2skb(event);
+ sctp_ulpevent_set_owner(skb, asoc);
ssf->ssf_assoc_id = sctp_assoc2id(asoc);
return event;
@@ -538,21 +520,22 @@ fail:
* Socket Extensions for SCTP - draft-01
* 5.3.1.5 SCTP_SHUTDOWN_EVENT
*/
-sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
+struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
const sctp_association_t *asoc,
- __u16 flags,
- int priority)
+ __u16 flags, int priority)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
struct sctp_shutdown_event *sse;
+ struct sk_buff *skb;
event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
MSG_NOTIFICATION, priority);
if (!event)
goto fail;
+ skb = sctp_event2skb(event);
sse = (struct sctp_shutdown_event *)
- skb_put(event->parent, sizeof(struct sctp_shutdown_event));
+ skb_put(skb, sizeof(struct sctp_shutdown_event));
/* Socket Extensions for SCTP
* 5.3.1.5 SCTP_SHUTDOWN_EVENT
@@ -587,7 +570,7 @@ sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
* All notifications for a given association have the same association
* identifier. For TCP style socket, this field is ignored.
*/
- sctp_ulpevent_set_owner(event->parent, asoc);
+ sctp_ulpevent_set_owner(skb, asoc);
sse->sse_assoc_id = sctp_assoc2id(asoc);
return event;
@@ -600,13 +583,13 @@ fail:
* to pass it to the upper layers. Go ahead and calculate the sndrcvinfo
* even if filtered out later.
*
- * Socket Extensions for SCTP - draft-01
+ * Socket Extensions for SCTP
* 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
*/
-sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
- sctp_chunk_t *chunk, int priority)
+struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
+ sctp_chunk_t *chunk, int priority)
{
- sctp_ulpevent_t *event, *levent;
+ struct sctp_ulpevent *event;
struct sctp_sndrcvinfo *info;
struct sk_buff *skb, *list;
size_t padding, len;
@@ -638,24 +621,19 @@ sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
sctp_ulpevent_set_owner_r(skb, asoc);
/* Embed the event fields inside the cloned skb. */
- event = (sctp_ulpevent_t *) skb->cb;
+ event = sctp_skb2event(skb);
/* Initialize event with flags 0. */
- event = sctp_ulpevent_init(event, skb, 0);
+ event = sctp_ulpevent_init(event, 0);
if (!event)
goto fail_init;
- event->malloced = 1;
-
- for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
+ /* Note: Not clearing the entire event struct as
+ * this is just a fragment of the real event. However,
+ * we still need to do rwnd accounting.
+ */
+ for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
sctp_ulpevent_set_owner_r(list, asoc);
- /* Initialize event with flags 0. */
- levent = sctp_ulpevent_init(event, skb, 0);
- if (!levent)
- goto fail_init;
-
- levent->malloced = 1;
- }
info = (struct sctp_sndrcvinfo *) &event->sndrcvinfo;
@@ -707,18 +685,26 @@ sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
* MSG_UNORDERED - This flag is present when the message was sent
* non-ordered.
*/
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
info->sinfo_flags |= MSG_UNORDERED;
- /* FIXME: For reassembly, we need to have the fragmentation bits.
- * This really does not belong in the event structure, but
- * its difficult to fix everything at the same time. Eventually,
- * we should create and skb based chunk structure. This structure
- * storage can be converted to an event. --jgrimm
+ /* sinfo_cumtsn: 32 bit (unsigned integer)
+ *
+ * This field will hold the current cumulative TSN as
+ * known by the underlying SCTP layer. Note this field is
+ * ignored when sending and only valid for a receive
+ * operation when sinfo_flags are set to MSG_UNORDERED.
+ */
+ info->sinfo_cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
+ }
+
+ /* Note: For reassembly, we need to have the fragmentation bits.
+ * For now, merge these into the msg_flags, since those bit
+ * positions are not used.
*/
- event->chunk_flags = chunk->chunk_hdr->flags;
+ event->msg_flags |= chunk->chunk_hdr->flags;
- /* With -04 draft, tsn moves into sndrcvinfo. */
+ /* With 04 draft, tsn moves into sndrcvinfo. */
info->sinfo_tsn = ntohl(chunk->subh.data_hdr->tsn);
/* Context is not used on receive. */
@@ -745,19 +731,79 @@ fail:
return NULL;
}
+/* Create a partial delivery related event.
+ *
+ * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT
+ *
+ * When a receiver is engaged in a partial delivery of a
+ * message this notification will be used to indicate
+ * various events.
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
+ const sctp_association_t *asoc, __u32 indication, int priority)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_rcv_pdapi_event *pd;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
+ MSG_NOTIFICATION, priority);
+ if (!event)
+ goto fail;
+
+ skb = sctp_event2skb(event);
+ pd = (struct sctp_rcv_pdapi_event *)
+ skb_put(skb, sizeof(struct sctp_rcv_pdapi_event));
+
+ /* pdapi_type
+ * It should be SCTP_PARTIAL_DELIVERY_EVENT
+ *
+ * pdapi_flags: 16 bits (unsigned integer)
+ * Currently unused.
+ */
+ pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
+ pd->pdapi_flags = 0;
+
+ /* pdapi_length: 32 bits (unsigned integer)
+ *
+ * This field is the total length of the notification data, including
+ * the notification header. It will generally be sizeof (struct
+ * sctp_rcv_pdapi_event).
+ */
+ pd->pdapi_length = sizeof(struct sctp_rcv_pdapi_event);
+
+ /* pdapi_indication: 32 bits (unsigned integer)
+ *
+ * This field holds the indication being sent to the application.
+ */
+ pd->pdapi_indication = indication;
+
+ /* pdapi_assoc_id: sizeof (sctp_assoc_t)
+ *
+ * The association id field, holds the identifier for the association.
+ */
+ pd->pdapi_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+fail:
+ return NULL;
+}
+
/* Return the notification type, assuming this is a notification
* event.
*/
-__u16 sctp_ulpevent_get_notification_type(const sctp_ulpevent_t *event)
+__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
{
union sctp_notification *notification;
+ struct sk_buff *skb;
- notification = (union sctp_notification *) event->parent->data;
+ skb = sctp_event2skb((struct sctp_ulpevent *)event);
+ notification = (union sctp_notification *) skb->data;
return notification->h.sn_type;
}
/* Copy out the sndrcvinfo into a msghdr. */
-void sctp_ulpevent_read_sndrcvinfo(const sctp_ulpevent_t *event,
+void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *msghdr)
{
if (!sctp_ulpevent_is_notification(event)) {
@@ -771,7 +817,7 @@ void sctp_ulpevent_read_sndrcvinfo(const sctp_ulpevent_t *event,
static void sctp_rcvmsg_rfree(struct sk_buff *skb)
{
sctp_association_t *asoc;
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
/* Current stack structures assume that the rcv buffer is
* per socket. For UDP style sockets this is not true as
@@ -779,16 +825,17 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
* Use the local private area of the skb to track the owning
* association.
*/
- event = (sctp_ulpevent_t *) skb->cb;
+ event = sctp_skb2event(skb);
asoc = event->asoc;
sctp_assoc_rwnd_increase(asoc, skb_headlen(skb));
sctp_association_put(asoc);
}
/* Charge receive window for bytes recieved. */
-static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *asoc)
+static void sctp_ulpevent_set_owner_r(struct sk_buff *skb,
+ sctp_association_t *asoc)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
/* The current stack structures assume that the rcv buffer is
* per socket. For UDP-style sockets this is not true as
@@ -798,7 +845,7 @@ static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *a
*/
sctp_association_hold(asoc);
skb->sk = asoc->base.sk;
- event = (sctp_ulpevent_t *) skb->cb;
+ event = sctp_skb2event(skb);
event->asoc = asoc;
skb->destructor = sctp_rcvmsg_rfree;
@@ -809,26 +856,26 @@ static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *a
/* A simple destructor to give up the reference to the association. */
static void sctp_ulpevent_rfree(struct sk_buff *skb)
{
- sctp_ulpevent_t *event;
-
- event = (sctp_ulpevent_t *)skb->cb;
+ struct sctp_ulpevent *event;
+
+ event = sctp_skb2event(skb);
sctp_association_put(event->asoc);
}
-/* Hold the association in case the msg_name needs read out of
- * the association.
+/* Hold the association in case the msg_name needs read out of
+ * the association.
*/
static void sctp_ulpevent_set_owner(struct sk_buff *skb,
- const sctp_association_t *asoc)
+ const struct sctp_association *asoc)
{
- sctp_ulpevent_t *event;
+ struct sctp_ulpevent *event;
/* Cast away the const, as we are just wanting to
* bump the reference count.
*/
- sctp_association_hold((sctp_association_t *)asoc);
+ sctp_association_hold((struct sctp_association *)asoc);
skb->sk = asoc->base.sk;
- event = (sctp_ulpevent_t *)skb->cb;
- event->asoc = (sctp_association_t *)asoc;
+ event = sctp_skb2event(skb);
+ event->asoc = (struct sctp_association *)asoc;
skb->destructor = sctp_ulpevent_rfree;
}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 04c47bffb8e9..ff1a6415c72d 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -84,6 +84,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
ulpq->asoc = asoc;
skb_queue_head_init(&ulpq->reasm);
skb_queue_head_init(&ulpq->lobby);
+ ulpq->pd_mode = 0;
ulpq->malloced = 0;
return ulpq;
@@ -96,15 +97,16 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
struct sk_buff *skb;
struct sctp_ulpevent *event;
- while ((skb = skb_dequeue(&ulpq->lobby))) {
- event = (struct sctp_ulpevent *) skb->cb;
+ while ((skb = __skb_dequeue(&ulpq->lobby))) {
+ event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
- while ((skb = skb_dequeue(&ulpq->reasm))) {
- event = (struct sctp_ulpevent *) skb->cb;
+ while ((skb = __skb_dequeue(&ulpq->reasm))) {
+ event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
+
}
/* Dispose of a ulpqueue. */
@@ -117,7 +119,7 @@ void sctp_ulpq_free(struct sctp_ulpq *ulpq)
/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
- int priority)
+ int priority)
{
struct sk_buff_head temp;
sctp_data_chunk_t *hdr;
@@ -125,12 +127,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
- /* FIXME: Instead of event being the skb clone, we really should
- * have a new skb based chunk structure that we can convert to
- * an event. Temporarily, I'm carrying a few chunk fields in
- * the event to allow reassembly. Its too painful to change
- * everything at once. --jgrimm
- */
+ /* Create an event from the incoming chunk. */
event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, priority);
if (!event)
return -ENOMEM;
@@ -139,10 +136,10 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
event = sctp_ulpq_reasm(ulpq, event);
/* Do ordering if needed. */
- if (event) {
+ if ((event) && (event->msg_flags & MSG_EOR)){
/* Create a temporary list to collect chunks on. */
skb_queue_head_init(&temp);
- skb_queue_tail(&temp, event->parent);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
event = sctp_ulpq_order(ulpq, event);
}
@@ -154,10 +151,40 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
return 0;
}
+/* Clear the partial delivery mode for this socket. Note: This
+ * assumes that no association is currently in partial delivery mode.
+ */
+int sctp_clear_pd(struct sock *sk)
+{
+ struct sctp_opt *sp;
+ sp = sctp_sk(sk);
+
+ sp->pd_mode = 0;
+ if (!skb_queue_empty(&sp->pd_lobby)) {
+ struct list_head *list;
+ sctp_skb_list_tail(&sp->pd_lobby, &sk->receive_queue);
+ list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
+ INIT_LIST_HEAD(list);
+ return 1;
+ }
+ return 0;
+}
+
+/* Clear the pd_mode and restart any pending messages waiting for delivery. */
+static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
+{
+ ulpq->pd_mode = 0;
+ return sctp_clear_pd(ulpq->asoc->base.sk);
+}
+
+
+
/* Add a new event for propogation to the ULP. */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
struct sock *sk = ulpq->asoc->base.sk;
+ struct sk_buff_head *queue;
+ int clear_pd = 0;
/* If the socket is just going to throw this away, do not
* even try to deliver it.
@@ -169,29 +196,55 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
goto out_free;
+ /* If we are in partial delivery mode, post to the lobby until
+ * partial delivery is cleared, unless, of course _this_ is
+ * the association the cause of the partial delivery.
+ */
+
+ if (!sctp_sk(sk)->pd_mode) {
+ queue = &sk->receive_queue;
+ } else if (ulpq->pd_mode) {
+ if (event->msg_flags & MSG_NOTIFICATION)
+ queue = &sctp_sk(sk)->pd_lobby;
+ else {
+ clear_pd = event->msg_flags & MSG_EOR;
+ queue = &sk->receive_queue;
+ }
+ } else
+ queue = &sctp_sk(sk)->pd_lobby;
+
+
/* If we are harvesting multiple skbs they will be
* collected on a list.
*/
- if (event->parent->list)
- sctp_skb_list_tail(event->parent->list, &sk->receive_queue);
+ if (sctp_event2skb(event)->list)
+ sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
else
- skb_queue_tail(&sk->receive_queue, event->parent);
+ skb_queue_tail(queue, sctp_event2skb(event));
- wake_up_interruptible(sk->sleep);
+ /* Did we just complete partial delivery and need to get
+ * rolling again? Move pending data to the receive
+ * queue.
+ */
+ if (clear_pd)
+ sctp_ulpq_clear_pd(ulpq);
+
+ if (queue == &sk->receive_queue)
+ wake_up_interruptible(sk->sleep);
return 1;
out_free:
- if (event->parent->list)
- skb_queue_purge(event->parent->list);
+ if (sctp_event2skb(event)->list)
+ skb_queue_purge(sctp_event2skb(event)->list);
else
- kfree_skb(event->parent);
+ kfree_skb(sctp_event2skb(event));
return 0;
}
/* 2nd Level Abstractions */
/* Helper function to store chunks that need to be reassembled. */
-static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
+static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
@@ -202,7 +255,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
/* Find the right place in this list. We store them by TSN. */
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
- cevent = (struct sctp_ulpevent *)pos->cb;
+ cevent = sctp_skb2event(pos);
ctsn = cevent->sndrcvinfo.sinfo_tsn;
if (TSN_lt(tsn, ctsn))
@@ -211,9 +264,10 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
/* If the queue is empty, we have a different function to call. */
if (skb_peek(&ulpq->reasm))
- __skb_insert(event->parent, pos->prev, pos, &ulpq->reasm);
+ __skb_insert(sctp_event2skb(event), pos->prev, pos,
+ &ulpq->reasm);
else
- __skb_queue_tail(&ulpq->reasm, event->parent);
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
}
/* Helper function to return an event corresponding to the reassembled
@@ -231,7 +285,10 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
/* Store the pointer to the 2nd skb */
- pos = f_frag->next;
+ if (f_frag == l_frag)
+ pos = NULL;
+ else
+ pos = f_frag->next;
/* Get the last skb in the f_frag's frag_list if present. */
for (last = list; list; last = list, list = list->next);
@@ -246,7 +303,8 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
/* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, f_frag->list);
- do {
+ while (pos) {
+
pnext = pos->next;
/* Update the len and data_len fields of the first fragment. */
@@ -262,25 +320,27 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
pos->next = pnext;
pos = pnext;
- } while (1);
+ };
- event = (sctp_ulpevent_t *) f_frag->cb;
+ event = sctp_skb2event(f_frag);
+ SCTP_INC_STATS(SctpReasmUsrMsgs);
return event;
}
+
/* Helper function to check if an incoming chunk has filled up the last
* missing fragment in a SCTP datagram and return the corresponding event.
*/
-static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
+static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *tmp;
- sctp_ulpevent_t *cevent;
+ struct sctp_ulpevent *cevent;
struct sk_buff *first_frag = NULL;
__u32 ctsn, next_tsn;
- sctp_ulpevent_t *retval = NULL;
+ struct sctp_ulpevent *retval = NULL;
- /* Initialized to 0 just to avoid compiler warning message. Will
+ /* Initialized to 0 just to avoid compiler warning message. Will
* never be used with this value. It is referenced only after it
* is set when we find the first fragment of a message.
*/
@@ -296,10 +356,10 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *
* start the next pass when we find another first fragment.
*/
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
- cevent = (sctp_ulpevent_t *) pos->cb;
+ cevent = sctp_skb2event(pos);
ctsn = cevent->sndrcvinfo.sinfo_tsn;
- switch (cevent->chunk_flags & SCTP_DATA_FRAG_MASK) {
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
first_frag = pos;
next_tsn = ctsn + 1;
@@ -313,7 +373,7 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *
break;
case SCTP_DATA_LAST_FRAG:
- if ((first_frag) && (ctsn == next_tsn))
+ if (first_frag && (ctsn == next_tsn))
retval = sctp_make_reassembled_event(
first_frag, pos);
else
@@ -324,42 +384,170 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *
/* We have the reassembled event. There is no need to look
* further.
*/
- if (retval)
+ if (retval) {
+ retval->msg_flags |= MSG_EOR;
break;
+ }
}
return retval;
}
-/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
- * need reassembling.
- */
-static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
- sctp_ulpevent_t *event)
+/* Retrieve the next set of fragments of a partial message. */
+static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
- sctp_ulpevent_t *retval = NULL;
+ struct sk_buff *pos, *tmp, *last_frag, *first_frag;
+ struct sctp_ulpevent *cevent;
+ __u32 ctsn, next_tsn;
+ int is_last;
+ struct sctp_ulpevent *retval;
+
+ /* The chunks are held in the reasm queue sorted by TSN.
+ * Walk through the queue sequentially and look for the first
+ * sequence of fragmented chunks.
+ */
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return NULL;
+
+ last_frag = first_frag = NULL;
+ retval = NULL;
+ next_tsn = 0;
+ is_last = 0;
+
+ sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+ cevent = sctp_skb2event(pos);
+ ctsn = cevent->sndrcvinfo.sinfo_tsn;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag) {
+ first_frag = pos;
+ next_tsn = ctsn + 1;
+ last_frag = pos;
+ } else if (next_tsn == ctsn)
+ next_tsn++;
+ else
+ goto done;
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag)
+ first_frag = pos;
+ else if (ctsn != next_tsn)
+ goto done;
+ last_frag = pos;
+ is_last = 1;
+ goto done;
+ default:
+ return NULL;
+ };
+ }
- /* FIXME: We should be using some new chunk structure here
- * instead of carrying chunk fields in the event structure.
- * This is temporary as it is too painful to change everything
- * at once.
+ /* We have the reassembled event. There is no need to look
+ * further.
*/
+done:
+ retval = sctp_make_reassembled_event(first_frag, last_frag);
+ if (is_last)
+ retval->msg_flags |= MSG_EOR;
+
+ return retval;
+}
+
+
+/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
+ * need reassembling.
+ */
+static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *retval = NULL;
/* Check if this is part of a fragmented message. */
- if (SCTP_DATA_NOT_FRAG == (event->chunk_flags & SCTP_DATA_FRAG_MASK))
+ if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+ event->msg_flags |= MSG_EOR;
return event;
+ }
sctp_ulpq_store_reasm(ulpq, event);
- retval = sctp_ulpq_retrieve_reassembled(ulpq);
+ if (!ulpq->pd_mode)
+ retval = sctp_ulpq_retrieve_reassembled(ulpq);
+ else {
+ __u32 ctsn, ctsnap;
+
+ /* Do not even bother unless this is the next tsn to
+ * be delivered.
+ */
+ ctsn = event->sndrcvinfo.sinfo_tsn;
+ ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
+ if (TSN_lte(ctsn, ctsnap))
+ retval = sctp_ulpq_retrieve_partial(ulpq);
+ }
return retval;
}
+/* Retrieve the first part (sequential fragments) for partial delivery. */
+static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
+{
+ struct sk_buff *pos, *tmp, *last_frag, *first_frag;
+ struct sctp_ulpevent *cevent;
+ __u32 ctsn, next_tsn;
+ struct sctp_ulpevent *retval;
+
+ /* The chunks are held in the reasm queue sorted by TSN.
+ * Walk through the queue sequentially and look for a sequence of
+ * fragmented chunks that start a datagram.
+ */
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return NULL;
+
+ last_frag = first_frag = NULL;
+ retval = NULL;
+ next_tsn = 0;
+
+ sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+ cevent = sctp_skb2event(pos);
+ ctsn = cevent->sndrcvinfo.sinfo_tsn;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (!first_frag) {
+ first_frag = pos;
+ next_tsn = ctsn + 1;
+ last_frag = pos;
+ } else
+ goto done;
+ break;
+
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag)
+ return NULL;
+ if (ctsn == next_tsn) {
+ next_tsn++;
+ last_frag = pos;
+ } else
+ goto done;
+ break;
+ default:
+ return NULL;
+ };
+ }
+
+ /* We have the reassembled event. There is no need to look
+ * further.
+ */
+done:
+ retval = sctp_make_reassembled_event(first_frag, last_frag);
+ return retval;
+}
+
/* Helper function to gather skbs that have possibly become
* ordered by an an incoming chunk.
*/
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
- sctp_ulpevent_t *event)
+ struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
@@ -373,7 +561,7 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
/* We are holding the chunks by stream, by SSN. */
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
- cevent = (sctp_ulpevent_t *) pos->cb;
+ cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->sndrcvinfo.sinfo_stream;
cssn = cevent->sndrcvinfo.sinfo_ssn;
@@ -390,32 +578,31 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
/* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, sid);
-
+
__skb_unlink(pos, pos->list);
/* Attach all gathered skbs to the event. */
- __skb_queue_tail(event->parent->list, pos);
+ __skb_queue_tail(sctp_event2skb(event)->list, pos);
}
}
/* Helper function to store chunks needing ordering. */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
- sctp_ulpevent_t *event)
+ struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
- sctp_ulpevent_t *cevent;
+ struct sctp_ulpevent *cevent;
__u16 sid, csid;
__u16 ssn, cssn;
sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn;
-
/* Find the right place in this list. We store them by
* stream ID and then by SSN.
*/
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
- cevent = (sctp_ulpevent_t *) pos->cb;
+ cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->sndrcvinfo.sinfo_stream;
cssn = cevent->sndrcvinfo.sinfo_ssn;
@@ -427,25 +614,20 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
/* If the queue is empty, we have a different function to call. */
if (skb_peek(&ulpq->lobby))
- __skb_insert(event->parent, pos->prev, pos, &ulpq->lobby);
+ __skb_insert(sctp_event2skb(event), pos->prev, pos,
+ &ulpq->lobby);
else
- __skb_queue_tail(&ulpq->lobby, event->parent);
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
}
-static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq,
- sctp_ulpevent_t *event)
+static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
{
__u16 sid, ssn;
struct sctp_stream *in;
- /* FIXME: We should be using some new chunk structure here
- * instead of carrying chunk fields in the event structure.
- * This is temporary as it is too painful to change everything
- * at once.
- */
-
/* Check if this message needs ordering. */
- if (SCTP_DATA_UNORDERED & event->chunk_flags)
+ if (SCTP_DATA_UNORDERED & event->msg_flags)
return event;
/* Note: The stream ID must be verified before this routine. */
@@ -472,3 +654,54 @@ static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq,
return event;
}
+
+/* Partially deliver the first message as there is pressure on rwnd. */
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, int priority)
+{
+ struct sctp_ulpevent *event;
+
+ /* Are we already in partial delivery mode? */
+ if (!sctp_sk(ulpq->asoc->base.sk)->pd_mode) {
+
+ /* Is partial delivery possible? */
+ event = sctp_ulpq_retrieve_first(ulpq);
+ /* Send event to the ULP. */
+ if (event) {
+ sctp_ulpq_tail_event(ulpq, event);
+ sctp_sk(ulpq->asoc->base.sk)->pd_mode = 1;
+ ulpq->pd_mode = 1;
+ return;
+ }
+ }
+
+ /* Assert: Either already in partial delivery mode or partial
+ * delivery wasn't possible, so now the only recourse is
+ * to renege. FIXME: Add renege support starts here.
+ */
+}
+
+/* Notify the application if an association is aborted and in
+ * partial delivery mode. Send up any pending received messages.
+ */
+void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int priority)
+{
+ struct sctp_ulpevent *ev = NULL;
+ struct sock *sk;
+
+ if (!ulpq->pd_mode)
+ return;
+
+ sk = ulpq->asoc->base.sk;
+ if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
+ &sctp_sk(sk)->subscribe))
+ ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
+ SCTP_PARTIAL_DELIVERY_ABORTED,
+ priority);
+ if (ev)
+ skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));
+
+ /* If there is data waiting, send it up the socket now. */
+ if (sctp_ulpq_clear_pd(ulpq) || ev)
+ wake_up_interruptible(sk->sleep);
+}