author    David Howells <dhowells@redhat.com>    2003-09-08 20:16:16 -0700
committer Linus Torvalds <torvalds@home.osdl.org>    2003-09-08 20:16:16 -0700
commit    d2a65691b520f2687329ab8715d830abcf801e46 (patch)
tree      8bf86e568a477f18eacef349494e7efe9280edf4
parent    7770f5fead387c20042178ad0975276504840660 (diff)
[PATCH] RxRPC update
Here's a patch to update the RxRPC driver. Most of it is CodingStyle fixes, but it also includes a few miscellaneous bug fixes. The kernel's fixed-width integer types are also converted to their C99 stdint equivalents (eg: u32 -> uint32_t).
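For illustration only (not part of the patch itself), a minimal userspace sketch of the type-conversion pattern applied throughout; the struct and field names below are hypothetical, and "unsigned int"/"unsigned short"/"unsigned char" merely stand in for the kernel's u32/u16/u8 typedefs:

/* illustrative sketch: the u32/u16/u8 -> uint32_t/uint16_t/uint8_t swap */
#include <stdint.h>
#include <stdio.h>

struct example_before {			/* kernel-typed form (hypothetical) */
	unsigned int	id;		/* stand-in for u32 */
	unsigned short	service;	/* stand-in for u16 */
	unsigned char	flags;		/* stand-in for u8 */
};

struct example_after {			/* C99 stdint form, as the patch uses */
	uint32_t	id;
	uint16_t	service;
	uint8_t		flags;
};

int main(void)
{
	/* the two layouts describe the same fixed-width fields */
	printf("before=%zu after=%zu\n",
	       sizeof(struct example_before), sizeof(struct example_after));
	return 0;
}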
-rw-r--r--  include/rxrpc/call.h        |  14
-rw-r--r--  include/rxrpc/connection.h  |  21
-rw-r--r--  include/rxrpc/message.h     |   8
-rw-r--r--  include/rxrpc/packet.h      |  47
-rw-r--r--  include/rxrpc/peer.h        |   5
-rw-r--r--  include/rxrpc/rxrpc.h       |   2
-rw-r--r--  include/rxrpc/transport.h   |  10
-rw-r--r--  include/rxrpc/types.h       |   4
-rw-r--r--  net/rxrpc/Makefile          |  12
-rw-r--r--  net/rxrpc/call.c            | 879
-rw-r--r--  net/rxrpc/connection.c      | 305
-rw-r--r--  net/rxrpc/internal.h        |   4
-rw-r--r--  net/rxrpc/krxiod.c          |  65
-rw-r--r--  net/rxrpc/krxsecd.c         |  66
-rw-r--r--  net/rxrpc/krxtimod.c        |  61
-rw-r--r--  net/rxrpc/main.c            |  77
-rw-r--r--  net/rxrpc/peer.c            | 131
-rw-r--r--  net/rxrpc/proc.c            | 136
-rw-r--r--  net/rxrpc/sysctl.c          |  19
-rw-r--r--  net/rxrpc/transport.c       | 291
20 files changed, 1276 insertions, 881 deletions
diff --git a/include/rxrpc/call.h b/include/rxrpc/call.h
index 5afe044b3f46..6ac1df7012e9 100644
--- a/include/rxrpc/call.h
+++ b/include/rxrpc/call.h
@@ -67,8 +67,8 @@ struct rxrpc_call
wait_queue_head_t waitq; /* wait queue for events to happen */
struct list_head link; /* general internal list link */
struct list_head call_link; /* master call list link */
- u32 chan_ix; /* connection channel index (net order) */
- u32 call_id; /* call ID on connection (net order) */
+ uint32_t chan_ix; /* connection channel index (net order) */
+ uint32_t call_id; /* call ID on connection (net order) */
unsigned long cjif; /* jiffies at call creation */
unsigned long flags; /* control flags */
#define RXRPC_CALL_ACKS_TIMO 0x00000001 /* ACKS timeout reached */
@@ -103,7 +103,7 @@ struct rxrpc_call
char ackr_dfr_perm; /* request for deferred ACKs permitted */
rxrpc_seq_t ackr_dfr_seq; /* seqno for deferred ACK */
struct rxrpc_ackpacket ackr; /* pending normal ACK packet */
- u8 ackr_array[RXRPC_CALL_ACK_WINDOW_SIZE]; /* ACK records */
+ uint8_t ackr_array[RXRPC_CALL_ACK_WINDOW_SIZE]; /* ACK records */
/* presentation layer */
char app_last_rcv; /* T if received last packet from remote end */
@@ -131,14 +131,14 @@ struct rxrpc_call
struct list_head app_attn_link; /* application attention list linkage */
size_t app_mark; /* trigger callback when app_ready_qty>=app_mark */
char app_async_read; /* T if in async-read mode */
- u8 *app_read_buf; /* application async read buffer (app_mark size) */
- u8 *app_scr_alloc; /* application scratch allocation pointer */
+ uint8_t *app_read_buf; /* application async read buffer (app_mark size) */
+ uint8_t *app_scr_alloc; /* application scratch allocation pointer */
void *app_scr_ptr; /* application pointer into scratch buffer */
#define RXRPC_APP_MARK_EOF 0xFFFFFFFFU /* mark at end of input */
/* application scratch buffer */
- u8 app_scratch[0] __attribute__((aligned(sizeof(long))));
+ uint8_t app_scratch[0] __attribute__((aligned(sizeof(long))));
};
#define RXRPC_CALL_SCRATCH_SIZE (PAGE_SIZE - sizeof(struct rxrpc_call))
@@ -206,7 +206,7 @@ extern int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t si
extern int rxrpc_call_write_data(struct rxrpc_call *call,
size_t sioc,
struct iovec siov[],
- u8 rxhdr_flags,
+ uint8_t rxhdr_flags,
int alloc_flags,
int dup_data,
size_t *size_sent);
diff --git a/include/rxrpc/connection.h b/include/rxrpc/connection.h
index fc10fed01b21..14de354724f9 100644
--- a/include/rxrpc/connection.h
+++ b/include/rxrpc/connection.h
@@ -34,6 +34,7 @@ struct rxrpc_connection
struct list_head link; /* link in peer's list */
struct list_head proc_link; /* link in proc list */
struct list_head err_link; /* link in ICMP error processing list */
+ struct list_head id_link; /* link in ID grant list */
struct sockaddr_in addr; /* remote address */
struct rxrpc_call *channels[4]; /* channels (active calls) */
wait_queue_head_t chanwait; /* wait for channel to become available */
@@ -44,19 +45,19 @@ struct rxrpc_connection
rxrpc_serial_t serial_counter; /* packet serial number counter */
/* the following should all be in net order */
- u32 in_epoch; /* peer's epoch */
- u32 out_epoch; /* my epoch */
- u32 conn_id; /* connection ID, appropriately shifted */
- u16 service_id; /* service ID */
- u8 security_ix; /* security ID */
- u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */
- u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
+ uint32_t in_epoch; /* peer's epoch */
+ uint32_t out_epoch; /* my epoch */
+ uint32_t conn_id; /* connection ID, appropriately shifted */
+ uint16_t service_id; /* service ID */
+ uint8_t security_ix; /* security ID */
+ uint8_t in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */
+ uint8_t out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
};
extern int rxrpc_create_connection(struct rxrpc_transport *trans,
- u16 port,
- u32 addr,
- unsigned short service_id,
+ uint16_t port,
+ uint32_t addr,
+ uint16_t service_id,
void *security,
struct rxrpc_connection **_conn);
diff --git a/include/rxrpc/message.h b/include/rxrpc/message.h
index 2e43c03c6857..9be208ab079e 100644
--- a/include/rxrpc/message.h
+++ b/include/rxrpc/message.h
@@ -9,8 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifndef _H_3AD3363A_3A9C_11D6_83D8_0002B3163499
-#define _H_3AD3363A_3A9C_11D6_83D8_0002B3163499
+#ifndef _LINUX_RXRPC_MESSAGE_H
+#define _LINUX_RXRPC_MESSAGE_H
#include <rxrpc/packet.h>
@@ -61,7 +61,7 @@ static inline void rxrpc_put_message(struct rxrpc_message *msg)
extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
struct rxrpc_call *call,
- u8 type,
+ uint8_t type,
int count,
struct iovec diov[],
int alloc_flags,
@@ -69,4 +69,4 @@ extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
-#endif /* _H_3AD3363A_3A9C_11D6_83D8_0002B3163499 */
+#endif /* _LINUX_RXRPC_MESSAGE_H */
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index 78999077f5b8..068813d65345 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -27,21 +27,21 @@ extern size_t RXRPC_MAX_PACKET_SIZE;
*/
struct rxrpc_header
{
- u32 epoch; /* client boot timestamp */
+ uint32_t epoch; /* client boot timestamp */
- u32 cid; /* connection and channel ID */
+ uint32_t cid; /* connection and channel ID */
#define RXRPC_MAXCALLS 4 /* max active calls per conn */
#define RXRPC_CHANNELMASK (RXRPC_MAXCALLS-1) /* mask for channel ID */
#define RXRPC_CIDMASK (~RXRPC_CHANNELMASK) /* mask for connection ID */
#define RXRPC_CIDSHIFT 2 /* shift for connection ID */
- u32 callNumber; /* call ID (0 for connection-level packets) */
+ uint32_t callNumber; /* call ID (0 for connection-level packets) */
#define RXRPC_PROCESS_MAXCALLS (1<<2) /* maximum number of active calls per conn (power of 2) */
- u32 seq; /* sequence number of pkt in call stream */
- u32 serial; /* serial number of pkt sent to network */
+ uint32_t seq; /* sequence number of pkt in call stream */
+ uint32_t serial; /* serial number of pkt sent to network */
- u8 type; /* packet type */
+ uint8_t type; /* packet type */
#define RXRPC_PACKET_TYPE_DATA 1 /* data */
#define RXRPC_PACKET_TYPE_ACK 2 /* ACK */
#define RXRPC_PACKET_TYPE_BUSY 3 /* call reject */
@@ -52,7 +52,7 @@ struct rxrpc_header
#define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */
#define RXRPC_N_PACKET_TYPES 9 /* number of packet types (incl type 0) */
- u8 flags; /* packet flags */
+ uint8_t flags; /* packet flags */
#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
#define RXRPC_REQUEST_ACK 0x02 /* request an unconditional ACK of this packet */
#define RXRPC_LAST_PACKET 0x04 /* the last packet from this side for this call */
@@ -60,10 +60,10 @@ struct rxrpc_header
#define RXRPC_JUMBO_PACKET 0x20 /* [DATA] this is a jumbo packet */
#define RXRPC_SLOW_START_OK 0x20 /* [ACK] slow start supported */
- u8 userStatus; /* app-layer defined status */
- u8 securityIndex; /* security protocol ID */
- u16 _rsvd; /* reserved (used by kerberos security as cksum) */
- u16 serviceId; /* service ID */
+ uint8_t userStatus; /* app-layer defined status */
+ uint8_t securityIndex; /* security protocol ID */
+ uint16_t _rsvd; /* reserved (used by kerberos security as cksum) */
+ uint16_t serviceId; /* service ID */
} __attribute__((packed));
@@ -83,9 +83,9 @@ extern const char *rxrpc_pkts[];
*/
struct rxrpc_jumbo_header
{
- u8 flags; /* packet flags (as per rxrpc_header) */
- u8 pad;
- u16 _rsvd; /* reserved (used by kerberos security as cksum) */
+ uint8_t flags; /* packet flags (as per rxrpc_header) */
+ uint8_t pad;
+ uint16_t _rsvd; /* reserved (used by kerberos security as cksum) */
};
#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
@@ -97,13 +97,14 @@ struct rxrpc_jumbo_header
*/
struct rxrpc_ackpacket
{
- u16 bufferSpace; /* number of packet buffers available */
- u16 maxSkew; /* diff between serno being ACK'd and highest serial no received */
- u32 firstPacket; /* sequence no of first ACK'd packet in attached list */
- u32 previousPacket; /* sequence no of previous packet received */
- u32 serial; /* serial no of packet that prompted this ACK */
-
- u8 reason; /* reason for ACK */
+ uint16_t bufferSpace; /* number of packet buffers available */
+ uint16_t maxSkew; /* diff between serno being ACK'd and highest serial no
+ * received */
+ uint32_t firstPacket; /* sequence no of first ACK'd packet in attached list */
+ uint32_t previousPacket; /* sequence no of previous packet received */
+ uint32_t serial; /* serial no of packet that prompted this ACK */
+
+ uint8_t reason; /* reason for ACK */
#define RXRPC_ACK_REQUESTED 1 /* ACK was requested on packet */
#define RXRPC_ACK_DUPLICATE 2 /* duplicate packet received */
#define RXRPC_ACK_OUT_OF_SEQUENCE 3 /* out of sequence packet received */
@@ -114,10 +115,10 @@ struct rxrpc_ackpacket
#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
- u8 nAcks; /* number of ACKs */
+ uint8_t nAcks; /* number of ACKs */
#define RXRPC_MAXACKS 255
- u8 acks[0]; /* list of ACK/NAKs */
+ uint8_t acks[0]; /* list of ACK/NAKs */
#define RXRPC_ACK_TYPE_NACK 0
#define RXRPC_ACK_TYPE_ACK 1
diff --git a/include/rxrpc/peer.h b/include/rxrpc/peer.h
index 0ab2730541ed..07e3a51b60b6 100644
--- a/include/rxrpc/peer.h
+++ b/include/rxrpc/peer.h
@@ -42,7 +42,10 @@ struct rxrpc_peer
struct rxrpc_timer timeout; /* timeout for grave destruction */
struct list_head link; /* link in transport's peer list */
struct list_head proc_link; /* link in /proc list */
- rwlock_t conn_lock; /* lock for connections */
+ rwlock_t conn_idlock; /* lock for connection IDs */
+ struct list_head conn_idlist; /* list of connections granted IDs */
+ uint32_t conn_idcounter; /* connection ID counter */
+ rwlock_t conn_lock; /* lock for active/dead connections */
struct list_head conn_active; /* active connections to/from this peer */
struct list_head conn_graveyard; /* graveyard for inactive connections */
spinlock_t conn_gylock; /* lock for conn_graveyard */
diff --git a/include/rxrpc/rxrpc.h b/include/rxrpc/rxrpc.h
index 454d59933675..df6595c32c37 100644
--- a/include/rxrpc/rxrpc.h
+++ b/include/rxrpc/rxrpc.h
@@ -14,7 +14,7 @@
#ifdef __KERNEL__
-extern u32 rxrpc_epoch;
+extern uint32_t rxrpc_epoch;
extern int rxrpc_ktrace;
extern int rxrpc_kdebug;
diff --git a/include/rxrpc/transport.h b/include/rxrpc/transport.h
index b9c225533158..92fb49c7d4b9 100644
--- a/include/rxrpc/transport.h
+++ b/include/rxrpc/transport.h
@@ -85,10 +85,11 @@ extern int rxrpc_create_transport(unsigned short port,
static inline void rxrpc_get_transport(struct rxrpc_transport *trans)
{
- if (atomic_read(&trans->usage)<=0)
+ if (atomic_read(&trans->usage) <= 0)
BUG();
atomic_inc(&trans->usage);
- //printk("rxrpc_get_transport(%p{u=%d})\n",trans,atomic_read(&trans->usage));
+ //printk("rxrpc_get_transport(%p{u=%d})\n",
+ // trans, atomic_read(&trans->usage));
}
extern void rxrpc_put_transport(struct rxrpc_transport *trans);
@@ -99,11 +100,6 @@ extern int rxrpc_add_service(struct rxrpc_transport *trans,
extern void rxrpc_del_service(struct rxrpc_transport *trans,
struct rxrpc_service *srv);
-#if 0
-extern int rxrpc_trans_add_connection(struct rxrpc_transport *trans,
- struct rxrpc_connection *conn);
-#endif
-
extern void rxrpc_trans_receive_packet(struct rxrpc_transport *trans);
extern int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
diff --git a/include/rxrpc/types.h b/include/rxrpc/types.h
index 40700bc61a6f..2f37ad8bb582 100644
--- a/include/rxrpc/types.h
+++ b/include/rxrpc/types.h
@@ -19,8 +19,8 @@
#include <linux/spinlock.h>
#include <asm/atomic.h>
-typedef unsigned rxrpc_seq_t; /* Rx message sequence number */
-typedef unsigned rxrpc_serial_t; /* Rx message serial number */
+typedef uint32_t rxrpc_seq_t; /* Rx message sequence number */
+typedef uint32_t rxrpc_serial_t; /* Rx message serial number */
struct rxrpc_call;
struct rxrpc_connection;
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index 01ff446eae18..6efcb6f162a0 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -2,7 +2,9 @@
# Makefile for Linux kernel Rx RPC
#
-rxrpc-y := \
+#CFLAGS += -finstrument-functions
+
+rxrpc-objs := \
call.o \
connection.o \
krxiod.o \
@@ -13,7 +15,11 @@ rxrpc-y := \
rxrpc_syms.o \
transport.o
-rxrpc-$(CONFIG_PROC_FS) += proc.o
-rxrpc-$(CONFIG_SYSCTL) += sysctl.o
+ifeq ($(CONFIG_PROC_FS),y)
+rxrpc-objs += proc.o
+endif
+ifeq ($(CONFIG_SYSCTL),y)
+rxrpc-objs += sysctl.o
+endif
obj-$(CONFIG_RXRPC) := rxrpc.o
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c
index a0e81b58191d..0b8193629153 100644
--- a/net/rxrpc/call.c
+++ b/net/rxrpc/call.c
@@ -53,20 +53,25 @@ const char *rxrpc_call_error_states[] = {
};
const char *rxrpc_pkts[] = {
- "?00", "data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug",
+ "?00",
+ "data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug",
"?09", "?10", "?11", "?12", "?13", "?14", "?15"
};
const char *rxrpc_acks[] = {
- "---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", "-?-"
+ "---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
+ "-?-"
};
static const char _acktype[] = "NA-";
static void rxrpc_call_receive_packet(struct rxrpc_call *call);
-static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc_message *msg);
-static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_message *msg);
-static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, rxrpc_seq_t higest);
+static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
+ struct rxrpc_message *msg);
+static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
+ struct rxrpc_message *msg);
+static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
+ rxrpc_seq_t higest);
static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest);
static int __rxrpc_call_read_data(struct rxrpc_call *call);
@@ -75,7 +80,7 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call,
rxrpc_seq_t seq,
size_t count);
#define _state(call) \
- _debug("[[[ state %s ]]]",rxrpc_call_states[call->app_call_state]);
+ _debug("[[[ state %s ]]]", rxrpc_call_states[call->app_call_state]);
static void rxrpc_call_default_attn_func(struct rxrpc_call *call)
{
@@ -103,7 +108,7 @@ static void __rxrpc_call_acks_timeout(unsigned long _call)
{
struct rxrpc_call *call = (struct rxrpc_call *) _call;
- _debug("ACKS TIMEOUT %05lu",jiffies - call->cjif);
+ _debug("ACKS TIMEOUT %05lu", jiffies - call->cjif);
call->flags |= RXRPC_CALL_ACKS_TIMO;
rxrpc_krxiod_queue_call(call);
@@ -113,7 +118,7 @@ static void __rxrpc_call_rcv_timeout(unsigned long _call)
{
struct rxrpc_call *call = (struct rxrpc_call *) _call;
- _debug("RCV TIMEOUT %05lu",jiffies - call->cjif);
+ _debug("RCV TIMEOUT %05lu", jiffies - call->cjif);
call->flags |= RXRPC_CALL_RCV_TIMO;
rxrpc_krxiod_queue_call(call);
@@ -133,15 +138,18 @@ static void __rxrpc_call_ackr_timeout(unsigned long _call)
/*
* calculate a timeout based on an RTT value
*/
-static inline unsigned long __rxrpc_rtt_based_timeout(struct rxrpc_call *call, unsigned long val)
+static inline unsigned long __rxrpc_rtt_based_timeout(struct rxrpc_call *call,
+ unsigned long val)
{
- unsigned long expiry = call->conn->peer->rtt / (1000000/HZ);
+ unsigned long expiry = call->conn->peer->rtt / (1000000 / HZ);
expiry += 10;
- if (expiry<HZ/25) expiry = HZ/25;
- if (expiry>HZ) expiry = HZ;
+ if (expiry < HZ / 25)
+ expiry = HZ / 25;
+ if (expiry > HZ)
+ expiry = HZ;
- _leave(" = %lu jiffies",expiry);
+ _leave(" = %lu jiffies", expiry);
return jiffies + expiry;
} /* end __rxrpc_rtt_based_timeout() */
@@ -154,7 +162,7 @@ static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
{
struct rxrpc_call *call;
- _enter("%p",conn);
+ _enter("%p", conn);
/* allocate and initialise a call record */
call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL);
@@ -163,7 +171,7 @@ static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
return -ENOMEM;
}
- atomic_set(&call->usage,1);
+ atomic_set(&call->usage, 1);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->lock);
@@ -200,7 +208,7 @@ static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
call->cjif = jiffies;
- _leave(" = 0 (%p)",call);
+ _leave(" = 0 (%p)", call);
*_call = call;
@@ -217,34 +225,37 @@ int rxrpc_create_call(struct rxrpc_connection *conn,
rxrpc_call_aemap_func_t aemap,
struct rxrpc_call **_call)
{
- DECLARE_WAITQUEUE(myself,current);
+ DECLARE_WAITQUEUE(myself, current);
struct rxrpc_call *call;
int ret, cix, loop;
- _enter("%p",conn);
+ _enter("%p", conn);
/* allocate and initialise a call record */
- ret = __rxrpc_create_call(conn,&call);
- if (ret<0) {
- _leave(" = %d",ret);
+ ret = __rxrpc_create_call(conn, &call);
+ if (ret < 0) {
+ _leave(" = %d", ret);
return ret;
}
call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS;
- if (attn) call->app_attn_func = attn;
- if (error) call->app_error_func = error;
- if (aemap) call->app_aemap_func = aemap;
+ if (attn)
+ call->app_attn_func = attn;
+ if (error)
+ call->app_error_func = error;
+ if (aemap)
+ call->app_aemap_func = aemap;
_state(call);
spin_lock(&conn->lock);
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&conn->chanwait,&myself);
+ add_wait_queue(&conn->chanwait, &myself);
try_again:
/* try to find an unused channel */
- for (cix=0; cix<4; cix++)
+ for (cix = 0; cix < 4; cix++)
if (!conn->channels[cix])
goto obtained_chan;
@@ -263,14 +274,15 @@ int rxrpc_create_call(struct rxrpc_connection *conn,
/* got a channel - now attach to the connection */
obtained_chan:
- remove_wait_queue(&conn->chanwait,&myself);
+ remove_wait_queue(&conn->chanwait, &myself);
set_current_state(TASK_RUNNING);
/* concoct a unique call number */
next_callid:
call->call_id = htonl(++conn->call_counter);
- for (loop=0; loop<4; loop++)
- if (conn->channels[loop] && conn->channels[loop]->call_id==call->call_id)
+ for (loop = 0; loop < 4; loop++)
+ if (conn->channels[loop] &&
+ conn->channels[loop]->call_id == call->call_id)
goto next_callid;
rxrpc_get_connection(conn);
@@ -281,24 +293,23 @@ int rxrpc_create_call(struct rxrpc_connection *conn,
spin_unlock(&conn->lock);
down_write(&rxrpc_calls_sem);
- list_add_tail(&call->call_link,&rxrpc_calls);
+ list_add_tail(&call->call_link, &rxrpc_calls);
up_write(&rxrpc_calls_sem);
__RXACCT(atomic_inc(&rxrpc_call_count));
*_call = call;
- _leave(" = 0 (call=%p cix=%u)",call,cix);
+ _leave(" = 0 (call=%p cix=%u)", call, cix);
return 0;
error_unwait:
- remove_wait_queue(&conn->chanwait,&myself);
+ remove_wait_queue(&conn->chanwait, &myself);
set_current_state(TASK_RUNNING);
spin_unlock(&conn->lock);
- free_page((unsigned long)call);
- _leave(" = %d",ret);
+ free_page((unsigned long) call);
+ _leave(" = %d", ret);
return ret;
-
} /* end rxrpc_create_call() */
/*****************************************************************************/
@@ -315,18 +326,18 @@ int rxrpc_incoming_call(struct rxrpc_connection *conn,
cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
- _enter("%p,%u,%u",conn,ntohl(msg->hdr.callNumber),cix);
+ _enter("%p,%u,%u", conn, ntohl(msg->hdr.callNumber), cix);
/* allocate and initialise a call record */
- ret = __rxrpc_create_call(conn,&call);
- if (ret<0) {
- _leave(" = %d",ret);
+ ret = __rxrpc_create_call(conn, &call);
+ if (ret < 0) {
+ _leave(" = %d", ret);
return ret;
}
call->pkt_rcv_count = 1;
call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID;
- call->app_mark = sizeof(u32);
+ call->app_mark = sizeof(uint32_t);
_state(call);
@@ -348,20 +359,20 @@ int rxrpc_incoming_call(struct rxrpc_connection *conn,
spin_unlock(&conn->lock);
- if (ret<0) {
- free_page((unsigned long)call);
+ if (ret < 0) {
+ free_page((unsigned long) call);
call = NULL;
}
- if (ret==0) {
+ if (ret == 0) {
down_write(&rxrpc_calls_sem);
- list_add_tail(&call->call_link,&rxrpc_calls);
+ list_add_tail(&call->call_link, &rxrpc_calls);
up_write(&rxrpc_calls_sem);
__RXACCT(atomic_inc(&rxrpc_call_count));
*_call = call;
}
- _leave(" = %d [%p]",ret,call);
+ _leave(" = %d [%p]", ret, call);
return ret;
} /* end rxrpc_incoming_call() */
@@ -377,10 +388,11 @@ void rxrpc_put_call(struct rxrpc_call *call)
_enter("%p{u=%d}",call,atomic_read(&call->usage));
/* sanity check */
- if (atomic_read(&call->usage)<=0)
+ if (atomic_read(&call->usage) <= 0)
BUG();
- /* to prevent a race, the decrement and the de-list must be effectively atomic */
+ /* to prevent a race, the decrement and the de-list must be effectively
+ * atomic */
spin_lock(&conn->lock);
if (likely(!atomic_dec_and_test(&call->usage))) {
spin_unlock(&conn->lock);
@@ -388,7 +400,7 @@ void rxrpc_put_call(struct rxrpc_call *call)
return;
}
- if (conn->channels[ntohl(call->chan_ix)]==call)
+ if (conn->channels[ntohl(call->chan_ix)] == call)
conn->channels[ntohl(call->chan_ix)] = NULL;
spin_unlock(&conn->lock);
@@ -412,25 +424,29 @@ void rxrpc_put_call(struct rxrpc_call *call)
rxrpc_put_message(call->snd_ping);
while (!list_empty(&call->acks_pendq)) {
- msg = list_entry(call->acks_pendq.next,struct rxrpc_message,link);
+ msg = list_entry(call->acks_pendq.next,
+ struct rxrpc_message, link);
list_del(&msg->link);
rxrpc_put_message(msg);
}
while (!list_empty(&call->rcv_receiveq)) {
- msg = list_entry(call->rcv_receiveq.next,struct rxrpc_message,link);
+ msg = list_entry(call->rcv_receiveq.next,
+ struct rxrpc_message, link);
list_del(&msg->link);
rxrpc_put_message(msg);
}
while (!list_empty(&call->app_readyq)) {
- msg = list_entry(call->app_readyq.next,struct rxrpc_message,link);
+ msg = list_entry(call->app_readyq.next,
+ struct rxrpc_message, link);
list_del(&msg->link);
rxrpc_put_message(msg);
}
while (!list_empty(&call->app_unreadyq)) {
- msg = list_entry(call->app_unreadyq.next,struct rxrpc_message,link);
+ msg = list_entry(call->app_unreadyq.next,
+ struct rxrpc_message, link);
list_del(&msg->link);
rxrpc_put_message(msg);
}
@@ -442,7 +458,7 @@ void rxrpc_put_call(struct rxrpc_call *call)
up_write(&rxrpc_calls_sem);
__RXACCT(atomic_dec(&rxrpc_call_count));
- free_page((unsigned long)call);
+ free_page((unsigned long) call);
_leave(" [destroyed]");
} /* end rxrpc_put_call() */
@@ -451,7 +467,8 @@ void rxrpc_put_call(struct rxrpc_call *call)
/*
* actually generate a normal ACK
*/
-static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call, rxrpc_seq_t seq)
+static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call,
+ rxrpc_seq_t seq)
{
struct rxrpc_message *msg;
struct iovec diov[3];
@@ -478,35 +495,36 @@ static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call, rxrpc_seq
diov[0].iov_len = sizeof(struct rxrpc_ackpacket);
diov[0].iov_base = &call->ackr;
- diov[1].iov_len = (call->ackr_pend_cnt+3);
+ diov[1].iov_len = call->ackr_pend_cnt + 3;
diov[1].iov_base = call->ackr_array;
diov[2].iov_len = sizeof(aux);
diov[2].iov_base = &aux;
/* build and send the message */
- ret = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_ACK,3,diov,GFP_KERNEL,&msg);
- if (ret<0)
+ ret = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK,
+ 3, diov, GFP_KERNEL, &msg);
+ if (ret < 0)
goto out;
msg->seq = seq;
msg->hdr.seq = htonl(seq);
msg->hdr.flags |= RXRPC_SLOW_START_OK;
- ret = rxrpc_conn_sendmsg(call->conn,msg);
+ ret = rxrpc_conn_sendmsg(call->conn, msg);
rxrpc_put_message(msg);
- if (ret<0)
+ if (ret < 0)
goto out;
call->pkt_snd_count++;
/* count how many actual ACKs there were at the front */
- for (delta=0; delta<call->ackr_pend_cnt; delta++)
- if (call->ackr_array[delta]!=RXRPC_ACK_TYPE_ACK)
+ for (delta = 0; delta < call->ackr_pend_cnt; delta++)
+ if (call->ackr_array[delta] != RXRPC_ACK_TYPE_ACK)
break;
call->ackr_pend_cnt -= delta; /* all ACK'd to this point */
/* crank the ACK window around */
- if (delta==0) {
+ if (delta == 0) {
/* un-ACK'd window */
}
else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) {
@@ -528,22 +546,26 @@ static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call, rxrpc_seq
/* fully ACK'd window
* - just clear the whole thing
*/
- memset(&call->ackr_array,RXRPC_ACK_TYPE_NACK,sizeof(call->ackr_array));
+ memset(&call->ackr_array,
+ RXRPC_ACK_TYPE_NACK,
+ sizeof(call->ackr_array));
}
/* clear this ACK */
- memset(&call->ackr,0,sizeof(call->ackr));
+ memset(&call->ackr, 0, sizeof(call->ackr));
out:
- if (!call->app_call_state) printk("___ STATE 0 ___\n");
+ if (!call->app_call_state)
+ printk("___ STATE 0 ___\n");
return ret;
} /* end __rxrpc_call_gen_normal_ACK() */
/*****************************************************************************/
/*
- * note the reception of a packet in the call's ACK records and generate an appropriate ACK packet
- * if necessary
- * - returns 0 if packet should be processed, 1 if packet should be ignored and -ve on an error
+ * note the reception of a packet in the call's ACK records and generate an
+ * appropriate ACK packet if necessary
+ * - returns 0 if packet should be processed, 1 if packet should be ignored
+ * and -ve on an error
*/
static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
struct rxrpc_header *hdr,
@@ -555,19 +577,20 @@ static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
int ret = 0, err;
u8 special_ACK, do_ACK, force;
- _enter("%p,%p { seq=%d tp=%d fl=%02x }",call,hdr,ntohl(hdr->seq),hdr->type,hdr->flags);
+ _enter("%p,%p { seq=%d tp=%d fl=%02x }",
+ call, hdr, ntohl(hdr->seq), hdr->type, hdr->flags);
seq = ntohl(hdr->seq);
offset = seq - call->ackr_win_bot;
do_ACK = RXRPC_ACK_DELAY;
special_ACK = 0;
- force = (seq==1);
+ force = (seq == 1);
if (call->ackr_high_seq < seq)
call->ackr_high_seq = seq;
/* deal with generation of obvious special ACKs first */
- if (ack && ack->reason==RXRPC_ACK_PING) {
+ if (ack && ack->reason == RXRPC_ACK_PING) {
special_ACK = RXRPC_ACK_PING_RESPONSE;
ret = 1;
goto gen_ACK;
@@ -594,9 +617,9 @@ static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
/* okay... it's a normal data packet inside the ACK window */
call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK;
- if (offset<call->ackr_pend_cnt) {
+ if (offset < call->ackr_pend_cnt) {
}
- else if (offset>call->ackr_pend_cnt) {
+ else if (offset > call->ackr_pend_cnt) {
do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE;
call->ackr_pend_cnt = offset;
goto gen_ACK;
@@ -616,8 +639,8 @@ static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
}
/* re-ACK packets previously received out-of-order */
- for (offset++; offset<RXRPC_CALL_ACK_WINDOW_SIZE; offset++)
- if (call->ackr_array[offset]!=RXRPC_ACK_TYPE_ACK)
+ for (offset++; offset < RXRPC_CALL_ACK_WINDOW_SIZE; offset++)
+ if (call->ackr_array[offset] != RXRPC_ACK_TYPE_ACK)
break;
call->ackr_pend_cnt = offset;
@@ -629,55 +652,61 @@ static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
gen_ACK:
_debug("%05lu ACKs pend=%u norm=%s special=%s%s",
jiffies - call->cjif,
- call->ackr_pend_cnt,rxrpc_acks[do_ACK],rxrpc_acks[special_ACK],
+ call->ackr_pend_cnt,
+ rxrpc_acks[do_ACK],
+ rxrpc_acks[special_ACK],
force ? " immediate" :
- do_ACK==RXRPC_ACK_REQUESTED ? " merge-req" :
+ do_ACK == RXRPC_ACK_REQUESTED ? " merge-req" :
hdr->flags & RXRPC_LAST_PACKET ? " finalise" :
" defer"
);
/* send any pending normal ACKs if need be */
- if (call->ackr_pend_cnt>0) {
+ if (call->ackr_pend_cnt > 0) {
/* fill out the appropriate form */
- call->ackr.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
- call->ackr.maxSkew = htons(min(call->ackr_high_seq - seq,65535U));
- call->ackr.firstPacket = htonl(call->ackr_win_bot);
- call->ackr.previousPacket = call->ackr_prev_seq;
- call->ackr.serial = hdr->serial;
- call->ackr.nAcks = call->ackr_pend_cnt;
-
- if (do_ACK==RXRPC_ACK_REQUESTED)
+ call->ackr.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
+ call->ackr.maxSkew = htons(min(call->ackr_high_seq - seq,
+ 65535U));
+ call->ackr.firstPacket = htonl(call->ackr_win_bot);
+ call->ackr.previousPacket = call->ackr_prev_seq;
+ call->ackr.serial = hdr->serial;
+ call->ackr.nAcks = call->ackr_pend_cnt;
+
+ if (do_ACK == RXRPC_ACK_REQUESTED)
call->ackr.reason = do_ACK;
/* generate the ACK immediately if necessary */
if (special_ACK || force) {
- err = __rxrpc_call_gen_normal_ACK(call,do_ACK==RXRPC_ACK_DELAY ? 0 : seq);
- if (err<0) {
+ err = __rxrpc_call_gen_normal_ACK(
+ call, do_ACK == RXRPC_ACK_DELAY ? 0 : seq);
+ if (err < 0) {
ret = err;
goto out;
}
}
}
- if (call->ackr.reason==RXRPC_ACK_REQUESTED)
+ if (call->ackr.reason == RXRPC_ACK_REQUESTED)
call->ackr_dfr_seq = seq;
- /* start the ACK timer if not running if there are any pending deferred ACKs */
- if (call->ackr_pend_cnt>0 &&
- call->ackr.reason!=RXRPC_ACK_REQUESTED &&
+ /* start the ACK timer if not running if there are any pending deferred
+ * ACKs */
+ if (call->ackr_pend_cnt > 0 &&
+ call->ackr.reason != RXRPC_ACK_REQUESTED &&
!timer_pending(&call->ackr_dfr_timo)
) {
unsigned long timo;
timo = rxrpc_call_dfr_ack_timeout + jiffies;
- _debug("START ACKR TIMER for cj=%lu",timo-call->cjif);
+ _debug("START ACKR TIMER for cj=%lu", timo-call->cjif);
spin_lock(&call->lock);
- mod_timer(&call->ackr_dfr_timo,timo);
+ mod_timer(&call->ackr_dfr_timo, timo);
spin_unlock(&call->lock);
}
- else if ((call->ackr_pend_cnt==0 || call->ackr.reason==RXRPC_ACK_REQUESTED) &&
+ else if ((call->ackr_pend_cnt == 0 ||
+ call->ackr.reason == RXRPC_ACK_REQUESTED) &&
timer_pending(&call->ackr_dfr_timo)
) {
/* stop timer if no pending ACKs */
@@ -689,21 +718,25 @@ static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
if (special_ACK) {
struct rxrpc_ackpacket ack;
struct iovec diov[2];
- u8 acks[1] = { RXRPC_ACK_TYPE_ACK };
+ uint8_t acks[1] = { RXRPC_ACK_TYPE_ACK };
/* fill out the appropriate form */
- ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
- ack.maxSkew = htons(min(call->ackr_high_seq - seq,65535U));
- ack.firstPacket = htonl(call->ackr_win_bot);
- ack.previousPacket = call->ackr_prev_seq;
- ack.serial = hdr->serial;
- ack.reason = special_ACK;
- ack.nAcks = 0;
- //ack.nAcks = special_ACK==RXRPC_ACK_OUT_OF_SEQUENCE ? 0 : hdr->seq ? 1 : 0;
-
- _proto("Rx Sending s-ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
- ntohs(ack.maxSkew),ntohl(ack.firstPacket),ntohl(ack.previousPacket),
- ntohl(ack.serial),rxrpc_acks[ack.reason],ack.nAcks);
+ ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
+ ack.maxSkew = htons(min(call->ackr_high_seq - seq,65535U));
+ ack.firstPacket = htonl(call->ackr_win_bot);
+ ack.previousPacket = call->ackr_prev_seq;
+ ack.serial = hdr->serial;
+ ack.reason = special_ACK;
+ ack.nAcks = 0;
+
+ _proto("Rx Sending s-ACK"
+ " { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ ntohs(ack.maxSkew),
+ ntohl(ack.firstPacket),
+ ntohl(ack.previousPacket),
+ ntohl(ack.serial),
+ rxrpc_acks[ack.reason],
+ ack.nAcks);
diov[0].iov_len = sizeof(struct rxrpc_ackpacket);
diov[0].iov_base = &ack;
@@ -711,11 +744,11 @@ static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
diov[1].iov_base = acks;
/* build and send the message */
- err = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_ACK,
- hdr->seq ? 2 : 1,diov,
+ err = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK,
+ hdr->seq ? 2 : 1, diov,
GFP_KERNEL,
&msg);
- if (err<0) {
+ if (err < 0) {
ret = err;
goto out;
}
@@ -724,9 +757,9 @@ static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
msg->hdr.seq = htonl(seq);
msg->hdr.flags |= RXRPC_SLOW_START_OK;
- err = rxrpc_conn_sendmsg(call->conn,msg);
+ err = rxrpc_conn_sendmsg(call->conn, msg);
rxrpc_put_message(msg);
- if (err<0) {
+ if (err < 0) {
ret = err;
goto out;
}
@@ -737,7 +770,7 @@ static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
if (hdr->seq)
call->ackr_prev_seq = hdr->seq;
- _leave(" = %d",ret);
+ _leave(" = %d", ret);
return ret;
} /* end rxrpc_call_generate_ACK() */
@@ -748,7 +781,7 @@ static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
*/
void rxrpc_call_do_stuff(struct rxrpc_call *call)
{
- _enter("%p{flags=%lx}",call,call->flags);
+ _enter("%p{flags=%lx}", call, call->flags);
/* handle packet reception */
if (call->flags & RXRPC_CALL_RCV_PKT) {
@@ -761,19 +794,19 @@ void rxrpc_call_do_stuff(struct rxrpc_call *call)
if (call->flags & RXRPC_CALL_ACKS_TIMO) {
_debug("- overdue ACK timeout");
call->flags &= ~RXRPC_CALL_ACKS_TIMO;
- rxrpc_call_resend(call,call->snd_seq_count);
+ rxrpc_call_resend(call, call->snd_seq_count);
}
/* handle lack of reception */
if (call->flags & RXRPC_CALL_RCV_TIMO) {
_debug("- reception timeout");
call->flags &= ~RXRPC_CALL_RCV_TIMO;
- rxrpc_call_abort(call,-EIO);
+ rxrpc_call_abort(call, -EIO);
}
/* handle deferred ACKs */
if (call->flags & RXRPC_CALL_ACKR_TIMO ||
- (call->ackr.nAcks>0 && call->ackr.reason==RXRPC_ACK_REQUESTED)
+ (call->ackr.nAcks > 0 && call->ackr.reason == RXRPC_ACK_REQUESTED)
) {
_debug("- deferred ACK timeout: cj=%05lu r=%s n=%u",
jiffies - call->cjif,
@@ -782,9 +815,10 @@ void rxrpc_call_do_stuff(struct rxrpc_call *call)
call->flags &= ~RXRPC_CALL_ACKR_TIMO;
- if (call->ackr.nAcks>0 && call->app_call_state!=RXRPC_CSTATE_ERROR) {
+ if (call->ackr.nAcks > 0 &&
+ call->app_call_state != RXRPC_CSTATE_ERROR) {
/* generate ACK */
- __rxrpc_call_gen_normal_ACK(call,call->ackr_dfr_seq);
+ __rxrpc_call_gen_normal_ACK(call, call->ackr_dfr_seq);
call->ackr_dfr_seq = 0;
}
}
@@ -807,10 +841,11 @@ static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
int ret;
u32 _error;
- _enter("%p{%08x},%p{%d},%d",conn,ntohl(conn->conn_id),call,ntohl(call->call_id),errno);
+ _enter("%p{%08x},%p{%d},%d",
+ conn, ntohl(conn->conn_id), call, ntohl(call->call_id), errno);
/* if this call is already aborted, then just wake up any waiters */
- if (call->app_call_state==RXRPC_CSTATE_ERROR) {
+ if (call->app_call_state == RXRPC_CSTATE_ERROR) {
spin_unlock(&call->lock);
call->app_error_func(call);
_leave(" = 0");
@@ -820,12 +855,12 @@ static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
rxrpc_get_call(call);
/* change the state _with_ the lock still held */
- call->app_call_state = RXRPC_CSTATE_ERROR;
- call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT;
- call->app_errno = errno;
- call->app_mark = RXRPC_APP_MARK_EOF;
- call->app_read_buf = NULL;
- call->app_async_read = 0;
+ call->app_call_state = RXRPC_CSTATE_ERROR;
+ call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT;
+ call->app_errno = errno;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ call->app_read_buf = NULL;
+ call->app_async_read = 0;
_state(call);
@@ -840,22 +875,25 @@ static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
del_timer_sync(&call->ackr_dfr_timo);
if (rxrpc_call_is_ack_pending(call))
- __rxrpc_call_gen_normal_ACK(call,0);
+ __rxrpc_call_gen_normal_ACK(call, 0);
- /* send the abort packet only if we actually traded some other packets */
+ /* send the abort packet only if we actually traded some other
+ * packets */
ret = 0;
if (call->pkt_snd_count || call->pkt_rcv_count) {
/* actually send the abort */
- _proto("Rx Sending Call ABORT { data=%d }",call->app_abort_code);
+ _proto("Rx Sending Call ABORT { data=%d }",
+ call->app_abort_code);
_error = htonl(call->app_abort_code);
diov[0].iov_len = sizeof(_error);
diov[0].iov_base = &_error;
- ret = rxrpc_conn_newmsg(conn,call,RXRPC_PACKET_TYPE_ABORT,1,diov,GFP_KERNEL,&msg);
- if (ret==0) {
- ret = rxrpc_conn_sendmsg(conn,msg);
+ ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_ABORT,
+ 1, diov, GFP_KERNEL, &msg);
+ if (ret == 0) {
+ ret = rxrpc_conn_sendmsg(conn, msg);
rxrpc_put_message(msg);
}
}
@@ -865,8 +903,7 @@ static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
rxrpc_put_call(call);
- _leave(" = %d",ret);
-
+ _leave(" = %d", ret);
return ret;
} /* end __rxrpc_call_abort() */
@@ -879,7 +916,7 @@ int rxrpc_call_abort(struct rxrpc_call *call, int error)
{
spin_lock(&call->lock);
- return __rxrpc_call_abort(call,error);
+ return __rxrpc_call_abort(call, error);
} /* end rxrpc_call_abort() */
@@ -891,11 +928,12 @@ static void rxrpc_call_receive_packet(struct rxrpc_call *call)
{
struct rxrpc_message *msg;
struct list_head *_p;
- u32 data32;
+ uint32_t data32;
- _enter("%p",call);
+ _enter("%p", call);
- rxrpc_get_call(call); /* must not go away too soon if aborted by app-layer */
+ rxrpc_get_call(call); /* must not go away too soon if aborted by
+ * app-layer */
while (!list_empty(&call->rcv_receiveq)) {
/* try to get next packet */
@@ -907,9 +945,10 @@ static void rxrpc_call_receive_packet(struct rxrpc_call *call)
}
spin_unlock(&call->lock);
- if (!_p) break;
+ if (!_p)
+ break;
- msg = list_entry(_p,struct rxrpc_message,link);
+ msg = list_entry(_p, struct rxrpc_message, link);
_proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)",
jiffies - call->cjif,
@@ -927,9 +966,10 @@ static void rxrpc_call_receive_packet(struct rxrpc_call *call)
/* deal with data packets */
case RXRPC_PACKET_TYPE_DATA:
/* ACK the packet if necessary */
- switch (rxrpc_call_generate_ACK(call,&msg->hdr,NULL)) {
+ switch (rxrpc_call_generate_ACK(call, &msg->hdr,
+ NULL)) {
case 0: /* useful packet */
- rxrpc_call_receive_data_packet(call,msg);
+ rxrpc_call_receive_data_packet(call, msg);
break;
case 1: /* duplicate or out-of-window packet */
break;
@@ -941,29 +981,30 @@ static void rxrpc_call_receive_packet(struct rxrpc_call *call)
/* deal with ACK packets */
case RXRPC_PACKET_TYPE_ACK:
- rxrpc_call_receive_ack_packet(call,msg);
+ rxrpc_call_receive_ack_packet(call, msg);
break;
/* deal with abort packets */
case RXRPC_PACKET_TYPE_ABORT:
data32 = 0;
- if (skb_copy_bits(msg->pkt,msg->offset,&data32,sizeof(data32))<0) {
+ if (skb_copy_bits(msg->pkt, msg->offset,
+ &data32, sizeof(data32)) < 0) {
printk("Rx Received short ABORT packet\n");
}
else {
data32 = ntohl(data32);
}
- _proto("Rx Received Call ABORT { data=%d }",data32);
+ _proto("Rx Received Call ABORT { data=%d }", data32);
spin_lock(&call->lock);
- call->app_call_state = RXRPC_CSTATE_ERROR;
- call->app_err_state = RXRPC_ESTATE_PEER_ABORT;
- call->app_abort_code = data32;
- call->app_errno = -ECONNABORTED;
- call->app_mark = RXRPC_APP_MARK_EOF;
- call->app_read_buf = NULL;
- call->app_async_read = 0;
+ call->app_call_state = RXRPC_CSTATE_ERROR;
+ call->app_err_state = RXRPC_ESTATE_PEER_ABORT;
+ call->app_abort_code = data32;
+ call->app_errno = -ECONNABORTED;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ call->app_read_buf = NULL;
+ call->app_async_read = 0;
/* ask the app to translate the error code */
call->app_aemap_func(call);
@@ -974,7 +1015,8 @@ static void rxrpc_call_receive_packet(struct rxrpc_call *call)
default:
/* deal with other packet types */
- _proto("Rx Unsupported packet type %u (#%u)",msg->hdr.type,msg->seq);
+ _proto("Rx Unsupported packet type %u (#%u)",
+ msg->hdr.type, msg->seq);
break;
}
@@ -990,14 +1032,18 @@ static void rxrpc_call_receive_packet(struct rxrpc_call *call)
/*
* process next data packet
* - as the next data packet arrives:
- * - it is queued on app_readyq _if_ it is the next one expected (app_ready_seq+1)
+ * - it is queued on app_readyq _if_ it is the next one expected
+ * (app_ready_seq+1)
* - it is queued on app_unreadyq _if_ it is not the next one expected
- * - if a packet placed on app_readyq completely fills a hole leading up to the first packet
- * on app_unreadyq, then packets now in sequence are tranferred to app_readyq
- * - the application layer can only see packets on app_readyq (app_ready_qty bytes)
+ * - if a packet placed on app_readyq completely fills a hole leading up to
+ * the first packet on app_unreadyq, then packets now in sequence are
+ * tranferred to app_readyq
+ * - the application layer can only see packets on app_readyq
+ * (app_ready_qty bytes)
* - the application layer is prodded every time a new packet arrives
*/
-static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc_message *msg)
+static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
+ struct rxrpc_message *msg)
{
const struct rxrpc_operation *optbl, *op;
struct rxrpc_message *pmsg;
@@ -1005,22 +1051,23 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
int ret, lo, hi, rmtimo;
u32 opid;
- _enter("%p{%u},%p{%u}",call,ntohl(call->call_id),msg,msg->seq);
+ _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq);
rxrpc_get_message(msg);
- /* add to the unready queue if we'd have to create a hole in the ready queue otherwise */
- if (msg->seq != call->app_ready_seq+1) {
- _debug("Call add packet %d to unreadyq",msg->seq);
+ /* add to the unready queue if we'd have to create a hole in the ready
+ * queue otherwise */
+ if (msg->seq != call->app_ready_seq + 1) {
+ _debug("Call add packet %d to unreadyq", msg->seq);
/* insert in seq order */
list_for_each(_p,&call->app_unreadyq) {
- pmsg = list_entry(_p,struct rxrpc_message,link);
- if (pmsg->seq>msg->seq)
+ pmsg = list_entry(_p, struct rxrpc_message, link);
+ if (pmsg->seq > msg->seq)
break;
}
- list_add_tail(&msg->link,_p);
+ list_add_tail(&msg->link, _p);
_leave(" [unreadyq]");
return;
@@ -1028,33 +1075,35 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
/* next in sequence - simply append into the call's ready queue */
_debug("Call add packet %d to readyq (+%Zd => %Zd bytes)",
- msg->seq,msg->dsize,call->app_ready_qty);
+ msg->seq, msg->dsize, call->app_ready_qty);
spin_lock(&call->lock);
call->app_ready_seq = msg->seq;
call->app_ready_qty += msg->dsize;
- list_add_tail(&msg->link,&call->app_readyq);
+ list_add_tail(&msg->link, &call->app_readyq);
/* move unready packets to the readyq if we got rid of a hole */
while (!list_empty(&call->app_unreadyq)) {
- pmsg = list_entry(call->app_unreadyq.next,struct rxrpc_message,link);
+ pmsg = list_entry(call->app_unreadyq.next,
+ struct rxrpc_message, link);
- if (pmsg->seq != call->app_ready_seq+1)
+ if (pmsg->seq != call->app_ready_seq + 1)
break;
/* next in sequence - just move list-to-list */
_debug("Call transfer packet %d to readyq (+%Zd => %Zd bytes)",
- pmsg->seq,pmsg->dsize,call->app_ready_qty);
+ pmsg->seq, pmsg->dsize, call->app_ready_qty);
call->app_ready_seq = pmsg->seq;
call->app_ready_qty += pmsg->dsize;
list_del_init(&pmsg->link);
- list_add_tail(&pmsg->link,&call->app_readyq);
+ list_add_tail(&pmsg->link, &call->app_readyq);
}
/* see if we've got the last packet yet */
if (!list_empty(&call->app_readyq)) {
- pmsg = list_entry(call->app_readyq.prev,struct rxrpc_message,link);
+ pmsg = list_entry(call->app_readyq.prev,
+ struct rxrpc_message, link);
if (pmsg->hdr.flags & RXRPC_LAST_PACKET) {
call->app_last_rcv = 1;
_debug("Last packet on readyq");
@@ -1068,25 +1117,27 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
_leave(" [error]");
return;
- /* extract the operation ID from an incoming call if that's not yet been done */
+ /* extract the operation ID from an incoming call if that's not
+ * yet been done */
case RXRPC_CSTATE_SRVR_RCV_OPID:
spin_unlock(&call->lock);
/* handle as yet insufficient data for the operation ID */
- if (call->app_ready_qty<4) {
+ if (call->app_ready_qty < 4) {
if (call->app_last_rcv)
- rxrpc_call_abort(call,-EINVAL); /* trouble - last packet seen */
+ /* trouble - last packet seen */
+ rxrpc_call_abort(call, -EINVAL);
_leave("");
return;
}
/* pull the operation ID out of the buffer */
- ret = rxrpc_call_read_data(call,&opid,sizeof(opid),0);
- if (ret<0) {
- printk("Unexpected error from read-data: %d\n",ret);
- if (call->app_call_state!=RXRPC_CSTATE_ERROR)
- rxrpc_call_abort(call,ret);
+ ret = rxrpc_call_read_data(call, &opid, sizeof(opid), 0);
+ if (ret < 0) {
+ printk("Unexpected error from read-data: %d\n", ret);
+ if (call->app_call_state != RXRPC_CSTATE_ERROR)
+ rxrpc_call_abort(call, ret);
_leave("");
return;
}
@@ -1097,38 +1148,42 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
lo = 0;
hi = call->conn->service->ops_end - optbl;
- while (lo<hi) {
- int mid = (hi+lo) / 2;
+ while (lo < hi) {
+ int mid = (hi + lo) / 2;
op = &optbl[mid];
- if (call->app_opcode==op->id)
+ if (call->app_opcode == op->id)
goto found_op;
- if (call->app_opcode>op->id)
- lo = mid+1;
+ if (call->app_opcode > op->id)
+ lo = mid + 1;
else
hi = mid;
}
/* search failed */
kproto("Rx Client requested operation %d from %s service",
- call->app_opcode,call->conn->service->name);
- rxrpc_call_abort(call,-EINVAL);
+ call->app_opcode, call->conn->service->name);
+ rxrpc_call_abort(call, -EINVAL);
_leave(" [inval]");
return;
found_op:
_proto("Rx Client requested operation %s from %s service",
- op->name,call->conn->service->name);
+ op->name, call->conn->service->name);
- /* we're now waiting for the argument block (unless the call was aborted) */
+ /* we're now waiting for the argument block (unless the call
+ * was aborted) */
spin_lock(&call->lock);
- if (call->app_call_state==RXRPC_CSTATE_SRVR_RCV_OPID ||
- call->app_call_state==RXRPC_CSTATE_SRVR_SND_REPLY) {
+ if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_OPID ||
+ call->app_call_state == RXRPC_CSTATE_SRVR_SND_REPLY) {
if (!call->app_last_rcv)
- call->app_call_state = RXRPC_CSTATE_SRVR_RCV_ARGS;
- else if (call->app_ready_qty>0)
- call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
+ call->app_call_state =
+ RXRPC_CSTATE_SRVR_RCV_ARGS;
+ else if (call->app_ready_qty > 0)
+ call->app_call_state =
+ RXRPC_CSTATE_SRVR_GOT_ARGS;
else
- call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
+ call->app_call_state =
+ RXRPC_CSTATE_SRVR_SND_REPLY;
call->app_mark = op->asize;
call->app_user = op->user;
}
@@ -1166,20 +1221,21 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
default:
/* deal with data reception in an unexpected state */
- printk("Unexpected state [[[ %u ]]]\n",call->app_call_state);
- __rxrpc_call_abort(call,-EBADMSG);
+ printk("Unexpected state [[[ %u ]]]\n", call->app_call_state);
+ __rxrpc_call_abort(call, -EBADMSG);
_leave("");
return;
}
- if (call->app_call_state==RXRPC_CSTATE_CLNT_RCV_REPLY && call->app_last_rcv)
+ if (call->app_call_state == RXRPC_CSTATE_CLNT_RCV_REPLY &&
+ call->app_last_rcv)
BUG();
/* otherwise just invoke the data function whenever we can satisfy its desire for more
* data
*/
_proto("Rx Received Op Data: st=%u qty=%Zu mk=%Zu%s",
- call->app_call_state,call->app_ready_qty,call->app_mark,
+ call->app_call_state, call->app_ready_qty, call->app_mark,
call->app_last_rcv ? " last-rcvd" : "");
spin_lock(&call->lock);
@@ -1196,8 +1252,8 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
case -ECONNABORTED:
spin_unlock(&call->lock);
break;
- default:
- __rxrpc_call_abort(call,ret);
+ default:
+ __rxrpc_call_abort(call, ret);
break;
}
@@ -1211,17 +1267,18 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc
/*
* received an ACK packet
*/
-static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_message *msg)
+static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
+ struct rxrpc_message *msg)
{
struct rxrpc_ackpacket ack;
rxrpc_serial_t serial;
rxrpc_seq_t seq;
int ret;
- _enter("%p{%u},%p{%u}",call,ntohl(call->call_id),msg,msg->seq);
+ _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq);
/* extract the basic ACK record */
- if (skb_copy_bits(msg->pkt,msg->offset,&ack,sizeof(ack))<0) {
+ if (skb_copy_bits(msg->pkt, msg->offset, &ack, sizeof(ack)) < 0) {
printk("Rx Received short ACK packet\n");
return;
}
@@ -1241,10 +1298,14 @@ static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_
call->ackr.nAcks
);
- /* check the other side isn't ACK'ing a sequence number I haven't sent yet */
- if (ack.nAcks>0 && (seq > call->snd_seq_count || seq+ack.nAcks-1 > call->snd_seq_count)) {
- printk("Received ACK (#%u-#%u) for unsent packet\n",seq,seq+ack.nAcks-1);
- rxrpc_call_abort(call,-EINVAL);
+ /* check the other side isn't ACK'ing a sequence number I haven't sent
+ * yet */
+ if (ack.nAcks > 0 &&
+ (seq > call->snd_seq_count ||
+ seq + ack.nAcks - 1 > call->snd_seq_count)) {
+ printk("Received ACK (#%u-#%u) for unsent packet\n",
+ seq, seq + ack.nAcks - 1);
+ rxrpc_call_abort(call, -EINVAL);
_leave("");
return;
}
@@ -1255,7 +1316,7 @@ static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_
/* find the prompting packet */
spin_lock(&call->lock);
- if (call->snd_ping && call->snd_ping->hdr.serial==serial) {
+ if (call->snd_ping && call->snd_ping->hdr.serial == serial) {
/* it was a ping packet */
rttmsg = call->snd_ping;
call->snd_ping = NULL;
@@ -1263,22 +1324,28 @@ static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_
if (rttmsg) {
rttmsg->rttdone = 1;
- rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
+ rxrpc_peer_calculate_rtt(call->conn->peer,
+ rttmsg, msg);
rxrpc_put_message(rttmsg);
}
}
else {
struct list_head *_p;
- /* it ought to be a data packet - look in the pending ACK list */
- list_for_each(_p,&call->acks_pendq) {
- rttmsg = list_entry(_p,struct rxrpc_message,link);
- if (rttmsg->hdr.serial==serial) {
+ /* it ought to be a data packet - look in the pending
+ * ACK list */
+ list_for_each(_p, &call->acks_pendq) {
+ rttmsg = list_entry(_p, struct rxrpc_message,
+ link);
+ if (rttmsg->hdr.serial == serial) {
if (rttmsg->rttdone)
- break; /* never do RTT twice without resending */
+ /* never do RTT twice without
+ * resending */
+ break;
rttmsg->rttdone = 1;
- rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
+ rxrpc_peer_calculate_rtt(
+ call->conn->peer, rttmsg, msg);
break;
}
}
@@ -1287,24 +1354,25 @@ static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_
}
switch (ack.reason) {
- /* deal with negative/positive acknowledgement of data packets */
+ /* deal with negative/positive acknowledgement of data
+ * packets */
case RXRPC_ACK_REQUESTED:
case RXRPC_ACK_DELAY:
case RXRPC_ACK_IDLE:
- rxrpc_call_definitively_ACK(call,seq-1);
+ rxrpc_call_definitively_ACK(call, seq - 1);
case RXRPC_ACK_DUPLICATE:
case RXRPC_ACK_OUT_OF_SEQUENCE:
case RXRPC_ACK_EXCEEDS_WINDOW:
call->snd_resend_cnt = 0;
- ret = rxrpc_call_record_ACK(call,msg,seq,ack.nAcks);
- if (ret<0)
- rxrpc_call_abort(call,ret);
+ ret = rxrpc_call_record_ACK(call, msg, seq, ack.nAcks);
+ if (ret < 0)
+ rxrpc_call_abort(call, ret);
break;
/* respond to ping packets immediately */
case RXRPC_ACK_PING:
- rxrpc_call_generate_ACK(call,&msg->hdr,&ack);
+ rxrpc_call_generate_ACK(call, &msg->hdr, &ack);
break;
/* only record RTT on ping response packets */
@@ -1312,10 +1380,12 @@ static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_
if (call->snd_ping) {
struct rxrpc_message *rttmsg;
- /* only do RTT stuff if the response matches the retained ping */
+ /* only do RTT stuff if the response matches the
+ * retained ping */
rttmsg = NULL;
spin_lock(&call->lock);
- if (call->snd_ping && call->snd_ping->hdr.serial==ack.serial) {
+ if (call->snd_ping &&
+ call->snd_ping->hdr.serial == ack.serial) {
rttmsg = call->snd_ping;
call->snd_ping = NULL;
}
@@ -1323,14 +1393,15 @@ static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_
if (rttmsg) {
rttmsg->rttdone = 1;
- rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
+ rxrpc_peer_calculate_rtt(call->conn->peer,
+ rttmsg, msg);
rxrpc_put_message(rttmsg);
}
}
break;
default:
- printk("Unsupported ACK reason %u\n",ack.reason);
+ printk("Unsupported ACK reason %u\n", ack.reason);
break;
}
@@ -1339,38 +1410,44 @@ static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_
/*****************************************************************************/
/*
- * record definitive ACKs for all messages up to and including the one with the 'highest' seq
+ * record definitive ACKs for all messages up to and including the one with the
+ * 'highest' seq
*/
-static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, rxrpc_seq_t highest)
+static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
+ rxrpc_seq_t highest)
{
struct rxrpc_message *msg;
int now_complete;
- _enter("%p{ads=%u},%u",call,call->acks_dftv_seq,highest);
+ _enter("%p{ads=%u},%u", call, call->acks_dftv_seq, highest);
- while (call->acks_dftv_seq<highest) {
+ while (call->acks_dftv_seq < highest) {
call->acks_dftv_seq++;
- _proto("Definitive ACK on packet #%u",call->acks_dftv_seq);
+ _proto("Definitive ACK on packet #%u", call->acks_dftv_seq);
- /* discard those at front of queue until message with highest ACK is found */
+ /* discard those at front of queue until message with highest
+ * ACK is found */
spin_lock(&call->lock);
msg = NULL;
if (!list_empty(&call->acks_pendq)) {
- msg = list_entry(call->acks_pendq.next,struct rxrpc_message,link);
+ msg = list_entry(call->acks_pendq.next,
+ struct rxrpc_message, link);
list_del_init(&msg->link); /* dequeue */
- if (msg->state==RXRPC_MSG_SENT)
+ if (msg->state == RXRPC_MSG_SENT)
call->acks_pend_cnt--;
}
spin_unlock(&call->lock);
/* insanity check */
if (!msg)
- panic("%s(): acks_pendq unexpectedly empty\n",__FUNCTION__);
+ panic("%s(): acks_pendq unexpectedly empty\n",
+ __FUNCTION__);
- if (msg->seq!=call->acks_dftv_seq)
- panic("%s(): Packet #%u expected at front of acks_pendq (#%u found)\n",
- __FUNCTION__,call->acks_dftv_seq,msg->seq);
+ if (msg->seq != call->acks_dftv_seq)
+ panic("%s(): Packet #%u expected at front of acks_pendq"
+ " (#%u found)\n",
+ __FUNCTION__, call->acks_dftv_seq, msg->seq);
/* discard the message */
msg->state = RXRPC_MSG_DONE;
@@ -1380,8 +1457,8 @@ static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, rxrpc_seq_t hig
/* if all sent packets are definitively ACK'd then prod any sleepers just in case */
now_complete = 0;
spin_lock(&call->lock);
- if (call->acks_dftv_seq==call->snd_seq_count) {
- if (call->app_call_state!=RXRPC_CSTATE_COMPLETE) {
+ if (call->acks_dftv_seq == call->snd_seq_count) {
+ if (call->app_call_state != RXRPC_CSTATE_COMPLETE) {
call->app_call_state = RXRPC_CSTATE_COMPLETE;
_state(call);
now_complete = 1;
@@ -1417,13 +1494,15 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call,
u8 acks[16];
_enter("%p{apc=%u ads=%u},%p,%u,%Zu",
- call,call->acks_pend_cnt,call->acks_dftv_seq,msg,seq,count);
+ call, call->acks_pend_cnt, call->acks_dftv_seq,
+ msg, seq, count);
- /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order ACKs) */
- if (seq<=call->acks_dftv_seq) {
+ /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order
+ * ACKs) */
+ if (seq <= call->acks_dftv_seq) {
unsigned delta = call->acks_dftv_seq - seq;
- if (count<=delta) {
+ if (count <= delta) {
_leave(" = 0 [all definitively ACK'd]");
return 0;
}
@@ -1435,14 +1514,14 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call,
highest = seq + count - 1;
resend = 0;
- while (count>0) {
+ while (count > 0) {
/* extract up to 16 ACK slots at a time */
- chunk = min(count,sizeof(acks));
+ chunk = min(count, sizeof(acks));
count -= chunk;
- memset(acks,2,sizeof(acks));
+ memset(acks, 2, sizeof(acks));
- if (skb_copy_bits(msg->pkt,msg->offset,&acks,chunk)<0) {
+ if (skb_copy_bits(msg->pkt, msg->offset, &acks, chunk) < 0) {
printk("Rx Received short ACK packet\n");
_leave(" = -EINVAL");
return -EINVAL;
@@ -1450,7 +1529,7 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call,
msg->offset += chunk;
/* check that the ACK set is valid */
- for (ix=0; ix<chunk; ix++) {
+ for (ix = 0; ix < chunk; ix++) {
switch (acks[ix]) {
case RXRPC_ACK_TYPE_ACK:
break;
@@ -1458,14 +1537,16 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call,
resend = 1;
break;
default:
- printk("Rx Received unsupported ACK state %u\n",acks[ix]);
+ printk("Rx Received unsupported ACK state"
+ " %u\n", acks[ix]);
_leave(" = -EINVAL");
return -EINVAL;
}
}
- _proto("Rx ACK of packets #%u-#%u [%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
- seq,(unsigned)(seq+chunk-1),
+ _proto("Rx ACK of packets #%u-#%u "
+ "[%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
+ seq, (unsigned) (seq + chunk - 1),
_acktype[acks[0x0]],
_acktype[acks[0x1]],
_acktype[acks[0x2]],
@@ -1485,53 +1566,60 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call,
call->acks_pend_cnt
);
- /* mark the packets in the ACK queue as being provisionally ACK'd */
+ /* mark the packets in the ACK queue as being provisionally
+ * ACK'd */
ix = 0;
spin_lock(&call->lock);
/* find the first packet ACK'd/NAK'd here */
- list_for_each(_p,&call->acks_pendq) {
- dmsg = list_entry(_p,struct rxrpc_message,link);
- if (dmsg->seq==seq)
+ list_for_each(_p, &call->acks_pendq) {
+ dmsg = list_entry(_p, struct rxrpc_message, link);
+ if (dmsg->seq == seq)
goto found_first;
- _debug("- %u: skipping #%u",ix,dmsg->seq);
+ _debug("- %u: skipping #%u", ix, dmsg->seq);
}
goto bad_queue;
found_first:
do {
_debug("- %u: processing #%u (%c) apc=%u",
- ix,dmsg->seq,_acktype[acks[ix]],call->acks_pend_cnt);
+ ix, dmsg->seq, _acktype[acks[ix]],
+ call->acks_pend_cnt);
- if (acks[ix]==RXRPC_ACK_TYPE_ACK) {
- if (dmsg->state==RXRPC_MSG_SENT) call->acks_pend_cnt--;
+ if (acks[ix] == RXRPC_ACK_TYPE_ACK) {
+ if (dmsg->state == RXRPC_MSG_SENT)
+ call->acks_pend_cnt--;
dmsg->state = RXRPC_MSG_ACKED;
}
else {
- if (dmsg->state==RXRPC_MSG_ACKED) call->acks_pend_cnt++;
+ if (dmsg->state == RXRPC_MSG_ACKED)
+ call->acks_pend_cnt++;
dmsg->state = RXRPC_MSG_SENT;
}
ix++;
seq++;
_p = dmsg->link.next;
- dmsg = list_entry(_p,struct rxrpc_message,link);
- } while(ix<chunk && _p!=&call->acks_pendq && dmsg->seq==seq);
+ dmsg = list_entry(_p, struct rxrpc_message, link);
+ } while(ix < chunk &&
+ _p != &call->acks_pendq &&
+ dmsg->seq == seq);
- if (ix<chunk)
+ if (ix < chunk)
goto bad_queue;
spin_unlock(&call->lock);
}
if (resend)
- rxrpc_call_resend(call,highest);
+ rxrpc_call_resend(call, highest);
- /* if all packets are provisionally ACK'd, then wake up anyone who's waiting for that */
+ /* if all packets are provisionally ACK'd, then wake up anyone who's
+ * waiting for that */
now_complete = 0;
spin_lock(&call->lock);
- if (call->acks_pend_cnt==0) {
- if (call->app_call_state==RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) {
+ if (call->acks_pend_cnt == 0) {
+ if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) {
call->app_call_state = RXRPC_CSTATE_COMPLETE;
_state(call);
}
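
The marking loop in the hunk above walks the pending-ACK queue in step with the acks[] array, flipping each packet between SENT and ACKED and keeping acks_pend_cnt in sync. A small userspace model of that bookkeeping (the enum, array size and counter are simplified stand-ins for the rxrpc structures):

#include <stdio.h>

#define NSLOTS 6

enum msg_state { MSG_SENT, MSG_ACKED };

int main(void)
{
	enum msg_state pendq[NSLOTS] = { MSG_SENT };	/* all start as SENT */
	/* 1 = ACK, 0 = NAK, mirroring RXRPC_ACK_TYPE_ACK/NACK */
	int acks[NSLOTS] = { 1, 1, 0, 1, 0, 1 };
	int pend_cnt = NSLOTS;
	int ix;

	for (ix = 0; ix < NSLOTS; ix++) {
		if (acks[ix]) {
			if (pendq[ix] == MSG_SENT)
				pend_cnt--;		/* newly ACK'd */
			pendq[ix] = MSG_ACKED;
		} else {
			if (pendq[ix] == MSG_ACKED)
				pend_cnt++;		/* ACK withdrawn */
			pendq[ix] = MSG_SENT;		/* needs resending */
		}
	}
	printf("still awaiting ACK on %d packets\n", pend_cnt);
	return 0;
}

The same counter drives the completion check a little further down: when it reaches zero, every sent packet has at least a provisional ACK.
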
@@ -1547,19 +1635,21 @@ static int rxrpc_call_record_ACK(struct rxrpc_call *call,
call->app_attn_func(call);
}
- _leave(" = 0 (apc=%u)",call->acks_pend_cnt);
+ _leave(" = 0 (apc=%u)", call->acks_pend_cnt);
return 0;
bad_queue:
- panic("%s(): acks_pendq in bad state (packet #%u absent)\n",__FUNCTION__,seq);
+ panic("%s(): acks_pendq in bad state (packet #%u absent)\n",
+ __FUNCTION__, seq);
} /* end rxrpc_call_record_ACK() */
/*****************************************************************************/
/*
* transfer data from the ready packet queue to the asynchronous read buffer
- * - since this func is the only one going to look at packets queued on app_readyq, we don't need
- * a lock to modify or access them, only to modify the queue pointers
+ * - since this func is the only one going to look at packets queued on
+ * app_readyq, we don't need a lock to modify or access them, only to modify
+ * the queue pointers
* - called with call->lock held
* - the buffer must be in kernel space
* - returns:
@@ -1575,16 +1665,20 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
int ret;
_enter("%p{as=%d buf=%p qty=%Zu/%Zu}",
- call,call->app_async_read,call->app_read_buf,call->app_ready_qty,call->app_mark);
+ call,
+ call->app_async_read, call->app_read_buf,
+ call->app_ready_qty, call->app_mark);
/* check the state */
switch (call->app_call_state) {
case RXRPC_CSTATE_SRVR_RCV_ARGS:
case RXRPC_CSTATE_CLNT_RCV_REPLY:
if (call->app_last_rcv) {
- printk("%s(%p,%p,%Zd): Inconsistent call state (%s, last pkt)",
- __FUNCTION__,call,call->app_read_buf,call->app_mark,
- rxrpc_call_states[call->app_call_state]);
+ printk("%s(%p,%p,%Zd):"
+ " Inconsistent call state (%s, last pkt)",
+ __FUNCTION__,
+ call, call->app_read_buf, call->app_mark,
+ rxrpc_call_states[call->app_call_state]);
BUG();
}
break;
@@ -1596,9 +1690,11 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
case RXRPC_CSTATE_SRVR_SND_REPLY:
if (!call->app_last_rcv) {
- printk("%s(%p,%p,%Zd): Inconsistent call state (%s, not last pkt)",
- __FUNCTION__,call,call->app_read_buf,call->app_mark,
- rxrpc_call_states[call->app_call_state]);
+ printk("%s(%p,%p,%Zd):"
+ " Inconsistent call state (%s, not last pkt)",
+ __FUNCTION__,
+ call, call->app_read_buf, call->app_mark,
+ rxrpc_call_states[call->app_call_state]);
BUG();
}
_debug("Trying to read data from call in SND_REPLY state");
@@ -1609,13 +1705,14 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
return -ECONNABORTED;
default:
- printk("reading in unexpected state [[[ %u ]]]\n",call->app_call_state);
+ printk("reading in unexpected state [[[ %u ]]]\n",
+ call->app_call_state);
BUG();
}
/* handle the case of not having an async buffer */
if (!call->app_async_read) {
- if (call->app_mark==RXRPC_APP_MARK_EOF) {
+ if (call->app_mark == RXRPC_APP_MARK_EOF) {
ret = call->app_last_rcv ? 0 : -EAGAIN;
}
else {
@@ -1628,28 +1725,33 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
}
}
- _leave(" = %d [no buf]",ret);
+ _leave(" = %d [no buf]", ret);
return 0;
}
- while (!list_empty(&call->app_readyq) && call->app_mark>0) {
- msg = list_entry(call->app_readyq.next,struct rxrpc_message,link);
+ while (!list_empty(&call->app_readyq) && call->app_mark > 0) {
+ msg = list_entry(call->app_readyq.next,
+ struct rxrpc_message, link);
/* drag as much data as we need out of this packet */
- qty = min(call->app_mark,msg->dsize);
+ qty = min(call->app_mark, msg->dsize);
- _debug("reading %Zu from skb=%p off=%lu",qty,msg->pkt,msg->offset);
+ _debug("reading %Zu from skb=%p off=%lu",
+ qty, msg->pkt, msg->offset);
if (call->app_read_buf)
- if (skb_copy_bits(msg->pkt,msg->offset,call->app_read_buf,qty)<0)
- panic("%s: Failed to copy data from packet: (%p,%p,%Zd)",
- __FUNCTION__,call,call->app_read_buf,qty);
+ if (skb_copy_bits(msg->pkt, msg->offset,
+ call->app_read_buf, qty) < 0)
+ panic("%s: Failed to copy data from packet:"
+ " (%p,%p,%Zd)",
+ __FUNCTION__,
+ call, call->app_read_buf, qty);
/* if that packet is now empty, discard it */
call->app_ready_qty -= qty;
msg->dsize -= qty;
- if (msg->dsize==0) {
+ if (msg->dsize == 0) {
list_del_init(&msg->link);
rxrpc_put_message(msg);
}
@@ -1658,10 +1760,11 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
}
call->app_mark -= qty;
- if (call->app_read_buf) call->app_read_buf += qty;
+ if (call->app_read_buf)
+ call->app_read_buf += qty;
}
- if (call->app_mark==0) {
+ if (call->app_mark == 0) {
call->app_async_read = 0;
call->app_mark = RXRPC_APP_MARK_EOF;
call->app_read_buf = NULL;
@@ -1695,7 +1798,8 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
}
if (call->app_last_rcv) {
- _debug("Insufficient data (%Zu/%Zu)",call->app_ready_qty,call->app_mark);
+ _debug("Insufficient data (%Zu/%Zu)",
+ call->app_ready_qty, call->app_mark);
call->app_async_read = 0;
call->app_mark = RXRPC_APP_MARK_EOF;
call->app_read_buf = NULL;
@@ -1710,22 +1814,26 @@ static int __rxrpc_call_read_data(struct rxrpc_call *call)
/*****************************************************************************/
/*
- * attempt to read the specified amount of data from the call's ready queue into the buffer
- * provided
- * - since this func is the only one going to look at packets queued on app_readyq, we don't need
- * a lock to modify or access them, only to modify the queue pointers
+ * attempt to read the specified amount of data from the call's ready queue
+ * into the buffer provided
+ * - since this func is the only one going to look at packets queued on
+ * app_readyq, we don't need a lock to modify or access them, only to modify
+ * the queue pointers
* - if the buffer pointer is NULL, then data is merely drained, not copied
- * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is enough data or an
- * error will be generated
- * - note that the caller must have added the calling task to the call's wait queue beforehand
- * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this function doesn't read
- * all available data
+ * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is
+ * enough data or an error will be generated
+ * - note that the caller must have added the calling task to the call's wait
+ * queue beforehand
+ * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this
+ * function doesn't read all available data
*/
-int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int flags)
+int rxrpc_call_read_data(struct rxrpc_call *call,
+ void *buffer, size_t size, int flags)
{
int ret;
- _enter("%p{arq=%Zu},%p,%Zd,%x",call,call->app_ready_qty,buffer,size,flags);
+ _enter("%p{arq=%Zu},%p,%Zd,%x",
+ call, call->app_ready_qty, buffer, size, flags);
spin_lock(&call->lock);
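
The comment block above documents the caller-visible semantics of rxrpc_call_read_data(). A rough userspace model of those rules follows (a NULL buffer only drains, -EAGAIN while data is still outstanding, and READ_ALL turning leftover data into an error); blocking is not modelled and every name here is an illustrative stand-in, not the kernel API:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MODEL_READ_ALL 0x02

static char readyq[64] = "reply payload";	/* bytes already received */
static size_t ready_qty = 13;
static int last_pkt_rcvd = 1;			/* final DATA packet seen */

static int model_read(char *buf, size_t want, int flags)
{
	if (want > ready_qty) {
		if (!last_pkt_rcvd)
			return -EAGAIN;		/* caller may retry or block */
		want = ready_qty;		/* short read of the tail */
	}
	if (buf)
		memcpy(buf, readyq, want);	/* NULL buf would just drain */
	memmove(readyq, readyq + want, ready_qty - want);
	ready_qty -= want;

	/* READ_ALL: data left after the final packet is a protocol error */
	if ((flags & MODEL_READ_ALL) && (ready_qty > 0 || !last_pkt_rcvd))
		return -EBADMSG;
	return (int) want;
}

int main(void)
{
	char buf[32];
	int n = model_read(buf, sizeof(buf), MODEL_READ_ALL);

	if (n >= 0)
		printf("read %d bytes: %.*s\n", n, n, buf);
	else
		printf("read failed: %d\n", n);
	return 0;
}
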
@@ -1744,9 +1852,10 @@ int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int
ret = __rxrpc_call_read_data(call);
switch (ret) {
case 0:
- if (flags&RXRPC_CALL_READ_ALL && (!call->app_last_rcv || call->app_ready_qty>0)) {
+ if (flags & RXRPC_CALL_READ_ALL &&
+ (!call->app_last_rcv || call->app_ready_qty > 0)) {
_leave(" = -EBADMSG");
- __rxrpc_call_abort(call,-EBADMSG);
+ __rxrpc_call_abort(call, -EBADMSG);
return -EBADMSG;
}
@@ -1757,18 +1866,18 @@ int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int
case -ECONNABORTED:
spin_unlock(&call->lock);
- _leave(" = %d [aborted]",ret);
+ _leave(" = %d [aborted]", ret);
return ret;
- default:
- __rxrpc_call_abort(call,ret);
- _leave(" = %d",ret);
+ default:
+ __rxrpc_call_abort(call, ret);
+ _leave(" = %d", ret);
return ret;
case -EAGAIN:
spin_unlock(&call->lock);
- if (!(flags&RXRPC_CALL_READ_BLOCK)) {
+ if (!(flags & RXRPC_CALL_READ_BLOCK)) {
_leave(" = -EAGAIN");
return -EAGAIN;
}
@@ -1789,7 +1898,7 @@ int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int
return -EINTR;
}
- if (call->app_call_state==RXRPC_CSTATE_ERROR) {
+ if (call->app_call_state == RXRPC_CSTATE_ERROR) {
_leave(" = -ECONNABORTED");
return -ECONNABORTED;
}
@@ -1804,8 +1913,8 @@ int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int
/*
* write data to a call
* - the data may not be sent immediately if it doesn't fill a buffer
- * - if we can't queue all the data for buffering now, siov[] will have been adjusted to take
- * account of what has been sent
+ * - if we can't queue all the data for buffering now, siov[] will have been
+ * adjusted to take account of what has been sent
*/
int rxrpc_call_write_data(struct rxrpc_call *call,
size_t sioc,
@@ -1821,7 +1930,9 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
char *buf;
int ret;
- _enter("%p,%Zu,%p,%02x,%x,%d,%p",call,sioc,siov,rxhdr_flags,alloc_flags,dup_data,size_sent);
+ _enter("%p,%Zu,%p,%02x,%x,%d,%p",
+ call, sioc, siov, rxhdr_flags, alloc_flags, dup_data,
+ size_sent);
*size_sent = 0;
size = 0;
@@ -1840,8 +1951,9 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
/* calculate how much data we've been given */
sptr = siov;
- for (; sioc>0; sptr++, sioc--) {
- if (!sptr->iov_len) continue;
+ for (; sioc > 0; sptr++, sioc--) {
+ if (!sptr->iov_len)
+ continue;
if (!sptr->iov_base)
goto out;
@@ -1849,27 +1961,30 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
size += sptr->iov_len;
}
- _debug("- size=%Zu mtu=%Zu",size,call->conn->mtu_size);
+ _debug("- size=%Zu mtu=%Zu", size, call->conn->mtu_size);
do {
/* make sure there's a message under construction */
if (!call->snd_nextmsg) {
/* no - allocate a message with no data yet attached */
- ret = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_DATA,
- 0,NULL,alloc_flags,&call->snd_nextmsg);
- if (ret<0)
+ ret = rxrpc_conn_newmsg(call->conn, call,
+ RXRPC_PACKET_TYPE_DATA,
+ 0, NULL, alloc_flags,
+ &call->snd_nextmsg);
+ if (ret < 0)
goto out;
- _debug("- allocated new message [ds=%Zu]",call->snd_nextmsg->dsize);
+ _debug("- allocated new message [ds=%Zu]",
+ call->snd_nextmsg->dsize);
}
msg = call->snd_nextmsg;
msg->hdr.flags |= rxhdr_flags;
/* deal with zero-length terminal packet */
- if (size==0) {
+ if (size == 0) {
if (rxhdr_flags & RXRPC_LAST_PACKET) {
ret = rxrpc_call_flush(call);
- if (ret<0)
+ if (ret < 0)
goto out;
}
break;
@@ -1877,24 +1992,27 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
/* work out how much space current packet has available */
space = call->conn->mtu_size - msg->dsize;
- chunk = min(space,size);
+ chunk = min(space, size);
- _debug("- [before] space=%Zu chunk=%Zu",space,chunk);
+ _debug("- [before] space=%Zu chunk=%Zu", space, chunk);
while (!siov->iov_len)
siov++;
- /* if we are going to have to duplicate the data then coalesce it too */
+ /* if we are going to have to duplicate the data then coalesce
+ * it too */
if (dup_data) {
/* don't allocate more that 1 page at a time */
- if (chunk>PAGE_SIZE)
+ if (chunk > PAGE_SIZE)
chunk = PAGE_SIZE;
/* allocate a data buffer and attach to the message */
- buf = kmalloc(chunk,alloc_flags);
+ buf = kmalloc(chunk, alloc_flags);
if (unlikely(!buf)) {
- if (msg->dsize==sizeof(struct rxrpc_header)) {
- /* discard an empty msg and wind back the seq counter */
+ if (msg->dsize ==
+ sizeof(struct rxrpc_header)) {
+ /* discard an empty msg and wind back
+ * the seq counter */
rxrpc_put_message(msg);
call->snd_nextmsg = NULL;
call->snd_seq_count--;
@@ -1905,7 +2023,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
}
tmp = msg->dcount++;
- set_bit(tmp,&msg->dfree);
+ set_bit(tmp, &msg->dfree);
msg->data[tmp].iov_base = buf;
msg->data[tmp].iov_len = chunk;
msg->dsize += chunk;
@@ -1913,9 +2031,9 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
size -= chunk;
/* load the buffer with data */
- while (chunk>0) {
- tmp = min(chunk,siov->iov_len);
- memcpy(buf,siov->iov_base,tmp);
+ while (chunk > 0) {
+ tmp = min(chunk, siov->iov_len);
+ memcpy(buf, siov->iov_base, tmp);
buf += tmp;
siov->iov_base += tmp;
siov->iov_len -= tmp;
@@ -1926,7 +2044,8 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
}
else {
/* we want to attach the supplied buffers directly */
- while (chunk>0 && msg->dcount<RXRPC_MSG_MAX_IOCS) {
+ while (chunk > 0 &&
+ msg->dcount < RXRPC_MSG_MAX_IOCS) {
tmp = msg->dcount++;
msg->data[tmp].iov_base = siov->iov_base;
msg->data[tmp].iov_len = siov->iov_len;
@@ -1938,20 +2057,20 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
}
}
- _debug("- [loaded] chunk=%Zu size=%Zu",chunk,size);
+ _debug("- [loaded] chunk=%Zu size=%Zu", chunk, size);
/* dispatch the message when full, final or requesting ACK */
- if (msg->dsize>=call->conn->mtu_size || rxhdr_flags) {
+ if (msg->dsize >= call->conn->mtu_size || rxhdr_flags) {
ret = rxrpc_call_flush(call);
- if (ret<0)
+ if (ret < 0)
goto out;
}
- } while(size>0);
+ } while(size > 0);
ret = 0;
out:
- _leave(" = %d (%Zd queued, %Zd rem)",ret,*size_sent,size);
+ _leave(" = %d (%Zd queued, %Zd rem)", ret, *size_sent, size);
return ret;
} /* end rxrpc_call_write_data() */
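
The body of rxrpc_call_write_data() gathers the caller's iovecs into MTU-sized packets and dispatches each one as it fills, adjusting siov[] as it goes. A compact userspace sketch of that coalescing loop (the 8-byte MTU and the sample strings are invented; the kernel sizes packets from conn->mtu_size):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

#define MODEL_MTU 8

int main(void)
{
	char a[] = "hello ", b[] = "rxrpc ", c[] = "world";
	struct iovec siov[3] = {
		{ a, strlen(a) }, { b, strlen(b) }, { c, strlen(c) }
	};
	char pkt[MODEL_MTU];
	size_t pktlen = 0;
	int i;

	for (i = 0; i < 3; i++) {
		while (siov[i].iov_len > 0) {
			size_t space = MODEL_MTU - pktlen;
			size_t chunk = siov[i].iov_len < space ?
				       siov[i].iov_len : space;

			memcpy(pkt + pktlen, siov[i].iov_base, chunk);
			pktlen += chunk;
			siov[i].iov_base = (char *) siov[i].iov_base + chunk;
			siov[i].iov_len -= chunk;   /* adjusted, as documented */

			if (pktlen == MODEL_MTU) {  /* packet full: dispatch */
				printf("send %.*s\n", (int) pktlen, pkt);
				pktlen = 0;
			}
		}
	}
	if (pktlen)				    /* final short packet */
		printf("send %.*s\n", (int) pktlen, pkt);
	return 0;
}
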
@@ -1965,7 +2084,7 @@ int rxrpc_call_flush(struct rxrpc_call *call)
struct rxrpc_message *msg;
int ret = 0;
- _enter("%p",call);
+ _enter("%p", call);
rxrpc_get_call(call);
@@ -1983,25 +2102,28 @@ int rxrpc_call_flush(struct rxrpc_call *call)
}
_proto("Sending DATA message { ds=%Zu dc=%u df=%02lu }",
- msg->dsize,msg->dcount,msg->dfree);
+ msg->dsize, msg->dcount, msg->dfree);
/* queue and adjust call state */
spin_lock(&call->lock);
- list_add_tail(&msg->link,&call->acks_pendq);
+ list_add_tail(&msg->link, &call->acks_pendq);
- /* decide what to do depending on current state and if this is the last packet */
+ /* decide what to do depending on current state and if this is
+ * the last packet */
ret = -EINVAL;
switch (call->app_call_state) {
case RXRPC_CSTATE_SRVR_SND_REPLY:
if (msg->hdr.flags & RXRPC_LAST_PACKET) {
- call->app_call_state = RXRPC_CSTATE_SRVR_RCV_FINAL_ACK;
+ call->app_call_state =
+ RXRPC_CSTATE_SRVR_RCV_FINAL_ACK;
_state(call);
}
break;
case RXRPC_CSTATE_CLNT_SND_ARGS:
if (msg->hdr.flags & RXRPC_LAST_PACKET) {
- call->app_call_state = RXRPC_CSTATE_CLNT_RCV_REPLY;
+ call->app_call_state =
+ RXRPC_CSTATE_CLNT_RCV_REPLY;
_state(call);
}
break;
@@ -2016,19 +2138,20 @@ int rxrpc_call_flush(struct rxrpc_call *call)
call->acks_pend_cnt++;
mod_timer(&call->acks_timeout,
- __rxrpc_rtt_based_timeout(call,rxrpc_call_acks_timeout));
+ __rxrpc_rtt_based_timeout(call,
+ rxrpc_call_acks_timeout));
spin_unlock(&call->lock);
- ret = rxrpc_conn_sendmsg(call->conn,msg);
- if (ret==0)
+ ret = rxrpc_conn_sendmsg(call->conn, msg);
+ if (ret == 0)
call->pkt_snd_count++;
}
out:
rxrpc_put_call(call);
- _leave(" = %d",ret);
+ _leave(" = %d", ret);
return ret;
} /* end rxrpc_call_flush() */
@@ -2043,14 +2166,16 @@ static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
struct list_head *_p;
rxrpc_seq_t seq = 0;
- _enter("%p,%u",call,highest);
+ _enter("%p,%u", call, highest);
_proto("Rx Resend required");
/* handle too many resends */
- if (call->snd_resend_cnt>=rxrpc_call_max_resend) {
- _debug("Aborting due to too many resends (rcv=%d)",call->pkt_rcv_count);
- rxrpc_call_abort(call,call->pkt_rcv_count>0?-EIO:-ETIMEDOUT);
+ if (call->snd_resend_cnt >= rxrpc_call_max_resend) {
+ _debug("Aborting due to too many resends (rcv=%d)",
+ call->pkt_rcv_count);
+ rxrpc_call_abort(call,
+ call->pkt_rcv_count > 0 ? -EIO : -ETIMEDOUT);
_leave("");
return;
}
@@ -2059,35 +2184,38 @@ static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
call->snd_resend_cnt++;
for (;;) {
/* determine which the next packet we might need to ACK is */
- if (seq<=call->acks_dftv_seq)
+ if (seq <= call->acks_dftv_seq)
seq = call->acks_dftv_seq;
seq++;
- if (seq>highest)
+ if (seq > highest)
break;
/* look for the packet in the pending-ACK queue */
- list_for_each(_p,&call->acks_pendq) {
- msg = list_entry(_p,struct rxrpc_message,link);
- if (msg->seq==seq)
+ list_for_each(_p, &call->acks_pendq) {
+ msg = list_entry(_p, struct rxrpc_message, link);
+ if (msg->seq == seq)
goto found_msg;
}
- panic("%s(%p,%d): Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n",
- __FUNCTION__,call,highest,call->acks_dftv_seq,call->snd_seq_count,seq);
+ panic("%s(%p,%d):"
+ " Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n",
+ __FUNCTION__, call, highest,
+ call->acks_dftv_seq, call->snd_seq_count, seq);
found_msg:
- if (msg->state!=RXRPC_MSG_SENT)
+ if (msg->state != RXRPC_MSG_SENT)
continue; /* only un-ACK'd packets */
rxrpc_get_message(msg);
spin_unlock(&call->lock);
- /* send each message again (and ignore any errors we might incur) */
+ /* send each message again (and ignore any errors we might
+ * incur) */
_proto("Resending DATA message { ds=%Zu dc=%u df=%02lu }",
- msg->dsize,msg->dcount,msg->dfree);
+ msg->dsize, msg->dcount, msg->dfree);
- if (rxrpc_conn_sendmsg(call->conn,msg)==0)
+ if (rxrpc_conn_sendmsg(call->conn, msg) == 0)
call->pkt_snd_count++;
rxrpc_put_message(msg);
@@ -2096,7 +2224,8 @@ static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
}
/* reset the timeout */
- mod_timer(&call->acks_timeout,__rxrpc_rtt_based_timeout(call,rxrpc_call_acks_timeout));
+ mod_timer(&call->acks_timeout,
+ __rxrpc_rtt_based_timeout(call, rxrpc_call_acks_timeout));
spin_unlock(&call->lock);
@@ -2109,10 +2238,10 @@ static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
*/
void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno)
{
- _enter("%p{%u},%d",call,ntohl(call->call_id),errno);
+ _enter("%p{%u},%d", call, ntohl(call->call_id), errno);
/* if this call is already aborted, then just wake up any waiters */
- if (call->app_call_state==RXRPC_CSTATE_ERROR) {
+ if (call->app_call_state == RXRPC_CSTATE_ERROR) {
call->app_error_func(call);
}
else {
@@ -2124,10 +2253,10 @@ void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno)
call->app_err_state = RXRPC_ESTATE_LOCAL_ERROR;
else
call->app_err_state = RXRPC_ESTATE_REMOTE_ERROR;
- call->app_errno = errno;
- call->app_mark = RXRPC_APP_MARK_EOF;
- call->app_read_buf = NULL;
- call->app_async_read = 0;
+ call->app_errno = errno;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ call->app_read_buf = NULL;
+ call->app_async_read = 0;
/* map the error */
call->app_aemap_func(call);
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index a60412d9f7bf..172c63127784 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -28,18 +28,20 @@ __RXACCT_DECL(atomic_t rxrpc_connection_count);
LIST_HEAD(rxrpc_conns);
DECLARE_RWSEM(rxrpc_conns_sem);
+unsigned long rxrpc_conn_timeout = 60 * 60;
static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
{
- struct rxrpc_connection *conn = list_entry(timer,struct rxrpc_connection,timeout);
+ struct rxrpc_connection *conn =
+ list_entry(timer, struct rxrpc_connection, timeout);
- _debug("Rx CONN TIMEOUT [%p{u=%d}]",conn,atomic_read(&conn->usage));
+ _debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage));
rxrpc_conn_do_timeout(conn);
}
static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
- .timed_out = __rxrpc_conn_timeout,
+ timed_out: __rxrpc_conn_timeout,
};
/*****************************************************************************/
@@ -54,19 +56,20 @@ static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
_enter("%p",peer);
/* allocate and initialise a connection record */
- conn = kmalloc(sizeof(struct rxrpc_connection),GFP_KERNEL);
+ conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
if (!conn) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- memset(conn,0,sizeof(struct rxrpc_connection));
- atomic_set(&conn->usage,1);
+ memset(conn, 0, sizeof(struct rxrpc_connection));
+ atomic_set(&conn->usage, 1);
INIT_LIST_HEAD(&conn->link);
+ INIT_LIST_HEAD(&conn->id_link);
init_waitqueue_head(&conn->chanwait);
spin_lock_init(&conn->lock);
- rxrpc_timer_init(&conn->timeout,&rxrpc_conn_timer_ops);
+ rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops);
do_gettimeofday(&conn->atime);
conn->mtu_size = 1024;
@@ -75,7 +78,7 @@ static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
__RXACCT(atomic_inc(&rxrpc_connection_count));
*_conn = conn;
- _leave(" = 0 (%p)",conn);
+ _leave(" = 0 (%p)", conn);
return 0;
} /* end __rxrpc_create_connection() */
@@ -85,61 +88,123 @@ static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
* create a new connection record for outgoing connections
*/
int rxrpc_create_connection(struct rxrpc_transport *trans,
- u16 port,
- u32 addr,
- unsigned short service_id,
+ uint16_t port,
+ uint32_t addr,
+ uint16_t service_id,
void *security,
struct rxrpc_connection **_conn)
{
- struct rxrpc_connection *conn;
+ struct rxrpc_connection *candidate, *conn;
struct rxrpc_peer *peer;
+ struct list_head *_p;
+ uint32_t connid;
int ret;
- _enter("%p{%hu},%u,%hu",trans,trans->port,ntohs(port),service_id);
+ _enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id);
/* get a peer record */
- ret = rxrpc_peer_lookup(trans,addr,&peer);
- if (ret<0) {
- _leave(" = %d",ret);
+ ret = rxrpc_peer_lookup(trans, addr, &peer);
+ if (ret < 0) {
+ _leave(" = %d", ret);
return ret;
}
/* allocate and initialise a connection record */
- ret = __rxrpc_create_connection(peer,&conn);
- if (ret<0) {
+ ret = __rxrpc_create_connection(peer, &candidate);
+ if (ret < 0) {
rxrpc_put_peer(peer);
- _leave(" = %d",ret);
+ _leave(" = %d", ret);
return ret;
}
/* fill in the specific bits */
- conn->addr.sin_family = AF_INET;
- conn->addr.sin_port = port;
- conn->addr.sin_addr.s_addr = addr;
+ candidate->addr.sin_family = AF_INET;
+ candidate->addr.sin_port = port;
+ candidate->addr.sin_addr.s_addr = addr;
+
+ candidate->in_epoch = rxrpc_epoch;
+ candidate->out_epoch = rxrpc_epoch;
+ candidate->in_clientflag = 0;
+ candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
+ candidate->service_id = htons(service_id);
+
+ /* invent a unique connection ID */
+ write_lock(&peer->conn_idlock);
+
+ try_next_id:
+ connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK);
+ peer->conn_idcounter += RXRPC_MAXCALLS;
+
+ list_for_each(_p, &peer->conn_idlist) {
+ conn = list_entry(_p, struct rxrpc_connection, id_link);
+ if (connid == conn->conn_id)
+ goto try_next_id;
+ if (connid > conn->conn_id)
+ break;
+ }
+
+ _debug("selected candidate conn ID %x.%u",
+ ntohl(peer->addr.s_addr), ntohl(connid));
+
+ candidate->conn_id = connid;
+ list_add_tail(&candidate->id_link, _p);
- conn->in_epoch = rxrpc_epoch;
- conn->out_epoch = rxrpc_epoch;
- conn->in_clientflag = 0;
- conn->out_clientflag = RXRPC_CLIENT_INITIATED;
- conn->conn_id = htonl((unsigned long) conn & RXRPC_CIDMASK);
- conn->service_id = htons(service_id);
+ write_unlock(&peer->conn_idlock);
/* attach to peer */
- conn->peer = peer;
+ candidate->peer = peer;
write_lock(&peer->conn_lock);
- list_add_tail(&conn->link,&peer->conn_active);
+
+ /* search the peer's transport graveyard list */
+ spin_lock(&peer->conn_gylock);
+ list_for_each(_p, &peer->conn_graveyard) {
+ conn = list_entry(_p, struct rxrpc_connection, link);
+ if (conn->addr.sin_port == candidate->addr.sin_port &&
+ conn->security_ix == candidate->security_ix &&
+ conn->service_id == candidate->service_id &&
+ conn->in_clientflag == 0)
+ goto found_in_graveyard;
+ }
+ spin_unlock(&peer->conn_gylock);
+
+ /* pick the new candidate */
+ _debug("created connection: {%08x} [out]", htonl(candidate->conn_id));
atomic_inc(&peer->conn_count);
+ conn = candidate;
+ candidate = NULL;
+
+ make_active:
+ list_add_tail(&conn->link, &peer->conn_active);
write_unlock(&peer->conn_lock);
- down_write(&rxrpc_conns_sem);
- list_add_tail(&conn->proc_link,&rxrpc_conns);
- up_write(&rxrpc_conns_sem);
+ if (candidate) {
+ write_lock(&peer->conn_idlock);
+ list_del(&candidate->id_link);
+ write_unlock(&peer->conn_idlock);
+
+ __RXACCT(atomic_dec(&rxrpc_connection_count));
+ kfree(candidate);
+ }
+ else {
+ down_write(&rxrpc_conns_sem);
+ list_add_tail(&conn->proc_link, &rxrpc_conns);
+ up_write(&rxrpc_conns_sem);
+ }
*_conn = conn;
- _leave(" = 0 (%p)",conn);
+ _leave(" = 0 (%p)", conn);
return 0;
+
+ /* handle resurrecting a connection from the graveyard */
+ found_in_graveyard:
+ _debug("resurrecting connection: {%08x} [out]", htonl(conn->conn_id));
+ rxrpc_get_connection(conn);
+ rxrpc_krxtimod_del_timer(&conn->timeout);
+ list_del_init(&conn->link);
+ spin_unlock(&peer->conn_gylock);
+ goto make_active;
} /* end rxrpc_create_connection() */
/*****************************************************************************/
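
The rewritten rxrpc_create_connection() stops deriving the connection ID from the kernel pointer and instead draws it from a per-peer counter, keeping issued IDs on a sorted per-peer list and retrying on collision. A standalone model of that allocation scheme (plain C, a fixed-size array instead of the kernel list, no bounds checking; the stride of 4 mirrors RXRPC_MAXCALLS, the four call channels per connection):

#include <stdio.h>

#define MAXIDS 16
static unsigned issued[MAXIDS];
static int nissued;
static unsigned idcounter;

static unsigned alloc_conn_id(void)
{
	unsigned id;
	int i, j;

try_next_id:
	id = idcounter;
	idcounter += 4;				/* RXRPC_MAXCALLS */

	for (i = 0; i < nissued; i++) {
		if (issued[i] == id)
			goto try_next_id;	/* collision: try the next */
		if (issued[i] > id)
			break;			/* insertion point found */
	}
	for (j = nissued; j > i; j--)		/* keep the array sorted */
		issued[j] = issued[j - 1];
	issued[i] = id;
	nissued++;
	return id;
}

int main(void)
{
	issued[0] = 4;		/* pretend ID 4 is already in use */
	nissued = 1;
	idcounter = 0;

	printf("%u\n", alloc_conn_id());	/* 0 */
	printf("%u\n", alloc_conn_id());	/* 4 collides, so 8 */
	printf("%u\n", alloc_conn_id());	/* 12 */
	return 0;
}

Keeping the list sorted is what lets both the collision scan and later lookups stop as soon as they pass the value they are looking for.
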
@@ -159,7 +224,10 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
u8 x_clflag;
_enter("%p{{%hu}},%u,%hu",
- peer,peer->trans->port,ntohs(msg->pkt->h.uh->source),ntohs(msg->hdr.serviceId));
+ peer,
+ peer->trans->port,
+ ntohs(msg->pkt->h.uh->source),
+ ntohs(msg->hdr.serviceId));
x_port = msg->pkt->h.uh->source;
x_epoch = msg->hdr.epoch;
@@ -170,8 +238,8 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
/* [common case] search the transport's active list first */
read_lock(&peer->conn_lock);
- list_for_each(_p,&peer->conn_active) {
- conn = list_entry(_p,struct rxrpc_connection,link);
+ list_for_each(_p, &peer->conn_active) {
+ conn = list_entry(_p, struct rxrpc_connection, link);
if (conn->addr.sin_port == x_port &&
conn->in_epoch == x_epoch &&
conn->conn_id == x_connid &&
@@ -187,9 +255,9 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
* - only examine the graveyard for an outbound connection
*/
if (x_clflag) {
- ret = __rxrpc_create_connection(peer,&candidate);
- if (ret<0) {
- _leave(" = %d",ret);
+ ret = __rxrpc_create_connection(peer, &candidate);
+ if (ret < 0) {
+ _leave(" = %d", ret);
return ret;
}
@@ -206,10 +274,11 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
candidate->security_ix = x_secix;
}
- /* search the active list again, just in case it appeared whilst we were busy */
+ /* search the active list again, just in case it appeared whilst we
+ * were busy */
write_lock(&peer->conn_lock);
- list_for_each(_p,&peer->conn_active) {
- conn = list_entry(_p,struct rxrpc_connection,link);
+ list_for_each(_p, &peer->conn_active) {
+ conn = list_entry(_p, struct rxrpc_connection, link);
if (conn->addr.sin_port == x_port &&
conn->in_epoch == x_epoch &&
conn->conn_id == x_connid &&
@@ -221,8 +290,8 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
/* search the transport's graveyard list */
spin_lock(&peer->conn_gylock);
- list_for_each(_p,&peer->conn_graveyard) {
- conn = list_entry(_p,struct rxrpc_connection,link);
+ list_for_each(_p, &peer->conn_graveyard) {
+ conn = list_entry(_p, struct rxrpc_connection, link);
if (conn->addr.sin_port == x_port &&
conn->in_epoch == x_epoch &&
conn->conn_id == x_connid &&
@@ -241,6 +310,7 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
}
/* we can now add the new candidate to the list */
+ _debug("created connection: {%08x} [in]", htonl(candidate->conn_id));
rxrpc_get_peer(peer);
conn = candidate;
candidate = NULL;
@@ -248,25 +318,29 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
fresh = 1;
make_active:
- list_add_tail(&conn->link,&peer->conn_active);
+ list_add_tail(&conn->link, &peer->conn_active);
success_uwfree:
write_unlock(&peer->conn_lock);
if (candidate) {
+ write_lock(&peer->conn_idlock);
+ list_del(&candidate->id_link);
+ write_unlock(&peer->conn_idlock);
+
__RXACCT(atomic_dec(&rxrpc_connection_count));
kfree(candidate);
}
if (fresh) {
down_write(&rxrpc_conns_sem);
- list_add_tail(&conn->proc_link,&rxrpc_conns);
+ list_add_tail(&conn->proc_link, &rxrpc_conns);
up_write(&rxrpc_conns_sem);
}
success:
*_conn = conn;
- _leave(" = 0 (%p)",conn);
+ _leave(" = 0 (%p)", conn);
return 0;
/* handle the connection being found in the active list straight off */
@@ -277,6 +351,7 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
/* handle resurrecting a connection from the graveyard */
found_in_graveyard:
+ _debug("resurrecting connection: {%08x} [in]", htonl(conn->conn_id));
rxrpc_get_peer(peer);
rxrpc_get_connection(conn);
rxrpc_krxtimod_del_timer(&conn->timeout);
@@ -284,7 +359,8 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
spin_unlock(&peer->conn_gylock);
goto make_active;
- /* handle finding the connection on the second time through the active list */
+ /* handle finding the connection on the second time through the active
+ * list */
found_active_second_chance:
rxrpc_get_connection(conn);
goto success_uwfree;
@@ -294,19 +370,26 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
/*****************************************************************************/
/*
* finish using a connection record
- * - it will be transferred to the peer's connection graveyard when refcount reaches 0
+ * - it will be transferred to the peer's connection graveyard when refcount
+ * reaches 0
*/
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
- struct rxrpc_peer *peer = conn->peer;
+ struct rxrpc_peer *peer;
+
+ if (!conn)
+ return;
+
+ _enter("%p{u=%d p=%hu}",
+ conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));
- _enter("%p{u=%d p=%hu}",conn,atomic_read(&conn->usage),ntohs(conn->addr.sin_port));
+ peer = conn->peer;
+ spin_lock(&peer->conn_gylock);
/* sanity check */
- if (atomic_read(&conn->usage)<=0)
+ if (atomic_read(&conn->usage) <= 0)
BUG();
- spin_lock(&peer->conn_gylock);
if (likely(!atomic_dec_and_test(&conn->usage))) {
spin_unlock(&peer->conn_gylock);
_leave("");
@@ -314,11 +397,11 @@ void rxrpc_put_connection(struct rxrpc_connection *conn)
}
/* move to graveyard queue */
+ _debug("burying connection: {%08x}", htonl(conn->conn_id));
list_del(&conn->link);
- list_add_tail(&conn->link,&peer->conn_graveyard);
+ list_add_tail(&conn->link, &peer->conn_graveyard);
- /* discard in 100 secs */
- rxrpc_krxtimod_add_timer(&conn->timeout,20*HZ);
+ rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);
spin_unlock(&peer->conn_gylock);
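
With this change the final put no longer frees a connection: it parks it on the peer's graveyard and arms a timer (the tunable rxrpc_conn_timeout, an hour by default, rather than the old hard-coded 20 seconds), so a prompt reconnect can resurrect it. A toy model of that life cycle (simplified types, wall-clock time standing in for the kernel timer):

#include <stdio.h>
#include <time.h>

struct conn {
	int usage;
	int buried;		/* currently on the graveyard list? */
	time_t expiry;
};

static void put_conn(struct conn *c)
{
	if (--c->usage > 0)
		return;
	c->buried = 1;				/* move to the graveyard */
	c->expiry = time(NULL) + 3600;		/* rxrpc_conn_timeout model */
	printf("buried; reapable in an hour\n");
}

static struct conn *lookup_conn(struct conn *c)
{
	if (c->buried && time(NULL) < c->expiry) {
		c->buried = 0;			/* resurrect before expiry */
		c->usage = 1;
		printf("resurrected\n");
		return c;
	}
	return c->buried ? NULL : c;
}

int main(void)
{
	struct conn c = { .usage = 1 };

	put_conn(&c);		/* last ref: parked, not freed */
	lookup_conn(&c);	/* quick re-use: comes straight back */
	return 0;
}
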
@@ -335,16 +418,17 @@ void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
{
struct rxrpc_peer *peer;
- _enter("%p{u=%d p=%hu}",conn,atomic_read(&conn->usage),ntohs(conn->addr.sin_port));
+ _enter("%p{u=%d p=%hu}",
+ conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));
peer = conn->peer;
- if (atomic_read(&conn->usage)<0)
+ if (atomic_read(&conn->usage) < 0)
BUG();
/* remove from graveyard if still dead */
spin_lock(&peer->conn_gylock);
- if (atomic_read(&conn->usage)==0) {
+ if (atomic_read(&conn->usage) == 0) {
list_del_init(&conn->link);
}
else {
@@ -357,12 +441,17 @@ void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
return; /* resurrected */
}
- _debug("--- Destroying Connection %p ---",conn);
+ _debug("--- Destroying Connection %p{%08x} ---",
+ conn, htonl(conn->conn_id));
down_write(&rxrpc_conns_sem);
list_del(&conn->proc_link);
up_write(&rxrpc_conns_sem);
+ write_lock(&peer->conn_idlock);
+ list_del(&conn->id_link);
+ write_unlock(&peer->conn_idlock);
+
__RXACCT(atomic_dec(&rxrpc_connection_count));
kfree(conn);
@@ -379,12 +468,12 @@ void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
*/
void rxrpc_conn_clearall(struct rxrpc_peer *peer)
{
- DECLARE_WAITQUEUE(myself,current);
+ DECLARE_WAITQUEUE(myself, current);
struct rxrpc_connection *conn;
int err;
- _enter("%p",peer);
+ _enter("%p", peer);
/* there shouldn't be any active conns remaining */
if (!list_empty(&peer->conn_active))
@@ -393,11 +482,12 @@ void rxrpc_conn_clearall(struct rxrpc_peer *peer)
/* manually timeout all conns in the graveyard */
spin_lock(&peer->conn_gylock);
while (!list_empty(&peer->conn_graveyard)) {
- conn = list_entry(peer->conn_graveyard.next,struct rxrpc_connection,link);
+ conn = list_entry(peer->conn_graveyard.next,
+ struct rxrpc_connection, link);
err = rxrpc_krxtimod_del_timer(&conn->timeout);
spin_unlock(&peer->conn_gylock);
- if (err==0)
+ if (err == 0)
rxrpc_conn_do_timeout(conn);
spin_lock(&peer->conn_gylock);
@@ -406,27 +496,27 @@ void rxrpc_conn_clearall(struct rxrpc_peer *peer)
/* wait for the conn graveyard to be completely cleared */
set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&peer->conn_gy_waitq,&myself);
+ add_wait_queue(&peer->conn_gy_waitq, &myself);
- while (atomic_read(&peer->conn_count)!=0) {
+ while (atomic_read(&peer->conn_count) != 0) {
schedule();
set_current_state(TASK_UNINTERRUPTIBLE);
}
- remove_wait_queue(&peer->conn_gy_waitq,&myself);
+ remove_wait_queue(&peer->conn_gy_waitq, &myself);
set_current_state(TASK_RUNNING);
_leave("");
-
} /* end rxrpc_conn_clearall() */
/*****************************************************************************/
/*
- * allocate and prepare a message for sending out through the transport endpoint
+ * allocate and prepare a message for sending out through the transport
+ * endpoint
*/
int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
struct rxrpc_call *call,
- u8 type,
+ uint8_t type,
int dcount,
struct iovec diov[],
int alloc_flags,
@@ -435,21 +525,21 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
struct rxrpc_message *msg;
int loop;
- _enter("%p{%d},%p,%u",conn,ntohs(conn->addr.sin_port),call,type);
+ _enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type);
- if (dcount>3) {
+ if (dcount > 3) {
_leave(" = -EINVAL");
return -EINVAL;
}
- msg = kmalloc(sizeof(struct rxrpc_message),alloc_flags);
+ msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags);
if (!msg) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- memset(msg,0,sizeof(*msg));
- atomic_set(&msg->usage,1);
+ memset(msg, 0, sizeof(*msg));
+ atomic_set(&msg->usage, 1);
INIT_LIST_HEAD(&msg->link);
@@ -471,7 +561,8 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
msg->hdr.seq = htonl(msg->seq);
break;
case RXRPC_PACKET_TYPE_ACK:
- /* ACK sequence numbers are complicated. The following may be wrong:
+ /* ACK sequence numbers are complicated. The following
+ * may be wrong:
* - jumbo packet ACKs should have a seq number
* - normal ACKs should not
*/
@@ -485,7 +576,7 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
msg->data[0].iov_len = sizeof(msg->hdr);
msg->data[0].iov_base = &msg->hdr;
- for (loop=0; loop<dcount; loop++) {
+ for (loop=0; loop < dcount; loop++) {
msg->dsize += diov[loop].iov_len;
msg->data[loop+1].iov_len = diov[loop].iov_len;
msg->data[loop+1].iov_base = diov[loop].iov_base;
@@ -493,7 +584,7 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
__RXACCT(atomic_inc(&rxrpc_message_count));
*_msg = msg;
- _leave(" = 0 (%p) #%d",msg,atomic_read(&rxrpc_message_count));
+ _leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count));
return 0;
} /* end rxrpc_conn_newmsg() */
@@ -505,13 +596,14 @@ void __rxrpc_put_message(struct rxrpc_message *msg)
{
int loop;
- _enter("%p #%d",msg,atomic_read(&rxrpc_message_count));
+ _enter("%p #%d", msg, atomic_read(&rxrpc_message_count));
- if (msg->pkt) kfree_skb(msg->pkt);
- if (msg->conn) rxrpc_put_connection(msg->conn);
+ if (msg->pkt)
+ kfree_skb(msg->pkt);
+ rxrpc_put_connection(msg->conn);
- for (loop=0; loop<8; loop++)
- if (test_bit(loop,&msg->dfree))
+ for (loop = 0; loop < 8; loop++)
+ if (test_bit(loop, &msg->dfree))
kfree(msg->data[loop].iov_base);
__RXACCT(atomic_dec(&rxrpc_message_count));
@@ -524,13 +616,14 @@ void __rxrpc_put_message(struct rxrpc_message *msg)
/*
* send a message out through the transport endpoint
*/
-int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg)
+int rxrpc_conn_sendmsg(struct rxrpc_connection *conn,
+ struct rxrpc_message *msg)
{
struct msghdr msghdr;
mm_segment_t oldfs;
int ret;
- _enter("%p{%d}",conn,ntohs(conn->addr.sin_port));
+ _enter("%p{%d}", conn, ntohs(conn->addr.sin_port));
/* fill in some fields in the header */
spin_lock(&conn->lock);
@@ -545,7 +638,7 @@ int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg)
msghdr.msg_iovlen = msg->dcount;
msghdr.msg_control = NULL;
msghdr.msg_controllen = 0;
- msghdr.msg_flags = MSG_CONFIRM|MSG_DONTWAIT;
+ msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;
_net("Sending message type %d of %Zd bytes to %08x:%d",
msg->hdr.type,
@@ -556,10 +649,10 @@ int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg)
/* send the message */
oldfs = get_fs();
set_fs(KERNEL_DS);
- ret = sock_sendmsg(conn->trans->socket,&msghdr,msg->dsize);
+ ret = sock_sendmsg(conn->trans->socket, &msghdr, msg->dsize);
set_fs(oldfs);
- if (ret<0) {
+ if (ret < 0) {
msg->state = RXRPC_MSG_ERROR;
}
else {
@@ -572,7 +665,7 @@ int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg)
spin_unlock(&conn->lock);
}
- _leave(" = %d",ret);
+ _leave(" = %d", ret);
return ret;
} /* end rxrpc_conn_sendmsg() */
@@ -590,7 +683,7 @@ int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
unsigned cix, seq;
int ret = 0;
- _enter("%p,%p,%p",conn,call,msg);
+ _enter("%p,%p,%p", conn, call, msg);
if (!call) {
cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
@@ -600,7 +693,7 @@ int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
if (!call || call->call_id != msg->hdr.callNumber) {
spin_unlock(&conn->lock);
- rxrpc_trans_immediate_abort(conn->trans,msg,-ENOENT);
+ rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT);
goto out;
}
else {
@@ -622,19 +715,21 @@ int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
call->pkt_rcv_count++;
if (msg->pkt->dst && msg->pkt->dst->dev)
- conn->peer->if_mtu = msg->pkt->dst->dev->mtu - msg->pkt->dst->dev->hard_header_len;
+ conn->peer->if_mtu =
+ msg->pkt->dst->dev->mtu -
+ msg->pkt->dst->dev->hard_header_len;
/* queue on the call in seq order */
rxrpc_get_message(msg);
seq = msg->seq;
spin_lock(&call->lock);
- list_for_each(_p,&call->rcv_receiveq) {
- pmsg = list_entry(_p,struct rxrpc_message,link);
- if (pmsg->seq>seq)
+ list_for_each(_p, &call->rcv_receiveq) {
+ pmsg = list_entry(_p, struct rxrpc_message, link);
+ if (pmsg->seq > seq)
break;
}
- list_add_tail(&msg->link,_p);
+ list_add_tail(&msg->link, _p);
/* reset the activity timeout */
call->flags |= RXRPC_CALL_RCV_PKT;
@@ -646,8 +741,7 @@ int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
rxrpc_put_call(call);
out:
- _leave(" = %d",ret);
-
+ _leave(" = %d", ret);
return ret;
} /* end rxrpc_conn_receive_call_packet() */
@@ -655,18 +749,19 @@ int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
/*
* handle an ICMP error being applied to a connection
*/
-void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno)
+void rxrpc_conn_handle_error(struct rxrpc_connection *conn,
+ int local, int errno)
{
struct rxrpc_call *calls[4];
int loop;
- _enter("%p{%d},%d",conn,ntohs(conn->addr.sin_port),errno);
+ _enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno);
/* get a ref to all my calls in one go */
- memset(calls,0,sizeof(calls));
+ memset(calls, 0, sizeof(calls));
spin_lock(&conn->lock);
- for (loop=3; loop>=0; loop--) {
+ for (loop = 3; loop >= 0; loop--) {
if (conn->channels[loop]) {
calls[loop] = conn->channels[loop];
rxrpc_get_call(calls[loop]);
@@ -676,9 +771,9 @@ void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno
spin_unlock(&conn->lock);
/* now kick them all */
- for (loop=3; loop>=0; loop--) {
+ for (loop = 3; loop >= 0; loop--) {
if (calls[loop]) {
- rxrpc_call_handle_error(calls[loop],local,errno);
+ rxrpc_call_handle_error(calls[loop], local, errno);
rxrpc_put_call(calls[loop]);
}
}
diff --git a/net/rxrpc/internal.h b/net/rxrpc/internal.h
index f76d48c4cf3d..752b6d71d017 100644
--- a/net/rxrpc/internal.h
+++ b/net/rxrpc/internal.h
@@ -55,7 +55,7 @@ static inline void rxrpc_discard_my_signals(void)
siginfo_t sinfo;
spin_lock_irq(&current->sighand->siglock);
- dequeue_signal(current,&current->blocked,&sinfo);
+ dequeue_signal(current, &current->blocked, &sinfo);
spin_unlock_irq(&current->sighand->siglock);
}
}
@@ -71,6 +71,7 @@ extern struct rw_semaphore rxrpc_calls_sem;
*/
extern struct list_head rxrpc_conns;
extern struct rw_semaphore rxrpc_conns_sem;
+extern unsigned long rxrpc_conn_timeout;
extern void rxrpc_conn_do_timeout(struct rxrpc_connection *conn);
extern void rxrpc_conn_clearall(struct rxrpc_peer *peer);
@@ -80,6 +81,7 @@ extern void rxrpc_conn_clearall(struct rxrpc_peer *peer);
*/
extern struct list_head rxrpc_peers;
extern struct rw_semaphore rxrpc_peers_sem;
+extern unsigned long rxrpc_peer_timeout;
extern void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
struct rxrpc_message *msg,
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
index dfaf47994e8e..051ae4b397d6 100644
--- a/net/rxrpc/krxiod.c
+++ b/net/rxrpc/krxiod.c
@@ -44,6 +44,12 @@ static int rxrpc_krxiod(void *arg)
daemonize("krxiod");
+ /* only certain signals are of interest */
+ spin_lock_irq(&current->sighand->siglock);
+ siginitsetinv(&current->blocked, 0);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
/* loop around waiting for work to do */
do {
/* wait for work or to be told to exit */
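
The block added above makes the daemon block every signal and rely on explicit draining (rxrpc_discard_my_signals()) instead of being interrupted at an arbitrary point; the same block is added to krxsecd and krxtimod below. A userspace analogue using POSIX calls (sigprocmask/sigtimedwait stand in for the kernel's siginitsetinv()/dequeue_signal()):

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t all;
	struct timespec zero = { 0, 0 };
	siginfo_t info;

	sigfillset(&all);			/* ~0: block everything */
	sigprocmask(SIG_BLOCK, &all, NULL);

	raise(SIGUSR1);				/* now pending, not delivered */

	/* drain pending signals at a point of our own choosing */
	while (sigtimedwait(&all, &info, &zero) > 0)
		printf("discarded signal %d\n", info.si_signo);
	return 0;
}
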
@@ -51,7 +57,7 @@ static int rxrpc_krxiod(void *arg)
if (!atomic_read(&rxrpc_krxiod_qcount)) {
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&rxrpc_krxiod_sleepq,&krxiod);
+ add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -63,7 +69,7 @@ static int rxrpc_krxiod(void *arg)
schedule();
}
- remove_wait_queue(&rxrpc_krxiod_sleepq,&krxiod);
+ remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
set_current_state(TASK_RUNNING);
}
_debug("### End Wait");
@@ -78,12 +84,16 @@ static int rxrpc_krxiod(void *arg)
spin_lock_irq(&rxrpc_krxiod_transportq_lock);
if (!list_empty(&rxrpc_krxiod_transportq)) {
- trans = list_entry(rxrpc_krxiod_transportq.next,
- struct rxrpc_transport,krxiodq_link);
+ trans = list_entry(
+ rxrpc_krxiod_transportq.next,
+ struct rxrpc_transport,
+ krxiodq_link);
+
list_del_init(&trans->krxiodq_link);
atomic_dec(&rxrpc_krxiod_qcount);
- /* make sure it hasn't gone away and doesn't go away */
+ /* make sure it hasn't gone away and doesn't go
+ * away */
if (atomic_read(&trans->usage)>0)
rxrpc_get_transport(trans);
else
@@ -106,13 +116,16 @@ static int rxrpc_krxiod(void *arg)
if (!list_empty(&rxrpc_krxiod_callq)) {
call = list_entry(rxrpc_krxiod_callq.next,
- struct rxrpc_call,rcv_krxiodq_lk);
+ struct rxrpc_call,
+ rcv_krxiodq_lk);
list_del_init(&call->rcv_krxiodq_lk);
atomic_dec(&rxrpc_krxiod_qcount);
- /* make sure it hasn't gone away and doesn't go away */
- if (atomic_read(&call->usage)>0) {
- _debug("@@@ KRXIOD Begin Attend Call %p",call);
+ /* make sure it hasn't gone away and doesn't go
+ * away */
+ if (atomic_read(&call->usage) > 0) {
+ _debug("@@@ KRXIOD"
+ " Begin Attend Call %p",call);
rxrpc_get_call(call);
}
else {
@@ -125,7 +138,7 @@ static int rxrpc_krxiod(void *arg)
if (call) {
rxrpc_call_do_stuff(call);
rxrpc_put_call(call);
- _debug("@@@ KRXIOD End Attend Call %p",call);
+ _debug("@@@ KRXIOD End Attend Call %p", call);
}
}
@@ -137,7 +150,7 @@ static int rxrpc_krxiod(void *arg)
} while (!rxrpc_krxiod_die);
/* and that's all */
- complete_and_exit(&rxrpc_krxiod_dead,0);
+ complete_and_exit(&rxrpc_krxiod_dead, 0);
} /* end rxrpc_krxiod() */
@@ -147,7 +160,7 @@ static int rxrpc_krxiod(void *arg)
*/
int __init rxrpc_krxiod_init(void)
{
- return kernel_thread(rxrpc_krxiod,NULL,0);
+ return kernel_thread(rxrpc_krxiod, NULL, 0);
} /* end rxrpc_krxiod_init() */
@@ -174,16 +187,17 @@ void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
_enter("");
if (list_empty(&trans->krxiodq_link)) {
- spin_lock_irqsave(&rxrpc_krxiod_transportq_lock,flags);
+ spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
if (list_empty(&trans->krxiodq_link)) {
- if (atomic_read(&trans->usage)>0) {
- list_add_tail(&trans->krxiodq_link,&rxrpc_krxiod_transportq);
+ if (atomic_read(&trans->usage) > 0) {
+ list_add_tail(&trans->krxiodq_link,
+ &rxrpc_krxiod_transportq);
atomic_inc(&rxrpc_krxiod_qcount);
}
}
- spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock,flags);
+ spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
wake_up_all(&rxrpc_krxiod_sleepq);
}
@@ -201,12 +215,12 @@ void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
_enter("");
- spin_lock_irqsave(&rxrpc_krxiod_transportq_lock,flags);
+ spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
if (!list_empty(&trans->krxiodq_link)) {
list_del_init(&trans->krxiodq_link);
atomic_dec(&rxrpc_krxiod_qcount);
}
- spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock,flags);
+ spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
_leave("");
@@ -221,15 +235,16 @@ void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
unsigned long flags;
if (list_empty(&call->rcv_krxiodq_lk)) {
- spin_lock_irqsave(&rxrpc_krxiod_callq_lock,flags);
- if (atomic_read(&call->usage)>0) {
- list_add_tail(&call->rcv_krxiodq_lk,&rxrpc_krxiod_callq);
+ spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
+ if (atomic_read(&call->usage) > 0) {
+ list_add_tail(&call->rcv_krxiodq_lk,
+ &rxrpc_krxiod_callq);
atomic_inc(&rxrpc_krxiod_qcount);
}
- spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock,flags);
+ spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
}
wake_up_all(&rxrpc_krxiod_sleepq);
-
+
} /* end rxrpc_krxiod_queue_call() */
/*****************************************************************************/
@@ -240,11 +255,11 @@ void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
{
unsigned long flags;
- spin_lock_irqsave(&rxrpc_krxiod_callq_lock,flags);
+ spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
if (!list_empty(&call->rcv_krxiodq_lk)) {
list_del_init(&call->rcv_krxiodq_lk);
atomic_dec(&rxrpc_krxiod_qcount);
}
- spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock,flags);
+ spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
} /* end rxrpc_krxiod_dequeue_call() */
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
index 02f0e2bd9055..81c0b73ab6f5 100644
--- a/net/rxrpc/krxsecd.c
+++ b/net/rxrpc/krxsecd.c
@@ -36,7 +36,8 @@ static volatile int rxrpc_krxsecd_die;
static atomic_t rxrpc_krxsecd_qcount;
-/* queue of unprocessed inbound messages with seqno #1 and RXRPC_CLIENT_INITIATED flag set */
+/* queue of unprocessed inbound messages with seqno #1 and
+ * RXRPC_CLIENT_INITIATED flag set */
static LIST_HEAD(rxrpc_krxsecd_initmsgq);
static spinlock_t rxrpc_krxsecd_initmsgq_lock = SPIN_LOCK_UNLOCKED;
@@ -48,14 +49,20 @@ static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg);
*/
static int rxrpc_krxsecd(void *arg)
{
- DECLARE_WAITQUEUE(krxsecd,current);
+ DECLARE_WAITQUEUE(krxsecd, current);
int die;
- printk("Started krxsecd %d\n",current->pid);
+ printk("Started krxsecd %d\n", current->pid);
daemonize("krxsecd");
+ /* only certain signals are of interest */
+ spin_lock_irq(&current->sighand->siglock);
+ siginitsetinv(&current->blocked, 0);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
/* loop around waiting for work to do */
do {
/* wait for work or to be told to exit */
@@ -63,7 +70,7 @@ static int rxrpc_krxsecd(void *arg)
if (!atomic_read(&rxrpc_krxsecd_qcount)) {
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&rxrpc_krxsecd_sleepq,&krxsecd);
+ add_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -75,7 +82,7 @@ static int rxrpc_krxsecd(void *arg)
schedule();
}
- remove_wait_queue(&rxrpc_krxsecd_sleepq,&krxsecd);
+ remove_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd);
set_current_state(TASK_RUNNING);
}
die = rxrpc_krxsecd_die;
@@ -91,7 +98,7 @@ static int rxrpc_krxsecd(void *arg)
if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
msg = list_entry(rxrpc_krxsecd_initmsgq.next,
- struct rxrpc_message,link);
+ struct rxrpc_message, link);
list_del_init(&msg->link);
atomic_dec(&rxrpc_krxsecd_qcount);
}
@@ -112,7 +119,7 @@ static int rxrpc_krxsecd(void *arg)
} while (!die);
/* and that's all */
- complete_and_exit(&rxrpc_krxsecd_dead,0);
+ complete_and_exit(&rxrpc_krxsecd_dead, 0);
} /* end rxrpc_krxsecd() */
@@ -122,7 +129,7 @@ static int rxrpc_krxsecd(void *arg)
*/
int __init rxrpc_krxsecd_init(void)
{
- return kernel_thread(rxrpc_krxsecd,NULL,0);
+ return kernel_thread(rxrpc_krxsecd, NULL, 0);
} /* end rxrpc_krxsecd_init() */
@@ -154,11 +161,11 @@ void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
/* move all the messages for this transport onto a temp list */
spin_lock(&rxrpc_krxsecd_initmsgq_lock);
- list_for_each_safe(_p,_n,&rxrpc_krxsecd_initmsgq) {
- msg = list_entry(_p,struct rxrpc_message,link);
- if (msg->trans==trans) {
+ list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) {
+ msg = list_entry(_p, struct rxrpc_message, link);
+ if (msg->trans == trans) {
list_del(&msg->link);
- list_add_tail(&msg->link,&tmp);
+ list_add_tail(&msg->link, &tmp);
atomic_dec(&rxrpc_krxsecd_qcount);
}
}
@@ -167,7 +174,7 @@ void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
/* zap all messages on the temp list */
while (!list_empty(&tmp)) {
- msg = list_entry(tmp.next,struct rxrpc_message,link);
+ msg = list_entry(tmp.next, struct rxrpc_message, link);
list_del_init(&msg->link);
rxrpc_put_message(msg);
}
@@ -181,14 +188,14 @@ void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
*/
void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg)
{
- _enter("%p",msg);
+ _enter("%p", msg);
/* queue for processing by krxsecd */
spin_lock(&rxrpc_krxsecd_initmsgq_lock);
if (!rxrpc_krxsecd_die) {
rxrpc_get_message(msg);
- list_add_tail(&msg->link,&rxrpc_krxsecd_initmsgq);
+ list_add_tail(&msg->link, &rxrpc_krxsecd_initmsgq);
atomic_inc(&rxrpc_krxsecd_qcount);
}
@@ -212,10 +219,10 @@ void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
unsigned short sid;
int ret;
- _enter("%p{tr=%p}",msg,trans);
+ _enter("%p{tr=%p}", msg, trans);
- ret = rxrpc_incoming_call(msg->conn,msg,&call);
- if (ret<0)
+ ret = rxrpc_incoming_call(msg->conn, msg, &call);
+ if (ret < 0)
goto out;
/* find the matching service on the transport */
@@ -223,11 +230,11 @@ void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
srv = NULL;
spin_lock(&trans->lock);
- list_for_each(_p,&trans->services) {
- srv = list_entry(_p,struct rxrpc_service,link);
- if (srv->service_id==sid && try_module_get(srv->owner)) {
+ list_for_each(_p, &trans->services) {
+ srv = list_entry(_p, struct rxrpc_service, link);
+ if (srv->service_id == sid && try_module_get(srv->owner)) {
/* found a match (made sure it won't vanish) */
- _debug("found service '%s'",srv->name);
+ _debug("found service '%s'", srv->name);
call->owner = srv->owner;
break;
}
@@ -238,7 +245,7 @@ void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
* - the func must inc the call's usage count to keep it
*/
ret = -ENOENT;
- if (_p!=&trans->services) {
+ if (_p != &trans->services) {
/* attempt to accept the call */
call->conn->service = srv;
call->app_attn_func = srv->attn_func;
@@ -248,19 +255,20 @@ void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
ret = srv->new_call(call);
/* send an abort if an error occurred */
- if (ret<0) {
- rxrpc_call_abort(call,ret);
+ if (ret < 0) {
+ rxrpc_call_abort(call, ret);
}
else {
/* formally receive and ACK the new packet */
- ret = rxrpc_conn_receive_call_packet(call->conn,call,msg);
+ ret = rxrpc_conn_receive_call_packet(call->conn,
+ call, msg);
}
}
rxrpc_put_call(call);
out:
- if (ret<0)
- rxrpc_trans_immediate_abort(trans,msg,ret);
+ if (ret < 0)
+ rxrpc_trans_immediate_abort(trans, msg, ret);
- _leave(" (%d)",ret);
+ _leave(" (%d)", ret);
} /* end rxrpc_krxsecd_process_incoming_call() */
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c
index e8e86446a156..68ea67bf9623 100644
--- a/net/rxrpc/krxtimod.c
+++ b/net/rxrpc/krxtimod.c
@@ -36,8 +36,8 @@ int rxrpc_krxtimod_start(void)
{
int ret;
- ret = kernel_thread(krxtimod,NULL,0);
- if (ret<0)
+ ret = kernel_thread(krxtimod, NULL, 0);
+ if (ret < 0)
return ret;
wait_for_completion(&krxtimod_alive);
@@ -64,30 +64,36 @@ void rxrpc_krxtimod_kill(void)
*/
static int krxtimod(void *arg)
{
- DECLARE_WAITQUEUE(myself,current);
+ DECLARE_WAITQUEUE(myself, current);
rxrpc_timer_t *timer;
- printk("Started krxtimod %d\n",current->pid);
+ printk("Started krxtimod %d\n", current->pid);
daemonize("krxtimod");
complete(&krxtimod_alive);
+ /* only certain signals are of interest */
+ spin_lock_irq(&current->sighand->siglock);
+ siginitsetinv(&current->blocked, 0);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
/* loop around looking for things to attend to */
loop:
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&krxtimod_sleepq,&myself);
+ add_wait_queue(&krxtimod_sleepq, &myself);
for (;;) {
unsigned long jif;
- unsigned long timeout;
+ signed long timeout;
/* deal with the server being asked to die */
if (krxtimod_die) {
- remove_wait_queue(&krxtimod_sleepq,&myself);
+ remove_wait_queue(&krxtimod_sleepq, &myself);
_leave("");
- complete_and_exit(&krxtimod_dead,0);
+ complete_and_exit(&krxtimod_dead, 0);
}
/* discard pending signals */
@@ -97,18 +103,19 @@ static int krxtimod(void *arg)
spin_lock(&krxtimod_lock);
if (list_empty(&krxtimod_list)) {
timeout = MAX_SCHEDULE_TIMEOUT;
- } else {
- unsigned long tmo;
-
+ }
+ else {
timer = list_entry(krxtimod_list.next,
rxrpc_timer_t, link);
- tmo = timer->timo_jif;
+ timeout = timer->timo_jif;
jif = jiffies;
- if (time_before_eq(tmo,jif))
+ if (time_before_eq((unsigned long) timeout, jif))
goto immediate;
- timeout = (long)tmo - (long)jiffies;
+ else {
+ timeout = (long) timeout - (long) jiffies;
+ }
}
spin_unlock(&krxtimod_lock);
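
The hunk above also makes the computed timeout a signed long, so the remaining sleep time is a signed difference of jiffy values and stays correct across a counter wrap. A worked example of why the signed form matters (a 32-bit counter stands in for jiffies; model_before_eq restates the kernel's time_before_eq() idiom and, like it, relies on the usual two's-complement conversion):

#include <stdio.h>

/* wrap-safe "a is at or before b" on a 32-bit counter */
#define model_before_eq(a, b)	((int) ((a) - (b)) <= 0)

int main(void)
{
	unsigned int jif  = 0x00000010u;	/* counter has just wrapped */
	unsigned int timo = 0xFFFFFFF0u;	/* expiry set before the wrap */

	printf("naive compare says expired: %d\n",
	       timo <= jif);				/* 0: wrong */
	printf("wrap-safe compare says expired: %d\n",
	       model_before_eq(timo, jif));		/* 1: right */
	printf("ticks overdue (signed diff): %d\n",
	       (int) (jif - timo));			/* 32 */
	return 0;
}
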
@@ -118,13 +125,14 @@ static int krxtimod(void *arg)
}
/* the thing on the front of the queue needs processing
- * - we come here with the lock held and timer pointing to the expired entry
+ * - we come here with the lock held and timer pointing to the expired
+ * entry
*/
immediate:
- remove_wait_queue(&krxtimod_sleepq,&myself);
+ remove_wait_queue(&krxtimod_sleepq, &myself);
set_current_state(TASK_RUNNING);
- _debug("@@@ Begin Timeout of %p",timer);
+ _debug("@@@ Begin Timeout of %p", timer);
/* dequeue the timer */
list_del_init(&timer->link);
@@ -147,29 +155,30 @@ void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout)
struct list_head *_p;
rxrpc_timer_t *ptimer;
- _enter("%p,%lu",timer,timeout);
+ _enter("%p,%lu", timer, timeout);
spin_lock(&krxtimod_lock);
list_del(&timer->link);
- /* the timer was deferred or reset - put it back in the queue at the right place */
+ /* the timer was deferred or reset - put it back in the queue at the
+ * right place */
timer->timo_jif = jiffies + timeout;
- list_for_each(_p,&krxtimod_list) {
- ptimer = list_entry(_p,rxrpc_timer_t,link);
- if (time_before(timer->timo_jif,ptimer->timo_jif))
+ list_for_each(_p, &krxtimod_list) {
+ ptimer = list_entry(_p, rxrpc_timer_t, link);
+ if (time_before(timer->timo_jif, ptimer->timo_jif))
break;
}
- list_add_tail(&timer->link,_p); /* insert before stopping point */
+ list_add_tail(&timer->link, _p); /* insert before stopping point */
spin_unlock(&krxtimod_lock);
wake_up(&krxtimod_sleepq);
_leave("");
-} /* end rxrpc_krxtimod_queue_vlocation() */
+} /* end rxrpc_krxtimod_add_timer() */
/*****************************************************************************/
/*
@@ -180,7 +189,7 @@ int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer)
{
int ret = 0;
- _enter("%p",timer);
+ _enter("%p", timer);
spin_lock(&krxtimod_lock);
@@ -193,6 +202,6 @@ int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer)
wake_up(&krxtimod_sleepq);
- _leave(" = %d",ret);
+ _leave(" = %d", ret);
return ret;
} /* end rxrpc_krxtimod_del_timer() */
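rxrpc_krxtimod_add_timer() above keeps krxtimod_list sorted by expiry time so the daemon only ever has to inspect the head of the queue. As a rough stand-alone illustration of that ordering rule (not the kernel code itself; my_timer, expires_before and queue_timer are names invented for the sketch), a jiffies-style wraparound-safe sorted insert could look like:

#include <stdio.h>

struct my_timer {
	unsigned long expiry;		/* absolute "jiffies" value */
	struct my_timer *next;
};

/* wraparound-safe "a expires before b", in the spirit of time_before() */
static int expires_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

/* insert into a singly linked list kept sorted by expiry */
static void queue_timer(struct my_timer **head, struct my_timer *t)
{
	while (*head && expires_before((*head)->expiry, t->expiry))
		head = &(*head)->next;
	t->next = *head;
	*head = t;
}

int main(void)
{
	struct my_timer a = { .expiry = 300 }, b = { .expiry = 100 }, c = { .expiry = 200 };
	struct my_timer *head = NULL, *p;

	queue_timer(&head, &a);
	queue_timer(&head, &b);
	queue_timer(&head, &c);
	for (p = head; p; p = p->next)
		printf("%lu\n", p->expiry);	/* prints 100, 200, 300 */
	return 0;
}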
diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c
index 7da19a903817..21623a90b6e5 100644
--- a/net/rxrpc/main.c
+++ b/net/rxrpc/main.c
@@ -32,7 +32,7 @@ MODULE_DESCRIPTION("Rx RPC implementation");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
-u32 rxrpc_epoch;
+uint32_t rxrpc_epoch;
/*****************************************************************************/
/*
@@ -101,11 +101,16 @@ static void __exit rxrpc_cleanup(void)
{
kenter("");
- __RXACCT(printk("Outstanding Messages : %d\n",atomic_read(&rxrpc_message_count)));
- __RXACCT(printk("Outstanding Calls : %d\n",atomic_read(&rxrpc_call_count)));
- __RXACCT(printk("Outstanding Connections: %d\n",atomic_read(&rxrpc_connection_count)));
- __RXACCT(printk("Outstanding Peers : %d\n",atomic_read(&rxrpc_peer_count)));
- __RXACCT(printk("Outstanding Transports : %d\n",atomic_read(&rxrpc_transport_count)));
+ __RXACCT(printk("Outstanding Messages : %d\n",
+ atomic_read(&rxrpc_message_count)));
+ __RXACCT(printk("Outstanding Calls : %d\n",
+ atomic_read(&rxrpc_call_count)));
+ __RXACCT(printk("Outstanding Connections: %d\n",
+ atomic_read(&rxrpc_connection_count)));
+ __RXACCT(printk("Outstanding Peers : %d\n",
+ atomic_read(&rxrpc_peer_count)));
+ __RXACCT(printk("Outstanding Transports : %d\n",
+ atomic_read(&rxrpc_transport_count)));
rxrpc_krxsecd_kill();
rxrpc_krxiod_kill();
@@ -117,11 +122,61 @@ static void __exit rxrpc_cleanup(void)
rxrpc_proc_cleanup();
#endif
- __RXACCT(printk("Outstanding Messages : %d\n",atomic_read(&rxrpc_message_count)));
- __RXACCT(printk("Outstanding Calls : %d\n",atomic_read(&rxrpc_call_count)));
- __RXACCT(printk("Outstanding Connections: %d\n",atomic_read(&rxrpc_connection_count)));
- __RXACCT(printk("Outstanding Peers : %d\n",atomic_read(&rxrpc_peer_count)));
- __RXACCT(printk("Outstanding Transports : %d\n",atomic_read(&rxrpc_transport_count)));
+ __RXACCT(printk("Outstanding Messages : %d\n",
+ atomic_read(&rxrpc_message_count)));
+ __RXACCT(printk("Outstanding Calls : %d\n",
+ atomic_read(&rxrpc_call_count)));
+ __RXACCT(printk("Outstanding Connections: %d\n",
+ atomic_read(&rxrpc_connection_count)));
+ __RXACCT(printk("Outstanding Peers : %d\n",
+ atomic_read(&rxrpc_peer_count)));
+ __RXACCT(printk("Outstanding Transports : %d\n",
+ atomic_read(&rxrpc_transport_count)));
kleave("");
} /* end rxrpc_cleanup() */
+
+/*****************************************************************************/
+/*
+ * clear the dead space between task_struct and kernel stack
+ * - called by supplying -finstrument-functions to gcc
+ */
+#if 0
+void __cyg_profile_func_enter (void *this_fn, void *call_site)
+__attribute__((no_instrument_function));
+
+void __cyg_profile_func_enter (void *this_fn, void *call_site)
+{
+ asm volatile(" movl %%esp,%%edi \n"
+ " andl %0,%%edi \n"
+ " addl %1,%%edi \n"
+ " movl %%esp,%%ecx \n"
+ " subl %%edi,%%ecx \n"
+ " shrl $2,%%ecx \n"
+ " movl $0xedededed,%%eax \n"
+ " rep stosl \n"
+ :
+ : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info))
+ : "eax", "ecx", "edi", "memory", "cc"
+ );
+}
+
+void __cyg_profile_func_exit(void *this_fn, void *call_site)
+__attribute__((no_instrument_function));
+
+void __cyg_profile_func_exit(void *this_fn, void *call_site)
+{
+ asm volatile(" movl %%esp,%%edi \n"
+ " andl %0,%%edi \n"
+ " addl %1,%%edi \n"
+ " movl %%esp,%%ecx \n"
+ " subl %%edi,%%ecx \n"
+ " shrl $2,%%ecx \n"
+ " movl $0xdadadada,%%eax \n"
+ " rep stosl \n"
+ :
+ : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info))
+ : "eax", "ecx", "edi", "memory", "cc"
+ );
+}
+#endif
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c
index edd98149e628..a0665d6298e9 100644
--- a/net/rxrpc/peer.c
+++ b/net/rxrpc/peer.c
@@ -28,12 +28,14 @@
__RXACCT_DECL(atomic_t rxrpc_peer_count);
LIST_HEAD(rxrpc_peers);
DECLARE_RWSEM(rxrpc_peers_sem);
+unsigned long rxrpc_peer_timeout = 12 * 60 * 60;
static void __rxrpc_peer_timeout(rxrpc_timer_t *timer)
{
- struct rxrpc_peer *peer = list_entry(timer,struct rxrpc_peer,timeout);
+ struct rxrpc_peer *peer =
+ list_entry(timer, struct rxrpc_peer, timeout);
- _debug("Rx PEER TIMEOUT [%p{u=%d}]",peer,atomic_read(&peer->usage));
+ _debug("Rx PEER TIMEOUT [%p{u=%d}]", peer, atomic_read(&peer->usage));
rxrpc_peer_do_timeout(peer);
}
@@ -46,32 +48,35 @@ static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = {
/*
* create a peer record
*/
-static int __rxrpc_create_peer(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer **_peer)
+static int __rxrpc_create_peer(struct rxrpc_transport *trans, uint32_t addr,
+ struct rxrpc_peer **_peer)
{
struct rxrpc_peer *peer;
- _enter("%p,%08x",trans,ntohl(addr));
+ _enter("%p,%08x", trans, ntohl(addr));
/* allocate and initialise a peer record */
- peer = kmalloc(sizeof(struct rxrpc_peer),GFP_KERNEL);
+ peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
if (!peer) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- memset(peer,0,sizeof(struct rxrpc_peer));
- atomic_set(&peer->usage,1);
+ memset(peer, 0, sizeof(struct rxrpc_peer));
+ atomic_set(&peer->usage, 1);
INIT_LIST_HEAD(&peer->link);
INIT_LIST_HEAD(&peer->proc_link);
+ INIT_LIST_HEAD(&peer->conn_idlist);
INIT_LIST_HEAD(&peer->conn_active);
INIT_LIST_HEAD(&peer->conn_graveyard);
spin_lock_init(&peer->conn_gylock);
init_waitqueue_head(&peer->conn_gy_waitq);
+ rwlock_init(&peer->conn_idlock);
rwlock_init(&peer->conn_lock);
- atomic_set(&peer->conn_count,0);
+ atomic_set(&peer->conn_count, 0);
spin_lock_init(&peer->lock);
- rxrpc_timer_init(&peer->timeout,&rxrpc_peer_timer_ops);
+ rxrpc_timer_init(&peer->timeout, &rxrpc_peer_timer_ops);
peer->addr.s_addr = addr;
@@ -80,7 +85,7 @@ static int __rxrpc_create_peer(struct rxrpc_transport *trans, u32 addr, struct r
__RXACCT(atomic_inc(&rxrpc_peer_count));
*_peer = peer;
- _leave(" = 0 (%p)",peer);
+ _leave(" = 0 (%p)", peer);
return 0;
} /* end __rxrpc_create_peer() */
@@ -91,43 +96,45 @@ static int __rxrpc_create_peer(struct rxrpc_transport *trans, u32 addr, struct r
* - returns (if successful) with peer record usage incremented
* - resurrects it from the graveyard if found there
*/
-int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer **_peer)
+int rxrpc_peer_lookup(struct rxrpc_transport *trans, uint32_t addr,
+ struct rxrpc_peer **_peer)
{
struct rxrpc_peer *peer, *candidate = NULL;
struct list_head *_p;
int ret;
- _enter("%p{%hu},%08x",trans,trans->port,ntohl(addr));
+ _enter("%p{%hu},%08x", trans, trans->port, ntohl(addr));
/* [common case] search the transport's active list first */
read_lock(&trans->peer_lock);
- list_for_each(_p,&trans->peer_active) {
- peer = list_entry(_p,struct rxrpc_peer,link);
- if (peer->addr.s_addr==addr)
+ list_for_each(_p, &trans->peer_active) {
+ peer = list_entry(_p, struct rxrpc_peer, link);
+ if (peer->addr.s_addr == addr)
goto found_active;
}
read_unlock(&trans->peer_lock);
/* [uncommon case] not active - create a candidate for a new record */
- ret = __rxrpc_create_peer(trans,addr,&candidate);
- if (ret<0) {
- _leave(" = %d",ret);
+ ret = __rxrpc_create_peer(trans, addr, &candidate);
+ if (ret < 0) {
+ _leave(" = %d", ret);
return ret;
}
- /* search the active list again, just in case it appeared whilst we were busy */
+ /* search the active list again, just in case it appeared whilst we
+ * were busy */
write_lock(&trans->peer_lock);
- list_for_each(_p,&trans->peer_active) {
- peer = list_entry(_p,struct rxrpc_peer,link);
- if (peer->addr.s_addr==addr)
+ list_for_each(_p, &trans->peer_active) {
+ peer = list_entry(_p, struct rxrpc_peer, link);
+ if (peer->addr.s_addr == addr)
goto found_active_second_chance;
}
/* search the transport's graveyard list */
spin_lock(&trans->peer_gylock);
- list_for_each(_p,&trans->peer_graveyard) {
- peer = list_entry(_p,struct rxrpc_peer,link);
- if (peer->addr.s_addr==addr)
+ list_for_each(_p, &trans->peer_graveyard) {
+ peer = list_entry(_p, struct rxrpc_peer, link);
+ if (peer->addr.s_addr == addr)
goto found_in_graveyard;
}
spin_unlock(&trans->peer_gylock);
@@ -141,12 +148,12 @@ int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer
if (peer->ops && peer->ops->adding) {
ret = peer->ops->adding(peer);
- if (ret<0) {
+ if (ret < 0) {
write_unlock(&trans->peer_lock);
__RXACCT(atomic_dec(&rxrpc_peer_count));
kfree(peer);
rxrpc_put_transport(trans);
- _leave(" = %d",ret);
+ _leave(" = %d", ret);
return ret;
}
}
@@ -154,7 +161,7 @@ int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer
atomic_inc(&trans->peer_count);
make_active:
- list_add_tail(&peer->link,&trans->peer_active);
+ list_add_tail(&peer->link, &trans->peer_active);
success_uwfree:
write_unlock(&trans->peer_lock);
@@ -166,7 +173,7 @@ int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer
if (list_empty(&peer->proc_link)) {
down_write(&rxrpc_peers_sem);
- list_add_tail(&peer->proc_link,&rxrpc_peers);
+ list_add_tail(&peer->proc_link, &rxrpc_peers);
up_write(&rxrpc_peers_sem);
}
@@ -174,7 +181,9 @@ int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer
*_peer = peer;
_leave(" = 0 (%p{u=%d cc=%d})",
- peer,atomic_read(&peer->usage),atomic_read(&peer->conn_count));
+ peer,
+ atomic_read(&peer->usage),
+ atomic_read(&peer->conn_count));
return 0;
/* handle the peer being found in the active list straight off */
@@ -192,7 +201,8 @@ int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer
spin_unlock(&trans->peer_gylock);
goto make_active;
- /* handle finding the peer on the second time through the active list */
+ /* handle finding the peer on the second time through the active
+ * list */
found_active_second_chance:
rxrpc_get_peer(peer);
goto success_uwfree;
@@ -202,16 +212,20 @@ int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer
/*****************************************************************************/
/*
* finish with a peer record
- * - it gets sent to the graveyard from where it can be resurrected or timed out
+ * - it gets sent to the graveyard from where it can be resurrected or timed
+ * out
*/
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
struct rxrpc_transport *trans = peer->trans;
- _enter("%p{cc=%d a=%08x}",peer,atomic_read(&peer->conn_count),ntohl(peer->addr.s_addr));
+ _enter("%p{cc=%d a=%08x}",
+ peer,
+ atomic_read(&peer->conn_count),
+ ntohl(peer->addr.s_addr));
/* sanity check */
- if (atomic_read(&peer->usage)<=0)
+ if (atomic_read(&peer->usage) <= 0)
BUG();
write_lock(&trans->peer_lock);
@@ -227,12 +241,11 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
list_del(&peer->link);
write_unlock(&trans->peer_lock);
- list_add_tail(&peer->link,&trans->peer_graveyard);
+ list_add_tail(&peer->link, &trans->peer_graveyard);
- if (!list_empty(&peer->conn_active)) BUG();
+ BUG_ON(!list_empty(&peer->conn_active));
- /* discard in 600 secs */
- rxrpc_krxtimod_add_timer(&peer->timeout,100*HZ);
+ rxrpc_krxtimod_add_timer(&peer->timeout, rxrpc_peer_timeout * HZ);
spin_unlock(&trans->peer_gylock);
@@ -251,15 +264,16 @@ void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
struct rxrpc_transport *trans = peer->trans;
_enter("%p{u=%d cc=%d a=%08x}",
- peer,atomic_read(&peer->usage),atomic_read(&peer->conn_count),
+ peer,
+ atomic_read(&peer->usage),
+ atomic_read(&peer->conn_count),
ntohl(peer->addr.s_addr));
- if (atomic_read(&peer->usage)<0)
- BUG();
+ BUG_ON(atomic_read(&peer->usage) < 0);
/* remove from graveyard if still dead */
spin_lock(&trans->peer_gylock);
- if (atomic_read(&peer->usage)==0)
+ if (atomic_read(&peer->usage) == 0)
list_del_init(&peer->link);
else
peer = NULL;
@@ -273,8 +287,8 @@ void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
/* clear all connections on this peer */
rxrpc_conn_clearall(peer);
- if (!list_empty(&peer->conn_active)) BUG();
- if (!list_empty(&peer->conn_graveyard)) BUG();
+ BUG_ON(!list_empty(&peer->conn_active));
+ BUG_ON(!list_empty(&peer->conn_graveyard));
/* inform the application layer */
if (peer->ops && peer->ops->discarding)
@@ -310,18 +324,18 @@ void rxrpc_peer_clearall(struct rxrpc_transport *trans)
_enter("%p",trans);
/* there shouldn't be any active peers remaining */
- if (!list_empty(&trans->peer_active))
- BUG();
+ BUG_ON(!list_empty(&trans->peer_active));
/* manually timeout all peers in the graveyard */
spin_lock(&trans->peer_gylock);
while (!list_empty(&trans->peer_graveyard)) {
- peer = list_entry(trans->peer_graveyard.next,struct rxrpc_peer,link);
- _debug("Clearing peer %p\n",peer);
+ peer = list_entry(trans->peer_graveyard.next,
+ struct rxrpc_peer, link);
+ _debug("Clearing peer %p\n", peer);
err = rxrpc_krxtimod_del_timer(&peer->timeout);
spin_unlock(&trans->peer_gylock);
- if (err==0)
+ if (err == 0)
rxrpc_peer_do_timeout(peer);
spin_lock(&trans->peer_gylock);
@@ -330,18 +344,17 @@ void rxrpc_peer_clearall(struct rxrpc_transport *trans)
/* wait for the peer graveyard to be completely cleared */
set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&trans->peer_gy_waitq,&myself);
+ add_wait_queue(&trans->peer_gy_waitq, &myself);
- while (atomic_read(&trans->peer_count)!=0) {
+ while (atomic_read(&trans->peer_count) != 0) {
schedule();
set_current_state(TASK_UNINTERRUPTIBLE);
}
- remove_wait_queue(&trans->peer_gy_waitq,&myself);
+ remove_wait_queue(&trans->peer_gy_waitq, &myself);
set_current_state(TASK_RUNNING);
_leave("");
-
} /* end rxrpc_peer_clearall() */
/*****************************************************************************/
@@ -355,7 +368,7 @@ void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
unsigned long long rtt;
int loop;
- _enter("%p,%p,%p",peer,msg,resp);
+ _enter("%p,%p,%p", peer, msg, resp);
/* calculate the latest RTT */
rtt = resp->stamp.tv_sec - msg->stamp.tv_sec;
@@ -367,16 +380,18 @@ void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
peer->rtt_point++;
peer->rtt_point %= RXRPC_RTT_CACHE_SIZE;
- if (peer->rtt_usage<RXRPC_RTT_CACHE_SIZE) peer->rtt_usage++;
+ if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE)
+ peer->rtt_usage++;
/* recalculate RTT */
rtt = 0;
- for (loop=peer->rtt_usage-1; loop>=0; loop--)
+ for (loop = peer->rtt_usage - 1; loop >= 0; loop--)
rtt += peer->rtt_cache[loop];
- do_div(rtt,peer->rtt_usage);
+ do_div(rtt, peer->rtt_usage);
peer->rtt = rtt;
- _leave(" RTT=%lu.%lums",(long)(peer->rtt/1000),(long)(peer->rtt%1000));
+ _leave(" RTT=%lu.%lums",
+ (long) (peer->rtt / 1000), (long) (peer->rtt % 1000));
} /* end rxrpc_peer_calculate_rtt() */
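For context on rxrpc_peer_calculate_rtt() above: the peer stores the most recent samples in a small ring of RXRPC_RTT_CACHE_SIZE entries and recomputes a plain arithmetic mean over whatever has been collected so far. A minimal user-space sketch of the same averaging scheme (the cache size and sample values here are made up for the example) might be:

#include <stdio.h>

#define RTT_CACHE_SIZE 32		/* stand-in for RXRPC_RTT_CACHE_SIZE */

struct rtt_state {
	unsigned long cache[RTT_CACHE_SIZE];
	int point;			/* next slot to overwrite */
	int usage;			/* number of valid samples */
};

/* record one sample (in microseconds) and return the new mean */
static unsigned long rtt_update(struct rtt_state *s, unsigned long sample)
{
	unsigned long long sum = 0;
	int loop;

	s->cache[s->point++] = sample;
	s->point %= RTT_CACHE_SIZE;
	if (s->usage < RTT_CACHE_SIZE)
		s->usage++;

	for (loop = s->usage - 1; loop >= 0; loop--)
		sum += s->cache[loop];
	return (unsigned long)(sum / s->usage);
}

int main(void)
{
	struct rtt_state s = { .point = 0, .usage = 0 };

	printf("%lu\n", rtt_update(&s, 1200));	/* 1200 */
	printf("%lu\n", rtt_update(&s, 1800));	/* 1500 */
	printf("%lu\n", rtt_update(&s, 900));	/* 1300 */
	return 0;
}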
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index e9d979fc4eb0..0427b5f227a2 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -38,7 +38,6 @@ static struct seq_operations rxrpc_proc_transports_ops = {
};
static struct file_operations rxrpc_proc_transports_fops = {
- .owner = THIS_MODULE,
.open = rxrpc_proc_transports_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -59,7 +58,6 @@ static struct seq_operations rxrpc_proc_peers_ops = {
};
static struct file_operations rxrpc_proc_peers_fops = {
- .owner = THIS_MODULE,
.open = rxrpc_proc_peers_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -80,7 +78,6 @@ static struct seq_operations rxrpc_proc_conns_ops = {
};
static struct file_operations rxrpc_proc_conns_fops = {
- .owner = THIS_MODULE,
.open = rxrpc_proc_conns_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -101,7 +98,6 @@ static struct seq_operations rxrpc_proc_calls_ops = {
};
static struct file_operations rxrpc_proc_calls_fops = {
- .owner = THIS_MODULE,
.open = rxrpc_proc_calls_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -137,30 +133,30 @@ int rxrpc_proc_init(void)
{
struct proc_dir_entry *p;
- proc_rxrpc = proc_mkdir("rxrpc",proc_net);
+ proc_rxrpc = proc_mkdir("rxrpc", proc_net);
if (!proc_rxrpc)
goto error;
proc_rxrpc->owner = THIS_MODULE;
- p = create_proc_entry("calls",0,proc_rxrpc);
+ p = create_proc_entry("calls", 0, proc_rxrpc);
if (!p)
goto error_proc;
p->proc_fops = &rxrpc_proc_calls_fops;
p->owner = THIS_MODULE;
- p = create_proc_entry("connections",0,proc_rxrpc);
+ p = create_proc_entry("connections", 0, proc_rxrpc);
if (!p)
goto error_calls;
p->proc_fops = &rxrpc_proc_conns_fops;
p->owner = THIS_MODULE;
- p = create_proc_entry("peers",0,proc_rxrpc);
+ p = create_proc_entry("peers", 0, proc_rxrpc);
if (!p)
goto error_calls;
p->proc_fops = &rxrpc_proc_peers_fops;
p->owner = THIS_MODULE;
- p = create_proc_entry("transports",0,proc_rxrpc);
+ p = create_proc_entry("transports", 0, proc_rxrpc);
if (!p)
goto error_conns;
p->proc_fops = &rxrpc_proc_transports_fops;
@@ -169,11 +165,11 @@ int rxrpc_proc_init(void)
return 0;
error_conns:
- remove_proc_entry("conns",proc_rxrpc);
+ remove_proc_entry("connections", proc_rxrpc);
error_calls:
- remove_proc_entry("calls",proc_rxrpc);
+ remove_proc_entry("calls", proc_rxrpc);
error_proc:
- remove_proc_entry("rxrpc",proc_net);
+ remove_proc_entry("rxrpc", proc_net);
error:
return -ENOMEM;
} /* end rxrpc_proc_init() */
@@ -184,12 +180,12 @@ int rxrpc_proc_init(void)
*/
void rxrpc_proc_cleanup(void)
{
- remove_proc_entry("transports",proc_rxrpc);
- remove_proc_entry("peers",proc_rxrpc);
- remove_proc_entry("connections",proc_rxrpc);
- remove_proc_entry("calls",proc_rxrpc);
+ remove_proc_entry("transports", proc_rxrpc);
+ remove_proc_entry("peers", proc_rxrpc);
+ remove_proc_entry("connections", proc_rxrpc);
+ remove_proc_entry("calls", proc_rxrpc);
- remove_proc_entry("rxrpc",proc_net);
+ remove_proc_entry("rxrpc", proc_net);
} /* end rxrpc_proc_cleanup() */
@@ -202,8 +198,8 @@ static int rxrpc_proc_transports_open(struct inode *inode, struct file *file)
struct seq_file *m;
int ret;
- ret = seq_open(file,&rxrpc_proc_transports_ops);
- if (ret<0)
+ ret = seq_open(file, &rxrpc_proc_transports_ops);
+ if (ret < 0)
return ret;
m = file->private_data;
@@ -226,15 +222,15 @@ static void *rxrpc_proc_transports_start(struct seq_file *m, loff_t *_pos)
/* allow for the header line */
if (!pos)
- return (void *)1;
+ return (void *) 1;
pos--;
/* find the n'th element in the list */
- list_for_each(_p,&rxrpc_proc_transports)
+ list_for_each(_p, &rxrpc_proc_transports)
if (!pos--)
break;
- return _p!=&rxrpc_proc_transports ? _p : NULL;
+ return _p != &rxrpc_proc_transports ? _p : NULL;
} /* end rxrpc_proc_transports_start() */
/*****************************************************************************/
@@ -248,9 +244,9 @@ static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos
(*pos)++;
_p = v;
- _p = v==(void*)1 ? rxrpc_proc_transports.next : _p->next;
+ _p = v == (void *) 1 ? rxrpc_proc_transports.next : _p->next;
- return _p!=&rxrpc_proc_transports ? _p : NULL;
+ return _p != &rxrpc_proc_transports ? _p : NULL;
} /* end rxrpc_proc_transports_next() */
/*****************************************************************************/
@@ -269,16 +265,17 @@ static void rxrpc_proc_transports_stop(struct seq_file *p, void *v)
*/
static int rxrpc_proc_transports_show(struct seq_file *m, void *v)
{
- struct rxrpc_transport *trans = list_entry(v,struct rxrpc_transport,proc_link);
+ struct rxrpc_transport *trans =
+ list_entry(v, struct rxrpc_transport, proc_link);
/* display header on line 1 */
- if (v == (void *)1) {
+ if (v == (void *) 1) {
seq_puts(m, "LOCAL USE\n");
return 0;
}
/* display one transport per line on subsequent lines */
- seq_printf(m,"%5hu %3d\n",
+ seq_printf(m, "%5hu %3d\n",
trans->port,
atomic_read(&trans->usage)
);
@@ -295,8 +292,8 @@ static int rxrpc_proc_peers_open(struct inode *inode, struct file *file)
struct seq_file *m;
int ret;
- ret = seq_open(file,&rxrpc_proc_peers_ops);
- if (ret<0)
+ ret = seq_open(file, &rxrpc_proc_peers_ops);
+ if (ret < 0)
return ret;
m = file->private_data;
@@ -307,7 +304,8 @@ static int rxrpc_proc_peers_open(struct inode *inode, struct file *file)
/*****************************************************************************/
/*
- * set up the iterator to start reading from the peers list and return the first item
+ * set up the iterator to start reading from the peers list and return the
+ * first item
*/
static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos)
{
@@ -319,15 +317,15 @@ static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos)
/* allow for the header line */
if (!pos)
- return (void *)1;
+ return (void *) 1;
pos--;
/* find the n'th element in the list */
- list_for_each(_p,&rxrpc_peers)
+ list_for_each(_p, &rxrpc_peers)
if (!pos--)
break;
- return _p!=&rxrpc_peers ? _p : NULL;
+ return _p != &rxrpc_peers ? _p : NULL;
} /* end rxrpc_proc_peers_start() */
/*****************************************************************************/
@@ -341,9 +339,9 @@ static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos)
(*pos)++;
_p = v;
- _p = v==(void*)1 ? rxrpc_peers.next : _p->next;
+ _p = v == (void *) 1 ? rxrpc_peers.next : _p->next;
- return _p!=&rxrpc_peers ? _p : NULL;
+ return _p != &rxrpc_peers ? _p : NULL;
} /* end rxrpc_proc_peers_next() */
/*****************************************************************************/
@@ -362,21 +360,23 @@ static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
*/
static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
{
- struct rxrpc_peer *peer = list_entry(v,struct rxrpc_peer,proc_link);
+ struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link);
signed long timeout;
/* display header on line 1 */
- if (v == (void *)1) {
- seq_puts(m,"LOCAL REMOTE USAGE CONNS TIMEOUT MTU RTT(uS)\n");
+ if (v == (void *) 1) {
+ seq_puts(m, "LOCAL REMOTE USAGE CONNS TIMEOUT"
+ " MTU RTT(uS)\n");
return 0;
}
/* display one peer per line on subsequent lines */
timeout = 0;
if (!list_empty(&peer->timeout.link))
- timeout = (signed long)peer->timeout.timo_jif - (signed long)jiffies;
+ timeout = (signed long) peer->timeout.timo_jif -
+ (signed long) jiffies;
- seq_printf(m,"%5hu %08x %5d %5d %8ld %5Zu %7lu\n",
+ seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n",
peer->trans->port,
ntohl(peer->addr.s_addr),
atomic_read(&peer->usage),
@@ -391,15 +391,16 @@ static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
/*****************************************************************************/
/*
- * open "/proc/net/rxrpc/connections" which provides a summary of extant connections
+ * open "/proc/net/rxrpc/connections" which provides a summary of extant
+ * connections
*/
static int rxrpc_proc_conns_open(struct inode *inode, struct file *file)
{
struct seq_file *m;
int ret;
- ret = seq_open(file,&rxrpc_proc_conns_ops);
- if (ret<0)
+ ret = seq_open(file, &rxrpc_proc_conns_ops);
+ if (ret < 0)
return ret;
m = file->private_data;
@@ -410,7 +411,8 @@ static int rxrpc_proc_conns_open(struct inode *inode, struct file *file)
/*****************************************************************************/
/*
- * set up the iterator to start reading from the conns list and return the first item
+ * set up the iterator to start reading from the conns list and return the
+ * first item
*/
static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos)
{
@@ -422,15 +424,15 @@ static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos)
/* allow for the header line */
if (!pos)
- return (void *)1;
+ return (void *) 1;
pos--;
/* find the n'th element in the list */
- list_for_each(_p,&rxrpc_conns)
+ list_for_each(_p, &rxrpc_conns)
if (!pos--)
break;
- return _p!=&rxrpc_conns ? _p : NULL;
+ return _p != &rxrpc_conns ? _p : NULL;
} /* end rxrpc_proc_conns_start() */
/*****************************************************************************/
@@ -444,9 +446,9 @@ static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos)
(*pos)++;
_p = v;
- _p = v==(void*)1 ? rxrpc_conns.next : _p->next;
+ _p = (v == (void *) 1) ? rxrpc_conns.next : _p->next;
- return _p!=&rxrpc_conns ? _p : NULL;
+ return _p != &rxrpc_conns ? _p : NULL;
} /* end rxrpc_proc_conns_next() */
/*****************************************************************************/
@@ -465,13 +467,16 @@ static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
*/
static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
{
- struct rxrpc_connection *conn = list_entry(v,struct rxrpc_connection,proc_link);
+ struct rxrpc_connection *conn;
signed long timeout;
+ conn = list_entry(v, struct rxrpc_connection, proc_link);
+
/* display header on line 1 */
- if (v == (void *)1) {
+ if (v == (void *) 1) {
seq_puts(m,
- "LOCAL REMOTE RPORT SRVC CONN END SERIALNO CALLNO MTU TIMEOUT"
+ "LOCAL REMOTE RPORT SRVC CONN END SERIALNO "
+ "CALLNO MTU TIMEOUT"
"\n");
return 0;
}
@@ -479,9 +484,11 @@ static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
/* display one conn per line on subsequent lines */
timeout = 0;
if (!list_empty(&conn->timeout.link))
- timeout = (signed long)conn->timeout.timo_jif - (signed long)jiffies;
+ timeout = (signed long) conn->timeout.timo_jif -
+ (signed long) jiffies;
- seq_printf(m,"%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",
+ seq_printf(m,
+ "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",
conn->trans->port,
ntohl(conn->addr.sin_addr.s_addr),
ntohs(conn->addr.sin_port),
@@ -506,8 +513,8 @@ static int rxrpc_proc_calls_open(struct inode *inode, struct file *file)
struct seq_file *m;
int ret;
- ret = seq_open(file,&rxrpc_proc_calls_ops);
- if (ret<0)
+ ret = seq_open(file, &rxrpc_proc_calls_ops);
+ if (ret < 0)
return ret;
m = file->private_data;
@@ -518,7 +525,8 @@ static int rxrpc_proc_calls_open(struct inode *inode, struct file *file)
/*****************************************************************************/
/*
- * set up the iterator to start reading from the calls list and return the first item
+ * set up the iterator to start reading from the calls list and return the
+ * first item
*/
static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos)
{
@@ -530,15 +538,15 @@ static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos)
/* allow for the header line */
if (!pos)
- return (void *)1;
+ return (void *) 1;
pos--;
/* find the n'th element in the list */
- list_for_each(_p,&rxrpc_calls)
+ list_for_each(_p, &rxrpc_calls)
if (!pos--)
break;
- return _p!=&rxrpc_calls ? _p : NULL;
+ return _p != &rxrpc_calls ? _p : NULL;
} /* end rxrpc_proc_calls_start() */
/*****************************************************************************/
@@ -552,9 +560,9 @@ static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos)
(*pos)++;
_p = v;
- _p = v==(void*)1 ? rxrpc_calls.next : _p->next;
+ _p = (v == (void *) 1) ? rxrpc_calls.next : _p->next;
- return _p!=&rxrpc_calls ? _p : NULL;
+ return _p != &rxrpc_calls ? _p : NULL;
} /* end rxrpc_proc_calls_next() */
/*****************************************************************************/
@@ -573,10 +581,10 @@ static void rxrpc_proc_calls_stop(struct seq_file *p, void *v)
*/
static int rxrpc_proc_calls_show(struct seq_file *m, void *v)
{
- struct rxrpc_call *call = list_entry(v,struct rxrpc_call,call_link);
+ struct rxrpc_call *call = list_entry(v, struct rxrpc_call, call_link);
/* display header on line 1 */
- if (v == (void *)1) {
+ if (v == (void *) 1) {
seq_puts(m,
"LOCAL REMOT SRVC CONN CALL DIR USE "
" L STATE OPCODE ABORT ERRNO\n"
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index 08df88fcd643..fbf98729c748 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -60,6 +60,22 @@ static ctl_table rxrpc_sysctl_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec
},
+ {
+ .ctl_name = 5,
+ .procname = "peertimo",
+ .data = &rxrpc_peer_timeout,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax
+ },
+ {
+ .ctl_name = 6,
+ .procname = "conntimo",
+ .data = &rxrpc_conn_timeout,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax
+ },
{ .ctl_name = 0 }
};
@@ -67,6 +83,7 @@ static ctl_table rxrpc_dir_sysctl_table[] = {
{
.ctl_name = 1,
.procname = "rxrpc",
+ .maxlen = 0,
.mode = 0555,
.child = rxrpc_sysctl_table
},
@@ -81,7 +98,7 @@ static ctl_table rxrpc_dir_sysctl_table[] = {
int rxrpc_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
- rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table,0);
+ rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table, 0);
if (!rxrpc_sysctl)
return -ENOMEM;
#endif /* CONFIG_SYSCTL */
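The two sysctl entries added above expose rxrpc_peer_timeout and rxrpc_conn_timeout as writable values (the peer default set in peer.c is 12 * 60 * 60 seconds). Assuming the "rxrpc" directory table is attached at the sysctl root, they would presumably appear as /proc/sys/rxrpc/peertimo and /proc/sys/rxrpc/conntimo; a tiny user-space sketch that reads the peer value, with that path treated as an assumption, might be:

#include <stdio.h>

int main(void)
{
	unsigned long timeout;
	/* path is an assumption; adjust if the table is registered elsewhere */
	FILE *f = fopen("/proc/sys/rxrpc/peertimo", "r");

	if (!f) {
		perror("peertimo");
		return 1;
	}
	if (fscanf(f, "%lu", &timeout) == 1)
		printf("peer timeout: %lu seconds\n", timeout);
	fclose(f);
	return 0;
}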
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index 7d0df69b16fe..33b22b36d824 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -56,7 +56,8 @@ static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);
/*
* create a new transport endpoint using the specified UDP port
*/
-int rxrpc_create_transport(unsigned short port, struct rxrpc_transport **_trans)
+int rxrpc_create_transport(unsigned short port,
+ struct rxrpc_transport **_trans)
{
struct rxrpc_transport *trans;
struct sockaddr_in sin;
@@ -64,14 +65,14 @@ int rxrpc_create_transport(unsigned short port, struct rxrpc_transport **_trans)
struct sock *sock;
int ret, opt;
- _enter("%hu",port);
+ _enter("%hu", port);
- trans = kmalloc(sizeof(struct rxrpc_transport),GFP_KERNEL);
+ trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
if (!trans)
return -ENOMEM;
- memset(trans,0,sizeof(struct rxrpc_transport));
- atomic_set(&trans->usage,1);
+ memset(trans, 0, sizeof(struct rxrpc_transport));
+ atomic_set(&trans->usage, 1);
INIT_LIST_HEAD(&trans->services);
INIT_LIST_HEAD(&trans->link);
INIT_LIST_HEAD(&trans->krxiodq_link);
@@ -81,58 +82,58 @@ int rxrpc_create_transport(unsigned short port, struct rxrpc_transport **_trans)
spin_lock_init(&trans->peer_gylock);
init_waitqueue_head(&trans->peer_gy_waitq);
rwlock_init(&trans->peer_lock);
- atomic_set(&trans->peer_count,0);
+ atomic_set(&trans->peer_count, 0);
trans->port = port;
/* create a UDP socket to be my actual transport endpoint */
- ret = sock_create(PF_INET,SOCK_DGRAM,IPPROTO_UDP,&trans->socket);
- if (ret<0)
+ ret = sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket);
+ if (ret < 0)
goto error;
/* use the specified port */
if (port) {
- memset(&sin,0,sizeof(sin));
+ memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_port = htons(port);
- ret = trans->socket->ops->bind(trans->socket,(struct sockaddr *)&sin,sizeof(sin));
- if (ret<0)
+ ret = trans->socket->ops->bind(trans->socket,
+ (struct sockaddr *) &sin,
+ sizeof(sin));
+ if (ret < 0)
goto error;
}
opt = 1;
oldfs = get_fs();
set_fs(KERNEL_DS);
- ret = trans->socket->ops->setsockopt(trans->socket,SOL_IP,IP_RECVERR,
- (char*)&opt,sizeof(opt));
+ ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR,
+ (char *) &opt, sizeof(opt));
set_fs(oldfs);
spin_lock(&rxrpc_transports_lock);
- list_add(&trans->link,&rxrpc_transports);
+ list_add(&trans->link, &rxrpc_transports);
spin_unlock(&rxrpc_transports_lock);
/* set the socket up */
sock = trans->socket->sk;
- sock->sk_user_data = trans;
- sock->sk_data_ready = rxrpc_data_ready;
- sock->sk_error_report = rxrpc_error_report;
+ sock->sk_user_data = trans;
+ sock->sk_data_ready = rxrpc_data_ready;
+ sock->sk_error_report = rxrpc_error_report;
down_write(&rxrpc_proc_transports_sem);
- list_add_tail(&trans->proc_link,&rxrpc_proc_transports);
+ list_add_tail(&trans->proc_link, &rxrpc_proc_transports);
up_write(&rxrpc_proc_transports_sem);
__RXACCT(atomic_inc(&rxrpc_transport_count));
*_trans = trans;
- _leave(" = 0 (%p)",trans);
+ _leave(" = 0 (%p)", trans);
return 0;
error:
rxrpc_put_transport(trans);
- _leave(" = %d",ret);
-
+ _leave(" = %d", ret);
return ret;
-
} /* end rxrpc_create_transport() */
/*****************************************************************************/
@@ -151,12 +152,13 @@ void rxrpc_clear_transport(struct rxrpc_transport *trans)
*/
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
- _enter("%p{u=%d p=%hu}",trans,atomic_read(&trans->usage),trans->port);
+ _enter("%p{u=%d p=%hu}",
+ trans, atomic_read(&trans->usage), trans->port);
- if (atomic_read(&trans->usage)<=0)
- BUG();
+ BUG_ON(atomic_read(&trans->usage) <= 0);
- /* to prevent a race, the decrement and the dequeue must be effectively atomic */
+ /* to prevent a race, the decrement and the dequeue must be
+ * effectively atomic */
spin_lock(&rxrpc_transports_lock);
if (likely(!atomic_dec_and_test(&trans->usage))) {
spin_unlock(&rxrpc_transports_lock);
@@ -169,7 +171,7 @@ void rxrpc_put_transport(struct rxrpc_transport *trans)
/* finish cleaning up the transport */
if (trans->socket)
- trans->socket->ops->shutdown(trans->socket,2);
+ trans->socket->ops->shutdown(trans->socket, 2);
rxrpc_krxsecd_clear_transport(trans);
rxrpc_krxiod_dequeue_transport(trans);
@@ -192,41 +194,41 @@ void rxrpc_put_transport(struct rxrpc_transport *trans)
kfree(trans);
_leave("");
-
} /* end rxrpc_put_transport() */
/*****************************************************************************/
/*
* add a service to a transport to be listened upon
*/
-int rxrpc_add_service(struct rxrpc_transport *trans, struct rxrpc_service *newsrv)
+int rxrpc_add_service(struct rxrpc_transport *trans,
+ struct rxrpc_service *newsrv)
{
struct rxrpc_service *srv;
struct list_head *_p;
int ret = -EEXIST;
- _enter("%p{%hu},%p{%hu}",trans,trans->port,newsrv,newsrv->service_id);
+ _enter("%p{%hu},%p{%hu}",
+ trans, trans->port, newsrv, newsrv->service_id);
/* verify that the service ID is not already present */
spin_lock(&trans->lock);
- list_for_each(_p,&trans->services) {
- srv = list_entry(_p,struct rxrpc_service,link);
- if (srv->service_id==newsrv->service_id)
+ list_for_each(_p, &trans->services) {
+ srv = list_entry(_p, struct rxrpc_service, link);
+ if (srv->service_id == newsrv->service_id)
goto out;
}
/* okay - add the transport to the list */
- list_add_tail(&newsrv->link,&trans->services);
+ list_add_tail(&newsrv->link, &trans->services);
rxrpc_get_transport(trans);
ret = 0;
out:
spin_unlock(&trans->lock);
- _leave("= %d",ret);
+ _leave("= %d", ret);
return ret;
-
} /* end rxrpc_add_service() */
/*****************************************************************************/
@@ -235,7 +237,7 @@ int rxrpc_add_service(struct rxrpc_transport *trans, struct rxrpc_service *newsr
*/
void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
{
- _enter("%p{%hu},%p{%hu}",trans,trans->port,srv,srv->service_id);
+ _enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id);
spin_lock(&trans->lock);
list_del(&srv->link);
@@ -244,7 +246,6 @@ void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
rxrpc_put_transport(trans);
_leave("");
-
} /* end rxrpc_del_service() */
/*****************************************************************************/
@@ -255,7 +256,7 @@ static void rxrpc_data_ready(struct sock *sk, int count)
{
struct rxrpc_transport *trans;
- _enter("%p{t=%p},%d",sk,sk->sk_user_data,count);
+ _enter("%p{t=%p},%d", sk, sk->sk_user_data, count);
/* queue the transport for attention by krxiod */
trans = (struct rxrpc_transport *) sk->sk_user_data;
@@ -267,7 +268,6 @@ static void rxrpc_data_ready(struct sock *sk, int count)
wake_up_interruptible(sk->sk_sleep);
_leave("");
-
} /* end rxrpc_data_ready() */
/*****************************************************************************/
@@ -279,7 +279,7 @@ static void rxrpc_error_report(struct sock *sk)
{
struct rxrpc_transport *trans;
- _enter("%p{t=%p}",sk,sk->sk_user_data);
+ _enter("%p{t=%p}", sk, sk->sk_user_data);
/* queue the transport for attention by krxiod */
trans = (struct rxrpc_transport *) sk->sk_user_data;
@@ -293,13 +293,12 @@ static void rxrpc_error_report(struct sock *sk)
wake_up_interruptible(sk->sk_sleep);
_leave("");
-
} /* end rxrpc_error_report() */
/*****************************************************************************/
/*
- * split a message up, allocating message records and filling them in from the contents of a
- * socket buffer
+ * split a message up, allocating message records and filling them in
+ * from the contents of a socket buffer
*/
static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
struct sk_buff *pkt,
@@ -310,18 +309,19 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
_enter("");
- msg = kmalloc(sizeof(struct rxrpc_message),GFP_KERNEL);
+ msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
if (!msg) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- memset(msg,0,sizeof(*msg));
- atomic_set(&msg->usage,1);
+ memset(msg, 0, sizeof(*msg));
+ atomic_set(&msg->usage, 1);
list_add_tail(&msg->link,msgq);
/* dig out the Rx routing parameters */
- if (skb_copy_bits(pkt,sizeof(struct udphdr),&msg->hdr,sizeof(msg->hdr))<0) {
+ if (skb_copy_bits(pkt, sizeof(struct udphdr),
+ &msg->hdr, sizeof(msg->hdr)) < 0) {
ret = -EBADMSG;
goto error;
}
@@ -352,7 +352,9 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
__RXACCT(atomic_inc(&rxrpc_message_count));
/* split off jumbo packets */
- while (msg->hdr.type==RXRPC_PACKET_TYPE_DATA && msg->hdr.flags & RXRPC_JUMBO_PACKET) {
+ while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+ msg->hdr.flags & RXRPC_JUMBO_PACKET
+ ) {
struct rxrpc_jumbo_header jumbo;
struct rxrpc_message *jumbomsg = msg;
@@ -360,23 +362,25 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
/* quick sanity check */
ret = -EBADMSG;
- if (msg->dsize < RXRPC_JUMBO_DATALEN+sizeof(struct rxrpc_jumbo_header))
+ if (msg->dsize <
+ RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
goto error;
if (msg->hdr.flags & RXRPC_LAST_PACKET)
goto error;
/* dig out the secondary header */
- if (skb_copy_bits(pkt,msg->offset+RXRPC_JUMBO_DATALEN,&jumbo,sizeof(jumbo))<0)
+ if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN,
+ &jumbo, sizeof(jumbo)) < 0)
goto error;
/* allocate a new message record */
ret = -ENOMEM;
- msg = kmalloc(sizeof(struct rxrpc_message),GFP_KERNEL);
+ msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
if (!msg)
goto error;
- memcpy(msg,jumbomsg,sizeof(*msg));
- list_add_tail(&msg->link,msgq);
+ memcpy(msg, jumbomsg, sizeof(*msg));
+ list_add_tail(&msg->link, msgq);
/* adjust the jumbo packet */
jumbomsg->dsize = RXRPC_JUMBO_DATALEN;
@@ -388,12 +392,15 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
msg->seq++;
msg->hdr.seq = htonl(msg->seq);
msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
- msg->offset += RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header);
- msg->dsize -= RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header);
+ msg->offset += RXRPC_JUMBO_DATALEN +
+ sizeof(struct rxrpc_jumbo_header);
+ msg->dsize -= RXRPC_JUMBO_DATALEN +
+ sizeof(struct rxrpc_jumbo_header);
msg->hdr.flags = jumbo.flags;
msg->hdr._rsvd = jumbo._rsvd;
- _net("Rx Split jumbo packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
+ _net("Rx Split jumbo packet from %s"
+ " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
ntohl(msg->hdr.epoch),
(ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
@@ -407,18 +414,18 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
__RXACCT(atomic_inc(&rxrpc_message_count));
}
- _leave(" = 0 #%d",atomic_read(&rxrpc_message_count));
+ _leave(" = 0 #%d", atomic_read(&rxrpc_message_count));
return 0;
error:
while (!list_empty(msgq)) {
- msg = list_entry(msgq->next,struct rxrpc_message,link);
+ msg = list_entry(msgq->next, struct rxrpc_message, link);
list_del_init(&msg->link);
rxrpc_put_message(msg);
}
- _leave(" = %d",ret);
+ _leave(" = %d", ret);
return ret;
} /* end rxrpc_incoming_msg() */
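The jumbo-packet loop above peels one RXRPC_JUMBO_DATALEN segment plus its small secondary header off the front of the message each pass, bumping the sequence and serial numbers and shrinking the remaining size. A rough stand-alone sketch of just that offset/size bookkeeping (the segment and header sizes are stand-ins, not the real protocol constants) might be:

#include <stdio.h>

#define JUMBO_DATALEN 1412		/* stand-in for RXRPC_JUMBO_DATALEN */
#define JUMBO_HDRLEN 4			/* stand-in for the secondary header */

struct seg {
	size_t offset;			/* where this segment's data starts */
	size_t dsize;			/* how much data it carries */
	unsigned int seq;
};

int main(void)
{
	struct seg cur = { .offset = 0, .dsize = 4244, .seq = 1 };

	/* keep splitting while more than one full segment remains */
	while (cur.dsize > JUMBO_DATALEN + JUMBO_HDRLEN) {
		struct seg next = cur;

		cur.dsize = JUMBO_DATALEN;	/* head becomes a plain packet */
		next.seq++;
		next.offset += JUMBO_DATALEN + JUMBO_HDRLEN;
		next.dsize -= JUMBO_DATALEN + JUMBO_HDRLEN;

		printf("seg seq=%u offset=%zu dsize=%zu\n",
		       cur.seq, cur.offset, cur.dsize);
		cur = next;
	}
	printf("seg seq=%u offset=%zu dsize=%zu\n", cur.seq, cur.offset, cur.dsize);
	return 0;
}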
@@ -438,7 +445,7 @@ void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
LIST_HEAD(msgq);
- _enter("%p{%d}",trans,trans->port);
+ _enter("%p{%d}", trans, trans->port);
for (;;) {
/* deal with outstanding errors first */
@@ -446,22 +453,25 @@ void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
rxrpc_trans_receive_error_report(trans);
/* attempt to receive a packet */
- pkt = skb_recv_datagram(trans->socket->sk,0,1,&ret);
+ pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret);
if (!pkt) {
- if (ret==-EAGAIN) {
+ if (ret == -EAGAIN) {
_leave(" EAGAIN");
return;
}
/* an icmp error may have occurred */
rxrpc_krxiod_queue_transport(trans);
- _leave(" error %d\n",ret);
+ _leave(" error %d\n", ret);
return;
}
- /* we'll probably need to checksum it (didn't call sock_recvmsg) */
+ /* we'll probably need to checksum it (didn't call
+ * sock_recvmsg) */
if (pkt->ip_summed != CHECKSUM_UNNECESSARY) {
- if ((unsigned short)csum_fold(skb_checksum(pkt,0,pkt->len,pkt->csum))) {
+ if ((unsigned short)
+ csum_fold(skb_checksum(pkt, 0, pkt->len,
+ pkt->csum))) {
kfree_skb(pkt);
rxrpc_krxiod_queue_transport(trans);
_leave(" CSUM failed");
@@ -472,34 +482,36 @@ void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
addr = pkt->nh.iph->saddr;
port = pkt->h.uh->source;
- _net("Rx Received UDP packet from %08x:%04hu",ntohl(addr),ntohs(port));
+ _net("Rx Received UDP packet from %08x:%04hu",
+ ntohl(addr), ntohs(port));
/* unmarshall the Rx parameters and split jumbo packets */
- ret = rxrpc_incoming_msg(trans,pkt,&msgq);
- if (ret<0) {
+ ret = rxrpc_incoming_msg(trans, pkt, &msgq);
+ if (ret < 0) {
kfree_skb(pkt);
rxrpc_krxiod_queue_transport(trans);
_leave(" bad packet");
return;
}
- if (list_empty(&msgq)) BUG();
+ BUG_ON(list_empty(&msgq));
- msg = list_entry(msgq.next,struct rxrpc_message,link);
+ msg = list_entry(msgq.next, struct rxrpc_message, link);
- /* locate the record for the peer from which it originated */
- ret = rxrpc_peer_lookup(trans,addr,&peer);
- if (ret<0) {
+ /* locate the record for the peer from which it
+ * originated */
+ ret = rxrpc_peer_lookup(trans, addr, &peer);
+ if (ret < 0) {
kdebug("Rx No connections from that peer");
- rxrpc_trans_immediate_abort(trans,msg,-EINVAL);
+ rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
goto finished_msg;
}
/* try and find a matching connection */
- ret = rxrpc_connection_lookup(peer,msg,&msg->conn);
- if (ret<0) {
+ ret = rxrpc_connection_lookup(peer, msg, &msg->conn);
+ if (ret < 0) {
kdebug("Rx Unknown Connection");
- rxrpc_trans_immediate_abort(trans,msg,-EINVAL);
+ rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
rxrpc_put_peer(peer);
goto finished_msg;
}
@@ -507,23 +519,23 @@ void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
/* deal with the first packet of a new call */
if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
- msg->hdr.type==RXRPC_PACKET_TYPE_DATA &&
- ntohl(msg->hdr.seq)==1
+ msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+ ntohl(msg->hdr.seq) == 1
) {
_debug("Rx New server call");
- rxrpc_trans_receive_new_call(trans,&msgq);
+ rxrpc_trans_receive_new_call(trans, &msgq);
goto finished_msg;
}
/* deal with subsequent packet(s) of call */
_debug("Rx Call packet");
while (!list_empty(&msgq)) {
- msg = list_entry(msgq.next,struct rxrpc_message,link);
+ msg = list_entry(msgq.next, struct rxrpc_message, link);
list_del_init(&msg->link);
- ret = rxrpc_conn_receive_call_packet(msg->conn,NULL,msg);
- if (ret<0) {
- rxrpc_trans_immediate_abort(trans,msg,ret);
+ ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg);
+ if (ret < 0) {
+ rxrpc_trans_immediate_abort(trans, msg, ret);
rxrpc_put_message(msg);
goto finished_msg;
}
@@ -536,7 +548,7 @@ void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
/* dispose of the packets */
finished_msg:
while (!list_empty(&msgq)) {
- msg = list_entry(msgq.next,struct rxrpc_message,link);
+ msg = list_entry(msgq.next, struct rxrpc_message, link);
list_del_init(&msg->link);
rxrpc_put_message(msg);
@@ -561,7 +573,7 @@ static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
_enter("");
/* only bother with the first packet */
- msg = list_entry(msgq->next,struct rxrpc_message,link);
+ msg = list_entry(msgq->next, struct rxrpc_message, link);
list_del_init(&msg->link);
rxrpc_krxsecd_queue_incoming_call(msg);
rxrpc_put_message(msg);
@@ -584,13 +596,13 @@ int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
struct msghdr msghdr;
struct iovec iov[2];
mm_segment_t oldfs;
+ uint32_t _error;
int len, ret;
- u32 _error;
- _enter("%p,%p,%d",trans,msg,error);
+ _enter("%p,%p,%d", trans, msg, error);
/* don't abort an abort packet */
- if (msg->hdr.type==RXRPC_PACKET_TYPE_ABORT) {
+ if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) {
_leave(" = 0");
return 0;
}
@@ -598,12 +610,13 @@ int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
_error = htonl(-error);
/* set up the message to be transmitted */
- memcpy(&ahdr,&msg->hdr,sizeof(ahdr));
+ memcpy(&ahdr, &msg->hdr, sizeof(ahdr));
ahdr.epoch = msg->hdr.epoch;
ahdr.serial = htonl(1);
ahdr.seq = 0;
ahdr.type = RXRPC_PACKET_TYPE_ABORT;
- ahdr.flags = RXRPC_LAST_PACKET | (~msg->hdr.flags & RXRPC_CLIENT_INITIATED);
+ ahdr.flags = RXRPC_LAST_PACKET;
+ ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED;
iov[0].iov_len = sizeof(ahdr);
iov[0].iov_base = &ahdr;
@@ -634,17 +647,17 @@ int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
/* send the message */
oldfs = get_fs();
set_fs(KERNEL_DS);
- ret = sock_sendmsg(trans->socket,&msghdr,len);
+ ret = sock_sendmsg(trans->socket, &msghdr, len);
set_fs(oldfs);
- _leave(" = %d",ret);
+ _leave(" = %d", ret);
return ret;
} /* end rxrpc_trans_immediate_abort() */
/*****************************************************************************/
/*
- * receive an ICMP error report and percolate it to all connections heading to the affected
- * host or port
+ * receive an ICMP error report and percolate it to all connections
+ * heading to the affected host or port
*/
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
{
@@ -655,10 +668,10 @@ static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
struct errormsg emsg;
struct msghdr msg;
mm_segment_t oldfs;
+ uint16_t port;
int local, err;
- u16 port;
- _enter("%p",trans);
+ _enter("%p", trans);
for (;;) {
trans->error_rcvd = 0;
@@ -674,48 +687,63 @@ static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
oldfs = get_fs();
set_fs(KERNEL_DS);
- err = sock_recvmsg(trans->socket,&msg,0,MSG_ERRQUEUE|MSG_DONTWAIT|MSG_TRUNC);
+ err = sock_recvmsg(trans->socket, &msg, 0,
+ MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);
set_fs(oldfs);
- if (err==-EAGAIN) {
+ if (err == -EAGAIN) {
_leave("");
return;
}
- if (err<0) {
- printk("%s: unable to recv an error report: %d\n",__FUNCTION__,err);
+ if (err < 0) {
+ printk("%s: unable to recv an error report: %d\n",
+ __FUNCTION__, err);
_leave("");
return;
}
- msg.msg_controllen = (char*)msg.msg_control - (char*)&emsg;
+ msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg;
- if (msg.msg_controllen<sizeof(emsg.cmsg) || msg.msg_namelen<sizeof(sin)) {
- printk("%s: short control message (nlen=%u clen=%Zu fl=%x)\n",
- __FUNCTION__,msg.msg_namelen,msg.msg_controllen,msg.msg_flags);
+ if (msg.msg_controllen < sizeof(emsg.cmsg) ||
+ msg.msg_namelen < sizeof(sin)) {
+ printk("%s: short control message"
+ " (nlen=%u clen=%Zu fl=%x)\n",
+ __FUNCTION__,
+ msg.msg_namelen,
+ msg.msg_controllen,
+ msg.msg_flags);
continue;
}
- _net("Rx Received control message { len=%Zu level=%u type=%u }",
- emsg.cmsg.cmsg_len,emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type);
+ _net("Rx Received control message"
+ " { len=%Zu level=%u type=%u }",
+ emsg.cmsg.cmsg_len,
+ emsg.cmsg.cmsg_level,
+ emsg.cmsg.cmsg_type);
- if (sin.sin_family!=AF_INET) {
- printk("Rx Ignoring error report with non-INET address (fam=%u)",
+ if (sin.sin_family != AF_INET) {
+ printk("Rx Ignoring error report with non-INET address"
+ " (fam=%u)",
sin.sin_family);
continue;
}
_net("Rx Received message pertaining to host addr=%x port=%hu",
- ntohl(sin.sin_addr.s_addr),ntohs(sin.sin_port));
-
- if (emsg.cmsg.cmsg_level!=SOL_IP || emsg.cmsg.cmsg_type!=IP_RECVERR) {
- printk("Rx Ignoring unknown error report { level=%u type=%u }",
- emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type);
+ ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+
+ if (emsg.cmsg.cmsg_level != SOL_IP ||
+ emsg.cmsg.cmsg_type != IP_RECVERR) {
+ printk("Rx Ignoring unknown error report"
+ " { level=%u type=%u }",
+ emsg.cmsg.cmsg_level,
+ emsg.cmsg.cmsg_type);
continue;
}
- if (msg.msg_controllen<sizeof(emsg.cmsg)+sizeof(emsg.ee)) {
- printk("%s: short error message (%Zu)\n",__FUNCTION__,msg.msg_controllen);
+ if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) {
+ printk("%s: short error message (%Zu)\n",
+ __FUNCTION__, msg.msg_controllen);
_leave("");
return;
}
@@ -767,14 +795,15 @@ static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
default:
_proto("Rx Received ICMP error { type=%u code=%u }",
- emsg.ee.ee_type,emsg.ee.ee_code);
+ emsg.ee.ee_type, emsg.ee.ee_code);
err = emsg.ee.ee_errno;
break;
}
break;
case SO_EE_ORIGIN_LOCAL:
- _proto("Rx Received local error { error=%d }",emsg.ee.ee_errno);
+ _proto("Rx Received local error { error=%d }",
+ emsg.ee.ee_errno);
local = 1;
err = emsg.ee.ee_errno;
break;
@@ -782,35 +811,41 @@ static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
case SO_EE_ORIGIN_NONE:
case SO_EE_ORIGIN_ICMP6:
default:
- _proto("Rx Received error report { orig=%u }",emsg.ee.ee_origin);
+ _proto("Rx Received error report { orig=%u }",
+ emsg.ee.ee_origin);
local = 0;
err = emsg.ee.ee_errno;
break;
}
- /* find all the connections between this transport and the affected destination */
+ /* find all the connections between this transport and the
+ * affected destination */
INIT_LIST_HEAD(&connq);
- if (rxrpc_peer_lookup(trans,sin.sin_addr.s_addr,&peer)==0) {
+ if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr,
+ &peer) == 0) {
read_lock(&peer->conn_lock);
- list_for_each(_p,&peer->conn_active) {
- conn = list_entry(_p,struct rxrpc_connection,link);
- if (port && conn->addr.sin_port!=port)
+ list_for_each(_p, &peer->conn_active) {
+ conn = list_entry(_p, struct rxrpc_connection,
+ link);
+ if (port && conn->addr.sin_port != port)
continue;
if (!list_empty(&conn->err_link))
continue;
rxrpc_get_connection(conn);
- list_add_tail(&conn->err_link,&connq);
+ list_add_tail(&conn->err_link, &connq);
}
read_unlock(&peer->conn_lock);
/* service all those connections */
while (!list_empty(&connq)) {
- conn = list_entry(connq.next,struct rxrpc_connection,err_link);
+ conn = list_entry(connq.next,
+ struct rxrpc_connection,
+ err_link);
list_del(&conn->err_link);
- rxrpc_conn_handle_error(conn,local,err);
+ rxrpc_conn_handle_error(conn, local, err);
rxrpc_put_connection(conn);
}