Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r--	net/tipc/socket.c	188
1 file changed, 137 insertions(+), 51 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 7dfa9fc99ec3..1fd1c8b5ce03 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -289,10 +289,9 @@ static bool tipc_sk_type_connectionless(struct sock *sk)
 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 {
 	struct sock *sk = &tsk->sk;
-	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
+	u32 self = tipc_own_addr(sock_net(sk));
 	u32 peer_port = tsk_peer_port(tsk);
-	u32 orig_node;
-	u32 peer_node;
+	u32 orig_node, peer_node;
 
 	if (unlikely(!tipc_sk_connected(sk)))
 		return false;
@@ -306,10 +305,10 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 	if (likely(orig_node == peer_node))
 		return true;
 
-	if (!orig_node && (peer_node == tn->own_addr))
+	if (!orig_node && peer_node == self)
 		return true;
 
-	if (!peer_node && (orig_node == tn->own_addr))
+	if (!peer_node && orig_node == self)
 		return true;
 
 	return false;
@@ -461,8 +460,8 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 	/* Ensure tsk is visible before we read own_addr. */
 	smp_mb();
 
-	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
-		      NAMED_H_SIZE, 0);
+	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
+		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
 	msg_set_origport(msg, tsk->portid);
 	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
@@ -644,7 +643,7 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
 		goto exit;
 	}
 
-	res = (addr->scope > 0) ?
+	res = (addr->scope >= 0) ?
 		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
 		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
 exit:
@@ -666,12 +665,11 @@
  *       a completely predictable manner).
  */
 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
-			int *uaddr_len, int peer)
+			int peer)
 {
 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 	struct sock *sk = sock->sk;
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
 
 	memset(addr, 0, sizeof(*addr));
 	if (peer) {
@@ -682,16 +680,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 		addr->addr.id.node = tsk_peer_node(tsk);
 	} else {
 		addr->addr.id.ref = tsk->portid;
-		addr->addr.id.node = tn->own_addr;
+		addr->addr.id.node = tipc_own_addr(sock_net(sk));
 	}
 
-	*uaddr_len = sizeof(*addr);
 	addr->addrtype = TIPC_ADDR_ID;
 	addr->family = AF_TIPC;
 	addr->scope = 0;
 	addr->addr.name.domain = 0;
 
-	return 0;
+	return sizeof(*addr);
 }
 
 /**
@@ -1281,8 +1278,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_name_seq *seq;
 	struct sk_buff_head pkts;
-	u32 type, inst, domain;
 	u32 dnode, dport;
+	u32 type, inst;
 	int mtu, rc;
 
 	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
@@ -1333,13 +1330,12 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 	if (dest->addrtype == TIPC_ADDR_NAME) {
 		type = dest->addr.name.name.type;
 		inst = dest->addr.name.name.instance;
-		domain = dest->addr.name.domain;
-		dnode = domain;
+		dnode = dest->addr.name.domain;
 		msg_set_type(hdr, TIPC_NAMED_MSG);
 		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
 		msg_set_nametype(hdr, type);
 		msg_set_nameinst(hdr, inst);
-		msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
+		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
 		dport = tipc_nametbl_translate(net, type, inst, &dnode);
 		msg_set_destnode(hdr, dnode);
 		msg_set_destport(hdr, dport);
@@ -2124,8 +2120,10 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
 		    (!sk_conn && msg_connected(hdr)) ||
 		    (!grp && msg_in_group(hdr)))
 			err = TIPC_ERR_NO_PORT;
-		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
+		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
+			atomic_inc(&sk->sk_drops);
 			err = TIPC_ERR_OVERLOAD;
+		}
 
 		if (unlikely(err)) {
 			tipc_skb_reject(net, err, skb, xmitq);
@@ -2204,6 +2202,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 
 		/* Overload => reject message back to sender */
 		onode = tipc_own_addr(sock_net(sk));
+		atomic_inc(&sk->sk_drops);
 		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
 			__skb_queue_tail(xmitq, skb);
 		break;
@@ -2593,6 +2592,9 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 	struct publication *publ;
 	u32 key;
 
+	if (scope != TIPC_NODE_SCOPE)
+		scope = TIPC_CLUSTER_SCOPE;
+
 	if (tipc_sk_connected(sk))
 		return -EINVAL;
 	key = tsk->portid + tsk->pub_count + 1;
@@ -2604,7 +2606,7 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 	if (unlikely(!publ))
 		return -EINVAL;
 
-	list_add(&publ->pport_list, &tsk->publications);
+	list_add(&publ->binding_sock, &tsk->publications);
 	tsk->pub_count++;
 	tsk->published = 1;
 	return 0;
@@ -2618,7 +2620,10 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 	struct publication *safe;
 	int rc = -EINVAL;
 
-	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
+	if (scope != TIPC_NODE_SCOPE)
+		scope = TIPC_CLUSTER_SCOPE;
+
+	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
 		if (seq) {
 			if (publ->scope != scope)
 				continue;
@@ -2629,12 +2634,12 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 			if (publ->upper != seq->upper)
 				break;
 			tipc_nametbl_withdraw(net, publ->type, publ->lower,
-					      publ->ref, publ->key);
+					      publ->upper, publ->key);
 			rc = 0;
 			break;
 		}
 		tipc_nametbl_withdraw(net, publ->type, publ->lower,
-				      publ->ref, publ->key);
+				      publ->upper, publ->key);
 		rc = 0;
 	}
 	if (list_empty(&tsk->publications))
@@ -2660,8 +2665,8 @@ void tipc_sk_reinit(struct net *net)
 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
 			spin_lock_bh(&tsk->sk.sk_lock.slock);
 			msg = &tsk->phdr;
-			msg_set_prevnode(msg, tn->own_addr);
-			msg_set_orignode(msg, tn->own_addr);
+			msg_set_prevnode(msg, tipc_own_addr(net));
+			msg_set_orignode(msg, tipc_own_addr(net));
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
 
@@ -3156,16 +3161,32 @@ msg_full:
 	return -EMSGSIZE;
 }
 
+static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
+			  *tsk)
+{
+	struct net *net = sock_net(skb->sk);
+	struct sock *sk = &tsk->sk;
+
+	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
+		return -EMSGSIZE;
+
+	if (tipc_sk_connected(sk)) {
+		if (__tipc_nl_add_sk_con(skb, tsk))
+			return -EMSGSIZE;
+	} else if (!list_empty(&tsk->publications)) {
+		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
+			return -EMSGSIZE;
+	}
+	return 0;
+}
+
 /* Caller should hold socket lock for the passed tipc socket. */
 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct tipc_sock *tsk)
 {
-	int err;
-	void *hdr;
 	struct nlattr *attrs;
-	struct net *net = sock_net(skb->sk);
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct sock *sk = &tsk->sk;
+	void *hdr;
 
 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
@@ -3175,19 +3196,10 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
 	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
 	if (!attrs)
 		goto genlmsg_cancel;
-	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
-		goto attr_msg_cancel;
-	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
+
+	if (__tipc_nl_add_sk_info(skb, tsk))
 		goto attr_msg_cancel;
-	if (tipc_sk_connected(sk)) {
-		err = __tipc_nl_add_sk_con(skb, tsk);
-		if (err)
-			goto attr_msg_cancel;
-	} else if (!list_empty(&tsk->publications)) {
-		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
-			goto attr_msg_cancel;
-	}
+
 	nla_nest_end(skb, attrs);
 	genlmsg_end(skb, hdr);
@@ -3201,16 +3213,19 @@ msg_cancel:
 	return -EMSGSIZE;
 }
 
-int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
+		    int (*skb_handler)(struct sk_buff *skb,
+				       struct netlink_callback *cb,
+				       struct tipc_sock *tsk))
 {
-	int err;
-	struct tipc_sock *tsk;
-	const struct bucket_table *tbl;
-	struct rhash_head *pos;
 	struct net *net = sock_net(skb->sk);
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	u32 tbl_id = cb->args[0];
+	struct tipc_net *tn = tipc_net(net);
+	const struct bucket_table *tbl;
 	u32 prev_portid = cb->args[1];
+	u32 tbl_id = cb->args[0];
+	struct rhash_head *pos;
+	struct tipc_sock *tsk;
+	int err;
 
 	rcu_read_lock();
 	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
@@ -3222,12 +3237,13 @@ int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
 				continue;
 			}
 
-			err = __tipc_nl_add_sk(skb, cb, tsk);
+			err = skb_handler(skb, cb, tsk);
 			if (err) {
 				prev_portid = tsk->portid;
 				spin_unlock_bh(&tsk->sk.sk_lock.slock);
 				goto out;
 			}
+
 			prev_portid = 0;
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
@@ -3239,6 +3255,76 @@ out:
 	return skb->len;
 }
+EXPORT_SYMBOL(tipc_nl_sk_walk);
+
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+			   struct tipc_sock *tsk, u32 sk_filter_state,
+			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
+{
+	struct sock *sk = &tsk->sk;
+	struct nlattr *attrs;
+	struct nlattr *stat;
+
+	/*filter response w.r.t sk_state*/
+	if (!(sk_filter_state & (1 << sk->sk_state)))
+		return 0;
+
+	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
+	if (!attrs)
+		goto msg_cancel;
+
+	if (__tipc_nl_add_sk_info(skb, tsk))
+		goto attr_msg_cancel;
+
+	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
+			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
+					 sock_i_uid(sk))) ||
+	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
+			      tipc_diag_gen_cookie(sk),
+			      TIPC_NLA_SOCK_PAD))
+		goto attr_msg_cancel;
+
+	stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
+	if (!stat)
+		goto attr_msg_cancel;
+
+	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
+			skb_queue_len(&sk->sk_receive_queue)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
+			skb_queue_len(&sk->sk_write_queue)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
+			atomic_read(&sk->sk_drops)))
+		goto stat_msg_cancel;
+
+	if (tsk->cong_link_cnt &&
+	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
+		goto stat_msg_cancel;
+
+	if (tsk_conn_cong(tsk) &&
+	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
+		goto stat_msg_cancel;
+
+	nla_nest_end(skb, stat);
+	nla_nest_end(skb, attrs);
+
+	return 0;
+
+stat_msg_cancel:
+	nla_nest_cancel(skb, stat);
+attr_msg_cancel:
+	nla_nest_cancel(skb, attrs);
+msg_cancel:
+	return -EMSGSIZE;
+}
+EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
+
+int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
+}
 
 /* Caller should hold socket lock for the passed tipc socket. */
 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
@@ -3288,7 +3374,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
 	struct publication *p;
 
 	if (*last_publ) {
-		list_for_each_entry(p, &tsk->publications, pport_list) {
+		list_for_each_entry(p, &tsk->publications, binding_sock) {
 			if (p->key == *last_publ)
 				break;
 		}
@@ -3305,10 +3391,10 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
 		}
 	} else {
 		p = list_first_entry(&tsk->publications, struct publication,
-				     pport_list);
+				     binding_sock);
 	}
 
-	list_for_each_entry_from(p, &tsk->publications, pport_list) {
+	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
 		err = __tipc_nl_add_sk_publ(skb, cb, p);
 		if (err) {
 			*last_publ = p->key;
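
The two helpers exported above, tipc_nl_sk_walk() and tipc_sk_fill_sock_diag(), are the hook points this diff exposes to code outside socket.c (in-tree, a TIPC socket diagnostics dumper). The sketch below is only an illustration of how a caller could wire them together under that assumption: the my_* names and the inode-based cookie are hypothetical, not part of this diff, and the two prototypes are assumed to be visible through net/tipc/socket.h; only their signatures are taken from the patch itself.

#include <net/netlink.h>
#include <linux/sock_diag.h>
#include "socket.h"	/* assumed to declare tipc_nl_sk_walk()/tipc_sk_fill_sock_diag() */

/* Hypothetical cookie generator: any stable per-socket value works as the
 * opaque 64-bit cookie; the socket inode number is used purely as an example.
 */
static u64 my_gen_cookie(struct sock *sk)
{
	return (u64)sock_i_ino(sk);
}

/* Per-socket handler as expected by tipc_nl_sk_walk(); it is invoked with
 * tsk's socket lock held, so the socket state can be read safely.
 */
static int my_dump_one(struct sk_buff *skb, struct netlink_callback *cb,
		       struct tipc_sock *tsk)
{
	struct nlmsghdr *nlh;
	int err;

	/* One netlink message per socket; SOCK_DIAG_BY_FAMILY is used here only
	 * as a plausible message type for a diag-style reply.
	 */
	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			SOCK_DIAG_BY_FAMILY, 0, NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	/* ~0U keeps sockets in every sk_state; tipc_sk_fill_sock_diag() skips
	 * sockets whose state bit is not set in the filter.
	 */
	err = tipc_sk_fill_sock_diag(skb, cb, tsk, ~0U, my_gen_cookie);
	if (err)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);
	return err;
}

/* Walk the whole TIPC socket rhashtable; resume state between dump calls
 * lives in cb->args[0] (bucket) and cb->args[1] (previous portid).
 */
static int my_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return tipc_nl_sk_walk(skb, cb, my_dump_one);
}

Since tipc_nl_sk_dump() itself is now just tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk), any additional dumper only has to supply its own per-socket handler in the same way.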
