Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--   drivers/infiniband/sw/rdmavt/cq.c   |  3
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_mr.c  |  1
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_net.c | 49
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_odp.c |  1
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_qp.c  | 49
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_srq.c |  7
-rw-r--r--   drivers/infiniband/sw/siw/siw_cm.c  | 59
7 files changed, 156 insertions, 13 deletions
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 0ca2743f1075..e7835ca70e2b 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -518,7 +518,8 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
  */
 int rvt_driver_cq_init(void)
 {
-	comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+	comp_vector_wq = alloc_workqueue("%s",
+					 WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_PERCPU,
 					 0, "rdmavt_cq");
 	if (!comp_vector_wq)
 		return -ENOMEM;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index bcb97b3ea58a..b1df05238848 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -452,7 +452,6 @@ static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int leng
 
 		length -= bytes;
 		iova += bytes;
-		page_offset = 0;
 	}
 
 	return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index ac0183a2ff7a..0195d361e5e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -20,6 +20,54 @@
 
 static struct rxe_recv_sockets recv_sockets;
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * lockdep can detect false positive circular dependencies
+ * when there are user-space socket API users or in kernel
+ * users switching between a tcp and rdma transport.
+ * Maybe also switching between siw and rxe may cause
+ * problems as per default sockets are only classified
+ * by family and not by ip protocol. And there might
+ * be different locks used between the application
+ * and the low level sockets.
+ *
+ * Problems were seen with ksmbd.ko and cifs.ko,
+ * switching transports, use git blame to find
+ * more details.
+ */
+static struct lock_class_key rxe_recv_sk_key[2];
+static struct lock_class_key rxe_recv_slock_key[2];
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rxe_reclassify_recv_socket(struct socket *sock)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct sock *sk = sock->sk;
+
+	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+		return;
+
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk,
+					      "slock-AF_INET-RDMA-RXE-RECV",
+					      &rxe_recv_slock_key[0],
+					      "sk_lock-AF_INET-RDMA-RXE-RECV",
+					      &rxe_recv_sk_key[0]);
+		break;
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk,
+					      "slock-AF_INET6-RDMA-RXE-RECV",
+					      &rxe_recv_slock_key[1],
+					      "sk_lock-AF_INET6-RDMA-RXE-RECV",
+					      &rxe_recv_sk_key[1]);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+}
+
 static struct dst_entry *rxe_find_route4(struct rxe_qp *qp,
 					 struct net_device *ndev,
 					 struct in_addr *saddr,
@@ -192,6 +240,7 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
 	err = udp_sock_create(net, &udp_cfg, &sock);
 	if (err < 0)
 		return ERR_PTR(err);
+	rxe_reclassify_recv_socket(sock);
 
 	tnl_cfg.encap_type = 1;
 	tnl_cfg.encap_rcv = rxe_udp_encap_recv;
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index f58e3ec6252f..ae71812bea82 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -358,7 +358,6 @@ int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
 
 		length -= bytes;
 		iova += bytes;
-		page_offset = 0;
 	}
 
 	mutex_unlock(&umem_odp->umem_mutex);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 95f1c1c2949d..845bdd03ca28 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -15,6 +15,54 @@
 #include "rxe_queue.h"
 #include "rxe_task.h"
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * lockdep can detect false positive circular dependencies
+ * when there are user-space socket API users or in kernel
+ * users switching between a tcp and rdma transport.
+ * Maybe also switching between siw and rxe may cause
+ * problems as per default sockets are only classified
+ * by family and not by ip protocol. And there might
+ * be different locks used between the application
+ * and the low level sockets.
+ *
+ * Problems were seen with ksmbd.ko and cifs.ko,
+ * switching transports, use git blame to find
+ * more details.
+ */
+static struct lock_class_key rxe_send_sk_key[2];
+static struct lock_class_key rxe_send_slock_key[2];
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rxe_reclassify_send_socket(struct socket *sock)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct sock *sk = sock->sk;
+
+	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+		return;
+
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk,
+					      "slock-AF_INET-RDMA-RXE-SEND",
+					      &rxe_send_slock_key[0],
+					      "sk_lock-AF_INET-RDMA-RXE-SEND",
+					      &rxe_send_sk_key[0]);
+		break;
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk,
+					      "slock-AF_INET6-RDMA-RXE-SEND",
+					      &rxe_send_slock_key[1],
+					      "sk_lock-AF_INET6-RDMA-RXE-SEND",
+					      &rxe_send_sk_key[1]);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+}
+
 static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
 			  int has_srq)
 {
@@ -244,6 +292,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
 	if (err < 0)
 		return err;
+	rxe_reclassify_send_socket(qp->sk);
 	qp->sk->sk->sk_user_data = qp;
 
 	/* pick a source UDP port number for this QP based on
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 3661cb627d28..2a234f26ac10 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -171,7 +171,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 				       udata, mi, &srq->rq.producer_lock,
 				       &srq->rq.consumer_lock);
 		if (err)
-			goto err_free;
+			return err;
 
 		srq->rq.max_wr = attr->max_wr;
 	}
@@ -180,11 +180,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		srq->limit = attr->srq_limit;
 
 	return 0;
-
-err_free:
-	rxe_queue_cleanup(q);
-	srq->rq.queue = NULL;
-	return err;
 }
 
 void rxe_srq_cleanup(struct rxe_pool_elem *elem)
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 708b13993fdf..1d3de8209bfa 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -39,6 +39,55 @@ static void siw_cm_llp_error_report(struct sock *s);
 static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
 			 int status);
 
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * lockdep can detect false positive circular dependencies
+ * when there are user-space socket API users or in kernel
+ * users switching between a tcp and rdma transport.
+ * Maybe also switching between siw and rxe may cause
+ * problems as per default sockets are only classified
+ * by family and not by ip protocol. And there might
+ * be different locks used between the application
+ * and the low level sockets.
+ *
+ * Problems were seen with ksmbd.ko and cifs.ko,
+ * switching transports, use git blame to find
+ * more details.
+ */
+static struct lock_class_key siw_sk_key[2];
+static struct lock_class_key siw_slock_key[2];
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void siw_reclassify_socket(struct socket *sock)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct sock *sk = sock->sk;
+
+	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+		return;
+
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk,
+					      "slock-AF_INET-RDMA-SIW",
+					      &siw_slock_key[0],
+					      "sk_lock-AF_INET-RDMA-SIW",
+					      &siw_sk_key[0]);
+		break;
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk,
+					      "slock-AF_INET6-RDMA-SIW",
+					      &siw_slock_key[1],
+					      "sk_lock-AF_INET6-RDMA-SIW",
+					      &siw_sk_key[1]);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+}
+
 static void siw_sk_assign_cm_upcalls(struct sock *sk)
 {
 	struct siw_cep *cep = sk_to_cep(sk);
@@ -1340,11 +1389,11 @@ static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
 			return rv;
 	}
 
-	rv = s->ops->bind(s, laddr, size);
+	rv = s->ops->bind(s, (struct sockaddr_unsized *)laddr, size);
 	if (rv < 0)
 		return rv;
 
-	rv = s->ops->connect(s, raddr, size, flags);
+	rv = s->ops->connect(s, (struct sockaddr_unsized *)raddr, size, flags);
 
 	return rv < 0 ? rv : 0;
 }
@@ -1394,6 +1443,7 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
 	rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
 	if (rv < 0)
 		goto error;
+	siw_reclassify_socket(s);
 
 	/*
 	 * NOTE: For simplification, connect() is called in blocking
@@ -1770,6 +1820,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 	rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
 	if (rv < 0)
 		return rv;
+	siw_reclassify_socket(s);
 
 	/*
 	 * Allow binding local port when still in TIME_WAIT from last close.
@@ -1789,7 +1840,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 				goto error;
 			}
 		}
-		rv = s->ops->bind(s, (struct sockaddr *)laddr,
+		rv = s->ops->bind(s, (struct sockaddr_unsized *)laddr,
 				  sizeof(struct sockaddr_in));
 	} else {
 		struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
@@ -1813,7 +1864,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 				goto error;
 			}
 		}
-		rv = s->ops->bind(s, (struct sockaddr *)laddr,
+		rv = s->ops->bind(s, (struct sockaddr_unsized *)laddr,
 				  sizeof(struct sockaddr_in6));
 	}
 	if (rv) {
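
Not part of the commit itself, but for readers unfamiliar with the pattern the rxe and siw hunks repeat: below is a minimal sketch of lockdep socket-lock reclassification, assuming CONFIG_DEBUG_LOCK_ALLOC and a kernel-created IPv4 UDP socket. The example_* identifiers and the "EXAMPLE" lock-class name strings are hypothetical and not from the patch; sock_create_kern(), sock_allow_reclassification() and sock_lock_init_class_and_name() are the same kernel APIs the patch uses.

#include <linux/net.h>
#include <linux/socket.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* One static key pair per distinct lock class (hypothetical names). */
static struct lock_class_key example_sk_key;
static struct lock_class_key example_slock_key;
#endif

/* Give a freshly created kernel socket its own lockdep class so its
 * sk_lock/slock are not lumped in with every other AF_INET socket.
 */
static void example_reclassify_socket(struct socket *sock)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct sock *sk = sock->sk;

	/* Reclassification is only valid while nobody holds the socket locks. */
	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	sock_lock_init_class_and_name(sk,
				      "slock-AF_INET-EXAMPLE",
				      &example_slock_key,
				      "sk_lock-AF_INET-EXAMPLE",
				      &example_sk_key);
#endif
}

static int example_create_udp_socket(struct socket **sockp)
{
	int err;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, sockp);
	if (err < 0)
		return err;

	/* Reclassify before the socket is handed to anything that locks it. */
	example_reclassify_socket(*sockp);
	return 0;
}

The patch keeps separate key pairs per address family and per role (the [2] arrays indexed for AF_INET/AF_INET6, plus distinct RECV keys in rxe_net.c, SEND keys in rxe_qp.c and SIW keys in siw_cm.c), so lockdep can tell these classes apart instead of collapsing them into the default per-family socket class; the sketch reduces all of that to a single IPv4 pair for brevity. The call must run right after socket creation, before anything takes sk_lock, otherwise sock_allow_reclassification() fails, the WARN fires and the socket keeps its default class.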
