Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c      13
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_task.c    8
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c  25
3 files changed, 26 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e825e2ef7966..134a79eecfcb 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -492,7 +492,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
- u32 ret;
+ int ret;
u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
RVT_AIP_QPN_MAX : RVT_QPN_MAX;
@@ -510,7 +510,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
else
qpt->flags |= n;
spin_unlock(&qpt->lock);
- goto bail;
+
+ return ret;
}
qpn = qpt->last + qpt->incr;
@@ -530,7 +531,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
if (!test_and_set_bit(offset, map->page)) {
qpt->last = qpn;
ret = qpn;
- goto bail;
+
+ return ret;
}
offset += qpt->incr;
/*
@@ -565,10 +567,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
qpn = mk_qpn(qpt, map, offset);
}
- ret = -ENOMEM;
-
-bail:
- return ret;
+ return -ENOMEM;
}
/**
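The rdmavt hunks above replace the shared "bail" label with direct returns and change the local ret from u32 to int, so the variable that can hold -ENOMEM is signed like the function's return type. A minimal user-space sketch of that second point (alloc_number() is a hypothetical stand-in, not the kernel allocator):

#include <errno.h>
#include <stdio.h>

/* Hypothetical allocator: returns an allocated number on success or a
 * negative errno on failure.  Keeping the local signed (int, not u32)
 * means any "ret < 0" check on it behaves as expected and no unsigned
 * conversion is involved when the error value is returned. */
static int alloc_number(int fail)
{
	if (fail)
		return -ENOMEM;		/* error path returns directly */
	return 42;			/* success: the allocated number */
}

int main(void)
{
	int ret = alloc_number(1);

	if (ret < 0)
		printf("allocation failed: %d\n", ret);
	else
		printf("allocated %d\n", ret);
	return 0;
}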
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6f8f353e9583..f522820b950c 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -132,8 +132,12 @@ static void do_task(struct rxe_task *task)
* yield the cpu and reschedule the task
*/
if (!ret) {
- task->state = TASK_STATE_IDLE;
- resched = 1;
+ if (task->state != TASK_STATE_DRAINING) {
+ task->state = TASK_STATE_IDLE;
+ resched = 1;
+ } else {
+ cont = 1;
+ }
goto exit;
}
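In the rxe_task.c hunk above, an exhausted work queue normally parks the task (TASK_STATE_IDLE) and requests a reschedule, but while the task is in TASK_STATE_DRAINING it must keep looping so the drain can complete; that is what the new cont = 1 branch does. A minimal user-space sketch of the same decision, with simplified stand-in names (handle_no_more_work(), STATE_*) rather than the rxe identifiers:

#include <stdio.h>

enum task_state { STATE_BUSY, STATE_IDLE, STATE_DRAINING };

struct task {
	enum task_state state;
};

/* Returns nonzero when the caller's loop should run the task again
 * instead of parking it. */
static int handle_no_more_work(struct task *task)
{
	if (task->state != STATE_DRAINING) {
		/* normal case: nothing left to do, go idle */
		task->state = STATE_IDLE;
		return 0;
	}
	/* draining: keep iterating so the drain can complete */
	return 1;
}

int main(void)
{
	struct task normal = { .state = STATE_BUSY };
	struct task draining = { .state = STATE_DRAINING };

	printf("normal task keeps running: %d (goes idle)\n",
	       handle_no_more_work(&normal));
	printf("draining task keeps running: %d\n",
	       handle_no_more_work(&draining));
	return 0;
}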
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 35c3bde0d00a..efa2f097b582 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -769,7 +769,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
struct siw_wqe *wqe = tx_wqe(qp);
unsigned long flags;
- int rv = 0;
+ int rv = 0, imm_err = 0;
if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
@@ -955,9 +955,17 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
* Send directly if SQ processing is not in progress.
* Eventual immediate errors (rv < 0) do not affect the involved
* RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
- * processing, if new work is already pending. But rv must be passed
- * to caller.
+ * processing, if new work is already pending. But rv and pointer
+ * to failed work request must be passed to caller.
*/
+ if (unlikely(rv < 0)) {
+ /*
+ * Immediate error
+ */
+ siw_dbg_qp(qp, "Immediate error %d\n", rv);
+ imm_err = rv;
+ *bad_wr = wr;
+ }
if (wqe->wr_status != SIW_WR_IDLE) {
spin_unlock_irqrestore(&qp->sq_lock, flags);
goto skip_direct_sending;
@@ -982,15 +990,10 @@ skip_direct_sending:
up_read(&qp->state_lock);
- if (rv >= 0)
- return 0;
- /*
- * Immediate error
- */
- siw_dbg_qp(qp, "error %d\n", rv);
+ if (unlikely(imm_err))
+ return imm_err;
- *bad_wr = wr;
- return rv;
+ return (rv >= 0) ? 0 : rv;
}
/*
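The siw_verbs.c hunks move the immediate-error handling in front of the direct SQ processing: the error code and the pointer to the failed work request are latched into imm_err and *bad_wr before direct sending runs, so a later processing result cannot overwrite them, and imm_err takes precedence in the final return. A compact user-space sketch of that control flow (post_send(), post_rv and process_rv are hypothetical stand-ins, not the siw API):

#include <errno.h>
#include <stdio.h>

struct send_wr { int id; };	/* stand-in for struct ib_send_wr */

/* Sketch of the reordered error handling: the immediate error and the
 * failing work request are recorded first; direct SQ processing (here
 * just represented by process_rv) cannot clobber them. */
static int post_send(const struct send_wr *wr, const struct send_wr **bad_wr,
		     int post_rv, int process_rv)
{
	int imm_err = 0;

	if (post_rv < 0) {
		/* immediate error: remember it and the failed WR now */
		imm_err = post_rv;
		*bad_wr = wr;
	}

	/* ... direct SQ processing would run here, producing process_rv ... */

	if (imm_err)
		return imm_err;		/* immediate error wins */

	return process_rv < 0 ? process_rv : 0;
}

int main(void)
{
	struct send_wr wr = { .id = 1 };
	const struct send_wr *bad_wr = NULL;
	int rv = post_send(&wr, &bad_wr, -EINVAL, 0);

	printf("rv=%d bad_wr=%p\n", rv, (const void *)bad_wr);
	return 0;
}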