Diffstat (limited to 'drivers/infiniband/hw/irdma/utils.c')
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c  58
1 file changed, 19 insertions(+), 39 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 8b94d87b0192..cc2a12f735d3 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -452,6 +452,7 @@ struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
cqp_request->waiting = wait;
refcount_set(&cqp_request->refcnt, 1);
memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
+ memset(&cqp_request->info, 0, sizeof(cqp_request->info));
return cqp_request;
}
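With cqp_request->info now zeroed once at allocation time, every caller that previously cleared the structure itself can drop its own memset, which is what the remaining hunks below do. A minimal sketch of the resulting caller pattern, condensed from the qp_destroy hunk below (iwcqp stands in for the caller's struct irdma_cqp pointer; submission and completion handling omitted):

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	/* info was already zeroed by irdma_alloc_and_get_cqp_request() */
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = qp;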
@@ -1068,7 +1069,6 @@ int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
cqp_info = &cqp_request->info;
qp_info = &cqp_request->info.in.u.qp_create.info;
- memset(qp_info, 0, sizeof(*qp_info));
qp_info->cq_num_valid = true;
qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
@@ -1343,7 +1343,6 @@ int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
cqp_info->post_sq = 1;
cqp_info->in.u.qp_destroy.qp = qp;
@@ -1749,7 +1748,6 @@ int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
cqp_info->post_sq = 1;
cqp_info->in.u.stats_gather.info = pestat->gather_info;
@@ -1789,7 +1787,6 @@ int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = cmd;
cqp_info->post_sq = 1;
cqp_info->in.u.stats_manage.info = *stats_info;
@@ -1890,7 +1887,6 @@ int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = cmd;
cqp_info->post_sq = 1;
cqp_info->in.u.ws_node.info = *node_info;
@@ -2357,24 +2353,6 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
}
-bool irdma_cq_empty(struct irdma_cq *iwcq)
-{
- struct irdma_cq_uk *ukcq;
- u64 qword3;
- __le64 *cqe;
- u8 polarity;
-
- ukcq = &iwcq->sc_cq.cq_uk;
- if (ukcq->avoid_mem_cflct)
- cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
- else
- cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
- get_64bit_val(cqe, 24, &qword3);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
-
- return polarity != ukcq->polarity;
-}
-
void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
{
struct irdma_cmpl_gen *cmpl_node;
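The removed helper checked CQ emptiness by comparing the valid bit of the CQE at the current head against the CQ's polarity: the two match only once the hardware has written that entry, so a mismatch means no new completion is pending. The call sites below switch to irdma_uk_cq_empty(), which takes the struct irdma_cq_uk directly. Its definition is outside this file; a sketch of its presumed shape, assuming it is simply the removed body rehomed without the iwcq dereference:

	bool irdma_uk_cq_empty(struct irdma_cq_uk *ukcq)
	{
		u64 qword3;
		__le64 *cqe;
		u8 polarity;

		if (ukcq->avoid_mem_cflct)
			cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
		else
			cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);

		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		/* a valid bit matching the CQ polarity marks a consumable CQE;
		 * a mismatch at the head means the CQ is empty
		 */
		return polarity != ukcq->polarity;
	}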
@@ -2436,6 +2414,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
struct irdma_ring *sq_ring = &qp->sq_ring;
struct irdma_ring *rq_ring = &qp->rq_ring;
+ struct irdma_cq *iwscq = iwqp->iwscq;
+ struct irdma_cq *iwrcq = iwqp->iwrcq;
struct irdma_cmpl_gen *cmpl;
__le64 *sw_wqe;
u64 wqe_qword;
@@ -2443,8 +2423,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
bool compl_generated = false;
unsigned long flags1;
- spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
- if (irdma_cq_empty(iwqp->iwscq)) {
+ spin_lock_irqsave(&iwscq->lock, flags1);
+ if (irdma_uk_cq_empty(&iwscq->sc_cq.cq_uk)) {
unsigned long flags2;
spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2452,7 +2432,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
spin_unlock_irqrestore(&iwqp->lock, flags2);
- spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ spin_unlock_irqrestore(&iwscq->lock, flags1);
return;
}
@@ -2471,24 +2451,24 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
kfree(cmpl);
continue;
}
- ibdev_dbg(iwqp->iwscq->ibcq.device,
+ ibdev_dbg(iwscq->ibcq.device,
"DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
__func__, cmpl->cpi.wr_id, qp->qp_id);
- list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
+ list_add_tail(&cmpl->list, &iwscq->cmpl_generated);
compl_generated = true;
}
spin_unlock_irqrestore(&iwqp->lock, flags2);
- spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ spin_unlock_irqrestore(&iwscq->lock, flags1);
if (compl_generated)
- irdma_comp_handler(iwqp->iwscq);
+ irdma_comp_handler(iwscq);
} else {
- spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ spin_unlock_irqrestore(&iwscq->lock, flags1);
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
}
- spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
- if (irdma_cq_empty(iwqp->iwrcq)) {
+ spin_lock_irqsave(&iwrcq->lock, flags1);
+ if (irdma_uk_cq_empty(&iwrcq->sc_cq.cq_uk)) {
unsigned long flags2;
spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2496,7 +2476,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
spin_unlock_irqrestore(&iwqp->lock, flags2);
- spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ spin_unlock_irqrestore(&iwrcq->lock, flags1);
return;
}
@@ -2508,20 +2488,20 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
/* remove the RQ WR by moving RQ tail */
IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
- ibdev_dbg(iwqp->iwrcq->ibcq.device,
+ ibdev_dbg(iwrcq->ibcq.device,
"DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
__func__, cmpl->cpi.wr_id, qp->qp_id,
wqe_idx);
- list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
+ list_add_tail(&cmpl->list, &iwrcq->cmpl_generated);
compl_generated = true;
}
spin_unlock_irqrestore(&iwqp->lock, flags2);
- spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ spin_unlock_irqrestore(&iwrcq->lock, flags1);
if (compl_generated)
- irdma_comp_handler(iwqp->iwrcq);
+ irdma_comp_handler(iwrcq);
} else {
- spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ spin_unlock_irqrestore(&iwrcq->lock, flags1);
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
}
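Aside from the new helper, the flush path above changes only cosmetically: iwscq and iwrcq are cached in locals to shorten the repeated iwqp->iwscq and iwqp->iwrcq dereferences. The locking shape is unchanged and worth spelling out: the CQ lock is taken first, the QP lock is nested inside it, and each completion node is allocated with GFP_ATOMIC because both spinlocks are held with interrupts disabled. A condensed skeleton of the SQ half (the RQ half mirrors it):

	spin_lock_irqsave(&iwscq->lock, flags1);		/* CQ lock first */
	if (irdma_uk_cq_empty(&iwscq->sc_cq.cq_uk)) {
		spin_lock_irqsave(&iwqp->lock, flags2);		/* QP lock nested */
		/* drain sq_ring: kzalloc(.., GFP_ATOMIC) a cmpl per WQE,
		 * list_add_tail() it onto iwscq->cmpl_generated
		 */
		spin_unlock_irqrestore(&iwqp->lock, flags2);
		spin_unlock_irqrestore(&iwscq->lock, flags1);
		if (compl_generated)
			irdma_comp_handler(iwscq);	/* only after both locks drop */
	} else {
		spin_unlock_irqrestore(&iwscq->lock, flags1);
		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
	}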