Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/dma.c')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c  75
1 file changed, 37 insertions(+), 38 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 1fa7de1d2c45..f240016ed9f0 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
@@ -7,37 +7,6 @@
#include "mt76.h"
#include "dma.h"
-#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
-
-#define Q_READ(_q, _field) ({ \
- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
- u32 _val; \
- if ((_q)->flags & MT_QFLAG_WED) \
- _val = mtk_wed_device_reg_read((_q)->wed, \
- ((_q)->wed_regs + \
- _offset)); \
- else \
- _val = readl(&(_q)->regs->_field); \
- _val; \
-})
-
-#define Q_WRITE(_q, _field, _val) do { \
- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
- if ((_q)->flags & MT_QFLAG_WED) \
- mtk_wed_device_reg_write((_q)->wed, \
- ((_q)->wed_regs + _offset), \
- _val); \
- else \
- writel(_val, &(_q)->regs->_field); \
-} while (0)
-
-#else
-
-#define Q_READ(_q, _field) readl(&(_q)->regs->_field)
-#define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field)
-
-#endif
-
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
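Note that later hunks still call Q_READ()/Q_WRITE(), so the macros removed above are evidently relocated rather than dropped (presumably into a shared header such as dma.h). For reference, a minimal standalone sketch of the dispatch pattern they implement; the type and helper names below are stand-ins, not the mt76/WED definitions:

    #include <stdint.h>
    #include <stddef.h>

    #define QFLAG_WED 0x1      /* stand-in for MT_QFLAG_WED */

    struct queue_regs { uint32_t desc_base, ring_size, cpu_idx, dma_idx; };

    struct queue {
        uint32_t flags;
        uint32_t wed_regs;                /* register window offset on the WED side */
        volatile struct queue_regs *regs; /* directly mapped MMIO registers */
    };

    uint32_t wed_read(uint32_t addr);     /* WED-mediated read, provided elsewhere */

    /* One struct describes both register layouts; offsetof() turns a
     * field name into the WED-side offset, so the two paths cannot
     * drift apart. */
    #define Q_READ(_q, _field) ({                                \
        uint32_t _off = offsetof(struct queue_regs, _field);     \
        ((_q)->flags & QFLAG_WED) ?                              \
            wed_read((_q)->wed_regs + _off) :                    \
            (_q)->regs->_field;                                  \
    })

Callers then read e.g. Q_READ(q, dma_idx) without knowing which bus the ring registers live behind.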
@@ -220,10 +189,15 @@ static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
Q_WRITE(q, desc_base, q->desc_dma);
- if (q->flags & MT_QFLAG_WED_RRO_EN)
+ if ((q->flags & MT_QFLAG_WED_RRO_EN) && !mt76_npu_device_active(dev))
Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
else
Q_WRITE(q, ring_size, q->ndesc);
+
+ if (mt76_queue_is_npu_tx(q)) {
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(q->ndesc, &q->regs->ring_size);
+ }
q->head = Q_READ(q, dma_idx);
q->tail = q->head;
}
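The reprogram-then-resync sequence here is the key shape: after desc_base and ring_size are written (through Q_WRITE(), and additionally via plain writel() for NPU TX rings), the code re-reads dma_idx so the software head/tail adopt the hardware's current position instead of assuming slot zero. A reduced kernel-style sketch of that handshake, with illustrative names rather than the mt76 ones:

    #include <linux/io.h>
    #include <linux/types.h>

    struct toy_ring_regs {
        u32 desc_base;
        u32 ring_size;
        u32 cpu_idx;
        u32 dma_idx;
    };

    /* Sketch: (re)program a ring, then resynchronize the software
     * indices from the hardware's view of the ring. */
    static void toy_ring_sync(struct toy_ring_regs __iomem *regs,
                              u32 desc_dma, u32 ndesc,
                              u32 *head, u32 *tail)
    {
        writel(desc_dma, &regs->desc_base);
        writel(ndesc, &regs->ring_size);
        *head = readl(&regs->dma_idx);
        *tail = *head;
    }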
@@ -235,7 +209,7 @@ void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
return;
if (!mt76_queue_is_wed_rro_ind(q) &&
- !mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ !mt76_queue_is_wed_rro_rxdmad_c(q) && !mt76_queue_is_npu(q)) {
int i;
/* clear descriptors */
@@ -446,6 +420,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
while (q->queued > 0 && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
+ mt76_npu_txdesc_cleanup(q, q->tail);
mt76_queue_tx_complete(dev, q, &entry);
if (entry.txwi) {
@@ -680,6 +655,10 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
if (test_bit(MT76_RESET, &phy->state))
goto free_skb;
+ /* TODO: Take into account non-linear skbs */
+ if (mt76_npu_device_active(dev) && skb_linearize(skb))
+ goto free_skb;
+
t = mt76_get_txwi(dev);
if (!t)
goto free_skb;
@@ -727,6 +706,9 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
if (ret < 0)
goto unmap;
+ if (mt76_npu_device_active(dev))
+ return mt76_npu_dma_add_buf(phy, q, skb, &tx_info.buf[1], txwi);
+
return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
tx_info.info, tx_info.skb, t);
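skb_linearize() returns 0 on success and an errno-style value on failure, so the guard added above reads as "NPU active and linearization failed". Until the TODO about non-linear skbs is addressed, fragmented skbs have to be pulled into the head buffer before the single-buffer NPU hand-off. A self-contained sketch of that guard (the toy_ name is hypothetical):

    #include <linux/skbuff.h>
    #include <linux/errno.h>

    /* Sketch: flatten a fragmented skb for a DMA path that only
     * accepts one contiguous buffer. skb_linearize() is a no-op for
     * skbs that are already linear and fails only when it cannot
     * allocate the larger head buffer. */
    static int toy_tx_flatten(struct sk_buff *skb)
    {
        if (skb_is_nonlinear(skb) && skb_linearize(skb))
            return -ENOMEM;
        return 0;
    }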
@@ -825,9 +807,17 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
q->ndesc = n_desc;
q->buf_size = bufsize;
q->hw_idx = idx;
+ q->dev = dev;
+
- size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
- : sizeof(struct mt76_desc);
+ if (mt76_queue_is_wed_rro_ind(q))
+ size = sizeof(struct mt76_wed_rro_desc);
+ else if (mt76_queue_is_npu_tx(q))
+ size = sizeof(struct airoha_npu_tx_dma_desc);
+ else if (mt76_queue_is_npu_rx(q))
+ size = sizeof(struct airoha_npu_rx_dma_desc);
+ else
+ size = sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
&q->desc_dma, GFP_KERNEL);
if (!q->desc)
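The ring is allocated as a single coherent block of ndesc * size bytes, which is why the element width has to be picked per queue type before the allocation: the Airoha NPU TX/RX descriptors are not the same width as the generic struct mt76_desc. A minimal sketch of the allocation pattern; note that dmam_alloc_coherent() is the device-managed variant, so no explicit free appears on the teardown path:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Sketch: one coherent DMA allocation sized from a per-ring-type
     * element width; devres releases it when the device detaches. */
    static void *toy_alloc_ring(struct device *dma_dev, int ndesc,
                                size_t elem_size, dma_addr_t *dma)
    {
        return dmam_alloc_coherent(dma_dev, ndesc * elem_size,
                                   dma, GFP_KERNEL);
    }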
@@ -843,6 +833,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (ret)
return ret;
+ mt76_npu_queue_setup(dev, q);
ret = mt76_wed_dma_setup(dev, q, false);
if (ret)
return ret;
@@ -870,6 +861,11 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
if (!q->ndesc)
return;
+ if (mt76_queue_is_npu(q)) {
+ mt76_npu_queue_cleanup(dev, q);
+ return;
+ }
+
do {
spin_lock_bh(&q->lock);
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
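The early return added above keeps the generic dequeue-and-free loop away from NPU rings, whose buffers are owned and released by the NPU helper rather than by the mt76 entry bookkeeping; running both teardown paths on one ring would risk a double free or a leak depending on who mapped the buffers. The dispatch skeleton, reduced (all toy_ names hypothetical):

    #include <linux/spinlock.h>

    struct toy_dev;
    struct toy_queue { spinlock_t lock; int ndesc; bool offloaded; };

    /* Provided elsewhere in this sketch. */
    void toy_offload_queue_cleanup(struct toy_dev *dev, struct toy_queue *q);
    void *toy_dequeue(struct toy_dev *dev, struct toy_queue *q);
    void toy_free_buf(struct toy_dev *dev, void *buf);

    /* Sketch: offload-owned rings take their own teardown path; the
     * generic loop only runs for rings the core driver populated. */
    static void toy_rx_cleanup(struct toy_dev *dev, struct toy_queue *q)
    {
        void *buf;

        if (!q->ndesc)
            return;
        if (q->offloaded) {
            toy_offload_queue_cleanup(dev, q);
            return;
        }
        do {
            spin_lock_bh(&q->lock);
            buf = toy_dequeue(dev, q);
            spin_unlock_bh(&q->lock);
            if (buf)
                toy_free_buf(dev, buf);
        } while (buf);
    }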
@@ -900,7 +896,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
return;
if (!mt76_queue_is_wed_rro_ind(q) &&
- !mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ !mt76_queue_is_wed_rro_rxdmad_c(q) && !mt76_queue_is_npu(q)) {
int i;
for (i = 0; i < q->ndesc; i++)
@@ -920,7 +916,10 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
return;
mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill_buf(dev, q, false);
+ if (mt76_queue_is_npu(q))
+ mt76_npu_fill_rx_queue(dev, q);
+ else
+ mt76_dma_rx_fill_buf(dev, q, false);
}
static void