| author | Mateusz Litwin <mateusz.litwin@nokia.com> | 2025-12-18 22:33:05 +0100 |
|---|---|---|
| committer | Mark Brown <broonie@kernel.org> | 2025-12-23 10:58:57 +0000 |
| commit | 5bfbbf0a49ee4b5dcf46a3bfd4cd860d72cc887d | |
| tree | f94c1e0747abdfb53ccb7d189813b869cd93d4a3 | |
| parent | d67396c9d697041b385d70ff2fd59cb07ae167e8 | |
spi: cadence-quadspi: Improve CQSPI_SLOW_SRAM quirk if flash is slow
The CQSPI_SLOW_SRAM quirk on the Stratix10 platform reduces the number of
interrupts, but it also causes timeouts when a small block is read or when
the flash device is slower than, or as fast as, the SRAM read path. Adding
the CQSPI_REG_IRQ_IND_COMP interrupt to the read mask resolves the problem
for small reads, and no longer disabling interrupts mid-transfer resolves
the lost-interrupt issue.
This marginally increases the IRQ count; tests show only a few percent
more interrupts.
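To make the small-read case concrete, here is a minimal, self-contained C sketch (not the driver source; the register bit positions below are assumptions for illustration) of why the completion bit matters on the slow-SRAM path: a read that finishes before the SRAM watermark is reached only raises the completion interrupt, so a mask containing only CQSPI_REG_IRQ_WATERMARK filters it out and the waiter times out.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions, for illustration only; the real definitions
 * are in drivers/spi/spi-cadence-quadspi.c. */
#define CQSPI_REG_IRQ_IND_COMP		(1u << 2)	/* indirect operation complete */
#define CQSPI_REG_IRQ_WATERMARK		(1u << 6)	/* SRAM watermark reached */

/* Read mask used with the CQSPI_SLOW_SRAM quirk after this patch. */
#define CQSPI_IRQ_MASK_RD_SLOW_SRAM	(CQSPI_REG_IRQ_WATERMARK | \
					 CQSPI_REG_IRQ_IND_COMP)

int main(void)
{
	/* A read smaller than the watermark only raises the completion bit. */
	uint32_t irq_status = CQSPI_REG_IRQ_IND_COMP;

	bool old_mask_hit = irq_status & CQSPI_REG_IRQ_WATERMARK;	/* pre-patch filter */
	bool new_mask_hit = irq_status & CQSPI_IRQ_MASK_RD_SLOW_SRAM;	/* post-patch filter */

	/* Prints 0 and 1: the old filter drops the only event for a small read. */
	printf("old mask sees event: %d, new mask sees event: %d\n",
	       old_mask_hit, new_mask_hit);
	return 0;
}
```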
Test:
$ dd if=/dev/mtd0 of=/dev/null bs=1M count=64
Results from the Stratix10 platform with mt25qu02g flash.
FIFO size in all tests: 128
Serviced interrupt counts:
Without CQSPI_SLOW_SRAM quirk: 16 668 850
With CQSPI_SLOW_SRAM quirk: 204 176
With CQSPI_SLOW_SRAM and this commit: 224 528
Signed-off-by: Mateusz Litwin <mateusz.litwin@nokia.com>
Link: https://patch.msgid.link/20251218-cqspi_indirect_read_improve-v2-2-396079972f2a@nokia.com
Signed-off-by: Mark Brown <broonie@kernel.org>
| -rw-r--r-- | drivers/spi/spi-cadence-quadspi.c | 19 |
1 file changed, 5 insertions, 14 deletions
```diff
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 837dd646481f..965b4cea3388 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -300,6 +300,9 @@ struct cqspi_driver_platdata {
 					 CQSPI_REG_IRQ_IND_SRAM_FULL	|	\
 					 CQSPI_REG_IRQ_IND_COMP)
 
+#define CQSPI_IRQ_MASK_RD_SLOW_SRAM	(CQSPI_REG_IRQ_WATERMARK	|	\
+					 CQSPI_REG_IRQ_IND_COMP)
+
 #define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		|	\
 					 CQSPI_REG_IRQ_WATERMARK	|	\
 					 CQSPI_REG_IRQ_UNDERFLOW)
@@ -381,7 +384,7 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
 	else if (!cqspi->slow_sram)
 		irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
 	else
-		irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
+		irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR;
 
 	if (irq_status)
 		complete(&cqspi->transfer_complete);
@@ -757,7 +760,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
 	 */
 	if (use_irq && cqspi->slow_sram)
-		writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+		writel(CQSPI_IRQ_MASK_RD_SLOW_SRAM, reg_base + CQSPI_REG_IRQMASK);
 	else if (use_irq)
 		writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
 	else
@@ -776,13 +779,6 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
 			ret = -ETIMEDOUT;
 
 		/*
-		 * Disable all read interrupts until
-		 * we are out of "bytes to read"
-		 */
-		if (cqspi->slow_sram)
-			writel(0x0, reg_base + CQSPI_REG_IRQMASK);
-
-		/*
 		 * Prevent lost interrupt and race condition by reinitializing early.
 		 * A spurious wakeup and another wait cycle can occur here,
 		 * which is preferable to waiting until timeout if interrupt is lost.
@@ -820,11 +816,6 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
 			remaining -= bytes_to_read;
 			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
 		}
-
-		if (use_irq && remaining > 0) {
-			if (cqspi->slow_sram)
-				writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
-		}
 	}
 
 	/* Check indirect done status */
```
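The lost-interrupt half of the change can be summarized by the loop ordering the patch keeps. The following is a simplified, userspace-compilable sketch under assumed stand-in helpers (not the driver's functions): the IRQ mask is written once before the loop and never cleared mid-transfer, and the completion is reinitialized before the SRAM is drained, so an interrupt raised while data is being copied completes the next wait instead of being lost.

```c
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel primitives, only to make the
 * ordering compilable; they are not the driver's real helpers. */
struct completion { int done; };
static void reinit_completion(struct completion *c) { c->done = 0; }
static int wait_for_completion_timeout(struct completion *c, unsigned long ms)
{
	(void)ms;
	c->done = 1;		/* pretend the watermark/completion IRQ arrived in time */
	return 1;
}
static size_t sram_level_bytes(void) { return 128; }	/* pretend FIFO fill level */

static int indirect_read(struct completion *done, size_t remaining)
{
	/* The IRQ mask (watermark | completion) is written once before this
	 * loop and stays enabled for the whole transfer. */
	while (remaining > 0) {
		if (!wait_for_completion_timeout(done, 10))
			return -1;			/* -ETIMEDOUT in the driver */

		/* Reinit early: an IRQ raised while draining the SRAM below
		 * simply completes it again instead of being lost. */
		reinit_completion(done);

		size_t bytes = sram_level_bytes();
		if (bytes > remaining)
			bytes = remaining;
		/* ...copy 'bytes' out of the SRAM here... */
		remaining -= bytes;

		/* No mid-transfer writes to the IRQ mask register. */
	}
	return 0;
}

int main(void)
{
	struct completion done = { 0 };
	printf("read returned %d\n", indirect_read(&done, 64 * 1024));
	return 0;
}
```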
