Skip to content

Commit 72f5801

Browse files
nxpfrankli authored and vinodkoul committed
dmaengine: fsl-edma: integrate v3 support
Significant alterations have been made to the EDMA v3's register layout. Now, each channel possesses a separate address space, encapsulating all channel-related controls and statuses, including IRQs. There are changes in bit position definitions as well. However, the fundamental control flow remains analogous to the previous versions. EDMA v3 was utilized in imx8qm, imx93, and will be in forthcoming chips. Signed-off-by: Frank Li <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Vinod Koul <[email protected]>
1 parent 6eb439d commit 72f5801

File tree

3 files changed

+453
-23
lines changed

3 files changed

+453
-23
lines changed

drivers/dma/fsl-edma-common.c

Lines changed: 143 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77
#include <linux/module.h>
88
#include <linux/slab.h>
99
#include <linux/dma-mapping.h>
10+
#include <linux/pm_runtime.h>
11+
#include <linux/pm_domain.h>
1012

1113
#include "fsl-edma-common.h"
1214

@@ -66,11 +68,46 @@ void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
6668
spin_unlock(&fsl_chan->vchan.lock);
6769
}
6870

71+
static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
72+
{
73+
u32 val, flags;
74+
75+
flags = fsl_edma_drvflags(fsl_chan);
76+
val = edma_readl_chreg(fsl_chan, ch_sbr);
77+
/* Remote/local swapped wrongly on iMX8 QM Audio edma */
78+
if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
79+
if (!fsl_chan->is_rxchan)
80+
val |= EDMA_V3_CH_SBR_RD;
81+
else
82+
val |= EDMA_V3_CH_SBR_WR;
83+
} else {
84+
if (fsl_chan->is_rxchan)
85+
val |= EDMA_V3_CH_SBR_RD;
86+
else
87+
val |= EDMA_V3_CH_SBR_WR;
88+
}
89+
90+
if (fsl_chan->is_remote)
91+
val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
92+
93+
edma_writel_chreg(fsl_chan, val, ch_sbr);
94+
95+
if (flags & FSL_EDMA_DRV_HAS_CHMUX)
96+
edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
97+
98+
val = edma_readl_chreg(fsl_chan, ch_csr);
99+
val |= EDMA_V3_CH_CSR_ERQ;
100+
edma_writel_chreg(fsl_chan, val, ch_csr);
101+
}
102+
69103
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
70104
{
71105
struct edma_regs *regs = &fsl_chan->edma->regs;
72106
u32 ch = fsl_chan->vchan.chan.chan_id;
73107

108+
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
109+
return fsl_edma3_enable_request(fsl_chan);
110+
74111
if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
75112
edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
76113
edma_writeb(fsl_chan->edma, ch, regs->serq);
@@ -83,11 +120,28 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
83120
}
84121
}
85122

123+
static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
124+
{
125+
u32 val = edma_readl_chreg(fsl_chan, ch_csr);
126+
u32 flags;
127+
128+
flags = fsl_edma_drvflags(fsl_chan);
129+
130+
if (flags & FSL_EDMA_DRV_HAS_CHMUX)
131+
edma_writel_chreg(fsl_chan, 0, ch_mux);
132+
133+
val &= ~EDMA_V3_CH_CSR_ERQ;
134+
edma_writel_chreg(fsl_chan, val, ch_csr);
135+
}
136+
86137
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
87138
{
88139
struct edma_regs *regs = &fsl_chan->edma->regs;
89140
u32 ch = fsl_chan->vchan.chan.chan_id;
90141

142+
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
143+
return fsl_edma3_disable_request(fsl_chan);
144+
91145
if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
92146
edma_writeb(fsl_chan->edma, ch, regs->cerq);
93147
edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
@@ -135,6 +189,9 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
135189
int endian_diff[4] = {3, 1, -1, -3};
136190
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
137191

192+
if (!dmamux_nr)
193+
return;
194+
138195
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
139196
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
140197

@@ -186,6 +243,10 @@ int fsl_edma_terminate_all(struct dma_chan *chan)
186243
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
187244
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
188245
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
246+
247+
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
248+
pm_runtime_allow(fsl_chan->pd_dev);
249+
189250
return 0;
190251
}
191252

@@ -286,12 +347,16 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
286347
enum dma_transfer_direction dir = edesc->dirn;
287348
dma_addr_t cur_addr, dma_addr;
288349
size_t len, size;
350+
u32 nbytes = 0;
289351
int i;
290352

291353
/* calculate the total size in this desc */
292-
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
293-
len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
294-
* le16_to_cpu(edesc->tcd[i].vtcd->biter);
354+
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
355+
nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
356+
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
357+
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
358+
len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
359+
}
295360

296361
if (!in_progress)
297362
return len;
@@ -303,8 +368,12 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
303368

304369
/* figure out the finished and calculate the residue */
305370
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
306-
size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
307-
* le16_to_cpu(edesc->tcd[i].vtcd->biter);
371+
nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
372+
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
373+
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
374+
375+
size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
376+
308377
if (dir == DMA_MEM_TO_DEV)
309378
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
310379
else
@@ -389,12 +458,15 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
389458
}
390459

391460
static inline
392-
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
461+
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
462+
struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
393463
u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
394464
u16 biter, u16 doff, u32 dlast_sga, bool major_int,
395465
bool disable_req, bool enable_sg)
396466
{
467+
struct dma_slave_config *cfg = &fsl_chan->cfg;
397468
u16 csr = 0;
469+
u32 burst;
398470

399471
/*
400472
* eDMA hardware SGs require the TCDs to be stored in little
@@ -409,6 +481,21 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
409481

410482
tcd->soff = cpu_to_le16(soff);
411483

484+
if (fsl_chan->is_multi_fifo) {
485+
/* set mloff to support multiple fifo */
486+
burst = cfg->direction == DMA_DEV_TO_MEM ?
487+
cfg->src_addr_width : cfg->dst_addr_width;
488+
nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
489+
/* enable DMLOE/SMLOE */
490+
if (cfg->direction == DMA_MEM_TO_DEV) {
491+
nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
492+
nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
493+
} else {
494+
nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
495+
nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
496+
}
497+
}
498+
412499
tcd->nbytes = cpu_to_le32(nbytes);
413500
tcd->slast = cpu_to_le32(slast);
414501

@@ -427,6 +514,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
427514
if (enable_sg)
428515
csr |= EDMA_TCD_CSR_E_SG;
429516

517+
if (fsl_chan->is_rxchan)
518+
csr |= EDMA_TCD_CSR_ACTIVE;
519+
520+
if (fsl_chan->is_sw)
521+
csr |= EDMA_TCD_CSR_START;
522+
430523
tcd->csr = cpu_to_le16(csr);
431524
}
432525

@@ -466,6 +559,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
466559
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
467560
struct fsl_edma_desc *fsl_desc;
468561
dma_addr_t dma_buf_next;
562+
bool major_int = true;
469563
int sg_len, i;
470564
u32 src_addr, dst_addr, last_sg, nbytes;
471565
u16 soff, doff, iter;
@@ -509,17 +603,23 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
509603
src_addr = dma_buf_next;
510604
dst_addr = fsl_chan->dma_dev_addr;
511605
soff = fsl_chan->cfg.dst_addr_width;
512-
doff = 0;
513-
} else {
606+
doff = fsl_chan->is_multi_fifo ? 4 : 0;
607+
} else if (direction == DMA_DEV_TO_MEM) {
514608
src_addr = fsl_chan->dma_dev_addr;
515609
dst_addr = dma_buf_next;
516-
soff = 0;
610+
soff = fsl_chan->is_multi_fifo ? 4 : 0;
517611
doff = fsl_chan->cfg.src_addr_width;
612+
} else {
613+
/* DMA_DEV_TO_DEV */
614+
src_addr = fsl_chan->cfg.src_addr;
615+
dst_addr = fsl_chan->cfg.dst_addr;
616+
soff = doff = 0;
617+
major_int = false;
518618
}
519619

520-
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
620+
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
521621
fsl_chan->attr, soff, nbytes, 0, iter,
522-
iter, doff, last_sg, true, false, true);
622+
iter, doff, last_sg, major_int, false, true);
523623
dma_buf_next += period_len;
524624
}
525625

@@ -568,23 +668,51 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
568668
dst_addr = fsl_chan->dma_dev_addr;
569669
soff = fsl_chan->cfg.dst_addr_width;
570670
doff = 0;
571-
} else {
671+
} else if (direction == DMA_DEV_TO_MEM) {
572672
src_addr = fsl_chan->dma_dev_addr;
573673
dst_addr = sg_dma_address(sg);
574674
soff = 0;
575675
doff = fsl_chan->cfg.src_addr_width;
676+
} else {
677+
/* DMA_DEV_TO_DEV */
678+
src_addr = fsl_chan->cfg.src_addr;
679+
dst_addr = fsl_chan->cfg.dst_addr;
680+
soff = 0;
681+
doff = 0;
576682
}
577683

684+
/*
685+
* Choose the suitable burst length if sg_dma_len is not
686+
* multiple of burst length so that the whole transfer length is
687+
* multiple of minor loop(burst length).
688+
*/
689+
if (sg_dma_len(sg) % nbytes) {
690+
u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
691+
u32 burst = (direction == DMA_DEV_TO_MEM) ?
692+
fsl_chan->cfg.src_maxburst :
693+
fsl_chan->cfg.dst_maxburst;
694+
int j;
695+
696+
for (j = burst; j > 1; j--) {
697+
if (!(sg_dma_len(sg) % (j * width))) {
698+
nbytes = j * width;
699+
break;
700+
}
701+
}
702+
/* Set burst size as 1 if there's no suitable one */
703+
if (j == 1)
704+
nbytes = width;
705+
}
578706
iter = sg_dma_len(sg) / nbytes;
579707
if (i < sg_len - 1) {
580708
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
581-
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
709+
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
582710
dst_addr, fsl_chan->attr, soff,
583711
nbytes, 0, iter, iter, doff, last_sg,
584712
false, false, true);
585713
} else {
586714
last_sg = 0;
587-
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
715+
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
588716
dst_addr, fsl_chan->attr, soff,
589717
nbytes, 0, iter, iter, doff, last_sg,
590718
true, true, false);
@@ -609,7 +737,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
609737
fsl_chan->is_sw = true;
610738

611739
/* To match with copy_align and max_seg_size so 1 tcd is enough */
612-
fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
740+
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
613741
fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
614742
32, len, 0, 1, 1, 32, 0, true, true, false);
615743

0 commit comments

Comments (0)