@@ -7,6 +7,8 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
 
 #include "fsl-edma-common.h"
 
@@ -66,11 +68,46 @@ void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
 	spin_unlock(&fsl_chan->vchan.lock);
 }
 
+static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+	u32 val, flags;
+
+	flags = fsl_edma_drvflags(fsl_chan);
+	val = edma_readl_chreg(fsl_chan, ch_sbr);
+	/* Remote/local swapped wrongly on iMX8 QM Audio edma */
+	if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
+		if (!fsl_chan->is_rxchan)
+			val |= EDMA_V3_CH_SBR_RD;
+		else
+			val |= EDMA_V3_CH_SBR_WR;
+	} else {
+		if (fsl_chan->is_rxchan)
+			val |= EDMA_V3_CH_SBR_RD;
+		else
+			val |= EDMA_V3_CH_SBR_WR;
+	}
+
+	if (fsl_chan->is_remote)
+		val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
+
+	edma_writel_chreg(fsl_chan, val, ch_sbr);
+
+	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+		edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+
+	val = edma_readl_chreg(fsl_chan, ch_csr);
+	val |= EDMA_V3_CH_CSR_ERQ;
+	edma_writel_chreg(fsl_chan, val, ch_csr);
+}
+
 static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
 {
 	struct edma_regs *regs = &fsl_chan->edma->regs;
 	u32 ch = fsl_chan->vchan.chan.chan_id;
 
+	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+		return fsl_edma3_enable_request(fsl_chan);
+
 	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
 		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
 		edma_writeb(fsl_chan->edma, ch, regs->serq);
@@ -83,11 +120,28 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
 	}
 }
 
+static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+	u32 val = edma_readl_chreg(fsl_chan, ch_csr);
+	u32 flags;
+
+	flags = fsl_edma_drvflags(fsl_chan);
+
+	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+		edma_writel_chreg(fsl_chan, 0, ch_mux);
+
+	val &= ~EDMA_V3_CH_CSR_ERQ;
+	edma_writel_chreg(fsl_chan, val, ch_csr);
+}
+
 void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
 {
 	struct edma_regs *regs = &fsl_chan->edma->regs;
 	u32 ch = fsl_chan->vchan.chan.chan_id;
 
+	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+		return fsl_edma3_disable_request(fsl_chan);
+
 	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
 		edma_writeb(fsl_chan->edma, ch, regs->cerq);
 		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
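On the v3 "split register" layout flagged by FSL_EDMA_DRV_SPLIT_REG, each channel owns a private register page, so enabling or disabling a hardware request is a read-modify-write of that channel's CSR rather than a write to the engine-wide SERQ/CERQ arrays used by the legacy path above. A minimal standalone sketch of that pattern, with hypothetical CH_CSR offset and bit names (the driver itself reaches the register through its edma_readl_chreg()/edma_writel_chreg() accessors):

#include <linux/io.h>
#include <linux/bits.h>

#define CH_CSR		0x0	/* hypothetical per-channel CSR offset */
#define CH_CSR_ERQ	BIT(0)	/* hypothetical request-enable bit */

/* Illustrative only: toggle the hardware request for one channel. */
static void edma3_set_erq(void __iomem *ch_base, bool enable)
{
	u32 val = readl(ch_base + CH_CSR);

	if (enable)
		val |= CH_CSR_ERQ;
	else
		val &= ~CH_CSR_ERQ;
	writel(val, ch_base + CH_CSR);
}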
@@ -135,6 +189,9 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 	int endian_diff[4] = {3, 1, -1, -3};
 	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
 
+	if (!dmamux_nr)
+		return;
+
 	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
 	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
 
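The new !dmamux_nr early return covers controllers without an external DMAMUX block (drvdata->dmamuxs == 0; presumably the v3 parts, which route requests per channel via the srcid write to ch_mux in fsl_edma3_enable_request() above). Without the guard, the chans_per_mux computation below would divide by zero.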
@@ -186,6 +243,10 @@ int fsl_edma_terminate_all(struct dma_chan *chan)
 	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+
+	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
+		pm_runtime_allow(fsl_chan->pd_dev);
+
 	return 0;
 }
 
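pm_runtime_allow() re-arms runtime suspend for the channel's power-domain device, which implies that FSL_EDMA_DRV_HAS_PD channels pin their domain with a matching pm_runtime_forbid() while a transfer is outstanding; only the release side is visible in this hunk. The generic pairing, as a sketch (the helper name is ours, not the driver's):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Sketch of the assumed forbid/allow pairing, not driver code. */
static void chan_pd_pin(struct device *pd_dev, bool pin)
{
	if (pin)
		pm_runtime_forbid(pd_dev);	/* hold the power domain on */
	else
		pm_runtime_allow(pd_dev);	/* autosuspend may run again */
}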
@@ -286,12 +347,16 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
 	enum dma_transfer_direction dir = edesc->dirn;
 	dma_addr_t cur_addr, dma_addr;
 	size_t len, size;
+	u32 nbytes = 0;
 	int i;
 
 	/* calculate the total size in this desc */
-	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
-		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
-			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
+	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+		len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+	}
 
 	if (!in_progress)
 		return len;
@@ -303,8 +368,12 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
 
 	/* figure out the finished and calculate the residue */
 	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
-		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
-			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
+		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+
+		size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+
 		if (dir == DMA_MEM_TO_DEV)
 			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
 		else
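A worked example of the masking in these two hunks, assuming the v3 TCD nbytes layout implied by the macro names (minor-loop byte count in the low 10 bits, MLOFF in bits 29:10, DMLOE/SMLOE in bits 30/31): once a minor-loop offset is in use, the raw field no longer equals the byte count, so multiplying it by biter unmasked would wildly overstate the length.

#include <stdio.h>
#include <stdint.h>

#define SMLOE		(1u << 31)
#define DMLOE		(1u << 30)
#define MLOFF_NBYTES(x)	((x) & 0x3ffu)	/* low 10 bits, assumed layout */

int main(void)
{
	/* SMLOE set, mloff = -16 (20-bit two's complement in bits 29:10),
	 * 16-byte minor loop, 8 major iterations */
	uint32_t raw = SMLOE | ((-16 & 0xfffff) << 10) | 16;
	uint16_t biter = 8;

	if (raw & (SMLOE | DMLOE))	/* same test as the driver */
		printf("len += %u * %u = %u\n", MLOFF_NBYTES(raw), biter,
		       MLOFF_NBYTES(raw) * biter);	/* 16 * 8 = 128 */
	return 0;
}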
@@ -389,12 +458,15 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
 }
 
 static inline
-void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
+		       struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
 		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
 		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
 		       bool disable_req, bool enable_sg)
 {
+	struct dma_slave_config *cfg = &fsl_chan->cfg;
 	u16 csr = 0;
+	u32 burst;
 
 	/*
 	 * eDMA hardware SGs require the TCDs to be stored in little
@@ -409,6 +481,21 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
 
 	tcd->soff = cpu_to_le16(soff);
 
+	if (fsl_chan->is_multi_fifo) {
+		/* set mloff to support multiple fifo */
+		burst = cfg->direction == DMA_DEV_TO_MEM ?
+				cfg->src_addr_width : cfg->dst_addr_width;
+		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
+		/* enable DMLOE/SMLOE */
+		if (cfg->direction == DMA_MEM_TO_DEV) {
+			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
+			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
+		} else {
+			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
+			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
+		}
+	}
+
 	tcd->nbytes = cpu_to_le32(nbytes);
 	tcd->slast = cpu_to_le32(slast);
 
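To make the mloff arithmetic concrete: the multi-FIFO path programs a negative minor-loop offset of -(burst * 4), where burst here is actually the configured address width. Assuming four adjacent 32-bit FIFO registers (the apparent origin of the literal 4), one minor loop walks across them with a 4-byte soff/doff and the offset then winds the engine's address back for the next minor loop. A toy model of that address progression:

#include <stdio.h>

int main(void)
{
	unsigned int addr = 0x100;		/* first FIFO register */
	int width = 4;				/* 32-bit samples */
	int mloff = -(width * 4);		/* as programmed above: -16 */
	int beat, loop;

	for (loop = 0; loop < 2; loop++) {	/* two minor loops */
		for (beat = 0; beat < 4; beat++) {
			printf("loop %d beat %d -> FIFO @0x%x\n",
			       loop, beat, addr);
			addr += width;		/* doff/soff = 4 */
		}
		addr += mloff;	/* hw applies mloff after each minor loop */
	}
	return 0;
}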
@@ -427,6 +514,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
 	if (enable_sg)
 		csr |= EDMA_TCD_CSR_E_SG;
 
+	if (fsl_chan->is_rxchan)
+		csr |= EDMA_TCD_CSR_ACTIVE;
+
+	if (fsl_chan->is_sw)
+		csr |= EDMA_TCD_CSR_START;
+
 	tcd->csr = cpu_to_le16(csr);
 }
 
@@ -466,6 +559,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
 	struct fsl_edma_desc *fsl_desc;
 	dma_addr_t dma_buf_next;
+	bool major_int = true;
 	int sg_len, i;
 	u32 src_addr, dst_addr, last_sg, nbytes;
 	u16 soff, doff, iter;
@@ -509,17 +603,23 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 			src_addr = dma_buf_next;
 			dst_addr = fsl_chan->dma_dev_addr;
 			soff = fsl_chan->cfg.dst_addr_width;
-			doff = 0;
-		} else {
+			doff = fsl_chan->is_multi_fifo ? 4 : 0;
+		} else if (direction == DMA_DEV_TO_MEM) {
 			src_addr = fsl_chan->dma_dev_addr;
 			dst_addr = dma_buf_next;
-			soff = 0;
+			soff = fsl_chan->is_multi_fifo ? 4 : 0;
 			doff = fsl_chan->cfg.src_addr_width;
+		} else {
+			/* DMA_DEV_TO_DEV */
+			src_addr = fsl_chan->cfg.src_addr;
+			dst_addr = fsl_chan->cfg.dst_addr;
+			soff = doff = 0;
+			major_int = false;
 		}
 
-		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+		fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
 				  fsl_chan->attr, soff, nbytes, 0, iter,
-				  iter, doff, last_sg, true, false, true);
+				  iter, doff, last_sg, major_int, false, true);
 
 		dma_buf_next += period_len;
 	}
 
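In the new DMA_DEV_TO_DEV cyclic case, both addresses come straight from dma_slave_config and neither side increments; major_int is dropped to false, presumably because a pure device-to-device loop leaves no memory buffer for the CPU to service, so a per-period interrupt would be wasted.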
@@ -568,23 +668,51 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 			dst_addr = fsl_chan->dma_dev_addr;
 			soff = fsl_chan->cfg.dst_addr_width;
 			doff = 0;
-		} else {
+		} else if (direction == DMA_DEV_TO_MEM) {
 			src_addr = fsl_chan->dma_dev_addr;
 			dst_addr = sg_dma_address(sg);
 			soff = 0;
 			doff = fsl_chan->cfg.src_addr_width;
+		} else {
+			/* DMA_DEV_TO_DEV */
+			src_addr = fsl_chan->cfg.src_addr;
+			dst_addr = fsl_chan->cfg.dst_addr;
+			soff = 0;
+			doff = 0;
 		}
 
+		/*
+		 * Choose the suitable burst length if sg_dma_len is not
+		 * multiple of burst length so that the whole transfer length is
+		 * multiple of minor loop(burst length).
+		 */
+		if (sg_dma_len(sg) % nbytes) {
+			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
+			u32 burst = (direction == DMA_DEV_TO_MEM) ?
+						fsl_chan->cfg.src_maxburst :
+						fsl_chan->cfg.dst_maxburst;
+			int j;
+
+			for (j = burst; j > 1; j--) {
+				if (!(sg_dma_len(sg) % (j * width))) {
+					nbytes = j * width;
+					break;
+				}
+			}
+			/* Set burst size as 1 if there's no suitable one */
+			if (j == 1)
+				nbytes = width;
+		}
 		iter = sg_dma_len(sg) / nbytes;
 		if (i < sg_len - 1) {
 			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
-			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
 					  dst_addr, fsl_chan->attr, soff,
 					  nbytes, 0, iter, iter, doff, last_sg,
 					  false, false, true);
 		} else {
 			last_sg = 0;
-			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
 					  dst_addr, fsl_chan->attr, soff,
 					  nbytes, 0, iter, iter, doff, last_sg,
 					  true, true, false);
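The burst-fitting loop above can be read in isolation: it picks the largest burst j <= maxburst such that j * width still divides the segment length, so every minor loop moves a whole burst and iter stays an integer; if nothing fits it falls back to single-beat minor loops. A standalone sketch of the same search:

#include <stdio.h>

/* Mirror of the selection above: largest j <= maxburst with
 * len % (j * width) == 0, else a single beat. */
static unsigned int fit_nbytes(unsigned int len, unsigned int width,
			       unsigned int maxburst)
{
	unsigned int j;

	for (j = maxburst; j > 1; j--)
		if (!(len % (j * width)))
			return j * width;
	return width;
}

int main(void)
{
	/* 24 bytes of 4-byte beats, maxburst 8: 8*4 and 7*4 do not
	 * divide 24, 6*4 does, so the minor loop becomes 24 bytes. */
	printf("nbytes = %u\n", fit_nbytes(24, 4, 8));	/* prints 24 */
	return 0;
}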
@@ -609,7 +737,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
 	fsl_chan->is_sw = true;
 
 	/* To match with copy_align and max_seg_size so 1 tcd is enough */
-	fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
+	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
 			  fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
 			  32, len, 0, 1, 1, 32, 0, true, true, false);
 