@@ -414,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
 	dma_cookie_t		cookie;
+	unsigned long		irqflags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 	cookie = dma_cookie_assign(tx);
 
 	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -424,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (list_is_singular(&atchan->xfers_list))
 		at_xdmac_start_xfer(atchan, desc);
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
 	return cookie;
 }
 
@@ -595,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct scatterlist	*sg;
 	int			i;
 	unsigned int		xfer_size = 0;
+	unsigned long		irqflags;
+	struct dma_async_tx_descriptor	*ret = NULL;
 
 	if (!sgl)
 		return NULL;
@@ -610,7 +613,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		 flags);
 
 	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 
 	if (at_xdmac_compute_chan_conf(chan, direction))
 		goto spin_unlock;
@@ -624,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		mem = sg_dma_address(sg);
 		if (unlikely(!len)) {
 			dev_err(chan2dev(chan), "sg data length is zero\n");
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
 			 __func__, i, len, mem);
@@ -635,8 +637,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 
 		/* Linked list descriptor setup. */
@@ -679,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		xfer_size += len;
 	}
 
-	spin_unlock_bh(&atchan->lock);
 
 	first->tx_dma_desc.flags = flags;
 	first->xfer_size = xfer_size;
 	first->direction = direction;
+	ret = &first->tx_dma_desc;
 
-	return &first->tx_dma_desc;
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
+	return ret;
 }
 
 static struct dma_async_tx_descriptor *
@@ -698,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	unsigned int		periods = buf_len / period_len;
 	int			i;
+	unsigned long		irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
 		__func__, &buf_addr, buf_len, period_len,
@@ -719,16 +723,16 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	for (i = 0; i < periods; i++) {
 		struct at_xdmac_desc	*desc = NULL;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, irqflags);
 			return NULL;
 		}
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		dev_dbg(chan2dev(chan),
 			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
 			__func__, desc, &desc->tx_dma_desc.phys);
@@ -802,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 					| AT_XDMAC_CC_SIF(0)
 					| AT_XDMAC_CC_MBSIZE_SIXTEEN
 					| AT_XDMAC_CC_TYPE_MEM_TRAN;
+	unsigned long		irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
 		__func__, &src, &dest, len, flags);
@@ -834,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
@@ -922,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	int			residue;
 	u32			cur_nda, mask, value;
 	u8			dwidth = 0;
+	unsigned long		flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -930,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	if (!txstate)
 		return ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
 
@@ -940,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	if (!desc->active_xfer) {
 		dma_set_residue(txstate, desc->xfer_size);
-		spin_unlock_bh(&atchan->lock);
-		return ret;
+		goto spin_unlock;
 	}
 
 	residue = desc->xfer_size;
@@ -972,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
 
-	spin_unlock_bh(&atchan->lock);
-
 	dma_set_residue(txstate, residue);
 
 	dev_dbg(chan2dev(chan),
 		 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
 		 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
 
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return ret;
 }
 
@@ -1000,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc	*desc;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -1016,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 		at_xdmac_start_xfer(atchan, desc);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1152,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	int ret;
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	ret = at_xdmac_set_slave_config(chan, config);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return ret;
 }
@@ -1166,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
 		return 0;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
 	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
 	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
 		cpu_relax();
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1186,16 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
-	if (!at_xdmac_chan_is_paused(atchan))
+	spin_lock_irqsave(&atchan->lock, flags);
+	if (!at_xdmac_chan_is_paused(atchan)) {
+		spin_unlock_irqrestore(&atchan->lock, flags);
 		return 0;
+	}
 
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1205,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 	struct at_xdmac_desc	*desc, *_desc;
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
 		cpu_relax();
@@ -1218,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 		at_xdmac_remove_xfer(atchan, desc);
 
 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1228,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc	*desc;
 	int			i;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	if (at_xdmac_chan_is_enabled(atchan)) {
 		dev_err(chan2dev(chan),
@@ -1260,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 spin_unlock:
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return i;
 }
 
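Why the _bh to _irqsave/_irqrestore conversion matters: spin_unlock_bh() re-enables bottom halves and may run pending softirqs on the spot, which is not safe in a context that already has hard interrupts disabled. dmaengine client drivers may call entry points such as tx_submit(), the device_prep_*() hooks, device_tx_status() and the pause/resume/terminate callbacks from their own interrupt handlers, so a channel lock shared with those paths has to save and restore the caller's IRQ state instead. Below is a minimal sketch of that pattern; struct my_chan, my_chan_init() and my_chan_touch() are hypothetical names for illustration, not taken from this driver.

#include <linux/spinlock.h>

struct my_chan {
	spinlock_t	lock;	/* protects 'busy' */
	int		busy;
};

static void my_chan_init(struct my_chan *chan)
{
	/* A spinlock must be initialized before first use. */
	spin_lock_init(&chan->lock);
	chan->busy = 0;
}

/* Safe to call from any context, including a hard-IRQ handler. */
static void my_chan_touch(struct my_chan *chan)
{
	unsigned long flags;

	/* Save the caller's IRQ state, disable local IRQs, take the lock. */
	spin_lock_irqsave(&chan->lock, flags);
	chan->busy = 1;
	/* Drop the lock and restore exactly the IRQ state the caller had. */
	spin_unlock_irqrestore(&chan->lock, flags);
}

Two smaller fixes are visible in the same diff: the error paths of at_xdmac_prep_slave_sg() and at_xdmac_tx_status() are funneled through a single spin_unlock: label so the lock is released at exactly one exit point, and at_xdmac_device_resume() gains the unlock that its old early "return 0" skipped, closing a lock leak when the channel was not paused.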