Thanks to visit codestin.com
Credit goes to github.com

Skip to content

Commit a5ae9b5

Browse files
ldesroches authored and
gregkh committed
dmaengine: at_xdmac: lock fixes
commit 4c374fc upstream. Using _bh variant for spin locks causes this kind of warning: Starting logging: ------------[ cut here ]------------ WARNING: CPU: 0 PID: 3 at /ssd_drive/linux/kernel/softirq.c:151 __local_bh_enable_ip+0xe8/0xf4() Modules linked in: CPU: 0 PID: 3 Comm: ksoftirqd/0 Not tainted 4.1.0-rc2+ #94 Hardware name: Atmel SAMA5 [<c0013c04>] (unwind_backtrace) from [<c00118a4>] (show_stack+0x10/0x14) [<c00118a4>] (show_stack) from [<c001bbcc>] (warn_slowpath_common+0x80/0xac) [<c001bbcc>] (warn_slowpath_common) from [<c001bc14>] (warn_slowpath_null+0x1c/0x24) [<c001bc14>] (warn_slowpath_null) from [<c001e28c>] (__local_bh_enable_ip+0xe8/0xf4) [<c001e28c>] (__local_bh_enable_ip) from [<c01fdbd0>] (at_xdmac_device_terminate_all+0xf4/0x100) [<c01fdbd0>] (at_xdmac_device_terminate_all) from [<c02221a4>] (atmel_complete_tx_dma+0x34/0xf4) [<c02221a4>] (atmel_complete_tx_dma) from [<c01fe4ac>] (at_xdmac_tasklet+0x14c/0x1ac) [<c01fe4ac>] (at_xdmac_tasklet) from [<c001de58>] (tasklet_action+0x68/0xb4) [<c001de58>] (tasklet_action) from [<c001dfdc>] (__do_softirq+0xfc/0x238) [<c001dfdc>] (__do_softirq) from [<c001e140>] (run_ksoftirqd+0x28/0x34) [<c001e140>] (run_ksoftirqd) from [<c0033a3c>] (smpboot_thread_fn+0x138/0x18c) [<c0033a3c>] (smpboot_thread_fn) from [<c0030e7c>] (kthread+0xdc/0xf0) [<c0030e7c>] (kthread) from [<c000f480>] (ret_from_fork+0x14/0x34) ---[ end trace b57b14a99c1d8812 ]--- It comes from the fact that devices can call some code from the DMA controller with irq disabled. _bh variant is not intended to be used in this case since it can enable irqs. Switch to irqsave/irqrestore variant to avoid this situation. Signed-off-by: Ludovic Desroches <[email protected]> Signed-off-by: Vinod Koul <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 9bbdcea commit a5ae9b5

File tree

1 file changed

+45
-32
lines changed

1 file changed

+45
-32
lines changed

drivers/dma/at_xdmac.c

Lines changed: 45 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -414,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
414414
struct at_xdmac_desc *desc = txd_to_at_desc(tx);
415415
struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
416416
dma_cookie_t cookie;
417+
unsigned long irqflags;
417418

418-
spin_lock_bh(&atchan->lock);
419+
spin_lock_irqsave(&atchan->lock, irqflags);
419420
cookie = dma_cookie_assign(tx);
420421

421422
dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -424,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
424425
if (list_is_singular(&atchan->xfers_list))
425426
at_xdmac_start_xfer(atchan, desc);
426427

427-
spin_unlock_bh(&atchan->lock);
428+
spin_unlock_irqrestore(&atchan->lock, irqflags);
428429
return cookie;
429430
}
430431

@@ -595,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
595596
struct scatterlist *sg;
596597
int i;
597598
unsigned int xfer_size = 0;
599+
unsigned long irqflags;
600+
struct dma_async_tx_descriptor *ret = NULL;
598601

599602
if (!sgl)
600603
return NULL;
@@ -610,7 +613,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
610613
flags);
611614

612615
/* Protect dma_sconfig field that can be modified by set_slave_conf. */
613-
spin_lock_bh(&atchan->lock);
616+
spin_lock_irqsave(&atchan->lock, irqflags);
614617

615618
if (at_xdmac_compute_chan_conf(chan, direction))
616619
goto spin_unlock;
@@ -624,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
624627
mem = sg_dma_address(sg);
625628
if (unlikely(!len)) {
626629
dev_err(chan2dev(chan), "sg data length is zero\n");
627-
spin_unlock_bh(&atchan->lock);
628-
return NULL;
630+
goto spin_unlock;
629631
}
630632
dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
631633
__func__, i, len, mem);
@@ -635,8 +637,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
635637
dev_err(chan2dev(chan), "can't get descriptor\n");
636638
if (first)
637639
list_splice_init(&first->descs_list, &atchan->free_descs_list);
638-
spin_unlock_bh(&atchan->lock);
639-
return NULL;
640+
goto spin_unlock;
640641
}
641642

642643
/* Linked list descriptor setup. */
@@ -679,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
679680
xfer_size += len;
680681
}
681682

682-
spin_unlock_bh(&atchan->lock);
683683

684684
first->tx_dma_desc.flags = flags;
685685
first->xfer_size = xfer_size;
686686
first->direction = direction;
687+
ret = &first->tx_dma_desc;
687688

688-
return &first->tx_dma_desc;
689+
spin_unlock:
690+
spin_unlock_irqrestore(&atchan->lock, irqflags);
691+
return ret;
689692
}
690693

691694
static struct dma_async_tx_descriptor *
@@ -698,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
698701
struct at_xdmac_desc *first = NULL, *prev = NULL;
699702
unsigned int periods = buf_len / period_len;
700703
int i;
704+
unsigned long irqflags;
701705

702706
dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
703707
__func__, &buf_addr, buf_len, period_len,
@@ -719,16 +723,16 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
719723
for (i = 0; i < periods; i++) {
720724
struct at_xdmac_desc *desc = NULL;
721725

722-
spin_lock_bh(&atchan->lock);
726+
spin_lock_irqsave(&atchan->lock, irqflags);
723727
desc = at_xdmac_get_desc(atchan);
724728
if (!desc) {
725729
dev_err(chan2dev(chan), "can't get descriptor\n");
726730
if (first)
727731
list_splice_init(&first->descs_list, &atchan->free_descs_list);
728-
spin_unlock_bh(&atchan->lock);
732+
spin_unlock_irqrestore(&atchan->lock, irqflags);
729733
return NULL;
730734
}
731-
spin_unlock_bh(&atchan->lock);
735+
spin_unlock_irqrestore(&atchan->lock, irqflags);
732736
dev_dbg(chan2dev(chan),
733737
"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
734738
__func__, desc, &desc->tx_dma_desc.phys);
@@ -802,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
802806
| AT_XDMAC_CC_SIF(0)
803807
| AT_XDMAC_CC_MBSIZE_SIXTEEN
804808
| AT_XDMAC_CC_TYPE_MEM_TRAN;
809+
unsigned long irqflags;
805810

806811
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
807812
__func__, &src, &dest, len, flags);
@@ -834,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
834839

835840
dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
836841

837-
spin_lock_bh(&atchan->lock);
842+
spin_lock_irqsave(&atchan->lock, irqflags);
838843
desc = at_xdmac_get_desc(atchan);
839-
spin_unlock_bh(&atchan->lock);
844+
spin_unlock_irqrestore(&atchan->lock, irqflags);
840845
if (!desc) {
841846
dev_err(chan2dev(chan), "can't get descriptor\n");
842847
if (first)
@@ -922,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
922927
int residue;
923928
u32 cur_nda, mask, value;
924929
u8 dwidth = 0;
930+
unsigned long flags;
925931

926932
ret = dma_cookie_status(chan, cookie, txstate);
927933
if (ret == DMA_COMPLETE)
@@ -930,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
930936
if (!txstate)
931937
return ret;
932938

933-
spin_lock_bh(&atchan->lock);
939+
spin_lock_irqsave(&atchan->lock, flags);
934940

935941
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
936942

@@ -940,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
940946
*/
941947
if (!desc->active_xfer) {
942948
dma_set_residue(txstate, desc->xfer_size);
943-
spin_unlock_bh(&atchan->lock);
944-
return ret;
949+
goto spin_unlock;
945950
}
946951

947952
residue = desc->xfer_size;
@@ -972,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
972977
}
973978
residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
974979

975-
spin_unlock_bh(&atchan->lock);
976-
977980
dma_set_residue(txstate, residue);
978981

979982
dev_dbg(chan2dev(chan),
980983
"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
981984
__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
982985

986+
spin_unlock:
987+
spin_unlock_irqrestore(&atchan->lock, flags);
983988
return ret;
984989
}
985990

@@ -1000,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
10001005
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
10011006
{
10021007
struct at_xdmac_desc *desc;
1008+
unsigned long flags;
10031009

1004-
spin_lock_bh(&atchan->lock);
1010+
spin_lock_irqsave(&atchan->lock, flags);
10051011

10061012
/*
10071013
* If channel is enabled, do nothing, advance_work will be triggered
@@ -1016,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
10161022
at_xdmac_start_xfer(atchan, desc);
10171023
}
10181024

1019-
spin_unlock_bh(&atchan->lock);
1025+
spin_unlock_irqrestore(&atchan->lock, flags);
10201026
}
10211027

10221028
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1152,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
11521158
{
11531159
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
11541160
int ret;
1161+
unsigned long flags;
11551162

11561163
dev_dbg(chan2dev(chan), "%s\n", __func__);
11571164

1158-
spin_lock_bh(&atchan->lock);
1165+
spin_lock_irqsave(&atchan->lock, flags);
11591166
ret = at_xdmac_set_slave_config(chan, config);
1160-
spin_unlock_bh(&atchan->lock);
1167+
spin_unlock_irqrestore(&atchan->lock, flags);
11611168

11621169
return ret;
11631170
}
@@ -1166,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
11661173
{
11671174
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
11681175
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1176+
unsigned long flags;
11691177

11701178
dev_dbg(chan2dev(chan), "%s\n", __func__);
11711179

11721180
if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
11731181
return 0;
11741182

1175-
spin_lock_bh(&atchan->lock);
1183+
spin_lock_irqsave(&atchan->lock, flags);
11761184
at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
11771185
while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
11781186
& (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
11791187
cpu_relax();
1180-
spin_unlock_bh(&atchan->lock);
1188+
spin_unlock_irqrestore(&atchan->lock, flags);
11811189

11821190
return 0;
11831191
}
@@ -1186,16 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
11861194
{
11871195
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
11881196
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1197+
unsigned long flags;
11891198

11901199
dev_dbg(chan2dev(chan), "%s\n", __func__);
11911200

1192-
spin_lock_bh(&atchan->lock);
1193-
if (!at_xdmac_chan_is_paused(atchan))
1201+
spin_lock_irqsave(&atchan->lock, flags);
1202+
if (!at_xdmac_chan_is_paused(atchan)) {
1203+
spin_unlock_irqrestore(&atchan->lock, flags);
11941204
return 0;
1205+
}
11951206

11961207
at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
11971208
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1198-
spin_unlock_bh(&atchan->lock);
1209+
spin_unlock_irqrestore(&atchan->lock, flags);
11991210

12001211
return 0;
12011212
}
@@ -1205,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
12051216
struct at_xdmac_desc *desc, *_desc;
12061217
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
12071218
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1219+
unsigned long flags;
12081220

12091221
dev_dbg(chan2dev(chan), "%s\n", __func__);
12101222

1211-
spin_lock_bh(&atchan->lock);
1223+
spin_lock_irqsave(&atchan->lock, flags);
12121224
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
12131225
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
12141226
cpu_relax();
@@ -1218,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
12181230
at_xdmac_remove_xfer(atchan, desc);
12191231

12201232
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1221-
spin_unlock_bh(&atchan->lock);
1233+
spin_unlock_irqrestore(&atchan->lock, flags);
12221234

12231235
return 0;
12241236
}
@@ -1228,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
12281240
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
12291241
struct at_xdmac_desc *desc;
12301242
int i;
1243+
unsigned long flags;
12311244

1232-
spin_lock_bh(&atchan->lock);
1245+
spin_lock_irqsave(&atchan->lock, flags);
12331246

12341247
if (at_xdmac_chan_is_enabled(atchan)) {
12351248
dev_err(chan2dev(chan),
@@ -1260,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
12601273
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
12611274

12621275
spin_unlock:
1263-
spin_unlock_bh(&atchan->lock);
1276+
spin_unlock_irqrestore(&atchan->lock, flags);
12641277
return i;
12651278
}
12661279

0 commit comments

Comments (0)