@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 		size_t size)
 {
@@ -154,7 +168,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 {
 	struct page *page;
 	void *ret;
-	int err;
 
 	size = PAGE_ALIGN(size);
 	if (attrs & DMA_ATTR_NO_WARN)
@@ -216,12 +229,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		if (force_dma_unencrypted(dev)) {
-			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
-			if (err)
-				goto out_free_pages;
-		}
+		if (dma_set_decrypted(dev, ret, size))
+			goto out_free_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -238,13 +247,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
-		if (err)
-			goto out_free_pages;
-	}
-
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -259,13 +263,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
-		/* If memory cannot be re-encrypted, it must be leaked */
-		if (err)
-			return NULL;
-	}
+	/* If memory cannot be re-encrypted, it must be leaked */
+	if (dma_set_encrypted(dev, page_address(page), size))
+		return NULL;
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
@@ -304,8 +304,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -341,11 +340,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-				1 << get_order(size)))
-			goto out_free_pages;
-	}
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
@@ -366,9 +362,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+	dma_set_encrypted(dev, vaddr, 1 << page_order);
 	__dma_direct_free_pages(dev, page, size);
 }
 
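For readers following the diff outside the kernel tree, here is a minimal, self-contained sketch of the pattern the two new helpers capture: do nothing unless force_dma_unencrypted() says the device needs unencrypted buffers, otherwise flip the memory-encryption attribute for the whole allocation order, and let callers treat a failure like an allocation failure. Everything the helpers call here (struct device, force_dma_unencrypted(), set_memory_*(), get_order(), and the alloc_example()/main() caller) is a userspace stand-in invented for the sketch, not kernel code; only the shape of dma_set_decrypted()/dma_set_encrypted() mirrors the commit.

/* Userspace sketch of the dma_set_{de,en}crypted() pattern from this commit.
 * All callees are stubs so the file compiles on its own. */
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct device { bool force_unencrypted; };	/* stand-in, not the kernel struct */

static bool force_dma_unencrypted(struct device *dev)	/* stub */
{
	return dev->force_unencrypted;
}

static int set_memory_decrypted(unsigned long addr, int numpages)	/* stub */
{
	(void)addr; (void)numpages;
	return 0;
}

static int set_memory_encrypted(unsigned long addr, int numpages)	/* stub */
{
	(void)addr; (void)numpages;
	return 0;
}

static int get_order(size_t size)	/* stub: order of 4K pages covering size */
{
	int order = 0;
	size_t span = 4096;

	while (span < size) {
		span <<= 1;
		order++;
	}
	return order;
}

/* The helpers, shaped as in the diff above: no-ops unless the device requires
 * unencrypted DMA memory, otherwise change the attribute for 2^order pages. */
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
}

/* Hypothetical caller showing the usage the diff converts the allocators to:
 * decrypt right after allocating, treat a decryption failure like an
 * allocation failure, and re-encrypt before freeing. */
static void *alloc_example(struct device *dev, size_t size)
{
	void *ret;

	size = (size + 4095) & ~(size_t)4095;	/* mirrors PAGE_ALIGN(size) */
	ret = aligned_alloc(4096, size);	/* stands in for the page allocator */
	if (!ret)
		return NULL;
	if (dma_set_decrypted(dev, ret, size)) {
		free(ret);
		return NULL;
	}
	memset(ret, 0, size);
	return ret;
}

int main(void)
{
	struct device dev = { .force_unencrypted = true };
	void *buf = alloc_example(&dev, 8192);

	if (buf) {
		dma_set_encrypted(&dev, buf, 8192);	/* re-encrypt before freeing */
		free(buf);
	}
	return 0;
}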