@@ -62,7 +62,7 @@ pub struct PMP<const MAX_AVAILABLE_REGIONS_OVER_TWO: usize> {
62
62
/// This is a 64-bit mask of locked regions.
63
63
/// Each bit that is set in this mask indicates that the region is locked
64
64
/// and cannot be used by Tock.
65
- locked_region_mask : u64 ,
65
+ locked_region_mask : Cell < u64 > ,
66
66
/// This is the total number of available regions.
67
67
/// This will be between 0 and MAX_AVAILABLE_REGIONS_OVER_TWO * 2 depending
68
68
/// on the hardware and previous boot stages.
@@ -115,7 +115,7 @@ impl<const MAX_AVAILABLE_REGIONS_OVER_TWO: usize> PMP<MAX_AVAILABLE_REGIONS_OVER
115
115
Self {
116
116
last_configured_for : MapCell :: empty ( ) ,
117
117
num_regions,
118
- locked_region_mask,
118
+ locked_region_mask : Cell :: new ( locked_region_mask ) ,
119
119
}
120
120
}
121
121
}
@@ -244,6 +244,7 @@ impl<const MAX_AVAILABLE_REGIONS_OVER_TWO: usize> fmt::Display
244
244
}
245
245
246
246
impl < const MAX_AVAILABLE_REGIONS_OVER_TWO : usize > PMPConfig < MAX_AVAILABLE_REGIONS_OVER_TWO > {
247
+ /// Get the first unused region
247
248
fn unused_region_number ( & self , locked_region_mask : u64 ) -> Option < usize > {
248
249
for ( number, region) in self . regions . iter ( ) . enumerate ( ) {
249
250
if self . app_memory_region . contains ( & number) {
@@ -259,6 +260,26 @@ impl<const MAX_AVAILABLE_REGIONS_OVER_TWO: usize> PMPConfig<MAX_AVAILABLE_REGION
259
260
}
260
261
None
261
262
}
263
+
264
+ /// Get the last unused region
265
+ /// The app regions need to be lower then the kernel to ensure they
266
+ /// match before the kernel ones.
267
+ fn unused_kernel_region_number ( & self , locked_region_mask : u64 ) -> Option < usize > {
268
+ for ( num, region) in self . regions . iter ( ) . rev ( ) . enumerate ( ) {
269
+ let number = MAX_AVAILABLE_REGIONS_OVER_TWO - num - 1 ;
270
+ if self . app_memory_region . contains ( & number) {
271
+ continue ;
272
+ }
273
+ // This region exists, but is locked
274
+ if locked_region_mask & ( 1 << number) > 0 {
275
+ continue ;
276
+ }
277
+ if region. is_none ( ) {
278
+ return Some ( number) ;
279
+ }
280
+ }
281
+ None
282
+ }
262
283
}
263
284
264
285
impl < const MAX_AVAILABLE_REGIONS_OVER_TWO : usize > kernel:: mpu:: MPU
@@ -357,7 +378,7 @@ impl<const MAX_AVAILABLE_REGIONS_OVER_TWO: usize> kernel::mpu::MPU
357
378
}
358
379
}
359
380
360
- let region_num = config. unused_region_number ( self . locked_region_mask ) ?;
381
+ let region_num = config. unused_region_number ( self . locked_region_mask . get ( ) ) ?;
361
382
362
383
// Logical region
363
384
let mut start = unallocated_memory_start as usize ;
@@ -411,7 +432,7 @@ impl<const MAX_AVAILABLE_REGIONS_OVER_TWO: usize> kernel::mpu::MPU
411
432
let region_num = if config. app_memory_region . is_some ( ) {
412
433
config. app_memory_region . unwrap_or ( 0 )
413
434
} else {
414
- config. unused_region_number ( self . locked_region_mask ) ?
435
+ config. unused_region_number ( self . locked_region_mask . get ( ) ) ?
415
436
} ;
416
437
417
438
// App memory size is what we actual set the region to. So this region
@@ -556,3 +577,120 @@ impl<const MAX_AVAILABLE_REGIONS_OVER_TWO: usize> kernel::mpu::MPU
556
577
}
557
578
}
558
579
}
580
/// This is PMP support for kernel regions.
/// PMP does not allow a deny-by-default option, so all regions not marked
/// with the below commands will have full access.
/// This is still a useful implementation as it can be used to limit the
/// kernel's access, for example removing execute permission from regions
/// we don't need to execute from and removing write permissions from
/// executable regions.
impl<const MAX_AVAILABLE_REGIONS_OVER_TWO: usize> kernel::mpu::KernelMPU
    for PMP<MAX_AVAILABLE_REGIONS_OVER_TWO>
{
    type KernelMpuConfig = PMPConfig<MAX_AVAILABLE_REGIONS_OVER_TWO>;

    /// Allocate a kernel PMP region covering `memory_start..memory_start + memory_size`
    /// with `permissions`, recording it in `config`.
    ///
    /// Returns `None` if the range overlaps an already-allocated region or if
    /// no unused, unlocked region slot remains. On success the chosen region
    /// number is added to `locked_region_mask` so the app-facing MPU code
    /// will not reuse it.
    fn allocate_kernel_region(
        &self,
        memory_start: *const u8,
        memory_size: usize,
        permissions: mpu::Permissions,
        config: &mut Self::KernelMpuConfig,
    ) -> Option<mpu::Region> {
        // Reject the request if it overlaps any existing region in this config.
        for region in config.regions.iter() {
            if region.is_some() {
                if region.unwrap().overlaps(memory_start, memory_size) {
                    return None;
                }
            }
        }

        // Kernel regions are taken from the top of the region space (see
        // `unused_kernel_region_number`) so app regions match first.
        let region_num = config.unused_kernel_region_number(self.locked_region_mask.get())?;

        // Logical region
        let mut start = memory_start as usize;
        let mut size = memory_size;

        // Region start always has to align to 4 bytes.
        // NOTE(review): rounding `start` UP can leave the first
        // `start % 4` bytes outside the region — confirm callers always
        // pass 4-byte-aligned start addresses.
        if start % 4 != 0 {
            start += 4 - (start % 4);
        }

        // Region size always has to align to 4 bytes
        if size % 4 != 0 {
            size += 4 - (size % 4);
        }

        // Regions must be at least 8 bytes
        if size < 8 {
            size = 8;
        }

        let region = PMPRegion::new(start as *const u8, size, permissions);

        config.regions[region_num] = Some(region);

        // Mark the region as locked so that the app PMP doesn't use it.
        let mut mask = self.locked_region_mask.get();
        mask |= 1 << region_num;
        self.locked_region_mask.set(mask);

        Some(mpu::Region::new(start as *const u8, size))
    }

    /// Program every allocated region in `config` into the PMP CSRs and lock it.
    ///
    /// Each logical region `x` consumes two PMP entries: entry `2x` holds the
    /// start address with its address-matching field cleared, and entry `2x + 1`
    /// holds the end address together with the region's permission/config byte
    /// (presumably TOR matching — the mode lives in `r.cfg`, not visible here;
    /// verify against `PMPRegion::new`). Four config bytes share one `pmpcfg`
    /// register, so even `x` uses byte fields 0/1 (shift 8) and odd `x` uses
    /// fields 2/3 (shift 24) of register `x / 2`.
    fn enable_kernel_mpu(&self, config: &mut Self::KernelMpuConfig) {
        // Iterate from the highest region down, matching the allocation order
        // used by `unused_kernel_region_number`.
        for (i, region) in config.regions.iter().rev().enumerate() {
            let x = MAX_AVAILABLE_REGIONS_OVER_TWO - i - 1;
            match region {
                Some(r) => {
                    let cfg_val = r.cfg.value as usize;
                    let start = r.location.0 as usize;
                    let size = r.location.1;

                    match x % 2 {
                        0 => {
                            // pmpaddr holds address >> 2 per the RISC-V spec.
                            csr::CSR.pmpaddr_set((x * 2) + 1, (start + size) >> 2);
                            // Disable access up to the start address
                            csr::CSR.pmpconfig_modify(
                                x / 2,
                                csr::pmpconfig::pmpcfg::r0::CLEAR
                                    + csr::pmpconfig::pmpcfg::w0::CLEAR
                                    + csr::pmpconfig::pmpcfg::x0::CLEAR
                                    + csr::pmpconfig::pmpcfg::a0::CLEAR,
                            );
                            csr::CSR.pmpaddr_set(x * 2, start >> 2);

                            // Set access to end address (OR-ed in; field 1 is
                            // assumed still zero for an unused region).
                            csr::CSR
                                .pmpconfig_set(x / 2, cfg_val << 8 | csr::CSR.pmpconfig_get(x / 2));
                            // Lock the CSR.
                            // NOTE(review): only the upper entry (field 1) is
                            // locked; the start-address entry (field 0) is left
                            // unlocked — confirm this is intended.
                            csr::CSR.pmpconfig_modify(x / 2, csr::pmpconfig::pmpcfg::l1::SET);
                        }
                        1 => {
                            csr::CSR.pmpaddr_set((x * 2) + 1, (start + size) >> 2);
                            // Disable access up to the start address
                            csr::CSR.pmpconfig_modify(
                                x / 2,
                                csr::pmpconfig::pmpcfg::r2::CLEAR
                                    + csr::pmpconfig::pmpcfg::w2::CLEAR
                                    + csr::pmpconfig::pmpcfg::x2::CLEAR
                                    + csr::pmpconfig::pmpcfg::a2::CLEAR,
                            );
                            csr::CSR.pmpaddr_set(x * 2, start >> 2);

                            // Set access to end address
                            csr::CSR.pmpconfig_set(
                                x / 2,
                                cfg_val << 24 | csr::CSR.pmpconfig_get(x / 2),
                            );
                            // Lock the CSR (upper entry only, as above).
                            csr::CSR.pmpconfig_modify(x / 2, csr::pmpconfig::pmpcfg::l3::SET);
                        }
                        // `x % 2` can only be 0 or 1; this arm is unreachable.
                        _ => break,
                    }
                }
                None => {}
            };
        }
    }
}
0 commit comments