@@ -3598,6 +3598,165 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 	return ret;
 }
 
+static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+	unsigned int reserved_blocks = 0;
+	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+	block_t blkaddr;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		blkaddr = data_blkaddr(dn->inode, dn->node_page,
+					dn->ofs_in_node + i);
+
+		if (!__is_valid_data_blkaddr(blkaddr))
+			continue;
+		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
+					DATA_GENERIC_ENHANCE)))
+			return -EFSCORRUPTED;
+	}
+
+	while (count) {
+		int compr_blocks = 0;
+		blkcnt_t reserved;
+		int ret;
+
+		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+			blkaddr = f2fs_data_blkaddr(dn);
+
+			if (i == 0) {
+				if (blkaddr == COMPRESS_ADDR)
+					continue;
+				dn->ofs_in_node += cluster_size;
+				goto next;
+			}
+
+			if (__is_valid_data_blkaddr(blkaddr)) {
+				compr_blocks++;
+				continue;
+			}
+
+			dn->data_blkaddr = NEW_ADDR;
+			f2fs_set_data_blkaddr(dn);
+		}
+
+		reserved = cluster_size - compr_blocks;
+		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
+		if (ret)
+			return ret;
+
+		if (reserved != cluster_size - compr_blocks)
+			return -ENOSPC;
+
+		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
+
+		reserved_blocks += reserved;
+next:
+		count -= cluster_size;
+	}
+
+	return reserved_blocks;
+}
+
+static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+{
+	struct inode *inode = file_inode(filp);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	pgoff_t page_idx = 0, last_idx;
+	unsigned int reserved_blocks = 0;
+	int ret;
+
+	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+		return -EOPNOTSUPP;
+
+	if (!f2fs_compressed_file(inode))
+		return -EINVAL;
+
+	if (f2fs_readonly(sbi->sb))
+		return -EROFS;
+
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
+	if (F2FS_I(inode)->i_compr_blocks)
+		goto out;
+
+	f2fs_balance_fs(F2FS_I_SB(inode), true);
+
+	inode_lock(inode);
+
+	if (!IS_IMMUTABLE(inode)) {
+		ret = -EINVAL;
+		goto unlock_inode;
+	}
+
+	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	down_write(&F2FS_I(inode)->i_mmap_sem);
+
+	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+
+	while (page_idx < last_idx) {
+		struct dnode_of_data dn;
+		pgoff_t end_offset, count;
+
+		set_new_dnode(&dn, inode, NULL, NULL, 0);
+		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+		if (ret) {
+			if (ret == -ENOENT) {
+				page_idx = f2fs_get_next_page_offset(&dn,
+								page_idx);
+				ret = 0;
+				continue;
+			}
+			break;
+		}
+
+		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
+		count = roundup(count, F2FS_I(inode)->i_cluster_size);
+
+		ret = reserve_compress_blocks(&dn, count);
+
+		f2fs_put_dnode(&dn);
+
+		if (ret < 0)
+			break;
+
+		page_idx += count;
+		reserved_blocks += ret;
+	}
+
+	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	up_write(&F2FS_I(inode)->i_mmap_sem);
+
+	if (ret >= 0) {
+		F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
+		f2fs_set_inode_flags(inode);
+		inode->i_ctime = current_time(inode);
+		f2fs_mark_inode_dirty_sync(inode, true);
+	}
+unlock_inode:
+	inode_unlock(inode);
+out:
+	mnt_drop_write_file(filp);
+
+	if (ret >= 0) {
+		ret = put_user(reserved_blocks, (u64 __user *)arg);
+	} else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
+			"iblocks=%llu, reserved=%u, compr_blocks=%llu, "
+			"run fsck to fix.",
+			__func__, inode->i_ino, inode->i_blocks,
+			reserved_blocks,
+			F2FS_I(inode)->i_compr_blocks);
+	}
+
+	return ret;
+}
+
 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@ -3682,6 +3841,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return f2fs_get_compress_blocks(filp, arg);
 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
 		return f2fs_release_compress_blocks(filp, arg);
+	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
+		return f2fs_reserve_compress_blocks(filp, arg);
 	default:
 		return -ENOTTY;
 	}
@@ -3850,6 +4011,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case F2FS_IOC_SET_VOLUME_NAME:
 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
+	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
 		break;
 	default:
 		return -ENOIOCTLCMD;
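
For reference, a minimal userspace sketch of exercising the new ioctl follows. It is not part of the patch: the include of <linux/f2fs.h> assumes a kernel that exports the compress-block ioctls through that uapi header; on trees where this patch first landed, the definitions live in fs/f2fs/f2fs.h and would have to be mirrored locally. Per the diff above, the expected flow is that the file's saved compressed blocks were first released with F2FS_IOC_RELEASE_COMPRESS_BLOCKS (which marks the file immutable); F2FS_IOC_RESERVE_COMPRESS_BLOCKS then re-reserves them, clears the immutable flag, and reports the number of reserved blocks through the u64 the argument points to.

/*
 * Hypothetical userspace sketch, not from the patch. Assumes the
 * compress-block ioctl definitions are available via <linux/f2fs.h>.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>		/* assumed location of the ioctl definitions */

int main(int argc, char **argv)
{
	unsigned long long blocks = 0;
	int fd, ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <compressed-file>\n", argv[0]);
		return 1;
	}

	/*
	 * Open read-only: a file whose blocks were released is immutable,
	 * so a write open would fail before the ioctl is ever issued.
	 */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * The target must be a compressed f2fs file whose blocks were
	 * previously released (immutable flag set); otherwise the kernel
	 * side above returns -EINVAL.
	 */
	ret = ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks);
	if (ret < 0)
		perror("F2FS_IOC_RESERVE_COMPRESS_BLOCKS");
	else
		printf("re-reserved %llu blocks\n", blocks);

	close(fd);
	return ret < 0 ? 1 : 0;
}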