@@ -3224,6 +3224,178 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 	return ret;
 }
 
+static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
+{
+#if defined(CONFIG_UNIX)
+	struct file *file = ctx->user_files[index];
+	struct sock *sock = ctx->ring_sock->sk;
+	struct sk_buff_head list, *head = &sock->sk_receive_queue;
+	struct sk_buff *skb;
+	int i;
+
+	__skb_queue_head_init(&list);
+
+	/*
+	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
+	 * remove this entry and rearrange the file array.
+	 */
+	skb = skb_dequeue(head);
+	while (skb) {
+		struct scm_fp_list *fp;
+
+		fp = UNIXCB(skb).fp;
+		for (i = 0; i < fp->count; i++) {
+			int left;
+
+			if (fp->fp[i] != file)
+				continue;
+
+			unix_notinflight(fp->user, fp->fp[i]);
+			left = fp->count - 1 - i;
+			if (left) {
+				memmove(&fp->fp[i], &fp->fp[i + 1],
+						left * sizeof(struct file *));
+			}
+			fp->count--;
+			if (!fp->count) {
+				kfree_skb(skb);
+				skb = NULL;
+			} else {
+				__skb_queue_tail(&list, skb);
+			}
+			fput(file);
+			file = NULL;
+			break;
+		}
+
+		if (!file)
+			break;
+
+		__skb_queue_tail(&list, skb);
+
+		skb = skb_dequeue(head);
+	}
+
+	if (skb_peek(&list)) {
+		spin_lock_irq(&head->lock);
+		while ((skb = __skb_dequeue(&list)) != NULL)
+			__skb_queue_tail(head, skb);
+		spin_unlock_irq(&head->lock);
+	}
+#else
+	fput(ctx->user_files[index]);
+#endif
+}
+
+static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
+				int index)
+{
+#if defined(CONFIG_UNIX)
+	struct sock *sock = ctx->ring_sock->sk;
+	struct sk_buff_head *head = &sock->sk_receive_queue;
+	struct sk_buff *skb;
+
+	/*
+	 * See if we can merge this file into an existing skb SCM_RIGHTS
+	 * file set. If there's no room, fall back to allocating a new skb
+	 * and filling it in.
+	 */
+	spin_lock_irq(&head->lock);
+	skb = skb_peek(head);
+	if (skb) {
+		struct scm_fp_list *fpl = UNIXCB(skb).fp;
+
+		if (fpl->count < SCM_MAX_FD) {
+			__skb_unlink(skb, head);
+			spin_unlock_irq(&head->lock);
+			fpl->fp[fpl->count] = get_file(file);
+			unix_inflight(fpl->user, fpl->fp[fpl->count]);
+			fpl->count++;
+			spin_lock_irq(&head->lock);
+			__skb_queue_head(head, skb);
+		} else {
+			skb = NULL;
+		}
+	}
+	spin_unlock_irq(&head->lock);
+
+	if (skb) {
+		fput(file);
+		return 0;
+	}
+
+	return __io_sqe_files_scm(ctx, 1, index);
+#else
+	return 0;
+#endif
+}
+
+static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
+			       unsigned nr_args)
+{
+	struct io_uring_files_update up;
+	__s32 __user *fds;
+	int fd, i, err;
+	__u32 done;
+
+	if (!ctx->user_files)
+		return -ENXIO;
+	if (!nr_args)
+		return -EINVAL;
+	if (copy_from_user(&up, arg, sizeof(up)))
+		return -EFAULT;
+	if (check_add_overflow(up.offset, nr_args, &done))
+		return -EOVERFLOW;
+	if (done > ctx->nr_user_files)
+		return -EINVAL;
+
+	done = 0;
+	fds = (__s32 __user *) up.fds;
+	while (nr_args) {
+		err = 0;
+		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
+			err = -EFAULT;
+			break;
+		}
+		i = array_index_nospec(up.offset, ctx->nr_user_files);
+		if (ctx->user_files[i]) {
+			io_sqe_file_unregister(ctx, i);
+			ctx->user_files[i] = NULL;
+		}
+		if (fd != -1) {
+			struct file *file;
+
+			file = fget(fd);
+			if (!file) {
+				err = -EBADF;
+				break;
+			}
+			/*
+			 * Don't allow io_uring instances to be registered. If
+			 * UNIX isn't enabled, then this causes a reference
+			 * cycle and this instance can never get freed. If UNIX
+			 * is enabled we'll handle it just fine, but there's
+			 * still no point in allowing a ring fd as it doesn't
+			 * support regular read/write anyway.
+			 */
+			if (file->f_op == &io_uring_fops) {
+				fput(file);
+				err = -EBADF;
+				break;
+			}
+			ctx->user_files[i] = file;
+			err = io_sqe_file_register(ctx, file, i);
+			if (err)
+				break;
+		}
+		nr_args--;
+		done++;
+		up.offset++;
+	}
+
+	return done ? done : err;
+}
+
 static int io_sq_offload_start(struct io_ring_ctx *ctx,
 			       struct io_uring_params *p)
 {
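
For reference, io_sqe_files_update() above reads a small uapi structure with copy_from_user() and then walks the fd array one entry at a time. A minimal sketch of the userspace-visible layout, inferred from the up.offset/up.fds accesses and the (__s32 __user *) cast in this hunk; the field types are an assumption, and the authoritative definition is the one this patch adds to include/uapi/linux/io_uring.h:

	#include <linux/types.h>

	/*
	 * Sketch of the payload consumed by io_sqe_files_update(). The
	 * exact field types are an assumption; consult the uapi header
	 * added by this patch for the canonical layout.
	 */
	struct io_uring_files_update {
		__u32 offset;	/* first fixed-file slot to update */
		__s32 *fds;	/* nr_args descriptors; -1 clears a slot */
	};
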
@@ -4031,6 +4203,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_sqe_files_unregister(ctx);
 		break;
+	case IORING_REGISTER_FILES_UPDATE:
+		ret = io_sqe_files_update(ctx, arg, nr_args);
+		break;
 	case IORING_REGISTER_EVENTFD:
 		ret = -EINVAL;
 		if (nr_args != 1)
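
A hedged usage sketch of the new opcode from userspace: replace the file registered in fixed slot 3 of a ring whose file table was previously set up with IORING_REGISTER_FILES. It assumes a kernel carrying this patch, that IORING_REGISTER_FILES_UPDATE and struct io_uring_files_update come from the patched <linux/io_uring.h>, and that __NR_io_uring_register is known to libc; ring_fd would come from io_uring_setup(), done elsewhere.

	#include <fcntl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/io_uring.h>	/* patched header: opcode + struct */

	/*
	 * Swap the file registered at fixed-file slot `slot` for `new_fd`.
	 * Passing -1 in fds[] clears the slot instead. The syscall returns
	 * the number of slots updated (here 1) or -1 with errno set.
	 */
	static int update_fixed_file(int ring_fd, unsigned slot, int new_fd)
	{
		__s32 fds[1] = { new_fd };
		struct io_uring_files_update up = {
			.offset	= slot,
			.fds	= fds,	/* pointer, per this patch's uapi */
		};

		return syscall(__NR_io_uring_register, ring_fd,
			       IORING_REGISTER_FILES_UPDATE, &up, 1);
	}

	/* e.g.: update_fixed_file(ring_fd, 3, open("data.bin", O_RDWR)); */

The return convention mirrors io_sqe_files_update() itself: a positive count of entries processed, or a negative errno if the very first entry fails.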