@@ -1693,13 +1693,32 @@ waitfd_to_waiting_flag(int wfd_event)
1693
1693
return wfd_event << 1 ;
1694
1694
}
1695
1695
1696
+ static struct ccan_list_head *
1697
+ rb_io_blocking_operations (struct rb_io * io )
1698
+ {
1699
+ rb_serial_t fork_generation = GET_VM ()-> fork_gen ;
1700
+
1701
+ // On fork, all existing entries in this list (which are stack allocated) become invalid. Therefore, we re-initialize the list which clears it.
1702
+ if (io -> fork_generation != fork_generation ) {
1703
+ ccan_list_head_init (& io -> blocking_operations );
1704
+ io -> fork_generation = fork_generation ;
1705
+ }
1706
+
1707
+ return & io -> blocking_operations ;
1708
+ }
1709
+
1710
+ static void
1711
+ rb_io_blocking_operation_enter (struct rb_io * io , struct rb_io_blocking_operation * blocking_operation ) {
1712
+ ccan_list_add (rb_io_blocking_operations (io ), & blocking_operation -> list );
1713
+ }
1714
+
1696
1715
// Argument bundle for io_blocking_operation_exit: rb_mutex_synchronize
// forwards only a single VALUE to its callback, so both pointers are packed
// into one struct and passed by address (see rb_io_blocking_operation_exit).
struct io_blocking_operation_arguments {
    struct rb_io *io;
    struct rb_io_blocking_operation *blocking_operation;
};
1700
1719
1701
1720
static VALUE
1702
- io_blocking_operation_release (VALUE _arguments ) {
1721
+ io_blocking_operation_exit (VALUE _arguments ) {
1703
1722
struct io_blocking_operation_arguments * arguments = (void * )_arguments ;
1704
1723
struct rb_io_blocking_operation * blocking_operation = arguments -> blocking_operation ;
1705
1724
@@ -1719,7 +1738,7 @@ io_blocking_operation_release(VALUE _arguments) {
1719
1738
}
1720
1739
1721
1740
static void
1722
- rb_io_blocking_operation_release (struct rb_io * io , struct rb_io_blocking_operation * blocking_operation )
1741
+ rb_io_blocking_operation_exit (struct rb_io * io , struct rb_io_blocking_operation * blocking_operation )
1723
1742
{
1724
1743
VALUE wakeup_mutex = io -> wakeup_mutex ;
1725
1744
@@ -1729,7 +1748,7 @@ rb_io_blocking_operation_release(struct rb_io *io, struct rb_io_blocking_operati
1729
1748
.blocking_operation = blocking_operation
1730
1749
};
1731
1750
1732
- rb_mutex_synchronize (wakeup_mutex , io_blocking_operation_release , (VALUE )& arguments );
1751
+ rb_mutex_synchronize (wakeup_mutex , io_blocking_operation_exit , (VALUE )& arguments );
1733
1752
} else {
1734
1753
ccan_list_del (& blocking_operation -> list );
1735
1754
}
@@ -1824,7 +1843,7 @@ rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void
1824
1843
struct rb_io_blocking_operation blocking_operation = {
1825
1844
.ec = ec ,
1826
1845
};
1827
- ccan_list_add ( & io -> blocking_operations , & blocking_operation . list );
1846
+ rb_io_blocking_operation_enter ( io , & blocking_operation );
1828
1847
1829
1848
{
1830
1849
EC_PUSH_TAG (ec );
@@ -1851,7 +1870,7 @@ rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void
1851
1870
th -> mn_schedulable = prev_mn_schedulable ;
1852
1871
}
1853
1872
1854
- rb_io_blocking_operation_release (io , & blocking_operation );
1873
+ rb_io_blocking_operation_exit (io , & blocking_operation );
1855
1874
1856
1875
if (state ) {
1857
1876
EC_JUMP_TAG (ec , state );
@@ -2658,10 +2677,11 @@ thread_io_close_notify_all(struct rb_io *io)
2658
2677
VALUE error = vm -> special_exceptions [ruby_error_stream_closed ];
2659
2678
2660
2679
struct rb_io_blocking_operation * blocking_operation ;
2661
- ccan_list_for_each (& io -> blocking_operations , blocking_operation , list ) {
2680
+ ccan_list_for_each (rb_io_blocking_operations ( io ) , blocking_operation , list ) {
2662
2681
rb_execution_context_t * ec = blocking_operation -> ec ;
2663
2682
2664
2683
rb_thread_t * thread = ec -> thread_ptr ;
2684
+
2665
2685
rb_threadptr_pending_interrupt_enque (thread , error );
2666
2686
2667
2687
// This operation is slow:
@@ -2684,7 +2704,7 @@ rb_thread_io_close_interrupt(struct rb_io *io)
2684
2704
}
2685
2705
2686
2706
// If there are no blocking operations, we are done:
2687
- if (ccan_list_empty (& io -> blocking_operations )) {
2707
+ if (ccan_list_empty (rb_io_blocking_operations ( io ) )) {
2688
2708
return 0 ;
2689
2709
}
2690
2710
@@ -2709,7 +2729,7 @@ rb_thread_io_close_wait(struct rb_io* io)
2709
2729
}
2710
2730
2711
2731
rb_mutex_lock (wakeup_mutex );
2712
- while (!ccan_list_empty (& io -> blocking_operations )) {
2732
+ while (!ccan_list_empty (rb_io_blocking_operations ( io ) )) {
2713
2733
rb_mutex_sleep (wakeup_mutex , Qnil );
2714
2734
}
2715
2735
rb_mutex_unlock (wakeup_mutex );
@@ -4435,7 +4455,7 @@ thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4435
4455
4436
4456
if (io ) {
4437
4457
blocking_operation .ec = ec ;
4438
- ccan_list_add ( & io -> blocking_operations , & blocking_operation . list );
4458
+ rb_io_blocking_operation_enter ( io , & blocking_operation );
4439
4459
}
4440
4460
4441
4461
if (timeout == NULL && thread_io_wait_events (th , fd , events , NULL )) {
@@ -4461,7 +4481,7 @@ thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4461
4481
}
4462
4482
4463
4483
if (io ) {
4464
- rb_io_blocking_operation_release (io , & blocking_operation );
4484
+ rb_io_blocking_operation_exit (io , & blocking_operation );
4465
4485
}
4466
4486
4467
4487
if (state ) {
@@ -4539,7 +4559,7 @@ select_single_cleanup(VALUE ptr)
4539
4559
struct select_args * args = (struct select_args * )ptr ;
4540
4560
4541
4561
if (args -> blocking_operation ) {
4542
- rb_io_blocking_operation_release (args -> io , args -> blocking_operation );
4562
+ rb_io_blocking_operation_exit (args -> io , args -> blocking_operation );
4543
4563
}
4544
4564
4545
4565
if (args -> read ) rb_fd_term (args -> read );
@@ -4572,7 +4592,7 @@ thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4572
4592
if (io ) {
4573
4593
args .io = io ;
4574
4594
blocking_operation .ec = GET_EC ();
4575
- ccan_list_add ( & io -> blocking_operations , & blocking_operation . list );
4595
+ rb_io_blocking_operation_enter ( io , & blocking_operation );
4576
4596
args .blocking_operation = & blocking_operation ;
4577
4597
} else {
4578
4598
args .io = NULL ;
0 commit comments