Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

YJIT: Add RubyVM::YJIT.code_gc #6644

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Oct 31, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 6 additions & 11 deletions test/ruby/test_yjit.rb
Original file line number Diff line number Diff line change
Expand Up @@ -830,10 +830,10 @@ def foo
def test_code_gc
assert_compiles(code_gc_helpers + <<~'RUBY', exits: :any, result: :ok)
return :not_paged unless add_pages(100) # prepare freeable pages
code_gc # first code GC
RubyVM::YJIT.code_gc # first code GC
return :not_compiled1 unless compiles { nil } # should be JITable again

code_gc # second code GC
RubyVM::YJIT.code_gc # second code GC
return :not_compiled2 unless compiles { nil } # should be JITable again

code_gc_count = RubyVM::YJIT.runtime_stats[:code_gc_count]
Expand All @@ -854,7 +854,7 @@ def test_on_stack_code_gc_call

return :not_paged1 unless add_pages(400) # go to a page without initial ocb code
return :broken_resume1 if fiber.resume != 0 # JIT the fiber
code_gc # first code GC, which should not free the fiber page
RubyVM::YJIT.code_gc # first code GC, which should not free the fiber page
return :broken_resume2 if fiber.resume != 0 # The code should be still callable

code_gc_count = RubyVM::YJIT.runtime_stats[:code_gc_count]
Expand All @@ -873,19 +873,19 @@ def test_on_stack_code_gc_twice

return :not_paged1 unless add_pages(400) # go to a page without initial ocb code
return :broken_resume1 if fiber.resume(true) != 0 # JIT the fiber
code_gc # first code GC, which should not free the fiber page
RubyVM::YJIT.code_gc # first code GC, which should not free the fiber page

return :not_paged2 unless add_pages(300) # add some stuff to be freed
# Not calling fiber.resume here to test the case that the YJIT payload loses some
# information at the previous code GC. The payload should still be there, and
# thus we could know the fiber ISEQ is still on stack on this second code GC.
code_gc # second code GC, which should still not free the fiber page
RubyVM::YJIT.code_gc # second code GC, which should still not free the fiber page

return :not_paged3 unless add_pages(200) # attempt to overwrite the fiber page (it shouldn't)
return :broken_resume2 if fiber.resume(true) != 0 # The fiber code should be still fine

return :broken_resume3 if fiber.resume(false) != nil # terminate the fiber
code_gc # third code GC, freeing a page that used to be on stack
RubyVM::YJIT.code_gc # third code GC, freeing a page that used to be on stack

return :not_paged4 unless add_pages(100) # check everything still works

Expand Down Expand Up @@ -933,11 +933,6 @@ def add_pages(num_jits)
num_jits.times { return false unless eval('compiles { nil.to_i }') }
pages.nil? || pages < RubyVM::YJIT.runtime_stats[:compiled_page_count]
end

def code_gc
RubyVM::YJIT.simulate_oom! # bump write_pos
eval('proc { nil }.call') # trigger code GC
end
RUBY
end

Expand Down
1 change: 1 addition & 0 deletions yjit.c
Original file line number Diff line number Diff line change
Expand Up @@ -1053,6 +1053,7 @@ VALUE rb_yjit_get_stats(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_reset_stats_bang(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_disasm_iseq(rb_execution_context_t *ec, VALUE self, VALUE iseq);
VALUE rb_yjit_insns_compiled(rb_execution_context_t *ec, VALUE self, VALUE iseq);
VALUE rb_yjit_code_gc(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_simulate_oom_bang(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_get_exit_locations(rb_execution_context_t *ec, VALUE self);

Expand Down
9 changes: 7 additions & 2 deletions yjit.rb
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,11 @@ def self.insns_compiled(iseq)
end
end

# Free all existing JIT code and let it be recompiled lazily.
# Delegates to the C primitive rb_yjit_code_gc (see yjit.rs), which is a
# no-op returning nil when YJIT is not enabled.
def self.code_gc
Primitive.rb_yjit_code_gc
end

# Simulate running out of executable memory in YJIT's code region.
# Used by the test helpers in test_yjit.rb to force code-GC paths;
# delegates to the C primitive rb_yjit_simulate_oom_bang.
def self.simulate_oom!
Primitive.rb_yjit_simulate_oom_bang
end
Expand Down Expand Up @@ -214,14 +219,14 @@ def _print_stats
$stderr.puts "compilation_failure: " + ("%10d" % compilation_failure) if compilation_failure != 0
$stderr.puts "compiled_block_count: " + ("%10d" % stats[:compiled_block_count])
$stderr.puts "compiled_iseq_count: " + ("%10d" % stats[:compiled_iseq_count])
$stderr.puts "compiled_page_count: " + ("%10d" % stats[:compiled_page_count])
$stderr.puts "freed_iseq_count: " + ("%10d" % stats[:freed_iseq_count])
$stderr.puts "freed_page_count: " + ("%10d" % stats[:freed_page_count])
$stderr.puts "invalidation_count: " + ("%10d" % stats[:invalidation_count])
$stderr.puts "constant_state_bumps: " + ("%10d" % stats[:constant_state_bumps])
$stderr.puts "inline_code_size: " + ("%10d" % stats[:inline_code_size])
$stderr.puts "outlined_code_size: " + ("%10d" % stats[:outlined_code_size])
$stderr.puts "freed_code_size: " + ("%10d" % stats[:freed_code_size])
$stderr.puts "live_page_count: " + ("%10d" % stats[:live_page_count])
$stderr.puts "freed_page_count: " + ("%10d" % stats[:freed_page_count])
$stderr.puts "code_gc_count: " + ("%10d" % stats[:code_gc_count])
$stderr.puts "num_gc_obj_refs: " + ("%10d" % stats[:num_gc_obj_refs])

Expand Down
24 changes: 18 additions & 6 deletions yjit/src/asm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -209,17 +209,25 @@ impl CodeBlock {
self.page_size
}

/// Return the number of code pages that have been allocated by the VirtualMemory.
pub fn num_pages(&self) -> usize {
/// Return the number of code pages that have been mapped by the VirtualMemory.
pub fn num_mapped_pages(&self) -> usize {
let mapped_region_size = self.mem_block.borrow().mapped_region_size();
// CodeBlock's page size != VirtualMem's page size on Linux,
// so mapped_region_size % self.page_size may not be 0
((mapped_region_size - 1) / self.page_size) + 1
}

/// Return the number of code pages that have been reserved by the VirtualMemory.
pub fn num_virtual_pages(&self) -> usize {
let virtual_region_size = self.mem_block.borrow().virtual_region_size();
// CodeBlock's page size != VirtualMem's page size on Linux,
// so virtual_region_size % self.page_size may not be 0;
// ((n - 1) / page_size) + 1 is a ceiling division over the reserved bytes.
((virtual_region_size - 1) / self.page_size) + 1
}

/// Return the number of code pages that have been freed and not used yet.
pub fn num_freed_pages(&self) -> usize {
(0..self.num_pages()).filter(|&page_idx| self.has_freed_page(page_idx)).count()
(0..self.num_mapped_pages()).filter(|&page_idx| self.has_freed_page(page_idx)).count()
}

pub fn has_freed_page(&self, page_idx: usize) -> bool {
Expand Down Expand Up @@ -303,7 +311,7 @@ impl CodeBlock {
pub fn code_size(&self) -> usize {
let mut size = 0;
let current_page_idx = self.write_pos / self.page_size;
for page_idx in 0..self.num_pages() {
for page_idx in 0..self.num_mapped_pages() {
if page_idx == current_page_idx {
// Count only actually used bytes for the current page.
size += (self.write_pos % self.page_size).saturating_sub(self.page_start());
Expand Down Expand Up @@ -546,7 +554,7 @@ impl CodeBlock {
}

// Check which pages are still in use
let mut pages_in_use = vec![false; self.num_pages()];
let mut pages_in_use = vec![false; self.num_mapped_pages()];
// For each ISEQ, we currently assume that only code pages used by inline code
// are used by outlined code, so we mark only code pages used by inlined code.
for_each_on_stack_iseq_payload(|iseq_payload| {
Expand All @@ -560,10 +568,14 @@ impl CodeBlock {
}

// Let VirtualMem free the pages
let freed_pages: Vec<usize> = pages_in_use.iter().enumerate()
let mut freed_pages: Vec<usize> = pages_in_use.iter().enumerate()
.filter(|&(_, &in_use)| !in_use).map(|(page, _)| page).collect();
self.free_pages(&freed_pages);

// Append virtual pages in case RubyVM::YJIT.code_gc is manually triggered.
let mut virtual_pages: Vec<usize> = (self.num_mapped_pages()..self.num_virtual_pages()).collect();
freed_pages.append(&mut virtual_pages);

// Invalidate everything to have more compact code after code GC.
// This currently patches every ISEQ, which works, but in the future,
// we could limit that to patch only on-stack ISEQs for optimizing code GC.
Expand Down
4 changes: 2 additions & 2 deletions yjit/src/stats.rs
Original file line number Diff line number Diff line change
Expand Up @@ -381,8 +381,8 @@ fn rb_yjit_gen_stats_dict() -> VALUE {
// GCed code size
hash_aset_usize!(hash, "freed_code_size", freed_page_count * cb.page_size());

// Compiled pages
hash_aset_usize!(hash, "compiled_page_count", cb.num_pages() - freed_page_count);
// Live pages
hash_aset_usize!(hash, "live_page_count", cb.num_mapped_pages() - freed_page_count);
}

// If we're not generating stats, the hash is done
Expand Down
12 changes: 12 additions & 0 deletions yjit/src/yjit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,18 @@ pub extern "C" fn rb_yjit_iseq_gen_entry_point(iseq: IseqPtr, ec: EcPtr) -> *con
}
}

/// Free all existing JIT code so it can be lazily recompiled.
/// Backs RubyVM::YJIT.code_gc; always returns nil.
#[no_mangle]
pub extern "C" fn rb_yjit_code_gc(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
    // Only perform the code GC when YJIT is actually active.
    if yjit_enabled_p() {
        CodegenGlobals::get_inline_cb().code_gc();
    }
    Qnil
}

/// Simulate a situation where we are out of executable memory
#[no_mangle]
pub extern "C" fn rb_yjit_simulate_oom_bang(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
Expand Down