diff --git a/class.c b/class.c
index ce2ede5c2b3fca..9c520a19d0291d 100644
--- a/class.c
+++ b/class.c
@@ -162,7 +162,7 @@ rb_class_detach_module_subclasses(VALUE klass)
 static VALUE
 class_alloc(VALUE flags, VALUE klass)
 {
-    NEWOBJ_OF(obj, struct RClass, klass, (flags & T_MASK) | FL_PROMOTED1 /* start from age == 2 */ | (RGENGC_WB_PROTECTED_CLASS ? FL_WB_PROTECTED : 0));
+    NEW_TCLASS_OBJ_OF(obj, struct RClass, klass, (flags & T_MASK) | FL_PROMOTED1 /* start from age == 2 */ | (RGENGC_WB_PROTECTED_CLASS ? FL_WB_PROTECTED : 0));
     obj->ptr = ZALLOC(rb_classext_t);
     /* ZALLOC
       RCLASS_IV_TBL(obj) = 0;
diff --git a/ext/objspace/objspace_dump.c b/ext/objspace/objspace_dump.c
index 127d33283ad0a7..674bc781e63178 100644
--- a/ext/objspace/objspace_dump.c
+++ b/ext/objspace/objspace_dump.c
@@ -21,6 +21,7 @@
 #include "objspace.h"

 static VALUE sym_output, sym_stdout, sym_string, sym_file;
+static VALUE sym_include_pages, sym_include_none;

 struct dump_config {
     VALUE type;
@@ -31,6 +32,9 @@ struct dump_config {
     VALUE cur_obj;
     VALUE cur_obj_klass;
     size_t cur_obj_references;
+    int include_pages;
+    int include_none;
+    int pages_seen;
 };

 PRINTF_ARGS(static void dump_append(struct dump_config *, const char *, ...), 2, 3);
@@ -190,6 +194,18 @@ dump_append_string_content(struct dump_config *dc, VALUE obj)
     }
 }

+static void
+dump_empty(VALUE obj, struct dump_config *dc)
+{
+    dump_append(dc, "{\"address\":\"%p\", ", (void *)obj);
+    dump_append(dc, "\"type\":\"NONE\"");
+
+    if (dc->include_pages)
+        dump_append(dc, ", \"page_number\":%d", dc->pages_seen);
+    dump_append(dc, "}\n");
+    return;
+}
+
 static void
 dump_object(VALUE obj, struct dump_config *dc)
 {
@@ -215,6 +231,8 @@ dump_object(VALUE obj, struct dump_config *dc)

     if (dc->cur_obj_klass)
         dump_append(dc, ", \"class\":\"%p\"", (void *)dc->cur_obj_klass);
+    if (dc->include_pages)
+        dump_append(dc, ", \"page_number\":%d", dc->pages_seen);
     if (rb_obj_frozen_p(obj))
         dump_append(dc, ", \"frozen\":true");

@@ -318,11 +336,15 @@ dump_object(VALUE obj, struct dump_config *dc)
 static int
 heap_i(void *vstart, void *vend, size_t stride, void *data)
 {
+    struct dump_config *dc = (struct dump_config *)data;
     VALUE v = (VALUE)vstart;
     for (; v != (VALUE)vend; v += stride) {
         if (RBASIC(v)->flags)
-            dump_object(v, data);
+            dump_object(v, dc);
+        else if (dc->include_none && T_NONE == BUILTIN_TYPE(v))
+            dump_empty(v, dc);
     }
+    dc->pages_seen++;
     return 0;
 }

@@ -347,9 +369,20 @@ dump_output(struct dump_config *dc, VALUE opts, VALUE output, const char *filena
 {
     VALUE tmp;

-    if (RTEST(opts))
+    dc->pages_seen = 0;
+    dc->include_pages = 0;
+    dc->include_none = 0;
+
+    if (RTEST(opts)) {
         output = rb_hash_aref(opts, sym_output);

+        if (Qtrue == rb_hash_lookup2(opts, sym_include_pages, Qfalse))
+            dc->include_pages = 1;
+
+        if (Qtrue == rb_hash_lookup2(opts, sym_include_none, Qfalse))
+            dc->include_none = 1;
+    }
+
     if (output == sym_stdout) {
         dc->stream = stdout;
         dc->string = Qnil;
@@ -474,6 +507,8 @@ Init_objspace_dump(VALUE rb_mObjSpace)
     sym_stdout = ID2SYM(rb_intern("stdout"));
     sym_string = ID2SYM(rb_intern("string"));
     sym_file = ID2SYM(rb_intern("file"));
+    sym_include_pages = ID2SYM(rb_intern("include_pages"));
+    sym_include_none = ID2SYM(rb_intern("include_none"));

     /* force create static IDs */
     rb_obj_gc_flags(rb_mObjSpace, 0, 0);
diff --git a/gc.c b/gc.c
index 2eb049184cae81..64580aff1edd46 100644
--- a/gc.c
+++ b/gc.c
@@ -527,6 +527,7 @@ typedef struct rb_objspace {
     size_t total_allocated_objects;

     rb_heap_t eden_heap;
+    rb_heap_t tclass_heap;
     rb_heap_t tomb_heap; /* heap for zombies and ghosts */

     struct {
@@ -732,6 +733,7 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
 #define heap_pages_final_slots    objspace->heap_pages.final_slots
 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
 #define heap_eden                 (&objspace->eden_heap)
+#define heap_tclass               (&objspace->tclass_heap)
 #define heap_tomb                 (&objspace->tomb_heap)
 #define dont_gc                   objspace->flags.dont_gc
 #define during_gc                 objspace->flags.during_gc
@@ -783,7 +785,7 @@ gc_mode_verify(enum gc_mode mode)
 #define will_be_incremental_marking(objspace) FALSE
 #endif
 #define has_sweeping_pages(heap)  ((heap)->sweep_pages != 0)
-#define is_lazy_sweeping(heap)    (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
+#define is_lazy_sweeping(_objspace) (GC_ENABLE_LAZY_SWEEP && (has_sweeping_pages(&(_objspace->eden_heap)) || has_sweeping_pages(&(_objspace->tclass_heap))))

 #if SIZEOF_LONG == SIZEOF_VOIDP
 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
@@ -842,7 +844,7 @@ static int gc_marks_finish(rb_objspace_t *objspace);
 static void gc_marks_rest(rb_objspace_t *objspace);
 #if GC_ENABLE_INCREMENTAL_MARK
 static void gc_marks_step(rb_objspace_t *objspace, int slots);
-static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);
+static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t * requested_heap);
 #endif

 static void gc_sweep(rb_objspace_t *objspace);
@@ -851,7 +853,7 @@ static void gc_sweep_finish(rb_objspace_t *objspace);
 static int  gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap);
 static void gc_sweep_rest(rb_objspace_t *objspace);
 #if GC_ENABLE_LAZY_SWEEP
-static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);
+static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap, int need_increment);
 #endif

 static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
@@ -1320,7 +1322,7 @@ static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
 void
 rb_objspace_free(rb_objspace_t *objspace)
 {
-    if (is_lazy_sweeping(heap_eden))
+    if (is_lazy_sweeping(objspace))
         rb_bug("lazy sweeping underway when freeing object space");

     if (objspace->profile.records) {
@@ -1349,6 +1351,9 @@ rb_objspace_free(rb_objspace_t *objspace)
         objspace->eden_heap.total_pages = 0;
         objspace->eden_heap.total_slots = 0;
         objspace->eden_heap.pages = NULL;
+        objspace->tclass_heap.total_pages = 0;
+        objspace->tclass_heap.total_slots = 0;
+        objspace->tclass_heap.pages = NULL;
     }
     free_stack_chunks(&objspace->mark_stack);
 #if !(defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE)
@@ -1362,6 +1367,7 @@ heap_pages_expand_sorted(rb_objspace_t *objspace)
 {
     size_t next_length = heap_allocatable_pages;
     next_length += heap_eden->total_pages;
+    next_length += heap_tclass->total_pages;
     next_length += heap_tomb->total_pages;

     if (next_length > heap_pages_sorted_length) {
@@ -1647,7 +1653,7 @@ heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots
 static void
 heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
 {
-    size_t used = heap_eden->total_pages;
+    size_t used = heap_eden->total_pages + heap_tclass->total_pages;
     size_t next_used_limit = used + additional_pages;

     if (next_used_limit == heap_allocated_pages) next_used_limit++;
@@ -1677,8 +1683,14 @@ heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
     if (RGENGC_CHECK_MODE) assert(heap->free_pages == NULL);

 #if GC_ENABLE_LAZY_SWEEP
-    if (is_lazy_sweeping(heap)) {
-        gc_sweep_continue(objspace, heap);
+    if (is_lazy_sweeping(objspace)) {
+        gc_sweep_continue(objspace, heap_eden, heap == heap_eden ? 1 : 0);
+        gc_sweep_continue(objspace, heap_tclass, heap == heap_tclass ? 1 : 0);
+        if (heap->free_pages == NULL) {
+            if(has_sweeping_pages(heap_eden) || has_sweeping_pages(heap_tclass)) {
+                gc_sweep_rest(objspace);
+            }
+        }
     }
 #endif
 #if GC_ENABLE_INCREMENTAL_MARK
@@ -1846,7 +1858,7 @@ newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_prote
 }

 static inline VALUE
-newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected)
+newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected, rb_heap_t * eden)
 {
     VALUE obj;

@@ -1864,31 +1876,30 @@ newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objsp
         }
     }

-    obj = heap_get_freeobj(objspace, heap_eden);
+    obj = heap_get_freeobj(objspace, eden);
     newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
     gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
     return obj;
 }

-NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
-NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
+NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, rb_heap_t *eden));
+NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, rb_heap_t *eden));

 static VALUE
-newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
+newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, rb_heap_t *eden)
 {
-    return newobj_slowpath(klass, flags, v1, v2, v3, objspace, TRUE);
+    return newobj_slowpath(klass, flags, v1, v2, v3, objspace, TRUE, eden);
 }

 static VALUE
-newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
+newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, rb_heap_t *eden)
 {
-    return newobj_slowpath(klass, flags, v1, v2, v3, objspace, FALSE);
+    return newobj_slowpath(klass, flags, v1, v2, v3, objspace, FALSE, eden);
 }

 static inline VALUE
-newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
+newobj_of_with_eden(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, rb_objspace_t * objspace, rb_heap_t *eden)
 {
-    rb_objspace_t *objspace = &rb_objspace;
     VALUE obj;

 #if GC_DEBUG_STRESS_TO_CLASS
@@ -1903,16 +1914,30 @@ newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protect
     if (!(during_gc ||
           ruby_gc_stressful ||
           gc_event_hook_available_p(objspace)) &&
-        (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
+        (obj = heap_get_freeobj_head(objspace, eden)) != Qfalse) {
         return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
     }
     else {
         return wb_protected ?
-          newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
-          newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
+          newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace, eden) :
+          newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace, eden);
     }
 }

+static inline VALUE
+tclassobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
+{
+    rb_objspace_t *objspace = &rb_objspace;
+    return newobj_of_with_eden(klass, flags, v1, v2, v3, wb_protected, objspace, heap_tclass);
+}
+
+static inline VALUE
+newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
+{
+    rb_objspace_t *objspace = &rb_objspace;
+    return newobj_of_with_eden(klass, flags, v1, v2, v3, wb_protected, objspace, heap_eden);
+}
+
 VALUE
 rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
 {
@@ -1935,6 +1960,12 @@ rb_newobj(void)
     return newobj_of(0, T_NONE, 0, 0, 0, FALSE);
 }

+VALUE
+rb_new_tclass_obj_of(VALUE klass, VALUE flags)
+{
+    return tclassobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
+}
+
 VALUE
 rb_newobj_of(VALUE klass, VALUE flags)
 {
@@ -2315,6 +2346,7 @@ Init_heap(void)
 #endif

     heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
+    heap_add_pages(objspace, heap_tclass, 2);
     init_mark_stack(&objspace->mark_stack);

 #ifdef USE_SIGALTSTACK
@@ -2934,9 +2966,9 @@ heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
 }

 static inline int
-is_swept_object(rb_objspace_t *objspace, VALUE ptr)
+is_swept_object(rb_objspace_t *objspace, VALUE ptr, rb_heap_t * heap)
 {
-    if (heap_is_swept_object(objspace, heap_eden, ptr)) {
+    if (heap_is_swept_object(objspace, heap, ptr)) {
         return TRUE;
     }
     else {
@@ -2948,8 +2980,9 @@ is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
 {
-    if (!is_lazy_sweeping(heap_eden) ||
-        is_swept_object(objspace, ptr) ||
+    if (!is_lazy_sweeping(objspace) ||
+        is_swept_object(objspace, ptr, heap_eden) ||
+        is_swept_object(objspace, ptr, heap_tclass) ||
         MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {

         return FALSE;
@@ -3386,7 +3419,7 @@ count_objects(int argc, VALUE *argv, VALUE os)
 static size_t
 objspace_available_slots(rb_objspace_t *objspace)
 {
-    return heap_eden->total_slots + heap_tomb->total_slots;
+    return heap_eden->total_slots + heap_tomb->total_slots + heap_tclass->total_slots;
 }

 static size_t
@@ -3552,7 +3585,6 @@ gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
     heap->free_pages = NULL;
 #if GC_ENABLE_INCREMENTAL_MARK
     heap->pooled_pages = NULL;
-    objspace->rincgc.pooled_slots = 0;
 #endif
     if (heap->using_page) {
         RVALUE **p = &heap->using_page->freelist;
@@ -3572,7 +3604,11 @@ static void
 gc_sweep_start(rb_objspace_t *objspace)
 {
     gc_mode_transition(objspace, gc_mode_sweeping);
+#if GC_ENABLE_INCREMENTAL_MARK
+    objspace->rincgc.pooled_slots = 0;
+#endif
     gc_sweep_start_heap(objspace, heap_eden);
+    gc_sweep_start_heap(objspace, heap_tclass);
 }

 static void
@@ -3651,7 +3687,7 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
         sweep_page = next_sweep_page;
     }

-    if (heap->sweep_pages == NULL) {
+    if (objspace->eden_heap.sweep_pages == NULL && objspace->tclass_heap.sweep_pages == NULL) {
         gc_sweep_finish(objspace);
     }

@@ -3667,6 +3703,13 @@ gc_sweep_rest(rb_objspace_t *objspace)
 {
     rb_heap_t *heap = heap_eden; /* lazy sweep only for eden */

+    while (has_sweeping_pages(heap_eden) || has_sweeping_pages(heap_tclass)) {
+        gc_sweep_step(objspace, heap_eden);
+        gc_sweep_step(objspace, heap_tclass);
+    }
+
+    heap = heap_tclass;
+
     while (has_sweeping_pages(heap)) {
         gc_sweep_step(objspace, heap);
     }
 }
@@ -3674,13 +3717,13 @@ gc_sweep_rest(rb_objspace_t *objspace)

 #if GC_ENABLE_LAZY_SWEEP
 static void
-gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
+gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap, int need_increment)
 {
     if (RGENGC_CHECK_MODE) assert(dont_gc == FALSE);

     gc_enter(objspace, "sweep_continue");
 #if USE_RGENGC
-    if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && heap_increment(objspace, heap)) {
+    if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && need_increment && heap_increment(objspace, heap)) {
         gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
     }
 #endif
@@ -3689,6 +3732,17 @@ gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
 }
 #endif

+static void
+gc_mark_before_sweep(rb_heap_t *heap)
+{
+    struct heap_page *page;
+    page = heap->sweep_pages;
+    while (page) {
+        page->flags.before_sweep = TRUE;
+        page = page->next;
+    }
+}
+
 static void
 gc_sweep(rb_objspace_t *objspace)
 {
@@ -3707,17 +3761,15 @@ gc_sweep(rb_objspace_t *objspace)
 #endif
     }
     else {
-        struct heap_page *page;
         gc_sweep_start(objspace);
-        page = heap_eden->sweep_pages;
-        while (page) {
-            page->flags.before_sweep = TRUE;
-            page = page->next;
-        }
+        gc_mark_before_sweep(heap_eden);
+        gc_mark_before_sweep(heap_tclass);
         gc_sweep_step(objspace, heap_eden);
+        gc_sweep_step(objspace, heap_tclass);
     }

     gc_heap_prepare_minimum_pages(objspace, heap_eden);
+    gc_heap_prepare_minimum_pages(objspace, heap_tclass);
 }

 /* Marking - Marking stack */
@@ -5183,6 +5235,7 @@ gc_verify_heap_pages(rb_objspace_t *objspace)
 {
     int rememberd_old_objects = 0;
     rememberd_old_objects = gc_verify_heap_pages_(objspace, heap_eden->pages);
+    rememberd_old_objects = gc_verify_heap_pages_(objspace, heap_tclass->pages);
     rememberd_old_objects = gc_verify_heap_pages_(objspace, heap_tomb->pages);
     return rememberd_old_objects;
 }
@@ -5227,7 +5280,7 @@ gc_verify_internal_consistency(VALUE dummy)

     /* check counters */

-    if (!is_lazy_sweeping(heap_eden) && !finalizing) {
+    if (!is_lazy_sweeping(objspace) && !finalizing) {
         if (objspace_live_slots(objspace) != data.live_object_count) {
             fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
                     (int)heap_pages_final_slots, (int)objspace->profile.total_freed_objects);
@@ -5305,6 +5358,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
         objspace->rgengc.last_major_gc = objspace->profile.count;
         objspace->marked_slots = 0;
         rgengc_mark_and_rememberset_clear(objspace, heap_eden);
+        rgengc_mark_and_rememberset_clear(objspace, heap_tclass);
     }
     else {
         objspace->flags.during_minor_gc = TRUE;
@@ -5312,6 +5366,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
             objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
         objspace->profile.minor_gc_count++;
         rgengc_rememberset_mark(objspace, heap_eden);
+        rgengc_rememberset_mark(objspace, heap_tclass);
     }
 #endif

@@ -5322,9 +5377,9 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)

 #if GC_ENABLE_INCREMENTAL_MARK
 static void
-gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
+gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
 {
-    struct heap_page *page = heap_eden->pages;
+    struct heap_page *page = heap->pages;

     while (page) {
         bits_t *mark_bits = page->mark_bits;
@@ -5381,12 +5436,20 @@ gc_marks_finish(rb_objspace_t *objspace)
 #if GC_ENABLE_INCREMENTAL_MARK
     /* finish incremental GC */
     if (is_incremental_marking(objspace)) {
-        if (heap_eden->pooled_pages) {
-            heap_move_pooled_pages_to_free_pages(heap_eden);
+        if (heap_eden->pooled_pages || heap_tclass->pooled_pages) {
+            if (heap_eden->pooled_pages) {
+                heap_move_pooled_pages_to_free_pages(heap_eden);
+            }
+
+            if (heap_tclass->pooled_pages) {
+                heap_move_pooled_pages_to_free_pages(heap_tclass);
+            }
+
             gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
             return FALSE; /* continue marking phase */
         }

+
         if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
             rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));
         }
@@ -5406,7 +5469,8 @@ gc_marks_finish(rb_objspace_t *objspace)
         objspace->flags.during_incremental_marking = FALSE;
         /* check children of all marked wb-unprotected objects */
-        gc_marks_wb_unprotected_objects(objspace);
+        gc_marks_wb_unprotected_objects(objspace, heap_eden);
+        gc_marks_wb_unprotected_objects(objspace, heap_tclass);
     }
 #endif /* GC_ENABLE_INCREMENTAL_MARK */

@@ -5430,14 +5494,15 @@ gc_marks_finish(rb_objspace_t *objspace)
     {
         /* decide full GC is needed or not */
         rb_heap_t *heap = heap_eden;
-        size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots;
+        rb_heap_t *oheap = heap_tclass;
+        size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots + oheap->total_slots;
         size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
         size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
         size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
         int full_marking = is_full_marking(objspace);

 #if RGENGC_CHECK_MODE
-        assert(heap->total_slots >= objspace->marked_slots);
+        assert((heap->total_slots + oheap->total_slots) >= objspace->marked_slots);
 #endif

         /* setup free-able page counts */
@@ -5531,6 +5596,7 @@ gc_marks_rest(rb_objspace_t *objspace)

 #if GC_ENABLE_INCREMENTAL_MARK
     heap_eden->pooled_pages = NULL;
+    heap_tclass->pooled_pages = NULL;
 #endif

     if (is_incremental_marking(objspace)) {
@@ -5548,32 +5614,52 @@ gc_marks_rest(rb_objspace_t *objspace)
 }

 #if GC_ENABLE_INCREMENTAL_MARK
-static void
-gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
+static int
+gc_marks_continue_slots(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     int slots = 0;
     const char *from;

+    if (heap->pooled_pages) {
+        while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
+            struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
+            slots += page->free_slots;
+        }
+        from = "pooled-pages";
+    }
+    else if (heap_increment(objspace, heap)) {
+        slots = heap->free_pages->free_slots;
+        from = "incremented-pages";
+    }
+    if (slots > 0) {
+        gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);
+    }
+    return slots;
+}
+
+static void
+gc_marks_continue(rb_objspace_t *objspace, rb_heap_t * requested_heap)
+{
+    int eden_slots = 0;
+    int tclass_slots = 0;
+    int slots = 0;
+
     if (RGENGC_CHECK_MODE) assert(dont_gc == FALSE);

     gc_enter(objspace, "marks_continue");

     PUSH_MARK_FUNC_DATA(NULL);
     {
-        if (heap->pooled_pages) {
-            while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
-                struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
-                slots += page->free_slots;
-            }
-            from = "pooled-pages";
-        }
-        else if (heap_increment(objspace, heap)) {
-            slots = heap->free_pages->free_slots;
-            from = "incremented-pages";
+        eden_slots = gc_marks_continue_slots(objspace, heap_eden);
+        tclass_slots = gc_marks_continue_slots(objspace, heap_tclass);
+
+        if (requested_heap == heap_eden) {
+            slots = eden_slots;
+        } else {
+            slots = tclass_slots;
         }

         if (slots > 0) {
-            gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);
             gc_marks_step(objspace, (int)objspace->rincgc.step_slots);
         }
         else {
@@ -5634,7 +5720,7 @@ gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
             status = is_full_marking(objspace) ? "+" : "-";
         }
         else {
-            if (is_lazy_sweeping(heap_eden)) {
+            if (is_lazy_sweeping(objspace)) {
                 status = "S";
             }
             if (is_incremental_marking(objspace)) {
@@ -6214,6 +6300,7 @@ ready_to_gc(rb_objspace_t *objspace)
 {
     if (dont_gc || during_gc || ruby_disable_gc) {
         heap_ready_to_gc(objspace, heap_eden);
+        heap_ready_to_gc(objspace, heap_tclass);
         return FALSE;
     }
     else {
@@ -6317,7 +6404,7 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark,

     if (RGENGC_CHECK_MODE) {
         assert(gc_mode(objspace) == gc_mode_none);
-        assert(!is_lazy_sweeping(heap_eden));
+        assert(!is_lazy_sweeping(objspace));
         assert(!is_incremental_marking(objspace));
 #if RGENGC_CHECK_MODE >= 2
         gc_verify_internal_consistency(Qnil);
@@ -6397,7 +6484,7 @@ static void
 gc_rest(rb_objspace_t *objspace)
 {
     int marking = is_incremental_marking(objspace);
-    int sweeping = is_lazy_sweeping(heap_eden);
+    int sweeping = is_lazy_sweeping(objspace);

     if (marking || sweeping) {
         gc_enter(objspace, "gc_rest");
@@ -6409,7 +6496,7 @@ gc_rest(rb_objspace_t *objspace)
             gc_marks_rest(objspace);
             POP_MARK_FUNC_DATA();
         }
-        if (is_lazy_sweeping(heap_eden)) {
+        if (is_lazy_sweeping(objspace)) {
             gc_sweep_rest(objspace);
         }
         gc_exit(objspace, "gc_rest");
@@ -6439,7 +6526,7 @@ gc_current_status_fill(rb_objspace_t *objspace, char *buff)
     }
     else if (is_sweeping(objspace)) {
         buff[i++] = 'S';
-        if (is_lazy_sweeping(heap_eden)) buff[i++] = 'L';
+        if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
     }
     else {
         buff[i++] = 'N';
@@ -6825,6 +6912,7 @@ enum gc_stat_sym {
     gc_stat_sym_heap_final_slots,
     gc_stat_sym_heap_marked_slots,
     gc_stat_sym_heap_eden_pages,
+    gc_stat_sym_heap_tclass_pages,
     gc_stat_sym_heap_tomb_pages,
     gc_stat_sym_total_allocated_pages,
     gc_stat_sym_total_freed_pages,
@@ -6858,6 +6946,7 @@ enum gc_stat_compat_sym {
     gc_stat_compat_sym_gc_stat_heap_used,
     gc_stat_compat_sym_heap_eden_page_length,
+    gc_stat_compat_sym_heap_tclass_page_length,
     gc_stat_compat_sym_heap_tomb_page_length,
     gc_stat_compat_sym_heap_increment,
     gc_stat_compat_sym_heap_length,
@@ -6901,6 +6990,7 @@ setup_gc_stat_symbols(void)
     S(heap_final_slots);
     S(heap_marked_slots);
     S(heap_eden_pages);
+    S(heap_tclass_pages);
     S(heap_tomb_pages);
     S(total_allocated_pages);
     S(total_freed_pages);
@@ -6932,6 +7022,7 @@ setup_gc_stat_symbols(void)
 #define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
     S(gc_stat_heap_used);
     S(heap_eden_page_length);
+    S(heap_tclass_page_length);
     S(heap_tomb_page_length);
     S(heap_increment);
     S(heap_length);
@@ -6965,6 +7056,7 @@ setup_gc_stat_symbols(void)
 #define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
     rb_hash_aset(table, OLD_SYM(gc_stat_heap_used), NEW_SYM(heap_allocated_pages));
     rb_hash_aset(table, OLD_SYM(heap_eden_page_length), NEW_SYM(heap_eden_pages));
+    rb_hash_aset(table, OLD_SYM(heap_tclass_page_length), NEW_SYM(heap_tclass_pages));
     rb_hash_aset(table, OLD_SYM(heap_tomb_page_length), NEW_SYM(heap_tomb_pages));
     rb_hash_aset(table, OLD_SYM(heap_increment), NEW_SYM(heap_allocatable_pages));
     rb_hash_aset(table, OLD_SYM(heap_length), NEW_SYM(heap_sorted_length));
@@ -7073,6 +7165,7 @@ gc_stat_internal(VALUE hash_or_sym)
     SET(heap_final_slots, heap_pages_final_slots);
     SET(heap_marked_slots, objspace->marked_slots);
     SET(heap_eden_pages, heap_eden->total_pages);
+    SET(heap_tclass_pages, heap_tclass->total_pages);
     SET(heap_tomb_pages, heap_tomb->total_pages);
     SET(total_allocated_pages, objspace->profile.total_allocated_pages);
     SET(total_freed_pages, objspace->profile.total_freed_pages);
@@ -7147,6 +7240,7 @@ gc_stat_internal(VALUE hash_or_sym)
  *     :heap_final_slots=>0,
  *     :heap_marked_slots=>0,
  *     :heap_eden_pages=>24,
+ *     :heap_tclass_pages=>24,
  *     :heap_tomb_pages=>0,
  *     :total_allocated_pages=>24,
  *     :total_freed_pages=>0,
@@ -7407,6 +7501,7 @@ gc_set_initial_pages(void)
     if (min_pages > heap_eden->total_pages) {
         heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
     }
+    heap_add_pages(objspace, heap_tclass, 10);
 }

 /*
@@ -7725,7 +7820,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
     if (type == MEMOP_TYPE_MALLOC) {
       retry:
         if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
-            if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
+            if (ruby_thread_has_gvl_p() && (is_lazy_sweeping(objspace))) {
                 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
                 goto retry;
             }
@@ -9390,7 +9485,7 @@ rb_gcdebug_print_obj_condition(VALUE obj)
     fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
 #endif

-    if (is_lazy_sweeping(heap_eden)) {
+    if (is_lazy_sweeping(objspace)) {
         fprintf(stderr, "lazy sweeping?: true\n");
         fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
     }
diff --git a/include/ruby/ruby.h b/include/ruby/ruby.h
index 634fe60c5125b3..94043c4e91db0b 100644
--- a/include/ruby/ruby.h
+++ b/include/ruby/ruby.h
@@ -735,11 +735,14 @@ VALUE rb_int2big(SIGNED_VALUE);

 VALUE rb_newobj(void);
 VALUE rb_newobj_of(VALUE, VALUE);
+VALUE rb_new_tclass_obj_of(VALUE, VALUE);
 VALUE rb_obj_setup(VALUE obj, VALUE klass, VALUE type);
 #define RB_NEWOBJ(obj,type) type *(obj) = (type*)rb_newobj()
 #define RB_NEWOBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_newobj_of(klass, flags)
+#define RB_NEW_TCLASS_OBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_new_tclass_obj_of(klass, flags)
 #define NEWOBJ(obj,type) RB_NEWOBJ(obj,type)
 #define NEWOBJ_OF(obj,type,klass,flags) RB_NEWOBJ_OF(obj,type,klass,flags) /* core has special NEWOBJ_OF() in internal.h */
+#define NEW_TCLASS_OBJ_OF(obj,type,klass,flags) RB_NEW_TCLASS_OBJ_OF(obj,type,klass,flags) /* core has special NEWOBJ_OF() in internal.h */
 #define OBJSETUP(obj,c,t) rb_obj_setup(obj, c, t) /* use NEWOBJ_OF instead of NEWOBJ()+OBJSETUP() */
 #define CLONESETUP(clone,obj) rb_clone_setup(clone,obj)
 #define DUPSETUP(dup,obj) rb_dup_setup(dup,obj)
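
Usage note (not part of the patch itself): outside of gc.c, the new tclass heap is reachable through rb_new_tclass_obj_of() and the NEW_TCLASS_OBJ_OF() macro that the include/ruby/ruby.h hunk exports, which is how class_alloc() in the class.c hunk now allocates class objects. The sketch below is illustrative only; the function name alloc_class_like does not appear in the patch, and the flag combination simply mirrors class_alloc().

/* Illustrative sketch only -- mirrors class_alloc() from the class.c hunk above.
 * NEW_TCLASS_OBJ_OF(obj, type, klass, flags) expands to
 *     type *obj = (type *)rb_new_tclass_obj_of(klass, flags);
 * and rb_new_tclass_obj_of() splits FL_WB_PROTECTED out of flags, passing it as
 * the wb_protected argument before taking a free slot from heap_tclass. */
static VALUE
alloc_class_like(VALUE klass)
{
    NEW_TCLASS_OBJ_OF(obj, struct RClass, klass,
                      T_CLASS | (RGENGC_WB_PROTECTED_CLASS ? FL_WB_PROTECTED : 0));
    obj->ptr = ZALLOC(rb_classext_t); /* same classext setup as class_alloc() */
    return (VALUE)obj;
}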