unusable frag
Maoni0 committed Aug 12, 2024
commit ee0f717ece0a5944bb18897ff94fbe7418ae2a6f
82 changes: 81 additions & 1 deletion src/coreclr/gc/gc.cpp
@@ -3326,7 +3326,7 @@ gc_heap::dt_high_frag_p (gc_tuning_point tp,
}
}
#endif //!MULTIPLE_HEAPS
size_t fr = generation_unusable_fragmentation (generation_of (gen_number));
size_t fr = generation_unusable_fragmentation (generation_of (gen_number), heap_number);
ret = (fr > dd_fragmentation_limit(dd));
if (ret)
{
@@ -21338,6 +21338,68 @@ bool gc_heap::init_table_for_region (int gen_number, heap_segment* region)
}
#endif //USE_REGIONS

// The following two methods use integer division to prevent a potential floating point exception (FPE).
// An FPE may occur if we use floating point division because of speculative execution.
//
// Return the percentage of efficiency (between 0 and 100) of the allocator.
inline
size_t gc_heap::generation_allocator_efficiency_percent (generation* inst)
{
#ifdef DYNAMIC_HEAP_COUNT
if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes)
{
uint64_t total_plan_allocated = generation_total_plan_allocated (inst);
uint64_t condemned_allocated = generation_condemned_allocated (inst);
return ((total_plan_allocated == 0) ? 0 : (100 * (total_plan_allocated - condemned_allocated) / total_plan_allocated));
}
else
#endif //DYNAMIC_HEAP_COUNT
{
uint64_t free_obj_space = generation_free_obj_space (inst);
uint64_t free_list_allocated = generation_free_list_allocated (inst);
if ((free_list_allocated + free_obj_space) == 0)
return 0;
return (size_t)((100 * free_list_allocated) / (free_list_allocated + free_obj_space));
}
}
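
To illustrate the integer-only efficiency math above, here is a minimal standalone sketch of the DYNAMIC_HEAP_COUNT branch. It is not part of this change; the function name and sample values are hypothetical, and the only assumption is that total_plan_allocated includes condemned_allocated.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical standalone version of the calculation; no floating point
// division is issued, so no FPE can be raised speculatively.
static size_t efficiency_percent (uint64_t total_plan_allocated, uint64_t condemned_allocated)
{
    if (total_plan_allocated == 0)
        return 0;
    return (size_t)(100 * (total_plan_allocated - condemned_allocated) / total_plan_allocated);
}

int main ()
{
    // 10 MB planned in total, 2 MB of it condemned allocation -> 80% efficiency.
    printf ("%zu%%\n", efficiency_percent (10 * 1024 * 1024, 2 * 1024 * 1024));
    return 0;
}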

inline
size_t gc_heap::generation_unusable_fragmentation (generation* inst, int hn)
{
#ifdef DYNAMIC_HEAP_COUNT
if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes)
{
uint64_t total_plan_allocated = generation_total_plan_allocated (inst);
uint64_t condemned_allocated = generation_condemned_allocated (inst);
uint64_t unusable_frag = 0;
size_t fo_space = (((ptrdiff_t)generation_free_obj_space (inst) < 0) ? 0 : generation_free_obj_space (inst));

if (total_plan_allocated != 0)
{
unusable_frag = fo_space + (condemned_allocated * generation_free_list_space (inst) / total_plan_allocated);
}

dprintf (6666, ("h%d g%d FLa: %Id, ESa: %Id, Ca: %Id | FO: %Id, FL %Id, fl effi %.3f, unusable fl is %Id",
hn, inst->gen_num,
generation_free_list_allocated (inst), generation_end_seg_allocated (inst), (size_t)condemned_allocated,
fo_space, generation_free_list_space (inst),
((total_plan_allocated == 0) ? 1.0 : ((float)(total_plan_allocated - condemned_allocated) / (float)total_plan_allocated)),
(size_t)unusable_frag));

return (size_t)unusable_frag;
}
else
#endif //DYNAMIC_HEAP_COUNT
{
uint64_t free_obj_space = generation_free_obj_space (inst);
uint64_t free_list_allocated = generation_free_list_allocated (inst);
uint64_t free_list_space = generation_free_list_space (inst);
if ((free_list_allocated + free_obj_space) == 0)
return 0;
return (size_t)(free_obj_space + (free_obj_space * free_list_space) / (free_list_allocated + free_obj_space));
}
}
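
As a rough worked example of the unusable-fragmentation estimate above (a sketch with hypothetical names and values, not part of this change): the free list space is scaled by the condemned share of the planned allocation, and the free object space is added on top.

#include <cstdint>
#include <cstdio>

// Hypothetical standalone version of the DYNAMIC_HEAP_COUNT branch; inputs are
// assumed to be already clamped to non-negative values.
static uint64_t unusable_frag (uint64_t free_obj_space, uint64_t free_list_space,
                               uint64_t condemned_allocated, uint64_t total_plan_allocated)
{
    uint64_t result = free_obj_space;
    if (total_plan_allocated != 0)
    {
        // Integer division only, for the same FPE reason noted above.
        result += condemned_allocated * free_list_space / total_plan_allocated;
    }
    return result;
}

int main ()
{
    // 1 MB of free objects, an 8 MB free list, 2 MB condemned out of 10 MB planned
    // -> 1 MB + (2/10) * 8 MB, roughly 2.6 MB considered unusable.
    printf ("%llu bytes\n", (unsigned long long)unusable_frag (1 << 20, 8 << 20, 2 << 20, 10 << 20));
    return 0;
}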

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:6326) // "Potential comparison of a constant with another constant" is intentional in this function.
@@ -43813,6 +43875,24 @@ void gc_heap::compute_new_dynamic_data (int gen_number)
//keep track of fragmentation
dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen);

// We need to reset the condemned alloc for the condemned generation because it participates in the free list
// efficiency calculation. If a generation is condemned, all the allocations into it during that GC are themselves
// condemned allocations, so it wouldn't make sense to use this value to calculate the FL efficiency, since at this
// point the FL hasn't been built yet.
generation_condemned_allocated (gen) = 0;

if (settings.concurrent)
{
// For BGC we could have non-zero values due to gen1 FGCs. We reset all 3 allocs to start anew.
generation_free_list_allocated (gen) = 0;
generation_end_seg_allocated (gen) = 0;
}
else
{
assert (generation_free_list_allocated (gen) == 0);
assert (generation_end_seg_allocated (gen) == 0);
}

// make sure the subtraction below doesn't overflow
if (dd_fragmentation (dd) <= total_gen_size)
dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);
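
Stepping outside the diff for a moment: a minimal sketch (hypothetical type and method names, not part of this change) of the counter lifecycle that the reset block above maintains. condemned_allocated is always cleared at the end of the GC, while the other two plan-allocation counters are cleared only for a concurrent (BGC) collection, where gen1 FGCs may have left them non-zero.

#include <cstdint>
#include <cassert>

struct plan_alloc_counters
{
    uint64_t free_list_allocated = 0;
    uint64_t end_seg_allocated   = 0;
    uint64_t condemned_allocated = 0;

    // Mirrors the reset block above: condemned_allocated is always zeroed so it
    // doesn't skew the next free list efficiency calculation; for a blocking GC
    // the other two counters are expected to already be zero at this point.
    void reset_at_end_of_gc (bool concurrent)
    {
        condemned_allocated = 0;
        if (concurrent)
        {
            free_list_allocated = 0;
            end_seg_allocated = 0;
        }
        else
        {
            assert (free_list_allocated == 0);
            assert (end_seg_allocated == 0);
        }
    }
};

int main ()
{
    plan_alloc_counters c;
    c.condemned_allocated = 1 << 20;   // pretend 1 MB was condemned-allocated this GC
    c.reset_at_end_of_gc (/* concurrent */ false);
    assert (c.condemned_allocated == 0);
    return 0;
}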
35 changes: 9 additions & 26 deletions src/coreclr/gc/gcpriv.h
@@ -2110,6 +2110,9 @@ class gc_heap
PER_HEAP_METHOD void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
PER_HEAP_METHOD void walk_survivors_for_uoh (void* profiling_context, record_surv_fn fn, int gen_number);

PER_HEAP_ISOLATED_METHOD size_t generation_allocator_efficiency_percent (generation* inst);
PER_HEAP_ISOLATED_METHOD size_t generation_unusable_fragmentation (generation* inst, int hn);

PER_HEAP_METHOD int generation_to_condemn (int n,
BOOL* blocking_collection_p,
BOOL* elevation_requested_p,
@@ -5825,6 +5828,12 @@ size_t& generation_sweep_allocated (generation* inst)
{
return inst->sweep_allocated;
}
// These are the allocations we did while doing planning; we use them to calculate free list efficiency.
inline
size_t generation_total_plan_allocated (generation* inst)
{
return (inst->free_list_allocated + inst->end_seg_allocated + inst->condemned_allocated);
}
#ifdef DOUBLY_LINKED_FL
inline
BOOL& generation_set_bgc_mark_bit_p (generation* inst)
@@ -5855,32 +5864,6 @@ size_t& generation_allocated_since_last_pin (generation* inst)
}
#endif //FREE_USAGE_STATS

// Return the percentage of efficiency (between 0 and 100) of the allocator.
inline
size_t generation_allocator_efficiency_percent (generation* inst)
{
// Use integer division to prevent potential floating point exception.
// FPE may occur if we use floating point division because of speculative execution.
uint64_t free_obj_space = generation_free_obj_space (inst);
uint64_t free_list_allocated = generation_free_list_allocated (inst);
if ((free_list_allocated + free_obj_space) == 0)
return 0;
return (size_t)((100 * free_list_allocated) / (free_list_allocated + free_obj_space));
}

inline
size_t generation_unusable_fragmentation (generation* inst)
{
// Use integer division to prevent potential floating point exception.
// FPE may occur if we use floating point division because of speculative execution.
uint64_t free_obj_space = generation_free_obj_space (inst);
uint64_t free_list_allocated = generation_free_list_allocated (inst);
uint64_t free_list_space = generation_free_list_space (inst);
if ((free_list_allocated + free_obj_space) == 0)
return 0;
return (size_t)(free_obj_space + (free_obj_space * free_list_space) / (free_list_allocated + free_obj_space));
}

#define plug_skew sizeof(ObjHeader)
// We always use USE_PADDING_TAIL when fitting so items on the free list should be
// twice the min_obj_size.