diff --git a/src/coreclr/System.Private.CoreLib/src/ILLink/ILLink.Descriptors.Shared.xml b/src/coreclr/System.Private.CoreLib/src/ILLink/ILLink.Descriptors.Shared.xml
index e3682b253e3a7e..5bf4d422a76ef9 100644
--- a/src/coreclr/System.Private.CoreLib/src/ILLink/ILLink.Descriptors.Shared.xml
+++ b/src/coreclr/System.Private.CoreLib/src/ILLink/ILLink.Descriptors.Shared.xml
@@ -4,6 +4,7 @@
+
diff --git a/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
index 7504b884a22905..3bd17b2e1bc197 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
@@ -789,5 +789,44 @@ internal enum GCConfigurationType
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "GCInterface_EnumerateConfigurationValues")]
internal static unsafe partial void _EnumerateConfigurationValues(void* configurationDictionary, delegate* unmanaged callback);
+
+ private static int _RefreshMemoryLimit()
+ {
+ ulong heapHardLimit = (AppContext.GetData("GCHeapHardLimit") as ulong?) ?? ulong.MaxValue;
+ ulong heapHardLimitPercent = (AppContext.GetData("GCHeapHardLimitPercent") as ulong?) ?? ulong.MaxValue;
+ ulong heapHardLimitSOH = (AppContext.GetData("GCHeapHardLimitSOH") as ulong?) ?? ulong.MaxValue;
+ ulong heapHardLimitLOH = (AppContext.GetData("GCHeapHardLimitLOH") as ulong?) ?? ulong.MaxValue;
+ ulong heapHardLimitPOH = (AppContext.GetData("GCHeapHardLimitPOH") as ulong?) ?? ulong.MaxValue;
+ ulong heapHardLimitSOHPercent = (AppContext.GetData("GCHeapHardLimitSOHPercent") as ulong?) ?? ulong.MaxValue;
+ ulong heapHardLimitLOHPercent = (AppContext.GetData("GCHeapHardLimitLOHPercent") as ulong?) ?? ulong.MaxValue;
+ ulong heapHardLimitPOHPercent = (AppContext.GetData("GCHeapHardLimitPOHPercent") as ulong?) ?? ulong.MaxValue;
+ GCHeapHardLimitInfo heapHardLimitInfo = new GCHeapHardLimitInfo
+ {
+ HeapHardLimit = heapHardLimit,
+ HeapHardLimitPercent = heapHardLimitPercent,
+ HeapHardLimitSOH = heapHardLimitSOH,
+ HeapHardLimitLOH = heapHardLimitLOH,
+ HeapHardLimitPOH = heapHardLimitPOH,
+ HeapHardLimitSOHPercent = heapHardLimitSOHPercent,
+ HeapHardLimitLOHPercent = heapHardLimitLOHPercent,
+ HeapHardLimitPOHPercent = heapHardLimitPOHPercent,
+ };
+ return RefreshMemoryLimit(heapHardLimitInfo);
+ }
+
+ [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "GCInterface_RefreshMemoryLimit")]
+ internal static partial int RefreshMemoryLimit(GCHeapHardLimitInfo heapHardLimitInfo);
+
+ internal struct GCHeapHardLimitInfo
+ {
+ internal ulong HeapHardLimit;
+ internal ulong HeapHardLimitPercent;
+ internal ulong HeapHardLimitSOH;
+ internal ulong HeapHardLimitLOH;
+ internal ulong HeapHardLimitPOH;
+ internal ulong HeapHardLimitSOHPercent;
+ internal ulong HeapHardLimitLOHPercent;
+ internal ulong HeapHardLimitPOHPercent;
+ }
}
}
diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp
index 358f803887e35f..8857f6528f90b7 100644
--- a/src/coreclr/gc/gc.cpp
+++ b/src/coreclr/gc/gc.cpp
@@ -2250,6 +2250,8 @@ uint64_t gc_heap::gc_last_ephemeral_decommit_time = 0;
CLRCriticalSection gc_heap::check_commit_cs;
+CLRCriticalSection gc_heap::decommit_lock;
+
size_t gc_heap::current_total_committed = 0;
size_t gc_heap::committed_by_oh[recorded_committed_bucket_counts];
@@ -2277,13 +2279,13 @@ int gc_heap::high_mem_percent_from_config = 0;
bool gc_heap::use_frozen_segments_p = false;
-bool gc_heap::hard_limit_config_p = false;
-
#ifdef FEATURE_LOH_COMPACTION
gc_heap::etw_loh_compact_info* gc_heap::loh_compact_info;
#endif //FEATURE_LOH_COMPACTION
#endif //FEATURE_EVENT_TRACE
+bool gc_heap::hard_limit_config_p = false;
+
#if defined(SHORT_PLUGS) && !defined(USE_REGIONS)
double gc_heap::short_plugs_pad_ratio = 0;
#endif //SHORT_PLUGS && !USE_REGIONS
@@ -3770,7 +3772,7 @@ void region_allocator::print_map (const char* msg)
uint32_t total_regions = (uint32_t)((global_region_end - global_region_start) / region_alignment);
- dprintf (REGIONS_LOG, ("[%s]-----end printing----[%d total, left used %zd (free: %d), right used %zd (free: %d)]\n", heap_type, total_regions,
+ dprintf (REGIONS_LOG, ("[%s]-----end printing----[%d total, left used %zd (free: %d), right used %zd (free: %d)]\n", heap_type, total_regions,
(region_map_left_end - region_map_left_start), num_left_used_free_units, (region_map_right_end - region_map_right_start), num_right_used_free_units));
#endif //_DEBUG
}
@@ -4049,7 +4051,7 @@ void region_allocator::delete_region_impl (uint8_t* region_start)
assert (free_index >= region_map_right_start);
num_right_used_free_units += free_block_size;
}
-
+
if ((current_index != region_map_left_start) && (current_index != region_map_right_start))
{
uint32_t previous_val = *(current_index - 1);
@@ -6827,7 +6829,9 @@ void gc_heap::gc_thread_function ()
uint32_t wait_result = gc_heap::ee_suspend_event.Wait(gradual_decommit_in_progress_p ? DECOMMIT_TIME_STEP_MILLISECONDS : INFINITE, FALSE);
if (wait_result == WAIT_TIMEOUT)
{
+ decommit_lock.Enter();
gradual_decommit_in_progress_p = decommit_step (DECOMMIT_TIME_STEP_MILLISECONDS);
+ decommit_lock.Leave();
continue;
}
@@ -6986,7 +6990,9 @@ bool gc_heap::virtual_commit (void* address, size_t size, int bucket, int h_numb
dprintf(3, ("commit-accounting: commit in %d [%p, %p) for heap %d", bucket, address, ((uint8_t*)address + size), h_number));
+#ifndef COMMITTED_BYTES_SHADOW
if (heap_hard_limit)
+#endif //!COMMITTED_BYTES_SHADOW
{
check_commit_cs.Enter();
bool exceeded_p = false;
@@ -7009,6 +7015,11 @@ bool gc_heap::virtual_commit (void* address, size_t size, int bucket, int h_numb
exceeded_p = true;
}
}
+#ifdef COMMITTED_BYTES_SHADOW
+ if (!heap_hard_limit) {
+ exceeded_p = false;
+ }
+#endif //COMMITTED_BYTES_SHADOW
if (!exceeded_p)
{
@@ -7087,7 +7098,10 @@ bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_nu
dprintf(3, ("commit-accounting: decommit in %d [%p, %p) for heap %d", bucket, address, ((uint8_t*)address + size), h_number));
- if (decommit_succeeded_p && heap_hard_limit)
+ if (decommit_succeeded_p)
+#ifndef COMMITTED_BYTES_SHADOW
+ if (heap_hard_limit)
+#endif //!COMMITTED_BYTES_SHADOW
{
check_commit_cs.Enter();
assert (committed_by_oh[bucket] >= size);
@@ -8859,87 +8873,99 @@ bool gc_heap::on_used_changed (uint8_t* new_used)
return true;
}
-bool gc_heap::inplace_commit_card_table (uint8_t* from, uint8_t* to)
+bool gc_heap::get_card_table_commit_layout (uint8_t* from, uint8_t* to,
+ uint8_t* commit_begins[total_bookkeeping_elements],
+ size_t commit_sizes[total_bookkeeping_elements],
+ size_t new_sizes[total_bookkeeping_elements])
{
- dprintf (REGIONS_LOG, ("inplace_commit_card_table(%p, %p), size = %zd", from, to, to - from));
-
uint8_t* start = g_gc_lowest_address;
uint8_t* end = g_gc_highest_address;
- uint8_t* commit_begins[total_bookkeeping_elements];
- size_t commit_sizes[total_bookkeeping_elements];
- size_t new_sizes[total_bookkeeping_elements];
-
bool initial_commit = (from == start);
bool additional_commit = !initial_commit && (to > from);
- if (initial_commit || additional_commit)
+ if (!initial_commit && !additional_commit)
{
+ return false;
+ }
#ifdef DEBUG
- size_t offsets[total_bookkeeping_elements + 1];
- get_card_table_element_layout(start, end, offsets);
+ size_t offsets[total_bookkeeping_elements + 1];
+ get_card_table_element_layout(start, end, offsets);
- dprintf (REGIONS_LOG, ("layout"));
- for (int i = card_table_element; i <= total_bookkeeping_elements; i++)
- {
- assert (offsets[i] == card_table_element_layout[i]);
- dprintf (REGIONS_LOG, ("%zd", card_table_element_layout[i]));
- }
-#endif
- get_card_table_element_sizes (start, to, new_sizes);
+ dprintf (REGIONS_LOG, ("layout"));
+ for (int i = card_table_element; i <= total_bookkeeping_elements; i++)
+ {
+ assert (offsets[i] == card_table_element_layout[i]);
+ dprintf (REGIONS_LOG, ("%zd", card_table_element_layout[i]));
+ }
+#endif //DEBUG
+ get_card_table_element_sizes (start, to, new_sizes);
#ifdef DEBUG
- dprintf (REGIONS_LOG, ("new_sizes"));
+ dprintf (REGIONS_LOG, ("new_sizes"));
+ for (int i = card_table_element; i < total_bookkeeping_elements; i++)
+ {
+ dprintf (REGIONS_LOG, ("%zd", new_sizes[i]));
+ }
+ if (additional_commit)
+ {
+ size_t current_sizes[total_bookkeeping_elements];
+ get_card_table_element_sizes (start, from, current_sizes);
+ dprintf (REGIONS_LOG, ("old_sizes"));
for (int i = card_table_element; i < total_bookkeeping_elements; i++)
{
- dprintf (REGIONS_LOG, ("%zd", new_sizes[i]));
+ assert (current_sizes[i] == bookkeeping_sizes[i]);
+ dprintf (REGIONS_LOG, ("%zd", bookkeeping_sizes[i]));
}
- if (additional_commit)
+ }
+#endif //DEBUG
+ for (int i = card_table_element; i <= seg_mapping_table_element; i++)
+ {
+ uint8_t* required_begin = nullptr;
+ uint8_t* required_end = nullptr;
+ uint8_t* commit_begin = nullptr;
+ uint8_t* commit_end = nullptr;
+ if (initial_commit)
{
- size_t current_sizes[total_bookkeeping_elements];
- get_card_table_element_sizes (start, from, current_sizes);
- dprintf (REGIONS_LOG, ("old_sizes"));
- for (int i = card_table_element; i < total_bookkeeping_elements; i++)
- {
- assert (current_sizes[i] == bookkeeping_sizes[i]);
- dprintf (REGIONS_LOG, ("%zd", bookkeeping_sizes[i]));
- }
+ required_begin = bookkeeping_start + ((i == card_table_element) ? 0 : card_table_element_layout[i]);
+ required_end = bookkeeping_start + card_table_element_layout[i] + new_sizes[i];
+ commit_begin = align_lower_page(required_begin);
}
-#endif
- for (int i = card_table_element; i <= seg_mapping_table_element; i++)
+ else
{
- uint8_t* required_begin = nullptr;
- uint8_t* required_end = nullptr;
- uint8_t* commit_begin = nullptr;
- uint8_t* commit_end = nullptr;
- if (initial_commit)
- {
- required_begin = bookkeeping_start + ((i == card_table_element) ? 0 : card_table_element_layout[i]);
- required_end = bookkeeping_start + card_table_element_layout[i] + new_sizes[i];
- commit_begin = align_lower_page(required_begin);
- }
- else
- {
- assert (additional_commit);
- required_begin = bookkeeping_start + card_table_element_layout[i] + bookkeeping_sizes[i];
- required_end = required_begin + new_sizes[i] - bookkeeping_sizes[i];
- commit_begin = align_on_page(required_begin);
- }
- assert (required_begin <= required_end);
- commit_end = align_on_page(required_end);
+ assert (additional_commit);
+ required_begin = bookkeeping_start + card_table_element_layout[i] + bookkeeping_sizes[i];
+ required_end = required_begin + new_sizes[i] - bookkeeping_sizes[i];
+ commit_begin = align_on_page(required_begin);
+ }
+ assert (required_begin <= required_end);
+ commit_end = align_on_page(required_end);
- commit_end = min (commit_end, align_lower_page(bookkeeping_start + card_table_element_layout[i + 1]));
- commit_begin = min (commit_begin, commit_end);
- assert (commit_begin <= commit_end);
+ commit_end = min (commit_end, align_lower_page(bookkeeping_start + card_table_element_layout[i + 1]));
+ commit_begin = min (commit_begin, commit_end);
+ assert (commit_begin <= commit_end);
- dprintf (REGIONS_LOG, ("required = [%p, %p), size = %zd", required_begin, required_end, required_end - required_begin));
- dprintf (REGIONS_LOG, ("commit = [%p, %p), size = %zd", commit_begin, commit_end, commit_end - commit_begin));
+ dprintf (REGIONS_LOG, ("required = [%p, %p), size = %zd", required_begin, required_end, required_end - required_begin));
+ dprintf (REGIONS_LOG, ("commit = [%p, %p), size = %zd", commit_begin, commit_end, commit_end - commit_begin));
- commit_begins[i] = commit_begin;
- commit_sizes[i] = (size_t)(commit_end - commit_begin);
- }
- dprintf (REGIONS_LOG, ("---------------------------------------"));
+ commit_begins[i] = commit_begin;
+ commit_sizes[i] = (size_t)(commit_end - commit_begin);
}
- else
+ dprintf (REGIONS_LOG, ("---------------------------------------"));
+ return true;
+}
+
+bool gc_heap::inplace_commit_card_table (uint8_t* from, uint8_t* to)
+{
+ dprintf (REGIONS_LOG, ("inplace_commit_card_table(%p, %p), size = %zd", from, to, to - from));
+
+ uint8_t* start = g_gc_lowest_address;
+ uint8_t* end = g_gc_highest_address;
+
+ uint8_t* commit_begins[total_bookkeeping_elements];
+ size_t commit_sizes[total_bookkeeping_elements];
+ size_t new_sizes[total_bookkeeping_elements];
+
+ if (!get_card_table_commit_layout(from, to, commit_begins, commit_sizes, new_sizes))
{
return true;
}
@@ -11370,7 +11396,9 @@ void gc_heap::return_free_region (heap_segment* region)
{
gc_oh_num oh = heap_segment_oh (region);
dprintf(3, ("commit-accounting: from %d to free [%p, %p) for heap %d", oh, get_region_start (region), heap_segment_committed (region), heap_number));
+#ifndef COMMITTED_BYTES_SHADOW
if (heap_hard_limit)
+#endif //!COMMITTED_BYTES_SHADOW
{
size_t committed = heap_segment_committed (region) - get_region_start (region);
if (committed > 0)
@@ -11456,7 +11484,9 @@ heap_segment* gc_heap::get_free_region (int gen_number, size_t size)
gc_oh_num oh = gen_to_oh (gen_number);
dprintf(3, ("commit-accounting: from free to %d [%p, %p) for heap %d", oh, get_region_start (region), heap_segment_committed (region), heap_number));
+#ifndef COMMITTED_BYTES_SHADOW
if (heap_hard_limit)
+#endif //!COMMITTED_BYTES_SHADOW
{
size_t committed = heap_segment_committed (region) - get_region_start (region);
if (committed > 0)
@@ -13710,10 +13740,13 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size,
int number_of_heaps = 1;
#endif //MULTIPLE_HEAPS
+#ifndef COMMITTED_BYTES_SHADOW
if (heap_hard_limit)
+#endif //!COMMITTED_BYTES_SHADOW
{
check_commit_cs.Initialize();
}
+ decommit_lock.Initialize();
#ifdef USE_REGIONS
if (regions_range)
@@ -23254,7 +23287,9 @@ heap_segment* gc_heap::unlink_first_rw_region (int gen_idx)
dprintf (REGIONS_LOG, ("unlink_first_rw_region on heap: %d gen: %d region: %p", heap_number, gen_idx, heap_segment_mem (region)));
#if defined(_DEBUG) && defined(HOST_64BIT)
+#ifndef COMMITTED_BYTES_SHADOW
if (heap_hard_limit)
+#endif //!COMMITTED_BYTES_SHADOW
{
int old_oh = heap_segment_oh (region);
int old_heap = heap_segment_heap (region)->heap_number;
@@ -23291,7 +23326,9 @@ void gc_heap::thread_rw_region_front (int gen_idx, heap_segment* region)
dprintf (REGIONS_LOG, ("thread_rw_region_front on heap: %d gen: %d region: %p", heap_number, gen_idx, heap_segment_mem (region)));
#if defined(_DEBUG) && defined(HOST_64BIT)
+#ifndef COMMITTED_BYTES_SHADOW
if (heap_hard_limit)
+#endif //!COMMITTED_BYTES_SHADOW
{
int new_oh = gen_to_oh (gen_idx);
int new_heap = this->heap_number;
@@ -27995,7 +28032,7 @@ uint8_t* gc_heap::loh_allocate_in_condemned (size_t size)
retry:
{
heap_segment* seg = generation_allocation_segment (gen);
- if (!(loh_size_fit_p (size, generation_allocation_pointer (gen), generation_allocation_limit (gen),
+ if (!(loh_size_fit_p (size, generation_allocation_pointer (gen), generation_allocation_limit (gen),
(generation_allocation_limit (gen) == heap_segment_plan_allocated (seg)))))
{
if ((!(loh_pinned_plug_que_empty_p()) &&
@@ -45292,128 +45329,12 @@ HRESULT GCHeap::Initialize()
{
gc_heap::total_physical_mem = GCToOSInterface::GetPhysicalMemoryLimit (&gc_heap::is_restricted_physical_mem);
}
- gc_heap::heap_hard_limit_oh[soh] = 0;
-#ifdef HOST_64BIT
- gc_heap::heap_hard_limit = (size_t)GCConfig::GetGCHeapHardLimit();
- gc_heap::heap_hard_limit_oh[soh] = (size_t)GCConfig::GetGCHeapHardLimitSOH();
- gc_heap::heap_hard_limit_oh[loh] = (size_t)GCConfig::GetGCHeapHardLimitLOH();
- gc_heap::heap_hard_limit_oh[poh] = (size_t)GCConfig::GetGCHeapHardLimitPOH();
-
memset (gc_heap::committed_by_oh, 0, sizeof (gc_heap::committed_by_oh));
-
- gc_heap::use_large_pages_p = GCConfig::GetGCLargePages();
-
- if (gc_heap::heap_hard_limit_oh[soh] || gc_heap::heap_hard_limit_oh[loh] || gc_heap::heap_hard_limit_oh[poh])
- {
- if (!gc_heap::heap_hard_limit_oh[soh])
- {
- return CLR_E_GC_BAD_HARD_LIMIT;
- }
- if (!gc_heap::heap_hard_limit_oh[loh])
- {
- return CLR_E_GC_BAD_HARD_LIMIT;
- }
- gc_heap::heap_hard_limit = gc_heap::heap_hard_limit_oh[soh] +
- gc_heap::heap_hard_limit_oh[loh] + gc_heap::heap_hard_limit_oh[poh];
- }
- else
- {
- uint32_t percent_of_mem_soh = (uint32_t)GCConfig::GetGCHeapHardLimitSOHPercent();
- uint32_t percent_of_mem_loh = (uint32_t)GCConfig::GetGCHeapHardLimitLOHPercent();
- uint32_t percent_of_mem_poh = (uint32_t)GCConfig::GetGCHeapHardLimitPOHPercent();
- if (percent_of_mem_soh || percent_of_mem_loh || percent_of_mem_poh)
- {
- if ((percent_of_mem_soh <= 0) || (percent_of_mem_soh >= 100))
- {
- return CLR_E_GC_BAD_HARD_LIMIT;
- }
- if ((percent_of_mem_loh <= 0) || (percent_of_mem_loh >= 100))
- {
- return CLR_E_GC_BAD_HARD_LIMIT;
- }
- else if ((percent_of_mem_poh < 0) || (percent_of_mem_poh >= 100))
- {
- return CLR_E_GC_BAD_HARD_LIMIT;
- }
- if ((percent_of_mem_soh + percent_of_mem_loh + percent_of_mem_poh) >= 100)
- {
- return CLR_E_GC_BAD_HARD_LIMIT;
- }
- gc_heap::heap_hard_limit_oh[soh] = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem_soh / (uint64_t)100);
- gc_heap::heap_hard_limit_oh[loh] = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem_loh / (uint64_t)100);
- gc_heap::heap_hard_limit_oh[poh] = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem_poh / (uint64_t)100);
- gc_heap::heap_hard_limit = gc_heap::heap_hard_limit_oh[soh] +
- gc_heap::heap_hard_limit_oh[loh] + gc_heap::heap_hard_limit_oh[poh];
- }
- }
-
- if (gc_heap::heap_hard_limit_oh[soh] && (!gc_heap::heap_hard_limit_oh[poh]) && (!gc_heap::use_large_pages_p))
+ if (!gc_heap::compute_hard_limit())
{
return CLR_E_GC_BAD_HARD_LIMIT;
}
- if (!(gc_heap::heap_hard_limit))
- {
- uint32_t percent_of_mem = (uint32_t)GCConfig::GetGCHeapHardLimitPercent();
- if ((percent_of_mem > 0) && (percent_of_mem < 100))
- {
- gc_heap::heap_hard_limit = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem / (uint64_t)100);
- }
- }
-
- // If the hard limit is specified, the user is saying even if the process is already
- // running in a container, use this limit for the GC heap.
- if (gc_heap::heap_hard_limit)
- {
-#ifdef FEATURE_EVENT_TRACE
- gc_heap::hard_limit_config_p = true;
-#endif //FEATURE_EVENT_TRACE
- }
- else
- {
- if (gc_heap::is_restricted_physical_mem)
- {
- uint64_t physical_mem_for_gc = gc_heap::total_physical_mem * (uint64_t)75 / (uint64_t)100;
- gc_heap::heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc);
- }
- }
-
- if ((!gc_heap::heap_hard_limit) && gc_heap::use_large_pages_p)
- {
- return CLR_E_GC_LARGE_PAGE_MISSING_HARD_LIMIT;
- }
-
-#ifdef USE_REGIONS
- gc_heap::regions_range = (size_t)GCConfig::GetGCRegionRange();
- if (gc_heap::regions_range == 0)
- {
- if (gc_heap::heap_hard_limit)
- {
- if (gc_heap::heap_hard_limit_oh[soh])
- {
- gc_heap::regions_range = gc_heap::heap_hard_limit;
- }
- else
- {
- // We use this calculation because it's close to what we used for segments.
- gc_heap::regions_range = ((gc_heap::use_large_pages_p) ? (2 * gc_heap::heap_hard_limit)
- : (5 * gc_heap::heap_hard_limit));
- }
- }
- else
- {
- // If no hard_limit is configured the reservation size is max of 256gb or 2x physical limit
- gc_heap::regions_range = max(((size_t)256 * 1024 * 1024 * 1024), (size_t)(2 * gc_heap::total_physical_mem));
- }
- gc_heap::regions_range = align_on_page(gc_heap::regions_range);
- }
-
- GCConfig::SetGCRegionRange(gc_heap::regions_range);
-#endif //USE_REGIONS
-
-#endif //HOST_64BIT
- GCConfig::SetGCLargePages(gc_heap::use_large_pages_p);
-
uint32_t nhp = 1;
uint32_t nhp_from_config = 0;
@@ -45478,32 +45399,51 @@ HRESULT GCHeap::Initialize()
}
#endif //!MULTIPLE_HEAPS
- size_t seg_size = 0;
- size_t large_seg_size = 0;
- size_t pin_seg_size = 0;
+ if (gc_heap::heap_hard_limit)
+ {
+ gc_heap::hard_limit_config_p = true;
+ }
+
size_t seg_size_from_config = 0;
+ bool compute_memory_settings_succeed = gc_heap::compute_memory_settings(true, nhp, nhp_from_config, seg_size_from_config, 0);
+ assert (compute_memory_settings_succeed);
- if (gc_heap::heap_hard_limit)
+ if ((!gc_heap::heap_hard_limit) && gc_heap::use_large_pages_p)
{
- if (!nhp_from_config)
+ return CLR_E_GC_LARGE_PAGE_MISSING_HARD_LIMIT;
+ }
+ GCConfig::SetGCLargePages(gc_heap::use_large_pages_p);
+
+#ifdef USE_REGIONS
+ gc_heap::regions_range = (size_t)GCConfig::GetGCRegionRange();
+ if (gc_heap::regions_range == 0)
+ {
+ if (gc_heap::heap_hard_limit)
{
- nhp = gc_heap::adjust_heaps_hard_limit (nhp);
+ if (gc_heap::heap_hard_limit_oh[soh])
+ {
+ gc_heap::regions_range = gc_heap::heap_hard_limit;
+ }
+ else
+ {
+ // We use this calculation because it's close to what we used for segments.
+ gc_heap::regions_range = ((gc_heap::use_large_pages_p) ? (2 * gc_heap::heap_hard_limit)
+ : (5 * gc_heap::heap_hard_limit));
+ }
}
-
- seg_size_from_config = (size_t)GCConfig::GetSegmentSize();
- if (seg_size_from_config)
+ else
{
- seg_size_from_config = gc_heap::adjust_segment_size_hard_limit_va (seg_size_from_config);
+ // If no hard_limit is configured the reservation size is max of 256gb or 2x physical limit
+ gc_heap::regions_range = max(((size_t)256 * 1024 * 1024 * 1024), (size_t)(2 * gc_heap::total_physical_mem));
}
-
- size_t limit_to_check = (gc_heap::heap_hard_limit_oh[soh] ? gc_heap::heap_hard_limit_oh[soh] : gc_heap::heap_hard_limit);
- gc_heap::soh_segment_size = max (gc_heap::adjust_segment_size_hard_limit (limit_to_check, nhp), seg_size_from_config);
- }
- else
- {
- gc_heap::soh_segment_size = get_valid_segment_size();
+ gc_heap::regions_range = align_on_page(gc_heap::regions_range);
}
+ GCConfig::SetGCRegionRange(gc_heap::regions_range);
+#endif //USE_REGIONS
+ size_t seg_size = 0;
+ size_t large_seg_size = 0;
+ size_t pin_seg_size = 0;
seg_size = gc_heap::soh_segment_size;
#ifndef USE_REGIONS
@@ -45551,8 +45491,8 @@ HRESULT GCHeap::Initialize()
gc_heap::enable_special_regions_p = (bool)GCConfig::GetGCEnableSpecialRegions();
size_t gc_region_size = (size_t)GCConfig::GetGCRegionSize();
- // Constraining the size of region size to be < 2 GB.
- if (gc_region_size >= MAX_REGION_SIZE)
+ // Constraining the size of region size to be < 2 GB.
+ if (gc_region_size >= MAX_REGION_SIZE)
{
return CLR_E_GC_BAD_REGION_SIZE;
}
@@ -45605,41 +45545,6 @@ HRESULT GCHeap::Initialize()
if (hr != S_OK)
return hr;
- gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100;
-#ifndef MULTIPLE_HEAPS
- gc_heap::mem_one_percent /= g_num_processors;
-#endif //!MULTIPLE_HEAPS
-
- uint32_t highmem_th_from_config = (uint32_t)GCConfig::GetGCHighMemPercent();
- if (highmem_th_from_config)
- {
- gc_heap::high_memory_load_th = min (99, highmem_th_from_config);
- gc_heap::v_high_memory_load_th = min (99, (highmem_th_from_config + 7));
-#ifdef FEATURE_EVENT_TRACE
- gc_heap::high_mem_percent_from_config = highmem_th_from_config;
-#endif //FEATURE_EVENT_TRACE
- }
- else
- {
- // We should only use this if we are in the "many process" mode which really is only applicable
- // to very powerful machines - before that's implemented, temporarily I am only enabling this for 80GB+ memory.
- // For now I am using an estimate to calculate these numbers but this should really be obtained
- // programmatically going forward.
- // I am assuming 47 processes using WKS GC and 3 using SVR GC.
- // I am assuming 3 in part due to the "very high memory load" is 97%.
- int available_mem_th = 10;
- if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
- {
- int adjusted_available_mem_th = 3 + (int)((float)47 / (float)g_num_processors);
- available_mem_th = min (available_mem_th, adjusted_available_mem_th);
- }
-
- gc_heap::high_memory_load_th = 100 - available_mem_th;
- gc_heap::v_high_memory_load_th = 97;
- }
-
- gc_heap::m_high_memory_load_th = min ((gc_heap::high_memory_load_th + 5), gc_heap::v_high_memory_load_th);
-
gc_heap::pm_stress_on = (GCConfig::GetGCProvModeStress() != 0);
#if defined(HOST_64BIT)
@@ -47500,6 +47405,12 @@ GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
{
dprintf (2, ("triggered a GC!"));
+#ifdef COMMITTED_BYTES_SHADOW
+ // This stress the refresh memory limit work by
+ // refreshing all the time when a GC happens.
+ GCHeap::RefreshMemoryLimit();
+#endif //COMMITTED_BYTES_SHADOW
+
#ifdef MULTIPLE_HEAPS
gc_heap* hpt = gc_heap::g_heaps[0];
#else
@@ -49377,3 +49288,421 @@ void PopulateDacVars(GcDacVars *gcDacVars)
gcDacVars->generation_field_offsets = reinterpret_cast(&generation_field_offsets);
gcDacVars->bookkeeping_start = &gc_heap::bookkeeping_start;
}
+
+int GCHeap::RefreshMemoryLimit()
+{
+ return gc_heap::refresh_memory_limit();
+}
+
+bool gc_heap::compute_hard_limit()
+{
+ heap_hard_limit_oh[soh] = 0;
+#ifdef HOST_64BIT
+ heap_hard_limit = (size_t)GCConfig::GetGCHeapHardLimit();
+ heap_hard_limit_oh[soh] = (size_t)GCConfig::GetGCHeapHardLimitSOH();
+ heap_hard_limit_oh[loh] = (size_t)GCConfig::GetGCHeapHardLimitLOH();
+ heap_hard_limit_oh[poh] = (size_t)GCConfig::GetGCHeapHardLimitPOH();
+
+ use_large_pages_p = GCConfig::GetGCLargePages();
+
+ if (heap_hard_limit_oh[soh] || heap_hard_limit_oh[loh] || heap_hard_limit_oh[poh])
+ {
+ if (!heap_hard_limit_oh[soh])
+ {
+ return false;
+ }
+ if (!heap_hard_limit_oh[loh])
+ {
+ return false;
+ }
+ heap_hard_limit = heap_hard_limit_oh[soh] +
+ heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh];
+ }
+ else
+ {
+ uint32_t percent_of_mem_soh = (uint32_t)GCConfig::GetGCHeapHardLimitSOHPercent();
+ uint32_t percent_of_mem_loh = (uint32_t)GCConfig::GetGCHeapHardLimitLOHPercent();
+ uint32_t percent_of_mem_poh = (uint32_t)GCConfig::GetGCHeapHardLimitPOHPercent();
+ if (percent_of_mem_soh || percent_of_mem_loh || percent_of_mem_poh)
+ {
+ if ((percent_of_mem_soh <= 0) || (percent_of_mem_soh >= 100))
+ {
+ return false;
+ }
+ if ((percent_of_mem_loh <= 0) || (percent_of_mem_loh >= 100))
+ {
+ return false;
+ }
+ else if ((percent_of_mem_poh < 0) || (percent_of_mem_poh >= 100))
+ {
+ return false;
+ }
+ if ((percent_of_mem_soh + percent_of_mem_loh + percent_of_mem_poh) >= 100)
+ {
+ return false;
+ }
+ heap_hard_limit_oh[soh] = (size_t)(total_physical_mem * (uint64_t)percent_of_mem_soh / (uint64_t)100);
+ heap_hard_limit_oh[loh] = (size_t)(total_physical_mem * (uint64_t)percent_of_mem_loh / (uint64_t)100);
+ heap_hard_limit_oh[poh] = (size_t)(total_physical_mem * (uint64_t)percent_of_mem_poh / (uint64_t)100);
+ heap_hard_limit = heap_hard_limit_oh[soh] +
+ heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh];
+ }
+ }
+
+ if (heap_hard_limit_oh[soh] && (!heap_hard_limit_oh[poh]) && (!use_large_pages_p))
+ {
+ return false;
+ }
+
+ if (!(heap_hard_limit))
+ {
+ uint32_t percent_of_mem = (uint32_t)GCConfig::GetGCHeapHardLimitPercent();
+ if ((percent_of_mem > 0) && (percent_of_mem < 100))
+ {
+ heap_hard_limit = (size_t)(total_physical_mem * (uint64_t)percent_of_mem / (uint64_t)100);
+ }
+ }
+#endif //HOST_64BIT
+ return true;
+}
+
+bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uint32_t nhp_from_config, size_t& seg_size_from_config, size_t new_current_total_committed)
+{
+ // If the hard limit is specified, the user is saying even if the process is already
+ // running in a container, use this limit for the GC heap.
+ if (!hard_limit_config_p)
+ {
+ if (is_restricted_physical_mem)
+ {
+ uint64_t physical_mem_for_gc = total_physical_mem * (uint64_t)75 / (uint64_t)100;
+#ifndef USE_REGIONS
+ // Establishing a heap_hard_limit when we don't already have one requires
+ // us to figure out how many bytes are committed for what purposes. This is going
+ // to be very tedious for segments and therefore we chose not to support this scenario.
+ if (is_initialization)
+#endif //USE_REGIONS
+ {
+ heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc);
+ }
+ }
+ }
+
+ if (heap_hard_limit && (heap_hard_limit < new_current_total_committed))
+ {
+ return false;
+ }
+
+#ifdef USE_REGIONS
+ {
+#else
+ // Changing segment size in the hard limit case for segments is not supported
+ if (is_initialization)
+ {
+#endif //USE_REGIONS
+ if (heap_hard_limit)
+ {
+ if (is_initialization && (!nhp_from_config))
+ {
+ nhp = adjust_heaps_hard_limit (nhp);
+ }
+
+ seg_size_from_config = (size_t)GCConfig::GetSegmentSize();
+ if (seg_size_from_config)
+ {
+ seg_size_from_config = adjust_segment_size_hard_limit_va (seg_size_from_config);
+ }
+
+ size_t limit_to_check = (heap_hard_limit_oh[soh] ? heap_hard_limit_oh[soh] : heap_hard_limit);
+ soh_segment_size = max (adjust_segment_size_hard_limit (limit_to_check, nhp), seg_size_from_config);
+ }
+ else
+ {
+ soh_segment_size = get_valid_segment_size();
+ }
+ }
+
+ mem_one_percent = total_physical_mem / 100;
+#ifndef MULTIPLE_HEAPS
+ mem_one_percent /= g_num_processors;
+#endif //!MULTIPLE_HEAPS
+
+ uint32_t highmem_th_from_config = (uint32_t)GCConfig::GetGCHighMemPercent();
+ if (highmem_th_from_config)
+ {
+ high_memory_load_th = min (99, highmem_th_from_config);
+ v_high_memory_load_th = min (99, (highmem_th_from_config + 7));
+#ifdef FEATURE_EVENT_TRACE
+ high_mem_percent_from_config = highmem_th_from_config;
+#endif //FEATURE_EVENT_TRACE
+ }
+ else
+ {
+ // We should only use this if we are in the "many process" mode which really is only applicable
+ // to very powerful machines - before that's implemented, temporarily I am only enabling this for 80GB+ memory.
+ // For now I am using an estimate to calculate these numbers but this should really be obtained
+ // programmatically going forward.
+ // I am assuming 47 processes using WKS GC and 3 using SVR GC.
+ // I am assuming 3 in part due to the "very high memory load" is 97%.
+ int available_mem_th = 10;
+ if (total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
+ {
+ int adjusted_available_mem_th = 3 + (int)((float)47 / (float)g_num_processors);
+ available_mem_th = min (available_mem_th, adjusted_available_mem_th);
+ }
+
+ high_memory_load_th = 100 - available_mem_th;
+ v_high_memory_load_th = 97;
+ }
+
+ m_high_memory_load_th = min ((high_memory_load_th + 5), v_high_memory_load_th);
+
+ return true;
+}
+
+int gc_heap::refresh_memory_limit()
+{
+ int status = REFRESH_MEMORY_SUCCEED;
+
+ if (GCConfig::GetGCTotalPhysicalMemory() != 0)
+ {
+ return status;
+ }
+
+ GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
+
+#ifdef USE_REGIONS
+ decommit_lock.Enter();
+ size_t total_committed = 0;
+ size_t committed_bookkeeping = 0;
+ size_t new_current_total_committed;
+ size_t new_current_total_committed_bookkeeping;
+ size_t new_committed_by_oh[recorded_committed_bucket_counts];
+
+ // Accounting for the bytes committed for the regions
+ for (int oh = soh; oh < total_oh_count; oh++)
+ {
+ int start_generation = (oh == 0) ? 0 : oh + max_generation;
+ int end_generation = oh + max_generation;
+ size_t total_committed_per_oh = 0;
+#ifdef MULTIPLE_HEAPS
+ for (int h = 0; h < n_heaps; h++)
+ {
+ gc_heap* heap = g_heaps[h];
+#else
+ {
+ gc_heap* heap = pGenGCHeap;
+#endif //MULTIPLE_HEAPS
+ size_t total_committed_per_heap = 0;
+ for (int gen = start_generation; gen <= end_generation; gen++)
+ {
+ heap->accumulate_committed_bytes ( generation_start_segment (heap->generation_of (gen)), total_committed_per_heap, committed_bookkeeping);
+ }
+ if (oh == soh)
+ {
+ heap->accumulate_committed_bytes (heap->freeable_soh_segment, total_committed_per_heap, committed_bookkeeping);
+ }
+ else
+ {
+ heap->accumulate_committed_bytes (heap->freeable_uoh_segment, total_committed_per_heap, committed_bookkeeping, (gc_oh_num)oh);
+ }
+#if defined(MULTIPLE_HEAPS) && defined(_DEBUG)
+ heap->committed_by_oh_per_heap_refresh[oh] = total_committed_per_heap;
+#endif //MULTIPLE_HEAPS && _DEBUG
+ total_committed_per_oh += total_committed_per_heap;
+ }
+ new_committed_by_oh[oh] = total_committed_per_oh;
+ total_committed += total_committed_per_oh;
+ }
+
+ // Accounting for the bytes committed for the free lists
+ size_t committed_free = 0;
+#ifdef MULTIPLE_HEAPS
+ for (int h = 0; h < n_heaps; h++)
+ {
+ gc_heap* heap = g_heaps[h];
+#else
+ {
+ gc_heap* heap = pGenGCHeap;
+#endif //MULTIPLE_HEAPS
+ for (int i = 0; i < count_free_region_kinds; i++)
+ {
+ heap_segment* seg = heap->free_regions[i].get_first_free_region();
+ heap->accumulate_committed_bytes (seg, committed_free, committed_bookkeeping);
+ }
+ }
+ for (int i = 0; i < count_free_region_kinds; i++)
+ {
+ heap_segment* seg = global_regions_to_decommit[i].get_first_free_region();
+#ifdef MULTIPLE_HEAPS
+ gc_heap* heap = g_heaps[0];
+#else
+ gc_heap* heap = nullptr;
+#endif //MULTIPLE_HEAPS
+ heap->accumulate_committed_bytes (seg, committed_free, committed_bookkeeping);
+ }
+ {
+ heap_segment* seg = global_free_huge_regions.get_first_free_region();
+#ifdef MULTIPLE_HEAPS
+ gc_heap* heap = g_heaps[0];
+#else
+ gc_heap* heap = pGenGCHeap;
+#endif //MULTIPLE_HEAPS
+ heap->accumulate_committed_bytes (seg, committed_free, committed_bookkeeping);
+ }
+
+ new_committed_by_oh[recorded_committed_free_bucket] = committed_free;
+ total_committed += committed_free;
+
+ // Accounting for the bytes committed for the book keeping elements
+ uint8_t* commit_begins[total_bookkeeping_elements];
+ size_t commit_sizes[total_bookkeeping_elements];
+ size_t new_sizes[total_bookkeeping_elements];
+ bool get_card_table_commit_layout_result = get_card_table_commit_layout(g_gc_lowest_address, bookkeeping_covered_committed, commit_begins, commit_sizes, new_sizes);
+ assert (get_card_table_commit_layout_result);
+
+ for (int i = card_table_element; i <= seg_mapping_table_element; i++)
+ {
+ // In case background GC is disabled - the software write watch table is still there
+ // but with size 0
+ assert (commit_sizes[i] >= 0);
+ committed_bookkeeping += commit_sizes[i];
+ }
+
+ new_current_total_committed_bookkeeping = committed_bookkeeping;
+ new_committed_by_oh[recorded_committed_bookkeeping_bucket] = committed_bookkeeping;
+ total_committed += committed_bookkeeping;
+ new_current_total_committed = total_committed;
+#endif //USE_REGIONS
+
+ uint32_t nhp_from_config = static_cast(GCConfig::GetHeapCount());
+#ifdef MULTIPLE_HEAPS
+ uint32_t nhp = n_heaps;
+#else
+ uint32_t nhp = 1;
+#endif //MULTIPLE_HEAPS
+ size_t seg_size_from_config;
+
+ bool old_is_restricted_physical_mem = is_restricted_physical_mem;
+ uint64_t old_total_physical_mem = total_physical_mem;
+ size_t old_heap_hard_limit = heap_hard_limit;
+ size_t old_heap_hard_limit_soh = heap_hard_limit_oh[soh];
+ size_t old_heap_hard_limit_loh = heap_hard_limit_oh[loh];
+ size_t old_heap_hard_limit_poh = heap_hard_limit_oh[poh];
+ bool old_hard_limit_config_p = hard_limit_config_p;
+
+ total_physical_mem = GCToOSInterface::GetPhysicalMemoryLimit (&is_restricted_physical_mem);
+
+ bool succeed = true;
+
+#ifdef USE_REGIONS
+ GCConfig::RefreshHeapHardLimitSettings();
+
+ if (!compute_hard_limit())
+ {
+ succeed = false;
+ status = REFRESH_MEMORY_HARD_LIMIT_INVALID;
+ }
+ hard_limit_config_p = heap_hard_limit != 0;
+#else
+ size_t new_current_total_committed = 0;
+#endif //USE_REGIONS
+
+ if (succeed && !compute_memory_settings(false, nhp, nhp_from_config, seg_size_from_config, new_current_total_committed))
+ {
+ succeed = false;
+ status = REFRESH_MEMORY_HARD_LIMIT_TOO_LOW;
+ }
+
+ if (!succeed)
+ {
+ is_restricted_physical_mem = old_is_restricted_physical_mem;
+ total_physical_mem = old_total_physical_mem;
+ heap_hard_limit = old_heap_hard_limit;
+ heap_hard_limit_oh[soh] = old_heap_hard_limit_soh;
+ heap_hard_limit_oh[loh] = old_heap_hard_limit_loh;
+ heap_hard_limit_oh[poh] = old_heap_hard_limit_poh;
+ hard_limit_config_p = old_hard_limit_config_p;
+ }
+ else
+#ifndef COMMITTED_BYTES_SHADOW
+ if (!old_heap_hard_limit && heap_hard_limit)
+#endif //COMMITTED_BYTES_SHADOW
+ {
+#ifdef USE_REGIONS
+ check_commit_cs.Initialize();
+#ifdef COMMITTED_BYTES_SHADOW
+ assert (new_current_total_committed == current_total_committed);
+ assert (new_current_total_committed_bookkeeping == current_total_committed_bookkeeping);
+#else
+ current_total_committed = new_current_total_committed;
+ current_total_committed_bookkeeping = new_current_total_committed_bookkeeping;
+#endif
+ for (int i = 0; i < recorded_committed_bucket_counts; i++)
+ {
+#ifdef COMMITTED_BYTES_SHADOW
+ assert (new_committed_by_oh[i] == committed_by_oh[i]);
+#else
+ new_committed_by_oh[i] = committed_by_oh[i];
+#endif
+ }
+#ifdef MULTIPLE_HEAPS
+#ifdef _DEBUG
+ for (int h = 0; h < n_heaps; h++)
+ {
+ for (int oh = soh; oh < total_oh_count; oh++)
+ {
+#ifdef COMMITTED_BYTES_SHADOW
+ assert (g_heaps[h]->committed_by_oh_per_heap[oh] == g_heaps[h]->committed_by_oh_per_heap_refresh[oh]);
+#else
+ g_heaps[h]->committed_by_oh_per_heap[oh] = g_heaps[h]->committed_by_oh_per_heap_refresh[oh];
+#endif
+ }
+ }
+#endif //_DEBUG
+#endif //MULTIPLE_HEAPS
+#else
+ assert (!"NYI - Segments");
+#endif //USE_REGIONS
+ }
+
+
+#ifdef USE_REGIONS
+ decommit_lock.Leave();
+#endif
+ GCToEEInterface::RestartEE(TRUE);
+
+ return status;
+}
+
+#ifdef USE_REGIONS
+
+// Walks the region list starting at 'seg' (via the read-write segment
+// iterators) and, for every region whose object heap matches 'oh' -- or every
+// region when oh == unknown -- accumulates:
+//  - committed_bytes: the committed span of the region itself
+//    (heap_segment_committed - get_region_start), and
+//  - mark_array_committed_bytes: the bytes committed for the region's mark
+//    array, as reported by get_mark_array_size.
+// Both accumulators are added to, never reset, so callers can sum across
+// multiple region lists (see refresh_memory_limit).
+void gc_heap::accumulate_committed_bytes(heap_segment* seg, size_t& committed_bytes, size_t& mark_array_committed_bytes, gc_oh_num oh)
+{
+ seg = heap_segment_rw (seg);
+ while (seg)
+ {
+ if ((oh == unknown) || (heap_segment_oh (seg) == oh))
+ {
+ mark_array_committed_bytes += get_mark_array_size (seg);
+ committed_bytes += (heap_segment_committed (seg) - get_region_start (seg));
+ }
+ seg = heap_segment_next_rw (seg);
+ }
+}
+
+// Returns the number of bytes committed for the mark array covering 'seg',
+// or 0 when the region's mark array is not committed
+// (heap_segment_flags_ma_committed not set).
+// The size is the page-aligned span of mark_array entries from the region's
+// start address to its reserved end.
+// NOTE(review): because commit_start/commit_end are page-aligned outward,
+// spans computed for adjacent regions may share a boundary page -- confirm
+// whether that double-count is acceptable for this accounting.
+size_t gc_heap::get_mark_array_size (heap_segment* seg)
+{
+ if (seg->flags & heap_segment_flags_ma_committed)
+ {
+ uint32_t* mark_array_addr = mark_array;
+ uint8_t* begin = get_start_address (seg);
+ uint8_t* end = heap_segment_reserved (seg);
+ size_t beg_word = mark_word_of (begin);
+ size_t end_word = mark_word_of (align_on_mark_word (end));
+ uint8_t* commit_start = align_lower_page ((uint8_t*)&mark_array_addr[beg_word]);
+ uint8_t* commit_end = align_on_page ((uint8_t*)&mark_array_addr[end_word]);
+ return (size_t)(commit_end - commit_start);
+ }
+ return 0;
+}
+#endif //USE_REGIONS
diff --git a/src/coreclr/gc/gcconfig.cpp b/src/coreclr/gc/gcconfig.cpp
index 6112b1edc8b446..02d95dff997353 100644
--- a/src/coreclr/gc/gcconfig.cpp
+++ b/src/coreclr/gc/gcconfig.cpp
@@ -67,6 +67,18 @@ GC_CONFIGURATION_KEYS
#undef STRING_CONFIG
}
+// Re-reads the full set of GCHeapHardLimit* settings from the EE and stores
+// each into its s_* backing field, then mirrors it into the matching
+// s_Updated* field. Called from the GC's refresh path (see
+// gc_heap::refresh_memory_limit) before recomputing the hard limit.
+// NOTE(review): the bool result of GetIntConfigValue is ignored; this relies
+// on a failed lookup leaving the out-value (and thus the s_* field)
+// unchanged -- confirm against GCToEEInterface::GetIntConfigValue.
+void GCConfig::RefreshHeapHardLimitSettings()
+{
+ GCToEEInterface::GetIntConfigValue("GCHeapHardLimit", "System.GC.HeapHardLimit", &s_GCHeapHardLimit); s_UpdatedGCHeapHardLimit = s_GCHeapHardLimit;
+ GCToEEInterface::GetIntConfigValue("GCHeapHardLimitPercent", "System.GC.HeapHardLimitPercent", &s_GCHeapHardLimitPercent); s_UpdatedGCHeapHardLimitPercent = s_GCHeapHardLimitPercent;
+ GCToEEInterface::GetIntConfigValue("GCHeapHardLimitSOH", "System.GC.HeapHardLimitSOH", &s_GCHeapHardLimitSOH); s_UpdatedGCHeapHardLimitSOH = s_GCHeapHardLimitSOH;
+ GCToEEInterface::GetIntConfigValue("GCHeapHardLimitLOH", "System.GC.HeapHardLimitLOH", &s_GCHeapHardLimitLOH); s_UpdatedGCHeapHardLimitLOH = s_GCHeapHardLimitLOH;
+ GCToEEInterface::GetIntConfigValue("GCHeapHardLimitPOH", "System.GC.HeapHardLimitPOH", &s_GCHeapHardLimitPOH); s_UpdatedGCHeapHardLimitPOH = s_GCHeapHardLimitPOH;
+ GCToEEInterface::GetIntConfigValue("GCHeapHardLimitSOHPercent", "System.GC.HeapHardLimitSOHPercent", &s_GCHeapHardLimitSOHPercent); s_UpdatedGCHeapHardLimitSOHPercent = s_GCHeapHardLimitSOHPercent;
+ GCToEEInterface::GetIntConfigValue("GCHeapHardLimitLOHPercent", "System.GC.HeapHardLimitLOHPercent", &s_GCHeapHardLimitLOHPercent); s_UpdatedGCHeapHardLimitLOHPercent = s_GCHeapHardLimitLOHPercent;
+ GCToEEInterface::GetIntConfigValue("GCHeapHardLimitPOHPercent", "System.GC.HeapHardLimitPOHPercent", &s_GCHeapHardLimitPOHPercent); s_UpdatedGCHeapHardLimitPOHPercent = s_GCHeapHardLimitPOHPercent;
+}
+
void GCConfig::Initialize()
{
#define BOOL_CONFIG(name, private_key, public_key, unused_default, unused_doc) \
diff --git a/src/coreclr/gc/gcconfig.h b/src/coreclr/gc/gcconfig.h
index cf10191056d4b1..c576261fc24855 100644
--- a/src/coreclr/gc/gcconfig.h
+++ b/src/coreclr/gc/gcconfig.h
@@ -169,6 +169,8 @@ GC_CONFIGURATION_KEYS
public:
+ static void RefreshHeapHardLimitSettings();
+
static void EnumerateConfigurationValues(void* context, ConfigurationValueFunc configurationValueFunc);
// Flags that may inhabit the number returned for the HeapVerifyLevel config option.
diff --git a/src/coreclr/gc/gcimpl.h b/src/coreclr/gc/gcimpl.h
index 23ddcbc67e73a9..b82f17a78f9597 100644
--- a/src/coreclr/gc/gcimpl.h
+++ b/src/coreclr/gc/gcimpl.h
@@ -324,6 +324,8 @@ class GCHeap : public IGCHeapInternal
virtual void Shutdown();
static void ReportGenerationBounds();
+
+ virtual int RefreshMemoryLimit();
};
#endif // GCIMPL_H_
diff --git a/src/coreclr/gc/gcinterface.h b/src/coreclr/gc/gcinterface.h
index d04e1f87cce9a1..86fa8470fbcb95 100644
--- a/src/coreclr/gc/gcinterface.h
+++ b/src/coreclr/gc/gcinterface.h
@@ -472,7 +472,7 @@ typedef enum
*
* NOTE: HNDTYPE_WEAK_NATIVE_COM is no longer used in the VM starting .NET 8
* but we are keeping it here for backward compatibility purposes"
- *
+ *
*/
HNDTYPE_WEAK_NATIVE_COM = 9
} HandleType;
@@ -565,11 +565,15 @@ enum class GCConfigurationType
{
Int64,
StringUtf8,
- Boolean
+ Boolean
};
using ConfigurationValueFunc = void (*)(void* context, void* name, void* publicKey, GCConfigurationType type, int64_t data);
+const int REFRESH_MEMORY_SUCCEED = 0;
+const int REFRESH_MEMORY_HARD_LIMIT_TOO_LOW = 1;
+const int REFRESH_MEMORY_HARD_LIMIT_INVALID = 2;
+
// IGCHeap is the interface that the VM will use when interacting with the GC.
class IGCHeap {
public:
@@ -973,6 +977,9 @@ class IGCHeap {
// Updates given frozen segment
virtual void UpdateFrozenSegment(segment_handle seg, uint8_t* allocated, uint8_t* committed) PURE_VIRTUAL
+
+ // Refresh the memory limit
+ virtual int RefreshMemoryLimit() PURE_VIRTUAL
};
#ifdef WRITE_BARRIER_CHECK
diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h
index d72c9495dd8eab..08bb071df19dbf 100644
--- a/src/coreclr/gc/gcpriv.h
+++ b/src/coreclr/gc/gcpriv.h
@@ -18,46 +18,46 @@
#include "gcrecord.h"
// The per heap and global fields are separated into the following categories -
-//
+//
// Used in GC and needs to be maintained, ie, next GC can be using this field so it needs to have the right value.
// Note that for some fields this doesn't mean the value of the field itself will change (it could remain the same
// throughout the process lifetime (for example, finalize_queue) but you'll need to pay attention to its content
-// and make sure it's updated correctly through each GC.
-//
+// and make sure it's updated correctly through each GC.
+//
// Some fields are marked as "loosely maintained" in their comments - this means they are really only modified during
// a single GC *except* they can be resized during a GC so the reinit-ed value will carry over to later GCs.
// PER_HEAP_FIELD_MAINTAINED
-//
+//
// Like PER_HEAP_FIELD_MAINTAINED and also used in the allocator code paths
// PER_HEAP_FIELD_MAINTAINED_ALLOC
-//
+//
// Used only during a single GC so we could fill it with an arbitrary value and shouldn't break anything.
// Note that for BGC fields, this means it's initialized at the beginning of that BGC. Ephemeral GCs can happen during
// this single BGC but they do not actually modify these fields.
// PER_HEAP_FIELD_SINGLE_GC
-//
+//
// Like PER_HEAP_FIELD_SINGLE_GC and also used in the allocator code paths
// PER_HEAP_FIELD_SINGLE_GC_ALLOC
-//
+//
// Only used by the allocator code paths
// PER_HEAP_FIELD_ALLOC
-//
+//
// Initialized during the GC init and never changes
// PER_HEAP_FIELD_INIT_ONLY
-//
+//
// Used for diagnostics purpose only
// PER_HEAP_FIELD_DIAG_ONLY
-//
+//
// Corresponding annotation for global fields
// PER_HEAP_ISOLATED_FIELD_MAINTAINED
// PER_HEAP_ISOLATED_FIELD_MAINTAINED_ALLOC
// PER_HEAP_ISOLATED_FIELD_SINGLE_GC
// PER_HEAP_ISOLATED_FIELD_SINGLE_GC_ALLOC
// PER_HEAP_ISOLATED_FIELD_INIT_ONLY
-//
-// If a field does not fit any of the above category, such as fgn_maxgen_percent which is only updated by an API,
+//
+// If a field does not fit any of the above category, such as fgn_maxgen_percent which is only updated by an API,
// it will be marked as PER_HEAP_FIELD/PER_HEAP_ISOLATED_FIELD.
-//
+//
// A few notes -
//
// + within the section of a particular category of fields I use the following policy to list them -
@@ -70,7 +70,7 @@
//
// + some of the fields are used by both regions and segments share. When that's the case, the annotation
// is based on regions. So for segments they may or may not apply (segments code is in maintainence mode only).
-//
+//
// + some fields are used by the GC and WB but not by the allocator, in which case I will indicate them as such.
#ifdef MULTIPLE_HEAPS
#define PER_HEAP_FIELD
@@ -148,6 +148,7 @@ inline void FATAL_GC_ERROR()
// + creates some ro segs
// We can add more mechanisms here.
//#define STRESS_REGIONS
+//#define COMMITTED_BYTES_SHADOW
#define MARK_PHASE_PREFETCH
#endif //USE_REGIONS
@@ -278,7 +279,7 @@ void GCLogConfig (const char *fmt, ... );
#define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
#ifdef USE_REGIONS
-#define MAX_REGION_SIZE 0x80000000
+#define MAX_REGION_SIZE 0x80000000
#endif // USE_REGIONS
#define MAX_NUM_FREE_SPACES 200
@@ -969,7 +970,7 @@ class generation
// When a generation is condemned, these are re-calculated. For older generations these are maintained across GCs as
// younger generation GCs allocate into this generation's FL.
//
- // If we rearrange regions between heaps, we need to adjust these values accordingly. free_list_space can be adjusted
+ // If we rearrange regions between heaps, we need to adjust these values accordingly. free_list_space can be adjusted
// when we adjust the FL. However, since we don't actually maintain free_obj_space per region and walking an entire
// region just to get free_obj_space is not really worth it, we might just have to live with inaccurate value till
// the next GC that condemns this generation which is okay since this is usually a small value anyway.
@@ -1028,7 +1029,7 @@ struct static_data
};
// dynamic data is maintained per generation, so we have total_generation_count number of them.
-//
+//
// The dynamic data fields are grouped into 3 categories:
//
// calculated logical data (like desired_allocation)
@@ -1046,7 +1047,7 @@ class dynamic_data
//
// The next group of fields are updated during a GC if that GC condemns this generation.
- //
+ //
// Same as new_allocation but only updated during a GC if the generation is condemned.
// We should really just get rid of this.
ptrdiff_t gc_new_allocation;
@@ -1637,6 +1638,12 @@ class gc_heap
// This relocates the SIP regions and return the next non SIP region.
PER_HEAP_METHOD heap_segment* relocate_advance_to_non_sip (heap_segment* region);
+ // Compute the size committed for the mark array for this region.
+ PER_HEAP_METHOD size_t get_mark_array_size(heap_segment* seg);
+
+ // Accumulate the committed bytes for both the region and the mark array for this list of regions.
+ PER_HEAP_METHOD void accumulate_committed_bytes(heap_segment* seg, size_t& committed_bytes, size_t& mark_array_committed_bytes, gc_oh_num oh = unknown);
+
PER_HEAP_ISOLATED_METHOD void verify_region_to_generation_map();
PER_HEAP_ISOLATED_METHOD void compute_gc_and_ephemeral_range (int condemned_gen_number, bool end_of_gc_p);
@@ -1995,6 +2002,13 @@ class gc_heap
#ifdef USE_REGIONS
PER_HEAP_ISOLATED_METHOD bool on_used_changed (uint8_t* left);
+ // new_sizes are the logical sizes of the card table elements while
+ // commit_sizes are the physical sizes of the card table elements due to alignment constraints.
+ PER_HEAP_ISOLATED_METHOD bool get_card_table_commit_layout (uint8_t* from, uint8_t* to,
+ uint8_t* commit_begins[total_bookkeeping_elements],
+ size_t commit_sizes[total_bookkeeping_elements],
+ size_t new_sizes[total_bookkeeping_elements]);
+
PER_HEAP_ISOLATED_METHOD bool inplace_commit_card_table (uint8_t* from, uint8_t* to);
#else //USE_REGIONS
PER_HEAP_ISOLATED_METHOD int grow_brick_card_tables (uint8_t* start,
@@ -3243,6 +3257,11 @@ class gc_heap
PER_HEAP_ISOLATED_METHOD BOOL dt_high_memory_load_p();
+ PER_HEAP_ISOLATED_METHOD bool compute_hard_limit();
+
+ PER_HEAP_ISOLATED_METHOD bool compute_memory_settings(bool is_initialization, uint32_t& nhp, uint32_t nhp_from_config, size_t& seg_size_from_config,
+ size_t new_current_total_committed);
+
PER_HEAP_METHOD void update_collection_counts ();
/*****************************************************************************************************************/
@@ -3543,7 +3562,7 @@ class gc_heap
PER_HEAP_FIELD_MAINTAINED generation generation_table[total_generation_count];
// These are loosely maintained, ie, could be reinitialized at any GC if needed. All that's
- // maintained is just the # of elements in mark_stack_array.
+ // maintained is just the # of elements in mark_stack_array.
// The content of mark_stack_array is only maintained during a single GC.
PER_HEAP_FIELD_MAINTAINED size_t mark_stack_array_length;
PER_HEAP_FIELD_MAINTAINED mark* mark_stack_array;
@@ -3634,7 +3653,7 @@ class gc_heap
PER_HEAP_FIELD_MAINTAINED_ALLOC alloc_list poh_alloc_list[NUM_POH_ALIST - 1];
// Keeps track of the highest address allocated by Alloc
- // Used in allocator code path. Blocking GCs do use it at the beginning (to update heap_segment_allocated) and
+ // Used in allocator code path. Blocking GCs do use it at the beginning (to update heap_segment_allocated) and
// at the end they get initialized for the allocator.
PER_HEAP_FIELD_MAINTAINED_ALLOC uint8_t* alloc_allocated;
@@ -3700,13 +3719,13 @@ class gc_heap
#define vm_heap ((GCHeap*) g_theGCHeap)
#define heap_number (0)
#endif //MULTIPLE_HEAPS
-
+
#ifdef BACKGROUND_GC
// We only use this when we need to timeout BGC threads.
PER_HEAP_FIELD_INIT_ONLY CLRCriticalSection bgc_threads_timeout_cs;
// For regions these are the same as g_gc_lowest_address/g_gc_highest_address
- // and never change.
+ // and never change.
PER_HEAP_FIELD_INIT_ONLY uint8_t* background_saved_lowest_address;
PER_HEAP_FIELD_INIT_ONLY uint8_t* background_saved_highest_address;
@@ -3736,6 +3755,7 @@ class gc_heap
#ifdef MULTIPLE_HEAPS
#ifdef _DEBUG
PER_HEAP_FIELD_DIAG_ONLY size_t committed_by_oh_per_heap[total_oh_count];
+ PER_HEAP_FIELD_DIAG_ONLY size_t committed_by_oh_per_heap_refresh[total_oh_count];
#endif //_DEBUG
#else //MULTIPLE_HEAPS
#endif //MULTIPLE_HEAPS
@@ -3972,7 +3992,7 @@ class gc_heap
// Highest and lowest address for ephemeral generations.
// For regions these are only used during a GC (init-ed at beginning of mark and
- // used later in that GC).
+ // used later in that GC).
// They could be used for WB but we currently don't use them for that purpose, even
// thought we do pass them to the WB code.
//
@@ -4164,11 +4184,13 @@ class gc_heap
// TODO: some of the logic here applies to the general case as well
// such as LOH automatic compaction. However it will require more
//testing to change the general case.
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool hard_limit_config_p;
PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t heap_hard_limit;
PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t heap_hard_limit_oh[total_oh_count];
// Used both in a GC and on the allocator code paths when heap_hard_limit is non zero
PER_HEAP_ISOLATED_FIELD_INIT_ONLY CLRCriticalSection check_commit_cs;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY CLRCriticalSection decommit_lock;
// Indicate to use large pages. This only works if hardlimit is also enabled.
PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool use_large_pages_p;
@@ -4244,7 +4266,7 @@ class gc_heap
// For implementation of GCHeap::GetMemoryInfo which is called by
// the GC.GetGCMemoryInfo API
- //
+ //
// We record the time GC work is done while EE is suspended.
// suspended_start_ts is what we get right before we call
// SuspendEE. We omit the time between GC end and RestartEE
@@ -4353,7 +4375,6 @@ class gc_heap
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t gen0_max_budget_from_config;
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY int high_mem_percent_from_config;
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY bool use_frozen_segments_p;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY bool hard_limit_config_p;
#endif //FEATURE_EVENT_TRACE
#ifdef HEAP_BALANCE_INSTRUMENTATION
@@ -4407,6 +4428,8 @@ class gc_heap
PER_HEAP_ISOLATED_METHOD uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
+ PER_HEAP_ISOLATED_METHOD int refresh_memory_limit();
+
/***************************************************************************************************/
// public fields //
/***************************************************************************************************/
@@ -4427,7 +4450,6 @@ class gc_heap
#ifdef FEATURE_BASICFREEZE
PER_HEAP_ISOLATED_FIELD_MAINTAINED sorted_table* seg_table;
#endif //FEATURE_BASICFREEZE
-
}; // class gc_heap
#ifdef FEATURE_PREMORTEM_FINALIZATION
diff --git a/src/coreclr/gc/windows/gcenv.windows.cpp b/src/coreclr/gc/windows/gcenv.windows.cpp
index 7826be5d3658c7..f12a64d7ed1ab3 100644
--- a/src/coreclr/gc/windows/gcenv.windows.cpp
+++ b/src/coreclr/gc/windows/gcenv.windows.cpp
@@ -17,8 +17,6 @@
GCSystemInfo g_SystemInfo;
-static size_t g_RestrictedPhysicalMemoryLimit = (size_t)UINTPTR_MAX;
-
static bool g_SeLockMemoryPrivilegeAcquired = false;
static AffinitySet g_processAffinitySet;
@@ -252,10 +250,6 @@ static size_t GetRestrictedPhysicalMemoryLimit()
{
LIMITED_METHOD_CONTRACT;
- // The limit was cached already
- if (g_RestrictedPhysicalMemoryLimit != (size_t)UINTPTR_MAX)
- return g_RestrictedPhysicalMemoryLimit;
-
size_t job_physical_memory_limit = (size_t)UINTPTR_MAX;
uint64_t total_virtual = 0;
uint64_t total_physical = 0;
@@ -337,8 +331,7 @@ static size_t GetRestrictedPhysicalMemoryLimit()
job_physical_memory_limit = 0;
}
- VolatileStore(&g_RestrictedPhysicalMemoryLimit, job_physical_memory_limit);
- return g_RestrictedPhysicalMemoryLimit;
+ return job_physical_memory_limit;
}
// This function checks to see if GetLogicalProcessorInformation API is supported.
diff --git a/src/coreclr/vm/comutilnative.cpp b/src/coreclr/vm/comutilnative.cpp
index 99e51aae7397ea..488655ee7a62c9 100644
--- a/src/coreclr/vm/comutilnative.cpp
+++ b/src/coreclr/vm/comutilnative.cpp
@@ -1210,6 +1210,35 @@ void GCInterface::EnumerateConfigurationValues(void* configurationContext, Enume
pHeap->EnumerateConfigurationValues(configurationContext, callback);
}
+// Latest hard-limit overrides supplied via the managed GC.RefreshMemoryLimit
+// API. Read by GCToEEInterface::GetIntConfigValue, where any field that is
+// not UINT64_MAX overrides the corresponding GCHeapHardLimit* config lookup.
+GCHeapHardLimitInfo g_gcHeapHardLimitInfo;
+
+// QCall entry point for the managed GC.RefreshMemoryLimit API: publishes the
+// managed-side hard-limit values, then asks the GC to re-read its memory
+// settings. Returns the GC's status code (0 == REFRESH_MEMORY_SUCCEED; see
+// the REFRESH_MEMORY_* constants in gcinterface.h).
+// NOTE(review): the write to g_gcHeapHardLimitInfo is unsynchronized and the
+// values persist after the call; concurrent or repeated RefreshMemoryLimit
+// calls would race / leak overrides into later config reads -- confirm the
+// managed caller serializes this.
+extern "C" int QCALLTYPE GCInterface_RefreshMemoryLimit(GCHeapHardLimitInfo heapHardLimitInfo)
+{
+ QCALL_CONTRACT;
+
+ int result = 0;
+
+ BEGIN_QCALL;
+ g_gcHeapHardLimitInfo = heapHardLimitInfo;
+ result = GCInterface::RefreshMemoryLimit();
+ END_QCALL;
+
+ return result;
+}
+
+// Forwards the refresh request to the active GC heap implementation and
+// returns its IGCHeap::RefreshMemoryLimit status code.
+int GCInterface::RefreshMemoryLimit()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ return GCHeapUtilities::GetGCHeap()->RefreshMemoryLimit();
+}
+
#ifdef HOST_64BIT
const unsigned MIN_MEMORYPRESSURE_BUDGET = 4 * 1024 * 1024; // 4 MB
#else // HOST_64BIT
diff --git a/src/coreclr/vm/comutilnative.h b/src/coreclr/vm/comutilnative.h
index 258120c3fed36f..496c4747b0b396 100644
--- a/src/coreclr/vm/comutilnative.h
+++ b/src/coreclr/vm/comutilnative.h
@@ -127,6 +127,18 @@ typedef GCMemoryInfoData * GCMEMORYINFODATAREF;
using EnumerateConfigurationValuesCallback = void (*)(void* context, void* name, void* publicKey, GCConfigurationType type, int64_t data);
+// Hard-limit values passed from the managed GC.RefreshMemoryLimit API via the
+// GCInterface_RefreshMemoryLimit QCall. A field equal to UINT64_MAX means
+// "not specified" (see the override checks in GCToEEInterface::
+// GetIntConfigValue). Field order is presumably required to match the managed
+// GCHeapHardLimitInfo struct marshaled by the QCall -- keep the two in sync.
+struct GCHeapHardLimitInfo
+{
+ UINT64 heapHardLimit;
+ UINT64 heapHardLimitPercent;
+ UINT64 heapHardLimitSOH;
+ UINT64 heapHardLimitLOH;
+ UINT64 heapHardLimitPOH;
+ UINT64 heapHardLimitSOHPercent;
+ UINT64 heapHardLimitLOHPercent;
+ UINT64 heapHardLimitPOHPercent;
+};
+
class GCInterface {
private:
static INT32 m_gc_counts[3];
@@ -175,6 +187,7 @@ class GCInterface {
static void AddMemoryPressure(UINT64 bytesAllocated);
static void EnumerateConfigurationValues(void* configurationContext, EnumerateConfigurationValuesCallback callback);
+ static int RefreshMemoryLimit();
private:
// Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw.
@@ -202,6 +215,8 @@ extern "C" void QCALLTYPE GCInterface_RemoveMemoryPressure(UINT64 bytesAllocated
extern "C" void QCALLTYPE GCInterface_EnumerateConfigurationValues(void* configurationContext, EnumerateConfigurationValuesCallback callback);
+extern "C" int QCALLTYPE GCInterface_RefreshMemoryLimit(GCHeapHardLimitInfo heapHardLimitInfo);
+
class COMInterlocked
{
public:
diff --git a/src/coreclr/vm/gcenv.ee.cpp b/src/coreclr/vm/gcenv.ee.cpp
index 7d9843e062bf07..253370eefe52c4 100644
--- a/src/coreclr/vm/gcenv.ee.cpp
+++ b/src/coreclr/vm/gcenv.ee.cpp
@@ -1191,6 +1191,15 @@ bool GCToEEInterface::GetIntConfigValue(const char* privateKey, const char* publ
return true;
}
+ if ((g_gcHeapHardLimitInfo.heapHardLimit != UINT64_MAX) && strcmp(privateKey, "GCHeapHardLimit") == 0) { *value = g_gcHeapHardLimitInfo.heapHardLimit; return true; }
+ if ((g_gcHeapHardLimitInfo.heapHardLimitPercent != UINT64_MAX) && strcmp(privateKey, "GCHeapHardLimitPercent") == 0) { *value = g_gcHeapHardLimitInfo.heapHardLimitPercent; return true; }
+ if ((g_gcHeapHardLimitInfo.heapHardLimitSOH != UINT64_MAX) && strcmp(privateKey, "GCHeapHardLimitSOH") == 0) { *value = g_gcHeapHardLimitInfo.heapHardLimitSOH; return true; }
+ if ((g_gcHeapHardLimitInfo.heapHardLimitLOH != UINT64_MAX) && strcmp(privateKey, "GCHeapHardLimitLOH") == 0) { *value = g_gcHeapHardLimitInfo.heapHardLimitLOH; return true; }
+ if ((g_gcHeapHardLimitInfo.heapHardLimitPOH != UINT64_MAX) && strcmp(privateKey, "GCHeapHardLimitPOH") == 0) { *value = g_gcHeapHardLimitInfo.heapHardLimitPOH; return true; }
+ if ((g_gcHeapHardLimitInfo.heapHardLimitSOHPercent != UINT64_MAX) && strcmp(privateKey, "GCHeapHardLimitSOHPercent") == 0) { *value = g_gcHeapHardLimitInfo.heapHardLimitSOHPercent; return true; }
+ if ((g_gcHeapHardLimitInfo.heapHardLimitLOHPercent != UINT64_MAX) && strcmp(privateKey, "GCHeapHardLimitLOHPercent") == 0) { *value = g_gcHeapHardLimitInfo.heapHardLimitLOHPercent; return true; }
+ if ((g_gcHeapHardLimitInfo.heapHardLimitPOHPercent != UINT64_MAX) && strcmp(privateKey, "GCHeapHardLimitPOHPercent") == 0) { *value = g_gcHeapHardLimitInfo.heapHardLimitPOHPercent; return true; }
+
WCHAR configKey[MaxConfigKeyLength];
if (MultiByteToWideChar(CP_ACP, 0, privateKey, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
{
diff --git a/src/coreclr/vm/gcenv.ee.standalone.cpp b/src/coreclr/vm/gcenv.ee.standalone.cpp
index 6a73fca18f2ce9..c71d8b195731bf 100644
--- a/src/coreclr/vm/gcenv.ee.standalone.cpp
+++ b/src/coreclr/vm/gcenv.ee.standalone.cpp
@@ -20,6 +20,8 @@
// Finalizes a weak reference directly.
extern void FinalizeWeakReference(Object* obj);
+extern GCHeapHardLimitInfo g_gcHeapHardLimitInfo;
+
namespace standalone
{
diff --git a/src/coreclr/vm/gcenv.ee.static.cpp b/src/coreclr/vm/gcenv.ee.static.cpp
index 9648ede32cf9e1..4e778b3d7ca081 100644
--- a/src/coreclr/vm/gcenv.ee.static.cpp
+++ b/src/coreclr/vm/gcenv.ee.static.cpp
@@ -20,4 +20,6 @@
// Finalizes a weak reference directly.
extern void FinalizeWeakReference(Object* obj);
+extern GCHeapHardLimitInfo g_gcHeapHardLimitInfo;
+
#include "gcenv.ee.cpp"
diff --git a/src/coreclr/vm/qcallentrypoints.cpp b/src/coreclr/vm/qcallentrypoints.cpp
index e886eca93b74d7..9e928ba29d6492 100644
--- a/src/coreclr/vm/qcallentrypoints.cpp
+++ b/src/coreclr/vm/qcallentrypoints.cpp
@@ -220,6 +220,7 @@ static const Entry s_QCall[] =
DllImportEntry(GCInterface_UnregisterFrozenSegment)
#endif
DllImportEntry(GCInterface_EnumerateConfigurationValues)
+ DllImportEntry(GCInterface_RefreshMemoryLimit)
DllImportEntry(MarshalNative_Prelink)
DllImportEntry(MarshalNative_IsBuiltInComSupported)
DllImportEntry(MarshalNative_GetHINSTANCE)