From e8512b92c0b43b65b7246180de44ad8bc22946f2 Mon Sep 17 00:00:00 2001 From: Andrew Au <3410332+cshung@users.noreply.github.com> Date: Mon, 20 Apr 2026 09:19:14 -0700 Subject: [PATCH 1/3] Skip decommit for large pages and add fake large pages test mode With large pages, VirtualDecommit is a no-op since large pages cannot be partially decommitted. PR #126929 fixed the resulting stale data corruption by adding memclr in virtual_decommit, but this approach has downsides: the memory is never returned to the OS, yet we pay for the clearing and produce misleading committed/used bookkeeping. Instead, skip the decommit entirely for large pages: 1. distribute_free_regions: skip the aggressive tail-region decommit (the committed-but-unallocated tail of in-use regions). This was the path that caused the heap corruption in #126903. 2. decommit_heap_segment: skip the whole-segment decommit used for segment hoarding and BGC segment deletion. Same class of issue: committed/used are lowered but physical memory retains stale data. 3. decommit_region: bypass virtual_decommit and call reduce_committed_bytes directly, since decommit_region already handles large pages correctly by clearing memory itself. 4. virtual_decommit: add an assert that it is never called for heap memory when large pages are on. This catches any future caller that forgets to handle the large pages case. The end_of_data parameter and no-op ternary added by #126929 are removed. Add GCLargePages=2 mode that simulates large pages using small pages: sets use_large_pages_p=true but reserves with normal pages and commits everything upfront. This exercises all large page GC code paths without requiring OS large page setup or privileges, enabling CI testing. 
Fix #126903 --- src/coreclr/gc/gc.cpp | 1 + src/coreclr/gc/gcconfig.h | 2 +- src/coreclr/gc/gcpriv.h | 4 +- src/coreclr/gc/init.cpp | 19 +++- src/coreclr/gc/interface.cpp | 2 +- src/coreclr/gc/memory.cpp | 28 ++++-- src/coreclr/gc/regions_segments.cpp | 52 ++++++---- .../API/GC/Collect_Aggressive_LargePages.cs | 96 +++++++++++++++++++ .../GC/Collect_Aggressive_LargePages.csproj | 18 ++++ 9 files changed, 186 insertions(+), 36 deletions(-) create mode 100644 src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs create mode 100644 src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index d747b02faee6e7..de1778d558c4b7 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -2619,6 +2619,7 @@ size_t gc_heap::eph_gen_starts_size = 0; heap_segment* gc_heap::segment_standby_list; #endif //USE_REGIONS bool gc_heap::use_large_pages_p = 0; +bool gc_heap::large_pages_fake_mode_p = 0; #ifdef HEAP_BALANCE_INSTRUMENTATION size_t gc_heap::last_gc_end_time_us = 0; #endif //HEAP_BALANCE_INSTRUMENTATION diff --git a/src/coreclr/gc/gcconfig.h b/src/coreclr/gc/gcconfig.h index aa23ff7ebc59c8..3114a9aac4d9e0 100644 --- a/src/coreclr/gc/gcconfig.h +++ b/src/coreclr/gc/gcconfig.h @@ -76,7 +76,7 @@ class GCConfigStringHolder BOOL_CONFIG (ConfigLogEnabled, "GCConfigLogEnabled", NULL, false, "Specifies the name of the GC config log file") \ BOOL_CONFIG (GCNumaAware, "GCNumaAware", NULL, true, "Enables numa allocations in the GC") \ BOOL_CONFIG (GCCpuGroup, "GCCpuGroup", "System.GC.CpuGroup", false, "Enables CPU groups in the GC") \ - BOOL_CONFIG (GCLargePages, "GCLargePages", "System.GC.LargePages", false, "Enables using Large Pages in the GC") \ + INT_CONFIG (GCLargePages, "GCLargePages", "System.GC.LargePages", 0, "Enables Large Pages in the GC (1=real large pages, 2=fake mode for testing)") \ INT_CONFIG (HeapVerifyLevel, "HeapVerify", NULL, HEAPVERIFY_NONE, "When set verifies the integrity of the managed heap on 
entry and exit of each GC") \ INT_CONFIG (LOHCompactionMode, "GCLOHCompact", NULL, 0, "Specifies the LOH compaction mode") \ INT_CONFIG (LOHThreshold, "GCLOHThreshold", "System.GC.LOHThreshold", LARGE_OBJECT_SIZE, "Specifies the size that will make objects go on LOH") \ diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h index 501c93d154ad23..ac6506b28e5861 100644 --- a/src/coreclr/gc/gcpriv.h +++ b/src/coreclr/gc/gcpriv.h @@ -2485,7 +2485,7 @@ class gc_heap PER_HEAP_METHOD void decommit_heap_segment (heap_segment* seg); PER_HEAP_ISOLATED_METHOD bool virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number); PER_HEAP_ISOLATED_METHOD bool virtual_commit (void* address, size_t size, int bucket, int h_number=-1, bool* hard_limit_exceeded_p=NULL); - PER_HEAP_ISOLATED_METHOD bool virtual_decommit (void* address, size_t size, int bucket, int h_number=-1, void* end_of_data=nullptr); + PER_HEAP_ISOLATED_METHOD bool virtual_decommit (void* address, size_t size, int bucket, int h_number=-1); PER_HEAP_ISOLATED_METHOD void reduce_committed_bytes (void* address, size_t size, int bucket, int h_number, bool decommit_succeeded_p); friend void destroy_card_table (uint32_t*); PER_HEAP_ISOLATED_METHOD void destroy_card_table_helper (uint32_t* c_table); @@ -5369,7 +5369,9 @@ class gc_heap #endif // Indicate to use large pages. This only works if hardlimit is also enabled. + // GCLargePages=1 uses real OS large pages, GCLargePages=2 fakes it for testing. 
PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool use_large_pages_p; + PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool large_pages_fake_mode_p; #ifdef MULTIPLE_HEAPS // Init-ed in gc_heap::initialize_gc diff --git a/src/coreclr/gc/init.cpp b/src/coreclr/gc/init.cpp index ccf0b35b3d312c..71b6cb24990546 100644 --- a/src/coreclr/gc/init.cpp +++ b/src/coreclr/gc/init.cpp @@ -882,13 +882,23 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size, // Right now all the non mark array portions are commmitted since I'm calling make_card_table // on the whole range. This can be committed as needed. size_t reserve_size = regions_range; - uint8_t* reserve_range = (uint8_t*)virtual_alloc (reserve_size, use_large_pages_p); + // In fake large pages mode, use normal reserve (not real large pages) then + // commit all upfront to simulate the "always committed" property. + bool use_real_large_pages = use_large_pages_p && !large_pages_fake_mode_p; + uint8_t* reserve_range = (uint8_t*)virtual_alloc (reserve_size, use_real_large_pages); if (!reserve_range) { log_init_error_to_host ("Reserving %zd bytes (%zd GiB) for the regions range failed, do you have a virtual memory limit set on this process?", reserve_size, gib (reserve_size)); return E_OUTOFMEMORY; } + if (large_pages_fake_mode_p) + { + if (!GCToOSInterface::VirtualCommit (reserve_range, reserve_size)) + { + return E_OUTOFMEMORY; + } + } if (!global_region_allocator.init (reserve_range, (reserve_range + reserve_size), ((size_t)1 << min_segment_size_shr), @@ -909,8 +919,9 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size, heap_hard_limit_oh[soh] && (GCConfig::GetGCHeapHardLimitPOH() == 0) && (GCConfig::GetGCHeapHardLimitPOHPercent() == 0); + bool use_real_large_pages = use_large_pages_p && !large_pages_fake_mode_p; if (!reserve_initial_memory (soh_segment_size, loh_segment_size, poh_segment_size, number_of_heaps, - use_large_pages_p, separated_poh_p, heap_no_to_numa_node)) + use_real_large_pages, separated_poh_p, heap_no_to_numa_node)) 
return E_OUTOFMEMORY; if (use_large_pages_p) { @@ -1279,7 +1290,9 @@ bool gc_heap::compute_hard_limit() heap_hard_limit_oh[poh] = (size_t)GCConfig::GetGCHeapHardLimitPOH(); #ifdef HOST_64BIT - use_large_pages_p = GCConfig::GetGCLargePages(); + int64_t large_pages_config = GCConfig::GetGCLargePages(); + use_large_pages_p = (large_pages_config != 0); + large_pages_fake_mode_p = (large_pages_config == 2); #endif //HOST_64BIT if (heap_hard_limit_oh[soh] || heap_hard_limit_oh[loh] || heap_hard_limit_oh[poh]) diff --git a/src/coreclr/gc/interface.cpp b/src/coreclr/gc/interface.cpp index 5300d34848d22d..90c3b1772bbd53 100644 --- a/src/coreclr/gc/interface.cpp +++ b/src/coreclr/gc/interface.cpp @@ -275,7 +275,7 @@ HRESULT GCHeap::Initialize() { return CLR_E_GC_LARGE_PAGE_MISSING_HARD_LIMIT; } - GCConfig::SetGCLargePages(gc_heap::use_large_pages_p); + GCConfig::SetGCLargePages(gc_heap::use_large_pages_p ? (gc_heap::large_pages_fake_mode_p ? 2 : 1) : 0); #ifdef USE_REGIONS gc_heap::regions_range = (size_t)GCConfig::GetGCRegionRange(); diff --git a/src/coreclr/gc/memory.cpp b/src/coreclr/gc/memory.cpp index 78a162e06bb34c..79f2f489fadf40 100644 --- a/src/coreclr/gc/memory.cpp +++ b/src/coreclr/gc/memory.cpp @@ -161,7 +161,7 @@ void gc_heap::reduce_committed_bytes (void* address, size_t size, int bucket, in } } -bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_number, void* end_of_data) +bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_number) { /** * Here are all possible cases for the decommits: @@ -171,15 +171,12 @@ bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_nu * Case 3: This is for free - the bucket will be recorded_committed_free_bucket, and the h_number will be -1 */ - bool decommit_succeeded_p = ((bucket != recorded_committed_bookkeeping_bucket) && use_large_pages_p) ? 
true : GCToOSInterface::VirtualDecommit (address, size); + // With large pages, VirtualDecommit on heap memory is a no-op. All such callers + // should either skip the decommit or handle stale data themselves (decommit_region + // does the latter by calling reduce_committed_bytes directly and clearing memory). + assert (!use_large_pages_p || bucket == recorded_committed_bookkeeping_bucket); - // Large pages: the decommit above is a no-op so memory retains stale data. - // Clear up to end_of_data if the caller provided it so that the heap never - // observes leftover object references after the region is reused. - if (use_large_pages_p && (end_of_data != nullptr) && (end_of_data > address)) - { - memclr ((uint8_t*)address, (uint8_t*)end_of_data - (uint8_t*)address); - } + bool decommit_succeeded_p = GCToOSInterface::VirtualDecommit (address, size); reduce_committed_bytes (address, size, bucket, h_number, decommit_succeeded_p); @@ -347,7 +344,18 @@ size_t gc_heap::decommit_region (heap_segment* region, int bucket, int h_number) uint8_t* page_start = align_lower_page (get_region_start (region)); uint8_t* decommit_end = heap_segment_committed (region); size_t decommit_size = decommit_end - page_start; - bool decommit_succeeded_p = virtual_decommit (page_start, decommit_size, bucket, h_number); + bool decommit_succeeded_p; + if (use_large_pages_p) + { + // VirtualDecommit is a no-op for large pages so skip it and update + // committed bookkeeping directly. Memory clearing is handled below. 
+ decommit_succeeded_p = true; + reduce_committed_bytes (page_start, decommit_size, bucket, h_number, true); + } + else + { + decommit_succeeded_p = virtual_decommit (page_start, decommit_size, bucket, h_number); + } bool require_clearing_memory_p = !decommit_succeeded_p || use_large_pages_p; dprintf (REGIONS_LOG, ("decommitted region %p(%p-%p) (%zu bytes) - success: %d", region, diff --git a/src/coreclr/gc/regions_segments.cpp b/src/coreclr/gc/regions_segments.cpp index ee51642d5afd8a..e9275063f10d09 100644 --- a/src/coreclr/gc/regions_segments.cpp +++ b/src/coreclr/gc/regions_segments.cpp @@ -1522,6 +1522,12 @@ size_t gc_heap::decommit_heap_segment_pages_worker (heap_segment* seg, //decommit all pages except one or 2 void gc_heap::decommit_heap_segment (heap_segment* seg) { + // For large pages, VirtualDecommit is a no-op so skip the decommit entirely + // to avoid lowering committed/used bookkeeping while memory retains stale data. + if (use_large_pages_p) + { + return; + } #ifdef USE_REGIONS if (!dt_high_memory_load_p()) { @@ -1814,32 +1820,38 @@ void gc_heap::distribute_free_regions() while (decommit_step(DECOMMIT_TIME_STEP_MILLISECONDS)) { } -#ifdef MULTIPLE_HEAPS - for (int i = 0; i < n_heaps; i++) + // For large pages, VirtualDecommit on in-use regions is a no-op so the + // memory is never actually returned to the OS. Skip the tail decommit + // entirely to avoid misleading bookkeeping and unnecessary memclr overhead. 
+ if (!use_large_pages_p) { - gc_heap* hp = g_heaps[i]; - int hn = i; +#ifdef MULTIPLE_HEAPS + for (int i = 0; i < n_heaps; i++) + { + gc_heap* hp = g_heaps[i]; + int hn = i; #else //MULTIPLE_HEAPS - { - gc_heap* hp = pGenGCHeap; - int hn = 0; -#endif //MULTIPLE_HEAPS - for (int i = 0; i < total_generation_count; i++) { - generation* generation = hp->generation_of (i); - heap_segment* region = heap_segment_rw (generation_start_segment (generation)); - while (region != nullptr) + gc_heap* hp = pGenGCHeap; + int hn = 0; +#endif //MULTIPLE_HEAPS + for (int i = 0; i < total_generation_count; i++) { - uint8_t* aligned_allocated = align_on_page (heap_segment_allocated (region)); - size_t end_space = heap_segment_committed (region) - aligned_allocated; - if (end_space > 0) + generation* generation = hp->generation_of (i); + heap_segment* region = heap_segment_rw (generation_start_segment (generation)); + while (region != nullptr) { - virtual_decommit (aligned_allocated, end_space, gen_to_oh (i), hn, heap_segment_used (region)); - heap_segment_committed (region) = aligned_allocated; - heap_segment_used (region) = min (heap_segment_used (region), heap_segment_committed (region)); - assert (heap_segment_committed (region) > heap_segment_mem (region)); + uint8_t* aligned_allocated = align_on_page (heap_segment_allocated (region)); + size_t end_space = heap_segment_committed (region) - aligned_allocated; + if (end_space > 0) + { + virtual_decommit (aligned_allocated, end_space, gen_to_oh (i), hn); + heap_segment_committed (region) = aligned_allocated; + heap_segment_used (region) = min (heap_segment_used (region), heap_segment_committed (region)); + assert (heap_segment_committed (region) > heap_segment_mem (region)); + } + region = heap_segment_next_rw (region); } - region = heap_segment_next_rw (region); } } } diff --git a/src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs b/src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs new file mode 100644 index 
00000000000000..2c5b447ef585b7 --- /dev/null +++ b/src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs @@ -0,0 +1,96 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Collections.Concurrent; +using System.Runtime.CompilerServices; +using System.Threading; +using Xunit; + +// Regression test for https://github.com/dotnet/runtime/issues/126903 +// Verifies that aggressive GC does not corrupt the heap under (fake) large pages. +// The fake large pages mode (DOTNET_GCLargePagesFakeMode=1) exercises the same +// GC code paths as real large pages without requiring OS-level large page setup. +public class AggressiveCollectLargePages +{ + const int DurationMs = 3000; + const int WriterCount = 4; + + [Fact] + public static int TestEntryPoint() + { + var dict = new ConcurrentDictionary(); + var cts = new CancellationTokenSource(DurationMs); + var token = cts.Token; + int errors = 0; + + Thread[] writers = new Thread[WriterCount]; + for (int t = 0; t < WriterCount; t++) + { + int tid = t; + writers[t] = new Thread(() => + { + try + { + int i = tid * 1_000_000; + while (!token.IsCancellationRequested) + { + dict[i] = new byte[100]; + i++; + if ((i % 1000) == 0) + { + dict.Clear(); + } + } + } + catch (Exception ex) + { + Console.WriteLine($"Writer {tid} caught: {ex.GetType().Name}: {ex.Message}"); + Interlocked.Increment(ref errors); + } + }); + writers[t].IsBackground = true; + writers[t].Start(); + } + + Thread gcThread = new Thread(() => + { + while (!token.IsCancellationRequested) + { + CreateGarbage(); + GC.Collect(2, GCCollectionMode.Aggressive, blocking: true, compacting: true); + Thread.Sleep(50); + } + }); + gcThread.IsBackground = true; + gcThread.Start(); + + gcThread.Join(); + for (int t = 0; t < WriterCount; t++) + { + writers[t].Join(); + } + + if (errors > 0) + { + Console.WriteLine($"FAIL: {errors} writer(s) hit exceptions (heap 
corruption)."); + return 101; + } + + Console.WriteLine("PASS: No heap corruption detected."); + return 100; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + static void CreateGarbage() + { + byte[][] small = new byte[500][]; + for (int i = 0; i < small.Length; i++) + { + small[i] = new byte[4000]; + } + byte[] large = new byte[8 * 1024 * 1024]; + GC.KeepAlive(small); + GC.KeepAlive(large); + } +} diff --git a/src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj b/src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj new file mode 100644 index 00000000000000..3cd0cef65b37c6 --- /dev/null +++ b/src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj @@ -0,0 +1,18 @@ + + + + true + true + 0 + + + PdbOnly + + + + + + + + + From bc4d5691abb91e7dbf60c8a99371abf0466b7ff1 Mon Sep 17 00:00:00 2001 From: Andrew Au <3410332+cshung@users.noreply.github.com> Date: Wed, 22 Apr 2026 21:39:57 -0700 Subject: [PATCH 2/3] Rename large_pages_fake_mode_p to large_pages_emulation_mode_p Address review feedback from mangod9 and janvorli. --- src/coreclr/gc/gc.cpp | 2 +- src/coreclr/gc/gcpriv.h | 2 +- src/coreclr/gc/init.cpp | 8 ++++---- src/coreclr/gc/interface.cpp | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index de1778d558c4b7..7f715cf7daf57e 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -2619,7 +2619,7 @@ size_t gc_heap::eph_gen_starts_size = 0; heap_segment* gc_heap::segment_standby_list; #endif //USE_REGIONS bool gc_heap::use_large_pages_p = 0; -bool gc_heap::large_pages_fake_mode_p = 0; +bool gc_heap::large_pages_emulation_mode_p = 0; #ifdef HEAP_BALANCE_INSTRUMENTATION size_t gc_heap::last_gc_end_time_us = 0; #endif //HEAP_BALANCE_INSTRUMENTATION diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h index ac6506b28e5861..b4010e28cb815a 100644 --- a/src/coreclr/gc/gcpriv.h +++ b/src/coreclr/gc/gcpriv.h @@ -5371,7 +5371,7 @@ class gc_heap // Indicate to use large pages. 
This only works if hardlimit is also enabled. // GCLargePages=1 uses real OS large pages, GCLargePages=2 fakes it for testing. PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool use_large_pages_p; - PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool large_pages_fake_mode_p; + PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool large_pages_emulation_mode_p; #ifdef MULTIPLE_HEAPS // Init-ed in gc_heap::initialize_gc diff --git a/src/coreclr/gc/init.cpp b/src/coreclr/gc/init.cpp index 71b6cb24990546..24bd3dd369cfd4 100644 --- a/src/coreclr/gc/init.cpp +++ b/src/coreclr/gc/init.cpp @@ -884,7 +884,7 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size, size_t reserve_size = regions_range; // In fake large pages mode, use normal reserve (not real large pages) then // commit all upfront to simulate the "always committed" property. - bool use_real_large_pages = use_large_pages_p && !large_pages_fake_mode_p; + bool use_real_large_pages = use_large_pages_p && !large_pages_emulation_mode_p; uint8_t* reserve_range = (uint8_t*)virtual_alloc (reserve_size, use_real_large_pages); if (!reserve_range) { @@ -892,7 +892,7 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size, reserve_size, gib (reserve_size)); return E_OUTOFMEMORY; } - if (large_pages_fake_mode_p) + if (large_pages_emulation_mode_p) { if (!GCToOSInterface::VirtualCommit (reserve_range, reserve_size)) { @@ -919,7 +919,7 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size, heap_hard_limit_oh[soh] && (GCConfig::GetGCHeapHardLimitPOH() == 0) && (GCConfig::GetGCHeapHardLimitPOHPercent() == 0); - bool use_real_large_pages = use_large_pages_p && !large_pages_fake_mode_p; + bool use_real_large_pages = use_large_pages_p && !large_pages_emulation_mode_p; if (!reserve_initial_memory (soh_segment_size, loh_segment_size, poh_segment_size, number_of_heaps, use_real_large_pages, separated_poh_p, heap_no_to_numa_node)) return E_OUTOFMEMORY; @@ -1292,7 +1292,7 @@ bool gc_heap::compute_hard_limit() #ifdef HOST_64BIT int64_t large_pages_config = 
GCConfig::GetGCLargePages(); use_large_pages_p = (large_pages_config != 0); - large_pages_fake_mode_p = (large_pages_config == 2); + large_pages_emulation_mode_p = (large_pages_config == 2); #endif //HOST_64BIT if (heap_hard_limit_oh[soh] || heap_hard_limit_oh[loh] || heap_hard_limit_oh[poh]) diff --git a/src/coreclr/gc/interface.cpp b/src/coreclr/gc/interface.cpp index 90c3b1772bbd53..acdae1415cb403 100644 --- a/src/coreclr/gc/interface.cpp +++ b/src/coreclr/gc/interface.cpp @@ -275,7 +275,7 @@ HRESULT GCHeap::Initialize() { return CLR_E_GC_LARGE_PAGE_MISSING_HARD_LIMIT; } - GCConfig::SetGCLargePages(gc_heap::use_large_pages_p ? (gc_heap::large_pages_fake_mode_p ? 2 : 1) : 0); + GCConfig::SetGCLargePages(gc_heap::use_large_pages_p ? (gc_heap::large_pages_emulation_mode_p ? 2 : 1) : 0); #ifdef USE_REGIONS gc_heap::regions_range = (size_t)GCConfig::GetGCRegionRange(); From 80b87404a524bcd548ab02f2c2b9a78962c2286f Mon Sep 17 00:00:00 2001 From: Andrew Au <3410332+cshung@users.noreply.github.com> Date: Wed, 22 Apr 2026 22:01:49 -0700 Subject: [PATCH 3/3] Address review feedback Update the remaining comments, the GCLargePages config description string, and the regression test to use emulation terminology throughout (the large_pages_fake_mode_p -> large_pages_emulation_mode_p field rename itself landed in the previous commit). Disable test on 32-bit: GCHeapHardLimit=0xC0000000 exceeds the virtual address space and GCLargePages is gated by HOST_64BIT.
--- src/coreclr/gc/gcconfig.h | 2 +- src/coreclr/gc/gcpriv.h | 2 +- src/coreclr/gc/init.cpp | 2 +- src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs | 4 ++-- src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj | 2 ++ 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/coreclr/gc/gcconfig.h b/src/coreclr/gc/gcconfig.h index 3114a9aac4d9e0..ffa98b37cd6acc 100644 --- a/src/coreclr/gc/gcconfig.h +++ b/src/coreclr/gc/gcconfig.h @@ -76,7 +76,7 @@ class GCConfigStringHolder BOOL_CONFIG (ConfigLogEnabled, "GCConfigLogEnabled", NULL, false, "Specifies the name of the GC config log file") \ BOOL_CONFIG (GCNumaAware, "GCNumaAware", NULL, true, "Enables numa allocations in the GC") \ BOOL_CONFIG (GCCpuGroup, "GCCpuGroup", "System.GC.CpuGroup", false, "Enables CPU groups in the GC") \ - INT_CONFIG (GCLargePages, "GCLargePages", "System.GC.LargePages", 0, "Enables Large Pages in the GC (1=real large pages, 2=fake mode for testing)") \ + INT_CONFIG (GCLargePages, "GCLargePages", "System.GC.LargePages", 0, "Enables Large Pages in the GC (1=real large pages, 2=emulation mode for testing)") \ INT_CONFIG (HeapVerifyLevel, "HeapVerify", NULL, HEAPVERIFY_NONE, "When set verifies the integrity of the managed heap on entry and exit of each GC") \ INT_CONFIG (LOHCompactionMode, "GCLOHCompact", NULL, 0, "Specifies the LOH compaction mode") \ INT_CONFIG (LOHThreshold, "GCLOHThreshold", "System.GC.LOHThreshold", LARGE_OBJECT_SIZE, "Specifies the size that will make objects go on LOH") \ diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h index b4010e28cb815a..18b128049a2e8d 100644 --- a/src/coreclr/gc/gcpriv.h +++ b/src/coreclr/gc/gcpriv.h @@ -5369,7 +5369,7 @@ class gc_heap #endif // Indicate to use large pages. This only works if hardlimit is also enabled. - // GCLargePages=1 uses real OS large pages, GCLargePages=2 fakes it for testing. + // GCLargePages=1 uses real OS large pages, GCLargePages=2 emulates it for testing. 
PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool use_large_pages_p; PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool large_pages_emulation_mode_p; diff --git a/src/coreclr/gc/init.cpp b/src/coreclr/gc/init.cpp index 24bd3dd369cfd4..f242e12c7aa22f 100644 --- a/src/coreclr/gc/init.cpp +++ b/src/coreclr/gc/init.cpp @@ -882,7 +882,7 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size, // Right now all the non mark array portions are commmitted since I'm calling make_card_table // on the whole range. This can be committed as needed. size_t reserve_size = regions_range; - // In fake large pages mode, use normal reserve (not real large pages) then + // In large pages emulation mode, use normal reserve (not real large pages) then // commit all upfront to simulate the "always committed" property. bool use_real_large_pages = use_large_pages_p && !large_pages_emulation_mode_p; uint8_t* reserve_range = (uint8_t*)virtual_alloc (reserve_size, use_real_large_pages); diff --git a/src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs b/src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs index 2c5b447ef585b7..f9bf54b1599a0a 100644 --- a/src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs +++ b/src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs @@ -8,8 +8,8 @@ using Xunit; // Regression test for https://github.com/dotnet/runtime/issues/126903 -// Verifies that aggressive GC does not corrupt the heap under (fake) large pages. -// The fake large pages mode (DOTNET_GCLargePagesFakeMode=1) exercises the same +// Verifies that aggressive GC does not corrupt the heap under emulated large pages. +// The large pages emulation mode (DOTNET_GCLargePages=2) exercises the same // GC code paths as real large pages without requiring OS-level large page setup. 
public class AggressiveCollectLargePages { diff --git a/src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj b/src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj index 3cd0cef65b37c6..b781f9f954ecab 100644 --- a/src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj +++ b/src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj @@ -3,6 +3,8 @@ true true + + true 0