Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/coreclr/gc/gcpriv.h
Original file line number Diff line number Diff line change
Expand Up @@ -2485,7 +2485,7 @@ class gc_heap
PER_HEAP_METHOD void decommit_heap_segment (heap_segment* seg);
PER_HEAP_ISOLATED_METHOD bool virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number);
PER_HEAP_ISOLATED_METHOD bool virtual_commit (void* address, size_t size, int bucket, int h_number=-1, bool* hard_limit_exceeded_p=NULL);
PER_HEAP_ISOLATED_METHOD bool virtual_decommit (void* address, size_t size, int bucket, int h_number=-1);
PER_HEAP_ISOLATED_METHOD bool virtual_decommit (void* address, size_t size, int bucket, int h_number=-1, void* end_of_data=nullptr);
PER_HEAP_ISOLATED_METHOD void reduce_committed_bytes (void* address, size_t size, int bucket, int h_number, bool decommit_succeeded_p);
friend void destroy_card_table (uint32_t*);
PER_HEAP_ISOLATED_METHOD void destroy_card_table_helper (uint32_t* c_table);
Expand Down
10 changes: 9 additions & 1 deletion src/coreclr/gc/memory.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ void gc_heap::reduce_committed_bytes (void* address, size_t size, int bucket, in
}
}

bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_number)
bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_number, void* end_of_data)
{
/**
* Here are all possible cases for the decommits:
Expand All @@ -173,6 +173,14 @@ bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_nu

bool decommit_succeeded_p = ((bucket != recorded_committed_bookkeeping_bucket) && use_large_pages_p) ? true : GCToOSInterface::VirtualDecommit (address, size);

// Large pages: the decommit above is a no-op so memory retains stale data.
// Clear up to end_of_data if the caller provided it so that the heap never
// observes leftover object references after the region is reused.
if (use_large_pages_p && (end_of_data != nullptr) && (end_of_data > address))
{
memclr ((uint8_t*)address, (uint8_t*)end_of_data - (uint8_t*)address);
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In other paths, the GC just keeps track of the fact that memory is dirty and clears it right before it is used for allocations again in gc_heap::adjust_limit_clr. Would it be a better option here?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The fix follows the same pattern as this code in decommit_region:

if (require_clearing_memory_p)
{
uint8_t* clear_end = use_large_pages_p ? heap_segment_used (region) : heap_segment_committed (region);
size_t clear_size = clear_end - page_start;
memclr (page_start, clear_size);
heap_segment_used (region) = heap_segment_mem (region);
dprintf(REGIONS_LOG, ("cleared region %p(%p-%p) (%zu bytes)",
region,
page_start,
clear_end,
clear_size));
}
else
{
heap_segment_committed (region) = heap_segment_mem (region);
}

where memclr clears the full region when large pages are in use. A similar cleanup was missing during aggressive decommitting of tail regions.

}

reduce_committed_bytes (address, size, bucket, h_number, decommit_succeeded_p);

return decommit_succeeded_p;
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/gc/regions_segments.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1834,7 +1834,7 @@ void gc_heap::distribute_free_regions()
size_t end_space = heap_segment_committed (region) - aligned_allocated;
if (end_space > 0)
{
virtual_decommit (aligned_allocated, end_space, gen_to_oh (i), hn);
virtual_decommit (aligned_allocated, end_space, gen_to_oh (i), hn, heap_segment_used (region));
heap_segment_committed (region) = aligned_allocated;
heap_segment_used (region) = min (heap_segment_used (region), heap_segment_committed (region));
assert (heap_segment_committed (region) > heap_segment_mem (region));
Expand Down
Loading