From 64056a8ca5d683c070ca79d5cbc12bb28f70c1c6 Mon Sep 17 00:00:00 2001 From: Xinyi Zou Date: Tue, 28 Jun 2022 11:52:46 +0800 Subject: [PATCH 1/2] vec stress test, Allocator introduce chunkallocator --- be/src/common/config.h | 12 +- be/src/common/daemon.cpp | 2 - be/src/runtime/exec_env_init.cpp | 23 +++ be/src/runtime/memory/chunk_allocator.cpp | 32 +++- be/src/runtime/memory/chunk_allocator.h | 9 + be/src/vec/common/allocator.h | 166 +++++++++--------- docs/en/docs/admin-manual/config/be-config.md | 4 +- .../docs/admin-manual/config/be-config.md | 4 +- 8 files changed, 156 insertions(+), 96 deletions(-) diff --git a/be/src/common/config.h b/be/src/common/config.h index 9daf8f1c6a564b..96ff2f7ea6cb14 100644 --- a/be/src/common/config.h +++ b/be/src/common/config.h @@ -426,10 +426,14 @@ CONF_Bool(disable_mem_pools, "false"); // to a relative large number or the performance is very very bad. CONF_Bool(use_mmap_allocate_chunk, "false"); -// Chunk Allocator's reserved bytes limit, -// Default value is 2GB, increase this variable can improve performance, but will -// acquire more free memory which can not be used by other modules -CONF_Int64(chunk_reserved_bytes_limit, "2147483648"); +// The reserved bytes limit of Chunk Allocator, usually set as a percentage of mem_limit. +// Defaults to bytes if no unit is given; the number of bytes must be a multiple of 2. +// Must be larger than 0; if it is larger than the physical memory size, it will be set to the physical memory size. +// Increasing this variable can improve performance, +// but it will reserve more free memory which cannot be used by other modules. +CONF_mString(chunk_reserved_bytes_limit, "20%"); +// The minimum reserved size (in bytes) of the chunk allocator; must be a power of two. Default: 1024. +CONF_Int32(min_chunk_reserved_bytes, "1024"); // The probing algorithm of partitioned hash table. // Enable quadratic probing hash table diff --git a/be/src/common/daemon.cpp b/be/src/common/daemon.cpp index 7b58e32bfa3eb1..ca1b26541b9d8a 100644 --- a/be/src/common/daemon.cpp +++ b/be/src/common/daemon.cpp @@ -267,8 +267,6 @@ void Daemon::init(int argc, char** argv, const std::vector<StorePath>& paths) { init_doris_metrics(paths); init_signals(); - - ChunkAllocator::init_instance(config::chunk_reserved_bytes_limit); } void Daemon::start() { diff --git a/be/src/runtime/exec_env_init.cpp b/be/src/runtime/exec_env_init.cpp index a1cdf32b6379f8..264565e4d6b6d4 100644 --- a/be/src/runtime/exec_env_init.cpp +++ b/be/src/runtime/exec_env_init.cpp @@ -263,6 +263,29 @@ Status ExecEnv::_init_mem_tracker() { RETURN_IF_ERROR(_disk_io_mgr->init(global_memory_limit_bytes)); RETURN_IF_ERROR(_tmp_file_mgr->init()); + // 5. init chunk allocator
+ if (!BitUtil::IsPowerOf2(config::min_chunk_reserved_bytes)) { + ss << "Config min_chunk_reserved_bytes must be a power-of-two: " + << config::min_chunk_reserved_bytes; + return Status::InternalError(ss.str()); + } + + int64_t chunk_reserved_bytes_limit = + ParseUtil::parse_mem_spec(config::chunk_reserved_bytes_limit, global_memory_limit_bytes, + MemInfo::physical_mem(), &is_percent); + if (chunk_reserved_bytes_limit <= 0) { + ss << "Invalid config chunk_reserved_bytes_limit value, must be a percentage or " + "positive bytes value: " + << config::chunk_reserved_bytes_limit; + return Status::InternalError(ss.str()); + } + chunk_reserved_bytes_limit = + BitUtil::RoundDown(chunk_reserved_bytes_limit, config::min_chunk_reserved_bytes); + ChunkAllocator::init_instance(chunk_reserved_bytes_limit); + LOG(INFO) << "Chunk allocator memory limit: " + << PrettyPrinter::print(chunk_reserved_bytes_limit, TUnit::BYTES) + << ", original config value: " << config::chunk_reserved_bytes_limit; + // TODO(zc): The current memory usage configuration is a bit confusing, // we need to sort out the use of memory return Status::OK(); diff --git a/be/src/runtime/memory/chunk_allocator.cpp b/be/src/runtime/memory/chunk_allocator.cpp index 269dc12fcfd870..90497b0fe78faa 100644 --- a/be/src/runtime/memory/chunk_allocator.cpp +++ b/be/src/runtime/memory/chunk_allocator.cpp @@ -42,6 +42,7 @@ DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(chunk_pool_system_alloc_count, MetricUnit:: DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(chunk_pool_system_free_count, MetricUnit::NOUNIT); DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(chunk_pool_system_alloc_cost_ns, MetricUnit::NANOSECONDS); DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(chunk_pool_system_free_cost_ns, MetricUnit::NANOSECONDS); +DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(chunk_pool_reserved_bytes, MetricUnit::NOUNIT); static IntCounter* chunk_pool_local_core_alloc_count; static IntCounter* chunk_pool_other_core_alloc_count; @@ -49,6 +50,7 @@ static IntCounter* chunk_pool_system_alloc_count; static IntCounter* chunk_pool_system_free_count; static IntCounter* chunk_pool_system_alloc_cost_ns; static IntCounter* chunk_pool_system_free_cost_ns; +static IntGauge* chunk_pool_reserved_bytes; #ifdef BE_TEST static std::mutex s_mutex; @@ -115,6 +117,7 @@ void ChunkAllocator::init_instance(size_t reserve_limit) { ChunkAllocator::ChunkAllocator(size_t reserve_limit) : _reserve_bytes_limit(reserve_limit), + _steal_arena_limit(reserve_limit * 0.1), _reserved_bytes(0), _arenas(CpuInfo::get_max_num_cores()) { _mem_tracker = @@ -132,6 +135,7 @@ ChunkAllocator::ChunkAllocator(size_t reserve_limit) INT_COUNTER_METRIC_REGISTER(_chunk_allocator_metric_entity, chunk_pool_system_free_count); INT_COUNTER_METRIC_REGISTER(_chunk_allocator_metric_entity, chunk_pool_system_alloc_cost_ns); INT_COUNTER_METRIC_REGISTER(_chunk_allocator_metric_entity, chunk_pool_system_free_cost_ns); + INT_GAUGE_METRIC_REGISTER(_chunk_allocator_metric_entity, chunk_pool_reserved_bytes); } Status ChunkAllocator::allocate(size_t size, Chunk* chunk, MemTracker* tracker, bool check_limits) { @@ -158,8 +162,11 @@ Status ChunkAllocator::allocate(size_t size, Chunk* chunk, MemTracker* tracker, chunk_pool_local_core_alloc_count->increment(1); return Status::OK(); } - if (_reserved_bytes > size) { - // try to allocate from other core's arena + // Second path: try to allocate from another core's arena. + // When _reserved_bytes exceeds _steal_arena_limit, chunks are stolen from other cores' arenas.
+ // Otherwise, allocate from the system first, so that enough memory is reserved as soon as possible; + // after that, allocate from the current core's arena as much as possible. + if (_reserved_bytes > _steal_arena_limit) { ++core_id; for (int i = 1; i < _arenas.size(); ++i, ++core_id) { if (_arenas[core_id % _arenas.size()]->pop_free_chunk(size, &chunk->data)) { @@ -192,6 +199,7 @@ Status ChunkAllocator::allocate(size_t size, Chunk* chunk, MemTracker* tracker, void ChunkAllocator::free(const Chunk& chunk, MemTracker* tracker) { // The chunk's memory ownership is transferred from tls tracker to ChunkAllocator. + DCHECK(chunk.core_id != -1); if (tracker) { tracker->transfer_to(_mem_tracker.get(), chunk.size); } else { @@ -199,9 +207,6 @@ void ChunkAllocator::free(const Chunk& chunk, MemTracker* tracker) { chunk.size); } SCOPED_SWITCH_THREAD_LOCAL_MEM_TRACKER(_mem_tracker); - if (chunk.core_id == -1) { - return; - } int64_t old_reserved_bytes = _reserved_bytes; int64_t new_reserved_bytes = 0; do { @@ -219,6 +224,15 @@ void ChunkAllocator::free(const Chunk& chunk, MemTracker* tracker) { } } while (!_reserved_bytes.compare_exchange_weak(old_reserved_bytes, new_reserved_bytes)); + // The sizes passed to allocate/free are multiples of 2, so `_reserved_bytes % 100 == 32` + // keeps occurring, and the latest `_reserved_bytes` value is still published regularly. + // A real-time, exact `_reserved_bytes` value is not required; usually + // the value of `_reserved_bytes` is equal to the ChunkAllocator MemTracker consumption. + // The `_reserved_bytes` metric only matters when verifying the accuracy of the MemTracker. + // Therefore, we reduce the number of metric updates to limit the performance impact. + if (_reserved_bytes % 100 == 32) { + chunk_pool_reserved_bytes->set_value(_reserved_bytes); + } _arenas[chunk.core_id]->push_free_chunk(chunk.data, chunk.size); } @@ -227,4 +241,12 @@ Status ChunkAllocator::allocate_align(size_t size, Chunk* chunk, MemTracker* tra return allocate(BitUtil::RoundUpToPowerOfTwo(size), chunk, tracker, check_limits); } +void ChunkAllocator::free_as_chunk(uint8_t* data, size_t size, MemTracker* tracker) { + Chunk chunk; + chunk.data = data; + chunk.size = size; + chunk.core_id = CpuInfo::get_current_core(); + free(chunk, tracker); +} + } // namespace doris diff --git a/be/src/runtime/memory/chunk_allocator.h b/be/src/runtime/memory/chunk_allocator.h index 6f3b80e4adafcb..5f03eee2347144 100644 --- a/be/src/runtime/memory/chunk_allocator.h +++ b/be/src/runtime/memory/chunk_allocator.h @@ -74,10 +74,19 @@ class ChunkAllocator { // Free chunk allocated from this allocator void free(const Chunk& chunk, MemTracker* tracker = nullptr); + // Transfer the memory ownership to the chunk allocator. + // If the chunk allocator is full, the memory is freed to the system instead. + // Note: make sure that the length of 'data' is equal to 'size', + // otherwise the capacity accounting of the chunk allocator will be wrong. + void free_as_chunk(uint8_t* data, size_t size, MemTracker* tracker = nullptr); + private: static ChunkAllocator* _s_instance; size_t _reserve_bytes_limit; + // When the reserved chunk memory size is greater than this limit, + // chunks may be stolen from other cores' arenas.
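+ // Initialized to reserve_limit * 0.1 in the ChunkAllocator constructor, so stealing from other cores' arenas only starts once at least 10% of the reserve limit is cached.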
+ size_t _steal_arena_limit; std::atomic<int64_t> _reserved_bytes; // each core has a ChunkArena std::vector<std::unique_ptr<ChunkArena>> _arenas; diff --git a/be/src/vec/common/allocator.h b/be/src/vec/common/allocator.h index b5b158e1f7aa7b..8588fe53aaf2df 100644 --- a/be/src/vec/common/allocator.h +++ b/be/src/vec/common/allocator.h @@ -20,7 +20,6 @@ #pragma once -// TODO: Tracker // TODO: Readable #include @@ -29,6 +28,8 @@ #include #include "common/status.h" +#include "runtime/memory/chunk.h" +#include "runtime/memory/chunk_allocator.h" #include "runtime/thread_context.h" #ifdef NDEBUG @@ -60,6 +61,7 @@ #define MAP_ANONYMOUS MAP_ANON #endif +#ifdef NDEBUG /** * Many modern allocators (for example, tcmalloc) do not do a mremap for * realloc, even in case of large enough chunks of memory. Although this allows * P.S. This is also required, because tcmalloc can not allocate a chunk of * memory greater than 16 GB. */ -#ifdef NDEBUG static constexpr size_t MMAP_THRESHOLD = 64 * (1ULL << 20); +/** + * Memory allocations between 4KB and 64MB go through ChunkAllocator, + * those smaller than 4KB go through malloc (for example, tcmalloc), + * and those larger than 64MB go through mmap. + * In testing, ChunkAllocator was slower than malloc for allocations below 4KB + * and slower than mmap for allocations above 64MB; the 4KB threshold is an + * empirical value that needs to be confirmed by more detailed tests later. + */ +static constexpr size_t CHUNK_THRESHOLD = 4096; #else /** - * In debug build, use small mmap threshold to reproduce more memory - * stomping bugs. Along with ASLR it will hopefully detect more issues than - * ASan. The program may fail due to the limit on number of memory mappings. - */ + * In debug build, use small mmap threshold to reproduce more memory + * stomping bugs. Along with ASLR it will hopefully detect more issues than + * ASan. The program may fail due to the limit on number of memory mappings. + */ static constexpr size_t MMAP_THRESHOLD = 4096; +static constexpr size_t CHUNK_THRESHOLD = 1024; #endif static constexpr size_t MMAP_MIN_ALIGNMENT = 4096; @@ -101,12 +113,75 @@ template class Allocator { public: /// Allocate memory range. - void* alloc(size_t size, size_t alignment = 0) { return alloc_no_track(size, alignment); } + void* alloc(size_t size, size_t alignment = 0) { + void* buf; + + if (size >= MMAP_THRESHOLD) { + if (alignment > MMAP_MIN_ALIGNMENT) + throw doris::vectorized::Exception( + fmt::format( + "Too large alignment {}: more than page size when allocating {}.", + alignment, size), + doris::TStatusCode::VEC_BAD_ARGUMENTS); + + CONSUME_THREAD_LOCAL_MEM_TRACKER(size); + buf = mmap(get_mmap_hint(), size, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); + if (MAP_FAILED == buf) { + RELEASE_THREAD_LOCAL_MEM_TRACKER(size); + doris::vectorized::throwFromErrno(fmt::format("Allocator: Cannot mmap {}.", size), + doris::TStatusCode::VEC_CANNOT_ALLOCATE_MEMORY); + } + + /// No need for zero-fill, because mmap guarantees it.
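+ // Note on the branch below: ChunkAllocator::allocate_align() rounds the requested size up to a power of two, so chunk.size may exceed 'size'; that is why its zero-fill uses chunk.size rather than size.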
+ } else if (size >= CHUNK_THRESHOLD) { + doris::Chunk chunk; + if (!doris::ChunkAllocator::instance()->allocate_align(size, &chunk)) { + doris::vectorized::throwFromErrno( + fmt::format("Allocator: Cannot allocate chunk {}.", size), + doris::TStatusCode::VEC_CANNOT_ALLOCATE_MEMORY); + } + buf = chunk.data; + if constexpr (clear_memory) memset(buf, 0, chunk.size); + } else { + if (alignment <= MALLOC_MIN_ALIGNMENT) { + if constexpr (clear_memory) + buf = ::calloc(size, 1); + else + buf = ::malloc(size); + + if (nullptr == buf) + doris::vectorized::throwFromErrno( + fmt::format("Allocator: Cannot malloc {}.", size), + doris::TStatusCode::VEC_CANNOT_ALLOCATE_MEMORY); + } else { + buf = nullptr; + int res = posix_memalign(&buf, alignment, size); + + if (0 != res) + doris::vectorized::throwFromErrno( + fmt::format("Cannot allocate memory (posix_memalign) {}.", size), + doris::TStatusCode::VEC_CANNOT_ALLOCATE_MEMORY, res); + + if constexpr (clear_memory) memset(buf, 0, size); + } + } + return buf; + } /// Free memory range. void free(void* buf, size_t size) { - free_no_track(buf, size); - // CurrentMemoryTracker::free(size); + if (size >= MMAP_THRESHOLD) { + if (0 != munmap(buf, size)) { + doris::vectorized::throwFromErrno(fmt::format("Allocator: Cannot munmap {}.", size), + doris::TStatusCode::VEC_CANNOT_MUNMAP); + } else { + RELEASE_THREAD_LOCAL_MEM_TRACKER(size); + } + } else if (size >= CHUNK_THRESHOLD) { + doris::ChunkAllocator::instance()->free_as_chunk((uint8_t*)buf, size); + } else { + ::free(buf); + } } /** Enlarge memory range. @@ -117,11 +192,9 @@ class Allocator { if (old_size == new_size) { /// nothing to do. /// BTW, it's not possible to change alignment while doing realloc. - } else if (old_size < MMAP_THRESHOLD && new_size < MMAP_THRESHOLD && + } else if (old_size < CHUNK_THRESHOLD && new_size < CHUNK_THRESHOLD && alignment <= MALLOC_MIN_ALIGNMENT) { /// Resize malloc'd memory region with no special alignment requirement. - // CurrentMemoryTracker::realloc(old_size, new_size); - void* new_buf = ::realloc(buf, new_size); if (nullptr == new_buf) doris::vectorized::throwFromErrno("Allocator: Cannot realloc from " + @@ -135,7 +208,6 @@ class Allocator { memset(reinterpret_cast(buf) + old_size, 0, new_size - old_size); } else if (old_size >= MMAP_THRESHOLD && new_size >= MMAP_THRESHOLD) { /// Resize mmap'd memory region. - // CurrentMemoryTracker::realloc(old_size, new_size); CONSUME_THREAD_LOCAL_MEM_TRACKER(new_size - old_size); // On apple and freebsd self-implemented mremap used (common/mremap.h) @@ -157,16 +229,7 @@ class Allocator { if (new_size > old_size) memset(reinterpret_cast(buf) + old_size, 0, new_size - old_size); } - } else if (new_size < MMAP_THRESHOLD) { - /// Small allocs that requires a copy. Assume there's enough memory in system. Call CurrentMemoryTracker once. - // CurrentMemoryTracker::realloc(old_size, new_size); - - void* new_buf = alloc_no_track(new_size, alignment); - memcpy(new_buf, buf, std::min(old_size, new_size)); - free_no_track(buf, old_size); - buf = new_buf; } else { - /// Big allocs that requires a copy. MemoryTracker is called inside 'alloc', 'free' methods. 
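+ // Every remaining case, including reallocations that cross the malloc / ChunkAllocator / mmap thresholds, falls back to the allocate-copy-free path below.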
void* new_buf = alloc(new_size, alignment); memcpy(new_buf, buf, std::min(old_size, new_size)); free(buf, old_size); buf = new_buf; } @@ -195,65 +258,6 @@ class Allocator { ; private: - void* alloc_no_track(size_t size, size_t alignment) { - void* buf; - - if (size >= MMAP_THRESHOLD) { - if (alignment > MMAP_MIN_ALIGNMENT) - throw doris::vectorized::Exception( - fmt::format( - "Too large alignment {}: more than page size when allocating {}.", - alignment, size), - doris::TStatusCode::VEC_BAD_ARGUMENTS); - - CONSUME_THREAD_LOCAL_MEM_TRACKER(size); - buf = mmap(get_mmap_hint(), size, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); - if (MAP_FAILED == buf) { - RELEASE_THREAD_LOCAL_MEM_TRACKER(size); - doris::vectorized::throwFromErrno(fmt::format("Allocator: Cannot mmap {}.", size), - doris::TStatusCode::VEC_CANNOT_ALLOCATE_MEMORY); - } - - /// No need for zero-fill, because mmap guarantees it. - } else { - if (alignment <= MALLOC_MIN_ALIGNMENT) { - if constexpr (clear_memory) - buf = ::calloc(size, 1); - else - buf = ::malloc(size); - - if (nullptr == buf) - doris::vectorized::throwFromErrno( - fmt::format("Allocator: Cannot malloc {}.", size), - doris::TStatusCode::VEC_CANNOT_ALLOCATE_MEMORY); - } else { - buf = nullptr; - int res = posix_memalign(&buf, alignment, size); - - if (0 != res) - doris::vectorized::throwFromErrno( - fmt::format("Cannot allocate memory (posix_memalign) {}.", size), - doris::TStatusCode::VEC_CANNOT_ALLOCATE_MEMORY, res); - - if constexpr (clear_memory) memset(buf, 0, size); - } - } - return buf; - } - - void free_no_track(void* buf, size_t size) { - if (size >= MMAP_THRESHOLD) { - if (0 != munmap(buf, size)) { - doris::vectorized::throwFromErrno(fmt::format("Allocator: Cannot munmap {}.", size), - doris::TStatusCode::VEC_CANNOT_MUNMAP); - } else { - RELEASE_THREAD_LOCAL_MEM_TRACKER(size); - } - } else { - ::free(buf); - } - } - #ifndef NDEBUG /// In debug builds, request mmap() at random addresses (a kind of ASLR), to /// reproduce more memory stomping bugs. Note that Linux doesn't do it by diff --git a/docs/en/docs/admin-manual/config/be-config.md b/docs/en/docs/admin-manual/config/be-config.md index 5f72ad71d5b4d3..f16194f7b3bd2c 100644 --- a/docs/en/docs/admin-manual/config/be-config.md +++ b/docs/en/docs/admin-manual/config/be-config.md @@ -238,9 +238,9 @@ The number of worker threads to calculate the checksum of the tablet ### `chunk_reserved_bytes_limit` -Default: 2147483648 +Default: 20% -The reserved bytes limit of Chunk Allocator is 2GB by default. Increasing this variable can improve performance, but it will get more free memory that other modules cannot use. +The reserved bytes limit of Chunk Allocator, usually set as a percentage of mem_limit. The value defaults to bytes if no unit is given, and the number of bytes must be a multiple of 2. It must be larger than 0; if it is larger than the physical memory size, it will be set to the physical memory size. Increasing this variable can improve performance, but it will reserve more free memory which cannot be used by other modules.
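As an illustration only (not part of the BE source), the sketch below mirrors how the backend turns this setting into the effective reserve limit at start-up: a percentage is taken relative to mem_limit, the result is capped at the physical memory size, and then rounded down to a multiple of `min_chunk_reserved_bytes`. It is a simplified stand-in for `ParseUtil::parse_mem_spec` plus `BitUtil::RoundDown`; the function name and the sample numbers are illustrative, not Doris symbols.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>

// Simplified stand-in for the start-up logic in exec_env_init.cpp:
// accepts either "NN%" (relative to mem_limit) or a plain byte count.
int64_t effective_chunk_reserve(const std::string& spec, int64_t mem_limit,
                                int64_t physical_mem, int64_t min_chunk_bytes) {
    int64_t bytes = 0;
    if (!spec.empty() && spec.back() == '%') {
        double percent = std::stod(spec.substr(0, spec.size() - 1)) / 100.0;
        bytes = static_cast<int64_t>(mem_limit * percent);
    } else {
        bytes = std::stoll(spec); // defaults to bytes when no unit is given
    }
    if (bytes <= 0) return -1;              // rejected at start-up
    bytes = std::min(bytes, physical_mem);  // capped at the physical memory size
    return bytes - bytes % min_chunk_bytes; // rounded down to a multiple of min_chunk_reserved_bytes
}

int main() {
    // Example: mem_limit = 8 GB, physical memory = 16 GB, min_chunk_reserved_bytes = 1024.
    std::cout << effective_chunk_reserve("20%", int64_t{8} << 30, int64_t{16} << 30, 1024)
              << std::endl;
}
```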
### `clear_transaction_task_worker_count` diff --git a/docs/zh-CN/docs/admin-manual/config/be-config.md b/docs/zh-CN/docs/admin-manual/config/be-config.md index 0fd824b294a8fa..8e41779a83fa58 100644 --- a/docs/zh-CN/docs/admin-manual/config/be-config.md +++ b/docs/zh-CN/docs/admin-manual/config/be-config.md @@ -231,9 +231,9 @@ BE缓存池最大的内存可用量,buffer pool是BE新的内存管理结构 ### `chunk_reserved_bytes_limit` -默认值:2147483648 +默认值:20% -Chunk Allocator的reserved bytes限制,默认为2GB,增加这个变量可以提高性能,但是会获得更多其他模块无法使用的空闲内存 +Chunk Allocator的reserved bytes限制,通常被设置为 mem_limit 的百分比。默认单位字节,值必须是2的倍数,且必须大于0,如果大于物理内存,将被设置为物理内存大小。增加这个变量可以提高性能,但是会获得更多其他模块无法使用的空闲内存。 ### `clear_transaction_task_worker_count` From c20832cae59719de63140c2b2c56467c455854cb Mon Sep 17 00:00:00 2001 From: Xinyi Zou Date: Tue, 28 Jun 2022 16:52:32 +0800 Subject: [PATCH 2/2] fix comment --- be/src/runtime/memory/chunk_allocator.cpp | 2 +- be/src/runtime/memory/chunk_allocator.h | 2 +- be/src/vec/common/allocator.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/be/src/runtime/memory/chunk_allocator.cpp b/be/src/runtime/memory/chunk_allocator.cpp index 90497b0fe78faa..872b6c37f8145b 100644 --- a/be/src/runtime/memory/chunk_allocator.cpp +++ b/be/src/runtime/memory/chunk_allocator.cpp @@ -241,7 +241,7 @@ Status ChunkAllocator::allocate_align(size_t size, Chunk* chunk, MemTracker* tra return allocate(BitUtil::RoundUpToPowerOfTwo(size), chunk, tracker, check_limits); } -void ChunkAllocator::free_as_chunk(uint8_t* data, size_t size, MemTracker* tracker) { +void ChunkAllocator::free(uint8_t* data, size_t size, MemTracker* tracker) { Chunk chunk; chunk.data = data; chunk.size = size; diff --git a/be/src/runtime/memory/chunk_allocator.h b/be/src/runtime/memory/chunk_allocator.h index 5f03eee2347144..d425b69e4a5593 100644 --- a/be/src/runtime/memory/chunk_allocator.h +++ b/be/src/runtime/memory/chunk_allocator.h @@ -78,7 +78,7 @@ class ChunkAllocator { // If the chunk allocator is full, then free to the system. // Note: make sure that the length of 'data' is equal to size, // otherwise the capacity of chunk allocator will be wrong. - void free_as_chunk(uint8_t* data, size_t size, MemTracker* tracker = nullptr); + void free(uint8_t* data, size_t size, MemTracker* tracker = nullptr); private: static ChunkAllocator* _s_instance; diff --git a/be/src/vec/common/allocator.h b/be/src/vec/common/allocator.h index 8588fe53aaf2df..835aeb172c3b2e 100644 --- a/be/src/vec/common/allocator.h +++ b/be/src/vec/common/allocator.h @@ -178,7 +178,7 @@ class Allocator { RELEASE_THREAD_LOCAL_MEM_TRACKER(size); } } else if (size >= CHUNK_THRESHOLD) { - doris::ChunkAllocator::instance()->free_as_chunk((uint8_t*)buf, size); + doris::ChunkAllocator::instance()->free((uint8_t*)buf, size); } else { ::free(buf); }
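Taken together, the two patches leave `Allocator` with a three-tier policy: small allocations stay on malloc, mid-size allocations go through the global ChunkAllocator, and large ones go straight to mmap. The standalone sketch below only illustrates that size-class routing, using the release-build thresholds from `allocator.h`; `AllocPath` and `choose_path` are illustrative names rather than Doris symbols, and the real `Allocator::alloc()` additionally handles alignment, zero-fill, and memory-tracker accounting.

```cpp
#include <cstddef>
#include <iostream>

// Release-build thresholds from be/src/vec/common/allocator.h
// (debug builds shrink them to 4096 and 1024 to surface more bugs).
static constexpr size_t CHUNK_THRESHOLD = 4096;             // 4KB
static constexpr size_t MMAP_THRESHOLD = 64 * (1ULL << 20); // 64MB

enum class AllocPath { Malloc, ChunkAllocator, Mmap };

// Illustrative routing only: the real Allocator::alloc() calls malloc/posix_memalign,
// ChunkAllocator::allocate_align(), or mmap() at the corresponding branch.
AllocPath choose_path(size_t size) {
    if (size >= MMAP_THRESHOLD) return AllocPath::Mmap;
    if (size >= CHUNK_THRESHOLD) return AllocPath::ChunkAllocator;
    return AllocPath::Malloc;
}

int main() {
    for (size_t size : {size_t(512), size_t(4096), size_t(1) << 20, size_t(128) << 20}) {
        const char* path = choose_path(size) == AllocPath::Mmap             ? "mmap"
                           : choose_path(size) == AllocPath::ChunkAllocator ? "ChunkAllocator"
                                                                            : "malloc";
        std::cout << size << " bytes -> " << path << std::endl;
    }
}
```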