diff --git a/include/tscore/JeAllocator.h b/include/tscore/JeAllocator.h index 24e94d9022e..a1dc942b4c0 100644 --- a/include/tscore/JeAllocator.h +++ b/include/tscore/JeAllocator.h @@ -23,6 +23,9 @@ #include "tscore/ink_queue.h" #include <cstddef> #include <sys/mman.h> +#include <pthread.h> +#include <shared_mutex> +#include <unordered_map> #if TS_HAS_JEMALLOC #include <jemalloc/jemalloc.h> @@ -31,6 +34,8 @@ #endif #if (JEMALLOC_VERSION_MAJOR == 5) && defined(MADV_DONTDUMP) #define JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED 1 +#else +#define JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED 0 #endif /* MADV_DONTDUMP */ #endif /* TS_HAS_JEMALLOC */ @@ -61,23 +66,20 @@ namespace jearena class JemallocNodumpAllocator { public: - explicit JemallocNodumpAllocator(); void *allocate(InkFreeList *f); - void deallocate(InkFreeList *f, void *ptr); private: #if JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED - static extent_hooks_t extent_hooks_; - static extent_alloc_t *original_alloc_; - static void *alloc(extent_hooks_t *extent, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, - unsigned arena_ind); + static extent_alloc_t *original_alloc; + static extent_hooks_t extent_hooks; + static std::shared_mutex je_mutex; + static std::unordered_map<pthread_t, unsigned int> arenas; + static std::unordered_map<unsigned int, int> arena_flags; - unsigned arena_index_{0}; - int flags_{0}; + static void *alloc(extent_hooks_t *extent, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, + unsigned int arena_id); + unsigned int extend_and_setup_arena(pthread_t thread_id); #endif /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */ - - bool extend_and_setup_arena(); }; /** diff --git a/include/tscore/ink_memory.h b/include/tscore/ink_memory.h index 9f772e57e08..fdaaa27c21b 100644 --- a/include/tscore/ink_memory.h +++ b/include/tscore/ink_memory.h @@ -98,7 +98,6 @@ void *ats_realloc(void *ptr, size_t size); void *ats_memalign(size_t alignment, size_t size); void ats_free(void *ptr); void *ats_free_null(void *ptr); -void ats_memalign_free(void *ptr); int ats_mallopt(int param, int value); int ats_msync(caddr_t addr, 
size_t len, caddr_t end, int flags); diff --git a/iocore/cache/CacheDir.cc b/iocore/cache/CacheDir.cc index 3bc464d87c4..8b2d428645b 100644 --- a/iocore/cache/CacheDir.cc +++ b/iocore/cache/CacheDir.cc @@ -988,7 +988,7 @@ sync_cache_dir_on_shutdown() if (buf_huge) { ats_free_hugepage(buf, buflen); } else { - ats_memalign_free(buf); + ats_free(buf); } buf = nullptr; } @@ -1023,7 +1023,7 @@ sync_cache_dir_on_shutdown() if (buf_huge) { ats_free_hugepage(buf, buflen); } else { - ats_memalign_free(buf); + ats_free(buf); } buf = nullptr; } @@ -1044,7 +1044,7 @@ CacheSync::mainEvent(int event, Event *e) if (buf_huge) { ats_free_hugepage(buf, buflen); } else { - ats_memalign_free(buf); + ats_free(buf); } buflen = 0; buf = nullptr; @@ -1121,7 +1121,7 @@ CacheSync::mainEvent(int event, Event *e) if (buf_huge) { ats_free_hugepage(buf, buflen); } else { - ats_memalign_free(buf); + ats_free(buf); } buf = nullptr; } diff --git a/iocore/cache/P_CacheVol.h b/iocore/cache/P_CacheVol.h index a51d245e585..f4643f3a059 100644 --- a/iocore/cache/P_CacheVol.h +++ b/iocore/cache/P_CacheVol.h @@ -269,7 +269,7 @@ struct Vol : public Continuation { SET_HANDLER(&Vol::aggWrite); } - ~Vol() override { ats_memalign_free(agg_buffer); } + ~Vol() override { ats_free(agg_buffer); } }; struct AIO_Callback_handler : public Continuation { diff --git a/src/tscore/JeAllocator.cc b/src/tscore/JeAllocator.cc index a1619ae8439..f518f106c76 100644 --- a/src/tscore/JeAllocator.cc +++ b/src/tscore/JeAllocator.cc @@ -24,24 +24,22 @@ #include "tscore/ink_assert.h" #include "tscore/ink_align.h" #include "tscore/JeAllocator.h" +#include "tscore/Diags.h" namespace jearena { -JemallocNodumpAllocator::JemallocNodumpAllocator() -{ - extend_and_setup_arena(); -} - -#ifdef JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED - -extent_hooks_t JemallocNodumpAllocator::extent_hooks_; -extent_alloc_t *JemallocNodumpAllocator::original_alloc_ = nullptr; +#if JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED +extent_alloc_t 
*JemallocNodumpAllocator::original_alloc = nullptr; +extent_hooks_t JemallocNodumpAllocator::extent_hooks; +std::shared_mutex JemallocNodumpAllocator::je_mutex; +std::unordered_map<pthread_t, unsigned int> JemallocNodumpAllocator::arenas; +std::unordered_map<unsigned int, int> JemallocNodumpAllocator::arena_flags; void * JemallocNodumpAllocator::alloc(extent_hooks_t *extent, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, - unsigned arena_ind) + unsigned int arena_id) { - void *result = original_alloc_(extent, new_addr, size, alignment, zero, commit, arena_ind); + void *result = original_alloc(extent, new_addr, size, alignment, zero, commit, arena_id); if (result != nullptr) { // Seems like we don't really care if the advice went through @@ -52,44 +50,52 @@ JemallocNodumpAllocator::alloc(extent_hooks_t *extent, void *new_addr, size_t si return result; } -#endif /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */ - -bool -JemallocNodumpAllocator::extend_and_setup_arena() +unsigned int +JemallocNodumpAllocator::extend_and_setup_arena(pthread_t thread_id) { -#ifdef JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED - size_t arena_index_len_ = sizeof(arena_index_); - if (auto ret = mallctl("arenas.create", &arena_index_, &arena_index_len_, nullptr, 0)) { + unsigned int arena_id; + size_t arena_id_len = sizeof(arena_id); + if (auto ret = mallctl("arenas.create", &arena_id, &arena_id_len, nullptr, 0)) { ink_abort("Unable to extend arena: %s", std::strerror(ret)); } - flags_ = MALLOCX_ARENA(arena_index_) | MALLOCX_TCACHE_NONE; + + int flags = MALLOCX_ARENA(arena_id) | MALLOCX_TCACHE_NONE; // Read the existing hooks - const auto key = "arena." + std::to_string(arena_index_) + ".extent_hooks"; + const auto key = "arena." 
+ std::to_string(arena_id) + ".extent_hooks"; extent_hooks_t *hooks; size_t hooks_len = sizeof(hooks); if (auto ret = mallctl(key.c_str(), &hooks, &hooks_len, nullptr, 0)) { ink_abort("Unable to get the hooks: %s", std::strerror(ret)); } - if (original_alloc_ == nullptr) { - original_alloc_ = hooks->alloc; - } else { - ink_release_assert(original_alloc_ == hooks->alloc); - } - // Set the custom hook - extent_hooks_ = *hooks; - extent_hooks_.alloc = &JemallocNodumpAllocator::alloc; - extent_hooks_t *new_hooks = &extent_hooks_; - if (auto ret = mallctl(key.c_str(), nullptr, nullptr, &new_hooks, sizeof(new_hooks))) { - ink_abort("Unable to set the hooks: %s", std::strerror(ret)); + { + std::unique_lock lock(je_mutex); + if (original_alloc == nullptr) { + original_alloc = hooks->alloc; + } else { + ink_release_assert(original_alloc == hooks->alloc); + } + + // Set the custom hook + if (extent_hooks.alloc != &JemallocNodumpAllocator::alloc) { + extent_hooks = *hooks; + extent_hooks.alloc = &JemallocNodumpAllocator::alloc; + } + extent_hooks_t *new_hooks = &extent_hooks; + if (auto ret = mallctl(key.c_str(), nullptr, nullptr, &new_hooks, sizeof(new_hooks))) { + ink_abort("Unable to set the hooks: %s", std::strerror(ret)); + } + + Debug("JeAllocator", "arena \"%u\" created", arena_id); + + arenas[thread_id] = arena_id; + arena_flags[arena_id] = flags; } - return true; -#else /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */ - return false; -#endif /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */ + return arena_id; } +#endif /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */ /** * This will retain the original functionality if @@ -98,12 +104,29 @@ JemallocNodumpAllocator::extend_and_setup_arena() void * JemallocNodumpAllocator::allocate(InkFreeList *f) { - void *newp = nullptr; +#if JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED + int flags = 0; + pthread_t thread_id = pthread_self(); + { + std::shared_lock lock(je_mutex); + unsigned int arena_id = 0; + auto arena = arenas.find(thread_id); + if 
(unlikely(arena == arenas.end())) { + lock.unlock(); + arena_id = extend_and_setup_arena(thread_id); + lock.lock(); + } else { + arena_id = arena->second; + } + flags = arena_flags.find(arena_id)->second; + } +#endif + void *newp = nullptr; if (f->advice) { -#ifdef JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED +#if JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED if (likely(f->type_size > 0)) { - int flags = flags_ | MALLOCX_ALIGN(f->alignment); + flags |= MALLOCX_ALIGN(f->alignment); if (unlikely((newp = mallocx(f->type_size, flags)) == nullptr)) { ink_abort("couldn't allocate %u bytes", f->type_size); } @@ -120,26 +143,6 @@ JemallocNodumpAllocator::allocate(InkFreeList *f) return newp; } -/** - * This will retain the original functionality if - * !defined(JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED) - */ -void -JemallocNodumpAllocator::deallocate(InkFreeList *f, void *ptr) -{ - if (f->advice) { -#ifdef JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED - if (likely(ptr)) { - dallocx(ptr, flags_); - } -#else - ats_memalign_free(ptr); -#endif - } else { - ats_memalign_free(ptr); - } -} - JemallocNodumpAllocator & globalJemallocNodumpAllocator() { diff --git a/src/tscore/ink_memory.cc b/src/tscore/ink_memory.cc index 7aa204e5a3c..bc37a9539c1 100644 --- a/src/tscore/ink_memory.cc +++ b/src/tscore/ink_memory.cc @@ -131,14 +131,6 @@ ats_free_null(void *ptr) return nullptr; } /* End ats_free_null */ -void -ats_memalign_free(void *ptr) -{ - if (likely(ptr)) { - free(ptr); - } -} - // This effectively makes mallopt() a no-op (currently) when tcmalloc // or jemalloc is used. This might break our usage for increasing the // number of mmap areas (ToDo: Do we still really need that??). 
diff --git a/src/tscore/ink_queue.cc b/src/tscore/ink_queue.cc index f4a2841c06f..56601aa603a 100644 --- a/src/tscore/ink_queue.cc +++ b/src/tscore/ink_queue.cc @@ -327,11 +327,9 @@ freelist_free(InkFreeList *f, void *item) static void malloc_free(InkFreeList *f, void *item) { - if (f->alignment) { - jna.deallocate(f, item); - } else { - ats_free(item); - } + // Avoid compiler warnings + (void)f; + ats_free(item); } void @@ -396,18 +394,12 @@ malloc_bulkfree(InkFreeList *f, void *head, void *tail, size_t num_item) void *next; // Avoid compiler warnings + (void)f; (void)tail; - if (f->alignment) { - for (size_t i = 0; i < num_item && item; ++i, item = next) { - next = *static_cast<void **>(item); // find next item before freeing current item - jna.deallocate(f, item); - } - } else { - for (size_t i = 0; i < num_item && item; ++i, item = next) { - next = *static_cast<void **>(item); // find next item before freeing current item - ats_free(item); - } + for (size_t i = 0; i < num_item && item; ++i, item = next) { + next = *static_cast<void **>(item); // find next item before freeing current item + ats_free(item); } }