Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 13 additions & 11 deletions include/tscore/JeAllocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@
#include "tscore/ink_queue.h"
#include <sys/mman.h>
#include <cstddef>
#include <unordered_map>
#include <mutex>
#include <shared_mutex>

#if TS_HAS_JEMALLOC
#include <jemalloc/jemalloc.h>
Expand All @@ -31,6 +34,8 @@
#endif
#if (JEMALLOC_VERSION_MAJOR == 5) && defined(MADV_DONTDUMP)
#define JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED 1
#else
#define JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED 0
#endif /* MADV_DONTDUMP */
#endif /* TS_HAS_JEMALLOC */

Expand Down Expand Up @@ -61,23 +66,20 @@ namespace jearena
class JemallocNodumpAllocator
{
public:
explicit JemallocNodumpAllocator();

void *allocate(InkFreeList *f);
void deallocate(InkFreeList *f, void *ptr);

private:
#if JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED
static extent_hooks_t extent_hooks_;
static extent_alloc_t *original_alloc_;
static void *alloc(extent_hooks_t *extent, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
unsigned arena_ind);
static extent_alloc_t *original_alloc;
static extent_hooks_t extent_hooks;
static std::shared_mutex je_mutex;
static std::unordered_map<pthread_t, unsigned int> arenas;
static std::unordered_map<unsigned int, int> arena_flags;

unsigned arena_index_{0};
int flags_{0};
static void *alloc(extent_hooks_t *extent, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
unsigned int arena_id);
unsigned int extend_and_setup_arena(pthread_t thread_id);
#endif /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */

bool extend_and_setup_arena();
};

/**
Expand Down
1 change: 0 additions & 1 deletion include/tscore/ink_memory.h
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,6 @@ void *ats_realloc(void *ptr, size_t size);
void *ats_memalign(size_t alignment, size_t size);
void ats_free(void *ptr);
void *ats_free_null(void *ptr);
void ats_memalign_free(void *ptr);
int ats_mallopt(int param, int value);

int ats_msync(caddr_t addr, size_t len, caddr_t end, int flags);
Expand Down
8 changes: 4 additions & 4 deletions iocore/cache/CacheDir.cc
Original file line number Diff line number Diff line change
Expand Up @@ -988,7 +988,7 @@ sync_cache_dir_on_shutdown()
if (buf_huge) {
ats_free_hugepage(buf, buflen);
} else {
ats_memalign_free(buf);
ats_free(buf);
}
buf = nullptr;
}
Expand Down Expand Up @@ -1023,7 +1023,7 @@ sync_cache_dir_on_shutdown()
if (buf_huge) {
ats_free_hugepage(buf, buflen);
} else {
ats_memalign_free(buf);
ats_free(buf);
}
buf = nullptr;
}
Expand All @@ -1044,7 +1044,7 @@ CacheSync::mainEvent(int event, Event *e)
if (buf_huge) {
ats_free_hugepage(buf, buflen);
} else {
ats_memalign_free(buf);
ats_free(buf);
}
buflen = 0;
buf = nullptr;
Expand Down Expand Up @@ -1121,7 +1121,7 @@ CacheSync::mainEvent(int event, Event *e)
if (buf_huge) {
ats_free_hugepage(buf, buflen);
} else {
ats_memalign_free(buf);
ats_free(buf);
}
buf = nullptr;
}
Expand Down
2 changes: 1 addition & 1 deletion iocore/cache/P_CacheVol.h
Original file line number Diff line number Diff line change
Expand Up @@ -269,7 +269,7 @@ struct Vol : public Continuation {
SET_HANDLER(&Vol::aggWrite);
}

~Vol() override { ats_memalign_free(agg_buffer); }
~Vol() override { ats_free(agg_buffer); }
};

struct AIO_Callback_handler : public Continuation {
Expand Down
119 changes: 61 additions & 58 deletions src/tscore/JeAllocator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,24 +24,22 @@
#include "tscore/ink_assert.h"
#include "tscore/ink_align.h"
#include "tscore/JeAllocator.h"
#include "tscore/Diags.h"

namespace jearena
{
JemallocNodumpAllocator::JemallocNodumpAllocator()
{
extend_and_setup_arena();
}

#ifdef JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED

extent_hooks_t JemallocNodumpAllocator::extent_hooks_;
extent_alloc_t *JemallocNodumpAllocator::original_alloc_ = nullptr;
#if JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED
extent_alloc_t *JemallocNodumpAllocator::original_alloc = nullptr;
extent_hooks_t JemallocNodumpAllocator::extent_hooks;
std::shared_mutex JemallocNodumpAllocator::je_mutex;
std::unordered_map<pthread_t, unsigned int> JemallocNodumpAllocator::arenas;
std::unordered_map<unsigned int, int> JemallocNodumpAllocator::arena_flags;

void *
JemallocNodumpAllocator::alloc(extent_hooks_t *extent, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
unsigned arena_ind)
unsigned int arena_id)
{
void *result = original_alloc_(extent, new_addr, size, alignment, zero, commit, arena_ind);
void *result = original_alloc(extent, new_addr, size, alignment, zero, commit, arena_id);

if (result != nullptr) {
// Seems like we don't really care if the advice went through
Expand All @@ -52,44 +50,52 @@ JemallocNodumpAllocator::alloc(extent_hooks_t *extent, void *new_addr, size_t si
return result;
}

#endif /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */

bool
JemallocNodumpAllocator::extend_and_setup_arena()
unsigned int
JemallocNodumpAllocator::extend_and_setup_arena(pthread_t thread_id)
{
#ifdef JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED
size_t arena_index_len_ = sizeof(arena_index_);
if (auto ret = mallctl("arenas.create", &arena_index_, &arena_index_len_, nullptr, 0)) {
unsigned int arena_id;
size_t arena_id_len = sizeof(arena_id);
if (auto ret = mallctl("arenas.create", &arena_id, &arena_id_len, nullptr, 0)) {
ink_abort("Unable to extend arena: %s", std::strerror(ret));
}
flags_ = MALLOCX_ARENA(arena_index_) | MALLOCX_TCACHE_NONE;

int flags = MALLOCX_ARENA(arena_id) | MALLOCX_TCACHE_NONE;

// Read the existing hooks
const auto key = "arena." + std::to_string(arena_index_) + ".extent_hooks";
const auto key = "arena." + std::to_string(arena_id) + ".extent_hooks";
extent_hooks_t *hooks;
size_t hooks_len = sizeof(hooks);
if (auto ret = mallctl(key.c_str(), &hooks, &hooks_len, nullptr, 0)) {
ink_abort("Unable to get the hooks: %s", std::strerror(ret));
}
if (original_alloc_ == nullptr) {
original_alloc_ = hooks->alloc;
} else {
ink_release_assert(original_alloc_ == hooks->alloc);
}

// Set the custom hook
extent_hooks_ = *hooks;
extent_hooks_.alloc = &JemallocNodumpAllocator::alloc;
extent_hooks_t *new_hooks = &extent_hooks_;
if (auto ret = mallctl(key.c_str(), nullptr, nullptr, &new_hooks, sizeof(new_hooks))) {
ink_abort("Unable to set the hooks: %s", std::strerror(ret));
{
std::unique_lock lock(je_mutex);
if (original_alloc == nullptr) {
original_alloc = hooks->alloc;
} else {
ink_release_assert(original_alloc == hooks->alloc);
}

// Set the custom hook
if (extent_hooks.alloc != &JemallocNodumpAllocator::alloc) {
extent_hooks = *hooks;
extent_hooks.alloc = &JemallocNodumpAllocator::alloc;
}
extent_hooks_t *new_hooks = &extent_hooks;
if (auto ret = mallctl(key.c_str(), nullptr, nullptr, &new_hooks, sizeof(new_hooks))) {
ink_abort("Unable to set the hooks: %s", std::strerror(ret));
}

Debug("JeAllocator", "arena \"%ud\" created", arena_id);

arenas[thread_id] = arena_id;
Comment thread
masaori335 marked this conversation as resolved.
arena_flags[arena_id] = flags;
}

return true;
#else /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */
return false;
#endif /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */
return arena_id;
}
#endif /* JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED */

/**
* This will retain the original functionality if
Expand All @@ -98,12 +104,29 @@ JemallocNodumpAllocator::extend_and_setup_arena()
void *
JemallocNodumpAllocator::allocate(InkFreeList *f)
{
void *newp = nullptr;
#if JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED
int flags = 0;
pthread_t thread_id = pthread_self();
{
std::shared_lock lock(je_mutex);
unsigned int arena_id = 0;
auto arena = arenas.find(thread_id);
if (unlikely(arena == arenas.end())) {
lock.unlock();
arena_id = extend_and_setup_arena(thread_id);
lock.lock();
} else {
arena_id = arena->second;
}
flags = arena_flags.find(arena_id)->second;
}
#endif

void *newp = nullptr;
if (f->advice) {
#ifdef JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED
#if JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED
if (likely(f->type_size > 0)) {
int flags = flags_ | MALLOCX_ALIGN(f->alignment);
flags |= MALLOCX_ALIGN(f->alignment);
if (unlikely((newp = mallocx(f->type_size, flags)) == nullptr)) {
ink_abort("couldn't allocate %u bytes", f->type_size);
}
Expand All @@ -120,26 +143,6 @@ JemallocNodumpAllocator::allocate(InkFreeList *f)
return newp;
}

/**
 * This will retain the original functionality if
 * !defined(JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED)
 */
void
JemallocNodumpAllocator::deallocate(InkFreeList *f, void *ptr)
{
  // Without the advice flag the item was obtained from the ordinary
  // aligned-malloc path, so release it the same way.
  if (!f->advice) {
    ats_memalign_free(ptr);
    return;
  }
#ifdef JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED
  // Return the item to the nodump arena it was mallocx()'d from.
  if (likely(ptr)) {
    dallocx(ptr, flags_);
  }
#else
  ats_memalign_free(ptr);
#endif
}

JemallocNodumpAllocator &
globalJemallocNodumpAllocator()
{
Expand Down
8 changes: 0 additions & 8 deletions src/tscore/ink_memory.cc
Original file line number Diff line number Diff line change
Expand Up @@ -131,14 +131,6 @@ ats_free_null(void *ptr)
return nullptr;
} /* End ats_free_null */

// Release memory obtained from an aligned allocation; a null pointer is a
// no-op.
void
ats_memalign_free(void *ptr)
{
  if (ptr == nullptr) {
    return;
  }
  free(ptr);
}

// This effectively makes mallopt() a no-op (currently) when tcmalloc
// or jemalloc is used. This might break our usage for increasing the
// number of mmap areas (ToDo: Do we still really need that??).
Expand Down
22 changes: 7 additions & 15 deletions src/tscore/ink_queue.cc
Original file line number Diff line number Diff line change
Expand Up @@ -327,11 +327,9 @@ freelist_free(InkFreeList *f, void *item)
// Free one item from the malloc-backed freelist path. Every item is released
// with ats_free() regardless of the list's alignment setting. (The pasted
// diff residue had the old branching body concatenated with this one, which
// as written would free the item twice.)
static void
malloc_free(InkFreeList *f, void *item)
{
  // Avoid compiler warnings
  (void)f;
  ats_free(item);
}

void
Expand Down Expand Up @@ -396,18 +394,12 @@ malloc_bulkfree(InkFreeList *f, void *head, void *tail, size_t num_item)
void *next;

// Avoid compiler warnings
(void)f;
(void)tail;

if (f->alignment) {
for (size_t i = 0; i < num_item && item; ++i, item = next) {
next = *static_cast<void **>(item); // find next item before freeing current item
jna.deallocate(f, item);
}
} else {
for (size_t i = 0; i < num_item && item; ++i, item = next) {
next = *static_cast<void **>(item); // find next item before freeing current item
ats_free(item);
}
for (size_t i = 0; i < num_item && item; ++i, item = next) {
next = *static_cast<void **>(item); // find next item before freeing current item
ats_free(item);
}
}

Expand Down