|
Message-Id: <20220107195530.1862460-1-ccross@google.com> Date: Fri, 7 Jan 2022 11:55:30 -0800 From: Colin Cross <ccross@...gle.com> To: musl@...ts.openwall.com Cc: dalias@...c.org, Colin Cross <ccross@...roid.com> Subject: [PATCH v2] Add mallinfo2 and mallinfo From: Colin Cross <ccross@...roid.com> glibc introduced mallinfo2 [1], which solves some of the arguments [2] against including mallinfo in musl by expanding the width of the returned counters from int to size_t. This patch implements mallinfo2 without requiring any additional metadata. It iterates through the meta_areas and metas in order to count mmap, large and small allocations, and produces arena, ordblks, hblks, hblkhd, uordblks and fordblks values. Once mallinfo2 exists, it is trivial to implement mallinfo that caps the mallinfo2 outputs such that they fit in the int fields returned by mallinfo. [1] https://sourceware.org/git/?p=glibc.git;a=commit;h=e3960d1c57e57f33e0e846d615788f4ede73b945 [2] https://www.openwall.com/lists/musl/2018/01/17/2 --- Changes since v1: - fixed style issues - added a_popcount_32 to atomic.h - accounted for short single allocation groups I made a_popcount_32 use __builtin_popcount on any compilers that support it, which should give the most optimal solution for the architecture. It should fall back to the provided implementation if the compiler doesn't support __has_builtin or __builtin_popcount. Short single allocation groups are now accounted properly. It is ambiguous whether they should be counted in arena (heap) or hblkhd (mmaped), I chose to count them as heap for now since they are below the threshold size that would always use mmap. 
/* Public mallinfo interfaces (include/malloc.h additions).
 * struct mallinfo keeps the traditional int-width fields for glibc
 * compatibility; struct mallinfo2 is the wide (size_t) variant. */
struct mallinfo {
	int arena;
	int ordblks;
	int smblks;
	int hblks;
	int hblkhd;
	int usmblks;
	int fsmblks;
	int uordblks;
	int fordblks;
	int keepcost;
};

struct mallinfo mallinfo(void);

struct mallinfo2 {
	size_t arena;
	size_t ordblks;
	size_t smblks;
	size_t hblks;
	size_t hblkhd;
	size_t usmblks;
	size_t fsmblks;
	size_t uordblks;
	size_t fordblks;
	size_t keepcost;
};

struct mallinfo2 mallinfo2(void);

/* Population count (number of set bits) of a 32-bit word.
 * Prefer the compiler builtin when available; it lowers to a single
 * instruction on targets that have one. */
#if defined __has_builtin
#if __has_builtin(__builtin_popcount)
#define a_popcount_32 a_popcount_32
static inline int a_popcount_32(uint32_t x)
{
	return __builtin_popcount(x);
}
#endif
#endif

#ifndef a_popcount_32
#define a_popcount_32 a_popcount_32
static inline int a_popcount_32(uint32_t x)
{
	/* Branch-free SWAR popcount: fold pairwise, then nibble-wise,
	 * then sum the four byte counts with a multiply. All arithmetic
	 * is on unsigned values, so wraparound is well defined. */
	x = x - ((x >> 1) & 0x55555555);
	x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
	x = (x + (x >> 4)) & 0x0f0f0f0f;
	return (x * 0x01010101) >> 24;
}
#endif
b/src/malloc/mallocng/mallinfo.c new file mode 100644 index 00000000..365bc557 --- /dev/null +++ b/src/malloc/mallocng/mallinfo.c @@ -0,0 +1,87 @@ +#include <limits.h> +#include <malloc.h> +#include <stddef.h> + +#include "glue.h" +#include "meta.h" + +static void accumulate_meta(struct mallinfo2 *mi, struct meta *g) +{ + int sc = g->sizeclass; + if (sc >= 48) { + // Large mmap allocation + mi->hblks++; + mi->uordblks += g->maplen*4096; + mi->hblkhd += g->maplen*4096; + } else { + if (g->freeable && !g->maplen) { + // Small size slots are embedded in a larger slot, avoid + // double counting by subtracting the size of the larger + // slot from the total used memory. + struct meta* outer_g = get_meta((void*)g->mem); + int outer_sc = outer_g->sizeclass; + int outer_sz = size_classes[outer_sc]*UNIT; + mi->uordblks -= outer_sz; + mi->arena -= outer_sz; + } + int sz = size_classes[sc]*UNIT; + int mask = g->avail_mask | g->freed_mask; + int nr_unused = a_popcount_32(mask); + + if (!g->last_idx) { + // Allocation groups with a single allocation may use a + // smaller maplen than normally used for the size class. + if (sz > g->maplen-IB-UNIT) { + sz = g->maplen-IB-UNIT; + } + } + mi->arena += sz*(g->last_idx+1); + mi->ordblks += nr_unused; + mi->uordblks += sz*(g->last_idx+1-nr_unused); + mi->fordblks += sz*nr_unused; + } +} + +static void accumulate_meta_area(struct mallinfo2 *mi, struct meta_area *ma) +{ + for (int i=0; i<ma->nslots; i++) { + if (ma->slots[i].mem) { + accumulate_meta(mi, &ma->slots[i]); + } + } +} + +struct mallinfo2 mallinfo2(void) +{ + struct mallinfo2 mi = {0}; + + rdlock(); + struct meta_area *ma = ctx.meta_area_head; + while (ma) { + accumulate_meta_area(&mi, ma); + ma = ma->next; + } + unlock(); + + return mi; +} + +#define cap(x) ((x > INT_MAX) ? 
INT_MAX : x) + +struct mallinfo mallinfo(void) { + struct mallinfo mi = {0}; + struct mallinfo2 mi2 = mallinfo2(); + + mi.arena = cap(mi2.arena); + mi.ordblks = cap(mi2.ordblks); + mi.smblks = cap(mi2.smblks); + mi.hblks = cap(mi2.hblks); + mi.hblkhd = cap(mi2.hblkhd); + mi.usmblks = cap(mi2.usmblks); + mi.fsmblks = cap(mi2.fsmblks); + mi.uordblks = cap(mi2.uordblks); + mi.fordblks = cap(mi2.fordblks); + mi.keepcost = cap(mi2.keepcost); + + return mi; +} -- 2.34.1.575.g55b058a8bb-goog
Powered by blists - more mailing lists
Confused about mailing lists and their use? Read about mailing lists on Wikipedia and check out these guidelines on proper formatting of your messages.