-/* $OpenBSD: malloc.c,v 1.290 2023/09/09 06:52:40 asou Exp $ */
+/* $OpenBSD: malloc.c,v 1.291 2023/10/22 12:19:26 otto Exp $ */
/*
* Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net>
* Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
void *p; /* page; low bits used to mark chunks */
uintptr_t size; /* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
- void *f; /* where allocated from */
+ void **f; /* where allocated from */
#endif
};
size_t regions_total; /* number of region slots */
size_t regions_free; /* number of free slots */
size_t rbytesused; /* random bytes used */
- char *func; /* current function */
+ const char *func; /* current function */
int malloc_junk; /* junk fill? */
int mmap_flag; /* extra flag for mmap */
int mutex;
void *chunk_pages;
size_t chunk_pages_used;
#ifdef MALLOC_STATS
+	void *caller;			/* caller address for the current API call */
size_t inserts;
size_t insert_collisions;
size_t finds;
#define STATS_INC(x) ((x)++)
#define STATS_ZERO(x) ((x) = 0)
#define STATS_SETF(x,y) ((x)->f = (y))
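+/* Per-chunk variant: f now points to an array of callers, one per chunk. */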
+#define STATS_SETFN(x,k,y) ((x)->f[k] = (y))
+#define SET_CALLER(x,y) if (DO_STATS) ((x)->caller = (y))
#else
#define STATS_ADD(x,y) /* nothing */
#define STATS_SUB(x,y) /* nothing */
#define STATS_INC(x) /* nothing */
#define STATS_ZERO(x) /* nothing */
#define STATS_SETF(x,y) /* nothing */
+#define STATS_SETFN(x,k,y) /* nothing */
+#define SET_CALLER(x,y) /* nothing */
#endif /* MALLOC_STATS */
u_int32_t canary2;
};
u_short bits[1]; /* which chunks are free */
};
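+/* Nonzero if chunk number n in the page described by chunk_info i is free. */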
+#define CHUNK_FREE(i, n) ((i)->bits[(n) / MALLOC_BITS] & (1U << ((n) % MALLOC_BITS)))
+
struct malloc_readonly {
/* Main bookkeeping information */
struct dir_info *malloc_pool[_MALLOC_MUTEXES];
u_int junk_loc; /* variation in location of junk */
size_t malloc_guard; /* use guard pages after allocations? */
#ifdef MALLOC_STATS
- int malloc_stats; /* dump leak report at end */
+ int malloc_stats; /* save callers, dump leak report at end */
int malloc_verbose; /* dump verbose statistics at end */
#define DO_STATS mopts.malloc_stats
#else
void malloc_dump(void);
PROTO_NORMAL(malloc_dump);
static void malloc_exit(void);
+static void print_chunk_details(struct dir_info *, void *, size_t, size_t);
#endif
#if defined(__aarch64__) || \
do {
mopts.chunk_canaries = arc4random();
} while ((u_char)mopts.chunk_canaries == 0 ||
- (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
+ (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
}
static void
}
static inline void
-validate_junk(struct dir_info *pool, void *p, size_t sz)
+validate_junk(struct dir_info *pool, void *p, size_t argsz)
{
- size_t i, step = 1;
+ size_t i, sz, step = 1;
uint64_t *lp = p;
- if (pool->malloc_junk == 0 || sz == 0)
+ if (pool->malloc_junk == 0 || argsz == 0)
return;
- sz /= sizeof(uint64_t);
+ sz = argsz / sizeof(uint64_t);
if (pool->malloc_junk == 1) {
if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
sz = MALLOC_PAGESIZE / sizeof(uint64_t);
}
/* see junk_free */
for (i = mopts.junk_loc % step; i < sz; i += step) {
- if (lp[i] != SOME_FREEJUNK_ULL)
- wrterror(pool, "write after free %p", p);
+ if (lp[i] != SOME_FREEJUNK_ULL) {
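+			/* With stats, a corrupt chunk gets a report naming its allocator. */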
+#ifdef MALLOC_STATS
+ if (DO_STATS && argsz <= MALLOC_MAXCHUNK)
+ print_chunk_details(pool, lp, argsz, i);
+ else
+#endif
+ wrterror(pool,
+ "write to free mem %p[%zu..%zu]@%zu",
+ lp, i * sizeof(uint64_t),
+ (i + 1) * sizeof(uint64_t) - 1, argsz);
+ }
}
}
/*
- * Cache maintenance.
+ * Cache maintenance.
 * Unlike the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
*/
i = getrbyte(d) & (cache->max - 1);
r = cache->pages[i];
fresh = (uintptr_t)r & 1;
- *(uintptr_t*)&r &= ~1ULL;
+ *(uintptr_t*)&r &= ~1UL;
if (!fresh && !mopts.malloc_freeunmap)
validate_junk(d, r, sz);
if (munmap(r, sz))
{
struct chunk_info *bp;
void *pp;
+ void *ff = NULL;
/* Allocate a new bucket */
pp = map(d, MALLOC_PAGESIZE, 0);
if (pp == MAP_FAILED)
return NULL;
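+	/*
+	 * With stats enabled, allocate a shadow page that records the
+	 * caller of each chunk carved from this bucket page; insert()
+	 * stores it as the region's f array.
+	 */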
+ if (DO_STATS) {
+ ff = map(d, MALLOC_PAGESIZE, 0);
+ if (ff == MAP_FAILED)
+ goto err;
+ memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE / B2ALLOC(bucket));
+ }
/* memory protect the page allocated in the malloc(0) case */
if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
bp->page = pp;
if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp,
- NULL))
+ ff))
goto err;
LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries);
err:
unmap(d, pp, MALLOC_PAGESIZE, 0);
+ if (ff != NULL && ff != MAP_FAILED)
+ unmap(d, ff, MALLOC_PAGESIZE, 0);
return NULL;
}
* Allocate a chunk
*/
static void *
-malloc_bytes(struct dir_info *d, size_t size, void *f)
+malloc_bytes(struct dir_info *d, size_t size)
{
u_int i, r, bucket, listnum;
size_t k;
}
}
found:
- if (i == 0 && k == 0 && DO_STATS) {
- struct region_info *r = find(d, bp->page);
- STATS_SETF(r, f);
- }
-
*lp ^= 1 << k;
/* If there are no more free, remove from free-list */
if (mopts.chunk_canaries && size > 0)
bp->bits[bp->offset + k] = size;
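+	/* Record the caller for this chunk, not just for the page. */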
+ if (DO_STATS) {
+ struct region_info *r = find(d, bp->page);
+ STATS_SETFN(r, k, d->caller);
+ }
+
k *= B2ALLOC(bp->bucket);
p = (char *)bp->page + k;
while (p < q) {
if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
- wrterror(d, "canary corrupted %p %#tx@%#zx%s",
- ptr, p - ptr, sz,
+ wrterror(d, "canary corrupted %p[%tu]@%zu/%zu%s",
+ ptr, p - ptr, sz, allocated,
*p == SOME_FREEJUNK ? " (double free?)" : "");
}
p++;
if ((uintptr_t)ptr & (MALLOC_MINSIZE - 1))
wrterror(d, "modified chunk-pointer %p", ptr);
- if (info->bits[chunknum / MALLOC_BITS] &
- (1U << (chunknum % MALLOC_BITS)))
+ if (CHUNK_FREE(info, chunknum))
wrterror(d, "double free %p", ptr);
if (check && info->bucket > 0) {
validate_canary(d, ptr, info->bits[info->offset + chunknum],
info = (struct chunk_info *)r->size;
chunknum = find_chunknum(d, info, ptr, 0);
- if (chunknum == 0)
- STATS_SETF(r, NULL);
-
info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
info->free++;
if (info->bucket == 0 && !mopts.malloc_freeunmap)
mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
unmap(d, info->page, MALLOC_PAGESIZE, 0);
+#ifdef MALLOC_STATS
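+	/* Release the shadow page of caller pointers as well. */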
+ if (r->f != NULL) {
+ unmap(d, r->f, MALLOC_PAGESIZE, MALLOC_PAGESIZE);
+ r->f = NULL;
+ }
+#endif
delete(d, r);
mp = &d->chunk_info_list[info->bucket];
LIST_INSERT_HEAD(mp, info, entries);
}
-
-
static void *
-omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
+omalloc(struct dir_info *pool, size_t sz, int zero_fill)
{
- void *p;
+ void *p, *caller = NULL;
size_t psz;
if (sz > MALLOC_MAXCHUNK) {
errno = ENOMEM;
return NULL;
}
- if (insert(pool, p, sz, f)) {
+#ifdef MALLOC_STATS
+ if (DO_STATS)
+ caller = pool->caller;
+#endif
+ if (insert(pool, p, sz, caller)) {
unmap(pool, p, psz, 0);
errno = ENOMEM;
return NULL;
} else {
/* takes care of SOME_JUNK */
- p = malloc_bytes(pool, sz, f);
+ p = malloc_bytes(pool, sz);
if (zero_fill && p != NULL && sz > 0)
memset(p, 0, sz);
}
int saved_errno = errno;
PROLOGUE(getpool(), "malloc")
- r = omalloc(d, size, 0, caller());
+ SET_CALLER(d, caller());
+ r = omalloc(d, size, 0);
EPILOGUE()
return r;
}
int saved_errno = errno;
PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
- r = omalloc(d, size, 0, caller());
+ SET_CALLER(d, caller());
+ r = omalloc(d, size, 0);
EPILOGUE()
return r;
}
static struct region_info *
findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
- char **saved_function)
+    const char **saved_function)
{
struct dir_info *pool = argpool;
struct region_info *r = find(pool, p);
{
struct region_info *r;
struct dir_info *pool;
- char *saved_function;
+ const char *saved_function;
size_t sz;
r = findpool(p, *argpool, &pool, &saved_function);
DEF_WEAK(freezero);
static void *
-orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
+orealloc(struct dir_info **argpool, void *p, size_t newsz)
{
struct region_info *r;
struct dir_info *pool;
- char *saved_function;
+ const char *saved_function;
struct chunk_info *info;
size_t oldsz, goldsz, gnewsz;
void *q, *ret;
int forced;
if (p == NULL)
- return omalloc(*argpool, newsz, 0, f);
+ return omalloc(*argpool, newsz, 0);
if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
errno = ENOMEM;
if (mopts.chunk_canaries)
fill_canary(p, newsz,
PAGEROUND(newsz));
- STATS_SETF(r, f);
+ STATS_SETF(r, (*argpool)->caller);
STATS_INC(pool->cheap_reallocs);
ret = p;
goto done;
p = pp;
} else if (mopts.chunk_canaries)
fill_canary(p, newsz, PAGEROUND(newsz));
- STATS_SETF(r, f);
+ STATS_SETF(r, (*argpool)->caller);
ret = p;
goto done;
} else {
if (mopts.chunk_canaries)
fill_canary(p, newsz, PAGEROUND(newsz));
}
- STATS_SETF(r, f);
+ STATS_SETF(r, (*argpool)->caller);
ret = p;
goto done;
}
info->bits[info->offset + chunknum] = newsz;
fill_canary(p, newsz, B2SIZE(info->bucket));
}
- if (DO_STATS && chunknum == 0)
- STATS_SETF(r, f);
+ if (DO_STATS)
+ STATS_SETFN(r, chunknum, (*argpool)->caller);
ret = p;
} else if (newsz != oldsz || forced) {
/* create new allocation */
- q = omalloc(pool, newsz, 0, f);
+ q = omalloc(pool, newsz, 0);
if (q == NULL) {
ret = NULL;
goto done;
/* oldsz == newsz */
if (newsz != 0)
wrterror(pool, "realloc internal inconsistency");
- if (DO_STATS && chunknum == 0)
- STATS_SETF(r, f);
+ if (DO_STATS)
+ STATS_SETFN(r, chunknum, (*argpool)->caller);
ret = p;
}
done:
int saved_errno = errno;
PROLOGUE(getpool(), "realloc")
- r = orealloc(&d, ptr, size, caller());
+ SET_CALLER(d, caller());
+ r = orealloc(&d, ptr, size);
EPILOGUE()
return r;
}
int saved_errno = errno;
PROLOGUE(getpool(), "calloc")
+ SET_CALLER(d, caller());
if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
nmemb > 0 && SIZE_MAX / nmemb < size) {
d->active--;
}
size *= nmemb;
- r = omalloc(d, size, 1, caller());
+ r = omalloc(d, size, 1);
EPILOGUE()
return r;
}
int saved_errno = errno;
PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
+ SET_CALLER(d, caller());
if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
nmemb > 0 && SIZE_MAX / nmemb < size) {
d->active--;
}
size *= nmemb;
- r = omalloc(d, size, 1, caller());
+ r = omalloc(d, size, 1);
EPILOGUE()
return r;
}
static void *
orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
- size_t newsize, void *f)
+ size_t newsize)
{
struct region_info *r;
struct dir_info *pool;
- char *saved_function;
+ const char *saved_function;
void *newptr;
size_t sz;
if (p == NULL)
- return omalloc(*argpool, newsize, 1, f);
+ return omalloc(*argpool, newsize, 1);
if (oldsize == newsize)
return p;
sz - mopts.malloc_guard, oldsize);
}
- newptr = omalloc(pool, newsize, 0, f);
+ newptr = omalloc(pool, newsize, 0);
if (newptr == NULL)
goto done;
return recallocarray_p(ptr, oldnmemb, newnmemb, size);
PROLOGUE(getpool(), "recallocarray")
+ SET_CALLER(d, caller());
if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
newnmemb > 0 && SIZE_MAX / newnmemb < size) {
oldsize = oldnmemb * size;
}
- r = orecallocarray(&d, ptr, oldsize, newsize, caller());
+ r = orecallocarray(&d, ptr, oldsize, newsize);
EPILOGUE()
return r;
}
}
static void *
-omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
- void *f)
+omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill)
{
size_t psz;
- void *p;
+ void *p, *caller = NULL;
/* If between half a page and a page, avoid MALLOC_MOVE. */
if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
pof2 <<= 1;
} else
pof2 = sz;
- return omalloc(pool, pof2, zero_fill, f);
+ return omalloc(pool, pof2, zero_fill);
}
if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
return NULL;
}
- if (insert(pool, p, sz, f)) {
+#ifdef MALLOC_STATS
+ if (DO_STATS)
+ caller = pool->caller;
+#endif
+ if (insert(pool, p, sz, caller)) {
unmap(pool, p, psz, 0);
errno = ENOMEM;
return NULL;
malloc_recurse(d);
goto err;
}
- r = omemalign(d, alignment, size, 0, caller());
+ SET_CALLER(d, caller());
+ r = omemalign(d, alignment, size, 0);
d->active--;
_MALLOC_UNLOCK(d->mutex);
if (r == NULL) {
}
PROLOGUE(getpool(), "aligned_alloc")
- r = omemalign(d, alignment, size, 0, caller());
+ SET_CALLER(d, caller());
+ r = omemalign(d, alignment, size, 0);
EPILOGUE()
return r;
}
#ifdef MALLOC_STATS
+static void
+print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t i)
+{
+ struct region_info *r;
+ struct chunk_info *chunkinfo;
+ uint32_t chunknum;
+ Dl_info info;
+ const char *caller, *pcaller = NULL;
+ const char *object = ".";
+ const char *pobject = ".";
+ const char *msg = "";
+
+ r = find(pool, p);
+ chunkinfo = (struct chunk_info *)r->size;
+ chunknum = find_chunknum(pool, chunkinfo, p, 0);
+ caller = r->f[chunknum];
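+	/* Express the recorded caller as an offset into its mapped object. */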
+ if (dladdr(caller, &info) != 0) {
+ caller -= (uintptr_t)info.dli_fbase;
+ object = info.dli_fname;
+ }
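+	/*
+	 * A write past the end of the preceding chunk is a likely
+	 * culprit, so report who allocated that one too.
+	 */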
+ if (chunknum > 0) {
+ chunknum--;
+ pcaller = r->f[chunknum];
+ if (dladdr(pcaller, &info) != 0) {
+ pcaller -= (uintptr_t)info.dli_fbase;
+ pobject = info.dli_fname;
+ }
+ if (CHUNK_FREE(chunkinfo, chunknum))
+ msg = " (now free)";
+ }
+
+ wrterror(pool,
+ "write to free chunk %p[%zu..%zu]@%zu allocated at %s %p "
+ "(preceding chunk %p allocated at %s %p%s)",
+ p, i * sizeof(uint64_t),
+ (i + 1) * sizeof(uint64_t) - 1, sz, object, caller, p - sz,
+ pobject, pcaller, msg);
+}
+
static void
ulog(const char *format, ...)
{
}
static void
-dump_chunk(struct leaktree* leaks, struct chunk_info *p, void *f,
+dump_chunk(struct leaktree* leaks, struct chunk_info *p, void **f,
int fromfreelist)
{
while (p != NULL) {
if (mopts.malloc_verbose)
ulog("chunk %18p %18p %4zu %d/%d\n",
- p->page, ((p->bits[0] & 1) ? NULL : f),
+ p->page, NULL,
B2SIZE(p->bucket), p->free, p->total);
if (!fromfreelist) {
- size_t sz = B2SIZE(p->bucket);
- if (p->bits[0] & 1)
- putleakinfo(leaks, NULL, sz, p->total -
- p->free);
- else {
- putleakinfo(leaks, f, sz, 1);
- putleakinfo(leaks, NULL, sz,
- p->total - p->free - 1);
+ size_t i, sz = B2SIZE(p->bucket);
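+			/* Attribute each in-use chunk to its recorded caller. */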
+ for (i = 0; i < p->total; i++) {
+ if (!CHUNK_FREE(p, i))
+ putleakinfo(leaks, f[i], sz, 1);
}
break;
}
if (mopts.malloc_verbose) {
ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
- ulog("MT=%d J=%d Fl=%x\n", d->malloc_mt, d->malloc_junk,
+ ulog("MT=%d J=%d Fl=%#x\n", d->malloc_mt, d->malloc_junk,
d->mmap_flag);
ulog("Region slots free %zu/%zu\n",
d->regions_free, d->regions_total);
int save_errno = errno;
ulog("******** Start dump %s *******\n", __progname);
- ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
+ ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%#x cache=%u "
"G=%zu\n",
mopts.malloc_mutexes,
mopts.internal_funcs, mopts.malloc_freecheck,