.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $OpenBSD: malloc.3,v 1.109 2017/04/06 17:00:52 otto Exp $
+.\" $OpenBSD: malloc.3,v 1.110 2017/04/10 05:45:02 otto Exp $
.\"
-.Dd $Mdocdate: April 6 2017 $
+.Dd $Mdocdate: April 10 2017 $
.Dt MALLOC 3
.Os
.Sh NAME
.Nm malloc ,
.Nm calloc ,
-.Nm reallocarray ,
-.Nm recallocarray ,
.Nm realloc ,
-.Nm free
+.Nm free ,
+.Nm reallocarray ,
+.Nm recallocarray ,
+.Nm freezero
.Nd memory allocation and deallocation
.Sh SYNOPSIS
.In stdlib.h
.Ft void *
.Fn calloc "size_t nmemb" "size_t size"
.Ft void *
+.Fn realloc "void *ptr" "size_t size"
+.Ft void
+.Fn free "void *ptr"
+.Ft void *
.Fn reallocarray "void *ptr" "size_t nmemb" "size_t size"
.Ft void *
.Fn recallocarray "void *ptr" "size_t oldnmemb" "size_t nmemb" "size_t size"
-.Ft void *
-.Fn realloc "void *ptr" "size_t size"
.Ft void
-.Fn free "void *ptr"
+.Fn freezero "void *ptr" "size_t size"
.Vt char *malloc_options ;
.Sh DESCRIPTION
+The standard functions
+.Fn malloc ,
+.Fn calloc ,
+and
+.Fn realloc
+allocate memory space.
The
.Fn malloc
function allocates uninitialized space for an object of
and allocates a new object.
.Pp
The
+.Fn free
+function causes the space pointed to by
+.Fa ptr
+to be either placed on a list of free blocks to make it available for future
+allocation or, when appropriate, to be returned to the kernel using
+.Xr munmap 2 .
+If
+.Fa ptr
+is a
+.Dv NULL
+pointer, no action occurs.
+If
+.Fa ptr
+was previously freed by
+.Fn free
+or a reallocation function,
+the behavior is undefined and the double free is a security concern.
+.Pp
+Designed for safe allocation of arrays,
+the
.Fn reallocarray
function is similar to
.Fn realloc
*
.Fa size .
.Pp
-The
+Used for the allocation of memory holding sensitive data,
+the
.Fn recallocarray
function is similar to
.Fn reallocarray
otherwise the behaviour is undefined.
.Pp
The
+.Fn freezero
+function is similar to the
.Fn free
-function causes the space pointed to by
-.Fa ptr
-to be either placed on a list of free pages to make it available for future
-allocation or, if required, to be returned to the kernel using
-.Xr munmap 2 .
+function except it ensures the memory being deallocated is explicitly
+discarded.
If
.Fa ptr
-is a
-.Dv NULL
-pointer, no action occurs.
+is
+.Dv NULL ,
+no action occurs.
If
.Fa ptr
-was previously freed by
-.Fn free
-or a reallocation function,
-the behavior is undefined and the double free is a security concern.
+is not
+.Dv NULL ,
+the
+.Fa size
+argument must be the size of the earlier allocation that returned
+.Fa ptr ,
+otherwise the behaviour is undefined.
.Sh RETURN VALUES
Upon successful completion, the allocation functions
return a pointer to the allocated space; otherwise, a
keys.
To avoid leaking information,
it guarantees memory is cleared before placing it on the internal free list.
-A
-.Fn free
-call for such an object should still be preceded by a call to
-.Xr explicit_bzero 3 .
+Deallocation of such an object should be done by calling
+.Fn freezero .
.Sh ENVIRONMENT
.Bl -tag -width "/etc/malloc.conf"
.It Ev MALLOC_OPTIONS
.Fn recallocarray
function appeared in
.Ox 6.1 .
+The
+.Fn freezero
+function appeared in
+.Ox 6.2 .
.Sh CAVEATS
When using
.Fn malloc ,
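To illustrate the documented usage above, here is a minimal sketch (not part of the patch) of how a caller might manage a buffer of sensitive data with recallocarray() and freezero(); the function and variable names (grow_keys, drop_keys, keys, keylen) are hypothetical, and the code assumes an OpenBSD libc providing both functions:

#include <stdlib.h>

/*
 * Hypothetical example: grow an array of secret keys with
 * recallocarray(), which clears the storage it discards, then
 * release the array with freezero() so the contents are explicitly
 * discarded rather than left on the free list.  The size passed to
 * freezero() must match the size of the allocation being freed.
 */
int
grow_keys(unsigned char **keys, size_t oldn, size_t newn, size_t keylen)
{
	unsigned char *p;

	p = recallocarray(*keys, oldn, newn, keylen);
	if (p == NULL)
		return -1;
	*keys = p;
	return 0;
}

void
drop_keys(unsigned char *keys, size_t n, size_t keylen)
{
	/* n * keylen cannot overflow here: recallocarray() already checked it */
	freezero(keys, n * keylen);
}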
-/* $OpenBSD: malloc.c,v 1.219 2017/04/06 08:39:47 otto Exp $ */
+/* $OpenBSD: malloc.c,v 1.220 2017/04/10 05:45:02 otto Exp $ */
/*
* Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
* Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
int malloc_realloc; /* always realloc? */
int malloc_xmalloc; /* xmalloc behaviour? */
int chunk_canaries; /* use canaries after chunks? */
- int internal_recallocarray; /* use better recallocarray? */
+ int internal_funcs; /* use better recallocarray/freezero? */
u_int malloc_cache; /* free pages we cache */
size_t malloc_guard; /* use guard pages after allocations? */
#ifdef MALLOC_STATS
if (sz != PAGEROUND(sz))
wrterror(d, "munmap round");
- if (psz > mopts.malloc_cache) {
+ rsz = mopts.malloc_cache - d->free_regions_size;
+
+ /*
+ * normally the cache holds recently freed regions, but if the region
+ * to unmap is larger than the cache size or we're clearing and the
+ * cache is full, just munmap
+ */
+ if (psz > mopts.malloc_cache || (clear && rsz == 0)) {
i = munmap(p, sz);
if (i)
wrterror(d, "munmap %p", p);
return;
}
tounmap = 0;
- rsz = mopts.malloc_cache - d->free_regions_size;
if (psz > rsz)
tounmap = psz - rsz;
offset = getrbyte(d);
if (from_rthreads)
mopts.malloc_mt = 1;
else
- mopts.internal_recallocarray = 1;
+ mopts.internal_funcs = 1;
/*
* Options have been set and will never be reset.
}
static void
-ofree(struct dir_info *argpool, void *p, int clear)
+ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
{
struct dir_info *pool;
struct region_info *r;
}
REALSIZE(sz, r);
+ if (check) {
+ if (sz <= MALLOC_MAXCHUNK) {
+ if (mopts.chunk_canaries) {
+ struct chunk_info *info =
+ (struct chunk_info *)r->size;
+ uint32_t chunknum =
+ find_chunknum(pool, r, p, 0);
+
+ if (info->bits[info->offset + chunknum] !=
+ argsz)
+ wrterror(pool, "recorded old size %hu"
+ " != %zu",
+ info->bits[info->offset + chunknum],
+ argsz);
+ }
+ } else if (argsz != sz - mopts.malloc_guard)
+ wrterror(pool, "recorded old size %zu != %zu",
+ sz - mopts.malloc_guard, argsz);
+ }
if (sz > MALLOC_MAXCHUNK) {
if (!MALLOC_MOVE_COND(sz)) {
if (r->p != p)
malloc_recurse(d);
return;
}
- ofree(d, ptr, 0);
+ ofree(d, ptr, 0, 0, 0);
d->active--;
_MALLOC_UNLOCK(d->mutex);
errno = saved_errno;
}
/*DEF_STRONG(free);*/
+static void
+freezero_p(void *ptr, size_t sz)
+{
+ explicit_bzero(ptr, sz);
+ free(ptr);
+}
+
+void
+freezero(void *ptr, size_t sz)
+{
+ struct dir_info *d;
+ int saved_errno = errno;
+
+ /* This is legal. */
+ if (ptr == NULL)
+ return;
+
+ if (!mopts.internal_funcs)
+ return freezero_p(ptr, sz);
+
+ d = getpool();
+ if (d == NULL)
+ wrterror(d, "freezero() called before allocation");
+ _MALLOC_LOCK(d->mutex);
+ d->func = "freezero";
+ if (d->active++) {
+ malloc_recurse(d);
+ return;
+ }
+ ofree(d, ptr, 1, 1, sz);
+ d->active--;
+ _MALLOC_UNLOCK(d->mutex);
+ errno = saved_errno;
+}
+DEF_WEAK(freezero);
static void *
orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
}
if (newsz != 0 && oldsz != 0)
memcpy(q, p, oldsz < newsz ? oldsz : newsz);
- ofree(pool, p, 0);
+ ofree(pool, p, 0, 0, 0);
ret = q;
} else {
/* oldsz == newsz */
} else
memcpy(newptr, p, newsize);
- ofree(pool, p, 1);
+ ofree(pool, p, 1, 0, 0);
done:
if (argpool != pool) {
void *r;
int saved_errno = errno;
- if (!mopts.internal_recallocarray)
+ if (!mopts.internal_funcs)
return recallocarray_p(ptr, oldnmemb, newnmemb, size);
d = getpool();
__progname);
write(fd, buf, strlen(buf));
snprintf(buf, sizeof(buf),
- "MT=%d IRC=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
- mopts.malloc_mt, mopts.internal_recallocarray,
+ "MT=%d I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
+ mopts.malloc_mt, mopts.internal_funcs,
mopts.malloc_freenow,
mopts.malloc_freeunmap, mopts.malloc_junk,
mopts.malloc_realloc, mopts.malloc_xmalloc,
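When the allocator's internal implementation is not active (mopts.internal_funcs unset, for example because the program interposes its own malloc), the patch falls back to freezero_p(), i.e. explicit_bzero() followed by free().  A caller on a system without freezero() could follow the same pattern; the wrapper below is a hypothetical sketch mirroring that fallback, not part of the patch:

#include <stdlib.h>
#include <string.h>	/* explicit_bzero() */

/*
 * Hypothetical fallback mirroring freezero_p() from the patch:
 * clear the buffer before handing it to free() so sensitive
 * contents do not linger.  The NULL check matches freezero()'s
 * documented behaviour of doing nothing for a NULL pointer.
 */
static void
my_freezero(void *ptr, size_t sz)
{
	if (ptr == NULL)
		return;
	explicit_bzero(ptr, sz);
	free(ptr);
}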