
Integrate two patches originally from Daniel Micay.

1. Optionally add random "canaries" to the end of an allocation. This
requires increasing the internal size of the allocation slightly, which
probably results in a large effective increase with the current power-of-two
sizing. Therefore, this option is only enabled via 'C'. (A standalone
sketch of the scheme appears after this message.)
2. When writing junk (0xdf) to freed chunks (the current default behavior),
check that the junk is still intact when finally freeing the delayed chunk,
to catch some potential use-after-free bugs. This should be cheap enough
that there's no option to control it separately. (A sketch of this check
follows the diff below.)
ok deraadt tb
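
In brief, the canary scheme works like this: the requested size is bumped by
one pointer-sized word, and that word at the end of the usable region is
filled with a per-process secret XORed with a hash of the canary's own
address; the free path recomputes the expected value and aborts on mismatch.
What follows is a minimal standalone sketch of that idea, not the malloc.c
code itself: secret_canary, hash_ptr(), chunk_alloc() and chunk_free() are
hypothetical names, and a real implementation would draw the secret from
arc4random_buf() as the diff below does.

/*
 * Sketch of the end-of-chunk canary idea (hypothetical helper names,
 * not the malloc.c internals).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static uintptr_t secret_canary;		/* real code: arc4random_buf() */

/* mix in the canary's address so one leaked value can't be replayed */
static uintptr_t
hash_ptr(void *p)
{
	return (uintptr_t)p * (uintptr_t)0x9e3779b97f4a7c15ULL;
}

static void *
chunk_alloc(size_t usersz)
{
	/* grow the allocation by one word to hold the canary */
	char *p = malloc(usersz + sizeof(uintptr_t));
	uintptr_t *canary;

	if (p == NULL)
		return NULL;
	canary = (uintptr_t *)(p + usersz);
	*canary = secret_canary ^ hash_ptr(canary);
	return p;
}

static void
chunk_free(void *p, size_t usersz)
{
	uintptr_t *canary = (uintptr_t *)((char *)p + usersz);

	if (*canary != (secret_canary ^ hash_ptr(canary))) {
		fprintf(stderr, "chunk canary corrupted %p\n", p);
		abort();
	}
	free(p);
}

int
main(void)
{
	char *buf;

	secret_canary = (uintptr_t)0xdeadbeefcafef00dULL;	/* sketch only */
	buf = chunk_alloc(32);
	if (buf == NULL)
		return 1;
	memset(buf, 'A', 32);	/* in-bounds writes leave the canary alone */
	chunk_free(buf, 32);	/* buf[32] = 'X' beforehand would abort */
	return 0;
}

In the actual diff the canary lives inside the chunk page and is verified by
find_chunknum() on every free or realloc of a small chunk.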
commit 1bd8c9e70f by tedu (2015/12/09, OPENBSD_5_9)
1 changed file with 76 additions and 8 deletions: src/lib/libc/stdlib/malloc.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.c,v 1.176 2015/09/13 20:29:23 guenther Exp $ */
+/* $OpenBSD: malloc.c,v 1.177 2015/12/09 02:45:23 tedu Exp $ */
 /*
  * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -185,12 +185,14 @@ struct malloc_readonly {
 	int	malloc_move;		/* move allocations to end of page? */
 	int	malloc_realloc;		/* always realloc? */
 	int	malloc_xmalloc;		/* xmalloc behaviour? */
+	size_t	malloc_canaries;	/* use canaries after chunks? */
 	size_t	malloc_guard;		/* use guard pages after allocations? */
 	u_int	malloc_cache;		/* free pages we cache */
 #ifdef MALLOC_STATS
 	int	malloc_stats;		/* dump statistics at end */
 #endif
 	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
+	uintptr_t malloc_chunk_canary;
 };
 
 /* This object is mapped PROT_READ after initialisation to prevent tampering */
@@ -526,6 +528,12 @@ omalloc_init(struct dir_info **dp)
 			case 'A':
 				mopts.malloc_abort = 1;
 				break;
+			case 'c':
+				mopts.malloc_canaries = 0;
+				break;
+			case 'C':
+				mopts.malloc_canaries = sizeof(void *);
+				break;
 #ifdef MALLOC_STATS
 			case 'd':
 				mopts.malloc_stats = 0;
@@ -619,6 +627,9 @@ omalloc_init(struct dir_info **dp)
 	while ((mopts.malloc_canary = arc4random()) == 0)
 		;
 
+	arc4random_buf(&mopts.malloc_chunk_canary,
+	    sizeof(mopts.malloc_chunk_canary));
+
 	/*
 	 * Allocate dir_info with a guard page on either side. Also
 	 * randomise offset inside the page at which the dir_info
@@ -984,8 +995,15 @@ malloc_bytes(struct dir_info *d, size_t size, void *f)
 	k += (lp - bp->bits) * MALLOC_BITS;
 	k <<= bp->shift;
 
+	if (mopts.malloc_canaries && bp->size > 0) {
+		char *end = (char *)bp->page + k + bp->size;
+		uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
+		*canary = mopts.malloc_chunk_canary ^ hash(canary);
+	}
+
 	if (mopts.malloc_junk == 2 && bp->size > 0)
-		memset((char *)bp->page + k, SOME_JUNK, bp->size);
+		memset((char *)bp->page + k, SOME_JUNK,
+		    bp->size - mopts.malloc_canaries);
 	return ((char *)bp->page + k);
 }
@@ -999,6 +1017,13 @@ find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
 	if (info->canary != d->canary1)
 		wrterror("chunk info corrupted", NULL);
 
+	if (mopts.malloc_canaries && info->size > 0) {
+		char *end = (char *)ptr + info->size;
+		uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
+		if (*canary != (mopts.malloc_chunk_canary ^ hash(canary)))
+			wrterror("chunk canary corrupted", ptr);
+	}
+
 	/* Find the chunk number on the page */
 	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
@@ -1121,7 +1146,7 @@ omalloc(size_t sz, int zero_fill, void *f)
 		/* takes care of SOME_JUNK */
 		p = malloc_bytes(pool, sz, f);
 		if (zero_fill && p != NULL && sz > 0)
-			memset(p, 0, sz);
+			memset(p, 0, sz - mopts.malloc_canaries);
 	}
 
 	return p;
@@ -1176,6 +1201,8 @@ malloc(size_t size)
 		malloc_recurse();
 		return NULL;
 	}
+	if (size > 0 && size <= MALLOC_MAXCHUNK)
+		size += mopts.malloc_canaries;
 	r = omalloc(size, 0, CALLER);
 	malloc_active--;
 	_MALLOC_UNLOCK();
@@ -1189,6 +1216,30 @@ malloc(size_t size)
 }
 /*DEF_STRONG(malloc);*/
 
+static void
+validate_junk(void *p) {
+	struct region_info *r;
+	struct dir_info *pool = getpool();
+	size_t byte, sz;
+
+	if (p == NULL)
+		return;
+	r = find(pool, p);
+	if (r == NULL)
+		wrterror("bogus pointer in validate_junk", p);
+	REALSIZE(sz, r);
+	if (sz > 0 && sz <= MALLOC_MAXCHUNK)
+		sz -= mopts.malloc_canaries;
+	if (sz > 32)
+		sz = 32;
+	for (byte = 0; byte < sz; byte++) {
+		if (((unsigned char *)p)[byte] != SOME_FREEJUNK) {
+			wrterror("use after free", p);
+			return;
+		}
+	}
+}
+
 static void
 ofree(void *p)
 {
@@ -1242,7 +1293,7 @@ ofree(void *p)
 			int i;
 
 			if (mopts.malloc_junk && sz > 0)
-				memset(p, SOME_FREEJUNK, sz);
+				memset(p, SOME_FREEJUNK, sz - mopts.malloc_canaries);
 			if (!mopts.malloc_freenow) {
 				if (find_chunknum(pool, r, p) == -1)
 					return;
@@ -1253,6 +1304,8 @@ ofree(void *p)
 					wrterror("double free", p);
 					return;
 				}
+				if (mopts.malloc_junk)
+					validate_junk(p);
 				pool->delayed_chunks[i] = tmp;
 			}
 		}
 		if (p != NULL) {
@@ -1386,16 +1439,25 @@ gotit:
 		}
 	}
 	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
-		if (mopts.malloc_junk == 2 && newsz > 0)
-			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
+		if (mopts.malloc_junk == 2 && newsz > 0) {
+			size_t usable_oldsz = oldsz;
+			if (oldsz <= MALLOC_MAXCHUNK)
+				usable_oldsz -= mopts.malloc_canaries;
+			if (newsz < usable_oldsz)
+				memset((char *)p + newsz, SOME_JUNK, usable_oldsz - newsz);
+		}
 		STATS_SETF(r, f);
 		return p;
 	} else if (newsz != oldsz || mopts.malloc_realloc) {
 		q = omalloc(newsz, 0, f);
 		if (q == NULL)
 			return NULL;
-		if (newsz != 0 && oldsz != 0)
-			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
+		if (newsz != 0 && oldsz != 0) {
+			size_t copysz = oldsz < newsz ? oldsz : newsz;
+			if (copysz <= MALLOC_MAXCHUNK)
+				copysz -= mopts.malloc_canaries;
+			memcpy(q, p, copysz);
+		}
 		ofree(p);
 		return q;
 	} else {
@@ -1420,6 +1482,8 @@ realloc(void *ptr, size_t size)
 		malloc_recurse();
 		return NULL;
 	}
+	if (size > 0 && size <= MALLOC_MAXCHUNK)
+		size += mopts.malloc_canaries;
 	r = orealloc(ptr, size, CALLER);
 
 	malloc_active--;
@@ -1468,6 +1532,8 @@ calloc(size_t nmemb, size_t size)
 	}
 
 	size *= nmemb;
+	if (size > 0 && size <= MALLOC_MAXCHUNK)
+		size += mopts.malloc_canaries;
 	r = omalloc(size, 1, CALLER);
 
 	malloc_active--;
@@ -1595,6 +1661,8 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
 		malloc_recurse();
 		goto err;
 	}
+	if (size > 0 && size <= MALLOC_MAXCHUNK)
+		size += mopts.malloc_canaries;
 	r = omemalign(alignment, size, 0, CALLER);
 	malloc_active--;
 	_MALLOC_UNLOCK();

