Browse Source

Add a new malloc option 'U' => "Free unmap" that does the guarding/unmapping
of freed allocations without disabling chunk randomisation,
unlike the "Freeguard" ('F') option. Make the security 'S' option
use 'U' rather than 'F'.
Rationale: guarding with no chunk randomisation is great for debugging
use-after-free, but chunk randomisation offers a better defence against
"heap feng shui" style attacks, which depend on carefully constructing a
particular heap layout, so we should leave randomisation enabled when
security options are requested.
OPENBSD_5_3
djm 12 years ago
parent
commit
83081452a3
2 changed files with 36 additions and 18 deletions
  1. +12
    -3
      src/lib/libc/stdlib/malloc.3
  2. +24
    -15
      src/lib/libc/stdlib/malloc.c

+ 12
- 3
src/lib/libc/stdlib/malloc.3 View File

@ -30,9 +30,9 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE. .\" SUCH DAMAGE.
.\" .\"
.\" $OpenBSD: malloc.3,v 1.70 2011/07/22 07:00:44 otto Exp $
.\" $OpenBSD: malloc.3,v 1.71 2012/11/02 18:18:15 djm Exp $
.\" .\"
.Dd $Mdocdate: July 22 2011 $
.Dd $Mdocdate: November 2 2012 $
.Dt MALLOC 3 .Dt MALLOC 3
.Os .Os
.Sh NAME .Sh NAME
@ -231,13 +231,17 @@ This option requires the library to have been compiled with -DMALLOC_STATS in
order to have any effect. order to have any effect.
.It Cm F .It Cm F
.Dq Freeguard . .Dq Freeguard .
Enable use after free protection.
Enable use after free detection.
Unused pages on the freelist are read and write protected to Unused pages on the freelist are read and write protected to
cause a segmentation fault upon access. cause a segmentation fault upon access.
This will also switch off the delayed freeing of chunks, This will also switch off the delayed freeing of chunks,
reducing random behaviour but detecting double reducing random behaviour but detecting double
.Fn free .Fn free
calls as early as possible. calls as early as possible.
This option is intended for debugging rather than improved security
(use the
.Cm U
option for security).
.It Cm G .It Cm G
.Dq Guard . .Dq Guard .
Enable guard pages. Enable guard pages.
@ -275,6 +279,11 @@ This can substantially aid in compacting memory.
.\"Consult the source for this one. .\"Consult the source for this one.
.It Cm S .It Cm S
Enable all options suitable for security auditing. Enable all options suitable for security auditing.
.It Cm U
.Dq Free unmap .
Enable use after free protection for larger allocations.
Unused pages on the freelist are read and write protected to
cause a segmentation fault upon access.
.It Cm X .It Cm X
.Dq xmalloc . .Dq xmalloc .
Rather than return failure, Rather than return failure,


+ 24
- 15
src/lib/libc/stdlib/malloc.c View File

@ -1,4 +1,4 @@
/* $OpenBSD: malloc.c,v 1.147 2012/09/13 10:45:41 pirofti Exp $ */
/* $OpenBSD: malloc.c,v 1.148 2012/11/02 18:18:15 djm Exp $ */
/* /*
* Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
* *
@ -165,7 +165,8 @@ struct chunk_info {
struct malloc_readonly { struct malloc_readonly {
struct dir_info *g_pool; /* Main bookkeeping information */ struct dir_info *g_pool; /* Main bookkeeping information */
int malloc_abort; /* abort() on error */ int malloc_abort; /* abort() on error */
int malloc_freeprot; /* mprotect free pages PROT_NONE? */
int malloc_freenow; /* Free quickly - disable chunk rnd */
int malloc_freeunmap; /* mprotect free pages PROT_NONE? */
int malloc_hint; /* call madvice on free pages? */ int malloc_hint; /* call madvice on free pages? */
int malloc_junk; /* junk fill? */ int malloc_junk; /* junk fill? */
int malloc_move; /* move allocations to end of page? */ int malloc_move; /* move allocations to end of page? */
@ -344,7 +345,7 @@ unmap(struct dir_info *d, void *p, size_t sz)
if (r->p == NULL) { if (r->p == NULL) {
if (mopts.malloc_hint) if (mopts.malloc_hint)
madvise(p, sz, MADV_FREE); madvise(p, sz, MADV_FREE);
if (mopts.malloc_freeprot)
if (mopts.malloc_freeunmap)
mprotect(p, sz, PROT_NONE); mprotect(p, sz, PROT_NONE);
r->p = p; r->p = p;
r->size = psz; r->size = psz;
@ -407,7 +408,7 @@ map(struct dir_info *d, size_t sz, int zero_fill)
if (r->p != NULL) { if (r->p != NULL) {
if (r->size == psz) { if (r->size == psz) {
p = r->p; p = r->p;
if (mopts.malloc_freeprot)
if (mopts.malloc_freeunmap)
mprotect(p, sz, PROT_READ | PROT_WRITE); mprotect(p, sz, PROT_READ | PROT_WRITE);
if (mopts.malloc_hint) if (mopts.malloc_hint)
madvise(p, sz, MADV_NORMAL); madvise(p, sz, MADV_NORMAL);
@ -417,7 +418,7 @@ map(struct dir_info *d, size_t sz, int zero_fill)
if (zero_fill) if (zero_fill)
memset(p, 0, sz); memset(p, 0, sz);
else if (mopts.malloc_junk && else if (mopts.malloc_junk &&
mopts.malloc_freeprot)
mopts.malloc_freeunmap)
memset(p, SOME_FREEJUNK, sz); memset(p, SOME_FREEJUNK, sz);
return p; return p;
} else if (r->size > psz) } else if (r->size > psz)
@ -427,7 +428,7 @@ map(struct dir_info *d, size_t sz, int zero_fill)
if (big != NULL) { if (big != NULL) {
r = big; r = big;
p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT); p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
if (mopts.malloc_freeprot)
if (mopts.malloc_freeunmap)
mprotect(p, sz, PROT_READ | PROT_WRITE); mprotect(p, sz, PROT_READ | PROT_WRITE);
if (mopts.malloc_hint) if (mopts.malloc_hint)
madvise(p, sz, MADV_NORMAL); madvise(p, sz, MADV_NORMAL);
@ -435,7 +436,7 @@ map(struct dir_info *d, size_t sz, int zero_fill)
d->free_regions_size -= psz; d->free_regions_size -= psz;
if (zero_fill) if (zero_fill)
memset(p, 0, sz); memset(p, 0, sz);
else if (mopts.malloc_junk && mopts.malloc_freeprot)
else if (mopts.malloc_junk && mopts.malloc_freeunmap)
memset(p, SOME_FREEJUNK, sz); memset(p, SOME_FREEJUNK, sz);
return p; return p;
} }
@ -515,10 +516,12 @@ omalloc_init(struct dir_info **dp)
break; break;
#endif /* MALLOC_STATS */ #endif /* MALLOC_STATS */
case 'f': case 'f':
mopts.malloc_freeprot = 0;
mopts.malloc_freenow = 0;
mopts.malloc_freeunmap = 0;
break; break;
case 'F': case 'F':
mopts.malloc_freeprot = 1;
mopts.malloc_freenow = 1;
mopts.malloc_freeunmap = 1;
break; break;
case 'g': case 'g':
mopts.malloc_guard = 0; mopts.malloc_guard = 0;
@ -554,15 +557,21 @@ omalloc_init(struct dir_info **dp)
mopts.malloc_realloc = 1; mopts.malloc_realloc = 1;
break; break;
case 's': case 's':
mopts.malloc_freeprot = mopts.malloc_junk = 0;
mopts.malloc_freeunmap = mopts.malloc_junk = 0;
mopts.malloc_guard = 0; mopts.malloc_guard = 0;
mopts.malloc_cache = MALLOC_DEFAULT_CACHE; mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
break; break;
case 'S': case 'S':
mopts.malloc_freeprot = mopts.malloc_junk = 1;
mopts.malloc_freeunmap = mopts.malloc_junk = 1;
mopts.malloc_guard = MALLOC_PAGESIZE; mopts.malloc_guard = MALLOC_PAGESIZE;
mopts.malloc_cache = 0; mopts.malloc_cache = 0;
break; break;
case 'u':
mopts.malloc_freeunmap = 0;
break;
case 'U':
mopts.malloc_freeunmap = 1;
break;
case 'x': case 'x':
mopts.malloc_xmalloc = 0; mopts.malloc_xmalloc = 0;
break; break;
@ -1015,7 +1024,7 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
LIST_REMOVE(info, entries); LIST_REMOVE(info, entries);
if (info->size == 0 && !mopts.malloc_freeprot)
if (info->size == 0 && !mopts.malloc_freeunmap)
mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE); mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
unmap(d, info->page, MALLOC_PAGESIZE); unmap(d, info->page, MALLOC_PAGESIZE);
@ -1184,7 +1193,7 @@ ofree(void *p)
if (mopts.malloc_guard) { if (mopts.malloc_guard) {
if (sz < mopts.malloc_guard) if (sz < mopts.malloc_guard)
wrterror("guard size", NULL); wrterror("guard size", NULL);
if (!mopts.malloc_freeprot) {
if (!mopts.malloc_freeunmap) {
if (mprotect((char *)p + PAGEROUND(sz) - if (mprotect((char *)p + PAGEROUND(sz) -
mopts.malloc_guard, mopts.malloc_guard, mopts.malloc_guard, mopts.malloc_guard,
PROT_READ | PROT_WRITE)) PROT_READ | PROT_WRITE))
@ -1192,7 +1201,7 @@ ofree(void *p)
} }
malloc_guarded -= mopts.malloc_guard; malloc_guarded -= mopts.malloc_guard;
} }
if (mopts.malloc_junk && !mopts.malloc_freeprot)
if (mopts.malloc_junk && !mopts.malloc_freeunmap)
memset(p, SOME_FREEJUNK, memset(p, SOME_FREEJUNK,
PAGEROUND(sz) - mopts.malloc_guard); PAGEROUND(sz) - mopts.malloc_guard);
unmap(g_pool, p, PAGEROUND(sz)); unmap(g_pool, p, PAGEROUND(sz));
@ -1203,7 +1212,7 @@ ofree(void *p)
if (mopts.malloc_junk && sz > 0) if (mopts.malloc_junk && sz > 0)
memset(p, SOME_FREEJUNK, sz); memset(p, SOME_FREEJUNK, sz);
if (!mopts.malloc_freeprot) {
if (!mopts.malloc_freenow) {
i = getrnibble(); i = getrnibble();
tmp = p; tmp = p;
p = g_pool->delayed_chunks[i]; p = g_pool->delayed_chunks[i];


Loading…
Cancel
Save