From b1b6e37e4ed36cc7bd924f9536394f4d846125fb Mon Sep 17 00:00:00 2001 From: djm <> Date: Mon, 29 Dec 2008 22:25:50 +0000 Subject: [PATCH] extra paranoia for malloc(3): Move all runtime options into a structure that is made read-only (via mprotect) after initialisation to protect against attacks that overwrite options to turn off malloc protections (e.g. use-after-free) Allocate the main bookkeeping data (struct dir_info) using mmap(), thereby giving it an unpredictable address. Place a PROT_NONE guard page on either side to further frustrate attacks on it. Add a new 'L' option that maps struct dir_info PROT_NONE except when in the allocator code itself. Makes attacks on it basically impossible. feedback tedu deraadt otto canacar ok otto --- src/lib/libc/stdlib/malloc.3 | 11 +- src/lib/libc/stdlib/malloc.c | 432 ++++++++++++++++++++++------------- 2 files changed, 278 insertions(+), 165 deletions(-) diff --git a/src/lib/libc/stdlib/malloc.3 b/src/lib/libc/stdlib/malloc.3 index edcd748e..24588343 100644 --- a/src/lib/libc/stdlib/malloc.3 +++ b/src/lib/libc/stdlib/malloc.3 @@ -30,9 +30,9 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.\" $OpenBSD: malloc.3,v 1.58 2008/11/26 12:06:54 pedro Exp $ +.\" $OpenBSD: malloc.3,v 1.59 2008/12/29 22:25:50 djm Exp $ .\" -.Dd $Mdocdate: November 26 2008 $ +.Dd $Mdocdate: December 29 2008 $ .Dt MALLOC 3 .Os .Sh NAME @@ -249,6 +249,13 @@ Currently junk is bytes of 0xd0 when allocating; this is pronounced .Dq Duh . \&:-) Freed chunks are filled with 0xdf. +.It Cm L +.Dq Lock . +Lock critical data structures using +.Xr mprotect 2 +to protect against modification except by +.Nm +and related routines. .It Cm P .Dq Move allocations within a page. Allocations larger than half a page but smaller than a page diff --git a/src/lib/libc/stdlib/malloc.c b/src/lib/libc/stdlib/malloc.c index ee4bf987..e15a64ac 100644 --- a/src/lib/libc/stdlib/malloc.c +++ b/src/lib/libc/stdlib/malloc.c @@ -1,4 +1,4 @@ -/* $OpenBSD: malloc.c,v 1.111 2008/12/15 19:47:49 otto Exp $ */ +/* $OpenBSD: malloc.c,v 1.112 2008/12/29 22:25:50 djm Exp $ */ /* * Copyright (c) 2008 Otto Moerbeek * @@ -88,6 +88,23 @@ #define MMAPA(a,sz) mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \ MAP_ANON | MAP_PRIVATE, -1, (off_t) 0) +/* Protect and unprotect g_pool structure as we enter/exit the allocator */ +#define DIR_INFO_RSZ ((sizeof(struct dir_info) + PAGE_MASK) & ~PAGE_MASK) +#define PROTECT_G_POOL() \ + do { \ + if (g_pool != NULL && mopts.malloc_poolprot) { \ + mprotect((void *)((uintptr_t)g_pool & ~PAGE_MASK), \ + DIR_INFO_RSZ, PROT_NONE); \ + } \ + } while (0) +#define UNPROTECT_G_POOL() \ + do { \ + if (g_pool != NULL && mopts.malloc_poolprot) { \ + mprotect((void *)((uintptr_t)g_pool & ~PAGE_MASK), \ + DIR_INFO_RSZ, PROT_READ | PROT_WRITE); \ + } \ + } while (0) + struct region_info { void *p; /* page; low bits used to mark chunks */ uintptr_t size; /* size for pages, or chunk_info pointer */ @@ -145,29 +162,41 @@ struct chunk_info { u_long bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS]; }; -static struct dir_info g_pool; -static char *malloc_func; /* current function */ +struct malloc_readonly { + struct dir_info *g_pool; /* Main bookkeeping information */ + int malloc_abort; /* abort() on error */ + int malloc_poolprot; /* mprotect heap PROT_NONE? */ + int malloc_freeprot; /* mprotect free pages PROT_NONE? */ + int malloc_hint; /* call madvice on free pages? */ + int malloc_junk; /* junk fill? 
*/ + int malloc_move; /* move allocations to end of page? */ + int malloc_realloc; /* always realloc? */ + int malloc_xmalloc; /* xmalloc behaviour? */ + int malloc_zero; /* zero fill? */ + size_t malloc_guard; /* use guard pages after allocations? */ + u_int malloc_cache; /* free pages we cache */ +#ifdef MALLOC_STATS + int malloc_stats; /* dump statistics at end */ +#endif + u_int32_t malloc_canary; /* Matched against ones in g_pool */ +}; + +/* This object is mapped PROT_READ after initialisation to prevent tampering */ +static union { + struct malloc_readonly mopts; + u_char _pad[PAGE_SIZE]; +} malloc_readonly __attribute__((aligned(PAGE_SIZE))); +#define mopts malloc_readonly.mopts +#define g_pool mopts.g_pool + char *malloc_options; /* compile-time options */ -static int malloc_abort = 1; /* abort() on error */ +static char *malloc_func; /* current function */ static int malloc_active; /* status of malloc */ -static int malloc_freeprot; /* mprotect free pages PROT_NONE? */ -static int malloc_hint; /* call madvice on free pages? */ -static int malloc_junk; /* junk fill? */ -static int malloc_move = 1; /* move allocations to end of page? */ -static int malloc_realloc; /* always realloc? */ -static int malloc_xmalloc; /* xmalloc behaviour? */ -static int malloc_zero; /* zero fill? */ -static size_t malloc_guard; /* use guard pages after allocations? */ - -static u_int malloc_cache = 64; /* free pages we cache */ + static size_t malloc_guarded; /* bytes used for guards */ static size_t malloc_used; /* bytes allocated */ -#ifdef MALLOC_STATS -static int malloc_stats; /* dump statistics at end */ -#endif - static size_t rbytesused; /* random bytes used */ static u_char rbytes[512]; /* random bytes */ static u_char getrbyte(void); @@ -247,7 +276,7 @@ dump_free_page_info(int fd, struct dir_info *d) snprintf(buf, sizeof(buf), "Free pages cached: %zu\n", d->free_regions_size); write(fd, buf, strlen(buf)); - for (i = 0; i < malloc_cache; i++) { + for (i = 0; i < mopts.malloc_cache; i++) { if (d->free_regions[i].p != NULL) { snprintf(buf, sizeof(buf), "%2d) ", i); write(fd, buf, strlen(buf)); @@ -266,6 +295,8 @@ malloc_dump1(int fd, struct dir_info *d) snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d); write(fd, buf, strlen(buf)); + if (d == NULL) + return; snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total); write(fd, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds, @@ -313,7 +344,7 @@ malloc_dump1(int fd, struct dir_info *d) void malloc_dump(int fd) { - malloc_dump1(fd, &g_pool); + malloc_dump1(fd, g_pool); } static void @@ -353,11 +384,11 @@ wrterror(char *p) writev(STDERR_FILENO, iov, 5); #ifdef MALLOC_STATS - if (malloc_stats) + if (mopts.malloc_stats) malloc_dump(STDERR_FILENO); #endif /* MALLOC_STATS */ //malloc_active--; - if (malloc_abort) + if (mopts.malloc_abort) abort(); } @@ -381,19 +412,19 @@ unmap(struct dir_info *d, void *p, size_t sz) return; } - if (psz > malloc_cache) { + if (psz > mopts.malloc_cache) { if (munmap(p, sz)) wrterror("munmap"); malloc_used -= sz; return; } tounmap = 0; - rsz = malloc_cache - d->free_regions_size; + rsz = mopts.malloc_cache - d->free_regions_size; if (psz > rsz) tounmap = psz - rsz; offset = getrbyte(); - for (i = 0; tounmap > 0 && i < malloc_cache; i++) { - r = &d->free_regions[(i + offset) & (malloc_cache - 1)]; + for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) { + r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; if (r->p != NULL) { rsz = r->size << 
MALLOC_PAGESHIFT; if (munmap(r->p, rsz)) @@ -410,12 +441,12 @@ unmap(struct dir_info *d, void *p, size_t sz) } if (tounmap > 0) wrterror("malloc cache underflow"); - for (i = 0; i < malloc_cache; i++) { + for (i = 0; i < mopts.malloc_cache; i++) { r = &d->free_regions[i]; if (r->p == NULL) { - if (malloc_hint) + if (mopts.malloc_hint) madvise(p, sz, MADV_FREE); - if (malloc_freeprot) + if (mopts.malloc_freeprot) mprotect(p, sz, PROT_NONE); r->p = p; r->size = psz; @@ -423,9 +454,9 @@ unmap(struct dir_info *d, void *p, size_t sz) break; } } - if (i == malloc_cache) + if (i == mopts.malloc_cache) wrterror("malloc free slot lost"); - if (d->free_regions_size > malloc_cache) + if (d->free_regions_size > mopts.malloc_cache) wrterror("malloc cache overflow"); } @@ -436,7 +467,7 @@ zapcacheregion(struct dir_info *d, void *p) struct region_info *r; size_t rsz; - for (i = 0; i < malloc_cache; i++) { + for (i = 0; i < mopts.malloc_cache; i++) { r = &d->free_regions[i]; if (r->p == p) { rsz = r->size << MALLOC_PAGESHIFT; @@ -458,6 +489,9 @@ map(struct dir_info *d, size_t sz, int zero_fill) u_int i, offset; void *p; + if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) || + d->canary1 != ~d->canary2) + wrterror("internal struct corrupt"); if (sz != PAGEROUND(sz)) { wrterror("map round"); return NULL; @@ -470,21 +504,22 @@ map(struct dir_info *d, size_t sz, int zero_fill) return p; } offset = getrbyte(); - for (i = 0; i < malloc_cache; i++) { - r = &d->free_regions[(i + offset) & (malloc_cache - 1)]; + for (i = 0; i < mopts.malloc_cache; i++) { + r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)]; if (r->p != NULL) { if (r->size == psz) { p = r->p; - if (malloc_freeprot) + if (mopts.malloc_freeprot) mprotect(p, sz, PROT_READ | PROT_WRITE); - if (malloc_hint) + if (mopts.malloc_hint) madvise(p, sz, MADV_NORMAL); r->p = NULL; r->size = 0; d->free_regions_size -= psz; if (zero_fill) memset(p, 0, sz); - else if (malloc_junk && malloc_freeprot) + else if (mopts.malloc_junk && + mopts.malloc_freeprot) memset(p, SOME_FREEJUNK, sz); return p; } else if (r->size > psz) @@ -494,9 +529,9 @@ map(struct dir_info *d, size_t sz, int zero_fill) if (big != NULL) { r = big; p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT); - if (malloc_freeprot) + if (mopts.malloc_freeprot) mprotect(p, sz, PROT_READ | PROT_WRITE); - if (malloc_hint) + if (mopts.malloc_hint) madvise(p, sz, MADV_NORMAL); r->size -= psz; d->free_regions_size -= psz; @@ -507,7 +542,7 @@ map(struct dir_info *d, size_t sz, int zero_fill) p = MMAP(sz); if (p != MAP_FAILED) malloc_used += sz; - if (d->free_regions_size > malloc_cache) + if (d->free_regions_size > mopts.malloc_cache) wrterror("malloc cache"); /* zero fill not needed */ return p; @@ -532,14 +567,22 @@ getrbyte(void) * Initialize a dir_info, which should have been cleared by caller */ static int -omalloc_init(struct dir_info *d) +omalloc_init(struct dir_info **dp) { char *p, b[64]; int i, j; - size_t regioninfo_size; + size_t d_avail, regioninfo_size; + struct dir_info *d; rbytes_init(); + /* + * Default options + */ + mopts.malloc_abort = 1; + mopts.malloc_move = 1; + mopts.malloc_cache = 64; + for (i = 0; i < 3; i++) { switch (i) { case 0: @@ -565,77 +608,83 @@ omalloc_init(struct dir_info *d) for (; p != NULL && *p != '\0'; p++) { switch (*p) { case '>': - malloc_cache <<= 1; - if (malloc_cache > MALLOC_MAXCACHE) - malloc_cache = MALLOC_MAXCACHE; + mopts.malloc_cache <<= 1; + if (mopts.malloc_cache > MALLOC_MAXCACHE) + mopts.malloc_cache = MALLOC_MAXCACHE; break; case '<': 
- malloc_cache >>= 1; + mopts.malloc_cache >>= 1; break; case 'a': - malloc_abort = 0; + mopts.malloc_abort = 0; break; case 'A': - malloc_abort = 1; + mopts.malloc_abort = 1; break; #ifdef MALLOC_STATS case 'd': - malloc_stats = 0; + mopts.malloc_stats = 0; break; case 'D': - malloc_stats = 1; + mopts.malloc_stats = 1; break; #endif /* MALLOC_STATS */ case 'f': - malloc_freeprot = 0; + mopts.malloc_freeprot = 0; break; case 'F': - malloc_freeprot = 1; + mopts.malloc_freeprot = 1; break; case 'g': - malloc_guard = 0; + mopts.malloc_guard = 0; break; case 'G': - malloc_guard = MALLOC_PAGESIZE; + mopts.malloc_guard = MALLOC_PAGESIZE; break; case 'h': - malloc_hint = 0; + mopts.malloc_hint = 0; break; case 'H': - malloc_hint = 1; + mopts.malloc_hint = 1; break; case 'j': - malloc_junk = 0; + mopts.malloc_junk = 0; break; case 'J': - malloc_junk = 1; + mopts.malloc_junk = 1; + break; + case 'l': + mopts.malloc_poolprot = 0; + break; + case 'L': + mopts.malloc_poolprot = 1; break; case 'n': case 'N': break; case 'p': - malloc_move = 0; + mopts.malloc_move = 0; break; case 'P': - malloc_move = 1; + mopts.malloc_move = 1; break; case 'r': - malloc_realloc = 0; + mopts.malloc_realloc = 0; break; case 'R': - malloc_realloc = 1; + mopts.malloc_realloc = 1; break; case 'x': - malloc_xmalloc = 0; + mopts.malloc_xmalloc = 0; break; case 'X': - malloc_xmalloc = 1; + mopts.malloc_xmalloc = 1; break; case 'z': - malloc_zero = 0; + mopts.malloc_zero = 0; break; case 'Z': - malloc_zero = 1; + mopts.malloc_zero = 1; break; default: { static const char q[] = "malloc() warning: " @@ -651,17 +700,33 @@ omalloc_init(struct dir_info *d) * We want junk in the entire allocation, and zero only in the part * the user asked for. */ - if (malloc_zero) - malloc_junk = 1; + if (mopts.malloc_zero) + mopts.malloc_junk = 1; #ifdef MALLOC_STATS - if (malloc_stats && (atexit(malloc_exit) == -1)) { + if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) { static const char q[] = "malloc() warning: atexit(2) failed." " Will not be able to dump stats on exit\n"; write(STDERR_FILENO, q, sizeof(q) - 1); } #endif /* MALLOC_STATS */ + while ((mopts.malloc_canary = arc4random()) == 0) + ; + + /* + * Allocate dir_info with a guard page on either side. Also + * randomise offset inside the page at which the dir_info + * lies (subject to alignment by 1 << MALLOC_MINSHIFT) + */ + if ((p = MMAP(PAGE_SIZE + DIR_INFO_RSZ + PAGE_SIZE)) == NULL) + return -1; + mprotect(p, PAGE_SIZE, PROT_NONE); + mprotect(p + PAGE_SIZE + DIR_INFO_RSZ, PAGE_SIZE, PROT_NONE); + d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT; + d = (struct dir_info *)(p + PAGE_SIZE + + (arc4random_uniform(d_avail) << MALLOC_MINSHIFT)); + d->regions_bits = 9; d->regions_free = d->regions_total = 1 << d->regions_bits; regioninfo_size = d->regions_total * sizeof(struct region_info); @@ -673,8 +738,18 @@ omalloc_init(struct dir_info *d) } malloc_used += regioninfo_size; memset(d->r, 0, regioninfo_size); - d->canary1 = arc4random(); + d->canary1 = mopts.malloc_canary ^ (u_int32_t)d; d->canary2 = ~d->canary1; + + *dp = d; + + /* + * Options have been set and will never be reset. + * Prevent further tampering with them. 
+ */ + if (((uintptr_t)&malloc_readonly & PAGE_MASK) == 0) + mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ); + return 0; } @@ -792,7 +867,8 @@ find(struct dir_info *d, void *p) size_t mask = d->regions_total - 1; void *q, *r; - if (d->canary1 != ~d->canary2) + if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) || + d->canary1 != ~d->canary2) wrterror("internal struct corrupt"); p = MASK_POINTER(p); index = hash(p) & mask; @@ -818,7 +894,7 @@ delete(struct dir_info *d, struct region_info *ri) if (d->regions_total & (d->regions_total - 1)) wrterror("regions_total not 2^x"); d->regions_free++; - STATS_INC(g_pool.deletes); + STATS_INC(g_pool->deletes); i = ri - d->r; for (;;) { @@ -834,7 +910,7 @@ delete(struct dir_info *d, struct region_info *ri) (j < i && i <= r)) continue; d->r[j] = d->r[i]; - STATS_INC(g_pool.delete_moves); + STATS_INC(g_pool->delete_moves); break; } @@ -919,6 +995,9 @@ malloc_bytes(struct dir_info *d, size_t size) u_long u, *lp; struct chunk_info *bp; + if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)d) || + d->canary1 != ~d->canary2) + wrterror("internal struct corrupt"); /* Don't bother with anything less than this */ /* unless we have a malloc(0) requests */ if (size != 0 && size < MALLOC_MINSIZE) @@ -983,7 +1062,7 @@ malloc_bytes(struct dir_info *d, size_t size) k += (lp - bp->bits) * MALLOC_BITS; k <<= bp->shift; - if (malloc_junk && bp->size > 0) + if (mopts.malloc_junk && bp->size > 0) memset((char *)bp->page + k, SOME_JUNK, bp->size); return ((char *)bp->page + k); } @@ -1047,7 +1126,7 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr) } *mp = info->next; - if (info->size == 0 && !malloc_freeprot) + if (info->size == 0 && !mopts.malloc_freeprot) mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE); unmap(d, info->page, MALLOC_PAGESIZE); @@ -1064,54 +1143,55 @@ omalloc(size_t sz, int zero_fill) size_t psz; if (sz > MALLOC_MAXCHUNK) { - if (sz >= SIZE_MAX - malloc_guard - MALLOC_PAGESIZE) { + if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { errno = ENOMEM; return NULL; } - sz += malloc_guard; + sz += mopts.malloc_guard; psz = PAGEROUND(sz); - p = map(&g_pool, psz, zero_fill); + p = map(g_pool, psz, zero_fill); if (p == MAP_FAILED) { errno = ENOMEM; return NULL; } - if (insert(&g_pool, p, sz)) { - unmap(&g_pool, p, psz); + if (insert(g_pool, p, sz)) { + unmap(g_pool, p, psz); errno = ENOMEM; return NULL; } - if (malloc_guard) { - if (mprotect((char *)p + psz - malloc_guard, - malloc_guard, PROT_NONE)) + if (mopts.malloc_guard) { + if (mprotect((char *)p + psz - mopts.malloc_guard, + mopts.malloc_guard, PROT_NONE)) wrterror("mprotect"); - malloc_guarded += malloc_guard; + malloc_guarded += mopts.malloc_guard; } - if (malloc_move && - sz - malloc_guard < MALLOC_PAGESIZE - MALLOC_LEEWAY) { + if (mopts.malloc_move && + sz - mopts.malloc_guard < MALLOC_PAGESIZE - + MALLOC_LEEWAY) { /* fill whole allocation */ - if (malloc_junk) - memset(p, SOME_JUNK, psz - malloc_guard); + if (mopts.malloc_junk) + memset(p, SOME_JUNK, psz - mopts.malloc_guard); /* shift towards the end */ p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY - - (sz - malloc_guard)) & ~(MALLOC_MINSIZE-1)); + (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1)); /* fill zeros if needed and overwritten above */ - if (zero_fill && malloc_junk) - memset(p, 0, sz - malloc_guard); + if (zero_fill && mopts.malloc_junk) + memset(p, 0, sz - mopts.malloc_guard); } else { - if (malloc_junk) { + if (mopts.malloc_junk) { if (zero_fill) - memset(p + sz - 
malloc_guard, + memset(p + sz - mopts.malloc_guard, SOME_JUNK, psz - sz); else - memset(p, - SOME_JUNK, psz - malloc_guard); + memset(p, SOME_JUNK, + psz - mopts.malloc_guard); } } } else { /* takes care of SOME_JUNK */ - p = malloc_bytes(&g_pool, sz); + p = malloc_bytes(g_pool, sz); if (zero_fill && p != NULL && sz > 0) memset(p, 0, sz); } @@ -1134,10 +1214,33 @@ malloc_recurse(void) wrterror("recursive call"); } malloc_active--; + PROTECT_G_POOL(); _MALLOC_UNLOCK(); errno = EDEADLK; } +static void +malloc_global_corrupt(void) +{ + wrterror("global malloc data corrupt"); + PROTECT_G_POOL(); + _MALLOC_UNLOCK(); + errno = EINVAL; +} + +static int +malloc_init(void) +{ + if (omalloc_init(&g_pool)) { + _MALLOC_UNLOCK(); + if (mopts.malloc_xmalloc) + wrterror("out of memory"); + errno = ENOMEM; + return -1; + } + return 0; +} + void * malloc(size_t size) { @@ -1145,24 +1248,21 @@ malloc(size_t size) int saved_errno = errno; _MALLOC_LOCK(); + UNPROTECT_G_POOL(); malloc_func = " in malloc():"; - if (!g_pool.regions_total) { - if (omalloc_init(&g_pool)) { - _MALLOC_UNLOCK(); - if (malloc_xmalloc) - wrterror("out of memory"); - errno = ENOMEM; + if (g_pool == NULL) { + if (malloc_init() != 0) return NULL; - } } if (malloc_active++) { malloc_recurse(); return NULL; } - r = omalloc(size, malloc_zero); + r = omalloc(size, mopts.malloc_zero); malloc_active--; + PROTECT_G_POOL(); _MALLOC_UNLOCK(); - if (r == NULL && malloc_xmalloc) { + if (r == NULL && mopts.malloc_xmalloc) { wrterror("out of memory"); errno = ENOMEM; } @@ -1177,14 +1277,15 @@ ofree(void *p) struct region_info *r; size_t sz; - r = find(&g_pool, p); + r = find(g_pool, p); if (r == NULL) { wrterror("bogus pointer (double free?)"); return; } REALSIZE(sz, r); if (sz > MALLOC_MAXCHUNK) { - if (sz - malloc_guard >= MALLOC_PAGESIZE - MALLOC_LEEWAY) { + if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE - + MALLOC_LEEWAY) { if (r->p != p) { wrterror("bogus pointer"); return; @@ -1193,46 +1294,47 @@ ofree(void *p) #if notyetbecause_of_realloc /* shifted towards the end */ if (p != ((char *)r->p) + ((MALLOC_PAGESIZE - - MALLOC_MINSIZE - sz - malloc_guard) & + MALLOC_MINSIZE - sz - mopts.malloc_guard) & ~(MALLOC_MINSIZE-1))) { } #endif p = r->p; } - if (malloc_guard) { - if (sz < malloc_guard) + if (mopts.malloc_guard) { + if (sz < mopts.malloc_guard) wrterror("guard size"); - if (!malloc_freeprot) { + if (!mopts.malloc_freeprot) { if (mprotect((char *)p + PAGEROUND(sz) - - malloc_guard, malloc_guard, + mopts.malloc_guard, mopts.malloc_guard, PROT_READ | PROT_WRITE)) wrterror("mprotect"); } - malloc_guarded -= malloc_guard; + malloc_guarded -= mopts.malloc_guard; } - if (malloc_junk && !malloc_freeprot) - memset(p, SOME_FREEJUNK, PAGEROUND(sz) - malloc_guard); - unmap(&g_pool, p, PAGEROUND(sz)); - delete(&g_pool, r); + if (mopts.malloc_junk && !mopts.malloc_freeprot) + memset(p, SOME_FREEJUNK, + PAGEROUND(sz) - mopts.malloc_guard); + unmap(g_pool, p, PAGEROUND(sz)); + delete(g_pool, r); } else { void *tmp; int i; - if (malloc_junk && sz > 0) + if (mopts.malloc_junk && sz > 0) memset(p, SOME_FREEJUNK, sz); - if (!malloc_freeprot) { + if (!mopts.malloc_freeprot) { i = getrbyte() & (MALLOC_DELAYED_CHUNKS - 1); tmp = p; - p = g_pool.delayed_chunks[i]; - g_pool.delayed_chunks[i] = tmp; + p = g_pool->delayed_chunks[i]; + g_pool->delayed_chunks[i] = tmp; } if (p != NULL) { - r = find(&g_pool, p); + r = find(g_pool, p); if (r == NULL) { wrterror("bogus pointer (double free?)"); return; } - free_bytes(&g_pool, r, p); + free_bytes(g_pool, r, p); } } } @@ 
-1247,13 +1349,20 @@ free(void *ptr) return; _MALLOC_LOCK(); + UNPROTECT_G_POOL(); malloc_func = " in free():"; + if (g_pool == NULL) { + _MALLOC_UNLOCK(); + wrterror("free() called before allocation"); + return; + } if (malloc_active++) { malloc_recurse(); return; } ofree(ptr); malloc_active--; + PROTECT_G_POOL(); _MALLOC_UNLOCK(); errno = saved_errno; } @@ -1269,12 +1378,12 @@ orealloc(void *p, size_t newsz) if (p == NULL) return omalloc(newsz, 0); - r = find(&g_pool, p); + r = find(g_pool, p); if (r == NULL) { wrterror("bogus pointer (double free?)"); return NULL; } - if (newsz >= SIZE_MAX - malloc_guard - MALLOC_PAGESIZE) { + if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { errno = ENOMEM; return NULL; } @@ -1282,61 +1391,63 @@ orealloc(void *p, size_t newsz) REALSIZE(oldsz, r); goldsz = oldsz; if (oldsz > MALLOC_MAXCHUNK) { - if (oldsz < malloc_guard) + if (oldsz < mopts.malloc_guard) wrterror("guard size"); - oldsz -= malloc_guard; + oldsz -= mopts.malloc_guard; } gnewsz = newsz; if (gnewsz > MALLOC_MAXCHUNK) - gnewsz += malloc_guard; + gnewsz += mopts.malloc_guard; if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p && - !malloc_realloc) { + !mopts.malloc_realloc) { size_t roldsz = PAGEROUND(goldsz); size_t rnewsz = PAGEROUND(gnewsz); if (rnewsz > roldsz) { - if (!malloc_guard) { + if (!mopts.malloc_guard) { STATS_INC(g_pool.cheap_realloc_tries); - zapcacheregion(&g_pool, p + roldsz); + zapcacheregion(g_pool, p + roldsz); q = MMAPA(p + roldsz, rnewsz - roldsz); if (q == p + roldsz) { malloc_used += rnewsz - roldsz; - if (malloc_junk) + if (mopts.malloc_junk) memset(q, SOME_JUNK, rnewsz - roldsz); r->size = newsz; - STATS_INC(g_pool.cheap_reallocs); + STATS_INC(g_pool->cheap_reallocs); return p; } else if (q != MAP_FAILED) munmap(q, rnewsz - roldsz); } } else if (rnewsz < roldsz) { - if (malloc_guard) { - if (mprotect((char *)p + roldsz - malloc_guard, - malloc_guard, PROT_READ | PROT_WRITE)) + if (mopts.malloc_guard) { + if (mprotect((char *)p + roldsz - + mopts.malloc_guard, mopts.malloc_guard, + PROT_READ | PROT_WRITE)) wrterror("mprotect"); - if (mprotect((char *)p + rnewsz - malloc_guard, - malloc_guard, PROT_NONE)) + if (mprotect((char *)p + rnewsz - + mopts.malloc_guard, mopts.malloc_guard, + PROT_NONE)) wrterror("mprotect"); } - unmap(&g_pool, (char *)p + rnewsz, roldsz - rnewsz); + unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz); r->size = gnewsz; return p; } else { - if (newsz > oldsz && malloc_junk) + if (newsz > oldsz && mopts.malloc_junk) memset((char *)p + newsz, SOME_JUNK, - rnewsz - malloc_guard - newsz); + rnewsz - mopts.malloc_guard - newsz); r->size = gnewsz; return p; } } - if (newsz <= oldsz && newsz > oldsz / 2 && !malloc_realloc) { - if (malloc_junk && newsz > 0) + if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) { + if (mopts.malloc_junk && newsz > 0) memset((char *)p + newsz, SOME_JUNK, oldsz - newsz); return p; - } else if (newsz != oldsz || malloc_realloc) { + } else if (newsz != oldsz || mopts.malloc_realloc) { q = omalloc(newsz, 0); if (q == NULL) return NULL; @@ -1355,26 +1466,22 @@ realloc(void *ptr, size_t size) int saved_errno = errno; _MALLOC_LOCK(); + UNPROTECT_G_POOL(); malloc_func = " in realloc():"; - if (!g_pool.regions_total) { - if (omalloc_init(&g_pool)) { - _MALLOC_UNLOCK(); - if (malloc_xmalloc) - wrterror("out of memory"); - errno = ENOMEM; + if (g_pool == NULL) { + if (malloc_init() != 0) return NULL; - } } if (malloc_active++) { malloc_recurse(); return NULL; } - r = orealloc(ptr, size); 
malloc_active--; + PROTECT_G_POOL(); _MALLOC_UNLOCK(); - if (r == NULL && malloc_xmalloc) { + if (r == NULL && mopts.malloc_xmalloc) { wrterror("out of memory"); errno = ENOMEM; } @@ -1393,20 +1500,17 @@ calloc(size_t nmemb, size_t size) int saved_errno = errno; _MALLOC_LOCK(); + UNPROTECT_G_POOL(); malloc_func = " in calloc():"; - if (!g_pool.regions_total) { - if (omalloc_init(&g_pool)) { - _MALLOC_UNLOCK(); - if (malloc_xmalloc) - wrterror("out of memory"); - errno = ENOMEM; + if (g_pool == NULL) { + if (malloc_init() != 0) return NULL; - } } if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && nmemb > 0 && SIZE_MAX / nmemb < size) { - _MALLOC_UNLOCK(); - if (malloc_xmalloc) + PROTECT_G_POOL(); + _MALLOC_UNLOCK(); + if (mopts.malloc_xmalloc) wrterror("out of memory"); errno = ENOMEM; return NULL; @@ -1421,8 +1525,9 @@ calloc(size_t nmemb, size_t size) r = omalloc(size, 1); malloc_active--; + PROTECT_G_POOL(); _MALLOC_UNLOCK(); - if (r == NULL && malloc_xmalloc) { + if (r == NULL && mopts.malloc_xmalloc) { wrterror("out of memory"); errno = ENOMEM; } @@ -1430,3 +1535,4 @@ calloc(size_t nmemb, size_t size) errno = saved_errno; return r; } +
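
A minimal stand-alone sketch of the first idea in this patch: keep every runtime option in one page-aligned, page-sized object and mprotect(2) it read-only once initialisation is finished, so a heap-corruption bug can no longer flip, for example, the free-protection flag. This is illustrative only; the names struct malloc_opts, opts_page and opts_init(), and the hard-coded 4096-byte page size, are assumptions for brevity and are not the names used in the diff (which uses struct malloc_readonly and mopts).

#include <sys/mman.h>
#include <stdint.h>

/* Illustrative option block; the real struct malloc_readonly has more fields. */
struct malloc_opts {
	int		junk;		/* junk-fill memory? */
	int		freeprot;	/* mprotect freed pages PROT_NONE? */
	uint32_t	canary;		/* matched against per-pool canaries */
};

/* Page-sized and page-aligned so it can be mprotect(2)ed on its own. */
static union {
	struct malloc_opts	o;
	unsigned char		pad[4096];	/* assumes 4 KiB pages */
} opts_page __attribute__((aligned(4096)));
#define opts	opts_page.o

static int
opts_init(void)
{
	opts.junk = 1;
	opts.freeprot = 1;
	opts.canary = 0xdeadbeefU;	/* the patch derives this from arc4random() */

	/*
	 * Everything is set; drop write permission.  A later attempt to
	 * overwrite an option now faults instead of silently disabling
	 * a protection.
	 */
	return mprotect(&opts_page, sizeof(opts_page), PROT_READ);
}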
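
The bookkeeping structure gets similar treatment, following what the diff's omalloc_init() does: mmap(2) it at an unpredictable address, fence it with PROT_NONE guard pages on both sides, and start it at a random 16-byte-aligned offset inside the writable region. With the new 'L' option the pool page is then kept PROT_NONE between calls and only exposed while the allocator holds its lock, as the diff's PROTECT_G_POOL()/UNPROTECT_G_POOL() macros do. A hedged sketch, again assuming 4096-byte pages; dir_info_alloc(), POOL_SHOW(), POOL_HIDE() and alloc_entry_example() are invented names:

#include <sys/mman.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGESZ		4096UL			/* assumed page size */
#define PAGE_MASK	(PAGESZ - 1)
#define MINSHIFT	4			/* 1 << 4 = 16-byte alignment */

struct dir_info {				/* stand-in for the real bookkeeping */
	size_t	regions_total;
	size_t	regions_free;
	/* ... region hash table, free-page cache, canaries ... */
};

/* sizeof(struct dir_info) rounded up to whole pages, as in the patch. */
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + PAGE_MASK) & ~PAGE_MASK)

static struct dir_info *
dir_info_alloc(void)
{
	unsigned char *p;
	size_t d_avail;

	/* guard page + bookkeeping pages + guard page, at a random address */
	p = mmap(NULL, PAGESZ + DIR_INFO_RSZ + PAGESZ,
	    PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return NULL;

	/* PROT_NONE fences on either side of the writable region */
	mprotect(p, PAGESZ, PROT_NONE);
	mprotect(p + PAGESZ + DIR_INFO_RSZ, PAGESZ, PROT_NONE);

	/* random, 16-byte-aligned placement inside the writable region */
	d_avail = (DIR_INFO_RSZ - sizeof(struct dir_info)) >> MINSHIFT;
	return (struct dir_info *)(p + PAGESZ +
	    (arc4random_uniform(d_avail) << MINSHIFT));
}

/*
 * Hide/expose the pool page(s); masking the pointer down to its page
 * start mirrors the diff.  The real macros are additionally conditional
 * on the 'L' flag and on the pool having been initialised.
 */
#define POOL_HIDE(d)	mprotect((void *)((uintptr_t)(d) & ~PAGE_MASK),	\
			    DIR_INFO_RSZ, PROT_NONE)
#define POOL_SHOW(d)	mprotect((void *)((uintptr_t)(d) & ~PAGE_MASK),	\
			    DIR_INFO_RSZ, PROT_READ | PROT_WRITE)

void *
alloc_entry_example(struct dir_info *pool, size_t sz)
{
	void *r = NULL;

	/* _MALLOC_LOCK(); */
	POOL_SHOW(pool);	/* bookkeeping becomes readable and writable */

	/* ... the real allocation work reads and updates *pool here ... */
	(void)sz;

	POOL_HIDE(pool);	/* back to PROT_NONE until the next call */
	/* _MALLOC_UNLOCK(); */
	return r;
}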
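
The toggle costs two extra mprotect(2) calls per allocator entry, which is presumably why 'L' is an opt-in option in the diff rather than the default: without 'L' the UNPROTECT_G_POOL()/PROTECT_G_POOL() macros are no-ops, the pool page stays readable and writable, and only the option page and the PROT_NONE guard pages remain in force.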