
1. When shrinking a chunk allocation, compare the size of the current
   allocation to the size of the new allocation (instead of the requested
   size). See the find_chunksize() sketch below.
2. Previously, realloc took the easy way and always reallocated if the C
   option (chunk canaries) was active. This commit fixes that by carefully
   updating the recorded requested size in all cases, and by writing the
   canary bytes in the proper location after reallocating. See the
   fill_canary() sketch below.
3. Introduce defines to test whether the MALLOC_MOVE adjustment should be
   applied and to compute the new pointer value. See the MALLOC_MOVE sketch
   below.
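
As a worked illustration of item 1, here is a minimal standalone sketch (not
the library code) of the bucket logic the new find_chunksize() helper factors
out, together with the shrink test orealloc() now builds on it.
MALLOC_MINSHIFT = 4 and the sample sizes are assumptions chosen for the
example:

#include <stdio.h>

#define MALLOC_MINSHIFT	4	/* assumed: smallest chunk is 16 bytes */

/* bucket index j such that chunks in this bucket hold 1 << j bytes */
static int
find_chunksize(size_t size)
{
	int i, j;

	if (size == 0)
		return 0;	/* malloc(0) gets its own bucket */
	j = MALLOC_MINSHIFT;
	i = (size - 1) >> (MALLOC_MINSHIFT - 1);
	while (i >>= 1)
		j++;
	return j;
}

int
main(void)
{
	size_t oldsz = 128;	/* current chunk size, as REALSIZE yields it */
	size_t newsz = 100;	/* requested new size */

	/* the commit's shrink test: reuse the chunk only when the new
	 * size maps to the very bucket the old chunk came from */
	if ((size_t)1 << find_chunksize(newsz) == oldsz)
		printf("shrink in place, no reallocation\n");
	return 0;
}

Comparing against the chunk size (128 here) rather than the old requested
size is what lets, say, a 120-byte request shrink to 100 bytes without moving.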
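For item 2, a minimal sketch of the canary scheme around the new
fill_canary() helper: the bytes after the requested size, capped at a check
length, are filled with a junk pattern that can be verified later. The
SOME_JUNK and CHUNK_CHECK_LENGTH values are assumptions, and check_canary()
is a hypothetical stand-in for the library's validate_canary():

#include <err.h>
#include <string.h>

#define SOME_JUNK		0xdb	/* assumed junk byte */
#define CHUNK_CHECK_LENGTH	32	/* assumed cap on checked bytes */

static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	memset(ptr + sz, SOME_JUNK, check_sz);
}

/* hypothetical counterpart of the library's validate_canary() */
static void
check_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz, i;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	for (i = 0; i < check_sz; i++)
		if ((unsigned char)ptr[sz + i] != SOME_JUNK)
			errx(1, "chunk canary corrupted");
}

int
main(void)
{
	char chunk[64];

	fill_canary(chunk, 10, sizeof(chunk));	/* 10 bytes requested */
	check_canary(chunk, 10, sizeof(chunk));	/* passes */
	chunk[12] = 0;				/* simulated overflow */
	check_canary(chunk, 10, sizeof(chunk));	/* errx()s out */
	return 0;
}

This is why realloc must rewrite the canary after any size change: the
recorded requested size decides where the junk bytes start.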
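For item 3, a standalone sketch of what the MALLOC_MOVE_COND/MALLOC_MOVE pair
computes: a sub-page allocation is shifted toward the end of its page so that
overruns hit the unused tail (or guard) sooner. The constants below (4 KB
page, 16-byte minimum chunk, guard disabled) are assumptions; in the library
they come from mopts and the platform:

#include <stdio.h>

#define MALLOC_PAGESIZE	(1UL << 12)	/* assumed: 4 KB pages */
#define MALLOC_MINSIZE	(1UL << 4)	/* assumed: 16-byte minimum chunk */
#define MALLOC_LEEWAY	0
#define MALLOC_GUARD	0		/* assumed: guard pages disabled */

/* same shape as the committed macros, with mopts.malloc_guard
 * replaced by the constant above */
#define MALLOC_MOVE_COND(sz)	((sz) - MALLOC_GUARD < \
				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
#define MALLOC_MOVE(p, sz)	(((char *)(p)) + \
				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY - \
				    ((sz) - MALLOC_GUARD)) & \
				    ~(MALLOC_MINSIZE - 1)))

int
main(void)
{
	static char page[MALLOC_PAGESIZE];	/* stand-in for a fresh page */
	size_t sz = 100;			/* requested size */

	if (MALLOC_MOVE_COND(sz)) {
		/* 4096 - 100 = 3996, masked down to 3984: the allocation
		 * is shifted toward the end of the page so small overruns
		 * reach the unmapped/guard area sooner */
		char *p = MALLOC_MOVE(page, sz);
		printf("offset into page: %td\n", p - page);
	}
	return 0;
}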
otto committed 3d80117872 (OPENBSD_6_1), 8 years ago
1 changed file with 87 additions and 46 deletions:

src/lib/libc/stdlib/malloc.c  +87 -46

@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.211 2016/11/04 09:11:20 otto Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.212 2017/01/21 07:47:42 otto Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -73,6 +73,12 @@
  * Set to zero to be the most strict.
  */
 #define MALLOC_LEEWAY		0
+#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard <		\
+				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
+#define MALLOC_MOVE(p, sz)	(((char *)(p)) +			\
+				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
+				    ((sz) - mopts.malloc_guard)) &	\
+				    ~(MALLOC_MINSIZE - 1)))
 
 #define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
@@ -199,6 +205,7 @@ char	*malloc_options;	/* compile-time options */
 static u_char getrbyte(struct dir_info *d);
 static __dead void wrterror(struct dir_info *d, char *msg, ...)
     __attribute__((__format__ (printf, 2, 3)));
+static void fill_canary(char *ptr, size_t sz, size_t allocated);
 
 #ifdef MALLOC_STATS
 void malloc_dump(int, int, struct dir_info *);
@@ -209,8 +216,8 @@ static void malloc_exit(void);
 #define CALLER	NULL
 #endif
 
-/* low bits of r->p determine size: 0 means >= page size and p->size holding
- * real size, otherwise r->size is a shift count, or 1 for malloc(0)
+/* low bits of r->p determine size: 0 means >= page size and r->size holding
+ * real size, otherwise low bits are a shift count, or 1 for malloc(0)
  */
 #define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
@@ -905,23 +912,10 @@ omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
 	return bp;
 }
 
-/*
- * Allocate a chunk
- */
-static void *
-malloc_bytes(struct dir_info *d, size_t argsize, void *f)
+static int
+find_chunksize(size_t size)
 {
-	int		i, j, listnum;
-	size_t		k, size;
-	u_short		u, *lp;
-	struct chunk_info *bp;
-
-	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
-	    d->canary1 != ~d->canary2)
-		wrterror(d, "internal struct corrupt");
-
-	size = argsize;
+	int i, j;
 
 	/* Don't bother with anything less than this */
 	/* unless we have a malloc(0) requests */
@@ -937,6 +931,25 @@ malloc_bytes(struct dir_info *d, size_t argsize, void *f)
 		while (i >>= 1)
 			j++;
 	}
+	return j;
+}
+
+/*
+ * Allocate a chunk
+ */
+static void *
+malloc_bytes(struct dir_info *d, size_t size, void *f)
+{
+	int		i, j, listnum;
+	size_t		k;
+	u_short		u, *lp;
+	struct chunk_info *bp;
+
+	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
+	    d->canary1 != ~d->canary2)
+		wrterror(d, "internal struct corrupt");
+
+	j = find_chunksize(size);
 
 	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
 	/* If it's empty, make a page more of that size chunks */
@@ -990,24 +1003,29 @@ malloc_bytes(struct dir_info *d, size_t argsize, void *f)
 
 	k += (lp - bp->bits) * MALLOC_BITS;
 	if (mopts.chunk_canaries)
-		bp->bits[bp->offset + k] = argsize;
+		bp->bits[bp->offset + k] = size;
 
 	k <<= bp->shift;
 
 	if (bp->size > 0) {
 		if (mopts.malloc_junk == 2)
 			memset((char *)bp->page + k, SOME_JUNK, bp->size);
-		else if (mopts.chunk_canaries) {
-			size_t sz = bp->size - argsize;
-
-			if (sz > CHUNK_CHECK_LENGTH)
-				sz = CHUNK_CHECK_LENGTH;
-			memset((char *)bp->page + k + argsize, SOME_JUNK, sz);
-		}
+		else if (mopts.chunk_canaries)
+			fill_canary((char *)bp->page + k, size, bp->size);
 	}
 	return ((char *)bp->page + k);
 }
 
+static void
+fill_canary(char *ptr, size_t sz, size_t allocated)
+{
+	size_t check_sz = allocated - sz;
+
+	if (check_sz > CHUNK_CHECK_LENGTH)
+		check_sz = CHUNK_CHECK_LENGTH;
+	memset(ptr + sz, SOME_JUNK, check_sz);
+}
+
 static void
 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
 {
@@ -1130,13 +1148,12 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
 			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
 		}
 
-		if (sz - mopts.malloc_guard < MALLOC_PAGESIZE - MALLOC_LEEWAY) {
+		if (MALLOC_MOVE_COND(sz)) {
 			/* fill whole allocation */
 			if (mopts.malloc_junk == 2)
 				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
 			/* shift towards the end */
-			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
-			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
+			p = MALLOC_MOVE(p, sz);
 			/* fill zeros if needed and overwritten above */
 			if (zero_fill && mopts.malloc_junk == 2)
 				memset(p, 0, sz - mopts.malloc_guard);
@@ -1149,14 +1166,9 @@ omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
 				memset(p, SOME_JUNK,
 				    psz - mopts.malloc_guard);
 			}
-			else if (mopts.chunk_canaries) {
-				size_t csz = psz - sz;
-
-				if (csz > CHUNK_CHECK_LENGTH)
-					csz = CHUNK_CHECK_LENGTH;
-				memset((char *)p + sz - mopts.malloc_guard,
-				    SOME_JUNK, csz);
-			}
+			else if (mopts.chunk_canaries)
+				fill_canary(p, sz - mopts.malloc_guard,
+				    psz - mopts.malloc_guard);
 		}
 
 	} else {
@@ -1308,8 +1320,7 @@ ofree(struct dir_info *argpool, void *p)
 
 	REALSIZE(sz, r);
 	if (sz > MALLOC_MAXCHUNK) {
-		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
-		    MALLOC_LEEWAY) {
+		if (!MALLOC_MOVE_COND(sz)) {
 			if (r->p != p)
 				wrterror(pool, "bogus pointer %p", p);
 			if (mopts.chunk_canaries)
@@ -1410,9 +1421,11 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
 {
 	struct dir_info *pool;
 	struct region_info *r;
+	struct chunk_info *info;
 	size_t oldsz, goldsz, gnewsz;
 	void *q, *ret;
 	int i;
+	uint32_t chunknum;
 
 	pool = argpool;
@@ -1445,6 +1458,11 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
 	}
 
 	REALSIZE(oldsz, r);
+	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
+		chunknum = find_chunknum(pool, r, p, 0);
+		info = (struct chunk_info *)r->size;
+	}
+
 	goldsz = oldsz;
 	if (oldsz > MALLOC_MAXCHUNK) {
 		if (oldsz < mopts.malloc_guard)
@@ -1457,11 +1475,14 @@ orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
 		gnewsz += mopts.malloc_guard;
 
 	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
-	    !mopts.chunk_canaries && !mopts.malloc_realloc) {
+	    !mopts.malloc_realloc) {
+		/* First case: from an n-page-sized allocation to an
+		   m-page-sized allocation, no malloc_move in effect */
 		size_t roldsz = PAGEROUND(goldsz);
 		size_t rnewsz = PAGEROUND(gnewsz);
 
 		if (rnewsz > roldsz) {
+			/* try to extend existing region */
 			if (!mopts.malloc_guard) {
 				void *hint = (char *)p + roldsz;
 				size_t needed = rnewsz - roldsz;
@@ -1482,6 +1503,8 @@ gotit:
 				if (mopts.malloc_junk == 2)
 					memset(q, SOME_JUNK, needed);
 				r->size = newsz;
+				if (mopts.chunk_canaries)
+					fill_canary(p, newsz, PAGEROUND(newsz));
 				STATS_SETF(r, f);
 				STATS_INC(pool->cheap_reallocs);
 				ret = p;
@@ -1492,6 +1515,7 @@ gotit:
 				}
 			}
 		} else if (rnewsz < roldsz) {
+			/* shrink number of pages */
 			if (mopts.malloc_guard) {
 				if (mprotect((char *)p + roldsz -
 				    mopts.malloc_guard, mopts.malloc_guard,
@@ -1504,27 +1528,38 @@ gotit:
 			}
 			unmap(pool, (char *)p + rnewsz, roldsz - rnewsz);
 			r->size = gnewsz;
+			if (mopts.chunk_canaries)
+				fill_canary(p, newsz, PAGEROUND(newsz));
 			STATS_SETF(r, f);
 			ret = p;
 			goto done;
 		} else {
+			/* number of pages remains the same */
 			if (newsz > oldsz && mopts.malloc_junk == 2)
 				memset((char *)p + newsz, SOME_JUNK,
 				    rnewsz - mopts.malloc_guard - newsz);
 			r->size = gnewsz;
+			if (mopts.chunk_canaries)
+				fill_canary(p, newsz, PAGEROUND(newsz));
 			STATS_SETF(r, f);
 			ret = p;
 			goto done;
 		}
 	}
-	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.chunk_canaries &&
-	    !mopts.malloc_realloc) {
-		if (mopts.malloc_junk == 2 && newsz > 0)
+	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
+	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
+	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
+		/* do not reallocate if new size fits well in existing chunk */
+		if (mopts.malloc_junk == 2)
 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
+		if (mopts.chunk_canaries) {
+			info->bits[info->offset + chunknum] = newsz;
+			fill_canary(p, newsz, info->size);
+		}
 		STATS_SETF(r, f);
 		ret = p;
-	} else if (newsz != oldsz || mopts.chunk_canaries ||
-	    mopts.malloc_realloc) {
+	} else if (newsz != oldsz || mopts.malloc_realloc) {
+		/* create new allocation */
 		q = omalloc(pool, newsz, 0, f);
 		if (q == NULL) {
 			ret = NULL;
@@ -1535,6 +1570,12 @@ gotit:
 		ofree(pool, p);
 		ret = q;
 	} else {
+		/* > page size allocation didn't change */
+		if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
+			info->bits[info->offset + chunknum] = newsz;
+			if (info->size > 0)
+				fill_canary(p, newsz, info->size);
+		}
 		STATS_SETF(r, f);
 		ret = p;
 	}

