
Back out previous; otto saw a potential race that could lead to a
double unmap and I experienced a much more unstable firefox.
discussed with otto on icb
OPENBSD_6_0
tb 8 years ago
commit 071457b57b
1 changed file with 23 additions and 32 deletions
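For context, the change being backed out (malloc.c r1.189) deferred some
munmap() calls: _MUNMAP() stashed a single region in the pool's unmap_me /
unmap_me_sz fields, and _MALLOC_LEAVE() issued the real munmap() only after
dropping the malloc lock. The sketch below is a simplified, hypothetical
rendering of that pattern; the struct, function, and lock names are stand-ins
rather than the libc internals, and the commit log does not spell out the
exact interleaving behind the potential double unmap.

/*
 * Hypothetical sketch of the deferred-unmap pattern being reverted.
 * struct pool, pool_enter(), pool_leave(), defer_unmap() and the mutex
 * are simplified stand-ins for the libc internals, not the real code.
 */
#include <sys/mman.h>
#include <pthread.h>
#include <stddef.h>

struct pool {
	pthread_mutex_t lock;		/* stand-in for _MALLOC_LOCK */
	void *unmap_me;			/* region whose munmap() is deferred */
	size_t unmap_me_sz;
};

static void
pool_enter(struct pool *d)
{
	pthread_mutex_lock(&d->lock);
	d->unmap_me = NULL;		/* deferral slot cleared on entry */
	d->unmap_me_sz = 0;
}

/* Instead of unmapping right away, stash the first region for later. */
static void
defer_unmap(struct pool *d, void *p, size_t sz)
{
	if (d->unmap_me == NULL) {
		d->unmap_me = p;
		d->unmap_me_sz = sz;
	} else
		munmap(p, sz);		/* only one slot; overflow unmaps now */
}

static void
pool_leave(struct pool *d)
{
	void *p = d->unmap_me;		/* read while still holding the lock... */
	size_t sz = d->unmap_me_sz;	/* ...but the slot is not cleared here */

	pthread_mutex_unlock(&d->lock);
	if (p != NULL)
		munmap(p, sz);		/* syscall issued after the unlock */
}

As the removed hunks below show, the deferred pointer is unmapped after the
lock has been released and is only reset by the next _MALLOC_ENTER(); the
backout returns to unmapping immediately and reporting any munmap() failure
through wrterror().
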
src/lib/libc/stdlib/malloc.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.189 2016/06/27 15:33:40 tedu Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.190 2016/06/28 06:40:11 tb Exp $	*/
 /*
  * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -122,8 +122,6 @@ struct dir_info {
 	char *func;			/* current function */
 	u_char rbytes[32];		/* random bytes */
 	u_short chunk_start;
-	void *unmap_me;
-	size_t unmap_me_sz;
 #ifdef MALLOC_STATS
 	size_t inserts;
 	size_t insert_collisions;
@@ -220,16 +218,10 @@ static void malloc_exit(void);
 static inline void
 _MALLOC_LEAVE(struct dir_info *d)
 {
-	void *unmap_me = d->unmap_me;
-	size_t unmap_me_sz = d->unmap_me_sz;
-
 	if (__isthreaded) {
 		d->active--;
 		_MALLOC_UNLOCK();
 	}
-
-	if (unmap_me != NULL)
-		munmap(unmap_me, unmap_me_sz);
 }
 
 static inline void
@@ -239,8 +231,6 @@ _MALLOC_ENTER(struct dir_info *d)
 		_MALLOC_LOCK();
 		d->active++;
 	}
-	d->unmap_me = NULL;
-	d->unmap_me_sz = 0;
 }
 
 static inline size_t
@@ -305,16 +295,6 @@ wrterror(struct dir_info *d, char *msg, void *p)
 	abort();
 }
 
-static inline void
-_MUNMAP(struct dir_info *d, void *p, size_t sz)
-{
-	if (d->unmap_me == NULL) {
-		d->unmap_me = p;
-		d->unmap_me_sz = sz;
-	} else if (munmap(p, sz) == -1)
-		wrterror(d, "munmap", p);
-}
-
 static void
 rbytes_init(struct dir_info *d)
 {
@@ -355,7 +335,9 @@ unmap(struct dir_info *d, void *p, size_t sz)
 	}
 
 	if (psz > mopts.malloc_cache) {
-		_MUNMAP(d, p, sz);
+		i = munmap(p, sz);
+		if (i)
+			wrterror(d, "munmap", p);
 		STATS_SUB(d->malloc_used, sz);
 		return;
 	}
@@ -368,7 +350,8 @@ unmap(struct dir_info *d, void *p, size_t sz)
 		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
 		if (r->p != NULL) {
 			rsz = r->size << MALLOC_PAGESHIFT;
-			_MUNMAP(d, r->p, rsz);
+			if (munmap(r->p, rsz))
+				wrterror(d, "munmap", r->p);
 			r->p = NULL;
 			if (tounmap > r->size)
 				tounmap -= r->size;
@@ -411,7 +394,8 @@ zapcacheregion(struct dir_info *d, void *p, size_t len)
 		r = &d->free_regions[i];
 		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
 			rsz = r->size << MALLOC_PAGESHIFT;
-			_MUNMAP(d, r->p, rsz);
+			if (munmap(r->p, rsz))
+				wrterror(d, "munmap", r->p);
 			r->p = NULL;
 			d->free_regions_size -= r->size;
 			r->size = 0;
@@ -743,9 +727,11 @@ omalloc_grow(struct dir_info *d)
 		}
 	}
 	/* avoid pages containing meta info to end up in cache */
-	_MUNMAP(d, d->r, d->regions_total * sizeof(struct region_info));
-	STATS_SUB(d->malloc_used,
-	    d->regions_total * sizeof(struct region_info));
+	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
+		wrterror(d, "munmap", d->r);
+	else
+		STATS_SUB(d->malloc_used,
+		    d->regions_total * sizeof(struct region_info));
 	d->regions_free = d->regions_free + d->regions_total;
 	d->regions_total = newtotal;
 	d->r = p;
@@ -1442,8 +1428,10 @@ gotit:
 				STATS_SETF(r, f);
 				STATS_INC(pool->cheap_reallocs);
 				return p;
-			} else if (q != MAP_FAILED)
-				_MUNMAP(pool, q, needed);
+			} else if (q != MAP_FAILED) {
+				if (munmap(q, needed))
+					wrterror(pool, "munmap", q);
+			}
 		}
 	} else if (rnewsz < roldsz) {
 		if (mopts.malloc_guard) {
@@ -1612,9 +1600,12 @@ mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
 	if (p == MAP_FAILED)
 		return MAP_FAILED;
 	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
-	if (q != p)
-		_MUNMAP(d, p, q - p);
-	_MUNMAP(d, q + sz, alignment - (q - p));
+	if (q != p) {
+		if (munmap(p, q - p))
+			wrterror(d, "munmap", p);
+	}
+	if (munmap(q + sz, alignment - (q - p)))
+		wrterror(d, "munmap", q + sz);
 	STATS_SUB(d->malloc_used, alignment);
 	return q;

