Diffstat (limited to 'contrib')
-rw-r--r--  contrib/jemalloc/ChangeLog                                              24
-rw-r--r--  contrib/jemalloc/FREEBSD-diffs                                          12
-rw-r--r--  contrib/jemalloc/VERSION                                                 2
-rw-r--r--  contrib/jemalloc/doc/jemalloc.3                                         10
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/hash.h                        2
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/huge.h                        8
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h         10
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h     3
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/private_namespace.h          1
-rw-r--r--  contrib/jemalloc/include/jemalloc/jemalloc.h                             8
-rw-r--r--  contrib/jemalloc/src/arena.c                                             5
-rw-r--r--  contrib/jemalloc/src/huge.c                                             26
-rw-r--r--  contrib/jemalloc/src/jemalloc.c                                          2
-rw-r--r--  contrib/jemalloc/src/prof.c                                              5
14 files changed, 80 insertions(+), 38 deletions(-)
diff --git a/contrib/jemalloc/ChangeLog b/contrib/jemalloc/ChangeLog
index c0ca338b076e..d56ee999e69c 100644
--- a/contrib/jemalloc/ChangeLog
+++ b/contrib/jemalloc/ChangeLog
@@ -5,6 +5,30 @@ found in the git revision history:
https://github.com/jemalloc/jemalloc
+* 3.6.0 (March 31, 2014)
+
+ This version contains a critical bug fix for a regression present in 3.5.0 and
+ 3.5.1.
+
+ Bug fixes:
+ - Fix a regression in arena_chunk_alloc() that caused crashes during
+ small/large allocation if chunk allocation failed. In the absence of this
+ bug, chunk allocation failure would result in allocation failure, e.g. NULL
+ return from malloc(). This regression was introduced in 3.5.0.
+ - Fix gcc intrinsics-based backtracing by specifying
+ -fno-omit-frame-pointer to gcc. Note that the application (and all the
+ libraries it links to) must also be compiled with this option for
+ backtracing to be reliable.
+ - Use dss allocation precedence for huge allocations as well as small/large
+ allocations.
+ - Fix test assertion failure message formatting. This bug did not manifest on
+ x86_64 systems because of implementation subtleties in va_list.
+ - Fix inconsequential test failures for hash and SFMT code.
+
+ New features:
+ - Support heap profiling on FreeBSD. This feature depends on the proc
+ filesystem being mounted during heap profile dumping.
+
* 3.5.1 (February 25, 2014)
This version primarily addresses minor bugs in test code.
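As a side note that is not part of this commit: with the arena_chunk_alloc() regression fixed, a failed chunk allocation once again surfaces as an ordinary allocation failure, so callers only need the usual NULL check. A minimal sketch (the 64 MiB request size is an arbitrary example of a multi-chunk allocation):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* A multi-chunk ("huge") request; with the fix described above, failure
	 * to allocate the backing chunk returns NULL (with errno set to ENOMEM
	 * on POSIX systems) instead of crashing inside the allocator. */
	void *p = malloc((size_t)64 << 20);
	if (p == NULL) {
		fprintf(stderr, "allocation failed: %s\n", strerror(errno));
		return (1);
	}
	free(p);
	return (0);
}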
diff --git a/contrib/jemalloc/FREEBSD-diffs b/contrib/jemalloc/FREEBSD-diffs
index e621af750ed0..c8cc9c1656ba 100644
--- a/contrib/jemalloc/FREEBSD-diffs
+++ b/contrib/jemalloc/FREEBSD-diffs
@@ -1,5 +1,5 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index c7e2e87..2bd59f0 100644
+index d8e2e71..330ba2a 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -57,12 +57,23 @@
@@ -27,7 +27,7 @@ index c7e2e87..2bd59f0 100644
<refsect2>
<title>Standard API</title>
<funcprototype>
-@@ -2338,4 +2349,19 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2342,4 +2353,19 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
<para>The <function>posix_memalign<parameter/></function> function conforms
to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
</refsect1>
@@ -48,7 +48,7 @@ index c7e2e87..2bd59f0 100644
+ </refsect1>
</refentry>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index d24a1fe..d101c3d 100644
+index 574bbb1..e3eafdf 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,5 +1,8 @@
@@ -100,10 +100,10 @@ index de44e14..564d604 100644
bool malloc_mutex_init(malloc_mutex_t *mutex);
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
-index 1e64ed5..29ddba3 100644
+index 93516d2..22f9af9 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
-@@ -225,7 +225,6 @@ iralloc
+@@ -226,7 +226,6 @@ iralloc
iralloct
iralloct_realign
isalloc
@@ -263,7 +263,7 @@ index f943891..47d032c 100755
+#include "jemalloc_FreeBSD.h"
EOF
diff --git a/src/jemalloc.c b/src/jemalloc.c
-index 563d99f..42f97b4 100644
+index 204778b..9e5f2df 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
diff --git a/contrib/jemalloc/VERSION b/contrib/jemalloc/VERSION
index 24870eef7902..dace31ba7b6a 100644
--- a/contrib/jemalloc/VERSION
+++ b/contrib/jemalloc/VERSION
@@ -1 +1 @@
-3.5.1-0-g7709a64c59daf0b1f938be49472fcc499e1bd136
+3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340
diff --git a/contrib/jemalloc/doc/jemalloc.3 b/contrib/jemalloc/doc/jemalloc.3
index f10abd109fe8..db43d648e738 100644
--- a/contrib/jemalloc/doc/jemalloc.3
+++ b/contrib/jemalloc/doc/jemalloc.3
@@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: 02/25/2014
+.\" Date: 03/31/2014
.\" Manual: User Manual
-.\" Source: jemalloc 3.5.1-0-g7709a64c59daf0b1f938be49472fcc499e1bd136
+.\" Source: jemalloc 3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "02/25/2014" "jemalloc 3.5.1-0-g7709a64c59da" "User Manual"
+.TH "JEMALLOC" "3" "03/31/2014" "jemalloc 3.6.0-0-g46c0af68bd24" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 3\&.5\&.1\-0\-g7709a64c59daf0b1f938be49472fcc499e1bd136\&. More information can be found at the
+This manual describes jemalloc 3\&.6\&.0\-0\-g46c0af68bd248b04df75e4f92d5fb804c3d75340\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.PP
The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -1035,7 +1035,7 @@ Purge unused dirty pages for arena <i>, or for all arenas if <i> equals
"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
.RS 4
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
-"arenas\&.narenas"\&. See
+"arenas\&.narenas"\&. Note that even during huge allocation this setting is read from the arena that would be chosen for small or large allocation so that applications can depend on consistent dss versus mmap allocation regardless of allocation size\&. See
"opt\&.dss"
for supported settings\&.
.RE
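The paragraph above now notes that huge allocations follow the same dss precedence as small and large allocations. As a hedged illustration (not part of this change; the header choice and request size are illustrative), an application could switch all arenas to dss-first allocation through mallctl() and then make a multi-chunk request:

#include <stdio.h>
#include <stdlib.h>
#include <malloc_np.h>	/* mallctl() on FreeBSD; <jemalloc/jemalloc.h> elsewhere */

int main(void)
{
	unsigned narenas;
	size_t sz = sizeof(narenas);
	char ctl[64];
	const char *dss = "primary";	/* prefer sbrk()-backed memory over mmap() */

	/* Per the manual text above, using the "arenas.narenas" value as <i>
	 * applies the setting to all arenas. */
	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0)
		return (1);
	snprintf(ctl, sizeof(ctl), "arena.%u.dss", narenas);
	if (mallctl(ctl, NULL, NULL, &dss, sizeof(dss)) != 0)
		return (1);

	/* After this commit, a huge (multi-chunk) request such as this 32 MiB
	 * allocation honors the same dss-versus-mmap precedence as small and
	 * large allocations served from the chosen arena. */
	void *p = malloc((size_t)32 << 20);
	free(p);
	return (0);
}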
diff --git a/contrib/jemalloc/include/jemalloc/internal/hash.h b/contrib/jemalloc/include/jemalloc/internal/hash.h
index 09b69df515be..c7183ede82d7 100644
--- a/contrib/jemalloc/include/jemalloc/internal/hash.h
+++ b/contrib/jemalloc/include/jemalloc/internal/hash.h
@@ -320,7 +320,7 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
-#if (LG_SIZEOF_PTR == 3)
+#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
uint64_t hashes[2];
diff --git a/contrib/jemalloc/include/jemalloc/internal/huge.h b/contrib/jemalloc/include/jemalloc/internal/huge.h
index ddf13138ad73..a2b9c779191f 100644
--- a/contrib/jemalloc/include/jemalloc/internal/huge.h
+++ b/contrib/jemalloc/include/jemalloc/internal/huge.h
@@ -17,18 +17,20 @@ extern size_t huge_allocated;
/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;
-void *huge_malloc(size_t size, bool zero);
-void *huge_palloc(size_t size, size_t alignment, bool zero);
+void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
+void *huge_palloc(size_t size, size_t alignment, bool zero,
+ dss_prec_t dss_prec);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc);
+ size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
+dss_prec_t huge_dss_prec_get(arena_t *arena);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool huge_boot(void);
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
index aad7b6c2ef76..dd6e6a301705 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
@@ -770,7 +770,7 @@ imalloct(size_t size, bool try_tcache, arena_t *arena)
if (size <= arena_maxclass)
return (arena_malloc(arena, size, false, try_tcache));
else
- return (huge_malloc(size, false));
+ return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}
JEMALLOC_ALWAYS_INLINE void *
@@ -787,7 +787,7 @@ icalloct(size_t size, bool try_tcache, arena_t *arena)
if (size <= arena_maxclass)
return (arena_malloc(arena, size, true, try_tcache));
else
- return (huge_malloc(size, true));
+ return (huge_malloc(size, true, huge_dss_prec_get(arena)));
}
JEMALLOC_ALWAYS_INLINE void *
@@ -813,9 +813,9 @@ ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
- ret = huge_malloc(usize, zero);
+ ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
else
- ret = huge_palloc(usize, alignment, zero);
+ ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
}
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@@ -984,7 +984,7 @@ iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
try_tcache_dalloc));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
- alignment, zero, try_tcache_dalloc));
+ alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
}
}
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
index 5bb7d5d011db..1ca25573a128 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
@@ -191,6 +191,9 @@
/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
+/* For use by hash code. */
+/* #undef JEMALLOC_BIG_ENDIAN */
+
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2
diff --git a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
index 6a0fa0fb7bfd..d790c8b8ab1b 100644
--- a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -197,6 +197,7 @@
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
+#define huge_dss_prec_get JEMALLOC_N(huge_dss_prec_get)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc.h b/contrib/jemalloc/include/jemalloc/jemalloc.h
index 3a4feeaf8d62..3dcccc3a5054 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc.h
@@ -67,12 +67,12 @@ extern "C" {
#include <limits.h>
#include <strings.h>
-#define JEMALLOC_VERSION "3.5.1-0-g7709a64c59daf0b1f938be49472fcc499e1bd136"
+#define JEMALLOC_VERSION "3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340"
#define JEMALLOC_VERSION_MAJOR 3
-#define JEMALLOC_VERSION_MINOR 5
-#define JEMALLOC_VERSION_BUGFIX 1
+#define JEMALLOC_VERSION_MINOR 6
+#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "7709a64c59daf0b1f938be49472fcc499e1bd136"
+#define JEMALLOC_VERSION_GID "46c0af68bd248b04df75e4f92d5fb804c3d75340"
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
index 390ab0f82304..dad707b63d05 100644
--- a/contrib/jemalloc/src/arena.c
+++ b/contrib/jemalloc/src/arena.c
@@ -614,8 +614,11 @@ arena_chunk_alloc(arena_t *arena)
if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena);
- else
+ else {
chunk = arena_chunk_init_hard(arena);
+ if (chunk == NULL)
+ return (NULL);
+ }
/* Insert the run into the runs_avail tree. */
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
diff --git a/contrib/jemalloc/src/huge.c b/contrib/jemalloc/src/huge.c
index 6d86aed881b6..d72f21357021 100644
--- a/contrib/jemalloc/src/huge.c
+++ b/contrib/jemalloc/src/huge.c
@@ -16,14 +16,14 @@ malloc_mutex_t huge_mtx;
static extent_tree_t huge;
void *
-huge_malloc(size_t size, bool zero)
+huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
{
- return (huge_palloc(size, chunksize, zero));
+ return (huge_palloc(size, chunksize, zero, dss_prec));
}
void *
-huge_palloc(size_t size, size_t alignment, bool zero)
+huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
{
void *ret;
size_t csize;
@@ -48,8 +48,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- ret = chunk_alloc(csize, alignment, false, &is_zeroed,
- chunk_dss_prec_get());
+ ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@@ -98,7 +97,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc)
+ size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
{
void *ret;
size_t copysize;
@@ -113,18 +112,18 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* space and copying.
*/
if (alignment > chunksize)
- ret = huge_palloc(size + extra, alignment, zero);
+ ret = huge_palloc(size + extra, alignment, zero, dss_prec);
else
- ret = huge_malloc(size + extra, zero);
+ ret = huge_malloc(size + extra, zero, dss_prec);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
- ret = huge_palloc(size, alignment, zero);
+ ret = huge_palloc(size, alignment, zero, dss_prec);
else
- ret = huge_malloc(size, zero);
+ ret = huge_malloc(size, zero, dss_prec);
if (ret == NULL)
return (NULL);
@@ -264,6 +263,13 @@ huge_salloc(const void *ptr)
return (ret);
}
+dss_prec_t
+huge_dss_prec_get(arena_t *arena)
+{
+
+ return (arena_dss_prec_get(choose_arena(arena)));
+}
+
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index 42f97b4dbb7a..9e5f2df3a491 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -2081,7 +2081,7 @@ a0alloc(size_t size, bool zero)
if (size <= arena_maxclass)
return (arena_malloc(arenas[0], size, zero, false));
else
- return (huge_malloc(size, zero));
+ return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
}
void *
diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
index 1d8ccbd60ae0..7722b7b43739 100644
--- a/contrib/jemalloc/src/prof.c
+++ b/contrib/jemalloc/src/prof.c
@@ -935,9 +935,12 @@ prof_dump_maps(bool propagate_err)
char filename[PATH_MAX + 1];
cassert(config_prof);
-
+#ifdef __FreeBSD__
+ malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
+#else
malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
(int)getpid());
+#endif
mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
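The prof.c hunk above points profile dumps at /proc/curproc/map on FreeBSD, which is why the ChangeLog entry notes that procfs must be mounted while dumping (e.g. mount -t procfs proc /proc). As a hedged sketch, assuming a jemalloc built with profiling support and activated via MALLOC_CONF="prof:true", a program can request an on-demand dump through mallctl():

#include <stdio.h>
#include <malloc_np.h>	/* mallctl() on FreeBSD; <jemalloc/jemalloc.h> elsewhere */

int main(void)
{
	/* "prof.dump" writes a heap profile to the given path.  The call only
	 * succeeds when profiling was compiled in and enabled at startup; on
	 * FreeBSD it also needs /proc mounted so /proc/curproc/map can be
	 * copied into the dump.  The output path here is illustrative. */
	const char *path = "/tmp/jemalloc.heap";
	if (mallctl("prof.dump", NULL, NULL, &path, sizeof(path)) != 0) {
		fprintf(stderr, "prof.dump unavailable (profiling not enabled?)\n");
		return (1);
	}
	return (0);
}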