author     Jason Evans <jasone@FreeBSD.org>  2012-11-10 01:46:13 +0000
committer  Jason Evans <jasone@FreeBSD.org>  2012-11-10 01:46:13 +0000
commit     82872ac08650a49b356e99f9bf640b8d7201849f (patch)
tree       259ea73d4da6fff525da091d1c46c6809a58621f /contrib
parent     8a28f3228f13be45b9151e2dd3c740fe0814f979 (diff)
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/jemalloc/ChangeLog | 41
-rw-r--r--  contrib/jemalloc/FREEBSD-diffs | 26
-rw-r--r--  contrib/jemalloc/VERSION | 2
-rw-r--r--  contrib/jemalloc/doc/jemalloc.3 | 89
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena.h | 68
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk.h | 8
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk_dss.h | 14
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h | 2
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ctl.h | 5
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent.h | 3
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/huge.h | 2
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h | 123
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/private_namespace.h | 40
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prof.h | 5
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/rtree.h | 3
-rw-r--r--  contrib/jemalloc/include/jemalloc/jemalloc.h | 11
-rw-r--r--  contrib/jemalloc/include/jemalloc/jemalloc_defs.h | 9
-rw-r--r--  contrib/jemalloc/src/arena.c | 699
-rw-r--r--  contrib/jemalloc/src/base.c | 3
-rw-r--r--  contrib/jemalloc/src/chunk.c | 165
-rw-r--r--  contrib/jemalloc/src/chunk_dss.c | 37
-rw-r--r--  contrib/jemalloc/src/chunk_mmap.c | 12
-rw-r--r--  contrib/jemalloc/src/ctl.c | 361
-rw-r--r--  contrib/jemalloc/src/huge.c | 7
-rw-r--r--  contrib/jemalloc/src/jemalloc.c | 212
-rw-r--r--  contrib/jemalloc/src/mutex.c | 2
-rw-r--r--  contrib/jemalloc/src/prof.c | 42
-rw-r--r--  contrib/jemalloc/src/rtree.c | 21
-rw-r--r--  contrib/jemalloc/src/stats.c | 10
-rw-r--r--  contrib/jemalloc/src/tcache.c | 4
-rw-r--r--  contrib/jemalloc/src/util.c | 5
31 files changed, 1471 insertions(+), 560 deletions(-)
diff --git a/contrib/jemalloc/ChangeLog b/contrib/jemalloc/ChangeLog
index 231dd6da7300..ab3476c69fd7 100644
--- a/contrib/jemalloc/ChangeLog
+++ b/contrib/jemalloc/ChangeLog
@@ -6,6 +6,47 @@ found in the git revision history:
http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
git://canonware.com/jemalloc.git
+* 3.2.0 (November 9, 2012)
+
+ In addition to a couple of bug fixes, this version modifies page run
+ allocation and dirty page purging algorithms in order to better control
+ page-level virtual memory fragmentation.
+
+ Incompatible changes:
+ - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).
+
+ Bug fixes:
+ - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
+ after primary dss allocation fails.
+ - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced
+ in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.
+
+* 3.1.0 (October 16, 2012)
+
+ New features:
+ - Auto-detect whether running inside Valgrind, thus removing the need to
+ manually specify MALLOC_CONF=valgrind:true.
+ - Add the "arenas.extend" mallctl, which allows applications to create
+ manually managed arenas.
+ - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
+ - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
+ which provide control over dss/mmap precedence.
+ - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
+ - Define LG_QUANTUM for hppa.
+
+ Incompatible changes:
+ - Disable tcache by default if running inside Valgrind, in order to avoid
+ making unallocated objects appear reachable to Valgrind.
+ - Drop const from malloc_usable_size() argument on Linux.
+
+ Bug fixes:
+ - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
+ - Remove const from __*_hook variable declarations, so that glibc can modify
+ them during process forking.
+ - Fix mlockall(2)/madvise(2) interaction.
+ - Fix fork(2)-related deadlocks.
+ - Fix error return value for "thread.tcache.enabled" mallctl.
+
* 3.0.0 (May 11, 2012)
Although this version adds some major new features, the primary focus is on
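
Editor's note: the 3.1.0 ChangeLog entries above introduce the "arenas.extend" mallctl and the ALLOCM_ARENA() flag for {,r,d}allocm(). The following standalone sketch shows how an application might combine the two to allocate from a manually created arena; it assumes FreeBSD's <malloc_np.h> declares mallctl() and the experimental allocm()/dallocm() interfaces, and it is illustrative, not part of this commit.

#include <stdio.h>
#include <malloc_np.h>	/* mallctl(), allocm(), dallocm(), ALLOCM_* */

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	void *p;

	/* Create a manually managed arena; its index is returned. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0) {
		fprintf(stderr, "arenas.extend failed\n");
		return (1);
	}

	/* Allocate 4 KiB from that arena instead of the automatic ones. */
	if (allocm(&p, NULL, 4096, ALLOCM_ARENA(arena_ind)) != ALLOCM_SUCCESS) {
		fprintf(stderr, "allocm failed\n");
		return (1);
	}

	dallocm(p, 0);
	return (0);
}
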
diff --git a/contrib/jemalloc/FREEBSD-diffs b/contrib/jemalloc/FREEBSD-diffs
index 8ca49955824f..0372f91c689e 100644
--- a/contrib/jemalloc/FREEBSD-diffs
+++ b/contrib/jemalloc/FREEBSD-diffs
@@ -1,5 +1,5 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index 877c500..7d659a7 100644
+index 54b8747..91c4a4e 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -51,12 +51,23 @@
@@ -27,7 +27,7 @@ index 877c500..7d659a7 100644
<refsect2>
<title>Standard API</title>
<funcprototype>
-@@ -2101,4 +2112,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2170,4 +2181,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
<para>The <function>posix_memalign<parameter/></function> function conforms
to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
</refsect1>
@@ -45,7 +45,7 @@ index 877c500..7d659a7 100644
+ </refsect1>
</refentry>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index 268cd14..2acd2eb 100644
+index 475821a..73306ac 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,5 +1,8 @@
@@ -97,19 +97,19 @@ index de44e14..564d604 100644
bool malloc_mutex_init(malloc_mutex_t *mutex);
diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
-index b816647..b8ce6b1 100644
+index 06241cd..7b19906 100644
--- a/include/jemalloc/internal/private_namespace.h
+++ b/include/jemalloc/internal/private_namespace.h
-@@ -186,7 +186,6 @@
- #define iqalloc JEMALLOC_N(iqalloc)
+@@ -204,7 +204,6 @@
#define iralloc JEMALLOC_N(iralloc)
+ #define irallocx JEMALLOC_N(irallocx)
#define isalloc JEMALLOC_N(isalloc)
-#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
diff --git a/include/jemalloc/jemalloc.h.in b/include/jemalloc/jemalloc.h.in
-index ad06948..505dd38 100644
+index 31b1304..c3ef2f5 100644
--- a/include/jemalloc/jemalloc.h.in
+++ b/include/jemalloc/jemalloc.h.in
@@ -15,6 +15,7 @@ extern "C" {
@@ -122,7 +122,7 @@ index ad06948..505dd38 100644
#define ALLOCM_LG_ALIGN(la) (la)
diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
new file mode 100644
-index 0000000..9efab93
+index 0000000..9c97a13
--- /dev/null
+++ b/include/jemalloc/jemalloc_FreeBSD.h
@@ -0,0 +1,76 @@
@@ -203,7 +203,7 @@ index 0000000..9efab93
+#define pthread_mutex_lock _pthread_mutex_lock
+#define pthread_mutex_unlock _pthread_mutex_unlock
diff --git a/src/jemalloc.c b/src/jemalloc.c
-index bc54cd7..fa9fcf0 100644
+index 8a667b6..aaf5012 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
@@ -217,7 +217,7 @@ index bc54cd7..fa9fcf0 100644
/* Runtime configuration options. */
const char *je_malloc_conf;
#ifdef JEMALLOC_DEBUG
-@@ -429,7 +433,8 @@ malloc_conf_init(void)
+@@ -448,7 +452,8 @@ malloc_conf_init(void)
#endif
;
@@ -228,12 +228,12 @@ index bc54cd7..fa9fcf0 100644
* Do nothing; opts is already initialized to
* the value of the MALLOC_CONF environment
diff --git a/src/mutex.c b/src/mutex.c
-index 37a843e..4a90a05 100644
+index 55e18c2..6b6f438 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,
#ifdef JEMALLOC_MUTEX_INIT_CB
- int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
+
+__weak_reference(_pthread_mutex_init_calloc_cb_stub,
@@ -250,7 +250,7 @@ index 37a843e..4a90a05 100644
bool
diff --git a/src/util.c b/src/util.c
-index 9b73c3e..f94799f 100644
+index b3a0114..df1c5d5 100644
--- a/src/util.c
+++ b/src/util.c
@@ -58,6 +58,22 @@ wrtmessage(void *cbopaque, const char *s)
diff --git a/contrib/jemalloc/VERSION b/contrib/jemalloc/VERSION
index c0f4e740940b..5e64fc9e8bbd 100644
--- a/contrib/jemalloc/VERSION
+++ b/contrib/jemalloc/VERSION
@@ -1 +1 @@
-3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046
+3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9
diff --git a/contrib/jemalloc/doc/jemalloc.3 b/contrib/jemalloc/doc/jemalloc.3
index f4a928208371..eada5167d6c5 100644
--- a/contrib/jemalloc/doc/jemalloc.3
+++ b/contrib/jemalloc/doc/jemalloc.3
@@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: 05/12/2012
+.\" Date: 11/09/2012
.\" Manual: User Manual
-.\" Source: jemalloc 3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046
+.\" Source: jemalloc 3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "05/12/2012" "jemalloc 3.0.0-0-gfc9b1dbf69f5" "User Manual"
+.TH "JEMALLOC" "3" "11/09/2012" "jemalloc 3.2.0-0-g87499f6748eb" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 3\&.0\&.0\-0\-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046\&. More information can be found at the
+This manual describes jemalloc 3\&.2\&.0\-0\-g87499f6748ebe4817571e817e9f680ccb5bf54a9\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.PP
The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -310,6 +310,14 @@ Initialize newly allocated memory to contain zero bytes\&. In the growing reallo
For reallocation, fail rather than moving the object\&. This constraint can apply to both growth and shrinkage\&.
.RE
.PP
+\fBALLOCM_ARENA(\fR\fB\fIa\fR\fR\fB) \fR
+.RS 4
+Use the arena specified by the index
+\fIa\fR\&. This macro does not validate that
+\fIa\fR
+specifies an arena in the valid range\&.
+.RE
+.PP
The
\fBallocm\fR\fB\fR
function allocates at least
@@ -647,16 +655,23 @@ is specified during configuration, in which case it is enabled by default\&.
Virtual memory chunk size (log base 2)\&. The default chunk size is 4 MiB (2^22)\&.
.RE
.PP
+"opt\&.dss" (\fBconst char *\fR) r\-
+.RS 4
+dss (\fBsbrk\fR(2)) allocation precedence as related to
+\fBmmap\fR(2)
+allocation\&. The following settings are supported: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq (default)\&.
+.RE
+.PP
"opt\&.narenas" (\fBsize_t\fR) r\-
.RS 4
-Maximum number of arenas to use\&. The default maximum number of arenas is four times the number of CPUs, or one if there is a single CPU\&.
+Maximum number of arenas to use for automatic multiplexing of threads and arenas\&. The default is four times the number of CPUs, or one if there is a single CPU\&.
.RE
.PP
"opt\&.lg_dirty_mult" (\fBssize_t\fR) r\-
.RS 4
Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via
\fBmadvise\fR(2)
-or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 32:1 (2^5:1); an option value of \-1 will disable dirty page purging\&.
+or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&.
.RE
.PP
"opt\&.stats_print" (\fBbool\fR) r\-
@@ -676,7 +691,8 @@ Junk filling enabled/disabled\&. If enabled, each byte of uninitialized allocate
0xa5\&. All deallocated memory will be initialized to
0x5a\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default unless
\fB\-\-enable\-debug\fR
-is specified during configuration, in which case it is enabled by default\&.
+is specified during configuration, in which case it is enabled by default unless running inside
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
.RE
.PP
"opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -684,7 +700,7 @@ is specified during configuration, in which case it is enabled by default\&.
Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the
"opt\&.junk"
option is enabled\&. This feature is of particular use in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB\&.
.RE
.PP
"opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -692,7 +708,7 @@ option is enabled\&. This feature is of particular use in combination with
Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the
"opt\&.junk"
option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default unless running inside Valgrind\&.
.RE
.PP
"opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -714,15 +730,7 @@ enabled/disabled\&. This option is disabled by default\&.
"opt\&.valgrind" (\fBbool\fR) r\- [\fB\-\-enable\-valgrind\fR]
.RS 4
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
-support enabled/disabled\&. If enabled, several other options are automatically modified during options processing to work well with Valgrind:
-"opt\&.junk"
-and
-"opt\&.zero"
-are set to false,
-"opt\&.quarantine"
-is set to 16 MiB, and
-"opt\&.redzone"
-is set to true\&. This option is disabled by default\&.
+support enabled/disabled\&. This option is vestigal because jemalloc auto\-detects whether it is running inside Valgrind\&. This option is disabled by default, unless running inside Valgrind\&.
.RE
.PP
"opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR]
@@ -749,7 +757,8 @@ This option is disabled by default\&.
.RS 4
Thread\-specific caching enabled/disabled\&. When there are multiple threads, each thread uses a thread\-specific cache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the
"opt\&.lg_tcache_max"
-option for related tuning information\&. This option is enabled by default\&.
+option for related tuning information\&. This option is enabled by default unless running inside
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
.RE
.PP
"opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
@@ -845,9 +854,7 @@ option for information on analyzing heap profile output\&. This option is disabl
.PP
"thread\&.arena" (\fBunsigned\fR) rw
.RS 4
-Get or set the arena associated with the calling thread\&. The arena index must be less than the maximum number of arenas (see the
-"arenas\&.narenas"
-mallctl)\&. If the specified arena was not initialized beforehand (see the
+Get or set the arena associated with the calling thread\&. If the specified arena was not initialized beforehand (see the
"arenas\&.initialized"
mallctl), it will be automatically initialized as a side effect of calling this interface\&.
.RE
@@ -891,9 +898,23 @@ Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed a
Flush calling thread\*(Aqs tcache\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs thread\-specific cache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&.
.RE
.PP
+"arena\&.<i>\&.purge" (\fBunsigned\fR) \-\-
+.RS 4
+Purge unused dirty pages for arena <i>, or for all arenas if <i> equals
+"arenas\&.narenas"\&.
+.RE
+.PP
+"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
+.RS 4
+Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
+"arenas\&.narenas"\&. See
+"opt\&.dss"
+for supported settings\&.
+.RE
+.PP
"arenas\&.narenas" (\fBunsigned\fR) r\-
.RS 4
-Maximum number of arenas\&.
+Current limit on number of arenas\&.
.RE
.PP
"arenas\&.initialized" (\fBbool *\fR) r\-
@@ -958,6 +979,11 @@ Maximum size supported by this large size class\&.
Purge unused dirty pages for the specified arena, or for all arenas if none is specified\&.
.RE
.PP
+"arenas\&.extend" (\fBunsigned\fR) r\-
+.RS 4
+Extend the array of arenas by appending a new arena, and returning the new arena index\&.
+.RE
+.PP
"prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Control whether sampling is currently active\&. See the
@@ -997,7 +1023,9 @@ Total number of bytes allocated by the application\&.
"stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to
-"stats\&.allocated"\&.
+"stats\&.allocated"\&. This does not include
+"stats\&.arenas\&.<i>\&.pdirty"
+and pages entirely devoted to allocator metadata\&.
.RE
.PP
"stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
@@ -1036,6 +1064,15 @@ Cumulative number of huge allocation requests\&.
Cumulative number of huge deallocation requests\&.
.RE
.PP
+"stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\-
+.RS 4
+dss (\fBsbrk\fR(2)) allocation precedence as related to
+\fBmmap\fR(2)
+allocation\&. See
+"opt\&.dss"
+for details\&.
+.RE
+.PP
"stats\&.arenas\&.<i>\&.nthreads" (\fBunsigned\fR) r\-
.RS 4
Number of threads currently assigned to arena\&.
@@ -1197,9 +1234,7 @@ This implementation does not provide much detail about the problems it detects,
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
tool if the
\fB\-\-enable\-valgrind\fR
-configuration option is enabled and the
-"opt\&.valgrind"
-option is enabled\&.
+configuration option is enabled\&.
.SH "DIAGNOSTIC MESSAGES"
.PP
If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor
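
Editor's note: the manual-page changes above add the "opt.dss" and "arena.<i>.dss" controls for sbrk(2)-versus-mmap(2) precedence. A hedged sketch of reading and switching the precedence at run time follows; the mallctl names come from the man page, while the error handling and the assumption that setting index "arenas.narenas" covers all arenas are taken from the text above. The startup default can typically also be set via MALLOC_CONF="dss:primary".

#include <stdio.h>
#include <malloc_np.h>

int
main(void)
{
	const char *dss;
	size_t sz = sizeof(dss);
	unsigned narenas;
	size_t usz = sizeof(narenas);
	char name[64];
	const char *primary = "primary";

	/* "opt.dss" is read-only; report the configured default. */
	if (mallctl("opt.dss", &dss, &sz, NULL, 0) == 0)
		printf("opt.dss = %s\n", dss);

	/*
	 * Per the manual page, using "arenas.narenas" as the index of
	 * "arena.<i>.dss" applies the setting to all arenas.
	 */
	if (mallctl("arenas.narenas", &narenas, &usz, NULL, 0) == 0) {
		snprintf(name, sizeof(name), "arena.%u.dss", narenas);
		if (mallctl(name, NULL, NULL, &primary, sizeof(primary)) != 0)
			fprintf(stderr, "%s not writable\n", name);
	}
	return (0);
}
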
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena.h b/contrib/jemalloc/include/jemalloc/internal/arena.h
index 0b0f640a4594..561c9b6ffe9e 100644
--- a/contrib/jemalloc/include/jemalloc/internal/arena.h
+++ b/contrib/jemalloc/include/jemalloc/internal/arena.h
@@ -38,10 +38,10 @@
*
* (nactive >> opt_lg_dirty_mult) >= ndirty
*
- * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
- * times as many active pages as dirty pages.
+ * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
+ * as many active pages as dirty pages.
*/
-#define LG_DIRTY_MULT_DEFAULT 5
+#define LG_DIRTY_MULT_DEFAULT 3
typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
@@ -69,7 +69,7 @@ struct arena_chunk_map_s {
/*
* Linkage for run trees. There are two disjoint uses:
*
- * 1) arena_t's runs_avail_{clean,dirty} trees.
+ * 1) arena_t's runs_avail tree.
* 2) arena_run_t conceptually uses this linkage for in-use
* non-full runs, rather than directly embedding linkage.
*/
@@ -162,20 +162,24 @@ typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
/* Arena chunk header. */
struct arena_chunk_s {
/* Arena that owns the chunk. */
- arena_t *arena;
+ arena_t *arena;
- /* Linkage for the arena's chunks_dirty list. */
- ql_elm(arena_chunk_t) link_dirty;
-
- /*
- * True if the chunk is currently in the chunks_dirty list, due to
- * having at some point contained one or more dirty pages. Removal
- * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
- */
- bool dirtied;
+ /* Linkage for tree of arena chunks that contain dirty runs. */
+ rb_node(arena_chunk_t) dirty_link;
/* Number of dirty pages. */
- size_t ndirty;
+ size_t ndirty;
+
+ /* Number of available runs. */
+ size_t nruns_avail;
+
+ /*
+ * Number of available run adjacencies. Clean and dirty available runs
+ * are not coalesced, which causes virtual memory fragmentation. The
+ * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
+ * this fragmentation.
+ * */
+ size_t nruns_adjac;
/*
* Map of pages within chunk that keeps track of free/large/small. The
@@ -183,7 +187,7 @@ struct arena_chunk_s {
* need to be tracked in the map. This omission saves a header page
* for common chunk sizes (e.g. 4 MiB).
*/
- arena_chunk_map_t map[1]; /* Dynamically sized. */
+ arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
@@ -331,8 +335,10 @@ struct arena_s {
uint64_t prof_accumbytes;
- /* List of dirty-page-containing chunks this arena manages. */
- ql_head(arena_chunk_t) chunks_dirty;
+ dss_prec_t dss_prec;
+
+ /* Tree of dirty-page-containing chunks this arena manages. */
+ arena_chunk_tree_t chunks_dirty;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
@@ -367,18 +373,9 @@ struct arena_s {
/*
* Size/address-ordered trees of this arena's available runs. The trees
- * are used for first-best-fit run allocation. The dirty tree contains
- * runs with dirty pages (i.e. very likely to have been touched and
- * therefore have associated physical pages), whereas the clean tree
- * contains runs with pages that either have no associated physical
- * pages, or have pages that the kernel may recycle at any time due to
- * previous madvise(2) calls. The dirty tree is used in preference to
- * the clean tree for allocations, because using dirty pages reduces
- * the amount of dirty purging necessary to keep the active:dirty page
- * ratio below the purge threshold.
+ * are used for first-best-fit run allocation.
*/
- arena_avail_tree_t runs_avail_clean;
- arena_avail_tree_t runs_avail_dirty;
+ arena_avail_tree_t runs_avail;
/* bins is used to store trees of free regions. */
arena_bin_t bins[NBINS];
@@ -422,13 +419,16 @@ void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
- arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats);
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
-void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache);
+void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
+ bool try_tcache_dalloc);
+dss_prec_t arena_dss_prec_get(arena_t *arena);
+void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+ size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats);
bool arena_new(arena_t *arena, unsigned ind);
void arena_boot(void);
void arena_prefork(arena_t *arena);
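
Editor's note: the arena.h comment above restates the dirty-page invariant with the new LG_DIRTY_MULT_DEFAULT of 3. A purely illustrative sketch of that threshold arithmetic (the function name is not jemalloc's internal API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * The invariant the allocator maintains is
 * (nactive >> opt_lg_dirty_mult) >= ndirty.  With the new default of 3,
 * purging starts once dirty pages exceed 1/8 of active pages; the old
 * default of 5 allowed dirty pages to reach 1/32 of active pages.
 */
static bool
purge_needed(size_t nactive, size_t ndirty, unsigned lg_dirty_mult)
{
	return (ndirty > (nactive >> lg_dirty_mult));
}

int
main(void)
{
	printf("%d\n", purge_needed(1024, 100, 3));	/* 100 <= 128 -> 0 */
	printf("%d\n", purge_needed(1024, 200, 3));	/* 200 >  128 -> 1 */
	printf("%d\n", purge_needed(1024, 100, 5));	/* 100 >   32 -> 1 */
	return (0);
}
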
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk.h b/contrib/jemalloc/include/jemalloc/internal/chunk.h
index 8fb1fe6d15c1..87d8700dac8a 100644
--- a/contrib/jemalloc/include/jemalloc/internal/chunk.h
+++ b/contrib/jemalloc/include/jemalloc/internal/chunk.h
@@ -28,6 +28,7 @@
#ifdef JEMALLOC_H_EXTERNS
extern size_t opt_lg_chunk;
+extern const char *opt_dss;
/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;
@@ -42,9 +43,14 @@ extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */
-void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
+void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+ dss_prec_t dss_prec);
+void chunk_unmap(void *chunk, size_t size);
void chunk_dealloc(void *chunk, size_t size, bool unmap);
bool chunk_boot(void);
+void chunk_prefork(void);
+void chunk_postfork_parent(void);
+void chunk_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h b/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
index 6e2643b24c3e..6585f071bbec 100644
--- a/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -1,14 +1,28 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
+typedef enum {
+ dss_prec_disabled = 0,
+ dss_prec_primary = 1,
+ dss_prec_secondary = 2,
+
+ dss_prec_limit = 3
+} dss_prec_t ;
+#define DSS_PREC_DEFAULT dss_prec_secondary
+#define DSS_DEFAULT "secondary"
+
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
+extern const char *dss_prec_names[];
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
+dss_prec_t chunk_dss_prec_get(void);
+bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
bool chunk_in_dss(void *chunk);
bool chunk_dss_boot(void);
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h b/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
index b29f39e9eafe..f24abac75382 100644
--- a/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
+++ b/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
@@ -9,7 +9,7 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void pages_purge(void *addr, size_t length);
+bool pages_purge(void *addr, size_t length);
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
bool chunk_dealloc_mmap(void *chunk, size_t size);
diff --git a/contrib/jemalloc/include/jemalloc/internal/ctl.h b/contrib/jemalloc/include/jemalloc/internal/ctl.h
index adf3827f0948..0ffecc5f2a23 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ctl.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ctl.h
@@ -33,6 +33,7 @@ struct ctl_indexed_node_s {
struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
+ const char *dss;
size_t pactive;
size_t pdirty;
arena_stats_t astats;
@@ -61,6 +62,7 @@ struct ctl_stats_s {
uint64_t nmalloc; /* huge_nmalloc */
uint64_t ndalloc; /* huge_ndalloc */
} huge;
+ unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
@@ -75,6 +77,9 @@ int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
bool ctl_boot(void);
+void ctl_prefork(void);
+void ctl_postfork_parent(void);
+void ctl_postfork_child(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent.h b/contrib/jemalloc/include/jemalloc/internal/extent.h
index 36af8be8995f..ba95ca816bd9 100644
--- a/contrib/jemalloc/include/jemalloc/internal/extent.h
+++ b/contrib/jemalloc/include/jemalloc/internal/extent.h
@@ -23,6 +23,9 @@ struct extent_node_s {
/* Total region size. */
size_t size;
+
+ /* True if zero-filled; used by chunk recycling code. */
+ bool zeroed;
};
typedef rb_tree(extent_node_t) extent_tree_t;
diff --git a/contrib/jemalloc/include/jemalloc/internal/huge.h b/contrib/jemalloc/include/jemalloc/internal/huge.h
index e8513c933e24..d987d370767a 100644
--- a/contrib/jemalloc/include/jemalloc/internal/huge.h
+++ b/contrib/jemalloc/include/jemalloc/internal/huge.h
@@ -22,7 +22,7 @@ void *huge_palloc(size_t size, size_t alignment, bool zero);
void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero);
+ size_t alignment, bool zero, bool try_tcache_dalloc);
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
index 32cdc6f31425..4214c5390294 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
@@ -270,6 +270,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
+# ifdef __hppa__
+# define LG_QUANTUM 4
+# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
@@ -424,6 +427,7 @@ static const bool config_ivsalloc =
VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
+#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
@@ -510,13 +514,19 @@ extern size_t opt_narenas;
/* Number of CPUs. */
extern unsigned ncpus;
-extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
+/* Protects arenas initialization (arenas, arenas_total). */
+extern malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas. arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
*/
extern arena_t **arenas;
-extern unsigned narenas;
+extern unsigned narenas_total;
+extern unsigned narenas_auto; /* Read-only after initialization. */
arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
@@ -571,6 +581,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
+unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena);
#endif
@@ -675,6 +686,18 @@ sa2u(size_t size, size_t alignment)
}
}
+JEMALLOC_INLINE unsigned
+narenas_total_get(void)
+{
+ unsigned narenas;
+
+ malloc_mutex_lock(&arenas_lock);
+ narenas = narenas_total;
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (narenas);
+}
+
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
@@ -710,15 +733,24 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
+void *imallocx(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
+void *icallocx(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
+void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
+void idallocx(void *ptr, bool try_tcache);
void idalloc(void *ptr);
+void iqallocx(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
+void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
@@ -726,29 +758,44 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
-imalloc(size_t size)
+imallocx(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
if (size <= arena_maxclass)
- return (arena_malloc(NULL, size, false, true));
+ return (arena_malloc(arena, size, false, try_tcache));
else
return (huge_malloc(size, false));
}
JEMALLOC_INLINE void *
-icalloc(size_t size)
+imalloc(size_t size)
+{
+
+ return (imallocx(size, true, NULL));
+}
+
+JEMALLOC_INLINE void *
+icallocx(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
- return (arena_malloc(NULL, size, true, true));
+ return (arena_malloc(arena, size, true, try_tcache));
else
return (huge_malloc(size, true));
}
JEMALLOC_INLINE void *
-ipalloc(size_t usize, size_t alignment, bool zero)
+icalloc(size_t size)
+{
+
+ return (icallocx(size, true, NULL));
+}
+
+JEMALLOC_INLINE void *
+ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena)
{
void *ret;
@@ -756,11 +803,11 @@ ipalloc(size_t usize, size_t alignment, bool zero)
assert(usize == sa2u(usize, alignment));
if (usize <= arena_maxclass && alignment <= PAGE)
- ret = arena_malloc(NULL, usize, zero, true);
+ ret = arena_malloc(arena, usize, zero, try_tcache);
else {
if (usize <= arena_maxclass) {
- ret = arena_palloc(choose_arena(NULL), usize, alignment,
- zero);
+ ret = arena_palloc(choose_arena(arena), usize,
+ alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
else
@@ -771,6 +818,13 @@ ipalloc(size_t usize, size_t alignment, bool zero)
return (ret);
}
+JEMALLOC_INLINE void *
+ipalloc(size_t usize, size_t alignment, bool zero)
+{
+
+ return (ipallocx(usize, alignment, zero, true, NULL));
+}
+
/*
* Typical usage:
* void *ptr = [...]
@@ -829,7 +883,7 @@ p2rz(const void *ptr)
}
JEMALLOC_INLINE void
-idalloc(void *ptr)
+idallocx(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
@@ -837,24 +891,38 @@ idalloc(void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr, true);
+ arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
else
huge_dalloc(ptr, true);
}
JEMALLOC_INLINE void
-iqalloc(void *ptr)
+idalloc(void *ptr)
+{
+
+ idallocx(ptr, true);
+}
+
+JEMALLOC_INLINE void
+iqallocx(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
- idalloc(ptr);
+ idallocx(ptr, try_tcache);
+}
+
+JEMALLOC_INLINE void
+iqalloc(void *ptr)
+{
+
+ iqallocx(ptr, true);
}
JEMALLOC_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move)
+irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+ bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
void *ret;
size_t oldsize;
@@ -877,7 +945,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
if (ret == NULL) {
if (extra == 0)
return (NULL);
@@ -885,7 +953,8 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ arena);
if (ret == NULL)
return (NULL);
}
@@ -896,7 +965,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
- iqalloc(ptr);
+ iqallocx(ptr, try_tcache_dalloc);
return (ret);
}
@@ -910,15 +979,25 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
}
} else {
if (size + extra <= arena_maxclass) {
- return (arena_ralloc(ptr, oldsize, size, extra,
- alignment, zero, true));
+ return (arena_ralloc(arena, ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
- alignment, zero));
+ alignment, zero, try_tcache_dalloc));
}
}
}
+JEMALLOC_INLINE void *
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+ bool no_move)
+{
+
+ return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
+ NULL));
+}
+
malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
diff --git a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
index b8ce6b16125a..7b19906caa11 100644
--- a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -12,6 +12,8 @@
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
+#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
+#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
@@ -51,14 +53,13 @@
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
-#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
-#define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
#define arenas_tls JEMALLOC_N(arenas_tls)
+#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
@@ -101,9 +102,15 @@
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
+#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
+#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages)
+#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
+#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
+#define chunk_prefork JEMALLOC_N(chunk_prefork)
+#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
@@ -129,6 +136,10 @@
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
+#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
+#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
+#define ctl_prefork JEMALLOC_N(ctl_prefork)
+#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
@@ -161,6 +172,7 @@
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
+#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
@@ -180,11 +192,17 @@
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
+#define icallocx JEMALLOC_N(icallocx)
#define idalloc JEMALLOC_N(idalloc)
+#define idallocx JEMALLOC_N(idallocx)
#define imalloc JEMALLOC_N(imalloc)
+#define imallocx JEMALLOC_N(imallocx)
#define ipalloc JEMALLOC_N(ipalloc)
+#define ipallocx JEMALLOC_N(ipallocx)
#define iqalloc JEMALLOC_N(iqalloc)
+#define iqallocx JEMALLOC_N(iqallocx)
#define iralloc JEMALLOC_N(iralloc)
+#define irallocx JEMALLOC_N(irallocx)
#define isalloc JEMALLOC_N(isalloc)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
@@ -211,7 +229,9 @@
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
-#define narenas JEMALLOC_N(narenas)
+#define narenas_auto JEMALLOC_N(narenas_auto)
+#define narenas_total JEMALLOC_N(narenas_total)
+#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
@@ -253,6 +273,9 @@
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump)
+#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
+#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
+#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
@@ -263,6 +286,7 @@
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
+#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
@@ -277,12 +301,13 @@
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
+#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
+#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
+#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
-#define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index)
-#define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
-#define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
+#define set_errno JEMALLOC_N(set_errno)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
@@ -310,6 +335,7 @@
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
+#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
@@ -324,6 +350,7 @@
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
+#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
@@ -331,6 +358,7 @@
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
+#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof.h b/contrib/jemalloc/include/jemalloc/internal/prof.h
index c3e3f9e4bcd8..47f22ad2da16 100644
--- a/contrib/jemalloc/include/jemalloc/internal/prof.h
+++ b/contrib/jemalloc/include/jemalloc/internal/prof.h
@@ -223,6 +223,9 @@ void prof_tdata_cleanup(void *arg);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
+void prof_prefork(void);
+void prof_postfork_parent(void);
+void prof_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -506,7 +509,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
if ((uintptr_t)cnt > (uintptr_t)1U) {
prof_ctx_set(ptr, cnt->ctx);
cnt->epoch++;
- } else
+ } else if (ptr != NULL)
prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
diff --git a/contrib/jemalloc/include/jemalloc/internal/rtree.h b/contrib/jemalloc/include/jemalloc/internal/rtree.h
index 95d6355a5f49..9bd98548cfed 100644
--- a/contrib/jemalloc/include/jemalloc/internal/rtree.h
+++ b/contrib/jemalloc/include/jemalloc/internal/rtree.h
@@ -36,6 +36,9 @@ struct rtree_s {
#ifdef JEMALLOC_H_EXTERNS
rtree_t *rtree_new(unsigned bits);
+void rtree_prefork(rtree_t *rtree);
+void rtree_postfork_parent(rtree_t *rtree);
+void rtree_postfork_child(rtree_t *rtree);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc.h b/contrib/jemalloc/include/jemalloc/jemalloc.h
index 65440b36bb31..629dbb4942c2 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc.h
@@ -7,12 +7,12 @@ extern "C" {
#include <limits.h>
#include <strings.h>
-#define JEMALLOC_VERSION "3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046"
+#define JEMALLOC_VERSION "3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9"
#define JEMALLOC_VERSION_MAJOR 3
-#define JEMALLOC_VERSION_MINOR 0
+#define JEMALLOC_VERSION_MINOR 2
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "fc9b1dbf69f59d7ecfc4ac68da9847e017e1d046"
+#define JEMALLOC_VERSION_GID "87499f6748ebe4817571e817e9f680ccb5bf54a9"
#include "jemalloc_defs.h"
#include "jemalloc_FreeBSD.h"
@@ -26,6 +26,8 @@ extern "C" {
#endif
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)
+/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
+#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
@@ -60,7 +62,8 @@ JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
-JEMALLOC_EXPORT size_t je_malloc_usable_size(const void *ptr);
+JEMALLOC_EXPORT size_t je_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
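
Editor's note: the jemalloc.h hunk above encodes the arena index for ALLOCM_ARENA() with a +1 bias so that a flags word of zero still means "no arena specified". The small self-contained check below exercises that encoding together with a presumed inverse; the decode function is an assumption for illustration and is not copied from this commit.

#include <assert.h>

#define ALLOCM_ARENA(a)	((int)(((a) + 1) << 8))	/* as added above */

static unsigned
allocm_arena_decode(int flags)
{
	/* Presumed inverse: yields (unsigned)-1 when no arena was encoded. */
	return (((unsigned)flags >> 8) - 1);
}

int
main(void)
{
	assert(ALLOCM_ARENA(0) == 0x100);
	assert(allocm_arena_decode(ALLOCM_ARENA(7)) == 7);
	assert(allocm_arena_decode(0) == (unsigned)-1);
	return (0);
}
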
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_defs.h b/contrib/jemalloc/include/jemalloc/jemalloc_defs.h
index 85571b9a6594..169078be8e34 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc_defs.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_defs.h
@@ -223,6 +223,15 @@
#define JEMALLOC_OVERRIDE_VALLOC
/*
+ * At least Linux omits the "const" in:
+ *
+ * size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST const
+
+/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
index 2a6150f3e8d8..0c53b071b87f 100644
--- a/contrib/jemalloc/src/arena.c
+++ b/contrib/jemalloc/src/arena.c
@@ -40,6 +40,12 @@ const uint8_t small_size2bin[] = {
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
+static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
+ size_t pageind, size_t npages, bool maybe_adjac_pred,
+ bool maybe_adjac_succ);
+static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
+ size_t pageind, size_t npages, bool maybe_adjac_pred,
+ bool maybe_adjac_succ);
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
@@ -48,8 +54,11 @@ static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
size_t binind, bool zero);
+static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
+ arena_chunk_t *chunk, void *arg);
static void arena_purge(arena_t *arena, bool all);
-static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
+static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
+ bool cleaned);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
@@ -101,9 +110,6 @@ arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
size_t a_size = a->bits & ~PAGE_MASK;
size_t b_size = b->bits & ~PAGE_MASK;
- assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
- CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));
-
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
uintptr_t a_mapelm, b_mapelm;
@@ -129,6 +135,182 @@ arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
u.rb_link, arena_avail_comp)
+static inline int
+arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
+{
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ /*
+ * Short-circuit for self comparison. The following comparison code
+ * would come to the same result, but at the cost of executing the slow
+ * path.
+ */
+ if (a == b)
+ return (0);
+
+ /*
+ * Order such that chunks with higher fragmentation are "less than"
+ * those with lower fragmentation -- purging order is from "least" to
+ * "greatest". Fragmentation is measured as:
+ *
+ * mean current avail run size
+ * --------------------------------
+ * mean defragmented avail run size
+ *
+ * navail
+ * -----------
+ * nruns_avail nruns_avail-nruns_adjac
+ * = ========================= = -----------------------
+ * navail nruns_avail
+ * -----------------------
+ * nruns_avail-nruns_adjac
+ *
+ * The following code multiplies away the denominator prior to
+ * comparison, in order to avoid division.
+ *
+ */
+ {
+ size_t a_val = (a->nruns_avail - a->nruns_adjac) *
+ b->nruns_avail;
+ size_t b_val = (b->nruns_avail - b->nruns_adjac) *
+ a->nruns_avail;
+
+ if (a_val < b_val)
+ return (1);
+ if (a_val > b_val)
+ return (-1);
+ }
+ /*
+ * Break ties by chunk address. For fragmented chunks, report lower
+ * addresses as "lower", so that fragmentation reduction happens first
+ * at lower addresses. However, use the opposite ordering for
+ * unfragmented chunks, in order to increase the chances of
+ * re-allocating dirty runs.
+ */
+ {
+ uintptr_t a_chunk = (uintptr_t)a;
+ uintptr_t b_chunk = (uintptr_t)b;
+ int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
+ if (a->nruns_adjac == 0) {
+ assert(b->nruns_adjac == 0);
+ ret = -ret;
+ }
+ return (ret);
+ }
+}
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
+ dirty_link, arena_chunk_dirty_comp)
+
+static inline bool
+arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
+{
+ bool ret;
+
+ if (pageind-1 < map_bias)
+ ret = false;
+ else {
+ ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
+ assert(ret == false || arena_mapbits_dirty_get(chunk,
+ pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
+ }
+ return (ret);
+}
+
+static inline bool
+arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
+{
+ bool ret;
+
+ if (pageind+npages == chunk_npages)
+ ret = false;
+ else {
+ assert(pageind+npages < chunk_npages);
+ ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
+ assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
+ != arena_mapbits_dirty_get(chunk, pageind+npages));
+ }
+ return (ret);
+}
+
+static inline bool
+arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
+{
+
+ return (arena_avail_adjac_pred(chunk, pageind) ||
+ arena_avail_adjac_succ(chunk, pageind, npages));
+}
+
+static void
+arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+{
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+
+ /*
+ * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
+ * removed and reinserted even if the run to be inserted is clean.
+ */
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+
+ if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
+ chunk->nruns_adjac++;
+ if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
+ chunk->nruns_adjac++;
+ chunk->nruns_avail++;
+ assert(chunk->nruns_avail > chunk->nruns_adjac);
+
+ if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
+ arena->ndirty += npages;
+ chunk->ndirty += npages;
+ }
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+
+ arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
+ pageind));
+}
+
+static void
+arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+{
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+
+ /*
+ * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
+ * removed and reinserted even if the run to be removed is clean.
+ */
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+
+ if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
+ chunk->nruns_adjac--;
+ if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
+ chunk->nruns_adjac--;
+ chunk->nruns_avail--;
+ assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
+ == 0 && chunk->nruns_adjac == 0));
+
+ if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
+ arena->ndirty -= npages;
+ chunk->ndirty -= npages;
+ }
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+
+ arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
+ pageind));
+}
+
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
@@ -193,7 +375,6 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
arena_chunk_t *chunk;
size_t run_ind, total_pages, need_pages, rem_pages, i;
size_t flag_dirty;
- arena_avail_tree_t *runs_avail;
assert((large && binind == BININD_INVALID) || (large == false && binind
!= BININD_INVALID));
@@ -201,8 +382,6 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
- &arena->runs_avail_clean;
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE;
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
@@ -212,7 +391,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;
- arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind));
+ arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
if (config_stats) {
/*
* Update stats_cactive if nactive is crossing a chunk
@@ -244,14 +423,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
arena_mapbits_unzeroed_get(chunk,
run_ind+total_pages-1));
}
- arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
- run_ind+need_pages));
- }
-
- /* Update dirty page accounting. */
- if (flag_dirty != 0) {
- chunk->ndirty -= need_pages;
- arena->ndirty -= need_pages;
+ arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
+ false, true);
}
/*
@@ -344,8 +517,6 @@ arena_chunk_alloc(arena_t *arena)
size_t i;
if (arena->spare != NULL) {
- arena_avail_tree_t *runs_avail;
-
chunk = arena->spare;
arena->spare = NULL;
@@ -357,14 +528,6 @@ arena_chunk_alloc(arena_t *arena)
chunk_npages-1) == arena_maxclass);
assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1));
-
- /* Insert the run into the appropriate runs_avail_* tree. */
- if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
- runs_avail = &arena->runs_avail_clean;
- else
- runs_avail = &arena->runs_avail_dirty;
- arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
- map_bias));
} else {
bool zero;
size_t unzeroed;
@@ -372,7 +535,7 @@ arena_chunk_alloc(arena_t *arena)
zero = false;
malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
- false, &zero);
+ false, &zero, arena->dss_prec);
malloc_mutex_lock(&arena->lock);
if (chunk == NULL)
return (NULL);
@@ -380,8 +543,6 @@ arena_chunk_alloc(arena_t *arena)
arena->stats.mapped += chunksize;
chunk->arena = arena;
- ql_elm_new(chunk, link_dirty);
- chunk->dirtied = false;
/*
* Claim that no pages are in use, since the header is merely
@@ -389,6 +550,9 @@ arena_chunk_alloc(arena_t *arena)
*/
chunk->ndirty = 0;
+ chunk->nruns_avail = 0;
+ chunk->nruns_adjac = 0;
+
/*
* Initialize the map to contain one maximal free untouched run.
* Mark the pages as zeroed iff chunk_alloc() returned a zeroed
@@ -412,20 +576,18 @@ arena_chunk_alloc(arena_t *arena)
}
arena_mapbits_unallocated_set(chunk, chunk_npages-1,
arena_maxclass, unzeroed);
-
- /* Insert the run into the runs_avail_clean tree. */
- arena_avail_tree_insert(&arena->runs_avail_clean,
- arena_mapp_get(chunk, map_bias));
}
+ /* Insert the run into the runs_avail tree. */
+ arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
+ false, false);
+
return (chunk);
}
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
- arena_avail_tree_t *runs_avail;
-
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
@@ -436,24 +598,16 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
arena_mapbits_dirty_get(chunk, chunk_npages-1));
/*
- * Remove run from the appropriate runs_avail_* tree, so that the arena
- * does not use it.
+ * Remove run from the runs_avail tree, so that the arena does not use
+ * it.
*/
- if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
- runs_avail = &arena->runs_avail_clean;
- else
- runs_avail = &arena->runs_avail_dirty;
- arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, map_bias));
+ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
+ false, false);
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
arena->spare = chunk;
- if (spare->dirtied) {
- ql_remove(&chunk->arena->chunks_dirty, spare,
- link_dirty);
- arena->ndirty -= spare->ndirty;
- }
malloc_mutex_unlock(&arena->lock);
chunk_dealloc((void *)spare, chunksize, true);
malloc_mutex_lock(&arena->lock);
@@ -471,19 +625,7 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
arena_chunk_map_t *mapelm, key;
key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
-
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split(arena, run, size, large, binind, zero);
- return (run);
- }
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = (((uintptr_t)mapelm -
@@ -537,41 +679,40 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
static inline void
arena_maybe_purge(arena_t *arena)
{
+ size_t npurgeable, threshold;
+
+ /* Don't purge if the option is disabled. */
+ if (opt_lg_dirty_mult < 0)
+ return;
+ /* Don't purge if all dirty pages are already being purged. */
+ if (arena->ndirty <= arena->npurgatory)
+ return;
+ npurgeable = arena->ndirty - arena->npurgatory;
+ threshold = (arena->nactive >> opt_lg_dirty_mult);
+ /*
+ * Don't purge unless the number of purgeable pages exceeds the
+ * threshold.
+ */
+ if (npurgeable <= threshold)
+ return;
- /* Enforce opt_lg_dirty_mult. */
- if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
- (arena->ndirty - arena->npurgatory) > chunk_npages &&
- (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
- arena->npurgatory))
- arena_purge(arena, false);
+ arena_purge(arena, false);
}
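
arena_maybe_purge() now spells the trigger out explicitly: nothing happens while opt_lg_dirty_mult is negative or while every dirty page is already being purged, and otherwise purging starts only once the purgeable pages outnumber nactive >> lg_dirty_mult. A minimal sketch of just that arithmetic (the values in main() are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <sys/types.h>

    static bool
    should_purge(ssize_t lg_dirty_mult, size_t nactive, size_t ndirty,
        size_t npurgatory)
    {
        size_t npurgeable, threshold;

        if (lg_dirty_mult < 0)          /* Purging disabled. */
            return (false);
        if (ndirty <= npurgatory)       /* Everything already in flight. */
            return (false);
        npurgeable = ndirty - npurgatory;
        threshold = nactive >> lg_dirty_mult;
        return (npurgeable > threshold);
    }

    int
    main(void)
    {
        /* With lg_dirty_mult == 3 (an 8:1 active:dirty ratio), 8192 */
        /* active pages tolerate up to 1024 purgeable dirty pages.   */
        printf("%d\n", should_purge(3, 8192, 1024, 0)); /* 0: at threshold */
        printf("%d\n", should_purge(3, 8192, 1025, 0)); /* 1: over threshold */
        return (0);
    }
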
-static inline void
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
+static inline size_t
+arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
+ size_t npurged;
ql_head(arena_chunk_map_t) mapelms;
arena_chunk_map_t *mapelm;
- size_t pageind, flag_unzeroed;
- size_t ndirty;
+ size_t pageind, npages;
size_t nmadvise;
ql_new(&mapelms);
- flag_unzeroed =
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
- /*
- * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
- * mappings, but not for file-backed mappings.
- */
- 0
-#else
- CHUNK_MAP_UNZEROED
-#endif
- ;
-
/*
* If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
+ * run is reinserted into runs_avail, and 2) so that it cannot be
* completely discarded by another thread while arena->lock is dropped
* by this thread. Note that the arena_run_dalloc() call will
* implicitly deallocate the chunk, so no explicit action is required
@@ -591,68 +732,50 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
arena_chunk_alloc(arena);
}
- /* Temporarily allocate all free dirty runs within chunk. */
- for (pageind = map_bias; pageind < chunk_npages;) {
+ if (config_stats)
+ arena->stats.purged += chunk->ndirty;
+
+ /*
+ * Operate on all dirty runs if there is no clean/dirty run
+ * fragmentation.
+ */
+ if (chunk->nruns_adjac == 0)
+ all = true;
+
+ /*
+ * Temporarily allocate free dirty runs within chunk. If all is false,
+ * only operate on dirty runs that are fragments; otherwise operate on
+ * all dirty runs.
+ */
+ for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
mapelm = arena_mapp_get(chunk, pageind);
if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
- size_t npages;
+ size_t run_size =
+ arena_mapbits_unallocated_size_get(chunk, pageind);
- npages = arena_mapbits_unallocated_size_get(chunk,
- pageind) >> LG_PAGE;
+ npages = run_size >> LG_PAGE;
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+npages-1));
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- size_t i;
-
- arena_avail_tree_remove(
- &arena->runs_avail_dirty, mapelm);
- arena_mapbits_unzeroed_set(chunk, pageind,
- flag_unzeroed);
- arena_mapbits_large_set(chunk, pageind,
- (npages << LG_PAGE), 0);
- /*
- * Update internal elements in the page map, so
- * that CHUNK_MAP_UNZEROED is properly set.
- */
- for (i = 1; i < npages - 1; i++) {
- arena_mapbits_unzeroed_set(chunk,
- pageind+i, flag_unzeroed);
- }
- if (npages > 1) {
- arena_mapbits_unzeroed_set(chunk,
- pageind+npages-1, flag_unzeroed);
- arena_mapbits_large_set(chunk,
- pageind+npages-1, 0, 0);
- }
+ if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
+ (all || arena_avail_adjac(chunk, pageind,
+ npages))) {
+ arena_run_t *run = (arena_run_t *)((uintptr_t)
+ chunk + (uintptr_t)(pageind << LG_PAGE));
- if (config_stats) {
- /*
- * Update stats_cactive if nactive is
- * crossing a chunk multiple.
- */
- size_t cactive_diff =
- CHUNK_CEILING((arena->nactive +
- npages) << LG_PAGE) -
- CHUNK_CEILING(arena->nactive <<
- LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_add(cactive_diff);
- }
- arena->nactive += npages;
+ arena_run_split(arena, run, run_size, true,
+ BININD_INVALID, false);
/* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link);
ql_tail_insert(&mapelms, mapelm, u.ql_link);
}
-
- pageind += npages;
} else {
- /* Skip allocated run. */
- if (arena_mapbits_large_get(chunk, pageind))
- pageind += arena_mapbits_large_size_get(chunk,
+ /* Skip run. */
+ if (arena_mapbits_large_get(chunk, pageind) != 0) {
+ npages = arena_mapbits_large_size_get(chunk,
pageind) >> LG_PAGE;
- else {
+ } else {
size_t binind;
arena_bin_info_t *bin_info;
arena_run_t *run = (arena_run_t *)((uintptr_t)
@@ -662,41 +785,48 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
pageind) == 0);
binind = arena_bin_index(arena, run->bin);
bin_info = &arena_bin_info[binind];
- pageind += bin_info->run_size >> LG_PAGE;
+ npages = bin_info->run_size >> LG_PAGE;
}
}
}
assert(pageind == chunk_npages);
-
- if (config_debug)
- ndirty = chunk->ndirty;
- if (config_stats)
- arena->stats.purged += chunk->ndirty;
- arena->ndirty -= chunk->ndirty;
- chunk->ndirty = 0;
- ql_remove(&arena->chunks_dirty, chunk, link_dirty);
- chunk->dirtied = false;
+ assert(chunk->ndirty == 0 || all == false);
+ assert(chunk->nruns_adjac == 0);
malloc_mutex_unlock(&arena->lock);
if (config_stats)
nmadvise = 0;
+ npurged = 0;
ql_foreach(mapelm, &mapelms, u.ql_link) {
- size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+ bool unzeroed;
+ size_t flag_unzeroed, i;
+
+ pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias;
- size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
+ npages = arena_mapbits_large_size_get(chunk, pageind) >>
LG_PAGE;
-
assert(pageind + npages <= chunk_npages);
- assert(ndirty >= npages);
- if (config_debug)
- ndirty -= npages;
-
- pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
- (npages << LG_PAGE));
+ unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
+ LG_PAGE)), (npages << LG_PAGE));
+ flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
+ /*
+ * Set the unzeroed flag for all pages, now that pages_purge()
+ * has returned whether the pages were zeroed as a side effect
+ * of purging. This chunk map modification is safe even though
+ * the arena mutex isn't currently owned by this thread,
+ * because the run is marked as allocated, thus protecting it
+ * from being modified by any other thread. As long as these
+ * writes don't perturb the first and last elements'
+ * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+ */
+ for (i = 0; i < npages; i++) {
+ arena_mapbits_unzeroed_set(chunk, pageind+i,
+ flag_unzeroed);
+ }
+ npurged += npages;
if (config_stats)
nmadvise++;
}
- assert(ndirty == 0);
malloc_mutex_lock(&arena->lock);
if (config_stats)
arena->stats.nmadvise += nmadvise;
@@ -704,14 +834,27 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
/* Deallocate runs. */
for (mapelm = ql_first(&mapelms); mapelm != NULL;
mapelm = ql_first(&mapelms)) {
- size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)(pageind << LG_PAGE));
+ arena_run_t *run;
+ pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+ sizeof(arena_chunk_map_t)) + map_bias;
+ run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
+ LG_PAGE));
ql_remove(&mapelms, mapelm, u.ql_link);
- arena_run_dalloc(arena, run, false);
+ arena_run_dalloc(arena, run, false, true);
}
+
+ return (npurged);
+}
+
+static arena_chunk_t *
+chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
+{
+ size_t *ndirty = (size_t *)arg;
+
+ assert(chunk->ndirty != 0);
+ *ndirty += chunk->ndirty;
+ return (NULL);
}
static void
@@ -722,14 +865,11 @@ arena_purge(arena_t *arena, bool all)
if (config_debug) {
size_t ndirty = 0;
- ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
- assert(chunk->dirtied);
- ndirty += chunk->ndirty;
- }
+ arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
+ chunks_dirty_iter_cb, (void *)&ndirty);
assert(ndirty == arena->ndirty);
}
assert(arena->ndirty > arena->npurgatory || all);
- assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
arena->npurgatory) || all);
@@ -741,16 +881,24 @@ arena_purge(arena_t *arena, bool all)
* purge, and add the result to arena->npurgatory. This will keep
* multiple threads from racing to reduce ndirty below the threshold.
*/
- npurgatory = arena->ndirty - arena->npurgatory;
- if (all == false) {
- assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
- npurgatory -= arena->nactive >> opt_lg_dirty_mult;
+ {
+ size_t npurgeable = arena->ndirty - arena->npurgatory;
+
+ if (all == false) {
+ size_t threshold = (arena->nactive >>
+ opt_lg_dirty_mult);
+
+ npurgatory = npurgeable - threshold;
+ } else
+ npurgatory = npurgeable;
}
arena->npurgatory += npurgatory;
while (npurgatory > 0) {
+ size_t npurgeable, npurged, nunpurged;
+
/* Get next chunk with dirty pages. */
- chunk = ql_first(&arena->chunks_dirty);
+ chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
if (chunk == NULL) {
/*
* This thread was unable to purge as many pages as
@@ -761,23 +909,15 @@ arena_purge(arena_t *arena, bool all)
arena->npurgatory -= npurgatory;
return;
}
- while (chunk->ndirty == 0) {
- ql_remove(&arena->chunks_dirty, chunk, link_dirty);
- chunk->dirtied = false;
- chunk = ql_first(&arena->chunks_dirty);
- if (chunk == NULL) {
- /* Same logic as for above. */
- arena->npurgatory -= npurgatory;
- return;
- }
- }
+ npurgeable = chunk->ndirty;
+ assert(npurgeable != 0);
- if (chunk->ndirty > npurgatory) {
+ if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
/*
- * This thread will, at a minimum, purge all the dirty
- * pages in chunk, so set npurgatory to reflect this
- * thread's commitment to purge the pages. This tends
- * to reduce the chances of the following scenario:
+ * This thread will purge all the dirty pages in chunk,
+ * so set npurgatory to reflect this thread's intent to
+ * purge the pages. This tends to reduce the chances
+ * of the following scenario:
*
* 1) This thread sets arena->npurgatory such that
* (arena->ndirty - arena->npurgatory) is at the
@@ -791,13 +931,20 @@ arena_purge(arena_t *arena, bool all)
* because all of the purging work being done really
* needs to happen.
*/
- arena->npurgatory += chunk->ndirty - npurgatory;
- npurgatory = chunk->ndirty;
+ arena->npurgatory += npurgeable - npurgatory;
+ npurgatory = npurgeable;
}
- arena->npurgatory -= chunk->ndirty;
- npurgatory -= chunk->ndirty;
- arena_chunk_purge(arena, chunk);
+ /*
+ * Keep track of how many pages are purgeable, versus how many
+ * actually get purged, and adjust counters accordingly.
+ */
+ arena->npurgatory -= npurgeable;
+ npurgatory -= npurgeable;
+ npurged = arena_chunk_purge(arena, chunk, all);
+ nunpurged = npurgeable - npurged;
+ arena->npurgatory += nunpurged;
+ npurgatory += nunpurged;
}
}
@@ -811,11 +958,10 @@ arena_purge_all(arena_t *arena)
}
static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
arena_chunk_t *chunk;
size_t size, run_ind, run_pages, flag_dirty;
- arena_avail_tree_t *runs_avail;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
@@ -846,15 +992,14 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
/*
* The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated.
+ * if it was already dirty before being allocated and the caller
+ * doesn't claim to have cleaned it.
*/
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
+ if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
- runs_avail = dirty ? &arena->runs_avail_dirty :
- &arena->runs_avail_clean;
/* Mark pages as unallocated in the chunk map. */
if (dirty) {
@@ -862,9 +1007,6 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
CHUNK_MAP_DIRTY);
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
CHUNK_MAP_DIRTY);
-
- chunk->ndirty += run_pages;
- arena->ndirty += run_pages;
} else {
arena_mapbits_unallocated_set(chunk, run_ind, size,
arena_mapbits_unzeroed_get(chunk, run_ind));
@@ -888,8 +1030,8 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
run_ind+run_pages+nrun_pages-1) == nrun_size);
assert(arena_mapbits_dirty_get(chunk,
run_ind+run_pages+nrun_pages-1) == flag_dirty);
- arena_avail_tree_remove(runs_avail,
- arena_mapp_get(chunk, run_ind+run_pages));
+ arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
+ false, true);
size += nrun_size;
run_pages += nrun_pages;
@@ -915,8 +1057,8 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
prun_size);
assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
- arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk,
- run_ind));
+ arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
+ false);
size += prun_size;
run_pages += prun_pages;
@@ -931,19 +1073,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, run_ind));
-
- if (dirty) {
- /*
- * Insert into chunks_dirty before potentially calling
- * arena_chunk_dealloc(), so that chunks_dirty and
- * arena->ndirty are consistent.
- */
- if (chunk->dirtied == false) {
- ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
- chunk->dirtied = true;
- }
- }
+ arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
/* Deallocate chunk if it is now completely unused. */
if (size == arena_maxclass) {
@@ -992,7 +1122,7 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
flag_dirty);
- arena_run_dalloc(arena, run, false);
+ arena_run_dalloc(arena, run, false, false);
}
static void
@@ -1025,7 +1155,7 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty);
arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
- dirty);
+ dirty, false);
}
static arena_run_t *
@@ -1536,7 +1666,7 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
((past - run_ind) << LG_PAGE), false);
/* npages = past - run_ind; */
}
- arena_run_dalloc(arena, run, true);
+ arena_run_dalloc(arena, run, true, false);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
@@ -1629,52 +1759,6 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
mapelm = arena_mapp_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}
-void
-arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
- arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats)
-{
- unsigned i;
-
- malloc_mutex_lock(&arena->lock);
- *nactive += arena->nactive;
- *ndirty += arena->ndirty;
-
- astats->mapped += arena->stats.mapped;
- astats->npurge += arena->stats.npurge;
- astats->nmadvise += arena->stats.nmadvise;
- astats->purged += arena->stats.purged;
- astats->allocated_large += arena->stats.allocated_large;
- astats->nmalloc_large += arena->stats.nmalloc_large;
- astats->ndalloc_large += arena->stats.ndalloc_large;
- astats->nrequests_large += arena->stats.nrequests_large;
-
- for (i = 0; i < nlclasses; i++) {
- lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
- lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
- lstats[i].nrequests += arena->stats.lstats[i].nrequests;
- lstats[i].curruns += arena->stats.lstats[i].curruns;
- }
- malloc_mutex_unlock(&arena->lock);
-
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
-
- malloc_mutex_lock(&bin->lock);
- bstats[i].allocated += bin->stats.allocated;
- bstats[i].nmalloc += bin->stats.nmalloc;
- bstats[i].ndalloc += bin->stats.ndalloc;
- bstats[i].nrequests += bin->stats.nrequests;
- if (config_tcache) {
- bstats[i].nfills += bin->stats.nfills;
- bstats[i].nflushes += bin->stats.nflushes;
- }
- bstats[i].nruns += bin->stats.nruns;
- bstats[i].reruns += bin->stats.reruns;
- bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(&bin->lock);
- }
-}
void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
@@ -1694,7 +1778,7 @@ arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
}
}
- arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+ arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
}
void
@@ -1887,8 +1971,9 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
}
void *
-arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache)
+arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
+ bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
@@ -1907,9 +1992,9 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
} else
- ret = arena_malloc(NULL, size + extra, zero, try_tcache);
+ ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
if (ret == NULL) {
if (extra == 0)
@@ -1919,9 +2004,10 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ arena);
} else
- ret = arena_malloc(NULL, size, zero, try_tcache);
+ ret = arena_malloc(arena, size, zero, try_tcache_alloc);
if (ret == NULL)
return (NULL);
@@ -1936,10 +2022,78 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
copysize = (size < oldsize) ? size : oldsize;
VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- iqalloc(ptr);
+ iqallocx(ptr, try_tcache_dalloc);
+ return (ret);
+}
+
+dss_prec_t
+arena_dss_prec_get(arena_t *arena)
+{
+ dss_prec_t ret;
+
+ malloc_mutex_lock(&arena->lock);
+ ret = arena->dss_prec;
+ malloc_mutex_unlock(&arena->lock);
return (ret);
}
+void
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
+{
+
+ malloc_mutex_lock(&arena->lock);
+ arena->dss_prec = dss_prec;
+ malloc_mutex_unlock(&arena->lock);
+}
+
+void
+arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+ size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats)
+{
+ unsigned i;
+
+ malloc_mutex_lock(&arena->lock);
+ *dss = dss_prec_names[arena->dss_prec];
+ *nactive += arena->nactive;
+ *ndirty += arena->ndirty;
+
+ astats->mapped += arena->stats.mapped;
+ astats->npurge += arena->stats.npurge;
+ astats->nmadvise += arena->stats.nmadvise;
+ astats->purged += arena->stats.purged;
+ astats->allocated_large += arena->stats.allocated_large;
+ astats->nmalloc_large += arena->stats.nmalloc_large;
+ astats->ndalloc_large += arena->stats.ndalloc_large;
+ astats->nrequests_large += arena->stats.nrequests_large;
+
+ for (i = 0; i < nlclasses; i++) {
+ lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
+ lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
+ lstats[i].nrequests += arena->stats.lstats[i].nrequests;
+ lstats[i].curruns += arena->stats.lstats[i].curruns;
+ }
+ malloc_mutex_unlock(&arena->lock);
+
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+
+ malloc_mutex_lock(&bin->lock);
+ bstats[i].allocated += bin->stats.allocated;
+ bstats[i].nmalloc += bin->stats.nmalloc;
+ bstats[i].ndalloc += bin->stats.ndalloc;
+ bstats[i].nrequests += bin->stats.nrequests;
+ if (config_tcache) {
+ bstats[i].nfills += bin->stats.nfills;
+ bstats[i].nflushes += bin->stats.nflushes;
+ }
+ bstats[i].nruns += bin->stats.nruns;
+ bstats[i].reruns += bin->stats.reruns;
+ bstats[i].curruns += bin->stats.curruns;
+ malloc_mutex_unlock(&bin->lock);
+ }
+}
+
bool
arena_new(arena_t *arena, unsigned ind)
{
@@ -1968,16 +2122,17 @@ arena_new(arena_t *arena, unsigned ind)
if (config_prof)
arena->prof_accumbytes = 0;
+ arena->dss_prec = chunk_dss_prec_get();
+
/* Initialize chunks. */
- ql_new(&arena->chunks_dirty);
+ arena_chunk_dirty_new(&arena->chunks_dirty);
arena->spare = NULL;
arena->nactive = 0;
arena->ndirty = 0;
arena->npurgatory = 0;
- arena_avail_tree_new(&arena->runs_avail_clean);
- arena_avail_tree_new(&arena->runs_avail_dirty);
+ arena_avail_tree_new(&arena->runs_avail);
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
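
With runs_avail_clean and runs_avail_dirty folded into a single runs_avail tree, run allocation becomes one best-fit lookup: the search key carries the requested size, and nsearch returns the smallest free run at least that large, dirty or clean. A self-contained sketch of the same best-fit rule over an array kept sorted by size (a stand-in for the red-black tree; the struct and function names are hypothetical):

    #include <stddef.h>
    #include <stdio.h>

    /* Free runs sorted by (size, address), as the runs_avail tree is. */
    struct free_run {
        size_t size;
        size_t addr;
    };

    /* Return the first run with size >= want (best fit), or NULL. */
    static const struct free_run *
    best_fit(const struct free_run *runs, size_t nruns, size_t want)
    {
        size_t i;

        for (i = 0; i < nruns; i++) {
            if (runs[i].size >= want)
                return (&runs[i]);
        }
        return (NULL);
    }

    int
    main(void)
    {
        const struct free_run runs[] = {
            {4096, 0x1000}, {8192, 0x9000}, {32768, 0x20000}
        };
        const struct free_run *r = best_fit(runs, 3, 12288);

        if (r != NULL)
            printf("selected run: size=%zu\n", r->size);  /* 32768 */
        return (0);
    }
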
diff --git a/contrib/jemalloc/src/base.c b/contrib/jemalloc/src/base.c
index bafaa7438016..b1a5945efaa4 100644
--- a/contrib/jemalloc/src/base.c
+++ b/contrib/jemalloc/src/base.c
@@ -32,7 +32,8 @@ base_pages_alloc(size_t minsize)
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
zero = false;
- base_pages = chunk_alloc(csize, chunksize, true, &zero);
+ base_pages = chunk_alloc(csize, chunksize, true, &zero,
+ chunk_dss_prec_get());
if (base_pages == NULL)
return (true);
base_next_addr = base_pages;
diff --git a/contrib/jemalloc/src/chunk.c b/contrib/jemalloc/src/chunk.c
index 6bc245447977..1a3bb4f673fc 100644
--- a/contrib/jemalloc/src/chunk.c
+++ b/contrib/jemalloc/src/chunk.c
@@ -4,7 +4,8 @@
/******************************************************************************/
/* Data. */
-size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
+const char *opt_dss = DSS_DEFAULT;
+size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
@@ -15,8 +16,10 @@ chunk_stats_t stats_chunks;
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
-static extent_tree_t chunks_szad;
-static extent_tree_t chunks_ad;
+static extent_tree_t chunks_szad_mmap;
+static extent_tree_t chunks_ad_mmap;
+static extent_tree_t chunks_szad_dss;
+static extent_tree_t chunks_ad_dss;
rtree_t *chunks_rtree;
@@ -30,19 +33,23 @@ size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static void *chunk_recycle(size_t size, size_t alignment, bool base,
+static void *chunk_recycle(extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
bool *zero);
-static void chunk_record(void *chunk, size_t size);
+static void chunk_record(extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, void *chunk, size_t size);
/******************************************************************************/
static void *
-chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
+chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
+ size_t alignment, bool base, bool *zero)
{
void *ret;
extent_node_t *node;
extent_node_t key;
size_t alloc_size, leadsize, trailsize;
+ bool zeroed;
if (base) {
/*
@@ -61,7 +68,7 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
key.addr = NULL;
key.size = alloc_size;
malloc_mutex_lock(&chunks_mtx);
- node = extent_tree_szad_nsearch(&chunks_szad, &key);
+ node = extent_tree_szad_nsearch(chunks_szad, &key);
if (node == NULL) {
malloc_mutex_unlock(&chunks_mtx);
return (NULL);
@@ -72,13 +79,13 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
/* Remove node from the tree. */
- extent_tree_szad_remove(&chunks_szad, node);
- extent_tree_ad_remove(&chunks_ad, node);
+ extent_tree_szad_remove(chunks_szad, node);
+ extent_tree_ad_remove(chunks_ad, node);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
node->size = leadsize;
- extent_tree_szad_insert(&chunks_szad, node);
- extent_tree_ad_insert(&chunks_ad, node);
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
if (trailsize != 0) {
@@ -101,23 +108,24 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
- extent_tree_szad_insert(&chunks_szad, node);
- extent_tree_ad_insert(&chunks_ad, node);
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
malloc_mutex_unlock(&chunks_mtx);
- if (node != NULL)
+ zeroed = false;
+ if (node != NULL) {
+ if (node->zeroed) {
+ zeroed = true;
+ *zero = true;
+ }
base_node_dealloc(node);
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
- /* Pages are zeroed as a side effect of pages_purge(). */
- *zero = true;
-#else
- if (*zero) {
+ }
+ if (zeroed == false && *zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
-#endif
return (ret);
}
@@ -128,7 +136,8 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
* advantage of them if they are returned.
*/
void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
+chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+ dss_prec_t dss_prec)
{
void *ret;
@@ -137,17 +146,26 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = chunk_recycle(size, alignment, base, zero);
- if (ret != NULL)
+ /* "primary" dss. */
+ if (config_dss && dss_prec == dss_prec_primary) {
+ if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
+ alignment, base, zero)) != NULL)
+ goto label_return;
+ if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
+ goto label_return;
+ }
+ /* mmap. */
+ if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
+ alignment, base, zero)) != NULL)
goto label_return;
-
- ret = chunk_alloc_mmap(size, alignment, zero);
- if (ret != NULL)
+ if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
goto label_return;
-
- if (config_dss) {
- ret = chunk_alloc_dss(size, alignment, zero);
- if (ret != NULL)
+ /* "secondary" dss. */
+ if (config_dss && dss_prec == dss_prec_secondary) {
+ if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
+ alignment, base, zero)) != NULL)
+ goto label_return;
+ if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
goto label_return;
}
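
The reworked chunk_alloc() tries its backing stores in precedence order: with dss set to "primary" it recycles and grows the dss first and uses mmap as the fallback, while "secondary" only reaches the dss after mmap fails. A compact sketch of that fallback chain (stub backends standing in for chunk_recycle()/chunk_alloc_dss()/chunk_alloc_mmap(); all names here are illustrative):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef enum {
        PREC_DISABLED,  /* Never use the dss. */
        PREC_PRIMARY,   /* Try the dss before mmap. */
        PREC_SECONDARY  /* Try the dss only after mmap fails. */
    } prec_t;

    /* Stand-in backends; real code would recycle or map new chunks. */
    static void *
    alloc_dss(size_t size)
    {
        return (malloc(size));  /* Pretend the dss can always grow. */
    }

    static void *
    alloc_mmap(size_t size)
    {
        return (malloc(size));
    }

    static void *
    alloc_with_precedence(size_t size, prec_t prec)
    {
        void *ret;

        if (prec == PREC_PRIMARY && (ret = alloc_dss(size)) != NULL)
            return (ret);
        if ((ret = alloc_mmap(size)) != NULL)
            return (ret);
        if (prec == PREC_SECONDARY && (ret = alloc_dss(size)) != NULL)
            return (ret);
        return (NULL);
    }

    int
    main(void)
    {
        void *p = alloc_with_precedence(4096, PREC_PRIMARY);

        printf("got %p\n", p);
        free(p);
        return (0);
    }
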
@@ -189,11 +207,13 @@ label_return:
}
static void
-chunk_record(void *chunk, size_t size)
+chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
+ size_t size)
{
+ bool unzeroed;
extent_node_t *xnode, *node, *prev, key;
- pages_purge(chunk, size);
+ unzeroed = pages_purge(chunk, size);
/*
* Allocate a node before acquiring chunks_mtx even though it might not
@@ -205,7 +225,7 @@ chunk_record(void *chunk, size_t size)
malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
- node = extent_tree_ad_nsearch(&chunks_ad, &key);
+ node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
@@ -213,10 +233,11 @@ chunk_record(void *chunk, size_t size)
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
- extent_tree_szad_remove(&chunks_szad, node);
+ extent_tree_szad_remove(chunks_szad, node);
node->addr = chunk;
node->size += size;
- extent_tree_szad_insert(&chunks_szad, node);
+ node->zeroed = (node->zeroed && (unzeroed == false));
+ extent_tree_szad_insert(chunks_szad, node);
if (xnode != NULL)
base_node_dealloc(xnode);
} else {
@@ -234,12 +255,13 @@ chunk_record(void *chunk, size_t size)
node = xnode;
node->addr = chunk;
node->size = size;
- extent_tree_ad_insert(&chunks_ad, node);
- extent_tree_szad_insert(&chunks_szad, node);
+ node->zeroed = (unzeroed == false);
+ extent_tree_ad_insert(chunks_ad, node);
+ extent_tree_szad_insert(chunks_szad, node);
}
/* Try to coalesce backward. */
- prev = extent_tree_ad_prev(&chunks_ad, node);
+ prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
@@ -247,13 +269,14 @@ chunk_record(void *chunk, size_t size)
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
*/
- extent_tree_szad_remove(&chunks_szad, prev);
- extent_tree_ad_remove(&chunks_ad, prev);
+ extent_tree_szad_remove(chunks_szad, prev);
+ extent_tree_ad_remove(chunks_ad, prev);
- extent_tree_szad_remove(&chunks_szad, node);
+ extent_tree_szad_remove(chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
- extent_tree_szad_insert(&chunks_szad, node);
+ node->zeroed = (node->zeroed && prev->zeroed);
+ extent_tree_szad_insert(chunks_szad, node);
base_node_dealloc(prev);
}
@@ -261,6 +284,20 @@ chunk_record(void *chunk, size_t size)
}
void
+chunk_unmap(void *chunk, size_t size)
+{
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+ if (config_dss && chunk_in_dss(chunk))
+ chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
+ else if (chunk_dealloc_mmap(chunk, size))
+ chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
+}
+
+void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{
@@ -273,15 +310,13 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
+ assert(stats_chunks.curchunks >= (size / chunksize));
stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
}
- if (unmap) {
- if ((config_dss && chunk_in_dss(chunk)) ||
- chunk_dealloc_mmap(chunk, size))
- chunk_record(chunk, size);
- }
+ if (unmap)
+ chunk_unmap(chunk, size);
}
bool
@@ -301,8 +336,10 @@ chunk_boot(void)
}
if (config_dss && chunk_dss_boot())
return (true);
- extent_tree_szad_new(&chunks_szad);
- extent_tree_ad_new(&chunks_ad);
+ extent_tree_szad_new(&chunks_szad_mmap);
+ extent_tree_ad_new(&chunks_ad_mmap);
+ extent_tree_szad_new(&chunks_szad_dss);
+ extent_tree_ad_new(&chunks_ad_dss);
if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk);
@@ -312,3 +349,33 @@ chunk_boot(void)
return (false);
}
+
+void
+chunk_prefork(void)
+{
+
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_ivsalloc)
+ rtree_prefork(chunks_rtree);
+ chunk_dss_prefork();
+}
+
+void
+chunk_postfork_parent(void)
+{
+
+ chunk_dss_postfork_parent();
+ if (config_ivsalloc)
+ rtree_postfork_parent(chunks_rtree);
+ malloc_mutex_postfork_parent(&chunks_mtx);
+}
+
+void
+chunk_postfork_child(void)
+{
+
+ chunk_dss_postfork_child();
+ if (config_ivsalloc)
+ rtree_postfork_child(chunks_rtree);
+ malloc_mutex_postfork_child(&chunks_mtx);
+}
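
chunk_record() now tracks whether a recycled extent is known to be zero-filled, and coalescing takes the logical AND of the neighbors' flags so that chunk_recycle() can skip the memset when the whole extent is still zeroed. A small sketch of that merge rule with a toy extent type (not the extent_node_t tree code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct extent {
        uintptr_t addr;
        size_t size;
        bool zeroed;    /* All bytes known to be zero. */
    };

    /* Merge b into a when they are address-adjacent; true on success. */
    static bool
    extent_try_coalesce(struct extent *a, const struct extent *b)
    {
        if (a->addr + a->size != b->addr)
            return (false);
        a->size += b->size;
        /* The union is zeroed only if both pieces were. */
        a->zeroed = a->zeroed && b->zeroed;
        return (true);
    }

    int
    main(void)
    {
        struct extent lo = {0x100000, 0x20000, true};
        struct extent hi = {0x120000, 0x20000, false};

        if (extent_try_coalesce(&lo, &hi)) {
            /* Prints size=40000 zeroed=0. */
            printf("size=%zx zeroed=%d\n", lo.size, lo.zeroed);
        }
        return (0);
    }
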
diff --git a/contrib/jemalloc/src/chunk_dss.c b/contrib/jemalloc/src/chunk_dss.c
index 2d68e4804d7c..24781cc52dca 100644
--- a/contrib/jemalloc/src/chunk_dss.c
+++ b/contrib/jemalloc/src/chunk_dss.c
@@ -3,6 +3,16 @@
/******************************************************************************/
/* Data. */
+const char *dss_prec_names[] = {
+ "disabled",
+ "primary",
+ "secondary",
+ "N/A"
+};
+
+/* Current dss precedence default, used when creating new arenas. */
+static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
+
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
@@ -29,6 +39,31 @@ sbrk(intptr_t increment)
}
#endif
+dss_prec_t
+chunk_dss_prec_get(void)
+{
+ dss_prec_t ret;
+
+ if (config_dss == false)
+ return (dss_prec_disabled);
+ malloc_mutex_lock(&dss_mtx);
+ ret = dss_prec_default;
+ malloc_mutex_unlock(&dss_mtx);
+ return (ret);
+}
+
+bool
+chunk_dss_prec_set(dss_prec_t dss_prec)
+{
+
+ if (config_dss == false)
+ return (true);
+ malloc_mutex_lock(&dss_mtx);
+ dss_prec_default = dss_prec;
+ malloc_mutex_unlock(&dss_mtx);
+ return (false);
+}
+
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
@@ -88,7 +123,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
- chunk_dealloc(cpad, cpad_size, true);
+ chunk_unmap(cpad, cpad_size);
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
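
dss_prec_names[] is kept in the same order as the dss_prec_t values, which is what lets the dss-related mallctls translate between the strings "disabled"/"primary"/"secondary" and the enum with a plain table scan (the loop in arena_i_dss_ctl() in ctl.c below does exactly that). A standalone sketch of the lookup, using a local copy of the table and illustrative type names:

    #include <stdio.h>
    #include <string.h>

    typedef enum {
        DSS_DISABLED,
        DSS_PRIMARY,
        DSS_SECONDARY,
        DSS_LIMIT
    } dss_prec;

    static const char *prec_names[] = {"disabled", "primary", "secondary"};

    /* Return the matching precedence, or DSS_LIMIT for an unknown name. */
    static dss_prec
    prec_from_name(const char *name)
    {
        int i;

        for (i = 0; i < DSS_LIMIT; i++) {
            if (strcmp(prec_names[i], name) == 0)
                return ((dss_prec)i);
        }
        return (DSS_LIMIT);
    }

    int
    main(void)
    {
        printf("%d\n", prec_from_name("secondary")); /* 2 */
        printf("%d\n", prec_from_name("bogus"));     /* 3 (invalid) */
        return (0);
    }
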
diff --git a/contrib/jemalloc/src/chunk_mmap.c b/contrib/jemalloc/src/chunk_mmap.c
index c8da6556b098..8a42e75915f8 100644
--- a/contrib/jemalloc/src/chunk_mmap.c
+++ b/contrib/jemalloc/src/chunk_mmap.c
@@ -113,22 +113,30 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
#endif
}
-void
+bool
pages_purge(void *addr, size_t length)
{
+ bool unzeroed;
#ifdef _WIN32
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
+ unzeroed = true;
#else
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
# else
# error "No method defined for purging unused dirty pages."
# endif
- madvise(addr, length, JEMALLOC_MADV_PURGE);
+ int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
+ unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
+# undef JEMALLOC_MADV_PURGE
+# undef JEMALLOC_MADV_ZEROS
#endif
+ return (unzeroed);
}
static void *
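
pages_purge() now returns whether the purged range may still hold old data: madvise(MADV_DONTNEED) leaves anonymous pages reading back as zeros, MADV_FREE does not, and a failed call guarantees nothing. A hedged standalone sketch of the same contract, assuming a POSIX system with MADV_DONTNEED (not the portable jemalloc wrapper above):

    #define _DEFAULT_SOURCE
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Purge a range; return true if it may be non-zero afterwards. */
    static bool
    purge_pages(void *addr, size_t len)
    {
        int err = madvise(addr, len, MADV_DONTNEED);

        /* DONTNEED zeroes anonymous pages, but only if the call worked. */
        return (err != 0);
    }

    int
    main(void)
    {
        size_t len = 1 << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return (1);
        memset(p, 0xa5, len);
        printf("unzeroed=%d first byte=%#x\n", purge_pages(p, len),
            (unsigned)(unsigned char)p[0]);
        munmap(p, len);
        return (0);
    }
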
diff --git a/contrib/jemalloc/src/ctl.c b/contrib/jemalloc/src/ctl.c
index 55e766777fd7..6e01b1e27e13 100644
--- a/contrib/jemalloc/src/ctl.c
+++ b/contrib/jemalloc/src/ctl.c
@@ -48,8 +48,8 @@ static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
-const ctl_named_node_t *n##_index(const size_t *mib, size_t miblen, \
- size_t i);
+static const ctl_named_node_t *n##_index(const size_t *mib, \
+ size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
@@ -58,6 +58,7 @@ static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
static void ctl_arena_refresh(arena_t *arena, unsigned i);
+static bool ctl_grow(void);
static void ctl_refresh(void);
static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
@@ -88,6 +89,7 @@ CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
+CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
@@ -110,6 +112,10 @@ CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
+CTL_PROTO(arena_i_purge)
+static void arena_purge(unsigned arena_ind);
+CTL_PROTO(arena_i_dss)
+INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
@@ -125,6 +131,7 @@ CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
+CTL_PROTO(arenas_extend)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
@@ -158,6 +165,7 @@ CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
@@ -223,6 +231,7 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
+ {NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
@@ -247,6 +256,18 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_accum"), CTL(opt_prof_accum)}
};
+static const ctl_named_node_t arena_i_node[] = {
+ {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("dss"), CTL(arena_i_dss)}
+};
+static const ctl_named_node_t super_arena_i_node[] = {
+ {NAME(""), CHILD(named, arena_i)}
+};
+
+static const ctl_indexed_node_t arena_node[] = {
+ {INDEX(arena_i)}
+};
+
static const ctl_named_node_t arenas_bin_i_node[] = {
{NAME("size"), CTL(arenas_bin_i_size)},
{NAME("nregs"), CTL(arenas_bin_i_nregs)},
@@ -282,7 +303,8 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("bin"), CHILD(indexed, arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)},
{NAME("lrun"), CHILD(indexed, arenas_lrun)},
- {NAME("purge"), CTL(arenas_purge)}
+ {NAME("purge"), CTL(arenas_purge)},
+ {NAME("extend"), CTL(arenas_extend)}
};
static const ctl_named_node_t prof_node[] = {
@@ -352,6 +374,7 @@ static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
@@ -387,6 +410,7 @@ static const ctl_named_node_t root_node[] = {
{NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)},
+ {NAME("arena"), CHILD(indexed, arena)},
{NAME("arenas"), CHILD(named, arenas)},
{NAME("prof"), CHILD(named, prof)},
{NAME("stats"), CHILD(named, stats)}
@@ -420,6 +444,7 @@ static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
+ astats->dss = dss_prec_names[dss_prec_limit];
astats->pactive = 0;
astats->pdirty = 0;
if (config_stats) {
@@ -439,8 +464,8 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
- arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
- &cstats->astats, cstats->bstats, cstats->lstats);
+ arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
+ &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].allocated;
@@ -500,7 +525,7 @@ static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
- ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];
+ ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats);
@@ -518,11 +543,72 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
}
}
+static bool
+ctl_grow(void)
+{
+ size_t astats_size;
+ ctl_arena_stats_t *astats;
+ arena_t **tarenas;
+
+ /* Extend arena stats and arenas arrays. */
+ astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
+ if (ctl_stats.narenas == narenas_auto) {
+ /* ctl_stats.arenas and arenas came from base_alloc(). */
+ astats = (ctl_arena_stats_t *)imalloc(astats_size);
+ if (astats == NULL)
+ return (true);
+ memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+ sizeof(ctl_arena_stats_t));
+
+ tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
+ sizeof(arena_t *));
+ if (tarenas == NULL) {
+ idalloc(astats);
+ return (true);
+ }
+ memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
+ } else {
+ astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
+ astats_size, 0, 0, false, false);
+ if (astats == NULL)
+ return (true);
+
+ tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
+ sizeof(arena_t *), 0, 0, false, false);
+ if (tarenas == NULL)
+ return (true);
+ }
+ /* Initialize the new astats and arenas elements. */
+ memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
+ if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
+ return (true);
+ tarenas[ctl_stats.narenas] = NULL;
+ /* Swap merged stats to their new location. */
+ {
+ ctl_arena_stats_t tstats;
+ memcpy(&tstats, &astats[ctl_stats.narenas],
+ sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas],
+ &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas + 1], &tstats,
+ sizeof(ctl_arena_stats_t));
+ }
+ ctl_stats.arenas = astats;
+ ctl_stats.narenas++;
+ malloc_mutex_lock(&arenas_lock);
+ arenas = tarenas;
+ narenas_total++;
+ arenas_extend(narenas_total - 1);
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (false);
+}
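
ctl_grow() has to add room for the new arena while keeping the merged-totals element in the last slot of ctl_stats.arenas, so after growing the array it swaps the old summary into the new tail position. A small sketch of that append-and-swap pattern using plain realloc() (illustrative only; the code above goes through imalloc()/iralloc() because it may still be replacing base_alloc()ed arrays):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct stats { int id; };       /* Stand-in for ctl_arena_stats_t. */

    /*
     * stats[0..n-1] are per-arena entries, stats[n] is the merged summary.
     * Grow to n+1 arenas and keep the summary as the final element.
     */
    static struct stats *
    grow_keep_summary(struct stats *stats, size_t *n)
    {
        struct stats *p, tmp;

        p = realloc(stats, (*n + 2) * sizeof(*p));
        if (p == NULL)
            return (NULL);
        memset(&p[*n + 1], 0, sizeof(*p));  /* Fresh slot. */
        tmp = p[*n];                        /* Old summary... */
        p[*n] = p[*n + 1];                  /* ...new arena entry here... */
        p[*n + 1] = tmp;                    /* ...summary moves to the end. */
        (*n)++;
        return (p);
    }

    int
    main(void)
    {
        size_t n = 2;
        struct stats *s = calloc(n + 1, sizeof(*s));

        if (s == NULL)
            return (1);
        s[n].id = 999;                      /* Mark the summary slot. */
        s = grow_keep_summary(s, &n);
        printf("n=%zu summary id=%d\n", n, s[n].id);  /* n=3, 999 */
        free(s);
        return (0);
    }
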
+
static void
ctl_refresh(void)
{
unsigned i;
- VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
if (config_stats) {
malloc_mutex_lock(&chunks_mtx);
@@ -542,19 +628,19 @@ ctl_refresh(void)
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
*/
- ctl_stats.arenas[narenas].nthreads = 0;
- ctl_arena_clear(&ctl_stats.arenas[narenas]);
+ ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
+ ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
- for (i = 0; i < narenas; i++) {
+ memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+ for (i = 0; i < ctl_stats.narenas; i++) {
if (arenas[i] != NULL)
ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
else
ctl_stats.arenas[i].nthreads = 0;
}
malloc_mutex_unlock(&arenas_lock);
- for (i = 0; i < narenas; i++) {
+ for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized;
@@ -563,11 +649,13 @@ ctl_refresh(void)
}
if (config_stats) {
- ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
- + ctl_stats.arenas[narenas].astats.allocated_large
+ ctl_stats.allocated =
+ ctl_stats.arenas[ctl_stats.narenas].allocated_small
+ + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
+ + ctl_stats.huge.allocated;
+ ctl_stats.active =
+ (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
+ ctl_stats.huge.allocated;
- ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
- LG_PAGE) + ctl_stats.huge.allocated;
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
}
@@ -585,13 +673,15 @@ ctl_init(void)
* Allocate space for one extra arena stats element, which
* contains summed stats across all arenas.
*/
+ assert(narenas_auto == narenas_total_get());
+ ctl_stats.narenas = narenas_auto;
ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
- (narenas + 1) * sizeof(ctl_arena_stats_t));
+ (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
if (ctl_stats.arenas == NULL) {
ret = true;
goto label_return;
}
- memset(ctl_stats.arenas, 0, (narenas + 1) *
+ memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
/*
@@ -601,14 +691,14 @@ ctl_init(void)
*/
if (config_stats) {
unsigned i;
- for (i = 0; i <= narenas; i++) {
+ for (i = 0; i <= ctl_stats.narenas; i++) {
if (ctl_arena_init(&ctl_stats.arenas[i])) {
ret = true;
goto label_return;
}
}
}
- ctl_stats.arenas[narenas].initialized = true;
+ ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
ctl_refresh();
@@ -827,6 +917,27 @@ ctl_boot(void)
return (false);
}
+void
+ctl_prefork(void)
+{
+
+ malloc_mutex_lock(&ctl_mtx);
+}
+
+void
+ctl_postfork_parent(void)
+{
+
+ malloc_mutex_postfork_parent(&ctl_mtx);
+}
+
+void
+ctl_postfork_child(void)
+{
+
+ malloc_mutex_postfork_child(&ctl_mtx);
+}
+
/******************************************************************************/
/* *_ctl() functions. */
@@ -1032,8 +1143,8 @@ thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
}
READ(oldval, bool);
-label_return:
ret = 0;
+label_return:
return (ret);
}
@@ -1063,13 +1174,14 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
unsigned newind, oldind;
+ malloc_mutex_lock(&ctl_mtx);
newind = oldind = choose_arena(NULL)->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
if (newind != oldind) {
arena_t *arena;
- if (newind >= narenas) {
+ if (newind >= ctl_stats.narenas) {
/* New arena index is out of range. */
ret = EFAULT;
goto label_return;
@@ -1102,6 +1214,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
label_return:
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -1135,6 +1248,7 @@ CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
@@ -1160,10 +1274,121 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
/******************************************************************************/
+/* ctl_mtx must be held during execution of this function. */
+static void
+arena_purge(unsigned arena_ind)
+{
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+
+ malloc_mutex_lock(&arenas_lock);
+ memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+ malloc_mutex_unlock(&arenas_lock);
+
+ if (arena_ind == ctl_stats.narenas) {
+ unsigned i;
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ if (tarenas[i] != NULL)
+ arena_purge_all(tarenas[i]);
+ }
+ } else {
+ assert(arena_ind < ctl_stats.narenas);
+ if (tarenas[arena_ind] != NULL)
+ arena_purge_all(tarenas[arena_ind]);
+ }
+}
+
+static int
+arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+
+ READONLY();
+ WRITEONLY();
+ malloc_mutex_lock(&ctl_mtx);
+ arena_purge(mib[1]);
+ malloc_mutex_unlock(&ctl_mtx);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret, i;
+ bool match, err;
+ const char *dss;
+ unsigned arena_ind = mib[1];
+ dss_prec_t dss_prec_old = dss_prec_limit;
+ dss_prec_t dss_prec = dss_prec_limit;
+
+ malloc_mutex_lock(&ctl_mtx);
+ WRITE(dss, const char *);
+ match = false;
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strcmp(dss_prec_names[i], dss) == 0) {
+ dss_prec = i;
+ match = true;
+ break;
+ }
+ }
+ if (match == false) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ if (arena_ind < ctl_stats.narenas) {
+ arena_t *arena = arenas[arena_ind];
+ if (arena != NULL) {
+ dss_prec_old = arena_dss_prec_get(arena);
+ arena_dss_prec_set(arena, dss_prec);
+ err = false;
+ } else
+ err = true;
+ } else {
+ dss_prec_old = chunk_dss_prec_get();
+ err = chunk_dss_prec_set(dss_prec);
+ }
+ dss = dss_prec_names[dss_prec_old];
+ READ(dss, const char *);
+ if (err) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static const ctl_named_node_t *
+arena_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+ const ctl_named_node_t * ret;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (i > ctl_stats.narenas) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = super_arena_i_node;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+
+/******************************************************************************/
+
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1173,7 +1398,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
}
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1182,7 +1407,27 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_lrun_i_node);
}
-CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
+static int
+arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned narenas;
+
+ malloc_mutex_lock(&ctl_mtx);
+ READONLY();
+ if (*oldlenp != sizeof(unsigned)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ narenas = ctl_stats.narenas;
+ READ(narenas, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1193,13 +1438,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
malloc_mutex_lock(&ctl_mtx);
READONLY();
- if (*oldlenp != narenas * sizeof(bool)) {
+ if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
- nread = (*oldlenp < narenas * sizeof(bool))
- ? (*oldlenp / sizeof(bool)) : narenas;
+ nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
+ ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else {
ret = 0;
- nread = narenas;
+ nread = ctl_stats.narenas;
}
for (i = 0; i < nread; i++)
@@ -1222,36 +1467,43 @@ arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
- unsigned arena;
+ unsigned arena_ind;
+ malloc_mutex_lock(&ctl_mtx);
WRITEONLY();
- arena = UINT_MAX;
- WRITE(arena, unsigned);
- if (newp != NULL && arena >= narenas) {
+ arena_ind = UINT_MAX;
+ WRITE(arena_ind, unsigned);
+ if (newp != NULL && arena_ind >= ctl_stats.narenas)
ret = EFAULT;
- goto label_return;
- } else {
- VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+ else {
+ if (arena_ind == UINT_MAX)
+ arena_ind = ctl_stats.narenas;
+ arena_purge(arena_ind);
+ ret = 0;
+ }
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
- malloc_mutex_unlock(&arenas_lock);
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
- if (arena == UINT_MAX) {
- unsigned i;
- for (i = 0; i < narenas; i++) {
- if (tarenas[i] != NULL)
- arena_purge_all(tarenas[i]);
- }
- } else {
- assert(arena < narenas);
- if (tarenas[arena] != NULL)
- arena_purge_all(tarenas[arena]);
- }
+static int
+arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+
+ malloc_mutex_lock(&ctl_mtx);
+ READONLY();
+ if (ctl_grow()) {
+ ret = EAGAIN;
+ goto label_return;
}
+ READ(ctl_stats.narenas - 1, unsigned);
ret = 0;
label_return:
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -1356,7 +1608,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{
@@ -1374,7 +1626,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{
@@ -1384,6 +1636,7 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
}
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
@@ -1395,13 +1648,13 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx);
- if (ctl_stats.arenas[i].initialized == false) {
+ if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
ret = NULL;
goto label_return;
}
diff --git a/contrib/jemalloc/src/huge.c b/contrib/jemalloc/src/huge.c
index 8a4ec942410e..aa08d43d3626 100644
--- a/contrib/jemalloc/src/huge.c
+++ b/contrib/jemalloc/src/huge.c
@@ -48,7 +48,8 @@ huge_palloc(size_t size, size_t alignment, bool zero)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- ret = chunk_alloc(csize, alignment, false, &is_zeroed);
+ ret = chunk_alloc(csize, alignment, false, &is_zeroed,
+ chunk_dss_prec_get());
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@@ -101,7 +102,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero)
+ size_t alignment, bool zero, bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
@@ -180,7 +181,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
#endif
{
memcpy(ret, ptr, copysize);
- iqalloc(ptr);
+ iqallocx(ptr, try_tcache_dalloc);
}
return (ret);
}
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index fa9fcf0fbd31..aaf5012cb164 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -37,7 +37,8 @@ unsigned ncpus;
malloc_mutex_t arenas_lock;
arena_t **arenas;
-unsigned narenas;
+unsigned narenas_total;
+unsigned narenas_auto;
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
@@ -148,14 +149,14 @@ choose_arena_hard(void)
{
arena_t *ret;
- if (narenas > 1) {
+ if (narenas_auto > 1) {
unsigned i, choose, first_null;
choose = 0;
- first_null = narenas;
+ first_null = narenas_auto;
malloc_mutex_lock(&arenas_lock);
assert(arenas[0] != NULL);
- for (i = 1; i < narenas; i++) {
+ for (i = 1; i < narenas_auto; i++) {
if (arenas[i] != NULL) {
/*
* Choose the first arena that has the lowest
@@ -164,7 +165,7 @@ choose_arena_hard(void)
if (arenas[i]->nthreads <
arenas[choose]->nthreads)
choose = i;
- } else if (first_null == narenas) {
+ } else if (first_null == narenas_auto) {
/*
* Record the index of the first uninitialized
* arena, in case all extant arenas are in use.
@@ -178,7 +179,8 @@ choose_arena_hard(void)
}
}
- if (arenas[choose]->nthreads == 0 || first_null == narenas) {
+ if (arenas[choose]->nthreads == 0
+ || first_null == narenas_auto) {
/*
* Use an unloaded arena, or the least loaded arena if
* all arenas are already initialized.
@@ -207,7 +209,7 @@ stats_print_atexit(void)
{
if (config_tcache && config_stats) {
- unsigned i;
+ unsigned narenas, i;
/*
* Merge stats from extant threads. This is racy, since
@@ -216,7 +218,7 @@ stats_print_atexit(void)
* out of date by the time they are reported, if other threads
* continue to allocate.
*/
- for (i = 0; i < narenas; i++) {
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena = arenas[i];
if (arena != NULL) {
tcache_t *tcache;
@@ -258,12 +260,13 @@ malloc_ncpus(void)
result = si.dwNumberOfProcessors;
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
+#endif
if (result == -1) {
/* Error. */
ret = 1;
- }
-#endif
- ret = (unsigned)result;
+ } else {
+ ret = (unsigned)result;
+ }
return (ret);
}
@@ -381,6 +384,22 @@ malloc_conf_init(void)
const char *opts, *k, *v;
size_t klen, vlen;
+ /*
+ * Automatically configure valgrind before processing options. The
+ * valgrind option remains in jemalloc 3.x for compatibility reasons.
+ */
+ if (config_valgrind) {
+ opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
+ if (config_fill && opt_valgrind) {
+ opt_junk = false;
+ assert(opt_zero == false);
+ opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+ opt_redzone = true;
+ }
+ if (config_tcache && opt_valgrind)
+ opt_tcache = false;
+ }
+
for (i = 0; i < 3; i++) {
/* Get runtime configuration. */
switch (i) {
@@ -542,6 +561,30 @@ malloc_conf_init(void)
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
(config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
+ if (strncmp("dss", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strncmp(dss_prec_names[i], v, vlen)
+ == 0) {
+ if (chunk_dss_prec_set(i)) {
+ malloc_conf_error(
+ "Error setting dss",
+ k, klen, v, vlen);
+ } else {
+ opt_dss =
+ dss_prec_names[i];
+ match = true;
+ break;
+ }
+ }
+ }
+ if (match == false) {
+ malloc_conf_error("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
SIZE_T_MAX)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
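The new "dss" option accepts the precedence names known to chunk_dss_prec_set() ("disabled", "primary", or "secondary" in this version) and can be set at startup, e.g. MALLOC_CONF="dss:primary". The precedence can also be changed per arena at run time through the "arena.<i>.dss" mallctl; a sketch under the same <malloc_np.h> assumption:

#include <stdio.h>
#include <malloc_np.h>

/* prec is one of "disabled", "primary" or "secondary". */
static int
set_arena_dss(unsigned ind, const char *prec)
{
	char name[64];

	snprintf(name, sizeof(name), "arena.%u.dss", ind);
	/* String-valued controls take a pointer to the char pointer. */
	return (mallctl(name, NULL, NULL, &prec, sizeof(const char *)));
}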
@@ -558,20 +601,7 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_utrace, "utrace")
}
if (config_valgrind) {
- bool hit;
- CONF_HANDLE_BOOL_HIT(opt_valgrind,
- "valgrind", hit)
- if (config_fill && opt_valgrind && hit) {
- opt_junk = false;
- opt_zero = false;
- if (opt_quarantine == 0) {
- opt_quarantine =
- JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
- }
- opt_redzone = true;
- }
- if (hit)
- continue;
+ CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
}
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
@@ -700,9 +730,9 @@ malloc_init_hard(void)
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
- narenas = 1;
+ narenas_total = narenas_auto = 1;
arenas = init_arenas;
- memset(arenas, 0, sizeof(arena_t *) * narenas);
+ memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/*
* Initialize one arena here. The rest are lazily created in
@@ -760,20 +790,21 @@ malloc_init_hard(void)
else
opt_narenas = 1;
}
- narenas = opt_narenas;
+ narenas_auto = opt_narenas;
/*
* Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/
- if (narenas > chunksize / sizeof(arena_t *)) {
- narenas = chunksize / sizeof(arena_t *);
+ if (narenas_auto > chunksize / sizeof(arena_t *)) {
+ narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
- narenas);
+ narenas_auto);
}
+ narenas_total = narenas_auto;
/* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
if (arenas == NULL) {
malloc_mutex_unlock(&init_lock);
return (true);
@@ -782,7 +813,7 @@ malloc_init_hard(void)
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
*/
- memset(arenas, 0, sizeof(arena_t *) * narenas);
+ memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */
arenas[0] = init_arenas[0];
@@ -1267,11 +1298,10 @@ je_valloc(size_t size)
* passed an extra argument for the caller return address, which will be
* ignored.
*/
-JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
- je_realloc;
-JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
+JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
+JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
je_memalign;
#endif
@@ -1284,7 +1314,7 @@ JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
*/
size_t
-je_malloc_usable_size(const void *ptr)
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
@@ -1348,18 +1378,19 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_INLINE void *
-iallocm(size_t usize, size_t alignment, bool zero)
+iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena)
{
assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
alignment)));
if (alignment != 0)
- return (ipalloc(usize, alignment, zero));
+ return (ipallocx(usize, alignment, zero, try_tcache, arena));
else if (zero)
- return (icalloc(usize));
+ return (icallocx(usize, try_tcache, arena));
else
- return (imalloc(usize));
+ return (imallocx(usize, try_tcache, arena));
}
int
@@ -1370,6 +1401,9 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ arena_t *arena;
+ bool try_tcache;
assert(ptr != NULL);
assert(size != 0);
@@ -1377,6 +1411,14 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
if (malloc_init())
goto label_oom;
+ if (arena_ind != UINT_MAX) {
+ arena = arenas[arena_ind];
+ try_tcache = false;
+ } else {
+ arena = NULL;
+ try_tcache = true;
+ }
+
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
if (usize == 0)
goto label_oom;
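The "(flags >> 8) - 1" decode above is the counterpart of the ALLOCM_ARENA() flag constructor, which encodes (arena index + 1) << 8 so that zero still means "no arena specified". A sketch of allocating from an explicitly chosen arena via the experimental API (allocm() and the ALLOCM_* macros assumed to come from <malloc_np.h>; ind would typically be obtained from "arenas.extend"):

#include <stddef.h>
#include <malloc_np.h>

static void *
alloc_from_arena(size_t size, unsigned ind)
{
	void *p;

	if (allocm(&p, NULL, size, ALLOCM_ARENA(ind) | ALLOCM_ZERO) !=
	    ALLOCM_SUCCESS)
		return (NULL);
	return (p);
}

Freeing such memory with dallocm(p, ALLOCM_ARENA(ind)) goes through the same flag decode in je_dallocm() below, which uses the arena index to decide whether the thread cache may be used for the deallocation.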
@@ -1393,18 +1435,19 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
alignment);
assert(usize_promoted != 0);
- p = iallocm(usize_promoted, alignment, zero);
+ p = iallocm(usize_promoted, alignment, zero,
+ try_tcache, arena);
if (p == NULL)
goto label_oom;
arena_prof_promoted(p, usize);
} else {
- p = iallocm(usize, alignment, zero);
+ p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
}
prof_malloc(p, usize, cnt);
} else {
- p = iallocm(usize, alignment, zero);
+ p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
}
@@ -1441,6 +1484,9 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
bool no_move = flags & ALLOCM_NO_MOVE;
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ bool try_tcache_alloc, try_tcache_dalloc;
+ arena_t *arena;
assert(ptr != NULL);
assert(*ptr != NULL);
@@ -1448,6 +1494,19 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
+ if (arena_ind != UINT_MAX) {
+ arena_chunk_t *chunk;
+ try_tcache_alloc = true;
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
+ try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
+ arenas[arena_ind]);
+ arena = arenas[arena_ind];
+ } else {
+ try_tcache_alloc = true;
+ try_tcache_dalloc = true;
+ arena = NULL;
+ }
+
p = *ptr;
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
@@ -1474,9 +1533,10 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
&& ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
<= SMALL_MAXCLASS) {
- q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero, no_move);
+ alignment, zero, no_move, try_tcache_alloc,
+ try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
if (max_usize < PAGE) {
@@ -1485,7 +1545,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
} else
usize = isalloc(q, config_prof);
} else {
- q = iralloc(p, size, extra, alignment, zero, no_move);
+ q = irallocx(p, size, extra, alignment, zero, no_move,
+ try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
usize = isalloc(q, config_prof);
@@ -1502,7 +1563,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
old_size = isalloc(p, false);
old_rzsize = u2rz(old_size);
}
- q = iralloc(p, size, extra, alignment, zero, no_move);
+ q = irallocx(p, size, extra, alignment, zero, no_move,
+ try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
if (config_stats)
@@ -1563,10 +1625,19 @@ je_dallocm(void *ptr, int flags)
{
size_t usize;
size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ bool try_tcache;
assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER);
+ if (arena_ind != UINT_MAX) {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ try_tcache = (chunk == ptr || chunk->arena !=
+ arenas[arena_ind]);
+ } else
+ try_tcache = true;
+
UTRACE(ptr, 0, 0);
if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
@@ -1579,7 +1650,7 @@ je_dallocm(void *ptr, int flags)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr);
- iqalloc(ptr);
+ iqallocx(ptr, try_tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
return (ALLOCM_SUCCESS);
@@ -1616,6 +1687,27 @@ je_nallocm(size_t *rsize, size_t size, int flags)
* malloc during fork().
*/
+/*
+ * If an application creates a thread before doing any allocation in the main
+ * thread, then calls fork(2) in the main thread followed by memory allocation
+ * in the child process, a race can occur that results in deadlock within the
+ * child: the main thread may have forked while the created thread had
+ * partially initialized the allocator. Ordinarily jemalloc prevents
+ * fork/malloc races via the following functions it registers during
+ * initialization using pthread_atfork(), but of course that does no good if
+ * the allocator isn't fully initialized at fork time. The following library
+ * constructor is a partial solution to this problem. It may still possible to
+ * trigger the deadlock described above, but doing so would involve forking via
+ * a library constructor that runs before jemalloc's runs.
+ */
+JEMALLOC_ATTR(constructor)
+static void
+jemalloc_constructor(void)
+{
+
+ malloc_init();
+}
+
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
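A minimal sketch of the scenario the comment describes, i.e. the case the constructor is meant to defuse (illustrative only; whether the child actually deadlocks depends on timing):

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

static void *
worker(void *arg)
{

	(void)arg;
	free(malloc(64));	/* first allocation happens off the main thread */
	return (NULL);
}

int
main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, worker, NULL);
	if (fork() == 0) {
		/*
		 * Child: without the constructor, malloc() here can deadlock
		 * if the worker held allocator mutexes at fork time.
		 */
		free(malloc(64));
		_exit(0);
	}
	wait(NULL);
	pthread_join(thr, NULL);
	return (0);
}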
@@ -1633,14 +1725,16 @@ _malloc_prefork(void)
assert(malloc_initialized);
/* Acquire all mutexes in a safe order. */
+ ctl_prefork();
malloc_mutex_prefork(&arenas_lock);
- for (i = 0; i < narenas; i++) {
+ for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
+ prof_prefork();
+ chunk_prefork();
base_prefork();
huge_prefork();
- chunk_dss_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -1660,14 +1754,16 @@ _malloc_postfork(void)
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
- chunk_dss_postfork_parent();
huge_postfork_parent();
base_postfork_parent();
- for (i = 0; i < narenas; i++) {
+ chunk_postfork_parent();
+ prof_postfork_parent();
+ for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
malloc_mutex_postfork_parent(&arenas_lock);
+ ctl_postfork_parent();
}
void
@@ -1678,14 +1774,16 @@ jemalloc_postfork_child(void)
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
- chunk_dss_postfork_child();
huge_postfork_child();
base_postfork_child();
- for (i = 0; i < narenas; i++) {
+ chunk_postfork_child();
+ prof_postfork_child();
+ for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
malloc_mutex_postfork_child(&arenas_lock);
+ ctl_postfork_child();
}
/******************************************************************************/
diff --git a/contrib/jemalloc/src/mutex.c b/contrib/jemalloc/src/mutex.c
index 4a90a05e984e..6b6f438781e6 100644
--- a/contrib/jemalloc/src/mutex.c
+++ b/contrib/jemalloc/src/mutex.c
@@ -64,7 +64,7 @@ pthread_create(pthread_t *__restrict thread,
/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
-int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
__weak_reference(_pthread_mutex_init_calloc_cb_stub,
diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
index de1d392993e7..04964ef7ca3a 100644
--- a/contrib/jemalloc/src/prof.c
+++ b/contrib/jemalloc/src/prof.c
@@ -1270,4 +1270,46 @@ prof_boot2(void)
return (false);
}
+void
+prof_prefork(void)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ malloc_mutex_lock(&bt2ctx_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_lock(&ctx_locks[i]);
+ }
+}
+
+void
+prof_postfork_parent(void)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_parent(&ctx_locks[i]);
+ malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(&bt2ctx_mtx);
+ }
+}
+
+void
+prof_postfork_child(void)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_child(&ctx_locks[i]);
+ malloc_mutex_postfork_child(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(&bt2ctx_mtx);
+ }
+}
+
/******************************************************************************/
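The prof_prefork()/prof_postfork_*() trio follows the usual atfork discipline: acquire every profiling mutex before fork() and release (or reinitialize) them afterwards in reverse order of acquisition. A generic sketch of that pattern, not jemalloc's actual registration code:

#include <pthread.h>

static pthread_mutex_t mtx_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mtx_b = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{

	pthread_mutex_lock(&mtx_a);
	pthread_mutex_lock(&mtx_b);
}

static void
postfork(void)
{

	pthread_mutex_unlock(&mtx_b);	/* reverse order of prefork() */
	pthread_mutex_unlock(&mtx_a);
}

static void
register_hooks(void)
{

	pthread_atfork(prefork, postfork, postfork);
}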
diff --git a/contrib/jemalloc/src/rtree.c b/contrib/jemalloc/src/rtree.c
index eb0ff1e24afc..90c6935a0edd 100644
--- a/contrib/jemalloc/src/rtree.c
+++ b/contrib/jemalloc/src/rtree.c
@@ -44,3 +44,24 @@ rtree_new(unsigned bits)
return (ret);
}
+
+void
+rtree_prefork(rtree_t *rtree)
+{
+
+ malloc_mutex_prefork(&rtree->mutex);
+}
+
+void
+rtree_postfork_parent(rtree_t *rtree)
+{
+
+ malloc_mutex_postfork_parent(&rtree->mutex);
+}
+
+void
+rtree_postfork_child(rtree_t *rtree)
+{
+
+ malloc_mutex_postfork_child(&rtree->mutex);
+}
diff --git a/contrib/jemalloc/src/stats.c b/contrib/jemalloc/src/stats.c
index 433b80d128d9..43f87af67000 100644
--- a/contrib/jemalloc/src/stats.c
+++ b/contrib/jemalloc/src/stats.c
@@ -206,6 +206,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i, bool bins, bool large)
{
unsigned nthreads;
+ const char *dss;
size_t page, pactive, pdirty, mapped;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
@@ -218,6 +219,9 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
+ CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
+ malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
+ dss);
CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
@@ -370,6 +374,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
"Run-time option settings:\n");
OPT_WRITE_BOOL(abort)
OPT_WRITE_SIZE_T(lg_chunk)
+ OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
@@ -400,7 +405,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
CTL_GET("arenas.narenas", &uv, unsigned);
- malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);
+ malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
sizeof(void *));
@@ -472,7 +477,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("stats.chunks.current", &chunks_current, size_t);
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
- malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n",
+ malloc_cprintf(write_cb, cbopaque,
+ " %13"PRIu64" %12zu %12zu\n",
chunks_total, chunks_high, chunks_current);
/* Print huge stats. */
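With the per-arena "dss" statistic wired into stats_arena_print(), the simplest way to see the reformatted output is the usual stats entry point (assuming the unprefixed malloc_stats_print() as declared in FreeBSD's <malloc_np.h>; the text format is human-readable and not stable across versions):

#include <stdlib.h>
#include <malloc_np.h>

int
main(void)
{

	free(malloc(1));	/* touch the allocator so an arena is initialized */
	malloc_stats_print(NULL, NULL, NULL);	/* default write_cb, all sections */
	return (0);
}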
diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c
index 60244c45f8ba..47e14f30b1ab 100644
--- a/contrib/jemalloc/src/tcache.c
+++ b/contrib/jemalloc/src/tcache.c
@@ -288,7 +288,7 @@ tcache_create(arena_t *arena)
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
- tcache = (tcache_t *)icalloc(size);
+ tcache = (tcache_t *)icallocx(size, false, arena);
if (tcache == NULL)
return (NULL);
@@ -364,7 +364,7 @@ tcache_destroy(tcache_t *tcache)
arena_dalloc_large(arena, chunk, tcache);
} else
- idalloc(tcache);
+ idallocx(tcache, false);
}
void
diff --git a/contrib/jemalloc/src/util.c b/contrib/jemalloc/src/util.c
index f94799f380bd..df1c5d50ce5d 100644
--- a/contrib/jemalloc/src/util.c
+++ b/contrib/jemalloc/src/util.c
@@ -393,7 +393,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case '\0': goto label_out;
case '%': {
bool alt_form = false;
- bool zero_pad = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
@@ -414,10 +413,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(alt_form == false);
alt_form = true;
break;
- case '0':
- assert(zero_pad == false);
- zero_pad = true;
- break;
case '-':
assert(left_justify == false);
left_justify = true;