mirror of
https://github.com/beefytech/Beef.git
synced 2025-06-10 12:32:20 +02:00
Added TCMalloc and JEMalloc projects
This commit is contained in:
parent
53376f3861
commit
652142e189
242 changed files with 67746 additions and 6 deletions
41
BeefRT/JEMalloc/.appveyor.yml
Normal file
41
BeefRT/JEMalloc/.appveyor.yml
Normal file
|
@ -0,0 +1,41 @@
|
|||
version: '{build}'
|
||||
|
||||
environment:
|
||||
matrix:
|
||||
- MSYSTEM: MINGW64
|
||||
CPU: x86_64
|
||||
MSVC: amd64
|
||||
CONFIG_FLAGS: --enable-debug
|
||||
- MSYSTEM: MINGW64
|
||||
CPU: x86_64
|
||||
CONFIG_FLAGS: --enable-debug
|
||||
- MSYSTEM: MINGW32
|
||||
CPU: i686
|
||||
MSVC: x86
|
||||
CONFIG_FLAGS: --enable-debug
|
||||
- MSYSTEM: MINGW32
|
||||
CPU: i686
|
||||
CONFIG_FLAGS: --enable-debug
|
||||
- MSYSTEM: MINGW64
|
||||
CPU: x86_64
|
||||
MSVC: amd64
|
||||
- MSYSTEM: MINGW64
|
||||
CPU: x86_64
|
||||
- MSYSTEM: MINGW32
|
||||
CPU: i686
|
||||
MSVC: x86
|
||||
- MSYSTEM: MINGW32
|
||||
CPU: i686
|
||||
|
||||
install:
|
||||
- set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH%
|
||||
- if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC%
|
||||
- if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc
|
||||
|
||||
build_script:
|
||||
- bash -c "autoconf"
|
||||
- bash -c "./configure $CONFIG_FLAGS"
|
||||
- mingw32-make
|
||||
- file lib/jemalloc.dll
|
||||
- mingw32-make tests
|
||||
- mingw32-make -k check
|
3
BeefRT/JEMalloc/.autom4te.cfg
Normal file
3
BeefRT/JEMalloc/.autom4te.cfg
Normal file
|
@ -0,0 +1,3 @@
|
|||
begin-language: "Autoconf-without-aclocal-m4"
|
||||
args: --no-cache
|
||||
end-language: "Autoconf-without-aclocal-m4"
|
27
BeefRT/JEMalloc/COPYING
Normal file
27
BeefRT/JEMalloc/COPYING
Normal file
|
@ -0,0 +1,27 @@
|
|||
Unless otherwise specified, files in the jemalloc source distribution are
|
||||
subject to the following license:
|
||||
--------------------------------------------------------------------------------
|
||||
Copyright (C) 2002-present Jason Evans <jasone@canonware.com>.
|
||||
All rights reserved.
|
||||
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
|
||||
Copyright (C) 2009-present Facebook, Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
1. Redistributions of source code must retain the above copyright notice(s),
|
||||
this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright notice(s),
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
|
||||
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
--------------------------------------------------------------------------------
|
424
BeefRT/JEMalloc/INSTALL.md
Normal file
424
BeefRT/JEMalloc/INSTALL.md
Normal file
|
@ -0,0 +1,424 @@
|
|||
Building and installing a packaged release of jemalloc can be as simple as
|
||||
typing the following while in the root directory of the source tree:
|
||||
|
||||
./configure
|
||||
make
|
||||
make install
|
||||
|
||||
If building from unpackaged developer sources, the simplest command sequence
|
||||
that might work is:
|
||||
|
||||
./autogen.sh
|
||||
make
|
||||
make install
|
||||
|
||||
You can uninstall the installed build artifacts like this:
|
||||
|
||||
make uninstall
|
||||
|
||||
Notes:
|
||||
- "autoconf" needs to be installed
|
||||
- Documentation is built by the default target only when xsltproc is
|
||||
available. Build will warn but not stop if the dependency is missing.
|
||||
|
||||
|
||||
## Advanced configuration
|
||||
|
||||
The 'configure' script supports numerous options that allow control of which
|
||||
functionality is enabled, where jemalloc is installed, etc. Optionally, pass
|
||||
any of the following arguments (not a definitive list) to 'configure':
|
||||
|
||||
* `--help`
|
||||
|
||||
Print a definitive list of options.
|
||||
|
||||
* `--prefix=<install-root-dir>`
|
||||
|
||||
Set the base directory in which to install. For example:
|
||||
|
||||
./configure --prefix=/usr/local
|
||||
|
||||
will cause files to be installed into /usr/local/include, /usr/local/lib,
|
||||
and /usr/local/man.
|
||||
|
||||
* `--with-version=(<major>.<minor>.<bugfix>-<nrev>-g<gid>|VERSION)`
|
||||
|
||||
The VERSION file is mandatory for successful configuration, and the
|
||||
following steps are taken to assure its presence:
|
||||
1) If --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> is specified,
|
||||
generate VERSION using the specified value.
|
||||
2) If --with-version is not specified in either form and the source
|
||||
directory is inside a git repository, try to generate VERSION via 'git
|
||||
describe' invocations that pattern-match release tags.
|
||||
3) If VERSION is missing, generate it with a bogus version:
|
||||
0.0.0-0-g0000000000000000000000000000000000000000
|
||||
|
||||
Note that --with-version=VERSION bypasses (1) and (2), which simplifies
|
||||
VERSION configuration when embedding a jemalloc release into another
|
||||
project's git repository.
|
||||
|
||||
* `--with-rpath=<colon-separated-rpath>`
|
||||
|
||||
Embed one or more library paths, so that libjemalloc can find the libraries
|
||||
it is linked to. This works only on ELF-based systems.
|
||||
|
||||
* `--with-mangling=<map>`
|
||||
|
||||
Mangle public symbols specified in <map> which is a comma-separated list of
|
||||
name:mangled pairs.
|
||||
|
||||
For example, to use ld's --wrap option as an alternative method for
|
||||
overriding libc's malloc implementation, specify something like:
|
||||
|
||||
--with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]
|
||||
|
||||
Note that mangling happens prior to application of the prefix specified by
|
||||
--with-jemalloc-prefix, and mangled symbols are then ignored when applying
|
||||
the prefix.
|
||||
|
||||
* `--with-jemalloc-prefix=<prefix>`
|
||||
|
||||
Prefix all public APIs with <prefix>. For example, if <prefix> is
|
||||
"prefix_", API changes like the following occur:
|
||||
|
||||
malloc() --> prefix_malloc()
|
||||
malloc_conf --> prefix_malloc_conf
|
||||
/etc/malloc.conf --> /etc/prefix_malloc.conf
|
||||
MALLOC_CONF --> PREFIX_MALLOC_CONF
|
||||
|
||||
This makes it possible to use jemalloc at the same time as the system
|
||||
allocator, or even to use multiple copies of jemalloc simultaneously.
|
||||
|
||||
By default, the prefix is "", except on OS X, where it is "je_". On OS X,
|
||||
jemalloc overlays the default malloc zone, but makes no attempt to actually
|
||||
replace the "malloc", "calloc", etc. symbols.
|
||||
|
||||
* `--without-export`
|
||||
|
||||
Don't export public APIs. This can be useful when building jemalloc as a
|
||||
static library, or to avoid exporting public APIs when using the zone
|
||||
allocator on OSX.
|
||||
|
||||
* `--with-private-namespace=<prefix>`
|
||||
|
||||
Prefix all library-private APIs with <prefix>je_. For shared libraries,
|
||||
symbol visibility mechanisms prevent these symbols from being exported, but
|
||||
for static libraries, naming collisions are a real possibility. By
|
||||
default, <prefix> is empty, which results in a symbol prefix of je_ .
|
||||
|
||||
* `--with-install-suffix=<suffix>`
|
||||
|
||||
Append <suffix> to the base name of all installed files, such that multiple
|
||||
versions of jemalloc can coexist in the same installation directory. For
|
||||
example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
|
||||
|
||||
* `--with-malloc-conf=<malloc_conf>`
|
||||
|
||||
Embed `<malloc_conf>` as a run-time options string that is processed prior to
|
||||
the malloc_conf global variable, the /etc/malloc.conf symlink, and the
|
||||
MALLOC_CONF environment variable. For example, to change the default decay
|
||||
time to 30 seconds:
|
||||
|
||||
--with-malloc-conf=decay_ms:30000
|
||||
|
||||
* `--enable-debug`
|
||||
|
||||
Enable assertions and validation code. This incurs a substantial
|
||||
performance hit, but is very useful during application development.
|
||||
|
||||
* `--disable-stats`
|
||||
|
||||
Disable statistics gathering functionality. See the "opt.stats_print"
|
||||
option documentation for usage details.
|
||||
|
||||
* `--enable-prof`
|
||||
|
||||
Enable heap profiling and leak detection functionality. See the "opt.prof"
|
||||
option documentation for usage details. When enabled, there are several
|
||||
approaches to backtracing, and the configure script chooses the first one
|
||||
in the following list that appears to function correctly:
|
||||
|
||||
+ libunwind (requires --enable-prof-libunwind)
|
||||
+ libgcc (unless --disable-prof-libgcc)
|
||||
+ gcc intrinsics (unless --disable-prof-gcc)
|
||||
|
||||
* `--enable-prof-libunwind`
|
||||
|
||||
Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
|
||||
backtracing.
|
||||
|
||||
* `--disable-prof-libgcc`
|
||||
|
||||
Disable the use of libgcc's backtracing functionality.
|
||||
|
||||
* `--disable-prof-gcc`
|
||||
|
||||
Disable the use of gcc intrinsics for backtracing.
|
||||
|
||||
* `--with-static-libunwind=<libunwind.a>`
|
||||
|
||||
Statically link against the specified libunwind.a rather than dynamically
|
||||
linking with -lunwind.
|
||||
|
||||
* `--disable-fill`
|
||||
|
||||
Disable support for junk/zero filling of memory. See the "opt.junk" and
|
||||
"opt.zero" option documentation for usage details.
|
||||
|
||||
* `--disable-zone-allocator`
|
||||
|
||||
Disable zone allocator for Darwin. This means jemalloc won't be hooked as
|
||||
the default allocator on OSX/iOS.
|
||||
|
||||
* `--enable-utrace`
|
||||
|
||||
Enable utrace(2)-based allocation tracing. This feature is not broadly
|
||||
portable (FreeBSD has it, but Linux and OS X do not).
|
||||
|
||||
* `--enable-xmalloc`
|
||||
|
||||
Enable support for optional immediate termination due to out-of-memory
|
||||
errors, as is commonly implemented by "xmalloc" wrapper function for malloc.
|
||||
See the "opt.xmalloc" option documentation for usage details.
|
||||
|
||||
* `--enable-lazy-lock`
|
||||
|
||||
Enable code that wraps pthread_create() to detect when an application
|
||||
switches from single-threaded to multi-threaded mode, so that it can avoid
|
||||
mutex locking/unlocking operations while in single-threaded mode. In
|
||||
practice, this feature usually has little impact on performance unless
|
||||
thread-specific caching is disabled.
|
||||
|
||||
* `--disable-cache-oblivious`
|
||||
|
||||
Disable cache-oblivious large allocation alignment by default, for large
|
||||
allocation requests with no alignment constraints. If this feature is
|
||||
disabled, all large allocations are page-aligned as an implementation
|
||||
artifact, which can severely harm CPU cache utilization. However, the
|
||||
cache-oblivious layout comes at the cost of one extra page per large
|
||||
allocation, which in the most extreme case increases physical memory usage
|
||||
for the 16 KiB size class to 20 KiB.
|
||||
|
||||
* `--disable-syscall`
|
||||
|
||||
Disable use of syscall(2) rather than {open,read,write,close}(2). This is
|
||||
intended as a workaround for systems that place security limitations on
|
||||
syscall(2).
|
||||
|
||||
* `--disable-cxx`
|
||||
|
||||
Disable C++ integration. This will cause new and delete operator
|
||||
implementations to be omitted.
|
||||
|
||||
* `--with-xslroot=<path>`
|
||||
|
||||
Specify where to find DocBook XSL stylesheets when building the
|
||||
documentation.
|
||||
|
||||
* `--with-lg-page=<lg-page>`
|
||||
|
||||
Specify the base 2 log of the allocator page size, which must in turn be at
|
||||
least as large as the system page size. By default the configure script
|
||||
determines the host's page size and sets the allocator page size equal to
|
||||
the system page size, so this option need not be specified unless the
|
||||
system page size may change between configuration and execution, e.g. when
|
||||
cross compiling.
|
||||
|
||||
* `--with-lg-hugepage=<lg-hugepage>`
|
||||
|
||||
Specify the base 2 log of the system huge page size. This option is useful
|
||||
when cross compiling, or when overriding the default for systems that do
|
||||
not explicitly support huge pages.
|
||||
|
||||
* `--with-lg-quantum=<lg-quantum>`
|
||||
|
||||
Specify the base 2 log of the minimum allocation alignment. jemalloc needs
|
||||
to know the minimum alignment that meets the following C standard
|
||||
requirement (quoted from the April 12, 2011 draft of the C11 standard):
|
||||
|
||||
> The pointer returned if the allocation succeeds is suitably aligned so
|
||||
that it may be assigned to a pointer to any type of object with a
|
||||
fundamental alignment requirement and then used to access such an object
|
||||
or an array of such objects in the space allocated [...]
|
||||
|
||||
This setting is architecture-specific, and although jemalloc includes known
|
||||
safe values for the most commonly used modern architectures, there is a
|
||||
wrinkle related to GNU libc (glibc) that may impact your choice of
|
||||
<lg-quantum>. On most modern architectures, this mandates 16-byte
|
||||
alignment (<lg-quantum>=4), but the glibc developers chose not to meet this
|
||||
requirement for performance reasons. An old discussion can be found at
|
||||
<https://sourceware.org/bugzilla/show_bug.cgi?id=206> . Unlike glibc,
|
||||
jemalloc does follow the C standard by default (caveat: jemalloc
|
||||
technically cheats for size classes smaller than the quantum), but the fact
|
||||
that Linux systems already work around this allocator noncompliance means
|
||||
that it is generally safe in practice to let jemalloc's minimum alignment
|
||||
follow glibc's lead. If you specify `--with-lg-quantum=3` during
|
||||
configuration, jemalloc will provide additional size classes that are not
|
||||
16-byte-aligned (24, 40, and 56).
|
||||
|
||||
* `--with-lg-vaddr=<lg-vaddr>`
|
||||
|
||||
Specify the number of significant virtual address bits. By default, the
|
||||
configure script attempts to detect virtual address size on those platforms
|
||||
where it knows how, and picks a default otherwise. This option may be
|
||||
useful when cross-compiling.
|
||||
|
||||
* `--disable-initial-exec-tls`
|
||||
|
||||
Disable the initial-exec TLS model for jemalloc's internal thread-local
|
||||
storage (on those platforms that support explicit settings). This can allow
|
||||
jemalloc to be dynamically loaded after program startup (e.g. using dlopen).
|
||||
Note that in this case, there will be two malloc implementations operating
|
||||
in the same process, which will almost certainly result in confusing runtime
|
||||
crashes if pointers leak from one implementation to the other.
|
||||
|
||||
* `--disable-libdl`
|
||||
|
||||
Disable the usage of libdl, namely dlsym(3) which is required by the lazy
|
||||
lock option. This can allow building static binaries.
|
||||
|
||||
The following environment variables (not a definitive list) impact configure's
|
||||
behavior:
|
||||
|
||||
* `CFLAGS="?"`
|
||||
* `CXXFLAGS="?"`
|
||||
|
||||
Pass these flags to the C/C++ compiler. Any flags set by the configure
|
||||
script are prepended, which means explicitly set flags generally take
|
||||
precedence. Take care when specifying flags such as -Werror, because
|
||||
configure tests may be affected in undesirable ways.
|
||||
|
||||
* `EXTRA_CFLAGS="?"`
|
||||
* `EXTRA_CXXFLAGS="?"`
|
||||
|
||||
Append these flags to CFLAGS/CXXFLAGS, without passing them to the
|
||||
compiler(s) during configuration. This makes it possible to add flags such
|
||||
as -Werror, while allowing the configure script to determine what other
|
||||
flags are appropriate for the specified configuration.
|
||||
|
||||
* `CPPFLAGS="?"`
|
||||
|
||||
Pass these flags to the C preprocessor. Note that CFLAGS is not passed to
|
||||
'cpp' when 'configure' is looking for include files, so you must use
|
||||
CPPFLAGS instead if you need to help 'configure' find header files.
|
||||
|
||||
* `LD_LIBRARY_PATH="?"`
|
||||
|
||||
'ld' uses this colon-separated list to find libraries.
|
||||
|
||||
* `LDFLAGS="?"`
|
||||
|
||||
Pass these flags when linking.
|
||||
|
||||
* `PATH="?"`
|
||||
|
||||
'configure' uses this to find programs.
|
||||
|
||||
In some cases it may be necessary to work around configuration results that do
|
||||
not match reality. For example, Linux 4.5 added support for the MADV_FREE flag
|
||||
to madvise(2), which can cause problems if building on a host with MADV_FREE
|
||||
support and deploying to a target without. To work around this, use a cache
|
||||
file to override the relevant configuration variable defined in configure.ac,
|
||||
e.g.:
|
||||
|
||||
echo "je_cv_madv_free=no" > config.cache && ./configure -C
|
||||
|
||||
|
||||
## Advanced compilation
|
||||
|
||||
To build only parts of jemalloc, use the following targets:
|
||||
|
||||
build_lib_shared
|
||||
build_lib_static
|
||||
build_lib
|
||||
build_doc_html
|
||||
build_doc_man
|
||||
build_doc
|
||||
|
||||
To install only parts of jemalloc, use the following targets:
|
||||
|
||||
install_bin
|
||||
install_include
|
||||
install_lib_shared
|
||||
install_lib_static
|
||||
install_lib_pc
|
||||
install_lib
|
||||
install_doc_html
|
||||
install_doc_man
|
||||
install_doc
|
||||
|
||||
To clean up build results to varying degrees, use the following make targets:
|
||||
|
||||
clean
|
||||
distclean
|
||||
relclean
|
||||
|
||||
|
||||
## Advanced installation
|
||||
|
||||
Optionally, define make variables when invoking make, including (not
|
||||
exclusively):
|
||||
|
||||
* `INCLUDEDIR="?"`
|
||||
|
||||
Use this as the installation prefix for header files.
|
||||
|
||||
* `LIBDIR="?"`
|
||||
|
||||
Use this as the installation prefix for libraries.
|
||||
|
||||
* `MANDIR="?"`
|
||||
|
||||
Use this as the installation prefix for man pages.
|
||||
|
||||
* `DESTDIR="?"`
|
||||
|
||||
Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful
|
||||
when installing to a different path than was specified via --prefix.
|
||||
|
||||
* `CC="?"`
|
||||
|
||||
Use this to invoke the C compiler.
|
||||
|
||||
* `CFLAGS="?"`
|
||||
|
||||
Pass these flags to the compiler.
|
||||
|
||||
* `CPPFLAGS="?"`
|
||||
|
||||
Pass these flags to the C preprocessor.
|
||||
|
||||
* `LDFLAGS="?"`
|
||||
|
||||
Pass these flags when linking.
|
||||
|
||||
* `PATH="?"`
|
||||
|
||||
Use this to search for programs used during configuration and building.
|
||||
|
||||
|
||||
## Development
|
||||
|
||||
If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh'
|
||||
script rather than 'configure'. This re-generates 'configure', enables
|
||||
configuration dependency rules, and enables re-generation of automatically
|
||||
generated source files.
|
||||
|
||||
The build system supports using an object directory separate from the source
|
||||
tree. For example, you can create an 'obj' directory, and from within that
|
||||
directory, issue configuration and build commands:
|
||||
|
||||
autoconf
|
||||
mkdir obj
|
||||
cd obj
|
||||
../configure --enable-autogen
|
||||
make
|
||||
|
||||
|
||||
## Documentation
|
||||
|
||||
The manual page is generated in both html and roff formats. Any web browser
|
||||
can be used to view the html manual. The roff manual page can be formatted
|
||||
prior to installation via the following command:
|
||||
|
||||
nroff -man -t doc/jemalloc.3
|
762
BeefRT/JEMalloc/Makefile.in
Normal file
762
BeefRT/JEMalloc/Makefile.in
Normal file
|
@ -0,0 +1,762 @@
|
|||
# Clear out all vpaths, then set just one (default vpath) for the main build
|
||||
# directory.
|
||||
vpath
|
||||
vpath % .
|
||||
|
||||
# Clear the default suffixes, so that built-in rules are not used.
|
||||
.SUFFIXES :
|
||||
|
||||
SHELL := /bin/sh
|
||||
|
||||
CC := @CC@
|
||||
CXX := @CXX@
|
||||
|
||||
# Configuration parameters.
|
||||
DESTDIR =
|
||||
BINDIR := $(DESTDIR)@BINDIR@
|
||||
INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@
|
||||
LIBDIR := $(DESTDIR)@LIBDIR@
|
||||
DATADIR := $(DESTDIR)@DATADIR@
|
||||
MANDIR := $(DESTDIR)@MANDIR@
|
||||
srcroot := @srcroot@
|
||||
objroot := @objroot@
|
||||
abs_srcroot := @abs_srcroot@
|
||||
abs_objroot := @abs_objroot@
|
||||
|
||||
# Build parameters.
|
||||
CPPFLAGS := @CPPFLAGS@ -I$(objroot)include -I$(srcroot)include
|
||||
CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@
|
||||
SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@
|
||||
EXTRA_CFLAGS := @EXTRA_CFLAGS@
|
||||
CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS))
|
||||
CONFIGURE_CXXFLAGS := @CONFIGURE_CXXFLAGS@
|
||||
SPECIFIED_CXXFLAGS := @SPECIFIED_CXXFLAGS@
|
||||
EXTRA_CXXFLAGS := @EXTRA_CXXFLAGS@
|
||||
CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS))
|
||||
LDFLAGS := @LDFLAGS@
|
||||
EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
|
||||
LIBS := @LIBS@
|
||||
RPATH_EXTRA := @RPATH_EXTRA@
|
||||
SO := @so@
|
||||
IMPORTLIB := @importlib@
|
||||
O := @o@
|
||||
A := @a@
|
||||
EXE := @exe@
|
||||
LIBPREFIX := @libprefix@
|
||||
REV := @rev@
|
||||
install_suffix := @install_suffix@
|
||||
ABI := @abi@
|
||||
XSLTPROC := @XSLTPROC@
|
||||
XSLROOT := @XSLROOT@
|
||||
AUTOCONF := @AUTOCONF@
|
||||
_RPATH = @RPATH@
|
||||
RPATH = $(if $(1),$(call _RPATH,$(1)))
|
||||
cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@)
|
||||
cfghdrs_out := @cfghdrs_out@
|
||||
cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@)
|
||||
cfgoutputs_out := @cfgoutputs_out@
|
||||
enable_autogen := @enable_autogen@
|
||||
enable_doc := @enable_doc@
|
||||
enable_shared := @enable_shared@
|
||||
enable_static := @enable_static@
|
||||
enable_prof := @enable_prof@
|
||||
enable_zone_allocator := @enable_zone_allocator@
|
||||
enable_experimental_smallocx := @enable_experimental_smallocx@
|
||||
MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
|
||||
link_whole_archive := @link_whole_archive@
|
||||
DSO_LDFLAGS = @DSO_LDFLAGS@
|
||||
SOREV = @SOREV@
|
||||
PIC_CFLAGS = @PIC_CFLAGS@
|
||||
CTARGET = @CTARGET@
|
||||
LDTARGET = @LDTARGET@
|
||||
TEST_LD_MODE = @TEST_LD_MODE@
|
||||
MKLIB = @MKLIB@
|
||||
AR = @AR@
|
||||
ARFLAGS = @ARFLAGS@
|
||||
DUMP_SYMS = @DUMP_SYMS@
|
||||
AWK := @AWK@
|
||||
CC_MM = @CC_MM@
|
||||
LM := @LM@
|
||||
INSTALL = @INSTALL@
|
||||
|
||||
ifeq (macho, $(ABI))
|
||||
TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib"
|
||||
else
|
||||
ifeq (pecoff, $(ABI))
|
||||
TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib"
|
||||
else
|
||||
TEST_LIBRARY_PATH :=
|
||||
endif
|
||||
endif
|
||||
|
||||
LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
|
||||
|
||||
# Lists of files.
|
||||
BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof
|
||||
C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
|
||||
C_SRCS := $(srcroot)src/jemalloc.c \
|
||||
$(srcroot)src/arena.c \
|
||||
$(srcroot)src/background_thread.c \
|
||||
$(srcroot)src/base.c \
|
||||
$(srcroot)src/bin.c \
|
||||
$(srcroot)src/bin_info.c \
|
||||
$(srcroot)src/bitmap.c \
|
||||
$(srcroot)src/buf_writer.c \
|
||||
$(srcroot)src/cache_bin.c \
|
||||
$(srcroot)src/ckh.c \
|
||||
$(srcroot)src/counter.c \
|
||||
$(srcroot)src/ctl.c \
|
||||
$(srcroot)src/decay.c \
|
||||
$(srcroot)src/div.c \
|
||||
$(srcroot)src/ecache.c \
|
||||
$(srcroot)src/edata.c \
|
||||
$(srcroot)src/edata_cache.c \
|
||||
$(srcroot)src/ehooks.c \
|
||||
$(srcroot)src/emap.c \
|
||||
$(srcroot)src/eset.c \
|
||||
$(srcroot)src/exp_grow.c \
|
||||
$(srcroot)src/extent.c \
|
||||
$(srcroot)src/extent_dss.c \
|
||||
$(srcroot)src/extent_mmap.c \
|
||||
$(srcroot)src/fxp.c \
|
||||
$(srcroot)src/san.c \
|
||||
$(srcroot)src/san_bump.c \
|
||||
$(srcroot)src/hook.c \
|
||||
$(srcroot)src/hpa.c \
|
||||
$(srcroot)src/hpa_hooks.c \
|
||||
$(srcroot)src/hpdata.c \
|
||||
$(srcroot)src/inspect.c \
|
||||
$(srcroot)src/large.c \
|
||||
$(srcroot)src/log.c \
|
||||
$(srcroot)src/malloc_io.c \
|
||||
$(srcroot)src/mutex.c \
|
||||
$(srcroot)src/nstime.c \
|
||||
$(srcroot)src/pa.c \
|
||||
$(srcroot)src/pa_extra.c \
|
||||
$(srcroot)src/pai.c \
|
||||
$(srcroot)src/pac.c \
|
||||
$(srcroot)src/pages.c \
|
||||
$(srcroot)src/peak_event.c \
|
||||
$(srcroot)src/prof.c \
|
||||
$(srcroot)src/prof_data.c \
|
||||
$(srcroot)src/prof_log.c \
|
||||
$(srcroot)src/prof_recent.c \
|
||||
$(srcroot)src/prof_stats.c \
|
||||
$(srcroot)src/prof_sys.c \
|
||||
$(srcroot)src/psset.c \
|
||||
$(srcroot)src/rtree.c \
|
||||
$(srcroot)src/safety_check.c \
|
||||
$(srcroot)src/sc.c \
|
||||
$(srcroot)src/sec.c \
|
||||
$(srcroot)src/stats.c \
|
||||
$(srcroot)src/sz.c \
|
||||
$(srcroot)src/tcache.c \
|
||||
$(srcroot)src/test_hooks.c \
|
||||
$(srcroot)src/thread_event.c \
|
||||
$(srcroot)src/ticker.c \
|
||||
$(srcroot)src/tsd.c \
|
||||
$(srcroot)src/witness.c
|
||||
ifeq ($(enable_zone_allocator), 1)
|
||||
C_SRCS += $(srcroot)src/zone.c
|
||||
endif
|
||||
ifeq ($(IMPORTLIB),$(SO))
|
||||
STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A)
|
||||
endif
|
||||
ifdef PIC_CFLAGS
|
||||
STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A)
|
||||
else
|
||||
STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A)
|
||||
endif
|
||||
DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV)
|
||||
ifneq ($(SOREV),$(SO))
|
||||
DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO)
|
||||
endif
|
||||
ifeq (1, $(link_whole_archive))
|
||||
LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive
|
||||
else
|
||||
LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
|
||||
endif
|
||||
PC := $(objroot)jemalloc.pc
|
||||
DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
|
||||
DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html)
|
||||
DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3)
|
||||
DOCS := $(DOCS_HTML) $(DOCS_MAN3)
|
||||
C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
|
||||
$(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \
|
||||
$(srcroot)test/src/mtx.c $(srcroot)test/src/sleep.c \
|
||||
$(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
|
||||
$(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
|
||||
ifeq (1, $(link_whole_archive))
|
||||
C_UTIL_INTEGRATION_SRCS :=
|
||||
C_UTIL_CPP_SRCS :=
|
||||
else
|
||||
C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c \
|
||||
$(srcroot)src/ticker.c
|
||||
C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c
|
||||
endif
|
||||
TESTS_UNIT := \
|
||||
$(srcroot)test/unit/a0.c \
|
||||
$(srcroot)test/unit/arena_decay.c \
|
||||
$(srcroot)test/unit/arena_reset.c \
|
||||
$(srcroot)test/unit/atomic.c \
|
||||
$(srcroot)test/unit/background_thread.c \
|
||||
$(srcroot)test/unit/background_thread_enable.c \
|
||||
$(srcroot)test/unit/base.c \
|
||||
$(srcroot)test/unit/batch_alloc.c \
|
||||
$(srcroot)test/unit/binshard.c \
|
||||
$(srcroot)test/unit/bitmap.c \
|
||||
$(srcroot)test/unit/bit_util.c \
|
||||
$(srcroot)test/unit/buf_writer.c \
|
||||
$(srcroot)test/unit/cache_bin.c \
|
||||
$(srcroot)test/unit/ckh.c \
|
||||
$(srcroot)test/unit/counter.c \
|
||||
$(srcroot)test/unit/decay.c \
|
||||
$(srcroot)test/unit/div.c \
|
||||
$(srcroot)test/unit/double_free.c \
|
||||
$(srcroot)test/unit/edata_cache.c \
|
||||
$(srcroot)test/unit/emitter.c \
|
||||
$(srcroot)test/unit/extent_quantize.c \
|
||||
${srcroot}test/unit/fb.c \
|
||||
$(srcroot)test/unit/fork.c \
|
||||
${srcroot}test/unit/fxp.c \
|
||||
${srcroot}test/unit/san.c \
|
||||
${srcroot}test/unit/san_bump.c \
|
||||
$(srcroot)test/unit/hash.c \
|
||||
$(srcroot)test/unit/hook.c \
|
||||
$(srcroot)test/unit/hpa.c \
|
||||
$(srcroot)test/unit/hpa_background_thread.c \
|
||||
$(srcroot)test/unit/hpdata.c \
|
||||
$(srcroot)test/unit/huge.c \
|
||||
$(srcroot)test/unit/inspect.c \
|
||||
$(srcroot)test/unit/junk.c \
|
||||
$(srcroot)test/unit/junk_alloc.c \
|
||||
$(srcroot)test/unit/junk_free.c \
|
||||
$(srcroot)test/unit/log.c \
|
||||
$(srcroot)test/unit/mallctl.c \
|
||||
$(srcroot)test/unit/malloc_conf_2.c \
|
||||
$(srcroot)test/unit/malloc_io.c \
|
||||
$(srcroot)test/unit/math.c \
|
||||
$(srcroot)test/unit/mpsc_queue.c \
|
||||
$(srcroot)test/unit/mq.c \
|
||||
$(srcroot)test/unit/mtx.c \
|
||||
$(srcroot)test/unit/nstime.c \
|
||||
$(srcroot)test/unit/oversize_threshold.c \
|
||||
$(srcroot)test/unit/pa.c \
|
||||
$(srcroot)test/unit/pack.c \
|
||||
$(srcroot)test/unit/pages.c \
|
||||
$(srcroot)test/unit/peak.c \
|
||||
$(srcroot)test/unit/ph.c \
|
||||
$(srcroot)test/unit/prng.c \
|
||||
$(srcroot)test/unit/prof_accum.c \
|
||||
$(srcroot)test/unit/prof_active.c \
|
||||
$(srcroot)test/unit/prof_gdump.c \
|
||||
$(srcroot)test/unit/prof_hook.c \
|
||||
$(srcroot)test/unit/prof_idump.c \
|
||||
$(srcroot)test/unit/prof_log.c \
|
||||
$(srcroot)test/unit/prof_mdump.c \
|
||||
$(srcroot)test/unit/prof_recent.c \
|
||||
$(srcroot)test/unit/prof_reset.c \
|
||||
$(srcroot)test/unit/prof_stats.c \
|
||||
$(srcroot)test/unit/prof_tctx.c \
|
||||
$(srcroot)test/unit/prof_thread_name.c \
|
||||
$(srcroot)test/unit/prof_sys_thread_name.c \
|
||||
$(srcroot)test/unit/psset.c \
|
||||
$(srcroot)test/unit/ql.c \
|
||||
$(srcroot)test/unit/qr.c \
|
||||
$(srcroot)test/unit/rb.c \
|
||||
$(srcroot)test/unit/retained.c \
|
||||
$(srcroot)test/unit/rtree.c \
|
||||
$(srcroot)test/unit/safety_check.c \
|
||||
$(srcroot)test/unit/sc.c \
|
||||
$(srcroot)test/unit/sec.c \
|
||||
$(srcroot)test/unit/seq.c \
|
||||
$(srcroot)test/unit/SFMT.c \
|
||||
$(srcroot)test/unit/size_check.c \
|
||||
$(srcroot)test/unit/size_classes.c \
|
||||
$(srcroot)test/unit/slab.c \
|
||||
$(srcroot)test/unit/smoothstep.c \
|
||||
$(srcroot)test/unit/spin.c \
|
||||
$(srcroot)test/unit/stats.c \
|
||||
$(srcroot)test/unit/stats_print.c \
|
||||
$(srcroot)test/unit/sz.c \
|
||||
$(srcroot)test/unit/tcache_max.c \
|
||||
$(srcroot)test/unit/test_hooks.c \
|
||||
$(srcroot)test/unit/thread_event.c \
|
||||
$(srcroot)test/unit/ticker.c \
|
||||
$(srcroot)test/unit/tsd.c \
|
||||
$(srcroot)test/unit/uaf.c \
|
||||
$(srcroot)test/unit/witness.c \
|
||||
$(srcroot)test/unit/zero.c \
|
||||
$(srcroot)test/unit/zero_realloc_abort.c \
|
||||
$(srcroot)test/unit/zero_realloc_free.c \
|
||||
$(srcroot)test/unit/zero_realloc_alloc.c \
|
||||
$(srcroot)test/unit/zero_reallocs.c
|
||||
ifeq (@enable_prof@, 1)
|
||||
TESTS_UNIT += \
|
||||
$(srcroot)test/unit/arena_reset_prof.c \
|
||||
$(srcroot)test/unit/batch_alloc_prof.c
|
||||
endif
|
||||
TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
|
||||
$(srcroot)test/integration/allocated.c \
|
||||
$(srcroot)test/integration/extent.c \
|
||||
$(srcroot)test/integration/malloc.c \
|
||||
$(srcroot)test/integration/mallocx.c \
|
||||
$(srcroot)test/integration/MALLOCX_ARENA.c \
|
||||
$(srcroot)test/integration/overflow.c \
|
||||
$(srcroot)test/integration/posix_memalign.c \
|
||||
$(srcroot)test/integration/rallocx.c \
|
||||
$(srcroot)test/integration/sdallocx.c \
|
||||
$(srcroot)test/integration/slab_sizes.c \
|
||||
$(srcroot)test/integration/thread_arena.c \
|
||||
$(srcroot)test/integration/thread_tcache_enabled.c \
|
||||
$(srcroot)test/integration/xallocx.c
|
||||
ifeq (@enable_experimental_smallocx@, 1)
|
||||
TESTS_INTEGRATION += \
|
||||
$(srcroot)test/integration/smallocx.c
|
||||
endif
|
||||
ifeq (@enable_cxx@, 1)
|
||||
CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp
|
||||
TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp \
|
||||
$(srcroot)test/integration/cpp/infallible_new_true.cpp \
|
||||
$(srcroot)test/integration/cpp/infallible_new_false.cpp
|
||||
else
|
||||
CPP_SRCS :=
|
||||
TESTS_INTEGRATION_CPP :=
|
||||
endif
|
||||
TESTS_ANALYZE := $(srcroot)test/analyze/prof_bias.c \
|
||||
$(srcroot)test/analyze/rand.c \
|
||||
$(srcroot)test/analyze/sizes.c
|
||||
TESTS_STRESS := $(srcroot)test/stress/batch_alloc.c \
|
||||
$(srcroot)test/stress/fill_flush.c \
|
||||
$(srcroot)test/stress/hookbench.c \
|
||||
$(srcroot)test/stress/large_microbench.c \
|
||||
$(srcroot)test/stress/mallctl.c \
|
||||
$(srcroot)test/stress/microbench.c
|
||||
|
||||
|
||||
TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) \
|
||||
$(TESTS_ANALYZE) $(TESTS_STRESS)
|
||||
|
||||
PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h
|
||||
PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h)
|
||||
C_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym.$(O))
|
||||
C_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym)
|
||||
C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O))
|
||||
CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O))
|
||||
C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O))
|
||||
CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O))
|
||||
C_JET_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym.$(O))
|
||||
C_JET_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym)
|
||||
C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O))
|
||||
C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O))
|
||||
C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
|
||||
C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
|
||||
C_TESTLIB_ANALYZE_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.analyze.$(O))
|
||||
C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O))
|
||||
C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) \
|
||||
$(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_ANALYZE_OBJS) \
|
||||
$(C_TESTLIB_STRESS_OBJS)
|
||||
|
||||
TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O))
|
||||
TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O))
|
||||
TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O))
|
||||
TESTS_ANALYZE_OBJS := $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%.$(O))
|
||||
TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O))
|
||||
TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_ANALYZE_OBJS) \
|
||||
$(TESTS_STRESS_OBJS)
|
||||
TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS)
|
||||
|
||||
.PHONY: all dist build_doc_html build_doc_man build_doc
|
||||
.PHONY: install_bin install_include install_lib
|
||||
.PHONY: install_doc_html install_doc_man install_doc install
|
||||
.PHONY: tests check clean distclean relclean
|
||||
|
||||
.SECONDARY : $(PRIVATE_NAMESPACE_GEN_HDRS) $(TESTS_OBJS) $(TESTS_CPP_OBJS)
|
||||
|
||||
# Default target.
|
||||
all: build_lib
|
||||
|
||||
dist: build_doc
|
||||
|
||||
$(objroot)doc/%$(install_suffix).html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
|
||||
ifneq ($(XSLROOT),)
|
||||
$(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
|
||||
else
|
||||
ifeq ($(wildcard $(DOCS_HTML)),)
|
||||
@echo "<p>Missing xsltproc. Doc not built.</p>" > $@
|
||||
endif
|
||||
@echo "Missing xsltproc. "$@" not (re)built."
|
||||
endif
|
||||
|
||||
$(objroot)doc/%$(install_suffix).3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
|
||||
ifneq ($(XSLROOT),)
|
||||
$(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
|
||||
# The -o option (output filename) of xsltproc may not work (it uses the
|
||||
# <refname> in the .xml file). Manually add the suffix if so.
|
||||
ifneq ($(install_suffix),)
|
||||
@if [ -f $(objroot)doc/jemalloc.3 ]; then \
|
||||
mv $(objroot)doc/jemalloc.3 $(objroot)doc/jemalloc$(install_suffix).3 ; \
|
||||
fi
|
||||
endif
|
||||
else
|
||||
ifeq ($(wildcard $(DOCS_MAN3)),)
|
||||
@echo "Missing xsltproc. Doc not built." > $@
|
||||
endif
|
||||
@echo "Missing xsltproc. "$@" not (re)built."
|
||||
endif
|
||||
|
||||
build_doc_html: $(DOCS_HTML)
|
||||
build_doc_man: $(DOCS_MAN3)
|
||||
build_doc: $(DOCS)
|
||||
|
||||
#
|
||||
# Include generated dependency files.
|
||||
#
|
||||
ifdef CC_MM
|
||||
-include $(C_SYM_OBJS:%.$(O)=%.d)
|
||||
-include $(C_OBJS:%.$(O)=%.d)
|
||||
-include $(CPP_OBJS:%.$(O)=%.d)
|
||||
-include $(C_PIC_OBJS:%.$(O)=%.d)
|
||||
-include $(CPP_PIC_OBJS:%.$(O)=%.d)
|
||||
-include $(C_JET_SYM_OBJS:%.$(O)=%.d)
|
||||
-include $(C_JET_OBJS:%.$(O)=%.d)
|
||||
-include $(C_TESTLIB_OBJS:%.$(O)=%.d)
|
||||
-include $(TESTS_OBJS:%.$(O)=%.d)
|
||||
-include $(TESTS_CPP_OBJS:%.$(O)=%.d)
|
||||
endif
|
||||
|
||||
$(C_SYM_OBJS): $(objroot)src/%.sym.$(O): $(srcroot)src/%.c
|
||||
$(C_SYM_OBJS): CPPFLAGS += -DJEMALLOC_NO_PRIVATE_NAMESPACE
|
||||
$(C_SYMS): $(objroot)src/%.sym: $(objroot)src/%.sym.$(O)
|
||||
$(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c
|
||||
$(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp
|
||||
$(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c
|
||||
$(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS)
|
||||
$(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp
|
||||
$(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS)
|
||||
$(C_JET_SYM_OBJS): $(objroot)src/%.jet.sym.$(O): $(srcroot)src/%.c
|
||||
$(C_JET_SYM_OBJS): CPPFLAGS += -DJEMALLOC_JET -DJEMALLOC_NO_PRIVATE_NAMESPACE
|
||||
$(C_JET_SYMS): $(objroot)src/%.jet.sym: $(objroot)src/%.jet.sym.$(O)
|
||||
$(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c
|
||||
$(C_JET_OBJS): CPPFLAGS += -DJEMALLOC_JET
|
||||
$(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c
|
||||
$(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
|
||||
$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c
|
||||
$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
|
||||
$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c
|
||||
$(C_TESTLIB_ANALYZE_OBJS): $(objroot)test/src/%.analyze.$(O): $(srcroot)test/src/%.c
|
||||
$(C_TESTLIB_ANALYZE_OBJS): CPPFLAGS += -DJEMALLOC_ANALYZE_TEST
|
||||
$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c
|
||||
$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB
|
||||
$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
|
||||
$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
|
||||
$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
|
||||
$(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST
|
||||
$(TESTS_ANALYZE_OBJS): CPPFLAGS += -DJEMALLOC_ANALYZE_TEST
|
||||
$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST
|
||||
$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
|
||||
$(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp
|
||||
$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
|
||||
$(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
|
||||
ifneq ($(IMPORTLIB),$(SO))
|
||||
$(CPP_OBJS) $(C_SYM_OBJS) $(C_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT
|
||||
endif
|
||||
|
||||
# Dependencies.
|
||||
ifndef CC_MM
|
||||
HEADER_DIRS = $(srcroot)include/jemalloc/internal \
|
||||
$(objroot)include/jemalloc $(objroot)include/jemalloc/internal
|
||||
HEADERS = $(filter-out $(PRIVATE_NAMESPACE_HDRS),$(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)))
|
||||
$(C_SYM_OBJS) $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(HEADERS)
|
||||
$(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h
|
||||
endif
|
||||
|
||||
$(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h
|
||||
$(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_ANALYZE_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_ANALYZE_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h
|
||||
|
||||
$(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
|
||||
@mkdir -p $(@D)
|
||||
$(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
|
||||
ifdef CC_MM
|
||||
@$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
|
||||
endif
|
||||
|
||||
$(C_SYMS): %.sym:
|
||||
@mkdir -p $(@D)
|
||||
$(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols.awk > $@
|
||||
|
||||
$(C_JET_SYMS): %.sym:
|
||||
@mkdir -p $(@D)
|
||||
$(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols_jet.awk > $@
|
||||
|
||||
$(objroot)include/jemalloc/internal/private_namespace.gen.h: $(C_SYMS)
|
||||
$(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@
|
||||
|
||||
$(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS)
|
||||
$(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@
|
||||
|
||||
%.h: %.gen.h
|
||||
@if ! `cmp -s $< $@` ; then echo "cp $< $@"; cp $< $@ ; fi
|
||||
|
||||
$(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O):
|
||||
@mkdir -p $(@D)
|
||||
$(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
|
||||
ifdef CC_MM
|
||||
@$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
|
||||
endif
|
||||
|
||||
ifneq ($(SOREV),$(SO))
|
||||
%.$(SO) : %.$(SOREV)
|
||||
@mkdir -p $(@D)
|
||||
ln -sf $(<F) $@
|
||||
endif
|
||||
|
||||
$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(C_PIC_OBJS),$(C_OBJS)) $(if $(PIC_CFLAGS),$(CPP_PIC_OBJS),$(CPP_OBJS))
|
||||
@mkdir -p $(@D)
|
||||
$(CC) $(DSO_LDFLAGS) $(call RPATH,$(RPATH_EXTRA)) $(LDTARGET) $+ $(LDFLAGS) $(LIBS) $(EXTRA_LDFLAGS)
|
||||
|
||||
$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(C_PIC_OBJS) $(CPP_PIC_OBJS)
|
||||
$(objroot)lib/$(LIBJEMALLOC).$(A) : $(C_OBJS) $(CPP_OBJS)
|
||||
$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(C_OBJS) $(CPP_OBJS)
|
||||
|
||||
$(STATIC_LIBS):
|
||||
@mkdir -p $(@D)
|
||||
$(AR) $(ARFLAGS)@AROUT@ $+
|
||||
|
||||
$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
|
||||
@mkdir -p $(@D)
|
||||
$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
|
||||
|
||||
$(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
|
||||
@mkdir -p $(@D)
|
||||
$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -pthread -lstdc++,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)
|
||||
|
||||
$(objroot)test/integration/cpp/%$(EXE): $(objroot)test/integration/cpp/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
|
||||
@mkdir -p $(@D)
|
||||
$(CXX) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
|
||||
|
||||
$(objroot)test/analyze/%$(EXE): $(objroot)test/analyze/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_ANALYZE_OBJS)
|
||||
@mkdir -p $(@D)
|
||||
$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
|
||||
|
||||
$(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
|
||||
@mkdir -p $(@D)
|
||||
$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
|
||||
|
||||
build_lib_shared: $(DSOS)
|
||||
build_lib_static: $(STATIC_LIBS)
|
||||
ifeq ($(enable_shared), 1)
|
||||
build_lib: build_lib_shared
|
||||
endif
|
||||
ifeq ($(enable_static), 1)
|
||||
build_lib: build_lib_static
|
||||
endif
|
||||
|
||||
install_bin:
|
||||
$(INSTALL) -d $(BINDIR)
|
||||
@for b in $(BINS); do \
|
||||
$(INSTALL) -v -m 755 $$b $(BINDIR); \
|
||||
done
|
||||
|
||||
install_include:
|
||||
$(INSTALL) -d $(INCLUDEDIR)/jemalloc
|
||||
@for h in $(C_HDRS); do \
|
||||
$(INSTALL) -v -m 644 $$h $(INCLUDEDIR)/jemalloc; \
|
||||
done
|
||||
|
||||
install_lib_shared: $(DSOS)
|
||||
$(INSTALL) -d $(LIBDIR)
|
||||
$(INSTALL) -v -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
|
||||
ifneq ($(SOREV),$(SO))
|
||||
ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO)
|
||||
endif
|
||||
|
||||
install_lib_static: $(STATIC_LIBS)
|
||||
$(INSTALL) -d $(LIBDIR)
|
||||
@for l in $(STATIC_LIBS); do \
|
||||
$(INSTALL) -v -m 755 $$l $(LIBDIR); \
|
||||
done
|
||||
|
||||
install_lib_pc: $(PC)
|
||||
$(INSTALL) -d $(LIBDIR)/pkgconfig
|
||||
@for l in $(PC); do \
|
||||
$(INSTALL) -v -m 644 $$l $(LIBDIR)/pkgconfig; \
|
||||
done
|
||||
|
||||
ifeq ($(enable_shared), 1)
|
||||
install_lib: install_lib_shared
|
||||
endif
|
||||
ifeq ($(enable_static), 1)
|
||||
install_lib: install_lib_static
|
||||
endif
|
||||
install_lib: install_lib_pc
|
||||
|
||||
install_doc_html: build_doc_html
|
||||
$(INSTALL) -d $(DATADIR)/doc/jemalloc$(install_suffix)
|
||||
@for d in $(DOCS_HTML); do \
|
||||
$(INSTALL) -v -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
|
||||
done
|
||||
|
||||
install_doc_man: build_doc_man
|
||||
$(INSTALL) -d $(MANDIR)/man3
|
||||
@for d in $(DOCS_MAN3); do \
|
||||
$(INSTALL) -v -m 644 $$d $(MANDIR)/man3; \
|
||||
done
|
||||
|
||||
install_doc: install_doc_html install_doc_man
|
||||
|
||||
install: install_bin install_include install_lib
|
||||
|
||||
ifeq ($(enable_doc), 1)
|
||||
install: install_doc
|
||||
endif
|
||||
|
||||
uninstall_bin:
|
||||
$(RM) -v $(foreach b,$(notdir $(BINS)),$(BINDIR)/$(b))
|
||||
|
||||
uninstall_include:
|
||||
$(RM) -v $(foreach h,$(notdir $(C_HDRS)),$(INCLUDEDIR)/jemalloc/$(h))
|
||||
rmdir -v $(INCLUDEDIR)/jemalloc
|
||||
|
||||
uninstall_lib_shared:
|
||||
$(RM) -v $(LIBDIR)/$(LIBJEMALLOC).$(SOREV)
|
||||
ifneq ($(SOREV),$(SO))
|
||||
$(RM) -v $(LIBDIR)/$(LIBJEMALLOC).$(SO)
|
||||
endif
|
||||
|
||||
uninstall_lib_static:
|
||||
$(RM) -v $(foreach l,$(notdir $(STATIC_LIBS)),$(LIBDIR)/$(l))
|
||||
|
||||
uninstall_lib_pc:
|
||||
$(RM) -v $(foreach p,$(notdir $(PC)),$(LIBDIR)/pkgconfig/$(p))
|
||||
|
||||
ifeq ($(enable_shared), 1)
|
||||
uninstall_lib: uninstall_lib_shared
|
||||
endif
|
||||
ifeq ($(enable_static), 1)
|
||||
uninstall_lib: uninstall_lib_static
|
||||
endif
|
||||
uninstall_lib: uninstall_lib_pc
|
||||
|
||||
uninstall_doc_html:
|
||||
$(RM) -v $(foreach d,$(notdir $(DOCS_HTML)),$(DATADIR)/doc/jemalloc$(install_suffix)/$(d))
|
||||
rmdir -v $(DATADIR)/doc/jemalloc$(install_suffix)
|
||||
|
||||
uninstall_doc_man:
|
||||
$(RM) -v $(foreach d,$(notdir $(DOCS_MAN3)),$(MANDIR)/man3/$(d))
|
||||
|
||||
uninstall_doc: uninstall_doc_html uninstall_doc_man
|
||||
|
||||
uninstall: uninstall_bin uninstall_include uninstall_lib
|
||||
|
||||
ifeq ($(enable_doc), 1)
|
||||
uninstall: uninstall_doc
|
||||
endif
|
||||
|
||||
tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE))
|
||||
tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE)) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%$(EXE))
|
||||
tests_analyze: $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%$(EXE))
|
||||
tests_stress: $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%$(EXE))
|
||||
tests: tests_unit tests_integration tests_analyze tests_stress
|
||||
|
||||
check_unit_dir:
|
||||
@mkdir -p $(objroot)test/unit
|
||||
check_integration_dir:
|
||||
@mkdir -p $(objroot)test/integration
|
||||
analyze_dir:
|
||||
@mkdir -p $(objroot)test/analyze
|
||||
stress_dir:
|
||||
@mkdir -p $(objroot)test/stress
|
||||
check_dir: check_unit_dir check_integration_dir
|
||||
|
||||
check_unit: tests_unit check_unit_dir
|
||||
$(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
|
||||
check_integration_prof: tests_integration check_integration_dir
|
||||
ifeq ($(enable_prof), 1)
|
||||
$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
|
||||
$(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
|
||||
endif
|
||||
check_integration_decay: tests_integration check_integration_dir
|
||||
$(MALLOC_CONF)="dirty_decay_ms:-1,muzzy_decay_ms:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
|
||||
$(MALLOC_CONF)="dirty_decay_ms:0,muzzy_decay_ms:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
|
||||
check_integration: tests_integration check_integration_dir
|
||||
$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
|
||||
analyze: tests_analyze analyze_dir
|
||||
ifeq ($(enable_prof), 1)
|
||||
$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%)
|
||||
else
|
||||
$(SHELL) $(objroot)test/test.sh $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%)
|
||||
endif
|
||||
stress: tests_stress stress_dir
|
||||
$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
|
||||
check: check_unit check_integration check_integration_decay check_integration_prof
|
||||
|
||||
clean:
|
||||
rm -f $(PRIVATE_NAMESPACE_HDRS)
|
||||
rm -f $(PRIVATE_NAMESPACE_GEN_HDRS)
|
||||
rm -f $(C_SYM_OBJS)
|
||||
rm -f $(C_SYMS)
|
||||
rm -f $(C_OBJS)
|
||||
rm -f $(CPP_OBJS)
|
||||
rm -f $(C_PIC_OBJS)
|
||||
rm -f $(CPP_PIC_OBJS)
|
||||
rm -f $(C_JET_SYM_OBJS)
|
||||
rm -f $(C_JET_SYMS)
|
||||
rm -f $(C_JET_OBJS)
|
||||
rm -f $(C_TESTLIB_OBJS)
|
||||
rm -f $(C_SYM_OBJS:%.$(O)=%.d)
|
||||
rm -f $(C_OBJS:%.$(O)=%.d)
|
||||
rm -f $(CPP_OBJS:%.$(O)=%.d)
|
||||
rm -f $(C_PIC_OBJS:%.$(O)=%.d)
|
||||
rm -f $(CPP_PIC_OBJS:%.$(O)=%.d)
|
||||
rm -f $(C_JET_SYM_OBJS:%.$(O)=%.d)
|
||||
rm -f $(C_JET_OBJS:%.$(O)=%.d)
|
||||
rm -f $(C_TESTLIB_OBJS:%.$(O)=%.d)
|
||||
rm -f $(TESTS_OBJS:%.$(O)=%$(EXE))
|
||||
rm -f $(TESTS_OBJS)
|
||||
rm -f $(TESTS_OBJS:%.$(O)=%.d)
|
||||
rm -f $(TESTS_OBJS:%.$(O)=%.out)
|
||||
rm -f $(TESTS_CPP_OBJS:%.$(O)=%$(EXE))
|
||||
rm -f $(TESTS_CPP_OBJS)
|
||||
rm -f $(TESTS_CPP_OBJS:%.$(O)=%.d)
|
||||
rm -f $(TESTS_CPP_OBJS:%.$(O)=%.out)
|
||||
rm -f $(DSOS) $(STATIC_LIBS)
|
||||
|
||||
distclean: clean
|
||||
rm -f $(objroot)bin/jemalloc-config
|
||||
rm -f $(objroot)bin/jemalloc.sh
|
||||
rm -f $(objroot)bin/jeprof
|
||||
rm -f $(objroot)config.log
|
||||
rm -f $(objroot)config.status
|
||||
rm -f $(objroot)config.stamp
|
||||
rm -f $(cfghdrs_out)
|
||||
rm -f $(cfgoutputs_out)
|
||||
|
||||
relclean: distclean
|
||||
rm -f $(objroot)configure
|
||||
rm -f $(objroot)VERSION
|
||||
rm -f $(DOCS_HTML)
|
||||
rm -f $(DOCS_MAN3)
|
||||
|
||||
#===============================================================================
|
||||
# Re-configuration rules.
|
||||
|
||||
ifeq ($(enable_autogen), 1)
|
||||
$(srcroot)configure : $(srcroot)configure.ac
|
||||
cd ./$(srcroot) && $(AUTOCONF)
|
||||
|
||||
$(objroot)config.status : $(srcroot)configure
|
||||
./$(objroot)config.status --recheck
|
||||
|
||||
$(srcroot)config.stamp.in : $(srcroot)configure.ac
|
||||
echo stamp > $(srcroot)config.stamp.in
|
||||
|
||||
$(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure
|
||||
./$(objroot)config.status
|
||||
@touch $@
|
||||
|
||||
# There must be some action in order for make to re-read Makefile when it is
|
||||
# out of date.
|
||||
$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp
|
||||
@true
|
||||
endif
|
20
BeefRT/JEMalloc/README
Normal file
20
BeefRT/JEMalloc/README
Normal file
|
@ -0,0 +1,20 @@
|
|||
jemalloc is a general purpose malloc(3) implementation that emphasizes
|
||||
fragmentation avoidance and scalable concurrency support. jemalloc first came
|
||||
into use as the FreeBSD libc allocator in 2005, and since then it has found its
|
||||
way into numerous applications that rely on its predictable behavior. In 2010
|
||||
jemalloc development efforts broadened to include developer support features
|
||||
such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc
|
||||
releases continue to be integrated back into FreeBSD, and therefore versatility
|
||||
remains critical. Ongoing development efforts trend toward making jemalloc
|
||||
among the best allocators for a broad range of demanding applications, and
|
||||
eliminating/mitigating weaknesses that have practical repercussions for real
|
||||
world applications.
|
||||
|
||||
The COPYING file contains copyright and licensing information.
|
||||
|
||||
The INSTALL file contains information on how to configure, build, and install
|
||||
jemalloc.
|
||||
|
||||
The ChangeLog file contains a brief summary of changes for each release.
|
||||
|
||||
URL: http://jemalloc.net/
|
1
BeefRT/JEMalloc/VERSION
Normal file
1
BeefRT/JEMalloc/VERSION
Normal file
|
@ -0,0 +1 @@
|
|||
5.3.0-0-g54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
|
17
BeefRT/JEMalloc/autogen.sh
Normal file
17
BeefRT/JEMalloc/autogen.sh
Normal file
|
@ -0,0 +1,17 @@
|
|||
#!/bin/sh
|
||||
|
||||
for i in autoconf; do
|
||||
echo "$i"
|
||||
$i
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error $? in $i"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
echo "./configure --enable-autogen $@"
|
||||
./configure --enable-autogen $@
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error $? in ./configure"
|
||||
exit 1
|
||||
fi
|
83
BeefRT/JEMalloc/bin/jemalloc-config.in
Normal file

@@ -0,0 +1,83 @@
#!/bin/sh

usage() {
	cat <<EOF
Usage:
  @BINDIR@/jemalloc-config <option>
Options:
  --help | -h  : Print usage.
  --version    : Print jemalloc version.
  --revision   : Print shared library revision number.
  --config     : Print configure options used to build jemalloc.
  --prefix     : Print installation directory prefix.
  --bindir     : Print binary installation directory.
  --datadir    : Print data installation directory.
  --includedir : Print include installation directory.
  --libdir     : Print library installation directory.
  --mandir     : Print manual page installation directory.
  --cc         : Print compiler used to build jemalloc.
  --cflags     : Print compiler flags used to build jemalloc.
  --cppflags   : Print preprocessor flags used to build jemalloc.
  --cxxflags   : Print C++ compiler flags used to build jemalloc.
  --ldflags    : Print library flags used to build jemalloc.
  --libs       : Print libraries jemalloc was linked against.
EOF
}

prefix="@prefix@"
exec_prefix="@exec_prefix@"

case "$1" in
--help | -h)
	usage
	exit 0
	;;
--version)
	echo "@jemalloc_version@"
	;;
--revision)
	echo "@rev@"
	;;
--config)
	echo "@CONFIG@"
	;;
--prefix)
	echo "@PREFIX@"
	;;
--bindir)
	echo "@BINDIR@"
	;;
--datadir)
	echo "@DATADIR@"
	;;
--includedir)
	echo "@INCLUDEDIR@"
	;;
--libdir)
	echo "@LIBDIR@"
	;;
--mandir)
	echo "@MANDIR@"
	;;
--cc)
	echo "@CC@"
	;;
--cflags)
	echo "@CFLAGS@"
	;;
--cppflags)
	echo "@CPPFLAGS@"
	;;
--cxxflags)
	echo "@CXXFLAGS@"
	;;
--ldflags)
	echo "@LDFLAGS@ @EXTRA_LDFLAGS@"
	;;
--libs)
	echo "@LIBS@"
	;;
*)
	usage
	exit 1
esac
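This script is mainly a convenience for build systems that need to recover how an installed jemalloc was built. As an illustration (not taken from this commit), a link line assembled from its output might look like: cc app.c -o app -L"$(jemalloc-config --libdir)" -Wl,-rpath,"$(jemalloc-config --libdir)" -ljemalloc $(jemalloc-config --libs), where app.c stands in for any program.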
9
BeefRT/JEMalloc/bin/jemalloc.sh.in
Normal file

@@ -0,0 +1,9 @@
#!/bin/sh

prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@

@LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@
export @LD_PRELOAD_VAR@
exec "$@"
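The wrapper points @LD_PRELOAD_VAR@ at the installed libjemalloc shared object, exports it, and execs the remaining arguments, so an unmodified binary can be run under jemalloc without relinking; for example, jemalloc.sh ./my_app arg1, where my_app is a placeholder for any program.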
5723
BeefRT/JEMalloc/bin/jeprof.in
Normal file

File diff suppressed because it is too large
250
BeefRT/JEMalloc/build-aux/install-sh
Normal file

@ -0,0 +1,250 @@
|
|||
#! /bin/sh
|
||||
#
|
||||
# install - install a program, script, or datafile
|
||||
# This comes from X11R5 (mit/util/scripts/install.sh).
|
||||
#
|
||||
# Copyright 1991 by the Massachusetts Institute of Technology
|
||||
#
|
||||
# Permission to use, copy, modify, distribute, and sell this software and its
|
||||
# documentation for any purpose is hereby granted without fee, provided that
|
||||
# the above copyright notice appear in all copies and that both that
|
||||
# copyright notice and this permission notice appear in supporting
|
||||
# documentation, and that the name of M.I.T. not be used in advertising or
|
||||
# publicity pertaining to distribution of the software without specific,
|
||||
# written prior permission. M.I.T. makes no representations about the
|
||||
# suitability of this software for any purpose. It is provided "as is"
|
||||
# without express or implied warranty.
|
||||
#
|
||||
# Calling this script install-sh is preferred over install.sh, to prevent
|
||||
# `make' implicit rules from creating a file called install from it
|
||||
# when there is no Makefile.
|
||||
#
|
||||
# This script is compatible with the BSD install script, but was written
|
||||
# from scratch. It can only install one file at a time, a restriction
|
||||
# shared with many OS's install programs.
|
||||
|
||||
|
||||
# set DOITPROG to echo to test this script
|
||||
|
||||
# Don't use :- since 4.3BSD and earlier shells don't like it.
|
||||
doit="${DOITPROG-}"
|
||||
|
||||
|
||||
# put in absolute paths if you don't have them in your path; or use env. vars.
|
||||
|
||||
mvprog="${MVPROG-mv}"
|
||||
cpprog="${CPPROG-cp}"
|
||||
chmodprog="${CHMODPROG-chmod}"
|
||||
chownprog="${CHOWNPROG-chown}"
|
||||
chgrpprog="${CHGRPPROG-chgrp}"
|
||||
stripprog="${STRIPPROG-strip}"
|
||||
rmprog="${RMPROG-rm}"
|
||||
mkdirprog="${MKDIRPROG-mkdir}"
|
||||
|
||||
transformbasename=""
|
||||
transform_arg=""
|
||||
instcmd="$mvprog"
|
||||
chmodcmd="$chmodprog 0755"
|
||||
chowncmd=""
|
||||
chgrpcmd=""
|
||||
stripcmd=""
|
||||
rmcmd="$rmprog -f"
|
||||
mvcmd="$mvprog"
|
||||
src=""
|
||||
dst=""
|
||||
dir_arg=""
|
||||
|
||||
while [ x"$1" != x ]; do
|
||||
case $1 in
|
||||
-c) instcmd="$cpprog"
|
||||
shift
|
||||
continue;;
|
||||
|
||||
-d) dir_arg=true
|
||||
shift
|
||||
continue;;
|
||||
|
||||
-m) chmodcmd="$chmodprog $2"
|
||||
shift
|
||||
shift
|
||||
continue;;
|
||||
|
||||
-o) chowncmd="$chownprog $2"
|
||||
shift
|
||||
shift
|
||||
continue;;
|
||||
|
||||
-g) chgrpcmd="$chgrpprog $2"
|
||||
shift
|
||||
shift
|
||||
continue;;
|
||||
|
||||
-s) stripcmd="$stripprog"
|
||||
shift
|
||||
continue;;
|
||||
|
||||
-t=*) transformarg=`echo $1 | sed 's/-t=//'`
|
||||
shift
|
||||
continue;;
|
||||
|
||||
-b=*) transformbasename=`echo $1 | sed 's/-b=//'`
|
||||
shift
|
||||
continue;;
|
||||
|
||||
*) if [ x"$src" = x ]
|
||||
then
|
||||
src=$1
|
||||
else
|
||||
# this colon is to work around a 386BSD /bin/sh bug
|
||||
:
|
||||
dst=$1
|
||||
fi
|
||||
shift
|
||||
continue;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ x"$src" = x ]
|
||||
then
|
||||
echo "install: no input file specified"
|
||||
exit 1
|
||||
else
|
||||
true
|
||||
fi
|
||||
|
||||
if [ x"$dir_arg" != x ]; then
|
||||
dst=$src
|
||||
src=""
|
||||
|
||||
if [ -d $dst ]; then
|
||||
instcmd=:
|
||||
else
|
||||
instcmd=mkdir
|
||||
fi
|
||||
else
|
||||
|
||||
# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
|
||||
# might cause directories to be created, which would be especially bad
|
||||
# if $src (and thus $dsttmp) contains '*'.
|
||||
|
||||
if [ -f $src -o -d $src ]
|
||||
then
|
||||
true
|
||||
else
|
||||
echo "install: $src does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ x"$dst" = x ]
|
||||
then
|
||||
echo "install: no destination specified"
|
||||
exit 1
|
||||
else
|
||||
true
|
||||
fi
|
||||
|
||||
# If destination is a directory, append the input filename; if your system
|
||||
# does not like double slashes in filenames, you may need to add some logic
|
||||
|
||||
if [ -d $dst ]
|
||||
then
|
||||
dst="$dst"/`basename $src`
|
||||
else
|
||||
true
|
||||
fi
|
||||
fi
|
||||
|
||||
## this sed command emulates the dirname command
|
||||
dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
|
||||
|
||||
# Make sure that the destination directory exists.
|
||||
# this part is taken from Noah Friedman's mkinstalldirs script
|
||||
|
||||
# Skip lots of stat calls in the usual case.
|
||||
if [ ! -d "$dstdir" ]; then
|
||||
defaultIFS='
|
||||
'
|
||||
IFS="${IFS-${defaultIFS}}"
|
||||
|
||||
oIFS="${IFS}"
|
||||
# Some sh's can't handle IFS=/ for some reason.
|
||||
IFS='%'
|
||||
set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
|
||||
IFS="${oIFS}"
|
||||
|
||||
pathcomp=''
|
||||
|
||||
while [ $# -ne 0 ] ; do
|
||||
pathcomp="${pathcomp}${1}"
|
||||
shift
|
||||
|
||||
if [ ! -d "${pathcomp}" ] ;
|
||||
then
|
||||
$mkdirprog "${pathcomp}"
|
||||
else
|
||||
true
|
||||
fi
|
||||
|
||||
pathcomp="${pathcomp}/"
|
||||
done
|
||||
fi
|
||||
|
||||
if [ x"$dir_arg" != x ]
|
||||
then
|
||||
$doit $instcmd $dst &&
|
||||
|
||||
if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
|
||||
if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
|
||||
if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
|
||||
if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
|
||||
else
|
||||
|
||||
# If we're going to rename the final executable, determine the name now.
|
||||
|
||||
if [ x"$transformarg" = x ]
|
||||
then
|
||||
dstfile=`basename $dst`
|
||||
else
|
||||
dstfile=`basename $dst $transformbasename |
|
||||
sed $transformarg`$transformbasename
|
||||
fi
|
||||
|
||||
# don't allow the sed command to completely eliminate the filename
|
||||
|
||||
if [ x"$dstfile" = x ]
|
||||
then
|
||||
dstfile=`basename $dst`
|
||||
else
|
||||
true
|
||||
fi
|
||||
|
||||
# Make a temp file name in the proper directory.
|
||||
|
||||
dsttmp=$dstdir/#inst.$$#
|
||||
|
||||
# Move or copy the file name to the temp name
|
||||
|
||||
$doit $instcmd $src $dsttmp &&
|
||||
|
||||
trap "rm -f ${dsttmp}" 0 &&
|
||||
|
||||
# and set any options; do chmod last to preserve setuid bits
|
||||
|
||||
# If any of these fail, we abort the whole thing. If we want to
|
||||
# ignore errors from any of these, just make sure not to ignore
|
||||
# errors from the above "$doit $instcmd $src $dsttmp" command.
|
||||
|
||||
if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
|
||||
if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
|
||||
if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
|
||||
if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
|
||||
|
||||
# Now rename the file to the real destination.
|
||||
|
||||
$doit $rmcmd -f $dstdir/$dstfile &&
|
||||
$doit $mvcmd $dsttmp $dstdir/$dstfile
|
||||
|
||||
fi &&
|
||||
|
||||
|
||||
exit 0
|
2669
BeefRT/JEMalloc/configure.ac
Normal file

File diff suppressed because it is too large
23
BeefRT/JEMalloc/include/jemalloc/internal/activity_callback.h
Normal file

@@ -0,0 +1,23 @@
#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H

/*
 * The callback to be executed "periodically", in response to some amount of
 * allocator activity.
 *
 * This callback need not be computing any sort of peak (although that's the
 * intended first use case), but we drive it from the peak counter, so it
 * keeps things tidy to keep it here.
 *
 * The calls to this thunk get driven by the peak_event module.
 */
#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL}
typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
    uint64_t deallocated);
typedef struct activity_callback_thunk_s activity_callback_thunk_t;
struct activity_callback_thunk_s {
	activity_callback_t callback;
	void *uctx;
};

#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */
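To make the shape of this interface concrete, here is a minimal sketch (not part of the commit) of a callback matching activity_callback_t that tracks peak live bytes; my_peak_t, my_activity_cb, and my_thunk are hypothetical names, and how the thunk gets wired into the peak_event module is not shown.

#include <stdint.h>
#include "jemalloc/internal/activity_callback.h"

/* Hypothetical user context: remembers the largest live-byte count seen. */
typedef struct { uint64_t peak; } my_peak_t;

static void
my_activity_cb(void *uctx, uint64_t allocated, uint64_t deallocated) {
	my_peak_t *p = (my_peak_t *)uctx;
	uint64_t live = allocated - deallocated;
	if (live > p->peak) {
		p->peak = live;
	}
}

static my_peak_t my_peak;
/* Field order matches activity_callback_thunk_s: {callback, uctx}. */
static activity_callback_thunk_t my_thunk = {my_activity_cb, &my_peak};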
121
BeefRT/JEMalloc/include/jemalloc/internal/arena_externs.h
Normal file
|
@ -0,0 +1,121 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
|
||||
|
||||
#include "jemalloc/internal/bin.h"
|
||||
#include "jemalloc/internal/div.h"
|
||||
#include "jemalloc/internal/extent_dss.h"
|
||||
#include "jemalloc/internal/hook.h"
|
||||
#include "jemalloc/internal/pages.h"
|
||||
#include "jemalloc/internal/stats.h"
|
||||
|
||||
/*
|
||||
* When the amount of pages to be purged exceeds this amount, deferred purge
|
||||
* should happen.
|
||||
*/
|
||||
#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)
|
||||
|
||||
extern ssize_t opt_dirty_decay_ms;
|
||||
extern ssize_t opt_muzzy_decay_ms;
|
||||
|
||||
extern percpu_arena_mode_t opt_percpu_arena;
|
||||
extern const char *percpu_arena_mode_names[];
|
||||
|
||||
extern div_info_t arena_binind_div_info[SC_NBINS];
|
||||
|
||||
extern malloc_mutex_t arenas_lock;
|
||||
extern emap_t arena_emap_global;
|
||||
|
||||
extern size_t opt_oversize_threshold;
|
||||
extern size_t oversize_threshold;
|
||||
|
||||
/*
|
||||
* arena_bin_offsets[binind] is the offset of the first bin shard for size class
|
||||
* binind.
|
||||
*/
|
||||
extern uint32_t arena_bin_offsets[SC_NBINS];
|
||||
|
||||
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
|
||||
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
|
||||
ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
|
||||
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
|
||||
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
|
||||
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
|
||||
bin_stats_data_t *bstats, arena_stats_large_t *lstats,
|
||||
pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
|
||||
void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
|
||||
edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
|
||||
size_t usize, size_t alignment, bool zero);
|
||||
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
|
||||
edata_t *edata);
|
||||
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
|
||||
edata_t *edata, size_t oldsize);
|
||||
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
|
||||
edata_t *edata, size_t oldsize);
|
||||
bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
|
||||
ssize_t decay_ms);
|
||||
ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
|
||||
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
|
||||
bool all);
|
||||
uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_reset(tsd_t *tsd, arena_t *arena);
|
||||
void arena_destroy(tsd_t *tsd, arena_t *arena);
|
||||
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
|
||||
cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
|
||||
const unsigned nfill);
|
||||
|
||||
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
|
||||
szind_t ind, bool zero);
|
||||
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
|
||||
size_t alignment, bool zero, tcache_t *tcache);
|
||||
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
|
||||
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
|
||||
bool slow_path);
|
||||
void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
|
||||
|
||||
void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
|
||||
edata_t *slab, bin_t *bin);
|
||||
void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
|
||||
edata_t *slab, bin_t *bin);
|
||||
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
|
||||
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
|
||||
size_t extra, bool zero, size_t *newsize);
|
||||
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
|
||||
size_t size, size_t alignment, bool zero, tcache_t *tcache,
|
||||
hook_ralloc_args_t *hook_args);
|
||||
dss_prec_t arena_dss_prec_get(arena_t *arena);
|
||||
ehooks_t *arena_get_ehooks(arena_t *arena);
|
||||
extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
|
||||
extent_hooks_t *extent_hooks);
|
||||
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
|
||||
ssize_t arena_dirty_decay_ms_default_get(void);
|
||||
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
|
||||
ssize_t arena_muzzy_decay_ms_default_get(void);
|
||||
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
|
||||
bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
|
||||
size_t *old_limit, size_t *new_limit);
|
||||
unsigned arena_nthreads_get(arena_t *arena, bool internal);
|
||||
void arena_nthreads_inc(arena_t *arena, bool internal);
|
||||
void arena_nthreads_dec(arena_t *arena, bool internal);
|
||||
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
|
||||
bool arena_init_huge(void);
|
||||
bool arena_is_huge(unsigned arena_ind);
|
||||
arena_t *arena_choose_huge(tsd_t *tsd);
|
||||
bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
|
||||
unsigned *binshard);
|
||||
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
|
||||
void **ptrs, size_t nfill, bool zero);
|
||||
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
|
||||
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
|
||||
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
|
24
BeefRT/JEMalloc/include/jemalloc/internal/arena_inlines_a.h
Normal file

@@ -0,0 +1,24 @@
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H

static inline unsigned
arena_ind_get(const arena_t *arena) {
	return arena->ind;
}

static inline void
arena_internal_add(arena_t *arena, size_t size) {
	atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}

static inline void
arena_internal_sub(arena_t *arena, size_t size) {
	atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}

static inline size_t
arena_internal_get(arena_t *arena) {
	return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
550
BeefRT/JEMalloc/include/jemalloc/internal/arena_inlines_b.h
Normal file
|
@ -0,0 +1,550 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
|
||||
|
||||
#include "jemalloc/internal/div.h"
|
||||
#include "jemalloc/internal/emap.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/rtree.h"
|
||||
#include "jemalloc/internal/safety_check.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
|
||||
static inline arena_t *
|
||||
arena_get_from_edata(edata_t *edata) {
|
||||
return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
|
||||
ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE arena_t *
|
||||
arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
|
||||
if (arena != NULL) {
|
||||
return arena;
|
||||
}
|
||||
|
||||
/*
|
||||
* For huge allocations, use the dedicated huge arena if both are true:
|
||||
* 1) is using auto arena selection (i.e. arena == NULL), and 2) the
|
||||
* thread is not assigned to a manual arena.
|
||||
*/
|
||||
if (unlikely(size >= oversize_threshold)) {
|
||||
arena_t *tsd_arena = tsd_arena_get(tsd);
|
||||
if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
|
||||
return arena_choose_huge(tsd);
|
||||
}
|
||||
}
|
||||
|
||||
return arena_choose(tsd, NULL);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
|
||||
prof_info_t *prof_info, bool reset_recent) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
assert(prof_info != NULL);
|
||||
|
||||
edata_t *edata = NULL;
|
||||
bool is_slab;
|
||||
|
||||
/* Static check. */
|
||||
if (alloc_ctx == NULL) {
|
||||
edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
|
||||
ptr);
|
||||
is_slab = edata_slab_get(edata);
|
||||
} else if (unlikely(!(is_slab = alloc_ctx->slab))) {
|
||||
edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
|
||||
ptr);
|
||||
}
|
||||
|
||||
if (unlikely(!is_slab)) {
|
||||
/* edata must have been initialized at this point. */
|
||||
assert(edata != NULL);
|
||||
large_prof_info_get(tsd, edata, prof_info, reset_recent);
|
||||
} else {
|
||||
prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
|
||||
/*
|
||||
* No need to set other fields in prof_info; they will never be
|
||||
* accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U.
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
|
||||
emap_alloc_ctx_t *alloc_ctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
/* Static check. */
|
||||
if (alloc_ctx == NULL) {
|
||||
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
|
||||
&arena_emap_global, ptr);
|
||||
if (unlikely(!edata_slab_get(edata))) {
|
||||
large_prof_tctx_reset(edata);
|
||||
}
|
||||
} else {
|
||||
if (unlikely(!alloc_ctx->slab)) {
|
||||
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
|
||||
&arena_emap_global, ptr);
|
||||
large_prof_tctx_reset(edata);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
|
||||
ptr);
|
||||
assert(!edata_slab_get(edata));
|
||||
|
||||
large_prof_tctx_reset(edata);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
|
||||
size_t size) {
|
||||
cassert(config_prof);
|
||||
|
||||
assert(!edata_slab_get(edata));
|
||||
large_prof_info_set(edata, tctx, size);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
|
||||
if (unlikely(tsdn_null(tsdn))) {
|
||||
return;
|
||||
}
|
||||
tsd_t *tsd = tsdn_tsd(tsdn);
|
||||
/*
|
||||
* We use the ticker_geom_t to avoid having per-arena state in the tsd.
|
||||
* Instead of having a countdown-until-decay timer running for every
|
||||
* arena in every thread, we flip a coin once per tick, whose
|
||||
* probability of coming up heads is 1/nticks; this is effectively the
|
||||
* operation of the ticker_geom_t. Each arena has the same chance of a
|
||||
* coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can
|
||||
* use a single ticker for all of them.
|
||||
*/
|
||||
ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd);
|
||||
uint64_t *prng_state = tsd_prng_statep_get(tsd);
|
||||
if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks))) {
|
||||
arena_decay(tsdn, arena, false, false);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
|
||||
arena_decay_ticks(tsdn, arena, 1);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
|
||||
tcache_t *tcache, bool slow_path) {
|
||||
assert(!tsdn_null(tsdn) || tcache == NULL);
|
||||
|
||||
if (likely(tcache != NULL)) {
|
||||
if (likely(size <= SC_SMALL_MAXCLASS)) {
|
||||
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
|
||||
tcache, size, ind, zero, slow_path);
|
||||
}
|
||||
if (likely(size <= tcache_maxclass)) {
|
||||
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
|
||||
tcache, size, ind, zero, slow_path);
|
||||
}
|
||||
/* (size > tcache_maxclass) case falls through. */
|
||||
assert(size > tcache_maxclass);
|
||||
}
|
||||
|
||||
return arena_malloc_hard(tsdn, arena, size, ind, zero);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE arena_t *
|
||||
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
|
||||
unsigned arena_ind = edata_arena_ind_get(edata);
|
||||
return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
arena_salloc(tsdn_t *tsdn, const void *ptr) {
|
||||
assert(ptr != NULL);
|
||||
emap_alloc_ctx_t alloc_ctx;
|
||||
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
|
||||
assert(alloc_ctx.szind != SC_NSIZES);
|
||||
|
||||
return sz_index2size(alloc_ctx.szind);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
/*
|
||||
* Return 0 if ptr is not within an extent managed by jemalloc. This
|
||||
* function has two extra costs relative to isalloc():
|
||||
* - The rtree calls cannot claim to be dependent lookups, which induces
|
||||
* rtree lookup load dependencies.
|
||||
* - The lookup may fail, so there is an extra branch to check for
|
||||
* failure.
|
||||
*/
|
||||
|
||||
emap_full_alloc_ctx_t full_alloc_ctx;
|
||||
bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
|
||||
ptr, &full_alloc_ctx);
|
||||
if (missing) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (full_alloc_ctx.edata == NULL) {
|
||||
return 0;
|
||||
}
|
||||
assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
|
||||
/* Only slab members should be looked up via interior pointers. */
|
||||
assert(edata_addr_get(full_alloc_ctx.edata) == ptr
|
||||
|| edata_slab_get(full_alloc_ctx.edata));
|
||||
|
||||
assert(full_alloc_ctx.szind != SC_NSIZES);
|
||||
|
||||
return sz_index2size(full_alloc_ctx.szind);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) {
|
||||
if (!config_opt_safety_checks) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Eagerly detect double free and sized dealloc bugs for large sizes.
|
||||
* The cost is low enough (as edata will be accessed anyway) to be
|
||||
* enabled all the time.
|
||||
*/
|
||||
if (unlikely(edata == NULL ||
|
||||
edata_state_get(edata) != extent_state_active)) {
|
||||
safety_check_fail("Invalid deallocation detected: "
|
||||
"pages being freed (%p) not currently active, "
|
||||
"possibly caused by double free bugs.",
|
||||
(uintptr_t)edata_addr_get(edata));
|
||||
return true;
|
||||
}
|
||||
size_t input_size = sz_index2size(szind);
|
||||
if (unlikely(input_size != edata_usize_get(edata))) {
|
||||
safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
|
||||
/* true_size */ edata_usize_get(edata), input_size);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
|
||||
if (config_prof && unlikely(szind < SC_NBINS)) {
|
||||
arena_dalloc_promoted(tsdn, ptr, NULL, true);
|
||||
} else {
|
||||
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
|
||||
ptr);
|
||||
if (large_dalloc_safety_checks(edata, ptr, szind)) {
|
||||
/* See the comment in isfree. */
|
||||
return;
|
||||
}
|
||||
large_dalloc(tsdn, edata);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
|
||||
assert(ptr != NULL);
|
||||
|
||||
emap_alloc_ctx_t alloc_ctx;
|
||||
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
|
||||
|
||||
if (config_debug) {
|
||||
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
|
||||
ptr);
|
||||
assert(alloc_ctx.szind == edata_szind_get(edata));
|
||||
assert(alloc_ctx.szind < SC_NSIZES);
|
||||
assert(alloc_ctx.slab == edata_slab_get(edata));
|
||||
}
|
||||
|
||||
if (likely(alloc_ctx.slab)) {
|
||||
/* Small allocation. */
|
||||
arena_dalloc_small(tsdn, ptr);
|
||||
} else {
|
||||
arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
|
||||
bool slow_path) {
|
||||
if (szind < nhbins) {
|
||||
if (config_prof && unlikely(szind < SC_NBINS)) {
|
||||
arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
|
||||
} else {
|
||||
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
|
||||
slow_path);
|
||||
}
|
||||
} else {
|
||||
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
|
||||
ptr);
|
||||
if (large_dalloc_safety_checks(edata, ptr, szind)) {
|
||||
/* See the comment in isfree. */
|
||||
return;
|
||||
}
|
||||
large_dalloc(tsdn, edata);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
|
||||
emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
|
||||
assert(!tsdn_null(tsdn) || tcache == NULL);
|
||||
assert(ptr != NULL);
|
||||
|
||||
if (unlikely(tcache == NULL)) {
|
||||
arena_dalloc_no_tcache(tsdn, ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
emap_alloc_ctx_t alloc_ctx;
|
||||
if (caller_alloc_ctx != NULL) {
|
||||
alloc_ctx = *caller_alloc_ctx;
|
||||
} else {
|
||||
util_assume(!tsdn_null(tsdn));
|
||||
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
|
||||
&alloc_ctx);
|
||||
}
|
||||
|
||||
if (config_debug) {
|
||||
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
|
||||
ptr);
|
||||
assert(alloc_ctx.szind == edata_szind_get(edata));
|
||||
assert(alloc_ctx.szind < SC_NSIZES);
|
||||
assert(alloc_ctx.slab == edata_slab_get(edata));
|
||||
}
|
||||
|
||||
if (likely(alloc_ctx.slab)) {
|
||||
/* Small allocation. */
|
||||
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
|
||||
alloc_ctx.szind, slow_path);
|
||||
} else {
|
||||
arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
|
||||
slow_path);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
|
||||
assert(ptr != NULL);
|
||||
assert(size <= SC_LARGE_MAXCLASS);
|
||||
|
||||
emap_alloc_ctx_t alloc_ctx;
|
||||
if (!config_prof || !opt_prof) {
|
||||
/*
|
||||
* There is no risk of being confused by a promoted sampled
|
||||
* object, so base szind and slab on the given size.
|
||||
*/
|
||||
alloc_ctx.szind = sz_size2index(size);
|
||||
alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
|
||||
}
|
||||
|
||||
if ((config_prof && opt_prof) || config_debug) {
|
||||
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
|
||||
&alloc_ctx);
|
||||
|
||||
assert(alloc_ctx.szind == sz_size2index(size));
|
||||
assert((config_prof && opt_prof)
|
||||
|| alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
|
||||
|
||||
if (config_debug) {
|
||||
edata_t *edata = emap_edata_lookup(tsdn,
|
||||
&arena_emap_global, ptr);
|
||||
assert(alloc_ctx.szind == edata_szind_get(edata));
|
||||
assert(alloc_ctx.slab == edata_slab_get(edata));
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(alloc_ctx.slab)) {
|
||||
/* Small allocation. */
|
||||
arena_dalloc_small(tsdn, ptr);
|
||||
} else {
|
||||
arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
|
||||
emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
|
||||
assert(!tsdn_null(tsdn) || tcache == NULL);
|
||||
assert(ptr != NULL);
|
||||
assert(size <= SC_LARGE_MAXCLASS);
|
||||
|
||||
if (unlikely(tcache == NULL)) {
|
||||
arena_sdalloc_no_tcache(tsdn, ptr, size);
|
||||
return;
|
||||
}
|
||||
|
||||
emap_alloc_ctx_t alloc_ctx;
|
||||
if (config_prof && opt_prof) {
|
||||
if (caller_alloc_ctx == NULL) {
|
||||
/* Uncommon case and should be a static check. */
|
||||
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
|
||||
&alloc_ctx);
|
||||
assert(alloc_ctx.szind == sz_size2index(size));
|
||||
} else {
|
||||
alloc_ctx = *caller_alloc_ctx;
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* There is no risk of being confused by a promoted sampled
|
||||
* object, so base szind and slab on the given size.
|
||||
*/
|
||||
alloc_ctx.szind = sz_size2index(size);
|
||||
alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
|
||||
}
|
||||
|
||||
if (config_debug) {
|
||||
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
|
||||
ptr);
|
||||
assert(alloc_ctx.szind == edata_szind_get(edata));
|
||||
assert(alloc_ctx.slab == edata_slab_get(edata));
|
||||
}
|
||||
|
||||
if (likely(alloc_ctx.slab)) {
|
||||
/* Small allocation. */
|
||||
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
|
||||
alloc_ctx.szind, slow_path);
|
||||
} else {
|
||||
arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
|
||||
slow_path);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
|
||||
size_t alignment) {
|
||||
assert(edata_base_get(edata) == edata_addr_get(edata));
|
||||
|
||||
if (alignment < PAGE) {
|
||||
unsigned lg_range = LG_PAGE -
|
||||
lg_floor(CACHELINE_CEILING(alignment));
|
||||
size_t r;
|
||||
if (!tsdn_null(tsdn)) {
|
||||
tsd_t *tsd = tsdn_tsd(tsdn);
|
||||
r = (size_t)prng_lg_range_u64(
|
||||
tsd_prng_statep_get(tsd), lg_range);
|
||||
} else {
|
||||
uint64_t stack_value = (uint64_t)(uintptr_t)&r;
|
||||
r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
|
||||
}
|
||||
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
|
||||
lg_range);
|
||||
edata->e_addr = (void *)((uintptr_t)edata->e_addr +
|
||||
random_offset);
|
||||
assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
|
||||
edata->e_addr);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The dalloc bin info contains just the information that the common paths need
|
||||
* during tcache flushes. By force-inlining these paths, and using local copies
|
||||
* of data (so that the compiler knows it's constant), we avoid a whole bunch of
|
||||
* redundant loads and stores by leaving this information in registers.
|
||||
*/
|
||||
typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t;
|
||||
struct arena_dalloc_bin_locked_info_s {
|
||||
div_info_t div_info;
|
||||
uint32_t nregs;
|
||||
uint64_t ndalloc;
|
||||
};
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind,
|
||||
edata_t *slab, const void *ptr) {
|
||||
size_t diff, regind;
|
||||
|
||||
/* Freeing a pointer outside the slab can cause assertion failure. */
|
||||
assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
|
||||
assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
|
||||
/* Freeing an interior pointer can cause assertion failure. */
|
||||
assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
|
||||
(uintptr_t)bin_infos[binind].reg_size == 0);
|
||||
|
||||
diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
|
||||
|
||||
/* Avoid doing division with a variable divisor. */
|
||||
regind = div_compute(&info->div_info, diff);
|
||||
|
||||
assert(regind < bin_infos[binind].nregs);
|
||||
|
||||
return regind;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info,
|
||||
szind_t binind) {
|
||||
info->div_info = arena_binind_div_info[binind];
|
||||
info->nregs = bin_infos[binind].nregs;
|
||||
info->ndalloc = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Does the deallocation work associated with freeing a single pointer (a
|
||||
* "step") in between a arena_dalloc_bin_locked begin and end call.
|
||||
*
|
||||
* Returns true if arena_slab_dalloc must be called on slab. Doesn't do
|
||||
* stats updates, which happen during finish (this lets running counts get left
|
||||
* in a register).
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
|
||||
arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab,
|
||||
void *ptr) {
|
||||
const bin_info_t *bin_info = &bin_infos[binind];
|
||||
size_t regind = arena_slab_regind(info, binind, slab, ptr);
|
||||
slab_data_t *slab_data = edata_slab_data_get(slab);
|
||||
|
||||
assert(edata_nfree_get(slab) < bin_info->nregs);
|
||||
/* Freeing an unallocated pointer can cause assertion failure. */
|
||||
assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
|
||||
|
||||
bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
|
||||
edata_nfree_inc(slab);
|
||||
|
||||
if (config_stats) {
|
||||
info->ndalloc++;
|
||||
}
|
||||
|
||||
unsigned nfree = edata_nfree_get(slab);
|
||||
if (nfree == bin_info->nregs) {
|
||||
arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab,
|
||||
bin);
|
||||
return true;
|
||||
} else if (nfree == 1 && slab != bin->slabcur) {
|
||||
arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab,
|
||||
bin);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
|
||||
arena_dalloc_bin_locked_info_t *info) {
|
||||
if (config_stats) {
|
||||
bin->stats.ndalloc += info->ndalloc;
|
||||
assert(bin->stats.curregs >= (size_t)info->ndalloc);
|
||||
bin->stats.curregs -= (size_t)info->ndalloc;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bin_t *
|
||||
arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
|
||||
bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
|
||||
return shard0 + binshard;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
|
114
BeefRT/JEMalloc/include/jemalloc/internal/arena_stats.h
Normal file
|
@ -0,0 +1,114 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_STATS_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/lockedint.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/pa.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
||||
|
||||
typedef struct arena_stats_large_s arena_stats_large_t;
|
||||
struct arena_stats_large_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the arena.
|
||||
*/
|
||||
locked_u64_t nmalloc;
|
||||
locked_u64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to this size class.
|
||||
* This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
locked_u64_t nrequests; /* Partially derived. */
|
||||
/*
|
||||
* Number of tcache fills / flushes for large (similarly, periodically
|
||||
* merged). Note that there is no large tcache batch-fill currently
|
||||
* (i.e. only fill 1 at a time); however flush may be batched.
|
||||
*/
|
||||
locked_u64_t nfills; /* Partially derived. */
|
||||
locked_u64_t nflushes; /* Partially derived. */
|
||||
|
||||
/* Current number of allocations of this size class. */
|
||||
size_t curlextents; /* Derived. */
|
||||
};
|
||||
|
||||
/*
|
||||
* Arena stats. Note that fields marked "derived" are not directly maintained
|
||||
* within the arena code; rather their values are derived during stats merge
|
||||
* requests.
|
||||
*/
|
||||
typedef struct arena_stats_s arena_stats_t;
|
||||
struct arena_stats_s {
|
||||
LOCKEDINT_MTX_DECLARE(mtx)
|
||||
|
||||
/*
|
||||
* resident includes the base stats -- that's why it lives here and not
|
||||
* in pa_shard_stats_t.
|
||||
*/
|
||||
size_t base; /* Derived. */
|
||||
size_t resident; /* Derived. */
|
||||
size_t metadata_thp; /* Derived. */
|
||||
size_t mapped; /* Derived. */
|
||||
|
||||
atomic_zu_t internal;
|
||||
|
||||
size_t allocated_large; /* Derived. */
|
||||
uint64_t nmalloc_large; /* Derived. */
|
||||
uint64_t ndalloc_large; /* Derived. */
|
||||
uint64_t nfills_large; /* Derived. */
|
||||
uint64_t nflushes_large; /* Derived. */
|
||||
uint64_t nrequests_large; /* Derived. */
|
||||
|
||||
/*
|
||||
* The stats logically owned by the pa_shard in the same arena. This
|
||||
* lives here only because it's convenient for the purposes of the ctl
|
||||
* module -- it only knows about the single arena_stats.
|
||||
*/
|
||||
pa_shard_stats_t pa_shard_stats;
|
||||
|
||||
/* Number of bytes cached in tcache associated with this arena. */
|
||||
size_t tcache_bytes; /* Derived. */
|
||||
size_t tcache_stashed_bytes; /* Derived. */
|
||||
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
|
||||
|
||||
/* One element for each large size class. */
|
||||
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
|
||||
|
||||
/* Arena uptime. */
|
||||
nstime_t uptime;
|
||||
};
|
||||
|
||||
static inline bool
|
||||
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
|
||||
if (config_debug) {
|
||||
for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
|
||||
assert(((char *)arena_stats)[i] == 0);
|
||||
}
|
||||
}
|
||||
if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
|
||||
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
|
||||
return true;
|
||||
}
|
||||
/* Memory is zeroed, so there is no need to clear stats. */
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
szind_t szind, uint64_t nrequests) {
|
||||
LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
|
||||
arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
|
||||
locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
|
||||
&lstats->nrequests, nrequests);
|
||||
locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
|
||||
&lstats->nflushes, 1);
|
||||
LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
|
101
BeefRT/JEMalloc/include/jemalloc/internal/arena_structs.h
Normal file
|
@ -0,0 +1,101 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H
|
||||
|
||||
#include "jemalloc/internal/arena_stats.h"
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/bin.h"
|
||||
#include "jemalloc/internal/bitmap.h"
|
||||
#include "jemalloc/internal/counter.h"
|
||||
#include "jemalloc/internal/ecache.h"
|
||||
#include "jemalloc/internal/edata_cache.h"
|
||||
#include "jemalloc/internal/extent_dss.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/nstime.h"
|
||||
#include "jemalloc/internal/pa.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
|
||||
struct arena_s {
|
||||
/*
|
||||
* Number of threads currently assigned to this arena. Each thread has
|
||||
* two distinct assignments, one for application-serving allocation, and
|
||||
* the other for internal metadata allocation. Internal metadata must
|
||||
* not be allocated from arenas explicitly created via the arenas.create
|
||||
* mallctl, because the arena.<i>.reset mallctl indiscriminately
|
||||
* discards all allocations for the affected arena.
|
||||
*
|
||||
* 0: Application allocation.
|
||||
* 1: Internal metadata allocation.
|
||||
*
|
||||
* Synchronization: atomic.
|
||||
*/
|
||||
atomic_u_t nthreads[2];
|
||||
|
||||
/* Next bin shard for binding new threads. Synchronization: atomic. */
|
||||
atomic_u_t binshard_next;
|
||||
|
||||
/*
|
||||
* When percpu_arena is enabled, to amortize the cost of reading /
|
||||
* updating the current CPU id, track the most recent thread accessing
|
||||
* this arena, and only read CPU if there is a mismatch.
|
||||
*/
|
||||
tsdn_t *last_thd;
|
||||
|
||||
/* Synchronization: internal. */
|
||||
arena_stats_t stats;
|
||||
|
||||
/*
|
||||
* Lists of tcaches and cache_bin_array_descriptors for extant threads
|
||||
* associated with this arena. Stats from these are merged
|
||||
* incrementally, and at exit if opt_stats_print is enabled.
|
||||
*
|
||||
* Synchronization: tcache_ql_mtx.
|
||||
*/
|
||||
ql_head(tcache_slow_t) tcache_ql;
|
||||
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
|
||||
malloc_mutex_t tcache_ql_mtx;
|
||||
|
||||
/*
|
||||
* Represents a dss_prec_t, but atomically.
|
||||
*
|
||||
* Synchronization: atomic.
|
||||
*/
|
||||
atomic_u_t dss_prec;
|
||||
|
||||
/*
|
||||
* Extant large allocations.
|
||||
*
|
||||
* Synchronization: large_mtx.
|
||||
*/
|
||||
edata_list_active_t large;
|
||||
/* Synchronizes all large allocation/update/deallocation. */
|
||||
malloc_mutex_t large_mtx;
|
||||
|
||||
/* The page-level allocator shard this arena uses. */
|
||||
pa_shard_t pa_shard;
|
||||
|
||||
/*
|
||||
* A cached copy of base->ind. This can get accessed on hot paths;
|
||||
* looking it up in base requires an extra pointer hop / cache miss.
|
||||
*/
|
||||
unsigned ind;
|
||||
|
||||
/*
|
||||
* Base allocator, from which arena metadata are allocated.
|
||||
*
|
||||
* Synchronization: internal.
|
||||
*/
|
||||
base_t *base;
|
||||
/* Used to determine uptime. Read-only after initialization. */
|
||||
nstime_t create_time;
|
||||
|
||||
/*
|
||||
* The arena is allocated alongside its bins; really this is a
|
||||
* dynamically sized array determined by the binshard settings.
|
||||
*/
|
||||
bin_t bins[0];
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */
|
58
BeefRT/JEMalloc/include/jemalloc/internal/arena_types.h
Normal file
|
@ -0,0 +1,58 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_TYPES_H
|
||||
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
/* Default decay times in milliseconds. */
|
||||
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
|
||||
#define MUZZY_DECAY_MS_DEFAULT (0)
|
||||
/* Number of event ticks between time checks. */
|
||||
#define ARENA_DECAY_NTICKS_PER_UPDATE 1000
|
||||
|
||||
typedef struct arena_decay_s arena_decay_t;
|
||||
typedef struct arena_s arena_t;
|
||||
|
||||
typedef enum {
|
||||
percpu_arena_mode_names_base = 0, /* Used for options processing. */
|
||||
|
||||
/*
|
||||
* *_uninit are used only during bootstrapping, and must correspond
|
||||
* to initialized variant plus percpu_arena_mode_enabled_base.
|
||||
*/
|
||||
percpu_arena_uninit = 0,
|
||||
per_phycpu_arena_uninit = 1,
|
||||
|
||||
/* All non-disabled modes must come after percpu_arena_disabled. */
|
||||
percpu_arena_disabled = 2,
|
||||
|
||||
percpu_arena_mode_names_limit = 3, /* Used for options processing. */
|
||||
percpu_arena_mode_enabled_base = 3,
|
||||
|
||||
percpu_arena = 3,
|
||||
per_phycpu_arena = 4 /* Hyper threads share arena. */
|
||||
} percpu_arena_mode_t;
|
||||
|
||||
#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
|
||||
#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
|
||||
|
||||
/*
|
||||
* When allocation_size >= oversize_threshold, use the dedicated huge arena
|
||||
* (unless an arena index has been explicitly specified). 0 disables the feature.
|
||||
*/
|
||||
#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
|
||||
|
||||
struct arena_config_s {
|
||||
/* extent hooks to be used for the arena */
|
||||
extent_hooks_t *extent_hooks;
|
||||
|
||||
/*
|
||||
* Use extent hooks for metadata (base) allocations when true.
|
||||
*/
|
||||
bool metadata_use_hooks;
|
||||
};
|
||||
|
||||
typedef struct arena_config_s arena_config_t;
|
||||
|
||||
extern const arena_config_t arena_config_default;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
|
56
BeefRT/JEMalloc/include/jemalloc/internal/assert.h
Normal file
|
@ -0,0 +1,56 @@
|
|||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/util.h"
|
||||
|
||||
/*
|
||||
* Define a custom assert() in order to reduce the chances of deadlock during
|
||||
* assertion failure.
|
||||
*/
|
||||
#ifndef assert
|
||||
#define assert(e) do { \
|
||||
if (unlikely(config_debug && !(e))) { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
|
||||
__FILE__, __LINE__, #e); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifndef not_reached
|
||||
#define not_reached() do { \
|
||||
if (config_debug) { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: %s:%d: Unreachable code reached\n", \
|
||||
__FILE__, __LINE__); \
|
||||
abort(); \
|
||||
} \
|
||||
unreachable(); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifndef not_implemented
|
||||
#define not_implemented() do { \
|
||||
if (config_debug) { \
|
||||
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
|
||||
__FILE__, __LINE__); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifndef assert_not_implemented
|
||||
#define assert_not_implemented(e) do { \
|
||||
if (unlikely(config_debug && !(e))) { \
|
||||
not_implemented(); \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
|
||||
#ifndef cassert
|
||||
#define cassert(c) do { \
|
||||
if (unlikely(!(c))) { \
|
||||
not_reached(); \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
107
BeefRT/JEMalloc/include/jemalloc/internal/atomic.h
Normal file
107
BeefRT/JEMalloc/include/jemalloc/internal/atomic.h
Normal file
|
@ -0,0 +1,107 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ATOMIC_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_H
|
||||
|
||||
#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
|
||||
|
||||
#define JEMALLOC_U8_ATOMICS
|
||||
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
|
||||
# include "jemalloc/internal/atomic_gcc_atomic.h"
|
||||
# if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS)
|
||||
# undef JEMALLOC_U8_ATOMICS
|
||||
# endif
|
||||
#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
|
||||
# include "jemalloc/internal/atomic_gcc_sync.h"
|
||||
# if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS)
|
||||
# undef JEMALLOC_U8_ATOMICS
|
||||
# endif
|
||||
#elif defined(_MSC_VER)
|
||||
# include "jemalloc/internal/atomic_msvc.h"
|
||||
#elif defined(JEMALLOC_C11_ATOMICS)
|
||||
# include "jemalloc/internal/atomic_c11.h"
|
||||
#else
|
||||
# error "Don't have atomics implemented on this platform."
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This header gives more or less a backport of C11 atomics. The user can write
|
||||
* JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
|
||||
* counterparts of the C11 atomic functions for type, as so:
|
||||
* JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
|
||||
* and then write things like:
|
||||
* int *some_ptr;
|
||||
* atomic_pi_t atomic_ptr_to_int;
|
||||
* atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
|
||||
* int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
|
||||
* assert(some_ptr == prev_value);
|
||||
* and expect things to work in the obvious way.
|
||||
*
|
||||
* Also included (with naming differences to avoid conflicts with the standard
|
||||
* library):
|
||||
* atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
|
||||
* ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
|
||||
*/
|
||||
|
||||
/*
|
||||
* Pure convenience, so that we don't have to type "atomic_memory_order_"
|
||||
* quite so often.
|
||||
*/
|
||||
#define ATOMIC_RELAXED atomic_memory_order_relaxed
|
||||
#define ATOMIC_ACQUIRE atomic_memory_order_acquire
|
||||
#define ATOMIC_RELEASE atomic_memory_order_release
|
||||
#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
|
||||
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
|
||||
|
||||
/*
|
||||
* Another convenience -- simple atomic helper functions.
|
||||
*/
|
||||
#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \
|
||||
lg_size) \
|
||||
JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
|
||||
ATOMIC_INLINE void \
|
||||
atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \
|
||||
type inc) { \
|
||||
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
|
||||
type newval = oldval + inc; \
|
||||
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
|
||||
} \
|
||||
ATOMIC_INLINE void \
|
||||
atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \
|
||||
type inc) { \
|
||||
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
|
||||
type newval = oldval - inc; \
|
||||
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
|
||||
}
|
||||
|
||||
/*
|
||||
* Not all platforms have 64-bit atomics. If we do, this #define exposes that
|
||||
* fact.
|
||||
*/
|
||||
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
|
||||
# define JEMALLOC_ATOMIC_U64
|
||||
#endif
|
||||
|
||||
JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
|
||||
|
||||
/*
|
||||
* There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
|
||||
* platform that actually needs to know the size, MSVC.
|
||||
*/
|
||||
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
|
||||
|
||||
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
|
||||
|
||||
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
|
||||
|
||||
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
|
||||
|
||||
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0)
|
||||
|
||||
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2)
|
||||
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3)
|
||||
#endif
|
||||
|
||||
#undef ATOMIC_INLINE
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
|
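As a concrete illustration of the generated API (a sketch that assumes it is compiled inside the jemalloc tree, not part of the commit), the size_t ("zu") atomics produced above can back a simple relaxed counter in the style of arena_internal_add()/arena_internal_get(); hypothetical_bytes and its helpers are made-up names.

#include "jemalloc/internal/atomic.h"

/* A process-wide byte counter updated with relaxed ordering. */
static atomic_zu_t hypothetical_bytes = ATOMIC_INIT(0);

static inline void
hypothetical_bytes_add(size_t n) {
	atomic_fetch_add_zu(&hypothetical_bytes, n, ATOMIC_RELAXED);
}

static inline size_t
hypothetical_bytes_get(void) {
	return atomic_load_zu(&hypothetical_bytes, ATOMIC_RELAXED);
}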
97
BeefRT/JEMalloc/include/jemalloc/internal/atomic_c11.h
Normal file
|
@ -0,0 +1,97 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_C11_H
|
||||
|
||||
#include <stdatomic.h>
|
||||
|
||||
#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)
|
||||
|
||||
#define atomic_memory_order_t memory_order
|
||||
#define atomic_memory_order_relaxed memory_order_relaxed
|
||||
#define atomic_memory_order_acquire memory_order_acquire
|
||||
#define atomic_memory_order_release memory_order_release
|
||||
#define atomic_memory_order_acq_rel memory_order_acq_rel
|
||||
#define atomic_memory_order_seq_cst memory_order_seq_cst
|
||||
|
||||
#define atomic_fence atomic_thread_fence
|
||||
|
||||
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
|
||||
/* unused */ lg_size) \
|
||||
typedef _Atomic(type) atomic_##short_type##_t; \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_load_##short_type(const atomic_##short_type##_t *a, \
|
||||
atomic_memory_order_t mo) { \
|
||||
/* \
|
||||
* A strict interpretation of the C standard prevents \
|
||||
* atomic_load from taking a const argument, but it's \
|
||||
* convenient for our purposes. This cast is a workaround. \
|
||||
*/ \
|
||||
atomic_##short_type##_t* a_nonconst = \
|
||||
(atomic_##short_type##_t*)a; \
|
||||
return atomic_load_explicit(a_nonconst, mo); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE void \
|
||||
atomic_store_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
atomic_store_explicit(a, val, mo); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return atomic_exchange_explicit(a, val, mo); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
return atomic_compare_exchange_weak_explicit(a, expected, \
|
||||
desired, success_mo, failure_mo); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
return atomic_compare_exchange_strong_explicit(a, expected, \
|
||||
desired, success_mo, failure_mo); \
|
||||
}
|
||||
|
||||
/*
|
||||
* Integral types have some special operations available that non-integral ones
|
||||
* lack.
|
||||
*/
|
||||
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
|
||||
/* unused */ lg_size) \
|
||||
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return atomic_fetch_add_explicit(a, val, mo); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return atomic_fetch_sub_explicit(a, val, mo); \
|
||||
} \
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return atomic_fetch_and_explicit(a, val, mo); \
|
||||
} \
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return atomic_fetch_or_explicit(a, val, mo); \
|
||||
} \
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return atomic_fetch_xor_explicit(a, val, mo); \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */
|
129
BeefRT/JEMalloc/include/jemalloc/internal/atomic_gcc_atomic.h
Normal file
|
@ -0,0 +1,129 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
#define ATOMIC_INIT(...) {__VA_ARGS__}
|
||||
|
||||
typedef enum {
|
||||
atomic_memory_order_relaxed,
|
||||
atomic_memory_order_acquire,
|
||||
atomic_memory_order_release,
|
||||
atomic_memory_order_acq_rel,
|
||||
atomic_memory_order_seq_cst
|
||||
} atomic_memory_order_t;
|
||||
|
||||
ATOMIC_INLINE int
|
||||
atomic_enum_to_builtin(atomic_memory_order_t mo) {
|
||||
switch (mo) {
|
||||
case atomic_memory_order_relaxed:
|
||||
return __ATOMIC_RELAXED;
|
||||
case atomic_memory_order_acquire:
|
||||
return __ATOMIC_ACQUIRE;
|
||||
case atomic_memory_order_release:
|
||||
return __ATOMIC_RELEASE;
|
||||
case atomic_memory_order_acq_rel:
|
||||
return __ATOMIC_ACQ_REL;
|
||||
case atomic_memory_order_seq_cst:
|
||||
return __ATOMIC_SEQ_CST;
|
||||
}
|
||||
/* Can't happen; the switch is exhaustive. */
|
||||
not_reached();
|
||||
}
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_fence(atomic_memory_order_t mo) {
|
||||
__atomic_thread_fence(atomic_enum_to_builtin(mo));
|
||||
}
|
||||
|
||||
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
|
||||
/* unused */ lg_size) \
|
||||
typedef struct { \
|
||||
type repr; \
|
||||
} atomic_##short_type##_t; \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_load_##short_type(const atomic_##short_type##_t *a, \
|
||||
atomic_memory_order_t mo) { \
|
||||
type result; \
|
||||
__atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
|
||||
return result; \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE void \
|
||||
atomic_store_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
__atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
type result; \
|
||||
__atomic_exchange(&a->repr, &val, &result, \
|
||||
atomic_enum_to_builtin(mo)); \
|
||||
return result; \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
|
||||
UNUSED type *expected, type desired, \
|
||||
atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
return __atomic_compare_exchange(&a->repr, expected, &desired, \
|
||||
true, atomic_enum_to_builtin(success_mo), \
|
||||
atomic_enum_to_builtin(failure_mo)); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
|
||||
UNUSED type *expected, type desired, \
|
||||
atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
return __atomic_compare_exchange(&a->repr, expected, &desired, \
|
||||
false, \
|
||||
atomic_enum_to_builtin(success_mo), \
|
||||
atomic_enum_to_builtin(failure_mo)); \
|
||||
}
|
||||
|
||||
|
||||
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
|
||||
/* unused */ lg_size) \
|
||||
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __atomic_fetch_add(&a->repr, val, \
|
||||
atomic_enum_to_builtin(mo)); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __atomic_fetch_sub(&a->repr, val, \
|
||||
atomic_enum_to_builtin(mo)); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __atomic_fetch_and(&a->repr, val, \
|
||||
atomic_enum_to_builtin(mo)); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __atomic_fetch_or(&a->repr, val, \
|
||||
atomic_enum_to_builtin(mo)); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __atomic_fetch_xor(&a->repr, val, \
|
||||
atomic_enum_to_builtin(mo)); \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */
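For readers unfamiliar with the `__atomic_*` builtins this header maps onto, the following is a hedged, standalone sketch (GCC/Clang only, not jemalloc code) of the generic load/store/compare-exchange forms used in the macros above, including the `weak` flag that distinguishes the weak and strong CAS variants:

/* Standalone sketch of the GCC/Clang __atomic builtins used above. */
#include <stdbool.h>
#include <stdio.h>

int main(void) {
	unsigned long repr = 0;
	unsigned long val = 42, out;

	__atomic_store(&repr, &val, __ATOMIC_RELEASE);
	__atomic_load(&repr, &out, __ATOMIC_ACQUIRE);

	unsigned long expected = 42, desired = 43;
	/* weak == false requests the "strong" variant, as in the macro above. */
	bool ok = __atomic_compare_exchange(&repr, &expected, &desired,
	    /* weak */ false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);

	printf("loaded=%lu cas=%d now=%lu\n", out, (int)ok, repr);
	return 0;
}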
195
BeefRT/JEMalloc/include/jemalloc/internal/atomic_gcc_sync.h
Normal file
@@ -0,0 +1,195 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
|
||||
|
||||
#define ATOMIC_INIT(...) {__VA_ARGS__}
|
||||
|
||||
typedef enum {
|
||||
atomic_memory_order_relaxed,
|
||||
atomic_memory_order_acquire,
|
||||
atomic_memory_order_release,
|
||||
atomic_memory_order_acq_rel,
|
||||
atomic_memory_order_seq_cst
|
||||
} atomic_memory_order_t;
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_fence(atomic_memory_order_t mo) {
|
||||
/* Easy cases first: no barrier, and full barrier. */
|
||||
if (mo == atomic_memory_order_relaxed) {
|
||||
asm volatile("" ::: "memory");
|
||||
return;
|
||||
}
|
||||
if (mo == atomic_memory_order_seq_cst) {
|
||||
asm volatile("" ::: "memory");
|
||||
__sync_synchronize();
|
||||
asm volatile("" ::: "memory");
|
||||
return;
|
||||
}
|
||||
asm volatile("" ::: "memory");
|
||||
# if defined(__i386__) || defined(__x86_64__)
|
||||
/* This is implicit on x86. */
|
||||
# elif defined(__ppc64__)
|
||||
asm volatile("lwsync");
|
||||
# elif defined(__ppc__)
|
||||
asm volatile("sync");
|
||||
# elif defined(__sparc__) && defined(__arch64__)
|
||||
if (mo == atomic_memory_order_acquire) {
|
||||
asm volatile("membar #LoadLoad | #LoadStore");
|
||||
} else if (mo == atomic_memory_order_release) {
|
||||
asm volatile("membar #LoadStore | #StoreStore");
|
||||
} else {
|
||||
asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
|
||||
}
|
||||
# else
|
||||
__sync_synchronize();
|
||||
# endif
|
||||
asm volatile("" ::: "memory");
|
||||
}
|
||||
|
||||
/*
|
||||
* A correct implementation of seq_cst loads and stores on weakly ordered
|
||||
* architectures could do either of the following:
|
||||
* 1. store() is weak-fence -> store -> strong fence, load() is load ->
|
||||
* strong-fence.
|
||||
* 2. store() is strong-fence -> store, load() is strong-fence -> load ->
|
||||
* weak-fence.
|
||||
* The tricky thing is, load() and store() above can be the load or store
|
||||
* portions of a gcc __sync builtin, so we have to follow GCC's lead, which
|
||||
* means going with strategy 2.
|
||||
* On strongly ordered architectures, the natural strategy is to stick a strong
|
||||
* fence after seq_cst stores, and have naked loads. So we want the strong
|
||||
* fences in different places on different architectures.
|
||||
* atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to
|
||||
* accomplish this.
|
||||
*/
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_pre_sc_load_fence() {
|
||||
# if defined(__i386__) || defined(__x86_64__) || \
|
||||
(defined(__sparc__) && defined(__arch64__))
|
||||
atomic_fence(atomic_memory_order_relaxed);
|
||||
# else
|
||||
atomic_fence(atomic_memory_order_seq_cst);
|
||||
# endif
|
||||
}
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_post_sc_store_fence() {
|
||||
# if defined(__i386__) || defined(__x86_64__) || \
|
||||
(defined(__sparc__) && defined(__arch64__))
|
||||
atomic_fence(atomic_memory_order_seq_cst);
|
||||
# else
|
||||
atomic_fence(atomic_memory_order_relaxed);
|
||||
# endif
|
||||
|
||||
}
|
||||
|
||||
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
|
||||
/* unused */ lg_size) \
|
||||
typedef struct { \
|
||||
type volatile repr; \
|
||||
} atomic_##short_type##_t; \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_load_##short_type(const atomic_##short_type##_t *a, \
|
||||
atomic_memory_order_t mo) { \
|
||||
if (mo == atomic_memory_order_seq_cst) { \
|
||||
atomic_pre_sc_load_fence(); \
|
||||
} \
|
||||
type result = a->repr; \
|
||||
if (mo != atomic_memory_order_relaxed) { \
|
||||
atomic_fence(atomic_memory_order_acquire); \
|
||||
} \
|
||||
return result; \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE void \
|
||||
atomic_store_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
if (mo != atomic_memory_order_relaxed) { \
|
||||
atomic_fence(atomic_memory_order_release); \
|
||||
} \
|
||||
a->repr = val; \
|
||||
if (mo == atomic_memory_order_seq_cst) { \
|
||||
atomic_post_sc_store_fence(); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
/* \
|
||||
* Because of FreeBSD, we care about gcc 4.2, which doesn't have\
|
||||
* an atomic exchange builtin. We fake it with a CAS loop. \
|
||||
*/ \
|
||||
while (true) { \
|
||||
type old = a->repr; \
|
||||
if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \
|
||||
return old; \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, \
|
||||
atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
|
||||
desired); \
|
||||
if (prev == *expected) { \
|
||||
return true; \
|
||||
} else { \
|
||||
*expected = prev; \
|
||||
return false; \
|
||||
} \
|
||||
} \
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, \
|
||||
atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
|
||||
desired); \
|
||||
if (prev == *expected) { \
|
||||
return true; \
|
||||
} else { \
|
||||
*expected = prev; \
|
||||
return false; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
|
||||
/* unused */ lg_size) \
|
||||
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_add(&a->repr, val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_sub(&a->repr, val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_and(&a->repr, val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_or(&a->repr, val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_xor(&a->repr, val); \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */
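Because the legacy `__sync` family has no exchange builtin, the header above fakes exchange with a CAS loop. Here is a hedged standalone version of that loop (requires the GCC `__sync` builtins; not jemalloc code), showing why the loop terminates only when no other thread raced between the plain read and the CAS:

/* Standalone version of the CAS-loop exchange used in the fallback above. */
#include <stdio.h>

static unsigned long
sync_exchange(volatile unsigned long *p, unsigned long val) {
	while (1) {
		unsigned long old = *p;
		/* Succeeds only if *p is still `old`, i.e. nobody raced us. */
		if (__sync_bool_compare_and_swap(p, old, val)) {
			return old;
		}
	}
}

int main(void) {
	volatile unsigned long x = 7;
	unsigned long prev = sync_exchange(&x, 9);
	printf("prev=%lu now=%lu\n", prev, x);
	return 0;
}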
158
BeefRT/JEMalloc/include/jemalloc/internal/atomic_msvc.h
Normal file
@@ -0,0 +1,158 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H
|
||||
|
||||
#define ATOMIC_INIT(...) {__VA_ARGS__}
|
||||
|
||||
typedef enum {
|
||||
atomic_memory_order_relaxed,
|
||||
atomic_memory_order_acquire,
|
||||
atomic_memory_order_release,
|
||||
atomic_memory_order_acq_rel,
|
||||
atomic_memory_order_seq_cst
|
||||
} atomic_memory_order_t;
|
||||
|
||||
typedef char atomic_repr_0_t;
|
||||
typedef short atomic_repr_1_t;
|
||||
typedef long atomic_repr_2_t;
|
||||
typedef __int64 atomic_repr_3_t;
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_fence(atomic_memory_order_t mo) {
|
||||
_ReadWriteBarrier();
|
||||
# if defined(_M_ARM) || defined(_M_ARM64)
|
||||
/* ARM needs a barrier for everything but relaxed. */
|
||||
if (mo != atomic_memory_order_relaxed) {
|
||||
MemoryBarrier();
|
||||
}
|
||||
# elif defined(_M_IX86) || defined (_M_X64)
|
||||
/* x86 needs a barrier only for seq_cst. */
|
||||
if (mo == atomic_memory_order_seq_cst) {
|
||||
MemoryBarrier();
|
||||
}
|
||||
# else
|
||||
# error "Don't know how to create atomics for this platform for MSVC."
|
||||
# endif
|
||||
_ReadWriteBarrier();
|
||||
}
|
||||
|
||||
#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
|
||||
|
||||
#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
|
||||
#define ATOMIC_RAW_CONCAT(a, b) a ## b
|
||||
|
||||
#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
|
||||
base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
|
||||
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
|
||||
ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
|
||||
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX_0 8
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX_1 16
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX_2
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX_3 64
|
||||
|
||||
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
|
||||
typedef struct { \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
|
||||
} atomic_##short_type##_t; \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_load_##short_type(const atomic_##short_type##_t *a, \
|
||||
atomic_memory_order_t mo) { \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
|
||||
if (mo != atomic_memory_order_relaxed) { \
|
||||
atomic_fence(atomic_memory_order_acquire); \
|
||||
} \
|
||||
return (type) ret; \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE void \
|
||||
atomic_store_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
if (mo != atomic_memory_order_relaxed) { \
|
||||
atomic_fence(atomic_memory_order_release); \
|
||||
} \
|
||||
a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
|
||||
if (mo == atomic_memory_order_seq_cst) { \
|
||||
atomic_fence(atomic_memory_order_seq_cst); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
|
||||
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) e = \
|
||||
(ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) d = \
|
||||
(ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) old = \
|
||||
ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
|
||||
lg_size)(&a->repr, d, e); \
|
||||
if (old == e) { \
|
||||
return true; \
|
||||
} else { \
|
||||
*expected = (type)old; \
|
||||
return false; \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
/* We implement the weak version with strong semantics. */ \
|
||||
return atomic_compare_exchange_weak_##short_type(a, expected, \
|
||||
desired, success_mo, failure_mo); \
|
||||
}
|
||||
|
||||
|
||||
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
|
||||
JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \
|
||||
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
/* \
|
||||
* MSVC warns on negation of unsigned operands, but for us it \
|
||||
* gives exactly the right semantics (MAX_TYPE + 1 - operand). \
|
||||
*/ \
|
||||
__pragma(warning(push)) \
|
||||
__pragma(warning(disable: 4146)) \
|
||||
return atomic_fetch_add_##short_type(a, -val, mo); \
|
||||
__pragma(warning(pop)) \
|
||||
} \
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \
|
||||
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
} \
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \
|
||||
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
} \
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \
|
||||
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */
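The `ATOMIC_INTERLOCKED_NAME`/`ATOMIC_INTERLOCKED_SUFFIX` machinery above pastes a size suffix onto an intrinsic name; the suffix for `lg_size == 2` is deliberately empty because the 32-bit Interlocked intrinsics are unsuffixed. The following is a portable illustration of that token-pasting trick (it only prints the resulting names, so it does not need the real MSVC intrinsics and compiles anywhere):

/* Portable illustration of the suffix-pasting trick used above. */
#include <stdio.h>

#define RAW_CONCAT(a, b) a ## b
#define CONCAT(a, b) RAW_CONCAT(a, b)

#define SUFFIX_0 8
#define SUFFIX_1 16
#define SUFFIX_2        /* empty: the 32-bit intrinsics are unsuffixed */
#define SUFFIX_3 64

#define INTERLOCKED_NAME(base, lg_size) CONCAT(base, CONCAT(SUFFIX_, lg_size))

#define STR_(x) #x
#define STR(x) STR_(x)

int main(void) {
	puts(STR(INTERLOCKED_NAME(_InterlockedExchange, 2))); /* _InterlockedExchange   */
	puts(STR(INTERLOCKED_NAME(_InterlockedExchange, 3))); /* _InterlockedExchange64 */
	puts(STR(INTERLOCKED_NAME(_InterlockedExchange, 0))); /* _InterlockedExchange8  */
	return 0;
}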
@@ -0,0 +1,33 @@
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H

extern bool opt_background_thread;
extern size_t opt_max_background_threads;
extern malloc_mutex_t background_thread_lock;
extern atomic_b_t background_thread_enabled_state;
extern size_t n_background_threads;
extern size_t max_background_threads;
extern background_thread_info_t *background_thread_info;

bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
bool background_thread_is_started(background_thread_info_t* info);
void background_thread_wakeup_early(background_thread_info_t *info,
    nstime_t *remaining_sleep);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
void background_thread_postfork_child(tsdn_t *tsdn);
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats);
void background_thread_ctl_init(tsdn_t *tsdn);

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
bool background_thread_boot1(tsdn_t *tsdn, base_t *base);

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
@@ -0,0 +1,48 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
|
||||
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
background_thread_enabled(void) {
|
||||
return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
background_thread_enabled_set(tsdn_t *tsdn, bool state) {
|
||||
malloc_mutex_assert_owner(tsdn, &background_thread_lock);
|
||||
atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE background_thread_info_t *
|
||||
arena_background_thread_info_get(arena_t *arena) {
|
||||
unsigned arena_ind = arena_ind_get(arena);
|
||||
return &background_thread_info[arena_ind % max_background_threads];
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE background_thread_info_t *
|
||||
background_thread_info_get(size_t ind) {
|
||||
return &background_thread_info[ind % max_background_threads];
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint64_t
|
||||
background_thread_wakeup_time_get(background_thread_info_t *info) {
|
||||
uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
|
||||
assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
|
||||
(next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
|
||||
return next_wakeup;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
|
||||
uint64_t wakeup_time) {
|
||||
malloc_mutex_assert_owner(tsdn, &info->mtx);
|
||||
atomic_store_b(&info->indefinite_sleep,
|
||||
wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
|
||||
nstime_init(&info->next_wakeup, wakeup_time);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
background_thread_indefinite_sleep(background_thread_info_t *info) {
|
||||
return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
@@ -0,0 +1,66 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
|
||||
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
|
||||
|
||||
/* This file really combines "structs" and "types", but only transitionally. */
|
||||
|
||||
#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
|
||||
# define JEMALLOC_PTHREAD_CREATE_WRAPPER
|
||||
#endif
|
||||
|
||||
#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
|
||||
#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
|
||||
#define DEFAULT_NUM_BACKGROUND_THREAD 4
|
||||
|
||||
/*
|
||||
* These exist only as a transitional state. Eventually, deferral should be
|
||||
* part of the PAI, and each implementation can indicate wait times with more
|
||||
* specificity.
|
||||
*/
|
||||
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2)
|
||||
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000
|
||||
|
||||
#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
|
||||
#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX
|
||||
|
||||
typedef enum {
|
||||
background_thread_stopped,
|
||||
background_thread_started,
|
||||
/* Thread waits on the global lock when paused (for arena_reset). */
|
||||
background_thread_paused,
|
||||
} background_thread_state_t;
|
||||
|
||||
struct background_thread_info_s {
|
||||
#ifdef JEMALLOC_BACKGROUND_THREAD
|
||||
/* Background thread is pthread specific. */
|
||||
pthread_t thread;
|
||||
pthread_cond_t cond;
|
||||
#endif
|
||||
malloc_mutex_t mtx;
|
||||
background_thread_state_t state;
|
||||
/* When true, it means no wakeup scheduled. */
|
||||
atomic_b_t indefinite_sleep;
|
||||
/* Next scheduled wakeup time (absolute time in ns). */
|
||||
nstime_t next_wakeup;
|
||||
/*
|
||||
* Since the last background thread run, newly added number of pages
|
||||
* that need to be purged by the next wakeup. This is adjusted on
|
||||
* epoch advance, and is used to determine whether we should signal the
|
||||
* background thread to wake up earlier.
|
||||
*/
|
||||
size_t npages_to_purge_new;
|
||||
/* Stats: total number of runs since started. */
|
||||
uint64_t tot_n_runs;
|
||||
/* Stats: total sleep time since started. */
|
||||
nstime_t tot_sleep_time;
|
||||
};
|
||||
typedef struct background_thread_info_s background_thread_info_t;
|
||||
|
||||
struct background_thread_stats_s {
|
||||
size_t num_threads;
|
||||
uint64_t num_runs;
|
||||
nstime_t run_interval;
|
||||
mutex_prof_data_t max_counter_per_bg_thd;
|
||||
};
|
||||
typedef struct background_thread_stats_s background_thread_stats_t;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */
110
BeefRT/JEMalloc/include/jemalloc/internal/base.h
Normal file
@@ -0,0 +1,110 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BASE_H
|
||||
#define JEMALLOC_INTERNAL_BASE_H
|
||||
|
||||
#include "jemalloc/internal/edata.h"
|
||||
#include "jemalloc/internal/ehooks.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
enum metadata_thp_mode_e {
|
||||
metadata_thp_disabled = 0,
|
||||
/*
|
||||
* Lazily enable hugepage for metadata. To avoid high RSS caused by THP
|
||||
* + low usage arena (i.e. THP becomes a significant percentage), the
|
||||
* "auto" option only starts using THP after a base allocator used up
|
||||
* the first THP region. Starting from the second hugepage (in a single
|
||||
* arena), "auto" behaves the same as "always", i.e. madvise hugepage
|
||||
* right away.
|
||||
*/
|
||||
metadata_thp_auto = 1,
|
||||
metadata_thp_always = 2,
|
||||
metadata_thp_mode_limit = 3
|
||||
};
|
||||
typedef enum metadata_thp_mode_e metadata_thp_mode_t;
|
||||
|
||||
#define METADATA_THP_DEFAULT metadata_thp_disabled
|
||||
extern metadata_thp_mode_t opt_metadata_thp;
|
||||
extern const char *metadata_thp_mode_names[];
|
||||
|
||||
|
||||
/* Embedded at the beginning of every block of base-managed virtual memory. */
|
||||
typedef struct base_block_s base_block_t;
|
||||
struct base_block_s {
|
||||
/* Total size of block's virtual memory mapping. */
|
||||
size_t size;
|
||||
|
||||
/* Next block in list of base's blocks. */
|
||||
base_block_t *next;
|
||||
|
||||
/* Tracks unused trailing space. */
|
||||
edata_t edata;
|
||||
};
|
||||
|
||||
typedef struct base_s base_t;
|
||||
struct base_s {
|
||||
/*
|
||||
* User-configurable extent hook functions.
|
||||
*/
|
||||
ehooks_t ehooks;
|
||||
|
||||
/*
|
||||
* User-configurable extent hook functions for metadata allocations.
|
||||
*/
|
||||
ehooks_t ehooks_base;
|
||||
|
||||
/* Protects base_alloc() and base_stats_get() operations. */
|
||||
malloc_mutex_t mtx;
|
||||
|
||||
/* Using THP when true (metadata_thp auto mode). */
|
||||
bool auto_thp_switched;
|
||||
/*
|
||||
* Most recent size class in the series of increasingly large base
|
||||
* extents. Logarithmic spacing between subsequent allocations ensures
|
||||
* that the total number of distinct mappings remains small.
|
||||
*/
|
||||
pszind_t pind_last;
|
||||
|
||||
/* Serial number generation state. */
|
||||
size_t extent_sn_next;
|
||||
|
||||
/* Chain of all blocks associated with base. */
|
||||
base_block_t *blocks;
|
||||
|
||||
/* Heap of extents that track unused trailing space within blocks. */
|
||||
edata_heap_t avail[SC_NSIZES];
|
||||
|
||||
/* Stats, only maintained if config_stats. */
|
||||
size_t allocated;
|
||||
size_t resident;
|
||||
size_t mapped;
|
||||
/* Number of THP regions touched. */
|
||||
size_t n_thp;
|
||||
};
|
||||
|
||||
static inline unsigned
|
||||
base_ind_get(const base_t *base) {
|
||||
return ehooks_ind_get(&base->ehooks);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
metadata_thp_enabled(void) {
|
||||
return (opt_metadata_thp != metadata_thp_disabled);
|
||||
}
|
||||
|
||||
base_t *b0get(void);
|
||||
base_t *base_new(tsdn_t *tsdn, unsigned ind,
|
||||
const extent_hooks_t *extent_hooks, bool metadata_use_hooks);
|
||||
void base_delete(tsdn_t *tsdn, base_t *base);
|
||||
ehooks_t *base_ehooks_get(base_t *base);
|
||||
ehooks_t *base_ehooks_get_for_metadata(base_t *base);
|
||||
extent_hooks_t *base_extent_hooks_set(base_t *base,
|
||||
extent_hooks_t *extent_hooks);
|
||||
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
|
||||
edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
|
||||
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
|
||||
size_t *resident, size_t *mapped, size_t *n_thp);
|
||||
void base_prefork(tsdn_t *tsdn, base_t *base);
|
||||
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
|
||||
void base_postfork_child(tsdn_t *tsdn, base_t *base);
|
||||
bool base_boot(tsdn_t *tsdn);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BASE_H */
82
BeefRT/JEMalloc/include/jemalloc/internal/bin.h
Normal file
@@ -0,0 +1,82 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BIN_H
|
||||
#define JEMALLOC_INTERNAL_BIN_H
|
||||
|
||||
#include "jemalloc/internal/bin_stats.h"
|
||||
#include "jemalloc/internal/bin_types.h"
|
||||
#include "jemalloc/internal/edata.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
/*
|
||||
* A bin contains a set of extents that are currently being used for slab
|
||||
* allocations.
|
||||
*/
|
||||
typedef struct bin_s bin_t;
|
||||
struct bin_s {
|
||||
/* All operations on bin_t fields require lock ownership. */
|
||||
malloc_mutex_t lock;
|
||||
|
||||
/*
|
||||
* Bin statistics. These get touched every time the lock is acquired,
|
||||
* so put them close by in the hopes of getting some cache locality.
|
||||
*/
|
||||
bin_stats_t stats;
|
||||
|
||||
/*
|
||||
* Current slab being used to service allocations of this bin's size
|
||||
* class. slabcur is independent of slabs_{nonfull,full}; whenever
|
||||
* slabcur is reassigned, the previous slab must be deallocated or
|
||||
* inserted into slabs_{nonfull,full}.
|
||||
*/
|
||||
edata_t *slabcur;
|
||||
|
||||
/*
|
||||
* Heap of non-full slabs. This heap is used to assure that new
|
||||
* allocations come from the non-full slab that is oldest/lowest in
|
||||
* memory.
|
||||
*/
|
||||
edata_heap_t slabs_nonfull;
|
||||
|
||||
/* List used to track full slabs. */
|
||||
edata_list_active_t slabs_full;
|
||||
};
|
||||
|
||||
/* A set of sharded bins of the same size class. */
|
||||
typedef struct bins_s bins_t;
|
||||
struct bins_s {
|
||||
/* Sharded bins. Dynamically sized. */
|
||||
bin_t *bin_shards;
|
||||
};
|
||||
|
||||
void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
|
||||
bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
|
||||
size_t end_size, size_t nshards);
|
||||
|
||||
/* Initializes a bin to empty. Returns true on error. */
|
||||
bool bin_init(bin_t *bin);
|
||||
|
||||
/* Forking. */
|
||||
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
|
||||
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
|
||||
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
|
||||
|
||||
/* Stats. */
|
||||
static inline void
|
||||
bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
|
||||
malloc_mutex_lock(tsdn, &bin->lock);
|
||||
malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
|
||||
bin_stats_t *stats = &dst_bin_stats->stats_data;
|
||||
stats->nmalloc += bin->stats.nmalloc;
|
||||
stats->ndalloc += bin->stats.ndalloc;
|
||||
stats->nrequests += bin->stats.nrequests;
|
||||
stats->curregs += bin->stats.curregs;
|
||||
stats->nfills += bin->stats.nfills;
|
||||
stats->nflushes += bin->stats.nflushes;
|
||||
stats->nslabs += bin->stats.nslabs;
|
||||
stats->reslabs += bin->stats.reslabs;
|
||||
stats->curslabs += bin->stats.curslabs;
|
||||
stats->nonfull_slabs += bin->stats.nonfull_slabs;
|
||||
malloc_mutex_unlock(tsdn, &bin->lock);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BIN_H */
50
BeefRT/JEMalloc/include/jemalloc/internal/bin_info.h
Normal file
@@ -0,0 +1,50 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BIN_INFO_H
|
||||
#define JEMALLOC_INTERNAL_BIN_INFO_H
|
||||
|
||||
#include "jemalloc/internal/bitmap.h"
|
||||
|
||||
/*
|
||||
* Read-only information associated with each element of arena_t's bins array
|
||||
* is stored separately, partly to reduce memory usage (only one copy, rather
|
||||
* than one per arena), but mainly to avoid false cacheline sharing.
|
||||
*
|
||||
* Each slab has the following layout:
|
||||
*
|
||||
* /--------------------\
|
||||
* | region 0 |
|
||||
* |--------------------|
|
||||
* | region 1 |
|
||||
* |--------------------|
|
||||
* | ... |
|
||||
* | ... |
|
||||
* | ... |
|
||||
* |--------------------|
|
||||
* | region nregs-1 |
|
||||
* \--------------------/
|
||||
*/
|
||||
typedef struct bin_info_s bin_info_t;
|
||||
struct bin_info_s {
|
||||
/* Size of regions in a slab for this bin's size class. */
|
||||
size_t reg_size;
|
||||
|
||||
/* Total size of a slab for this bin's size class. */
|
||||
size_t slab_size;
|
||||
|
||||
/* Total number of regions in a slab for this bin's size class. */
|
||||
uint32_t nregs;
|
||||
|
||||
/* Number of sharded bins in each arena for this size class. */
|
||||
uint32_t n_shards;
|
||||
|
||||
/*
|
||||
* Metadata used to manipulate bitmaps for slabs associated with this
|
||||
* bin.
|
||||
*/
|
||||
bitmap_info_t bitmap_info;
|
||||
};
|
||||
|
||||
extern bin_info_t bin_infos[SC_NBINS];
|
||||
|
||||
void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */
57
BeefRT/JEMalloc/include/jemalloc/internal/bin_stats.h
Normal file
@@ -0,0 +1,57 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
|
||||
#define JEMALLOC_INTERNAL_BIN_STATS_H
|
||||
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
|
||||
typedef struct bin_stats_s bin_stats_t;
|
||||
struct bin_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the bin. Note that tcache may allocate an object, then recycle it
|
||||
 * many times, resulting in many increments to nrequests, but only one
|
||||
* each to nmalloc and ndalloc.
|
||||
*/
|
||||
uint64_t nmalloc;
|
||||
uint64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to the size of this
|
||||
* bin. This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
|
||||
/*
|
||||
* Current number of regions of this size class, including regions
|
||||
* currently cached by tcache.
|
||||
*/
|
||||
size_t curregs;
|
||||
|
||||
/* Number of tcache fills from this bin. */
|
||||
uint64_t nfills;
|
||||
|
||||
/* Number of tcache flushes to this bin. */
|
||||
uint64_t nflushes;
|
||||
|
||||
/* Total number of slabs created for this bin's size class. */
|
||||
uint64_t nslabs;
|
||||
|
||||
/*
|
||||
* Total number of slabs reused by extracting them from the slabs heap
|
||||
* for this bin's size class.
|
||||
*/
|
||||
uint64_t reslabs;
|
||||
|
||||
/* Current number of slabs in this bin. */
|
||||
size_t curslabs;
|
||||
|
||||
/* Current size of nonfull slabs heap in this bin. */
|
||||
size_t nonfull_slabs;
|
||||
};
|
||||
|
||||
typedef struct bin_stats_data_s bin_stats_data_t;
|
||||
struct bin_stats_data_s {
|
||||
bin_stats_t stats_data;
|
||||
mutex_prof_data_t mutex_data;
|
||||
};
|
||||
#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
17
BeefRT/JEMalloc/include/jemalloc/internal/bin_types.h
Normal file
@@ -0,0 +1,17 @@
#ifndef JEMALLOC_INTERNAL_BIN_TYPES_H
#define JEMALLOC_INTERNAL_BIN_TYPES_H

#include "jemalloc/internal/sc.h"

#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT 1

/* Used in TSD static initializer only. Real init in arena_bind(). */
#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}}

typedef struct tsd_binshards_s tsd_binshards_t;
struct tsd_binshards_s {
	uint8_t binshard[SC_NBINS];
};

#endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */
422
BeefRT/JEMalloc/include/jemalloc/internal/bit_util.h
Normal file
@@ -0,0 +1,422 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
|
||||
#define JEMALLOC_INTERNAL_BIT_UTIL_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
/* Sanity check. */
|
||||
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|
||||
|| !defined(JEMALLOC_INTERNAL_FFS)
|
||||
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Unlike the builtins and posix ffs functions, our ffs requires a non-zero
|
||||
* input, and returns the position of the lowest bit set (as opposed to the
|
||||
* posix versions, which return 1 larger than that position and use a return
|
||||
 * value of zero as a sentinel). This tends to simplify logic in callers, and
|
||||
* allows for consistency with the builtins we build fls on top of.
|
||||
*/
|
||||
static inline unsigned
|
||||
ffs_llu(unsigned long long x) {
|
||||
util_assume(x != 0);
|
||||
return JEMALLOC_INTERNAL_FFSLL(x) - 1;
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
ffs_lu(unsigned long x) {
|
||||
util_assume(x != 0);
|
||||
return JEMALLOC_INTERNAL_FFSL(x) - 1;
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
ffs_u(unsigned x) {
|
||||
util_assume(x != 0);
|
||||
return JEMALLOC_INTERNAL_FFS(x) - 1;
|
||||
}
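As the comment above notes, these helpers are 0-based and require a non-zero input, unlike POSIX `ffs()`. A hedged standalone comparison (assuming the GCC/Clang `__builtin_ffs`, which is what `JEMALLOC_INTERNAL_FFS` typically resolves to):

/* Contrast of the 0-based convention above with POSIX-style ffs(). */
#include <assert.h>
#include <stdio.h>

static unsigned
ffs_u_zero_based(unsigned x) {
	assert(x != 0);                 /* zero input violates the precondition */
	return __builtin_ffs(x) - 1;    /* builtin is 1-based; shift to 0-based */
}

int main(void) {
	unsigned x = 0x8;                                      /* binary 1000 */
	printf("posix-style ffs: %d\n", __builtin_ffs(x));     /* prints 4 */
	printf("0-based ffs:     %u\n", ffs_u_zero_based(x));  /* prints 3 */
	return 0;
}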
|
||||
|
||||
#define DO_FLS_SLOW(x, suffix) do { \
|
||||
util_assume(x != 0); \
|
||||
x |= (x >> 1); \
|
||||
x |= (x >> 2); \
|
||||
x |= (x >> 4); \
|
||||
x |= (x >> 8); \
|
||||
x |= (x >> 16); \
|
||||
if (sizeof(x) > 4) { \
|
||||
/* \
|
||||
* If sizeof(x) is 4, then the expression "x >> 32" \
|
||||
* will generate compiler warnings even if the code \
|
||||
* never executes. This circumvents the warning, and \
|
||||
* gets compiled out in optimized builds. \
|
||||
*/ \
|
||||
int constant_32 = sizeof(x) * 4; \
|
||||
x |= (x >> constant_32); \
|
||||
} \
|
||||
x++; \
|
||||
if (x == 0) { \
|
||||
return 8 * sizeof(x) - 1; \
|
||||
} \
|
||||
return ffs_##suffix(x) - 1; \
|
||||
} while(0)
|
||||
|
||||
static inline unsigned
|
||||
fls_llu_slow(unsigned long long x) {
|
||||
DO_FLS_SLOW(x, llu);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_lu_slow(unsigned long x) {
|
||||
DO_FLS_SLOW(x, lu);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_u_slow(unsigned x) {
|
||||
DO_FLS_SLOW(x, u);
|
||||
}
|
||||
|
||||
#undef DO_FLS_SLOW
|
||||
|
||||
#ifdef JEMALLOC_HAVE_BUILTIN_CLZ
|
||||
static inline unsigned
|
||||
fls_llu(unsigned long long x) {
|
||||
util_assume(x != 0);
|
||||
/*
|
||||
* Note that the xor here is more naturally written as subtraction; the
|
||||
* last bit set is the number of bits in the type minus the number of
|
||||
* leading zero bits. But GCC implements that as:
|
||||
* bsr edi, edi
|
||||
* mov eax, 31
|
||||
* xor edi, 31
|
||||
* sub eax, edi
|
||||
* If we write it as xor instead, then we get
|
||||
* bsr eax, edi
|
||||
* as desired.
|
||||
*/
|
||||
return (8 * sizeof(x) - 1) ^ __builtin_clzll(x);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_lu(unsigned long x) {
|
||||
util_assume(x != 0);
|
||||
return (8 * sizeof(x) - 1) ^ __builtin_clzl(x);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_u(unsigned x) {
|
||||
util_assume(x != 0);
|
||||
return (8 * sizeof(x) - 1) ^ __builtin_clz(x);
|
||||
}
|
||||
#elif defined(_MSC_VER)
|
||||
|
||||
#if LG_SIZEOF_PTR == 3
|
||||
#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
|
||||
#else
|
||||
/*
|
||||
* This never actually runs; we're just dodging a compiler error for the
|
||||
* never-taken branch where sizeof(void *) == 8.
|
||||
*/
|
||||
#define DO_BSR64(bit, x) bit = 0; unreachable()
|
||||
#endif
|
||||
|
||||
#define DO_FLS(x) do { \
|
||||
if (x == 0) { \
|
||||
return 8 * sizeof(x); \
|
||||
} \
|
||||
unsigned long bit; \
|
||||
if (sizeof(x) == 4) { \
|
||||
_BitScanReverse(&bit, (unsigned)x); \
|
||||
return (unsigned)bit; \
|
||||
} \
|
||||
if (sizeof(x) == 8 && sizeof(void *) == 8) { \
|
||||
DO_BSR64(bit, x); \
|
||||
return (unsigned)bit; \
|
||||
} \
|
||||
if (sizeof(x) == 8 && sizeof(void *) == 4) { \
|
||||
/* Dodge a compiler warning, as above. */ \
|
||||
int constant_32 = sizeof(x) * 4; \
|
||||
if (_BitScanReverse(&bit, \
|
||||
(unsigned)(x >> constant_32))) { \
|
||||
return 32 + (unsigned)bit; \
|
||||
} else { \
|
||||
_BitScanReverse(&bit, (unsigned)x); \
|
||||
return (unsigned)bit; \
|
||||
} \
|
||||
} \
|
||||
unreachable(); \
|
||||
} while (0)
|
||||
|
||||
static inline unsigned
|
||||
fls_llu(unsigned long long x) {
|
||||
DO_FLS(x);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_lu(unsigned long x) {
|
||||
DO_FLS(x);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_u(unsigned x) {
|
||||
DO_FLS(x);
|
||||
}
|
||||
|
||||
#undef DO_FLS
|
||||
#undef DO_BSR64
|
||||
#else
|
||||
|
||||
static inline unsigned
|
||||
fls_llu(unsigned long long x) {
|
||||
return fls_llu_slow(x);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_lu(unsigned long x) {
|
||||
return fls_lu_slow(x);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_u(unsigned x) {
|
||||
return fls_u_slow(x);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if LG_SIZEOF_LONG_LONG > 3
|
||||
# error "Haven't implemented popcount for 16-byte ints."
|
||||
#endif
|
||||
|
||||
#define DO_POPCOUNT(x, type) do { \
|
||||
/* \
|
||||
* Algorithm from an old AMD optimization reference manual. \
|
||||
* We're putting a little bit more work than you might expect \
|
||||
	 * into the no-intrinsic case, since we only support the	\
|
||||
* GCC intrinsics spelling of popcount (for now). Detecting \
|
||||
* whether or not the popcount builtin is actually useable in \
|
||||
* MSVC is nontrivial. \
|
||||
*/ \
|
||||
\
|
||||
type bmul = (type)0x0101010101010101ULL; \
|
||||
\
|
||||
/* \
|
||||
* Replace each 2 bits with the sideways sum of the original \
|
||||
* values. 0x5 = 0b0101. \
|
||||
* \
|
||||
* You might expect this to be: \
|
||||
* x = (x & 0x55...) + ((x >> 1) & 0x55...). \
|
||||
* That costs an extra mask relative to this, though. \
|
||||
*/ \
|
||||
x = x - ((x >> 1) & (0x55U * bmul)); \
|
||||
	/* Replace each 4 bits with their sideways sum. 0x3 = 0b0011. */\
|
||||
x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U)); \
|
||||
/* \
|
||||
* Replace each 8 bits with their sideways sum. Note that we \
|
||||
* can't overflow within each 4-bit sum here, so we can skip \
|
||||
* the initial mask. \
|
||||
*/ \
|
||||
x = (x + (x >> 4)) & (bmul * 0x0FU); \
|
||||
/* \
|
||||
* None of the partial sums in this multiplication (viewed in \
|
||||
* base-256) can overflow into the next digit. So the least \
|
||||
* significant byte of the product will be the least \
|
||||
* significant byte of the original value, the second least \
|
||||
* significant byte will be the sum of the two least \
|
||||
* significant bytes of the original value, and so on. \
|
||||
* Importantly, the high byte will be the byte-wise sum of all \
|
||||
* the bytes of the original value. \
|
||||
*/ \
|
||||
x = x * bmul; \
|
||||
x >>= ((sizeof(x) - 1) * 8); \
|
||||
return (unsigned)x; \
|
||||
} while(0)
|
||||
|
||||
static inline unsigned
|
||||
popcount_u_slow(unsigned bitmap) {
|
||||
DO_POPCOUNT(bitmap, unsigned);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
popcount_lu_slow(unsigned long bitmap) {
|
||||
DO_POPCOUNT(bitmap, unsigned long);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
popcount_llu_slow(unsigned long long bitmap) {
|
||||
DO_POPCOUNT(bitmap, unsigned long long);
|
||||
}
|
||||
|
||||
#undef DO_POPCOUNT
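The macro above is a SWAR ("SIMD within a register") popcount. A hedged standalone 32-bit walk-through of the same steps, with the expected result noted in a comment (not jemalloc code):

/* Standalone 32-bit walk-through of the same steps as DO_POPCOUNT. */
#include <stdint.h>
#include <stdio.h>

static unsigned
popcount32_slow(uint32_t x) {
	const uint32_t bmul = 0x01010101U;
	x = x - ((x >> 1) & (0x55U * bmul));                     /* 2-bit sums  */
	x = (x & (0x33U * bmul)) + ((x >> 2) & (0x33U * bmul));  /* 4-bit sums  */
	x = (x + (x >> 4)) & (0x0FU * bmul);                     /* 8-bit sums  */
	x = x * bmul;                        /* top byte accumulates all bytes */
	return x >> 24;
}

int main(void) {
	uint32_t v = 0xF00F0001U;
	printf("popcount(0x%08X) = %u\n", v, popcount32_slow(v)); /* prints 9 */
	return 0;
}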
|
||||
|
||||
static inline unsigned
|
||||
popcount_u(unsigned bitmap) {
|
||||
#ifdef JEMALLOC_INTERNAL_POPCOUNT
|
||||
return JEMALLOC_INTERNAL_POPCOUNT(bitmap);
|
||||
#else
|
||||
return popcount_u_slow(bitmap);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
popcount_lu(unsigned long bitmap) {
|
||||
#ifdef JEMALLOC_INTERNAL_POPCOUNTL
|
||||
return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
|
||||
#else
|
||||
return popcount_lu_slow(bitmap);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
popcount_llu(unsigned long long bitmap) {
|
||||
#ifdef JEMALLOC_INTERNAL_POPCOUNTLL
|
||||
return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap);
|
||||
#else
|
||||
return popcount_llu_slow(bitmap);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Clears first unset bit in bitmap, and returns
|
||||
* place of bit. bitmap *must not* be 0.
|
||||
*/
|
||||
|
||||
static inline size_t
|
||||
cfs_lu(unsigned long* bitmap) {
|
||||
util_assume(*bitmap != 0);
|
||||
size_t bit = ffs_lu(*bitmap);
|
||||
*bitmap ^= ZU(1) << bit;
|
||||
return bit;
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
ffs_zu(size_t x) {
|
||||
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
|
||||
return ffs_u(x);
|
||||
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
|
||||
return ffs_lu(x);
|
||||
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
|
||||
return ffs_llu(x);
|
||||
#else
|
||||
#error No implementation for size_t ffs()
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_zu(size_t x) {
|
||||
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
|
||||
return fls_u(x);
|
||||
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
|
||||
return fls_lu(x);
|
||||
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
|
||||
return fls_llu(x);
|
||||
#else
|
||||
#error No implementation for size_t fls()
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
static inline unsigned
|
||||
ffs_u64(uint64_t x) {
|
||||
#if LG_SIZEOF_LONG == 3
|
||||
return ffs_lu(x);
|
||||
#elif LG_SIZEOF_LONG_LONG == 3
|
||||
return ffs_llu(x);
|
||||
#else
|
||||
#error No implementation for 64-bit ffs()
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_u64(uint64_t x) {
|
||||
#if LG_SIZEOF_LONG == 3
|
||||
return fls_lu(x);
|
||||
#elif LG_SIZEOF_LONG_LONG == 3
|
||||
return fls_llu(x);
|
||||
#else
|
||||
#error No implementation for 64-bit fls()
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
ffs_u32(uint32_t x) {
|
||||
#if LG_SIZEOF_INT == 2
|
||||
return ffs_u(x);
|
||||
#else
|
||||
#error No implementation for 32-bit ffs()
|
||||
#endif
|
||||
return ffs_u(x);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
fls_u32(uint32_t x) {
|
||||
#if LG_SIZEOF_INT == 2
|
||||
return fls_u(x);
|
||||
#else
|
||||
#error No implementation for 32-bit fls()
|
||||
#endif
|
||||
return fls_u(x);
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
pow2_ceil_u64(uint64_t x) {
|
||||
if (unlikely(x <= 1)) {
|
||||
return x;
|
||||
}
|
||||
size_t msb_on_index = fls_u64(x - 1);
|
||||
/*
|
||||
* Range-check; it's on the callers to ensure that the result of this
|
||||
* call won't overflow.
|
||||
*/
|
||||
assert(msb_on_index < 63);
|
||||
return 1ULL << (msb_on_index + 1);
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
pow2_ceil_u32(uint32_t x) {
|
||||
if (unlikely(x <= 1)) {
|
||||
return x;
|
||||
}
|
||||
size_t msb_on_index = fls_u32(x - 1);
|
||||
/* As above. */
|
||||
assert(msb_on_index < 31);
|
||||
return 1U << (msb_on_index + 1);
|
||||
}
|
||||
|
||||
/* Compute the smallest power of 2 that is >= x. */
|
||||
static inline size_t
|
||||
pow2_ceil_zu(size_t x) {
|
||||
#if (LG_SIZEOF_PTR == 3)
|
||||
return pow2_ceil_u64(x);
|
||||
#else
|
||||
return pow2_ceil_u32(x);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
lg_floor(size_t x) {
|
||||
util_assume(x != 0);
|
||||
#if (LG_SIZEOF_PTR == 3)
|
||||
return fls_u64(x);
|
||||
#else
|
||||
return fls_u32(x);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
lg_ceil(size_t x) {
|
||||
return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
|
||||
}
|
||||
|
||||
/* A compile-time version of lg_floor and lg_ceil. */
|
||||
#define LG_FLOOR_1(x) 0
|
||||
#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
|
||||
#define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2))
|
||||
#define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4))
|
||||
#define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8))
|
||||
#define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16))
|
||||
#define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32))
|
||||
#if LG_SIZEOF_PTR == 2
|
||||
# define LG_FLOOR(x) LG_FLOOR_32((x))
|
||||
#else
|
||||
# define LG_FLOOR(x) LG_FLOOR_64((x))
|
||||
#endif
|
||||
|
||||
#define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1))
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
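`pow2_ceil_u64` above rounds up to the next power of two by taking `fls` of `x - 1` and shifting one past it. A hedged standalone sketch of that relation, built on a clz-based `fls` (assumes GCC/Clang `__builtin_clzll`):

/* Standalone sketch of pow2_ceil built on a clz-based fls. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned
fls64(uint64_t x) {            /* index of the highest set bit, 0-based */
	assert(x != 0);
	return 63 ^ __builtin_clzll(x);
}

static uint64_t
pow2_ceil64(uint64_t x) {
	if (x <= 1) {
		return x;
	}
	return UINT64_C(1) << (fls64(x - 1) + 1);
}

int main(void) {
	printf("%llu %llu %llu\n",
	    (unsigned long long)pow2_ceil64(17),    /* 32 */
	    (unsigned long long)pow2_ceil64(32),    /* 32 */
	    (unsigned long long)pow2_ceil64(33));   /* 64 */
	return 0;
}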
368
BeefRT/JEMalloc/include/jemalloc/internal/bitmap.h
Normal file
@@ -0,0 +1,368 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BITMAP_H
|
||||
#define JEMALLOC_INTERNAL_BITMAP_H
|
||||
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
typedef unsigned long bitmap_t;
|
||||
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
|
||||
|
||||
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
|
||||
#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
|
||||
/* Maximum bitmap bit count is determined by maximum regions per slab. */
|
||||
# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS
|
||||
#else
|
||||
/* Maximum bitmap bit count is determined by number of extent size classes. */
|
||||
# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
|
||||
#endif
|
||||
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
|
||||
|
||||
/* Number of bits per group. */
|
||||
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
|
||||
#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
|
||||
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
|
||||
|
||||
/*
|
||||
* Do some analysis on how big the bitmap is before we use a tree. For a brute
|
||||
* force linear search, if we would have to call ffs_lu() more than 2^3 times,
|
||||
* use a tree instead.
|
||||
*/
|
||||
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
|
||||
# define BITMAP_USE_TREE
|
||||
#endif
|
||||
|
||||
/* Number of groups required to store a given number of bits. */
|
||||
#define BITMAP_BITS2GROUPS(nbits) \
|
||||
(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
|
||||
|
||||
/*
|
||||
* Number of groups required at a particular level for a given number of bits.
|
||||
*/
|
||||
#define BITMAP_GROUPS_L0(nbits) \
|
||||
BITMAP_BITS2GROUPS(nbits)
|
||||
#define BITMAP_GROUPS_L1(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
|
||||
#define BITMAP_GROUPS_L2(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
|
||||
#define BITMAP_GROUPS_L3(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
|
||||
BITMAP_BITS2GROUPS((nbits)))))
|
||||
#define BITMAP_GROUPS_L4(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
|
||||
|
||||
/*
|
||||
* Assuming the number of levels, number of groups required for a given number
|
||||
* of bits.
|
||||
*/
|
||||
#define BITMAP_GROUPS_1_LEVEL(nbits) \
|
||||
BITMAP_GROUPS_L0(nbits)
|
||||
#define BITMAP_GROUPS_2_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
|
||||
#define BITMAP_GROUPS_3_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
|
||||
#define BITMAP_GROUPS_4_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
|
||||
#define BITMAP_GROUPS_5_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
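As a worked example of the group arithmetic above (assuming 64-bit `bitmap_t` groups, i.e. `LG_SIZEOF_BITMAP == 3`): a 512-bit bitmap needs ceil(512/64) = 8 level-0 groups and 1 level-1 summary group, so the two-level layout stores 9 groups in total. A small standalone sketch of that calculation:

/* Worked example of the group arithmetic, assuming 64-bit groups. */
#include <stddef.h>
#include <stdio.h>

#define GROUP_NBITS 64
#define BITS2GROUPS(nbits) (((nbits) + GROUP_NBITS - 1) / GROUP_NBITS)

int main(void) {
	size_t nbits = 512;
	size_t l0 = BITS2GROUPS(nbits);   /* 8: one bit per tracked slot    */
	size_t l1 = BITS2GROUPS(l0);      /* 1: one summary bit per L0 group */
	printf("nbits=%zu -> L0=%zu L1=%zu total=%zu\n",
	    nbits, l0, l1, l0 + l1);      /* total = 9 groups */
	return 0;
}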
|
||||
|
||||
/*
|
||||
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
|
||||
*/
|
||||
#ifdef BITMAP_USE_TREE
|
||||
|
||||
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
|
||||
#else
|
||||
# error "Unsupported bitmap size"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Maximum number of levels possible. This could be statically computed based
|
||||
* on LG_BITMAP_MAXBITS:
|
||||
*
|
||||
* #define BITMAP_MAX_LEVELS \
|
||||
* (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
|
||||
* + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
|
||||
*
|
||||
* However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
|
||||
* instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
|
||||
* various cascading macros. The only additional cost this incurs is some
|
||||
* unused trailing entries in bitmap_info_t structures; the bitmaps themselves
|
||||
* are not impacted.
|
||||
*/
|
||||
#define BITMAP_MAX_LEVELS 5
|
||||
|
||||
#define BITMAP_INFO_INITIALIZER(nbits) { \
|
||||
/* nbits. */ \
|
||||
nbits, \
|
||||
/* nlevels. */ \
|
||||
(BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
|
||||
(BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
|
||||
(BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
|
||||
(BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
|
||||
/* levels. */ \
|
||||
{ \
|
||||
{0}, \
|
||||
{BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
|
||||
BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
|
||||
BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
|
||||
BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
|
||||
+ BITMAP_GROUPS_L0(nbits)} \
|
||||
} \
|
||||
}
|
||||
|
||||
#else /* BITMAP_USE_TREE */
|
||||
|
||||
#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
|
||||
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
|
||||
|
||||
#define BITMAP_INFO_INITIALIZER(nbits) { \
|
||||
/* nbits. */ \
|
||||
nbits, \
|
||||
/* ngroups. */ \
|
||||
BITMAP_BITS2GROUPS(nbits) \
|
||||
}
|
||||
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
|
||||
typedef struct bitmap_level_s {
|
||||
/* Offset of this level's groups within the array of groups. */
|
||||
size_t group_offset;
|
||||
} bitmap_level_t;
|
||||
|
||||
typedef struct bitmap_info_s {
|
||||
/* Logical number of bits in bitmap (stored at bottom level). */
|
||||
size_t nbits;
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Number of levels necessary for nbits. */
|
||||
unsigned nlevels;
|
||||
|
||||
/*
|
||||
* Only the first (nlevels+1) elements are used, and levels are ordered
|
||||
* bottom to top (e.g. the bottom level is stored in levels[0]).
|
||||
*/
|
||||
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
|
||||
#else /* BITMAP_USE_TREE */
|
||||
/* Number of groups necessary for nbits. */
|
||||
size_t ngroups;
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
} bitmap_info_t;
|
||||
|
||||
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
|
||||
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
|
||||
size_t bitmap_size(const bitmap_info_t *binfo);
|
||||
|
||||
static inline bool
|
||||
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
|
||||
#ifdef BITMAP_USE_TREE
|
||||
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
|
||||
bitmap_t rg = bitmap[rgoff];
|
||||
/* The bitmap is full iff the root group is 0. */
|
||||
return (rg == 0);
|
||||
#else
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < binfo->ngroups; i++) {
|
||||
if (bitmap[i] != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline bool
|
||||
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t g;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
g = bitmap[goff];
|
||||
return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
}
|
||||
|
||||
static inline void
|
||||
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t *gp;
|
||||
bitmap_t g;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
assert(!bitmap_get(bitmap, binfo, bit));
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[goff];
|
||||
g = *gp;
|
||||
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
assert(bitmap_get(bitmap, binfo, bit));
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Propagate group state transitions up the tree. */
|
||||
if (g == 0) {
|
||||
unsigned i;
|
||||
for (i = 1; i < binfo->nlevels; i++) {
|
||||
bit = goff;
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[binfo->levels[i].group_offset + goff];
|
||||
g = *gp;
|
||||
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
if (g != 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/* ffu: find first unset >= bit. */
|
||||
static inline size_t
|
||||
bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
|
||||
assert(min_bit < binfo->nbits);
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
size_t bit = 0;
|
||||
for (unsigned level = binfo->nlevels; level--;) {
|
||||
size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
|
||||
1));
|
||||
bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
|
||||
>> lg_bits_per_group)];
|
||||
unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
|
||||
bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
|
||||
assert(group_nmask <= BITMAP_GROUP_NBITS);
|
||||
bitmap_t group_mask = ~((1LU << group_nmask) - 1);
|
||||
bitmap_t group_masked = group & group_mask;
|
||||
if (group_masked == 0LU) {
|
||||
if (group == 0LU) {
|
||||
return binfo->nbits;
|
||||
}
|
||||
/*
|
||||
* min_bit was preceded by one or more unset bits in
|
||||
* this group, but there are no other unset bits in this
|
||||
* group. Try again starting at the first bit of the
|
||||
* next sibling. This will recurse at most once per
|
||||
* non-root level.
|
||||
*/
|
||||
size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
|
||||
assert(sib_base > min_bit);
|
||||
assert(sib_base > bit);
|
||||
if (sib_base >= binfo->nbits) {
|
||||
return binfo->nbits;
|
||||
}
|
||||
return bitmap_ffu(bitmap, binfo, sib_base);
|
||||
}
|
||||
bit += ((size_t)ffs_lu(group_masked)) <<
|
||||
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
|
||||
}
|
||||
assert(bit >= min_bit);
|
||||
assert(bit < binfo->nbits);
|
||||
return bit;
|
||||
#else
|
||||
size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
|
||||
bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
|
||||
- 1);
|
||||
size_t bit;
|
||||
do {
|
||||
if (g != 0) {
|
||||
bit = ffs_lu(g);
|
||||
return (i << LG_BITMAP_GROUP_NBITS) + bit;
|
||||
}
|
||||
i++;
|
||||
g = bitmap[i];
|
||||
} while (i < binfo->ngroups);
|
||||
return binfo->nbits;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* sfu: set first unset. */
|
||||
static inline size_t
|
||||
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
|
||||
size_t bit;
|
||||
bitmap_t g;
|
||||
unsigned i;
|
||||
|
||||
assert(!bitmap_full(bitmap, binfo));
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
i = binfo->nlevels - 1;
|
||||
g = bitmap[binfo->levels[i].group_offset];
|
||||
bit = ffs_lu(g);
|
||||
while (i > 0) {
|
||||
i--;
|
||||
g = bitmap[binfo->levels[i].group_offset + bit];
|
||||
bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
|
||||
}
|
||||
#else
|
||||
i = 0;
|
||||
g = bitmap[0];
|
||||
while (g == 0) {
|
||||
i++;
|
||||
g = bitmap[i];
|
||||
}
|
||||
bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
|
||||
#endif
|
||||
bitmap_set(bitmap, binfo, bit);
|
||||
return bit;
|
||||
}
|
||||
|
||||
static inline void
|
||||
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t *gp;
|
||||
bitmap_t g;
|
||||
UNUSED bool propagate;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
assert(bitmap_get(bitmap, binfo, bit));
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[goff];
|
||||
g = *gp;
|
||||
propagate = (g == 0);
|
||||
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
assert(!bitmap_get(bitmap, binfo, bit));
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Propagate group state transitions up the tree. */
|
||||
if (propagate) {
|
||||
unsigned i;
|
||||
for (i = 1; i < binfo->nlevels; i++) {
|
||||
bit = goff;
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[binfo->levels[i].group_offset + goff];
|
||||
g = *gp;
|
||||
propagate = (g == 0);
|
||||
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
|
||||
== 0);
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
if (!propagate) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BITMAP_H */
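For reference, here is a minimal usage sketch of the bitmap API declared above. It is illustrative only and not part of the diff; it assumes bitmap_t and BITMAP_GROUPS_MAX from earlier in this header, and that nbits is within BITMAP_MAXBITS.

/* Hypothetical helper, for illustration only. */
static void
bitmap_usage_sketch(void) {
	bitmap_info_t binfo;
	bitmap_info_init(&binfo, 100);            /* logical bits 0..99 */

	bitmap_t groups[BITMAP_GROUPS_MAX];       /* worst-case storage; bitmap_size()
	                                           * reports the exact byte count */
	bitmap_init(groups, &binfo, /* fill */ false);  /* every bit starts unset */

	size_t a = bitmap_sfu(groups, &binfo);    /* returns 0 and marks it set */
	size_t b = bitmap_sfu(groups, &binfo);    /* returns 1 */
	assert(bitmap_get(groups, &binfo, a) && bitmap_get(groups, &binfo, b));

	bitmap_unset(groups, &binfo, a);          /* bit 0 is the first unset again */
	assert(bitmap_ffu(groups, &binfo, 0) == a);
}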
|
32
BeefRT/JEMalloc/include/jemalloc/internal/buf_writer.h
Normal file
|
@ -0,0 +1,32 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BUF_WRITER_H
|
||||
#define JEMALLOC_INTERNAL_BUF_WRITER_H
|
||||
|
||||
/*
|
||||
* Note: when using the buffered writer, cbopaque is passed to write_cb only
|
||||
* when the buffer is flushed. It would make a difference if cbopaque points
|
||||
* to something that's changing for each write_cb call, or something that
|
||||
* affects write_cb in a way dependent on the content of the output string.
|
||||
* However, the most typical usage case in practice is that cbopaque points to
|
||||
* some "option like" content for the write_cb, so it doesn't matter.
|
||||
*/
|
||||
|
||||
typedef struct {
|
||||
write_cb_t *write_cb;
|
||||
void *cbopaque;
|
||||
char *buf;
|
||||
size_t buf_size;
|
||||
size_t buf_end;
|
||||
bool internal_buf;
|
||||
} buf_writer_t;
|
||||
|
||||
bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer,
|
||||
write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len);
|
||||
void buf_writer_flush(buf_writer_t *buf_writer);
|
||||
write_cb_t buf_writer_cb;
|
||||
void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer);
|
||||
|
||||
typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit);
|
||||
void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
|
||||
void *read_cbopaque);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */
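A hedged sketch of how the buffered writer above is typically driven. Assumptions: write_cb_t has the usual void (void *cbopaque, const char *s) callback shape used elsewhere in jemalloc, and a true return from buf_writer_init means the writer could not be set up as requested.

static void
buf_writer_usage_sketch(tsdn_t *tsdn, write_cb_t *user_cb, void *user_opaque) {
	char scratch[4096];                       /* caller-provided buffer */
	buf_writer_t bw;
	if (buf_writer_init(tsdn, &bw, user_cb, user_opaque, scratch,
	    sizeof(scratch))) {
		return;                           /* assumption: true == failure */
	}
	/* Hand (buf_writer_cb, &bw) to whatever produces output; writes are
	 * batched in scratch and only reach user_cb on flush. */
	buf_writer_cb(&bw, "line 1\n");
	buf_writer_cb(&bw, "line 2\n");
	buf_writer_terminate(tsdn, &bw);          /* flushes remaining output */
}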
|
670
BeefRT/JEMalloc/include/jemalloc/internal/cache_bin.h
Normal file
|
@ -0,0 +1,670 @@
|
|||
#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
|
||||
#define JEMALLOC_INTERNAL_CACHE_BIN_H
|
||||
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
|
||||
/*
|
||||
* The cache_bins are the mechanism that the tcache and the arena use to
|
||||
* communicate. The tcache fills from and flushes to the arena by passing a
|
||||
* cache_bin_t to fill/flush. When the arena needs to pull stats from the
|
||||
* tcaches associated with it, it does so by iterating over its
|
||||
* cache_bin_array_descriptor_t objects and reading out per-bin stats it
|
||||
* contains. This makes it so that the arena need not know about the existence
|
||||
* of the tcache at all.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The size in bytes of each cache bin stack. We also use this to indicate
|
||||
* *counts* of individual objects.
|
||||
*/
|
||||
typedef uint16_t cache_bin_sz_t;
|
||||
|
||||
/*
|
||||
* Leave a noticeable mark pattern on the cache bin stack boundaries, in case a
|
||||
* bug starts leaking those. Make it look like the junk pattern but be distinct
|
||||
* from it.
|
||||
*/
|
||||
static const uintptr_t cache_bin_preceding_junk =
|
||||
(uintptr_t)0x7a7a7a7a7a7a7a7aULL;
|
||||
/* Note: a7 vs. 7a above -- this tells you which pointer leaked. */
|
||||
static const uintptr_t cache_bin_trailing_junk =
|
||||
(uintptr_t)0xa7a7a7a7a7a7a7a7ULL;
|
||||
|
||||
/*
|
||||
* That implies the following value, for the maximum number of items in any
|
||||
* individual bin. The cache bins track their bounds looking just at the low
|
||||
* bits of a pointer, compared against a cache_bin_sz_t. So that's
|
||||
* 1 << (sizeof(cache_bin_sz_t) * 8)
|
||||
* bytes spread across pointer sized objects to get the maximum.
|
||||
*/
|
||||
#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \
|
||||
/ sizeof(void *) - 1)
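Concretely, with the 16-bit cache_bin_sz_t above and 8-byte pointers this bound works out to 2^16 / 8 - 1 = 8191 items per bin. An illustrative compile-time check, not part of the header:

#include <assert.h>    /* static_assert, C11 */
static_assert(sizeof(void *) != 8 || CACHE_BIN_NCACHED_MAX == 8191,
    "2^16 bytes of stack spread over 8-byte slots, minus one reserved slot");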
|
||||
|
||||
/*
|
||||
* This lives inside the cache_bin (for locality reasons), and is initialized
|
||||
* alongside it, but is otherwise not modified by any cache bin operations.
|
||||
* It's logically public and maintained by its callers.
|
||||
*/
|
||||
typedef struct cache_bin_stats_s cache_bin_stats_t;
|
||||
struct cache_bin_stats_s {
|
||||
/*
|
||||
* Number of allocation requests that corresponded to the size of this
|
||||
* bin.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
};
|
||||
|
||||
/*
|
||||
* Read-only information associated with each element of tcache_t's tbins array
|
||||
* is stored separately, mainly to reduce memory usage.
|
||||
*/
|
||||
typedef struct cache_bin_info_s cache_bin_info_t;
|
||||
struct cache_bin_info_s {
|
||||
cache_bin_sz_t ncached_max;
|
||||
};
|
||||
|
||||
/*
|
||||
* Responsible for caching allocations associated with a single size.
|
||||
*
|
||||
* Several pointers are used to track the stack. To save on metadata bytes,
|
||||
* only the stack_head is a full sized pointer (which is dereferenced on the
|
||||
* fastpath), while the others store only the low 16 bits -- this is correct
|
||||
* because a single stack never takes more space than 2^16 bytes, and at the
|
||||
* same time only equality checks are performed on the low bits.
|
||||
*
|
||||
* (low addr) (high addr)
|
||||
* |------stashed------|------available------|------cached-----|
|
||||
* ^ ^ ^ ^
|
||||
* low_bound(derived) low_bits_full stack_head low_bits_empty
|
||||
*/
|
||||
typedef struct cache_bin_s cache_bin_t;
|
||||
struct cache_bin_s {
|
||||
/*
|
||||
* The stack grows down. Whenever the bin is nonempty, the head points
|
||||
* to an array entry containing a valid allocation. When it is empty,
|
||||
* the head points to one element past the owned array.
|
||||
*/
|
||||
void **stack_head;
|
||||
/*
|
||||
* cur_ptr and stats are both modified frequently. Let's keep them
|
||||
* close so that they have a higher chance of being on the same
|
||||
* cacheline, thus fewer write-backs.
|
||||
*/
|
||||
cache_bin_stats_t tstats;
|
||||
|
||||
/*
|
||||
* The low bits of the address of the first item in the stack that
|
||||
* hasn't been used since the last GC, to track the low water mark (min
|
||||
* # of cached items).
|
||||
*
|
||||
* Since the stack grows down, this is a higher address than
|
||||
* low_bits_full.
|
||||
*/
|
||||
uint16_t low_bits_low_water;
|
||||
|
||||
/*
|
||||
* The low bits of the value that stack_head will take on when the array
|
||||
* is full (of cached & stashed items). But remember that stack_head
|
||||
* always points to a valid item when the array is nonempty -- this is
|
||||
* in the array.
|
||||
*
|
||||
* Recall that since the stack grows down, this is the lowest available
|
||||
* address in the array for caching. Only adjusted when stashing items.
|
||||
*/
|
||||
uint16_t low_bits_full;
|
||||
|
||||
/*
|
||||
* The low bits of the value that stack_head will take on when the array
|
||||
* is empty.
|
||||
*
|
||||
* The stack grows down -- this is one past the highest address in the
|
||||
* array. Immutable after initialization.
|
||||
*/
|
||||
uint16_t low_bits_empty;
|
||||
};
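The low-16-bit fields above work because every tracked position lives inside a single array of at most 2^16 bytes, so a difference computed on the truncated values is exact even when the full addresses straddle a 16-bit boundary. A standalone sketch of that property, with hypothetical names, not part of the header:

#include <assert.h>
#include <stdint.h>

static uint16_t
low_bits_diff_sketch(uint16_t earlier, uint16_t later) {
	/* Conversion back to uint16_t reduces mod 2^16, handling wraparound. */
	return (uint16_t)(later - earlier);
}

static void
low_bits_sketch(void) {
	void *slots[8];                           /* stands in for the bin's stack */
	uint16_t lo_head = (uint16_t)(uintptr_t)&slots[0];
	uint16_t lo_empty = (uint16_t)(uintptr_t)&slots[8];
	/* Matches the byte distance no matter where slots[] happens to land. */
	assert(low_bits_diff_sketch(lo_head, lo_empty) ==
	    (uint16_t)(8 * sizeof(void *)));
}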
|
||||
|
||||
/*
|
||||
* The cache_bins live inside the tcache, but the arena (by design) isn't
|
||||
* supposed to know much about tcache internals. To let the arena iterate over
|
||||
* associated bins, we keep (with the tcache) a linked list of
|
||||
* cache_bin_array_descriptor_ts that tell the arena how to find the bins.
|
||||
*/
|
||||
typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
|
||||
struct cache_bin_array_descriptor_s {
|
||||
/*
|
||||
* The arena keeps a list of the cache bins associated with it, for
|
||||
* stats collection.
|
||||
*/
|
||||
ql_elm(cache_bin_array_descriptor_t) link;
|
||||
/* Pointers to the tcache bins. */
|
||||
cache_bin_t *bins;
|
||||
};
|
||||
|
||||
static inline void
|
||||
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
|
||||
cache_bin_t *bins) {
|
||||
ql_elm_new(descriptor, link);
|
||||
descriptor->bins = bins;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
cache_bin_nonfast_aligned(const void *ptr) {
|
||||
if (!config_uaf_detection) {
|
||||
return false;
|
||||
}
|
||||
/*
|
||||
* Currently we use alignment to decide which pointer to junk & stash on
|
||||
* dealloc (for catching use-after-free). In some common cases a
|
||||
* page-aligned check is needed already (sdalloc w/ config_prof), so we
|
||||
* are getting it more or less for free -- no added instructions on
|
||||
* free_fastpath.
|
||||
*
|
||||
* Another way of deciding which pointer to sample is adding another
|
||||
* thread_event to pick one every N bytes. That also adds no cost on
|
||||
* the fastpath, however it will tend to pick large allocations which is
|
||||
* not the desired behavior.
|
||||
*/
|
||||
return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0;
|
||||
}
|
||||
|
||||
/* Returns ncached_max: Upper limit on ncached. */
|
||||
static inline cache_bin_sz_t
|
||||
cache_bin_info_ncached_max(cache_bin_info_t *info) {
|
||||
return info->ncached_max;
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal.
|
||||
*
|
||||
* Asserts that the pointer associated with earlier is <= the one associated
|
||||
* with later.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
|
||||
if (earlier > later) {
|
||||
assert(bin->low_bits_full > bin->low_bits_empty);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal.
|
||||
*
|
||||
* Does difference calculations that handle wraparound correctly. Earlier must
|
||||
* be associated with the position earlier in memory.
|
||||
*/
|
||||
static inline uint16_t
|
||||
cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later, bool racy) {
|
||||
/*
|
||||
* When it's racy, bin->low_bits_full can be modified concurrently. It
|
||||
* can cross the uint16_t max value and become less than
|
||||
* bin->low_bits_empty at the time of the check.
|
||||
*/
|
||||
if (!racy) {
|
||||
cache_bin_assert_earlier(bin, earlier, later);
|
||||
}
|
||||
return later - earlier;
|
||||
}
|
||||
|
||||
/*
|
||||
* Number of items currently cached in the bin, without checking ncached_max.
|
||||
* We require specifying whether or not the request is racy (i.e. whether
|
||||
* or not concurrent modifications are possible).
|
||||
*/
|
||||
static inline cache_bin_sz_t
|
||||
cache_bin_ncached_get_internal(cache_bin_t *bin, bool racy) {
|
||||
cache_bin_sz_t diff = cache_bin_diff(bin,
|
||||
(uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, racy);
|
||||
cache_bin_sz_t n = diff / sizeof(void *);
|
||||
/*
|
||||
* We have undefined behavior here; if this function is called from the
|
||||
* arena stats updating code, then stack_head could change from the
|
||||
* first line to the next one. Morally, these loads should be atomic,
|
||||
* but compilers won't currently generate comparisons with in-memory
|
||||
* operands against atomics, and these variables get accessed on the
|
||||
* fast paths. This should still be "safe" in the sense of generating
|
||||
* the correct assembly for the foreseeable future, though.
|
||||
*/
|
||||
assert(n == 0 || *(bin->stack_head) != NULL || racy);
|
||||
return n;
|
||||
}
|
||||
|
||||
/*
|
||||
* Number of items currently cached in the bin, with checking ncached_max. The
|
||||
* caller must know that no concurrent modification of the cache_bin is
|
||||
* possible.
|
||||
*/
|
||||
static inline cache_bin_sz_t
|
||||
cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
|
||||
/* racy */ false);
|
||||
assert(n <= cache_bin_info_ncached_max(info));
|
||||
return n;
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal.
|
||||
*
|
||||
* A pointer to the position one past the end of the backing array.
|
||||
*
|
||||
* Do not call if racy, because both 'bin->stack_head' and 'bin->low_bits_full'
|
||||
* are subject to concurrent modifications.
|
||||
*/
|
||||
static inline void **
|
||||
cache_bin_empty_position_get(cache_bin_t *bin) {
|
||||
cache_bin_sz_t diff = cache_bin_diff(bin,
|
||||
(uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty,
|
||||
/* racy */ false);
|
||||
uintptr_t empty_bits = (uintptr_t)bin->stack_head + diff;
|
||||
void **ret = (void **)empty_bits;
|
||||
|
||||
assert(ret >= bin->stack_head);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal.
|
||||
*
|
||||
* Calculates low bits of the lower bound of the usable cache bin's range (see
|
||||
* cache_bin_t visual representation above).
|
||||
*
|
||||
* No values are concurrently modified, so it should be safe to read in a
|
||||
* multithreaded environment. Currently concurrent access happens only during
|
||||
* arena statistics collection.
|
||||
*/
|
||||
static inline uint16_t
|
||||
cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
return (uint16_t)bin->low_bits_empty -
|
||||
info->ncached_max * sizeof(void *);
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal.
|
||||
*
|
||||
* A pointer to the position with the lowest address of the backing array.
|
||||
*/
|
||||
static inline void **
|
||||
cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
|
||||
void **ret = cache_bin_empty_position_get(bin) - ncached_max;
|
||||
assert(ret <= bin->stack_head);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* As the name implies. This is important since it's not correct to try to
|
||||
* batch fill a nonempty cache bin.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
assert(cache_bin_ncached_get_local(bin, info) == 0);
|
||||
assert(cache_bin_empty_position_get(bin) == bin->stack_head);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get low water, but without any of the correctness checking we do for the
|
||||
* caller-usable version, if we are temporarily breaking invariants (like
|
||||
* ncached >= low_water during flush).
|
||||
*/
|
||||
static inline cache_bin_sz_t
|
||||
cache_bin_low_water_get_internal(cache_bin_t *bin) {
|
||||
return cache_bin_diff(bin, bin->low_bits_low_water,
|
||||
bin->low_bits_empty, /* racy */ false) / sizeof(void *);
|
||||
}
|
||||
|
||||
/* Returns the numeric value of low water in [0, ncached]. */
|
||||
static inline cache_bin_sz_t
|
||||
cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin);
|
||||
assert(low_water <= cache_bin_info_ncached_max(info));
|
||||
assert(low_water <= cache_bin_ncached_get_local(bin, info));
|
||||
|
||||
cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head,
|
||||
bin->low_bits_low_water);
|
||||
|
||||
return low_water;
|
||||
}
|
||||
|
||||
/*
|
||||
* Indicates that the current cache bin position should be the low water mark
|
||||
* going forward.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_low_water_set(cache_bin_t *bin) {
|
||||
bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
|
||||
}
|
||||
|
||||
static inline void
|
||||
cache_bin_low_water_adjust(cache_bin_t *bin) {
|
||||
if (cache_bin_ncached_get_internal(bin, /* racy */ false)
|
||||
< cache_bin_low_water_get_internal(bin)) {
|
||||
cache_bin_low_water_set(bin);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) {
|
||||
/*
|
||||
* success (instead of ret) should be checked upon the return of this
|
||||
* function. We avoid checking (ret == NULL) because there is never a
|
||||
* null stored on the avail stack (which is unknown to the compiler),
|
||||
* and eagerly checking ret would cause pipeline stall (waiting for the
|
||||
* cacheline).
|
||||
*/
|
||||
|
||||
/*
|
||||
* This may read from the empty position; however the loaded value won't
|
||||
* be used. It's safe because the stack has one more slot reserved.
|
||||
*/
|
||||
void *ret = *bin->stack_head;
|
||||
uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head;
|
||||
void **new_head = bin->stack_head + 1;
|
||||
|
||||
/*
|
||||
* Note that the low water mark is at most empty; if we pass this check,
|
||||
* we know we're non-empty.
|
||||
*/
|
||||
if (likely(low_bits != bin->low_bits_low_water)) {
|
||||
bin->stack_head = new_head;
|
||||
*success = true;
|
||||
return ret;
|
||||
}
|
||||
if (!adjust_low_water) {
|
||||
*success = false;
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* In the fast-path case where we call alloc_easy and then alloc, the
|
||||
* previous checking and computation is optimized away -- we didn't
|
||||
* actually commit any of our operations.
|
||||
*/
|
||||
if (likely(low_bits != bin->low_bits_empty)) {
|
||||
bin->stack_head = new_head;
|
||||
bin->low_bits_low_water = (uint16_t)(uintptr_t)new_head;
|
||||
*success = true;
|
||||
return ret;
|
||||
}
|
||||
*success = false;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate an item out of the bin, failing if we're at the low-water mark.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
|
||||
/* We don't look at info if we're not adjusting low-water. */
|
||||
return cache_bin_alloc_impl(bin, success, false);
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate an item out of the bin, even if we're currently at the low-water
|
||||
* mark (and failing only if the bin is empty).
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
cache_bin_alloc(cache_bin_t *bin, bool *success) {
|
||||
return cache_bin_alloc_impl(bin, success, true);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
|
||||
cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) {
|
||||
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
|
||||
/* racy */ false);
|
||||
if (n > num) {
|
||||
n = (cache_bin_sz_t)num;
|
||||
}
|
||||
memcpy(out, bin->stack_head, n * sizeof(void *));
|
||||
bin->stack_head += n;
|
||||
cache_bin_low_water_adjust(bin);
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
cache_bin_full(cache_bin_t *bin) {
|
||||
return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
|
||||
}
|
||||
|
||||
/*
|
||||
* Free an object into the given bin. Fails only if the bin is full.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
|
||||
if (unlikely(cache_bin_full(bin))) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bin->stack_head--;
|
||||
*bin->stack_head = ptr;
|
||||
cache_bin_assert_earlier(bin, bin->low_bits_full,
|
||||
(uint16_t)(uintptr_t)bin->stack_head);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Returns false if failed to stash (i.e. bin is full). */
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
cache_bin_stash(cache_bin_t *bin, void *ptr) {
|
||||
if (cache_bin_full(bin)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Stash at the full position, in the [full, head) range. */
|
||||
uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head;
|
||||
/* Wraparound handled as well. */
|
||||
uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head,
|
||||
/* racy */ false);
|
||||
*(void **)((uintptr_t)bin->stack_head - diff) = ptr;
|
||||
|
||||
assert(!cache_bin_full(bin));
|
||||
bin->low_bits_full += sizeof(void *);
|
||||
cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the number of stashed pointers.
|
||||
*
|
||||
* When called from a thread not owning the TLS (i.e. racy = true), it's
|
||||
* important to keep in mind that 'bin->stack_head' and 'bin->low_bits_full' can
|
||||
* be modified concurrently and almost no assertions about their values can be
|
||||
* made.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
|
||||
cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
bool racy) {
|
||||
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
|
||||
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
|
||||
info);
|
||||
|
||||
cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
|
||||
bin->low_bits_full, racy) / sizeof(void *);
|
||||
assert(n <= ncached_max);
|
||||
|
||||
if (!racy) {
|
||||
/* Below are for assertions only. */
|
||||
void **low_bound = cache_bin_low_bound_get(bin, info);
|
||||
|
||||
assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound);
|
||||
void *stashed = *(low_bound + n - 1);
|
||||
bool aligned = cache_bin_nonfast_aligned(stashed);
|
||||
#ifdef JEMALLOC_JET
|
||||
/* Allow arbitrary pointers to be stashed in tests. */
|
||||
aligned = true;
|
||||
#endif
|
||||
assert(n == 0 || (stashed != NULL && aligned));
|
||||
}
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
|
||||
cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info,
|
||||
/* racy */ false);
|
||||
assert(n <= cache_bin_info_ncached_max(info));
|
||||
return n;
|
||||
}
|
||||
|
||||
/*
|
||||
* Obtain a racy view of the number of items currently in the cache bin, in the
|
||||
* presence of possible concurrent modifications.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
|
||||
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, /* racy */ true);
|
||||
assert(n <= cache_bin_info_ncached_max(info));
|
||||
*ncached = n;
|
||||
|
||||
n = cache_bin_nstashed_get_internal(bin, info, /* racy */ true);
|
||||
assert(n <= cache_bin_info_ncached_max(info));
|
||||
*nstashed = n;
|
||||
/* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
|
||||
}
|
||||
|
||||
/*
|
||||
* Filling and flushing are done in batch, on arrays of void *s. For filling,
|
||||
* the arrays go forward, and can be accessed with ordinary array arithmetic.
|
||||
* For flushing, we work from the end backwards, and so need to use special
|
||||
* accessors that invert the usual ordering.
|
||||
*
|
||||
* This is important for maintaining first-fit; the arena code fills with
|
||||
* earliest objects first, and so those are the ones we should return first for
|
||||
* cache_bin_alloc calls. When flushing, we should flush the objects that we
|
||||
* wish to return later; those at the end of the array. This is better for the
|
||||
* first-fit heuristic as well as for cache locality; the most recently freed
|
||||
* objects are the ones most likely to still be in cache.
|
||||
*
|
||||
* This all sounds very hand-wavey and theoretical, but reverting the ordering
|
||||
* on one or the other pathway leads to measurable slowdowns.
|
||||
*/
|
||||
|
||||
typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t;
|
||||
struct cache_bin_ptr_array_s {
|
||||
cache_bin_sz_t n;
|
||||
void **ptr;
|
||||
};
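As a concrete, hypothetical illustration of the inverted flush ordering described above: a flush-side accessor would map logical index n to the slot counted down from the top of the array. The accessors actually used by the tcache code may differ in detail.

static inline void *
flush_order_get_sketch(cache_bin_ptr_array_t *arr, cache_bin_sz_t n) {
	assert(n < arr->n);
	/* Logical element 0 of a flush is the last slot of the array. */
	return arr->ptr[arr->n - 1 - n];
}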
|
||||
|
||||
/*
|
||||
* Declare a cache_bin_ptr_array_t sufficient for nval items.
|
||||
*
|
||||
* In the current implementation, this could be just part of a
|
||||
* cache_bin_ptr_array_init_... call, since we reuse the cache bin stack memory.
|
||||
* Indirecting behind a macro, though, means experimenting with linked-list
|
||||
* representations is easy (since they'll require an alloca in the calling
|
||||
* frame).
|
||||
*/
|
||||
#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \
|
||||
cache_bin_ptr_array_t name; \
|
||||
name.n = (nval)
|
||||
|
||||
/*
|
||||
* Start a fill. The bin must be empty, and this must be followed by a
|
||||
* finish_fill call before doing any alloc/dalloc operations on the bin.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
|
||||
cache_bin_assert_empty(bin, info);
|
||||
arr->ptr = cache_bin_empty_position_get(bin) - nfill;
|
||||
}
|
||||
|
||||
/*
|
||||
* While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to
|
||||
* fill, nfilled here is the number we actually filled (which may be less, in
|
||||
* case of OOM).
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
|
||||
cache_bin_assert_empty(bin, info);
|
||||
void **empty_position = cache_bin_empty_position_get(bin);
|
||||
if (nfilled < arr->n) {
|
||||
memmove(empty_position - nfilled, empty_position - arr->n,
|
||||
nfilled * sizeof(void *));
|
||||
}
|
||||
bin->stack_head = empty_position - nfilled;
|
||||
}
|
||||
|
||||
/*
|
||||
* Same deal, but with flush. Unlike fill (which can fail), the user must flush
|
||||
* everything we give them.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
|
||||
arr->ptr = cache_bin_empty_position_get(bin) - nflush;
|
||||
assert(cache_bin_ncached_get_local(bin, info) == 0
|
||||
|| *arr->ptr != NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
|
||||
unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed;
|
||||
memmove(bin->stack_head + nflushed, bin->stack_head,
|
||||
rem * sizeof(void *));
|
||||
bin->stack_head = bin->stack_head + nflushed;
|
||||
cache_bin_low_water_adjust(bin);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
|
||||
cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
|
||||
cache_bin_sz_t nstashed) {
|
||||
assert(nstashed > 0);
|
||||
assert(cache_bin_nstashed_get_local(bin, info) == nstashed);
|
||||
|
||||
void **low_bound = cache_bin_low_bound_get(bin, info);
|
||||
arr->ptr = low_bound;
|
||||
assert(*arr->ptr != NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
void **low_bound = cache_bin_low_bound_get(bin, info);
|
||||
|
||||
/* Reset the bin local full position. */
|
||||
bin->low_bits_full = (uint16_t)(uintptr_t)low_bound;
|
||||
assert(cache_bin_nstashed_get_local(bin, info) == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize a cache_bin_info to represent up to the given number of items in
|
||||
* the cache_bins it is associated with.
|
||||
*/
|
||||
void cache_bin_info_init(cache_bin_info_t *bin_info,
|
||||
cache_bin_sz_t ncached_max);
|
||||
/*
|
||||
* Given an array of initialized cache_bin_info_ts, determine how big an
|
||||
* allocation is required to initialize a full set of cache_bin_ts.
|
||||
*/
|
||||
void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
|
||||
size_t *size, size_t *alignment);
|
||||
|
||||
/*
|
||||
* Actually initialize some cache bins. Callers should allocate the backing
|
||||
* memory indicated by a call to cache_bin_info_compute_alloc. They should then
|
||||
* preincrement, call init once for each bin and info, and then call
|
||||
* cache_bin_postincrement. *alloc_cur will then point immediately past the end
|
||||
* of the allocation.
|
||||
*/
|
||||
void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos,
|
||||
void *alloc, size_t *cur_offset);
|
||||
void cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos,
|
||||
void *alloc, size_t *cur_offset);
|
||||
void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
|
||||
size_t *cur_offset);
|
||||
|
||||
/*
|
||||
* If a cache bin was zero initialized (either because it lives in static or
|
||||
* thread-local storage, or was memset to 0), this function indicates whether or
|
||||
* not cache_bin_init was called on it.
|
||||
*/
|
||||
bool cache_bin_still_zero_initialized(cache_bin_t *bin);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
|
101
BeefRT/JEMalloc/include/jemalloc/internal/ckh.h
Normal file
|
@ -0,0 +1,101 @@
|
|||
#ifndef JEMALLOC_INTERNAL_CKH_H
|
||||
#define JEMALLOC_INTERNAL_CKH_H
|
||||
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
|
||||
/* Cuckoo hashing implementation. Skip to the end for the interface. */
|
||||
|
||||
/******************************************************************************/
|
||||
/* INTERNAL DEFINITIONS -- IGNORE */
|
||||
/******************************************************************************/
|
||||
|
||||
/* Maintain counters used to get an idea of performance. */
|
||||
/* #define CKH_COUNT */
|
||||
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
|
||||
/* #define CKH_VERBOSE */
|
||||
|
||||
/*
|
||||
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
|
||||
* one bucket per L1 cache line.
|
||||
*/
|
||||
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
|
||||
|
||||
/* Typedefs to allow easy function pointer passing. */
|
||||
typedef void ckh_hash_t (const void *, size_t[2]);
|
||||
typedef bool ckh_keycomp_t (const void *, const void *);
|
||||
|
||||
/* Hash table cell. */
|
||||
typedef struct {
|
||||
const void *key;
|
||||
const void *data;
|
||||
} ckhc_t;
|
||||
|
||||
/* The hash table itself. */
|
||||
typedef struct {
|
||||
#ifdef CKH_COUNT
|
||||
/* Counters used to get an idea of performance. */
|
||||
uint64_t ngrows;
|
||||
uint64_t nshrinks;
|
||||
uint64_t nshrinkfails;
|
||||
uint64_t ninserts;
|
||||
uint64_t nrelocs;
|
||||
#endif
|
||||
|
||||
/* Used for pseudo-random number generation. */
|
||||
uint64_t prng_state;
|
||||
|
||||
/* Total number of items. */
|
||||
size_t count;
|
||||
|
||||
/*
|
||||
* Minimum and current number of hash table buckets. There are
|
||||
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
|
||||
*/
|
||||
unsigned lg_minbuckets;
|
||||
unsigned lg_curbuckets;
|
||||
|
||||
/* Hash and comparison functions. */
|
||||
ckh_hash_t *hash;
|
||||
ckh_keycomp_t *keycomp;
|
||||
|
||||
/* Hash table with 2^lg_curbuckets buckets. */
|
||||
ckhc_t *tab;
|
||||
} ckh_t;
|
||||
|
||||
/******************************************************************************/
|
||||
/* BEGIN PUBLIC API */
|
||||
/******************************************************************************/
|
||||
|
||||
/* Lifetime management. Minitems is the initial capacity. */
|
||||
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
ckh_keycomp_t *keycomp);
|
||||
void ckh_delete(tsd_t *tsd, ckh_t *ckh);
|
||||
|
||||
/* Get the number of elements in the set. */
|
||||
size_t ckh_count(ckh_t *ckh);
|
||||
|
||||
/*
|
||||
* To iterate over the elements in the table, initialize *tabind to 0 and call
|
||||
* this function until it returns true. Each call that returns false will
|
||||
* update *key and *data to the next element in the table, assuming the pointers
|
||||
* are non-NULL.
|
||||
*/
|
||||
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
|
||||
|
||||
/*
|
||||
* Basic hash table operations -- insert, removal, lookup. For ckh_remove and
|
||||
* ckh_search, key or data can be NULL. The hash-table only stores pointers to
|
||||
* the key and value, and doesn't do any lifetime management.
|
||||
*/
|
||||
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
|
||||
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
|
||||
void **data);
|
||||
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
|
||||
|
||||
/* Some useful hash and comparison functions for strings and pointers. */
|
||||
void ckh_string_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_string_keycomp(const void *k1, const void *k2);
|
||||
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_pointer_keycomp(const void *k1, const void *k2);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_CKH_H */
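A hedged usage sketch of the public API above, assuming the usual jemalloc convention that a false return from ckh_new/ckh_insert/ckh_search means success (ckh_iter's protocol is documented above):

static void
ckh_usage_sketch(tsd_t *tsd) {
	ckh_t ckh;
	if (ckh_new(tsd, &ckh, /* minitems */ 16, ckh_string_hash,
	    ckh_string_keycomp)) {
		return;                           /* table creation failed */
	}
	int value = 42;
	if (!ckh_insert(tsd, &ckh, "answer", &value)) {
		void *key, *data;
		if (!ckh_search(&ckh, "answer", &key, &data)) {
			assert(*(int *)data == 42);
		}
	}
	/* Iterate over everything currently stored. */
	size_t tabind = 0;
	void *k, *d;
	while (!ckh_iter(&ckh, &tabind, &k, &d)) {
		/* use k / d; no particular order is guaranteed */
	}
	ckh_delete(tsd, &ckh);
}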
|
34
BeefRT/JEMalloc/include/jemalloc/internal/counter.h
Normal file
|
@ -0,0 +1,34 @@
|
|||
#ifndef JEMALLOC_INTERNAL_COUNTER_H
|
||||
#define JEMALLOC_INTERNAL_COUNTER_H
|
||||
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
typedef struct counter_accum_s {
|
||||
LOCKEDINT_MTX_DECLARE(mtx)
|
||||
locked_u64_t accumbytes;
|
||||
uint64_t interval;
|
||||
} counter_accum_t;
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
|
||||
uint64_t interval = counter->interval;
|
||||
assert(interval > 0);
|
||||
LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
|
||||
/*
|
||||
* If the event moves fast enough (and/or if the event handling is slow
|
||||
* enough), extreme overflow can cause counter trigger coalescing.
|
||||
* This is an intentional mechanism that avoids rate-limiting
|
||||
* allocation.
|
||||
*/
|
||||
bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
|
||||
&counter->accumbytes, bytes, interval);
|
||||
LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
|
||||
return overflow;
|
||||
}
|
||||
|
||||
bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
|
||||
void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter);
|
||||
void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter);
|
||||
void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_COUNTER_H */
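A hedged sketch of the intended pattern: accumulate per-request byte counts into a counter_accum_t and run some periodic action each time the running total crosses another multiple of the interval. The interval value and trigger body here are made up for illustration.

static counter_accum_t sketch_counter;

static bool
counter_sketch_boot(void) {
	/* false means success, per the usual jemalloc convention (assumed). */
	return counter_accum_init(&sketch_counter, /* interval */ 1 << 20);
}

static void
counter_sketch_on_alloc(tsdn_t *tsdn, uint64_t alloc_bytes) {
	if (counter_accum(tsdn, &sketch_counter, alloc_bytes)) {
		/* Crossed the interval; do the periodic work (e.g. emit stats). */
	}
}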
|
159
BeefRT/JEMalloc/include/jemalloc/internal/ctl.h
Normal file
|
@ -0,0 +1,159 @@
|
|||
#ifndef JEMALLOC_INTERNAL_CTL_H
|
||||
#define JEMALLOC_INTERNAL_CTL_H
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/stats.h"
|
||||
|
||||
/* Maximum ctl tree depth. */
|
||||
#define CTL_MAX_DEPTH 7
|
||||
|
||||
typedef struct ctl_node_s {
|
||||
bool named;
|
||||
} ctl_node_t;
|
||||
|
||||
typedef struct ctl_named_node_s {
|
||||
ctl_node_t node;
|
||||
const char *name;
|
||||
/* If (nchildren == 0), this is a terminal node. */
|
||||
size_t nchildren;
|
||||
const ctl_node_t *children;
|
||||
int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
|
||||
size_t);
|
||||
} ctl_named_node_t;
|
||||
|
||||
typedef struct ctl_indexed_node_s {
|
||||
struct ctl_node_s node;
|
||||
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
|
||||
size_t);
|
||||
} ctl_indexed_node_t;
|
||||
|
||||
typedef struct ctl_arena_stats_s {
|
||||
arena_stats_t astats;
|
||||
|
||||
/* Aggregate stats for small size classes, based on bin stats. */
|
||||
size_t allocated_small;
|
||||
uint64_t nmalloc_small;
|
||||
uint64_t ndalloc_small;
|
||||
uint64_t nrequests_small;
|
||||
uint64_t nfills_small;
|
||||
uint64_t nflushes_small;
|
||||
|
||||
bin_stats_data_t bstats[SC_NBINS];
|
||||
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
|
||||
pac_estats_t estats[SC_NPSIZES];
|
||||
hpa_shard_stats_t hpastats;
|
||||
sec_stats_t secstats;
|
||||
} ctl_arena_stats_t;
|
||||
|
||||
typedef struct ctl_stats_s {
|
||||
size_t allocated;
|
||||
size_t active;
|
||||
size_t metadata;
|
||||
size_t metadata_thp;
|
||||
size_t resident;
|
||||
size_t mapped;
|
||||
size_t retained;
|
||||
|
||||
background_thread_stats_t background_thread;
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
|
||||
} ctl_stats_t;
|
||||
|
||||
typedef struct ctl_arena_s ctl_arena_t;
|
||||
struct ctl_arena_s {
|
||||
unsigned arena_ind;
|
||||
bool initialized;
|
||||
ql_elm(ctl_arena_t) destroyed_link;
|
||||
|
||||
/* Basic stats, supported even if !config_stats. */
|
||||
unsigned nthreads;
|
||||
const char *dss;
|
||||
ssize_t dirty_decay_ms;
|
||||
ssize_t muzzy_decay_ms;
|
||||
size_t pactive;
|
||||
size_t pdirty;
|
||||
size_t pmuzzy;
|
||||
|
||||
/* NULL if !config_stats. */
|
||||
ctl_arena_stats_t *astats;
|
||||
};
|
||||
|
||||
typedef struct ctl_arenas_s {
|
||||
uint64_t epoch;
|
||||
unsigned narenas;
|
||||
ql_head(ctl_arena_t) destroyed;
|
||||
|
||||
/*
|
||||
* Element 0 corresponds to merged stats for extant arenas (accessed via
|
||||
* MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
|
||||
* destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
|
||||
* remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
|
||||
*/
|
||||
ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
|
||||
} ctl_arenas_t;
|
||||
|
||||
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
|
||||
void *newp, size_t newlen);
|
||||
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
|
||||
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen);
|
||||
int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
|
||||
size_t *miblenp);
|
||||
int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
|
||||
size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
|
||||
bool ctl_boot(void);
|
||||
void ctl_prefork(tsdn_t *tsdn);
|
||||
void ctl_postfork_parent(tsdn_t *tsdn);
|
||||
void ctl_postfork_child(tsdn_t *tsdn);
|
||||
void ctl_mtx_assert_held(tsdn_t *tsdn);
|
||||
|
||||
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
|
||||
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
|
||||
!= 0) { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
|
||||
name); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlnametomib(name, mibp, miblenp) do { \
|
||||
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
|
||||
malloc_printf("<jemalloc>: Failure in " \
|
||||
"xmallctlnametomib(\"%s\", ...)\n", name); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
|
||||
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
|
||||
newlen) != 0) { \
|
||||
malloc_write( \
|
||||
"<jemalloc>: Failure in xmallctlbymib()\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \
|
||||
if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \
|
||||
!= 0) { \
|
||||
malloc_write( \
|
||||
"<jemalloc>: Failure in ctl_mibnametomib()\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \
|
||||
newp, newlen) do { \
|
||||
if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \
|
||||
oldp, oldlenp, newp, newlen) != 0) { \
|
||||
malloc_write( \
|
||||
"<jemalloc>: Failure in ctl_bymibname()\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_CTL_H */
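A small sketch of the abort-on-failure wrappers above; "arenas.narenas" is a standard mallctl name (an unsigned count of arenas) and is used here purely as an example:

static unsigned
narenas_sketch(void) {
	unsigned narenas;
	size_t sz = sizeof(narenas);
	/* Aborts with a message instead of returning an error code. */
	xmallctl("arenas.narenas", &narenas, &sz, NULL, 0);
	return narenas;
}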
|
186
BeefRT/JEMalloc/include/jemalloc/internal/decay.h
Normal file
|
@ -0,0 +1,186 @@
|
|||
#ifndef JEMALLOC_INTERNAL_DECAY_H
|
||||
#define JEMALLOC_INTERNAL_DECAY_H
|
||||
|
||||
#include "jemalloc/internal/smoothstep.h"
|
||||
|
||||
#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1)
|
||||
|
||||
/*
|
||||
* The decay_t computes the number of pages we should purge at any given time.
|
||||
* Page allocators inform a decay object when pages enter a decay-able state
|
||||
* (i.e. dirty or muzzy), and query it to determine how many pages should be
|
||||
* purged at any given time.
|
||||
*
|
||||
* This is mostly a single-threaded data structure and doesn't care about
|
||||
* synchronization at all; it's the caller's responsibility to manage their
|
||||
* synchronization on their own. There are two exceptions:
|
||||
* 1) It's OK to racily call decay_ms_read (i.e. just the simplest state query).
|
||||
* 2) The mtx and purging fields live (and are initialized) here, but are
|
||||
* logically owned by the page allocator. This is just a convenience (since
|
||||
* those fields would be duplicated for both the dirty and muzzy states
|
||||
* otherwise).
|
||||
*/
|
||||
typedef struct decay_s decay_t;
|
||||
struct decay_s {
|
||||
/* Synchronizes all non-atomic fields. */
|
||||
malloc_mutex_t mtx;
|
||||
/*
|
||||
* True if a thread is currently purging the extents associated with
|
||||
* this decay structure.
|
||||
*/
|
||||
bool purging;
|
||||
/*
|
||||
* Approximate time in milliseconds from the creation of a set of unused
|
||||
* dirty pages until an equivalent set of unused dirty pages is purged
|
||||
* and/or reused.
|
||||
*/
|
||||
atomic_zd_t time_ms;
|
||||
/* time / SMOOTHSTEP_NSTEPS. */
|
||||
nstime_t interval;
|
||||
/*
|
||||
* Time at which the current decay interval logically started. We do
|
||||
* not actually advance to a new epoch until sometime after it starts
|
||||
* because of scheduling and computation delays, and it is even possible
|
||||
* to completely skip epochs. In all cases, during epoch advancement we
|
||||
* merge all relevant activity into the most recently recorded epoch.
|
||||
*/
|
||||
nstime_t epoch;
|
||||
/* Deadline randomness generator. */
|
||||
uint64_t jitter_state;
|
||||
/*
|
||||
* Deadline for current epoch. This is the sum of interval and per
|
||||
* epoch jitter which is a uniform random variable in [0..interval).
|
||||
* Epochs always advance by precise multiples of interval, but we
|
||||
* randomize the deadline to reduce the likelihood of arenas purging in
|
||||
* lockstep.
|
||||
*/
|
||||
nstime_t deadline;
|
||||
/*
|
||||
* The number of pages we cap ourselves at in the current epoch, per
|
||||
* decay policies. Updated on an epoch change. After an epoch change,
|
||||
* the caller should take steps to try to purge down to this amount.
|
||||
*/
|
||||
size_t npages_limit;
|
||||
/*
|
||||
* Number of unpurged pages at beginning of current epoch. During epoch
|
||||
* advancement we use the delta between arena->decay_*.nunpurged and
|
||||
* ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
|
||||
* if any, were generated.
|
||||
*/
|
||||
size_t nunpurged;
|
||||
/*
|
||||
* Trailing log of how many unused dirty pages were generated during
|
||||
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
|
||||
* element is the most recent epoch. Corresponding epoch times are
|
||||
* relative to epoch.
|
||||
*
|
||||
* Updated only on epoch advance, triggered by
|
||||
* decay_maybe_advance_epoch, below.
|
||||
*/
|
||||
size_t backlog[SMOOTHSTEP_NSTEPS];
|
||||
|
||||
/* Peak number of pages in associated extents. Used for debug only. */
|
||||
uint64_t ceil_npages;
|
||||
};
|
||||
|
||||
/*
|
||||
* The current decay time setting. This is the only public access to a decay_t
|
||||
* that's allowed without holding mtx.
|
||||
*/
|
||||
static inline ssize_t
|
||||
decay_ms_read(const decay_t *decay) {
|
||||
return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
/*
|
||||
* See the comment on the struct field -- the limit on pages we should allow in
|
||||
* this decay state this epoch.
|
||||
*/
|
||||
static inline size_t
|
||||
decay_npages_limit_get(const decay_t *decay) {
|
||||
return decay->npages_limit;
|
||||
}
|
||||
|
||||
/* How many unused dirty pages were generated during the last epoch. */
|
||||
static inline size_t
|
||||
decay_epoch_npages_delta(const decay_t *decay) {
|
||||
return decay->backlog[SMOOTHSTEP_NSTEPS - 1];
|
||||
}
|
||||
|
||||
/*
|
||||
* Current epoch duration, in nanoseconds. Given that new epochs are started
|
||||
* somewhat haphazardly, this is not necessarily exactly the time between any
|
||||
* two calls to decay_maybe_advance_epoch; see the comments on fields in the
|
||||
* decay_t.
|
||||
*/
|
||||
static inline uint64_t
|
||||
decay_epoch_duration_ns(const decay_t *decay) {
|
||||
return nstime_ns(&decay->interval);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
decay_immediately(const decay_t *decay) {
|
||||
ssize_t decay_ms = decay_ms_read(decay);
|
||||
return decay_ms == 0;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
decay_disabled(const decay_t *decay) {
|
||||
ssize_t decay_ms = decay_ms_read(decay);
|
||||
return decay_ms < 0;
|
||||
}
|
||||
|
||||
/* Returns true if decay is enabled and done gradually. */
|
||||
static inline bool
|
||||
decay_gradually(const decay_t *decay) {
|
||||
ssize_t decay_ms = decay_ms_read(decay);
|
||||
return decay_ms > 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the passed in decay time setting is valid.
|
||||
* < -1 : invalid
|
||||
* -1 : never decay
|
||||
* 0 : decay immediately
|
||||
* > 0 : some positive decay time, up to a maximum allowed value of
|
||||
* NSTIME_SEC_MAX * 1000, which corresponds to decaying somewhere in the early
|
||||
* 27th century. By that time, we expect to have implemented alternate purging
|
||||
* strategies.
|
||||
*/
|
||||
bool decay_ms_valid(ssize_t decay_ms);
|
||||
|
||||
/*
|
||||
* As a precondition, the decay_t must be zeroed out (as if with memset).
|
||||
*
|
||||
* Returns true on error.
|
||||
*/
|
||||
bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
|
||||
|
||||
/*
|
||||
* Given an already-initialized decay_t, reinitialize it with the given decay
|
||||
* time. The decay_t must have previously been initialized (and should not then
|
||||
* be zeroed).
|
||||
*/
|
||||
void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
|
||||
|
||||
/*
|
||||
* Compute how many of 'npages_new' pages we would need to purge in 'time'.
|
||||
*/
|
||||
uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time,
|
||||
size_t npages_new);
|
||||
|
||||
/* Returns true if the epoch advanced and there are pages to purge. */
|
||||
bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
|
||||
size_t current_npages);
|
||||
|
||||
/*
|
||||
* Calculates wait time until a number of pages in the interval
|
||||
* [0.5 * npages_threshold .. 1.5 * npages_threshold] should be purged.
|
||||
*
|
||||
* Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of
|
||||
* indefinite wait.
|
||||
*/
|
||||
uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
|
||||
uint64_t npages_threshold);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_DECAY_H */
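A hypothetical sketch of how a page allocator might act on the decay_ms convention above (-1 never purge, 0 purge immediately, > 0 purge gradually toward the per-epoch limit); the real purging logic lives in the arena/pac code and is more involved:

static size_t
npages_to_purge_sketch(decay_t *decay, size_t npages_current) {
	/* Assumes the caller holds decay->mtx, per the note on the struct. */
	if (decay_disabled(decay)) {
		return 0;                         /* never purge */
	}
	if (decay_immediately(decay)) {
		return npages_current;            /* purge everything now */
	}
	assert(decay_gradually(decay));
	size_t limit = decay_npages_limit_get(decay);
	return npages_current > limit ? npages_current - limit : 0;
}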
|
41
BeefRT/JEMalloc/include/jemalloc/internal/div.h
Normal file
|
@ -0,0 +1,41 @@
|
|||
#ifndef JEMALLOC_INTERNAL_DIV_H
|
||||
#define JEMALLOC_INTERNAL_DIV_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
/*
|
||||
* This module does the division that computes the index of a region in a slab,
|
||||
* given its offset relative to the base.
|
||||
* That is, given a divisor d and an n = i * d (all integers), we'll return i.
|
||||
* We do some pre-computation to do this more quickly than a CPU division
|
||||
* instruction.
|
||||
* We bound n < 2^32, and don't support dividing by one.
|
||||
*/
|
||||
|
||||
typedef struct div_info_s div_info_t;
|
||||
struct div_info_s {
|
||||
uint32_t magic;
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
size_t d;
|
||||
#endif
|
||||
};
|
||||
|
||||
void div_init(div_info_t *div_info, size_t divisor);
|
||||
|
||||
static inline size_t
|
||||
div_compute(div_info_t *div_info, size_t n) {
|
||||
assert(n <= (uint32_t)-1);
|
||||
/*
|
||||
* This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,
|
||||
* the compilers I tried were all smart enough to turn this into the
|
||||
* appropriate "get the high 32 bits of the result of a multiply" (e.g.
|
||||
* mul; mov edx eax; on x86, umull on arm, etc.).
|
||||
*/
|
||||
size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
assert(i * div_info->d == n);
|
||||
#endif
|
||||
return i;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_DIV_H */
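div_init's actual precomputation lives in div.c, which is not part of this hunk. One standard choice that satisfies div_compute above is magic = ceil(2^32 / d), which is exact whenever d divides n and n < 2^32; a self-contained sketch of that claim:

#include <assert.h>
#include <stdint.h>

/* Sketch only, not the real div_init: with magic = ceil(2^32 / d) we have
 * d * magic = 2^32 + r for some r in [0, d), so for n = i * d < 2^32,
 * n * magic = i * 2^32 + i * r and i * r < i * d = n < 2^32, meaning the
 * low part never carries into bit 32 and (n * magic) >> 32 == i exactly. */
static uint32_t
magic_for_sketch(uint32_t d) {
	assert(d >= 2);                           /* dividing by one unsupported */
	return (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
}

static void
div_magic_sketch(void) {
	uint32_t d = 7;
	uint32_t magic = magic_for_sketch(d);
	for (uint32_t i = 0; i < 1000; i++) {
		uint32_t n = i * d;
		assert((uint32_t)(((uint64_t)n * magic) >> 32) == i);
	}
}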
|
55
BeefRT/JEMalloc/include/jemalloc/internal/ecache.h
Normal file
|
@ -0,0 +1,55 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ECACHE_H
|
||||
#define JEMALLOC_INTERNAL_ECACHE_H
|
||||
|
||||
#include "jemalloc/internal/eset.h"
|
||||
#include "jemalloc/internal/san.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
typedef struct ecache_s ecache_t;
|
||||
struct ecache_s {
|
||||
malloc_mutex_t mtx;
|
||||
eset_t eset;
|
||||
eset_t guarded_eset;
|
||||
/* All stored extents must be in the same state. */
|
||||
extent_state_t state;
|
||||
/* The index of the ehooks the ecache is associated with. */
|
||||
unsigned ind;
|
||||
/*
|
||||
* If true, delay coalescing until eviction; otherwise coalesce during
|
||||
* deallocation.
|
||||
*/
|
||||
bool delay_coalesce;
|
||||
};
|
||||
|
||||
static inline size_t
|
||||
ecache_npages_get(ecache_t *ecache) {
|
||||
return eset_npages_get(&ecache->eset) +
|
||||
eset_npages_get(&ecache->guarded_eset);
|
||||
}
|
||||
|
||||
/* Get the number of extents in the given page size index. */
|
||||
static inline size_t
|
||||
ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
|
||||
return eset_nextents_get(&ecache->eset, ind) +
|
||||
eset_nextents_get(&ecache->guarded_eset, ind);
|
||||
}
|
||||
|
||||
/* Get the sum total bytes of the extents in the given page size index. */
|
||||
static inline size_t
|
||||
ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
|
||||
return eset_nbytes_get(&ecache->eset, ind) +
|
||||
eset_nbytes_get(&ecache->guarded_eset, ind);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
ecache_ind_get(ecache_t *ecache) {
|
||||
return ecache->ind;
|
||||
}
|
||||
|
||||
bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
|
||||
unsigned ind, bool delay_coalesce);
|
||||
void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
|
||||
void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
|
||||
void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ECACHE_H */
|
698
BeefRT/JEMalloc/include/jemalloc/internal/edata.h
Normal file
|
@ -0,0 +1,698 @@
|
|||
#ifndef JEMALLOC_INTERNAL_EDATA_H
|
||||
#define JEMALLOC_INTERNAL_EDATA_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/bin_info.h"
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
#include "jemalloc/internal/hpdata.h"
|
||||
#include "jemalloc/internal/nstime.h"
|
||||
#include "jemalloc/internal/ph.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/slab_data.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
#include "jemalloc/internal/typed_list.h"
|
||||
|
||||
/*
|
||||
* sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment
|
||||
* to free up the low bits in the rtree leaf.
|
||||
*/
|
||||
#define EDATA_ALIGNMENT 128
|
||||
|
||||
enum extent_state_e {
|
||||
extent_state_active = 0,
|
||||
extent_state_dirty = 1,
|
||||
extent_state_muzzy = 2,
|
||||
extent_state_retained = 3,
|
||||
extent_state_transition = 4, /* States below are intermediate. */
|
||||
extent_state_merging = 5,
|
||||
extent_state_max = 5 /* Sanity checking only. */
|
||||
};
|
||||
typedef enum extent_state_e extent_state_t;
|
||||
|
||||
enum extent_head_state_e {
|
||||
EXTENT_NOT_HEAD,
|
||||
EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
|
||||
};
|
||||
typedef enum extent_head_state_e extent_head_state_t;
|
||||
|
||||
/*
|
||||
* Which implementation of the page allocator interface, (PAI, defined in
|
||||
* pai.h) owns the given extent?
|
||||
*/
|
||||
enum extent_pai_e {
|
||||
EXTENT_PAI_PAC = 0,
|
||||
EXTENT_PAI_HPA = 1
|
||||
};
|
||||
typedef enum extent_pai_e extent_pai_t;
|
||||
|
||||
struct e_prof_info_s {
|
||||
/* Time when this was allocated. */
|
||||
nstime_t e_prof_alloc_time;
|
||||
/* Allocation request size. */
|
||||
size_t e_prof_alloc_size;
|
||||
/* Points to a prof_tctx_t. */
|
||||
atomic_p_t e_prof_tctx;
|
||||
/*
|
||||
* Points to a prof_recent_t for the allocation; NULL
|
||||
* means the recent allocation record no longer exists.
|
||||
* Protected by prof_recent_alloc_mtx.
|
||||
*/
|
||||
atomic_p_t e_prof_recent_alloc;
|
||||
};
|
||||
typedef struct e_prof_info_s e_prof_info_t;
|
||||
|
||||
/*
|
||||
* The information about a particular edata that lives in an emap. Space is
|
||||
* more precious there (the information, plus the edata pointer, has to live in
|
||||
* a 64-bit word if we want to enable a packed representation).
|
||||
*
|
||||
* There are two things that are special about the information here:
|
||||
* - It's quicker to access. You have one fewer pointer hop, since finding the
|
||||
* edata_t associated with an item always requires accessing the rtree leaf in
|
||||
* which this data is stored.
|
||||
* - It can be read unsynchronized, and without worrying about lifetime issues.
|
||||
*/
|
||||
typedef struct edata_map_info_s edata_map_info_t;
|
||||
struct edata_map_info_s {
|
||||
bool slab;
|
||||
szind_t szind;
|
||||
};
|
||||
|
||||
typedef struct edata_cmp_summary_s edata_cmp_summary_t;
|
||||
struct edata_cmp_summary_s {
|
||||
uint64_t sn;
|
||||
uintptr_t addr;
|
||||
};
|
||||
|
||||
/* Extent (span of pages). Use accessor functions for e_* fields. */
|
||||
typedef struct edata_s edata_t;
|
||||
ph_structs(edata_avail, edata_t);
|
||||
ph_structs(edata_heap, edata_t);
|
||||
struct edata_s {
|
||||
/*
|
||||
* Bitfield containing several fields:
|
||||
*
|
||||
* a: arena_ind
|
||||
* b: slab
|
||||
* c: committed
|
||||
* p: pai
|
||||
* z: zeroed
|
||||
* g: guarded
|
||||
* t: state
|
||||
* i: szind
|
||||
* f: nfree
|
||||
* s: bin_shard
|
||||
*
|
||||
* 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
|
||||
*
|
||||
* arena_ind: Arena from which this extent came, or all 1 bits if
|
||||
* unassociated.
|
||||
*
|
||||
* slab: The slab flag indicates whether the extent is used for a slab
|
||||
* of small regions. This helps differentiate small size classes,
|
||||
* and it indicates whether interior pointers can be looked up via
|
||||
* iealloc().
|
||||
*
|
||||
* committed: The committed flag indicates whether physical memory is
|
||||
* committed to the extent, whether explicitly or implicitly
|
||||
* as on a system that overcommits and satisfies physical
|
||||
* memory needs on demand via soft page faults.
|
||||
*
|
||||
* pai: The pai flag is an extent_pai_t.
|
||||
*
|
||||
* zeroed: The zeroed flag is used by extent recycling code to track
|
||||
* whether memory is zero-filled.
|
||||
*
|
||||
* guarded: The guarded flag is used by the sanitizer to track whether
|
||||
* the extent has page guards around it.
|
||||
*
|
||||
* state: The state flag is an extent_state_t.
|
||||
*
|
||||
* szind: The szind flag indicates usable size class index for
|
||||
* allocations residing in this extent, regardless of whether the
|
||||
* extent is a slab. Extent size and usable size often differ
|
||||
* even for non-slabs, either due to sz_large_pad or promotion of
|
||||
* sampled small regions.
|
||||
*
|
||||
* nfree: Number of free regions in slab.
|
||||
*
|
||||
* bin_shard: the shard of the bin from which this extent came.
|
||||
*/
|
||||
uint64_t e_bits;
|
||||
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
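/*
 * Worked example (editorial note, not part of the upstream header): for a
 * hypothetical 3-bit-wide field at shift 8, MASK(3, 8) expands to
 * ((((uint64_t)0x1U << 3) - 1) << 8) == 0x700, i.e. it selects bits 8..10 of
 * e_bits.  Each field below pairs such a mask with its shift so that the
 * accessors can isolate the field with a single mask-and-shift.
 */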
|
||||
|
||||
#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
|
||||
#define EDATA_BITS_ARENA_SHIFT 0
|
||||
#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
|
||||
|
||||
#define EDATA_BITS_SLAB_WIDTH 1
|
||||
#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
|
||||
#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
|
||||
|
||||
#define EDATA_BITS_COMMITTED_WIDTH 1
|
||||
#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
|
||||
#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
|
||||
|
||||
#define EDATA_BITS_PAI_WIDTH 1
|
||||
#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
|
||||
#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)
|
||||
|
||||
#define EDATA_BITS_ZEROED_WIDTH 1
|
||||
#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
|
||||
#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
|
||||
|
||||
#define EDATA_BITS_GUARDED_WIDTH 1
|
||||
#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
|
||||
#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)
|
||||
|
||||
#define EDATA_BITS_STATE_WIDTH 3
|
||||
#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
|
||||
#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
|
||||
|
||||
#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
|
||||
#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
|
||||
#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
|
||||
|
||||
#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
|
||||
#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
|
||||
#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
|
||||
|
||||
#define EDATA_BITS_BINSHARD_WIDTH 6
|
||||
#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
|
||||
#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
|
||||
|
||||
#define EDATA_BITS_IS_HEAD_WIDTH 1
|
||||
#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
|
||||
#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
|
||||
|
||||
/* Pointer to the extent that this structure is responsible for. */
|
||||
void *e_addr;
|
||||
|
||||
union {
|
||||
/*
|
||||
* Extent size and serial number associated with the extent
|
||||
* structure (different than the serial number for the extent at
|
||||
* e_addr).
|
||||
*
|
||||
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
|
||||
*/
|
||||
size_t e_size_esn;
|
||||
#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
|
||||
#define EDATA_ESN_MASK ((size_t)PAGE-1)
|
||||
/* Base extent size, which may not be a multiple of PAGE. */
|
||||
size_t e_bsize;
|
||||
};
|
||||
|
||||
/*
|
||||
* If this edata is a user allocation from an HPA, it comes out of some
|
||||
* pageslab (we don't yet support hugepage allocations that don't fit
|
||||
* into pageslabs). This tracks it.
|
||||
*/
|
||||
hpdata_t *e_ps;
|
||||
|
||||
/*
|
||||
* Serial number. These are not necessarily unique; splitting an extent
|
||||
* results in two extents with the same serial number.
|
||||
*/
|
||||
uint64_t e_sn;
|
||||
|
||||
union {
|
||||
/*
|
||||
* List linkage used when the edata_t is active; either in
|
||||
* arena's large allocations or bin_t's slabs_full.
|
||||
*/
|
||||
ql_elm(edata_t) ql_link_active;
|
||||
/*
|
||||
* Pairing heap linkage. Used whenever the extent is inactive
|
||||
* (in the page allocators), or when it is active and in
|
||||
* slabs_nonfull, or when the edata_t is unassociated with an
|
||||
* extent and sitting in an edata_cache.
|
||||
*/
|
||||
union {
|
||||
edata_heap_link_t heap_link;
|
||||
edata_avail_link_t avail_link;
|
||||
};
|
||||
};
|
||||
|
||||
union {
|
||||
/*
|
||||
* List linkage used when the extent is inactive:
|
||||
* - Stashed dirty extents
|
||||
* - Ecache LRU functionality.
|
||||
*/
|
||||
ql_elm(edata_t) ql_link_inactive;
|
||||
/* Small region slab metadata. */
|
||||
slab_data_t e_slab_data;
|
||||
|
||||
/* Profiling data, used for large objects. */
|
||||
e_prof_info_t e_prof_info;
|
||||
};
|
||||
};
|
||||
|
||||
TYPED_LIST(edata_list_active, edata_t, ql_link_active)
|
||||
TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
|
||||
|
||||
static inline unsigned
|
||||
edata_arena_ind_get(const edata_t *edata) {
|
||||
unsigned arena_ind = (unsigned)((edata->e_bits &
|
||||
EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
|
||||
assert(arena_ind < MALLOCX_ARENA_LIMIT);
|
||||
|
||||
return arena_ind;
|
||||
}
|
||||
|
||||
static inline szind_t
|
||||
edata_szind_get_maybe_invalid(const edata_t *edata) {
|
||||
szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
|
||||
EDATA_BITS_SZIND_SHIFT);
|
||||
assert(szind <= SC_NSIZES);
|
||||
return szind;
|
||||
}
|
||||
|
||||
static inline szind_t
|
||||
edata_szind_get(const edata_t *edata) {
|
||||
szind_t szind = edata_szind_get_maybe_invalid(edata);
|
||||
assert(szind < SC_NSIZES); /* Never call when "invalid". */
|
||||
return szind;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
edata_usize_get(const edata_t *edata) {
|
||||
return sz_index2size(edata_szind_get(edata));
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
edata_binshard_get(const edata_t *edata) {
|
||||
unsigned binshard = (unsigned)((edata->e_bits &
|
||||
EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
|
||||
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
|
||||
return binshard;
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
edata_sn_get(const edata_t *edata) {
|
||||
return edata->e_sn;
|
||||
}
|
||||
|
||||
static inline extent_state_t
|
||||
edata_state_get(const edata_t *edata) {
|
||||
return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
|
||||
EDATA_BITS_STATE_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
edata_guarded_get(const edata_t *edata) {
|
||||
return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
|
||||
EDATA_BITS_GUARDED_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
edata_zeroed_get(const edata_t *edata) {
|
||||
return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
|
||||
EDATA_BITS_ZEROED_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
edata_committed_get(const edata_t *edata) {
|
||||
return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
|
||||
EDATA_BITS_COMMITTED_SHIFT);
|
||||
}
|
||||
|
||||
static inline extent_pai_t
|
||||
edata_pai_get(const edata_t *edata) {
|
||||
return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
|
||||
EDATA_BITS_PAI_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
edata_slab_get(const edata_t *edata) {
|
||||
return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
|
||||
EDATA_BITS_SLAB_SHIFT);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
edata_nfree_get(const edata_t *edata) {
|
||||
assert(edata_slab_get(edata));
|
||||
return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
|
||||
EDATA_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
edata_base_get(const edata_t *edata) {
|
||||
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
|
||||
!edata_slab_get(edata));
|
||||
return PAGE_ADDR2BASE(edata->e_addr);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
edata_addr_get(const edata_t *edata) {
|
||||
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
|
||||
!edata_slab_get(edata));
|
||||
return edata->e_addr;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
edata_size_get(const edata_t *edata) {
|
||||
return (edata->e_size_esn & EDATA_SIZE_MASK);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
edata_esn_get(const edata_t *edata) {
|
||||
return (edata->e_size_esn & EDATA_ESN_MASK);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
edata_bsize_get(const edata_t *edata) {
|
||||
return edata->e_bsize;
|
||||
}
|
||||
|
||||
static inline hpdata_t *
|
||||
edata_ps_get(const edata_t *edata) {
|
||||
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
|
||||
return edata->e_ps;
|
||||
}
|
||||
|
||||
static inline void *
|
||||
edata_before_get(const edata_t *edata) {
|
||||
return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
edata_last_get(const edata_t *edata) {
|
||||
return (void *)((uintptr_t)edata_base_get(edata) +
|
||||
edata_size_get(edata) - PAGE);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
edata_past_get(const edata_t *edata) {
|
||||
return (void *)((uintptr_t)edata_base_get(edata) +
|
||||
edata_size_get(edata));
|
||||
}
|
||||
|
||||
static inline slab_data_t *
|
||||
edata_slab_data_get(edata_t *edata) {
|
||||
assert(edata_slab_get(edata));
|
||||
return &edata->e_slab_data;
|
||||
}
|
||||
|
||||
static inline const slab_data_t *
|
||||
edata_slab_data_get_const(const edata_t *edata) {
|
||||
assert(edata_slab_get(edata));
|
||||
return &edata->e_slab_data;
|
||||
}
|
||||
|
||||
static inline prof_tctx_t *
|
||||
edata_prof_tctx_get(const edata_t *edata) {
|
||||
return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
|
||||
ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
static inline const nstime_t *
|
||||
edata_prof_alloc_time_get(const edata_t *edata) {
|
||||
return &edata->e_prof_info.e_prof_alloc_time;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
edata_prof_alloc_size_get(const edata_t *edata) {
|
||||
return edata->e_prof_info.e_prof_alloc_size;
|
||||
}
|
||||
|
||||
static inline prof_recent_t *
|
||||
edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
|
||||
return (prof_recent_t *)atomic_load_p(
|
||||
&edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
|
||||
((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_binshard_set(edata_t *edata, unsigned binshard) {
|
||||
/* The assertion assumes szind is set already. */
|
||||
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
|
||||
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_addr_set(edata_t *edata, void *addr) {
|
||||
edata->e_addr = addr;
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_size_set(edata_t *edata, size_t size) {
|
||||
assert((size & ~EDATA_SIZE_MASK) == 0);
|
||||
edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_esn_set(edata_t *edata, size_t esn) {
|
||||
edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
|
||||
EDATA_ESN_MASK);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_bsize_set(edata_t *edata, size_t bsize) {
|
||||
edata->e_bsize = bsize;
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_ps_set(edata_t *edata, hpdata_t *ps) {
|
||||
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
|
||||
edata->e_ps = ps;
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_szind_set(edata_t *edata, szind_t szind) {
|
||||
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
|
||||
((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_nfree_set(edata_t *edata, unsigned nfree) {
|
||||
assert(edata_slab_get(edata));
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
|
||||
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
|
||||
/* The assertion assumes szind is set already. */
|
||||
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
|
||||
edata->e_bits = (edata->e_bits &
|
||||
(~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
|
||||
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
|
||||
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_nfree_inc(edata_t *edata) {
|
||||
assert(edata_slab_get(edata));
|
||||
edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_nfree_dec(edata_t *edata) {
|
||||
assert(edata_slab_get(edata));
|
||||
edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_nfree_sub(edata_t *edata, uint64_t n) {
|
||||
assert(edata_slab_get(edata));
|
||||
edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_sn_set(edata_t *edata, uint64_t sn) {
|
||||
edata->e_sn = sn;
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_state_set(edata_t *edata, extent_state_t state) {
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
|
||||
((uint64_t)state << EDATA_BITS_STATE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_guarded_set(edata_t *edata, bool guarded) {
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
|
||||
((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_zeroed_set(edata_t *edata, bool zeroed) {
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
|
||||
((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_committed_set(edata_t *edata, bool committed) {
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
|
||||
((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_pai_set(edata_t *edata, extent_pai_t pai) {
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
|
||||
((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_slab_set(edata_t *edata, bool slab) {
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
|
||||
((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
|
||||
atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
|
||||
nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_prof_alloc_size_set(edata_t *edata, size_t size) {
|
||||
edata->e_prof_info.e_prof_alloc_size = size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
|
||||
prof_recent_t *recent_alloc) {
|
||||
atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
|
||||
ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
edata_is_head_get(edata_t *edata) {
|
||||
return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
|
||||
EDATA_BITS_IS_HEAD_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
edata_is_head_set(edata_t *edata, bool is_head) {
|
||||
edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
|
||||
((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
edata_state_in_transition(extent_state_t state) {
|
||||
return state >= extent_state_transition;
|
||||
}
|
||||
|
||||
/*
|
||||
* Because this function is implemented as a sequence of bitfield modifications,
|
||||
* even though each individual bit is properly initialized, we technically read
|
||||
* uninitialized data within it. This is mostly fine, since most callers get
|
||||
* their edatas from zeroing sources, but callers who make stack edata_ts need
|
||||
* to manually zero them.
|
||||
*/
|
||||
static inline void
|
||||
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
|
||||
bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
|
||||
bool committed, extent_pai_t pai, extent_head_state_t is_head) {
|
||||
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
|
||||
|
||||
edata_arena_ind_set(edata, arena_ind);
|
||||
edata_addr_set(edata, addr);
|
||||
edata_size_set(edata, size);
|
||||
edata_slab_set(edata, slab);
|
||||
edata_szind_set(edata, szind);
|
||||
edata_sn_set(edata, sn);
|
||||
edata_state_set(edata, state);
|
||||
edata_guarded_set(edata, false);
|
||||
edata_zeroed_set(edata, zeroed);
|
||||
edata_committed_set(edata, committed);
|
||||
edata_pai_set(edata, pai);
|
||||
edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
|
||||
if (config_prof) {
|
||||
edata_prof_tctx_set(edata, NULL);
|
||||
}
|
||||
}
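/*
 * Illustrative usage sketch (editorial, not part of the upstream header):
 * per the comment above edata_init, an edata_t constructed on the stack must
 * be zeroed manually before initialization.  The helper name and argument
 * values here are hypothetical.
 */
static inline void
edata_stack_init_example(void *addr, size_t size) {
	edata_t tmp;
	memset(&tmp, 0, sizeof(tmp));	/* Required for stack edata_ts. */
	edata_init(&tmp, /* arena_ind */ 0, addr, size, /* slab */ false,
	    /* szind */ SC_NSIZES, /* sn */ 0, extent_state_active,
	    /* zeroed */ false, /* committed */ true, EXTENT_PAI_PAC,
	    EXTENT_NOT_HEAD);
}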
|
||||
|
||||
static inline void
|
||||
edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
|
||||
edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
|
||||
edata_addr_set(edata, addr);
|
||||
edata_bsize_set(edata, bsize);
|
||||
edata_slab_set(edata, false);
|
||||
edata_szind_set(edata, SC_NSIZES);
|
||||
edata_sn_set(edata, sn);
|
||||
edata_state_set(edata, extent_state_active);
|
||||
edata_guarded_set(edata, false);
|
||||
edata_zeroed_set(edata, true);
|
||||
edata_committed_set(edata, true);
|
||||
/*
|
||||
* This isn't strictly true, but base allocated extents never get
|
||||
* deallocated and can't be looked up in the emap, so there's no sense in
|
||||
* wasting a state bit to encode this fact.
|
||||
*/
|
||||
edata_pai_set(edata, EXTENT_PAI_PAC);
|
||||
}
|
||||
|
||||
static inline int
|
||||
edata_esn_comp(const edata_t *a, const edata_t *b) {
|
||||
size_t a_esn = edata_esn_get(a);
|
||||
size_t b_esn = edata_esn_get(b);
|
||||
|
||||
return (a_esn > b_esn) - (a_esn < b_esn);
|
||||
}
|
||||
|
||||
static inline int
|
||||
edata_ead_comp(const edata_t *a, const edata_t *b) {
|
||||
uintptr_t a_eaddr = (uintptr_t)a;
|
||||
uintptr_t b_eaddr = (uintptr_t)b;
|
||||
|
||||
return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
|
||||
}
|
||||
|
||||
static inline edata_cmp_summary_t
|
||||
edata_cmp_summary_get(const edata_t *edata) {
|
||||
return (edata_cmp_summary_t){edata_sn_get(edata),
|
||||
(uintptr_t)edata_addr_get(edata)};
|
||||
}
|
||||
|
||||
static inline int
|
||||
edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
|
||||
int ret;
|
||||
ret = (a.sn > b.sn) - (a.sn < b.sn);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
ret = (a.addr > b.addr) - (a.addr < b.addr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int
|
||||
edata_snad_comp(const edata_t *a, const edata_t *b) {
|
||||
edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
|
||||
edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);
|
||||
|
||||
return edata_cmp_summary_comp(a_cmp, b_cmp);
|
||||
}
|
||||
|
||||
static inline int
|
||||
edata_esnead_comp(const edata_t *a, const edata_t *b) {
|
||||
int ret;
|
||||
|
||||
ret = edata_esn_comp(a, b);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = edata_ead_comp(a, b);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ph_proto(, edata_avail, edata_t)
|
||||
ph_proto(, edata_heap, edata_t)
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EDATA_H */
|
49
BeefRT/JEMalloc/include/jemalloc/internal/edata_cache.h
Normal file
@ -0,0 +1,49 @@
#ifndef JEMALLOC_INTERNAL_EDATA_CACHE_H
|
||||
#define JEMALLOC_INTERNAL_EDATA_CACHE_H
|
||||
|
||||
#include "jemalloc/internal/base.h"
|
||||
|
||||
/* For tests only. */
|
||||
#define EDATA_CACHE_FAST_FILL 4
|
||||
|
||||
/*
|
||||
* A cache of edata_t structures allocated via base_alloc_edata (as opposed to
|
||||
* the underlying extents they describe). The contents of returned edata_t
|
||||
* objects are garbage and cannot be relied upon.
|
||||
*/
|
||||
|
||||
typedef struct edata_cache_s edata_cache_t;
|
||||
struct edata_cache_s {
|
||||
edata_avail_t avail;
|
||||
atomic_zu_t count;
|
||||
malloc_mutex_t mtx;
|
||||
base_t *base;
|
||||
};
|
||||
|
||||
bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
|
||||
edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);
|
||||
void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata);
|
||||
|
||||
void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache);
|
||||
void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache);
|
||||
void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
|
||||
|
||||
/*
|
||||
* An edata_cache_fast is like an edata_cache, but it relies on external
|
||||
* synchronization and avoids first-fit strategies.
|
||||
*/
|
||||
|
||||
typedef struct edata_cache_fast_s edata_cache_fast_t;
|
||||
struct edata_cache_fast_s {
|
||||
edata_list_inactive_t list;
|
||||
edata_cache_t *fallback;
|
||||
bool disabled;
|
||||
};
|
||||
|
||||
void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback);
|
||||
edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs);
|
||||
void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs,
|
||||
edata_t *edata);
|
||||
void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */
|
412
BeefRT/JEMalloc/include/jemalloc/internal/ehooks.h
Normal file
@ -0,0 +1,412 @@
#ifndef JEMALLOC_INTERNAL_EHOOKS_H
|
||||
#define JEMALLOC_INTERNAL_EHOOKS_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/extent_mmap.h"
|
||||
|
||||
/*
|
||||
* This module is the internal interface to the extent hooks (both
|
||||
* user-specified and external). Eventually, this will give us the flexibility
|
||||
* to use multiple different versions of user-visible extent-hook APIs under a
|
||||
* single user interface.
|
||||
*
|
||||
* Current API expansions (not available to anyone but the default hooks yet):
|
||||
* - Head state tracking. Hooks can decide whether or not to merge two
|
||||
* extents based on whether or not one of them is the head (i.e. was
|
||||
* allocated on its own). The later extent loses its "head" status.
|
||||
*/
|
||||
|
||||
extern const extent_hooks_t ehooks_default_extent_hooks;
|
||||
|
||||
typedef struct ehooks_s ehooks_t;
|
||||
struct ehooks_s {
|
||||
/*
|
||||
* The user-visible id that goes with the ehooks (i.e. that of the base
|
||||
* they're a part of, the associated arena's index within the arenas
|
||||
* array).
|
||||
*/
|
||||
unsigned ind;
|
||||
/* Logically an extent_hooks_t *. */
|
||||
atomic_p_t ptr;
|
||||
};
|
||||
|
||||
extern const extent_hooks_t ehooks_default_extent_hooks;
|
||||
|
||||
/*
|
||||
* These are not really part of the public API. Each hook has a fast-path for
|
||||
* the default-hooks case that can avoid various small inefficiencies:
|
||||
* - Forgetting tsd and then calling tsd_get within the hook.
|
||||
* - Getting more state than necessary out of the extent_t.
|
||||
* - Doing arena_ind -> arena -> arena_ind lookups.
|
||||
* By making the calls to these functions visible to the compiler, it can move
|
||||
* those extra bits of computation down below the fast-paths where they get ignored.
|
||||
*/
|
||||
void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
|
||||
bool ehooks_default_dalloc_impl(void *addr, size_t size);
|
||||
void ehooks_default_destroy_impl(void *addr, size_t size);
|
||||
bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
|
||||
bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
|
||||
#ifdef PAGES_CAN_PURGE_LAZY
|
||||
bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
|
||||
#endif
|
||||
#ifdef PAGES_CAN_PURGE_FORCED
|
||||
bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length);
|
||||
#endif
|
||||
bool ehooks_default_split_impl();
|
||||
/*
|
||||
* Merge is the only default extent hook we declare -- see the comment in
|
||||
* ehooks_merge.
|
||||
*/
|
||||
bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a,
|
||||
size_t size_a, void *addr_b, size_t size_b, bool committed,
|
||||
unsigned arena_ind);
|
||||
bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
|
||||
void ehooks_default_zero_impl(void *addr, size_t size);
|
||||
void ehooks_default_guard_impl(void *guard1, void *guard2);
|
||||
void ehooks_default_unguard_impl(void *guard1, void *guard2);
|
||||
|
||||
/*
|
||||
* We don't officially support reentrancy from within the extent hooks. But
|
||||
* various people who sit within throwing distance of the jemalloc team want
|
||||
* that functionality in certain limited cases. The default reentrancy guards
|
||||
* assert that we're not reentrant from a0 (since it's the bootstrap arena,
|
||||
* where reentrant allocations would be redirected), which we would incorrectly
|
||||
* trigger in cases where a0 has extent hooks (those hooks themselves can't be
|
||||
* reentrant, then, but there are reasonable uses for such functionality, like
|
||||
* putting internal metadata on hugepages). Therefore, we use the raw
|
||||
* reentrancy guards.
|
||||
*
|
||||
* Eventually, we need to think more carefully about whether and where we
|
||||
* support allocating from within extent hooks (and what that means for things
|
||||
* like profiling, stats collection, etc.), and document what the guarantee is.
|
||||
*/
|
||||
static inline void
|
||||
ehooks_pre_reentrancy(tsdn_t *tsdn) {
|
||||
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
|
||||
tsd_pre_reentrancy_raw(tsd);
|
||||
}
|
||||
|
||||
static inline void
|
||||
ehooks_post_reentrancy(tsdn_t *tsdn) {
|
||||
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
|
||||
tsd_post_reentrancy_raw(tsd);
|
||||
}
|
||||
|
||||
/* Beginning of the public API. */
|
||||
void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind);
|
||||
|
||||
static inline unsigned
|
||||
ehooks_ind_get(const ehooks_t *ehooks) {
|
||||
return ehooks->ind;
|
||||
}
|
||||
|
||||
static inline void
|
||||
ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) {
|
||||
atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE);
|
||||
}
|
||||
|
||||
static inline extent_hooks_t *
|
||||
ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
|
||||
return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_are_default(ehooks_t *ehooks) {
|
||||
return ehooks_get_extent_hooks_ptr(ehooks) ==
|
||||
&ehooks_default_extent_hooks;
|
||||
}
|
||||
|
||||
/*
|
||||
* In some cases, a caller needs to allocate resources before attempting to call
|
||||
* a hook. If that hook is doomed to fail, this is wasteful. We therefore
|
||||
* include some checks for such cases.
|
||||
*/
|
||||
static inline bool
|
||||
ehooks_dalloc_will_fail(ehooks_t *ehooks) {
|
||||
if (ehooks_are_default(ehooks)) {
|
||||
return opt_retain;
|
||||
} else {
|
||||
return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_split_will_fail(ehooks_t *ehooks) {
|
||||
return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_merge_will_fail(ehooks_t *ehooks) {
|
||||
return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_guard_will_fail(ehooks_t *ehooks) {
|
||||
/*
|
||||
* Before the guard hooks are officially introduced, limit the use to
|
||||
* the default hooks only.
|
||||
*/
|
||||
return !ehooks_are_default(ehooks);
|
||||
}
|
||||
|
||||
/*
|
||||
* Some hooks are required to return zeroed memory in certain situations. In
|
||||
* debug mode, we do some heuristic checks that they did what they were supposed
|
||||
* to.
|
||||
*
|
||||
* This isn't really ehooks-specific (i.e. anyone can check for zeroed memory).
|
||||
* But incorrect zero information indicates an ehook bug.
|
||||
*/
|
||||
static inline void
|
||||
ehooks_debug_zero_check(void *addr, size_t size) {
|
||||
assert(((uintptr_t)addr & PAGE_MASK) == 0);
|
||||
assert((size & PAGE_MASK) == 0);
|
||||
assert(size > 0);
|
||||
if (config_debug) {
|
||||
/* Check the whole first page. */
|
||||
size_t *p = (size_t *)addr;
|
||||
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
|
||||
assert(p[i] == 0);
|
||||
}
|
||||
/*
|
||||
* And 4 spots within. There's a tradeoff here; the larger
|
||||
* this number, the more likely it is that we'll catch a bug
|
||||
* where ehooks return a sparsely non-zero range. But
|
||||
* increasing the number of checks also increases the number of
|
||||
* page faults in debug mode. FreeBSD does much of their
|
||||
* day-to-day development work in debug mode, so we don't want
|
||||
* even the debug builds to be too slow.
|
||||
*/
|
||||
const size_t nchecks = 4;
|
||||
assert(PAGE >= sizeof(size_t) * nchecks);
|
||||
for (size_t i = 0; i < nchecks; ++i) {
|
||||
assert(p[i * (size / sizeof(size_t) / nchecks)] == 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static inline void *
|
||||
ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero, bool *commit) {
|
||||
bool orig_zero = *zero;
|
||||
void *ret;
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
|
||||
alignment, zero, commit, ehooks_ind_get(ehooks));
|
||||
} else {
|
||||
ehooks_pre_reentrancy(tsdn);
|
||||
ret = extent_hooks->alloc(extent_hooks, new_addr, size,
|
||||
alignment, zero, commit, ehooks_ind_get(ehooks));
|
||||
ehooks_post_reentrancy(tsdn);
|
||||
}
|
||||
assert(new_addr == NULL || ret == NULL || new_addr == ret);
|
||||
assert(!orig_zero || *zero);
|
||||
if (*zero && ret != NULL) {
|
||||
ehooks_debug_zero_check(ret, size);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
|
||||
bool committed) {
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
return ehooks_default_dalloc_impl(addr, size);
|
||||
} else if (extent_hooks->dalloc == NULL) {
|
||||
return true;
|
||||
} else {
|
||||
ehooks_pre_reentrancy(tsdn);
|
||||
bool err = extent_hooks->dalloc(extent_hooks, addr, size,
|
||||
committed, ehooks_ind_get(ehooks));
|
||||
ehooks_post_reentrancy(tsdn);
|
||||
return err;
|
||||
}
|
||||
}
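/*
 * Illustrative sketch (editorial, not part of the upstream header): the
 * *_will_fail checks above let a caller bail out before doing expensive
 * preparation for a hook call that is doomed anyway.  The helper name is
 * hypothetical.
 */
static inline bool
ehooks_try_dalloc_example(tsdn_t *tsdn, ehooks_t *ehooks, void *addr,
    size_t size, bool committed) {
	if (ehooks_dalloc_will_fail(ehooks)) {
		/* Don't bother preparing; the deallocation can't succeed. */
		return true;
	}
	return ehooks_dalloc(tsdn, ehooks, addr, size, committed);
}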
|
||||
|
||||
static inline void
|
||||
ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
|
||||
bool committed) {
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
ehooks_default_destroy_impl(addr, size);
|
||||
} else if (extent_hooks->destroy == NULL) {
|
||||
/* Do nothing. */
|
||||
} else {
|
||||
ehooks_pre_reentrancy(tsdn);
|
||||
extent_hooks->destroy(extent_hooks, addr, size, committed,
|
||||
ehooks_ind_get(ehooks));
|
||||
ehooks_post_reentrancy(tsdn);
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
|
||||
size_t offset, size_t length) {
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
bool err;
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
err = ehooks_default_commit_impl(addr, offset, length);
|
||||
} else if (extent_hooks->commit == NULL) {
|
||||
err = true;
|
||||
} else {
|
||||
ehooks_pre_reentrancy(tsdn);
|
||||
err = extent_hooks->commit(extent_hooks, addr, size,
|
||||
offset, length, ehooks_ind_get(ehooks));
|
||||
ehooks_post_reentrancy(tsdn);
|
||||
}
|
||||
if (!err) {
|
||||
ehooks_debug_zero_check(addr, size);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
|
||||
size_t offset, size_t length) {
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
return ehooks_default_decommit_impl(addr, offset, length);
|
||||
} else if (extent_hooks->decommit == NULL) {
|
||||
return true;
|
||||
} else {
|
||||
ehooks_pre_reentrancy(tsdn);
|
||||
bool err = extent_hooks->decommit(extent_hooks, addr, size,
|
||||
offset, length, ehooks_ind_get(ehooks));
|
||||
ehooks_post_reentrancy(tsdn);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
|
||||
size_t offset, size_t length) {
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
#ifdef PAGES_CAN_PURGE_LAZY
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
return ehooks_default_purge_lazy_impl(addr, offset, length);
|
||||
}
|
||||
#endif
|
||||
if (extent_hooks->purge_lazy == NULL) {
|
||||
return true;
|
||||
} else {
|
||||
ehooks_pre_reentrancy(tsdn);
|
||||
bool err = extent_hooks->purge_lazy(extent_hooks, addr, size,
|
||||
offset, length, ehooks_ind_get(ehooks));
|
||||
ehooks_post_reentrancy(tsdn);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
|
||||
size_t offset, size_t length) {
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
/*
|
||||
* It would be correct to have a ehooks_debug_zero_check call at the end
|
||||
* of this function; purge_forced is required to zero. But checking
|
||||
* would touch the page in question, which may have performance
|
||||
* consequences (imagine the hooks are using hugepages, with a global
|
||||
* zero page off). Even in debug mode, it's usually a good idea to
|
||||
* avoid cases that can dramatically increase memory consumption.
|
||||
*/
|
||||
#ifdef PAGES_CAN_PURGE_FORCED
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
return ehooks_default_purge_forced_impl(addr, offset, length);
|
||||
}
|
||||
#endif
|
||||
if (extent_hooks->purge_forced == NULL) {
|
||||
return true;
|
||||
} else {
|
||||
ehooks_pre_reentrancy(tsdn);
|
||||
bool err = extent_hooks->purge_forced(extent_hooks, addr, size,
|
||||
offset, length, ehooks_ind_get(ehooks));
|
||||
ehooks_post_reentrancy(tsdn);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
|
||||
size_t size_a, size_t size_b, bool committed) {
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
if (ehooks_are_default(ehooks)) {
|
||||
return ehooks_default_split_impl();
|
||||
} else if (extent_hooks->split == NULL) {
|
||||
return true;
|
||||
} else {
|
||||
ehooks_pre_reentrancy(tsdn);
|
||||
bool err = extent_hooks->split(extent_hooks, addr, size, size_a,
|
||||
size_b, committed, ehooks_ind_get(ehooks));
|
||||
ehooks_post_reentrancy(tsdn);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a,
|
||||
void *addr_b, size_t size_b, bool committed) {
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
|
||||
} else if (extent_hooks->merge == NULL) {
|
||||
return true;
|
||||
} else {
|
||||
ehooks_pre_reentrancy(tsdn);
|
||||
bool err = extent_hooks->merge(extent_hooks, addr_a, size_a,
|
||||
addr_b, size_b, committed, ehooks_ind_get(ehooks));
|
||||
ehooks_post_reentrancy(tsdn);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
ehooks_default_zero_impl(addr, size);
|
||||
} else {
|
||||
/*
|
||||
* It would be correct to try using the user-provided purge
|
||||
* hooks (since they are required to have zeroed the extent if
|
||||
* they indicate success), but we don't necessarily know their
|
||||
* cost. We'll be conservative and use memset.
|
||||
*/
|
||||
memset(addr, 0, size);
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
|
||||
bool err;
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
ehooks_default_guard_impl(guard1, guard2);
|
||||
err = false;
|
||||
} else {
|
||||
err = true;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
|
||||
bool err;
|
||||
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
|
||||
|
||||
if (extent_hooks == &ehooks_default_extent_hooks) {
|
||||
ehooks_default_unguard_impl(guard1, guard2);
|
||||
err = false;
|
||||
} else {
|
||||
err = true;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EHOOKS_H */
|
357
BeefRT/JEMalloc/include/jemalloc/internal/emap.h
Normal file
@ -0,0 +1,357 @@
#ifndef JEMALLOC_INTERNAL_EMAP_H
|
||||
#define JEMALLOC_INTERNAL_EMAP_H
|
||||
|
||||
#include "jemalloc/internal/base.h"
|
||||
#include "jemalloc/internal/rtree.h"
|
||||
|
||||
/*
|
||||
* Note: Ends without a semicolon, so that
|
||||
* EMAP_DECLARE_RTREE_CTX;
|
||||
* in uses will avoid empty-statement warnings.
|
||||
*/
|
||||
#define EMAP_DECLARE_RTREE_CTX \
|
||||
rtree_ctx_t rtree_ctx_fallback; \
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
|
||||
|
||||
typedef struct emap_s emap_t;
|
||||
struct emap_s {
|
||||
rtree_t rtree;
|
||||
};
|
||||
|
||||
/* Used to pass rtree lookup context down the path. */
|
||||
typedef struct emap_alloc_ctx_t emap_alloc_ctx_t;
|
||||
struct emap_alloc_ctx_t {
|
||||
szind_t szind;
|
||||
bool slab;
|
||||
};
|
||||
|
||||
typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
|
||||
struct emap_full_alloc_ctx_s {
|
||||
szind_t szind;
|
||||
bool slab;
|
||||
edata_t *edata;
|
||||
};
|
||||
|
||||
bool emap_init(emap_t *emap, base_t *base, bool zeroed);
|
||||
|
||||
void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
|
||||
bool slab);
|
||||
|
||||
void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
|
||||
extent_state_t state);
|
||||
|
||||
/*
|
||||
* The two acquire functions below allow accessing neighbor edatas, if it's safe
|
||||
* and valid to do so (i.e. from the same arena, of the same state, etc.). This
|
||||
* is necessary because the ecache locks are state based, and only protect
|
||||
* edatas with the same state. Therefore the neighbor edata's state needs to be
|
||||
* verified first, before chasing the edata pointer. The returned edata will be
|
||||
* in an acquired state, meaning other threads will be prevented from accessing
|
||||
* it, even if technically the edata can still be discovered from the rtree.
|
||||
*
|
||||
* This means, at any moment when holding pointers to edata, either one of the
|
||||
* state based locks is held (and the edatas are all of the protected state), or
|
||||
* the edatas are in an acquired state (e.g. in active or merging state). The
|
||||
* acquire operation itself (changing the edata to an acquired state) is done
|
||||
* under the state locks.
|
||||
*/
|
||||
edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
|
||||
edata_t *edata, extent_pai_t pai, extent_state_t expected_state,
|
||||
bool forward);
|
||||
edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
|
||||
edata_t *edata, extent_pai_t pai, extent_state_t expected_state);
|
||||
void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
|
||||
extent_state_t new_state);
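/*
 * Illustrative sketch (editorial, not part of the upstream header) of the
 * acquire/release discipline described above: try to acquire the forward
 * neighbor of an edata for coalescing, and release it back into a state if
 * the caller decides not to proceed.  Local names and the chosen states are
 * hypothetical.
 */
static inline void
emap_neighbor_example(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
	edata_t *neighbor = emap_try_acquire_edata_neighbor(tsdn, emap, edata,
	    EXTENT_PAI_PAC, extent_state_dirty, /* forward */ true);
	if (neighbor == NULL) {
		/* No coalescing candidate (or not safe to acquire it). */
		return;
	}
	/* ... attempt the merge; if it is abandoned, restore the state ... */
	emap_release_edata(tsdn, emap, neighbor, extent_state_dirty);
}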
|
||||
|
||||
/*
|
||||
* Associate the given edata with its beginning and end address, setting the
|
||||
* szind and slab info appropriately.
|
||||
* Returns true on error (i.e. resource exhaustion).
|
||||
*/
|
||||
bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
|
||||
szind_t szind, bool slab);
|
||||
|
||||
/*
|
||||
* Does the same thing, but with the interior of the range, for slab
|
||||
* allocations.
|
||||
*
|
||||
* You might wonder why we don't just have a single emap_register function that
|
||||
* does both depending on the value of 'slab'. The answer is twofold:
|
||||
* - As a practical matter, in places like the extract->split->commit pathway,
|
||||
* we defer the interior operation until we're sure that the commit won't fail
|
||||
* (but we have to register the split boundaries there).
|
||||
* - In general, we're trying to move to a world where the page-specific
|
||||
* allocator doesn't know as much about how the pages it allocates will be
|
||||
* used, and passing a 'slab' parameter everywhere makes that more
|
||||
* complicated.
|
||||
*
|
||||
* Unlike the boundary version, this function can't fail; this is because slabs
|
||||
* can't get big enough to touch a new page that neither of the boundaries
|
||||
* touched, so no allocation is necessary to fill the interior once the boundary
|
||||
* has been touched.
|
||||
*/
|
||||
void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
|
||||
szind_t szind);
|
||||
|
||||
void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
|
||||
void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
|
||||
|
||||
typedef struct emap_prepare_s emap_prepare_t;
|
||||
struct emap_prepare_s {
|
||||
rtree_leaf_elm_t *lead_elm_a;
|
||||
rtree_leaf_elm_t *lead_elm_b;
|
||||
rtree_leaf_elm_t *trail_elm_a;
|
||||
rtree_leaf_elm_t *trail_elm_b;
|
||||
};
|
||||
|
||||
/**
|
||||
* These functions handle the emap metadata management for merging, splitting,
|
||||
* reusing extents. In particular, they set the boundary mappings from
|
||||
* addresses to edatas. If the result is going to be used as a slab, you
|
||||
* still need to call emap_register_interior on it, though.
|
||||
*
|
||||
* Remap simply changes the szind and slab status of an extent's boundary
|
||||
* mappings. If the extent is not a slab, it doesn't bother with updating the
|
||||
* end mapping (since lookups only occur in the interior of an extent for
|
||||
* slabs). Since the szind and slab status only make sense for active extents,
|
||||
* this should only be called while activating or deactivating an extent.
|
||||
*
|
||||
* Split and merge have a "prepare" and a "commit" portion. The prepare portion
|
||||
* does the operations that can be done without exclusive access to the extent
|
||||
* in question, while the commit variant requires exclusive access to maintain
|
||||
* the emap invariants. The only function that can fail is emap_split_prepare,
|
||||
* and it returns true on failure (at which point the caller shouldn't commit).
|
||||
*
|
||||
* In all cases, "lead" refers to the lower-addressed extent, and trail to the
|
||||
* higher-addressed one. It's the caller's responsibility to set the edata
|
||||
* state appropriately.
|
||||
*/
|
||||
bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
|
||||
edata_t *edata, size_t size_a, edata_t *trail, size_t size_b);
|
||||
void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
|
||||
edata_t *lead, size_t size_a, edata_t *trail, size_t size_b);
|
||||
void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
|
||||
edata_t *lead, edata_t *trail);
|
||||
void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
|
||||
edata_t *lead, edata_t *trail);
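/*
 * Illustrative sketch (editorial, not part of the upstream header) of the
 * prepare/commit protocol described above, for a split.  Locking and the
 * surrounding extent bookkeeping are elided; only emap_split_prepare can
 * fail (returning true), in which case nothing is committed.  The helper
 * name is hypothetical.
 */
static inline bool
emap_split_example(tsdn_t *tsdn, emap_t *emap, edata_t *lead, edata_t *trail,
    size_t size_a, size_t size_b) {
	emap_prepare_t prepare;
	if (emap_split_prepare(tsdn, emap, &prepare, lead, size_a, trail,
	    size_b)) {
		return true;	/* Caller must not commit. */
	}
	/* Commit requires exclusive access to the extent in question. */
	emap_split_commit(tsdn, emap, &prepare, lead, size_a, trail, size_b);
	return false;
}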
|
||||
|
||||
/* Assert that the emap's view of the given edata matches the edata's view. */
|
||||
void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
|
||||
static inline void
|
||||
emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
|
||||
if (config_debug) {
|
||||
emap_do_assert_mapped(tsdn, emap, edata);
|
||||
}
|
||||
}
|
||||
|
||||
/* Assert that the given edata isn't in the map. */
|
||||
void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
|
||||
static inline void
|
||||
emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
|
||||
if (config_debug) {
|
||||
emap_do_assert_not_mapped(tsdn, emap, edata);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
|
||||
assert(config_debug);
|
||||
emap_assert_mapped(tsdn, emap, edata);
|
||||
|
||||
EMAP_DECLARE_RTREE_CTX;
|
||||
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
|
||||
(uintptr_t)edata_base_get(edata));
|
||||
|
||||
return edata_state_in_transition(contents.metadata.state);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
|
||||
if (!config_debug) {
|
||||
/* For assertions only. */
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* The edata is considered acquired if no other threads will attempt to
|
||||
* read / write any fields from it. This includes a few cases:
|
||||
*
|
||||
* 1) edata not hooked into emap yet -- This implies the edata just got
|
||||
* allocated or initialized.
|
||||
*
|
||||
* 2) in an active or transition state -- In both cases, the edata can
|
||||
* be discovered from the emap, however the state tracked in the rtree
|
||||
* will prevent other threads from accessing the actual edata.
|
||||
*/
|
||||
EMAP_DECLARE_RTREE_CTX;
|
||||
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
|
||||
rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
|
||||
/* init_missing */ false);
|
||||
if (elm == NULL) {
|
||||
return true;
|
||||
}
|
||||
rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm,
|
||||
/* dependent */ true);
|
||||
if (contents.edata == NULL ||
|
||||
contents.metadata.state == extent_state_active ||
|
||||
edata_state_in_transition(contents.metadata.state)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
|
||||
assert(edata_arena_ind_get(inner) == edata_arena_ind_get(outer));
|
||||
assert(edata_pai_get(inner) == edata_pai_get(outer));
|
||||
assert(edata_committed_get(inner) == edata_committed_get(outer));
|
||||
assert(edata_state_get(inner) == extent_state_active);
|
||||
assert(edata_state_get(outer) == extent_state_merging);
|
||||
assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
|
||||
assert(edata_base_get(inner) == edata_past_get(outer) ||
|
||||
edata_base_get(outer) == edata_past_get(inner));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
extent_assert_can_expand(const edata_t *original, const edata_t *expand) {
|
||||
assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand));
|
||||
assert(edata_pai_get(original) == edata_pai_get(expand));
|
||||
assert(edata_state_get(original) == extent_state_active);
|
||||
assert(edata_state_get(expand) == extent_state_merging);
|
||||
assert(edata_past_get(original) == edata_base_get(expand));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE edata_t *
|
||||
emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
|
||||
EMAP_DECLARE_RTREE_CTX;
|
||||
|
||||
return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata;
|
||||
}
|
||||
|
||||
/* Fills in alloc_ctx with the info in the map. */
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
|
||||
emap_alloc_ctx_t *alloc_ctx) {
|
||||
EMAP_DECLARE_RTREE_CTX;
|
||||
|
||||
rtree_metadata_t metadata = rtree_metadata_read(tsdn, &emap->rtree,
|
||||
rtree_ctx, (uintptr_t)ptr);
|
||||
alloc_ctx->szind = metadata.szind;
|
||||
alloc_ctx->slab = metadata.slab;
|
||||
}
|
||||
|
||||
/* The pointer must be mapped. */
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
|
||||
emap_full_alloc_ctx_t *full_alloc_ctx) {
|
||||
EMAP_DECLARE_RTREE_CTX;
|
||||
|
||||
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
|
||||
(uintptr_t)ptr);
|
||||
full_alloc_ctx->edata = contents.edata;
|
||||
full_alloc_ctx->szind = contents.metadata.szind;
|
||||
full_alloc_ctx->slab = contents.metadata.slab;
|
||||
}
|
||||
|
||||
/*
|
||||
* The pointer is allowed to not be mapped.
|
||||
*
|
||||
* Returns true when the pointer is not present.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
|
||||
emap_full_alloc_ctx_t *full_alloc_ctx) {
|
||||
EMAP_DECLARE_RTREE_CTX;
|
||||
|
||||
rtree_contents_t contents;
|
||||
bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx,
|
||||
(uintptr_t)ptr, &contents);
|
||||
if (err) {
|
||||
return true;
|
||||
}
|
||||
full_alloc_ctx->edata = contents.edata;
|
||||
full_alloc_ctx->szind = contents.metadata.szind;
|
||||
full_alloc_ctx->slab = contents.metadata.slab;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Only used on the fastpath of free. Returns true when cannot be fulfilled by
|
||||
* fast path, e.g. when the metadata key is not cached.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
|
||||
emap_alloc_ctx_t *alloc_ctx) {
|
||||
/* Use the unsafe getter since this may get called during exit. */
|
||||
rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd);
|
||||
|
||||
rtree_metadata_t metadata;
|
||||
bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree,
|
||||
rtree_ctx, (uintptr_t)ptr, &metadata);
|
||||
if (err) {
|
||||
return true;
|
||||
}
|
||||
alloc_ctx->szind = metadata.szind;
|
||||
alloc_ctx->slab = metadata.slab;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* We want to do batch lookups out of the cache bins, which use
|
||||
* cache_bin_ptr_array_get to access the i'th element of the bin (since they
|
||||
* invert usual ordering in deciding what to flush). This lets the emap avoid
|
||||
* caring about its caller's ordering.
|
||||
*/
|
||||
typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind);
|
||||
/*
|
||||
* This allows size-checking assertions, which we can only do while we're in the
|
||||
* process of edata lookups.
|
||||
*/
|
||||
typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx);
|
||||
|
||||
typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t;
|
||||
union emap_batch_lookup_result_u {
|
||||
edata_t *edata;
|
||||
rtree_leaf_elm_t *rtree_leaf;
|
||||
};
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs,
|
||||
emap_ptr_getter ptr_getter, void *ptr_getter_ctx,
|
||||
emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx,
|
||||
emap_batch_lookup_result_t *result) {
|
||||
/* Avoids null-checking tsdn in the loop below. */
|
||||
util_assume(tsd != NULL);
|
||||
rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get(tsd);
|
||||
|
||||
for (size_t i = 0; i < nptrs; i++) {
|
||||
const void *ptr = ptr_getter(ptr_getter_ctx, i);
|
||||
/*
|
||||
* Reuse the edatas array as a temp buffer, lying a little about
|
||||
* the types.
|
||||
*/
|
||||
result[i].rtree_leaf = rtree_leaf_elm_lookup(tsd_tsdn(tsd),
|
||||
&emap->rtree, rtree_ctx, (uintptr_t)ptr,
|
||||
/* dependent */ true, /* init_missing */ false);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < nptrs; i++) {
|
||||
rtree_leaf_elm_t *elm = result[i].rtree_leaf;
|
||||
rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd),
|
||||
&emap->rtree, elm, /* dependent */ true);
|
||||
result[i].edata = contents.edata;
|
||||
emap_full_alloc_ctx_t alloc_ctx;
|
||||
/*
|
||||
* Not all these fields are read in practice by the metadata
|
||||
* visitor. But the compiler can easily optimize away the ones
|
||||
* that aren't, so no sense in being incomplete.
|
||||
*/
|
||||
alloc_ctx.szind = contents.metadata.szind;
|
||||
alloc_ctx.slab = contents.metadata.slab;
|
||||
alloc_ctx.edata = contents.edata;
|
||||
metadata_visitor(metadata_visitor_ctx, &alloc_ctx);
|
||||
}
|
||||
}
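/*
 * Illustrative sketch (editorial, not part of the upstream header): minimal
 * callbacks for emap_edata_lookup_batch over a plain array of pointers.  The
 * names are hypothetical; the visitor here just counts slab-backed pointers.
 */
static inline const void *
emap_example_ptr_getter(void *ctx, size_t ind) {
	return ((const void **)ctx)[ind];
}

static inline void
emap_example_metadata_visitor(void *ctx, emap_full_alloc_ctx_t *alloc_ctx) {
	if (alloc_ctx->slab) {
		(*(size_t *)ctx)++;
	}
}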
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EMAP_H */
|
510
BeefRT/JEMalloc/include/jemalloc/internal/emitter.h
Normal file
@ -0,0 +1,510 @@
#ifndef JEMALLOC_INTERNAL_EMITTER_H
|
||||
#define JEMALLOC_INTERNAL_EMITTER_H
|
||||
|
||||
#include "jemalloc/internal/ql.h"
|
||||
|
||||
typedef enum emitter_output_e emitter_output_t;
|
||||
enum emitter_output_e {
|
||||
emitter_output_json,
|
||||
emitter_output_json_compact,
|
||||
emitter_output_table
|
||||
};
|
||||
|
||||
typedef enum emitter_justify_e emitter_justify_t;
|
||||
enum emitter_justify_e {
|
||||
emitter_justify_left,
|
||||
emitter_justify_right,
|
||||
/* Not for users; just to pass to internal functions. */
|
||||
emitter_justify_none
|
||||
};
|
||||
|
||||
typedef enum emitter_type_e emitter_type_t;
|
||||
enum emitter_type_e {
|
||||
emitter_type_bool,
|
||||
emitter_type_int,
|
||||
emitter_type_int64,
|
||||
emitter_type_unsigned,
|
||||
emitter_type_uint32,
|
||||
emitter_type_uint64,
|
||||
emitter_type_size,
|
||||
emitter_type_ssize,
|
||||
emitter_type_string,
|
||||
/*
|
||||
* A title is a column title in a table; it's just a string, but it's
|
||||
* not quoted.
|
||||
*/
|
||||
emitter_type_title,
|
||||
};
|
||||
|
||||
typedef struct emitter_col_s emitter_col_t;
|
||||
struct emitter_col_s {
|
||||
/* Filled in by the user. */
|
||||
emitter_justify_t justify;
|
||||
int width;
|
||||
emitter_type_t type;
|
||||
union {
|
||||
bool bool_val;
|
||||
int int_val;
|
||||
unsigned unsigned_val;
|
||||
uint32_t uint32_val;
|
||||
uint32_t uint32_t_val;
|
||||
uint64_t uint64_val;
|
||||
uint64_t uint64_t_val;
|
||||
size_t size_val;
|
||||
ssize_t ssize_val;
|
||||
const char *str_val;
|
||||
};
|
||||
|
||||
/* Filled in by initialization. */
|
||||
ql_elm(emitter_col_t) link;
|
||||
};
|
||||
|
||||
typedef struct emitter_row_s emitter_row_t;
|
||||
struct emitter_row_s {
|
||||
ql_head(emitter_col_t) cols;
|
||||
};
|
||||
|
||||
typedef struct emitter_s emitter_t;
|
||||
struct emitter_s {
|
||||
emitter_output_t output;
|
||||
/* The output information. */
|
||||
write_cb_t *write_cb;
|
||||
void *cbopaque;
|
||||
int nesting_depth;
|
||||
/* True if we've already emitted a value at the given depth. */
|
||||
bool item_at_depth;
|
||||
/* True if we emitted a key and will emit corresponding value next. */
|
||||
bool emitted_key;
|
||||
};
|
||||
|
||||
static inline bool
|
||||
emitter_outputs_json(emitter_t *emitter) {
|
||||
return emitter->output == emitter_output_json ||
|
||||
emitter->output == emitter_output_json_compact;
|
||||
}
|
||||
|
||||
/* Internal convenience function. Write to the emitter the given string. */
|
||||
JEMALLOC_FORMAT_PRINTF(2, 3)
|
||||
static inline void
|
||||
emitter_printf(emitter_t *emitter, const char *format, ...) {
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, format);
|
||||
malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
static inline const char * JEMALLOC_FORMAT_ARG(3)
|
||||
emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
|
||||
emitter_justify_t justify, int width) {
|
||||
size_t written;
|
||||
fmt_specifier++;
|
||||
if (justify == emitter_justify_none) {
|
||||
written = malloc_snprintf(out_fmt, out_size,
|
||||
"%%%s", fmt_specifier);
|
||||
} else if (justify == emitter_justify_left) {
|
||||
written = malloc_snprintf(out_fmt, out_size,
|
||||
"%%-%d%s", width, fmt_specifier);
|
||||
} else {
|
||||
written = malloc_snprintf(out_fmt, out_size,
|
||||
"%%%d%s", width, fmt_specifier);
|
||||
}
|
||||
/* Only happens in case of bad format string, which *we* choose. */
|
||||
assert(written < out_size);
|
||||
return out_fmt;
|
||||
}
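/*
 * Illustrative note (not part of the original header): emitter_gen_fmt turns a
 * conversion such as "%s" or "%zu" plus a justification and width into a
 * concrete printf format at run time, for example:
 *
 *     emitter_gen_fmt(fmt, FMT_SIZE, "%s",  emitter_justify_left,  20) -> "%-20s"
 *     emitter_gen_fmt(fmt, FMT_SIZE, "%zu", emitter_justify_right, 12) -> "%12zu"
 *     emitter_gen_fmt(fmt, FMT_SIZE, "%d",  emitter_justify_none,  -1) -> "%d"
 */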
|
||||
|
||||
/*
|
||||
* Internal. Emit the given value type in the relevant encoding (so that the
|
||||
* bool true gets mapped to json "true", but the string "true" gets mapped to
|
||||
* json "\"true\"", for instance).
|
||||
*
|
||||
* Width is ignored if justify is emitter_justify_none.
|
||||
*/
|
||||
static inline void
|
||||
emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
size_t str_written;
|
||||
#define BUF_SIZE 256
|
||||
#define FMT_SIZE 10
|
||||
/*
|
||||
* We dynamically generate a format string to emit, to let us use the
|
||||
* snprintf machinery. This is kinda hacky, but gets the job done
|
||||
* quickly without having to think about the various snprintf edge
|
||||
* cases.
|
||||
*/
|
||||
char fmt[FMT_SIZE];
|
||||
char buf[BUF_SIZE];
|
||||
|
||||
#define EMIT_SIMPLE(type, format) \
|
||||
emitter_printf(emitter, \
|
||||
emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \
|
||||
*(const type *)value);
|
||||
|
||||
switch (value_type) {
|
||||
case emitter_type_bool:
|
||||
emitter_printf(emitter,
|
||||
emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
|
||||
*(const bool *)value ? "true" : "false");
|
||||
break;
|
||||
case emitter_type_int:
|
||||
EMIT_SIMPLE(int, "%d")
|
||||
break;
|
||||
case emitter_type_int64:
|
||||
EMIT_SIMPLE(int64_t, "%" FMTd64)
|
||||
break;
|
||||
case emitter_type_unsigned:
|
||||
EMIT_SIMPLE(unsigned, "%u")
|
||||
break;
|
||||
case emitter_type_ssize:
|
||||
EMIT_SIMPLE(ssize_t, "%zd")
|
||||
break;
|
||||
case emitter_type_size:
|
||||
EMIT_SIMPLE(size_t, "%zu")
|
||||
break;
|
||||
case emitter_type_string:
|
||||
str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
|
||||
*(const char *const *)value);
|
||||
/*
|
||||
* We control the strings we output; we shouldn't get anything
|
||||
* anywhere near the fmt size.
|
||||
*/
|
||||
assert(str_written < BUF_SIZE);
|
||||
emitter_printf(emitter,
|
||||
emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
|
||||
break;
|
||||
case emitter_type_uint32:
|
||||
EMIT_SIMPLE(uint32_t, "%" FMTu32)
|
||||
break;
|
||||
case emitter_type_uint64:
|
||||
EMIT_SIMPLE(uint64_t, "%" FMTu64)
|
||||
break;
|
||||
case emitter_type_title:
|
||||
EMIT_SIMPLE(char *const, "%s");
|
||||
break;
|
||||
default:
|
||||
unreachable();
|
||||
}
|
||||
#undef BUF_SIZE
|
||||
#undef FMT_SIZE
|
||||
}
|
||||
|
||||
|
||||
/* Internal functions. In json mode, tracks nesting state. */
|
||||
static inline void
|
||||
emitter_nest_inc(emitter_t *emitter) {
|
||||
emitter->nesting_depth++;
|
||||
emitter->item_at_depth = false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_nest_dec(emitter_t *emitter) {
|
||||
emitter->nesting_depth--;
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_indent(emitter_t *emitter) {
|
||||
int amount = emitter->nesting_depth;
|
||||
const char *indent_str;
|
||||
assert(emitter->output != emitter_output_json_compact);
|
||||
if (emitter->output == emitter_output_json) {
|
||||
indent_str = "\t";
|
||||
} else {
|
||||
amount *= 2;
|
||||
indent_str = " ";
|
||||
}
|
||||
for (int i = 0; i < amount; i++) {
|
||||
emitter_printf(emitter, "%s", indent_str);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_key_prefix(emitter_t *emitter) {
|
||||
assert(emitter_outputs_json(emitter));
|
||||
if (emitter->emitted_key) {
|
||||
emitter->emitted_key = false;
|
||||
return;
|
||||
}
|
||||
if (emitter->item_at_depth) {
|
||||
emitter_printf(emitter, ",");
|
||||
}
|
||||
if (emitter->output != emitter_output_json_compact) {
|
||||
emitter_printf(emitter, "\n");
|
||||
emitter_indent(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
/* Public functions for emitter_t. */
|
||||
|
||||
static inline void
|
||||
emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
|
||||
write_cb_t *write_cb, void *cbopaque) {
|
||||
emitter->output = emitter_output;
|
||||
emitter->write_cb = write_cb;
|
||||
emitter->cbopaque = cbopaque;
|
||||
emitter->item_at_depth = false;
|
||||
emitter->emitted_key = false;
|
||||
emitter->nesting_depth = 0;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
/* JSON public API. */
|
||||
|
||||
/*
|
||||
* Emits a key (e.g. as appears in an object). The next json entity emitted will
|
||||
* be the corresponding value.
|
||||
*/
|
||||
static inline void
|
||||
emitter_json_key(emitter_t *emitter, const char *json_key) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "\"%s\":%s", json_key,
|
||||
emitter->output == emitter_output_json_compact ? "" : " ");
|
||||
emitter->emitted_key = true;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
|
||||
const void *value) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
value_type, value);
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
}
|
||||
|
||||
/* Shorthand for calling emitter_json_key and then emitter_json_value. */
|
||||
static inline void
|
||||
emitter_json_kv(emitter_t *emitter, const char *json_key,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_value(emitter, value_type, value);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_array_begin(emitter_t *emitter) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "[");
|
||||
emitter_nest_inc(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
/* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */
|
||||
static inline void
|
||||
emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_array_begin(emitter);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_array_end(emitter_t *emitter) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
assert(emitter->nesting_depth > 0);
|
||||
emitter_nest_dec(emitter);
|
||||
if (emitter->output != emitter_output_json_compact) {
|
||||
emitter_printf(emitter, "\n");
|
||||
emitter_indent(emitter);
|
||||
}
|
||||
emitter_printf(emitter, "]");
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_object_begin(emitter_t *emitter) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "{");
|
||||
emitter_nest_inc(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
/* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */
|
||||
static inline void
|
||||
emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_object_begin(emitter);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_object_end(emitter_t *emitter) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
assert(emitter->nesting_depth > 0);
|
||||
emitter_nest_dec(emitter);
|
||||
if (emitter->output != emitter_output_json_compact) {
|
||||
emitter_printf(emitter, "\n");
|
||||
emitter_indent(emitter);
|
||||
}
|
||||
emitter_printf(emitter, "}");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Table public API. */
|
||||
|
||||
static inline void
|
||||
emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "%s\n", table_key);
|
||||
emitter_nest_inc(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_dict_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
emitter_nest_dec(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_kv_note(emitter_t *emitter, const char *table_key,
|
||||
emitter_type_t value_type, const void *value,
|
||||
const char *table_note_key, emitter_type_t table_note_value_type,
|
||||
const void *table_note_value) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "%s: ", table_key);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
value_type, value);
|
||||
if (table_note_key != NULL) {
|
||||
emitter_printf(emitter, " (%s: ", table_note_key);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
table_note_value_type, table_note_value);
|
||||
emitter_printf(emitter, ")");
|
||||
}
|
||||
emitter_printf(emitter, "\n");
|
||||
}
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_kv(emitter_t *emitter, const char *table_key,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
emitter_table_kv_note(emitter, table_key, value_type, value, NULL,
|
||||
emitter_type_bool, NULL);
|
||||
}
|
||||
|
||||
|
||||
/* Write to the emitter the given string, but only in table mode. */
|
||||
JEMALLOC_FORMAT_PRINTF(2, 3)
|
||||
static inline void
|
||||
emitter_table_printf(emitter_t *emitter, const char *format, ...) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
va_list ap;
|
||||
va_start(ap, format);
|
||||
malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
|
||||
if (emitter->output != emitter_output_table) {
|
||||
return;
|
||||
}
|
||||
emitter_col_t *col;
|
||||
ql_foreach(col, &row->cols, link) {
|
||||
emitter_print_value(emitter, col->justify, col->width,
|
||||
col->type, (const void *)&col->bool_val);
|
||||
}
|
||||
emitter_table_printf(emitter, "\n");
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_row_init(emitter_row_t *row) {
|
||||
ql_new(&row->cols);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
|
||||
ql_elm_new(col, link);
|
||||
ql_tail_insert(&row->cols, col, link);
|
||||
}
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* Generalized public API. Emits using either JSON or table, according to
|
||||
* settings in the emitter_t.
*/
|
||||
|
||||
/*
|
||||
* Note emits a different kv pair as well, but only in table mode. Omits the
|
||||
* note if table_note_key is NULL.
|
||||
*/
|
||||
static inline void
|
||||
emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
|
||||
emitter_type_t value_type, const void *value,
|
||||
const char *table_note_key, emitter_type_t table_note_value_type,
|
||||
const void *table_note_value) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_value(emitter, value_type, value);
|
||||
} else {
|
||||
emitter_table_kv_note(emitter, table_key, value_type, value,
|
||||
table_note_key, table_note_value_type, table_note_value);
|
||||
}
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
|
||||
emitter_type_bool, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_dict_begin(emitter_t *emitter, const char *json_key,
|
||||
const char *table_header) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_object_begin(emitter);
|
||||
} else {
|
||||
emitter_table_dict_begin(emitter, table_header);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_dict_end(emitter_t *emitter) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
emitter_json_object_end(emitter);
|
||||
} else {
|
||||
emitter_table_dict_end(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_begin(emitter_t *emitter) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
assert(emitter->nesting_depth == 0);
|
||||
emitter_printf(emitter, "{");
|
||||
emitter_nest_inc(emitter);
|
||||
} else {
|
||||
/*
|
||||
* This guarantees that we always call write_cb at least once.
|
||||
* This is useful if some invariant is established by each call
|
||||
* to write_cb, but doesn't hold initially: e.g., some buffer
|
||||
* holds a null-terminated string.
|
||||
*/
|
||||
emitter_printf(emitter, "%s", "");
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_end(emitter_t *emitter) {
|
||||
if (emitter_outputs_json(emitter)) {
|
||||
assert(emitter->nesting_depth == 1);
|
||||
emitter_nest_dec(emitter);
|
||||
emitter_printf(emitter, "%s", emitter->output ==
|
||||
emitter_output_json_compact ? "}" : "\n}\n");
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EMITTER_H */
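The JSON side of the emitter is a small state machine: nesting_depth drives the indentation, item_at_depth decides whether a comma is needed before the next item, and emitted_key suppresses the separator between a key and the value that follows it. A standalone sketch of that comma/indent logic, with hypothetical names and plain printf in place of jemalloc's write_cb plumbing:

#include <stdbool.h>
#include <stdio.h>

typedef struct {
	int depth;
	bool item_at_depth;	/* something already emitted at this depth */
	bool emitted_key;	/* a key was just written; its value comes next */
} mini_emitter_t;

/* Emit the separator/newline/indent that precedes any new JSON item. */
static void prefix(mini_emitter_t *e) {
	if (e->emitted_key) { e->emitted_key = false; return; }
	if (e->item_at_depth) { printf(","); }
	printf("\n");
	for (int i = 0; i < e->depth; i++) { printf("\t"); }
}

static void key(mini_emitter_t *e, const char *k) {
	prefix(e); printf("\"%s\": ", k); e->emitted_key = true;
}

static void kv(mini_emitter_t *e, const char *k, int v) {
	prefix(e); printf("\"%s\": %d", k, v); e->item_at_depth = true;
}

static void obj_begin(mini_emitter_t *e) {
	prefix(e); printf("{"); e->depth++; e->item_at_depth = false;
}

static void obj_end(mini_emitter_t *e) {
	e->depth--;
	printf("\n");
	for (int i = 0; i < e->depth; i++) { printf("\t"); }
	printf("}");
	e->item_at_depth = true;
}

int main(void) {
	mini_emitter_t e = {0};
	printf("{");		/* like emitter_begin */
	e.depth = 1;
	kv(&e, "a", 1);
	kv(&e, "b", 2);
	key(&e, "stats");	/* like emitter_json_object_kv_begin */
	obj_begin(&e);
	kv(&e, "c", 3);
	obj_end(&e);
	printf("\n}\n");	/* like emitter_end */
	return 0;
}

This prints the same nested, tab-indented shape that emitter_output_json produces.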
|
77 BeefRT/JEMalloc/include/jemalloc/internal/eset.h Normal file
@@ -0,0 +1,77 @@
#ifndef JEMALLOC_INTERNAL_ESET_H
|
||||
#define JEMALLOC_INTERNAL_ESET_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/fb.h"
|
||||
#include "jemalloc/internal/edata.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
/*
|
||||
* An eset ("extent set") is a quantized collection of extents, with built-in
|
||||
* LRU queue.
|
||||
*
|
||||
* This class is not thread-safe; synchronization must be done externally if
|
||||
* there are mutating operations. One exception is the stats counters, which
|
||||
* may be read without any locking.
|
||||
*/
|
||||
|
||||
typedef struct eset_bin_s eset_bin_t;
|
||||
struct eset_bin_s {
|
||||
edata_heap_t heap;
|
||||
/*
|
||||
* We do first-fit across multiple size classes. If we compared against
|
||||
* the min element in each heap directly, we'd take a cache miss per
|
||||
* extent we looked at. If we co-locate the edata summaries, we only
|
||||
* take a miss on the edata we're actually going to return (which is
|
||||
* inevitable anyways).
|
||||
*/
|
||||
edata_cmp_summary_t heap_min;
|
||||
};
|
||||
|
||||
typedef struct eset_bin_stats_s eset_bin_stats_t;
|
||||
struct eset_bin_stats_s {
|
||||
atomic_zu_t nextents;
|
||||
atomic_zu_t nbytes;
|
||||
};
|
||||
|
||||
typedef struct eset_s eset_t;
|
||||
struct eset_s {
|
||||
/* Bitmap for which set bits correspond to non-empty heaps. */
|
||||
fb_group_t bitmap[FB_NGROUPS(SC_NPSIZES + 1)];
|
||||
|
||||
/* Quantized per size class heaps of extents. */
|
||||
eset_bin_t bins[SC_NPSIZES + 1];
|
||||
|
||||
eset_bin_stats_t bin_stats[SC_NPSIZES + 1];
|
||||
|
||||
/* LRU of all extents in heaps. */
|
||||
edata_list_inactive_t lru;
|
||||
|
||||
/* Page sum for all extents in heaps. */
|
||||
atomic_zu_t npages;
|
||||
|
||||
/*
|
||||
* A duplication of the data in the containing ecache. We use this only
|
||||
* for assertions on the states of the passed-in extents.
|
||||
*/
|
||||
extent_state_t state;
|
||||
};
|
||||
|
||||
void eset_init(eset_t *eset, extent_state_t state);
|
||||
|
||||
size_t eset_npages_get(eset_t *eset);
|
||||
/* Get the number of extents in the given page size index. */
|
||||
size_t eset_nextents_get(eset_t *eset, pszind_t ind);
|
||||
/* Get the sum total bytes of the extents in the given page size index. */
|
||||
size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
|
||||
|
||||
void eset_insert(eset_t *eset, edata_t *edata);
|
||||
void eset_remove(eset_t *eset, edata_t *edata);
|
||||
/*
|
||||
* Select an extent from this eset of the given size and alignment. Returns
|
||||
* null if no such item could be found.
|
||||
*/
|
||||
edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
|
||||
unsigned lg_max_fit);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ESET_H */
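eset_fit's first-fit search leans on the bitmap above: each set bit marks a non-empty heap, so finding the smallest usable size class is a find-first-set starting at the requested index instead of a walk over every heap. A standalone sketch of that lookup over plain unsigned long groups (illustrative only, and relying on GCC/Clang's __builtin_ctzl):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

#define GROUP_BITS (sizeof(unsigned long) * CHAR_BIT)

/* Return the first set bit with index >= min_ind, or nbits if none exists. */
static size_t
first_nonempty(const unsigned long *bitmap, size_t nbits, size_t min_ind) {
	for (size_t g = min_ind / GROUP_BITS; g * GROUP_BITS < nbits; g++) {
		unsigned long group = bitmap[g];
		if (g == min_ind / GROUP_BITS) {
			/* Mask off bins smaller than the request. */
			group &= ~0UL << (min_ind % GROUP_BITS);
		}
		if (group != 0) {
			return g * GROUP_BITS + (size_t)__builtin_ctzl(group);
		}
	}
	return nbits;
}

int main(void) {
	unsigned long bitmap[2] = {0};
	bitmap[0] |= 1UL << 7;		/* bin 7 non-empty */
	bitmap[1] |= 1UL << 3;		/* bin GROUP_BITS + 3 non-empty */
	printf("%zu\n", first_nonempty(bitmap, 2 * GROUP_BITS, 5));	/* 7 */
	printf("%zu\n", first_nonempty(bitmap, 2 * GROUP_BITS, 8));	/* 67 on LP64 */
	return 0;
}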
|
50 BeefRT/JEMalloc/include/jemalloc/internal/exp_grow.h Normal file
@@ -0,0 +1,50 @@
#ifndef JEMALLOC_INTERNAL_EXP_GROW_H
|
||||
#define JEMALLOC_INTERNAL_EXP_GROW_H
|
||||
|
||||
typedef struct exp_grow_s exp_grow_t;
|
||||
struct exp_grow_s {
|
||||
/*
|
||||
* Next extent size class in a growing series to use when satisfying a
|
||||
* request via the extent hooks (only if opt_retain). This limits the
|
||||
* number of disjoint virtual memory ranges so that extent merging can
|
||||
* be effective even if multiple arenas' extent allocation requests are
|
||||
* highly interleaved.
|
||||
*
|
||||
* retain_grow_limit is the max allowed size ind to expand (unless the
|
||||
* required size is greater). Default is no limit, and controlled
|
||||
* through mallctl only.
|
||||
*/
|
||||
pszind_t next;
|
||||
pszind_t limit;
|
||||
};
|
||||
|
||||
static inline bool
|
||||
exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min,
|
||||
size_t *r_alloc_size, pszind_t *r_skip) {
|
||||
*r_skip = 0;
|
||||
*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
|
||||
while (*r_alloc_size < alloc_size_min) {
|
||||
(*r_skip)++;
|
||||
if (exp_grow->next + *r_skip >=
|
||||
sz_psz2ind(SC_LARGE_MAXCLASS)) {
|
||||
/* Outside legal range. */
|
||||
return true;
|
||||
}
|
||||
*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) {
|
||||
if (exp_grow->next + skip + 1 <= exp_grow->limit) {
|
||||
exp_grow->next += skip + 1;
|
||||
} else {
|
||||
exp_grow->next = exp_grow->limit;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void exp_grow_init(exp_grow_t *exp_grow);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXP_GROW_H */
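exp_grow keeps retained virtual memory in a small number of geometrically growing ranges: the prepare step picks the first size class at or above the request starting from next, and the commit step advances next past the class actually used, capped at limit. A standalone sketch with made-up power-of-two size classes standing in for sz_pind2sz/sz_psz2ind (names are hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NCLASSES 32
/* Hypothetical size classes: class i covers (4096 << i) bytes. */
static size_t class_size(unsigned i) { return (size_t)4096 << i; }

typedef struct { unsigned next, limit; } grow_t;

/* Pick the first class >= min starting at grow->next; returns true on error. */
static bool
grow_prepare(const grow_t *grow, size_t min, size_t *alloc_size, unsigned *skip) {
	*skip = 0;
	*alloc_size = class_size(grow->next);
	while (*alloc_size < min) {
		(*skip)++;
		if (grow->next + *skip >= NCLASSES) {
			return true;	/* outside legal range */
		}
		*alloc_size = class_size(grow->next + *skip);
	}
	return false;
}

static void
grow_commit(grow_t *grow, unsigned skip) {
	grow->next = (grow->next + skip + 1 <= grow->limit)
	    ? grow->next + skip + 1 : grow->limit;
}

int main(void) {
	grow_t grow = { .next = 0, .limit = 20 };
	size_t sz;
	unsigned skip;
	if (!grow_prepare(&grow, 3 * 4096, &sz, &skip)) {	/* wants 12 KiB */
		printf("alloc %zu bytes, skip %u\n", sz, skip);	/* 16384, 2 */
		grow_commit(&grow, skip);			/* next -> 3 */
	}
	printf("next class now %u (%zu bytes)\n", grow.next, class_size(grow.next));
	return 0;
}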
|
137 BeefRT/JEMalloc/include/jemalloc/internal/extent.h Normal file
@@ -0,0 +1,137 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_H
|
||||
#define JEMALLOC_INTERNAL_EXTENT_H
|
||||
|
||||
#include "jemalloc/internal/ecache.h"
|
||||
#include "jemalloc/internal/ehooks.h"
|
||||
#include "jemalloc/internal/ph.h"
|
||||
#include "jemalloc/internal/rtree.h"
|
||||
|
||||
/*
|
||||
* This module contains the page-level allocator. It chooses the addresses that
|
||||
* allocations requested by other modules will inhabit, and updates the global
|
||||
* metadata to reflect allocation/deallocation/purging decisions.
|
||||
*/
|
||||
|
||||
/*
|
||||
* When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
|
||||
* is the max ratio between the size of the active extent and the new extent.
|
||||
*/
|
||||
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
|
||||
extern size_t opt_lg_extent_max_active_fit;
|
||||
|
||||
edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
|
||||
bool zero, bool guarded);
|
||||
edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
|
||||
bool zero, bool guarded);
|
||||
void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
ecache_t *ecache, edata_t *edata);
|
||||
edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
ecache_t *ecache, size_t npages_min);
|
||||
|
||||
void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata);
|
||||
void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
|
||||
edata_t *edata);
|
||||
void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
edata_t *edata);
|
||||
edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
|
||||
bool growing_retained);
|
||||
void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
edata_t *edata);
|
||||
void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
edata_t *edata);
|
||||
bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
|
||||
size_t offset, size_t length);
|
||||
bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
|
||||
size_t offset, size_t length);
|
||||
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
|
||||
size_t offset, size_t length);
|
||||
bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
|
||||
size_t offset, size_t length);
|
||||
edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac,
|
||||
ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b,
|
||||
bool holding_core_locks);
|
||||
bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
||||
edata_t *a, edata_t *b);
|
||||
bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
|
||||
bool commit, bool zero, bool growing_retained);
|
||||
size_t extent_sn_next(pac_t *pac);
|
||||
bool extent_boot(void);
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
extent_neighbor_head_state_mergeable(bool edata_is_head,
|
||||
bool neighbor_is_head, bool forward) {
|
||||
/*
|
||||
* Head states checking: disallow merging if the higher addr extent is a
|
||||
* head extent. This helps preserve first-fit, and more importantly
|
||||
* makes sure there is no merging across arenas.
|
||||
*/
|
||||
if (forward) {
|
||||
if (neighbor_is_head) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (edata_is_head) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
|
||||
extent_pai_t pai, extent_state_t expected_state, bool forward,
|
||||
bool expanding) {
|
||||
edata_t *neighbor = contents.edata;
|
||||
if (neighbor == NULL) {
|
||||
return false;
|
||||
}
|
||||
/* It's not safe to access *neighbor yet; must verify states first. */
|
||||
bool neighbor_is_head = contents.metadata.is_head;
|
||||
if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
|
||||
neighbor_is_head, forward)) {
|
||||
return false;
|
||||
}
|
||||
extent_state_t neighbor_state = contents.metadata.state;
|
||||
if (pai == EXTENT_PAI_PAC) {
|
||||
if (neighbor_state != expected_state) {
|
||||
return false;
|
||||
}
|
||||
/* From this point, it's safe to access *neighbor. */
|
||||
if (!expanding && (edata_committed_get(edata) !=
|
||||
edata_committed_get(neighbor))) {
|
||||
/*
|
||||
* Some platforms (e.g. Windows) require an explicit
|
||||
* commit step (and writing to uncommitted memory is not
|
||||
* allowed).
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (neighbor_state == extent_state_active) {
|
||||
return false;
|
||||
}
|
||||
/* From this point, it's safe to access *neighbor. */
|
||||
}
|
||||
|
||||
assert(edata_pai_get(edata) == pai);
|
||||
if (edata_pai_get(neighbor) != pai) {
|
||||
return false;
|
||||
}
|
||||
if (opt_retain) {
|
||||
assert(edata_arena_ind_get(edata) ==
|
||||
edata_arena_ind_get(neighbor));
|
||||
} else {
|
||||
if (edata_arena_ind_get(edata) !=
|
||||
edata_arena_ind_get(neighbor)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXTENT_H */
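The head-state rule above is a pure predicate, so its effect is easy to spell out: a forward merge is refused when the neighbor (the higher-address extent) is a head, and a backward merge is refused when the extent itself is the head. A small standalone check of that rule, restated outside jemalloc:

#include <assert.h>
#include <stdbool.h>

/* Same logic as extent_neighbor_head_state_mergeable, restated for the test. */
static bool
mergeable(bool edata_is_head, bool neighbor_is_head, bool forward) {
	return forward ? !neighbor_is_head : !edata_is_head;
}

int main(void) {
	/* Forward merge: blocked only when the higher-address neighbor is a head. */
	assert(mergeable(true, false, true));
	assert(!mergeable(false, true, true));
	/* Backward merge: blocked only when the extent itself is a head. */
	assert(mergeable(false, true, false));
	assert(!mergeable(true, false, false));
	return 0;
}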
|
26 BeefRT/JEMalloc/include/jemalloc/internal/extent_dss.h Normal file
@@ -0,0 +1,26 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
|
||||
#define JEMALLOC_INTERNAL_EXTENT_DSS_H
|
||||
|
||||
typedef enum {
|
||||
dss_prec_disabled = 0,
|
||||
dss_prec_primary = 1,
|
||||
dss_prec_secondary = 2,
|
||||
|
||||
dss_prec_limit = 3
|
||||
} dss_prec_t;
|
||||
#define DSS_PREC_DEFAULT dss_prec_secondary
|
||||
#define DSS_DEFAULT "secondary"
|
||||
|
||||
extern const char *dss_prec_names[];
|
||||
|
||||
extern const char *opt_dss;
|
||||
|
||||
dss_prec_t extent_dss_prec_get(void);
|
||||
bool extent_dss_prec_set(dss_prec_t dss_prec);
|
||||
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
|
||||
size_t size, size_t alignment, bool *zero, bool *commit);
|
||||
bool extent_in_dss(void *addr);
|
||||
bool extent_dss_mergeable(void *addr_a, void *addr_b);
|
||||
void extent_dss_boot(void);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
|
10 BeefRT/JEMalloc/include/jemalloc/internal/extent_mmap.h Normal file
@@ -0,0 +1,10 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
|
||||
|
||||
extern bool opt_retain;
|
||||
|
||||
void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
|
||||
bool *zero, bool *commit);
|
||||
bool extent_dalloc_mmap(void *addr, size_t size);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
|
373 BeefRT/JEMalloc/include/jemalloc/internal/fb.h Normal file
@@ -0,0 +1,373 @@
#ifndef JEMALLOC_INTERNAL_FB_H
|
||||
#define JEMALLOC_INTERNAL_FB_H
|
||||
|
||||
/*
|
||||
* The flat bitmap module. This has a larger API relative to the bitmap module
|
||||
* (supporting things like backwards searches, and searching for both set and
|
||||
* unset bits), at the cost of slower operations for very large bitmaps.
|
||||
*
|
||||
* Initialized flat bitmaps start at all-zeros (all bits unset).
|
||||
*/
|
||||
|
||||
typedef unsigned long fb_group_t;
|
||||
#define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3))
|
||||
#define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \
|
||||
+ ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1))
|
||||
|
||||
static inline void
|
||||
fb_init(fb_group_t *fb, size_t nbits) {
|
||||
size_t ngroups = FB_NGROUPS(nbits);
|
||||
memset(fb, 0, ngroups * sizeof(fb_group_t));
|
||||
}
|
||||
|
||||
static inline bool
|
||||
fb_empty(fb_group_t *fb, size_t nbits) {
|
||||
size_t ngroups = FB_NGROUPS(nbits);
|
||||
for (size_t i = 0; i < ngroups; i++) {
|
||||
if (fb[i] != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
fb_full(fb_group_t *fb, size_t nbits) {
|
||||
size_t ngroups = FB_NGROUPS(nbits);
|
||||
size_t trailing_bits = nbits % FB_GROUP_BITS;
|
||||
size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1);
|
||||
for (size_t i = 0; i < limit; i++) {
|
||||
if (fb[i] != ~(fb_group_t)0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (trailing_bits == 0) {
|
||||
return true;
|
||||
}
|
||||
return fb[ngroups - 1] == ((fb_group_t)1 << trailing_bits) - 1;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
fb_get(fb_group_t *fb, size_t nbits, size_t bit) {
|
||||
assert(bit < nbits);
|
||||
size_t group_ind = bit / FB_GROUP_BITS;
|
||||
size_t bit_ind = bit % FB_GROUP_BITS;
|
||||
return (bool)(fb[group_ind] & ((fb_group_t)1 << bit_ind));
|
||||
}
|
||||
|
||||
static inline void
|
||||
fb_set(fb_group_t *fb, size_t nbits, size_t bit) {
|
||||
assert(bit < nbits);
|
||||
size_t group_ind = bit / FB_GROUP_BITS;
|
||||
size_t bit_ind = bit % FB_GROUP_BITS;
|
||||
fb[group_ind] |= ((fb_group_t)1 << bit_ind);
|
||||
}
|
||||
|
||||
static inline void
|
||||
fb_unset(fb_group_t *fb, size_t nbits, size_t bit) {
|
||||
assert(bit < nbits);
|
||||
size_t group_ind = bit / FB_GROUP_BITS;
|
||||
size_t bit_ind = bit % FB_GROUP_BITS;
|
||||
fb[group_ind] &= ~((fb_group_t)1 << bit_ind);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Some implementation details. This visitation function lets us apply a group
|
||||
* visitor to each group in the bitmap (potentially modifying it). The mask
|
||||
* indicates which bits are logically part of the visitation.
|
||||
*/
|
||||
typedef void (*fb_group_visitor_t)(void *ctx, fb_group_t *fb, fb_group_t mask);
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx,
|
||||
size_t start, size_t cnt) {
|
||||
assert(cnt > 0);
|
||||
assert(start + cnt <= nbits);
|
||||
size_t group_ind = start / FB_GROUP_BITS;
|
||||
size_t start_bit_ind = start % FB_GROUP_BITS;
|
||||
/*
|
||||
* The first group is special; it's the only one we don't start writing
|
||||
* to from bit 0.
|
||||
*/
|
||||
size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS
|
||||
? FB_GROUP_BITS - start_bit_ind : cnt);
|
||||
/*
|
||||
* We can basically split affected words into:
|
||||
* - The first group, where we touch only the high bits
|
||||
* - The last group, where we touch only the low bits
|
||||
* - The middle, where we set all the bits to the same thing.
|
||||
* We treat each case individually. The last two could be merged, but
|
||||
* this can lead to bad codegen for those middle words.
|
||||
*/
|
||||
/* First group */
|
||||
fb_group_t mask = ((~(fb_group_t)0)
|
||||
>> (FB_GROUP_BITS - first_group_cnt))
|
||||
<< start_bit_ind;
|
||||
visit(ctx, &fb[group_ind], mask);
|
||||
|
||||
cnt -= first_group_cnt;
|
||||
group_ind++;
|
||||
/* Middle groups */
|
||||
while (cnt > FB_GROUP_BITS) {
|
||||
visit(ctx, &fb[group_ind], ~(fb_group_t)0);
|
||||
cnt -= FB_GROUP_BITS;
|
||||
group_ind++;
|
||||
}
|
||||
/* Last group */
|
||||
if (cnt != 0) {
|
||||
mask = (~(fb_group_t)0) >> (FB_GROUP_BITS - cnt);
|
||||
visit(ctx, &fb[group_ind], mask);
|
||||
}
|
||||
}
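/*
 * Illustrative worked example (not part of the original header): with 64-bit
 * groups, visiting cnt = 70 bits starting at bit 5 decomposes into
 *   - first group:  first_group_cnt = 64 - 5 = 59,
 *                   mask = ((~0UL >> (64 - 59)) << 5), i.e. bits 5..63,
 *   - no full middle group (only 11 bits remain),
 *   - last group:   mask = ~0UL >> (64 - 11), i.e. bits 0..10 of group 1.
 */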
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
fb_assign_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
|
||||
bool val = *(bool *)ctx;
|
||||
if (val) {
|
||||
*fb |= mask;
|
||||
} else {
|
||||
*fb &= ~mask;
|
||||
}
|
||||
}
|
||||
|
||||
/* Sets the cnt bits starting at position start. Must not have a 0 count. */
|
||||
static inline void
|
||||
fb_set_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
|
||||
bool val = true;
|
||||
fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
|
||||
}
|
||||
|
||||
/* Unsets the cnt bits starting at position start. Must not have a 0 count. */
|
||||
static inline void
|
||||
fb_unset_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
|
||||
bool val = false;
|
||||
fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
|
||||
size_t *scount = (size_t *)ctx;
|
||||
*scount += popcount_lu(*fb & mask);
|
||||
}
|
||||
|
||||
/* Finds the number of set bits in the range of length cnt starting at start. */
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
|
||||
size_t scount = 0;
|
||||
fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt);
|
||||
return scount;
|
||||
}
|
||||
|
||||
/* Finds the number of unset bits in the range of length cnt starting at start. */
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
|
||||
size_t scount = fb_scount(fb, nbits, start, cnt);
|
||||
return cnt - scount;
|
||||
}
|
||||
|
||||
/*
|
||||
* An implementation detail; find the first bit at position >= min_bit with the
|
||||
* value val.
|
||||
*
|
||||
* Returns the number of bits in the bitmap if no such bit exists.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE ssize_t
|
||||
fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val,
|
||||
bool forward) {
|
||||
assert(start < nbits);
|
||||
size_t ngroups = FB_NGROUPS(nbits);
|
||||
ssize_t group_ind = start / FB_GROUP_BITS;
|
||||
size_t bit_ind = start % FB_GROUP_BITS;
|
||||
|
||||
fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1);
|
||||
|
||||
fb_group_t group = fb[group_ind];
|
||||
group ^= maybe_invert;
|
||||
if (forward) {
|
||||
/* Only keep ones in bits bit_ind and above. */
|
||||
group &= ~((1LU << bit_ind) - 1);
|
||||
} else {
|
||||
/*
|
||||
* Only keep ones in bits bit_ind and below. You might more
|
||||
* naturally express this as (1 << (bit_ind + 1)) - 1, but
|
||||
* that shifts by an invalid amount if bit_ind is one less than
|
||||
* FB_GROUP_BITS.
|
||||
*/
|
||||
group &= ((2LU << bit_ind) - 1);
|
||||
}
|
||||
ssize_t group_ind_bound = forward ? (ssize_t)ngroups : -1;
|
||||
while (group == 0) {
|
||||
group_ind += forward ? 1 : -1;
|
||||
if (group_ind == group_ind_bound) {
|
||||
return forward ? (ssize_t)nbits : (ssize_t)-1;
|
||||
}
|
||||
group = fb[group_ind];
|
||||
group ^= maybe_invert;
|
||||
}
|
||||
assert(group != 0);
|
||||
size_t bit = forward ? ffs_lu(group) : fls_lu(group);
|
||||
size_t pos = group_ind * FB_GROUP_BITS + bit;
|
||||
/*
|
||||
* The high bits of a partially filled last group are zeros, so if we're
|
||||
* looking for zeros we don't want to report an invalid result.
|
||||
*/
|
||||
if (forward && !val && pos > nbits) {
|
||||
return nbits;
|
||||
}
|
||||
return pos;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find the first unset bit in the bitmap with an index >= min_bit. Returns the
|
||||
* number of bits in the bitmap if no such bit exists.
|
||||
*/
|
||||
static inline size_t
|
||||
fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) {
|
||||
return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false,
|
||||
/* forward */ true);
|
||||
}
|
||||
|
||||
/* The same, but looks for a set bit. */
|
||||
static inline size_t
|
||||
fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) {
|
||||
return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true,
|
||||
/* forward */ true);
|
||||
}
|
||||
|
||||
/*
|
||||
* Find the last unset bit in the bitmap with an index <= max_bit. Returns -1 if
|
||||
* no such bit exists.
|
||||
*/
|
||||
static inline ssize_t
|
||||
fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) {
|
||||
return fb_find_impl(fb, nbits, max_bit, /* val */ false,
|
||||
/* forward */ false);
|
||||
}
|
||||
|
||||
static inline ssize_t
|
||||
fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) {
|
||||
return fb_find_impl(fb, nbits, max_bit, /* val */ true,
|
||||
/* forward */ false);
|
||||
}
|
||||
|
||||
/* Returns whether or not we found a range. */
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
|
||||
size_t *r_len, bool val, bool forward) {
|
||||
assert(start < nbits);
|
||||
ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward);
|
||||
if ((forward && next_range_begin == (ssize_t)nbits)
|
||||
|| (!forward && next_range_begin == (ssize_t)-1)) {
|
||||
return false;
|
||||
}
|
||||
/* Half open range; the set bits are [begin, end). */
|
||||
ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val,
|
||||
forward);
|
||||
if (forward) {
|
||||
*r_begin = next_range_begin;
|
||||
*r_len = next_range_end - next_range_begin;
|
||||
} else {
|
||||
*r_begin = next_range_end + 1;
|
||||
*r_len = next_range_begin - next_range_end;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Used to iterate through ranges of set bits.
|
||||
*
|
||||
* Tries to find the next contiguous sequence of set bits with a first index >=
|
||||
* start. If one exists, puts the earliest bit of the range in *r_begin, its
|
||||
* length in *r_len, and returns true. Otherwise, returns false (without
|
||||
* touching *r_begin or *r_len).
|
||||
*/
|
||||
static inline bool
|
||||
fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
|
||||
size_t *r_len) {
|
||||
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
|
||||
/* val */ true, /* forward */ true);
|
||||
}
|
||||
|
||||
/*
|
||||
* The same as fb_srange_iter, but searches backwards from start rather than
|
||||
* forwards. (The position returned is still the earliest bit in the range).
|
||||
*/
|
||||
static inline bool
|
||||
fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
|
||||
size_t *r_len) {
|
||||
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
|
||||
/* val */ true, /* forward */ false);
|
||||
}
|
||||
|
||||
/* Similar to fb_srange_iter, but searches for unset bits. */
|
||||
static inline bool
|
||||
fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
|
||||
size_t *r_len) {
|
||||
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
|
||||
/* val */ false, /* forward */ true);
|
||||
}
|
||||
|
||||
/* Similar to fb_srange_riter, but searches for unset bits. */
|
||||
static inline bool
|
||||
fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
|
||||
size_t *r_len) {
|
||||
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
|
||||
/* val */ false, /* forward */ false);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
|
||||
size_t begin = 0;
|
||||
size_t longest_len = 0;
|
||||
size_t len = 0;
|
||||
while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin,
|
||||
&len, val, /* forward */ true)) {
|
||||
if (len > longest_len) {
|
||||
longest_len = len;
|
||||
}
|
||||
begin += len;
|
||||
}
|
||||
return longest_len;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
fb_srange_longest(fb_group_t *fb, size_t nbits) {
|
||||
return fb_range_longest_impl(fb, nbits, /* val */ true);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
fb_urange_longest(fb_group_t *fb, size_t nbits) {
|
||||
return fb_range_longest_impl(fb, nbits, /* val */ false);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initializes each bit of dst with the bitwise-AND of the corresponding bits of
|
||||
* src1 and src2. All bitmaps must be the same size.
|
||||
*/
|
||||
static inline void
|
||||
fb_bit_and(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
|
||||
size_t ngroups = FB_NGROUPS(nbits);
|
||||
for (size_t i = 0; i < ngroups; i++) {
|
||||
dst[i] = src1[i] & src2[i];
|
||||
}
|
||||
}
|
||||
|
||||
/* Like fb_bit_and, but with bitwise-OR. */
|
||||
static inline void
|
||||
fb_bit_or(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
|
||||
size_t ngroups = FB_NGROUPS(nbits);
|
||||
for (size_t i = 0; i < ngroups; i++) {
|
||||
dst[i] = src1[i] | src2[i];
|
||||
}
|
||||
}
|
||||
|
||||
/* Initializes dst bit i to the negation of source bit i. */
|
||||
static inline void
|
||||
fb_bit_not(fb_group_t *dst, fb_group_t *src, size_t nbits) {
|
||||
size_t ngroups = FB_NGROUPS(nbits);
|
||||
for (size_t i = 0; i < ngroups; i++) {
|
||||
dst[i] = ~src[i];
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_FB_H */
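fb_srange_iter and friends turn the bitmap into an iterator over maximal runs: find the next bit with the wanted value, then find the next bit with the opposite value to close the half-open range. A standalone single-word sketch of the same idea (hypothetical names, not jemalloc's implementation):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NBITS 64

static size_t
find_from(unsigned long w, size_t start, bool val) {
	for (size_t i = start; i < NBITS; i++) {
		if (((w >> i) & 1UL) == (unsigned long)val) {
			return i;
		}
	}
	return NBITS;
}

/* Report the next run of set bits at or after start as [*begin, *begin + *len). */
static bool
next_set_range(unsigned long w, size_t start, size_t *begin, size_t *len) {
	size_t b = find_from(w, start, true);
	if (b == NBITS) {
		return false;
	}
	size_t e = find_from(w, b, false);
	*begin = b;
	*len = e - b;
	return true;
}

int main(void) {
	unsigned long w = 0x0F0000F0UL;	/* runs at [4,8) and [24,28) */
	size_t begin, len;
	for (size_t pos = 0; next_set_range(w, pos, &begin, &len); pos = begin + len) {
		printf("[%zu, %zu)\n", begin, begin + len);
	}
	return 0;
}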
|
126 BeefRT/JEMalloc/include/jemalloc/internal/fxp.h Normal file
@@ -0,0 +1,126 @@
#ifndef JEMALLOC_INTERNAL_FXP_H
|
||||
#define JEMALLOC_INTERNAL_FXP_H
|
||||
|
||||
/*
|
||||
* A simple fixed-point math implementation, supporting only unsigned values
|
||||
* (with overflow being an error).
|
||||
*
|
||||
* It's not in general safe to use floating point in core code, because various
|
||||
* libc implementations we get linked against can assume that malloc won't touch
|
||||
* floating point state and call it with an unusual calling convention.
|
||||
*/
|
||||
|
||||
/*
|
||||
* High 16 bits are the integer part, low 16 are the fractional part. Or
|
||||
* equivalently, repr == 2**16 * val, where we use "val" to refer to the
|
||||
* (imaginary) fractional representation of the true value.
|
||||
*
|
||||
* We pick a uint32_t here since it's convenient in some places to
|
||||
* double the representation size (i.e. multiplication and division use
|
||||
* 64-bit integer types), and a uint64_t is the largest type we're
|
||||
* certain is available.
|
||||
*/
|
||||
typedef uint32_t fxp_t;
|
||||
#define FXP_INIT_INT(x) ((x) << 16)
|
||||
#define FXP_INIT_PERCENT(pct) (((pct) << 16) / 100)
|
||||
|
||||
/*
|
||||
* Amount of precision used in parsing and printing numbers. The integer bound
|
||||
* is simply because the integer part of the number gets 16 bits, and so is
|
||||
* bounded by 65536.
|
||||
*
|
||||
* We use a lot of precision for the fractional part, even though most of it
|
||||
* gets rounded off; this lets us get exact values for the important special
|
||||
* case where the denominator is a small power of 2 (for instance,
|
||||
* 1/512 == 0.001953125 is exactly representable even with only 16 bits of
|
||||
* fractional precision). We need to left-shift by 16 before dividing by
|
||||
* 10**precision, so we pick precision to be floor(log(2**48)) = 14.
|
||||
*/
|
||||
#define FXP_INTEGER_PART_DIGITS 5
|
||||
#define FXP_FRACTIONAL_PART_DIGITS 14
|
||||
|
||||
/*
|
||||
* In addition to the integer and fractional parts of the number, we need to
|
||||
* include a null character and (possibly) a decimal point.
|
||||
*/
|
||||
#define FXP_BUF_SIZE (FXP_INTEGER_PART_DIGITS + FXP_FRACTIONAL_PART_DIGITS + 2)
|
||||
|
||||
static inline fxp_t
|
||||
fxp_add(fxp_t a, fxp_t b) {
|
||||
return a + b;
|
||||
}
|
||||
|
||||
static inline fxp_t
|
||||
fxp_sub(fxp_t a, fxp_t b) {
|
||||
assert(a >= b);
|
||||
return a - b;
|
||||
}
|
||||
|
||||
static inline fxp_t
|
||||
fxp_mul(fxp_t a, fxp_t b) {
|
||||
uint64_t unshifted = (uint64_t)a * (uint64_t)b;
|
||||
/*
|
||||
* Unshifted is (a.val * 2**16) * (b.val * 2**16)
|
||||
* == (a.val * b.val) * 2**32, but we want
|
||||
* (a.val * b.val) * 2 ** 16.
|
||||
*/
|
||||
return (uint32_t)(unshifted >> 16);
|
||||
}
|
||||
|
||||
static inline fxp_t
|
||||
fxp_div(fxp_t a, fxp_t b) {
|
||||
assert(b != 0);
|
||||
uint64_t unshifted = ((uint64_t)a << 32) / (uint64_t)b;
|
||||
/*
|
||||
* Unshifted is (a.val * 2**16) * (2**32) / (b.val * 2**16)
|
||||
* == (a.val / b.val) * (2 ** 32), which again corresponds to a right
|
||||
* shift of 16.
|
||||
*/
|
||||
return (uint32_t)(unshifted >> 16);
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
fxp_round_down(fxp_t a) {
|
||||
return a >> 16;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
fxp_round_nearest(fxp_t a) {
|
||||
uint32_t fractional_part = (a & ((1U << 16) - 1));
|
||||
uint32_t increment = (uint32_t)(fractional_part >= (1U << 15));
|
||||
return (a >> 16) + increment;
|
||||
}
|
||||
|
||||
/*
|
||||
* Approximately computes x * frac, without the size limitations that would be
|
||||
* imposed by converting x to an fxp_t.
|
||||
*/
|
||||
static inline size_t
|
||||
fxp_mul_frac(size_t x_orig, fxp_t frac) {
|
||||
assert(frac <= (1U << 16));
|
||||
/*
|
||||
* Work around an over-enthusiastic warning about type limits below (on
|
||||
* 32-bit platforms, a size_t is always less than 1ULL << 48).
|
||||
*/
|
||||
uint64_t x = (uint64_t)x_orig;
|
||||
/*
|
||||
* If we can guarantee no overflow, multiply first before shifting, to
|
||||
* preserve some precision. Otherwise, shift first and then multiply.
|
||||
* In the latter case, we only lose the low 16 bits of a 48-bit number,
|
||||
* so we're still accurate to within 1/2**32.
|
||||
*/
|
||||
if (x < (1ULL << 48)) {
|
||||
return (size_t)((x * frac) >> 16);
|
||||
} else {
|
||||
return (size_t)((x >> 16) * (uint64_t)frac);
|
||||
}
|
||||
}
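/*
 * Illustrative note (not part of the original header): with frac = 1U << 15
 * (i.e. 0.5), a small x such as 1000 takes the precise branch:
 * (1000 * 32768) >> 16 == 500. An x at or above 2**48 is shifted right by 16
 * first and only then multiplied, giving up the low 16 bits for overflow
 * safety.
 */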
|
||||
|
||||
/*
|
||||
* Returns true on error. Otherwise, returns false and updates *ptr to point to
|
||||
* the first character not parsed (because it wasn't a digit).
|
||||
*/
|
||||
bool fxp_parse(fxp_t *a, const char *ptr, char **end);
|
||||
void fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_FXP_H */
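Because the representation is simply repr == val * 2**16, the arithmetic above can be checked with plain integers. A minimal standalone sketch mirroring fxp_mul and fxp_div outside jemalloc (the fx_* names are hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t fx_t;			/* 16.16 fixed point, as in fxp_t */
#define FX_FROM_INT(x)     ((fx_t)(x) << 16)
#define FX_FROM_PERCENT(p) (((fx_t)(p) << 16) / 100)

static fx_t fx_mul(fx_t a, fx_t b) { return (fx_t)(((uint64_t)a * b) >> 16); }
static fx_t fx_div(fx_t a, fx_t b) { return (fx_t)((((uint64_t)a << 32) / b) >> 16); }

int main(void) {
	fx_t three_quarters = FX_FROM_PERCENT(75);	/* 0.75 -> repr 49152 */
	fx_t six = FX_FROM_INT(6);
	/* 6 * 0.75 == 4.5 and 6 / 4 == 1.5, exactly representable in 16.16. */
	assert(fx_mul(six, three_quarters) == FX_FROM_INT(4) + FX_FROM_INT(1) / 2);
	assert(fx_div(six, FX_FROM_INT(4)) == FX_FROM_INT(1) + FX_FROM_INT(1) / 2);
	printf("0.75 is stored as %u\n", three_quarters);
	return 0;
}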
|
320 BeefRT/JEMalloc/include/jemalloc/internal/hash.h Normal file
@@ -0,0 +1,320 @@
#ifndef JEMALLOC_INTERNAL_HASH_H
|
||||
#define JEMALLOC_INTERNAL_HASH_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
/*
|
||||
* The following hash function is based on MurmurHash3, placed into the public
|
||||
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
|
||||
* details.
|
||||
*/
|
||||
|
||||
/******************************************************************************/
|
||||
/* Internal implementation. */
|
||||
static inline uint32_t
|
||||
hash_rotl_32(uint32_t x, int8_t r) {
|
||||
return ((x << r) | (x >> (32 - r)));
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
hash_rotl_64(uint64_t x, int8_t r) {
|
||||
return ((x << r) | (x >> (64 - r)));
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
hash_get_block_32(const uint32_t *p, int i) {
|
||||
/* Handle unaligned read. */
|
||||
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
|
||||
uint32_t ret;
|
||||
|
||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return p[i];
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
hash_get_block_64(const uint64_t *p, int i) {
|
||||
/* Handle unaligned read. */
|
||||
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
|
||||
uint64_t ret;
|
||||
|
||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return p[i];
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
hash_fmix_32(uint32_t h) {
|
||||
h ^= h >> 16;
|
||||
h *= 0x85ebca6b;
|
||||
h ^= h >> 13;
|
||||
h *= 0xc2b2ae35;
|
||||
h ^= h >> 16;
|
||||
|
||||
return h;
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
hash_fmix_64(uint64_t k) {
|
||||
k ^= k >> 33;
|
||||
k *= KQU(0xff51afd7ed558ccd);
|
||||
k ^= k >> 33;
|
||||
k *= KQU(0xc4ceb9fe1a85ec53);
|
||||
k ^= k >> 33;
|
||||
|
||||
return k;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
hash_x86_32(const void *key, int len, uint32_t seed) {
|
||||
const uint8_t *data = (const uint8_t *) key;
|
||||
const int nblocks = len / 4;
|
||||
|
||||
uint32_t h1 = seed;
|
||||
|
||||
const uint32_t c1 = 0xcc9e2d51;
|
||||
const uint32_t c2 = 0x1b873593;
|
||||
|
||||
/* body */
|
||||
{
|
||||
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
|
||||
int i;
|
||||
|
||||
for (i = -nblocks; i; i++) {
|
||||
uint32_t k1 = hash_get_block_32(blocks, i);
|
||||
|
||||
k1 *= c1;
|
||||
k1 = hash_rotl_32(k1, 15);
|
||||
k1 *= c2;
|
||||
|
||||
h1 ^= k1;
|
||||
h1 = hash_rotl_32(h1, 13);
|
||||
h1 = h1*5 + 0xe6546b64;
|
||||
}
|
||||
}
|
||||
|
||||
/* tail */
|
||||
{
|
||||
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
|
||||
|
||||
uint32_t k1 = 0;
|
||||
|
||||
switch (len & 3) {
|
||||
case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH;
|
||||
case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH;
|
||||
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
|
||||
k1 *= c2; h1 ^= k1;
|
||||
}
|
||||
}
|
||||
|
||||
/* finalization */
|
||||
h1 ^= len;
|
||||
|
||||
h1 = hash_fmix_32(h1);
|
||||
|
||||
return h1;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hash_x86_128(const void *key, const int len, uint32_t seed,
|
||||
uint64_t r_out[2]) {
|
||||
const uint8_t * data = (const uint8_t *) key;
|
||||
const int nblocks = len / 16;
|
||||
|
||||
uint32_t h1 = seed;
|
||||
uint32_t h2 = seed;
|
||||
uint32_t h3 = seed;
|
||||
uint32_t h4 = seed;
|
||||
|
||||
const uint32_t c1 = 0x239b961b;
|
||||
const uint32_t c2 = 0xab0e9789;
|
||||
const uint32_t c3 = 0x38b34ae5;
|
||||
const uint32_t c4 = 0xa1e38b93;
|
||||
|
||||
/* body */
|
||||
{
|
||||
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
|
||||
int i;
|
||||
|
||||
for (i = -nblocks; i; i++) {
|
||||
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
|
||||
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
|
||||
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
|
||||
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
|
||||
|
||||
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
|
||||
|
||||
h1 = hash_rotl_32(h1, 19); h1 += h2;
|
||||
h1 = h1*5 + 0x561ccd1b;
|
||||
|
||||
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
|
||||
|
||||
h2 = hash_rotl_32(h2, 17); h2 += h3;
|
||||
h2 = h2*5 + 0x0bcaa747;
|
||||
|
||||
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
|
||||
|
||||
h3 = hash_rotl_32(h3, 15); h3 += h4;
|
||||
h3 = h3*5 + 0x96cd1c35;
|
||||
|
||||
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
|
||||
|
||||
h4 = hash_rotl_32(h4, 13); h4 += h1;
|
||||
h4 = h4*5 + 0x32ac3b17;
|
||||
}
|
||||
}
|
||||
|
||||
/* tail */
|
||||
{
|
||||
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
|
||||
uint32_t k1 = 0;
|
||||
uint32_t k2 = 0;
|
||||
uint32_t k3 = 0;
|
||||
uint32_t k4 = 0;
|
||||
|
||||
switch (len & 15) {
|
||||
case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH;
|
||||
case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH;
|
||||
case 13: k4 ^= tail[12] << 0;
|
||||
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
|
||||
JEMALLOC_FALLTHROUGH;
|
||||
case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH;
|
||||
case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH;
|
||||
case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH;
|
||||
case 9: k3 ^= tail[ 8] << 0;
|
||||
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
|
||||
JEMALLOC_FALLTHROUGH;
|
||||
case 8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH;
|
||||
case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH;
|
||||
case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH;
|
||||
case 5: k2 ^= tail[ 4] << 0;
|
||||
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
|
||||
JEMALLOC_FALLTHROUGH;
|
||||
case 4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH;
|
||||
case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH;
|
||||
case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH;
|
||||
case 1: k1 ^= tail[ 0] << 0;
|
||||
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* finalization */
|
||||
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
|
||||
|
||||
h1 += h2; h1 += h3; h1 += h4;
|
||||
h2 += h1; h3 += h1; h4 += h1;
|
||||
|
||||
h1 = hash_fmix_32(h1);
|
||||
h2 = hash_fmix_32(h2);
|
||||
h3 = hash_fmix_32(h3);
|
||||
h4 = hash_fmix_32(h4);
|
||||
|
||||
h1 += h2; h1 += h3; h1 += h4;
|
||||
h2 += h1; h3 += h1; h4 += h1;
|
||||
|
||||
r_out[0] = (((uint64_t) h2) << 32) | h1;
|
||||
r_out[1] = (((uint64_t) h4) << 32) | h3;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hash_x64_128(const void *key, const int len, const uint32_t seed,
|
||||
uint64_t r_out[2]) {
|
||||
const uint8_t *data = (const uint8_t *) key;
|
||||
const int nblocks = len / 16;
|
||||
|
||||
uint64_t h1 = seed;
|
||||
uint64_t h2 = seed;
|
||||
|
||||
const uint64_t c1 = KQU(0x87c37b91114253d5);
|
||||
const uint64_t c2 = KQU(0x4cf5ad432745937f);
|
||||
|
||||
/* body */
|
||||
{
|
||||
const uint64_t *blocks = (const uint64_t *) (data);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nblocks; i++) {
|
||||
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
|
||||
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
|
||||
|
||||
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
|
||||
|
||||
h1 = hash_rotl_64(h1, 27); h1 += h2;
|
||||
h1 = h1*5 + 0x52dce729;
|
||||
|
||||
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
|
||||
|
||||
h2 = hash_rotl_64(h2, 31); h2 += h1;
|
||||
h2 = h2*5 + 0x38495ab5;
|
||||
}
|
||||
}
|
||||
|
||||
/* tail */
|
||||
{
|
||||
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
|
||||
uint64_t k1 = 0;
|
||||
uint64_t k2 = 0;
|
||||
|
||||
switch (len & 15) {
|
||||
case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH;
|
||||
case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH;
|
||||
case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH;
|
||||
case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH;
|
||||
case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH;
|
||||
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH;
|
||||
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
|
||||
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
|
||||
JEMALLOC_FALLTHROUGH;
|
||||
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH;
|
||||
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH;
|
||||
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH;
|
||||
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH;
|
||||
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH;
|
||||
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH;
|
||||
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH;
|
||||
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
|
||||
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* finalization */
|
||||
h1 ^= len; h2 ^= len;
|
||||
|
||||
h1 += h2;
|
||||
h2 += h1;
|
||||
|
||||
h1 = hash_fmix_64(h1);
|
||||
h2 = hash_fmix_64(h2);
|
||||
|
||||
h1 += h2;
|
||||
h2 += h1;
|
||||
|
||||
r_out[0] = h1;
|
||||
r_out[1] = h2;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
/* API. */
|
||||
static inline void
|
||||
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
|
||||
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
|
||||
|
||||
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
|
||||
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
|
||||
#else
|
||||
{
|
||||
uint64_t hashes[2];
|
||||
hash_x86_128(key, (int)len, seed, hashes);
|
||||
r_hash[0] = (size_t)hashes[0];
|
||||
r_hash[1] = (size_t)hashes[1];
|
||||
}
|
||||
#endif
|
||||
}
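/*
 * Illustrative only: a minimal sketch of how the hash() wrapper above is
 * meant to be called.  The key, length, and seed here are hypothetical; the
 * only constraint the API imposes is len <= INT_MAX.  On 64-bit
 * little-endian builds this routes to hash_x64_128(), otherwise to
 * hash_x86_128(), but callers simply consume the two size_t results.
 */
static inline size_t
hash_example_combine(const char *key, size_t len) {
	size_t r_hash[2];

	hash(key, len, 0x94122f33U, r_hash);	/* seed chosen arbitrarily */
	/*
	 * Consumers may use r_hash[0] directly and keep r_hash[1] as a
	 * secondary value (e.g. for double hashing); xor-folding is one
	 * simple way to collapse the pair into a single table index.
	 */
	return r_hash[0] ^ r_hash[1];
}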
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_HASH_H */
|
163
BeefRT/JEMalloc/include/jemalloc/internal/hook.h
Normal file
|
@ -0,0 +1,163 @@
|
|||
#ifndef JEMALLOC_INTERNAL_HOOK_H
|
||||
#define JEMALLOC_INTERNAL_HOOK_H
|
||||
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
|
||||
/*
|
||||
* This API is *extremely* experimental, and may get ripped out, changed in API-
|
||||
* and ABI-incompatible ways, be insufficiently or incorrectly documented, etc.
|
||||
*
|
||||
* It allows hooking the stateful parts of the API to see changes as they
|
||||
* happen.
|
||||
*
|
||||
* Allocation hooks are called after the allocation is done, free hooks are
|
||||
* called before the free is done, and expand hooks are called after the
|
||||
* allocation is expanded.
|
||||
*
|
||||
* For realloc and rallocx, if the expansion happens in place, the expansion
|
||||
* hook is called. If it is moved, then the alloc hook is called on the new
|
||||
* location, and then the free hook is called on the old location (i.e. both
|
||||
* hooks are invoked in between the alloc and the dalloc).
|
||||
*
|
||||
* If we return NULL from OOM, then usize might not be trustworthy. Calling
|
||||
* realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0)
|
||||
* only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0),
|
||||
* and only calls the alloc hook).
|
||||
*
|
||||
* Reentrancy:
|
||||
* Reentrancy is guarded against from within the hook implementation. If you
|
||||
* call allocator functions from within a hook, the hooks will not be invoked
|
||||
* again.
|
||||
* Threading:
|
||||
* The installation of a hook synchronizes with all its uses. If you can
|
||||
* prove the installation of a hook happens-before a jemalloc entry point,
|
||||
* then the hook will get invoked (unless there's a racing removal).
|
||||
*
|
||||
* Hook insertion appears to be atomic at a per-thread level (i.e. if a thread
|
||||
* allocates and has the alloc hook invoked, then a subsequent free on the
|
||||
* same thread will also have the free hook invoked).
|
||||
*
|
||||
* The *removal* of a hook does *not* block until all threads are done with
|
||||
* the hook. Hook authors have to be resilient to this, and need some
|
||||
* out-of-band mechanism for cleaning up any dynamically allocated memory
|
||||
* associated with their hook.
|
||||
* Ordering:
|
||||
* Order of hook execution is unspecified, and may be different than insertion
|
||||
* order.
|
||||
*/
|
||||
|
||||
#define HOOK_MAX 4
|
||||
|
||||
enum hook_alloc_e {
|
||||
hook_alloc_malloc,
|
||||
hook_alloc_posix_memalign,
|
||||
hook_alloc_aligned_alloc,
|
||||
hook_alloc_calloc,
|
||||
hook_alloc_memalign,
|
||||
hook_alloc_valloc,
|
||||
hook_alloc_mallocx,
|
||||
|
||||
/* The reallocating functions have both alloc and dalloc variants */
|
||||
hook_alloc_realloc,
|
||||
hook_alloc_rallocx,
|
||||
};
|
||||
/*
|
||||
* We put the enum typedef after the enum, since this file may get included by
|
||||
* jemalloc_cpp.cpp, and C++ disallows enum forward declarations.
|
||||
*/
|
||||
typedef enum hook_alloc_e hook_alloc_t;
|
||||
|
||||
enum hook_dalloc_e {
|
||||
hook_dalloc_free,
|
||||
hook_dalloc_dallocx,
|
||||
hook_dalloc_sdallocx,
|
||||
|
||||
/*
|
||||
* The dalloc halves of reallocation (not called if in-place expansion
|
||||
* happens).
|
||||
*/
|
||||
hook_dalloc_realloc,
|
||||
hook_dalloc_rallocx,
|
||||
};
|
||||
typedef enum hook_dalloc_e hook_dalloc_t;
|
||||
|
||||
|
||||
enum hook_expand_e {
|
||||
hook_expand_realloc,
|
||||
hook_expand_rallocx,
|
||||
hook_expand_xallocx,
|
||||
};
|
||||
typedef enum hook_expand_e hook_expand_t;
|
||||
|
||||
typedef void (*hook_alloc)(
|
||||
void *extra, hook_alloc_t type, void *result, uintptr_t result_raw,
|
||||
uintptr_t args_raw[3]);
|
||||
|
||||
typedef void (*hook_dalloc)(
|
||||
void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
|
||||
|
||||
typedef void (*hook_expand)(
|
||||
void *extra, hook_expand_t type, void *address, size_t old_usize,
|
||||
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
|
||||
|
||||
typedef struct hooks_s hooks_t;
|
||||
struct hooks_s {
|
||||
hook_alloc alloc_hook;
|
||||
hook_dalloc dalloc_hook;
|
||||
hook_expand expand_hook;
|
||||
void *extra;
|
||||
};
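/*
 * Illustrative only: a hypothetical hook set matching the typedefs above.
 * The function names are assumptions for this sketch; a real hook must also
 * tolerate being removed while other threads are still using it (see the
 * comment at the top of this file).  The extra pointer is handed back
 * verbatim as the first argument of every callback.
 */
static void
example_alloc_hook(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	(void)extra; (void)type; (void)result_raw; (void)args_raw;
	/* Called after the allocation has happened; result may be NULL on OOM. */
}

static void
example_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
    uintptr_t args_raw[3]) {
	(void)extra; (void)type; (void)address; (void)args_raw;
	/* Called before the memory is actually freed. */
}

static void
example_expand_hook(void *extra, hook_expand_t type, void *address,
    size_t old_usize, size_t new_usize, uintptr_t result_raw,
    uintptr_t args_raw[4]) {
	(void)extra; (void)type; (void)address; (void)old_usize;
	(void)new_usize; (void)result_raw; (void)args_raw;
	/* Called after an in-place expansion (realloc/rallocx/xallocx). */
}

static hooks_t example_hooks = {
	example_alloc_hook,
	example_dalloc_hook,
	example_expand_hook,
	NULL	/* extra */
};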
|
||||
|
||||
/*
|
||||
* Begin implementation details; everything above this point might one day live
|
||||
* in a public API. Everything below this point never will.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The realloc pathways haven't gotten any refactoring love in a while, and it's
|
||||
* fairly difficult to pass information from the entry point to the hooks. We
|
||||
* put the information the hooks will need into a struct to encapsulate
|
||||
* everything.
|
||||
*
|
||||
* Many of these pathways are force-inlined, so that the compiler can avoid
|
||||
* materializing this struct until we hit an extern arena function. For fairly
|
||||
* goofy reasons, *many* of the realloc paths hit an extern arena function.
|
||||
* These paths are cold enough that it doesn't matter; eventually, we should
|
||||
* rewrite the realloc code to make the expand-in-place and the
|
||||
* free-then-realloc paths more orthogonal, at which point we don't need to
|
||||
* spread the hook logic all over the place.
|
||||
*/
|
||||
typedef struct hook_ralloc_args_s hook_ralloc_args_t;
|
||||
struct hook_ralloc_args_s {
|
||||
/* I.e. as opposed to rallocx. */
|
||||
bool is_realloc;
|
||||
/*
|
||||
* The expand hook takes 4 arguments, even if only 3 are actually used;
|
||||
* we add an extra one in case the user decides to memcpy without
|
||||
* looking too closely at the hooked function.
|
||||
*/
|
||||
uintptr_t args[4];
|
||||
};
|
||||
|
||||
bool hook_boot();

/*
 * Returns an opaque handle to be used when removing the hook. NULL means that
 * we couldn't install the hook.
 */
void *hook_install(tsdn_t *tsdn, hooks_t *hooks);
|
||||
/* Uninstalls the hook with the handle previously returned from hook_install. */
|
||||
void hook_remove(tsdn_t *tsdn, void *opaque);
|
||||
|
||||
/* Hooks */
|
||||
|
||||
void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
|
||||
uintptr_t args_raw[3]);
|
||||
|
||||
void hook_invoke_dalloc(hook_dalloc_t type, void *address,
|
||||
uintptr_t args_raw[3]);
|
||||
|
||||
void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
|
||||
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_HOOK_H */
|
182
BeefRT/JEMalloc/include/jemalloc/internal/hpa.h
Normal file
|
@ -0,0 +1,182 @@
|
|||
#ifndef JEMALLOC_INTERNAL_HPA_H
|
||||
#define JEMALLOC_INTERNAL_HPA_H
|
||||
|
||||
#include "jemalloc/internal/exp_grow.h"
|
||||
#include "jemalloc/internal/hpa_hooks.h"
|
||||
#include "jemalloc/internal/hpa_opts.h"
|
||||
#include "jemalloc/internal/pai.h"
|
||||
#include "jemalloc/internal/psset.h"
|
||||
|
||||
typedef struct hpa_central_s hpa_central_t;
|
||||
struct hpa_central_s {
|
||||
/*
|
||||
* The mutex guarding most of the operations on the central data
|
||||
* structure.
|
||||
*/
|
||||
malloc_mutex_t mtx;
|
||||
/*
|
||||
* Guards expansion of eden. We separate this from the regular mutex so
|
||||
* that cheaper operations can still continue while we're doing the OS
|
||||
* call.
|
||||
*/
|
||||
malloc_mutex_t grow_mtx;
|
||||
/*
|
||||
* Either NULL (if empty), or a hugepage-aligned region spanning some
* integer number of hugepages. We carve them off one at a
* time to satisfy new pageslab requests.
|
||||
*
|
||||
* Guarded by grow_mtx.
|
||||
*/
|
||||
void *eden;
|
||||
size_t eden_len;
|
||||
/* Source for metadata. */
|
||||
base_t *base;
|
||||
/* Number of grow operations done on this hpa_central_t. */
|
||||
uint64_t age_counter;
|
||||
|
||||
/* The HPA hooks. */
|
||||
hpa_hooks_t hooks;
|
||||
};
|
||||
|
||||
typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
|
||||
struct hpa_shard_nonderived_stats_s {
|
||||
/*
|
||||
* The number of times we've purged within a hugepage.
|
||||
*
|
||||
* Guarded by mtx.
|
||||
*/
|
||||
uint64_t npurge_passes;
|
||||
/*
|
||||
* The number of individual purge calls we perform (which should always
|
||||
* be bigger than npurge_passes, since each pass purges at least one
|
||||
* extent within a hugepage).
|
||||
*
|
||||
* Guarded by mtx.
|
||||
*/
|
||||
uint64_t npurges;
|
||||
|
||||
/*
|
||||
* The number of times we've hugified a pageslab.
|
||||
*
|
||||
* Guarded by mtx.
|
||||
*/
|
||||
uint64_t nhugifies;
|
||||
/*
|
||||
* The number of times we've dehugified a pageslab.
|
||||
*
|
||||
* Guarded by mtx.
|
||||
*/
|
||||
uint64_t ndehugifies;
|
||||
};
|
||||
|
||||
/* Completely derived; only used by CTL. */
|
||||
typedef struct hpa_shard_stats_s hpa_shard_stats_t;
|
||||
struct hpa_shard_stats_s {
|
||||
psset_stats_t psset_stats;
|
||||
hpa_shard_nonderived_stats_t nonderived_stats;
|
||||
};
|
||||
|
||||
typedef struct hpa_shard_s hpa_shard_t;
|
||||
struct hpa_shard_s {
|
||||
/*
|
||||
* pai must be the first member; we cast from a pointer to it to a
|
||||
* pointer to the hpa_shard_t.
|
||||
*/
|
||||
pai_t pai;
|
||||
|
||||
/* The central allocator we get our hugepages from. */
|
||||
hpa_central_t *central;
|
||||
/* Protects most of this shard's state. */
|
||||
malloc_mutex_t mtx;
|
||||
/*
|
||||
* Guards the shard's access to the central allocator (preventing
|
||||
* multiple threads operating on this shard from accessing the central
|
||||
* allocator).
|
||||
*/
|
||||
malloc_mutex_t grow_mtx;
|
||||
/* The base metadata allocator. */
|
||||
base_t *base;
|
||||
|
||||
/*
|
||||
* This edata cache is the one we use when allocating a small extent
|
||||
* from a pageslab. The pageslab itself comes from the centralized
|
||||
* allocator, and so will use its edata_cache.
|
||||
*/
|
||||
edata_cache_fast_t ecf;
|
||||
|
||||
psset_t psset;
|
||||
|
||||
/*
|
||||
* How many grow operations have occurred.
|
||||
*
|
||||
* Guarded by grow_mtx.
|
||||
*/
|
||||
uint64_t age_counter;
|
||||
|
||||
/* The arena ind we're associated with. */
|
||||
unsigned ind;
|
||||
|
||||
/*
|
||||
* Our emap. This is just a cache of the emap pointer in the associated
|
||||
* hpa_central.
|
||||
*/
|
||||
emap_t *emap;
|
||||
|
||||
/* The configuration choices for this hpa shard. */
|
||||
hpa_shard_opts_t opts;
|
||||
|
||||
/*
|
||||
* How many pages have we started but not yet finished purging in this
|
||||
* hpa shard.
|
||||
*/
|
||||
size_t npending_purge;
|
||||
|
||||
/*
|
||||
* Those stats which are copied directly into the CTL-centric hpa shard
|
||||
* stats.
|
||||
*/
|
||||
hpa_shard_nonderived_stats_t stats;
|
||||
|
||||
/*
|
||||
* Last time we performed purge on this shard.
|
||||
*/
|
||||
nstime_t last_purge;
|
||||
};
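/*
 * Illustrative only: because pai is the first member, the PAI vtable
 * callbacks can recover the shard from the pai_t pointer they are handed.
 * The helper name below is hypothetical; the real callbacks presumably do
 * the equivalent cast inline.
 */
static inline hpa_shard_t *
hpa_shard_from_pai_example(pai_t *self) {
	/* Valid only because pai is the first member of hpa_shard_t. */
	return (hpa_shard_t *)self;
}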
|
||||
|
||||
/*
|
||||
* Whether or not the HPA can be used given the current configuration. This is
|
||||
* not necessarily a guarantee that it backs its allocations with hugepages,
|
||||
* just that it can function properly given the system it's running on.
|
||||
*/
|
||||
bool hpa_supported();
|
||||
bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
|
||||
bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
|
||||
base_t *base, edata_cache_t *edata_cache, unsigned ind,
|
||||
const hpa_shard_opts_t *opts);
|
||||
|
||||
void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
|
||||
void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
|
||||
hpa_shard_stats_t *dst);
|
||||
|
||||
/*
|
||||
* Notify the shard that we won't use it for allocations much longer. Due to
|
||||
* the possibility of races, we don't actually prevent allocations; just flush
|
||||
* and disable the embedded edata_cache_fast.
|
||||
*/
|
||||
void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
|
||||
void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
|
||||
|
||||
void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
|
||||
bool deferral_allowed);
|
||||
void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
|
||||
|
||||
/*
|
||||
* We share the fork ordering with the PA and arena prefork handling; that's why
|
||||
* these are 3 and 4 rather than 0 and 1.
|
||||
*/
|
||||
void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
|
||||
void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
|
||||
void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
|
||||
void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_HPA_H */
|
17
BeefRT/JEMalloc/include/jemalloc/internal/hpa_hooks.h
Normal file
|
@ -0,0 +1,17 @@
|
|||
#ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H
|
||||
#define JEMALLOC_INTERNAL_HPA_HOOKS_H
|
||||
|
||||
typedef struct hpa_hooks_s hpa_hooks_t;
|
||||
struct hpa_hooks_s {
|
||||
void *(*map)(size_t size);
|
||||
void (*unmap)(void *ptr, size_t size);
|
||||
void (*purge)(void *ptr, size_t size);
|
||||
void (*hugify)(void *ptr, size_t size);
|
||||
void (*dehugify)(void *ptr, size_t size);
|
||||
void (*curtime)(nstime_t *r_time, bool first_reading);
|
||||
uint64_t (*ms_since)(nstime_t *r_time);
|
||||
};
|
||||
|
||||
extern hpa_hooks_t hpa_hooks_default;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */
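/*
 * Illustrative only: a sketch of custom hpa_hooks_t callbacks backed by
 * plain POSIX calls, assuming <sys/mman.h> is available.  The function
 * names are hypothetical, error handling and alignment concerns are elided,
 * and a full hpa_hooks_t would also supply hugify, dehugify, curtime, and
 * ms_since callbacks; the library's own defaults are exported above as
 * hpa_hooks_default.
 */
static void *
example_map(size_t size) {
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void
example_unmap(void *ptr, size_t size) {
	munmap(ptr, size);
}

static void
example_purge(void *ptr, size_t size) {
	madvise(ptr, size, MADV_DONTNEED);
}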
|
74
BeefRT/JEMalloc/include/jemalloc/internal/hpa_opts.h
Normal file
|
@ -0,0 +1,74 @@
|
|||
#ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
|
||||
#define JEMALLOC_INTERNAL_HPA_OPTS_H
|
||||
|
||||
#include "jemalloc/internal/fxp.h"
|
||||
|
||||
/*
|
||||
* This file is morally part of hpa.h, but is split out for header-ordering
|
||||
* reasons.
|
||||
*/
|
||||
|
||||
typedef struct hpa_shard_opts_s hpa_shard_opts_t;
|
||||
struct hpa_shard_opts_s {
|
||||
/*
|
||||
* The largest size we'll allocate out of the shard. For those
|
||||
* allocations refused, the caller (in practice, the PA module) will
|
||||
* fall back to the more general (for now) PAC, which can always handle
|
||||
* any allocation request.
|
||||
*/
|
||||
size_t slab_max_alloc;
|
||||
|
||||
/*
|
||||
* When the number of active bytes in a hugepage is >=
|
||||
* hugification_threshold, we force hugify it.
|
||||
*/
|
||||
size_t hugification_threshold;
|
||||
|
||||
/*
|
||||
* The HPA purges whenever the number of pages exceeds dirty_mult *
|
||||
* active_pages. This may be set to (fxp_t)-1 to disable purging.
|
||||
*/
|
||||
fxp_t dirty_mult;
|
||||
|
||||
/*
|
||||
* Whether or not the PAI methods are allowed to defer work to a
|
||||
* subsequent hpa_shard_do_deferred_work() call. Practically, this
|
||||
* corresponds to background threads being enabled. We track this
|
||||
* ourselves for encapsulation purposes.
|
||||
*/
|
||||
bool deferral_allowed;
|
||||
|
||||
/*
|
||||
* How long a hugepage has to be a hugification candidate before it will
|
||||
* actually get hugified.
|
||||
*/
|
||||
uint64_t hugify_delay_ms;
|
||||
|
||||
/*
|
||||
* Minimum amount of time between purges.
|
||||
*/
|
||||
uint64_t min_purge_interval_ms;
|
||||
};
|
||||
|
||||
#define HPA_SHARD_OPTS_DEFAULT { \
|
||||
/* slab_max_alloc */ \
|
||||
64 * 1024, \
|
||||
/* hugification_threshold */ \
|
||||
HUGEPAGE * 95 / 100, \
|
||||
/* dirty_mult */ \
|
||||
FXP_INIT_PERCENT(25), \
|
||||
/* \
|
||||
* deferral_allowed \
|
||||
* \
|
||||
* Really, this is always set by the arena during creation \
|
||||
* or by an hpa_shard_set_deferral_allowed call, so the value \
|
||||
* we put here doesn't matter. \
|
||||
*/ \
|
||||
false, \
|
||||
/* hugify_delay_ms */ \
|
||||
10 * 1000, \
|
||||
/* min_purge_interval_ms */ \
|
||||
5 * 1000 \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */
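/*
 * Illustrative only: the same defaults as HPA_SHARD_OPTS_DEFAULT above,
 * written with designated initializers so the positional values are easier
 * to read.  With LG_HUGEPAGE == 21 (a 2 MiB hugepage), the 95% hugification
 * threshold works out to 2097152 * 95 / 100 == 1992294 bytes.
 */
static const hpa_shard_opts_t hpa_shard_opts_default_example = {
	.slab_max_alloc = 64 * 1024,
	.hugification_threshold = HUGEPAGE * 95 / 100,
	.dirty_mult = FXP_INIT_PERCENT(25),
	.deferral_allowed = false,	/* overwritten at arena creation */
	.hugify_delay_ms = 10 * 1000,
	.min_purge_interval_ms = 5 * 1000,
};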
|
413
BeefRT/JEMalloc/include/jemalloc/internal/hpdata.h
Normal file
|
@ -0,0 +1,413 @@
|
|||
#ifndef JEMALLOC_INTERNAL_HPDATA_H
|
||||
#define JEMALLOC_INTERNAL_HPDATA_H
|
||||
|
||||
#include "jemalloc/internal/fb.h"
|
||||
#include "jemalloc/internal/ph.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/typed_list.h"
|
||||
|
||||
/*
|
||||
* The metadata representation we use for extents in hugepages. While the PAC
|
||||
* uses the edata_t to represent both active and inactive extents, the HP only
|
||||
* uses the edata_t for active ones; instead, inactive extent state is tracked
|
||||
* within hpdata associated with the enclosing hugepage-sized, hugepage-aligned
|
||||
* region of virtual address space.
|
||||
*
|
||||
* An hpdata need not be "truly" backed by a hugepage (which is not necessarily
|
||||
* an observable property of any given region of address space). It's just
|
||||
* hugepage-sized and hugepage-aligned; it's *potentially* huge.
|
||||
*/
|
||||
typedef struct hpdata_s hpdata_t;
|
||||
ph_structs(hpdata_age_heap, hpdata_t);
|
||||
struct hpdata_s {
|
||||
/*
|
||||
* We likewise follow the edata convention of mangling names and forcing
|
||||
* the use of accessors -- this lets us add some consistency checks on
|
||||
* access.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The address of the hugepage in question. This can't be named h_addr,
|
||||
* since that conflicts with a macro defined in Windows headers.
|
||||
*/
|
||||
void *h_address;
|
||||
/* Its age (measured in psset operations). */
|
||||
uint64_t h_age;
|
||||
/* Whether or not we think the hugepage is mapped that way by the OS. */
|
||||
bool h_huge;
|
||||
|
||||
/*
|
||||
* For some properties, we keep parallel sets of bools; h_foo_allowed
|
||||
* and h_in_psset_foo_container. This is a decoupling mechanism that keeps
* the hpa (which manages policies) separate from the psset
* (which is the mechanism used to enforce those policies). This allows
|
||||
* all the container management logic to live in one place, without the
|
||||
* HPA needing to know or care how that happens.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Whether or not the hpdata is allowed to be used to serve allocations,
|
||||
* and whether or not the psset is currently tracking it as such.
|
||||
*/
|
||||
bool h_alloc_allowed;
|
||||
bool h_in_psset_alloc_container;
|
||||
|
||||
/*
|
||||
* The same, but with purging. There's no corresponding
|
||||
* h_in_psset_purge_container, because the psset (currently) always
|
||||
* removes hpdatas from their containers during updates (to implement
|
||||
* LRU for purging).
|
||||
*/
|
||||
bool h_purge_allowed;
|
||||
|
||||
/* And with hugifying. */
|
||||
bool h_hugify_allowed;
|
||||
/* When we became a hugification candidate. */
|
||||
nstime_t h_time_hugify_allowed;
|
||||
bool h_in_psset_hugify_container;
|
||||
|
||||
/* Whether or not a purge or hugify is currently happening. */
|
||||
bool h_mid_purge;
|
||||
bool h_mid_hugify;
|
||||
|
||||
/*
|
||||
* Whether or not the hpdata is being updated in the psset (i.e. if
|
||||
* there has been a psset_update_begin call issued without a matching
|
||||
* psset_update_end call). Eventually this will expand to other types
|
||||
* of updates.
|
||||
*/
|
||||
bool h_updating;
|
||||
|
||||
/* Whether or not the hpdata is in a psset. */
|
||||
bool h_in_psset;
|
||||
|
||||
union {
|
||||
/* When nonempty (and also nonfull), used by the psset bins. */
|
||||
hpdata_age_heap_link_t age_link;
|
||||
/*
|
||||
* When empty (or not corresponding to any hugepage), list
|
||||
* linkage.
|
||||
*/
|
||||
ql_elm(hpdata_t) ql_link_empty;
|
||||
};
|
||||
|
||||
/*
|
||||
* Linkage for the psset to track candidates for purging and hugifying.
|
||||
*/
|
||||
ql_elm(hpdata_t) ql_link_purge;
|
||||
ql_elm(hpdata_t) ql_link_hugify;
|
||||
|
||||
/* The length of the largest contiguous sequence of inactive pages. */
|
||||
size_t h_longest_free_range;
|
||||
|
||||
/* Number of active pages. */
|
||||
size_t h_nactive;
|
||||
|
||||
/* A bitmap with bits set in the active pages. */
|
||||
fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
|
||||
|
||||
/*
|
||||
* Number of dirty or active pages, and a bitmap tracking them. One
|
||||
* way to think of this is as which pages are dirty from the OS's
|
||||
* perspective.
|
||||
*/
|
||||
size_t h_ntouched;
|
||||
|
||||
/* The touched pages (using the same definition as above). */
|
||||
fb_group_t touched_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
|
||||
};
|
||||
|
||||
TYPED_LIST(hpdata_empty_list, hpdata_t, ql_link_empty)
|
||||
TYPED_LIST(hpdata_purge_list, hpdata_t, ql_link_purge)
|
||||
TYPED_LIST(hpdata_hugify_list, hpdata_t, ql_link_hugify)
|
||||
|
||||
ph_proto(, hpdata_age_heap, hpdata_t);
|
||||
|
||||
static inline void *
|
||||
hpdata_addr_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_address;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_addr_set(hpdata_t *hpdata, void *addr) {
|
||||
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
|
||||
hpdata->h_address = addr;
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
hpdata_age_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_age;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_age_set(hpdata_t *hpdata, uint64_t age) {
|
||||
hpdata->h_age = age;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_huge_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_huge;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_alloc_allowed_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_alloc_allowed;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_alloc_allowed_set(hpdata_t *hpdata, bool alloc_allowed) {
|
||||
hpdata->h_alloc_allowed = alloc_allowed;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_in_psset_alloc_container_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_in_psset_alloc_container;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_in_psset_alloc_container_set(hpdata_t *hpdata, bool in_container) {
|
||||
assert(in_container != hpdata->h_in_psset_alloc_container);
|
||||
hpdata->h_in_psset_alloc_container = in_container;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_purge_allowed_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_purge_allowed;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) {
|
||||
assert(purge_allowed == false || !hpdata->h_mid_purge);
|
||||
hpdata->h_purge_allowed = purge_allowed;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_hugify_allowed_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_hugify_allowed;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) {
|
||||
assert(!hpdata->h_mid_hugify);
|
||||
hpdata->h_hugify_allowed = true;
|
||||
hpdata->h_time_hugify_allowed = now;
|
||||
}
|
||||
|
||||
static inline nstime_t
|
||||
hpdata_time_hugify_allowed(hpdata_t *hpdata) {
|
||||
return hpdata->h_time_hugify_allowed;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_disallow_hugify(hpdata_t *hpdata) {
|
||||
hpdata->h_hugify_allowed = false;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_in_psset_hugify_container_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_in_psset_hugify_container;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_in_psset_hugify_container_set(hpdata_t *hpdata, bool in_container) {
|
||||
assert(in_container != hpdata->h_in_psset_hugify_container);
|
||||
hpdata->h_in_psset_hugify_container = in_container;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_mid_purge_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_mid_purge;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_mid_purge_set(hpdata_t *hpdata, bool mid_purge) {
|
||||
assert(mid_purge != hpdata->h_mid_purge);
|
||||
hpdata->h_mid_purge = mid_purge;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_mid_hugify_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_mid_hugify;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_mid_hugify_set(hpdata_t *hpdata, bool mid_hugify) {
|
||||
assert(mid_hugify != hpdata->h_mid_hugify);
|
||||
hpdata->h_mid_hugify = mid_hugify;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_changing_state_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_mid_purge || hpdata->h_mid_hugify;
|
||||
}
|
||||
|
||||
|
||||
static inline bool
|
||||
hpdata_updating_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_updating;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_updating_set(hpdata_t *hpdata, bool updating) {
|
||||
assert(updating != hpdata->h_updating);
|
||||
hpdata->h_updating = updating;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_in_psset_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_in_psset;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_in_psset_set(hpdata_t *hpdata, bool in_psset) {
|
||||
assert(in_psset != hpdata->h_in_psset);
|
||||
hpdata->h_in_psset = in_psset;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
hpdata_longest_free_range_get(const hpdata_t *hpdata) {
|
||||
return hpdata->h_longest_free_range;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) {
|
||||
assert(longest_free_range <= HUGEPAGE_PAGES);
|
||||
hpdata->h_longest_free_range = longest_free_range;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
hpdata_nactive_get(hpdata_t *hpdata) {
|
||||
return hpdata->h_nactive;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
hpdata_ntouched_get(hpdata_t *hpdata) {
|
||||
return hpdata->h_ntouched;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
hpdata_ndirty_get(hpdata_t *hpdata) {
|
||||
return hpdata->h_ntouched - hpdata->h_nactive;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
hpdata_nretained_get(hpdata_t *hpdata) {
|
||||
return HUGEPAGE_PAGES - hpdata->h_ntouched;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_assert_empty(hpdata_t *hpdata) {
|
||||
assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES));
|
||||
assert(hpdata->h_nactive == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Only used in tests, and in hpdata_assert_consistent, below. Verifies some
|
||||
* consistency properties of the hpdata (e.g. that cached counts of page stats
|
||||
* match computed ones).
|
||||
*/
|
||||
static inline bool
|
||||
hpdata_consistent(hpdata_t *hpdata) {
|
||||
if (fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
|
||||
!= hpdata_longest_free_range_get(hpdata)) {
|
||||
return false;
|
||||
}
|
||||
if (fb_scount(hpdata->active_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
|
||||
!= hpdata->h_nactive) {
|
||||
return false;
|
||||
}
|
||||
if (fb_scount(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
|
||||
!= hpdata->h_ntouched) {
|
||||
return false;
|
||||
}
|
||||
if (hpdata->h_ntouched < hpdata->h_nactive) {
|
||||
return false;
|
||||
}
|
||||
if (hpdata->h_huge && hpdata->h_ntouched != HUGEPAGE_PAGES) {
|
||||
return false;
|
||||
}
|
||||
if (hpdata_changing_state_get(hpdata)
|
||||
&& ((hpdata->h_purge_allowed) || hpdata->h_hugify_allowed)) {
|
||||
return false;
|
||||
}
|
||||
if (hpdata_hugify_allowed_get(hpdata)
|
||||
!= hpdata_in_psset_hugify_container_get(hpdata)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hpdata_assert_consistent(hpdata_t *hpdata) {
|
||||
assert(hpdata_consistent(hpdata));
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_empty(hpdata_t *hpdata) {
|
||||
return hpdata->h_nactive == 0;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
hpdata_full(hpdata_t *hpdata) {
|
||||
return hpdata->h_nactive == HUGEPAGE_PAGES;
|
||||
}
|
||||
|
||||
void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age);
|
||||
|
||||
/*
|
||||
* Given an hpdata which can serve an allocation request, pick and reserve an
|
||||
* offset within that allocation.
|
||||
*/
|
||||
void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
|
||||
void hpdata_unreserve(hpdata_t *hpdata, void *begin, size_t sz);
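/*
 * Illustrative only: a sketch of the reserve/unreserve pairing.  It is
 * assumed here that sz is a byte count that is a multiple of PAGE, that the
 * hpdata has a long-enough free range (per the comment above), and that the
 * caller provides its own synchronization.
 */
static inline void
hpdata_reserve_example(hpdata_t *hpdata) {
	void *addr = hpdata_reserve_alloc(hpdata, 4 * PAGE);
	/* ... hand the range out, use it ... */
	hpdata_unreserve(hpdata, addr, 4 * PAGE);
}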
|
||||
|
||||
/*
|
||||
* The hpdata_purge_prepare_t allows grabbing the metadata required to purge
|
||||
* subranges of a hugepage while holding a lock, drop the lock during the actual
|
||||
* purging of them, and reacquire it to update the metadata again.
|
||||
*/
|
||||
typedef struct hpdata_purge_state_s hpdata_purge_state_t;
|
||||
struct hpdata_purge_state_s {
|
||||
size_t npurged;
|
||||
size_t ndirty_to_purge;
|
||||
fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)];
|
||||
size_t next_purge_search_begin;
|
||||
};
|
||||
|
||||
/*
|
||||
* Initializes purge state. The access to hpdata must be externally
|
||||
* synchronized with other hpdata_* calls.
|
||||
*
|
||||
* You can tell whether or not a thread is purging or hugifying a given hpdata
|
||||
* via hpdata_changing_state_get(hpdata). Racing hugification or purging
|
||||
* operations aren't allowed.
|
||||
*
|
||||
* Once you begin purging, you have to follow through and call hpdata_purge_next
|
||||
* until you're done, and then end. Allocating out of an hpdata undergoing
|
||||
* purging is not allowed.
|
||||
*
|
||||
* Returns the number of dirty pages that will be purged.
|
||||
*/
|
||||
size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
|
||||
|
||||
/*
|
||||
* If there are more extents to purge, sets *r_purge_addr and *r_purge_size
* to the address and size of the next range to purge, and returns true.
* Otherwise, returns false to indicate that we're done.
|
||||
*
|
||||
* This requires exclusive access to the purge state, but *not* to the hpdata.
|
||||
* In particular, unreserve calls are allowed while purging (i.e. you can dalloc
|
||||
* into one part of the hpdata while purging a different part).
|
||||
*/
|
||||
bool hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
|
||||
void **r_purge_addr, size_t *r_purge_size);
|
||||
/*
|
||||
* Updates the hpdata metadata after all purging is done. Needs external
|
||||
* synchronization.
|
||||
*/
|
||||
void hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
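/*
 * Illustrative only: the begin/next/end protocol described above, as a
 * caller might drive it.  The locking comments are assumptions about the
 * intended usage (metadata updates synchronized, the actual purge calls
 * issued with the lock dropped); the real driver presumably lives in the
 * HPA code.
 */
static inline void
hpdata_purge_example(hpdata_t *hpdata) {
	hpdata_purge_state_t purge_state;
	void *purge_addr;
	size_t purge_size;

	/* With the owning shard's metadata lock held. */
	size_t ndirty = hpdata_purge_begin(hpdata, &purge_state);
	(void)ndirty;

	/* The lock may be dropped here; unreserves are still allowed. */
	while (hpdata_purge_next(hpdata, &purge_state, &purge_addr,
	    &purge_size)) {
		/* e.g. purge purge_size bytes starting at purge_addr. */
	}

	/* Reacquire the lock before updating the metadata. */
	hpdata_purge_end(hpdata, &purge_state);
}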
|
||||
|
||||
void hpdata_hugify(hpdata_t *hpdata);
|
||||
void hpdata_dehugify(hpdata_t *hpdata);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_HPDATA_H */
|
40
BeefRT/JEMalloc/include/jemalloc/internal/inspect.h
Normal file
|
@ -0,0 +1,40 @@
|
|||
#ifndef JEMALLOC_INTERNAL_INSPECT_H
|
||||
#define JEMALLOC_INTERNAL_INSPECT_H
|
||||
|
||||
/*
|
||||
* This module contains the heap introspection capabilities. For now they are
|
||||
* exposed purely through mallctl APIs in the experimental namespace, but this
|
||||
* may change over time.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The following two structs are for experimental purposes. See
|
||||
* experimental_utilization_query_ctl and
|
||||
* experimental_utilization_batch_query_ctl in src/ctl.c.
|
||||
*/
|
||||
typedef struct inspect_extent_util_stats_s inspect_extent_util_stats_t;
|
||||
struct inspect_extent_util_stats_s {
|
||||
size_t nfree;
|
||||
size_t nregs;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
typedef struct inspect_extent_util_stats_verbose_s
|
||||
inspect_extent_util_stats_verbose_t;
|
||||
|
||||
struct inspect_extent_util_stats_verbose_s {
|
||||
void *slabcur_addr;
|
||||
size_t nfree;
|
||||
size_t nregs;
|
||||
size_t size;
|
||||
size_t bin_nfree;
|
||||
size_t bin_nregs;
|
||||
};
|
||||
|
||||
void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
|
||||
size_t *nfree, size_t *nregs, size_t *size);
|
||||
void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
|
||||
size_t *nfree, size_t *nregs, size_t *size,
|
||||
size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_INSPECT_H */
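/*
 * Illustrative only: querying utilization for a live allocation via the
 * non-verbose entry point declared above.  tsdn_fetch() is the usual way
 * internal callers obtain a tsdn_t; external consumers reach this
 * functionality through the experimental mallctl namespace instead.
 */
static inline void
inspect_example(tsdn_t *tsdn, const void *ptr) {
	size_t nfree, nregs, size;

	inspect_extent_util_stats_get(tsdn, ptr, &nfree, &nregs, &size);
	/*
	 * Presumably: nregs is the slab's region count, nfree of which are
	 * currently unused, and size is the backing extent size in bytes.
	 */
}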
|
108
BeefRT/JEMalloc/include/jemalloc/internal/jemalloc_internal_decls.h
Normal file
|
@ -0,0 +1,108 @@
|
|||
#ifndef JEMALLOC_INTERNAL_DECLS_H
|
||||
#define JEMALLOC_INTERNAL_DECLS_H
|
||||
|
||||
#include <math.h>
|
||||
#ifdef _WIN32
|
||||
# include <windows.h>
|
||||
# include "msvc_compat/windows_extra.h"
|
||||
# include "msvc_compat/strings.h"
|
||||
# ifdef _WIN64
|
||||
# if LG_VADDR <= 32
|
||||
# error Generate the headers using x64 vcargs
|
||||
# endif
|
||||
# else
|
||||
# if LG_VADDR > 32
|
||||
# undef LG_VADDR
|
||||
# define LG_VADDR 32
|
||||
# endif
|
||||
# endif
|
||||
#else
|
||||
# include <sys/param.h>
|
||||
# include <sys/mman.h>
|
||||
# if !defined(__pnacl__) && !defined(__native_client__)
|
||||
# include <sys/syscall.h>
|
||||
# if !defined(SYS_write) && defined(__NR_write)
|
||||
# define SYS_write __NR_write
|
||||
# endif
|
||||
# if defined(SYS_open) && defined(__aarch64__)
|
||||
/* Android headers may define SYS_open to __NR_open even though
|
||||
* __NR_open may not exist on AArch64 (superseded by __NR_openat). */
|
||||
# undef SYS_open
|
||||
# endif
|
||||
# include <sys/uio.h>
|
||||
# endif
|
||||
# include <pthread.h>
|
||||
# if defined(__FreeBSD__) || defined(__DragonFly__)
|
||||
# include <pthread_np.h>
|
||||
# include <sched.h>
|
||||
# if defined(__FreeBSD__)
|
||||
# define cpu_set_t cpuset_t
|
||||
# endif
|
||||
# endif
|
||||
# include <signal.h>
|
||||
# ifdef JEMALLOC_OS_UNFAIR_LOCK
|
||||
# include <os/lock.h>
|
||||
# endif
|
||||
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
|
||||
# include <sched.h>
|
||||
# endif
|
||||
# include <errno.h>
|
||||
# include <sys/time.h>
|
||||
# include <time.h>
|
||||
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
|
||||
# include <mach/mach_time.h>
|
||||
# endif
|
||||
#endif
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <limits.h>
|
||||
#ifndef SIZE_T_MAX
|
||||
# define SIZE_T_MAX SIZE_MAX
|
||||
#endif
|
||||
#ifndef SSIZE_MAX
|
||||
# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
|
||||
#endif
|
||||
#include <stdarg.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#ifndef offsetof
|
||||
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
|
||||
#endif
|
||||
#include <string.h>
|
||||
#include <strings.h>
|
||||
#include <ctype.h>
|
||||
#ifdef _MSC_VER
|
||||
# include <io.h>
|
||||
typedef intptr_t ssize_t;
|
||||
# define PATH_MAX 1024
|
||||
# define STDERR_FILENO 2
|
||||
# define __func__ __FUNCTION__
|
||||
# ifdef JEMALLOC_HAS_RESTRICT
|
||||
# define restrict __restrict
|
||||
# endif
|
||||
/* Disable warnings about deprecated system functions. */
|
||||
# pragma warning(disable: 4996)
|
||||
#if _MSC_VER < 1800
|
||||
static int
|
||||
isblank(int c) {
|
||||
return (c == '\t' || c == ' ');
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
# include <unistd.h>
|
||||
#endif
|
||||
#include <fcntl.h>
|
||||
|
||||
/*
|
||||
* The Win32 midl compiler has #define small char; we don't use midl, but
|
||||
* "small" is a nice identifier to have available when talking about size
|
||||
* classes.
|
||||
*/
|
||||
#ifdef small
|
||||
# undef small
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_DECLS_H */
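/*
 * Illustrative only: why the SYS_write fallback above matters.  When
 * syscall(2) is usable, diagnostics can be emitted with a raw write syscall,
 * avoiding stdio, which could allocate and re-enter the allocator.  A
 * minimal sketch (error handling elided):
 */
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
static inline void
example_raw_write(const char *s, size_t len) {
	/* STDERR_FILENO == 2; no buffering, no allocation. */
	ssize_t r = (ssize_t)syscall(SYS_write, STDERR_FILENO, s, len);
	(void)r;
}
#endif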
|
428
BeefRT/JEMalloc/include/jemalloc/internal/jemalloc_internal_defs.h
Normal file
|
@ -0,0 +1,428 @@
|
|||
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
|
||||
#ifndef JEMALLOC_INTERNAL_DEFS_H_
|
||||
#define JEMALLOC_INTERNAL_DEFS_H_
|
||||
/*
|
||||
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
|
||||
* public APIs to be prefixed. This makes it possible, with some care, to use
|
||||
* multiple allocators simultaneously.
|
||||
*/
|
||||
#define JEMALLOC_PREFIX "je_"
|
||||
#define JEMALLOC_CPREFIX "JE_"
|
||||
|
||||
/*
|
||||
* Define overrides for non-standard allocator-related functions if they are
|
||||
* present on the system.
|
||||
*/
|
||||
/* #undef JEMALLOC_OVERRIDE___LIBC_CALLOC */
|
||||
/* #undef JEMALLOC_OVERRIDE___LIBC_FREE */
|
||||
/* #undef JEMALLOC_OVERRIDE___LIBC_MALLOC */
|
||||
/* #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN */
|
||||
/* #undef JEMALLOC_OVERRIDE___LIBC_REALLOC */
|
||||
/* #undef JEMALLOC_OVERRIDE___LIBC_VALLOC */
|
||||
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
|
||||
|
||||
/*
|
||||
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
|
||||
* For shared libraries, symbol visibility mechanisms prevent these symbols
|
||||
* from being exported, but for static libraries, naming collisions are a real
|
||||
* possibility.
|
||||
*/
|
||||
#define JEMALLOC_PRIVATE_NAMESPACE je_
|
||||
|
||||
/*
|
||||
* Hyper-threaded CPUs may need a special instruction inside spin loops in
|
||||
* order to yield to another virtual CPU.
|
||||
*/
|
||||
#define CPU_SPINWAIT _mm_pause()
|
||||
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
|
||||
#define HAVE_CPU_SPINWAIT 1
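/*
 * Illustrative only: CPU_SPINWAIT is meant to be used inside a bounded
 * busy-wait loop to hint the CPU that the thread is spinning.  The flag
 * below is hypothetical; jemalloc's own spin helper follows this pattern.
 */
#if 0	/* example, not part of the generated configuration */
static void
example_spin_until(volatile int *done) {
	while (!*done) {
		CPU_SPINWAIT;	/* _mm_pause() on this configuration */
	}
}
#endif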
|
||||
|
||||
/*
|
||||
* Number of significant bits in virtual addresses. This may be less than the
|
||||
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
|
||||
* bits are the same as bit 47.
|
||||
*/
|
||||
#define LG_VADDR 48
|
||||
|
||||
/* Defined if C11 atomics are available. */
|
||||
/* #undef JEMALLOC_C11_ATOMICS */
|
||||
|
||||
/* Defined if GCC __atomic atomics are available. */
|
||||
/* #undef JEMALLOC_GCC_ATOMIC_ATOMICS */
|
||||
/* and the 8-bit variant support. */
|
||||
/* #undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS */
|
||||
|
||||
/* Defined if GCC __sync atomics are available. */
|
||||
/* #undef JEMALLOC_GCC_SYNC_ATOMICS */
|
||||
/* and the 8-bit variant support. */
|
||||
/* #undef JEMALLOC_GCC_U8_SYNC_ATOMICS */
|
||||
|
||||
/*
|
||||
* Defined if __builtin_clz() and __builtin_clzl() are available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_BUILTIN_CLZ */
|
||||
|
||||
/*
|
||||
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
|
||||
*/
|
||||
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
|
||||
|
||||
/* Defined if syscall(2) is usable. */
|
||||
/* #undef JEMALLOC_USE_SYSCALL */
|
||||
|
||||
/*
|
||||
* Defined if secure_getenv(3) is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_SECURE_GETENV */
|
||||
|
||||
/*
|
||||
* Defined if issetugid(2) is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_ISSETUGID */
|
||||
|
||||
/* Defined if pthread_atfork(3) is available. */
|
||||
/* #undef JEMALLOC_HAVE_PTHREAD_ATFORK */
|
||||
|
||||
/* Defined if pthread_setname_np(3) is available. */
|
||||
/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */
|
||||
|
||||
/* Defined if pthread_getname_np(3) is available. */
|
||||
/* #undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP */
|
||||
|
||||
/* Defined if pthread_get_name_np(3) is available. */
|
||||
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC */
|
||||
|
||||
/*
|
||||
* Defined if mach_absolute_time() is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_REALTIME */
|
||||
|
||||
/*
|
||||
* Defined if _malloc_thread_cleanup() exists. At least in the case of
|
||||
* FreeBSD, pthread_key_create() allocates, which if used during malloc
|
||||
* bootstrapping will cause recursion into the pthreads library. Therefore, if
|
||||
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
|
||||
* malloc_tsd.
|
||||
*/
|
||||
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
|
||||
|
||||
/*
|
||||
* Defined if threaded initialization is known to be safe on this platform.
|
||||
* Among other things, it must be possible to initialize a mutex without
|
||||
* triggering allocation in order for threaded allocation to be safe.
|
||||
*/
|
||||
/* #undef JEMALLOC_THREADED_INIT */
|
||||
|
||||
/*
|
||||
* Defined if the pthreads implementation defines
|
||||
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
|
||||
* to avoid recursive allocation during mutex initialization.
|
||||
*/
|
||||
/* #undef JEMALLOC_MUTEX_INIT_CB */
|
||||
|
||||
/* Non-empty if the tls_model attribute is supported. */
|
||||
#define JEMALLOC_TLS_MODEL
|
||||
|
||||
/*
|
||||
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
|
||||
* inline functions.
|
||||
*/
|
||||
/* #undef JEMALLOC_DEBUG */
|
||||
|
||||
/* JEMALLOC_STATS enables statistics calculation. */
|
||||
#define JEMALLOC_STATS
|
||||
|
||||
/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
|
||||
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
|
||||
|
||||
/* JEMALLOC_PROF enables allocation profiling. */
|
||||
/* #undef JEMALLOC_PROF */
|
||||
|
||||
/* Use libunwind for profile backtracing if defined. */
|
||||
/* #undef JEMALLOC_PROF_LIBUNWIND */
|
||||
|
||||
/* Use libgcc for profile backtracing if defined. */
|
||||
/* #undef JEMALLOC_PROF_LIBGCC */
|
||||
|
||||
/* Use gcc intrinsics for profile backtracing if defined. */
|
||||
/* #undef JEMALLOC_PROF_GCC */
|
||||
|
||||
/*
|
||||
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
|
||||
* segment (DSS).
|
||||
*/
|
||||
/* #undef JEMALLOC_DSS */
|
||||
|
||||
/* Support memory filling (junk/zero). */
|
||||
#define JEMALLOC_FILL
|
||||
|
||||
/* Support utrace(2)-based tracing. */
|
||||
/* #undef JEMALLOC_UTRACE */
|
||||
|
||||
/* Support utrace(2)-based tracing (label based signature). */
|
||||
/* #undef JEMALLOC_UTRACE_LABEL */
|
||||
|
||||
/* Support optional abort() on OOM. */
|
||||
/* #undef JEMALLOC_XMALLOC */
|
||||
|
||||
/* Support lazy locking (avoid locking unless a second thread is launched). */
|
||||
/* #undef JEMALLOC_LAZY_LOCK */
|
||||
|
||||
/*
|
||||
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
|
||||
* classes).
|
||||
*/
|
||||
/* #undef LG_QUANTUM */
|
||||
|
||||
/* One page is 2^LG_PAGE bytes. */
|
||||
#define LG_PAGE 12
|
||||
|
||||
/* Maximum number of regions in a slab. */
|
||||
/* #undef CONFIG_LG_SLAB_MAXREGS */
|
||||
|
||||
/*
|
||||
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
|
||||
* system does not explicitly support huge pages; system calls that require
|
||||
* explicit huge page support are separately configured.
|
||||
*/
|
||||
#define LG_HUGEPAGE 21
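/*
 * Worked out for this configuration: PAGE == 1 << LG_PAGE == 4096 bytes
 * (4 KiB) and HUGEPAGE == 1 << LG_HUGEPAGE == 2097152 bytes (2 MiB), so a
 * hugepage spans HUGEPAGE / PAGE == 512 pages (HUGEPAGE_PAGES in the other
 * internal headers).
 */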
|
||||
|
||||
/*
|
||||
* If defined, adjacent virtual memory mappings with identical attributes
|
||||
* automatically coalesce, and they fragment when changes are made to subranges.
|
||||
* This is the normal order of things for mmap()/munmap(), but on Windows
|
||||
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
|
||||
* mappings do *not* coalesce/fragment.
|
||||
*/
|
||||
/* #undef JEMALLOC_MAPS_COALESCE */
|
||||
|
||||
/*
|
||||
* If defined, retain memory for later reuse by default rather than using e.g.
|
||||
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
|
||||
* common sequences of mmap()/munmap() calls will cause virtual memory map
|
||||
* holes.
|
||||
*/
|
||||
/* #undef JEMALLOC_RETAIN */
|
||||
|
||||
/* TLS is used to map arenas and magazine caches to threads. */
|
||||
/* #undef JEMALLOC_TLS */
|
||||
|
||||
/*
|
||||
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
|
||||
* Don't use this directly; instead use unreachable() from util.h
|
||||
*/
|
||||
#define JEMALLOC_INTERNAL_UNREACHABLE abort
|
||||
|
||||
/*
|
||||
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
|
||||
* use ffs_*() from util.h.
|
||||
*/
|
||||
#define JEMALLOC_INTERNAL_FFSLL ffsll
|
||||
#define JEMALLOC_INTERNAL_FFSL ffsl
|
||||
#define JEMALLOC_INTERNAL_FFS ffs
|
||||
|
||||
/*
|
||||
* popcount*() functions to use for bitmapping.
|
||||
*/
|
||||
/* #undef JEMALLOC_INTERNAL_POPCOUNTL */
|
||||
/* #undef JEMALLOC_INTERNAL_POPCOUNT */
|
||||
|
||||
/*
|
||||
* If defined, explicitly attempt to more uniformly distribute large allocation
|
||||
* pointer alignments across all cache indices.
|
||||
*/
|
||||
#define JEMALLOC_CACHE_OBLIVIOUS
|
||||
|
||||
/*
|
||||
* If defined, enable logging facilities. We make this a configure option to
|
||||
* avoid taking extra branches everywhere.
|
||||
*/
|
||||
/* #undef JEMALLOC_LOG */
|
||||
|
||||
/*
|
||||
* If defined, use readlinkat() (instead of readlink()) to follow
|
||||
* /etc/malloc_conf.
|
||||
*/
|
||||
/* #undef JEMALLOC_READLINKAT */
|
||||
|
||||
/*
|
||||
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
|
||||
*/
|
||||
/* #undef JEMALLOC_ZONE */
|
||||
|
||||
/*
|
||||
* Methods for determining whether the OS overcommits.
|
||||
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
|
||||
* /proc/sys/vm/overcommit_memory file.
|
||||
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
|
||||
*/
|
||||
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
|
||||
/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */
|
||||
|
||||
/* Defined if madvise(2) is available. */
|
||||
/* #undef JEMALLOC_HAVE_MADVISE */
|
||||
|
||||
/*
|
||||
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
|
||||
* arguments to madvise(2).
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_MADVISE_HUGE */
|
||||
|
||||
/*
|
||||
* Methods for purging unused pages differ between operating systems.
|
||||
*
|
||||
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
|
||||
* will be discarded rather than swapped out.
|
||||
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
|
||||
* defined, this immediately discards pages,
|
||||
* such that new pages will be demand-zeroed if
|
||||
* the address region is later touched;
|
||||
* otherwise this behaves similarly to
|
||||
* MADV_FREE, though typically with higher
|
||||
* system overhead.
|
||||
*/
|
||||
/* #undef JEMALLOC_PURGE_MADVISE_FREE */
|
||||
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
|
||||
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS */
|
||||
|
||||
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
|
||||
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */
|
||||
|
||||
/*
|
||||
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
|
||||
*/
|
||||
/* #undef JEMALLOC_MADVISE_DONTDUMP */
|
||||
|
||||
/*
|
||||
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
|
||||
*/
|
||||
/* #undef JEMALLOC_MADVISE_NOCORE */
|
||||
|
||||
/* Defined if mprotect(2) is available. */
|
||||
/* #undef JEMALLOC_HAVE_MPROTECT */
|
||||
|
||||
/*
|
||||
* Defined if transparent huge pages (THPs) are supported via the
|
||||
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
|
||||
*/
|
||||
/* #undef JEMALLOC_THP */
|
||||
|
||||
/* Defined if posix_madvise is available. */
|
||||
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */
|
||||
|
||||
/*
|
||||
* Method for purging unused pages using posix_madvise.
|
||||
*
|
||||
* posix_madvise(..., POSIX_MADV_DONTNEED)
|
||||
*/
|
||||
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
|
||||
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */
|
||||
|
||||
/*
|
||||
* Defined if memcntl page admin call is supported
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_MEMCNTL */
|
||||
|
||||
/*
|
||||
* Defined if malloc_size is supported
|
||||
*/
|
||||
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */
|
||||
|
||||
/* Define if operating system has alloca.h header. */
|
||||
/* #undef JEMALLOC_HAS_ALLOCA_H */
|
||||
|
||||
/* C99 restrict keyword supported. */
|
||||
/* #undef JEMALLOC_HAS_RESTRICT */
|
||||
|
||||
/* For use by hash code. */
|
||||
/* #undef JEMALLOC_BIG_ENDIAN */
|
||||
|
||||
/* sizeof(int) == 2^LG_SIZEOF_INT. */
|
||||
#define LG_SIZEOF_INT 2
|
||||
|
||||
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
|
||||
#define LG_SIZEOF_LONG 2
|
||||
|
||||
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
|
||||
#define LG_SIZEOF_LONG_LONG 3
|
||||
|
||||
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
|
||||
#define LG_SIZEOF_INTMAX_T 3
|
||||
|
||||
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
|
||||
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */
|
||||
|
||||
/* glibc memalign hook. */
|
||||
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
|
||||
|
||||
/* pthread support */
|
||||
/* #undef JEMALLOC_HAVE_PTHREAD */
|
||||
|
||||
/* dlsym() support */
|
||||
/* #undef JEMALLOC_HAVE_DLSYM */
|
||||
|
||||
/* Adaptive mutex support in pthreads. */
|
||||
/* #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP */
|
||||
|
||||
/* GNU specific sched_getcpu support */
|
||||
/* #undef JEMALLOC_HAVE_SCHED_GETCPU */
|
||||
|
||||
/* GNU specific sched_setaffinity support */
|
||||
/* #undef JEMALLOC_HAVE_SCHED_SETAFFINITY */
|
||||
|
||||
/*
|
||||
* If defined, all the features necessary for background threads are present.
|
||||
*/
|
||||
/* #undef JEMALLOC_BACKGROUND_THREAD */
|
||||
|
||||
/*
|
||||
* If defined, jemalloc symbols are not exported (doesn't work when
|
||||
* JEMALLOC_PREFIX is not defined).
|
||||
*/
|
||||
/* #undef JEMALLOC_EXPORT */
|
||||
|
||||
/* config.malloc_conf options string. */
|
||||
#define JEMALLOC_CONFIG_MALLOC_CONF ""
|
||||
|
||||
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
|
||||
/* #undef JEMALLOC_IS_MALLOC */
|
||||
|
||||
/*
|
||||
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
|
||||
*/
|
||||
/* #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE */
|
||||
|
||||
/* Performs additional safety checks when defined. */
|
||||
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
|
||||
|
||||
/* Is C++ support being built? */
|
||||
/* #undef JEMALLOC_ENABLE_CXX */
|
||||
|
||||
/* Performs additional size checks when defined. */
|
||||
/* #undef JEMALLOC_OPT_SIZE_CHECKS */
|
||||
|
||||
/* Allows sampled junk and stash for checking use-after-free when defined. */
|
||||
/* #undef JEMALLOC_UAF_DETECTION */
|
||||
|
||||
/* Darwin VM_MAKE_TAG support */
|
||||
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */
|
||||
|
||||
/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
|
||||
#define JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
|
427
BeefRT/JEMalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
Normal file
|
@ -0,0 +1,427 @@
|
|||
#ifndef JEMALLOC_INTERNAL_DEFS_H_
|
||||
#define JEMALLOC_INTERNAL_DEFS_H_
|
||||
/*
|
||||
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
|
||||
* public APIs to be prefixed. This makes it possible, with some care, to use
|
||||
* multiple allocators simultaneously.
|
||||
*/
|
||||
#undef JEMALLOC_PREFIX
|
||||
#undef JEMALLOC_CPREFIX
|
||||
|
||||
/*
|
||||
* Define overrides for non-standard allocator-related functions if they are
|
||||
* present on the system.
|
||||
*/
|
||||
#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
|
||||
#undef JEMALLOC_OVERRIDE___LIBC_FREE
|
||||
#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
|
||||
#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
|
||||
#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
|
||||
#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
|
||||
#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
|
||||
|
||||
/*
|
||||
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
|
||||
* For shared libraries, symbol visibility mechanisms prevent these symbols
|
||||
* from being exported, but for static libraries, naming collisions are a real
|
||||
* possibility.
|
||||
*/
|
||||
#undef JEMALLOC_PRIVATE_NAMESPACE
|
||||
|
||||
/*
|
||||
* Hyper-threaded CPUs may need a special instruction inside spin loops in
|
||||
* order to yield to another virtual CPU.
|
||||
*/
|
||||
#undef CPU_SPINWAIT
|
||||
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
|
||||
#undef HAVE_CPU_SPINWAIT
|
||||
|
||||
/*
|
||||
* Number of significant bits in virtual addresses. This may be less than the
|
||||
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
|
||||
* bits are the same as bit 47.
|
||||
*/
|
||||
#undef LG_VADDR
|
||||
|
||||
/* Defined if C11 atomics are available. */
|
||||
#undef JEMALLOC_C11_ATOMICS
|
||||
|
||||
/* Defined if GCC __atomic atomics are available. */
|
||||
#undef JEMALLOC_GCC_ATOMIC_ATOMICS
|
||||
/* and the 8-bit variant support. */
|
||||
#undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS
|
||||
|
||||
/* Defined if GCC __sync atomics are available. */
|
||||
#undef JEMALLOC_GCC_SYNC_ATOMICS
|
||||
/* and the 8-bit variant support. */
|
||||
#undef JEMALLOC_GCC_U8_SYNC_ATOMICS
|
||||
|
||||
/*
|
||||
* Defined if __builtin_clz() and __builtin_clzl() are available.
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_BUILTIN_CLZ
|
||||
|
||||
/*
|
||||
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
|
||||
*/
|
||||
#undef JEMALLOC_OS_UNFAIR_LOCK
|
||||
|
||||
/* Defined if syscall(2) is usable. */
|
||||
#undef JEMALLOC_USE_SYSCALL
|
||||
|
||||
/*
|
||||
* Defined if secure_getenv(3) is available.
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_SECURE_GETENV
|
||||
|
||||
/*
|
||||
* Defined if issetugid(2) is available.
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_ISSETUGID
|
||||
|
||||
/* Defined if pthread_atfork(3) is available. */
|
||||
#undef JEMALLOC_HAVE_PTHREAD_ATFORK
|
||||
|
||||
/* Defined if pthread_setname_np(3) is available. */
|
||||
#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
|
||||
|
||||
/* Defined if pthread_getname_np(3) is available. */
|
||||
#undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP
|
||||
|
||||
/* Defined if pthread_get_name_np(3) is available. */
|
||||
#undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
|
||||
|
||||
/*
|
||||
* Defined if mach_absolute_time() is available.
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_CLOCK_REALTIME
|
||||
|
||||
/*
|
||||
* Defined if _malloc_thread_cleanup() exists. At least in the case of
|
||||
* FreeBSD, pthread_key_create() allocates, which if used during malloc
|
||||
* bootstrapping will cause recursion into the pthreads library. Therefore, if
|
||||
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
|
||||
* malloc_tsd.
|
||||
*/
|
||||
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
|
||||
/*
|
||||
* Defined if threaded initialization is known to be safe on this platform.
|
||||
* Among other things, it must be possible to initialize a mutex without
|
||||
* triggering allocation in order for threaded allocation to be safe.
|
||||
*/
|
||||
#undef JEMALLOC_THREADED_INIT
|
||||
|
||||
/*
|
||||
* Defined if the pthreads implementation defines
|
||||
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
|
||||
* to avoid recursive allocation during mutex initialization.
|
||||
*/
|
||||
#undef JEMALLOC_MUTEX_INIT_CB
|
||||
|
||||
/* Non-empty if the tls_model attribute is supported. */
|
||||
#undef JEMALLOC_TLS_MODEL
|
||||
|
||||
/*
|
||||
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
|
||||
* inline functions.
|
||||
*/
|
||||
#undef JEMALLOC_DEBUG
|
||||
|
||||
/* JEMALLOC_STATS enables statistics calculation. */
|
||||
#undef JEMALLOC_STATS
|
||||
|
||||
/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
|
||||
#undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
|
||||
|
||||
/* JEMALLOC_PROF enables allocation profiling. */
|
||||
#undef JEMALLOC_PROF
|
||||
|
||||
/* Use libunwind for profile backtracing if defined. */
|
||||
#undef JEMALLOC_PROF_LIBUNWIND
|
||||
|
||||
/* Use libgcc for profile backtracing if defined. */
|
||||
#undef JEMALLOC_PROF_LIBGCC
|
||||
|
||||
/* Use gcc intrinsics for profile backtracing if defined. */
|
||||
#undef JEMALLOC_PROF_GCC
|
||||
|
||||
/*
|
||||
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
|
||||
* segment (DSS).
|
||||
*/
|
||||
#undef JEMALLOC_DSS
|
||||
|
||||
/* Support memory filling (junk/zero). */
|
||||
#undef JEMALLOC_FILL
|
||||
|
||||
/* Support utrace(2)-based tracing. */
|
||||
#undef JEMALLOC_UTRACE
|
||||
|
||||
/* Support utrace(2)-based tracing (label based signature). */
|
||||
#undef JEMALLOC_UTRACE_LABEL
|
||||
|
||||
/* Support optional abort() on OOM. */
|
||||
#undef JEMALLOC_XMALLOC
|
||||
|
||||
/* Support lazy locking (avoid locking unless a second thread is launched). */
|
||||
#undef JEMALLOC_LAZY_LOCK
|
||||
|
||||
/*
|
||||
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
|
||||
* classes).
|
||||
*/
|
||||
#undef LG_QUANTUM
|
||||
|
||||
/* One page is 2^LG_PAGE bytes. */
|
||||
#undef LG_PAGE
|
||||
|
||||
/* Maximum number of regions in a slab. */
|
||||
#undef CONFIG_LG_SLAB_MAXREGS
|
||||
|
||||
/*
|
||||
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
|
||||
* system does not explicitly support huge pages; system calls that require
|
||||
* explicit huge page support are separately configured.
|
||||
*/
|
||||
#undef LG_HUGEPAGE
|
||||
|
||||
/*
|
||||
* If defined, adjacent virtual memory mappings with identical attributes
|
||||
* automatically coalesce, and they fragment when changes are made to subranges.
|
||||
* This is the normal order of things for mmap()/munmap(), but on Windows
|
||||
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
|
||||
* mappings do *not* coalesce/fragment.
|
||||
*/
|
||||
#undef JEMALLOC_MAPS_COALESCE
|
||||
|
||||
/*
|
||||
* If defined, retain memory for later reuse by default rather than using e.g.
|
||||
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
|
||||
* common sequences of mmap()/munmap() calls will cause virtual memory map
|
||||
* holes.
|
||||
*/
|
||||
#undef JEMALLOC_RETAIN
|
||||
|
||||
/* TLS is used to map arenas and magazine caches to threads. */
|
||||
#undef JEMALLOC_TLS
|
||||
|
||||
/*
|
||||
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
|
||||
* Don't use this directly; instead use unreachable() from util.h
|
||||
*/
|
||||
#undef JEMALLOC_INTERNAL_UNREACHABLE
|
||||
|
||||
/*
|
||||
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
|
||||
* use ffs_*() from util.h.
|
||||
*/
|
||||
#undef JEMALLOC_INTERNAL_FFSLL
|
||||
#undef JEMALLOC_INTERNAL_FFSL
|
||||
#undef JEMALLOC_INTERNAL_FFS
|
||||
|
||||
/*
|
||||
* popcount*() functions to use for bitmapping.
|
||||
*/
|
||||
#undef JEMALLOC_INTERNAL_POPCOUNTL
|
||||
#undef JEMALLOC_INTERNAL_POPCOUNT
|
||||
|
||||
/*
|
||||
* If defined, explicitly attempt to more uniformly distribute large allocation
|
||||
* pointer alignments across all cache indices.
|
||||
*/
|
||||
#undef JEMALLOC_CACHE_OBLIVIOUS
|
||||
|
||||
/*
|
||||
* If defined, enable logging facilities. We make this a configure option to
|
||||
* avoid taking extra branches everywhere.
|
||||
*/
|
||||
#undef JEMALLOC_LOG
|
||||
|
||||
/*
|
||||
* If defined, use readlinkat() (instead of readlink()) to follow
|
||||
* /etc/malloc_conf.
|
||||
*/
|
||||
#undef JEMALLOC_READLINKAT
|
||||
|
||||
/*
|
||||
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
|
||||
*/
|
||||
#undef JEMALLOC_ZONE
|
||||
|
||||
/*
|
||||
* Methods for determining whether the OS overcommits.
|
||||
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
|
||||
* /proc/sys/vm/overcommit_memory file.
|
||||
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
|
||||
*/
|
||||
#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
|
||||
#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
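The two overcommit probes above amount to reading one OS knob at startup. As a rough, standalone illustration only (this is not jemalloc's actual pages code; the function name and the "assume overcommit on error" default are assumptions of the sketch), a Linux-side check could look like this:

#include <fcntl.h>
#include <stdbool.h>
#include <unistd.h>

/* Illustrative sketch: report whether Linux is configured to overcommit. */
static bool
linux_overcommits(void) {
    int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
    if (fd == -1) {
        /* Assume the common default (heuristic overcommit). */
        return true;
    }
    char buf[1];
    ssize_t nread = read(fd, buf, sizeof(buf));
    close(fd);
    /* '0' (heuristic) and '1' (always) overcommit; '2' does not. */
    return nread < 1 || buf[0] != '2';
}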
|
||||
|
||||
/* Defined if madvise(2) is available. */
|
||||
#undef JEMALLOC_HAVE_MADVISE
|
||||
|
||||
/*
|
||||
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
|
||||
* arguments to madvise(2).
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_MADVISE_HUGE
|
||||
|
||||
/*
|
||||
* Methods for purging unused pages differ between operating systems.
|
||||
*
|
||||
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
|
||||
* will be discarded rather than swapped out.
|
||||
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
|
||||
* defined, this immediately discards pages,
|
||||
* such that new pages will be demand-zeroed if
|
||||
* the address region is later touched;
|
||||
* otherwise this behaves similarly to
|
||||
* MADV_FREE, though typically with higher
|
||||
* system overhead.
|
||||
*/
|
||||
#undef JEMALLOC_PURGE_MADVISE_FREE
|
||||
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
|
||||
#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
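For reference, the two purge flavors described above map onto plain madvise(2) calls. This standalone sketch is illustrative only (the function names are assumptions, not jemalloc's actual purge path):

#include <stddef.h>
#include <sys/mman.h>

/* Lazy purge: pages become reclaimable, but reads may still see old data. */
static int
purge_lazy(void *addr, size_t size) {
#ifdef MADV_FREE
    return madvise(addr, size, MADV_FREE);
#else
    return -1; /* MADV_FREE not available on this platform. */
#endif
}

/* Forced purge: pages are dropped now and demand-zeroed on the next touch. */
static int
purge_forced(void *addr, size_t size) {
    return madvise(addr, size, MADV_DONTNEED);
}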
|
||||
|
||||
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
|
||||
#undef JEMALLOC_DEFINE_MADVISE_FREE
|
||||
|
||||
/*
|
||||
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
|
||||
*/
|
||||
#undef JEMALLOC_MADVISE_DONTDUMP
|
||||
|
||||
/*
|
||||
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
|
||||
*/
|
||||
#undef JEMALLOC_MADVISE_NOCORE
|
||||
|
||||
/* Defined if mprotect(2) is available. */
|
||||
#undef JEMALLOC_HAVE_MPROTECT
|
||||
|
||||
/*
|
||||
* Defined if transparent huge pages (THPs) are supported via the
|
||||
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
|
||||
*/
|
||||
#undef JEMALLOC_THP
|
||||
|
||||
/* Defined if posix_madvise is available. */
|
||||
#undef JEMALLOC_HAVE_POSIX_MADVISE
|
||||
|
||||
/*
|
||||
* Method for purging unused pages using posix_madvise.
|
||||
*
|
||||
* posix_madvise(..., POSIX_MADV_DONTNEED)
|
||||
*/
|
||||
#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED
|
||||
#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS
|
||||
|
||||
/*
|
||||
* Defined if memcntl page admin call is supported
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_MEMCNTL
|
||||
|
||||
/*
|
||||
* Defined if malloc_size is supported
|
||||
*/
|
||||
#undef JEMALLOC_HAVE_MALLOC_SIZE
|
||||
|
||||
/* Define if operating system has alloca.h header. */
|
||||
#undef JEMALLOC_HAS_ALLOCA_H
|
||||
|
||||
/* C99 restrict keyword supported. */
|
||||
#undef JEMALLOC_HAS_RESTRICT
|
||||
|
||||
/* For use by hash code. */
|
||||
#undef JEMALLOC_BIG_ENDIAN
|
||||
|
||||
/* sizeof(int) == 2^LG_SIZEOF_INT. */
|
||||
#undef LG_SIZEOF_INT
|
||||
|
||||
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
|
||||
#undef LG_SIZEOF_LONG
|
||||
|
||||
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
|
||||
#undef LG_SIZEOF_LONG_LONG
|
||||
|
||||
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
|
||||
#undef LG_SIZEOF_INTMAX_T
|
||||
|
||||
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
|
||||
#undef JEMALLOC_GLIBC_MALLOC_HOOK
|
||||
|
||||
/* glibc memalign hook. */
|
||||
#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
|
||||
|
||||
/* pthread support */
|
||||
#undef JEMALLOC_HAVE_PTHREAD
|
||||
|
||||
/* dlsym() support */
|
||||
#undef JEMALLOC_HAVE_DLSYM
|
||||
|
||||
/* Adaptive mutex support in pthreads. */
|
||||
#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
|
||||
|
||||
/* GNU specific sched_getcpu support */
|
||||
#undef JEMALLOC_HAVE_SCHED_GETCPU
|
||||
|
||||
/* GNU specific sched_setaffinity support */
|
||||
#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
|
||||
|
||||
/*
|
||||
* If defined, all the features necessary for background threads are present.
|
||||
*/
|
||||
#undef JEMALLOC_BACKGROUND_THREAD
|
||||
|
||||
/*
|
||||
* If defined, jemalloc symbols are not exported (doesn't work when
|
||||
* JEMALLOC_PREFIX is not defined).
|
||||
*/
|
||||
#undef JEMALLOC_EXPORT
|
||||
|
||||
/* config.malloc_conf options string. */
|
||||
#undef JEMALLOC_CONFIG_MALLOC_CONF
|
||||
|
||||
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
|
||||
#undef JEMALLOC_IS_MALLOC
|
||||
|
||||
/*
|
||||
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
|
||||
*/
|
||||
#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
|
||||
|
||||
/* Performs additional safety checks when defined. */
|
||||
#undef JEMALLOC_OPT_SAFETY_CHECKS
|
||||
|
||||
/* Is C++ support being built? */
|
||||
#undef JEMALLOC_ENABLE_CXX
|
||||
|
||||
/* Performs additional size checks when defined. */
|
||||
#undef JEMALLOC_OPT_SIZE_CHECKS
|
||||
|
||||
/* Allows sampled junk and stash for checking use-after-free when defined. */
|
||||
#undef JEMALLOC_UAF_DETECTION
|
||||
|
||||
/* Darwin VM_MAKE_TAG support */
|
||||
#undef JEMALLOC_HAVE_VM_MAKE_TAG
|
||||
|
||||
/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
|
||||
#undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
|
|
@ -0,0 +1,75 @@
|
|||
#ifndef JEMALLOC_INTERNAL_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTERNS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/hpa_opts.h"
#include "jemalloc/internal/sec_opts.h"
#include "jemalloc/internal/tsd_types.h"
#include "jemalloc/internal/nstime.h"

/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;

/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern bool opt_trust_madvise;
extern bool opt_confirm_conf;
extern bool opt_hpa;
extern hpa_shard_opts_t opt_hpa_opts;
extern sec_opts_t opt_hpa_sec_opts;

extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern void (*junk_free_callback)(void *ptr, size_t size);
extern void (*junk_alloc_callback)(void *ptr, size_t size);
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_experimental_infallible_new;
extern bool opt_zero;
extern unsigned opt_narenas;
extern zero_realloc_action_t opt_zero_realloc_action;
extern malloc_init_t malloc_init_state;
extern const char *zero_realloc_mode_names[];
extern atomic_zu_t zero_realloc_count;
extern bool opt_cache_oblivious;

/* Escape free-fastpath when ptr & mask == 0 (for sanitization purpose). */
extern uintptr_t san_cache_bin_nonfast_mask;

/* Number of CPUs. */
extern unsigned ncpus;

/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;

/* Base index for manual arenas. */
extern unsigned manual_arena_base;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern atomic_p_t arenas[];

void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
void je_sdallocx_noflags(void *ptr, size_t size);
void *malloc_default(size_t size);

#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
|
|
@ -0,0 +1,84 @@
|
|||
#ifndef JEMALLOC_INTERNAL_INCLUDES_H
#define JEMALLOC_INTERNAL_INCLUDES_H

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.
 *
 * Historically, we dealt with this by splitting each header into four sections
 * (types, structs, externs, and inlines), and included each header file
 * multiple times in this file, picking out the portion we want on each pass
 * using the following #defines:
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 *
 * We're moving toward a world in which the dependencies are explicit; each file
 * will #include the headers it depends on (rather than relying on them being
 * implicitly available via this file including every header file in the
 * project).
 *
 * We're now in an intermediate state: we've broken up the header files to avoid
 * having to include each one multiple times, but have not yet moved the
 * dependency information into the header files (i.e. we still rely on the
 * ordering in this file to ensure all a header's dependencies are available in
 * its translation unit).  Each component is now broken up into multiple header
 * files, corresponding to the sections above (e.g. instead of "foo.h", we now
 * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
 *
 * Those files which have been converted to explicitly include their
 * inter-component dependencies are now in the initial HERMETIC HEADERS
 * section.  All headers may still rely on jemalloc_preamble.h (which, by fiat,
 * must be included first in every translation unit) for system headers and
 * global jemalloc definitions, however.
 */

/******************************************************************************/
/* TYPES */
/******************************************************************************/

#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/prof_types.h"

/******************************************************************************/
/* STRUCTS */
/******************************************************************************/

#include "jemalloc/internal/prof_structs.h"
#include "jemalloc/internal/arena_structs.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/background_thread_structs.h"

/******************************************************************************/
/* EXTERNS */
/******************************************************************************/

#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/tcache_externs.h"
#include "jemalloc/internal/prof_externs.h"
#include "jemalloc/internal/background_thread_externs.h"

/******************************************************************************/
/* INLINES */
/******************************************************************************/

#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
/*
 * Include portions of arena code interleaved with tcache code in order to
 * resolve circular dependencies.
 */
#include "jemalloc/internal/arena_inlines_a.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/tcache_inlines.h"
#include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
#include "jemalloc/internal/prof_inlines.h"
#include "jemalloc/internal/background_thread_inlines.h"

#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
|
|
@ -0,0 +1,122 @@
|
|||
#ifndef JEMALLOC_INTERNAL_INLINES_A_H
|
||||
#define JEMALLOC_INTERNAL_INLINES_A_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
|
||||
malloc_getcpu(void) {
|
||||
assert(have_percpu_arena);
|
||||
#if defined(_WIN32)
|
||||
return GetCurrentProcessorNumber();
|
||||
#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
|
||||
return (malloc_cpuid_t)sched_getcpu();
|
||||
#else
|
||||
not_reached();
|
||||
return -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Return the chosen arena index based on current cpu. */
|
||||
JEMALLOC_ALWAYS_INLINE unsigned
|
||||
percpu_arena_choose(void) {
|
||||
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
|
||||
|
||||
malloc_cpuid_t cpuid = malloc_getcpu();
|
||||
assert(cpuid >= 0);
|
||||
|
||||
unsigned arena_ind;
|
||||
if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
|
||||
2)) {
|
||||
arena_ind = cpuid;
|
||||
} else {
|
||||
assert(opt_percpu_arena == per_phycpu_arena);
|
||||
/* Hyper threads on the same physical CPU share arena. */
|
||||
arena_ind = cpuid - ncpus / 2;
|
||||
}
|
||||
|
||||
return arena_ind;
|
||||
}
|
||||
|
||||
/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
|
||||
JEMALLOC_ALWAYS_INLINE unsigned
|
||||
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
|
||||
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
|
||||
if (mode == per_phycpu_arena && ncpus > 1) {
|
||||
if (ncpus % 2) {
|
||||
/* This likely means a misconfig. */
|
||||
return ncpus / 2 + 1;
|
||||
}
|
||||
return ncpus / 2;
|
||||
} else {
|
||||
return ncpus;
|
||||
}
|
||||
}
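A small standalone restatement of the cpuid-to-arena mapping above may help. The helper name is hypothetical and the numbers assume ncpus == 8 with hyperthreading enabled:

#include <assert.h>

/* Hypothetical helper mirroring the per_phycpu_arena mapping above. */
static unsigned
phycpu_arena_of(unsigned cpuid, unsigned ncpus) {
    /* Sibling hyperthreads cpuid and cpuid + ncpus / 2 share one arena. */
    return (cpuid < ncpus / 2) ? cpuid : cpuid - ncpus / 2;
}

int
main(void) {
    assert(phycpu_arena_of(2, 8) == 2);
    assert(phycpu_arena_of(6, 8) == 2); /* Hyperthread sibling of cpu 2. */
    /* percpu_arena_ind_limit(per_phycpu_arena) would be 8 / 2 == 4 here. */
    return 0;
}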
|
||||
|
||||
static inline arena_t *
|
||||
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
|
||||
arena_t *ret;
|
||||
|
||||
assert(ind < MALLOCX_ARENA_LIMIT);
|
||||
|
||||
ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
|
||||
if (unlikely(ret == NULL)) {
|
||||
if (init_if_missing) {
|
||||
ret = arena_init(tsdn, ind, &arena_config_default);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
tcache_available(tsd_t *tsd) {
|
||||
/*
|
||||
* Thread specific auto tcache might be unavailable if: 1) during tcache
|
||||
* initialization, or 2) disabled through thread.tcache.enabled mallctl
|
||||
* or config options. This check covers all cases.
|
||||
*/
|
||||
if (likely(tsd_tcache_enabled_get(tsd))) {
|
||||
/* Associated arena == NULL implies tcache init in progress. */
|
||||
if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
|
||||
tcache_assert_initialized(tsd_tcachep_get(tsd));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tcache_t *
|
||||
tcache_get(tsd_t *tsd) {
|
||||
if (!tcache_available(tsd)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return tsd_tcachep_get(tsd);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tcache_slow_t *
|
||||
tcache_slow_get(tsd_t *tsd) {
|
||||
if (!tcache_available(tsd)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return tsd_tcache_slowp_get(tsd);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
|
||||
/* arena is the current context. Reentry from a0 is not allowed. */
|
||||
assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
|
||||
tsd_pre_reentrancy_raw(tsd);
|
||||
}
|
||||
|
||||
static inline void
|
||||
post_reentrancy(tsd_t *tsd) {
|
||||
tsd_post_reentrancy_raw(tsd);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
|
|
@ -0,0 +1,103 @@
|
|||
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
|
||||
#define JEMALLOC_INTERNAL_INLINES_B_H
|
||||
|
||||
#include "jemalloc/internal/extent.h"
|
||||
|
||||
static inline void
|
||||
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
|
||||
assert(have_percpu_arena);
|
||||
arena_t *oldarena = tsd_arena_get(tsd);
|
||||
assert(oldarena != NULL);
|
||||
unsigned oldind = arena_ind_get(oldarena);
|
||||
|
||||
if (oldind != cpu) {
|
||||
unsigned newind = cpu;
|
||||
arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
|
||||
assert(newarena != NULL);
|
||||
|
||||
/* Set new arena/tcache associations. */
|
||||
arena_migrate(tsd, oldarena, newarena);
|
||||
tcache_t *tcache = tcache_get(tsd);
|
||||
if (tcache != NULL) {
|
||||
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
|
||||
tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
|
||||
tcache, newarena);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Choose an arena based on a per-thread value. */
|
||||
static inline arena_t *
|
||||
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
|
||||
arena_t *ret;
|
||||
|
||||
if (arena != NULL) {
|
||||
return arena;
|
||||
}
|
||||
|
||||
/* During reentrancy, arena 0 is the safest bet. */
|
||||
if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
|
||||
return arena_get(tsd_tsdn(tsd), 0, true);
|
||||
}
|
||||
|
||||
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
|
||||
if (unlikely(ret == NULL)) {
|
||||
ret = arena_choose_hard(tsd, internal);
|
||||
assert(ret);
|
||||
if (tcache_available(tsd)) {
|
||||
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
|
||||
tcache_t *tcache = tsd_tcachep_get(tsd);
|
||||
if (tcache_slow->arena != NULL) {
|
||||
/* See comments in tsd_tcache_data_init().*/
|
||||
assert(tcache_slow->arena ==
|
||||
arena_get(tsd_tsdn(tsd), 0, false));
|
||||
if (tcache_slow->arena != ret) {
|
||||
tcache_arena_reassociate(tsd_tsdn(tsd),
|
||||
tcache_slow, tcache, ret);
|
||||
}
|
||||
} else {
|
||||
tcache_arena_associate(tsd_tsdn(tsd),
|
||||
tcache_slow, tcache, ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Note that for percpu arena, if the current arena is outside of the
|
||||
* auto percpu arena range, (i.e. thread is assigned to a manually
|
||||
* managed arena), then percpu arena is skipped.
|
||||
*/
|
||||
if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
|
||||
!internal && (arena_ind_get(ret) <
|
||||
percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
|
||||
tsd_tsdn(tsd))) {
|
||||
unsigned ind = percpu_arena_choose();
|
||||
if (arena_ind_get(ret) != ind) {
|
||||
percpu_arena_update(tsd, ind);
|
||||
ret = tsd_arena_get(tsd);
|
||||
}
|
||||
ret->last_thd = tsd_tsdn(tsd);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline arena_t *
|
||||
arena_choose(tsd_t *tsd, arena_t *arena) {
|
||||
return arena_choose_impl(tsd, arena, false);
|
||||
}
|
||||
|
||||
static inline arena_t *
|
||||
arena_ichoose(tsd_t *tsd, arena_t *arena) {
|
||||
return arena_choose_impl(tsd, arena, true);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
arena_is_auto(arena_t *arena) {
|
||||
assert(narenas_auto > 0);
|
||||
|
||||
return (arena_ind_get(arena) < manual_arena_base);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
|
|
@ -0,0 +1,340 @@
|
|||
#ifndef JEMALLOC_INTERNAL_INLINES_C_H
|
||||
#define JEMALLOC_INTERNAL_INLINES_C_H
|
||||
|
||||
#include "jemalloc/internal/hook.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/log.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
#include "jemalloc/internal/thread_event.h"
|
||||
#include "jemalloc/internal/witness.h"
|
||||
|
||||
/*
|
||||
* Translating the names of the 'i' functions:
|
||||
* Abbreviations used in the first part of the function name (before
|
||||
* alloc/dalloc) describe what that function accomplishes:
|
||||
* a: arena (query)
|
||||
* s: size (query, or sized deallocation)
|
||||
* e: extent (query)
|
||||
* p: aligned (allocates)
|
||||
* vs: size (query, without knowing that the pointer is into the heap)
|
||||
* r: rallocx implementation
|
||||
* x: xallocx implementation
|
||||
* Abbreviations used in the second part of the function name (after
|
||||
* alloc/dalloc) describe the arguments it takes
|
||||
* z: whether to return zeroed memory
|
||||
* t: accepts a tcache_t * parameter
|
||||
* m: accepts an arena_t * parameter
|
||||
*/
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE arena_t *
|
||||
iaalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
assert(ptr != NULL);
|
||||
|
||||
return arena_aalloc(tsdn, ptr);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
isalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
assert(ptr != NULL);
|
||||
|
||||
return arena_salloc(tsdn, ptr);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
|
||||
bool is_internal, arena_t *arena, bool slow_path) {
|
||||
void *ret;
|
||||
|
||||
assert(!is_internal || tcache == NULL);
|
||||
assert(!is_internal || arena == NULL || arena_is_auto(arena));
|
||||
if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
}
|
||||
|
||||
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
|
||||
if (config_stats && is_internal && likely(ret != NULL)) {
|
||||
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
|
||||
return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
|
||||
NULL, slow_path);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
|
||||
tcache_t *tcache, bool is_internal, arena_t *arena) {
|
||||
void *ret;
|
||||
|
||||
assert(usize != 0);
|
||||
assert(usize == sz_sa2u(usize, alignment));
|
||||
assert(!is_internal || tcache == NULL);
|
||||
assert(!is_internal || arena == NULL || arena_is_auto(arena));
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
|
||||
ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
|
||||
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
|
||||
if (config_stats && is_internal && likely(ret != NULL)) {
|
||||
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
|
||||
tcache_t *tcache, arena_t *arena) {
|
||||
return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
|
||||
return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
|
||||
tcache_get(tsd), false, NULL);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
ivsalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
return arena_vsalloc(tsdn, ptr);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
|
||||
emap_alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) {
|
||||
assert(ptr != NULL);
|
||||
assert(!is_internal || tcache == NULL);
|
||||
assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
if (config_stats && is_internal) {
|
||||
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
|
||||
}
|
||||
if (!is_internal && !tsdn_null(tsdn) &&
|
||||
tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
|
||||
assert(tcache == NULL);
|
||||
}
|
||||
arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
idalloc(tsd_t *tsd, void *ptr) {
|
||||
idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
|
||||
emap_alloc_ctx_t *alloc_ctx, bool slow_path) {
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
|
||||
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
|
||||
hook_ralloc_args_t *hook_args) {
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
void *p;
|
||||
size_t usize, copysize;
|
||||
|
||||
usize = sz_sa2u(size, alignment);
|
||||
if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
|
||||
return NULL;
|
||||
}
|
||||
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
|
||||
if (p == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* Copy at most size bytes (not size+extra), since the caller has no
|
||||
* expectation that the extra bytes will be reliably preserved.
|
||||
*/
|
||||
copysize = (size < oldsize) ? size : oldsize;
|
||||
memcpy(p, ptr, copysize);
|
||||
hook_invoke_alloc(hook_args->is_realloc
|
||||
? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
|
||||
hook_args->args);
|
||||
hook_invoke_dalloc(hook_args->is_realloc
|
||||
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
|
||||
isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
|
||||
return p;
|
||||
}
|
||||
|
||||
/*
|
||||
* is_realloc threads through the knowledge of whether or not this call comes
|
||||
* from je_realloc (as opposed to je_rallocx); this ensures that we pass the
|
||||
* correct entry point into any hooks.
|
||||
* Note that these functions are all force-inlined, so no actual bool gets
|
||||
* passed-around anywhere.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
|
||||
bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
|
||||
{
|
||||
assert(ptr != NULL);
|
||||
assert(size != 0);
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
|
||||
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
|
||||
!= 0) {
|
||||
/*
|
||||
* Existing object alignment is inadequate; allocate new space
|
||||
* and copy.
|
||||
*/
|
||||
return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
|
||||
zero, tcache, arena, hook_args);
|
||||
}
|
||||
|
||||
return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
|
||||
tcache, hook_args);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
|
||||
bool zero, hook_ralloc_args_t *hook_args) {
|
||||
return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
|
||||
tcache_get(tsd), NULL, hook_args);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
size_t alignment, bool zero, size_t *newsize) {
|
||||
assert(ptr != NULL);
|
||||
assert(size != 0);
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
|
||||
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
|
||||
!= 0) {
|
||||
/* Existing object alignment is inadequate. */
|
||||
*newsize = oldsize;
|
||||
return true;
|
||||
}
|
||||
|
||||
return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
|
||||
newsize);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after,
|
||||
cache_bin_t *bin, void *ret) {
|
||||
thread_allocated_set(tsd, allocated_after);
|
||||
if (config_stats) {
|
||||
bin->tstats.nrequests++;
|
||||
}
|
||||
|
||||
LOG("core.malloc.exit", "result: %p", ret);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
malloc_initialized(void) {
|
||||
return (malloc_init_state == malloc_init_initialized);
|
||||
}
|
||||
|
||||
/*
|
||||
* malloc() fastpath. Included here so that we can inline it into operator new;
|
||||
* function call overhead there is non-negligible as a fraction of total CPU in
|
||||
* allocation-heavy C++ programs. We take the fallback alloc to allow malloc
|
||||
* (which can return NULL) to differ in its behavior from operator new (which
|
||||
* can't). It matches the signature of malloc / operator new so that we can
|
||||
* tail-call the fallback allocator, allowing us to avoid setting up the call
|
||||
* frame in the common case.
|
||||
*
|
||||
* Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
|
||||
* tcache. If either of these is false, we tail-call to the slowpath,
|
||||
* malloc_default(). Tail-calling is used to avoid any caller-saved
|
||||
* registers.
|
||||
*
|
||||
* fastpath supports ticker and profiling, both of which will also
|
||||
* tail-call to the slowpath if they fire.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
|
||||
LOG("core.malloc.entry", "size: %zu", size);
|
||||
if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
|
||||
return fallback_alloc(size);
|
||||
}
|
||||
|
||||
tsd_t *tsd = tsd_get(false);
|
||||
if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) {
|
||||
return fallback_alloc(size);
|
||||
}
|
||||
/*
|
||||
* The code below, up to the branch checking the next_event threshold, may
|
||||
* execute before malloc_init(), in which case the threshold is 0 to
|
||||
* trigger slow path and initialization.
|
||||
*
|
||||
* Note that when uninitialized, only the fast-path variants of the sz /
|
||||
* tsd facilities may be called.
|
||||
*/
|
||||
szind_t ind;
|
||||
/*
|
||||
* The thread_allocated counter in tsd serves as a general purpose
|
||||
* accumulator for bytes of allocation to trigger different types of
|
||||
* events. usize is always needed to advance thread_allocated, though
|
||||
* it's not always needed in the core allocation logic.
|
||||
*/
|
||||
size_t usize;
|
||||
sz_size2index_usize_fastpath(size, &ind, &usize);
|
||||
/* Fast path relies on size being a bin. */
|
||||
assert(ind < SC_NBINS);
|
||||
assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) &&
|
||||
(size <= SC_SMALL_MAXCLASS));
|
||||
|
||||
uint64_t allocated, threshold;
|
||||
te_malloc_fastpath_ctx(tsd, &allocated, &threshold);
|
||||
uint64_t allocated_after = allocated + usize;
|
||||
/*
|
||||
* The ind and usize might be uninitialized (or partially) before
|
||||
* malloc_init(). The assertions check for: 1) full correctness (usize
|
||||
* & ind) when initialized; and 2) guaranteed slow-path (threshold == 0)
|
||||
* when !initialized.
|
||||
*/
|
||||
if (!malloc_initialized()) {
|
||||
assert(threshold == 0);
|
||||
} else {
|
||||
assert(ind == sz_size2index(size));
|
||||
assert(usize > 0 && usize == sz_index2size(ind));
|
||||
}
|
||||
/*
|
||||
* Check for events and tsd non-nominal (fast_threshold will be set to
|
||||
* 0) in a single branch.
|
||||
*/
|
||||
if (unlikely(allocated_after >= threshold)) {
|
||||
return fallback_alloc(size);
|
||||
}
|
||||
assert(tsd_fast(tsd));
|
||||
|
||||
tcache_t *tcache = tsd_tcachep_get(tsd);
|
||||
assert(tcache == tcache_get(tsd));
|
||||
cache_bin_t *bin = &tcache->bins[ind];
|
||||
bool tcache_success;
|
||||
void *ret;
|
||||
|
||||
/*
|
||||
* We split up the code this way so that redundant low-water
|
||||
* computation doesn't happen on the (more common) case in which we
|
||||
* don't touch the low water mark. The compiler won't do this
|
||||
* duplication on its own.
|
||||
*/
|
||||
ret = cache_bin_alloc_easy(bin, &tcache_success);
|
||||
if (tcache_success) {
|
||||
fastpath_success_finish(tsd, allocated_after, bin, ret);
|
||||
return ret;
|
||||
}
|
||||
ret = cache_bin_alloc(bin, &tcache_success);
|
||||
if (tcache_success) {
|
||||
fastpath_success_finish(tsd, allocated_after, bin, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return fallback_alloc(size);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
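To make the fastpath contract above concrete: a public entry point passes its own slow path as the fallback, so the failure cases tail-call straight into it. This is only a sketch assuming this header's context (the wrapper name is hypothetical; the real wrappers live in the jemalloc C and C++ front ends):

/* Sketch of a malloc-shaped wrapper around the fastpath above. */
void *
example_malloc(size_t size) {
    /* malloc_default() is the slow path declared in the externs header. */
    return imalloc_fastpath(size, &malloc_default);
}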
|
|
@ -0,0 +1,111 @@
|
|||
#ifndef JEMALLOC_INTERNAL_MACROS_H
|
||||
#define JEMALLOC_INTERNAL_MACROS_H
|
||||
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
# define JEMALLOC_ALWAYS_INLINE static inline
|
||||
#else
|
||||
# ifdef _MSC_VER
|
||||
# define JEMALLOC_ALWAYS_INLINE static __forceinline
|
||||
# else
|
||||
# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
|
||||
# endif
|
||||
#endif
|
||||
#ifdef _MSC_VER
|
||||
# define inline _inline
|
||||
#endif
|
||||
|
||||
#define UNUSED JEMALLOC_ATTR(unused)
|
||||
|
||||
#define ZU(z) ((size_t)z)
|
||||
#define ZD(z) ((ssize_t)z)
|
||||
#define QU(q) ((uint64_t)q)
|
||||
#define QD(q) ((int64_t)q)
|
||||
|
||||
#define KZU(z) ZU(z##ULL)
|
||||
#define KZD(z) ZD(z##LL)
|
||||
#define KQU(q) QU(q##ULL)
|
||||
#define KQD(q) QD(q##LL)
|
||||
|
||||
#ifndef __DECONST
|
||||
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
|
||||
#endif
|
||||
|
||||
#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
|
||||
# define restrict
|
||||
#endif
|
||||
|
||||
/* Various function pointers are static and immutable except during testing. */
|
||||
#ifdef JEMALLOC_JET
|
||||
# define JET_MUTABLE
|
||||
#else
|
||||
# define JET_MUTABLE const
|
||||
#endif
|
||||
|
||||
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
|
||||
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
|
||||
|
||||
/* Diagnostic suppression macros */
|
||||
#if defined(_MSC_VER) && !defined(__clang__)
|
||||
# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
|
||||
# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
|
||||
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
||||
/* #pragma GCC diagnostic first appeared in gcc 4.6. */
|
||||
#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \
|
||||
(__GNUC_MINOR__ > 5)))) || defined(__clang__)
|
||||
/*
|
||||
* The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
|
||||
* diagnostic suppression macros and should not be used anywhere else.
|
||||
*/
|
||||
# define JEMALLOC_PRAGMA__(X) _Pragma(#X)
|
||||
# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
|
||||
# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \
|
||||
JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
|
||||
|
||||
/*
|
||||
* The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and
|
||||
* all clang versions up to version 7 (currently trunk, unreleased). This macro
|
||||
* suppresses the warning for the affected compiler versions only.
|
||||
*/
|
||||
# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \
|
||||
defined(__clang__)
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
|
||||
# else
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
|
||||
# endif
|
||||
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
|
||||
# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
|
||||
# else
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
|
||||
# endif
|
||||
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
|
||||
JEMALLOC_DIAGNOSTIC_PUSH \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
|
||||
#else
|
||||
# define JEMALLOC_DIAGNOSTIC_PUSH
|
||||
# define JEMALLOC_DIAGNOSTIC_POP
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE(W)
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
|
||||
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Disables spurious diagnostics for all headers. Since these headers are not
|
||||
* included by users directly, it does not affect their diagnostic settings.
|
||||
*/
|
||||
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MACROS_H */
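As a usage illustration (not part of the original header), the push/ignore/pop macros above wrap a single definition so that the surrounding code keeps its normal diagnostic settings; the variable name is made up for the example:

JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
/* Partially-initialized aggregate that would otherwise warn on old compilers. */
static const struct { int a; int b; } example_zeroed = {0};
JEMALLOC_DIAGNOSTIC_POP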
|
|
@ -0,0 +1,130 @@
|
|||
#ifndef JEMALLOC_INTERNAL_TYPES_H
|
||||
#define JEMALLOC_INTERNAL_TYPES_H
|
||||
|
||||
#include "jemalloc/internal/quantum.h"
|
||||
|
||||
/* Processor / core id type. */
|
||||
typedef int malloc_cpuid_t;
|
||||
|
||||
/* When realloc(non-null-ptr, 0) is called, what happens? */
|
||||
enum zero_realloc_action_e {
|
||||
/* Realloc(ptr, 0) is free(ptr); return malloc(0); */
|
||||
zero_realloc_action_alloc = 0,
|
||||
/* Realloc(ptr, 0) is free(ptr); */
|
||||
zero_realloc_action_free = 1,
|
||||
/* Realloc(ptr, 0) aborts. */
|
||||
zero_realloc_action_abort = 2
|
||||
};
|
||||
typedef enum zero_realloc_action_e zero_realloc_action_t;
|
||||
|
||||
/* Signature of write callback. */
|
||||
typedef void (write_cb_t)(void *, const char *);
|
||||
|
||||
enum malloc_init_e {
|
||||
malloc_init_uninitialized = 3,
|
||||
malloc_init_a0_initialized = 2,
|
||||
malloc_init_recursible = 1,
|
||||
malloc_init_initialized = 0 /* Common case --> jnz. */
|
||||
};
|
||||
typedef enum malloc_init_e malloc_init_t;
|
||||
|
||||
/*
|
||||
* Flags bits:
|
||||
*
|
||||
* a: arena
|
||||
* t: tcache
|
||||
* 0: unused
|
||||
* z: zero
|
||||
* n: alignment
|
||||
*
|
||||
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
|
||||
*/
|
||||
#define MALLOCX_ARENA_BITS 12
|
||||
#define MALLOCX_TCACHE_BITS 12
|
||||
#define MALLOCX_LG_ALIGN_BITS 6
|
||||
#define MALLOCX_ARENA_SHIFT 20
|
||||
#define MALLOCX_TCACHE_SHIFT 8
|
||||
#define MALLOCX_ARENA_MASK \
|
||||
(((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
|
||||
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
|
||||
#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
|
||||
#define MALLOCX_TCACHE_MASK \
|
||||
(((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
|
||||
#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
|
||||
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
|
||||
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
|
||||
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
|
||||
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
|
||||
#define MALLOCX_ALIGN_GET(flags) \
|
||||
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
|
||||
#define MALLOCX_ZERO_GET(flags) \
|
||||
((bool)(flags & MALLOCX_ZERO))
|
||||
|
||||
#define MALLOCX_TCACHE_GET(flags) \
|
||||
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
|
||||
#define MALLOCX_ARENA_GET(flags) \
|
||||
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
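A worked example of the bit layout above, assuming the public MALLOCX_* encoding macros from jemalloc.h are in scope alongside this header (the function name is illustrative only):

#include <assert.h>

static void
mallocx_flags_example(void) {
    int flags = MALLOCX_ARENA(3) | MALLOCX_LG_ALIGN(4) | MALLOCX_ZERO;
    /* The accessors above undo the +1 arena bias and expand lg(align). */
    assert(MALLOCX_ARENA_GET(flags) == 3);
    assert(MALLOCX_ALIGN_GET(flags) == 16); /* 1 << 4 */
    assert(MALLOCX_ZERO_GET(flags));
}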
|
||||
|
||||
/* Smallest size class to support. */
|
||||
#define TINY_MIN (1U << LG_TINY_MIN)
|
||||
|
||||
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
|
||||
#define LONG_MASK (LONG - 1)
|
||||
|
||||
/* Return the smallest long multiple that is >= a. */
|
||||
#define LONG_CEILING(a) \
|
||||
(((a) + LONG_MASK) & ~LONG_MASK)
|
||||
|
||||
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
|
||||
#define PTR_MASK (SIZEOF_PTR - 1)
|
||||
|
||||
/* Return the smallest (void *) multiple that is >= a. */
|
||||
#define PTR_CEILING(a) \
|
||||
(((a) + PTR_MASK) & ~PTR_MASK)
|
||||
|
||||
/*
|
||||
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
|
||||
* In addition, this controls the spacing of cacheline-spaced size classes.
|
||||
*
|
||||
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
|
||||
* only handle raw constants.
|
||||
*/
|
||||
#define LG_CACHELINE 6
|
||||
#define CACHELINE 64
|
||||
#define CACHELINE_MASK (CACHELINE - 1)
|
||||
|
||||
/* Return the smallest cacheline multiple that is >= s. */
|
||||
#define CACHELINE_CEILING(s) \
|
||||
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
|
||||
|
||||
/* Return the nearest aligned address at or below a. */
|
||||
#define ALIGNMENT_ADDR2BASE(a, alignment) \
|
||||
((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
|
||||
|
||||
/* Return the offset between a and the nearest aligned address at or below a. */
|
||||
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
|
||||
((size_t)((uintptr_t)(a) & (alignment - 1)))
|
||||
|
||||
/* Return the smallest alignment multiple that is >= s. */
|
||||
#define ALIGNMENT_CEILING(s, alignment) \
|
||||
(((s) + (alignment - 1)) & ((~(alignment)) + 1))
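Worked values for the helpers above (the alignment must be a power of two for the bit masking to hold):

/*
 * Worked examples:
 *   ALIGNMENT_CEILING(100, 16)                == 112
 *   ALIGNMENT_ADDR2BASE((void *)0x1234, 16)   == (void *)0x1230
 *   ALIGNMENT_ADDR2OFFSET((void *)0x1234, 16) == 0x4
 */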
|
||||
|
||||
/* Declare a variable-length array. */
|
||||
#if __STDC_VERSION__ < 199901L
|
||||
# ifdef _MSC_VER
|
||||
# include <malloc.h>
|
||||
# define alloca _alloca
|
||||
# else
|
||||
# ifdef JEMALLOC_HAS_ALLOCA_H
|
||||
# include <alloca.h>
|
||||
# else
|
||||
# include <stdlib.h>
|
||||
# endif
|
||||
# endif
|
||||
# define VARIABLE_ARRAY(type, name, count) \
|
||||
type *name = alloca(sizeof(type) * (count))
|
||||
#else
|
||||
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_TYPES_H */
|
263
BeefRT/JEMalloc/include/jemalloc/internal/jemalloc_preamble.h
Normal file
263
BeefRT/JEMalloc/include/jemalloc/internal/jemalloc_preamble.h
Normal file
|
@ -0,0 +1,263 @@
|
|||
#ifndef JEMALLOC_PREAMBLE_H
|
||||
#define JEMALLOC_PREAMBLE_H
|
||||
|
||||
#include "jemalloc_internal_defs.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_decls.h"
|
||||
|
||||
#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
|
||||
#include <sys/ktrace.h>
|
||||
# if defined(JEMALLOC_UTRACE)
|
||||
# define UTRACE_CALL(p, l) utrace(p, l)
|
||||
# else
|
||||
# define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
|
||||
# define JEMALLOC_UTRACE
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#define JEMALLOC_NO_DEMANGLE
|
||||
#ifdef JEMALLOC_JET
|
||||
# undef JEMALLOC_IS_MALLOC
|
||||
# define JEMALLOC_N(n) jet_##n
|
||||
# include "jemalloc/internal/public_namespace.h"
|
||||
# define JEMALLOC_NO_RENAME
|
||||
# include "../jemalloc.h"
|
||||
# undef JEMALLOC_NO_RENAME
|
||||
#else
|
||||
# define JEMALLOC_N(n) je_##n
|
||||
# include "../jemalloc.h"
|
||||
#endif
|
||||
|
||||
#if defined(JEMALLOC_OSATOMIC)
|
||||
#include <libkern/OSAtomic.h>
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_ZONE
|
||||
#include <mach/mach_error.h>
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach/vm_map.h>
|
||||
#endif
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_macros.h"
|
||||
|
||||
/*
|
||||
* Note that the ordering matters here; the hook itself is name-mangled. We
|
||||
* want the inclusion of hooks to happen early, so that we hook as much as
|
||||
* possible.
|
||||
*/
|
||||
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
|
||||
# ifndef JEMALLOC_JET
|
||||
# include "jemalloc/internal/private_namespace.h"
|
||||
# else
|
||||
# include "jemalloc/internal/private_namespace_jet.h"
|
||||
# endif
|
||||
#endif
|
||||
#include "jemalloc/internal/test_hooks.h"
|
||||
|
||||
#ifdef JEMALLOC_DEFINE_MADVISE_FREE
|
||||
# define JEMALLOC_MADV_FREE 8
|
||||
#endif
|
||||
|
||||
static const bool config_debug =
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_dss =
|
||||
#ifdef JEMALLOC_DSS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_madvise_huge =
|
||||
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_fill =
|
||||
#ifdef JEMALLOC_FILL
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_lazy_lock =
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
|
||||
static const bool config_prof =
|
||||
#ifdef JEMALLOC_PROF
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_prof_libgcc =
|
||||
#ifdef JEMALLOC_PROF_LIBGCC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_prof_libunwind =
|
||||
#ifdef JEMALLOC_PROF_LIBUNWIND
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool maps_coalesce =
|
||||
#ifdef JEMALLOC_MAPS_COALESCE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_stats =
|
||||
#ifdef JEMALLOC_STATS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_tls =
|
||||
#ifdef JEMALLOC_TLS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_utrace =
|
||||
#ifdef JEMALLOC_UTRACE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_xmalloc =
|
||||
#ifdef JEMALLOC_XMALLOC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_cache_oblivious =
|
||||
#ifdef JEMALLOC_CACHE_OBLIVIOUS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Undocumented, for jemalloc development use only at the moment. See the note
|
||||
* in jemalloc/internal/log.h.
|
||||
*/
|
||||
static const bool config_log =
|
||||
#ifdef JEMALLOC_LOG
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Are extra safety checks enabled; things like checking the size of sized
|
||||
* deallocations, double-frees, etc.
|
||||
*/
|
||||
static const bool config_opt_safety_checks =
|
||||
#ifdef JEMALLOC_OPT_SAFETY_CHECKS
|
||||
true
|
||||
#elif defined(JEMALLOC_DEBUG)
|
||||
/*
|
||||
* This lets us only guard safety checks by one flag instead of two; fast
|
||||
* checks can guard solely by config_opt_safety_checks and run in debug mode
|
||||
* too.
|
||||
*/
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
/*
|
||||
* Extra debugging of sized deallocations too onerous to be included in the
|
||||
* general safety checks.
|
||||
*/
|
||||
static const bool config_opt_size_checks =
|
||||
#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
static const bool config_uaf_detection =
|
||||
#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
/* Whether or not the C++ extensions are enabled. */
|
||||
static const bool config_enable_cxx =
|
||||
#ifdef JEMALLOC_ENABLE_CXX
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
|
||||
/* Currently percpu_arena depends on sched_getcpu. */
|
||||
#define JEMALLOC_PERCPU_ARENA
|
||||
#endif
|
||||
static const bool have_percpu_arena =
|
||||
#ifdef JEMALLOC_PERCPU_ARENA
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Undocumented, and not recommended; the application should take full
|
||||
* responsibility for tracking provenance.
|
||||
*/
|
||||
static const bool force_ivsalloc =
|
||||
#ifdef JEMALLOC_FORCE_IVSALLOC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_background_thread =
|
||||
#ifdef JEMALLOC_BACKGROUND_THREAD
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_high_res_timer =
|
||||
#ifdef JEMALLOC_HAVE_CLOCK_REALTIME
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
static const bool have_memcntl =
|
||||
#ifdef JEMALLOC_HAVE_MEMCNTL
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
#endif /* JEMALLOC_PREAMBLE_H */
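The point of expressing these settings as compile-time const bools rather than bare #ifdefs is that guarded code always parses and type-checks, while the optimizer drops the disabled branch. A minimal illustration (the function is hypothetical):

static void
example_bump_stat(unsigned *counter) {
    if (config_stats) {
        /* Dead code when JEMALLOC_STATS is not defined; compiled out. */
        (*counter)++;
    }
}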
|
263
BeefRT/JEMalloc/include/jemalloc/internal/jemalloc_preamble.h.in
Normal file
263
BeefRT/JEMalloc/include/jemalloc/internal/jemalloc_preamble.h.in
Normal file
|
@ -0,0 +1,263 @@
|
|||
#ifndef JEMALLOC_PREAMBLE_H
|
||||
#define JEMALLOC_PREAMBLE_H
|
||||
|
||||
#include "jemalloc_internal_defs.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_decls.h"
|
||||
|
||||
#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
|
||||
#include <sys/ktrace.h>
|
||||
# if defined(JEMALLOC_UTRACE)
|
||||
# define UTRACE_CALL(p, l) utrace(p, l)
|
||||
# else
|
||||
# define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
|
||||
# define JEMALLOC_UTRACE
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#define JEMALLOC_NO_DEMANGLE
|
||||
#ifdef JEMALLOC_JET
|
||||
# undef JEMALLOC_IS_MALLOC
|
||||
# define JEMALLOC_N(n) jet_##n
|
||||
# include "jemalloc/internal/public_namespace.h"
|
||||
# define JEMALLOC_NO_RENAME
|
||||
# include "../jemalloc@install_suffix@.h"
|
||||
# undef JEMALLOC_NO_RENAME
|
||||
#else
|
||||
# define JEMALLOC_N(n) @private_namespace@##n
|
||||
# include "../jemalloc@install_suffix@.h"
|
||||
#endif
|
||||
|
||||
#if defined(JEMALLOC_OSATOMIC)
|
||||
#include <libkern/OSAtomic.h>
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_ZONE
|
||||
#include <mach/mach_error.h>
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach/vm_map.h>
|
||||
#endif
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_macros.h"
|
||||
|
||||
/*
|
||||
* Note that the ordering matters here; the hook itself is name-mangled. We
|
||||
* want the inclusion of hooks to happen early, so that we hook as much as
|
||||
* possible.
|
||||
*/
|
||||
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
|
||||
# ifndef JEMALLOC_JET
|
||||
# include "jemalloc/internal/private_namespace.h"
|
||||
# else
|
||||
# include "jemalloc/internal/private_namespace_jet.h"
|
||||
# endif
|
||||
#endif
|
||||
#include "jemalloc/internal/test_hooks.h"
|
||||
|
||||
#ifdef JEMALLOC_DEFINE_MADVISE_FREE
|
||||
# define JEMALLOC_MADV_FREE 8
|
||||
#endif
|
||||
|
||||
static const bool config_debug =
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_dss =
|
||||
#ifdef JEMALLOC_DSS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_madvise_huge =
|
||||
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_fill =
|
||||
#ifdef JEMALLOC_FILL
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_lazy_lock =
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
|
||||
static const bool config_prof =
|
||||
#ifdef JEMALLOC_PROF
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_prof_libgcc =
|
||||
#ifdef JEMALLOC_PROF_LIBGCC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_prof_libunwind =
|
||||
#ifdef JEMALLOC_PROF_LIBUNWIND
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool maps_coalesce =
|
||||
#ifdef JEMALLOC_MAPS_COALESCE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_stats =
|
||||
#ifdef JEMALLOC_STATS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_tls =
|
||||
#ifdef JEMALLOC_TLS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_utrace =
|
||||
#ifdef JEMALLOC_UTRACE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_xmalloc =
|
||||
#ifdef JEMALLOC_XMALLOC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_cache_oblivious =
|
||||
#ifdef JEMALLOC_CACHE_OBLIVIOUS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Undocumented, for jemalloc development use only at the moment. See the note
|
||||
* in jemalloc/internal/log.h.
|
||||
*/
|
||||
static const bool config_log =
|
||||
#ifdef JEMALLOC_LOG
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Are extra safety checks enabled; things like checking the size of sized
|
||||
* deallocations, double-frees, etc.
|
||||
*/
|
||||
static const bool config_opt_safety_checks =
|
||||
#ifdef JEMALLOC_OPT_SAFETY_CHECKS
|
||||
true
|
||||
#elif defined(JEMALLOC_DEBUG)
|
||||
/*
|
||||
* This lets us only guard safety checks by one flag instead of two; fast
|
||||
* checks can guard solely by config_opt_safety_checks and run in debug mode
|
||||
* too.
|
||||
*/
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
/*
|
||||
* Extra debugging of sized deallocations too onerous to be included in the
|
||||
* general safety checks.
|
||||
*/
|
||||
static const bool config_opt_size_checks =
|
||||
#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
static const bool config_uaf_detection =
|
||||
#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
/* Whether or not the C++ extensions are enabled. */
|
||||
static const bool config_enable_cxx =
|
||||
#ifdef JEMALLOC_ENABLE_CXX
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
|
||||
/* Currently percpu_arena depends on sched_getcpu. */
|
||||
#define JEMALLOC_PERCPU_ARENA
|
||||
#endif
|
||||
static const bool have_percpu_arena =
|
||||
#ifdef JEMALLOC_PERCPU_ARENA
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Undocumented, and not recommended; the application should take full
|
||||
* responsibility for tracking provenance.
|
||||
*/
|
||||
static const bool force_ivsalloc =
|
||||
#ifdef JEMALLOC_FORCE_IVSALLOC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_background_thread =
|
||||
#ifdef JEMALLOC_BACKGROUND_THREAD
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_high_res_timer =
|
||||
#ifdef JEMALLOC_HAVE_CLOCK_REALTIME
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
static const bool have_memcntl =
|
||||
#ifdef JEMALLOC_HAVE_MEMCNTL
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
#endif /* JEMALLOC_PREAMBLE_H */
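An aside on the config_* pattern above: exposing each build option as a static const bool (rather than a bare #ifdef) lets the rest of the code branch with ordinary if statements while the compiler still removes the dead path. A minimal standalone sketch of the same idiom; every demo_* name is invented here and this is not part of the jemalloc sources:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical feature macro; a real build would get it from configure. */
/* #define DEMO_FEATURE_STATS */

static const bool demo_config_stats =
#ifdef DEMO_FEATURE_STATS
    true
#else
    false
#endif
    ;

static void
demo_record_alloc(size_t size) {
    /*
     * A plain 'if' instead of '#ifdef': the branch is dead code when the
     * constant is false, so it costs nothing in builds without stats.
     */
    if (demo_config_stats) {
        printf("allocated %zu bytes\n", size);
    }
}

int
main(void) {
    demo_record_alloc(64);
    return 0;
}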
24
BeefRT/JEMalloc/include/jemalloc/internal/large_externs.h
Normal file
@ -0,0 +1,24 @@
#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
|
||||
|
||||
#include "jemalloc/internal/hook.h"
|
||||
|
||||
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
|
||||
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
|
||||
bool zero);
|
||||
bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
|
||||
size_t usize_max, bool zero);
|
||||
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
|
||||
size_t alignment, bool zero, tcache_t *tcache,
|
||||
hook_ralloc_args_t *hook_args);
|
||||
|
||||
void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
|
||||
void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
|
||||
void large_dalloc(tsdn_t *tsdn, edata_t *edata);
|
||||
size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
|
||||
void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
|
||||
bool reset_recent);
|
||||
void large_prof_tctx_reset(edata_t *edata);
|
||||
void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
204
BeefRT/JEMalloc/include/jemalloc/internal/lockedint.h
Normal file
@ -0,0 +1,204 @@
#ifndef JEMALLOC_INTERNAL_LOCKEDINT_H
|
||||
#define JEMALLOC_INTERNAL_LOCKEDINT_H
|
||||
|
||||
/*
|
||||
* In those architectures that support 64-bit atomics, we use atomic updates for
|
||||
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
|
||||
* externally.
|
||||
*/
|
||||
|
||||
typedef struct locked_u64_s locked_u64_t;
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
struct locked_u64_s {
|
||||
atomic_u64_t val;
|
||||
};
|
||||
#else
|
||||
/* Must hold the associated mutex. */
|
||||
struct locked_u64_s {
|
||||
uint64_t val;
|
||||
};
|
||||
#endif
|
||||
|
||||
typedef struct locked_zu_s locked_zu_t;
|
||||
struct locked_zu_s {
|
||||
atomic_zu_t val;
|
||||
};
|
||||
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
|
||||
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \
|
||||
malloc_mutex_init(&(mu), name, rank, rank_mode)
|
||||
# define LOCKEDINT_MTX(mtx) (&(mtx))
|
||||
# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
|
||||
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
|
||||
# define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
|
||||
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \
|
||||
malloc_mutex_postfork_parent(tsdn, &(mu))
|
||||
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \
|
||||
malloc_mutex_postfork_child(tsdn, &(mu))
|
||||
#else
|
||||
# define LOCKEDINT_MTX_DECLARE(name)
|
||||
# define LOCKEDINT_MTX(mtx) NULL
|
||||
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
|
||||
# define LOCKEDINT_MTX_LOCK(tsdn, mu)
|
||||
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
|
||||
# define LOCKEDINT_MTX_PREFORK(tsdn, mu)
|
||||
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
|
||||
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
|
||||
#else
|
||||
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \
|
||||
malloc_mutex_assert_owner(tsdn, (mtx))
|
||||
#endif
|
||||
|
||||
static inline uint64_t
|
||||
locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) {
|
||||
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
return atomic_load_u64(&p->val, ATOMIC_RELAXED);
|
||||
#else
|
||||
return p->val;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
|
||||
uint64_t x) {
|
||||
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
|
||||
#else
|
||||
p->val += x;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
|
||||
uint64_t x) {
|
||||
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED);
|
||||
assert(r - x <= r);
|
||||
#else
|
||||
p->val -= x;
|
||||
assert(p->val + x >= p->val);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Increment and take modulus. Returns whether the modulo made any change. */
|
||||
static inline bool
|
||||
locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
|
||||
const uint64_t x, const uint64_t modulus) {
|
||||
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
|
||||
uint64_t before, after;
|
||||
bool overflow;
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
|
||||
do {
|
||||
after = before + x;
|
||||
assert(after >= before);
|
||||
overflow = (after >= modulus);
|
||||
if (overflow) {
|
||||
after %= modulus;
|
||||
}
|
||||
} while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
|
||||
ATOMIC_RELAXED, ATOMIC_RELAXED));
|
||||
#else
|
||||
before = p->val;
|
||||
after = before + x;
|
||||
overflow = (after >= modulus);
|
||||
if (overflow) {
|
||||
after %= modulus;
|
||||
}
|
||||
p->val = after;
|
||||
#endif
|
||||
return overflow;
|
||||
}
|
||||
|
||||
/*
|
||||
* Non-atomically sets *dst += src. *dst needs external synchronization.
|
||||
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
|
||||
* the types here are atomic).
|
||||
*/
|
||||
static inline void
|
||||
locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED);
|
||||
atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED);
|
||||
#else
|
||||
dst->val += src;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
locked_read_u64_unsynchronized(locked_u64_t *p) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
return atomic_load_u64(&p->val, ATOMIC_RELAXED);
|
||||
#else
|
||||
return p->val;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
|
||||
#else
|
||||
p->val = x;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) {
|
||||
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
return atomic_load_zu(&p->val, ATOMIC_RELAXED);
|
||||
#else
|
||||
return atomic_load_zu(&p->val, ATOMIC_RELAXED);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
|
||||
size_t x) {
|
||||
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED);
|
||||
#else
|
||||
size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
|
||||
atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
|
||||
size_t x) {
|
||||
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED);
|
||||
assert(r - x <= r);
|
||||
#else
|
||||
size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
|
||||
atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Like the _u64 variant, needs an externally synchronized *dst. */
|
||||
static inline void
|
||||
locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) {
|
||||
size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED);
|
||||
atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
/*
|
||||
* Unlike the _u64 variant, this is safe to call unconditionally.
|
||||
*/
|
||||
static inline size_t
|
||||
locked_read_atomic_zu(locked_zu_t *p) {
|
||||
return atomic_load_zu(&p->val, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */
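A self-contained sketch of the locked_u64_t idea above, written against C11 <stdatomic.h> rather than jemalloc's own atomic wrappers: when 64-bit atomics are available the counter is a relaxed atomic, otherwise it is a plain integer that the caller must guard with the associated mutex. The DEMO_* and demo_* names are invented for illustration:

#include <stdatomic.h>
#include <stdint.h>

#define DEMO_HAVE_ATOMIC_U64 1  /* Pretend configure detected 64-bit atomics. */

#if DEMO_HAVE_ATOMIC_U64
typedef struct { _Atomic uint64_t val; } demo_locked_u64_t;

static inline void
demo_locked_inc_u64(demo_locked_u64_t *p, uint64_t x) {
    /* Relaxed is enough: these are statistics, not synchronization. */
    atomic_fetch_add_explicit(&p->val, x, memory_order_relaxed);
}
#else
typedef struct { uint64_t val; } demo_locked_u64_t;

static inline void
demo_locked_inc_u64(demo_locked_u64_t *p, uint64_t x) {
    /* Caller must hold the mutex that protects this counter. */
    p->val += x;
}
#endif

int
main(void) {
    demo_locked_u64_t nrequests = {0};
    demo_locked_inc_u64(&nrequests, 1);
    return 0;
}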
115
BeefRT/JEMalloc/include/jemalloc/internal/log.h
Normal file
@ -0,0 +1,115 @@
#ifndef JEMALLOC_INTERNAL_LOG_H
|
||||
#define JEMALLOC_INTERNAL_LOG_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
#ifdef JEMALLOC_LOG
|
||||
# define JEMALLOC_LOG_VAR_BUFSIZE 1000
|
||||
#else
|
||||
# define JEMALLOC_LOG_VAR_BUFSIZE 1
|
||||
#endif
|
||||
|
||||
#define JEMALLOC_LOG_BUFSIZE 4096
|
||||
|
||||
/*
|
||||
* The log malloc_conf option is a '|'-delimited list of log_var name segments
|
||||
* which should be logged. The names are themselves hierarchical, with '.' as
|
||||
* the delimiter (a "segment" is just a prefix in the log namespace). So, if
|
||||
* you have:
|
||||
*
|
||||
* log("arena", "log msg for arena"); // 1
|
||||
* log("arena.a", "log msg for arena.a"); // 2
|
||||
* log("arena.b", "log msg for arena.b"); // 3
|
||||
* log("arena.a.a", "log msg for arena.a.a"); // 4
|
||||
* log("extent.a", "log msg for extent.a"); // 5
|
||||
* log("extent.b", "log msg for extent.b"); // 6
|
||||
*
|
||||
* And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
|
||||
* 6 will print at runtime. You can enable logging from all log vars by
|
||||
* writing "log=.".
|
||||
*
|
||||
 * None of this should be regarded as a stable API right now. It's intended
|
||||
* as a debugging interface, to let us keep around some of our printf-debugging
|
||||
* statements.
|
||||
*/
|
||||
|
||||
extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
|
||||
extern atomic_b_t log_init_done;
|
||||
|
||||
typedef struct log_var_s log_var_t;
|
||||
struct log_var_s {
|
||||
/*
|
||||
* Lowest bit is "inited", second lowest is "enabled". Putting them in
|
||||
* a single word lets us avoid any fences on weak architectures.
|
||||
*/
|
||||
atomic_u_t state;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
#define LOG_NOT_INITIALIZED 0U
|
||||
#define LOG_INITIALIZED_NOT_ENABLED 1U
|
||||
#define LOG_ENABLED 2U
|
||||
|
||||
#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
|
||||
|
||||
/*
|
||||
* Returns the value we should assume for state (which is not necessarily
|
||||
* accurate; if logging is done before logging has finished initializing, then
|
||||
* we default to doing the safe thing by logging everything).
|
||||
*/
|
||||
unsigned log_var_update_state(log_var_t *log_var);
|
||||
|
||||
/* We factor out the metadata management to allow us to test more easily. */
|
||||
#define log_do_begin(log_var) \
|
||||
if (config_log) { \
|
||||
unsigned log_state = atomic_load_u(&(log_var).state, \
|
||||
ATOMIC_RELAXED); \
|
||||
if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
|
||||
log_state = log_var_update_state(&(log_var)); \
|
||||
assert(log_state != LOG_NOT_INITIALIZED); \
|
||||
} \
|
||||
if (log_state == LOG_ENABLED) { \
|
||||
{
|
||||
/* User code executes here. */
|
||||
#define log_do_end(log_var) \
|
||||
} \
|
||||
} \
|
||||
}
|
||||
|
||||
/*
|
||||
* MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
|
||||
* preprocessing. To work around this, we take all potential extra arguments in
|
||||
* a var-args functions. Since a varargs macro needs at least one argument in
|
||||
* the "...", we accept the format string there, and require that the first
|
||||
* argument in this "..." is a const char *.
|
||||
*/
|
||||
static inline void
|
||||
log_impl_varargs(const char *name, ...) {
|
||||
char buf[JEMALLOC_LOG_BUFSIZE];
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, name);
|
||||
const char *format = va_arg(ap, const char *);
|
||||
size_t dst_offset = 0;
|
||||
dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
|
||||
dst_offset += malloc_vsnprintf(buf + dst_offset,
|
||||
JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
|
||||
dst_offset += malloc_snprintf(buf + dst_offset,
|
||||
JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
|
||||
va_end(ap);
|
||||
|
||||
malloc_write(buf);
|
||||
}
|
||||
|
||||
/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
|
||||
#define LOG(log_var_str, ...) \
|
||||
do { \
|
||||
static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
|
||||
log_do_begin(log_var) \
|
||||
log_impl_varargs((log_var).name, __VA_ARGS__); \
|
||||
log_do_end(log_var) \
|
||||
} while (0)
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_LOG_H */
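The '.'-delimited name matching described in the comment above boils down to a prefix check that must end on a segment boundary. A standalone sketch of just that rule (function name invented; the special "log=." catch-all and the actual log_var machinery are omitted):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* True if the filter segment should enable a log var with the given name. */
static bool
demo_log_name_matches(const char *filter_seg, const char *name) {
    size_t len = strlen(filter_seg);
    if (strncmp(filter_seg, name, len) != 0) {
        return false;
    }
    /*
     * The match must end exactly at a '.' boundary (or cover the whole
     * name), so "arena.a" enables "arena.a.a" but not "arena.ab".
     */
    return name[len] == '\0' || name[len] == '.';
}

int
main(void) {
    printf("%d\n", demo_log_name_matches("arena.a", "arena.a.a")); /* 1 */
    printf("%d\n", demo_log_name_matches("arena.a", "arena.ab"));  /* 0 */
    printf("%d\n", demo_log_name_matches("extent", "extent.b"));   /* 1 */
    return 0;
}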
105
BeefRT/JEMalloc/include/jemalloc/internal/malloc_io.h
Normal file
@ -0,0 +1,105 @@
#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
|
||||
#define JEMALLOC_INTERNAL_MALLOC_IO_H
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
|
||||
#ifdef _WIN32
|
||||
# ifdef _WIN64
|
||||
# define FMT64_PREFIX "ll"
|
||||
# define FMTPTR_PREFIX "ll"
|
||||
# else
|
||||
# define FMT64_PREFIX "ll"
|
||||
# define FMTPTR_PREFIX ""
|
||||
# endif
|
||||
# define FMTd32 "d"
|
||||
# define FMTu32 "u"
|
||||
# define FMTx32 "x"
|
||||
# define FMTd64 FMT64_PREFIX "d"
|
||||
# define FMTu64 FMT64_PREFIX "u"
|
||||
# define FMTx64 FMT64_PREFIX "x"
|
||||
# define FMTdPTR FMTPTR_PREFIX "d"
|
||||
# define FMTuPTR FMTPTR_PREFIX "u"
|
||||
# define FMTxPTR FMTPTR_PREFIX "x"
|
||||
#else
|
||||
# include <inttypes.h>
|
||||
# define FMTd32 PRId32
|
||||
# define FMTu32 PRIu32
|
||||
# define FMTx32 PRIx32
|
||||
# define FMTd64 PRId64
|
||||
# define FMTu64 PRIu64
|
||||
# define FMTx64 PRIx64
|
||||
# define FMTdPTR PRIdPTR
|
||||
# define FMTuPTR PRIuPTR
|
||||
# define FMTxPTR PRIxPTR
|
||||
#endif
|
||||
|
||||
/* Size of stack-allocated buffer passed to buferror(). */
|
||||
#define BUFERROR_BUF 64
|
||||
|
||||
/*
|
||||
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
|
||||
* large enough for all possible uses within jemalloc.
|
||||
*/
|
||||
#define MALLOC_PRINTF_BUFSIZE 4096
|
||||
|
||||
write_cb_t wrtmessage;
|
||||
int buferror(int err, char *buf, size_t buflen);
|
||||
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
|
||||
int base);
|
||||
void malloc_write(const char *s);
|
||||
|
||||
/*
|
||||
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
|
||||
* point math.
|
||||
*/
|
||||
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
|
||||
va_list ap);
|
||||
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
|
||||
JEMALLOC_FORMAT_PRINTF(3, 4);
|
||||
/*
|
||||
* The caller can set write_cb to null to choose to print with the
|
||||
* je_malloc_message hook.
|
||||
*/
|
||||
void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
|
||||
va_list ap);
|
||||
void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
|
||||
...) JEMALLOC_FORMAT_PRINTF(3, 4);
|
||||
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
|
||||
|
||||
static inline ssize_t
|
||||
malloc_write_fd(int fd, const void *buf, size_t count) {
|
||||
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
|
||||
/*
|
||||
* Use syscall(2) rather than write(2) when possible in order to avoid
|
||||
* the possibility of memory allocation within libc. This is necessary
|
||||
* on FreeBSD; most operating systems do not have this problem though.
|
||||
*
|
||||
* syscall() returns long or int, depending on platform, so capture the
|
||||
* result in the widest plausible type to avoid compiler warnings.
|
||||
*/
|
||||
long result = syscall(SYS_write, fd, buf, count);
|
||||
#else
|
||||
ssize_t result = (ssize_t)write(fd, buf,
|
||||
#ifdef _WIN32
|
||||
(unsigned int)
|
||||
#endif
|
||||
count);
|
||||
#endif
|
||||
return (ssize_t)result;
|
||||
}
|
||||
|
||||
static inline ssize_t
|
||||
malloc_read_fd(int fd, void *buf, size_t count) {
|
||||
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
|
||||
long result = syscall(SYS_read, fd, buf, count);
|
||||
#else
|
||||
ssize_t result = read(fd, buf,
|
||||
#ifdef _WIN32
|
||||
(unsigned int)
|
||||
#endif
|
||||
count);
|
||||
#endif
|
||||
return (ssize_t)result;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
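For context on the FMT* block above: it papers over the historical lack of <inttypes.h> format macros in MSVC's printf. On a C99 toolchain the standard macros do the same job, as in this small standalone example (not jemalloc code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
    uint64_t big = UINT64_C(1) << 40;
    uintptr_t addr = (uintptr_t)&big;
    /* PRIu64/PRIxPTR play the same role as FMTu64/FMTxPTR. */
    printf("big = %" PRIu64 ", addr = 0x%" PRIxPTR "\n", big, addr);
    return 0;
}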
134
BeefRT/JEMalloc/include/jemalloc/internal/mpsc_queue.h
Normal file
@ -0,0 +1,134 @@
#ifndef JEMALLOC_INTERNAL_MPSC_QUEUE_H
|
||||
#define JEMALLOC_INTERNAL_MPSC_QUEUE_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
|
||||
/*
|
||||
* A concurrent implementation of a multi-producer, single-consumer queue. It
|
||||
* supports three concurrent operations:
|
||||
* - Push
|
||||
* - Push batch
|
||||
* - Pop batch
|
||||
*
|
||||
* These operations are all lock-free.
|
||||
*
|
||||
* The implementation is the simple two-stack queue built on a Treiber stack.
|
||||
 * It's not terribly efficient, but this isn't expected to be used anywhere with
|
||||
* hot code. In fact, we don't really even need queue semantics in any
|
||||
* anticipated use cases; we could get away with just the stack. But this way
|
||||
* lets us frame the API in terms of the existing list types, which is a nice
|
||||
* convenience. We can save on cache misses by introducing our own (parallel)
|
||||
* single-linked list type here, and dropping FIFO semantics, if we need this to
|
||||
* get faster. Since we're currently providing queue semantics though, we use
|
||||
* the prev field in the link rather than the next field for Treiber-stack
|
||||
 * linkage, so that we can preserve order for batch-pushed lists (recall that the
|
||||
 * two-stack trick reverses order in the lock-free first stack).
|
||||
*/
|
||||
|
||||
#define mpsc_queue(a_type) \
|
||||
struct { \
|
||||
atomic_p_t tail; \
|
||||
}
|
||||
|
||||
#define mpsc_queue_proto(a_attr, a_prefix, a_queue_type, a_type, \
|
||||
a_list_type) \
|
||||
/* Initialize a queue. */ \
|
||||
a_attr void \
|
||||
a_prefix##new(a_queue_type *queue); \
|
||||
/* Insert all items in src into the queue, clearing src. */ \
|
||||
a_attr void \
|
||||
a_prefix##push_batch(a_queue_type *queue, a_list_type *src); \
|
||||
/* Insert node into the queue. */ \
|
||||
a_attr void \
|
||||
a_prefix##push(a_queue_type *queue, a_type *node); \
|
||||
/* \
|
||||
* Pop all items in the queue into the list at dst. dst should already \
|
||||
* be initialized (and may contain existing items, which then remain \
|
||||
* in dst). \
|
||||
*/ \
|
||||
a_attr void \
|
||||
a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst);
|
||||
|
||||
#define mpsc_queue_gen(a_attr, a_prefix, a_queue_type, a_type, \
|
||||
a_list_type, a_link) \
|
||||
a_attr void \
|
||||
a_prefix##new(a_queue_type *queue) { \
|
||||
atomic_store_p(&queue->tail, NULL, ATOMIC_RELAXED); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##push_batch(a_queue_type *queue, a_list_type *src) { \
|
||||
/* \
|
||||
* Reuse the ql list next field as the Treiber stack next \
|
||||
* field. \
|
||||
*/ \
|
||||
a_type *first = ql_first(src); \
|
||||
a_type *last = ql_last(src, a_link); \
|
||||
void* cur_tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
|
||||
do { \
|
||||
/* \
|
||||
* Note that this breaks the queue ring structure; \
|
||||
* it's not a ring any more! \
|
||||
*/ \
|
||||
first->a_link.qre_prev = cur_tail; \
|
||||
/* \
|
||||
* Note: the upcoming CAS doesn't need an atomic; every \
|
||||
* push only needs to synchronize with the next pop, \
|
||||
* which we get from the release sequence rules. \
|
||||
*/ \
|
||||
} while (!atomic_compare_exchange_weak_p(&queue->tail, \
|
||||
&cur_tail, last, ATOMIC_RELEASE, ATOMIC_RELAXED)); \
|
||||
ql_new(src); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##push(a_queue_type *queue, a_type *node) { \
|
||||
ql_elm_new(node, a_link); \
|
||||
a_list_type list; \
|
||||
ql_new(&list); \
|
||||
ql_head_insert(&list, node, a_link); \
|
||||
a_prefix##push_batch(queue, &list); \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst) { \
|
||||
a_type *tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
|
||||
if (tail == NULL) { \
|
||||
/* \
|
||||
* In the common special case where there are no \
|
||||
* pending elements, bail early without a costly RMW. \
|
||||
*/ \
|
||||
return; \
|
||||
} \
|
||||
tail = atomic_exchange_p(&queue->tail, NULL, ATOMIC_ACQUIRE); \
|
||||
/* \
|
||||
* It's a single-consumer queue, so if cur started non-NULL, \
|
||||
* it'd better stay non-NULL. \
|
||||
*/ \
|
||||
assert(tail != NULL); \
|
||||
/* \
|
||||
* We iterate through the stack and both fix up the link \
|
||||
* structure (stack insertion broke the list requirement that \
|
||||
* the list be circularly linked). It's just as efficient at \
|
||||
* this point to make the queue a "real" queue, so do that as \
|
||||
* well. \
|
||||
* If this ever gets to be a hot spot, we can omit this fixup \
|
||||
* and make the queue a bag (i.e. not necessarily ordered), but \
|
||||
* that would mean jettisoning the existing list API as the \
|
||||
* batch pushing/popping interface. \
|
||||
*/ \
|
||||
a_list_type reversed; \
|
||||
ql_new(&reversed); \
|
||||
while (tail != NULL) { \
|
||||
/* \
|
||||
* Pop an item off the stack, prepend it onto the list \
|
||||
* (reversing the order). Recall that we use the \
|
||||
* list prev field as the Treiber stack next field to \
|
||||
* preserve order of batch-pushed items when reversed. \
|
||||
*/ \
|
||||
a_type *next = tail->a_link.qre_prev; \
|
||||
ql_elm_new(tail, a_link); \
|
||||
ql_head_insert(&reversed, tail, a_link); \
|
||||
tail = next; \
|
||||
} \
|
||||
ql_concat(dst, &reversed, a_link); \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MPSC_QUEUE_H */
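A simplified, self-contained version of the two-stack idea the comment above describes, using C11 atomics and a plain singly linked node instead of the ql_* macros; all demo_* names are invented and batching is reduced to single-node pushes:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef struct demo_node_s demo_node_t;
struct demo_node_s {
    demo_node_t *next;
    int payload;
};

typedef struct { _Atomic(demo_node_t *) tail; } demo_mpsc_t;

/* Producers: lock-free push onto a Treiber stack. */
static void
demo_mpsc_push(demo_mpsc_t *q, demo_node_t *node) {
    demo_node_t *cur = atomic_load_explicit(&q->tail, memory_order_relaxed);
    do {
        node->next = cur;
    } while (!atomic_compare_exchange_weak_explicit(&q->tail, &cur, node,
        memory_order_release, memory_order_relaxed));
}

/* Single consumer: grab the whole stack, then reverse it into FIFO order. */
static demo_node_t *
demo_mpsc_pop_all(demo_mpsc_t *q) {
    demo_node_t *stack = atomic_exchange_explicit(&q->tail, NULL,
        memory_order_acquire);
    demo_node_t *fifo = NULL;
    while (stack != NULL) {
        demo_node_t *next = stack->next;
        stack->next = fifo;
        fifo = stack;
        stack = next;
    }
    return fifo;
}

int
main(void) {
    demo_mpsc_t q;
    atomic_init(&q.tail, NULL);
    demo_node_t a = {NULL, 1}, b = {NULL, 2};
    demo_mpsc_push(&q, &a);
    demo_mpsc_push(&q, &b);
    demo_node_t *head = demo_mpsc_pop_all(&q);
    printf("%d %d\n", head->payload, head->next->payload); /* 1 2 */
    return 0;
}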
319
BeefRT/JEMalloc/include/jemalloc/internal/mutex.h
Normal file
@ -0,0 +1,319 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_H
|
||||
#define JEMALLOC_INTERNAL_MUTEX_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
#include "jemalloc/internal/witness.h"
|
||||
|
||||
extern int64_t opt_mutex_max_spin;
|
||||
|
||||
typedef enum {
|
||||
/* Can only acquire one mutex of a given witness rank at a time. */
|
||||
malloc_mutex_rank_exclusive,
|
||||
/*
|
||||
* Can acquire multiple mutexes of the same witness rank, but in
|
||||
* address-ascending order only.
|
||||
*/
|
||||
malloc_mutex_address_ordered
|
||||
} malloc_mutex_lock_order_t;
|
||||
|
||||
typedef struct malloc_mutex_s malloc_mutex_t;
|
||||
struct malloc_mutex_s {
|
||||
union {
|
||||
struct {
|
||||
/*
|
||||
* prof_data is defined first to reduce cacheline
|
||||
* bouncing: the data is not touched by the mutex holder
|
||||
			 * during unlocking, while it might be modified by
|
||||
* contenders. Having it before the mutex itself could
|
||||
* avoid prefetching a modified cacheline (for the
|
||||
* unlocking thread).
|
||||
*/
|
||||
mutex_prof_data_t prof_data;
|
||||
#ifdef _WIN32
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
SRWLOCK lock;
|
||||
# else
|
||||
CRITICAL_SECTION lock;
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
os_unfair_lock lock;
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
pthread_mutex_t lock;
|
||||
malloc_mutex_t *postponed_next;
|
||||
#else
|
||||
pthread_mutex_t lock;
|
||||
#endif
|
||||
/*
|
||||
* Hint flag to avoid exclusive cache line contention
|
||||
* during spin waiting
|
||||
*/
|
||||
atomic_b_t locked;
|
||||
};
|
||||
/*
|
||||
* We only touch witness when configured w/ debug. However we
|
||||
* keep the field in a union when !debug so that we don't have
|
||||
		 * to pollute the code base with #ifdefs, while avoiding the
|
||||
* memory cost.
|
||||
*/
|
||||
#if !defined(JEMALLOC_DEBUG)
|
||||
witness_t witness;
|
||||
malloc_mutex_lock_order_t lock_order;
|
||||
#endif
|
||||
};
|
||||
|
||||
#if defined(JEMALLOC_DEBUG)
|
||||
witness_t witness;
|
||||
malloc_mutex_lock_order_t lock_order;
|
||||
#endif
|
||||
};
|
||||
|
||||
#ifdef _WIN32
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
|
||||
# else
|
||||
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
|
||||
#else
|
||||
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
|
||||
#endif
|
||||
|
||||
#define LOCK_PROF_DATA_INITIALIZER \
|
||||
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
|
||||
ATOMIC_INIT(0), 0, NULL, 0}
|
||||
|
||||
#ifdef _WIN32
|
||||
# define MALLOC_MUTEX_INITIALIZER
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
# if defined(JEMALLOC_DEBUG)
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
|
||||
# else
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
# if (defined(JEMALLOC_DEBUG))
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
|
||||
# else
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
# endif
|
||||
|
||||
#else
|
||||
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
|
||||
# if defined(JEMALLOC_DEBUG)
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
|
||||
# else
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
extern bool isthreaded;
|
||||
#else
|
||||
# undef isthreaded /* Undo private_namespace.h definition. */
|
||||
# define isthreaded true
|
||||
#endif
|
||||
|
||||
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
|
||||
witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
|
||||
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
bool malloc_mutex_boot(void);
|
||||
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
|
||||
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
|
||||
|
||||
static inline void
|
||||
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
|
||||
MALLOC_MUTEX_LOCK(mutex);
|
||||
atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
|
||||
return MALLOC_MUTEX_TRYLOCK(mutex);
|
||||
}
|
||||
|
||||
static inline void
|
||||
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
if (config_stats) {
|
||||
mutex_prof_data_t *data = &mutex->prof_data;
|
||||
data->n_lock_ops++;
|
||||
if (data->prev_owner != tsdn) {
|
||||
data->prev_owner = tsdn;
|
||||
data->n_owner_switches++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Trylock: return false if the lock is successfully acquired. */
|
||||
static inline bool
|
||||
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
if (isthreaded) {
|
||||
if (malloc_mutex_trylock_final(mutex)) {
|
||||
atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
|
||||
return true;
|
||||
}
|
||||
mutex_owner_stats_update(tsdn, mutex);
|
||||
}
|
||||
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Aggregate lock prof data. */
|
||||
static inline void
|
||||
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
|
||||
nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
|
||||
if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
|
||||
nstime_copy(&sum->max_wait_time, &data->max_wait_time);
|
||||
}
|
||||
|
||||
sum->n_wait_times += data->n_wait_times;
|
||||
sum->n_spin_acquired += data->n_spin_acquired;
|
||||
|
||||
if (sum->max_n_thds < data->max_n_thds) {
|
||||
sum->max_n_thds = data->max_n_thds;
|
||||
}
|
||||
uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
|
||||
ATOMIC_RELAXED);
|
||||
uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
|
||||
&data->n_waiting_thds, ATOMIC_RELAXED);
|
||||
atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
|
||||
ATOMIC_RELAXED);
|
||||
sum->n_owner_switches += data->n_owner_switches;
|
||||
sum->n_lock_ops += data->n_lock_ops;
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
if (isthreaded) {
|
||||
if (malloc_mutex_trylock_final(mutex)) {
|
||||
malloc_mutex_lock_slow(mutex);
|
||||
atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
|
||||
}
|
||||
mutex_owner_stats_update(tsdn, mutex);
|
||||
}
|
||||
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED);
|
||||
witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
if (isthreaded) {
|
||||
MALLOC_MUTEX_UNLOCK(mutex);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
|
||||
/*
|
||||
* Not *really* allowed (we shouldn't be doing non-atomic loads of
|
||||
* atomic data), but the mutex protection makes this safe, and writing
|
||||
* a member-for-member copy is tedious for this situation.
|
||||
*/
|
||||
*dst = *source;
|
||||
/* n_wait_thds is not reported (modified w/o locking). */
|
||||
atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
/* Copy the prof data from mutex for processing. */
|
||||
static inline void
|
||||
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
|
||||
malloc_mutex_t *mutex) {
|
||||
/* Can only read holding the mutex. */
|
||||
malloc_mutex_assert_owner(tsdn, mutex);
|
||||
malloc_mutex_prof_copy(data, &mutex->prof_data);
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
|
||||
malloc_mutex_t *mutex) {
|
||||
mutex_prof_data_t *source = &mutex->prof_data;
|
||||
/* Can only read holding the mutex. */
|
||||
malloc_mutex_assert_owner(tsdn, mutex);
|
||||
|
||||
nstime_add(&data->tot_wait_time, &source->tot_wait_time);
|
||||
if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
|
||||
nstime_copy(&data->max_wait_time, &source->max_wait_time);
|
||||
}
|
||||
data->n_wait_times += source->n_wait_times;
|
||||
data->n_spin_acquired += source->n_spin_acquired;
|
||||
if (data->max_n_thds < source->max_n_thds) {
|
||||
data->max_n_thds = source->max_n_thds;
|
||||
}
|
||||
/* n_wait_thds is not reported. */
|
||||
atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
|
||||
data->n_owner_switches += source->n_owner_switches;
|
||||
data->n_lock_ops += source->n_lock_ops;
|
||||
}
|
||||
|
||||
/* Compare the prof data and update to the maximum. */
|
||||
static inline void
|
||||
malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
|
||||
malloc_mutex_t *mutex) {
|
||||
mutex_prof_data_t *source = &mutex->prof_data;
|
||||
/* Can only read holding the mutex. */
|
||||
malloc_mutex_assert_owner(tsdn, mutex);
|
||||
|
||||
if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
|
||||
nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
|
||||
}
|
||||
if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
|
||||
nstime_copy(&data->max_wait_time, &source->max_wait_time);
|
||||
}
|
||||
if (source->n_wait_times > data->n_wait_times) {
|
||||
data->n_wait_times = source->n_wait_times;
|
||||
}
|
||||
if (source->n_spin_acquired > data->n_spin_acquired) {
|
||||
data->n_spin_acquired = source->n_spin_acquired;
|
||||
}
|
||||
if (source->max_n_thds > data->max_n_thds) {
|
||||
data->max_n_thds = source->max_n_thds;
|
||||
}
|
||||
if (source->n_owner_switches > data->n_owner_switches) {
|
||||
data->n_owner_switches = source->n_owner_switches;
|
||||
}
|
||||
if (source->n_lock_ops > data->n_lock_ops) {
|
||||
data->n_lock_ops = source->n_lock_ops;
|
||||
}
|
||||
/* n_wait_thds is not reported. */
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
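A standalone sketch of the "locked" hint flag used above: waiters spin on a relaxed atomic instead of hammering the lock word with trylock, so the lock's cache line is not bounced around while another thread holds it. Built on pthreads with invented demo_* names and an arbitrary spin limit; this is a simplification of the idea, not jemalloc's lock path:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t lock;
    atomic_bool locked;     /* Hint only; the mutex is the real arbiter. */
} demo_mutex_t;

static void
demo_mutex_lock(demo_mutex_t *m) {
    if (pthread_mutex_trylock(&m->lock) == 0) {
        atomic_store_explicit(&m->locked, true, memory_order_relaxed);
        return;
    }
    /* Spin on the hint (a read-only load) before blocking for real. */
    for (int i = 0; i < 1000; i++) {
        if (!atomic_load_explicit(&m->locked, memory_order_relaxed) &&
            pthread_mutex_trylock(&m->lock) == 0) {
            atomic_store_explicit(&m->locked, true, memory_order_relaxed);
            return;
        }
    }
    pthread_mutex_lock(&m->lock);
    atomic_store_explicit(&m->locked, true, memory_order_relaxed);
}

static void
demo_mutex_unlock(demo_mutex_t *m) {
    atomic_store_explicit(&m->locked, false, memory_order_relaxed);
    pthread_mutex_unlock(&m->lock);
}

int
main(void) {
    demo_mutex_t m;
    pthread_mutex_init(&m.lock, NULL);
    atomic_init(&m.locked, false);
    demo_mutex_lock(&m);
    demo_mutex_unlock(&m);
    pthread_mutex_destroy(&m.lock);
    return 0;
}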
117
BeefRT/JEMalloc/include/jemalloc/internal/mutex_prof.h
Normal file
@ -0,0 +1,117 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
|
||||
#define JEMALLOC_INTERNAL_MUTEX_PROF_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/nstime.h"
|
||||
#include "jemalloc/internal/tsd_types.h"
|
||||
|
||||
#define MUTEX_PROF_GLOBAL_MUTEXES \
|
||||
OP(background_thread) \
|
||||
OP(max_per_bg_thd) \
|
||||
OP(ctl) \
|
||||
OP(prof) \
|
||||
OP(prof_thds_data) \
|
||||
OP(prof_dump) \
|
||||
OP(prof_recent_alloc) \
|
||||
OP(prof_recent_dump) \
|
||||
OP(prof_stats)
|
||||
|
||||
typedef enum {
|
||||
#define OP(mtx) global_prof_mutex_##mtx,
|
||||
MUTEX_PROF_GLOBAL_MUTEXES
|
||||
#undef OP
|
||||
mutex_prof_num_global_mutexes
|
||||
} mutex_prof_global_ind_t;
|
||||
|
||||
#define MUTEX_PROF_ARENA_MUTEXES \
|
||||
OP(large) \
|
||||
OP(extent_avail) \
|
||||
OP(extents_dirty) \
|
||||
OP(extents_muzzy) \
|
||||
OP(extents_retained) \
|
||||
OP(decay_dirty) \
|
||||
OP(decay_muzzy) \
|
||||
OP(base) \
|
||||
OP(tcache_list) \
|
||||
OP(hpa_shard) \
|
||||
OP(hpa_shard_grow) \
|
||||
OP(hpa_sec)
|
||||
|
||||
typedef enum {
|
||||
#define OP(mtx) arena_prof_mutex_##mtx,
|
||||
MUTEX_PROF_ARENA_MUTEXES
|
||||
#undef OP
|
||||
mutex_prof_num_arena_mutexes
|
||||
} mutex_prof_arena_ind_t;
|
||||
|
||||
/*
|
||||
 * The fourth parameter is a boolean value that is true for derived rate counters
|
||||
* and false for real ones.
|
||||
*/
|
||||
#define MUTEX_PROF_UINT64_COUNTERS \
|
||||
OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \
|
||||
OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \
|
||||
OP(num_wait, uint64_t, "n_waiting", false, num_wait) \
|
||||
OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \
|
||||
OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \
|
||||
OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \
|
||||
OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \
|
||||
OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
|
||||
OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
|
||||
OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \
|
||||
OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)
|
||||
|
||||
#define MUTEX_PROF_UINT32_COUNTERS \
|
||||
OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
|
||||
|
||||
#define MUTEX_PROF_COUNTERS \
|
||||
MUTEX_PROF_UINT64_COUNTERS \
|
||||
MUTEX_PROF_UINT32_COUNTERS
|
||||
|
||||
#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,
|
||||
|
||||
#define COUNTER_ENUM(counter_list, t) \
|
||||
typedef enum { \
|
||||
counter_list \
|
||||
mutex_prof_num_##t##_counters \
|
||||
} mutex_prof_##t##_counter_ind_t;
|
||||
|
||||
COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
|
||||
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
|
||||
|
||||
#undef COUNTER_ENUM
|
||||
#undef OP
|
||||
|
||||
typedef struct {
|
||||
/*
|
||||
* Counters touched on the slow path, i.e. when there is lock
|
||||
* contention. We update them once we have the lock.
|
||||
*/
|
||||
	/* Total time (in nanoseconds) spent waiting on this mutex. */
|
||||
nstime_t tot_wait_time;
|
||||
	/* Max time (in nanoseconds) spent on a single lock operation. */
|
||||
nstime_t max_wait_time;
|
||||
/* # of times have to wait for this mutex (after spinning). */
|
||||
uint64_t n_wait_times;
|
||||
/* # of times acquired the mutex through local spinning. */
|
||||
uint64_t n_spin_acquired;
|
||||
/* Max # of threads waiting for the mutex at the same time. */
|
||||
uint32_t max_n_thds;
|
||||
/* Current # of threads waiting on the lock. Atomic synced. */
|
||||
atomic_u32_t n_waiting_thds;
|
||||
|
||||
/*
|
||||
* Data touched on the fast path. These are modified right after we
|
||||
* grab the lock, so it's placed closest to the end (i.e. right before
|
||||
* the lock) so that we have a higher chance of them being on the same
|
||||
* cacheline.
|
||||
*/
|
||||
/* # of times the mutex holder is different than the previous one. */
|
||||
uint64_t n_owner_switches;
|
||||
/* Previous mutex holder, to facilitate n_owner_switches. */
|
||||
tsdn_t *prev_owner;
|
||||
/* # of lock() operations in total. */
|
||||
uint64_t n_lock_ops;
|
||||
} mutex_prof_data_t;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
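The OP(...) lists above are X-macros: a single list of mutex names is expanded once into an enum and again (elsewhere) into name strings and stats columns. A minimal standalone illustration of the technique with an invented DEMO_MUTEXES list:

#include <stdio.h>

#define DEMO_MUTEXES \
    OP(ctl) \
    OP(prof) \
    OP(background_thread)

/* Expansion 1: one enum entry per mutex. */
typedef enum {
#define OP(mtx) demo_mutex_##mtx,
    DEMO_MUTEXES
#undef OP
    demo_num_mutexes
} demo_mutex_ind_t;

/* Expansion 2: a parallel table of human-readable names. */
static const char *demo_mutex_names[] = {
#define OP(mtx) #mtx,
    DEMO_MUTEXES
#undef OP
};

int
main(void) {
    for (int i = 0; i < demo_num_mutexes; i++) {
        printf("%d: %s\n", i, demo_mutex_names[i]);
    }
    return 0;
}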
73
BeefRT/JEMalloc/include/jemalloc/internal/nstime.h
Normal file
@ -0,0 +1,73 @@
#ifndef JEMALLOC_INTERNAL_NSTIME_H
|
||||
#define JEMALLOC_INTERNAL_NSTIME_H
|
||||
|
||||
/* Maximum supported number of seconds (~584 years). */
|
||||
#define NSTIME_SEC_MAX KQU(18446744072)
|
||||
|
||||
#define NSTIME_MAGIC ((uint32_t)0xb8a9ce37)
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
# define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC}
|
||||
#else
|
||||
# define NSTIME_ZERO_INITIALIZER {0}
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
uint64_t ns;
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
uint32_t magic; /* Tracks if initialized. */
|
||||
#endif
|
||||
} nstime_t;
|
||||
|
||||
static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
|
||||
|
||||
void nstime_init(nstime_t *time, uint64_t ns);
|
||||
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
|
||||
uint64_t nstime_ns(const nstime_t *time);
|
||||
uint64_t nstime_sec(const nstime_t *time);
|
||||
uint64_t nstime_msec(const nstime_t *time);
|
||||
uint64_t nstime_nsec(const nstime_t *time);
|
||||
void nstime_copy(nstime_t *time, const nstime_t *source);
|
||||
int nstime_compare(const nstime_t *a, const nstime_t *b);
|
||||
void nstime_add(nstime_t *time, const nstime_t *addend);
|
||||
void nstime_iadd(nstime_t *time, uint64_t addend);
|
||||
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
|
||||
void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
|
||||
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
|
||||
void nstime_idivide(nstime_t *time, uint64_t divisor);
|
||||
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
|
||||
uint64_t nstime_ns_since(const nstime_t *past);
|
||||
|
||||
typedef bool (nstime_monotonic_t)(void);
|
||||
extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
|
||||
|
||||
typedef void (nstime_update_t)(nstime_t *);
|
||||
extern nstime_update_t *JET_MUTABLE nstime_update;
|
||||
|
||||
typedef void (nstime_prof_update_t)(nstime_t *);
|
||||
extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update;
|
||||
|
||||
void nstime_init_update(nstime_t *time);
|
||||
void nstime_prof_init_update(nstime_t *time);
|
||||
|
||||
enum prof_time_res_e {
|
||||
prof_time_res_default = 0,
|
||||
prof_time_res_high = 1
|
||||
};
|
||||
typedef enum prof_time_res_e prof_time_res_t;
|
||||
|
||||
extern prof_time_res_t opt_prof_time_res;
|
||||
extern const char *prof_time_res_mode_names[];
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
nstime_init_zero(nstime_t *time) {
|
||||
nstime_copy(time, &nstime_zero);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
nstime_equals_zero(nstime_t *time) {
|
||||
int diff = nstime_compare(time, &nstime_zero);
|
||||
assert(diff >= 0);
|
||||
return diff == 0;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_NSTIME_H */
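nstime_t is a single 64-bit nanosecond count, with a magic word in debug builds to catch use of uninitialized values. A standalone sketch of the same representation and two of the operations; it keys off NDEBUG rather than JEMALLOC_DEBUG, and every demo_* name is invented:

#include <assert.h>
#include <stdint.h>

#define DEMO_NSTIME_MAGIC ((uint32_t)0xb8a9ce37)

typedef struct {
    uint64_t ns;
#ifndef NDEBUG
    uint32_t magic;     /* Tracks whether the value was initialized. */
#endif
} demo_nstime_t;

static void
demo_nstime_init2(demo_nstime_t *t, uint64_t sec, uint64_t nsec) {
    t->ns = sec * UINT64_C(1000000000) + nsec;
#ifndef NDEBUG
    t->magic = DEMO_NSTIME_MAGIC;
#endif
}

static uint64_t
demo_nstime_sec(const demo_nstime_t *t) {
#ifndef NDEBUG
    assert(t->magic == DEMO_NSTIME_MAGIC);
#endif
    return t->ns / UINT64_C(1000000000);
}

int
main(void) {
    demo_nstime_t t;
    demo_nstime_init2(&t, 2, 500000000);
    return demo_nstime_sec(&t) == 2 ? 0 : 1;
}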
243
BeefRT/JEMalloc/include/jemalloc/internal/pa.h
Normal file
@ -0,0 +1,243 @@
#ifndef JEMALLOC_INTERNAL_PA_H
|
||||
#define JEMALLOC_INTERNAL_PA_H
|
||||
|
||||
#include "jemalloc/internal/base.h"
|
||||
#include "jemalloc/internal/decay.h"
|
||||
#include "jemalloc/internal/ecache.h"
|
||||
#include "jemalloc/internal/edata_cache.h"
|
||||
#include "jemalloc/internal/emap.h"
|
||||
#include "jemalloc/internal/hpa.h"
|
||||
#include "jemalloc/internal/lockedint.h"
|
||||
#include "jemalloc/internal/pac.h"
|
||||
#include "jemalloc/internal/pai.h"
|
||||
#include "jemalloc/internal/sec.h"
|
||||
|
||||
/*
|
||||
* The page allocator; responsible for acquiring pages of memory for
|
||||
* allocations. It picks the implementation of the page allocator interface
|
||||
* (i.e. a pai_t) to handle a given page-level allocation request. For now, the
|
||||
* only such implementation is the PAC code ("page allocator classic"), but
|
||||
* others will be coming soon.
|
||||
*/
|
||||
|
||||
typedef struct pa_central_s pa_central_t;
|
||||
struct pa_central_s {
|
||||
hpa_central_t hpa;
|
||||
};
|
||||
|
||||
/*
|
||||
* The stats for a particular pa_shard. Because of the way the ctl module
|
||||
* handles stats epoch data collection (it has its own arena_stats, and merges
|
||||
* the stats from each arena into it), this needs to live in the arena_stats_t;
|
||||
* hence we define it here and let the pa_shard have a pointer (rather than the
|
||||
* more natural approach of just embedding it in the pa_shard itself).
|
||||
*
|
||||
* We follow the arena_stats_t approach of marking the derived fields. These
|
||||
* are the ones that are not maintained on their own; instead, their values are
|
||||
* derived during those stats merges.
|
||||
*/
|
||||
typedef struct pa_shard_stats_s pa_shard_stats_t;
|
||||
struct pa_shard_stats_s {
|
||||
/* Number of edata_t structs allocated by base, but not being used. */
|
||||
size_t edata_avail; /* Derived. */
|
||||
/*
|
||||
* Stats specific to the PAC. For now, these are the only stats that
|
||||
* exist, but there will eventually be other page allocators. Things
|
||||
* like edata_avail make sense in a cross-PA sense, but things like
|
||||
* npurges don't.
|
||||
*/
|
||||
pac_stats_t pac_stats;
|
||||
};
|
||||
|
||||
/*
|
||||
* The local allocator handle. Keeps the state necessary to satisfy page-sized
|
||||
* allocations.
|
||||
*
|
||||
* The contents are mostly internal to the PA module. The key exception is that
|
||||
* arena decay code is allowed to grab pointers to the dirty and muzzy ecaches
|
||||
* decay_ts, for a couple of queries, passing them back to a PA function, or
|
||||
* acquiring decay.mtx and looking at decay.purging. The reasoning is that,
|
||||
* while PA decides what and how to purge, the arena code decides when and where
|
||||
* (e.g. on what thread). It's allowed to use the presence of another purger to
|
||||
* decide.
|
||||
* (The background thread code also touches some other decay internals, but
|
||||
 * that's not fundamental; it's just an artifact of a partial refactoring, and
|
||||
* its accesses could be straightforwardly moved inside the decay module).
|
||||
*/
|
||||
typedef struct pa_shard_s pa_shard_t;
|
||||
struct pa_shard_s {
|
||||
/* The central PA this shard is associated with. */
|
||||
pa_central_t *central;
|
||||
|
||||
/*
|
||||
* Number of pages in active extents.
|
||||
*
|
||||
* Synchronization: atomic.
|
||||
*/
|
||||
atomic_zu_t nactive;
|
||||
|
||||
/*
|
||||
* Whether or not we should prefer the hugepage allocator. Atomic since
|
||||
* it may be concurrently modified by a thread setting extent hooks.
|
||||
* Note that we still may do HPA operations in this arena; if use_hpa is
|
||||
* changed from true to false, we'll free back to the hugepage allocator
|
||||
* for those allocations.
|
||||
*/
|
||||
atomic_b_t use_hpa;
|
||||
|
||||
/*
|
||||
* If we never used the HPA to begin with, it wasn't initialized, and so
|
||||
* we shouldn't try to e.g. acquire its mutexes during fork. This
|
||||
* tracks that knowledge.
|
||||
*/
|
||||
bool ever_used_hpa;
|
||||
|
||||
/* Allocates from a PAC. */
|
||||
pac_t pac;
|
||||
|
||||
/*
|
||||
* We place a small extent cache in front of the HPA, since we intend
|
||||
* these configurations to use many fewer arenas, and therefore have a
|
||||
* higher risk of hot locks.
|
||||
*/
|
||||
sec_t hpa_sec;
|
||||
hpa_shard_t hpa_shard;
|
||||
|
||||
/* The source of edata_t objects. */
|
||||
edata_cache_t edata_cache;
|
||||
|
||||
unsigned ind;
|
||||
|
||||
malloc_mutex_t *stats_mtx;
|
||||
pa_shard_stats_t *stats;
|
||||
|
||||
/* The emap this shard is tied to. */
|
||||
emap_t *emap;
|
||||
|
||||
	/* The base from which we get the ehooks and allocate metadata. */
|
||||
base_t *base;
|
||||
};
|
||||
|
||||
static inline bool
|
||||
pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
|
||||
return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
|
||||
pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
|
||||
}
|
||||
|
||||
static inline ehooks_t *
|
||||
pa_shard_ehooks_get(pa_shard_t *shard) {
|
||||
return base_ehooks_get(shard->base);
|
||||
}
|
||||
|
||||
/* Returns true on error. */
|
||||
bool pa_central_init(pa_central_t *central, base_t *base, bool hpa,
|
||||
hpa_hooks_t *hpa_hooks);
|
||||
|
||||
/* Returns true on error. */
|
||||
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
|
||||
emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
|
||||
malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
|
||||
ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
|
||||
|
||||
/*
|
||||
* This isn't exposed to users; we allow late enablement of the HPA shard so
|
||||
* that we can boot without worrying about the HPA, then turn it on in a0.
|
||||
*/
|
||||
bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
|
||||
const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts);
|
||||
|
||||
/*
|
||||
* We stop using the HPA when custom extent hooks are installed, but still
|
||||
* redirect deallocations to it.
|
||||
*/
|
||||
void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
|
||||
/*
|
||||
* This does the PA-specific parts of arena reset (i.e. freeing all active
|
||||
* allocations).
|
||||
*/
|
||||
void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
|
||||
/*
|
||||
* Destroy all the remaining retained extents. Should only be called after
|
||||
* decaying all active, dirty, and muzzy extents to the retained state, as the
|
||||
* last step in destroying the shard.
|
||||
*/
|
||||
void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
|
||||
/* Gets an edata for the given allocation. */
|
||||
edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
|
||||
size_t alignment, bool slab, szind_t szind, bool zero, bool guarded,
|
||||
bool *deferred_work_generated);
|
||||
/* Returns true on error, in which case nothing changed. */
|
||||
bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
|
||||
size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated);
|
||||
/*
|
||||
* The same. Sets *generated_dirty to true if we produced new dirty pages, and
|
||||
* false otherwise.
|
||||
*/
|
||||
bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
|
||||
size_t new_size, szind_t szind, bool *deferred_work_generated);
|
||||
/*
|
||||
* Frees the given edata back to the pa. Sets *generated_dirty if we produced
|
||||
* new dirty pages (well, we always set it for now; but this need not be the
|
||||
* case).
|
||||
* (We could make generated_dirty the return value of course, but this is more
|
||||
* consistent with the shrink pathway and our error codes here).
|
||||
*/
|
||||
void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
|
||||
bool *deferred_work_generated);
|
||||
bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
|
||||
ssize_t decay_ms, pac_purge_eagerness_t eagerness);
|
||||
ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
|
||||
|
||||
/*
|
||||
* Do deferred work on this PA shard.
|
||||
*
|
||||
* Morally, this should do both PAC decay and the HPA deferred work. For now,
|
||||
* though, the arena, background thread, and PAC modules are tightly interwoven
|
||||
* in a way that's tricky to extricate, so we only do the HPA-specific parts.
|
||||
*/
|
||||
void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
|
||||
bool deferral_allowed);
|
||||
void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* Various bits of "boring" functionality that are still part of this module,
|
||||
* but that we relegate to pa_extra.c, to keep the core logic in pa.c as
|
||||
* readable as possible.
|
||||
*/
|
||||
|
||||
/*
|
||||
* These fork phases are synchronized with the arena fork phase numbering to
|
||||
* make it easy to keep straight. That's why there's no prefork1.
|
||||
*/
|
||||
void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
|
||||
|
||||
void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
|
||||
size_t *ndirty, size_t *nmuzzy);
|
||||
|
||||
void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
|
||||
pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
|
||||
hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
|
||||
size_t *resident);
|
||||
|
||||
/*
|
||||
* Reads the PA-owned mutex stats into the output stats array, at the
|
||||
* appropriate positions. Morally, these stats should really live in
|
||||
* pa_shard_stats_t, but the indices are sort of baked into the various mutex
|
||||
* prof macros. This would be a good thing to do at some point.
|
||||
*/
|
||||
void pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PA_H */
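The comment above says the page allocator picks an implementation of the pai_t interface for each request. In C that kind of interface is typically a struct of function pointers with backends behind it; a minimal sketch with invented demo_* names (the real pai_t has a much richer signature than this):

#include <stddef.h>
#include <stdlib.h>

/* A tiny page-allocator interface: one vtable, potentially many backends. */
typedef struct demo_pai_s demo_pai_t;
struct demo_pai_s {
    void *(*alloc)(demo_pai_t *self, size_t size);
    void (*dalloc)(demo_pai_t *self, void *ptr);
};

/* A "classic" backend that simply forwards to malloc/free. */
static void *
demo_pac_alloc(demo_pai_t *self, size_t size) {
    (void)self;
    return malloc(size);
}

static void
demo_pac_dalloc(demo_pai_t *self, void *ptr) {
    (void)self;
    free(ptr);
}

static demo_pai_t demo_pac = {demo_pac_alloc, demo_pac_dalloc};

int
main(void) {
    /* Callers only ever see the interface, not the backend. */
    void *p = demo_pac.alloc(&demo_pac, 4096);
    demo_pac.dalloc(&demo_pac, p);
    return 0;
}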
179
BeefRT/JEMalloc/include/jemalloc/internal/pac.h
Normal file
@ -0,0 +1,179 @@
#ifndef JEMALLOC_INTERNAL_PAC_H
|
||||
#define JEMALLOC_INTERNAL_PAC_H
|
||||
|
||||
#include "jemalloc/internal/exp_grow.h"
|
||||
#include "jemalloc/internal/pai.h"
|
||||
#include "san_bump.h"
|
||||
|
||||
|
||||
/*
|
||||
* Page allocator classic; an implementation of the PAI interface that:
|
||||
* - Can be used for arenas with custom extent hooks.
|
||||
* - Can always satisfy any allocation request (including highly-fragmentary
|
||||
* ones).
|
||||
* - Can use efficient OS-level zeroing primitives for demand-filled pages.
|
||||
*/
|
||||
|
||||
/* How "eager" decay/purging should be. */
|
||||
enum pac_purge_eagerness_e {
|
||||
PAC_PURGE_ALWAYS,
|
||||
PAC_PURGE_NEVER,
|
||||
PAC_PURGE_ON_EPOCH_ADVANCE
|
||||
};
|
||||
typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;
|
||||
|
||||
typedef struct pac_decay_stats_s pac_decay_stats_t;
|
||||
struct pac_decay_stats_s {
|
||||
/* Total number of purge sweeps. */
|
||||
locked_u64_t npurge;
|
||||
/* Total number of madvise calls made. */
|
||||
locked_u64_t nmadvise;
|
||||
/* Total number of pages purged. */
|
||||
locked_u64_t purged;
|
||||
};
|
||||
|
||||
typedef struct pac_estats_s pac_estats_t;
|
||||
struct pac_estats_s {
|
||||
/*
|
||||
* Stats for a given index in the range [0, SC_NPSIZES] in the various
|
||||
* ecache_ts.
|
||||
* We track both bytes and # of extents: two extents in the same bucket
|
||||
* may have different sizes if adjacent size classes differ by more than
|
||||
* a page, so bytes cannot always be derived from # of extents.
|
||||
*/
|
||||
size_t ndirty;
|
||||
size_t dirty_bytes;
|
||||
size_t nmuzzy;
|
||||
size_t muzzy_bytes;
|
||||
size_t nretained;
|
||||
size_t retained_bytes;
|
||||
};
|
||||
|
||||
typedef struct pac_stats_s pac_stats_t;
|
||||
struct pac_stats_s {
|
||||
pac_decay_stats_t decay_dirty;
|
||||
pac_decay_stats_t decay_muzzy;
|
||||
|
||||
/*
|
||||
* Number of unused virtual memory bytes currently retained. Retained
|
||||
* bytes are technically mapped (though always decommitted or purged),
|
||||
* but they are excluded from the mapped statistic (above).
|
||||
*/
|
||||
size_t retained; /* Derived. */
|
||||
|
||||
/*
|
||||
* Number of bytes currently mapped, excluding retained memory (and any
|
||||
* base-allocated memory, which is tracked by the arena stats).
|
||||
*
|
||||
* We name this "pac_mapped" to avoid confusion with the arena_stats
|
||||
* "mapped".
|
||||
*/
|
||||
atomic_zu_t pac_mapped;
|
||||
|
||||
/* VM space had to be leaked (undocumented). Normally 0. */
|
||||
atomic_zu_t abandoned_vm;
|
||||
};
|
||||
|
||||
typedef struct pac_s pac_t;
|
||||
struct pac_s {
|
||||
/*
|
||||
* Must be the first member (we convert it to a PAC given only a
|
||||
* pointer). The handle to the allocation interface.
|
||||
*/
|
||||
pai_t pai;
|
||||
/*
|
||||
* Collections of extents that were previously allocated. These are
|
||||
* used when allocating extents, in an attempt to re-use address space.
|
||||
*
|
||||
* Synchronization: internal.
|
||||
*/
|
||||
ecache_t ecache_dirty;
|
||||
ecache_t ecache_muzzy;
|
||||
ecache_t ecache_retained;
|
||||
|
||||
base_t *base;
|
||||
emap_t *emap;
|
||||
edata_cache_t *edata_cache;
|
||||
|
||||
/* The grow info for the retained ecache. */
|
||||
exp_grow_t exp_grow;
|
||||
malloc_mutex_t grow_mtx;
|
||||
|
||||
/* Special allocator for guarded frequently reused extents. */
|
||||
san_bump_alloc_t sba;
|
||||
|
||||
/* How large extents should be before getting auto-purged. */
|
||||
atomic_zu_t oversize_threshold;
|
||||
|
||||
/*
|
||||
* Decay-based purging state, responsible for scheduling extent state
|
||||
* transitions.
|
||||
*
|
||||
* Synchronization: via the internal mutex.
|
||||
*/
|
||||
decay_t decay_dirty; /* dirty --> muzzy */
|
||||
decay_t decay_muzzy; /* muzzy --> retained */
|
||||
|
||||
malloc_mutex_t *stats_mtx;
|
||||
pac_stats_t *stats;
|
||||
|
||||
/* Extent serial number generator state. */
|
||||
atomic_zu_t extent_sn_next;
|
||||
};
|
||||
|
||||
bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
|
||||
edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
|
||||
ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
|
||||
malloc_mutex_t *stats_mtx);
|
||||
|
||||
static inline size_t
|
||||
pac_mapped(pac_t *pac) {
|
||||
return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
static inline ehooks_t *
|
||||
pac_ehooks_get(pac_t *pac) {
|
||||
return base_ehooks_get(pac->base);
|
||||
}
|
||||
|
||||
/*
|
||||
* All purging functions require holding decay->mtx. This is one of the few
|
||||
* places external modules are allowed to peek inside pa_shard_t internals.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Decays the number of pages currently in the ecache. This might not leave the
|
||||
* ecache empty if other threads are inserting dirty objects into it
|
||||
* concurrently with the call.
|
||||
*/
|
||||
void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
|
||||
pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
|
||||
/*
|
||||
* Updates decay settings for the current time, and conditionally purges in
|
||||
* response (depending on decay_purge_setting). Returns whether or not the
|
||||
* epoch advanced.
|
||||
*/
|
||||
bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
|
||||
pac_decay_stats_t *decay_stats, ecache_t *ecache,
|
||||
pac_purge_eagerness_t eagerness);
|
||||
|
||||
/*
 * Gets / sets the maximum amount that we'll grow an arena down the
 * grow-retained pathways (unless forced to by an allocation request).
 *
 * Set new_limit to NULL if it's just a query, or old_limit to NULL if you
 * don't care about the previous value.
 *
 * Returns true on error (if the new limit is not valid).
 */
|
||||
bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
|
||||
size_t *new_limit);
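For callers that already hold a pac_t, the query/set split works like this; a minimal usage sketch, assuming a valid tsdn_t * and pac_t * from the surrounding arena code (the variable names are illustrative and the snippet is not stand-alone):

	/* Query the current limit without changing it: new_limit == NULL. */
	size_t old_limit;
	if (pac_retain_grow_limit_get_set(tsdn, pac, &old_limit, NULL)) {
		/* true means the call failed; handle the error. */
	}

	/* Set a new 64 MiB limit, ignoring the old one: old_limit == NULL. */
	size_t new_limit = (size_t)64 << 20;
	if (pac_retain_grow_limit_get_set(tsdn, pac, NULL, &new_limit)) {
		/* The requested limit was rejected as invalid. */
	}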
|
||||
|
||||
bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
|
||||
ssize_t decay_ms, pac_purge_eagerness_t eagerness);
|
||||
ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);
|
||||
|
||||
void pac_reset(tsdn_t *tsdn, pac_t *pac);
|
||||
void pac_destroy(tsdn_t *tsdn, pac_t *pac);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PAC_H */
|
119
BeefRT/JEMalloc/include/jemalloc/internal/pages.h
Normal file
|
@ -0,0 +1,119 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
|
||||
|
||||
/* Page size. LG_PAGE is determined by the configure script. */
|
||||
#ifdef PAGE_MASK
|
||||
# undef PAGE_MASK
|
||||
#endif
|
||||
#define PAGE ((size_t)(1U << LG_PAGE))
|
||||
#define PAGE_MASK ((size_t)(PAGE - 1))
|
||||
/* Return the page base address for the page containing address a. */
|
||||
#define PAGE_ADDR2BASE(a) \
|
||||
((void *)((uintptr_t)(a) & ~PAGE_MASK))
|
||||
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
	(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the largest pagesize multiple that is <= s. */
#define PAGE_FLOOR(s) \
	((s) & ~PAGE_MASK)
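As a quick sanity check of the mask arithmetic above, here is a small stand-alone program; the DEMO_ macros are local copies, and LG_PAGE == 12 (4 KiB pages) is assumed purely for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the configure-detected value; 4 KiB pages assumed here. */
#define DEMO_LG_PAGE 12
#define DEMO_PAGE ((size_t)(1U << DEMO_LG_PAGE))
#define DEMO_PAGE_MASK ((size_t)(DEMO_PAGE - 1))
#define DEMO_PAGE_ADDR2BASE(a) ((void *)((uintptr_t)(a) & ~DEMO_PAGE_MASK))
#define DEMO_PAGE_CEILING(s) (((s) + DEMO_PAGE_MASK) & ~DEMO_PAGE_MASK)
#define DEMO_PAGE_FLOOR(s) ((s) & ~DEMO_PAGE_MASK)

int main(void) {
	assert(DEMO_PAGE_CEILING((size_t)1) == 4096);
	assert(DEMO_PAGE_CEILING((size_t)4096) == 4096);	/* already aligned */
	assert(DEMO_PAGE_FLOOR((size_t)8191) == 4096);
	assert(DEMO_PAGE_ADDR2BASE((void *)(uintptr_t)0x12345) ==
	    (void *)(uintptr_t)0x12000);
	printf("page mask arithmetic behaves as described\n");
	return 0;
}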
|
||||
|
||||
/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
|
||||
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
|
||||
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
|
||||
|
||||
#if LG_HUGEPAGE != 0
|
||||
# define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
|
||||
#else
|
||||
/*
 * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths. If
 * we can't autodetect the hugepage size, it gets treated as 0, in which case
 * we'll trigger a compiler error in those arrays. Avoid this case by ensuring
 * that this value is at least 1. (We won't ever run in this degraded state;
 * hpa_supported() returns false in this case.)
 */
|
||||
# define HUGEPAGE_PAGES 1
|
||||
#endif
|
||||
|
||||
/* Return the huge page base address for the huge page containing address a. */
|
||||
#define HUGEPAGE_ADDR2BASE(a) \
|
||||
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
|
||||
/* Return the smallest pagesize multiple that is >= s. */
|
||||
#define HUGEPAGE_CEILING(s) \
|
||||
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
|
||||
|
||||
/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
|
||||
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
|
||||
# define PAGES_CAN_PURGE_LAZY
|
||||
#endif
|
||||
/*
|
||||
* PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
|
||||
*
|
||||
* The only supported way to hard-purge on Windows is to decommit and then
|
||||
* re-commit, but doing so is racy, and if re-commit fails it's a pain to
|
||||
* propagate the "poisoned" memory state. Since we typically decommit as the
|
||||
* next step after purging on Windows anyway, there's no point in adding such
|
||||
* complexity.
|
||||
*/
|
||||
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
|
||||
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
|
||||
defined(JEMALLOC_MAPS_COALESCE))
|
||||
# define PAGES_CAN_PURGE_FORCED
|
||||
#endif
|
||||
|
||||
static const bool pages_can_purge_lazy =
|
||||
#ifdef PAGES_CAN_PURGE_LAZY
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool pages_can_purge_forced =
|
||||
#ifdef PAGES_CAN_PURGE_FORCED
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
#if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL)
|
||||
# define PAGES_CAN_HUGIFY
|
||||
#endif
|
||||
|
||||
static const bool pages_can_hugify =
|
||||
#ifdef PAGES_CAN_HUGIFY
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
typedef enum {
|
||||
thp_mode_default = 0, /* Do not change hugepage settings. */
|
||||
thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
|
||||
thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */
|
||||
|
||||
thp_mode_names_limit = 3, /* Used for option processing. */
|
||||
thp_mode_not_supported = 3 /* No THP support detected. */
|
||||
} thp_mode_t;
|
||||
|
||||
#define THP_MODE_DEFAULT thp_mode_default
|
||||
extern thp_mode_t opt_thp;
|
||||
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
|
||||
extern const char *thp_mode_names[];
|
||||
|
||||
void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
|
||||
void pages_unmap(void *addr, size_t size);
|
||||
bool pages_commit(void *addr, size_t size);
|
||||
bool pages_decommit(void *addr, size_t size);
|
||||
bool pages_purge_lazy(void *addr, size_t size);
|
||||
bool pages_purge_forced(void *addr, size_t size);
|
||||
bool pages_huge(void *addr, size_t size);
|
||||
bool pages_nohuge(void *addr, size_t size);
|
||||
bool pages_dontdump(void *addr, size_t size);
|
||||
bool pages_dodump(void *addr, size_t size);
|
||||
bool pages_boot(void);
|
||||
void pages_set_thp_state (void *ptr, size_t size);
|
||||
void pages_mark_guards(void *head, void *tail);
|
||||
void pages_unmark_guards(void *head, void *tail);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
|
95
BeefRT/JEMalloc/include/jemalloc/internal/pai.h
Normal file
|
@ -0,0 +1,95 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PAI_H
|
||||
#define JEMALLOC_INTERNAL_PAI_H
|
||||
|
||||
/* An interface for page allocation. */
|
||||
|
||||
typedef struct pai_s pai_t;
|
||||
struct pai_s {
|
||||
/* Returns NULL on failure. */
|
||||
edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
|
||||
size_t alignment, bool zero, bool guarded, bool frequent_reuse,
|
||||
bool *deferred_work_generated);
|
||||
/*
|
||||
* Returns the number of extents added to the list (which may be fewer
|
||||
* than requested, in case of OOM). The list should already be
|
||||
* initialized. The only alignment guarantee is page-alignment, and
|
||||
* the results are not necessarily zeroed.
|
||||
*/
|
||||
size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
|
||||
size_t nallocs, edata_list_active_t *results,
|
||||
bool *deferred_work_generated);
|
||||
bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
|
||||
size_t old_size, size_t new_size, bool zero,
|
||||
bool *deferred_work_generated);
|
||||
bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
|
||||
size_t old_size, size_t new_size, bool *deferred_work_generated);
|
||||
void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
|
||||
bool *deferred_work_generated);
|
||||
/* This function empties out list as a side-effect of being called. */
|
||||
void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
|
||||
edata_list_active_t *list, bool *deferred_work_generated);
|
||||
uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
|
||||
};
|
||||
|
||||
/*
|
||||
* These are just simple convenience functions to avoid having to reference the
|
||||
* same pai_t twice on every invocation.
|
||||
*/
|
||||
|
||||
static inline edata_t *
|
||||
pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
|
||||
bool zero, bool guarded, bool frequent_reuse,
|
||||
bool *deferred_work_generated) {
|
||||
return self->alloc(tsdn, self, size, alignment, zero, guarded,
|
||||
frequent_reuse, deferred_work_generated);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
|
||||
edata_list_active_t *results, bool *deferred_work_generated) {
|
||||
return self->alloc_batch(tsdn, self, size, nallocs, results,
|
||||
deferred_work_generated);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
|
||||
size_t new_size, bool zero, bool *deferred_work_generated) {
|
||||
return self->expand(tsdn, self, edata, old_size, new_size, zero,
|
||||
deferred_work_generated);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
|
||||
size_t new_size, bool *deferred_work_generated) {
|
||||
return self->shrink(tsdn, self, edata, old_size, new_size,
|
||||
deferred_work_generated);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
|
||||
bool *deferred_work_generated) {
|
||||
self->dalloc(tsdn, self, edata, deferred_work_generated);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
|
||||
bool *deferred_work_generated) {
|
||||
self->dalloc_batch(tsdn, self, list, deferred_work_generated);
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
|
||||
return self->time_until_deferred_work(tsdn, self);
|
||||
}
|
||||
|
||||
/*
|
||||
* An implementation of batch allocation that simply calls alloc once for
|
||||
* each item in the list.
|
||||
*/
|
||||
size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
|
||||
size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
|
||||
/* Ditto, for dalloc. */
|
||||
void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
|
||||
edata_list_active_t *list, bool *deferred_work_generated);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PAI_H */
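The shape of this interface (a struct of function pointers plus thin static inline wrappers) is easy to reproduce outside jemalloc. The following self-contained sketch uses hypothetical toy_ names and no jemalloc types; it only demonstrates the vtable-plus-wrapper pattern, with a trivial malloc/free backend:

#include <stdio.h>
#include <stdlib.h>

/* A tiny interface in the style of pai_t: one vtable, thin wrappers. */
typedef struct toy_iface_s toy_iface_t;
struct toy_iface_s {
	void *(*alloc)(toy_iface_t *self, size_t size);
	void (*dalloc)(toy_iface_t *self, void *ptr);
};

/* Convenience wrappers, so callers don't name the vtable twice. */
static inline void *
toy_alloc(toy_iface_t *self, size_t size) {
	return self->alloc(self, size);
}

static inline void
toy_dalloc(toy_iface_t *self, void *ptr) {
	self->dalloc(self, ptr);
}

/* A trivial backend: forward straight to malloc/free. */
static void *
toy_malloc_backend_alloc(toy_iface_t *self, size_t size) {
	(void)self;
	return malloc(size);
}

static void
toy_malloc_backend_dalloc(toy_iface_t *self, void *ptr) {
	(void)self;
	free(ptr);
}

int main(void) {
	toy_iface_t backend = {toy_malloc_backend_alloc,
	    toy_malloc_backend_dalloc};
	void *p = toy_alloc(&backend, 64);
	printf("got %p from the toy interface\n", p);
	toy_dalloc(&backend, p);
	return 0;
}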
|
37
BeefRT/JEMalloc/include/jemalloc/internal/peak.h
Normal file
|
@ -0,0 +1,37 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PEAK_H
|
||||
#define JEMALLOC_INTERNAL_PEAK_H
|
||||
|
||||
typedef struct peak_s peak_t;
|
||||
struct peak_s {
|
||||
/* The highest recorded peak value, after adjustment (see below). */
|
||||
uint64_t cur_max;
|
||||
/*
|
||||
* The difference between alloc and dalloc at the last set_zero call;
|
||||
* this lets us cancel out the appropriate amount of excess.
|
||||
*/
|
||||
uint64_t adjustment;
|
||||
};
|
||||
|
||||
#define PEAK_INITIALIZER {0, 0}
|
||||
|
||||
static inline uint64_t
|
||||
peak_max(peak_t *peak) {
|
||||
return peak->cur_max;
|
||||
}
|
||||
|
||||
static inline void
|
||||
peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
|
||||
int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment);
|
||||
if (candidate_max > (int64_t)peak->cur_max) {
|
||||
peak->cur_max = candidate_max;
|
||||
}
|
||||
}
|
||||
|
||||
/* Resets the counter to zero; all peaks are now relative to this point. */
|
||||
static inline void
|
||||
peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
|
||||
peak->cur_max = 0;
|
||||
peak->adjustment = alloc - dalloc;
|
||||
}
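The adjustment trick can be exercised in isolation. This stand-alone sketch copies the arithmetic above into local demo_ types, so it compiles without the jemalloc build:

#include <assert.h>
#include <stdint.h>

/* Local copy of the peak-tracking logic above, for a stand-alone test. */
typedef struct { uint64_t cur_max; uint64_t adjustment; } demo_peak_t;

static void
demo_peak_update(demo_peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment);
	if (candidate_max > (int64_t)peak->cur_max) {
		peak->cur_max = (uint64_t)candidate_max;
	}
}

static void
demo_peak_set_zero(demo_peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	peak->cur_max = 0;
	peak->adjustment = alloc - dalloc;
}

int main(void) {
	demo_peak_t peak = {0, 0};
	/* 100 bytes allocated, 40 freed: net 60 is the running peak. */
	demo_peak_update(&peak, 100, 40);
	assert(peak.cur_max == 60);
	/* Re-zero here; future peaks are measured relative to net 60. */
	demo_peak_set_zero(&peak, 100, 40);
	/* Another 30 net bytes: the peak since the reset is 30, not 90. */
	demo_peak_update(&peak, 130, 40);
	assert(peak.cur_max == 30);
	return 0;
}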
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PEAK_H */
|
24
BeefRT/JEMalloc/include/jemalloc/internal/peak_event.h
Normal file
|
@ -0,0 +1,24 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H
|
||||
#define JEMALLOC_INTERNAL_PEAK_EVENT_H
|
||||
|
||||
/*
|
||||
* While peak.h contains the simple helper struct that tracks state, this
|
||||
* contains the allocator tie-ins (and knows about tsd, the event module, etc.).
|
||||
*/
|
||||
|
||||
/* Update the peak with current tsd state. */
|
||||
void peak_event_update(tsd_t *tsd);
|
||||
/* Set current state to zero. */
|
||||
void peak_event_zero(tsd_t *tsd);
|
||||
uint64_t peak_event_max(tsd_t *tsd);
|
||||
|
||||
/* Manual hooks. */
|
||||
/* The activity-triggered hooks. */
|
||||
uint64_t peak_alloc_new_event_wait(tsd_t *tsd);
|
||||
uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd);
|
||||
void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed);
|
||||
uint64_t peak_dalloc_new_event_wait(tsd_t *tsd);
|
||||
uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd);
|
||||
void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */
|
520
BeefRT/JEMalloc/include/jemalloc/internal/ph.h
Normal file
|
@ -0,0 +1,520 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PH_H
|
||||
#define JEMALLOC_INTERNAL_PH_H
|
||||
|
||||
/*
 * A Pairing Heap implementation.
 *
 * "The Pairing Heap: A New Form of Self-Adjusting Heap"
 * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
 *
 * With an auxiliary twopass list, described in a follow-on paper.
 *
 * "Pairing Heaps: Experiments and Analysis"
 * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
 *
 *******************************************************************************
 *
 * We include a non-obvious optimization:
 * - First, we introduce a new pop-and-link operation; pop the two most
 *   recently-inserted items off the aux-list, link them, and push the resulting
 *   heap.
 * - We maintain a count of the number of insertions since the last time we
 *   merged the aux-list (i.e. via first() or remove_first()). After N inserts,
 *   we do ffs(N) pop-and-link operations.
 *
 * One way to think of this is that we're progressively building up a tree in
 * the aux-list, rather than a linked-list (think of the series of merges that
 * will be performed as the aux-count grows).
 *
 * There are a couple of reasons we benefit from this:
 * - Ordinarily, after N insertions, the aux-list is of size N. With our
 *   strategy, it's of size O(log(N)). So we decrease the worst-case time of
 *   first() calls, and reduce the average cost of remove_min calls. Since
 *   these almost always occur while holding a lock, we practically reduce the
 *   frequency of unusually long hold times.
 * - This moves the bulk of the work of merging the aux-list onto the threads
 *   that are inserting into the heap. In some common scenarios, insertions
 *   happen in bulk, from a single thread (think tcache flushing; we potentially
 *   move many slabs from slabs_full to slabs_nonfull). All the nodes in this
 *   case are in the inserting thread's cache, and linking them is very cheap
 *   (cache misses dominate linking cost). Without this optimization, linking
 *   happens on the next call to remove_first. Since that remove_first call
 *   likely happens on a different thread (or at least, after the cache has
 *   gotten cold if done on the same thread), deferring linking trades cheap
 *   link operations now for expensive ones later.
 *
 * The ffs trick keeps amortized insert cost at constant time. Similar
 * strategies based on periodically sorting the list after a batch of operations
 * perform worse than this in practice, even with various fancy tricks; they
 * all take the amortized complexity of an insert from O(1) to O(log(n)).
 */
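The amortized-constant claim is easy to check numerically. The stand-alone sketch below just sums ffs(N) over N inserts (demo_ffs stands in for the ffs_zu helper from bit_util.h); the average settles near 2 links per insert, because half of all inserts trigger one pop-and-link, a quarter trigger two, and the log-sized bursts are exponentially rare. (The real code additionally stops early once the aux list runs out of pairs, via ph_try_aux_merge_pair() returning true.)

#include <stdio.h>
#include <stddef.h>

/* ffs(n): 1-based index of the lowest set bit, as used in the comment above. */
static unsigned
demo_ffs(size_t n) {
	unsigned i = 1;
	while ((n & 1) == 0) {
		n >>= 1;
		i++;
	}
	return i;
}

int main(void) {
	/*
	 * After the Nth insert the heap performs up to ffs(N) pop-and-link
	 * operations.  Summing those over N inserts stays below 2*N, which is
	 * where the constant amortized insert cost comes from.
	 */
	size_t total_links = 0;
	size_t n_inserts = (size_t)1 << 20;
	for (size_t n = 1; n <= n_inserts; n++) {
		total_links += demo_ffs(n);
	}
	printf("average links per insert over 2^20 inserts: %.3f\n",
	    (double)total_links / (double)n_inserts);
	return 0;
}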
|
||||
|
||||
typedef int (*ph_cmp_t)(void *, void *);
|
||||
|
||||
/* Node structure. */
|
||||
typedef struct phn_link_s phn_link_t;
|
||||
struct phn_link_s {
|
||||
void *prev;
|
||||
void *next;
|
||||
void *lchild;
|
||||
};
|
||||
|
||||
typedef struct ph_s ph_t;
|
||||
struct ph_s {
|
||||
void *root;
|
||||
/*
|
||||
* Inserts done since the last aux-list merge. This is not necessarily
|
||||
* the size of the aux-list, since it's possible that removals have
|
||||
* happened since, and we don't track whether or not those removals are
|
||||
* from the aux list.
|
||||
*/
|
||||
size_t auxcount;
|
||||
};
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE phn_link_t *
|
||||
phn_link_get(void *phn, size_t offset) {
|
||||
return (phn_link_t *)(((uintptr_t)phn) + offset);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
phn_link_init(void *phn, size_t offset) {
|
||||
phn_link_get(phn, offset)->prev = NULL;
|
||||
phn_link_get(phn, offset)->next = NULL;
|
||||
phn_link_get(phn, offset)->lchild = NULL;
|
||||
}
|
||||
|
||||
/* Internal utility helpers. */
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
phn_lchild_get(void *phn, size_t offset) {
|
||||
return phn_link_get(phn, offset)->lchild;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
phn_lchild_set(void *phn, void *lchild, size_t offset) {
|
||||
phn_link_get(phn, offset)->lchild = lchild;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
phn_next_get(void *phn, size_t offset) {
|
||||
return phn_link_get(phn, offset)->next;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
phn_next_set(void *phn, void *next, size_t offset) {
|
||||
phn_link_get(phn, offset)->next = next;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
phn_prev_get(void *phn, size_t offset) {
|
||||
return phn_link_get(phn, offset)->prev;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
phn_prev_set(void *phn, void *prev, size_t offset) {
|
||||
phn_link_get(phn, offset)->prev = prev;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
phn_merge_ordered(void *phn0, void *phn1, size_t offset,
|
||||
ph_cmp_t cmp) {
|
||||
void *phn0child;
|
||||
|
||||
assert(phn0 != NULL);
|
||||
assert(phn1 != NULL);
|
||||
assert(cmp(phn0, phn1) <= 0);
|
||||
|
||||
phn_prev_set(phn1, phn0, offset);
|
||||
phn0child = phn_lchild_get(phn0, offset);
|
||||
phn_next_set(phn1, phn0child, offset);
|
||||
if (phn0child != NULL) {
|
||||
phn_prev_set(phn0child, phn1, offset);
|
||||
}
|
||||
phn_lchild_set(phn0, phn1, offset);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
phn_merge(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) {
|
||||
void *result;
|
||||
if (phn0 == NULL) {
|
||||
result = phn1;
|
||||
} else if (phn1 == NULL) {
|
||||
result = phn0;
|
||||
} else if (cmp(phn0, phn1) < 0) {
|
||||
phn_merge_ordered(phn0, phn1, offset, cmp);
|
||||
result = phn0;
|
||||
} else {
|
||||
phn_merge_ordered(phn1, phn0, offset, cmp);
|
||||
result = phn1;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
phn_merge_siblings(void *phn, size_t offset, ph_cmp_t cmp) {
|
||||
void *head = NULL;
|
||||
void *tail = NULL;
|
||||
void *phn0 = phn;
|
||||
void *phn1 = phn_next_get(phn0, offset);
|
||||
|
||||
/*
|
||||
* Multipass merge, wherein the first two elements of a FIFO
|
||||
* are repeatedly merged, and each result is appended to the
|
||||
* singly linked FIFO, until the FIFO contains only a single
|
||||
* element. We start with a sibling list but no reference to
|
||||
* its tail, so we do a single pass over the sibling list to
|
||||
* populate the FIFO.
|
||||
*/
|
||||
if (phn1 != NULL) {
|
||||
void *phnrest = phn_next_get(phn1, offset);
|
||||
if (phnrest != NULL) {
|
||||
phn_prev_set(phnrest, NULL, offset);
|
||||
}
|
||||
phn_prev_set(phn0, NULL, offset);
|
||||
phn_next_set(phn0, NULL, offset);
|
||||
phn_prev_set(phn1, NULL, offset);
|
||||
phn_next_set(phn1, NULL, offset);
|
||||
phn0 = phn_merge(phn0, phn1, offset, cmp);
|
||||
head = tail = phn0;
|
||||
phn0 = phnrest;
|
||||
while (phn0 != NULL) {
|
||||
phn1 = phn_next_get(phn0, offset);
|
||||
if (phn1 != NULL) {
|
||||
phnrest = phn_next_get(phn1, offset);
|
||||
if (phnrest != NULL) {
|
||||
phn_prev_set(phnrest, NULL, offset);
|
||||
}
|
||||
phn_prev_set(phn0, NULL, offset);
|
||||
phn_next_set(phn0, NULL, offset);
|
||||
phn_prev_set(phn1, NULL, offset);
|
||||
phn_next_set(phn1, NULL, offset);
|
||||
phn0 = phn_merge(phn0, phn1, offset, cmp);
|
||||
phn_next_set(tail, phn0, offset);
|
||||
tail = phn0;
|
||||
phn0 = phnrest;
|
||||
} else {
|
||||
phn_next_set(tail, phn0, offset);
|
||||
tail = phn0;
|
||||
phn0 = NULL;
|
||||
}
|
||||
}
|
||||
phn0 = head;
|
||||
phn1 = phn_next_get(phn0, offset);
|
||||
if (phn1 != NULL) {
|
||||
while (true) {
|
||||
head = phn_next_get(phn1, offset);
|
||||
assert(phn_prev_get(phn0, offset) == NULL);
|
||||
phn_next_set(phn0, NULL, offset);
|
||||
assert(phn_prev_get(phn1, offset) == NULL);
|
||||
phn_next_set(phn1, NULL, offset);
|
||||
phn0 = phn_merge(phn0, phn1, offset, cmp);
|
||||
if (head == NULL) {
|
||||
break;
|
||||
}
|
||||
phn_next_set(tail, phn0, offset);
|
||||
tail = phn0;
|
||||
phn0 = head;
|
||||
phn1 = phn_next_get(phn0, offset);
|
||||
}
|
||||
}
|
||||
}
|
||||
return phn0;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
ph_merge_aux(ph_t *ph, size_t offset, ph_cmp_t cmp) {
|
||||
ph->auxcount = 0;
|
||||
void *phn = phn_next_get(ph->root, offset);
|
||||
if (phn != NULL) {
|
||||
phn_prev_set(ph->root, NULL, offset);
|
||||
phn_next_set(ph->root, NULL, offset);
|
||||
phn_prev_set(phn, NULL, offset);
|
||||
phn = phn_merge_siblings(phn, offset, cmp);
|
||||
assert(phn_next_get(phn, offset) == NULL);
|
||||
ph->root = phn_merge(ph->root, phn, offset, cmp);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ph_merge_children(void *phn, size_t offset, ph_cmp_t cmp) {
|
||||
void *result;
|
||||
void *lchild = phn_lchild_get(phn, offset);
|
||||
if (lchild == NULL) {
|
||||
result = NULL;
|
||||
} else {
|
||||
result = phn_merge_siblings(lchild, offset, cmp);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
ph_new(ph_t *ph) {
|
||||
ph->root = NULL;
|
||||
ph->auxcount = 0;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
ph_empty(ph_t *ph) {
|
||||
return ph->root == NULL;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ph_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
|
||||
if (ph->root == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
ph_merge_aux(ph, offset, cmp);
|
||||
return ph->root;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ph_any(ph_t *ph, size_t offset) {
|
||||
if (ph->root == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
void *aux = phn_next_get(ph->root, offset);
|
||||
if (aux != NULL) {
|
||||
return aux;
|
||||
}
|
||||
return ph->root;
|
||||
}
|
||||
|
||||
/* Returns true if we should stop trying to merge. */
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
ph_try_aux_merge_pair(ph_t *ph, size_t offset, ph_cmp_t cmp) {
|
||||
assert(ph->root != NULL);
|
||||
void *phn0 = phn_next_get(ph->root, offset);
|
||||
if (phn0 == NULL) {
|
||||
return true;
|
||||
}
|
||||
void *phn1 = phn_next_get(phn0, offset);
|
||||
if (phn1 == NULL) {
|
||||
return true;
|
||||
}
|
||||
void *next_phn1 = phn_next_get(phn1, offset);
|
||||
phn_next_set(phn0, NULL, offset);
|
||||
phn_prev_set(phn0, NULL, offset);
|
||||
phn_next_set(phn1, NULL, offset);
|
||||
phn_prev_set(phn1, NULL, offset);
|
||||
phn0 = phn_merge(phn0, phn1, offset, cmp);
|
||||
phn_next_set(phn0, next_phn1, offset);
|
||||
if (next_phn1 != NULL) {
|
||||
phn_prev_set(next_phn1, phn0, offset);
|
||||
}
|
||||
phn_next_set(ph->root, phn0, offset);
|
||||
phn_prev_set(phn0, ph->root, offset);
|
||||
return next_phn1 == NULL;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
|
||||
phn_link_init(phn, offset);
|
||||
|
||||
/*
|
||||
* Treat the root as an aux list during insertion, and lazily merge
|
||||
* during a_prefix##remove_first(). For elements that are inserted,
|
||||
* then removed via a_prefix##remove() before the aux list is ever
|
||||
* processed, this makes insert/remove constant-time, whereas eager
|
||||
* merging would make insert O(log n).
|
||||
*/
|
||||
if (ph->root == NULL) {
|
||||
ph->root = phn;
|
||||
} else {
|
||||
/*
 * As a special case, check to see if we can replace the root.
 * This is practically common in some important cases, and lets
 * us defer some insertions (hopefully, until the point where
 * some of the items in the aux list have been removed, saving
 * us from linking them at all).
 */
|
||||
if (cmp(phn, ph->root) < 0) {
|
||||
phn_lchild_set(phn, ph->root, offset);
|
||||
phn_prev_set(ph->root, phn, offset);
|
||||
ph->root = phn;
|
||||
ph->auxcount = 0;
|
||||
return;
|
||||
}
|
||||
ph->auxcount++;
|
||||
phn_next_set(phn, phn_next_get(ph->root, offset), offset);
|
||||
if (phn_next_get(ph->root, offset) != NULL) {
|
||||
phn_prev_set(phn_next_get(ph->root, offset), phn,
|
||||
offset);
|
||||
}
|
||||
phn_prev_set(phn, ph->root, offset);
|
||||
phn_next_set(ph->root, phn, offset);
|
||||
}
|
||||
if (ph->auxcount > 1) {
|
||||
unsigned nmerges = ffs_zu(ph->auxcount - 1);
|
||||
bool done = false;
|
||||
for (unsigned i = 0; i < nmerges && !done; i++) {
|
||||
done = ph_try_aux_merge_pair(ph, offset, cmp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
|
||||
void *ret;
|
||||
|
||||
if (ph->root == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
ph_merge_aux(ph, offset, cmp);
|
||||
ret = ph->root;
|
||||
ph->root = ph_merge_children(ph->root, offset, cmp);
|
||||
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
ph_remove(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
|
||||
void *replace;
|
||||
void *parent;
|
||||
|
||||
if (ph->root == phn) {
|
||||
/*
|
||||
* We can delete from aux list without merging it, but we need
|
||||
* to merge if we are dealing with the root node and it has
|
||||
* children.
|
||||
*/
|
||||
if (phn_lchild_get(phn, offset) == NULL) {
|
||||
ph->root = phn_next_get(phn, offset);
|
||||
if (ph->root != NULL) {
|
||||
phn_prev_set(ph->root, NULL, offset);
|
||||
}
|
||||
return;
|
||||
}
|
||||
ph_merge_aux(ph, offset, cmp);
|
||||
if (ph->root == phn) {
|
||||
ph->root = ph_merge_children(ph->root, offset, cmp);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Get parent (if phn is leftmost child) before mutating. */
|
||||
if ((parent = phn_prev_get(phn, offset)) != NULL) {
|
||||
if (phn_lchild_get(parent, offset) != phn) {
|
||||
parent = NULL;
|
||||
}
|
||||
}
|
||||
/* Find a possible replacement node, and link to parent. */
|
||||
replace = ph_merge_children(phn, offset, cmp);
|
||||
/* Set next/prev for sibling linked list. */
|
||||
if (replace != NULL) {
|
||||
if (parent != NULL) {
|
||||
phn_prev_set(replace, parent, offset);
|
||||
phn_lchild_set(parent, replace, offset);
|
||||
} else {
|
||||
phn_prev_set(replace, phn_prev_get(phn, offset),
|
||||
offset);
|
||||
if (phn_prev_get(phn, offset) != NULL) {
|
||||
phn_next_set(phn_prev_get(phn, offset), replace,
|
||||
offset);
|
||||
}
|
||||
}
|
||||
phn_next_set(replace, phn_next_get(phn, offset), offset);
|
||||
if (phn_next_get(phn, offset) != NULL) {
|
||||
phn_prev_set(phn_next_get(phn, offset), replace,
|
||||
offset);
|
||||
}
|
||||
} else {
|
||||
if (parent != NULL) {
|
||||
void *next = phn_next_get(phn, offset);
|
||||
phn_lchild_set(parent, next, offset);
|
||||
if (next != NULL) {
|
||||
phn_prev_set(next, parent, offset);
|
||||
}
|
||||
} else {
|
||||
assert(phn_prev_get(phn, offset) != NULL);
|
||||
phn_next_set(
|
||||
phn_prev_get(phn, offset),
|
||||
phn_next_get(phn, offset), offset);
|
||||
}
|
||||
if (phn_next_get(phn, offset) != NULL) {
|
||||
phn_prev_set(
|
||||
phn_next_get(phn, offset),
|
||||
phn_prev_get(phn, offset), offset);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#define ph_structs(a_prefix, a_type) \
|
||||
typedef struct { \
|
||||
phn_link_t link; \
|
||||
} a_prefix##_link_t; \
|
||||
\
|
||||
typedef struct { \
|
||||
ph_t ph; \
|
||||
} a_prefix##_t;
|
||||
|
||||
/*
|
||||
* The ph_proto() macro generates function prototypes that correspond to the
|
||||
* functions generated by an equivalently parameterized call to ph_gen().
|
||||
*/
|
||||
#define ph_proto(a_attr, a_prefix, a_type) \
|
||||
\
|
||||
a_attr void a_prefix##_new(a_prefix##_t *ph); \
|
||||
a_attr bool a_prefix##_empty(a_prefix##_t *ph); \
|
||||
a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \
|
||||
a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \
|
||||
a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \
|
||||
a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \
|
||||
a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \
|
||||
a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph);
|
||||
|
||||
/* The ph_gen() macro generates a type-specific pairing heap implementation. */
|
||||
#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \
|
||||
JEMALLOC_ALWAYS_INLINE int \
|
||||
a_prefix##_ph_cmp(void *a, void *b) { \
|
||||
return a_cmp((a_type *)a, (a_type *)b); \
|
||||
} \
|
||||
\
|
||||
a_attr void \
|
||||
a_prefix##_new(a_prefix##_t *ph) { \
|
||||
ph_new(&ph->ph); \
|
||||
} \
|
||||
\
|
||||
a_attr bool \
|
||||
a_prefix##_empty(a_prefix##_t *ph) { \
|
||||
return ph_empty(&ph->ph); \
|
||||
} \
|
||||
\
|
||||
a_attr a_type * \
|
||||
a_prefix##_first(a_prefix##_t *ph) { \
|
||||
return ph_first(&ph->ph, offsetof(a_type, a_field), \
|
||||
&a_prefix##_ph_cmp); \
|
||||
} \
|
||||
\
|
||||
a_attr a_type * \
|
||||
a_prefix##_any(a_prefix##_t *ph) { \
|
||||
return ph_any(&ph->ph, offsetof(a_type, a_field)); \
|
||||
} \
|
||||
\
|
||||
a_attr void \
|
||||
a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \
|
||||
ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \
|
||||
a_prefix##_ph_cmp); \
|
||||
} \
|
||||
\
|
||||
a_attr a_type * \
|
||||
a_prefix##_remove_first(a_prefix##_t *ph) { \
|
||||
return ph_remove_first(&ph->ph, offsetof(a_type, a_field), \
|
||||
a_prefix##_ph_cmp); \
|
||||
} \
|
||||
\
|
||||
a_attr void \
|
||||
a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \
|
||||
ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \
|
||||
a_prefix##_ph_cmp); \
|
||||
} \
|
||||
\
|
||||
a_attr a_type * \
|
||||
a_prefix##_remove_any(a_prefix##_t *ph) { \
|
||||
a_type *ret = a_prefix##_any(ph); \
|
||||
if (ret != NULL) { \
|
||||
a_prefix##_remove(ph, ret); \
|
||||
} \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PH_H */
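A usage sketch of these macros follows; widget_t and widget_cmp are hypothetical names, and the snippet assumes the usual jemalloc internal preamble (which pulls in ph.h and JEMALLOC_ALWAYS_INLINE), so it is not a stand-alone program:

/* widget_t is a hypothetical node type keyed by a 64-bit value. */
typedef struct widget_s widget_t;

/* Generates widget_heap_link_t (to embed in the node) and widget_heap_t. */
ph_structs(widget_heap, widget_t);

struct widget_s {
	uint64_t key;
	widget_heap_link_t heap_link;	/* intrusive link lives in the node */
};

static inline int
widget_cmp(widget_t *a, widget_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

ph_proto(static, widget_heap, widget_t)
ph_gen(static, widget_heap, widget_t, heap_link, widget_cmp)

/*
 * Typical use: widget_heap_new(&heap), widget_heap_insert(&heap, w), and
 * widget_heap_remove_first(&heap) then yields widgets in ascending key order.
 */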
|
|
@ -0,0 +1,5 @@
|
|||
#!/bin/sh
|
||||
|
||||
for symbol in `cat "$@"` ; do
|
||||
echo "#define ${symbol} JEMALLOC_N(${symbol})"
|
||||
done
|
|
@ -0,0 +1,52 @@
|
|||
#!/usr/bin/env awk -f
|
||||
|
||||
BEGIN {
|
||||
sym_prefix = ""
|
||||
split("\
|
||||
je_aligned_alloc \
|
||||
je_calloc \
|
||||
je_dallocx \
|
||||
je_free \
|
||||
je_mallctl \
|
||||
je_mallctlbymib \
|
||||
je_mallctlnametomib \
|
||||
je_malloc \
|
||||
je_malloc_conf \
|
||||
je_malloc_conf_2_conf_harder \
|
||||
je_malloc_message \
|
||||
je_malloc_stats_print \
|
||||
je_malloc_usable_size \
|
||||
je_mallocx \
|
||||
je_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c \
|
||||
je_nallocx \
|
||||
je_posix_memalign \
|
||||
je_rallocx \
|
||||
je_realloc \
|
||||
je_sallocx \
|
||||
je_sdallocx \
|
||||
je_xallocx \
|
||||
tls_callback \
|
||||
", exported_symbol_names)
|
||||
# Store exported symbol names as keys in exported_symbols.
|
||||
for (i in exported_symbol_names) {
|
||||
exported_symbols[exported_symbol_names[i]] = 1
|
||||
}
|
||||
}
|
||||
|
||||
# Process 'nm -a <c_source.o>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 0000000000000008 D opt_junk
|
||||
# 0000000000007574 T malloc_initialized
|
||||
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
|
||||
print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
|
||||
}
|
||||
|
||||
# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 353 00008098 SECT4 notype External | opt_junk
|
||||
# 3F1 00000000 SECT7 notype () External | malloc_initialized
|
||||
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
|
||||
print $NF
|
||||
}
|
51
BeefRT/JEMalloc/include/jemalloc/internal/private_symbols.sh
Normal file
|
@ -0,0 +1,51 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Generate private_symbols[_jet].awk.
|
||||
#
|
||||
# Usage: private_symbols.sh <sym_prefix> <sym>*
|
||||
#
|
||||
# <sym_prefix> is typically "" or "_".
|
||||
|
||||
sym_prefix=$1
|
||||
shift
|
||||
|
||||
cat <<EOF
|
||||
#!/usr/bin/env awk -f
|
||||
|
||||
BEGIN {
|
||||
sym_prefix = "${sym_prefix}"
|
||||
split("\\
|
||||
EOF
|
||||
|
||||
for public_sym in "$@" ; do
|
||||
cat <<EOF
|
||||
${sym_prefix}${public_sym} \\
|
||||
EOF
|
||||
done
|
||||
|
||||
cat <<"EOF"
|
||||
", exported_symbol_names)
|
||||
# Store exported symbol names as keys in exported_symbols.
|
||||
for (i in exported_symbol_names) {
|
||||
exported_symbols[exported_symbol_names[i]] = 1
|
||||
}
|
||||
}
|
||||
|
||||
# Process 'nm -a <c_source.o>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 0000000000000008 D opt_junk
|
||||
# 0000000000007574 T malloc_initialized
|
||||
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
|
||||
print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
|
||||
}
|
||||
|
||||
# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 353 00008098 SECT4 notype External | opt_junk
|
||||
# 3F1 00000000 SECT7 notype () External | malloc_initialized
|
||||
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
|
||||
print $NF
|
||||
}
|
||||
EOF
|
|
@ -0,0 +1,52 @@
|
|||
#!/usr/bin/env awk -f
|
||||
|
||||
BEGIN {
|
||||
sym_prefix = ""
|
||||
split("\
|
||||
jet_aligned_alloc \
|
||||
jet_calloc \
|
||||
jet_dallocx \
|
||||
jet_free \
|
||||
jet_mallctl \
|
||||
jet_mallctlbymib \
|
||||
jet_mallctlnametomib \
|
||||
jet_malloc \
|
||||
jet_malloc_conf \
|
||||
jet_malloc_conf_2_conf_harder \
|
||||
jet_malloc_message \
|
||||
jet_malloc_stats_print \
|
||||
jet_malloc_usable_size \
|
||||
jet_mallocx \
|
||||
jet_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c \
|
||||
jet_nallocx \
|
||||
jet_posix_memalign \
|
||||
jet_rallocx \
|
||||
jet_realloc \
|
||||
jet_sallocx \
|
||||
jet_sdallocx \
|
||||
jet_xallocx \
|
||||
tls_callback \
|
||||
", exported_symbol_names)
|
||||
# Store exported symbol names as keys in exported_symbols.
|
||||
for (i in exported_symbol_names) {
|
||||
exported_symbols[exported_symbol_names[i]] = 1
|
||||
}
|
||||
}
|
||||
|
||||
# Process 'nm -a <c_source.o>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 0000000000000008 D opt_junk
|
||||
# 0000000000007574 T malloc_initialized
|
||||
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
|
||||
print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
|
||||
}
|
||||
|
||||
# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 353 00008098 SECT4 notype External | opt_junk
|
||||
# 3F1 00000000 SECT7 notype () External | malloc_initialized
|
||||
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
|
||||
print $NF
|
||||
}
|
168
BeefRT/JEMalloc/include/jemalloc/internal/prng.h
Normal file
|
@ -0,0 +1,168 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PRNG_H
|
||||
#define JEMALLOC_INTERNAL_PRNG_H
|
||||
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
|
||||
/*
 * Simple linear congruential pseudo-random number generator:
 *
 *   prng(x) = (a*x + c) % m
 *
 * where the following constants ensure maximal period:
 *
 *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
 *   c == Odd number (relatively prime to 2^n).
 *   m == 2^32
 *
 * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
 *
 * This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position. For example, the lowest bit has a cycle of 2,
 * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
 * bits.
 */
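The recurrence and the upper-bit extraction can be checked in a stand-alone program. The constants are copied from the definitions below; the demo_ prefix simply avoids clashing with the real functions:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PRNG_A_64 UINT64_C(6364136223846793005)
#define DEMO_PRNG_C_64 UINT64_C(1442695040888963407)

static uint64_t
demo_prng_state_next_u64(uint64_t state) {
	return (state * DEMO_PRNG_A_64) + DEMO_PRNG_C_64;
}

int main(void) {
	uint64_t state = 42;	/* any seed works; the full period is 2^64 */
	for (int i = 0; i < 4; i++) {
		state = demo_prng_state_next_u64(state);
		/*
		 * Take the top 8 bits rather than "state % 256": the low bits
		 * of an LCG with a power-of-two modulus have very short cycles.
		 */
		unsigned r = (unsigned)(state >> 56);
		printf("draw %d in [0, 256): %u\n", i, r);
	}
	return 0;
}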
|
||||
|
||||
/******************************************************************************/
|
||||
/* INTERNAL DEFINITIONS -- IGNORE */
|
||||
/******************************************************************************/
|
||||
#define PRNG_A_32 UINT32_C(1103515241)
|
||||
#define PRNG_C_32 UINT32_C(12347)
|
||||
|
||||
#define PRNG_A_64 UINT64_C(6364136223846793005)
|
||||
#define PRNG_C_64 UINT64_C(1442695040888963407)
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint32_t
|
||||
prng_state_next_u32(uint32_t state) {
|
||||
return (state * PRNG_A_32) + PRNG_C_32;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint64_t
|
||||
prng_state_next_u64(uint64_t state) {
|
||||
return (state * PRNG_A_64) + PRNG_C_64;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
prng_state_next_zu(size_t state) {
|
||||
#if LG_SIZEOF_PTR == 2
|
||||
return (state * PRNG_A_32) + PRNG_C_32;
|
||||
#elif LG_SIZEOF_PTR == 3
|
||||
return (state * PRNG_A_64) + PRNG_C_64;
|
||||
#else
|
||||
#error Unsupported pointer size
|
||||
#endif
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
/* BEGIN PUBLIC API */
|
||||
/******************************************************************************/
|
||||
|
||||
/*
|
||||
* The prng_lg_range functions give a uniform int in the half-open range [0,
|
||||
* 2**lg_range).
|
||||
*/
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint32_t
|
||||
prng_lg_range_u32(uint32_t *state, unsigned lg_range) {
|
||||
assert(lg_range > 0);
|
||||
assert(lg_range <= 32);
|
||||
|
||||
*state = prng_state_next_u32(*state);
|
||||
uint32_t ret = *state >> (32 - lg_range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint64_t
|
||||
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
|
||||
assert(lg_range > 0);
|
||||
assert(lg_range <= 64);
|
||||
|
||||
*state = prng_state_next_u64(*state);
|
||||
uint64_t ret = *state >> (64 - lg_range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
prng_lg_range_zu(size_t *state, unsigned lg_range) {
|
||||
assert(lg_range > 0);
|
||||
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
|
||||
|
||||
*state = prng_state_next_zu(*state);
|
||||
size_t ret = *state >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * The prng_range functions behave like the prng_lg_range functions, but return
 * a result in [0, range) instead of [0, 2**lg_range).
 */
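The rejection loop used below can be illustrated in plain C. This sketch reuses the 64-bit constants above and replaces pow2_ceil/ffs with a simple loop so it stays self-contained; on average it needs fewer than two draws per result:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PRNG_A_64 UINT64_C(6364136223846793005)
#define DEMO_PRNG_C_64 UINT64_C(1442695040888963407)

/* Smallest lg such that 2^lg >= range (range > 1 assumed). */
static unsigned
demo_lg_ceil(uint64_t range) {
	unsigned lg = 0;
	while (((uint64_t)1 << lg) < range) {
		lg++;
	}
	return lg;
}

static uint64_t
demo_prng_range_u64(uint64_t *state, uint64_t range) {
	assert(range > 1);	/* range == 1 is special-cased in jemalloc */
	unsigned lg_range = demo_lg_ceil(range);
	uint64_t ret;
	do {
		/* Advance the LCG and keep the top lg_range bits. */
		*state = (*state * DEMO_PRNG_A_64) + DEMO_PRNG_C_64;
		ret = *state >> (64 - lg_range);
	} while (ret >= range);	/* reject draws that fall past the range */
	return ret;
}

int main(void) {
	uint64_t state = 123;
	for (int i = 0; i < 5; i++) {
		printf("d6 roll: %llu\n",
		    (unsigned long long)(demo_prng_range_u64(&state, 6) + 1));
	}
	return 0;
}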
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint32_t
|
||||
prng_range_u32(uint32_t *state, uint32_t range) {
|
||||
assert(range != 0);
|
||||
/*
|
||||
* If range were 1, lg_range would be 0, so the shift in
|
||||
* prng_lg_range_u32 would be a shift of a 32-bit variable by 32 bits,
|
||||
* which is UB. Just handle this case as a one-off.
|
||||
*/
|
||||
if (range == 1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Compute the ceiling of lg(range). */
|
||||
unsigned lg_range = ffs_u32(pow2_ceil_u32(range));
|
||||
|
||||
/* Generate a result in [0..range) via repeated trial. */
|
||||
uint32_t ret;
|
||||
do {
|
||||
ret = prng_lg_range_u32(state, lg_range);
|
||||
} while (ret >= range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint64_t
|
||||
prng_range_u64(uint64_t *state, uint64_t range) {
|
||||
assert(range != 0);
|
||||
|
||||
/* See the note in prng_range_u32. */
|
||||
if (range == 1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Compute the ceiling of lg(range). */
|
||||
unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
|
||||
|
||||
/* Generate a result in [0..range) via repeated trial. */
|
||||
uint64_t ret;
|
||||
do {
|
||||
ret = prng_lg_range_u64(state, lg_range);
|
||||
} while (ret >= range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
prng_range_zu(size_t *state, size_t range) {
|
||||
assert(range != 0);
|
||||
|
||||
/* See the note in prng_range_u32. */
|
||||
if (range == 1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Compute the ceiling of lg(range). */
|
||||
unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
|
||||
|
||||
/* Generate a result in [0..range) via repeated trial. */
|
||||
size_t ret;
|
||||
do {
|
||||
ret = prng_lg_range_zu(state, lg_range);
|
||||
} while (ret >= range);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PRNG_H */
|
37
BeefRT/JEMalloc/include/jemalloc/internal/prof_data.h
Normal file
|
@ -0,0 +1,37 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PROF_DATA_H
|
||||
#define JEMALLOC_INTERNAL_PROF_DATA_H
|
||||
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
extern malloc_mutex_t bt2gctx_mtx;
|
||||
extern malloc_mutex_t tdatas_mtx;
|
||||
extern malloc_mutex_t prof_dump_mtx;
|
||||
|
||||
extern malloc_mutex_t *gctx_locks;
|
||||
extern malloc_mutex_t *tdata_locks;
|
||||
|
||||
extern size_t prof_unbiased_sz[PROF_SC_NSIZES];
|
||||
extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];
|
||||
|
||||
void prof_bt_hash(const void *key, size_t r_hash[2]);
|
||||
bool prof_bt_keycomp(const void *k1, const void *k2);
|
||||
|
||||
bool prof_data_init(tsd_t *tsd);
|
||||
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
|
||||
char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
|
||||
int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
|
||||
void prof_unbias_map_init();
|
||||
void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
|
||||
prof_tdata_t *tdata, bool leakcheck);
|
||||
prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,
|
||||
uint64_t thr_discrim, char *thread_name, bool active);
|
||||
void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata);
|
||||
void prof_reset(tsd_t *tsd, size_t lg_sample);
|
||||
void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx);
|
||||
|
||||
/* Used in unit tests. */
|
||||
size_t prof_tdata_count(void);
|
||||
size_t prof_bt_count(void);
|
||||
void prof_cnt_all(prof_cnt_t *cnt_all);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PROF_DATA_H */
|
95
BeefRT/JEMalloc/include/jemalloc/internal/prof_externs.h
Normal file
|
@ -0,0 +1,95 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
|
||||
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/prof_hook.h"
|
||||
|
||||
extern bool opt_prof;
|
||||
extern bool opt_prof_active;
|
||||
extern bool opt_prof_thread_active_init;
|
||||
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
|
||||
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
|
||||
extern bool opt_prof_gdump; /* High-water memory dumping. */
|
||||
extern bool opt_prof_final; /* Final profile dumping. */
|
||||
extern bool opt_prof_leak; /* Dump leak summary at exit. */
|
||||
extern bool opt_prof_leak_error; /* Exit with error code if memory leaked */
|
||||
extern bool opt_prof_accum; /* Report cumulative bytes. */
|
||||
extern bool opt_prof_log; /* Turn logging on at boot. */
|
||||
extern char opt_prof_prefix[
|
||||
/* Minimize memory bloat for non-prof builds. */
|
||||
#ifdef JEMALLOC_PROF
|
||||
PATH_MAX +
|
||||
#endif
|
||||
1];
|
||||
extern bool opt_prof_unbias;
|
||||
|
||||
/* For recording recent allocations */
|
||||
extern ssize_t opt_prof_recent_alloc_max;
|
||||
|
||||
/* Whether to use thread name provided by the system or by mallctl. */
|
||||
extern bool opt_prof_sys_thread_name;
|
||||
|
||||
/* Whether to record per size class counts and request size totals. */
|
||||
extern bool opt_prof_stats;
|
||||
|
||||
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
|
||||
extern bool prof_active_state;
|
||||
|
||||
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
|
||||
extern bool prof_gdump_val;
|
||||
|
||||
/* Profile dump interval, measured in bytes allocated. */
|
||||
extern uint64_t prof_interval;
|
||||
|
||||
/*
|
||||
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
|
||||
* resets.
|
||||
*/
|
||||
extern size_t lg_prof_sample;
|
||||
|
||||
extern bool prof_booted;
|
||||
|
||||
void prof_backtrace_hook_set(prof_backtrace_hook_t hook);
|
||||
prof_backtrace_hook_t prof_backtrace_hook_get();
|
||||
|
||||
void prof_dump_hook_set(prof_dump_hook_t hook);
|
||||
prof_dump_hook_t prof_dump_hook_get();
|
||||
|
||||
/* Functions only accessed in prof_inlines.h */
|
||||
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
|
||||
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
|
||||
|
||||
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx);
|
||||
void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
|
||||
size_t usize, prof_tctx_t *tctx);
|
||||
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
|
||||
prof_tctx_t *prof_tctx_create(tsd_t *tsd);
|
||||
void prof_idump(tsdn_t *tsdn);
|
||||
bool prof_mdump(tsd_t *tsd, const char *filename);
|
||||
void prof_gdump(tsdn_t *tsdn);
|
||||
|
||||
void prof_tdata_cleanup(tsd_t *tsd);
|
||||
bool prof_active_get(tsdn_t *tsdn);
|
||||
bool prof_active_set(tsdn_t *tsdn, bool active);
|
||||
const char *prof_thread_name_get(tsd_t *tsd);
|
||||
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
|
||||
bool prof_thread_active_get(tsd_t *tsd);
|
||||
bool prof_thread_active_set(tsd_t *tsd, bool active);
|
||||
bool prof_thread_active_init_get(tsdn_t *tsdn);
|
||||
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
|
||||
bool prof_gdump_get(tsdn_t *tsdn);
|
||||
bool prof_gdump_set(tsdn_t *tsdn, bool active);
|
||||
void prof_boot0(void);
|
||||
void prof_boot1(void);
|
||||
bool prof_boot2(tsd_t *tsd, base_t *base);
|
||||
void prof_prefork0(tsdn_t *tsdn);
|
||||
void prof_prefork1(tsdn_t *tsdn);
|
||||
void prof_postfork_parent(tsdn_t *tsdn);
|
||||
void prof_postfork_child(tsdn_t *tsdn);
|
||||
|
||||
/* Only accessed by thread event. */
|
||||
uint64_t prof_sample_new_event_wait(tsd_t *tsd);
|
||||
uint64_t prof_sample_postponed_event_wait(tsd_t *tsd);
|
||||
void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
|
21
BeefRT/JEMalloc/include/jemalloc/internal/prof_hook.h
Normal file
|
@ -0,0 +1,21 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PROF_HOOK_H
|
||||
#define JEMALLOC_INTERNAL_PROF_HOOK_H
|
||||
|
||||
/*
 * The hook types declared in this file are experimental and undocumented;
 * thus the typedefs are located in an 'internal' header.
 */
|
||||
|
||||
/*
|
||||
* A hook to mock out backtrace functionality. This can be handy, since it's
|
||||
* otherwise difficult to guarantee that two allocations are reported as coming
|
||||
* from the exact same stack trace in the presence of an optimizing compiler.
|
||||
*/
|
||||
typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned);
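A mock hook matching this typedef might look like the sketch below. The parameter interpretation (frame buffer, out frame count, capacity) is an assumption based on the conventional reading of the signature, and demo_backtrace_hook is a hypothetical name; such a hook would be installed via prof_backtrace_hook_set(), declared in prof_externs.h above.

/*
 * A stub backtrace hook for tests: report the same two fake frames every
 * time, so two allocations are guaranteed to share a "stack trace".
 */
static void
demo_backtrace_hook(void **vec, unsigned *len, unsigned max_len) {
	static int frame_a, frame_b;
	unsigned n = 0;
	if (n < max_len) {
		vec[n++] = (void *)&frame_a;
	}
	if (n < max_len) {
		vec[n++] = (void *)&frame_b;
	}
	*len = n;
}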
|
||||
|
||||
/*
|
||||
* A callback hook that notifies about recently dumped heap profile.
|
||||
*/
|
||||
typedef void (*prof_dump_hook_t)(const char *filename);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */
|
261
BeefRT/JEMalloc/include/jemalloc/internal/prof_inlines.h
Normal file
|
@ -0,0 +1,261 @@
|
|||
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H
|
||||
#define JEMALLOC_INTERNAL_PROF_INLINES_H
|
||||
|
||||
#include "jemalloc/internal/safety_check.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
#include "jemalloc/internal/thread_event.h"
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_active_assert() {
|
||||
cassert(config_prof);
|
||||
/*
|
||||
* If opt_prof is off, then prof_active must always be off, regardless
|
||||
* of whether prof_active_mtx is in effect or not.
|
||||
*/
|
||||
assert(opt_prof || !prof_active_state);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
prof_active_get_unlocked(void) {
|
||||
prof_active_assert();
|
||||
/*
|
||||
* Even if opt_prof is true, sampling can be temporarily disabled by
|
||||
* setting prof_active to false. No locking is used when reading
|
||||
* prof_active in the fast path, so there are no guarantees regarding
|
||||
* how long it will take for all threads to notice state changes.
|
||||
*/
|
||||
return prof_active_state;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
prof_gdump_get_unlocked(void) {
|
||||
/*
|
||||
* No locking is used when reading prof_gdump_val in the fast path, so
|
||||
* there are no guarantees regarding how long it will take for all
|
||||
* threads to notice state changes.
|
||||
*/
|
||||
return prof_gdump_val;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
|
||||
prof_tdata_get(tsd_t *tsd, bool create) {
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
tdata = tsd_prof_tdata_get(tsd);
|
||||
if (create) {
|
||||
assert(tsd_reentrancy_level_get(tsd) == 0);
|
||||
if (unlikely(tdata == NULL)) {
|
||||
if (tsd_nominal(tsd)) {
|
||||
tdata = prof_tdata_init(tsd);
|
||||
tsd_prof_tdata_set(tsd, tdata);
|
||||
}
|
||||
} else if (unlikely(tdata->expired)) {
|
||||
tdata = prof_tdata_reinit(tsd, tdata);
|
||||
tsd_prof_tdata_set(tsd, tdata);
|
||||
}
|
||||
assert(tdata == NULL || tdata->attached);
|
||||
}
|
||||
|
||||
return tdata;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
|
||||
prof_info_t *prof_info) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
assert(prof_info != NULL);
|
||||
|
||||
arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr,
|
||||
emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
assert(prof_info != NULL);
|
||||
|
||||
arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
arena_prof_tctx_reset(tsd, ptr, alloc_ctx);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
arena_prof_tctx_reset_sampled(tsd, ptr);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) {
|
||||
cassert(config_prof);
|
||||
assert(edata != NULL);
|
||||
assert((uintptr_t)tctx > (uintptr_t)1U);
|
||||
|
||||
arena_prof_info_set(tsd, edata, tctx, size);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
prof_sample_should_skip(tsd_t *tsd, bool sample_event) {
|
||||
cassert(config_prof);
|
||||
|
||||
/* Fastpath: no need to load tdata */
|
||||
if (likely(!sample_event)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* sample_event is always obtained from the thread event module, and
|
||||
* whenever it's true, it means that the thread event module has
|
||||
* already checked the reentrancy level.
|
||||
*/
|
||||
assert(tsd_reentrancy_level_get(tsd) == 0);
|
||||
|
||||
prof_tdata_t *tdata = prof_tdata_get(tsd, true);
|
||||
if (unlikely(tdata == NULL)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return !tdata->active;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
|
||||
prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) {
|
||||
prof_tctx_t *ret;
|
||||
|
||||
if (!prof_active ||
|
||||
likely(prof_sample_should_skip(tsd, sample_event))) {
|
||||
ret = (prof_tctx_t *)(uintptr_t)1U;
|
||||
} else {
|
||||
ret = prof_tctx_create(tsd);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
|
||||
emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
assert(usize == isalloc(tsd_tsdn(tsd), ptr));
|
||||
|
||||
if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
|
||||
prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
|
||||
} else {
|
||||
prof_tctx_reset(tsd, ptr, alloc_ctx);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
    prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize,
    prof_info_t *old_prof_info, bool sample_event) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && ptr != NULL) {
		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		if (prof_sample_should_skip(tsd, sample_event)) {
			/*
			 * Don't sample. The usize passed to prof_alloc_prep()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			prof_alloc_rollback(tsd, tctx);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
	old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U);
	moved = (ptr != old_ptr);

	if (unlikely(sampled)) {
		prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
	} else if (moved) {
		prof_tctx_reset(tsd, ptr, NULL);
	} else if (unlikely(old_sampled)) {
		/*
		 * prof_tctx_reset() would work for the !moved case as well,
		 * but prof_tctx_reset_sampled() is slightly cheaper, and the
		 * proper thing to do here in the presence of explicit
		 * knowledge re: moved state.
		 */
		prof_tctx_reset_sampled(tsd, ptr);
	} else {
		prof_info_t prof_info;
		prof_info_get(tsd, ptr, NULL, &prof_info);
		assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U);
	}

	/*
	 * The prof_free_sampled_object() call must come after the
	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
	 * the same, in which case reversing the call order could cause the tctx
	 * to be prematurely destroyed as a side effect of momentarily zeroed
	 * counters.
	 */
	if (unlikely(old_sampled)) {
		prof_free_sampled_object(tsd, old_usize, old_prof_info);
	}
}

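/*
 * The helpers below implement the "sampled allocations are page aligned"
 * convention: prof_sample_align() bumps the requested alignment to at least
 * PAGE (when cache-oblivious sizing is enabled), and prof_sample_aligned()
 * lets the deallocation path rule out sampling with a single address mask
 * instead of a metadata lookup.
 */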
JEMALLOC_ALWAYS_INLINE size_t
prof_sample_align(size_t orig_align) {
	/*
	 * Enforce page alignment, so that sampled allocations can be identified
	 * w/o metadata lookup.
	 */
	assert(opt_prof);
	return (opt_cache_oblivious && orig_align < PAGE) ? PAGE :
	    orig_align;
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_aligned(const void *ptr) {
	return ((uintptr_t)ptr & PAGE_MASK) == 0;
}

JEMALLOC_ALWAYS_INLINE bool
prof_sampled(tsd_t *tsd, const void *ptr) {
	prof_info_t prof_info;
	prof_info_get(tsd, ptr, NULL, &prof_info);
	bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U;
	if (sampled) {
		assert(prof_sample_aligned(ptr));
	}
	return sampled;
}

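/*
 * Illustrative pairing on a deallocation path (a sketch only; the guard and
 * the release step are hypothetical, not part of this header):
 *
 *	if (config_prof && opt_prof) {
 *		prof_free(tsd, ptr, usize, &alloc_ctx);
 *	}
 *	<return ptr to the arena>
 */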
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize,
    emap_alloc_ctx_t *alloc_ctx) {
	prof_info_t prof_info;
	prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) {
		assert(prof_sample_aligned(ptr));
		prof_free_sampled_object(tsd, usize, &prof_info);
	}
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */
22
BeefRT/JEMalloc/include/jemalloc/internal/prof_log.h
Normal file
@@ -0,0 +1,22 @@
#ifndef JEMALLOC_INTERNAL_PROF_LOG_H
#define JEMALLOC_INTERNAL_PROF_LOG_H

#include "jemalloc/internal/mutex.h"

extern malloc_mutex_t log_mtx;

void prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
bool prof_log_init(tsd_t *tsdn);

/* Used in unit tests. */
size_t prof_log_bt_count(void);
size_t prof_log_alloc_count(void);
size_t prof_log_thr_count(void);
bool prof_log_is_logging(void);
bool prof_log_rep_check(void);
void prof_log_dummy_set(bool new_value);

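/*
 * prof_log_start()/prof_log_stop() are the internal entry points behind the
 * experimental "prof.log_start" / "prof.log_stop" mallctls (an assumption
 * about the public interface; not stated in this header).
 */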
bool prof_log_start(tsdn_t *tsdn, const char *filename);
bool prof_log_stop(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_PROF_LOG_H */
Some files were not shown because too many files have changed in this diff.