aboutsummaryrefslogtreecommitdiff
path: root/client/asmjit/core
diff options
context:
space:
mode:
authorauth12 <[email protected]>2020-07-19 11:57:04 -0700
committerGitHub <[email protected]>2020-07-19 11:57:04 -0700
commit1bae439a35a3aadca6772716aaeea8c8a0991114 (patch)
treef8eab7a7bae237ad697feecfae26b17bab91b16e /client/asmjit/core
parentMore placeholders and general plan. (diff)
parentMerge branch 'master' into windows (diff)
downloadloader-1bae439a35a3aadca6772716aaeea8c8a0991114.tar.xz
loader-1bae439a35a3aadca6772716aaeea8c8a0991114.zip
Merge pull request #1 from auth12/windows
Windows
Diffstat (limited to 'client/asmjit/core')
-rw-r--r--client/asmjit/core/api-build_p.h77
-rw-r--r--client/asmjit/core/api-config.h556
-rw-r--r--client/asmjit/core/arch.cpp59
-rw-r--r--client/asmjit/core/arch.h64
-rw-r--r--client/asmjit/core/assembler.cpp403
-rw-r--r--client/asmjit/core/assembler.h152
-rw-r--r--client/asmjit/core/builder.cpp927
-rw-r--r--client/asmjit/core/builder.h1435
-rw-r--r--client/asmjit/core/callconv.cpp59
-rw-r--r--client/asmjit/core/callconv.h374
-rw-r--r--client/asmjit/core/codebuffer.h126
-rw-r--r--client/asmjit/core/codebufferwriter_p.h189
-rw-r--r--client/asmjit/core/codeholder.cpp1110
-rw-r--r--client/asmjit/core/codeholder.h929
-rw-r--r--client/asmjit/core/compiler.cpp645
-rw-r--r--client/asmjit/core/compiler.h816
-rw-r--r--client/asmjit/core/constpool.cpp375
-rw-r--r--client/asmjit/core/constpool.h262
-rw-r--r--client/asmjit/core/cpuinfo.cpp97
-rw-r--r--client/asmjit/core/cpuinfo.h154
-rw-r--r--client/asmjit/core/datatypes.h1071
-rw-r--r--client/asmjit/core/emitter.cpp392
-rw-r--r--client/asmjit/core/emitter.h714
-rw-r--r--client/asmjit/core/emitterutils.cpp150
-rw-r--r--client/asmjit/core/emitterutils_p.h109
-rw-r--r--client/asmjit/core/environment.cpp64
-rw-r--r--client/asmjit/core/environment.h591
-rw-r--r--client/asmjit/core/errorhandler.cpp37
-rw-r--r--client/asmjit/core/errorhandler.h267
-rw-r--r--client/asmjit/core/features.h186
-rw-r--r--client/asmjit/core/formatter.cpp469
-rw-r--r--client/asmjit/core/formatter.h256
-rw-r--r--client/asmjit/core/func.cpp143
-rw-r--r--client/asmjit/core/func.h976
-rw-r--r--client/asmjit/core/globals.cpp144
-rw-r--r--client/asmjit/core/globals.h453
-rw-r--r--client/asmjit/core/inst.cpp139
-rw-r--r--client/asmjit/core/inst.h559
-rw-r--r--client/asmjit/core/jitallocator.cpp1152
-rw-r--r--client/asmjit/core/jitallocator.h278
-rw-r--r--client/asmjit/core/jitruntime.cpp120
-rw-r--r--client/asmjit/core/jitruntime.h126
-rw-r--r--client/asmjit/core/logger.cpp124
-rw-r--r--client/asmjit/core/logger.h228
-rw-r--r--client/asmjit/core/misc_p.h49
-rw-r--r--client/asmjit/core/operand.cpp136
-rw-r--r--client/asmjit/core/operand.h1418
-rw-r--r--client/asmjit/core/osutils.cpp106
-rw-r--r--client/asmjit/core/osutils.h87
-rw-r--r--client/asmjit/core/osutils_p.h94
-rw-r--r--client/asmjit/core/raassignment_p.h399
-rw-r--r--client/asmjit/core/rabuilders_p.h644
-rw-r--r--client/asmjit/core/radefs_p.h1091
-rw-r--r--client/asmjit/core/ralocal.cpp1039
-rw-r--r--client/asmjit/core/ralocal_p.h282
-rw-r--r--client/asmjit/core/rapass.cpp2010
-rw-r--r--client/asmjit/core/rapass_p.h1196
-rw-r--r--client/asmjit/core/rastack.cpp206
-rw-r--r--client/asmjit/core/rastack_p.h187
-rw-r--r--client/asmjit/core/string.cpp551
-rw-r--r--client/asmjit/core/string.h400
-rw-r--r--client/asmjit/core/support.cpp507
-rw-r--r--client/asmjit/core/support.h1516
-rw-r--r--client/asmjit/core/target.cpp37
-rw-r--r--client/asmjit/core/target.h175
-rw-r--r--client/asmjit/core/type.cpp92
-rw-r--r--client/asmjit/core/type.h375
-rw-r--r--client/asmjit/core/virtmem.cpp589
-rw-r--r--client/asmjit/core/virtmem.h145
-rw-r--r--client/asmjit/core/zone.cpp382
-rw-r--r--client/asmjit/core/zone.h649
-rw-r--r--client/asmjit/core/zonehash.cpp331
-rw-r--r--client/asmjit/core/zonehash.h218
-rw-r--r--client/asmjit/core/zonelist.cpp182
-rw-r--r--client/asmjit/core/zonelist.h205
-rw-r--r--client/asmjit/core/zonestack.cpp197
-rw-r--r--client/asmjit/core/zonestack.h234
-rw-r--r--client/asmjit/core/zonestring.h137
-rw-r--r--client/asmjit/core/zonetree.cpp118
-rw-r--r--client/asmjit/core/zonetree.h385
-rw-r--r--client/asmjit/core/zonevector.cpp375
-rw-r--r--client/asmjit/core/zonevector.h710
82 files changed, 35011 insertions, 0 deletions
diff --git a/client/asmjit/core/api-build_p.h b/client/asmjit/core/api-build_p.h
new file mode 100644
index 0000000..db37ca7
--- /dev/null
+++ b/client/asmjit/core/api-build_p.h
@@ -0,0 +1,77 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_API_BUILD_P_H_INCLUDED
+#define ASMJIT_CORE_API_BUILD_P_H_INCLUDED
+
+#define ASMJIT_EXPORTS
+
+// Only turn-off these warnings when building asmjit itself.
+#ifdef _MSC_VER
+ #ifndef _CRT_SECURE_NO_DEPRECATE
+ #define _CRT_SECURE_NO_DEPRECATE
+ #endif
+ #ifndef _CRT_SECURE_NO_WARNINGS
+ #define _CRT_SECURE_NO_WARNINGS
+ #endif
+#endif
+
+// Dependencies only required for asmjit build, but never exposed through public headers.
+#ifdef _WIN32
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+ #ifndef NOMINMAX
+ #define NOMINMAX
+ #endif
+ #include <windows.h>
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Build-Only]
+// ============================================================================
+
+#include "./api-config.h"
+
+#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) && !defined(__clang__)
+ #define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
+ #define ASMJIT_FAVOR_SPEED __attribute__((__optimize__("O3")))
+#elif ASMJIT_CXX_HAS_ATTRIBUTE(__minsize__, 0)
+ #define ASMJIT_FAVOR_SIZE __attribute__((__minsize__))
+ #define ASMJIT_FAVOR_SPEED
+#else
+ #define ASMJIT_FAVOR_SIZE
+ #define ASMJIT_FAVOR_SPEED
+#endif
+
+// Make sure '#ifdef'ed unit tests are properly highlighted in IDE.
+#if !defined(ASMJIT_TEST) && defined(__INTELLISENSE__)
+ #define ASMJIT_TEST
+#endif
+
+// Include a unit testing package if this is a `asmjit_test_unit` build.
+#if defined(ASMJIT_TEST)
+ #include "../../../test/broken.h"
+#endif
+
+#endif // ASMJIT_CORE_API_BUILD_P_H_INCLUDED
diff --git a/client/asmjit/core/api-config.h b/client/asmjit/core/api-config.h
new file mode 100644
index 0000000..3b896e9
--- /dev/null
+++ b/client/asmjit/core/api-config.h
@@ -0,0 +1,556 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_API_CONFIG_H_INCLUDED
+#define ASMJIT_CORE_API_CONFIG_H_INCLUDED
+
+// ============================================================================
+// [asmjit::Version]
+// ============================================================================
+
+//! \addtogroup asmjit_core
+//! \{
+
+//! AsmJit library version in `(Major << 16) | (Minor << 8) | (Patch)` format.
+#define ASMJIT_LIBRARY_VERSION 0x010400 /* 1.4.0 */
+
+//! \}
+
+// ============================================================================
+// [asmjit::Build - Documentation]
+// ============================================================================
+
+// NOTE: Doxygen cannot document macros that are not defined, that's why we have
+// to define them and then undefine them, so it won't use the macros with its
+// own preprocessor.
+#ifdef _DOXYGEN
+namespace asmjit {
+
+//! \addtogroup asmjit_build
+//! \{
+
+//! Asmjit is embedded, implies \ref ASMJIT_STATIC.
+#define ASMJIT_EMBED
+
+//! Enables static-library build.
+#define ASMJIT_STATIC
+
+//! Defined when AsmJit's build configuration is 'Debug'.
+//!
+//! \note Can be defined explicitly to bypass autodetection.
+#define ASMJIT_BUILD_DEBUG
+
+//! Defined when AsmJit's build configuration is 'Release'.
+//!
+//! \note Can be defined explicitly to bypass autodetection.
+#define ASMJIT_BUILD_RELEASE
+
+//! Defined to build X86/X64 backend.
+#define ASMJIT_BUILD_X86
+
+//! Defined to build ARM/AArch64 backend.
+#define ASMJIT_BUILD_ARM
+
+//! Defined to build host backend autodetected at compile-time.
+#define ASMJIT_BUILD_HOST
+
+//! Disables deprecated API at compile time.
+#define ASMJIT_NO_DEPRECATED
+
+//! Disable non-host architectures entirely.
+#define ASMJIT_NO_FOREIGN
+
+//! Disables \ref asmjit_builder functionality completely.
+#define ASMJIT_NO_BUILDER
+
+//! Disables \ref asmjit_compiler functionality completely.
+#define ASMJIT_NO_COMPILER
+
+//! Disables JIT memory management and \ref JitRuntime.
+#define ASMJIT_NO_JIT
+
+//! Disables \ref Logger and \ref Formatter.
+#define ASMJIT_NO_LOGGING
+
+//! Disables everything that contains text.
+#define ASMJIT_NO_TEXT
+
+//! Disables instruction validation API.
+#define ASMJIT_NO_VALIDATION
+
+//! Disables instruction introspection API.
+#define ASMJIT_NO_INTROSPECTION
+
+// Avoid doxygen preprocessor using feature-selection definitions.
+#undef ASMJIT_NO_DEPRECATED
+#undef ASMJIT_NO_BUILDER
+#undef ASMJIT_NO_COMPILER
+#undef ASMJIT_NO_JIT
+#undef ASMJIT_NO_LOGGING
+#undef ASMJIT_NO_TEXT
+#undef ASMJIT_NO_VALIDATION
+#undef ASMJIT_NO_INTROSPECTION
+
+//! \}
+
+} // {asmjit}
+#endif // _DOXYGEN
+
+// ============================================================================
+// [asmjit::Dependencies]
+// ============================================================================
+
+// We really want std-types as globals.
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <new>
+#include <limits>
+#include <type_traits>
+#include <utility>
+
+#if !defined(_WIN32) && !defined(__EMSCRIPTEN__)
+ #include <pthread.h>
+#endif
+
+
+// ============================================================================
+// [asmjit::Options]
+// ============================================================================
+
+// ASMJIT_NO_BUILDER implies ASMJIT_NO_COMPILER.
+#if defined(ASMJIT_NO_BUILDER) && !defined(ASMJIT_NO_COMPILER)
+ #define ASMJIT_NO_COMPILER
+#endif
+
+// Prevent compile-time errors caused by misconfiguration.
+#if defined(ASMJIT_NO_TEXT) && !defined(ASMJIT_NO_LOGGING)
+  #pragma message("ASMJIT_NO_TEXT can only be defined when ASMJIT_NO_LOGGING is defined")
+ #undef ASMJIT_NO_TEXT
+#endif
+
+#if defined(ASMJIT_NO_INTROSPECTION) && !defined(ASMJIT_NO_COMPILER)
+ #pragma message("ASMJIT_NO_INTROSPECTION can only be defined when ASMJIT_NO_COMPILER is defined")
+ #undef ASMJIT_NO_INTROSPECTION
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Deprecated]
+// ============================================================================
+
+#ifndef ASMJIT_NO_DEPRECATED
+ #if defined(ASMJIT_BUILD_EMBED) || defined(ASMJIT_BUILD_STATIC)
+ #if defined(ASMJIT_BUILD_EMBED)
+ #pragma message("'ASMJIT_BUILD_EMBED' is deprecated, use 'ASMJIT_STATIC'")
+ #endif
+ #if defined(ASMJIT_BUILD_STATIC)
+ #pragma message("'ASMJIT_BUILD_STATIC' is deprecated, use 'ASMJIT_STATIC'")
+ #endif
+
+ #if !defined(ASMJIT_STATIC)
+ #define ASMJIT_STATIC
+ #endif
+ #endif
+#endif // !ASMJIT_NO_DEPRECATED
+
+// ============================================================================
+// [asmjit::Build - Globals - Build Mode]
+// ============================================================================
+
+// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined.
+#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE)
+ #if !defined(NDEBUG)
+ #define ASMJIT_BUILD_DEBUG
+ #else
+ #define ASMJIT_BUILD_RELEASE
+ #endif
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Target Architecture Information]
+// ============================================================================
+
+#if defined(_M_X64) || defined(__x86_64__)
+ #define ASMJIT_ARCH_X86 64
+#elif defined(_M_IX86) || defined(__X86__) || defined(__i386__)
+ #define ASMJIT_ARCH_X86 32
+#else
+ #define ASMJIT_ARCH_X86 0
+#endif
+
+#if defined(__arm64__) || defined(__aarch64__)
+  #define ASMJIT_ARCH_ARM 64
+#elif defined(_M_ARM) || defined(_M_ARMT) || defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
+ #define ASMJIT_ARCH_ARM 32
+#else
+ #define ASMJIT_ARCH_ARM 0
+#endif
+
+#if defined(_MIPS_ARCH_MIPS64) || defined(__mips64)
+ #define ASMJIT_ARCH_MIPS 64
+#elif defined(_MIPS_ARCH_MIPS32) || defined(_M_MRX000) || defined(__mips__)
+ #define ASMJIT_ARCH_MIPS 32
+#else
+ #define ASMJIT_ARCH_MIPS 0
+#endif
+
+#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS)
+#if ASMJIT_ARCH_BITS == 0
+ #undef ASMJIT_ARCH_BITS
+  #if defined(__LP64__) || defined(_LP64)
+ #define ASMJIT_ARCH_BITS 64
+ #else
+ #define ASMJIT_ARCH_BITS 32
+ #endif
+#endif
+
+#if (defined(__ARMEB__)) || \
+ (defined(__MIPSEB__)) || \
+ (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+ #define ASMJIT_ARCH_LE 0
+ #define ASMJIT_ARCH_BE 1
+#else
+ #define ASMJIT_ARCH_LE 1
+ #define ASMJIT_ARCH_BE 0
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Build Architectures Definitions]
+// ============================================================================
+
+#if !defined(ASMJIT_NO_FOREIGN)
+ // If 'ASMJIT_NO_FOREIGN' is not defined then all architectures will be built.
+ #if !defined(ASMJIT_BUILD_X86)
+ #define ASMJIT_BUILD_X86
+ #endif
+#else
+ // Detect architectures to build if building only for the host architecture.
+ #if ASMJIT_ARCH_X86 && !defined(ASMJIT_BUILD_X86)
+ #define ASMJIT_BUILD_X86
+ #endif
+
+ #if ASMJIT_ARCH_ARM && !defined(ASMJIT_BUILD_ARM)
+ // #define ASMJIT_BUILD_ARM
+ #endif
+#endif
+
+// Define 'ASMJIT_BUILD_HOST' if we know that host architecture will be built.
+#if !defined(ASMJIT_BUILD_HOST) && ASMJIT_ARCH_X86 && defined(ASMJIT_BUILD_X86)
+ #define ASMJIT_BUILD_HOST
+#endif
+
+#if !defined(ASMJIT_BUILD_HOST) && ASMJIT_ARCH_ARM && defined(ASMJIT_BUILD_ARM)
+ #define ASMJIT_BUILD_HOST
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - C++ Compiler and Features Detection]
+// ============================================================================
+
+#define ASMJIT_CXX_GNU 0
+#define ASMJIT_CXX_MAKE_VER(MAJOR, MINOR) ((MAJOR) * 1000 + (MINOR))
+
+// Intel Compiler [pretends to be GNU or MSC, so it must be checked first]:
+// - https://software.intel.com/en-us/articles/c0x-features-supported-by-intel-c-compiler
+// - https://software.intel.com/en-us/articles/c14-features-supported-by-intel-c-compiler
+// - https://software.intel.com/en-us/articles/c17-features-supported-by-intel-c-compiler
+#if defined(__INTEL_COMPILER)
+
+// MSC Compiler:
+// - https://msdn.microsoft.com/en-us/library/hh567368.aspx
+//
+// Version List:
+// - 16.00.0 == VS2010
+// - 17.00.0 == VS2012
+// - 18.00.0 == VS2013
+// - 19.00.0 == VS2015
+// - 19.10.0 == VS2017
+#elif defined(_MSC_VER) && defined(_MSC_FULL_VER)
+
+// Clang Compiler [Pretends to be GNU, so it must be checked before]:
+// - https://clang.llvm.org/cxx_status.html
+#elif defined(__clang_major__) && defined(__clang_minor__) && defined(__clang_patchlevel__)
+
+// GNU Compiler:
+// - https://gcc.gnu.org/projects/cxx-status.html
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+
+ #undef ASMJIT_CXX_GNU
+ #define ASMJIT_CXX_GNU ASMJIT_CXX_MAKE_VER(__GNUC__, __GNUC_MINOR__)
+
+#endif
+
+// Compiler features detection macros.
+#if defined(__clang__) && defined(__has_attribute)
+ #define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (__has_attribute(NAME))
+#else
+ #define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (!(!(CHECK)))
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - API Decorators & Language Extensions]
+// ============================================================================
+
+// API (Export / Import).
+#if !defined(ASMJIT_STATIC)
+ #if defined(_WIN32) && (defined(_MSC_VER) || defined(__MINGW32__))
+ #ifdef ASMJIT_EXPORTS
+ #define ASMJIT_API __declspec(dllexport)
+ #else
+ #define ASMJIT_API __declspec(dllimport)
+ #endif
+ #elif defined(_WIN32) && defined(__GNUC__)
+ #ifdef ASMJIT_EXPORTS
+ #define ASMJIT_API __attribute__((__dllexport__))
+ #else
+ #define ASMJIT_API __attribute__((__dllimport__))
+ #endif
+ #elif defined(__GNUC__)
+ #define ASMJIT_API __attribute__((__visibility__("default")))
+ #endif
+#endif
+
+#if !defined(ASMJIT_API)
+ #define ASMJIT_API
+#endif
+
+#if !defined(ASMJIT_VARAPI)
+ #define ASMJIT_VARAPI extern ASMJIT_API
+#endif
+
+// This is basically a workaround. When using MSVC and marking class as DLL
+// export everything gets exported, which is unwanted in most projects. MSVC
+// automatically exports typeinfo and vtable if at least one symbol of the
+// class is exported. However, GCC has some strange behavior that even if
+// one or more symbol is exported it doesn't export typeinfo unless the
+// class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
+#if !defined(_WIN32) && defined(__GNUC__)
+ #define ASMJIT_VIRTAPI ASMJIT_API
+#else
+ #define ASMJIT_VIRTAPI
+#endif
+
+// Function attributes.
+#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
+ #define ASMJIT_INLINE inline __attribute__((__always_inline__))
+#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
+ #define ASMJIT_INLINE __forceinline
+#else
+ #define ASMJIT_INLINE inline
+#endif
+
+#if defined(__GNUC__)
+ #define ASMJIT_NOINLINE __attribute__((__noinline__))
+ #define ASMJIT_NORETURN __attribute__((__noreturn__))
+#elif defined(_MSC_VER)
+ #define ASMJIT_NOINLINE __declspec(noinline)
+ #define ASMJIT_NORETURN __declspec(noreturn)
+#else
+ #define ASMJIT_NOINLINE
+ #define ASMJIT_NORETURN
+#endif
+
+// Calling conventions.
+#if ASMJIT_ARCH_X86 == 32 && defined(__GNUC__)
+ #define ASMJIT_CDECL __attribute__((__cdecl__))
+ #define ASMJIT_STDCALL __attribute__((__stdcall__))
+ #define ASMJIT_FASTCALL __attribute__((__fastcall__))
+ #define ASMJIT_REGPARM(N) __attribute__((__regparm__(N)))
+#elif ASMJIT_ARCH_X86 == 32 && defined(_MSC_VER)
+ #define ASMJIT_CDECL __cdecl
+ #define ASMJIT_STDCALL __stdcall
+ #define ASMJIT_FASTCALL __fastcall
+ #define ASMJIT_REGPARM(N)
+#else
+ #define ASMJIT_CDECL
+ #define ASMJIT_STDCALL
+ #define ASMJIT_FASTCALL
+ #define ASMJIT_REGPARM(N)
+#endif
+
+#if ASMJIT_ARCH_X86 && defined(_WIN32) && defined(_MSC_VER)
+ #define ASMJIT_VECTORCALL __vectorcall
+#elif ASMJIT_ARCH_X86 && defined(_WIN32)
+ #define ASMJIT_VECTORCALL __attribute__((__vectorcall__))
+#else
+ #define ASMJIT_VECTORCALL
+#endif
+
+
+// Type alignment (not allowed by C++11 'alignas' keyword).
+#if defined(__GNUC__)
+ #define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE
+#elif defined(_MSC_VER)
+ #define ASMJIT_ALIGN_TYPE(TYPE, N) __declspec(align(N)) TYPE
+#else
+ #define ASMJIT_ALIGN_TYPE(TYPE, N) TYPE
+#endif
+
+//! \def ASMJIT_MAY_ALIAS
+//!
+//! Expands to `__attribute__((__may_alias__))` if supported.
+#if defined(__GNUC__)
+ #define ASMJIT_MAY_ALIAS __attribute__((__may_alias__))
+#else
+ #define ASMJIT_MAY_ALIAS
+#endif
+
+//! \def ASMJIT_LIKELY(...)
+//!
+//! Condition is likely to be taken (mostly error handling and edge cases).
+
+//! \def ASMJIT_UNLIKELY(...)
+//!
+//! Condition is unlikely to be taken (mostly error handling and edge cases).
+#if defined(__GNUC__)
+ #define ASMJIT_LIKELY(...) __builtin_expect(!!(__VA_ARGS__), 1)
+ #define ASMJIT_UNLIKELY(...) __builtin_expect(!!(__VA_ARGS__), 0)
+#else
+ #define ASMJIT_LIKELY(...) (__VA_ARGS__)
+ #define ASMJIT_UNLIKELY(...) (__VA_ARGS__)
+#endif
+
+//! \def ASMJIT_FALLTHROUGH
+//!
+//! Portable [[fallthrough]] attribute.
+#if defined(__clang__) && __cplusplus >= 201103L
+ #define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
+#elif defined(__GNUC__) && __GNUC__ >= 7
+ #define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__))
+#else
+ #define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */
+#endif
+
+//! \def ASMJIT_DEPRECATED
+//!
+//! Marks function, class, struct, enum, or anything else as deprecated.
+#if defined(__GNUC__)
+ #define ASMJIT_DEPRECATED(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
+ #if defined(__clang__)
+ #define ASMJIT_DEPRECATED_STRUCT(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
+ #else
+ #define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
+ #endif
+#elif defined(_MSC_VER)
+ #define ASMJIT_DEPRECATED(MESSAGE) __declspec(deprecated(MESSAGE))
+ #define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
+#else
+ #define ASMJIT_DEPRECATED(MESSAGE)
+ #define ASMJIT_DEPRECATED_STRUCT(MESSAGE)
+#endif
+
+// Utilities.
+#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100)
+#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0]))
+
+#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0)
+ #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined")))
+#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 9)
+ #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__))
+#else
+ #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Begin-Namespace / End-Namespace]
+// ============================================================================
+
+#if defined(__clang__)
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wconstant-logical-operand\"") \
+ _Pragma("clang diagnostic ignored \"-Wunnamed-type-template-args\"")
+ #define ASMJIT_END_NAMESPACE \
+ _Pragma("clang diagnostic pop") \
+ }
+#elif defined(__GNUC__) && __GNUC__ == 4
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
+ #define ASMJIT_END_NAMESPACE \
+ _Pragma("GCC diagnostic pop") \
+ }
+#elif defined(__GNUC__) && __GNUC__ >= 8
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wclass-memaccess\"")
+ #define ASMJIT_END_NAMESPACE \
+ _Pragma("GCC diagnostic pop") \
+ }
+#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ __pragma(warning(push)) \
+ __pragma(warning(disable: 4127)) /* conditional expression is const */ \
+ __pragma(warning(disable: 4201)) /* nameless struct/union */
+ #define ASMJIT_END_NAMESPACE \
+ __pragma(warning(pop)) \
+ }
+#endif
+
+#if !defined(ASMJIT_BEGIN_NAMESPACE) && !defined(ASMJIT_END_NAMESPACE)
+ #define ASMJIT_BEGIN_NAMESPACE namespace asmjit {
+ #define ASMJIT_END_NAMESPACE }
+#endif
+
+#define ASMJIT_BEGIN_SUB_NAMESPACE(NAMESPACE) \
+ ASMJIT_BEGIN_NAMESPACE \
+ namespace NAMESPACE {
+
+#define ASMJIT_END_SUB_NAMESPACE \
+ } \
+ ASMJIT_END_NAMESPACE
+
+// ============================================================================
+// [asmjit::Build - Globals - Utilities]
+// ============================================================================
+
+#define ASMJIT_NONCOPYABLE(...) \
+ private: \
+ __VA_ARGS__(const __VA_ARGS__& other) = delete; \
+ __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
+ public:
+
+#define ASMJIT_NONCONSTRUCTIBLE(...) \
+ private: \
+ __VA_ARGS__() = delete; \
+ __VA_ARGS__(const __VA_ARGS__& other) = delete; \
+ __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
+ public:
+
+// ============================================================================
+// [asmjit::Build - Globals - Cleanup]
+// ============================================================================
+
+// Cleanup definitions that are only used within this header file.
+#undef ASMJIT_CXX_GNU
+#undef ASMJIT_CXX_MAKE_VER
+
+#endif // ASMJIT_CORE_API_CONFIG_H_INCLUDED
diff --git a/client/asmjit/core/arch.cpp b/client/asmjit/core/arch.cpp
new file mode 100644
index 0000000..7200897
--- /dev/null
+++ b/client/asmjit/core/arch.cpp
@@ -0,0 +1,59 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/arch.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86archdata_p.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/armarchdata_p.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ArchUtils]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfoOut) noexcept {
+ // Zero the output in case the input is invalid.
+ *typeIdOut = 0;
+ regInfoOut->reset();
+
+#ifdef ASMJIT_BUILD_X86
+ if (Environment::isFamilyX86(arch))
+ return x86::ArchInternal::typeIdToRegInfo(arch, typeId, typeIdOut, regInfoOut);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (Environment::isFamilyARM(arch))
+ return arm::ArchInternal::typeIdToRegInfo(arch, typeId, typeIdOut, regInfoOut);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/arch.h b/client/asmjit/core/arch.h
new file mode 100644
index 0000000..4991a33
--- /dev/null
+++ b/client/asmjit/core/arch.h
@@ -0,0 +1,64 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ARCH_H_INCLUDED
+#define ASMJIT_CORE_ARCH_H_INCLUDED
+
+#include "../core/environment.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::ArchRegs]
+// ============================================================================
+
+//! Information about registers of a CPU architecture.
+struct ArchRegs {
+ //! Register information and signatures indexed by `BaseReg::RegType`.
+ RegInfo regInfo[BaseReg::kTypeMax + 1];
+ //! Count (maximum) of registers per `BaseReg::RegType`.
+ uint8_t regCount[BaseReg::kTypeMax + 1];
+ //! Converts RegType to TypeId, see `Type::Id`.
+ uint8_t regTypeToTypeId[BaseReg::kTypeMax + 1];
+};
+
+// ============================================================================
+// [asmjit::ArchUtils]
+// ============================================================================
+
+//! Architecture utilities.
+namespace ArchUtils {
+
+ASMJIT_API Error typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfo) noexcept;
+
+} // {ArchUtils}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ARCH_H_INCLUDED
diff --git a/client/asmjit/core/assembler.cpp b/client/asmjit/core/assembler.cpp
new file mode 100644
index 0000000..08ca973
--- /dev/null
+++ b/client/asmjit/core/assembler.cpp
@@ -0,0 +1,403 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/assembler.h"
+#include "../core/codebufferwriter_p.h"
+#include "../core/constpool.h"
+#include "../core/emitterutils_p.h"
+#include "../core/formatter.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::BaseAssembler - Construction / Destruction]
+// ============================================================================
+
// Constructs a detached assembler; all buffer pointers stay null until the
// assembler is attached to a CodeHolder (see onAttach), which selects the
// initial section and initializes the buffer pointers.
BaseAssembler::BaseAssembler() noexcept
  : BaseEmitter(kTypeAssembler),
    _section(nullptr),
    _bufferData(nullptr),
    _bufferEnd(nullptr),
    _bufferPtr(nullptr) {}
BaseAssembler::~BaseAssembler() noexcept {}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Buffer Management]
+// ============================================================================
+
+Error BaseAssembler::setOffset(size_t offset) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+
+ size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
+ if (ASMJIT_UNLIKELY(offset > size))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ _bufferPtr = _bufferData + offset;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Section Management]
+// ============================================================================
+
// Points the assembler's cursor triplet (data/ptr/end) at `section`'s buffer:
// the write cursor resumes at the section's current size, the end marks its
// capacity. Assumes `section` belongs to the attached CodeHolder.
static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noexcept {
  uint8_t* p = section->_buffer._data;

  self->_section = section;
  self->_bufferData = p;
  self->_bufferPtr = p + section->_buffer._size;
  self->_bufferEnd = p + section->_buffer._capacity;
}
+
// Switches assembling to `section`. The section must be registered in the
// attached CodeHolder under the same id; a pointer with a matching id from a
// different CodeHolder is rejected.
Error BaseAssembler::section(Section* section) {
  if (ASMJIT_UNLIKELY(!_code))
    return reportError(DebugUtils::errored(kErrorNotInitialized));

  if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section)
    return reportError(DebugUtils::errored(kErrorInvalidSection));

#ifndef ASMJIT_NO_LOGGING
  if (_logger)
    _logger->logf(".section %s {#%u}\n", section->name(), section->id());
#endif

  BaseAssembler_initSection(this, section);
  return kErrorOk;
}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Label Management]
+// ============================================================================
+
+Label BaseAssembler::newLabel() {
+ uint32_t labelId = Globals::kInvalidId;
+ if (ASMJIT_LIKELY(_code)) {
+ LabelEntry* le;
+ Error err = _code->newLabelEntry(&le);
+ if (ASMJIT_UNLIKELY(err))
+ reportError(err);
+ else
+ labelId = le->id();
+ }
+ return Label(labelId);
+}
+
+Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
+ uint32_t labelId = Globals::kInvalidId;
+ if (ASMJIT_LIKELY(_code)) {
+ LabelEntry* le;
+ Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId);
+ if (ASMJIT_UNLIKELY(err))
+ reportError(err);
+ else
+ labelId = le->id();
+ }
+ return Label(labelId);
+}
+
// Binds `label` to the current section and offset. The inline comment is
// cleared even on failure so it does not leak onto a later instruction.
Error BaseAssembler::bind(const Label& label) {
  if (ASMJIT_UNLIKELY(!_code))
    return reportError(DebugUtils::errored(kErrorNotInitialized));

  Error err = _code->bindLabel(label, _section->id(), offset());

#ifndef ASMJIT_NO_LOGGING
  if (_logger)
    EmitterUtils::logLabelBound(this, label);
#endif

  resetInlineComment();
  if (err)
    return reportError(err);

  return kErrorOk;
}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Embed]
+// ============================================================================
+
#ifndef ASMJIT_NO_LOGGING
// Directive mnemonic for an embedded item, indexed by log2 of its size
// (1 << index bytes): db/dw/dd/dq. Used only for log output.
struct DataSizeByPower {
  char str[4];
};

static const DataSizeByPower dataSizeByPowerTable[] = {
  { "db" },
  { "dw" },
  { "dd" },
  { "dq" }
};
#endif

// Copies `dataSize` raw bytes from `data` into the current section at the
// current offset. A zero size is a successful no-op.
Error BaseAssembler::embed(const void* data, size_t dataSize) {
  if (ASMJIT_UNLIKELY(!_code))
    return reportError(DebugUtils::errored(kErrorNotInitialized));

  if (dataSize == 0)
    return kErrorOk;

  CodeBufferWriter writer(this);
  ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));

  writer.emitData(data, dataSize);

#ifndef ASMJIT_NO_LOGGING
  if (_logger)
    _logger->logBinary(data, dataSize);
#endif

  writer.done(this);
  return kErrorOk;
}
+
+Error BaseAssembler::embedDataArray(uint32_t typeId, const void* data, size_t itemCcount, size_t repeatCount) {
+ uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
+ uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);
+
+ if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (itemCcount == 0 || repeatCount == 0)
+ return kErrorOk;
+
+ uint32_t typeSize = Type::sizeOf(finalTypeId);
+ Support::FastUInt8 of = 0;
+
+ size_t dataSize = Support::mulOverflow(itemCcount, size_t(typeSize), &of);
+ size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of);
+
+ if (ASMJIT_UNLIKELY(of))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ CodeBufferWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize));
+
+#ifndef ASMJIT_NO_LOGGING
+ const uint8_t* start = writer.cursor();
+#endif
+
+ for (size_t i = 0; i < repeatCount; i++) {
+ writer.emitData(data, dataSize);
+ }
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_logger)
+ _logger->logBinary(start, totalSize);
+#endif
+
+ writer.done(this);
+ return kErrorOk;
+}
+
// Emits the contents of `pool`: aligns the cursor to the pool's alignment,
// binds `label` at the aligned position, then copies the pool data in place.
Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
  if (ASMJIT_UNLIKELY(!_code))
    return reportError(DebugUtils::errored(kErrorNotInitialized));

  if (ASMJIT_UNLIKELY(!isLabelValid(label)))
    return reportError(DebugUtils::errored(kErrorInvalidLabel));

  ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
  ASMJIT_PROPAGATE(bind(label));

  size_t size = pool.size();
  CodeBufferWriter writer(this);
  ASMJIT_PROPAGATE(writer.ensureSpace(this, size));

  // The pool writes directly at the cursor; advance afterwards so the bytes
  // are accounted for.
  pool.fill(writer.cursor());

#ifndef ASMJIT_NO_LOGGING
  if (_logger)
    _logger->logBinary(writer.cursor(), size);
#endif

  writer.advance(size);
  writer.done(this);

  return kErrorOk;
}
+
// Embeds the absolute address of `label` as `dataSize` bytes (defaults to the
// target register size). The bytes are emitted as zeros and patched through a
// RelocEntry at relocation time; unbound labels get a LabelLink tied to the
// relocation.
Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
  if (ASMJIT_UNLIKELY(!_code))
    return reportError(DebugUtils::errored(kErrorNotInitialized));

  // NOTE(review): redundant — the check above already guarantees `_code`.
  ASMJIT_ASSERT(_code != nullptr);
  RelocEntry* re;
  LabelEntry* le = _code->labelEntry(label);

  if (ASMJIT_UNLIKELY(!le))
    return reportError(DebugUtils::errored(kErrorInvalidLabel));

  if (dataSize == 0)
    dataSize = registerSize();

  // Only 1/2/4/8-byte pointers are representable.
  if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
    return reportError(DebugUtils::errored(kErrorInvalidOperandSize));

  CodeBufferWriter writer(this);
  ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));

#ifndef ASMJIT_NO_LOGGING
  if (_logger) {
    StringTmp<256> sb;
    sb.appendFormat("%s ", dataSizeByPowerTable[Support::ctz(dataSize)].str);
    Formatter::formatLabel(sb, 0, this, label.id());
    sb.append('\n');
    _logger->log(sb);
  }
#endif

  Error err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs, uint32_t(dataSize));
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  re->_sourceSectionId = _section->id();
  re->_sourceOffset = offset();

  if (le->isBound()) {
    // Target is known now; the relocation can be fully described.
    re->_targetSectionId = le->section()->id();
    re->_payload = le->offset();
  }
  else {
    // Target unknown; link the label to this relocation so binding fixes it.
    LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0);
    if (ASMJIT_UNLIKELY(!link))
      return reportError(DebugUtils::errored(kErrorOutOfMemory));
    link->relocId = re->id();
  }

  // Emit dummy DWORD/QWORD depending on the data size.
  writer.emitZeros(dataSize);
  writer.done(this);

  return kErrorOk;
}

// Embeds `label - base` as `dataSize` bytes. If both labels are bound in the
// same section the delta is written immediately; otherwise a kTypeExpression
// relocation carrying a (label - base) expression is recorded and zeros are
// emitted as a placeholder.
Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
  if (ASMJIT_UNLIKELY(!_code))
    return reportError(DebugUtils::errored(kErrorNotInitialized));

  LabelEntry* labelEntry = _code->labelEntry(label);
  LabelEntry* baseEntry = _code->labelEntry(base);

  if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry))
    return reportError(DebugUtils::errored(kErrorInvalidLabel));

  if (dataSize == 0)
    dataSize = registerSize();

  if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
    return reportError(DebugUtils::errored(kErrorInvalidOperandSize));

  CodeBufferWriter writer(this);
  ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));

#ifndef ASMJIT_NO_LOGGING
  if (_logger) {
    StringTmp<256> sb;
    sb.appendFormat(".%s (", dataSizeByPowerTable[Support::ctz(dataSize)].str);
    Formatter::formatLabel(sb, 0, this, label.id());
    sb.append(" - ");
    Formatter::formatLabel(sb, 0, this, base.id());
    sb.append(")\n");
    _logger->log(sb);
  }
#endif

  // If both labels are bound within the same section it means the delta can be calculated now.
  if (labelEntry->isBound() && baseEntry->isBound() && labelEntry->section() == baseEntry->section()) {
    uint64_t delta = labelEntry->offset() - baseEntry->offset();
    writer.emitValueLE(delta, dataSize);
  }
  else {
    RelocEntry* re;
    Error err = _code->newRelocEntry(&re, RelocEntry::kTypeExpression, uint32_t(dataSize));
    if (ASMJIT_UNLIKELY(err))
      return reportError(err);

    // The expression lives in the CodeHolder's zone and is referenced from
    // the relocation payload as a raw pointer.
    Expression* exp = _code->_zone.newT<Expression>();
    if (ASMJIT_UNLIKELY(!exp))
      return reportError(DebugUtils::errored(kErrorOutOfMemory));

    exp->reset();
    exp->opType = Expression::kOpSub;
    exp->setValueAsLabel(0, labelEntry);
    exp->setValueAsLabel(1, baseEntry);

    re->_sourceSectionId = _section->id();
    re->_sourceOffset = offset();
    re->_payload = (uint64_t)(uintptr_t)exp;

    writer.emitZeros(dataSize);
  }

  writer.done(this);
  return kErrorOk;
}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Comment]
+// ============================================================================
+
// Writes `data` as a comment line to the attached logger; a no-op (still
// returning kErrorOk) when logging is compiled out or no logger is attached.
Error BaseAssembler::comment(const char* data, size_t size) {
  if (ASMJIT_UNLIKELY(!_code))
    return reportError(DebugUtils::errored(kErrorNotInitialized));

#ifndef ASMJIT_NO_LOGGING
  if (_logger) {
    _logger->log(data, size);
    _logger->log("\n", 1);
    return kErrorOk;
  }
#else
  DebugUtils::unused(data, size);
#endif

  return kErrorOk;
}

// ============================================================================
// [asmjit::BaseAssembler - Events]
// ============================================================================

// Called by CodeHolder when this assembler is attached; positions the cursor
// at the end of the first (".text") section.
Error BaseAssembler::onAttach(CodeHolder* code) noexcept {
  ASMJIT_PROPAGATE(Base::onAttach(code));

  // Attach to the end of the .text section.
  BaseAssembler_initSection(this, code->_sections[0]);

  return kErrorOk;
}

// Called on detach; clears all buffer state before the base class detaches.
Error BaseAssembler::onDetach(CodeHolder* code) noexcept {
  _section = nullptr;
  _bufferData = nullptr;
  _bufferEnd = nullptr;
  _bufferPtr = nullptr;
  return Base::onDetach(code);
}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/assembler.h b/client/asmjit/core/assembler.h
new file mode 100644
index 0000000..fb84207
--- /dev/null
+++ b/client/asmjit/core/assembler.h
@@ -0,0 +1,152 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ASSEMBLER_H_INCLUDED
+#define ASMJIT_CORE_ASSEMBLER_H_INCLUDED
+
+#include "../core/codeholder.h"
+#include "../core/datatypes.h"
+#include "../core/emitter.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_assembler
+//! \{
+
+// ============================================================================
+// [asmjit::BaseAssembler]
+// ============================================================================
+
+//! Base assembler.
+//!
+//! This is a base class that provides interface used by architecture specific
+//! assembler implementations. Assembler doesn't hold any data, instead it's
+//! attached to \ref CodeHolder, which provides all the data that Assembler
+//! needs and which can be altered by it.
+//!
+//! Check out architecture specific assemblers for more details and examples:
+//!
+//! - \ref x86::Assembler - X86/X64 assembler implementation.
+class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter {
+public:
+ ASMJIT_NONCOPYABLE(BaseAssembler)
+ typedef BaseEmitter Base;
+
+ //! Current section where the assembling happens.
+ Section* _section;
+ //! Start of the CodeBuffer of the current section.
+ uint8_t* _bufferData;
+ //! End (first invalid byte) of the current section.
+ uint8_t* _bufferEnd;
+ //! Pointer in the CodeBuffer of the current section.
+ uint8_t* _bufferPtr;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseAssembler` instance.
+ ASMJIT_API BaseAssembler() noexcept;
+ //! Destroys the `BaseAssembler` instance.
+ ASMJIT_API virtual ~BaseAssembler() noexcept;
+
+ //! \}
+
+ //! \name Code-Buffer Management
+ //! \{
+
+ //! Returns the capacity of the current CodeBuffer.
+ inline size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
+ //! Returns the number of remaining bytes in the current CodeBuffer.
+ inline size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
+
+ //! Returns the current position in the CodeBuffer.
+ inline size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
+
+ //! Sets the current position in the CodeBuffer to `offset`.
+ //!
+ //! \note The `offset` cannot be greater than buffer size even if it's
+ //! within the buffer's capacity.
+ ASMJIT_API Error setOffset(size_t offset);
+
+ //! Returns the start of the CodeBuffer in the current section.
+ inline uint8_t* bufferData() const noexcept { return _bufferData; }
+ //! Returns the end (first invalid byte) in the current section.
+ inline uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
+ //! Returns the current pointer in the CodeBuffer in the current section.
+ inline uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
+
+ //! \}
+
+ //! \name Section Management
+ //! \{
+
+ //! Returns the current section.
+ inline Section* currentSection() const noexcept { return _section; }
+
+ ASMJIT_API Error section(Section* section) override;
+
+ //! \}
+
+ //! \name Label Management
+ //! \{
+
+ ASMJIT_API Label newLabel() override;
+ ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override;
+ ASMJIT_API Error bind(const Label& label) override;
+
+ //! \}
+
+ //! \name Embed
+ //! \{
+
+ ASMJIT_API Error embed(const void* data, size_t dataSize) override;
+ ASMJIT_API Error embedDataArray(uint32_t typeId, const void* data, size_t itemCcount, size_t repeatCount = 1) override;
+ ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
+
+ ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;
+ ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) override;
+
+ //! \}
+
+ //! \name Comment
+ //! \{
+
+ ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ASSEMBLER_H_INCLUDED
diff --git a/client/asmjit/core/builder.cpp b/client/asmjit/core/builder.cpp
new file mode 100644
index 0000000..a582e96
--- /dev/null
+++ b/client/asmjit/core/builder.cpp
@@ -0,0 +1,927 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_BUILDER
+
+#include "../core/builder.h"
+#include "../core/emitterutils_p.h"
+#include "../core/errorhandler.h"
+#include "../core/formatter.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::PostponedErrorHandler (Internal)]
+// ============================================================================
+
//! Postponed error handler that never throws. Used as a temporal error handler
//! to run passes. If error occurs, the caller is notified and will call the
//! real error handler, that can throw.
class PostponedErrorHandler : public ErrorHandler {
public:
  // Only records the message; the error code and origin are intentionally
  // dropped because the caller re-reports through the original handler.
  void handleError(Error err, const char* message, BaseEmitter* origin) override {
    DebugUtils::unused(err, origin);
    _message.assign(message);
  }

  //! Last captured error message (empty if no error occurred).
  StringTmp<128> _message;
};
+
+// ============================================================================
+// [asmjit::BaseBuilder - Utilities]
+// ============================================================================
+
+static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
+ for (Pass* pass : self->_passes)
+ pass->~Pass();
+ self->_passes.reset();
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Construction / Destruction]
+// ============================================================================
+
// Constructs an empty builder. Three zones back the builder's allocations:
// code nodes (_codeZone), embedded data/comments (_dataZone) and per-run pass
// scratch memory (_passZone); sizes subtract the zone block overhead so each
// block is an exact power-of-two allocation.
BaseBuilder::BaseBuilder() noexcept
  : BaseEmitter(kTypeBuilder),
    _codeZone(32768 - Zone::kBlockOverhead),
    _dataZone(16384 - Zone::kBlockOverhead),
    _passZone(65536 - Zone::kBlockOverhead),
    _allocator(&_codeZone),
    _passes(),
    _labelNodes(),
    _cursor(nullptr),
    _firstNode(nullptr),
    _lastNode(nullptr),
    _nodeFlags(0) {}

BaseBuilder::~BaseBuilder() noexcept {
  BaseBuilder_deletePasses(this);
}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Node Management]
+// ============================================================================
+
// Allocates an InstNode sized for `opCount` operands (rounded up to the next
// supported operand capacity) and constructs it in place via placement new.
Error BaseBuilder::_newInstNode(InstNode** out, uint32_t instId, uint32_t instOptions, uint32_t opCount) {
  uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
  ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);

  // Node size varies with operand capacity, hence the raw zone allocation.
  InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
  if (ASMJIT_UNLIKELY(!node))
    return reportError(DebugUtils::errored(kErrorOutOfMemory));

  *out = new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
  return kErrorOk;
}
+
+
// Creates a LabelNode and registers it with the CodeHolder so it receives a
// fresh label id (see registerLabelNode).
Error BaseBuilder::_newLabelNode(LabelNode** out) {
  *out = nullptr;

  ASMJIT_PROPAGATE(_newNodeT<LabelNode>(out));
  return registerLabelNode(*out);
}

// Creates an AlignNode; no registration needed, alignment is resolved when
// the node stream is serialized.
Error BaseBuilder::_newAlignNode(AlignNode** out, uint32_t alignMode, uint32_t alignment) {
  *out = nullptr;
  return _newNodeT<AlignNode>(out, alignMode, alignment);
}
+
// Creates an EmbedDataNode holding `itemCount` items of `typeId`, to be
// repeated `repeatCount` times at serialization. Small payloads are stored in
// the node's inline buffer; larger ones are copied into `_dataZone`.
Error BaseBuilder::_newEmbedDataNode(EmbedDataNode** out, uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) {
  *out = nullptr;

  uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
  uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);

  if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
    return reportError(DebugUtils::errored(kErrorInvalidArgument));

  uint32_t typeSize = Type::sizeOf(finalTypeId);
  Support::FastUInt8 of = 0;

  // Guard itemCount * typeSize against size_t overflow.
  size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
  if (ASMJIT_UNLIKELY(of))
    return reportError(DebugUtils::errored(kErrorOutOfMemory));

  EmbedDataNode* node;
  ASMJIT_PROPAGATE(_newNodeT<EmbedDataNode>(&node));

  // Note: stores the caller's (possibly abstract) typeId, not finalTypeId.
  node->_embed._typeId = uint8_t(typeId);
  node->_embed._typeSize = uint8_t(typeSize);
  node->_itemCount = itemCount;
  node->_repeatCount = repeatCount;

  uint8_t* dstData = node->_inlineData;
  if (dataSize > EmbedDataNode::kInlineBufferSize) {
    dstData = static_cast<uint8_t*>(_dataZone.alloc(dataSize, 8));
    if (ASMJIT_UNLIKELY(!dstData))
      return reportError(DebugUtils::errored(kErrorOutOfMemory));
    node->_externalData = dstData;
  }

  // `data == nullptr` leaves the buffer uninitialized for the caller to fill.
  if (data)
    memcpy(dstData, data, dataSize);

  *out = node;
  return kErrorOk;
}
+
// Creates a ConstPoolNode; it doubles as a label (registered with the
// CodeHolder) so the pool can be addressed by instructions.
Error BaseBuilder::_newConstPoolNode(ConstPoolNode** out) {
  *out = nullptr;

  ASMJIT_PROPAGATE(_newNodeT<ConstPoolNode>(out));
  return registerLabelNode(*out);
}

// Creates a CommentNode; the comment text is duplicated into `_dataZone`
// (null-terminated) so the caller's buffer need not outlive the node.
Error BaseBuilder::_newCommentNode(CommentNode** out, const char* data, size_t size) {
  *out = nullptr;

  if (data) {
    if (size == SIZE_MAX)
      size = strlen(data);

    if (size > 0) {
      data = static_cast<char*>(_dataZone.dup(data, size, true));
      if (ASMJIT_UNLIKELY(!data))
        return reportError(DebugUtils::errored(kErrorOutOfMemory));
    }
  }

  return _newNodeT<CommentNode>(out, data);
}
+
// Inserts `node` after the current cursor (or at list head when there is no
// cursor) and makes it the new cursor. `node` must be detached and inactive.
BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
  ASMJIT_ASSERT(node);
  ASMJIT_ASSERT(!node->_prev);
  ASMJIT_ASSERT(!node->_next);
  ASMJIT_ASSERT(!node->isActive());

  if (!_cursor) {
    // No cursor: prepend (or start the list if it's empty).
    if (!_firstNode) {
      _firstNode = node;
      _lastNode = node;
    }
    else {
      node->_next = _firstNode;
      _firstNode->_prev = node;
      _firstNode = node;
    }
  }
  else {
    // Splice between the cursor and its successor.
    BaseNode* prev = _cursor;
    BaseNode* next = _cursor->next();

    node->_prev = prev;
    node->_next = next;

    prev->_next = node;
    if (next)
      next->_prev = node;
    else
      _lastNode = node;
  }

  node->addFlags(BaseNode::kFlagIsActive);
  // Section ordering changed; cached section links must be rebuilt lazily.
  if (node->isSection())
    _dirtySectionLinks = true;

  _cursor = node;
  return node;
}
+
+BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
+ ASMJIT_ASSERT(node);
+ ASMJIT_ASSERT(ref);
+
+ ASMJIT_ASSERT(!node->_prev);
+ ASMJIT_ASSERT(!node->_next);
+
+ BaseNode* prev = ref;
+ BaseNode* next = ref->next();
+
+ node->_prev = prev;
+ node->_next = next;
+
+ node->addFlags(BaseNode::kFlagIsActive);
+ if (node->isSection())
+ _dirtySectionLinks = true;
+
+ prev->_next = node;
+ if (next)
+ next->_prev = node;
+ else
+ _lastNode = node;
+
+ return node;
+}
+
// Inserts `node` immediately before `ref` without moving the cursor. `node`
// must be detached and inactive; `ref` must be an active list member.
BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(!node->_prev);
  ASMJIT_ASSERT(!node->_next);
  ASMJIT_ASSERT(!node->isActive());
  ASMJIT_ASSERT(ref != nullptr);
  ASMJIT_ASSERT(ref->isActive());

  BaseNode* prev = ref->prev();
  BaseNode* next = ref;

  node->_prev = prev;
  node->_next = next;

  node->addFlags(BaseNode::kFlagIsActive);
  // Section ordering changed; cached section links must be rebuilt lazily.
  if (node->isSection())
    _dirtySectionLinks = true;

  next->_prev = node;
  if (prev)
    prev->_next = node;
  else
    _firstNode = node;

  return node;
}
+
// Unlinks `node` from the list (no-op if it's not active). If the cursor was
// pointing at `node` it moves to the previous node. The node itself is not
// destroyed — it's zone-allocated and may be re-inserted later.
BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
  if (!node->isActive())
    return node;

  BaseNode* prev = node->prev();
  BaseNode* next = node->next();

  if (_firstNode == node)
    _firstNode = next;
  else
    prev->_next = next;

  if (_lastNode == node)
    _lastNode = prev;
  else
    next->_prev = prev;

  node->_prev = nullptr;
  node->_next = nullptr;
  node->clearFlags(BaseNode::kFlagIsActive);
  if (node->isSection())
    _dirtySectionLinks = true;

  if (_cursor == node)
    _cursor = prev;

  return node;
}
+
+void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
+ if (first == last) {
+ removeNode(first);
+ return;
+ }
+
+ if (!first->isActive())
+ return;
+
+ BaseNode* prev = first->prev();
+ BaseNode* next = last->next();
+
+ if (_firstNode == first)
+ _firstNode = next;
+ else
+ prev->_next = next;
+
+ if (_lastNode == last)
+ _lastNode = prev;
+ else
+ next->_prev = prev;
+
+ BaseNode* node = first;
+ uint32_t didRemoveSection = false;
+
+ for (;;) {
+ next = node->next();
+ ASMJIT_ASSERT(next != nullptr);
+
+ node->_prev = nullptr;
+ node->_next = nullptr;
+ node->clearFlags(BaseNode::kFlagIsActive);
+ didRemoveSection |= uint32_t(node->isSection());
+
+ if (_cursor == node)
+ _cursor = prev;
+
+ if (node == last)
+ break;
+ node = next;
+ }
+
+ if (didRemoveSection)
+ _dirtySectionLinks = true;
+}
+
+BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
+ BaseNode* old = _cursor;
+ _cursor = node;
+ return old;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Section]
+// ============================================================================
+
// Returns (creating on first use) the SectionNode mapped to `sectionId`.
// The `_sectionNodes` vector is grown lazily; reserve happens before node
// creation so the later resize cannot fail.
Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) {
  *out = nullptr;

  if (ASMJIT_UNLIKELY(!_code))
    return DebugUtils::errored(kErrorNotInitialized);

  if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId)))
    return reportError(DebugUtils::errored(kErrorInvalidSection));

  if (sectionId >= _sectionNodes.size()) {
    Error err = _sectionNodes.reserve(&_allocator, sectionId + 1);
    if (ASMJIT_UNLIKELY(err != kErrorOk))
      return reportError(err);
  }

  SectionNode* node = nullptr;
  if (sectionId < _sectionNodes.size())
    node = _sectionNodes[sectionId];

  if (!node) {
    ASMJIT_PROPAGATE(_newNodeT<SectionNode>(&node, sectionId));

    // We have already reserved enough space, this cannot fail now.
    if (sectionId >= _sectionNodes.size())
      _sectionNodes.resize(&_allocator, sectionId + 1);

    _sectionNodes[sectionId] = node;
  }

  *out = node;
  return kErrorOk;
}

// Switches the insertion point to `section`: a never-inserted section node is
// appended at the end of the node list; an active one moves the cursor to the
// last node belonging to that section (using cached section links).
Error BaseBuilder::section(Section* section) {
  SectionNode* node;
  ASMJIT_PROPAGATE(sectionNodeOf(&node, section->id()));

  if (!node->isActive()) {
    // Insert the section at the end if it was not part of the code.
    addAfter(node, lastNode());
    _cursor = node;
  }
  else {
    // This is a bit tricky. We cache section links to make sure that
    // switching sections doesn't involve traversal in linked-list unless
    // the position of the section has changed.
    if (hasDirtySectionLinks())
      updateSectionLinks();

    if (node->_nextSection)
      _cursor = node->_nextSection->_prev;
    else
      _cursor = _lastNode;
  }

  return kErrorOk;
}
+
+void BaseBuilder::updateSectionLinks() noexcept {
+ if (!_dirtySectionLinks)
+ return;
+
+ BaseNode* node_ = _firstNode;
+ SectionNode* currentSection = nullptr;
+
+ while (node_) {
+ if (node_->isSection()) {
+ if (currentSection)
+ currentSection->_nextSection = node_->as<SectionNode>();
+ currentSection = node_->as<SectionNode>();
+ }
+ node_ = node_->next();
+ }
+
+ if (currentSection)
+ currentSection->_nextSection = nullptr;
+
+ _dirtySectionLinks = false;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Labels]
+// ============================================================================
+
// Returns (creating on first use) the LabelNode mapped to `labelId`. The
// label must already exist in the attached CodeHolder.
Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
  *out = nullptr;

  if (ASMJIT_UNLIKELY(!_code))
    return DebugUtils::errored(kErrorNotInitialized);

  uint32_t index = labelId;
  if (ASMJIT_UNLIKELY(index >= _code->labelCount()))
    return DebugUtils::errored(kErrorInvalidLabel);

  if (index >= _labelNodes.size())
    ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1));

  LabelNode* node = _labelNodes[index];
  if (!node) {
    ASMJIT_PROPAGATE(_newNodeT<LabelNode>(&node, labelId));
    _labelNodes[index] = node;
  }

  *out = node;
  return kErrorOk;
}

// Assigns a fresh CodeHolder label id to an existing (label-like) node and
// records the node in `_labelNodes` under that id.
Error BaseBuilder::registerLabelNode(LabelNode* node) {
  if (ASMJIT_UNLIKELY(!_code))
    return DebugUtils::errored(kErrorNotInitialized);

  LabelEntry* le;
  ASMJIT_PROPAGATE(_code->newLabelEntry(&le));
  uint32_t labelId = le->id();

  // We just added one label so it must be true.
  ASMJIT_ASSERT(_labelNodes.size() < labelId + 1);
  ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, labelId + 1));

  _labelNodes[labelId] = node;
  node->_labelId = labelId;

  return kErrorOk;
}

// Creates a LabelNode for an already-allocated label id and maps it in
// `_labelNodes`. `willGrow` is called first so the resize after node creation
// cannot fail (avoiding a node that exists but is unmapped).
static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) {
  ASMJIT_ASSERT(self->_labelNodes.size() < labelId + 1);

  uint32_t growBy = labelId - self->_labelNodes.size();
  Error err = self->_labelNodes.willGrow(&self->_allocator, growBy);

  if (ASMJIT_UNLIKELY(err))
    return self->reportError(err);

  LabelNode* node;
  ASMJIT_PROPAGATE(self->_newNodeT<LabelNode>(&node, labelId));

  self->_labelNodes.resize(&self->_allocator, labelId + 1);
  self->_labelNodes[labelId] = node;
  node->_labelId = labelId;
  return kErrorOk;
}
+
+Label BaseBuilder::newLabel() {
+ uint32_t labelId = Globals::kInvalidId;
+ LabelEntry* le;
+
+ if (_code &&
+ _code->newLabelEntry(&le) == kErrorOk &&
+ BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
+ labelId = le->id();
+ }
+
+ return Label(labelId);
+}
+
+Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
+ uint32_t labelId = Globals::kInvalidId;
+ LabelEntry* le;
+
+ if (_code &&
+ _code->newNamedLabelEntry(&le, name, nameSize, type, parentId) == kErrorOk &&
+ BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
+ labelId = le->id();
+ }
+
+ return Label(labelId);
+}
+
// "Binds" a label in builder terms: inserts the label's node at the cursor.
// The actual offset binding happens later, when the node stream is serialized.
Error BaseBuilder::bind(const Label& label) {
  LabelNode* node;
  ASMJIT_PROPAGATE(labelNodeOf(&node, label));

  addNode(node);
  return kErrorOk;
}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Passes]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
+  // Linear search - the number of registered passes is expected to be small.
+  for (Pass* candidate : _passes) {
+    if (strcmp(candidate->name(), name) == 0)
+      return candidate;
+  }
+  return nullptr;
+}
+
+ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
+  // Takes ownership of `pass` and appends it to `_passes`.
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  // A null `pass` is treated as an out-of-memory condition because
+  // `addPassT()` calls this directly with the result of an allocation;
+  // anything else would be API misuse.
+  if (ASMJIT_UNLIKELY(pass == nullptr))
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  // Already owned by some Builder/Compiler?
+  if (ASMJIT_UNLIKELY(pass->_cb)) {
+    // Adding the same pass to the same emitter twice is a no-op; a pass
+    // owned by a different emitter is an error.
+    return pass->_cb == this ? kErrorOk : DebugUtils::errored(kErrorInvalidState);
+  }
+
+  ASMJIT_PROPAGATE(_passes.append(&_allocator, pass));
+  pass->_cb = this;
+  return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
+  // Detaches `pass` from this emitter (if attached) and destroys it. The
+  // destructor is invoked explicitly because the storage lives in a zone
+  // and is only reclaimed when the zone resets.
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  if (ASMJIT_UNLIKELY(pass == nullptr))
+    return DebugUtils::errored(kErrorInvalidArgument);
+
+  if (pass->_cb != nullptr) {
+    // A pass owned by a different Builder/Compiler cannot be deleted here.
+    if (pass->_cb != this)
+      return DebugUtils::errored(kErrorInvalidState);
+
+    uint32_t passIndex = _passes.indexOf(pass);
+    ASMJIT_ASSERT(passIndex != Globals::kNotFound);
+
+    pass->_cb = nullptr;
+    _passes.removeAt(passIndex);
+  }
+
+  pass->~Pass();
+  return kErrorOk;
+}
+
+Error BaseBuilder::runPasses() {
+  // Runs all registered passes in registration order and stops at the first
+  // failure.
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  if (_passes.empty())
+    return kErrorOk;
+
+  // Temporarily install a PostponedErrorHandler so an error raised inside a
+  // pass is captured (including its message) and only reported after the
+  // previous handler has been restored.
+  ErrorHandler* prev = errorHandler();
+  PostponedErrorHandler postponed;
+
+  Error err = kErrorOk;
+  setErrorHandler(&postponed);
+
+  for (Pass* pass : _passes) {
+    // Each pass starts with a clean `_passZone`; its memory is recycled.
+    _passZone.reset();
+    err = pass->run(&_passZone, _logger);
+    if (err)
+      break;
+  }
+  // Release pass-local memory and restore the original error handler before
+  // reporting anything.
+  _passZone.reset();
+  setErrorHandler(prev);
+
+  if (ASMJIT_UNLIKELY(err))
+    return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr);
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Emit]
+// ============================================================================
+
+Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
+  // Instead of encoding machine code, the Builder records the instruction as
+  // an InstNode appended at the current cursor.
+  uint32_t opCount = EmitterUtils::opCountFromEmitArgs(o0, o1, o2, opExt);
+  uint32_t options = instOptions() | forcedInstOptions();
+
+  if (options & BaseInst::kOptionReserved) {
+    if (ASMJIT_UNLIKELY(!_code))
+      return DebugUtils::errored(kErrorNotInitialized);
+
+#ifndef ASMJIT_NO_VALIDATION
+    // Strict validation.
+    if (hasValidationOption(kValidationOptionIntermediate)) {
+      Operand_ opArray[Globals::kMaxOpCount];
+      EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
+
+      Error err = InstAPI::validate(arch(), BaseInst(instId, options, _extraReg), opArray, opCount);
+      if (ASMJIT_UNLIKELY(err)) {
+        // Reset per-instruction state even on failure so a rejected
+        // instruction doesn't leak its options into the next one.
+        resetInstOptions();
+        resetExtraReg();
+        resetInlineComment();
+        return reportError(err);
+      }
+    }
+#endif
+
+    // Clear options that should never be part of `InstNode`.
+    options &= ~BaseInst::kOptionReserved;
+  }
+
+  // Allocate a node large enough for `opCount` operands, rounded up to the
+  // nearest supported capacity.
+  uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+  ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
+
+  InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+  const char* comment = inlineComment();
+
+  // The per-instruction state is consumed by this emit regardless of outcome.
+  resetInstOptions();
+  resetInlineComment();
+
+  if (ASMJIT_UNLIKELY(!node)) {
+    resetExtraReg();
+    return reportError(DebugUtils::errored(kErrorOutOfMemory));
+  }
+
+  // Placement-new the node into the zone-allocated storage.
+  node = new(node) InstNode(this, instId, options, opCount, opCapacity);
+  node->setExtraReg(extraReg());
+  node->setOp(0, o0);
+  node->setOp(1, o1);
+  node->setOp(2, o2);
+  for (uint32_t i = 3; i < opCount; i++)
+    node->setOp(i, opExt[i - 3]);
+  // Reset the unused operand slots between `opCount` and `opCapacity`.
+  node->resetOpRange(opCount, opCapacity);
+
+  if (comment)
+    node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
+
+  addNode(node);
+  resetExtraReg();
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Align]
+// ============================================================================
+
+Error BaseBuilder::align(uint32_t alignMode, uint32_t alignment) {
+  // Alignment is recorded as an AlignNode and applied when the node list is
+  // serialized to a concrete emitter.
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  AlignNode* alignNode;
+  ASMJIT_PROPAGATE(_newAlignNode(&alignNode, alignMode, alignment));
+  addNode(alignNode);
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Embed]
+// ============================================================================
+
+Error BaseBuilder::embed(const void* data, size_t dataSize) {
+  // Raw bytes are recorded as an EmbedDataNode of `dataSize` U8 items.
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  EmbedDataNode* dataNode;
+  ASMJIT_PROPAGATE(_newEmbedDataNode(&dataNode, Type::kIdU8, data, dataSize));
+  addNode(dataNode);
+
+  return kErrorOk;
+}
+
+Error BaseBuilder::embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t itemRepeat) {
+  // Typed data is recorded as an EmbedDataNode of `itemCount` items of
+  // `typeId`, repeated `itemRepeat` times.
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  EmbedDataNode* dataNode;
+  ASMJIT_PROPAGATE(_newEmbedDataNode(&dataNode, typeId, data, itemCount, itemRepeat));
+  addNode(dataNode);
+
+  return kErrorOk;
+}
+
+Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
+  // Emits a constant pool: align to the pool's requirement, bind `label` at
+  // its start, then embed the serialized pool bytes.
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  if (!isLabelValid(label))
+    return reportError(DebugUtils::errored(kErrorInvalidLabel));
+
+  ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
+  ASMJIT_PROPAGATE(bind(label));
+
+  // Allocate an uninitialized data node and let the pool serialize into it.
+  EmbedDataNode* node;
+  ASMJIT_PROPAGATE(_newEmbedDataNode(&node, Type::kIdU8, nullptr, pool.size()));
+
+  pool.fill(node->data());
+  addNode(node);
+  return kErrorOk;
+}
+
+// EmbedLabel / EmbedLabelDelta
+// ----------------------------
+//
+// If dataSize is zero it means that the size is the same as target register
+// width, however, if it's provided we really want to validate whether it's
+// within the possible range.
+
+// Accepts 0 ("use the target register width") and power-of-2 sizes up to 8.
+static inline bool BaseBuilder_checkDataSize(size_t dataSize) noexcept {
+  switch (dataSize) {
+    case 0:
+    case 1:
+    case 2:
+    case 4:
+    case 8:
+      return true;
+    default:
+      return false;
+  }
+}
+
+Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
+  // Records a request to embed the absolute address of `label`; `dataSize`
+  // of zero means "target register width".
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  if (!BaseBuilder_checkDataSize(dataSize))
+    return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+  EmbedLabelNode* embedNode;
+  ASMJIT_PROPAGATE(_newNodeT<EmbedLabelNode>(&embedNode, label.id(), uint32_t(dataSize)));
+  addNode(embedNode);
+
+  return kErrorOk;
+}
+
+Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
+  // Records a request to embed the distance between `label` and `base`;
+  // `dataSize` of zero means "target register width".
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  if (!BaseBuilder_checkDataSize(dataSize))
+    return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+  EmbedLabelDeltaNode* deltaNode;
+  ASMJIT_PROPAGATE(_newNodeT<EmbedLabelDeltaNode>(&deltaNode, label.id(), base.id(), uint32_t(dataSize)));
+  addNode(deltaNode);
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Comment]
+// ============================================================================
+
+Error BaseBuilder::comment(const char* data, size_t size) {
+  // Comments are stored as CommentNodes; they produce no machine code.
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+  CommentNode* commentNode;
+  ASMJIT_PROPAGATE(_newCommentNode(&commentNode, data, size));
+  addNode(commentNode);
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Serialize]
+// ============================================================================
+
+Error BaseBuilder::serializeTo(BaseEmitter* dst) {
+  // Walks the node list from the beginning and re-emits every node into
+  // `dst` (typically an Assembler). NOTE(review): the do-while dereferences
+  // `_firstNode` unconditionally - this assumes the builder is attached so
+  // the initial SectionNode exists; confirm callers never invoke this on a
+  // detached builder.
+  Error err = kErrorOk;
+  BaseNode* node_ = _firstNode;
+
+  Operand_ opArray[Globals::kMaxOpCount];
+
+  do {
+    dst->setInlineComment(node_->inlineComment());
+
+    if (node_->isInst()) {
+      InstNode* node = node_->as<InstNode>();
+
+      // NOTE: Inlined to remove one additional call per instruction.
+      dst->setInstOptions(node->instOptions());
+      dst->setExtraReg(node->extraReg());
+
+      const Operand_* op = node->operands();
+      const Operand_* opExt = EmitterUtils::noExt;
+
+      // Instructions with more than 3 operands pass the rest via `opExt`.
+      uint32_t opCount = node->opCount();
+      if (opCount > 3) {
+        uint32_t i = 4;
+        opArray[3] = op[3];
+
+        while (i < opCount) {
+          opArray[i].copyFrom(op[i]);
+          i++;
+        }
+        // Reset the remaining slots so `dst` never sees stale operands.
+        while (i < Globals::kMaxOpCount) {
+          opArray[i].reset();
+          i++;
+        }
+        opExt = opArray + 3;
+      }
+
+      err = dst->_emit(node->id(), op[0], op[1], op[2], opExt);
+    }
+    else if (node_->isLabel()) {
+      // ConstPoolNode acts as a label, so check the more specific type first.
+      if (node_->isConstPool()) {
+        ConstPoolNode* node = node_->as<ConstPoolNode>();
+        err = dst->embedConstPool(node->label(), node->constPool());
+      }
+      else {
+        LabelNode* node = node_->as<LabelNode>();
+        err = dst->bind(node->label());
+      }
+    }
+    else if (node_->isAlign()) {
+      AlignNode* node = node_->as<AlignNode>();
+      err = dst->align(node->alignMode(), node->alignment());
+    }
+    else if (node_->isEmbedData()) {
+      EmbedDataNode* node = node_->as<EmbedDataNode>();
+      err = dst->embedDataArray(node->typeId(), node->data(), node->itemCount(), node->repeatCount());
+    }
+    else if (node_->isEmbedLabel()) {
+      EmbedLabelNode* node = node_->as<EmbedLabelNode>();
+      err = dst->embedLabel(node->label(), node->dataSize());
+    }
+    else if (node_->isEmbedLabelDelta()) {
+      EmbedLabelDeltaNode* node = node_->as<EmbedLabelDeltaNode>();
+      err = dst->embedLabelDelta(node->label(), node->baseLabel(), node->dataSize());
+    }
+    else if (node_->isSection()) {
+      SectionNode* node = node_->as<SectionNode>();
+      err = dst->section(_code->sectionById(node->id()));
+    }
+    else if (node_->isComment()) {
+      CommentNode* node = node_->as<CommentNode>();
+      err = dst->comment(node->inlineComment());
+    }
+    // Unknown node types (e.g. user-defined) are silently skipped.
+
+    if (err) break;
+    node_ = node_->next();
+  } while (node_);
+
+  return err;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Events]
+// ============================================================================
+
+Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
+  ASMJIT_PROPAGATE(Base::onAttach(code));
+
+  // Create the node of section 0 - it becomes the first node of the list
+  // and the initial cursor.
+  SectionNode* initialSection;
+  Error err = sectionNodeOf(&initialSection, 0);
+
+  if (!err)
+    err = _passes.willGrow(&_allocator, 8);
+
+  // Undo the attach on failure so the emitter stays in a consistent state.
+  if (ASMJIT_UNLIKELY(err)) {
+    onDetach(code);
+    return err;
+  }
+
+  _cursor = initialSection;
+  _firstNode = initialSection;
+  _lastNode = initialSection;
+  initialSection->setFlags(BaseNode::kFlagIsActive);
+
+  return kErrorOk;
+}
+
+Error BaseBuilder::onDetach(CodeHolder* code) noexcept {
+  // Destroy all passes and drop all nodes. Node, data, and pass memory is
+  // owned by the zones below, so resetting them releases everything at once.
+  BaseBuilder_deletePasses(this);
+  _sectionNodes.reset();
+  _labelNodes.reset();
+
+  _allocator.reset(&_codeZone);
+  _codeZone.reset();
+  _dataZone.reset();
+  _passZone.reset();
+
+  _nodeFlags = 0;
+
+  _cursor = nullptr;
+  _firstNode = nullptr;
+  _lastNode = nullptr;
+
+  return Base::onDetach(code);
+}
+
+// ============================================================================
+// [asmjit::Pass - Construction / Destruction]
+// ============================================================================
+
+// A Pass starts detached - `_cb` stays null until `BaseBuilder::addPass()`
+// takes ownership. The name is borrowed, not copied.
+Pass::Pass(const char* name) noexcept
+  : _cb(nullptr),
+    _name(name) {}
+Pass::~Pass() noexcept {}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_BUILDER
diff --git a/client/asmjit/core/builder.h b/client/asmjit/core/builder.h
new file mode 100644
index 0000000..d6d7d22
--- /dev/null
+++ b/client/asmjit/core/builder.h
@@ -0,0 +1,1435 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_BUILDER_H_INCLUDED
+#define ASMJIT_CORE_BUILDER_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_BUILDER
+
+#include "../core/assembler.h"
+#include "../core/codeholder.h"
+#include "../core/constpool.h"
+#include "../core/formatter.h"
+#include "../core/inst.h"
+#include "../core/operand.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/type.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_builder
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseBuilder;
+class Pass;
+
+class BaseNode;
+class InstNode;
+class SectionNode;
+class LabelNode;
+class AlignNode;
+class EmbedDataNode;
+class EmbedLabelNode;
+class ConstPoolNode;
+class CommentNode;
+class SentinelNode;
+class LabelDeltaNode;
+
+// Only used by Compiler infrastructure.
+class JumpAnnotation;
+
+// ============================================================================
+// [asmjit::BaseBuilder]
+// ============================================================================
+
+//! Builder interface.
+//!
+//! `BaseBuilder` interface was designed to be used as a \ref BaseAssembler
+//! replacement in case pre-processing or post-processing of the generated code
+//! is required. The code can be modified during or after code generation. Pre
+//! or post processing can be done manually or through a \ref Pass object. \ref
+//! BaseBuilder stores the emitted code as a double-linked list of nodes, which
+//! allows O(1) insertion and removal during processing.
+//!
+//! Check out architecture specific builders for more details and examples:
+//!
+//! - \ref x86::Builder - X86/X64 builder implementation.
+class ASMJIT_VIRTAPI BaseBuilder : public BaseEmitter {
+public:
+  ASMJIT_NONCOPYABLE(BaseBuilder)
+  //! Base class alias used by event handlers (`onAttach()` / `onDetach()`).
+  typedef BaseEmitter Base;
+
+  //! Base zone used to allocate nodes and passes.
+  Zone _codeZone;
+  //! Data zone used to allocate data and names.
+  Zone _dataZone;
+  //! Pass zone, passed to `Pass::run()`.
+  Zone _passZone;
+  //! Allocator that uses `_codeZone`.
+  ZoneAllocator _allocator;
+
+  //! Array of `Pass` objects.
+  ZoneVector<Pass*> _passes;
+  //! Maps section indexes to `SectionNode` nodes.
+  ZoneVector<SectionNode*> _sectionNodes;
+  //! Maps label indexes to `LabelNode` nodes.
+  ZoneVector<LabelNode*> _labelNodes;
+
+  //! Current node (cursor).
+  BaseNode* _cursor;
+  //! First node of the current section.
+  BaseNode* _firstNode;
+  //! Last node of the current section.
+  BaseNode* _lastNode;
+
+  //! Flags assigned to each new node.
+  uint32_t _nodeFlags;
+  //! The sections links are dirty (used internally).
+  bool _dirtySectionLinks;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates a new `BaseBuilder` instance.
+  ASMJIT_API BaseBuilder() noexcept;
+  //! Destroys the `BaseBuilder` instance.
+  ASMJIT_API virtual ~BaseBuilder() noexcept;
+
+  //! \}
+
+  //! \name Node Management
+  //! \{
+
+  //! Returns the first node.
+  inline BaseNode* firstNode() const noexcept { return _firstNode; }
+  //! Returns the last node.
+  inline BaseNode* lastNode() const noexcept { return _lastNode; }
+
+  //! Allocates and instantiates a new node of type `T` and returns its instance.
+  //! If the allocation fails `nullptr` is returned.
+  //!
+  //! The template argument `T` must be a type that extends \ref BaseNode.
+  //!
+  //! \remarks The pointer returned (if non-null) is owned by the Builder or
+  //! Compiler. When the Builder/Compiler is destroyed it destroys all nodes
+  //! it created so no manual memory management is required.
+  template<typename T, typename... Args>
+  inline Error _newNodeT(T** out, Args&&... args) {
+    *out = _allocator.newT<T>(this, std::forward<Args>(args)...);
+    if (ASMJIT_UNLIKELY(!*out))
+      return reportError(DebugUtils::errored(kErrorOutOfMemory));
+    return kErrorOk;
+  }
+
+  //! Creates a new \ref InstNode.
+  ASMJIT_API Error _newInstNode(InstNode** out, uint32_t instId, uint32_t instOptions, uint32_t opCount);
+  //! Creates a new \ref LabelNode.
+  ASMJIT_API Error _newLabelNode(LabelNode** out);
+  //! Creates a new \ref AlignNode.
+  ASMJIT_API Error _newAlignNode(AlignNode** out, uint32_t alignMode, uint32_t alignment);
+  //! Creates a new \ref EmbedDataNode.
+  ASMJIT_API Error _newEmbedDataNode(EmbedDataNode** out, uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1);
+  //! Creates a new \ref ConstPoolNode.
+  ASMJIT_API Error _newConstPoolNode(ConstPoolNode** out);
+  //! Creates a new \ref CommentNode.
+  ASMJIT_API Error _newCommentNode(CommentNode** out, const char* data, size_t size);
+
+  //! Adds `node` after the current and sets the current node to the given `node`.
+  ASMJIT_API BaseNode* addNode(BaseNode* node) noexcept;
+  //! Inserts the given `node` after `ref`.
+  ASMJIT_API BaseNode* addAfter(BaseNode* node, BaseNode* ref) noexcept;
+  //! Inserts the given `node` before `ref`.
+  ASMJIT_API BaseNode* addBefore(BaseNode* node, BaseNode* ref) noexcept;
+  //! Removes the given `node`.
+  ASMJIT_API BaseNode* removeNode(BaseNode* node) noexcept;
+  //! Removes multiple nodes.
+  ASMJIT_API void removeNodes(BaseNode* first, BaseNode* last) noexcept;
+
+  //! Returns the cursor.
+  //!
+  //! When the Builder/Compiler is created it automatically creates a '.text'
+  //! \ref SectionNode, which will be the initial one. When instructions are
+  //! added they are always added after the cursor and the cursor is changed
+  //! to be that newly added node. Use `setCursor()` to change where new nodes
+  //! are inserted.
+  inline BaseNode* cursor() const noexcept {
+    return _cursor;
+  }
+
+  //! Sets the current node to `node` and return the previous one.
+  ASMJIT_API BaseNode* setCursor(BaseNode* node) noexcept;
+
+  //! Sets the current node without returning the previous node.
+  //!
+  //! Only use this function if you are concerned about performance and want
+  //! this inlined (for example if you set the cursor in a loop, etc...).
+  inline void _setCursor(BaseNode* node) noexcept {
+    _cursor = node;
+  }
+
+  //! \}
+
+  //! \name Section Management
+  //! \{
+
+  //! Returns a vector of SectionNode objects.
+  //!
+  //! \note If a section of some id is not associated with the Builder/Compiler
+  //! it would be null, so always check for nulls if you iterate over the vector.
+  inline const ZoneVector<SectionNode*>& sectionNodes() const noexcept {
+    return _sectionNodes;
+  }
+
+  //! Tests whether the `SectionNode` of the given `sectionId` was registered.
+  inline bool hasRegisteredSectionNode(uint32_t sectionId) const noexcept {
+    return sectionId < _sectionNodes.size() && _sectionNodes[sectionId] != nullptr;
+  }
+
+  //! Returns or creates a `SectionNode` that matches the given `sectionId`.
+  //!
+  //! \remarks This function will either get the existing `SectionNode` or create
+  //! it in case it wasn't created before. You can check whether a section has a
+  //! registered `SectionNode` by using `BaseBuilder::hasRegisteredSectionNode()`.
+  ASMJIT_API Error sectionNodeOf(SectionNode** out, uint32_t sectionId);
+
+  ASMJIT_API Error section(Section* section) override;
+
+  //! Returns whether the section links of active section nodes are dirty. You can
+  //! update these links by calling `updateSectionLinks()` in such case.
+  inline bool hasDirtySectionLinks() const noexcept { return _dirtySectionLinks; }
+
+  //! Updates links of all active section nodes.
+  ASMJIT_API void updateSectionLinks() noexcept;
+
+  //! \}
+
+  //! \name Label Management
+  //! \{
+
+  //! Returns a vector of \ref LabelNode nodes.
+  //!
+  //! \note If a label of some id is not associated with the Builder/Compiler
+  //! it would be null, so always check for nulls if you iterate over the vector.
+  inline const ZoneVector<LabelNode*>& labelNodes() const noexcept { return _labelNodes; }
+
+  //! Tests whether the `LabelNode` of the given `labelId` was registered.
+  inline bool hasRegisteredLabelNode(uint32_t labelId) const noexcept {
+    return labelId < _labelNodes.size() && _labelNodes[labelId] != nullptr;
+  }
+
+  //! \overload
+  inline bool hasRegisteredLabelNode(const Label& label) const noexcept {
+    return hasRegisteredLabelNode(label.id());
+  }
+
+  //! Gets or creates a \ref LabelNode that matches the given `labelId`.
+  //!
+  //! \remarks This function will either get the existing `LabelNode` or create
+  //! it in case it wasn't created before. You can check whether a label has a
+  //! registered `LabelNode` by calling \ref BaseBuilder::hasRegisteredLabelNode().
+  ASMJIT_API Error labelNodeOf(LabelNode** out, uint32_t labelId);
+
+  //! \overload
+  inline Error labelNodeOf(LabelNode** out, const Label& label) {
+    return labelNodeOf(out, label.id());
+  }
+
+  //! Registers this \ref LabelNode (internal).
+  //!
+  //! This function is used internally to register a newly created `LabelNode`
+  //! with this instance of Builder/Compiler. Use \ref labelNodeOf() functions
+  //! to get back \ref LabelNode from a label or its identifier.
+  ASMJIT_API Error registerLabelNode(LabelNode* node);
+
+  ASMJIT_API Label newLabel() override;
+  ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override;
+  ASMJIT_API Error bind(const Label& label) override;
+
+  //! \}
+
+  //! \name Passes
+  //! \{
+
+  //! Returns a vector of `Pass` instances that will be executed by `runPasses()`.
+  inline const ZoneVector<Pass*>& passes() const noexcept { return _passes; }
+
+  //! Allocates and instantiates a new pass of type `T` and returns its instance.
+  //! If the allocation fails `nullptr` is returned.
+  //!
+  //! The template argument `T` must be a type that extends \ref Pass.
+  //!
+  //! \remarks The pointer returned (if non-null) is owned by the Builder or
+  //! Compiler. When the Builder/Compiler is destroyed it destroys all passes
+  //! it created so no manual memory management is required.
+  template<typename T>
+  inline T* newPassT() noexcept { return _codeZone.newT<T>(); }
+
+  //! \overload
+  template<typename T, typename... Args>
+  inline T* newPassT(Args&&... args) noexcept { return _codeZone.newT<T>(std::forward<Args>(args)...); }
+
+  template<typename T>
+  inline Error addPassT() { return addPass(newPassT<T>()); }
+
+  template<typename T, typename... Args>
+  inline Error addPassT(Args&&... args) { return addPass(newPassT<T, Args...>(std::forward<Args>(args)...)); }
+
+  //! Returns `Pass` by name.
+  //!
+  //! If the pass having the given `name` doesn't exist `nullptr` is returned.
+  ASMJIT_API Pass* passByName(const char* name) const noexcept;
+  //! Adds `pass` to the list of passes.
+  ASMJIT_API Error addPass(Pass* pass) noexcept;
+  //! Removes `pass` from the list of passes and delete it.
+  ASMJIT_API Error deletePass(Pass* pass) noexcept;
+
+  //! Runs all passes in order.
+  ASMJIT_API Error runPasses();
+
+  //! \}
+
+  //! \name Emit
+  //! \{
+
+  ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) override;
+
+  //! \}
+
+  //! \name Align
+  //! \{
+
+  ASMJIT_API Error align(uint32_t alignMode, uint32_t alignment) override;
+
+  //! \}
+
+  //! \name Embed
+  //! \{
+
+  ASMJIT_API Error embed(const void* data, size_t dataSize) override;
+  ASMJIT_API Error embedDataArray(uint32_t typeId, const void* data, size_t count, size_t repeat = 1) override;
+  ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
+
+  ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;
+  ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) override;
+
+  //! \}
+
+  //! \name Comment
+  //! \{
+
+  ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
+
+  //! \}
+
+  //! \name Serialization
+  //! \{
+
+  //! Serializes everything to the given emitter `dst`.
+  //!
+  //! Although not explicitly required the emitter will most probably be of
+  //! Assembler type. The reason is that there is no known use of serializing
+  //! nodes held by Builder/Compiler into another Builder-like emitter.
+  ASMJIT_API Error serializeTo(BaseEmitter* dst);
+
+  //! \}
+
+  //! \name Events
+  //! \{
+
+  ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+  ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+  //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+  ASMJIT_DEPRECATED("Use serializeTo() instead, serialize() is now also an instruction.")
+  inline Error serialize(BaseEmitter* dst) {
+    return serializeTo(dst);
+  }
+
+#ifndef ASMJIT_NO_LOGGING
+  ASMJIT_DEPRECATED("Use Formatter::formatNodeList(sb, formatFlags, builder)")
+  inline Error dump(String& sb, uint32_t formatFlags = 0) const noexcept {
+    return Formatter::formatNodeList(sb, formatFlags, this);
+  }
+#endif // !ASMJIT_NO_LOGGING
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+// ============================================================================
+// [asmjit::BaseNode]
+// ============================================================================
+
+//! Base node.
+//!
+//! Every node represents a building-block used by \ref BaseBuilder. It can
+//! be instruction, data, label, comment, directive, or any other high-level
+//! representation that can be transformed to the building blocks mentioned.
+//! Every class that inherits \ref BaseBuilder can define its own high-level
+//! nodes that can be later lowered to basic nodes like instructions.
+class BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(BaseNode)
+
+ union {
+ struct {
+ //! Previous node.
+ BaseNode* _prev;
+ //! Next node.
+ BaseNode* _next;
+ };
+ //! Links (an alternative view to previous and next nodes).
+ BaseNode* _links[2];
+ };
+
+ //! Data shared between all types of nodes.
+ struct AnyData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Not used by BaseNode.
+ uint8_t _reserved0;
+ //! Not used by BaseNode.
+ uint8_t _reserved1;
+ };
+
+ //! Data used by \ref InstNode.
+ struct InstData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Instruction operands count (used).
+ uint8_t _opCount;
+ //! Instruction operands capacity (allocated).
+ uint8_t _opCapacity;
+ };
+
+ //! Data used by \ref EmbedDataNode.
+ struct EmbedData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Type id, see \ref Type::Id.
+ uint8_t _typeId;
+ //! Size of `_typeId`.
+ uint8_t _typeSize;
+ };
+
+ //! Data used by \ref SentinelNode.
+ struct SentinelData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Sentinel type.
+ uint8_t _sentinelType;
+ //! Not used by BaseNode.
+ uint8_t _reserved1;
+ };
+
+ //! Data that can have different meaning dependning on \ref NodeType.
+ union {
+ //! Data useful by any node type.
+ AnyData _any;
+ //! Data specific to \ref InstNode.
+ InstData _inst;
+ //! Data specific to \ref EmbedDataNode.
+ EmbedData _embed;
+ //! Data specific to \ref SentinelNode.
+ SentinelData _sentinel;
+ };
+
+ //! Node position in code (should be unique).
+ uint32_t _position;
+
+ //! Value reserved for AsmJit users never touched by AsmJit itself.
+ union {
+ //! User data as 64-bit integer.
+ uint64_t _userDataU64;
+ //! User data as pointer.
+ void* _userDataPtr;
+ };
+
+ //! Data used exclusively by the current `Pass`.
+ void* _passData;
+
+ //! Inline comment/annotation or nullptr if not used.
+ const char* _inlineComment;
+
+ //! Type of `BaseNode`.
+ enum NodeType : uint32_t {
+ //! Invalid node (internal, don't use).
+ kNodeNone = 0,
+
+ // [BaseBuilder]
+
+ //! Node is \ref InstNode or \ref InstExNode.
+ kNodeInst = 1,
+ //! Node is \ref SectionNode.
+ kNodeSection = 2,
+ //! Node is \ref LabelNode.
+ kNodeLabel = 3,
+ //! Node is \ref AlignNode.
+ kNodeAlign = 4,
+ //! Node is \ref EmbedDataNode.
+ kNodeEmbedData = 5,
+ //! Node is \ref EmbedLabelNode.
+ kNodeEmbedLabel = 6,
+ //! Node is \ref EmbedLabelDeltaNode.
+ kNodeEmbedLabelDelta = 7,
+ //! Node is \ref ConstPoolNode.
+ kNodeConstPool = 8,
+ //! Node is \ref CommentNode.
+ kNodeComment = 9,
+ //! Node is \ref SentinelNode.
+ kNodeSentinel = 10,
+
+ // [BaseCompiler]
+
+ //! Node is \ref JumpNode (acts as InstNode).
+ kNodeJump = 15,
+ //! Node is \ref FuncNode (acts as LabelNode).
+ kNodeFunc = 16,
+ //! Node is \ref FuncRetNode (acts as InstNode).
+ kNodeFuncRet = 17,
+ //! Node is \ref InvokeNode (acts as InstNode).
+ kNodeInvoke = 18,
+
+ // [UserDefined]
+
+ //! First id of a user-defined node.
+ kNodeUser = 32,
+
+#ifndef ASMJIT_NO_DEPRECATED
+ kNodeFuncCall = kNodeInvoke
+#endif // !ASMJIT_NO_DEPRECATED
+ };
+
+ //! Node flags, specify what the node is and/or does.
+ enum Flags : uint32_t {
+ //! Node is code that can be executed (instruction, label, align, etc...).
+ kFlagIsCode = 0x01u,
+ //! Node is data that cannot be executed (data, const-pool, etc...).
+ kFlagIsData = 0x02u,
+ //! Node is informative, can be removed and ignored.
+ kFlagIsInformative = 0x04u,
+ //! Node can be safely removed if unreachable.
+ kFlagIsRemovable = 0x08u,
+ //! Node does nothing when executed (label, align, explicit nop).
+ kFlagHasNoEffect = 0x10u,
+ //! Node is an instruction or acts as it.
+ kFlagActsAsInst = 0x20u,
+ //! Node is a label or acts as it.
+ kFlagActsAsLabel = 0x40u,
+ //! Node is active (part of the code).
+ kFlagIsActive = 0x80u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseNode` - always use `BaseBuilder` to allocate nodes.
+ ASMJIT_INLINE BaseNode(BaseBuilder* cb, uint32_t type, uint32_t flags = 0) noexcept {
+ _prev = nullptr;
+ _next = nullptr;
+ _any._nodeType = uint8_t(type);
+ _any._nodeFlags = uint8_t(flags | cb->_nodeFlags);
+ _any._reserved0 = 0;
+ _any._reserved1 = 0;
+ _position = 0;
+ _userDataU64 = 0;
+ _passData = nullptr;
+ _inlineComment = nullptr;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Casts this node to `T*`.
+ template<typename T>
+ inline T* as() noexcept { return static_cast<T*>(this); }
+ //! Casts this node to `const T*`.
+ template<typename T>
+ inline const T* as() const noexcept { return static_cast<const T*>(this); }
+
+ //! Returns previous node or `nullptr` if this node is either first or not
+ //! part of Builder/Compiler node-list.
+ inline BaseNode* prev() const noexcept { return _prev; }
+ //! Returns next node or `nullptr` if this node is either last or not part
+ //! of Builder/Compiler node-list.
+ inline BaseNode* next() const noexcept { return _next; }
+
+ //! Returns the type of the node, see `NodeType`.
+ inline uint32_t type() const noexcept { return _any._nodeType; }
+
+ //! Sets the type of the node, see `NodeType` (internal).
+ //!
+ //! \remarks You should never set a type of a node to anything else than the
+ //! initial value. This function is only provided for users that use custom
+ //! nodes and need to change the type either during construction or later.
+ inline void setType(uint32_t type) noexcept { _any._nodeType = uint8_t(type); }
+
+ //! Tests whether this node is either `InstNode` or extends it.
+ inline bool isInst() const noexcept { return hasFlag(kFlagActsAsInst); }
+ //! Tests whether this node is `SectionNode`.
+ inline bool isSection() const noexcept { return type() == kNodeSection; }
+ //! Tests whether this node is either `LabelNode` or extends it.
+ inline bool isLabel() const noexcept { return hasFlag(kFlagActsAsLabel); }
+ //! Tests whether this node is `AlignNode`.
+ inline bool isAlign() const noexcept { return type() == kNodeAlign; }
+ //! Tests whether this node is `EmbedDataNode`.
+ inline bool isEmbedData() const noexcept { return type() == kNodeEmbedData; }
+ //! Tests whether this node is `EmbedLabelNode`.
+ inline bool isEmbedLabel() const noexcept { return type() == kNodeEmbedLabel; }
+ //! Tests whether this node is `EmbedLabelDeltaNode`.
+ inline bool isEmbedLabelDelta() const noexcept { return type() == kNodeEmbedLabelDelta; }
+ //! Tests whether this node is `ConstPoolNode`.
+ inline bool isConstPool() const noexcept { return type() == kNodeConstPool; }
+ //! Tests whether this node is `CommentNode`.
+ inline bool isComment() const noexcept { return type() == kNodeComment; }
+ //! Tests whether this node is `SentinelNode`.
+ inline bool isSentinel() const noexcept { return type() == kNodeSentinel; }
+
+ //! Tests whether this node is `FuncNode`.
+ inline bool isFunc() const noexcept { return type() == kNodeFunc; }
+ //! Tests whether this node is `FuncRetNode`.
+ inline bool isFuncRet() const noexcept { return type() == kNodeFuncRet; }
+ //! Tests whether this node is `InvokeNode`.
+ inline bool isInvoke() const noexcept { return type() == kNodeInvoke; }
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use isInvoke")
+ inline bool isFuncCall() const noexcept { return isInvoke(); }
+#endif // !ASMJIT_NO_DEPRECATED
+
+ //! Returns the node flags, see \ref Flags.
+ inline uint32_t flags() const noexcept { return _any._nodeFlags; }
+ //! Tests whether the node has the given `flag` set.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (uint32_t(_any._nodeFlags) & flag) != 0; }
+ //! Replaces node flags with `flags`.
+ inline void setFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(flags); }
+ //! Adds the given `flags` to node flags.
+ inline void addFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(_any._nodeFlags | flags); }
+ //! Clears the given `flags` from node flags.
+ inline void clearFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(_any._nodeFlags & (flags ^ 0xFF)); }
+
+ //! Tests whether the node is code that can be executed.
+ inline bool isCode() const noexcept { return hasFlag(kFlagIsCode); }
+ //! Tests whether the node is data that cannot be executed.
+ inline bool isData() const noexcept { return hasFlag(kFlagIsData); }
+ //! Tests whether the node is informative only (is never encoded like comment, etc...).
+ inline bool isInformative() const noexcept { return hasFlag(kFlagIsInformative); }
+ //! Tests whether the node is removable if it's in an unreachable code block.
+ inline bool isRemovable() const noexcept { return hasFlag(kFlagIsRemovable); }
+ //! Tests whether the node has no effect when executed (label, .align, nop, ...).
+ inline bool hasNoEffect() const noexcept { return hasFlag(kFlagHasNoEffect); }
+ //! Tests whether the node is part of the code.
+ inline bool isActive() const noexcept { return hasFlag(kFlagIsActive); }
+
+ //! Tests whether the node has a position assigned.
+ //!
+ //! \remarks Returns `true` if node position is non-zero.
+ inline bool hasPosition() const noexcept { return _position != 0; }
+ //! Returns node position.
+ inline uint32_t position() const noexcept { return _position; }
+ //! Sets node position.
+ //!
+ //! Node position is a 32-bit unsigned integer that is used by Compiler to
+ //! track where the node is relatively to the start of the function. It doesn't
+ //! describe a byte position in a binary, instead it's just a pseudo position
+ //! used by liveness analysis and other tools around Compiler.
+ //!
+ //! If you don't use Compiler then you may use `position()` and `setPosition()`
+ //! freely for your own purposes if the 32-bit value limit is okay for you.
+ inline void setPosition(uint32_t position) noexcept { _position = position; }
+
+ //! Returns user data casted to `T*`.
+ //!
+ //! User data is dedicated to be used only by AsmJit users and not touched
+ //! by the library. The data has a pointer size so you can either store a
+ //! pointer or `intptr_t` value through `setUserDataAsIntPtr()`.
+ template<typename T>
+ inline T* userDataAsPtr() const noexcept { return static_cast<T*>(_userDataPtr); }
+ //! Returns user data casted to `int64_t`.
+ inline int64_t userDataAsInt64() const noexcept { return int64_t(_userDataU64); }
+ //! Returns user data casted to `uint64_t`.
+ inline uint64_t userDataAsUInt64() const noexcept { return _userDataU64; }
+
+ //! Sets user data to `data`.
+ template<typename T>
+ inline void setUserDataAsPtr(T* data) noexcept { _userDataPtr = static_cast<void*>(data); }
+ //! Sets user data to the given 64-bit signed `value`.
+ inline void setUserDataAsInt64(int64_t value) noexcept { _userDataU64 = uint64_t(value); }
+ //! Sets user data to the given 64-bit unsigned `value`.
+ inline void setUserDataAsUInt64(uint64_t value) noexcept { _userDataU64 = value; }
+
+ //! Resets user data to zero / nullptr.
+ inline void resetUserData() noexcept { _userDataU64 = 0; }
+
+ //! Tests whether the node has an associated pass data.
+ inline bool hasPassData() const noexcept { return _passData != nullptr; }
+ //! Returns the node pass data - data used during processing & transformations.
+ template<typename T>
+ inline T* passData() const noexcept { return (T*)_passData; }
+ //! Sets the node pass data to `data`.
+ template<typename T>
+ inline void setPassData(T* data) noexcept { _passData = (void*)data; }
+ //! Resets the node pass data to nullptr.
+ inline void resetPassData() noexcept { _passData = nullptr; }
+
+ //! Tests whether the node has an inline comment/annotation.
+ inline bool hasInlineComment() const noexcept { return _inlineComment != nullptr; }
+ //! Returns an inline comment/annotation string.
+ inline const char* inlineComment() const noexcept { return _inlineComment; }
+ //! Sets an inline comment/annotation string to `s`.
+ inline void setInlineComment(const char* s) noexcept { _inlineComment = s; }
+ //! Resets an inline comment/annotation string to nullptr.
+ inline void resetInlineComment() noexcept { _inlineComment = nullptr; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InstNode]
+// ============================================================================
+
+//! Instruction node.
+//!
+//! Wraps an instruction with its options and operands.
+class InstNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(InstNode)
+
+ enum : uint32_t {
+ //! Count of embedded operands per `InstNode` that are always allocated as
+ //! a part of the instruction. Minimum embedded operands is 4, but in 32-bit
+ //! mode pointers are smaller and we can embed 5. The rest (up to 6 operands)
+ //! is always stored in `InstExNode`.
+ kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_))
+ };
+
+ //! Base instruction data.
+ BaseInst _baseInst;
+ //! First 4 or 5 operands (indexed from 0).
+ Operand_ _opArray[kBaseOpCapacity];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `InstNode` instance.
+ ASMJIT_INLINE InstNode(BaseBuilder* cb, uint32_t instId, uint32_t options, uint32_t opCount, uint32_t opCapacity = kBaseOpCapacity) noexcept
+ : BaseNode(cb, kNodeInst, kFlagIsCode | kFlagIsRemovable | kFlagActsAsInst),
+ _baseInst(instId, options) {
+ _inst._opCapacity = uint8_t(opCapacity);
+ _inst._opCount = uint8_t(opCount);
+ }
+
+ //! \cond INTERNAL
+ //! Reset all built-in operands, including `extraReg`.
+ inline void _resetOps() noexcept {
+ _baseInst.resetExtraReg();
+ resetOpRange(0, opCapacity());
+ }
+ //! \endcond
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the underlying `BaseInst` (instruction id, options, extra register).
+ inline BaseInst& baseInst() noexcept { return _baseInst; }
+ //! \overload
+ inline const BaseInst& baseInst() const noexcept { return _baseInst; }
+
+ //! Returns the instruction id, see `BaseInst::Id`.
+ inline uint32_t id() const noexcept { return _baseInst.id(); }
+ //! Sets the instruction id to `id`, see `BaseInst::Id`.
+ inline void setId(uint32_t id) noexcept { _baseInst.setId(id); }
+
+ //! Returns instruction options.
+ inline uint32_t instOptions() const noexcept { return _baseInst.options(); }
+ //! Sets instruction options.
+ inline void setInstOptions(uint32_t options) noexcept { _baseInst.setOptions(options); }
+ //! Adds instruction options.
+ inline void addInstOptions(uint32_t options) noexcept { _baseInst.addOptions(options); }
+ //! Clears instruction options.
+ inline void clearInstOptions(uint32_t options) noexcept { _baseInst.clearOptions(options); }
+
+ //! Tests whether the node has an extra register operand.
+ inline bool hasExtraReg() const noexcept { return _baseInst.hasExtraReg(); }
+ //! Returns extra register operand.
+ inline RegOnly& extraReg() noexcept { return _baseInst.extraReg(); }
+ //! \overload
+ inline const RegOnly& extraReg() const noexcept { return _baseInst.extraReg(); }
+ //! Sets extra register operand to `reg`.
+ inline void setExtraReg(const BaseReg& reg) noexcept { _baseInst.setExtraReg(reg); }
+ //! Sets extra register operand to `reg`.
+ inline void setExtraReg(const RegOnly& reg) noexcept { _baseInst.setExtraReg(reg); }
+ //! Resets extra register operand.
+ inline void resetExtraReg() noexcept { _baseInst.resetExtraReg(); }
+
+ //! Returns operand count.
+ inline uint32_t opCount() const noexcept { return _inst._opCount; }
+ //! Returns operand capacity.
+ inline uint32_t opCapacity() const noexcept { return _inst._opCapacity; }
+
+ //! Sets operand count.
+ inline void setOpCount(uint32_t opCount) noexcept { _inst._opCount = uint8_t(opCount); }
+
+ //! Returns operands array.
+ inline Operand* operands() noexcept { return (Operand*)_opArray; }
+ //! Returns operands array (const).
+ inline const Operand* operands() const noexcept { return (const Operand*)_opArray; }
+
+ //! Returns operand at the given `index`.
+ inline Operand& op(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ return _opArray[index].as<Operand>();
+ }
+
+ //! Returns operand at the given `index` (const).
+ inline const Operand& op(uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ return _opArray[index].as<Operand>();
+ }
+
+ //! Sets operand at the given `index` to `op`.
+ inline void setOp(uint32_t index, const Operand_& op) noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ _opArray[index].copyFrom(op);
+ }
+
+ //! Resets operand at the given `index` to none.
+ inline void resetOp(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ _opArray[index].reset();
+ }
+
+ //! Resets operands at `[start, end)` range.
+ inline void resetOpRange(uint32_t start, uint32_t end) noexcept {
+ for (uint32_t i = start; i < end; i++)
+ _opArray[i].reset();
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Tests whether any operand in `[0, opCount())` is of the given `opType`.
+ inline bool hasOpType(uint32_t opType) const noexcept {
+ for (uint32_t i = 0, count = opCount(); i < count; i++)
+ if (_opArray[i].opType() == opType)
+ return true;
+ return false;
+ }
+
+ //! Tests whether the instruction has at least one register operand.
+ inline bool hasRegOp() const noexcept { return hasOpType(Operand::kOpReg); }
+ //! Tests whether the instruction has at least one memory operand.
+ inline bool hasMemOp() const noexcept { return hasOpType(Operand::kOpMem); }
+ //! Tests whether the instruction has at least one immediate operand.
+ inline bool hasImmOp() const noexcept { return hasOpType(Operand::kOpImm); }
+ //! Tests whether the instruction has at least one label operand.
+ inline bool hasLabelOp() const noexcept { return hasOpType(Operand::kOpLabel); }
+
+ //! Returns the index of the first operand of the given `opType`, or
+ //! `opCount()` if no such operand exists.
+ inline uint32_t indexOfOpType(uint32_t opType) const noexcept {
+ uint32_t i = 0;
+ uint32_t count = opCount();
+
+ while (i < count) {
+ if (_opArray[i].opType() == opType)
+ break;
+ i++;
+ }
+
+ return i;
+ }
+
+ //! Returns the index of the first memory operand, or `opCount()` if none.
+ inline uint32_t indexOfMemOp() const noexcept { return indexOfOpType(Operand::kOpMem); }
+ //! Returns the index of the first immediate operand, or `opCount()` if none.
+ inline uint32_t indexOfImmOp() const noexcept { return indexOfOpType(Operand::kOpImm); }
+ //! Returns the index of the first label operand, or `opCount()` if none.
+ inline uint32_t indexOfLabelOp() const noexcept { return indexOfOpType(Operand::kOpLabel); }
+
+ //! \}
+
+ //! \name Rewriting
+ //! \{
+
+ //! \cond INTERNAL
+ //! Returns a pointer to the first rewritable id (`_extraReg._id`).
+ //!
+ //! NOTE(review): `getRewriteIndex()` computes indices past this single id
+ //! (it asserts `index < 32`), so this relies on the operand array following
+ //! `_extraReg._id` contiguously in memory — verify the struct layout.
+ inline uint32_t* _getRewriteArray() noexcept { return &_baseInst._extraReg._id; }
+ //! \overload
+ inline const uint32_t* _getRewriteArray() const noexcept { return &_baseInst._extraReg._id; }
+
+ //! Returns the index of the given `id` pointer within the rewrite array.
+ ASMJIT_INLINE uint32_t getRewriteIndex(const uint32_t* id) const noexcept {
+ const uint32_t* array = _getRewriteArray();
+ ASMJIT_ASSERT(array <= id);
+
+ size_t index = (size_t)(id - array);
+ ASMJIT_ASSERT(index < 32);
+
+ return uint32_t(index);
+ }
+
+ //! Rewrites the value at rewrite-array position `index` to `id`.
+ ASMJIT_INLINE void rewriteIdAtIndex(uint32_t index, uint32_t id) noexcept {
+ uint32_t* array = _getRewriteArray();
+ array[index] = id;
+ }
+ //! \endcond
+
+ //! \}
+
+ //! \name Static Functions
+ //! \{
+
+ //! \cond INTERNAL
+ //! Returns the operand capacity to allocate for `opCount` operands — either
+ //! `kBaseOpCapacity` or the maximum (`Globals::kMaxOpCount`).
+ static inline uint32_t capacityOfOpCount(uint32_t opCount) noexcept {
+ return opCount <= kBaseOpCapacity ? kBaseOpCapacity : Globals::kMaxOpCount;
+ }
+
+ //! Returns the node size (in bytes) required to hold `opCapacity` operands.
+ static inline size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept {
+ size_t base = sizeof(InstNode) - kBaseOpCapacity * sizeof(Operand);
+ return base + opCapacity * sizeof(Operand);
+ }
+ //! \endcond
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InstExNode]
+// ============================================================================
+
+//! Instruction node with maximum number of operands.
+//!
+//! This node is created automatically by Builder/Compiler in case that the
+//! required number of operands exceeds the default capacity of `InstNode`.
+class InstExNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(InstExNode)
+
+ //! Continued `_opArray[]` to hold up to `kMaxOpCount` operands.
+ Operand_ _opArrayEx[Globals::kMaxOpCount - kBaseOpCapacity];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `InstExNode` instance.
+ //!
+ //! NOTE(review): `opCapacity` is forwarded into the 4th `InstNode` ctor
+ //! parameter, which is `opCount`, leaving `_opCapacity` at its default of
+ //! `kBaseOpCapacity` — verify against the `InstNode` constructor and the
+ //! Builder code that allocates this node. TODO confirm.
+ inline InstExNode(BaseBuilder* cb, uint32_t instId, uint32_t options, uint32_t opCapacity = Globals::kMaxOpCount) noexcept
+ : InstNode(cb, instId, options, opCapacity) {}
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::SectionNode]
+// ============================================================================
+
+//! Section node.
+//!
+//! Wraps a section id; the node itself does nothing when executed
+//! (constructed with `kFlagHasNoEffect`).
+class SectionNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(SectionNode)
+
+ //! Section id.
+ uint32_t _id;
+
+ //! Next section node that follows this section.
+ //!
+ //! This link is only valid when the section is active (is part of the code)
+ //! and when `Builder::hasDirtySectionLinks()` returns `false`. If you intend
+ //! to use this field you should always call `Builder::updateSectionLinks()`
+ //! before you do so.
+ SectionNode* _nextSection;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `SectionNode` instance wrapping section `id`.
+ inline SectionNode(BaseBuilder* cb, uint32_t id = 0) noexcept
+ : BaseNode(cb, kNodeSection, kFlagHasNoEffect),
+ _id(id),
+ _nextSection(nullptr) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the section id.
+ inline uint32_t id() const noexcept { return _id; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::LabelNode]
+// ============================================================================
+
+//! Label node.
+//!
+//! Wraps a label id and acts as a label in the node list (constructed with
+//! `kFlagActsAsLabel`); has no effect when executed (`kFlagHasNoEffect`).
+class LabelNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(LabelNode)
+
+ //! Label identifier.
+ uint32_t _labelId;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `LabelNode` instance wrapping `labelId`.
+ inline LabelNode(BaseBuilder* cb, uint32_t labelId = 0) noexcept
+ : BaseNode(cb, kNodeLabel, kFlagHasNoEffect | kFlagActsAsLabel),
+ _labelId(labelId) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns \ref Label representation of the \ref LabelNode.
+ inline Label label() const noexcept { return Label(_labelId); }
+ //! Returns the id of the label.
+ inline uint32_t labelId() const noexcept { return _labelId; }
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use labelId() instead")
+ inline uint32_t id() const noexcept { return labelId(); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+// ============================================================================
+// [asmjit::AlignNode]
+// ============================================================================
+
+//! Align directive (BaseBuilder).
+//!
+//! Wraps `.align` directive.
+class AlignNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(AlignNode)
+
+ //! Align mode, see `AlignMode`.
+ uint32_t _alignMode;
+ //! Alignment (in bytes).
+ uint32_t _alignment;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `AlignNode` instance with the given mode and alignment.
+ inline AlignNode(BaseBuilder* cb, uint32_t alignMode, uint32_t alignment) noexcept
+ : BaseNode(cb, kNodeAlign, kFlagIsCode | kFlagHasNoEffect),
+ _alignMode(alignMode),
+ _alignment(alignment) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns align mode.
+ inline uint32_t alignMode() const noexcept { return _alignMode; }
+ //! Sets align mode to `alignMode`.
+ inline void setAlignMode(uint32_t alignMode) noexcept { _alignMode = alignMode; }
+
+ //! Returns alignment in bytes.
+ inline uint32_t alignment() const noexcept { return _alignment; }
+ //! Sets alignment in bytes to `alignment`.
+ inline void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::EmbedDataNode]
+// ============================================================================
+
+//! Embed data node.
+//!
+//! Wraps `.data` directive. The node contains data that will be placed at the
+//! node's position in the assembler stream. The data is considered to be RAW;
+//! no analysis nor byte-order conversion is performed on RAW data.
+class EmbedDataNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(EmbedDataNode)
+
+ enum : uint32_t {
+ //! Size of the inline data buffer — pads the node up to 128 bytes.
+ kInlineBufferSize = 128 - (sizeof(BaseNode) + sizeof(size_t) * 2)
+ };
+
+ //! Number of (typed) items stored in the data array.
+ size_t _itemCount;
+ //! Number of times the data is repeated (see `repeatCount()`).
+ size_t _repeatCount;
+
+ union {
+ //! Pointer to external data, used when the data doesn't fit inline
+ //! (see `data()`).
+ uint8_t* _externalData;
+ //! Inline data buffer, used when `dataSize() <= kInlineBufferSize`.
+ uint8_t _inlineData[kInlineBufferSize];
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `EmbedDataNode` instance.
+ inline EmbedDataNode(BaseBuilder* cb) noexcept
+ : BaseNode(cb, kNodeEmbedData, kFlagIsData),
+ _itemCount(0),
+ _repeatCount(0) {
+ // Default to untyped bytes (u8, element size 1). Note this was previously
+ // one statement joined by an accidental comma operator; fixed to two
+ // statements for clarity (behavior unchanged).
+ _embed._typeId = uint8_t(Type::kIdU8);
+ _embed._typeSize = uint8_t(1);
+ memset(_inlineData, 0, kInlineBufferSize);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns \ref Type::Id of the data.
+ inline uint32_t typeId() const noexcept { return _embed._typeId; }
+ //! Returns the size of a single data element.
+ inline uint32_t typeSize() const noexcept { return _embed._typeSize; }
+
+ //! Returns a pointer to the data casted to `uint8_t`.
+ inline uint8_t* data() const noexcept {
+ return dataSize() <= kInlineBufferSize ? const_cast<uint8_t*>(_inlineData) : _externalData;
+ }
+
+ //! Returns a pointer to the data casted to `T`.
+ template<typename T>
+ inline T* dataAs() const noexcept { return reinterpret_cast<T*>(data()); }
+
+ //! Returns the number of (typed) items in the array.
+ inline size_t itemCount() const noexcept { return _itemCount; }
+
+ //! Returns how many times the data is repeated (default 1).
+ //!
+ //! Repeated data is useful when defining constants for SIMD, for example.
+ inline size_t repeatCount() const noexcept { return _repeatCount; }
+
+ //! Returns the size of the data, not considering the number of times it repeats.
+ //!
+ //! \note The returned value is the same as `typeSize() * itemCount()`.
+ inline size_t dataSize() const noexcept { return typeSize() * _itemCount; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::EmbedLabelNode]
+// ============================================================================
+
+//! Label data node.
+//!
+//! Wraps a label id and a data size; the target label's address is embedded
+//! as data at this node's position.
+class EmbedLabelNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(EmbedLabelNode)
+
+ //! Id of the label whose address is embedded.
+ uint32_t _labelId;
+ //! Size of the embedded data in bytes.
+ //!
+ //! NOTE(review): the meaning of `dataSize == 0` is not visible here —
+ //! presumably a default/native size; confirm against the emitter.
+ uint32_t _dataSize;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `EmbedLabelNode` instance.
+ inline EmbedLabelNode(BaseBuilder* cb, uint32_t labelId = 0, uint32_t dataSize = 0) noexcept
+ : BaseNode(cb, kNodeEmbedLabel, kFlagIsData),
+ _labelId(labelId),
+ _dataSize(dataSize) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the label to embed as \ref Label operand.
+ inline Label label() const noexcept { return Label(_labelId); }
+ //! Returns the id of the label.
+ inline uint32_t labelId() const noexcept { return _labelId; }
+
+ //! Sets the label id from `label` operand.
+ inline void setLabel(const Label& label) noexcept { setLabelId(label.id()); }
+ //! Sets the label id (use with caution, improper use can break a lot of things).
+ inline void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; }
+
+ //! Returns the data size.
+ inline uint32_t dataSize() const noexcept { return _dataSize; }
+ //! Sets the data size.
+ inline void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; }
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use labelId() instead")
+ inline uint32_t id() const noexcept { return labelId(); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+// ============================================================================
+// [asmjit::EmbedLabelDeltaNode]
+// ============================================================================
+
+//! Label delta data node.
+//!
+//! Wraps two label ids and a data size; the difference (delta) between the
+//! label and the base label is embedded as data at this node's position.
+class EmbedLabelDeltaNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(EmbedLabelDeltaNode)
+
+ //! Id of the label.
+ uint32_t _labelId;
+ //! Id of the base label the delta is computed against.
+ uint32_t _baseLabelId;
+ //! Size of the embedded delta in bytes.
+ uint32_t _dataSize;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `EmbedLabelDeltaNode` instance.
+ inline EmbedLabelDeltaNode(BaseBuilder* cb, uint32_t labelId = 0, uint32_t baseLabelId = 0, uint32_t dataSize = 0) noexcept
+ : BaseNode(cb, kNodeEmbedLabelDelta, kFlagIsData),
+ _labelId(labelId),
+ _baseLabelId(baseLabelId),
+ _dataSize(dataSize) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the label as `Label` operand.
+ inline Label label() const noexcept { return Label(_labelId); }
+ //! Returns the id of the label.
+ inline uint32_t labelId() const noexcept { return _labelId; }
+
+ //! Sets the label id from `label` operand.
+ inline void setLabel(const Label& label) noexcept { setLabelId(label.id()); }
+ //! Sets the label id.
+ inline void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; }
+
+ //! Returns the base label as `Label` operand.
+ inline Label baseLabel() const noexcept { return Label(_baseLabelId); }
+ //! Returns the id of the base label.
+ inline uint32_t baseLabelId() const noexcept { return _baseLabelId; }
+
+ //! Sets the base label id from `label` operand.
+ inline void setBaseLabel(const Label& baseLabel) noexcept { setBaseLabelId(baseLabel.id()); }
+ //! Sets the base label id.
+ inline void setBaseLabelId(uint32_t baseLabelId) noexcept { _baseLabelId = baseLabelId; }
+
+ //! Returns the size of the embedded label address.
+ inline uint32_t dataSize() const noexcept { return _dataSize; }
+ //! Sets the size of the embedded label address.
+ inline void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; }
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use labelId() instead")
+ inline uint32_t id() const noexcept { return labelId(); }
+
+ ASMJIT_DEPRECATED("Use setLabelId() instead")
+ inline void setId(uint32_t id) noexcept { setLabelId(id); }
+
+ ASMJIT_DEPRECATED("Use baseLabelId() instead")
+ inline uint32_t baseId() const noexcept { return baseLabelId(); }
+
+ ASMJIT_DEPRECATED("Use setBaseLabelId() instead")
+ inline void setBaseId(uint32_t id) noexcept { setBaseLabelId(id); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+// ============================================================================
+// [asmjit::ConstPoolNode]
+// ============================================================================
+
+//! A node that wraps `ConstPool`.
+class ConstPoolNode : public LabelNode {
+public:
+ ASMJIT_NONCOPYABLE(ConstPoolNode)
+
+ //! Constant pool storage; allocates from the builder's `_codeZone`.
+ ConstPool _constPool;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `ConstPoolNode` instance.
+ inline ConstPoolNode(BaseBuilder* cb, uint32_t id = 0) noexcept
+ : LabelNode(cb, id),
+ _constPool(&cb->_codeZone) {
+
+ // Override the type/flags set by the LabelNode base constructor: a
+ // constant pool is emitted data, not a no-effect code label.
+ setType(kNodeConstPool);
+ addFlags(kFlagIsData);
+ clearFlags(kFlagIsCode | kFlagHasNoEffect);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the constant-pool is empty.
+ inline bool empty() const noexcept { return _constPool.empty(); }
+ //! Returns the size of the constant-pool in bytes.
+ inline size_t size() const noexcept { return _constPool.size(); }
+ //! Returns minimum alignment.
+ inline size_t alignment() const noexcept { return _constPool.alignment(); }
+
+ //! Returns the wrapped `ConstPool` instance.
+ inline ConstPool& constPool() noexcept { return _constPool; }
+ //! Returns the wrapped `ConstPool` instance (const).
+ inline const ConstPool& constPool() const noexcept { return _constPool; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! See `ConstPool::add()`.
+ inline Error add(const void* data, size_t size, size_t& dstOffset) noexcept {
+ return _constPool.add(data, size, dstOffset);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::CommentNode]
+// ============================================================================
+
+//! Comment node.
+//!
+//! Informative-only node that carries a comment string; it is never encoded
+//! and can be removed (`kFlagIsInformative | kFlagHasNoEffect | kFlagIsRemovable`).
+class CommentNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(CommentNode)
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `CommentNode` instance.
+ inline CommentNode(BaseBuilder* cb, const char* comment) noexcept
+ : BaseNode(cb, kNodeComment, kFlagIsInformative | kFlagHasNoEffect | kFlagIsRemovable) {
+ // Only the pointer is stored (in BaseNode's `_inlineComment`); the string
+ // is not copied here, so it must outlive the node.
+ _inlineComment = comment;
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::SentinelNode]
+// ============================================================================
+
+//! Sentinel node.
+//!
+//! Sentinel is a marker that is completely ignored by the code builder. It's
+//! used to remember a position in a code as it never gets removed by any pass.
+class SentinelNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(SentinelNode)
+
+ //! Type of the sentinel (purely informative purposes).
+ enum SentinelType : uint32_t {
+ //! Type of the sentinel is not known.
+ kSentinelUnknown = 0u,
+ //! This is a sentinel used at the end of \ref FuncNode.
+ kSentinelFuncEnd = 1u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `SentinelNode` instance.
+ inline SentinelNode(BaseBuilder* cb, uint32_t sentinelType = kSentinelUnknown) noexcept
+ : BaseNode(cb, kNodeSentinel, kFlagIsInformative | kFlagHasNoEffect) {
+
+ _sentinel._sentinelType = uint8_t(sentinelType);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the type of the sentinel, see `SentinelType`.
+ inline uint32_t sentinelType() const noexcept {
+ return _sentinel._sentinelType;
+ }
+
+ //! Sets the type of the sentinel, see `SentinelType`.
+ inline void setSentinelType(uint32_t type) noexcept {
+ _sentinel._sentinelType = uint8_t(type);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::Pass]
+// ============================================================================
+
+//! Pass can be used to implement code transformations, analysis, and lowering.
+class ASMJIT_VIRTAPI Pass {
+public:
+ ASMJIT_BASE_CLASS(Pass)
+ ASMJIT_NONCOPYABLE(Pass)
+
+ //! BaseBuilder this pass is assigned to.
+ BaseBuilder* _cb;
+ //! Name of the pass.
+ const char* _name;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a pass with the given `name` (only the pointer is stored).
+ ASMJIT_API Pass(const char* name) noexcept;
+ //! Destroys the pass.
+ ASMJIT_API virtual ~Pass() noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns \ref BaseBuilder associated with the pass.
+ inline const BaseBuilder* cb() const noexcept { return _cb; }
+ //! Returns the name of the pass.
+ inline const char* name() const noexcept { return _name; }
+
+ //! \}
+
+ //! \name Pass Interface
+ //! \{
+
+ //! Processes the code stored in Builder or Compiler.
+ //!
+ //! This is the only function that is called by the `BaseBuilder` to process
+ //! the code. It passes `zone`, which will be reset after the `run()` finishes.
+ //! Pure virtual — every concrete pass must implement it and return an `Error`.
+ virtual Error run(Zone* zone, Logger* logger) = 0;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_BUILDER
+#endif // ASMJIT_CORE_BUILDER_H_INCLUDED
diff --git a/client/asmjit/core/callconv.cpp b/client/asmjit/core/callconv.cpp
new file mode 100644
index 0000000..722dbcd
--- /dev/null
+++ b/client/asmjit/core/callconv.cpp
@@ -0,0 +1,59 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/arch.h"
+#include "../core/func.h"
+#include "../core/type.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86callconv_p.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/armcallconv_p.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::CallConv - Init / Reset]
+// ============================================================================
+
+// Initializes this `CallConv` to the calling convention `ccId`, adjusted for
+// the target `environment` (the architecture family selects the backend).
+// Returns `kErrorInvalidArgument` when the environment's architecture family
+// is unknown or not compiled in.
+ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId, const Environment& environment) noexcept {
+ // Start from a clean state so a failed init leaves no stale data behind.
+ reset();
+
+#ifdef ASMJIT_BUILD_X86
+ if (environment.isFamilyX86())
+ return x86::CallConvInternal::init(*this, ccId, environment);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (environment.isFamilyARM())
+ // Fixed: was `ccIdv` (undeclared identifier — would not compile when
+ // ASMJIT_BUILD_ARM is defined).
+ return arm::CallConvInternal::init(*this, ccId, environment);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/callconv.h b/client/asmjit/core/callconv.h
new file mode 100644
index 0000000..6e75540
--- /dev/null
+++ b/client/asmjit/core/callconv.h
@@ -0,0 +1,374 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CALLCONV_H_INCLUDED
+#define ASMJIT_CORE_CALLCONV_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/operand.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_function
+//! \{
+
+// ============================================================================
+// [asmjit::CallConv]
+// ============================================================================
+
+//! Function calling convention.
+//!
+//! Function calling convention is a scheme that defines how function parameters
+//! are passed and how function returns its result. AsmJit defines a variety of
+//! architecture and OS specific calling conventions and also provides a compile
+//! time detection to make the code-generation easier.
+struct CallConv {
+ //! Calling convention id, see `Id`.
+ uint8_t _id;
+ //! Architecture identifier, see \ref Environment::Arch.
+ uint8_t _arch;
+ //! Register assignment strategy.
+ uint8_t _strategy;
+ //! Flags.
+ uint8_t _flags;
+
+ //! Red zone size (AMD64 == 128 bytes).
+ uint8_t _redZoneSize;
+ //! Spill zone size (WIN64 == 32 bytes).
+ uint8_t _spillZoneSize;
+ //! Natural stack alignment as defined by OS/ABI.
+ uint8_t _naturalStackAlignment;
+ uint8_t _reserved[1];
+
+ //! Mask of all passed registers, per group.
+ uint32_t _passedRegs[BaseReg::kGroupVirt];
+ //! Mask of all preserved registers, per group.
+ uint32_t _preservedRegs[BaseReg::kGroupVirt];
+
+ //! Internal limits of AsmJit's CallConv.
+ enum Limits : uint32_t {
+ kMaxRegArgsPerGroup = 16
+ };
+
+ //! Passed registers' order.
+ union RegOrder {
+ //! Passed registers, ordered.
+ uint8_t id[kMaxRegArgsPerGroup];
+ uint32_t packed[(kMaxRegArgsPerGroup + 3) / 4];
+ };
+
+ //! Passed registers' order, per register group.
+ RegOrder _passedOrder[BaseReg::kGroupVirt];
+
+ //! Calling convention id.
+ //!
+ //! Calling conventions can be divided into the following groups:
+ //!
+ //! - Universal - calling conventions are applicable to any target. They
+ //! will be converted to a target dependent calling convention at runtime
+ //! by \ref init(). The purpose of these conventions is to make using
+ //! functions less target dependent and closer to how they are declared
+ //! in C and C++.
+ //!
+ //! - Target specific - calling conventions that are used by a particular
+ //! architecture and ABI. For example Windows 64-bit calling convention
+ //! and AMD64 SystemV calling convention.
+ enum Id : uint32_t {
+ //! None or invalid (can't be used).
+ kIdNone = 0,
+
+ // ------------------------------------------------------------------------
+ // [Universal Calling Conventions]
+ // ------------------------------------------------------------------------
+
+ //! Standard function call or explicit `__cdecl` where it can be specified.
+ //!
+ //! This is a universal convention, which is used to initialize specific
+  //! calling conventions based on architecture, platform, and its ABI.
+ kIdCDecl = 1,
+
+ //! `__stdcall` on targets that support this calling convention.
+ //!
+ //! \note This calling convention is only supported on 32-bit X86. If used
+ //! on environment that doesn't support this calling convention \ref kIdCDecl
+ //! will be used instead.
+ kIdStdCall = 2,
+
+ //! `__fastcall` on targets that support this calling convention.
+ //!
+ //! \note This calling convention is only supported on 32-bit X86. If used
+ //! on environment that doesn't support this calling convention \ref kIdCDecl
+ //! will be used instead.
+ kIdFastCall = 3,
+
+ //! `__vectorcall` on targets that support this calling convention.
+ //!
+ //! \note This calling convention is only supported on 32-bit and 64-bit
+ //! X86 architecture on Windows platform. If used on environment that doesn't
+ //! support this calling convention \ref kIdCDecl will be used instead.
+ kIdVectorCall = 4,
+
+ //! `__thiscall` on targets that support this calling convention.
+ //!
+ //! \note This calling convention is only supported on 32-bit X86 Windows
+ //! platform. If used on environment that doesn't support this calling
+ //! convention \ref kIdCDecl will be used instead.
+ kIdThisCall = 5,
+
+ //! `__attribute__((regparm(1)))` convention (GCC and Clang).
+ kIdRegParm1 = 6,
+ //! `__attribute__((regparm(2)))` convention (GCC and Clang).
+ kIdRegParm2 = 7,
+ //! `__attribute__((regparm(3)))` convention (GCC and Clang).
+ kIdRegParm3 = 8,
+
+ //! Soft-float calling convention (ARM).
+ //!
+ //! Floating point arguments are passed via general purpose registers.
+ kIdSoftFloat = 9,
+
+ //! Hard-float calling convention (ARM).
+ //!
+ //! Floating point arguments are passed via SIMD registers.
+ kIdHardFloat = 10,
+
+ //! AsmJit specific calling convention designed for calling functions
+ //! inside a multimedia code that don't use many registers internally,
+ //! but are long enough to be called and not inlined. These functions are
+ //! usually used to calculate trigonometric functions, logarithms, etc...
+ kIdLightCall2 = 16,
+ kIdLightCall3 = 17,
+ kIdLightCall4 = 18,
+
+ // ------------------------------------------------------------------------
+ // [ABI-Specific Calling Conventions]
+ // ------------------------------------------------------------------------
+
+ kIdX64SystemV = 32,
+ kIdX64Windows = 33,
+
+ // ------------------------------------------------------------------------
+ // [Host]
+ // ------------------------------------------------------------------------
+
+ kIdHost =
+#if ASMJIT_ARCH_ARM == 32 && defined(__SOFTFP__)
+ kIdSoftFloat
+#elif ASMJIT_ARCH_ARM == 32 && !defined(__SOFTFP__)
+ kIdHardFloat
+#else
+ kIdCDecl
+#endif
+
+#ifndef ASMJIT_NO_DEPRECATE
+ , kIdHostCDecl = kIdCDecl
+ , kIdHostStdCall = kIdStdCall
+ , kIdHostFastCall = kIdFastCall
+ , kIdHostLightCall2 = kIdLightCall2
+ , kIdHostLightCall3 = kIdLightCall3
+ , kIdHostLightCall4 = kIdLightCall4
+#endif // !ASMJIT_NO_DEPRECATE
+ };
+
+ //! Strategy used to assign registers to function arguments.
+ //!
+ //! This is AsmJit specific. It basically describes how AsmJit should convert
+ //! the function arguments defined by `FuncSignature` into register IDs and
+ //! stack offsets. The default strategy `kStrategyDefault` assigns registers
+ //! and then stack whereas `kStrategyWin64` strategy does register shadowing
+ //! as defined by WIN64 calling convention - it applies to 64-bit calling
+ //! conventions only.
+ enum Strategy : uint32_t {
+ //! Default register assignment strategy.
+ kStrategyDefault = 0,
+ //! Windows 64-bit ABI register assignment strategy.
+ kStrategyX64Windows = 1,
+ //! Windows 64-bit __vectorcall register assignment strategy.
+ kStrategyX64VectorCall = 2,
+
+ //! Number of assignment strategies.
+ kStrategyCount = 3
+ };
+
+ //! Calling convention flags.
+ enum Flags : uint32_t {
+ //! Callee is responsible for cleaning up the stack.
+ kFlagCalleePopsStack = 0x01u,
+ //! Pass vector arguments indirectly (as a pointer).
+ kFlagIndirectVecArgs = 0x02u,
+ //! Pass F32 and F64 arguments by VEC128 register.
+ kFlagPassFloatsByVec = 0x04u,
+ //! Pass MMX and vector arguments by stack if the function has variable arguments.
+ kFlagPassVecByStackIfVA = 0x08u,
+ //! MMX registers are passed and returned via GP registers.
+ kFlagPassMmxByGp = 0x10u,
+ //! MMX registers are passed and returned via XMM registers.
+ kFlagPassMmxByXmm = 0x20u,
+ //! Calling convention can be used with variable arguments.
+ kFlagVarArgCompatible = 0x80u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Initializes this calling convention to the given `ccId` based on the
+ //! `environment`.
+ //!
+ //! See \ref Id and \ref Environment for more details.
+ ASMJIT_API Error init(uint32_t ccId, const Environment& environment) noexcept;
+
+ //! Resets this CallConv struct into a defined state.
+ //!
+ //! It's recommended to reset the \ref CallConv struct in case you would
+ //! like create a custom calling convention as it prevents from using an
+ //! uninitialized data (CallConv doesn't have a constructor that would
+ //! initialize it, it's just a struct).
+ inline void reset() noexcept {
+ memset(this, 0, sizeof(*this));
+ memset(_passedOrder, 0xFF, sizeof(_passedOrder));
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the calling convention id, see `Id`.
+ inline uint32_t id() const noexcept { return _id; }
+ //! Sets the calling convention id, see `Id`.
+ inline void setId(uint32_t id) noexcept { _id = uint8_t(id); }
+
+ //! Returns the calling function architecture id.
+ inline uint32_t arch() const noexcept { return _arch; }
+ //! Sets the calling function architecture id.
+ inline void setArch(uint32_t arch) noexcept { _arch = uint8_t(arch); }
+
+ //! Returns the strategy used to assign registers to arguments, see `Strategy`.
+ inline uint32_t strategy() const noexcept { return _strategy; }
+ //! Sets the strategy used to assign registers to arguments, see `Strategy`.
+ inline void setStrategy(uint32_t strategy) noexcept { _strategy = uint8_t(strategy); }
+
+ //! Tests whether the calling convention has the given `flag` set.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (uint32_t(_flags) & flag) != 0; }
+ //! Returns the calling convention flags, see `Flags`.
+ inline uint32_t flags() const noexcept { return _flags; }
+  //! Sets the calling convention flags, see `Flags`.
+ inline void setFlags(uint32_t flag) noexcept { _flags = uint8_t(flag); };
+ //! Adds the calling convention flags, see `Flags`.
+ inline void addFlags(uint32_t flags) noexcept { _flags = uint8_t(_flags | flags); };
+
+ //! Tests whether this calling convention specifies 'RedZone'.
+ inline bool hasRedZone() const noexcept { return _redZoneSize != 0; }
+ //! Tests whether this calling convention specifies 'SpillZone'.
+ inline bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
+
+ //! Returns size of 'RedZone'.
+ inline uint32_t redZoneSize() const noexcept { return _redZoneSize; }
+ //! Returns size of 'SpillZone'.
+ inline uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
+
+ //! Sets size of 'RedZone'.
+ inline void setRedZoneSize(uint32_t size) noexcept { _redZoneSize = uint8_t(size); }
+ //! Sets size of 'SpillZone'.
+ inline void setSpillZoneSize(uint32_t size) noexcept { _spillZoneSize = uint8_t(size); }
+
+ //! Returns a natural stack alignment.
+ inline uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
+ //! Sets a natural stack alignment.
+ //!
+ //! This function can be used to override the default stack alignment in case
+ //! that you know that it's alignment is different. For example it allows to
+ //! implement custom calling conventions that guarantee higher stack alignment.
+ inline void setNaturalStackAlignment(uint32_t value) noexcept { _naturalStackAlignment = uint8_t(value); }
+
+ //! Returns the order of passed registers of the given `group`, see \ref BaseReg::RegGroup.
+ inline const uint8_t* passedOrder(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _passedOrder[group].id;
+ }
+
+ //! Returns the mask of passed registers of the given `group`, see \ref BaseReg::RegGroup.
+ inline uint32_t passedRegs(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _passedRegs[group];
+ }
+
+ inline void _setPassedPacked(uint32_t group, uint32_t p0, uint32_t p1, uint32_t p2, uint32_t p3) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+ _passedOrder[group].packed[0] = p0;
+ _passedOrder[group].packed[1] = p1;
+ _passedOrder[group].packed[2] = p2;
+ _passedOrder[group].packed[3] = p3;
+ }
+
+ //! Resets the order and mask of passed registers.
+ inline void setPassedToNone(uint32_t group) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+ _setPassedPacked(group, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu);
+ _passedRegs[group] = 0u;
+ }
+
+ //! Sets the order and mask of passed registers.
+ inline void setPassedOrder(uint32_t group, uint32_t a0, uint32_t a1 = 0xFF, uint32_t a2 = 0xFF, uint32_t a3 = 0xFF, uint32_t a4 = 0xFF, uint32_t a5 = 0xFF, uint32_t a6 = 0xFF, uint32_t a7 = 0xFF) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+ // NOTE: This should always be called with all arguments known at compile time,
+ // so even if it looks scary it should be translated into few instructions.
+ _setPassedPacked(group, Support::bytepack32_4x8(a0, a1, a2, a3),
+ Support::bytepack32_4x8(a4, a5, a6, a7),
+ 0xFFFFFFFFu,
+ 0xFFFFFFFFu);
+
+ _passedRegs[group] = (a0 != 0xFF ? 1u << a0 : 0u) |
+ (a1 != 0xFF ? 1u << a1 : 0u) |
+ (a2 != 0xFF ? 1u << a2 : 0u) |
+ (a3 != 0xFF ? 1u << a3 : 0u) |
+ (a4 != 0xFF ? 1u << a4 : 0u) |
+ (a5 != 0xFF ? 1u << a5 : 0u) |
+ (a6 != 0xFF ? 1u << a6 : 0u) |
+ (a7 != 0xFF ? 1u << a7 : 0u) ;
+ }
+
+ //! Returns preserved register mask of the given `group`, see \ref BaseReg::RegGroup.
+ inline uint32_t preservedRegs(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _preservedRegs[group];
+ }
+
+ //! Sets preserved register mask of the given `group`, see \ref BaseReg::RegGroup.
+ inline void setPreservedRegs(uint32_t group, uint32_t regs) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ _preservedRegs[group] = regs;
+ }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CALLCONV_H_INCLUDED
diff --git a/client/asmjit/core/codebuffer.h b/client/asmjit/core/codebuffer.h
new file mode 100644
index 0000000..76c86b1
--- /dev/null
+++ b/client/asmjit/core/codebuffer.h
@@ -0,0 +1,126 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CODEBUFFER_H_INCLUDED
+#define ASMJIT_CORE_CODEBUFFER_H_INCLUDED
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::CodeBuffer]
+// ============================================================================
+
+//! Code or data buffer.
+struct CodeBuffer {
+ //! The content of the buffer (data).
+ uint8_t* _data;
+ //! Number of bytes of `data` used.
+ size_t _size;
+ //! Buffer capacity (in bytes).
+ size_t _capacity;
+ //! Buffer flags.
+ uint32_t _flags;
+
+ //! Code buffer flags.
+ enum Flags : uint32_t {
+ //! Buffer is external (not allocated by asmjit).
+ kFlagIsExternal = 0x00000001u,
+ //! Buffer is fixed (cannot be reallocated).
+ kFlagIsFixed = 0x00000002u
+ };
+
+ //! \name Overloaded Operators
+ //! \{
+
+  //! Returns a reference to the byte at the given `index`.
+ inline uint8_t& operator[](size_t index) noexcept {
+ ASMJIT_ASSERT(index < _size);
+ return _data[index];
+ }
+ //! \overload
+ inline const uint8_t& operator[](size_t index) const noexcept {
+ ASMJIT_ASSERT(index < _size);
+ return _data[index];
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns code buffer flags, see \ref Flags.
+ inline uint32_t flags() const noexcept { return _flags; }
+ //! Tests whether the code buffer has the given `flag` set.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+
+ //! Tests whether this code buffer has a fixed size.
+ //!
+ //! Fixed size means that the code buffer is fixed and cannot grow.
+ inline bool isFixed() const noexcept { return hasFlag(kFlagIsFixed); }
+
+ //! Tests whether the data in this code buffer is external.
+ //!
+ //! External data can only be provided by users, it's never used by AsmJit.
+ inline bool isExternal() const noexcept { return hasFlag(kFlagIsExternal); }
+
+ //! Tests whether the data in this code buffer is allocated (non-null).
+ inline bool isAllocated() const noexcept { return _data != nullptr; }
+
+ //! Tests whether the code buffer is empty.
+ inline bool empty() const noexcept { return !_size; }
+
+ //! Returns the size of the data.
+ inline size_t size() const noexcept { return _size; }
+ //! Returns the capacity of the data.
+ inline size_t capacity() const noexcept { return _capacity; }
+
+ //! Returns the pointer to the data the buffer references.
+ inline uint8_t* data() noexcept { return _data; }
+ //! \overload
+ inline const uint8_t* data() const noexcept { return _data; }
+
+ //! \}
+
+ //! \name Iterators
+ //! \{
+
+ inline uint8_t* begin() noexcept { return _data; }
+ inline const uint8_t* begin() const noexcept { return _data; }
+
+ inline uint8_t* end() noexcept { return _data + _size; }
+ inline const uint8_t* end() const noexcept { return _data + _size; }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CODEBUFFER_H_INCLUDED
+
diff --git a/client/asmjit/core/codebufferwriter_p.h b/client/asmjit/core/codebufferwriter_p.h
new file mode 100644
index 0000000..75ee047
--- /dev/null
+++ b/client/asmjit/core/codebufferwriter_p.h
@@ -0,0 +1,189 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
+#define ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
+
+#include "../core/assembler.h"
+#include "../core/codebuffer.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_assembler
+//! \{
+
+// ============================================================================
+// [asmjit::CodeBufferWriter]
+// ============================================================================
+
+//! Helper that is used to write into a `CodeBuffer` held by `BaseAssembler`.
+class CodeBufferWriter {
+public:
+ uint8_t* _cursor;
+
+ ASMJIT_INLINE explicit CodeBufferWriter(BaseAssembler* a) noexcept
+ : _cursor(a->_bufferPtr) {}
+
+ ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
+ size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
+ if (ASMJIT_UNLIKELY(remainingSpace < n)) {
+ CodeBuffer& buffer = a->_section->_buffer;
+ Error err = a->_code->growBuffer(&buffer, n);
+ if (ASMJIT_UNLIKELY(err))
+ return a->reportError(err);
+ _cursor = a->_bufferPtr;
+ }
+ return kErrorOk;
+ }
+
+ ASMJIT_INLINE uint8_t* cursor() const noexcept { return _cursor; }
+ ASMJIT_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
+ ASMJIT_INLINE void advance(size_t n) noexcept { _cursor += n; }
+
+ ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
+ ASMJIT_ASSERT(_cursor >= from);
+ return (size_t)(_cursor - from);
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit8(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ _cursor[0] = uint8_t(U(val) & U(0xFF));
+ _cursor++;
+ }
+
+ template<typename T, typename Y>
+ ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ ASMJIT_ASSERT(size_t(cond) <= 1u);
+
+ _cursor[0] = uint8_t(U(val) & U(0xFF));
+ _cursor += size_t(cond);
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit16uLE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU16uLE(_cursor, uint32_t(U(val) & 0xFFFFu));
+ _cursor += 2;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit16uBE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU16uBE(_cursor, uint32_t(U(val) & 0xFFFFu));
+ _cursor += 2;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit32uLE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
+ _cursor += 4;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit32uBE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
+ _cursor += 4;
+ }
+
+ ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
+ ASMJIT_ASSERT(size != 0);
+ memcpy(_cursor, data, size);
+ _cursor += size;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ ASMJIT_ASSERT(size <= sizeof(T));
+
+ U v = U(value);
+ for (uint32_t i = 0; i < size; i++) {
+ _cursor[i] = uint8_t(v & 0xFFu);
+ v >>= 8;
+ }
+ _cursor += size;
+ }
+
+  template<typename T>
+  ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
+    typedef typename std::make_unsigned<T>::type U;
+    ASMJIT_ASSERT(size <= sizeof(T));
+
+    U v = U(value);
+    for (uint32_t i = 0; i < size; i++) {
+      // Emit most-significant byte first: `sizeof(T) * 8 - 8` selects the top
+      // byte (the original `sizeof(T) - 8` underflowed for small T, which is
+      // UB as a shift count, and never produced big-endian byte order).
+      _cursor[i] = uint8_t(v >> (sizeof(T) * 8 - 8));
+      v <<= 8;
+    }
+    _cursor += size;
+  }
+
+ ASMJIT_INLINE void emitZeros(size_t size) noexcept {
+ ASMJIT_ASSERT(size != 0);
+ memset(_cursor, 0, size);
+ _cursor += size;
+ }
+
+ ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
+ ASMJIT_ASSERT(where < _cursor);
+
+ uint8_t* p = where;
+ while (++p != _cursor)
+ p[-1] = p[0];
+ _cursor--;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
+ uint8_t* p = _cursor;
+
+ while (p != where) {
+ p[0] = p[-1];
+ p--;
+ }
+
+ *p = uint8_t(val & 0xFF);
+ _cursor++;
+ }
+
+ ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
+ CodeBuffer& buffer = a->_section->_buffer;
+ size_t newSize = (size_t)(_cursor - a->_bufferData);
+ ASMJIT_ASSERT(newSize <= buffer.capacity());
+
+ a->_bufferPtr = _cursor;
+ buffer._size = Support::max(buffer._size, newSize);
+ }
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
diff --git a/client/asmjit/core/codeholder.cpp b/client/asmjit/core/codeholder.cpp
new file mode 100644
index 0000000..69032e8
--- /dev/null
+++ b/client/asmjit/core/codeholder.cpp
@@ -0,0 +1,1110 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/assembler.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [Globals]
+// ============================================================================
+
+static const char CodeHolder_addrTabName[] = ".addrtab";
+
+//! Encode MOD byte.
+static inline uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept {
+ return (m << 6) | (o << 3) | rm;
+}
+
+// ============================================================================
+// [asmjit::LabelLinkIterator]
+// ============================================================================
+
+class LabelLinkIterator {
+public:
+ ASMJIT_INLINE LabelLinkIterator(LabelEntry* le) noexcept { reset(le); }
+
+ ASMJIT_INLINE explicit operator bool() const noexcept { return isValid(); }
+ ASMJIT_INLINE bool isValid() const noexcept { return _link != nullptr; }
+
+ ASMJIT_INLINE LabelLink* link() const noexcept { return _link; }
+ ASMJIT_INLINE LabelLink* operator->() const noexcept { return _link; }
+
+ ASMJIT_INLINE void reset(LabelEntry* le) noexcept {
+ _pPrev = &le->_links;
+ _link = *_pPrev;
+ }
+
+ ASMJIT_INLINE void next() noexcept {
+ _pPrev = &_link->next;
+ _link = *_pPrev;
+ }
+
+ ASMJIT_INLINE void resolveAndNext(CodeHolder* code) noexcept {
+ LabelLink* linkToDelete = _link;
+
+ _link = _link->next;
+ *_pPrev = _link;
+
+ code->_unresolvedLinkCount--;
+ code->_allocator.release(linkToDelete, sizeof(LabelLink));
+ }
+
+ LabelLink** _pPrev;
+ LabelLink* _link;
+};
+
+// ============================================================================
+// [asmjit::CodeHolder - Utilities]
+// ============================================================================
+
+static void CodeHolder_resetInternal(CodeHolder* self, uint32_t resetPolicy) noexcept {
+ uint32_t i;
+ const ZoneVector<BaseEmitter*>& emitters = self->emitters();
+
+ i = emitters.size();
+ while (i)
+ self->detach(emitters[--i]);
+
+ // Reset everything into its construction state.
+ self->_environment.reset();
+ self->_baseAddress = Globals::kNoBaseAddress;
+ self->_logger = nullptr;
+ self->_errorHandler = nullptr;
+
+ // Reset all sections.
+ uint32_t numSections = self->_sections.size();
+ for (i = 0; i < numSections; i++) {
+ Section* section = self->_sections[i];
+ if (section->_buffer.data() && !section->_buffer.isExternal())
+ ::free(section->_buffer._data);
+ section->_buffer._data = nullptr;
+ section->_buffer._capacity = 0;
+ }
+
+ // Reset zone allocator and all containers using it.
+ ZoneAllocator* allocator = self->allocator();
+
+ self->_emitters.reset();
+ self->_namedLabels.reset();
+ self->_relocations.reset();
+ self->_labelEntries.reset();
+ self->_sections.reset();
+
+ self->_unresolvedLinkCount = 0;
+ self->_addressTableSection = nullptr;
+ self->_addressTableEntries.reset();
+
+ allocator->reset(&self->_zone);
+ self->_zone.reset(resetPolicy);
+}
+
+static void CodeHolder_onSettingsUpdated(CodeHolder* self) noexcept {
+ // Notify all attached emitters about a settings update.
+ for (BaseEmitter* emitter : self->emitters()) {
+ emitter->onSettingsUpdated();
+ }
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Construction / Destruction]
+// ============================================================================
+
+CodeHolder::CodeHolder() noexcept
+ : _environment(),
+ _baseAddress(Globals::kNoBaseAddress),
+ _logger(nullptr),
+ _errorHandler(nullptr),
+ _zone(16384 - Zone::kBlockOverhead),
+ _allocator(&_zone),
+ _unresolvedLinkCount(0),
+ _addressTableSection(nullptr) {}
+
+CodeHolder::~CodeHolder() noexcept {
+ CodeHolder_resetInternal(this, Globals::kResetHard);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Init / Reset]
+// ============================================================================
+
+inline void CodeHolder_setSectionDefaultName(
+ Section* section,
+ char c0 = 0, char c1 = 0, char c2 = 0, char c3 = 0,
+ char c4 = 0, char c5 = 0, char c6 = 0, char c7 = 0) noexcept {
+
+ section->_name.u32[0] = Support::bytepack32_4x8(uint8_t(c0), uint8_t(c1), uint8_t(c2), uint8_t(c3));
+ section->_name.u32[1] = Support::bytepack32_4x8(uint8_t(c4), uint8_t(c5), uint8_t(c6), uint8_t(c7));
+}
+
+Error CodeHolder::init(const Environment& environment, uint64_t baseAddress) noexcept {
+ // Cannot reinitialize if it's locked or there is one or more emitter attached.
+ if (isInitialized())
+ return DebugUtils::errored(kErrorAlreadyInitialized);
+
+ // If we are just initializing there should be no emitters attached.
+ ASMJIT_ASSERT(_emitters.empty());
+
+ // Create a default section and insert it to the `_sections` array.
+ Error err = _sections.willGrow(&_allocator);
+ if (err == kErrorOk) {
+ Section* section = _allocator.allocZeroedT<Section>();
+ if (ASMJIT_LIKELY(section)) {
+ section->_flags = Section::kFlagExec | Section::kFlagConst;
+ CodeHolder_setSectionDefaultName(section, '.', 't', 'e', 'x', 't');
+ _sections.appendUnsafe(section);
+ }
+ else {
+ err = DebugUtils::errored(kErrorOutOfMemory);
+ }
+ }
+
+ if (ASMJIT_UNLIKELY(err)) {
+ _zone.reset();
+ return err;
+ }
+ else {
+ _environment = environment;
+ _baseAddress = baseAddress;
+ return kErrorOk;
+ }
+}
+
+void CodeHolder::reset(uint32_t resetPolicy) noexcept {
+ CodeHolder_resetInternal(this, resetPolicy);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Attach / Detach]
+// ============================================================================
+
+Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
+ // Catch a possible misuse of the API.
+ if (ASMJIT_UNLIKELY(!emitter))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ // Invalid emitter, this should not be possible.
+ uint32_t type = emitter->emitterType();
+ if (ASMJIT_UNLIKELY(type == BaseEmitter::kTypeNone || type >= BaseEmitter::kTypeCount))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ // This is suspicious, but don't fail if `emitter` is already attached
+ // to this code holder. This is not error, but it's not recommended.
+ if (emitter->_code != nullptr) {
+ if (emitter->_code == this)
+ return kErrorOk;
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ // Reserve the space now as we cannot fail after `onAttach()` succeeded.
+ ASMJIT_PROPAGATE(_emitters.willGrow(&_allocator, 1));
+ ASMJIT_PROPAGATE(emitter->onAttach(this));
+
+ // Connect CodeHolder <-> BaseEmitter.
+ ASMJIT_ASSERT(emitter->_code == this);
+ _emitters.appendUnsafe(emitter);
+
+ return kErrorOk;
+}
+
// Detaches `emitter` from this CodeHolder.
//
// The emitter is always disconnected when this function is entered with a
// valid argument; an error returned by `onDetach()` is propagated to the
// caller but does not prevent the disconnect.
Error CodeHolder::detach(BaseEmitter* emitter) noexcept {
  if (ASMJIT_UNLIKELY(!emitter))
    return DebugUtils::errored(kErrorInvalidArgument);

  // Only an emitter attached to *this* holder can be detached from it.
  if (ASMJIT_UNLIKELY(emitter->_code != this))
    return DebugUtils::errored(kErrorInvalidState);

  // NOTE: We always detach if we were asked to, if error happens during
  // `emitter->onDetach()` we just propagate it, but the BaseEmitter will
  // be detached.
  Error err = kErrorOk;
  if (!emitter->isDestroyed())
    err = emitter->onDetach(this);

  // Disconnect CodeHolder <-> BaseEmitter.
  uint32_t index = _emitters.indexOf(emitter);
  ASMJIT_ASSERT(index != Globals::kNotFound);

  _emitters.removeAt(index);
  emitter->_code = nullptr;

  return err;
}
+
+// ============================================================================
+// [asmjit::CodeHolder - Logging]
+// ============================================================================
+
// Sets (or clears, with nullptr) the logger attached to this CodeHolder.
// A no-op when the library is compiled with ASMJIT_NO_LOGGING.
void CodeHolder::setLogger(Logger* logger) noexcept {
#ifndef ASMJIT_NO_LOGGING
  _logger = logger;
  // Re-apply holder settings via the file-local helper (defined earlier in
  // this file) so dependent state observes the new logger.
  CodeHolder_onSettingsUpdated(this);
#else
  DebugUtils::unused(logger);
#endif
}
+
+// ============================================================================
+// [asmjit::CodeHolder - Error Handling]
+// ============================================================================
+
// Sets (or clears, with nullptr) the error handler attached to this
// CodeHolder and re-applies holder settings via the file-local
// `CodeHolder_onSettingsUpdated` helper.
void CodeHolder::setErrorHandler(ErrorHandler* errorHandler) noexcept {
  _errorHandler = errorHandler;
  CodeHolder_onSettingsUpdated(this);
}
+
+// ============================================================================
+// [asmjit::CodeHolder - Code Buffer]
+// ============================================================================
+
// Reallocates the code buffer `cb` so it can hold `n` bytes and fixes up the
// cursor pointers of every attached assembler that emits into this buffer.
static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t n) noexcept {
  uint8_t* oldData = cb->_data;
  uint8_t* newData;

  // Internally-owned buffers are resized in place; otherwise a fresh block
  // is allocated.
  //
  // NOTE(review): on the external-buffer path the old contents are not
  // copied into `newData`. Presumably external buffers are always fixed and
  // never reach this point (both callers check `isFixed()` first) - confirm.
  if (oldData && !cb->isExternal())
    newData = static_cast<uint8_t*>(::realloc(oldData, n));
  else
    newData = static_cast<uint8_t*>(::malloc(n));

  // On failure `realloc` leaves the original block intact, so `cb` still
  // points to valid memory and we can just report out-of-memory.
  if (ASMJIT_UNLIKELY(!newData))
    return DebugUtils::errored(kErrorOutOfMemory);

  cb->_data = newData;
  cb->_capacity = n;

  // Update pointers used by assemblers, if attached.
  for (BaseEmitter* emitter : self->emitters()) {
    if (emitter->isAssembler()) {
      BaseAssembler* a = static_cast<BaseAssembler*>(emitter);
      if (&a->_section->_buffer == cb) {
        // Preserve the assembler's write position relative to the new block.
        size_t offset = a->offset();

        a->_bufferData = newData;
        a->_bufferEnd = newData + n;
        a->_bufferPtr = newData + offset;
      }
    }
  }

  return kErrorOk;
}
+
// Grows `cb` so at least `n` additional bytes fit after the current size.
//
// Growth is geometric (doubling) below `Globals::kGrowThreshold` and linear
// above it; `Globals::kAllocOverhead` is subtracted from the final request
// so the underlying allocation stays allocator-friendly.
Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
  // The size of the section must be valid.
  size_t size = cb->size();
  if (ASMJIT_UNLIKELY(n > std::numeric_limits<uintptr_t>::max() - size))
    return DebugUtils::errored(kErrorOutOfMemory);

  // We can now check if growing the buffer is really necessary. It's unlikely
  // that this function is called while there is still room for `n` bytes.
  size_t capacity = cb->capacity();
  size_t required = cb->size() + n;
  if (ASMJIT_UNLIKELY(required <= capacity))
    return kErrorOk;

  // A fixed buffer cannot be reallocated.
  if (cb->isFixed())
    return DebugUtils::errored(kErrorTooLarge);

  // NOTE(review): 8096 looks like a typo for 8192 (8 KiB). Harmless - it
  // only selects the starting capacity - but confirm intent before changing.
  size_t kInitialCapacity = 8096;
  if (capacity < kInitialCapacity)
    capacity = kInitialCapacity;
  else
    capacity += Globals::kAllocOverhead;

  do {
    size_t old = capacity;
    if (capacity < Globals::kGrowThreshold)
      capacity *= 2;
    else
      capacity += Globals::kGrowThreshold;

    // Overflow.
    if (ASMJIT_UNLIKELY(old > capacity))
      return DebugUtils::errored(kErrorOutOfMemory);
  } while (capacity - Globals::kAllocOverhead < required);

  return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead);
}
+
+Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
+ size_t capacity = cb->capacity();
+
+ if (n <= capacity)
+ return kErrorOk;
+
+ if (cb->isFixed())
+ return DebugUtils::errored(kErrorTooLarge);
+
+ return CodeHolder_reserveInternal(this, cb, n);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Sections]
+// ============================================================================
+
// Creates a new section and stores it into `sectionOut` (nullptr on failure).
//
// `nameSize == SIZE_MAX` means `name` is NUL-terminated. The name must fit
// into `Globals::kMaxSectionNameSize` and `alignment` must be a power of 2
// (an alignment of 0 is normalized to 1). The section id is its index in
// `_sections`.
Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, uint32_t flags, uint32_t alignment) noexcept {
  *sectionOut = nullptr;

  if (nameSize == SIZE_MAX)
    nameSize = strlen(name);

  if (alignment == 0)
    alignment = 1;

  if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment)))
    return DebugUtils::errored(kErrorInvalidArgument);

  if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize))
    return DebugUtils::errored(kErrorInvalidSectionName);

  uint32_t sectionId = _sections.size();
  if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId))
    return DebugUtils::errored(kErrorTooManySections);

  // Reserve space first so `appendUnsafe()` below cannot fail.
  ASMJIT_PROPAGATE(_sections.willGrow(&_allocator));
  Section* section = _allocator.allocZeroedT<Section>();

  if (ASMJIT_UNLIKELY(!section))
    return DebugUtils::errored(kErrorOutOfMemory);

  section->_id = sectionId;
  section->_flags = flags;
  section->_alignment = alignment;
  // The section was zero-allocated, so the name stays NUL-terminated.
  memcpy(section->_name.str, name, nameSize);
  _sections.appendUnsafe(section);

  *sectionOut = section;
  return kErrorOk;
}
+
+Section* CodeHolder::sectionByName(const char* name, size_t nameSize) const noexcept {
+ if (nameSize == SIZE_MAX)
+ nameSize = strlen(name);
+
+ // This could be also put in a hash-table similarly like we do with labels,
+ // however it's questionable as the number of sections should be pretty low
+ // in general. Create an issue if this becomes a problem.
+ if (nameSize <= Globals::kMaxSectionNameSize) {
+ for (Section* section : _sections)
+ if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0')
+ return section;
+ }
+
+ return nullptr;
+}
+
// Returns the address-table section, creating it on first use.
//
// Returns nullptr when creation failed - the error from `newSection()` is
// intentionally dropped and callers test the returned pointer instead.
// `CodeHolder_addrTabName` is a file-local constant defined earlier.
Section* CodeHolder::ensureAddressTableSection() noexcept {
  if (_addressTableSection)
    return _addressTableSection;

  newSection(&_addressTableSection, CodeHolder_addrTabName, sizeof(CodeHolder_addrTabName) - 1, 0, _environment.registerSize());
  return _addressTableSection;
}
+
// Records `address` in the address table (idempotent - an address that is
// already present is a no-op). Each new entry grows the virtual size of the
// address-table section by one register-sized slot.
Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept {
  AddressTableEntry* entry = _addressTableEntries.get(address);
  if (entry)
    return kErrorOk;

  Section* section = ensureAddressTableSection();
  if (ASMJIT_UNLIKELY(!section))
    return DebugUtils::errored(kErrorOutOfMemory);

  entry = _zone.newT<AddressTableEntry>(address);
  if (ASMJIT_UNLIKELY(!entry))
    return DebugUtils::errored(kErrorOutOfMemory);

  _addressTableEntries.insert(entry);
  section->_virtualSize += _environment.registerSize();

  return kErrorOk;
}
+
+// ============================================================================
+// [asmjit::CodeHolder - Labels / Symbols]
+// ============================================================================
+
//! Only used to lookup a label from `_namedLabels`.
//!
//! Acts as a lookup key: it carries the name bytes, their size, the
//! precomputed hash and the parent label id, and matches a stored
//! `LabelEntry` only when all of them agree.
class LabelByName {
public:
  inline LabelByName(const char* key, size_t keySize, uint32_t hashCode, uint32_t parentId) noexcept
    : _key(key),
      _keySize(uint32_t(keySize)),
      _hashCode(hashCode),
      _parentId(parentId) {}

  inline uint32_t hashCode() const noexcept { return _hashCode; }

  // Full comparison against a stored entry: size, parent id, then bytes.
  inline bool matches(const LabelEntry* entry) const noexcept {
    return entry->nameSize() == _keySize &&
           entry->parentId() == _parentId &&
           ::memcmp(entry->name(), _key, _keySize) == 0;
  }

  const char* _key;       // Name bytes (not owned, not NUL-terminated).
  uint32_t _keySize;      // Name size in bytes.
  uint32_t _hashCode;     // Precomputed hash (parent id already mixed in).
  uint32_t _parentId;     // Parent label id or Globals::kInvalidId.
};
+
+// Returns a hash of `name` and fixes `nameSize` if it's `SIZE_MAX`.
+static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize) noexcept {
+ uint32_t hashCode = 0;
+ if (nameSize == SIZE_MAX) {
+ size_t i = 0;
+ for (;;) {
+ uint8_t c = uint8_t(name[i]);
+ if (!c) break;
+ hashCode = Support::hashRound(hashCode, c);
+ i++;
+ }
+ nameSize = i;
+ }
+ else {
+ for (size_t i = 0; i < nameSize; i++) {
+ uint8_t c = uint8_t(name[i]);
+ if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName);
+ hashCode = Support::hashRound(hashCode, c);
+ }
+ }
+ return hashCode;
+}
+
+static bool CodeHolder_writeDisplacement(void* dst, int64_t displacement, uint32_t displacementSize) {
+ if (displacementSize == 4 && Support::isInt32(displacement)) {
+ Support::writeI32uLE(dst, int32_t(displacement));
+ return true;
+ }
+ else if (displacementSize == 1 && Support::isInt8(displacement)) {
+ Support::writeI8(dst, int8_t(displacement));
+ return true;
+ }
+
+ return false;
+}
+
// Allocates a new unresolved link for label entry `le` and prepends it to
// the entry's link list. `sectionId`/`offset` locate the patch site and
// `rel` is the relative adjustment applied when the link is resolved.
// Returns nullptr on allocation failure.
LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept {
  LabelLink* link = _allocator.allocT<LabelLink>();
  if (ASMJIT_UNLIKELY(!link)) return nullptr;

  // Prepend to the entry's singly-linked list of links.
  link->next = le->_links;
  le->_links = link;

  link->sectionId = sectionId;
  link->relocId = Globals::kInvalidId;
  link->offset = offset;
  link->rel = rel;

  // Tracked so `hasUnresolvedLinks()` stays cheap.
  _unresolvedLinkCount++;
  return link;
}
+
// Creates a new anonymous label entry and stores it into `entryOut`
// (nullptr on failure). The label id is its index in `_labelEntries`; the
// new entry has no parent and is unbound (offset 0, no section).
Error CodeHolder::newLabelEntry(LabelEntry** entryOut) noexcept {
  *entryOut = nullptr;

  uint32_t labelId = _labelEntries.size();
  if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
    return DebugUtils::errored(kErrorTooManyLabels);

  // Reserve space first so `appendUnsafe()` below cannot fail.
  ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
  LabelEntry* le = _allocator.allocZeroedT<LabelEntry>();

  if (ASMJIT_UNLIKELY(!le))
    return DebugUtils::errored(kErrorOutOfMemory);

  le->_setId(labelId);
  le->_parentId = Globals::kInvalidId;
  le->_offset = 0;
  _labelEntries.appendUnsafe(le);

  *entryOut = le;
  return kErrorOk;
}
+
+Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, uint32_t type, uint32_t parentId) noexcept {
+ *entryOut = nullptr;
+ uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
+
+ if (ASMJIT_UNLIKELY(nameSize == 0))
+ return DebugUtils::errored(kErrorInvalidLabelName);
+
+ if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxLabelNameSize))
+ return DebugUtils::errored(kErrorLabelNameTooLong);
+
+ switch (type) {
+ case Label::kTypeLocal:
+ if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size()))
+ return DebugUtils::errored(kErrorInvalidParentLabel);
+
+ hashCode ^= parentId;
+ break;
+
+ case Label::kTypeGlobal:
+ if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId))
+ return DebugUtils::errored(kErrorNonLocalLabelCannotHaveParent);
+
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidArgument);
+ }
+
+ // Don't allow to insert duplicates. Local labels allow duplicates that have
+ // different id, this is already accomplished by having a different hashes
+ // between the same label names having different parent labels.
+ LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId));
+ if (ASMJIT_UNLIKELY(le))
+ return DebugUtils::errored(kErrorLabelAlreadyDefined);
+
+ Error err = kErrorOk;
+ uint32_t labelId = _labelEntries.size();
+
+ if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
+ return DebugUtils::errored(kErrorTooManyLabels);
+
+ ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
+ le = _allocator.allocZeroedT<LabelEntry>();
+
+ if (ASMJIT_UNLIKELY(!le))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ le->_hashCode = hashCode;
+ le->_setId(labelId);
+ le->_type = uint8_t(type);
+ le->_parentId = parentId;
+ le->_offset = 0;
+ ASMJIT_PROPAGATE(le->_name.setData(&_zone, name, nameSize));
+
+ _labelEntries.appendUnsafe(le);
+ _namedLabels.insert(allocator(), le);
+
+ *entryOut = le;
+ return err;
+}
+
// Returns the id of the named label, or `Globals::kInvalidId` when no label
// with this name (and parent) exists.
//
// NOTE(review): an empty name returns 0 rather than kInvalidId - verify
// that all callers treat 0 consistently for this case.
uint32_t CodeHolder::labelIdByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
  uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
  if (ASMJIT_UNLIKELY(!nameSize))
    return 0;

  // Must mirror the hashing done in `newNamedLabelEntry()` for locals.
  if (parentId != Globals::kInvalidId)
    hashCode ^= parentId;

  LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId));
  return le ? le->id() : uint32_t(Globals::kInvalidId);
}
+
// Resolves links that could not be resolved when their labels were bound
// (typically cross-section links). Patches the displacements in place.
//
// Resolvable links are consumed even when other links fail; the last error
// encountered (kErrorInvalidDisplacement) is returned in that case.
ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept {
  if (!hasUnresolvedLinks())
    return kErrorOk;

  Error err = kErrorOk;
  for (LabelEntry* le : labelEntries()) {
    // Links of unbound labels cannot be resolved yet.
    if (!le->isBound())
      continue;

    LabelLinkIterator link(le);
    if (link) {
      Support::FastUInt8 of = 0;
      // Absolute-within-image offset of the label (section base + offset).
      Section* toSection = le->section();
      uint64_t toOffset = Support::addOverflow(toSection->offset(), le->offset(), &of);

      do {
        uint32_t linkSectionId = link->sectionId;
        // Links with an associated relocation are handled by `relocateToBase()`.
        if (link->relocId == Globals::kInvalidId) {
          Section* fromSection = sectionById(linkSectionId);
          size_t linkOffset = link->offset;

          CodeBuffer& buf = _sections[linkSectionId]->buffer();
          ASMJIT_ASSERT(linkOffset < buf.size());

          // Calculate the offset relative to the start of the virtual base.
          uint64_t fromOffset = Support::addOverflow<uint64_t>(fromSection->offset(), linkOffset, &of);
          int64_t displacement = int64_t(toOffset - fromOffset + uint64_t(int64_t(link->rel)));

          // `of` accumulates overflow from both additions above.
          if (!of) {
            ASMJIT_ASSERT(size_t(linkOffset) < buf.size());

            // Size of the value we are going to patch. Only BYTE/DWORD is allowed.
            uint32_t displacementSize = buf._data[linkOffset];
            ASMJIT_ASSERT(buf.size() - size_t(linkOffset) >= displacementSize);

            // Overwrite a real displacement in the CodeBuffer.
            if (CodeHolder_writeDisplacement(buf._data + linkOffset, displacement, displacementSize)) {
              link.resolveAndNext(this);
              continue;
            }
          }

          err = DebugUtils::errored(kErrorInvalidDisplacement);
          // Falls through to `link.next()`.
        }

        link.next();
      } while (link);
    }
  }

  return err;
}
+
+ASMJIT_API Error CodeHolder::bindLabel(const Label& label, uint32_t toSectionId, uint64_t toOffset) noexcept {
+ LabelEntry* le = labelEntry(label);
+ if (ASMJIT_UNLIKELY(!le))
+ return DebugUtils::errored(kErrorInvalidLabel);
+
+ if (ASMJIT_UNLIKELY(toSectionId > _sections.size()))
+ return DebugUtils::errored(kErrorInvalidSection);
+
+ // Label can be bound only once.
+ if (ASMJIT_UNLIKELY(le->isBound()))
+ return DebugUtils::errored(kErrorLabelAlreadyBound);
+
+ // Bind the label.
+ Section* section = _sections[toSectionId];
+ le->_section = section;
+ le->_offset = toOffset;
+
+ Error err = kErrorOk;
+ CodeBuffer& buf = section->buffer();
+
+ // Fix all links to this label we have collected so far if they are within
+ // the same section. We ignore any inter-section links as these have to be
+ // fixed later.
+ LabelLinkIterator link(le);
+ while (link) {
+ uint32_t linkSectionId = link->sectionId;
+ size_t linkOffset = link->offset;
+
+ uint32_t relocId = link->relocId;
+ if (relocId != Globals::kInvalidId) {
+ // Adjust relocation data only.
+ RelocEntry* re = _relocations[relocId];
+ re->_payload += toOffset;
+ re->_targetSectionId = toSectionId;
+ }
+ else {
+ if (linkSectionId != toSectionId) {
+ link.next();
+ continue;
+ }
+
+ ASMJIT_ASSERT(linkOffset < buf.size());
+ int64_t displacement = int64_t(toOffset - uint64_t(linkOffset) + uint64_t(int64_t(link->rel)));
+
+ // Size of the value we are going to patch. Only BYTE/DWORD is allowed.
+ uint32_t displacementSize = buf._data[linkOffset];
+ ASMJIT_ASSERT(buf.size() - size_t(linkOffset) >= displacementSize);
+
+ // Overwrite a real displacement in the CodeBuffer.
+ if (!CodeHolder_writeDisplacement(buf._data + linkOffset, displacement, displacementSize)) {
+ err = DebugUtils::errored(kErrorInvalidDisplacement);
+ link.next();
+ continue;
+ }
+ }
+
+ link.resolveAndNext(this);
+ }
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Relocations]
+// ============================================================================
+
// Creates a new relocation entry of the given `relocType` and `valueSize`
// and stores it into `dst`. The entry id is its index in `_relocations`;
// source/target section ids start out invalid and are filled in later.
Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t relocType, uint32_t valueSize) noexcept {
  // Reserve space first so `appendUnsafe()` below cannot fail.
  ASMJIT_PROPAGATE(_relocations.willGrow(&_allocator));

  uint32_t relocId = _relocations.size();
  if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId))
    return DebugUtils::errored(kErrorTooManyRelocations);

  RelocEntry* re = _allocator.allocZeroedT<RelocEntry>();
  if (ASMJIT_UNLIKELY(!re))
    return DebugUtils::errored(kErrorOutOfMemory);

  re->_id = relocId;
  re->_relocType = uint8_t(relocType);
  re->_valueSize = uint8_t(valueSize);
  re->_sourceSectionId = Globals::kInvalidId;
  re->_targetSectionId = Globals::kInvalidId;
  _relocations.appendUnsafe(re);

  *dst = re;
  return kErrorOk;
}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Expression Evaluation]
+// ============================================================================
+
// Recursively evaluates the expression tree `exp` into `*out`.
//
// Operands may be constants, bound labels (their absolute-within-image
// offset), or nested expressions. An unbound label operand fails with
// kErrorExpressionLabelNotBound. Shift amounts greater than 63 yield 0 for
// SLL/SRL; SRA clamps the amount at 63 instead (sign fill).
static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, uint64_t* out) noexcept {
  uint64_t value[2];
  // Evaluate both operands first.
  for (size_t i = 0; i < 2; i++) {
    uint64_t v;
    switch (exp->valueType[i]) {
      case Expression::kValueNone: {
        // Missing operand (unary use) evaluates to 0.
        v = 0;
        break;
      }

      case Expression::kValueConstant: {
        v = exp->value[i].constant;
        break;
      }

      case Expression::kValueLabel: {
        // A label contributes its section offset + label offset.
        LabelEntry* le = exp->value[i].label;
        if (!le->isBound())
          return DebugUtils::errored(kErrorExpressionLabelNotBound);
        v = le->section()->offset() + le->offset();
        break;
      }

      case Expression::kValueExpression: {
        // Recurse into the nested sub-expression.
        Expression* nested = exp->value[i].expression;
        ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(self, nested, &v));
        break;
      }

      default:
        return DebugUtils::errored(kErrorInvalidState);
    }

    value[i] = v;
  }

  uint64_t result;
  uint64_t& a = value[0];
  uint64_t& b = value[1];

  // Apply the operator; unsigned arithmetic wraps by definition.
  switch (exp->opType) {
    case Expression::kOpAdd:
      result = a + b;
      break;

    case Expression::kOpSub:
      result = a - b;
      break;

    case Expression::kOpMul:
      result = a * b;
      break;

    case Expression::kOpSll:
      // Guard: shifting a 64-bit value by >= 64 is UB in C++.
      result = (b > 63) ? uint64_t(0) : uint64_t(a << b);
      break;

    case Expression::kOpSrl:
      result = (b > 63) ? uint64_t(0) : uint64_t(a >> b);
      break;

    case Expression::kOpSra:
      // Arithmetic shift: clamp so a large amount yields the sign fill.
      result = Support::sar(a, Support::min<uint64_t>(b, 63));
      break;

    default:
      return DebugUtils::errored(kErrorInvalidState);
  }

  *out = result;
  return kErrorOk;
}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Utilities]
+// ============================================================================
+
// Assigns a flat (non-overlapping, alignment-respecting) offset to every
// section, extending each section's virtual size so it covers the alignment
// gap before its successor.
//
// Two passes: the first only validates that no aligned offset overflows
// (kErrorTooLarge on overflow), so the second can assign offsets without
// further checks and mutate state only when the whole layout is valid.
Error CodeHolder::flatten() noexcept {
  uint64_t offset = 0;
  for (Section* section : _sections) {
    uint64_t realSize = section->realSize();
    if (realSize) {
      uint64_t alignedOffset = Support::alignUp(offset, section->alignment());
      // `alignUp` wrapping around is the overflow signal here.
      if (ASMJIT_UNLIKELY(alignedOffset < offset))
        return DebugUtils::errored(kErrorTooLarge);

      Support::FastUInt8 of = 0;
      offset = Support::addOverflow(alignedOffset, realSize, &of);

      if (ASMJIT_UNLIKELY(of))
        return DebugUtils::errored(kErrorTooLarge);
    }
  }

  // Now we know that we can assign offsets of all sections properly.
  Section* prev = nullptr;
  offset = 0;
  for (Section* section : _sections) {
    uint64_t realSize = section->realSize();
    // Empty sections share the running offset without re-aligning it.
    if (realSize)
      offset = Support::alignUp(offset, section->alignment());
    section->_offset = offset;

    // Make sure the previous section extends a bit to cover the alignment.
    if (prev)
      prev->_virtualSize = offset - prev->_offset;

    prev = section;
    offset += realSize;
  }

  return kErrorOk;
}
+
+size_t CodeHolder::codeSize() const noexcept {
+ Support::FastUInt8 of = 0;
+ uint64_t offset = 0;
+
+ for (Section* section : _sections) {
+ uint64_t realSize = section->realSize();
+
+ if (realSize) {
+ uint64_t alignedOffset = Support::alignUp(offset, section->alignment());
+ ASMJIT_ASSERT(alignedOffset >= offset);
+ offset = Support::addOverflow(alignedOffset, realSize, &of);
+ }
+ }
+
+ if ((sizeof(uint64_t) > sizeof(size_t) && offset > SIZE_MAX) || of)
+ return SIZE_MAX;
+
+ return size_t(offset);
+}
+
// Relocates all recorded `RelocEntry` records to `baseAddress`, patching the
// code buffers in place.
//
// For `kTypeX64AddressEntry` entries whose target is out of rel32 range the
// instruction is rewritten to an indirect CALL/JMP (FF /2 or FF /4) through
// a slot in the address-table section, and the 64-bit target is written into
// that slot. `baseAddress` must not be `Globals::kNoBaseAddress`.
Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
  // Base address must be provided.
  if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress))
    return DebugUtils::errored(kErrorInvalidArgument);

  _baseAddress = baseAddress;
  uint32_t addressSize = _environment.registerSize();

  Section* addressTableSection = _addressTableSection;
  uint32_t addressTableEntryCount = 0;
  uint8_t* addressTableEntryData = nullptr;

  // Make sure the address-table buffer is backed by real memory before any
  // slot is written below.
  if (addressTableSection) {
    ASMJIT_PROPAGATE(
      reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize())));
    addressTableEntryData = addressTableSection->_buffer.data();
  }

  // Relocate all recorded locations.
  for (const RelocEntry* re : _relocations) {
    // Possibly deleted or optimized-out entry.
    if (re->relocType() == RelocEntry::kTypeNone)
      continue;

    Section* sourceSection = sectionById(re->sourceSectionId());
    Section* targetSection = nullptr;

    if (re->targetSectionId() != Globals::kInvalidId)
      targetSection = sectionById(re->targetSectionId());

    uint64_t value = re->payload();
    uint64_t sectionOffset = sourceSection->offset();
    uint64_t sourceOffset = re->sourceOffset();

    // Make sure that the `RelocEntry` doesn't go out of bounds.
    size_t regionSize = re->leadingSize() + re->valueSize() + re->trailingSize();
    if (ASMJIT_UNLIKELY(re->sourceOffset() >= sourceSection->bufferSize() ||
                        sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize))
      return DebugUtils::errored(kErrorInvalidRelocEntry);

    uint8_t* buffer = sourceSection->data();
    size_t valueOffset = size_t(re->sourceOffset()) + re->leadingSize();

    switch (re->relocType()) {
      case RelocEntry::kTypeExpression: {
        // Payload is a pointer to an Expression - evaluate it now.
        Expression* expression = (Expression*)(uintptr_t(value));
        ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(this, expression, &value));
        break;
      }

      case RelocEntry::kTypeAbsToAbs: {
        break;
      }

      case RelocEntry::kTypeRelToAbs: {
        // Value is currently a relative offset from the start of its section.
        // We have to convert it to an absolute offset (including base address).
        if (ASMJIT_UNLIKELY(!targetSection))
          return DebugUtils::errored(kErrorInvalidRelocEntry);

        //value += baseAddress + sectionOffset + sourceOffset + regionSize;
        value += baseAddress + targetSection->offset();
        break;
      }

      case RelocEntry::kTypeAbsToRel: {
        value -= baseAddress + sectionOffset + sourceOffset + regionSize;
        // 64-bit targets must still fit a 32-bit relative displacement.
        if (addressSize > 4 && !Support::isInt32(int64_t(value)))
          return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
        break;
      }

      case RelocEntry::kTypeX64AddressEntry: {
        if (re->valueSize() != 4 || re->leadingSize() < 2)
          return DebugUtils::errored(kErrorInvalidRelocEntry);

        // First try whether a relative 32-bit displacement would work.
        value -= baseAddress + sectionOffset + sourceOffset + regionSize;
        if (!Support::isInt32(int64_t(value))) {
          // Relative 32-bit displacement is not possible, use '.addrtab' section.
          AddressTableEntry* atEntry = _addressTableEntries.get(re->payload());
          if (ASMJIT_UNLIKELY(!atEntry))
            return DebugUtils::errored(kErrorInvalidRelocEntry);

          // Cannot be null as we have just matched the `AddressTableEntry`.
          ASMJIT_ASSERT(addressTableSection != nullptr);

          // Assign table slots lazily, in first-use order.
          if (!atEntry->hasAssignedSlot())
            atEntry->_slot = addressTableEntryCount++;

          size_t atEntryIndex = size_t(atEntry->slot()) * addressSize;
          uint64_t addrSrc = sectionOffset + sourceOffset + regionSize;
          uint64_t addrDst = addressTableSection->offset() + uint64_t(atEntryIndex);

          // The displacement now targets the table slot, not the payload.
          value = addrDst - addrSrc;
          if (!Support::isInt32(int64_t(value)))
            return DebugUtils::errored(kErrorRelocOffsetOutOfRange);

          // Bytes that replace [REX, OPCODE] bytes.
          uint32_t byte0 = 0xFF;
          uint32_t byte1 = buffer[valueOffset - 1];

          if (byte1 == 0xE8) {
            // Patch CALL/MOD byte to FF /2 (-> 0x15).
            byte1 = x86EncodeMod(0, 2, 5);
          }
          else if (byte1 == 0xE9) {
            // Patch JMP/MOD byte to FF /4 (-> 0x25).
            byte1 = x86EncodeMod(0, 4, 5);
          }
          else {
            return DebugUtils::errored(kErrorInvalidRelocEntry);
          }

          // Patch `jmp/call` instruction.
          buffer[valueOffset - 2] = uint8_t(byte0);
          buffer[valueOffset - 1] = uint8_t(byte1);

          Support::writeU64uLE(addressTableEntryData + atEntryIndex, re->payload());
        }
        break;
      }

      default:
        return DebugUtils::errored(kErrorInvalidRelocEntry);
    }

    // Write the (possibly adjusted) value with the entry's width.
    switch (re->valueSize()) {
      case 1:
        Support::writeU8(buffer + valueOffset, uint32_t(value & 0xFFu));
        break;

      case 2:
        Support::writeU16uLE(buffer + valueOffset, uint32_t(value & 0xFFFFu));
        break;

      case 4:
        Support::writeU32uLE(buffer + valueOffset, uint32_t(value & 0xFFFFFFFFu));
        break;

      case 8:
        Support::writeU64uLE(buffer + valueOffset, value);
        break;

      default:
        return DebugUtils::errored(kErrorInvalidRelocEntry);
    }
  }

  // Fixup the virtual size of the address table if it's the last section.
  if (_sections.last() == addressTableSection) {
    size_t addressTableSize = addressTableEntryCount * addressSize;
    addressTableSection->_buffer._size = addressTableSize;
    addressTableSection->_virtualSize = addressTableSize;
  }

  return kErrorOk;
}
+
+Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, uint32_t copyOptions) noexcept {
+ if (ASMJIT_UNLIKELY(!isSectionValid(sectionId)))
+ return DebugUtils::errored(kErrorInvalidSection);
+
+ Section* section = sectionById(sectionId);
+ size_t bufferSize = section->bufferSize();
+
+ if (ASMJIT_UNLIKELY(dstSize < bufferSize))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ memcpy(dst, section->data(), bufferSize);
+
+ if (bufferSize < dstSize && (copyOptions & kCopyPadSectionBuffer)) {
+ size_t paddingSize = dstSize - bufferSize;
+ memset(static_cast<uint8_t*>(dst) + bufferSize, 0, paddingSize);
+ }
+
+ return kErrorOk;
+}
+
// Copies every section's buffer into `dst` at its flattened offset (see
// `flatten()`).
//
// `kCopyPadSectionBuffer` zero-fills each section's virtual tail;
// `kCopyPadTargetBuffer` zero-fills whatever remains of `dst` after the last
// byte written. Fails when any section does not fit into `dst`.
Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, uint32_t copyOptions) noexcept {
  size_t end = 0;
  for (Section* section : _sections) {
    if (section->offset() > dstSize)
      return DebugUtils::errored(kErrorInvalidArgument);

    size_t bufferSize = section->bufferSize();
    size_t offset = size_t(section->offset());

    if (ASMJIT_UNLIKELY(dstSize - offset < bufferSize))
      return DebugUtils::errored(kErrorInvalidArgument);

    uint8_t* dstTarget = static_cast<uint8_t*>(dst) + offset;
    size_t paddingSize = 0;
    memcpy(dstTarget, section->data(), bufferSize);

    // Zero the section's virtual tail, clamped to what fits into `dst`.
    if ((copyOptions & kCopyPadSectionBuffer) && bufferSize < section->virtualSize()) {
      paddingSize = Support::min<size_t>(dstSize - offset, size_t(section->virtualSize())) - bufferSize;
      memset(dstTarget + bufferSize, 0, paddingSize);
    }

    // Track the highest byte written so target padding starts there.
    end = Support::max(end, offset + bufferSize + paddingSize);
  }

  if (end < dstSize && (copyOptions & kCopyPadTargetBuffer)) {
    memset(static_cast<uint8_t*>(dst) + end, 0, dstSize - end);
  }

  return kErrorOk;
}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/codeholder.h b/client/asmjit/core/codeholder.h
new file mode 100644
index 0000000..9b3466d
--- /dev/null
+++ b/client/asmjit/core/codeholder.h
@@ -0,0 +1,929 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CODEHOLDER_H_INCLUDED
+#define ASMJIT_CORE_CODEHOLDER_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/codebuffer.h"
+#include "../core/datatypes.h"
+#include "../core/errorhandler.h"
+#include "../core/operand.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/target.h"
+#include "../core/zone.h"
+#include "../core/zonehash.h"
+#include "../core/zonestring.h"
+#include "../core/zonetree.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseEmitter;
+class CodeHolder;
+class LabelEntry;
+class Logger;
+
+// ============================================================================
+// [asmjit::AlignMode]
+// ============================================================================
+
+//! Align mode.
+//! Align mode, selects how the gap created by alignment is filled
+//! (code padding, data padding, or a sequence of zeros).
+enum AlignMode : uint32_t {
+  //! Align executable code.
+  kAlignCode = 0,
+  //! Align non-executable code.
+  kAlignData = 1,
+  //! Align by a sequence of zeros.
+  kAlignZero = 2,
+  //! Count of alignment modes.
+  kAlignCount = 3
+};
+
+// ============================================================================
+// [asmjit::Section]
+// ============================================================================
+
+//! Section entry.
+//! Section entry.
+//!
+//! Represents a single code or data section held by \ref CodeHolder. A
+//! section combines a `CodeBuffer` (real emitted bytes) with metadata such
+//! as flags, alignment, offset, and a virtual size for zero-initialized data.
+class Section {
+public:
+  //! Section id.
+  uint32_t _id;
+  //! Section flags.
+  uint32_t _flags;
+  //! Section alignment requirements (0 if no requirements).
+  uint32_t _alignment;
+  //! Reserved for future use (padding).
+  uint32_t _reserved;
+  //! Offset of this section from base-address.
+  uint64_t _offset;
+  //! Virtual size of the section (zero initialized sections).
+  uint64_t _virtualSize;
+  //! Section name (max 35 characters, PE allows max 8).
+  FixedString<Globals::kMaxSectionNameSize + 1> _name;
+  //! Code or data buffer.
+  CodeBuffer _buffer;
+
+  //! Section flags.
+  enum Flags : uint32_t {
+    //! Executable (.text sections).
+    kFlagExec = 0x00000001u,
+    //! Read-only (.text and .data sections).
+    kFlagConst = 0x00000002u,
+    //! Zero initialized by the loader (BSS).
+    kFlagZero = 0x00000004u,
+    //! Info / comment flag.
+    kFlagInfo = 0x00000008u,
+    //! Section created implicitly and can be deleted by \ref Target.
+    kFlagImplicit = 0x80000000u
+  };
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the section id.
+  inline uint32_t id() const noexcept { return _id; }
+  //! Returns the section name, as a null terminated string.
+  inline const char* name() const noexcept { return _name.str; }
+
+  //! Returns the section data.
+  inline uint8_t* data() noexcept { return _buffer.data(); }
+  //! \overload
+  inline const uint8_t* data() const noexcept { return _buffer.data(); }
+
+  //! Returns the section flags, see \ref Flags.
+  inline uint32_t flags() const noexcept { return _flags; }
+  //! Tests whether the section has the given `flag`.
+  inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+  //! Adds `flags` to the section flags.
+  inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+  //! Removes `flags` from the section flags.
+  inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
+
+  //! Returns the minimum section alignment.
+  inline uint32_t alignment() const noexcept { return _alignment; }
+  //! Sets the minimum section alignment.
+  inline void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
+
+  //! Returns the section offset, relative to base.
+  inline uint64_t offset() const noexcept { return _offset; }
+  //! Set the section offset.
+  inline void setOffset(uint64_t offset) noexcept { _offset = offset; }
+
+  //! Returns the virtual size of the section.
+  //!
+  //! Virtual size is initially zero and is never changed by AsmJit. It's normal
+  //! if virtual size is smaller than size returned by `bufferSize()` as the buffer
+  //! stores real data emitted by assemblers or appended by users.
+  //!
+  //! Use `realSize()` to get the real and final size of this section.
+  inline uint64_t virtualSize() const noexcept { return _virtualSize; }
+  //! Sets the virtual size of the section.
+  inline void setVirtualSize(uint64_t virtualSize) noexcept { _virtualSize = virtualSize; }
+
+  //! Returns the buffer size of the section.
+  inline size_t bufferSize() const noexcept { return _buffer.size(); }
+  //! Returns the real size of the section calculated from virtual and buffer sizes.
+  inline uint64_t realSize() const noexcept { return Support::max<uint64_t>(virtualSize(), bufferSize()); }
+
+  //! Returns the `CodeBuffer` used by this section.
+  inline CodeBuffer& buffer() noexcept { return _buffer; }
+  //! Returns the `CodeBuffer` used by this section (const).
+  inline const CodeBuffer& buffer() const noexcept { return _buffer; }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::LabelLink]
+// ============================================================================
+
+//! Data structure used to link either unbound labels or cross-section links.
+//! Data structure used to link either unbound labels or cross-section links.
+//!
+//! Every use of a label that is not yet resolvable records one `LabelLink`
+//! in the owning \ref LabelEntry (see `LabelEntry::links()`); the recorded
+//! location is patched later, when the label is bound or code is relocated.
+struct LabelLink {
+  //! Next link (single-linked list).
+  LabelLink* next;
+  //! Section id where the label is bound.
+  uint32_t sectionId;
+  //! Relocation id or Globals::kInvalidId.
+  uint32_t relocId;
+  //! Label offset relative to the start of the section.
+  size_t offset;
+  //! Inlined rel8/rel32.
+  intptr_t rel;
+};
+
+// ============================================================================
+// [asmjit::Expression]
+// ============================================================================
+
+//! Expression node that can reference constants, labels, and another expressions.
+//! Expression node that can reference constants, labels, and another expressions.
+//!
+//! An expression is a binary tree node: `opType` selects the operation and
+//! `value[0]`/`value[1]` hold the two operands, each tagged by `valueType`.
+struct Expression {
+  //! Operation type.
+  enum OpType : uint8_t {
+    //! Addition.
+    kOpAdd = 0,
+    //! Subtraction.
+    kOpSub = 1,
+    //! Multiplication
+    kOpMul = 2,
+    //! Logical left shift.
+    kOpSll = 3,
+    //! Logical right shift.
+    kOpSrl = 4,
+    //! Arithmetic right shift.
+    kOpSra = 5
+  };
+
+  //! Type of \ref Value.
+  enum ValueType : uint8_t {
+    //! No value or invalid.
+    kValueNone = 0,
+    //! Value is 64-bit unsigned integer (constant).
+    kValueConstant = 1,
+    //! Value is \ref LabelEntry, which references a \ref Label.
+    kValueLabel = 2,
+    //! Value is \ref Expression
+    kValueExpression = 3
+  };
+
+  //! Expression value.
+  union Value {
+    //! Constant.
+    uint64_t constant;
+    //! Pointer to another expression.
+    Expression* expression;
+    //! Pointer to \ref LabelEntry.
+    LabelEntry* label;
+  };
+
+  //! Operation type.
+  uint8_t opType;
+  //! Value types of \ref value.
+  uint8_t valueType[2];
+  //! Reserved for future use, should be initialized to zero.
+  uint8_t reserved[5];
+  //! Expression left and right values.
+  Value value[2];
+
+  //! Resets the whole expression.
+  //!
+  //! Changes both values to \ref kValueNone.
+  inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+  //! Sets the value type at `index` to \ref kValueConstant and its content to `constant`.
+  inline void setValueAsConstant(size_t index, uint64_t constant) noexcept {
+    valueType[index] = kValueConstant;
+    value[index].constant = constant;
+  }
+
+  //! Sets the value type at `index` to \ref kValueLabel and its content to `labelEntry`.
+  inline void setValueAsLabel(size_t index, LabelEntry* labelEntry) noexcept {
+    valueType[index] = kValueLabel;
+    value[index].label = labelEntry;
+  }
+
+  //! Sets the value type at `index` to \ref kValueExpression and its content to `expression`.
+  inline void setValueAsExpression(size_t index, Expression* expression) noexcept {
+    // BUG FIX: the tag must be kValueExpression; the original code wrote
+    // kValueLabel here, which would make the relocation evaluator interpret
+    // the stored Expression* as a LabelEntry*.
+    valueType[index] = kValueExpression;
+    value[index].expression = expression;
+  }
+};
+
+// ============================================================================
+// [asmjit::LabelEntry]
+// ============================================================================
+
+//! Label entry.
+//!
+//! Contains the following properties:
+//! * Label id - This is the only thing that is set to the `Label` operand.
+//! * Label name - Optional, used mostly to create executables and libraries.
+//! * Label type - Type of the label, default `Label::kTypeAnonymous`.
+//! * Label parent id - Derived from many assemblers that allow to define a
+//! local label that falls under a global label. This allows to define
+//! many labels of the same name that have different parent (global) label.
+//! * Offset - offset of the label bound by `Assembler`.
+//! * Links - single-linked list that contains locations of code that has
+//! to be patched when the label gets bound. Every use of unbound label
+//! adds one link to `_links` list.
+//! * HVal - Hash value of label's name and optionally parentId.
+//! * HashNext - Hash-table implementation detail.
+class LabelEntry : public ZoneHashNode {
+public:
+  // Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has
+  // granularity of 32 bytes anyway). This gives `_name` the remaining space,
+  // which should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
+  static constexpr uint32_t kStaticNameSize =
+    64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*));
+
+  //! Label type, see `Label::LabelType`.
+  uint8_t _type;
+  //! Must be zero.
+  uint8_t _flags;
+  //! Reserved.
+  uint16_t _reserved16;
+  //! Label parent id or zero.
+  uint32_t _parentId;
+  //! Label offset relative to the start of the `_section`.
+  uint64_t _offset;
+  //! Section where the label was bound.
+  Section* _section;
+  //! Label links.
+  LabelLink* _links;
+  //! Label name.
+  ZoneString<kStaticNameSize> _name;
+
+  //! \name Accessors
+  //! \{
+
+  // NOTE: Label id is stored in `_customData`, which is provided by ZoneHashNode
+  // to fill a padding that a C++ compiler targeting 64-bit CPU will add to align
+  // the structure to 64-bits.
+
+  //! Returns label id.
+  inline uint32_t id() const noexcept { return _customData; }
+  //! Sets label id (internal, used only by `CodeHolder`).
+  inline void _setId(uint32_t id) noexcept { _customData = id; }
+
+  //! Returns label type, see `Label::LabelType`.
+  inline uint32_t type() const noexcept { return _type; }
+  //! Returns label flags, returns 0 at the moment.
+  inline uint32_t flags() const noexcept { return _flags; }
+
+  //! Tests whether the label has a parent label.
+  inline bool hasParent() const noexcept { return _parentId != Globals::kInvalidId; }
+  //! Returns label's parent id.
+  inline uint32_t parentId() const noexcept { return _parentId; }
+
+  //! Returns the section where the label was bound.
+  //!
+  //! If the label was not yet bound the return value is `nullptr`.
+  inline Section* section() const noexcept { return _section; }
+
+  //! Tests whether the label has name.
+  inline bool hasName() const noexcept { return !_name.empty(); }
+
+  //! Returns the label's name.
+  //!
+  //! \note Local labels will return their local name without their parent
+  //! part, for example ".L1".
+  inline const char* name() const noexcept { return _name.data(); }
+
+  //! Returns size of label's name.
+  //!
+  //! \note Label name is always null terminated, so you can use `strlen()` to
+  //! get it, however, it's also cached in `LabelEntry` itself, so if you want
+  //! to know the size the fastest way is to call `LabelEntry::nameSize()`.
+  inline uint32_t nameSize() const noexcept { return _name.size(); }
+
+  //! Returns links associated with this label.
+  inline LabelLink* links() const noexcept { return _links; }
+
+  //! Tests whether the label is bound.
+  inline bool isBound() const noexcept { return _section != nullptr; }
+  //! Tests whether the label is bound to the given `section`.
+  inline bool isBoundTo(Section* section) const noexcept { return _section == section; }
+
+  //! Returns the label offset (only useful if the label is bound).
+  inline uint64_t offset() const noexcept { return _offset; }
+
+  //! Returns the hash-value of label's name and its parent label (if any).
+  //!
+  //! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function
+  //! is implemented in `Support::hashString()` and `Support::hashRound()`.
+  inline uint32_t hashCode() const noexcept { return _hashCode; }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RelocEntry]
+// ============================================================================
+
+//! Relocation entry.
+//!
+//! We describe relocation data in the following way:
+//!
+//! ```
+//! +- Start of the buffer +- End of the data
+//! | |*PATCHED*| | or instruction
+//! |xxxxxxxxxxxxxxxxxxxxxx|LeadSize|ValueSize|TrailSize|xxxxxxxxxxxxxxxxxxxx->
+//! |
+//! +- Source offset
+//! ```
+struct RelocEntry {
+  //! Relocation id.
+  uint32_t _id;
+  //! Type of the relocation.
+  uint8_t _relocType;
+  //! Size of the relocation data/value (1, 2, 4 or 8 bytes).
+  uint8_t _valueSize;
+  //! Number of bytes after `_sourceOffset` to reach the value to be patched.
+  uint8_t _leadingSize;
+  //! Number of bytes after `_sourceOffset + _valueSize` to reach end of the
+  //! instruction.
+  uint8_t _trailingSize;
+  //! Source section id.
+  uint32_t _sourceSectionId;
+  //! Target section id.
+  uint32_t _targetSectionId;
+  //! Source offset (relative to start of the section).
+  uint64_t _sourceOffset;
+  //! Payload (target offset, target address, expression, etc).
+  uint64_t _payload;
+
+  //! Relocation type.
+  enum RelocType : uint32_t {
+    //! None/deleted (no relocation).
+    kTypeNone = 0,
+    //! Expression evaluation, `_payload` is pointer to `Expression`.
+    kTypeExpression = 1,
+    //! Relocate absolute to absolute.
+    kTypeAbsToAbs = 2,
+    //! Relocate relative to absolute.
+    kTypeRelToAbs = 3,
+    //! Relocate absolute to relative.
+    kTypeAbsToRel = 4,
+    //! Relocate absolute to relative or use trampoline.
+    kTypeX64AddressEntry = 5
+  };
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the relocation id.
+  inline uint32_t id() const noexcept { return _id; }
+
+  //! Returns the relocation type, see \ref RelocType.
+  inline uint32_t relocType() const noexcept { return _relocType; }
+  //! Returns the size of the value to patch (1, 2, 4 or 8 bytes).
+  inline uint32_t valueSize() const noexcept { return _valueSize; }
+
+  //! Returns the number of bytes between the source offset and the patched value.
+  inline uint32_t leadingSize() const noexcept { return _leadingSize; }
+  //! Returns the number of bytes between the patched value and the end of the instruction.
+  inline uint32_t trailingSize() const noexcept { return _trailingSize; }
+
+  //! Returns the id of the section the relocation is applied in.
+  inline uint32_t sourceSectionId() const noexcept { return _sourceSectionId; }
+  //! Returns the id of the section the relocation points to.
+  inline uint32_t targetSectionId() const noexcept { return _targetSectionId; }
+
+  //! Returns the offset of the relocated value, relative to the start of the source section.
+  inline uint64_t sourceOffset() const noexcept { return _sourceOffset; }
+  //! Returns the raw payload (target offset, target address, or expression pointer).
+  inline uint64_t payload() const noexcept { return _payload; }
+
+  //! Returns the payload reinterpreted as `Expression*` (valid only for \ref kTypeExpression).
+  Expression* payloadAsExpression() const noexcept {
+    return reinterpret_cast<Expression*>(uintptr_t(_payload));
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::AddressTableEntry]
+// ============================================================================
+
+//! Entry in an address table.
+//! Entry in an address table.
+//!
+//! Entries are kept in a tree ordered by `_address` (see the comparison
+//! operators below); `_slot` is the index assigned in the '.addrtab' section.
+class AddressTableEntry : public ZoneTreeNodeT<AddressTableEntry> {
+public:
+  ASMJIT_NONCOPYABLE(AddressTableEntry)
+
+  //! Address.
+  uint64_t _address;
+  //! Slot.
+  uint32_t _slot;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates an entry for `address` with no slot assigned yet (0xFFFFFFFF).
+  inline explicit AddressTableEntry(uint64_t address) noexcept
+    : _address(address),
+      _slot(0xFFFFFFFFu) {}
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the absolute address this entry represents.
+  inline uint64_t address() const noexcept { return _address; }
+  //! Returns the assigned slot, or 0xFFFFFFFF if not assigned yet.
+  inline uint32_t slot() const noexcept { return _slot; }
+
+  //! Tests whether a slot has been assigned to this entry.
+  inline bool hasAssignedSlot() const noexcept { return _slot != 0xFFFFFFFFu; }
+
+  // Ordering by address - used by the containing ZoneTree.
+  inline bool operator<(const AddressTableEntry& other) const noexcept { return _address < other._address; }
+  inline bool operator>(const AddressTableEntry& other) const noexcept { return _address > other._address; }
+
+  // Heterogeneous comparison against a raw address - used for tree lookups.
+  inline bool operator<(uint64_t queryAddress) const noexcept { return _address < queryAddress; }
+  inline bool operator>(uint64_t queryAddress) const noexcept { return _address > queryAddress; }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::CodeHolder]
+// ============================================================================
+
+//! Contains basic information about the target architecture and its options.
+//!
+//! In addition, it holds assembled code & data (including sections, labels, and
+//! relocation information). `CodeHolder` can store both binary and intermediate
+//! representation of assembly, which can be generated by \ref BaseAssembler,
+//! \ref BaseBuilder, and \ref BaseCompiler
+//!
+//! \note `CodeHolder` has an ability to attach an \ref ErrorHandler, however,
+//! the error handler is not triggered by `CodeHolder` itself, it's instead
+//! propagated to all emitters that attach to it.
+class CodeHolder {
+public:
+  ASMJIT_NONCOPYABLE(CodeHolder)
+
+  //! Environment information.
+  Environment _environment;
+  //! Base address or \ref Globals::kNoBaseAddress.
+  uint64_t _baseAddress;
+
+  //! Attached `Logger`, used by all consumers.
+  Logger* _logger;
+  //! Attached `ErrorHandler`.
+  ErrorHandler* _errorHandler;
+
+  //! Code zone (used to allocate core structures).
+  Zone _zone;
+  //! Zone allocator, used to manage internal containers.
+  ZoneAllocator _allocator;
+
+  //! Attached emitters.
+  ZoneVector<BaseEmitter*> _emitters;
+  //! Section entries.
+  ZoneVector<Section*> _sections;
+  //! Label entries.
+  ZoneVector<LabelEntry*> _labelEntries;
+  //! Relocation entries.
+  ZoneVector<RelocEntry*> _relocations;
+  //! Label name -> LabelEntry (only named labels).
+  ZoneHash<LabelEntry> _namedLabels;
+
+  //! Count of label links, which are not resolved.
+  size_t _unresolvedLinkCount;
+  //! Pointer to an address table section (or null if this section doesn't exist).
+  Section* _addressTableSection;
+  //! Address table entries.
+  ZoneTree<AddressTableEntry> _addressTableEntries;
+
+  //! Options that can be used with \ref copySectionData() and \ref copyFlattenedData().
+  enum CopyOptions : uint32_t {
+    //! If virtual size of a section is greater than the size of its \ref CodeBuffer
+    //! then all bytes between the buffer size and virtual size will be zeroed.
+    //! If this option is not set then those bytes would be left as is, which
+    //! means that if the user didn't initialize them they would have a previous
+    //! content, which may be unwanted.
+    kCopyPadSectionBuffer = 0x00000001u,
+
+#ifndef ASMJIT_NO_DEPRECATED
+    kCopyWithPadding = kCopyPadSectionBuffer,
+#endif // !ASMJIT_NO_DEPRECATED
+
+    //! Zeroes the target buffer if the flattened data is less than the destination
+    //! size. This option works only with \ref copyFlattenedData() as it processes
+    //! multiple sections. It is ignored by \ref copySectionData().
+    kCopyPadTargetBuffer = 0x00000002u
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates an uninitialized CodeHolder (you must init() it before it can be used).
+  ASMJIT_API CodeHolder() noexcept;
+  //! Destroys the CodeHolder.
+  ASMJIT_API ~CodeHolder() noexcept;
+
+  //! Tests whether the `CodeHolder` has been initialized.
+  //!
+  //! Emitters can be only attached to initialized `CodeHolder` instances.
+  inline bool isInitialized() const noexcept { return _environment.isInitialized(); }
+
+  //! Initializes CodeHolder to hold code described by code `info`.
+  ASMJIT_API Error init(const Environment& environment, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept;
+  //! Detaches all code-generators attached and resets the `CodeHolder`.
+  ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
+
+  //! \}
+
+  //! \name Attach & Detach
+  //! \{
+
+  //! Attaches an emitter to this `CodeHolder`.
+  ASMJIT_API Error attach(BaseEmitter* emitter) noexcept;
+  //! Detaches an emitter from this `CodeHolder`.
+  ASMJIT_API Error detach(BaseEmitter* emitter) noexcept;
+
+  //! \}
+
+  //! \name Allocators
+  //! \{
+
+  //! Returns the allocator that the `CodeHolder` uses.
+  //!
+  //! \note This should be only used for AsmJit's purposes. Code holder uses
+  //! arena allocator to allocate everything, so anything allocated through
+  //! this allocator will be invalidated by \ref CodeHolder::reset() or by
+  //! CodeHolder's destructor.
+  inline ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); }
+
+  //! \}
+
+  //! \name Code & Architecture
+  //! \{
+
+  //! Returns the target environment information, see \ref Environment.
+  inline const Environment& environment() const noexcept { return _environment; }
+
+  //! Returns the target architecture.
+  inline uint32_t arch() const noexcept { return environment().arch(); }
+  //! Returns the target sub-architecture.
+  inline uint32_t subArch() const noexcept { return environment().subArch(); }
+
+  //! Tests whether a static base-address is set.
+  inline bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
+  //! Returns a static base-address or \ref Globals::kNoBaseAddress, if not set.
+  inline uint64_t baseAddress() const noexcept { return _baseAddress; }
+
+  //! \}
+
+  //! \name Emitters
+  //! \{
+
+  //! Returns a vector of attached emitters.
+  inline const ZoneVector<BaseEmitter*>& emitters() const noexcept { return _emitters; }
+
+  //! \}
+
+  //! \name Logging
+  //! \{
+
+  //! Returns the attached logger, see \ref Logger.
+  inline Logger* logger() const noexcept { return _logger; }
+  //! Attaches a `logger` to CodeHolder and propagates it to all attached emitters.
+  ASMJIT_API void setLogger(Logger* logger) noexcept;
+  //! Resets the logger to none.
+  inline void resetLogger() noexcept { setLogger(nullptr); }
+
+  //! \}
+
+  //! \name Error Handling
+  //! \{
+
+  //! Tests whether the CodeHolder has an attached error handler, see \ref ErrorHandler.
+  inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
+  //! Returns the attached error handler.
+  inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
+  //! Attach an error handler to this `CodeHolder`.
+  ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
+  //! Resets the error handler to none.
+  inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
+
+  //! \}
+
+  //! \name Code Buffer
+  //! \{
+
+  //! Makes sure that at least `n` bytes can be added to CodeHolder's buffer `cb`.
+  //!
+  //! \note The buffer `cb` must be managed by `CodeHolder` - otherwise the
+  //! behavior of the function is undefined.
+  ASMJIT_API Error growBuffer(CodeBuffer* cb, size_t n) noexcept;
+
+  //! Reserves the size of `cb` to at least `n` bytes.
+  //!
+  //! \note The buffer `cb` must be managed by `CodeHolder` - otherwise the
+  //! behavior of the function is undefined.
+  ASMJIT_API Error reserveBuffer(CodeBuffer* cb, size_t n) noexcept;
+
+  //! \}
+
+  //! \name Sections
+  //! \{
+
+  //! Returns an array of `Section*` records.
+  inline const ZoneVector<Section*>& sections() const noexcept { return _sections; }
+  //! Returns the number of sections.
+  inline uint32_t sectionCount() const noexcept { return _sections.size(); }
+
+  //! Tests whether the given `sectionId` is valid.
+  inline bool isSectionValid(uint32_t sectionId) const noexcept { return sectionId < _sections.size(); }
+
+  //! Creates a new section and return its pointer in `sectionOut`.
+  //!
+  //! Returns `Error`, does not report a possible error to `ErrorHandler`.
+  ASMJIT_API Error newSection(Section** sectionOut, const char* name, size_t nameSize = SIZE_MAX, uint32_t flags = 0, uint32_t alignment = 1) noexcept;
+
+  //! Returns a section entry of the given index.
+  inline Section* sectionById(uint32_t sectionId) const noexcept { return _sections[sectionId]; }
+
+  //! Returns section-id that matches the given `name`.
+  //!
+  //! If there is no such section `Section::kInvalidId` is returned.
+  ASMJIT_API Section* sectionByName(const char* name, size_t nameSize = SIZE_MAX) const noexcept;
+
+  //! Returns '.text' section (section that commonly represents code).
+  //!
+  //! \note Text section is always the first section in \ref CodeHolder::sections() array.
+  inline Section* textSection() const noexcept { return _sections[0]; }
+
+  //! Tests whether '.addrtab' section exists.
+  inline bool hasAddressTable() const noexcept { return _addressTableSection != nullptr; }
+
+  //! Returns '.addrtab' section.
+  //!
+  //! This section is used exclusively by AsmJit to store absolute 64-bit
+  //! addresses that cannot be encoded in instructions like 'jmp' or 'call'.
+  //!
+  //! \note This section is created on demand, the returned pointer can be null.
+  inline Section* addressTableSection() const noexcept { return _addressTableSection; }
+
+  //! Ensures that '.addrtab' section exists (creates it if it doesn't) and
+  //! returns it. Can return `nullptr` on out of memory condition.
+  ASMJIT_API Section* ensureAddressTableSection() noexcept;
+
+  //! Used to add an address to an address table.
+  //!
+  //! This implicitly calls `ensureAddressTableSection()` and then creates
+  //! `AddressTableEntry` that is inserted to `_addressTableEntries`. If the
+  //! address already exists this operation does nothing as the same addresses
+  //! use the same slot.
+  //!
+  //! This function should be considered internal as it's used by assemblers to
+  //! insert an absolute address into the address table. Inserting address into
+  //! address table without creating a particular relocation entry makes no sense.
+  ASMJIT_API Error addAddressToAddressTable(uint64_t address) noexcept;
+
+  //! \}
+
+  //! \name Labels & Symbols
+  //! \{
+
+  //! Returns array of `LabelEntry*` records.
+  inline const ZoneVector<LabelEntry*>& labelEntries() const noexcept { return _labelEntries; }
+
+  //! Returns number of labels created.
+  inline uint32_t labelCount() const noexcept { return _labelEntries.size(); }
+
+  //! Tests whether the label having `id` is valid (i.e. created by `newLabelEntry()`).
+  inline bool isLabelValid(uint32_t labelId) const noexcept {
+    return labelId < _labelEntries.size();
+  }
+
+  //! Tests whether the `label` is valid (i.e. created by `newLabelEntry()`).
+  inline bool isLabelValid(const Label& label) const noexcept {
+    return label.id() < _labelEntries.size();
+  }
+
+  //! \overload
+  inline bool isLabelBound(uint32_t labelId) const noexcept {
+    return isLabelValid(labelId) && _labelEntries[labelId]->isBound();
+  }
+
+  //! Tests whether the `label` is already bound.
+  //!
+  //! Returns `false` if the `label` is not valid.
+  inline bool isLabelBound(const Label& label) const noexcept {
+    return isLabelBound(label.id());
+  }
+
+  //! Returns LabelEntry of the given label `id`.
+  inline LabelEntry* labelEntry(uint32_t labelId) const noexcept {
+    return isLabelValid(labelId) ? _labelEntries[labelId] : static_cast<LabelEntry*>(nullptr);
+  }
+
+  //! Returns LabelEntry of the given `label`.
+  inline LabelEntry* labelEntry(const Label& label) const noexcept {
+    return labelEntry(label.id());
+  }
+
+  //! Returns offset of a `Label` by its `labelId`.
+  //!
+  //! The offset returned is relative to the start of the section. Zero offset
+  //! is returned for unbound labels, which is their initial offset value.
+  inline uint64_t labelOffset(uint32_t labelId) const noexcept {
+    ASMJIT_ASSERT(isLabelValid(labelId));
+    return _labelEntries[labelId]->offset();
+  }
+
+  //! \overload
+  inline uint64_t labelOffset(const Label& label) const noexcept {
+    return labelOffset(label.id());
+  }
+
+  //! Returns offset of a label by it's `labelId` relative to the base offset.
+  //!
+  //! \remarks The offset of the section where the label is bound must be valid
+  //! in order to use this function, otherwise the value returned will not be
+  //! reliable.
+  inline uint64_t labelOffsetFromBase(uint32_t labelId) const noexcept {
+    ASMJIT_ASSERT(isLabelValid(labelId));
+    const LabelEntry* le = _labelEntries[labelId];
+    return (le->isBound() ? le->section()->offset() : uint64_t(0)) + le->offset();
+  }
+
+  //! \overload
+  inline uint64_t labelOffsetFromBase(const Label& label) const noexcept {
+    return labelOffsetFromBase(label.id());
+  }
+
+  //! Creates a new anonymous label and return its id in `idOut`.
+  //!
+  //! Returns `Error`, does not report error to `ErrorHandler`.
+  ASMJIT_API Error newLabelEntry(LabelEntry** entryOut) noexcept;
+
+  //! Creates a new named \ref LabelEntry of the given label `type`.
+  //!
+  //! \param entryOut Where to store the created \ref LabelEntry.
+  //! \param name The name of the label.
+  //! \param nameSize The length of `name` argument, or `SIZE_MAX` if `name` is
+  //!        a null terminated string, which means that the `CodeHolder` will
+  //!        use `strlen()` to determine the length.
+  //! \param type The type of the label to create, see \ref Label::LabelType.
+  //! \param parentId Parent id of a local label, otherwise it must be
+  //!        \ref Globals::kInvalidId.
+  //!
+  //! \retval Always returns \ref Error, does not report a possible error to
+  //!         the attached \ref ErrorHandler.
+  //!
+  //! AsmJit has a support for local labels (\ref Label::kTypeLocal) which
+  //! require a parent label id (parentId). The names of local labels can
+  //! conflict with names of other local labels that have a different parent.
+  ASMJIT_API Error newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, uint32_t type, uint32_t parentId = Globals::kInvalidId) noexcept;
+
+  //! Returns a label by name.
+  //!
+  //! If the named label doesn't exist, a default constructed \ref Label is
+  //! returned, which has its id set to \ref Globals::kInvalidId.
+  inline Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept {
+    return Label(labelIdByName(name, nameSize, parentId));
+  }
+
+  //! Returns a label id by name.
+  //!
+  //! If the named label doesn't exist \ref Globals::kInvalidId is returned.
+  ASMJIT_API uint32_t labelIdByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
+
+  //! Tests whether there are any unresolved label links.
+  inline bool hasUnresolvedLinks() const noexcept { return _unresolvedLinkCount != 0; }
+  //! Returns the number of label links, which are unresolved.
+  inline size_t unresolvedLinkCount() const noexcept { return _unresolvedLinkCount; }
+
+  //! Creates a new label-link used to store information about yet unbound labels.
+  //!
+  //! Returns `null` if the allocation failed.
+  ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept;
+
+  //! Resolves cross-section links (`LabelLink`) associated with each label that
+  //! was used as a destination in code of a different section. It's only useful
+  //! to people that use multiple sections as it will do nothing if the code only
+  //! contains a single section in which cross-section links are not possible.
+  ASMJIT_API Error resolveUnresolvedLinks() noexcept;
+
+  //! Binds a label to a given `sectionId` and `offset` (relative to start of the section).
+  //!
+  //! This function is generally used by `BaseAssembler::bind()` to do the heavy lifting.
+  ASMJIT_API Error bindLabel(const Label& label, uint32_t sectionId, uint64_t offset) noexcept;
+
+  //! \}
+
+  //! \name Relocations
+  //! \{
+
+  //! Tests whether the code contains relocation entries.
+  inline bool hasRelocEntries() const noexcept { return !_relocations.empty(); }
+  //! Returns array of `RelocEntry*` records.
+  inline const ZoneVector<RelocEntry*>& relocEntries() const noexcept { return _relocations; }
+
+  //! Returns a RelocEntry of the given `id`.
+  inline RelocEntry* relocEntry(uint32_t id) const noexcept { return _relocations[id]; }
+
+  //! Creates a new relocation entry of type `relocType` and size `valueSize`.
+  //!
+  //! Additional fields can be set after the relocation entry was created.
+  ASMJIT_API Error newRelocEntry(RelocEntry** dst, uint32_t relocType, uint32_t valueSize) noexcept;
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Flattens all sections by recalculating their offsets, starting at 0.
+  //!
+  //! \note This should never be called more than once.
+  ASMJIT_API Error flatten() noexcept;
+
+  //! Returns the computed size of code & data of all sections.
+  //!
+  //! \note All sections will be iterated over and the code size returned
+  //! would represent the minimum code size of all combined sections after
+  //! applying minimum alignment. Code size may decrease after calling
+  //! `flatten()` and `relocateToBase()`.
+  ASMJIT_API size_t codeSize() const noexcept;
+
+  //! Relocates the code to the given `baseAddress`.
+  //!
+  //! \param baseAddress Absolute base address where the code will be relocated
+  //! to. Please note that nothing is copied to such base address, it's just an
+  //! absolute value used by the relocator to resolve all stored relocations.
+  //!
+  //! \note This should never be called more than once.
+  ASMJIT_API Error relocateToBase(uint64_t baseAddress) noexcept;
+
+  //! Copies a single section into `dst`.
+  ASMJIT_API Error copySectionData(void* dst, size_t dstSize, uint32_t sectionId, uint32_t copyOptions = 0) noexcept;
+
+  //! Copies all sections into `dst`.
+  //!
+  //! This should only be used if the data was flattened and there are no gaps
+  //! between the sections. The `dstSize` is always checked and the copy will
+  //! never write anything outside the provided buffer.
+  ASMJIT_API Error copyFlattenedData(void* dst, size_t dstSize, uint32_t copyOptions = 0) noexcept;
+
+  //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+  ASMJIT_DEPRECATED("Use 'CodeHolder::init(const Environment& environment, uint64_t baseAddress)' instead")
+  inline Error init(const CodeInfo& codeInfo) noexcept { return init(codeInfo._environment, codeInfo._baseAddress); }
+
+  // NOTE(review): the deprecation message below says "nevironment" - likely a
+  // typo for "environment"; left unchanged as it is a string literal.
+  ASMJIT_DEPRECATED("Use nevironment() instead")
+  inline CodeInfo codeInfo() const noexcept { return CodeInfo(_environment, _baseAddress); }
+
+  ASMJIT_DEPRECATED("Use BaseEmitter::encodingOptions() - this function always returns zero")
+  inline uint32_t emitterOptions() const noexcept { return 0; }
+
+  ASMJIT_DEPRECATED("Use BaseEmitter::addEncodingOptions() - this function does nothing")
+  inline void addEmitterOptions(uint32_t options) noexcept { DebugUtils::unused(options); }
+
+  ASMJIT_DEPRECATED("Use BaseEmitter::clearEncodingOptions() - this function does nothing")
+  inline void clearEmitterOptions(uint32_t options) noexcept { DebugUtils::unused(options); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CODEHOLDER_H_INCLUDED
diff --git a/client/asmjit/core/compiler.cpp b/client/asmjit/core/compiler.cpp
new file mode 100644
index 0000000..6e48253
--- /dev/null
+++ b/client/asmjit/core/compiler.cpp
@@ -0,0 +1,645 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/assembler.h"
+#include "../core/compiler.h"
+#include "../core/cpuinfo.h"
+#include "../core/logger.h"
+#include "../core/rapass_p.h"
+#include "../core/rastack_p.h"
+#include "../core/support.h"
+#include "../core/type.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::GlobalConstPoolPass]
+// ============================================================================
+
+// Pass that moves the compiler's global constant pool (if one was created) to
+// the very end of the node stream, so all functions can reference it.
+//
+// NOTE(review): no access specifier appears before the constructor; this relies
+// on ASMJIT_NONCOPYABLE re-opening a `public:` section so `addPassT<>` can
+// construct the pass - confirm against the macro's definition.
+class GlobalConstPoolPass : public Pass {
+  typedef Pass Base;
+  ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
+
+  GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
+
+  Error run(Zone* zone, Logger* logger) override {
+    DebugUtils::unused(zone, logger);
+
+    // Flush the global constant pool.
+    BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
+    if (compiler->_globalConstPool) {
+      // Append the pool node after the last node and drop ownership so it is
+      // not flushed twice.
+      compiler->addAfter(compiler->_globalConstPool, compiler->lastNode());
+      compiler->_globalConstPool = nullptr;
+    }
+
+    return kErrorOk;
+  }
+};
+
+// ============================================================================
+// [asmjit::InvokeNode - Arg / Ret]
+// ============================================================================
+
+// Assigns operand `op` as argument `i` of this invocation.
+//
+// `i` may carry the `kFuncArgHi` flag bit, which is masked out before the
+// bounds check against the declared argument count. Returns false when the
+// (masked) index is out of range.
+bool InvokeNode::_setArg(uint32_t i, const Operand_& op) noexcept {
+  if ((i & ~kFuncArgHi) >= _funcDetail.argCount())
+    return false;
+
+  _args[i] = op;
+  return true;
+}
+
+// Assigns operand `op` as return value `i` of this invocation; at most two
+// return operands are supported. Returns false when `i` is out of range.
+bool InvokeNode::_setRet(uint32_t i, const Operand_& op) noexcept {
+  if (i >= 2)
+    return false;
+
+  _rets[i] = op;
+  return true;
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Construction / Destruction]
+// ============================================================================
+
+// Creates a BaseCompiler with no active function and empty constant pools.
+BaseCompiler::BaseCompiler() noexcept
+  : BaseBuilder(),
+    _func(nullptr),
+    // Zone block sized so a whole block (payload + overhead) fits into 4kB.
+    _vRegZone(4096 - Zone::kBlockOverhead),
+    _vRegArray(),
+    _localConstPool(nullptr),
+    _globalConstPool(nullptr) {
+
+  _emitterType = uint8_t(kTypeCompiler);
+  // Compiler emits virtual registers, so instruction validation must allow them.
+  _validationFlags = uint8_t(InstAPI::kValidationFlagVirtRegs);
+}
+BaseCompiler::~BaseCompiler() noexcept {}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Function Management]
+// ============================================================================
+
+// Creates a FuncNode (plus its exit label and end sentinel) for `signature`
+// without inserting it into the stream. On success stores the node to `out`;
+// on failure `*out` stays null and the error is reported.
+Error BaseCompiler::_newFuncNode(FuncNode** out, const FuncSignature& signature) {
+  *out = nullptr;
+
+  // Create FuncNode together with all the required surrounding nodes.
+  FuncNode* funcNode;
+  ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
+  ASMJIT_PROPAGATE(_newLabelNode(&funcNode->_exitNode));
+  ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelNode::kSentinelFuncEnd));
+
+  // Initialize the function's detail info.
+  Error err = funcNode->detail().init(signature, environment());
+  if (ASMJIT_UNLIKELY(err))
+    return reportError(err);
+
+  // If the Target guarantees greater stack alignment than required by the
+  // calling convention then override it as we can prevent having to perform
+  // dynamic stack alignment
+  uint32_t environmentStackAlignment = _environment.stackAlignment();
+
+  if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment)
+    funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);
+
+  // Initialize the function frame.
+  err = funcNode->_frame.init(funcNode->_funcDetail);
+  if (ASMJIT_UNLIKELY(err))
+    return reportError(err);
+
+  // Allocate space for function arguments (one VirtReg* slot per argument,
+  // zero-initialized so unassigned arguments stay null).
+  funcNode->_args = nullptr;
+  if (funcNode->argCount() != 0) {
+    funcNode->_args = _allocator.allocT<VirtReg*>(funcNode->argCount() * sizeof(VirtReg*));
+    if (ASMJIT_UNLIKELY(!funcNode->_args))
+      return reportError(DebugUtils::errored(kErrorOutOfMemory));
+    memset(funcNode->_args, 0, funcNode->argCount() * sizeof(VirtReg*));
+  }
+
+  // The function node also acts as a label, so register it with the holder.
+  ASMJIT_PROPAGATE(registerLabelNode(funcNode));
+
+  *out = funcNode;
+  return kErrorOk;
+}
+
+// Creates a FuncNode via `_newFuncNode()` and immediately adds it to the
+// stream through `addFunc()`, which also makes it the current function.
+Error BaseCompiler::_addFuncNode(FuncNode** out, const FuncSignature& signature) {
+  ASMJIT_PROPAGATE(_newFuncNode(out, signature));
+  addFunc(*out);
+  return kErrorOk;
+}
+
+// Creates a FuncRetNode carrying up to two return operands (operand count is
+// derived from which of `o0`/`o1` are non-empty) without adding it.
+Error BaseCompiler::_newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
+  uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
+  FuncRetNode* node;
+
+  ASMJIT_PROPAGATE(_newNodeT<FuncRetNode>(&node));
+  node->setOpCount(opCount);
+  node->setOp(0, o0);
+  node->setOp(1, o1);
+  node->resetOpRange(2, node->opCapacity());
+
+  *out = node;
+  return kErrorOk;
+}
+
+// Creates a FuncRetNode via `_newRetNode()` and appends it at the cursor.
+Error BaseCompiler::_addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
+  ASMJIT_PROPAGATE(_newRetNode(out, o0, o1));
+  addNode(*out);
+  return kErrorOk;
+}
+
+// Adds `func`, its exit label, and its end sentinel to the stream, then moves
+// the cursor back between the function node and the exit label so subsequently
+// emitted code lands inside the function body. `func` becomes the current
+// function; nesting is not allowed (asserts `_func == nullptr`).
+FuncNode* BaseCompiler::addFunc(FuncNode* func) {
+  ASMJIT_ASSERT(_func == nullptr);
+  _func = func;
+
+  addNode(func);                 // Function node.
+  BaseNode* prev = cursor();     // {CURSOR}.
+  addNode(func->exitNode());     // Function exit label.
+  addNode(func->endNode());      // Function end sentinel.
+
+  _setCursor(prev);
+  return func;
+}
+
+// Finalizes the current function: flushes the local constant pool before the
+// end sentinel, clears the current function, and leaves the cursor positioned
+// at the end sentinel. Fails with kErrorInvalidState when no function is open.
+Error BaseCompiler::endFunc() {
+  FuncNode* func = _func;
+
+  if (ASMJIT_UNLIKELY(!func))
+    return reportError(DebugUtils::errored(kErrorInvalidState));
+
+  // Add the local constant pool at the end of the function (if exists).
+  if (_localConstPool) {
+    setCursor(func->endNode()->prev());
+    addNode(_localConstPool);
+    _localConstPool = nullptr;
+  }
+
+  // Mark as finished.
+  _func = nullptr;
+
+  SentinelNode* end = func->endNode();
+  setCursor(end);
+
+  return kErrorOk;
+}
+
+// Binds virtual register `r` to argument slot `argIndex` of the current
+// function. Fails when no function is open or `r` is not a valid virtual
+// register of this compiler.
+Error BaseCompiler::setArg(uint32_t argIndex, const BaseReg& r) {
+  FuncNode* func = _func;
+
+  if (ASMJIT_UNLIKELY(!func))
+    return reportError(DebugUtils::errored(kErrorInvalidState));
+
+  if (ASMJIT_UNLIKELY(!isVirtRegValid(r)))
+    return reportError(DebugUtils::errored(kErrorInvalidVirtId));
+
+  VirtReg* vReg = virtRegByReg(r);
+  func->setArg(argIndex, vReg);
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Function Invocation]
+// ============================================================================
+
+// Creates an InvokeNode (function call) targeting operand `o0` with the given
+// `signature`, without adding it to the stream.
+//
+// On success stores the node to `out`; on failure the error goes through
+// `reportError()` and `*out` is left null (previously it was left
+// uninitialized on early errors, so the inline `newCall()` wrapper could
+// return garbage).
+Error BaseCompiler::_newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
+  *out = nullptr;
+
+  InvokeNode* node;
+  ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, 0u));
+
+  node->setOpCount(1);
+  node->setOp(0, o0);
+  node->resetOpRange(1, node->opCapacity());
+
+  Error err = node->detail().init(signature, environment());
+  if (ASMJIT_UNLIKELY(err))
+    return reportError(err);
+
+  // Skip the allocation if there are no arguments.
+  uint32_t argCount = signature.argCount();
+  if (argCount) {
+    node->_args = static_cast<Operand*>(_allocator.alloc(argCount * sizeof(Operand)));
+    // BUG FIX: the original called reportError() WITHOUT returning here and
+    // fell through to memset(nullptr, ...) on allocation failure.
+    if (!node->_args)
+      return reportError(DebugUtils::errored(kErrorOutOfMemory));
+    memset(node->_args, 0, argCount * sizeof(Operand));
+  }
+
+  *out = node;
+  return kErrorOk;
+}
+
+// Creates an InvokeNode via `_newInvokeNode()` and appends it at the cursor.
+Error BaseCompiler::_addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
+  ASMJIT_PROPAGATE(_newInvokeNode(out, instId, o0, signature));
+  addNode(*out);
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Virtual Registers]
+// ============================================================================
+
+// Assigns an auto-generated name of the form "%<index>" to `vReg`, where the
+// index is derived from the virtual register id. Used when the user supplied
+// no name (or an empty one) and logging is enabled.
+static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
+  uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id));
+
+  char buf[64];
+  // "%%%u" emits a literal '%' followed by the decimal index.
+  int size = snprintf(buf, ASMJIT_ARRAY_SIZE(buf), "%%%u", unsigned(index));
+
+  ASMJIT_ASSERT(size > 0 && size < int(ASMJIT_ARRAY_SIZE(buf)));
+  vReg->_name.setData(&self->_dataZone, buf, unsigned(size));
+}
+
+// Allocates and registers a new VirtReg of the given `typeId` and register
+// `signature`. The register's id is derived from its index in `_vRegArray`.
+// On failure `*out` stays null and the error is reported.
+Error BaseCompiler::newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name) {
+  *out = nullptr;
+  uint32_t index = _vRegArray.size();
+
+  if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount)))
+    return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));
+
+  // Reserve the array slot up-front so appendUnsafe() below cannot fail.
+  if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk))
+    return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+  VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
+  if (ASMJIT_UNLIKELY(!vReg))
+    return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+  uint32_t size = Type::sizeOf(typeId);
+  // Alignment follows the type size, capped at 64 bytes.
+  uint32_t alignment = Support::min<uint32_t>(size, 64);
+
+  vReg = new(vReg) VirtReg(Operand::indexToVirtId(index), signature, size, alignment, typeId);
+
+#ifndef ASMJIT_NO_LOGGING
+  if (name && name[0] != '\0')
+    vReg->_name.setData(&_dataZone, name, SIZE_MAX);
+  else
+    BaseCompiler_assignGenericName(this, vReg);
+#else
+  DebugUtils::unused(name);
+#endif
+
+  _vRegArray.appendUnsafe(vReg);
+  *out = vReg;
+
+  return kErrorOk;
+}
+
+// Creates a new virtual register of `typeId` and initializes `out` to refer to
+// it. The type id is first normalized to architecture-specific register info;
+// `out` is reset on entry so it stays empty on failure.
+Error BaseCompiler::_newReg(BaseReg* out, uint32_t typeId, const char* name) {
+  out->reset();
+
+  RegInfo regInfo;
+  Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
+
+  if (ASMJIT_UNLIKELY(err))
+    return reportError(err);
+
+  VirtReg* vReg;
+  ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
+
+  out->_initReg(regInfo.signature(), vReg->id());
+  return kErrorOk;
+}
+
+// Variant of `_newReg(out, typeId, name)` that builds the register name from a
+// printf-style format string and variadic arguments.
+Error BaseCompiler::_newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...) {
+  va_list ap;
+  StringTmp<256> sb;
+
+  va_start(ap, fmt);
+  sb.appendVFormat(fmt, ap);
+  va_end(ap);
+
+  return _newReg(out, typeId, sb.data());
+}
+
+// Creates a new virtual register compatible with the reference register `ref`.
+//
+// When `ref` is a virtual register its TypeId is taken from the VirtReg and,
+// if the sizes differ, adjusted to match the size of `ref` (see the NOTE
+// below). Otherwise the TypeId is taken from `ref`'s physical register type.
+// `out` is reset on entry so it stays empty on failure.
+Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) {
+  out->reset();
+
+  RegInfo regInfo;
+  uint32_t typeId;
+
+  if (isVirtRegValid(ref)) {
+    VirtReg* vRef = virtRegByReg(ref);
+    typeId = vRef->typeId();
+
+    // NOTE: It's possible to cast one register type to another if it's the
+    // same register group. However, VirtReg always contains the TypeId that
+    // was used to create the register. This means that in some cases we may
+    // end up having different size of `ref` and `vRef`. In such case we
+    // adjust the TypeId to match the `ref` register type instead of the
+    // original register type, which should be the expected behavior.
+    uint32_t typeSize = Type::sizeOf(typeId);
+    uint32_t refSize = ref.size();
+
+    if (typeSize != refSize) {
+      if (Type::isInt(typeId)) {
+        // GP register - change TypeId to match `ref`, but keep sign of `vRef`.
+        switch (refSize) {
+          case 1: typeId = Type::kIdI8 | (typeId & 1); break;
+          case 2: typeId = Type::kIdI16 | (typeId & 1); break;
+          case 4: typeId = Type::kIdI32 | (typeId & 1); break;
+          case 8: typeId = Type::kIdI64 | (typeId & 1); break;
+          default: typeId = Type::kIdVoid; break;
+        }
+      }
+      else if (Type::isMmx(typeId)) {
+        // MMX register - always use 64-bit.
+        typeId = Type::kIdMmx64;
+      }
+      else if (Type::isMask(typeId)) {
+        // Mask register - change TypeId to match `ref` size.
+        switch (refSize) {
+          case 1: typeId = Type::kIdMask8; break;
+          case 2: typeId = Type::kIdMask16; break;
+          case 4: typeId = Type::kIdMask32; break;
+          case 8: typeId = Type::kIdMask64; break;
+          default: typeId = Type::kIdVoid; break;
+        }
+      }
+      else {
+        // VEC register - change TypeId to match `ref` size, keep vector metadata.
+        uint32_t elementTypeId = Type::baseOf(typeId);
+
+        switch (refSize) {
+          case 16: typeId = Type::_kIdVec128Start + (elementTypeId - Type::kIdI8); break;
+          case 32: typeId = Type::_kIdVec256Start + (elementTypeId - Type::kIdI8); break;
+          case 64: typeId = Type::_kIdVec512Start + (elementTypeId - Type::kIdI8); break;
+          default: typeId = Type::kIdVoid; break;
+        }
+      }
+
+      // kIdVoid here means the reference size had no matching type.
+      if (typeId == Type::kIdVoid)
+        return reportError(DebugUtils::errored(kErrorInvalidState));
+    }
+  }
+  else {
+    typeId = ref.type();
+  }
+
+  Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
+  if (ASMJIT_UNLIKELY(err))
+    return reportError(err);
+
+  VirtReg* vReg;
+  ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
+
+  out->_initReg(regInfo.signature(), vReg->id());
+  return kErrorOk;
+}
+
+// Variant of `_newReg(out, ref, name)` that builds the register name from a
+// printf-style format string and variadic arguments.
+Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...) {
+  va_list ap;
+  StringTmp<256> sb;
+
+  va_start(ap, fmt);
+  sb.appendVFormat(fmt, ap);
+  va_end(ap);
+
+  return _newReg(out, ref, sb.data());
+}
+
+// Allocates a virtual stack slot of `size` bytes and stores a memory operand
+// referencing it to `out`. Alignment must be a power of two; it is defaulted
+// to 1 when zero and clamped to 64. Fails on zero size or bad alignment.
+Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
+  out->reset();
+
+  if (size == 0)
+    return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+  if (alignment == 0)
+    alignment = 1;
+
+  if (!Support::isPowerOf2(alignment))
+    return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+  if (alignment > 64)
+    alignment = 64;
+
+  // The slot is backed by a type-less VirtReg marked as stack-only.
+  VirtReg* vReg;
+  ASMJIT_PROPAGATE(newVirtReg(&vReg, 0, 0, name));
+
+  vReg->_virtSize = size;
+  vReg->_isStack = true;
+  vReg->_alignment = uint8_t(alignment);
+
+  // Set the memory operand to GPD/GPQ and its id to VirtReg.
+  *out = BaseMem(BaseMem::Decomposed { _gpRegInfo.type(), vReg->id(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
+  return kErrorOk;
+}
+
+// Updates the size and/or alignment of the virtual stack slot `virtId`.
+// A zero `newSize` / `newAlignment` means "keep the current value".
+Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
+  // NOTE(review): unlike the other error paths in this file this one is not
+  // routed through reportError(), so the attached ErrorHandler is not
+  // notified - confirm whether that is intentional.
+  if (!isVirtIdValid(virtId))
+    return DebugUtils::errored(kErrorInvalidVirtId);
+
+  if (newAlignment && !Support::isPowerOf2(newAlignment))
+    return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+  if (newAlignment > 64)
+    newAlignment = 64;
+
+  VirtReg* vReg = virtRegById(virtId);
+  if (newSize)
+    vReg->_virtSize = newSize;
+
+  if (newAlignment)
+    vReg->_alignment = uint8_t(newAlignment);
+
+  // This is required if the RAPass is already running. There is a chance that
+  // a stack-slot has been already allocated and in that case it has to be
+  // updated as well, otherwise we would allocate wrong amount of memory.
+  RAWorkReg* workReg = vReg->_workReg;
+  if (workReg && workReg->_stackSlot) {
+    workReg->_stackSlot->_size = vReg->_virtSize;
+    workReg->_stackSlot->_alignment = vReg->_alignment;
+  }
+
+  return kErrorOk;
+}
+
+// Adds `size` bytes of constant `data` to the local or global constant pool
+// (selected by `scope`) and stores a label-relative memory operand referencing
+// the pooled constant to `out`. The pool node is created lazily on first use.
+Error BaseCompiler::_newConst(BaseMem* out, uint32_t scope, const void* data, size_t size) {
+  out->reset();
+  ConstPoolNode** pPool;
+
+  if (scope == ConstPool::kScopeLocal)
+    pPool = &_localConstPool;
+  else if (scope == ConstPool::kScopeGlobal)
+    pPool = &_globalConstPool;
+  else
+    return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+  if (!*pPool)
+    ASMJIT_PROPAGATE(_newConstPoolNode(pPool));
+
+  ConstPoolNode* pool = *pPool;
+  size_t off;
+  // The pool deduplicates; `off` is the constant's offset within the pool.
+  Error err = pool->add(data, size, off);
+
+  if (ASMJIT_UNLIKELY(err))
+    return reportError(err);
+
+  *out = BaseMem(BaseMem::Decomposed {
+    Label::kLabelTag,      // Base type.
+    pool->labelId(),       // Base id.
+    0,                     // Index type.
+    0,                     // Index id.
+    int32_t(off),          // Offset.
+    uint32_t(size),        // Size.
+    0                      // Flags.
+  });
+
+  return kErrorOk;
+}
+
+// Renames the virtual register behind `reg` using a printf-style format; an
+// empty/null format reverts to the auto-generated "%<index>" name. Silently
+// ignores physical registers and unknown ids.
+void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
+  if (!reg.isVirtReg()) return;
+
+  VirtReg* vReg = virtRegById(reg.id());
+  if (!vReg) return;
+
+  if (fmt && fmt[0] != '\0') {
+    char buf[128];
+    va_list ap;
+
+    va_start(ap, fmt);
+    vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
+    va_end(ap);
+
+    vReg->_name.setData(&_dataZone, buf, SIZE_MAX);
+  }
+  else {
+    BaseCompiler_assignGenericName(this, vReg);
+  }
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Jump Annotations]
+// ============================================================================
+
+// Creates a JumpNode (single-operand jump carrying an optional annotation of
+// its possible targets) without adding it to the stream. `*out` is assigned
+// before the null-check so it is null when allocation fails.
+Error BaseCompiler::newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation) {
+  JumpNode* node = _allocator.allocT<JumpNode>();
+  uint32_t opCount = 1;
+
+  *out = node;
+  if (ASMJIT_UNLIKELY(!node))
+    return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+  node = new(node) JumpNode(this, instId, instOptions, opCount, annotation);
+  node->setOp(0, o0);
+  node->resetOpRange(opCount, JumpNode::kBaseOpCapacity);
+
+  return kErrorOk;
+}
+
+// Emits an annotated jump instruction. Consumes (and resets) the emitter's
+// one-shot state - instruction options, extra register, and inline comment -
+// exactly like a regular emit would, then appends the new JumpNode.
+Error BaseCompiler::emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation) {
+  uint32_t options = instOptions() | forcedInstOptions();
+  RegOnly extra = extraReg();
+  const char* comment = inlineComment();
+
+  resetInstOptions();
+  resetInlineComment();
+  resetExtraReg();
+
+  JumpNode* node;
+  ASMJIT_PROPAGATE(newJumpNode(&node, instId, options, o0, annotation));
+
+  node->setExtraReg(extra);
+  // The comment is duplicated into the data zone as the caller's buffer may
+  // not outlive the node.
+  if (comment)
+    node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
+
+  addNode(node);
+  return kErrorOk;
+}
+
+// Creates a new JumpAnnotation owned by this compiler. The annotation id
+// equals its index in `_jumpAnnotations` (size is read before the append).
+// Returns null and reports the error on allocation failure.
+JumpAnnotation* BaseCompiler::newJumpAnnotation() {
+  if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
+    reportError(DebugUtils::errored(kErrorOutOfMemory));
+    return nullptr;
+  }
+
+  uint32_t id = _jumpAnnotations.size();
+  JumpAnnotation* jumpAnnotation = _allocator.newT<JumpAnnotation>(this, id);
+
+  if (!jumpAnnotation) {
+    reportError(DebugUtils::errored(kErrorOutOfMemory));
+    return nullptr;
+  }
+
+  _jumpAnnotations.appendUnsafe(jumpAnnotation);
+  return jumpAnnotation;
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Events]
+// ============================================================================
+
+// Attaches the compiler to `code` and installs the GlobalConstPoolPass.
+// If the pass cannot be added the attach is rolled back via onDetach().
+Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
+  ASMJIT_PROPAGATE(Base::onAttach(code));
+
+  Error err = addPassT<GlobalConstPoolPass>();
+  if (ASMJIT_UNLIKELY(err)) {
+    onDetach(code);
+    return err;
+  }
+
+  return kErrorOk;
+}
+
+// Detaches from `code`, dropping all compiler-local state: the current
+// function, both constant pools, and every VirtReg (array and backing zone).
+Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
+  _func = nullptr;
+  _localConstPool = nullptr;
+  _globalConstPool = nullptr;
+
+  _vRegArray.reset();
+  _vRegZone.reset();
+
+  return Base::onDetach(code);
+}
+
+// ============================================================================
+// [asmjit::FuncPass - Construction / Destruction]
+// ============================================================================
+
+// Creates a function-scoped pass identified by `name`.
+FuncPass::FuncPass(const char* name) noexcept
+  : Pass(name) {}
+
+// ============================================================================
+// [asmjit::FuncPass - Run]
+// ============================================================================
+
+// Iterates over the node stream and invokes `runOnFunction()` for every
+// `kNodeFunc` node. After processing a function the iterator is moved to its
+// end node first, so nodes inside the function body are never re-scanned.
+Error FuncPass::run(Zone* zone, Logger* logger) {
+  BaseNode* node = cb()->firstNode();
+  if (!node) return kErrorOk;
+
+  do {
+    if (node->type() == BaseNode::kNodeFunc) {
+      FuncNode* func = node->as<FuncNode>();
+      node = func->endNode();
+      ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
+    }
+
+    // Find a function by skipping all nodes that are not `kNodeFunc`.
+    do {
+      node = node->next();
+    } while (node && node->type() != BaseNode::kNodeFunc);
+  } while (node);
+
+  return kErrorOk;
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
diff --git a/client/asmjit/core/compiler.h b/client/asmjit/core/compiler.h
new file mode 100644
index 0000000..71f89ec
--- /dev/null
+++ b/client/asmjit/core/compiler.h
@@ -0,0 +1,816 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_COMPILER_H_INCLUDED
+#define ASMJIT_CORE_COMPILER_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/assembler.h"
+#include "../core/builder.h"
+#include "../core/constpool.h"
+#include "../core/func.h"
+#include "../core/inst.h"
+#include "../core/operand.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+struct RATiedReg;
+class RAWorkReg;
+
+class JumpAnnotation;
+
+class JumpNode;
+class FuncNode;
+class FuncRetNode;
+class InvokeNode;
+
+//! \addtogroup asmjit_compiler
+//! \{
+
+// ============================================================================
+// [asmjit::VirtReg]
+// ============================================================================
+
+//! Virtual register data, managed by \ref BaseCompiler.
+class VirtReg {
+public:
+  ASMJIT_NONCOPYABLE(VirtReg)
+
+  //! Virtual register id.
+  uint32_t _id;
+  //! Virtual register info (signature).
+  //!
+  //! \note Not set via the constructor's initializer list; the constructor
+  //! body assigns `_info._signature` directly.
+  RegInfo _info;
+  //! Virtual register size (can be smaller than `regInfo._size`).
+  uint32_t _virtSize;
+  //! Virtual register alignment (for spilling).
+  uint8_t _alignment;
+  //! Type-id.
+  uint8_t _typeId;
+  //! Virtual register weight for alloc/spill decisions.
+  uint8_t _weight;
+  //! True if this is a fixed register, never reallocated.
+  uint8_t _isFixed : 1;
+  //! True if the virtual register is only used as a stack (never accessed as register).
+  uint8_t _isStack : 1;
+  uint8_t _reserved : 6;
+
+  //! Virtual register name (user provided or automatically generated).
+  ZoneString<16> _name;
+
+  // -------------------------------------------------------------------------
+  // The following members are used exclusively by RAPass. They are initialized
+  // when the VirtReg is created to NULL pointers and then changed during RAPass
+  // execution. RAPass sets them back to NULL before it returns.
+  // -------------------------------------------------------------------------
+
+  //! Reference to `RAWorkReg`, used during register allocation.
+  RAWorkReg* _workReg;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates a VirtReg with the given id, register `signature`, virtual size,
+  //! spill `alignment` and `typeId`; starts with weight 1, no flags set, and
+  //! no associated RAWorkReg.
+  inline VirtReg(uint32_t id, uint32_t signature, uint32_t virtSize, uint32_t alignment, uint32_t typeId) noexcept
+    : _id(id),
+      _virtSize(virtSize),
+      _alignment(uint8_t(alignment)),
+      _typeId(uint8_t(typeId)),
+      _weight(1),
+      _isFixed(false),
+      _isStack(false),
+      _reserved(0),
+      _name(),
+      _workReg(nullptr) { _info._signature = signature; }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the virtual register id.
+  inline uint32_t id() const noexcept { return _id; }
+
+  //! Returns the virtual register name.
+  inline const char* name() const noexcept { return _name.data(); }
+  //! Returns the size of the virtual register name.
+  inline uint32_t nameSize() const noexcept { return _name.size(); }
+
+  //! Returns a register information that wraps the register signature.
+  inline const RegInfo& info() const noexcept { return _info; }
+  //! Returns a virtual register type (maps to the physical register type as well).
+  inline uint32_t type() const noexcept { return _info.type(); }
+  //! Returns a virtual register group (maps to the physical register group as well).
+  inline uint32_t group() const noexcept { return _info.group(); }
+
+  //! Returns a real size of the register this virtual register maps to.
+  //!
+  //! For example if this is a 128-bit SIMD register used for a scalar single
+  //! precision floating point value then its virtSize would be 4, however, the
+  //! `regSize` would still say 16 (128-bits), because it's the smallest size
+  //! of that register type.
+  inline uint32_t regSize() const noexcept { return _info.size(); }
+
+  //! Returns a register signature of this virtual register.
+  inline uint32_t signature() const noexcept { return _info.signature(); }
+
+  //! Returns the virtual register size.
+  //!
+  //! The virtual register size describes how many bytes the virtual register
+  //! needs to store its content. It can be smaller than the physical register
+  //! size, see `regSize()`.
+  inline uint32_t virtSize() const noexcept { return _virtSize; }
+
+  //! Returns the virtual register alignment.
+  inline uint32_t alignment() const noexcept { return _alignment; }
+
+  //! Returns the virtual register type id, see `Type::Id`.
+  inline uint32_t typeId() const noexcept { return _typeId; }
+
+  //! Returns the virtual register weight - the register allocator can use it
+  //! as explicit hint for alloc/spill decisions.
+  inline uint32_t weight() const noexcept { return _weight; }
+  //! Sets the virtual register weight (0 to 255) - the register allocator can
+  //! use it as explicit hint for alloc/spill decisions and initial bin-packing.
+  inline void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
+
+  //! Returns whether the virtual register is always allocated to a fixed
+  //! physical register (and never reallocated).
+  //!
+  //! \note This is only used for special purposes and it's mostly internal.
+  inline bool isFixed() const noexcept { return bool(_isFixed); }
+
+  //! Returns whether the virtual register is indeed a stack that only uses
+  //! the virtual register id for making it accessible.
+  //!
+  //! \note It's an error if a stack is accessed as a register.
+  inline bool isStack() const noexcept { return bool(_isStack); }
+
+  //! Tests whether a RAWorkReg is currently associated (RAPass running).
+  inline bool hasWorkReg() const noexcept { return _workReg != nullptr; }
+  inline RAWorkReg* workReg() const noexcept { return _workReg; }
+  inline void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
+  inline void resetWorkReg() noexcept { _workReg = nullptr; }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::BaseCompiler]
+// ============================================================================
+
+//! Code emitter that uses virtual registers and performs register allocation.
+//!
+//! Compiler is a high-level code-generation tool that provides register
+//! allocation and automatic handling of function calling conventions. It was
+//! primarily designed for merging multiple parts of code into a function
+//! without worrying about registers and function calling conventions.
+//!
+//! BaseCompiler can be used, with a minimum effort, to handle 32-bit and
+//! 64-bit code generation within a single code base.
+//!
+//! BaseCompiler is based on BaseBuilder and contains all the features it
+//! provides. It means that the code it stores can be modified (removed, added,
+//! injected) and analyzed. When the code is finalized the compiler can emit
+//! the code into an Assembler to translate the abstract representation into a
+//! machine code.
+//!
+//! Check out architecture specific compilers for more details and examples:
+//!
+//! - \ref x86::Compiler - X86/X64 compiler implementation.
+class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder {
+public:
+ ASMJIT_NONCOPYABLE(BaseCompiler)
+ typedef BaseBuilder Base;
+
+ //! Current function.
+ FuncNode* _func;
+ //! Allocates `VirtReg` objects.
+ Zone _vRegZone;
+ //! Stores array of `VirtReg` pointers.
+ ZoneVector<VirtReg*> _vRegArray;
+ //! Stores jump annotations.
+ ZoneVector<JumpAnnotation*> _jumpAnnotations;
+
+ //! Local constant pool, flushed at the end of each function.
+ ConstPoolNode* _localConstPool;
+ //! Global constant pool, flushed by `finalize()`.
+ ConstPoolNode* _globalConstPool;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseCompiler` instance.
+ ASMJIT_API BaseCompiler() noexcept;
+ //! Destroys the `BaseCompiler` instance.
+ ASMJIT_API virtual ~BaseCompiler() noexcept;
+
+ //! \}
+
+ //! \name Function Management
+ //! \{
+
+ //! Returns the current function.
+ inline FuncNode* func() const noexcept { return _func; }
+
+ //! Creates a new \ref FuncNode.
+ ASMJIT_API Error _newFuncNode(FuncNode** out, const FuncSignature& signature);
+ //! Creates a new \ref FuncNode adds it to the compiler.
+ ASMJIT_API Error _addFuncNode(FuncNode** out, const FuncSignature& signature);
+
+ //! Creates a new \ref FuncRetNode.
+ ASMJIT_API Error _newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
+ //! Creates a new \ref FuncRetNode and adds it to the compiler.
+ ASMJIT_API Error _addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
+
+ //! Creates a new \ref FuncNode with the given `signature` and returns it.
+ inline FuncNode* newFunc(const FuncSignature& signature) {
+ FuncNode* node;
+ _newFuncNode(&node, signature);
+ return node;
+ }
+
+ //! Creates a new \ref FuncNode with the given `signature`, adds it to the
+ //! compiler by using the \ref addFunc(FuncNode*) overload, and returns it.
+ inline FuncNode* addFunc(const FuncSignature& signature) {
+ FuncNode* node;
+ _addFuncNode(&node, signature);
+ return node;
+ }
+
+ //! Adds a function `node` to the instruction stream.
+ ASMJIT_API FuncNode* addFunc(FuncNode* func);
+ //! Emits a sentinel that marks the end of the current function.
+ ASMJIT_API Error endFunc();
+
+ //! Sets a function argument at `argIndex` to `reg`.
+ ASMJIT_API Error setArg(uint32_t argIndex, const BaseReg& reg);
+
+ inline FuncRetNode* newRet(const Operand_& o0, const Operand_& o1) {
+ FuncRetNode* node;
+ _newRetNode(&node, o0, o1);
+ return node;
+ }
+
+ inline FuncRetNode* addRet(const Operand_& o0, const Operand_& o1) {
+ FuncRetNode* node;
+ _addRetNode(&node, o0, o1);
+ return node;
+ }
+
+ //! \}
+
+ //! \name Function Invocation
+ //! \{
+
+ //! Creates a new \ref InvokeNode.
+ ASMJIT_API Error _newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
+ //! Creates a new \ref InvokeNode and adds it to Compiler.
+ ASMJIT_API Error _addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
+
+ //! Creates a new `InvokeNode`.
+ inline InvokeNode* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
+ InvokeNode* node;
+ _newInvokeNode(&node, instId, o0, signature);
+ return node;
+ }
+
+ //! Adds a new `InvokeNode`.
+ inline InvokeNode* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
+ InvokeNode* node;
+ _addInvokeNode(&node, instId, o0, signature);
+ return node;
+ }
+
+ //! \}
+
+ //! \name Virtual Registers
+ //! \{
+
+ //! Creates a new virtual register representing the given `typeId` and `signature`.
+ //!
+ //! \note This function is public, but it's not generally recommended to be used
+ //! by AsmJit users, use architecture-specific `newReg()` functionality instead
+ //! or functions like \ref _newReg() and \ref _newRegFmt().
+ ASMJIT_API Error newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name);
+
+ //! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
+ ASMJIT_API Error _newReg(BaseReg* out, uint32_t typeId, const char* name = nullptr);
+
+ //! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
+ //!
+  //! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
+ ASMJIT_API Error _newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...);
+
+ //! Creates a new virtual register compatible with the provided reference register `ref`.
+ ASMJIT_API Error _newReg(BaseReg* out, const BaseReg& ref, const char* name = nullptr);
+
+ //! Creates a new virtual register compatible with the provided reference register `ref`.
+ //!
+  //! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
+ ASMJIT_API Error _newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...);
+
+ //! Tests whether the given `id` is a valid virtual register id.
+ inline bool isVirtIdValid(uint32_t id) const noexcept {
+ uint32_t index = Operand::virtIdToIndex(id);
+ return index < _vRegArray.size();
+ }
+ //! Tests whether the given `reg` is a virtual register having a valid id.
+ inline bool isVirtRegValid(const BaseReg& reg) const noexcept {
+ return isVirtIdValid(reg.id());
+ }
+
+ //! Returns \ref VirtReg associated with the given `id`.
+ inline VirtReg* virtRegById(uint32_t id) const noexcept {
+ ASMJIT_ASSERT(isVirtIdValid(id));
+ return _vRegArray[Operand::virtIdToIndex(id)];
+ }
+
+ //! Returns \ref VirtReg associated with the given `reg`.
+ inline VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); }
+
+ //! Returns \ref VirtReg associated with the given virtual register `index`.
+ //!
+ //! \note This is not the same as virtual register id. The conversion between
+ //! id and its index is implemented by \ref Operand_::virtIdToIndex() and \ref
+ //! Operand_::indexToVirtId() functions.
+ inline VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
+
+ //! Returns an array of all virtual registers managed by the Compiler.
+ inline const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
+
+ //! \name Stack
+ //! \{
+
+ //! Creates a new stack of the given `size` and `alignment` and stores it to `out`.
+ //!
+ //! \note `name` can be used to give the stack a name, for debugging purposes.
+ ASMJIT_API Error _newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name = nullptr);
+
+ //! Updates the stack size of a stack created by `_newStack()` by its `virtId`.
+ ASMJIT_API Error setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment = 0);
+
+ //! Updates the stack size of a stack created by `_newStack()`.
+ inline Error setStackSize(const BaseMem& mem, uint32_t newSize, uint32_t newAlignment = 0) {
+ return setStackSize(mem.id(), newSize, newAlignment);
+ }
+
+ //! \}
+
+ //! \name Constants
+ //! \{
+
+ //! Creates a new constant of the given `scope` (see \ref ConstPool::Scope).
+ //!
+ //! This function adds a constant of the given `size` to the built-in \ref
+ //! ConstPool and stores the reference to that constant to the `out` operand.
+ ASMJIT_API Error _newConst(BaseMem* out, uint32_t scope, const void* data, size_t size);
+
+ //! \}
+
+ //! \name Miscellaneous
+ //! \{
+
+ //! Rename the given virtual register `reg` to a formatted string `fmt`.
+ ASMJIT_API void rename(const BaseReg& reg, const char* fmt, ...);
+
+ //! \}
+
+ //! \name Jump Annotations
+ //! \{
+
+ inline const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
+ return _jumpAnnotations;
+ }
+
+ ASMJIT_API Error newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation);
+ ASMJIT_API Error emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation);
+
+ //! Returns a new `JumpAnnotation` instance, which can be used to aggregate
+ //! possible targets of a jump where the target is not a label, for example
+ //! to implement jump tables.
+ ASMJIT_API JumpAnnotation* newJumpAnnotation();
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("alloc() has no effect, it will be removed in the future")
+ inline void alloc(BaseReg&) {}
+ ASMJIT_DEPRECATED("spill() has no effect, it will be removed in the future")
+ inline void spill(BaseReg&) {}
+#endif // !ASMJIT_NO_DEPRECATED
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::JumpAnnotation]
+// ============================================================================
+
+//! Jump annotation used to annotate jumps.
+//!
+//! \ref BaseCompiler allows to emit jumps where the target is either register
+//! or memory operand. Such jumps cannot be trivially inspected, so instead of
+//! doing heuristics AsmJit allows to annotate such jumps with possible targets.
+//! The register allocator then uses the annotation to construct control-flow, which
+//! is then used by liveness analysis and other tools to prepare ground for
+//! register allocation.
+class JumpAnnotation {
+public:
+ ASMJIT_NONCOPYABLE(JumpAnnotation)
+
+ //! Compiler that owns this JumpAnnotation.
+ BaseCompiler* _compiler;
+ //! Annotation identifier.
+ uint32_t _annotationId;
+ //! Vector of label identifiers, see \ref labelIds().
+ ZoneVector<uint32_t> _labelIds;
+
+ inline JumpAnnotation(BaseCompiler* compiler, uint32_t annotationId) noexcept
+ : _compiler(compiler),
+ _annotationId(annotationId) {}
+
+ //! Returns the compiler that owns this JumpAnnotation.
+ inline BaseCompiler* compiler() const noexcept { return _compiler; }
+ //! Returns the annotation id.
+ inline uint32_t annotationId() const noexcept { return _annotationId; }
+ //! Returns a vector of label identifiers that lists all targets of the jump.
+ const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
+
+ //! Tests whether the given `label` is a target of this JumpAnnotation.
+ inline bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
+ //! Tests whether the given `labelId` is a target of this JumpAnnotation.
+ inline bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
+
+ //! Adds the `label` to the list of targets of this JumpAnnotation.
+ inline Error addLabel(const Label& label) noexcept { return addLabelId(label.id()); }
+ //! Adds the `labelId` to the list of targets of this JumpAnnotation.
+ inline Error addLabelId(uint32_t labelId) noexcept { return _labelIds.append(&_compiler->_allocator, labelId); }
+};
+
+// ============================================================================
+// [asmjit::JumpNode]
+// ============================================================================
+
+//! Jump instruction with \ref JumpAnnotation.
+//!
+//! \note This node should be only used to represent jump where the jump target
+//! cannot be deduced by examining instruction operands. For example if the jump
+//! target is register or memory location. This pattern is often used to perform
+//! indirect jumps that use jump table, e.g. to implement `switch{}` statement.
+class JumpNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(JumpNode)
+
+ JumpAnnotation* _annotation;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_INLINE JumpNode(BaseCompiler* cc, uint32_t instId, uint32_t options, uint32_t opCount, JumpAnnotation* annotation) noexcept
+ : InstNode(cc, instId, options, opCount, kBaseOpCapacity),
+ _annotation(annotation) {
+ setType(kNodeJump);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether this JumpNode has associated a \ref JumpAnnotation.
+ inline bool hasAnnotation() const noexcept { return _annotation != nullptr; }
+ //! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`.
+ inline JumpAnnotation* annotation() const noexcept { return _annotation; }
+ //! Sets the \ref JumpAnnotation associated with this jump to `annotation`.
+ inline void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncNode]
+// ============================================================================
+
+//! Function node represents a function used by \ref BaseCompiler.
+//!
+//! A function is composed of the following:
+//!
+//! - Function entry, \ref FuncNode acts as a label, so the entry is implicit.
+//! To get the entry, simply use \ref FuncNode::label(), which is the same
+//! as \ref LabelNode::label().
+//!
+//! - Function exit, which is represented by \ref FuncNode::exitNode(). A
+//! helper function \ref FuncNode::exitLabel() exists and returns an exit
+//! label instead of node.
+//!
+//! - Function \ref FuncNode::endNode() sentinel. This node marks the end of
+//! a function - there should be no code that belongs to the function after
+//! this node, but the Compiler doesn't enforce that at the moment.
+//!
+//! - Function detail, see \ref FuncNode::detail().
+//!
+//! - Function frame, see \ref FuncNode::frame().
+//!
+//! - Function arguments mapped to virtual registers, see \ref FuncNode::args().
+//!
+//! In a node list, the function and its body looks like the following:
+//!
+//! \code{.unparsed}
+//! [...] - Anything before the function.
+//!
+//! [FuncNode] - Entry point of the function, acts as a label as well.
+//! <Prolog> - Prolog inserted by the register allocator.
+//! {...} - Function body - user code basically.
+//! [ExitLabel] - Exit label
+//! <Epilog> - Epilog inserted by the register allocator.
+//! <Return> - Return inserted by the register allocator.
+//! {...} - Can contain data or user code (error handling, special cases, ...).
+//! [FuncEnd] - End sentinel
+//!
+//! [...] - Anything after the function.
+//! \endcode
+//!
+//! When a function is added to the compiler by \ref BaseCompiler::addFunc() it
+//! actually inserts 3 nodes (FuncNode, ExitLabel, and FuncEnd) and sets the
+//! current cursor to be FuncNode. When \ref BaseCompiler::endFunc() is called
+//! the cursor is set to FuncEnd. This guarantees that user can use ExitLabel
+//! as a marker after which additional code or data can be placed, and it's a common
+//! practice.
+class FuncNode : public LabelNode {
+public:
+ ASMJIT_NONCOPYABLE(FuncNode)
+
+ //! Function detail.
+ FuncDetail _funcDetail;
+ //! Function frame.
+ FuncFrame _frame;
+ //! Function exit label.
+ LabelNode* _exitNode;
+ //! Function end (sentinel).
+ SentinelNode* _end;
+ //! Arguments array as `VirtReg`.
+ VirtReg** _args;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `FuncNode` instance.
+ //!
+ //! Always use `BaseCompiler::addFunc()` to create `FuncNode`.
+ ASMJIT_INLINE FuncNode(BaseBuilder* cb) noexcept
+ : LabelNode(cb),
+ _funcDetail(),
+ _frame(),
+ _exitNode(nullptr),
+ _end(nullptr),
+ _args(nullptr) {
+ setType(kNodeFunc);
+ }
+
+ //! \}
+
+ //! \{
+ //! \name Accessors
+
+ //! Returns function exit `LabelNode`.
+ inline LabelNode* exitNode() const noexcept { return _exitNode; }
+ //! Returns function exit label.
+ inline Label exitLabel() const noexcept { return _exitNode->label(); }
+
+ //! Returns "End of Func" sentinel.
+ inline SentinelNode* endNode() const noexcept { return _end; }
+
+ //! Returns function declaration.
+ inline FuncDetail& detail() noexcept { return _funcDetail; }
+ //! Returns function declaration.
+ inline const FuncDetail& detail() const noexcept { return _funcDetail; }
+
+ //! Returns function frame.
+ inline FuncFrame& frame() noexcept { return _frame; }
+ //! Returns function frame.
+ inline const FuncFrame& frame() const noexcept { return _frame; }
+
+ //! Returns arguments count.
+ inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
+ //! Returns returns count.
+ inline uint32_t retCount() const noexcept { return _funcDetail.retCount(); }
+
+ //! Returns arguments list.
+ inline VirtReg** args() const noexcept { return _args; }
+
+ //! Returns argument at `i`.
+ inline VirtReg* arg(uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < argCount());
+ return _args[i];
+ }
+
+ //! Sets argument at `i`.
+ inline void setArg(uint32_t i, VirtReg* vReg) noexcept {
+ ASMJIT_ASSERT(i < argCount());
+ _args[i] = vReg;
+ }
+
+ //! Resets argument at `i`.
+ inline void resetArg(uint32_t i) noexcept {
+ ASMJIT_ASSERT(i < argCount());
+ _args[i] = nullptr;
+ }
+
+ //! Returns function attributes.
+ inline uint32_t attributes() const noexcept { return _frame.attributes(); }
+ //! Adds `attrs` to the function attributes.
+ inline void addAttributes(uint32_t attrs) noexcept { _frame.addAttributes(attrs); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncRetNode]
+// ============================================================================
+
+//! Function return, used by \ref BaseCompiler.
+class FuncRetNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(FuncRetNode)
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `FuncRetNode` instance.
+ inline FuncRetNode(BaseBuilder* cb) noexcept : InstNode(cb, BaseInst::kIdAbstract, 0, 0) {
+ _any._nodeType = kNodeFuncRet;
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InvokeNode]
+// ============================================================================
+
+//! Function invocation, used by \ref BaseCompiler.
+class InvokeNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(InvokeNode)
+
+ //! Function detail.
+ FuncDetail _funcDetail;
+ //! Returns.
+ Operand_ _rets[2];
+ //! Arguments.
+ Operand_* _args;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `InvokeNode` instance.
+ inline InvokeNode(BaseBuilder* cb, uint32_t instId, uint32_t options) noexcept
+ : InstNode(cb, instId, options, kBaseOpCapacity),
+ _funcDetail(),
+ _args(nullptr) {
+ setType(kNodeInvoke);
+ _resetOps();
+ _rets[0].reset();
+ _rets[1].reset();
+ addFlags(kFlagIsRemovable);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets the function signature.
+ inline Error init(const FuncSignature& signature, const Environment& environment) noexcept {
+ return _funcDetail.init(signature, environment);
+ }
+
+ //! Returns the function detail.
+ inline FuncDetail& detail() noexcept { return _funcDetail; }
+ //! Returns the function detail.
+ inline const FuncDetail& detail() const noexcept { return _funcDetail; }
+
+ //! Returns the target operand.
+ inline Operand& target() noexcept { return _opArray[0].as<Operand>(); }
+ //! \overload
+ inline const Operand& target() const noexcept { return _opArray[0].as<Operand>(); }
+
+ //! Returns the number of function arguments.
+ inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
+ //! Returns the number of function return values.
+ inline uint32_t retCount() const noexcept { return _funcDetail.retCount(); }
+
+ //! Returns the return value at `i`.
+ inline Operand& ret(uint32_t i = 0) noexcept {
+ ASMJIT_ASSERT(i < 2);
+ return _rets[i].as<Operand>();
+ }
+ //! \overload
+ inline const Operand& ret(uint32_t i = 0) const noexcept {
+ ASMJIT_ASSERT(i < 2);
+ return _rets[i].as<Operand>();
+ }
+
+ //! Returns the function argument at `i`.
+ inline Operand& arg(uint32_t i) noexcept {
+ ASMJIT_ASSERT(i < kFuncArgCountLoHi);
+ return _args[i].as<Operand>();
+ }
+ //! \overload
+ inline const Operand& arg(uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < kFuncArgCountLoHi);
+ return _args[i].as<Operand>();
+ }
+
+ //! Sets the function argument at `i` to `op`.
+ ASMJIT_API bool _setArg(uint32_t i, const Operand_& op) noexcept;
+ //! Sets the function return value at `i` to `op`.
+ ASMJIT_API bool _setRet(uint32_t i, const Operand_& op) noexcept;
+
+ //! Sets the function argument at `i` to `reg`.
+ inline bool setArg(uint32_t i, const BaseReg& reg) noexcept { return _setArg(i, reg); }
+ //! Sets the function argument at `i` to `imm`.
+ inline bool setArg(uint32_t i, const Imm& imm) noexcept { return _setArg(i, imm); }
+
+ //! Sets the function return value at `i` to `var`.
+ inline bool setRet(uint32_t i, const BaseReg& reg) noexcept { return _setRet(i, reg); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncPass]
+// ============================================================================
+
+//! Function pass extends \ref Pass with \ref FuncPass::runOnFunction().
+class ASMJIT_VIRTAPI FuncPass : public Pass {
+public:
+ ASMJIT_NONCOPYABLE(FuncPass)
+ typedef Pass Base;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API FuncPass(const char* name) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the associated `BaseCompiler`.
+ inline BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
+
+ //! \}
+
+ //! \name Run
+ //! \{
+
+ //! Calls `runOnFunction()` on each `FuncNode` node found.
+ ASMJIT_API Error run(Zone* zone, Logger* logger) override;
+
+ //! Called once per `FuncNode`.
+ virtual Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) = 0;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_COMPILER_H_INCLUDED
diff --git a/client/asmjit/core/constpool.cpp b/client/asmjit/core/constpool.cpp
new file mode 100644
index 0000000..4db68e2
--- /dev/null
+++ b/client/asmjit/core/constpool.cpp
@@ -0,0 +1,375 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/constpool.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ConstPool - Construction / Destruction]
+// ============================================================================
+
+ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
+ConstPool::~ConstPool() noexcept {}
+
+// ============================================================================
+// [asmjit::ConstPool - Reset]
+// ============================================================================
+
+void ConstPool::reset(Zone* zone) noexcept {
+ _zone = zone;
+
+ size_t dataSize = 1;
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
+ _tree[i].reset();
+ _tree[i].setDataSize(dataSize);
+ _gaps[i] = nullptr;
+ dataSize <<= 1;
+ }
+
+ _gapPool = nullptr;
+ _size = 0;
+ _alignment = 0;
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Ops]
+// ============================================================================
+
+static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
+ ConstPool::Gap* gap = self->_gapPool;
+ if (!gap)
+ return self->_zone->allocT<ConstPool::Gap>();
+
+ self->_gapPool = gap->_next;
+ return gap;
+}
+
+static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
+ gap->_next = self->_gapPool;
+ self->_gapPool = gap;
+}
+
+static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept {
+ ASMJIT_ASSERT(size > 0);
+
+ while (size > 0) {
+ size_t gapIndex;
+ size_t gapSize;
+
+ if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
+ gapIndex = ConstPool::kIndex16;
+ gapSize = 16;
+ }
+ else if (size >= 8 && Support::isAligned<size_t>(offset, 8)) {
+ gapIndex = ConstPool::kIndex8;
+ gapSize = 8;
+ }
+ else if (size >= 4 && Support::isAligned<size_t>(offset, 4)) {
+ gapIndex = ConstPool::kIndex4;
+ gapSize = 4;
+ }
+ else if (size >= 2 && Support::isAligned<size_t>(offset, 2)) {
+ gapIndex = ConstPool::kIndex2;
+ gapSize = 2;
+ }
+ else {
+ gapIndex = ConstPool::kIndex1;
+ gapSize = 1;
+ }
+
+ // We don't have to check for errors here, if this failed nothing really
+ // happened (just the gap won't be visible) and it will fail again at
+ // place where the same check would generate `kErrorOutOfMemory` error.
+ ConstPool::Gap* gap = ConstPool_allocGap(self);
+ if (!gap)
+ return;
+
+ gap->_next = self->_gaps[gapIndex];
+ self->_gaps[gapIndex] = gap;
+
+ gap->_offset = offset;
+ gap->_size = gapSize;
+
+ offset += gapSize;
+ size -= gapSize;
+ }
+}
+
+Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
+ size_t treeIndex;
+
+ if (size == 32)
+ treeIndex = kIndex32;
+ else if (size == 16)
+ treeIndex = kIndex16;
+ else if (size == 8)
+ treeIndex = kIndex8;
+ else if (size == 4)
+ treeIndex = kIndex4;
+ else if (size == 2)
+ treeIndex = kIndex2;
+ else if (size == 1)
+ treeIndex = kIndex1;
+ else
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ ConstPool::Node* node = _tree[treeIndex].get(data);
+ if (node) {
+ dstOffset = node->_offset;
+ return kErrorOk;
+ }
+
+ // Before incrementing the current offset try if there is a gap that can
+ // be used for the requested data.
+ size_t offset = ~size_t(0);
+ size_t gapIndex = treeIndex;
+
+ while (gapIndex != kIndexCount - 1) {
+ ConstPool::Gap* gap = _gaps[treeIndex];
+
+ // Check if there is a gap.
+ if (gap) {
+ size_t gapOffset = gap->_offset;
+ size_t gapSize = gap->_size;
+
+ // Destroy the gap for now.
+ _gaps[treeIndex] = gap->_next;
+ ConstPool_freeGap(this, gap);
+
+ offset = gapOffset;
+ ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
+
+ gapSize -= size;
+ if (gapSize > 0)
+ ConstPool_addGap(this, gapOffset, gapSize);
+ }
+
+ gapIndex++;
+ }
+
+ if (offset == ~size_t(0)) {
+ // Get how many bytes have to be skipped so the address is aligned accordingly
+ // to the 'size'.
+ size_t diff = Support::alignUpDiff<size_t>(_size, size);
+
+ if (diff != 0) {
+ ConstPool_addGap(this, _size, diff);
+ _size += diff;
+ }
+
+ offset = _size;
+ _size += size;
+ }
+
+ // Add the initial node to the right index.
+ node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
+ if (!node) return DebugUtils::errored(kErrorOutOfMemory);
+
+ _tree[treeIndex].insert(node);
+ _alignment = Support::max<size_t>(_alignment, size);
+
+ dstOffset = offset;
+
+ // Now create a bunch of shared constants that are based on the data pattern.
+ // We stop at size 4, it probably doesn't make sense to split constants down
+ // to 1 byte.
+ size_t pCount = 1;
+ while (size > 4) {
+ size >>= 1;
+ pCount <<= 1;
+
+ ASMJIT_ASSERT(treeIndex != 0);
+ treeIndex--;
+
+ const uint8_t* pData = static_cast<const uint8_t*>(data);
+ for (size_t i = 0; i < pCount; i++, pData += size) {
+ node = _tree[treeIndex].get(pData);
+ if (node) continue;
+
+ node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
+ _tree[treeIndex].insert(node);
+ }
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Reset]
+// ============================================================================
+
+struct ConstPoolFill {
+ inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
+ _dst(dst),
+ _dataSize(dataSize) {}
+
+ inline void operator()(const ConstPool::Node* node) noexcept {
+ if (!node->_shared)
+ memcpy(_dst + node->_offset, node->data(), _dataSize);
+ }
+
+ uint8_t* _dst;
+ size_t _dataSize;
+};
+
+void ConstPool::fill(void* dst) const noexcept {
+ // Clears possible gaps, asmjit should never emit garbage to the output.
+ memset(dst, 0, _size);
+
+ ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
+ _tree[i].forEach(filler);
+ filler._dataSize <<= 1;
+ }
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(const_pool) {
+ Zone zone(32384 - Zone::kBlockOverhead);
+ ConstPool pool(&zone);
+
+ uint32_t i;
+ uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;
+
+ INFO("Adding %u constants to the pool.", kCount);
+ {
+ size_t prevOffset;
+ size_t curOffset;
+ uint64_t c = 0x0101010101010101u;
+
+ EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk);
+ EXPECT(prevOffset == 0);
+
+ for (i = 1; i < kCount; i++) {
+ c++;
+ EXPECT(pool.add(&c, 8, curOffset) == kErrorOk);
+ EXPECT(prevOffset + 8 == curOffset);
+ EXPECT(pool.size() == (i + 1) * 8);
+ prevOffset = curOffset;
+ }
+
+ EXPECT(pool.alignment() == 8);
+ }
+
+ INFO("Retrieving %u constants from the pool.", kCount);
+ {
+ uint64_t c = 0x0101010101010101u;
+
+ for (i = 0; i < kCount; i++) {
+ size_t offset;
+ EXPECT(pool.add(&c, 8, offset) == kErrorOk);
+ EXPECT(offset == i * 8);
+ c++;
+ }
+ }
+
+ INFO("Checking if the constants were split into 4-byte patterns");
+ {
+ uint32_t c = 0x01010101;
+ for (i = 0; i < kCount; i++) {
+ size_t offset;
+ EXPECT(pool.add(&c, 4, offset) == kErrorOk);
+ EXPECT(offset == i * 8);
+ c++;
+ }
+ }
+
+ INFO("Adding 2 byte constant to misalign the current offset");
+ {
+ uint16_t c = 0xFFFF;
+ size_t offset;
+
+ EXPECT(pool.add(&c, 2, offset) == kErrorOk);
+ EXPECT(offset == kCount * 8);
+ EXPECT(pool.alignment() == 8);
+ }
+
+ INFO("Adding 8 byte constant to check if pool gets aligned again");
+ {
+ uint64_t c = 0xFFFFFFFFFFFFFFFFu;
+ size_t offset;
+
+ EXPECT(pool.add(&c, 8, offset) == kErrorOk);
+ EXPECT(offset == kCount * 8 + 8);
+ }
+
+ INFO("Adding 2 byte constant to verify the gap is filled");
+ {
+ uint16_t c = 0xFFFE;
+ size_t offset;
+
+ EXPECT(pool.add(&c, 2, offset) == kErrorOk);
+ EXPECT(offset == kCount * 8 + 2);
+ EXPECT(pool.alignment() == 8);
+ }
+
+ INFO("Checking reset functionality");
+ {
+ pool.reset(&zone);
+ zone.reset();
+
+ EXPECT(pool.size() == 0);
+ EXPECT(pool.alignment() == 0);
+ }
+
+ INFO("Checking pool alignment when combined constants are added");
+ {
+ uint8_t bytes[32] = { 0 };
+ size_t offset;
+
+ pool.add(bytes, 1, offset);
+ EXPECT(pool.size() == 1);
+ EXPECT(pool.alignment() == 1);
+ EXPECT(offset == 0);
+
+ pool.add(bytes, 2, offset);
+ EXPECT(pool.size() == 4);
+ EXPECT(pool.alignment() == 2);
+ EXPECT(offset == 2);
+
+ pool.add(bytes, 4, offset);
+ EXPECT(pool.size() == 8);
+ EXPECT(pool.alignment() == 4);
+ EXPECT(offset == 4);
+
+ pool.add(bytes, 4, offset);
+ EXPECT(pool.size() == 8);
+ EXPECT(pool.alignment() == 4);
+ EXPECT(offset == 4);
+
+ pool.add(bytes, 32, offset);
+ EXPECT(pool.size() == 64);
+ EXPECT(pool.alignment() == 32);
+ EXPECT(offset == 32);
+ }
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/constpool.h b/client/asmjit/core/constpool.h
new file mode 100644
index 0000000..d9ac589
--- /dev/null
+++ b/client/asmjit/core/constpool.h
@@ -0,0 +1,262 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CONSTPOOL_H_INCLUDED
+#define ASMJIT_CORE_CONSTPOOL_H_INCLUDED
+
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonetree.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_utilities
+//! \{
+
+// ============================================================================
+// [asmjit::ConstPool]
+// ============================================================================
+
+//! Constant pool.
+class ConstPool {
+public:
+ ASMJIT_NONCOPYABLE(ConstPool)
+
+ //! Constant pool scope.
+ enum Scope : uint32_t {
+ //! Local constant, always embedded right after the current function.
+ kScopeLocal = 0,
+ //! Global constant, embedded at the end of the currently compiled code.
+ kScopeGlobal = 1
+ };
+
+ //! \cond INTERNAL
+
+ //! Index of a given size in const-pool table.
+ enum Index : uint32_t {
+ kIndex1 = 0,
+ kIndex2 = 1,
+ kIndex4 = 2,
+ kIndex8 = 3,
+ kIndex16 = 4,
+ kIndex32 = 5,
+ kIndexCount = 6
+ };
+
+ //! Zone-allocated const-pool gap created by two differently aligned constants.
+ struct Gap {
+ //! Pointer to the next gap
+ Gap* _next;
+ //! Offset of the gap.
+ size_t _offset;
+ //! Remaining bytes of the gap (basically a gap size).
+ size_t _size;
+ };
+
+ //! Zone-allocated const-pool node.
+ class Node : public ZoneTreeNodeT<Node> {
+ public:
+ ASMJIT_NONCOPYABLE(Node)
+
+ //! If this constant is shared with another.
+ uint32_t _shared : 1;
+ //! Data offset from the beginning of the pool.
+ uint32_t _offset;
+
+ inline Node(size_t offset, bool shared) noexcept
+ : ZoneTreeNodeT<Node>(),
+ _shared(shared),
+ _offset(uint32_t(offset)) {}
+
+ inline void* data() const noexcept {
+ return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
+ }
+ };
+
+ //! Data comparer used internally.
+ class Compare {
+ public:
+ size_t _dataSize;
+
+ inline Compare(size_t dataSize) noexcept
+ : _dataSize(dataSize) {}
+
+ inline int operator()(const Node& a, const Node& b) const noexcept {
+ return ::memcmp(a.data(), b.data(), _dataSize);
+ }
+
+ inline int operator()(const Node& a, const void* data) const noexcept {
+ return ::memcmp(a.data(), data, _dataSize);
+ }
+ };
+
+ //! Zone-allocated const-pool tree.
+ struct Tree {
+ //! RB tree.
+ ZoneTree<Node> _tree;
+ //! Size of the tree (number of nodes).
+ size_t _size;
+ //! Size of the data.
+ size_t _dataSize;
+
+ inline explicit Tree(size_t dataSize = 0) noexcept
+ : _tree(),
+ _size(0),
+ _dataSize(dataSize) {}
+
+ inline void reset() noexcept {
+ _tree.reset();
+ _size = 0;
+ }
+
+ inline bool empty() const noexcept { return _size == 0; }
+ inline size_t size() const noexcept { return _size; }
+
+ inline void setDataSize(size_t dataSize) noexcept {
+ ASMJIT_ASSERT(empty());
+ _dataSize = dataSize;
+ }
+
+ inline Node* get(const void* data) noexcept {
+ Compare cmp(_dataSize);
+ return _tree.get(data, cmp);
+ }
+
+ inline void insert(Node* node) noexcept {
+ Compare cmp(_dataSize);
+ _tree.insert(node, cmp);
+ _size++;
+ }
+
+ template<typename Visitor>
+ inline void forEach(Visitor& visitor) const noexcept {
+ Node* node = _tree.root();
+ if (!node) return;
+
+ Node* stack[Globals::kMaxTreeHeight];
+ size_t top = 0;
+
+ for (;;) {
+ Node* left = node->left();
+ if (left != nullptr) {
+ ASMJIT_ASSERT(top != Globals::kMaxTreeHeight);
+ stack[top++] = node;
+
+ node = left;
+ continue;
+ }
+
+ for (;;) {
+ visitor(node);
+ node = node->right();
+
+ if (node != nullptr)
+ break;
+
+ if (top == 0)
+ return;
+
+ node = stack[--top];
+ }
+ }
+ }
+
+ static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
+ Node* node = zone->allocT<Node>(sizeof(Node) + size);
+ if (ASMJIT_UNLIKELY(!node)) return nullptr;
+
+ node = new(node) Node(offset, shared);
+ memcpy(node->data(), data, size);
+ return node;
+ }
+ };
+
+ //! \endcond
+
+ //! Zone allocator.
+ Zone* _zone;
+ //! Tree per size.
+ Tree _tree[kIndexCount];
+ //! Gaps per size.
+ Gap* _gaps[kIndexCount];
+ //! Gaps pool
+ Gap* _gapPool;
+
+ //! Size of the pool (in bytes).
+ size_t _size;
+ //! Required pool alignment.
+ size_t _alignment;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API ConstPool(Zone* zone) noexcept;
+ ASMJIT_API ~ConstPool() noexcept;
+
+ ASMJIT_API void reset(Zone* zone) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the constant-pool is empty.
+ inline bool empty() const noexcept { return _size == 0; }
+ //! Returns the size of the constant-pool in bytes.
+ inline size_t size() const noexcept { return _size; }
+ //! Returns minimum alignment.
+ inline size_t alignment() const noexcept { return _alignment; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Adds a constant to the constant pool.
+ //!
+ //! The constant must have known size, which is 1, 2, 4, 8, 16 or 32 bytes.
+//! The constant is added to the pool only if it doesn't already exist, otherwise
+ //! cached value is returned.
+ //!
+ //! AsmJit is able to subdivide added constants, so for example if you add
+ //! 8-byte constant 0x1122334455667788 it will create the following slots:
+ //!
+ //! 8-byte: 0x1122334455667788
+ //! 4-byte: 0x11223344, 0x55667788
+ //!
+ //! The reason is that when combining MMX/SSE/AVX code some patterns are used
+ //! frequently. However, AsmJit is not able to reallocate a constant that has
+ //! been already added. For example if you try to add 4-byte constant and then
+ //! 8-byte constant having the same 4-byte pattern as the previous one, two
+ //! independent slots will be generated by the pool.
+ ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
+
+ //! Fills the destination with the content of this constant pool.
+ ASMJIT_API void fill(void* dst) const noexcept;
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CONSTPOOL_H_INCLUDED
diff --git a/client/asmjit/core/cpuinfo.cpp b/client/asmjit/core/cpuinfo.cpp
new file mode 100644
index 0000000..edc7d17
--- /dev/null
+++ b/client/asmjit/core/cpuinfo.cpp
@@ -0,0 +1,97 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/cpuinfo.h"
+
+#if !defined(_WIN32)
+ #include <errno.h>
+ #include <sys/utsname.h>
+ #include <unistd.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - CPU NumThreads]
+// ============================================================================
+
+#if defined(_WIN32)
+static inline uint32_t detectHWThreadCount() noexcept {
+ SYSTEM_INFO info;
+ ::GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+}
+#elif defined(_SC_NPROCESSORS_ONLN)
+static inline uint32_t detectHWThreadCount() noexcept {
+ long res = ::sysconf(_SC_NPROCESSORS_ONLN);
+ return res <= 0 ? uint32_t(1) : uint32_t(res);
+}
+#else
+static inline uint32_t detectHWThreadCount() noexcept {
+ return 1;
+}
+#endif
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - CPU Features]
+// ============================================================================
+
+#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
+namespace x86 { void detectCpu(CpuInfo& cpu) noexcept; }
+#endif
+
+#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
+namespace arm { void detectCpu(CpuInfo& cpu) noexcept; }
+#endif
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - Static Initializer]
+// ============================================================================
+
+static uint32_t cpuInfoInitialized;
+static CpuInfo cpuInfoGlobal(Globals::NoInit);
+
+const CpuInfo& CpuInfo::host() noexcept {
+ // This should never cause a problem as the resulting information should
+ // always be the same.
+ if (!cpuInfoInitialized) {
+ CpuInfo cpuInfoLocal;
+
+#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
+ x86::detectCpu(cpuInfoLocal);
+#endif
+
+#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
+ arm::detectCpu(cpuInfoLocal);
+#endif
+
+ cpuInfoLocal._hwThreadCount = detectHWThreadCount();
+ cpuInfoGlobal = cpuInfoLocal;
+ cpuInfoInitialized = 1;
+ }
+
+ return cpuInfoGlobal;
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/cpuinfo.h b/client/asmjit/core/cpuinfo.h
new file mode 100644
index 0000000..ee2c9e5
--- /dev/null
+++ b/client/asmjit/core/cpuinfo.h
@@ -0,0 +1,154 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CPUINFO_H_INCLUDED
+#define ASMJIT_CORE_CPUINFO_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/features.h"
+#include "../core/globals.h"
+#include "../core/string.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::CpuInfo]
+// ============================================================================
+
+//! CPU information.
+class CpuInfo {
+public:
+ //! Architecture.
+ uint8_t _arch;
+ //! Sub-architecture.
+ uint8_t _subArch;
+ //! Reserved for future use.
+ uint16_t _reserved;
+ //! CPU family ID.
+ uint32_t _familyId;
+ //! CPU model ID.
+ uint32_t _modelId;
+ //! CPU brand ID.
+ uint32_t _brandId;
+ //! CPU stepping.
+ uint32_t _stepping;
+ //! Processor type.
+ uint32_t _processorType;
+ //! Maximum number of addressable IDs for logical processors.
+ uint32_t _maxLogicalProcessors;
+ //! Cache line size (in bytes).
+ uint32_t _cacheLineSize;
+ //! Number of hardware threads.
+ uint32_t _hwThreadCount;
+
+ //! CPU vendor string.
+ FixedString<16> _vendor;
+ //! CPU brand string.
+ FixedString<64> _brand;
+ //! CPU features.
+ BaseFeatures _features;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline CpuInfo() noexcept { reset(); }
+ inline CpuInfo(const CpuInfo& other) noexcept = default;
+
+ inline explicit CpuInfo(Globals::NoInit_) noexcept
+ : _features(Globals::NoInit) {};
+
+ //! Returns the host CPU information.
+ ASMJIT_API static const CpuInfo& host() noexcept;
+
+ //! Initializes CpuInfo to the given architecture, see \ref Environment.
+ inline void initArch(uint32_t arch, uint32_t subArch = 0u) noexcept {
+ _arch = uint8_t(arch);
+ _subArch = uint8_t(subArch);
+ }
+
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline CpuInfo& operator=(const CpuInfo& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the CPU architecture id, see \ref Environment::Arch.
+ inline uint32_t arch() const noexcept { return _arch; }
+ //! Returns the CPU architecture sub-id, see \ref Environment::SubArch.
+ inline uint32_t subArch() const noexcept { return _subArch; }
+
+ //! Returns the CPU family ID.
+ inline uint32_t familyId() const noexcept { return _familyId; }
+ //! Returns the CPU model ID.
+ inline uint32_t modelId() const noexcept { return _modelId; }
+  //! Returns the CPU brand ID.
+ inline uint32_t brandId() const noexcept { return _brandId; }
+ //! Returns the CPU stepping.
+ inline uint32_t stepping() const noexcept { return _stepping; }
+ //! Returns the processor type.
+ inline uint32_t processorType() const noexcept { return _processorType; }
+ //! Returns the number of maximum logical processors.
+ inline uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; }
+
+ //! Returns the size of a cache line flush.
+ inline uint32_t cacheLineSize() const noexcept { return _cacheLineSize; }
+ //! Returns number of hardware threads available.
+ inline uint32_t hwThreadCount() const noexcept { return _hwThreadCount; }
+
+ //! Returns the CPU vendor.
+ inline const char* vendor() const noexcept { return _vendor.str; }
+ //! Tests whether the CPU vendor is equal to `s`.
+ inline bool isVendor(const char* s) const noexcept { return _vendor.eq(s); }
+
+ //! Returns the CPU brand string.
+ inline const char* brand() const noexcept { return _brand.str; }
+
+ //! Returns all CPU features as `BaseFeatures`, cast to your arch-specific class
+ //! if needed.
+ template<typename T = BaseFeatures>
+ inline const T& features() const noexcept { return _features.as<T>(); }
+
+ //! Tests whether the CPU has the given `feature`.
+ inline bool hasFeature(uint32_t featureId) const noexcept { return _features.has(featureId); }
+ //! Adds the given CPU `feature` to the list of this CpuInfo features.
+ inline CpuInfo& addFeature(uint32_t featureId) noexcept { _features.add(featureId); return *this; }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CPUINFO_H_INCLUDED
diff --git a/client/asmjit/core/datatypes.h b/client/asmjit/core/datatypes.h
new file mode 100644
index 0000000..2f6cc1e
--- /dev/null
+++ b/client/asmjit/core/datatypes.h
@@ -0,0 +1,1071 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_DATATYPES_H_INCLUDED
+#define ASMJIT_CORE_DATATYPES_H_INCLUDED
+
+#include "../core/globals.h"
+
+#ifndef ASMJIT_NO_DEPRECATED
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Data64]
+// ============================================================================
+
+//! 64-bit data useful for creating SIMD constants.
+union ASMJIT_DEPRECATED_STRUCT("Data64 is deprecated and will be removed in the future") Data64 {
+ //! Array of eight 8-bit signed integers.
+ int8_t sb[8];
+ //! Array of eight 8-bit unsigned integers.
+ uint8_t ub[8];
+ //! Array of four 16-bit signed integers.
+ int16_t sw[4];
+ //! Array of four 16-bit unsigned integers.
+ uint16_t uw[4];
+ //! Array of two 32-bit signed integers.
+ int32_t sd[2];
+ //! Array of two 32-bit unsigned integers.
+ uint32_t ud[2];
+ //! Array of one 64-bit signed integer.
+ int64_t sq[1];
+ //! Array of one 64-bit unsigned integer.
+ uint64_t uq[1];
+
+ //! Array of two SP-FP values.
+ float sf[2];
+ //! Array of one DP-FP value.
+ double df[1];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Sets all eight 8-bit signed integers.
+ static inline Data64 fromI8(int8_t x0) noexcept {
+ Data64 self;
+ self.setI8(x0);
+ return self;
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ static inline Data64 fromU8(uint8_t x0) noexcept {
+ Data64 self;
+ self.setU8(x0);
+ return self;
+ }
+
+ //! Sets all eight 8-bit signed integers.
+ static inline Data64 fromI8(
+ int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept {
+
+ Data64 self;
+ self.setI8(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ static inline Data64 fromU8(
+ uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept {
+
+ Data64 self;
+ self.setU8(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four 16-bit signed integers.
+ static inline Data64 fromI16(int16_t x0) noexcept {
+ Data64 self;
+ self.setI16(x0);
+ return self;
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ static inline Data64 fromU16(uint16_t x0) noexcept {
+ Data64 self;
+ self.setU16(x0);
+ return self;
+ }
+
+ //! Sets all four 16-bit signed integers.
+ static inline Data64 fromI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept {
+ Data64 self;
+ self.setI16(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ static inline Data64 fromU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept {
+ Data64 self;
+ self.setU16(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ static inline Data64 fromI32(int32_t x0) noexcept {
+ Data64 self;
+ self.setI32(x0);
+ return self;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ static inline Data64 fromU32(uint32_t x0) noexcept {
+ Data64 self;
+ self.setU32(x0);
+ return self;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ static inline Data64 fromI32(int32_t x0, int32_t x1) noexcept {
+ Data64 self;
+ self.setI32(x0, x1);
+ return self;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ static inline Data64 fromU32(uint32_t x0, uint32_t x1) noexcept {
+ Data64 self;
+ self.setU32(x0, x1);
+ return self;
+ }
+
+ //! Sets 64-bit signed integer.
+ static inline Data64 fromI64(int64_t x0) noexcept {
+ Data64 self;
+ self.setI64(x0);
+ return self;
+ }
+
+ //! Sets 64-bit unsigned integer.
+ static inline Data64 fromU64(uint64_t x0) noexcept {
+ Data64 self;
+ self.setU64(x0);
+ return self;
+ }
+
+ //! Sets all two SP-FP values.
+ static inline Data64 fromF32(float x0) noexcept {
+ Data64 self;
+ self.setF32(x0);
+ return self;
+ }
+
+ //! Sets all two SP-FP values.
+ static inline Data64 fromF32(float x0, float x1) noexcept {
+ Data64 self;
+ self.setF32(x0, x1);
+ return self;
+ }
+
+ //! Sets all two SP-FP values.
+ static inline Data64 fromF64(double x0) noexcept {
+ Data64 self;
+ self.setF64(x0);
+ return self;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets all eight 8-bit signed integers.
+ inline void setI8(int8_t x0) noexcept {
+ setU8(uint8_t(x0));
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ inline void setU8(uint8_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0101010101010101u;
+ uq[0] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x01010101u;
+ ud[0] = xd;
+ ud[1] = xd;
+ }
+ }
+
+ //! Sets all eight 8-bit signed integers.
+ inline void setI8(
+ int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept {
+
+ sb[0] = x0; sb[1] = x1; sb[2] = x2; sb[3] = x3;
+ sb[4] = x4; sb[5] = x5; sb[6] = x6; sb[7] = x7;
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ inline void setU8(
+ uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept {
+
+ ub[0] = x0; ub[1] = x1; ub[2] = x2; ub[3] = x3;
+ ub[4] = x4; ub[5] = x5; ub[6] = x6; ub[7] = x7;
+ }
+
+ //! Sets all four 16-bit signed integers.
+ inline void setI16(int16_t x0) noexcept {
+ setU16(uint16_t(x0));
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ inline void setU16(uint16_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0001000100010001u;
+ uq[0] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x00010001u;
+ ud[0] = xd;
+ ud[1] = xd;
+ }
+ }
+
+ //! Sets all four 16-bit signed integers.
+ inline void setI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept {
+ sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3;
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ inline void setU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept {
+ uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ inline void setI32(int32_t x0) noexcept {
+ sd[0] = x0; sd[1] = x0;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ inline void setU32(uint32_t x0) noexcept {
+ ud[0] = x0; ud[1] = x0;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ inline void setI32(int32_t x0, int32_t x1) noexcept {
+ sd[0] = x0; sd[1] = x1;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ inline void setU32(uint32_t x0, uint32_t x1) noexcept {
+ ud[0] = x0; ud[1] = x1;
+ }
+
+ //! Sets 64-bit signed integer.
+ inline void setI64(int64_t x0) noexcept {
+ sq[0] = x0;
+ }
+
+ //! Sets 64-bit unsigned integer.
+ inline void setU64(uint64_t x0) noexcept {
+ uq[0] = x0;
+ }
+
+ //! Sets all two SP-FP values.
+ inline void setF32(float x0) noexcept {
+ sf[0] = x0; sf[1] = x0;
+ }
+
+ //! Sets all two SP-FP values.
+ inline void setF32(float x0, float x1) noexcept {
+ sf[0] = x0; sf[1] = x1;
+ }
+
+ //! Sets all two SP-FP values.
+ inline void setF64(double x0) noexcept {
+ df[0] = x0;
+ }
+};
+
+// ============================================================================
+// [asmjit::Data128]
+// ============================================================================
+
+//! 128-bit data useful for creating SIMD constants.
+union ASMJIT_DEPRECATED_STRUCT("Data128 is deprecated and will be removed in the future") Data128 {
+ //! Array of sixteen 8-bit signed integers.
+ int8_t sb[16];
+ //! Array of sixteen 8-bit unsigned integers.
+ uint8_t ub[16];
+ //! Array of eight 16-bit signed integers.
+ int16_t sw[8];
+ //! Array of eight 16-bit unsigned integers.
+ uint16_t uw[8];
+ //! Array of four 32-bit signed integers.
+ int32_t sd[4];
+ //! Array of four 32-bit unsigned integers.
+ uint32_t ud[4];
+ //! Array of two 64-bit signed integers.
+ int64_t sq[2];
+ //! Array of two 64-bit unsigned integers.
+ uint64_t uq[2];
+
+ //! Array of four 32-bit single precision floating points.
+ float sf[4];
+ //! Array of two 64-bit double precision floating points.
+ double df[2];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Sets all sixteen 8-bit signed integers.
+ static inline Data128 fromI8(int8_t x0) noexcept {
+ Data128 self;
+ self.setI8(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ static inline Data128 fromU8(uint8_t x0) noexcept {
+ Data128 self;
+ self.setU8(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 8-bit signed integers.
+ static inline Data128 fromI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept {
+
+ Data128 self;
+ self.setI8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ static inline Data128 fromU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept {
+
+ Data128 self;
+ self.setU8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ static inline Data128 fromI16(int16_t x0) noexcept {
+ Data128 self;
+ self.setI16(x0);
+ return self;
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ static inline Data128 fromU16(uint16_t x0) noexcept {
+ Data128 self;
+ self.setU16(x0);
+ return self;
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ static inline Data128 fromI16(
+ int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept {
+
+ Data128 self;
+ self.setI16(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ static inline Data128 fromU16(
+ uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept {
+
+ Data128 self;
+ self.setU16(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four 32-bit signed integers.
+ static inline Data128 fromI32(int32_t x0) noexcept {
+ Data128 self;
+ self.setI32(x0);
+ return self;
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ static inline Data128 fromU32(uint32_t x0) noexcept {
+ Data128 self;
+ self.setU32(x0);
+ return self;
+ }
+
+ //! Sets all four 32-bit signed integers.
+ static inline Data128 fromI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept {
+ Data128 self;
+ self.setI32(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ static inline Data128 fromU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept {
+ Data128 self;
+ self.setU32(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ static inline Data128 fromI64(int64_t x0) noexcept {
+ Data128 self;
+ self.setI64(x0);
+ return self;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ static inline Data128 fromU64(uint64_t x0) noexcept {
+ Data128 self;
+ self.setU64(x0);
+ return self;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ static inline Data128 fromI64(int64_t x0, int64_t x1) noexcept {
+ Data128 self;
+ self.setI64(x0, x1);
+ return self;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ static inline Data128 fromU64(uint64_t x0, uint64_t x1) noexcept {
+ Data128 self;
+ self.setU64(x0, x1);
+ return self;
+ }
+
+ //! Sets all four SP-FP floats.
+ static inline Data128 fromF32(float x0) noexcept {
+ Data128 self;
+ self.setF32(x0);
+ return self;
+ }
+
+ //! Sets all four SP-FP floats.
+ static inline Data128 fromF32(float x0, float x1, float x2, float x3) noexcept {
+ Data128 self;
+ self.setF32(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all two DP-FP floats.
+ static inline Data128 fromF64(double x0) noexcept {
+ Data128 self;
+ self.setF64(x0);
+ return self;
+ }
+
+ //! Sets all two DP-FP floats.
+ static inline Data128 fromF64(double x0, double x1) noexcept {
+ Data128 self;
+ self.setF64(x0, x1);
+ return self;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets all sixteen 8-bit signed integers.
+ inline void setI8(int8_t x0) noexcept {
+ setU8(uint8_t(x0));
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ inline void setU8(uint8_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0101010101010101u;
+ uq[0] = xq;
+ uq[1] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x01010101u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ }
+ }
+
+ //! Sets all sixteen 8-bit signed integers.
+ inline void setI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept {
+
+ sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ;
+ sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ;
+ sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11;
+ sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15;
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ inline void setU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept {
+
+ ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ;
+ ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ;
+ ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11;
+ ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15;
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ inline void setI16(int16_t x0) noexcept {
+ setU16(uint16_t(x0));
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ inline void setU16(uint16_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0001000100010001u;
+ uq[0] = xq;
+ uq[1] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x00010001u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ }
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ inline void setI16(
+ int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept {
+
+ sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3;
+ sw[4] = x4; sw[5] = x5; sw[6] = x6; sw[7] = x7;
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ inline void setU16(
+ uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept {
+
+ uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3;
+ uw[4] = x4; uw[5] = x5; uw[6] = x6; uw[7] = x7;
+ }
+
+ //! Sets all four 32-bit signed integers.
+ inline void setI32(int32_t x0) noexcept {
+ setU32(uint32_t(x0));
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ inline void setU32(uint32_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t t = (uint64_t(x0) << 32) + x0;
+ uq[0] = t;
+ uq[1] = t;
+ }
+ else {
+ ud[0] = x0;
+ ud[1] = x0;
+ ud[2] = x0;
+ ud[3] = x0;
+ }
+ }
+
+ //! Sets all four 32-bit signed integers.
+ inline void setI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept {
+ sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3;
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ inline void setU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept {
+ ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ inline void setI64(int64_t x0) noexcept {
+ sq[0] = x0; sq[1] = x0;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ inline void setU64(uint64_t x0) noexcept {
+ uq[0] = x0; uq[1] = x0;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ inline void setI64(int64_t x0, int64_t x1) noexcept {
+ sq[0] = x0; sq[1] = x1;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ inline void setU64(uint64_t x0, uint64_t x1) noexcept {
+ uq[0] = x0; uq[1] = x1;
+ }
+
+ //! Sets all four SP-FP floats.
+ inline void setF32(float x0) noexcept {
+ sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0;
+ }
+
+ //! Sets all four SP-FP floats.
+ inline void setF32(float x0, float x1, float x2, float x3) noexcept {
+ sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3;
+ }
+
+ //! Sets all two DP-FP floats.
+ inline void setF64(double x0) noexcept {
+ df[0] = x0; df[1] = x0;
+ }
+
+ //! Sets all two DP-FP floats.
+ inline void setF64(double x0, double x1) noexcept {
+ df[0] = x0; df[1] = x1;
+ }
+};
+
+// ============================================================================
+// [asmjit::Data256]
+// ============================================================================
+
+//! 256-bit data useful for creating SIMD constants.
+union ASMJIT_DEPRECATED_STRUCT("Data256 is deprecated and will be removed in the future") Data256 {
+ //! Array of thirty two 8-bit signed integers.
+ int8_t sb[32];
+ //! Array of thirty two 8-bit unsigned integers.
+ uint8_t ub[32];
+ //! Array of sixteen 16-bit signed integers.
+ int16_t sw[16];
+ //! Array of sixteen 16-bit unsigned integers.
+ uint16_t uw[16];
+ //! Array of eight 32-bit signed integers.
+ int32_t sd[8];
+ //! Array of eight 32-bit unsigned integers.
+ uint32_t ud[8];
+ //! Array of four 64-bit signed integers.
+ int64_t sq[4];
+ //! Array of four 64-bit unsigned integers.
+ uint64_t uq[4];
+
+ //! Array of eight 32-bit single precision floating points.
+ float sf[8];
+ //! Array of four 64-bit double precision floating points.
+ double df[4];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Sets all thirty two 8-bit signed integers.
+ static inline Data256 fromI8(int8_t x0) noexcept {
+ Data256 self;
+ self.setI8(x0);
+ return self;
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ static inline Data256 fromU8(uint8_t x0) noexcept {
+ Data256 self;
+ self.setU8(x0);
+ return self;
+ }
+
+ //! Sets all thirty two 8-bit signed integers.
+ static inline Data256 fromI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15,
+ int8_t x16, int8_t x17, int8_t x18, int8_t x19,
+ int8_t x20, int8_t x21, int8_t x22, int8_t x23,
+ int8_t x24, int8_t x25, int8_t x26, int8_t x27,
+ int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept {
+
+ Data256 self;
+ self.setI8(
+ x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15,
+ x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31);
+ return self;
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ static inline Data256 fromU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15,
+ uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19,
+ uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23,
+ uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27,
+ uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept {
+
+ Data256 self;
+ self.setU8(
+ x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15,
+ x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ static inline Data256 fromI16(int16_t x0) noexcept {
+ Data256 self;
+ self.setI16(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ static inline Data256 fromU16(uint16_t x0) noexcept {
+ Data256 self;
+ self.setU16(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ static inline Data256 fromI16(
+ int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7 ,
+ int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept {
+
+ Data256 self;
+ self.setI16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ static inline Data256 fromU16(
+ uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7 ,
+ uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept {
+
+ Data256 self;
+ self.setU16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ static inline Data256 fromI32(int32_t x0) noexcept {
+ Data256 self;
+ self.setI32(x0);
+ return self;
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ static inline Data256 fromU32(uint32_t x0) noexcept {
+ Data256 self;
+ self.setU32(x0);
+ return self;
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ static inline Data256 fromI32(
+ int32_t x0, int32_t x1, int32_t x2, int32_t x3,
+ int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept {
+
+ Data256 self;
+ self.setI32(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ static inline Data256 fromU32(
+ uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
+ uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept {
+
+ Data256 self;
+ self.setU32(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ static inline Data256 fromI64(int64_t x0) noexcept {
+ Data256 self;
+ self.setI64(x0);
+ return self;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ static inline Data256 fromU64(uint64_t x0) noexcept {
+ Data256 self;
+ self.setU64(x0);
+ return self;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ static inline Data256 fromI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept {
+ Data256 self;
+ self.setI64(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ static inline Data256 fromU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept {
+ Data256 self;
+ self.setU64(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all eight SP-FP floats.
+ static inline Data256 fromF32(float x0) noexcept {
+ Data256 self;
+ self.setF32(x0);
+ return self;
+ }
+
+ //! Sets all eight SP-FP floats.
+ static inline Data256 fromF32(
+ float x0, float x1, float x2, float x3,
+ float x4, float x5, float x6, float x7) noexcept {
+
+ Data256 self;
+ self.setF32(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four DP-FP floats.
+ static inline Data256 fromF64(double x0) noexcept {
+ Data256 self;
+ self.setF64(x0);
+ return self;
+ }
+
+ //! Sets all four DP-FP floats.
+ static inline Data256 fromF64(double x0, double x1, double x2, double x3) noexcept {
+ Data256 self;
+ self.setF64(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets all thirty two 8-bit signed integers.
+ inline void setI8(int8_t x0) noexcept {
+ setU8(uint8_t(x0));
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ inline void setU8(uint8_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0101010101010101u;
+ uq[0] = xq;
+ uq[1] = xq;
+ uq[2] = xq;
+ uq[3] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x01010101u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ ud[4] = xd;
+ ud[5] = xd;
+ ud[6] = xd;
+ ud[7] = xd;
+ }
+ }
+
+ //! Sets all thirty two 8-bit signed integers.
+ inline void setI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15,
+ int8_t x16, int8_t x17, int8_t x18, int8_t x19,
+ int8_t x20, int8_t x21, int8_t x22, int8_t x23,
+ int8_t x24, int8_t x25, int8_t x26, int8_t x27,
+ int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept {
+
+ sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ;
+ sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ;
+ sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11;
+ sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15;
+ sb[16] = x16; sb[17] = x17; sb[18] = x18; sb[19] = x19;
+ sb[20] = x20; sb[21] = x21; sb[22] = x22; sb[23] = x23;
+ sb[24] = x24; sb[25] = x25; sb[26] = x26; sb[27] = x27;
+ sb[28] = x28; sb[29] = x29; sb[30] = x30; sb[31] = x31;
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ inline void setU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15,
+ uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19,
+ uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23,
+ uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27,
+ uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept {
+
+ ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ;
+ ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ;
+ ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11;
+ ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15;
+ ub[16] = x16; ub[17] = x17; ub[18] = x18; ub[19] = x19;
+ ub[20] = x20; ub[21] = x21; ub[22] = x22; ub[23] = x23;
+ ub[24] = x24; ub[25] = x25; ub[26] = x26; ub[27] = x27;
+ ub[28] = x28; ub[29] = x29; ub[30] = x30; ub[31] = x31;
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ inline void setI16(int16_t x0) noexcept {
+ setU16(uint16_t(x0));
+ }
+
+  //! Sets all sixteen 16-bit unsigned integers to the same value `x0`.
+  inline void setU16(uint16_t x0) noexcept {
+    if (ASMJIT_ARCH_BITS >= 64) {
+      // 64-bit target: broadcast the 16-bit value across a 64-bit lane,
+      // then fill the four 64-bit lanes.
+      uint64_t xq = uint64_t(x0) * 0x0001000100010001u;
+      uq[0] = xq;
+      uq[1] = xq;
+      uq[2] = xq;
+      uq[3] = xq;
+    }
+    else {
+      // 32-bit target: broadcast across a 32-bit lane, fill eight lanes.
+      uint32_t xd = uint32_t(x0) * 0x00010001u;
+      ud[0] = xd;
+      ud[1] = xd;
+      ud[2] = xd;
+      ud[3] = xd;
+      ud[4] = xd;
+      ud[5] = xd;
+      ud[6] = xd;
+      ud[7] = xd;
+    }
+  }
+
+ //! Sets all sixteen 16-bit signed integers.
+ inline void setI16(
+ int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7,
+ int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept {
+
+ sw[0 ] = x0 ; sw[1 ] = x1 ; sw[2 ] = x2 ; sw[3 ] = x3 ;
+ sw[4 ] = x4 ; sw[5 ] = x5 ; sw[6 ] = x6 ; sw[7 ] = x7 ;
+ sw[8 ] = x8 ; sw[9 ] = x9 ; sw[10] = x10; sw[11] = x11;
+ sw[12] = x12; sw[13] = x13; sw[14] = x14; sw[15] = x15;
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ inline void setU16(
+ uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7,
+ uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept {
+
+ uw[0 ] = x0 ; uw[1 ] = x1 ; uw[2 ] = x2 ; uw[3 ] = x3 ;
+ uw[4 ] = x4 ; uw[5 ] = x5 ; uw[6 ] = x6 ; uw[7 ] = x7 ;
+ uw[8 ] = x8 ; uw[9 ] = x9 ; uw[10] = x10; uw[11] = x11;
+ uw[12] = x12; uw[13] = x13; uw[14] = x14; uw[15] = x15;
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ inline void setI32(int32_t x0) noexcept {
+ setU32(uint32_t(x0));
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ inline void setU32(uint32_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = (uint64_t(x0) << 32) + x0;
+ uq[0] = xq;
+ uq[1] = xq;
+ uq[2] = xq;
+ uq[3] = xq;
+ }
+ else {
+ ud[0] = x0;
+ ud[1] = x0;
+ ud[2] = x0;
+ ud[3] = x0;
+ ud[4] = x0;
+ ud[5] = x0;
+ ud[6] = x0;
+ ud[7] = x0;
+ }
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ inline void setI32(
+ int32_t x0, int32_t x1, int32_t x2, int32_t x3,
+ int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept {
+
+ sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3;
+ sd[4] = x4; sd[5] = x5; sd[6] = x6; sd[7] = x7;
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ inline void setU32(
+ uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
+ uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept {
+
+ ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3;
+ ud[4] = x4; ud[5] = x5; ud[6] = x6; ud[7] = x7;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ inline void setI64(int64_t x0) noexcept {
+ sq[0] = x0; sq[1] = x0; sq[2] = x0; sq[3] = x0;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ inline void setU64(uint64_t x0) noexcept {
+ uq[0] = x0; uq[1] = x0; uq[2] = x0; uq[3] = x0;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ inline void setI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept {
+ sq[0] = x0; sq[1] = x1; sq[2] = x2; sq[3] = x3;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ inline void setU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept {
+ uq[0] = x0; uq[1] = x1; uq[2] = x2; uq[3] = x3;
+ }
+
+ //! Sets all eight SP-FP floats.
+ inline void setF32(float x0) noexcept {
+ sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0;
+ sf[4] = x0; sf[5] = x0; sf[6] = x0; sf[7] = x0;
+ }
+
+ //! Sets all eight SP-FP floats.
+ inline void setF32(
+ float x0, float x1, float x2, float x3,
+ float x4, float x5, float x6, float x7) noexcept {
+
+ sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3;
+ sf[4] = x4; sf[5] = x5; sf[6] = x6; sf[7] = x7;
+ }
+
+ //! Sets all four DP-FP floats.
+ inline void setF64(double x0) noexcept {
+ df[0] = x0; df[1] = x0; df[2] = x0; df[3] = x0;
+ }
+
+ //! Sets all four DP-FP floats.
+ inline void setF64(double x0, double x1, double x2, double x3) noexcept {
+ df[0] = x0; df[1] = x1; df[2] = x2; df[3] = x3;
+ }
+
+ //! \}
+};
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_DEPRECATED
+#endif // ASMJIT_CORE_DATATYPES_H_INCLUDED
diff --git a/client/asmjit/core/emitter.cpp b/client/asmjit/core/emitter.cpp
new file mode 100644
index 0000000..b6ecc3d
--- /dev/null
+++ b/client/asmjit/core/emitter.cpp
@@ -0,0 +1,392 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/emitterutils_p.h"
+#include "../core/errorhandler.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86internal_p.h"
+ #include "../x86/x86instdb_p.h"
+#endif // ASMJIT_BUILD_X86
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/arminternal_p.h"
+ #include "../arm/arminstdb.h"
+#endif // ASMJIT_BUILD_ARM
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::BaseEmitter - Construction / Destruction]
+// ============================================================================
+
+// Constructs an unattached emitter of the given `emitterType` (see
+// EmitterType). CodeHolder / Logger / ErrorHandler pointers start null and
+// are populated by onAttach() / setLogger() / setErrorHandler().
+BaseEmitter::BaseEmitter(uint32_t emitterType) noexcept
+  : _emitterType(uint8_t(emitterType)),
+    _emitterFlags(0),
+    _validationFlags(0),
+    _validationOptions(0),
+    _encodingOptions(0),
+    _forcedInstOptions(BaseInst::kOptionReserved),
+    _privateData(0),
+    _code(nullptr),
+    _logger(nullptr),
+    _errorHandler(nullptr),
+    _environment(),
+    _gpRegInfo(),
+    _instOptions(0),
+    _extraReg(),
+    _inlineComment(nullptr) {}
+
+BaseEmitter::~BaseEmitter() noexcept {
+  if (_code) {
+    // Flag destruction before detaching - presumably so CodeHolder::detach()
+    // can distinguish a destruction-time detach from an explicit one (confirm
+    // against CodeHolder implementation).
+    _addEmitterFlags(kFlagDestroyed);
+    _code->detach(this);
+  }
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Finalize]
+// ============================================================================
+
+// Base implementation is a no-op - assemblers emit directly into CodeHolder,
+// so only Builder/Compiler emitters need a materialization phase.
+Error BaseEmitter::finalize() {
+  // Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`.
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Internals]
+// ============================================================================
+
+// Emitter flags that survive onDetach() - exactly the flags that record
+// ownership of the logger and the error handler; everything else is cleared.
+static constexpr uint32_t kEmitterPreservedFlags =
+  BaseEmitter::kFlagOwnLogger |
+  BaseEmitter::kFlagOwnErrorHandler ;
+
+// Recomputes the reserved bit of `_forcedInstOptions`: it is forced whenever
+// a logger is attached or the validation option relevant to the emitter type
+// is enabled (assemblers check kValidationOptionAssembler, builders and
+// compilers check kValidationOptionIntermediate).
+static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) noexcept {
+  bool hasLogger = self->_logger != nullptr;
+  bool hasValidationOptions;
+
+  if (self->emitterType() == BaseEmitter::kTypeAssembler)
+    hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionAssembler);
+  else
+    hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionIntermediate);
+
+  // kOptionReserved acts as a single "logging or validation is active" bit
+  // combined into every instruction's options by emit() implementations.
+  self->_forcedInstOptions &= ~BaseInst::kOptionReserved;
+  if (hasLogger || hasValidationOptions)
+    self->_forcedInstOptions |= BaseInst::kOptionReserved;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Validation Options]
+// ============================================================================
+
+// Enables the given validation `options` (see ValidationOptions) and
+// refreshes the forced instruction options that depend on them.
+void BaseEmitter::addValidationOptions(uint32_t options) noexcept {
+  _validationOptions = uint8_t(_validationOptions | options);
+  BaseEmitter_updateForcedOptions(this);
+}
+
+// Disables the given validation `options` (see ValidationOptions) and
+// refreshes the forced instruction options that depend on them.
+void BaseEmitter::clearValidationOptions(uint32_t options) noexcept {
+  // Fix: the original used `_validationOptions | options` (copy/paste from
+  // addValidationOptions), which *enabled* the options instead of clearing
+  // them. Clearing must mask with the complement of `options`.
+  _validationOptions = uint8_t(_validationOptions & ~options);
+  BaseEmitter_updateForcedOptions(this);
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Logging]
+// ============================================================================
+
+// Sets or resets the emitter's logger. A non-null `logger` becomes the
+// emitter's own (kFlagOwnLogger); null reverts to the logger of the attached
+// CodeHolder, if any. Compiled out to a no-op with ASMJIT_NO_LOGGING.
+void BaseEmitter::setLogger(Logger* logger) noexcept {
+#ifndef ASMJIT_NO_LOGGING
+  if (logger) {
+    _logger = logger;
+    _addEmitterFlags(kFlagOwnLogger);
+  }
+  else {
+    _logger = nullptr;
+    _clearEmitterFlags(kFlagOwnLogger);
+    if (_code)
+      _logger = _code->logger();
+  }
+  // Forced options depend on whether a logger is present.
+  BaseEmitter_updateForcedOptions(this);
+#else
+  DebugUtils::unused(logger);
+#endif
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Error Handling]
+// ============================================================================
+
+// Sets or resets the emitter's error handler. A non-null handler becomes the
+// emitter's own (kFlagOwnErrorHandler); null reverts to the error handler of
+// the attached CodeHolder, if any.
+void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
+  if (errorHandler) {
+    _errorHandler = errorHandler;
+    _addEmitterFlags(kFlagOwnErrorHandler);
+  }
+  else {
+    _errorHandler = nullptr;
+    _clearEmitterFlags(kFlagOwnErrorHandler);
+    if (_code)
+      _errorHandler = _code->errorHandler();
+  }
+}
+
+// Routes `err` through the attached ErrorHandler (if any), substituting the
+// default error string when `message` is null, and always returns `err`
+// unchanged so call sites can write `return reportError(...)`.
+Error BaseEmitter::reportError(Error err, const char* message) {
+  ErrorHandler* eh = _errorHandler;
+  if (eh) {
+    if (!message)
+      message = DebugUtils::errorAsString(err);
+    eh->handleError(err, message, this);
+  }
+  return err;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Labels]
+// ============================================================================
+
+// Returns a Label referring to the label named `name` (with optional
+// `parentId`); yields Globals::kInvalidId when the emitter is not attached
+// to a CodeHolder (lookup failure handling lives in labelIdByName()).
+Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
+  return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : uint32_t(Globals::kInvalidId));
+}
+
+// A label id is valid only when the emitter is attached and the id is within
+// the count of labels created by the attached CodeHolder.
+bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept {
+  return _code && labelId < _code->labelCount();
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Emit (Low-Level)]
+// ============================================================================
+
+using EmitterUtils::noExt;
+
+// Dispatches a 0-operand instruction to the backend-specific _emit().
+Error BaseEmitter::_emitI(uint32_t instId) {
+  return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
+}
+
+// Dispatches a 1-operand instruction; unused slots use the `noExt` nones.
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0) {
+  return _emit(instId, o0, noExt[1], noExt[2], noExt);
+}
+
+// Dispatches a 2-operand instruction; unused slots use the `noExt` nones.
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1) {
+  return _emit(instId, o0, o1, noExt[2], noExt);
+}
+
+// Dispatches a 3-operand instruction; the extension array is all nones.
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
+  return _emit(instId, o0, o1, o2, noExt);
+}
+
+// Dispatches a 4-operand instruction; `o3` travels through the 3-slot
+// extension array (aggregate-init value-initializes the remaining slots).
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
+  Operand_ opExt[3] = { o3 };
+  return _emit(instId, o0, o1, o2, opExt);
+}
+
+// Dispatches a 5-operand instruction; `o3`/`o4` travel through the 3-slot
+// extension array (aggregate-init value-initializes the last slot).
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
+  Operand_ opExt[3] = { o3, o4 };
+  return _emit(instId, o0, o1, o2, opExt);
+}
+
+// Dispatches a 6-operand instruction; `o3`..`o5` fill the extension array.
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
+  Operand_ opExt[3] = { o3, o4, o5 };
+  return _emit(instId, o0, o1, o2, opExt);
+}
+
+// Emits an instruction whose operands are given as an array. Operands 3..5
+// are packed into the 3-element extension array expected by _emit(); for the
+// 6-operand case `op + 3` aliases the tail of the caller's array directly.
+// More than 6 operands is rejected with kErrorInvalidArgument.
+Error BaseEmitter::_emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
+  const Operand_* op = operands;
+
+  Operand_ opExt[3];
+
+  switch (opCount) {
+    case 0:
+      return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
+
+    case 1:
+      return _emit(instId, op[0], noExt[1], noExt[2], noExt);
+
+    case 2:
+      return _emit(instId, op[0], op[1], noExt[2], noExt);
+
+    case 3:
+      return _emit(instId, op[0], op[1], op[2], noExt);
+
+    case 4:
+      opExt[0] = op[3];
+      opExt[1].reset();
+      opExt[2].reset();
+      return _emit(instId, op[0], op[1], op[2], opExt);
+
+    case 5:
+      opExt[0] = op[3];
+      opExt[1] = op[4];
+      opExt[2].reset();
+      return _emit(instId, op[0], op[1], op[2], opExt);
+
+    case 6:
+      return _emit(instId, op[0], op[1], op[2], op + 3);
+
+    default:
+      return DebugUtils::errored(kErrorInvalidArgument);
+  }
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Emit (High-Level)]
+// ============================================================================
+
+// Emits a function prolog described by `frame`, dispatching to whichever
+// architecture-specific implementation was compiled in. Fails with
+// kErrorNotInitialized when unattached, kErrorInvalidArch otherwise.
+ASMJIT_FAVOR_SIZE Error BaseEmitter::emitProlog(const FuncFrame& frame) {
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+#ifdef ASMJIT_BUILD_X86
+  if (environment().isFamilyX86())
+    return x86::X86Internal::emitProlog(as<x86::Emitter>(), frame);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  if (environment().isFamilyARM())
+    return arm::ArmInternal::emitProlog(as<arm::Emitter>(), frame);
+#endif
+
+  return DebugUtils::errored(kErrorInvalidArch);
+}
+
+// Emits a function epilog described by `frame`; same dispatch and error
+// behavior as emitProlog().
+ASMJIT_FAVOR_SIZE Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+#ifdef ASMJIT_BUILD_X86
+  if (environment().isFamilyX86())
+    return x86::X86Internal::emitEpilog(as<x86::Emitter>(), frame);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  if (environment().isFamilyARM())
+    return arm::ArmInternal::emitEpilog(as<arm::Emitter>(), frame);
+#endif
+
+  return DebugUtils::errored(kErrorInvalidArch);
+}
+
+// Emits code that moves function arguments into their assigned registers or
+// stack slots per `args`; same dispatch and error behavior as emitProlog().
+ASMJIT_FAVOR_SIZE Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+#ifdef ASMJIT_BUILD_X86
+  if (environment().isFamilyX86())
+    return x86::X86Internal::emitArgsAssignment(as<x86::Emitter>(), frame, args);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  if (environment().isFamilyARM())
+    return arm::ArmInternal::emitArgsAssignment(as<arm::Emitter>(), frame, args);
+#endif
+
+  return DebugUtils::errored(kErrorInvalidArch);
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Comment]
+// ============================================================================
+
+// Emits a printf-style formatted comment by forwarding to commentv(). With
+// ASMJIT_NO_LOGGING only the attach check remains; formatting is skipped.
+Error BaseEmitter::commentf(const char* fmt, ...) {
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+#ifndef ASMJIT_NO_LOGGING
+  va_list ap;
+  va_start(ap, fmt);
+  Error err = commentv(fmt, ap);
+  va_end(ap);
+  return err;
+#else
+  DebugUtils::unused(fmt);
+  return kErrorOk;
+#endif
+}
+
+// va_list variant of commentf(): formats into a 1KB stack-backed temporary
+// string and forwards the result to comment(). Formatting errors propagate.
+Error BaseEmitter::commentv(const char* fmt, va_list ap) {
+  if (ASMJIT_UNLIKELY(!_code))
+    return DebugUtils::errored(kErrorNotInitialized);
+
+#ifndef ASMJIT_NO_LOGGING
+  StringTmp<1024> sb;
+  Error err = sb.appendVFormat(fmt, ap);
+
+  if (ASMJIT_UNLIKELY(err))
+    return err;
+
+  return comment(sb.data(), sb.size());
+#else
+  DebugUtils::unused(fmt, ap);
+  return kErrorOk;
+#endif
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Events]
+// ============================================================================
+
+// Called by CodeHolder when this emitter is attached - caches the holder and
+// its environment, then syncs borrowed logger/error-handler/forced options.
+Error BaseEmitter::onAttach(CodeHolder* code) noexcept {
+  _code = code;
+  _environment = code->environment();
+
+  onSettingsUpdated();
+  return kErrorOk;
+}
+
+// Called by CodeHolder when this emitter is detached - resets everything to
+// the unattached state except the ownership flags in kEmitterPreservedFlags.
+Error BaseEmitter::onDetach(CodeHolder* code) noexcept {
+  DebugUtils::unused(code);
+
+  _clearEmitterFlags(~kEmitterPreservedFlags);
+  _forcedInstOptions = BaseInst::kOptionReserved;
+  _privateData = 0;
+
+  // A logger / error handler not owned by this emitter was borrowed from
+  // CodeHolder and must not survive the detach.
+  if (!hasOwnLogger())
+    _logger = nullptr;
+
+  if (!hasOwnErrorHandler())
+    _errorHandler = nullptr;
+
+  _environment.reset();
+  _gpRegInfo.reset();
+
+  // Per-instruction state that would otherwise leak into the next attach.
+  _instOptions = 0;
+  _extraReg.reset();
+  _inlineComment = nullptr;
+
+  return kErrorOk;
+}
+
+// Re-borrows CodeHolder's logger / error handler (unless the emitter owns
+// its own) and recomputes the forced instruction options.
+void BaseEmitter::onSettingsUpdated() noexcept {
+  // Only called when attached to CodeHolder by CodeHolder.
+  ASMJIT_ASSERT(_code != nullptr);
+
+  if (!hasOwnLogger())
+    _logger = _code->logger();
+
+  if (!hasOwnErrorHandler())
+    _errorHandler = _code->errorHandler();
+
+  BaseEmitter_updateForcedOptions(this);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/emitter.h b/client/asmjit/core/emitter.h
new file mode 100644
index 0000000..6da602e
--- /dev/null
+++ b/client/asmjit/core/emitter.h
@@ -0,0 +1,714 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_EMITTER_H_INCLUDED
+#define ASMJIT_CORE_EMITTER_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/codeholder.h"
+#include "../core/inst.h"
+#include "../core/operand.h"
+#include "../core/type.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class ConstPool;
+class FuncFrame;
+class FuncArgsAssignment;
+
+// ============================================================================
+// [asmjit::BaseEmitter]
+// ============================================================================
+
+//! Provides a base foundation to emit code - specialized by `Assembler` and
+//! `BaseBuilder`.
+class ASMJIT_VIRTAPI BaseEmitter {
+public:
+ ASMJIT_BASE_CLASS(BaseEmitter)
+
+ //! See \ref EmitterType.
+ uint8_t _emitterType;
+ //! See \ref BaseEmitter::EmitterFlags.
+ uint8_t _emitterFlags;
+ //! Validation flags in case validation is used, see \ref InstAPI::ValidationFlags.
+ //!
+ //! \note Validation flags are specific to the emitter and they are setup at
+ //! construction time and then never changed.
+ uint8_t _validationFlags;
+ //! Validation options, see \ref ValidationOptions.
+ uint8_t _validationOptions;
+
+ //! Encoding options, see \ref EncodingOptions.
+ uint32_t _encodingOptions;
+
+ //! Forced instruction options, combined with \ref _instOptions by \ref emit().
+ uint32_t _forcedInstOptions;
+ //! Internal private data used freely by any emitter.
+ uint32_t _privateData;
+
+ //! CodeHolder the emitter is attached to.
+ CodeHolder* _code;
+ //! Attached \ref Logger.
+ Logger* _logger;
+ //! Attached \ref ErrorHandler.
+ ErrorHandler* _errorHandler;
+
+ //! Describes the target environment, matches \ref CodeHolder::environment().
+ Environment _environment;
+ //! Native GP register signature and signature related information.
+ RegInfo _gpRegInfo;
+
+ //! Next instruction options (affects the next instruction).
+ uint32_t _instOptions;
+ //! Extra register (op-mask {k} on AVX-512) (affects the next instruction).
+ RegOnly _extraReg;
+ //! Inline comment of the next instruction (affects the next instruction).
+ const char* _inlineComment;
+
+ //! Emitter type.
+ enum EmitterType : uint32_t {
+ //! Unknown or uninitialized.
+ kTypeNone = 0,
+ //! Emitter inherits from \ref BaseAssembler.
+ kTypeAssembler = 1,
+ //! Emitter inherits from \ref BaseBuilder.
+ kTypeBuilder = 2,
+ //! Emitter inherits from \ref BaseCompiler.
+ kTypeCompiler = 3,
+
+ //! Count of emitter types.
+ kTypeCount = 4
+ };
+
+ //! Emitter flags.
+ enum EmitterFlags : uint32_t {
+ //! The emitter has its own \ref Logger (not propagated from \ref CodeHolder).
+ kFlagOwnLogger = 0x10u,
+ //! The emitter has its own \ref ErrorHandler (not propagated from \ref CodeHolder).
+ kFlagOwnErrorHandler = 0x20u,
+ //! The emitter was finalized.
+ kFlagFinalized = 0x40u,
+ //! The emitter was destroyed.
+ kFlagDestroyed = 0x80u
+ };
+
+  //! Encoding options.
+  enum EncodingOptions : uint32_t {
+    //! Emit instructions that are optimized for size, if possible.
+    //!
+    //! Default: false.
+    //!
+    //! X86 Specific
+    //! ------------
+    //!
+    //! When this option is set, the assembler will try to fix instructions
+    //! if possible into operation-equivalent instructions that take fewer
+    //! bytes by taking advantage of implicit zero extension. For example an
+    //! instruction like `mov r64, imm` and `and r64, imm` can be translated
+    //! to `mov r32, imm` and `and r32, imm` when the immediate constant is
+    //! less than `2^31`.
+    kEncodingOptionOptimizeForSize = 0x00000001u,
+
+    //! Emit optimized code-alignment sequences.
+    //!
+    //! Default: false.
+    //!
+    //! X86 Specific
+    //! ------------
+    //!
+    //! The default align sequence used by the X86 architecture is a one-byte
+    //! (0x90) opcode that is often shown by disassemblers as NOP. However,
+    //! there are more optimized align sequences for 2-11 bytes that may
+    //! execute faster on certain CPUs. If this feature is enabled, AsmJit
+    //! will generate specialized sequences for alignment between 2 to 11
+    //! bytes.
+    kEncodingOptionOptimizedAlign = 0x00000002u,
+
+    //! Emit jump-prediction hints.
+    //!
+    //! Default: false.
+    //!
+    //! X86 Specific
+    //! ------------
+    //!
+    //! Jump prediction is usually based on the direction of the jump. If the
+    //! jump is backward it is usually predicted as taken; and if the jump is
+    //! forward it is usually predicted as not-taken. The reason is that loops
+    //! generally use backward jumps and conditions usually use forward jumps.
+    //! However, this behavior can be overridden by using instruction prefixes.
+    //! If this option is enabled these hints will be emitted.
+    //!
+    //! This feature is disabled by default, because the only processor that
+    //! used to take into consideration prediction hints was P4. Newer processors
+    //! implement heuristics for branch prediction and ignore static hints. This
+    //! means that this feature can be only used for annotation purposes.
+    kEncodingOptionPredictedJumps = 0x00000010u
+  };
+
+#ifndef ASMJIT_NO_DEPRECATED
+ enum EmitterOptions : uint32_t {
+ kOptionOptimizedForSize = kEncodingOptionOptimizeForSize,
+ kOptionOptimizedAlign = kEncodingOptionOptimizedAlign,
+ kOptionPredictedJumps = kEncodingOptionPredictedJumps
+ };
+#endif
+
+ //! Validation options are used to tell emitters to perform strict validation
+ //! of instructions passed to \ref emit().
+ //!
+ //! \ref BaseAssembler implementation perform by default only basic checks
+ //! that are necessary to identify all variations of an instruction so the
+ //! correct encoding can be selected. This is fine for production-ready code
+ //! as the assembler doesn't have to perform checks that would slow it down.
+ //! However, sometimes these checks are beneficial especially when the project
+ //! that uses AsmJit is in a development phase, in which mistakes happen often.
+ //! To make the experience of using AsmJit seamless it offers validation
+ //! features that can be controlled by `ValidationOptions`.
+ enum ValidationOptions : uint32_t {
+ //! Perform strict validation in \ref BaseAssembler::emit() implementations.
+ //!
+ //! This flag ensures that each instruction is checked before it's encoded
+ //! into a binary representation. This flag is only relevant for \ref
+ //! BaseAssembler implementations, but can be set in any other emitter type,
+ //! in that case if that emitter needs to create an assembler on its own,
+ //! for the purpose of \ref finalize() it would propagate this flag to such
+ //! assembler so all instructions passed to it are explicitly validated.
+ //!
+ //! Default: false.
+ kValidationOptionAssembler = 0x00000001u,
+
+ //! Perform strict validation in \ref BaseBuilder::emit() and \ref
+ //! BaseCompiler::emit() implementations.
+ //!
+ //! This flag ensures that each instruction is checked before an \ref
+ //! InstNode representing the instruction is created by Builder or Compiler.
+ //!
+ //! Default: false.
+ kValidationOptionIntermediate = 0x00000002u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API explicit BaseEmitter(uint32_t emitterType) noexcept;
+ ASMJIT_API virtual ~BaseEmitter() noexcept;
+
+ //! \}
+
+ //! \name Cast
+ //! \{
+
+ template<typename T>
+ inline T* as() noexcept { return reinterpret_cast<T*>(this); }
+
+ template<typename T>
+ inline const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
+
+ //! \}
+
+ //! \name Emitter Type & Flags
+ //! \{
+
+ //! Returns the type of this emitter, see `EmitterType`.
+ inline uint32_t emitterType() const noexcept { return _emitterType; }
+  //! Returns emitter flags, see `EmitterFlags`.
+  inline uint32_t emitterFlags() const noexcept { return _emitterFlags; }
+
+ //! Tests whether the emitter inherits from `BaseAssembler`.
+ inline bool isAssembler() const noexcept { return _emitterType == kTypeAssembler; }
+ //! Tests whether the emitter inherits from `BaseBuilder`.
+ //!
+ //! \note Both Builder and Compiler emitters would return `true`.
+ inline bool isBuilder() const noexcept { return _emitterType >= kTypeBuilder; }
+ //! Tests whether the emitter inherits from `BaseCompiler`.
+ inline bool isCompiler() const noexcept { return _emitterType == kTypeCompiler; }
+
+ //! Tests whether the emitter has the given `flag` enabled.
+ inline bool hasEmitterFlag(uint32_t flag) const noexcept { return (_emitterFlags & flag) != 0; }
+ //! Tests whether the emitter is finalized.
+ inline bool isFinalized() const noexcept { return hasEmitterFlag(kFlagFinalized); }
+ //! Tests whether the emitter is destroyed (only used during destruction).
+ inline bool isDestroyed() const noexcept { return hasEmitterFlag(kFlagDestroyed); }
+
+ inline void _addEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags | flags); }
+ inline void _clearEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags & ~flags); }
+
+ //! \}
+
+ //! \name Target Information
+ //! \{
+
+ //! Returns the CodeHolder this emitter is attached to.
+ inline CodeHolder* code() const noexcept { return _code; }
+
+ //! Returns the target environment, see \ref Environment.
+ //!
+ //! The returned \ref Environment reference matches \ref CodeHolder::environment().
+ inline const Environment& environment() const noexcept { return _environment; }
+
+ //! Tests whether the target architecture is 32-bit.
+ inline bool is32Bit() const noexcept { return environment().is32Bit(); }
+ //! Tests whether the target architecture is 64-bit.
+ inline bool is64Bit() const noexcept { return environment().is64Bit(); }
+
+ //! Returns the target architecture type.
+ inline uint32_t arch() const noexcept { return environment().arch(); }
+ //! Returns the target architecture sub-type.
+ inline uint32_t subArch() const noexcept { return environment().subArch(); }
+
+ //! Returns the target architecture's GP register size (4 or 8 bytes).
+ inline uint32_t registerSize() const noexcept { return environment().registerSize(); }
+
+ //! \}
+
+ //! \name Initialization & Finalization
+ //! \{
+
+ //! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder).
+ inline bool isInitialized() const noexcept { return _code != nullptr; }
+
+ //! Finalizes this emitter.
+ //!
+ //! Materializes the content of the emitter by serializing it to the attached
+ //! \ref CodeHolder through an architecture specific \ref BaseAssembler. This
+ //! function won't do anything if the emitter inherits from \ref BaseAssembler
+ //! as assemblers emit directly to a \ref CodeBuffer held by \ref CodeHolder.
+ //! However, if this is an emitter that inherits from \ref BaseBuilder or \ref
+ //! BaseCompiler then these emitters need the materialization phase as they
+ //! store their content in a representation not visible to \ref CodeHolder.
+ ASMJIT_API virtual Error finalize();
+
+ //! \}
+
+ //! \name Logging
+ //! \{
+
+ //! Tests whether the emitter has a logger.
+ inline bool hasLogger() const noexcept { return _logger != nullptr; }
+
+ //! Tests whether the emitter has its own logger.
+ //!
+ //! Own logger means that it overrides the possible logger that may be used
+ //! by \ref CodeHolder this emitter is attached to.
+ inline bool hasOwnLogger() const noexcept { return hasEmitterFlag(kFlagOwnLogger); }
+
+ //! Returns the logger this emitter uses.
+ //!
+ //! The returned logger is either the emitter's own logger or it's logger
+ //! used by \ref CodeHolder this emitter is attached to.
+ inline Logger* logger() const noexcept { return _logger; }
+
+ //! Sets or resets the logger of the emitter.
+ //!
+ //! If the `logger` argument is non-null then the logger will be considered
+ //! emitter's own logger, see \ref hasOwnLogger() for more details. If the
+ //! given `logger` is null then the emitter will automatically use logger
+ //! that is attached to the \ref CodeHolder this emitter is attached to.
+ ASMJIT_API void setLogger(Logger* logger) noexcept;
+
+ //! Resets the logger of this emitter.
+ //!
+ //! The emitter will bail to using a logger attached to \ref CodeHolder this
+ //! emitter is attached to, or no logger at all if \ref CodeHolder doesn't
+ //! have one.
+ inline void resetLogger() noexcept { return setLogger(nullptr); }
+
+ //! \}
+
+ //! \name Error Handling
+ //! \{
+
+ //! Tests whether the emitter has an error handler attached.
+ inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
+
+ //! Tests whether the emitter has its own error handler.
+ //!
+ //! Own error handler means that it overrides the possible error handler that
+ //! may be used by \ref CodeHolder this emitter is attached to.
+ inline bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(kFlagOwnErrorHandler); }
+
+ //! Returns the error handler this emitter uses.
+ //!
+ //! The returned error handler is either the emitter's own error handler or
+ //! it's error handler used by \ref CodeHolder this emitter is attached to.
+ inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
+
+ //! Sets or resets the error handler of the emitter.
+ ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
+
+ //! Resets the error handler.
+ inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
+
+ //! Handles the given error in the following way:
+ //! 1. If the emitter has \ref ErrorHandler attached, it calls its
+ //! \ref ErrorHandler::handleError() member function first, and
+ //! then returns the error. The `handleError()` function may throw.
+ //! 2. if the emitter doesn't have \ref ErrorHandler, the error is
+ //! simply returned.
+ ASMJIT_API Error reportError(Error err, const char* message = nullptr);
+
+ //! \}
+
+ //! \name Encoding Options
+ //! \{
+
+ //! Returns encoding options, see \ref EncodingOptions.
+ inline uint32_t encodingOptions() const noexcept { return _encodingOptions; }
+ //! Tests whether the encoding `option` is set.
+ inline bool hasEncodingOption(uint32_t option) const noexcept { return (_encodingOptions & option) != 0; }
+
+ //! Enables the given encoding `options`, see \ref EncodingOptions.
+ inline void addEncodingOptions(uint32_t options) noexcept { _encodingOptions |= options; }
+ //! Disables the given encoding `options`, see \ref EncodingOptions.
+ inline void clearEncodingOptions(uint32_t options) noexcept { _encodingOptions &= ~options; }
+
+ //! \}
+
+ //! \name Validation Options
+ //! \{
+
+ //! Returns the emitter's validation options, see \ref ValidationOptions.
+ inline uint32_t validationOptions() const noexcept {
+ return _validationOptions;
+ }
+
+ //! Tests whether the given `option` is present in validation options.
+ inline bool hasValidationOption(uint32_t option) const noexcept {
+ return (_validationOptions & option) != 0;
+ }
+
+ //! Activates the given validation `options`, see \ref ValidationOptions.
+ //!
+ //! This function is used to activate explicit validation options that will
+ //! be then used by all emitter implementations. There are in general two
+ //! possibilities:
+ //!
+ //! - Architecture specific assembler is used. In this case a
+ //! \ref kValidationOptionAssembler can be used to turn on explicit
+ //! validation that will be used before an instruction is emitted.
+ //! This means that internally an extra step will be performed to
+ //! make sure that the instruction is correct. This is needed, because
+ //! by default assemblers prefer speed over strictness.
+ //!
+ //! This option should be used in debug builds as it's pretty expensive.
+ //!
+ //! - Architecture specific builder or compiler is used. In this case
+ //! the user can turn on \ref kValidationOptionIntermediate option
+ //! that adds explicit validation step before the Builder or Compiler
+ //! creates an \ref InstNode to represent an emitted instruction. Error
+ //! will be returned if the instruction is ill-formed. In addition,
+ //! also \ref kValidationOptionAssembler can be used, which would not be
+ //! consumed by Builder / Compiler directly, but it would be propagated
+ //! to an architecture specific \ref BaseAssembler implementation it
+ //! creates during \ref BaseEmitter::finalize().
+ ASMJIT_API void addValidationOptions(uint32_t options) noexcept;
+
+ //! Deactivates the given validation `options`.
+ //!
+ //! See \ref addValidationOptions() and \ref ValidationOptions for more details.
+ ASMJIT_API void clearValidationOptions(uint32_t options) noexcept;
+
+ //! \}
+
+ //! \name Instruction Options
+ //! \{
+
+ //! Returns forced instruction options.
+ //!
+ //! Forced instruction options are merged with next instruction options before
+ //! the instruction is encoded. These options have some bits reserved that are
+ //! used by error handling, logging, and instruction validation purposes. Other
+ //! options are globals that affect each instruction.
+ inline uint32_t forcedInstOptions() const noexcept { return _forcedInstOptions; }
+
+ //! Returns options of the next instruction.
+ inline uint32_t instOptions() const noexcept { return _instOptions; }
+ //! Returns options of the next instruction.
+ inline void setInstOptions(uint32_t options) noexcept { _instOptions = options; }
+ //! Adds options of the next instruction.
+ inline void addInstOptions(uint32_t options) noexcept { _instOptions |= options; }
+ //! Resets options of the next instruction.
+ inline void resetInstOptions() noexcept { _instOptions = 0; }
+
+ //! Tests whether the extra register operand is valid.
+ inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
+ //! Returns an extra operand that will be used by the next instruction (architecture specific).
+ inline const RegOnly& extraReg() const noexcept { return _extraReg; }
+ //! Sets an extra operand that will be used by the next instruction (architecture specific).
+ inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
+ //! Sets an extra operand that will be used by the next instruction (architecture specific).
+ inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
+ //! Resets an extra operand that will be used by the next instruction (architecture specific).
+ inline void resetExtraReg() noexcept { _extraReg.reset(); }
+
+ //! Returns comment/annotation of the next instruction.
+ inline const char* inlineComment() const noexcept { return _inlineComment; }
+ //! Sets comment/annotation of the next instruction.
+ //!
+ //! \note This string is set back to null by `_emit()`, but until that it has
+ //! to remain valid as the Emitter is not required to make a copy of it (and
+ //! it would be slow to do that for each instruction).
+ inline void setInlineComment(const char* s) noexcept { _inlineComment = s; }
+ //! Resets the comment/annotation to nullptr.
+ inline void resetInlineComment() noexcept { _inlineComment = nullptr; }
+
+ //! \}
+
+ //! \name Sections
+ //! \{
+
+ virtual Error section(Section* section) = 0;
+
+ //! \}
+
+ //! \name Labels
+ //! \{
+
+ //! Creates a new label.
+ virtual Label newLabel() = 0;
+ //! Creates a new named label.
+ virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) = 0;
+
+ //! Returns `Label` by `name`.
+ //!
+ //! Returns invalid Label in case that the name is invalid or label was not found.
+ //!
+ //! \note This function doesn't trigger ErrorHandler in case the name is invalid
+ //! or no such label exist. You must always check the validity of the `Label` returned.
+ ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
+
+ //! Binds the `label` to the current position of the current section.
+ //!
+ //! \note Attempt to bind the same label multiple times will return an error.
+ virtual Error bind(const Label& label) = 0;
+
+ //! Tests whether the label `id` is valid (i.e. registered).
+ ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
+ //! Tests whether the `label` is valid (i.e. registered).
+ inline bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
+
+ //! \}
+
+ //! \name Emit
+ //! \{
+
+ // NOTE: These `emit()` helpers are designed to address a code-bloat generated
+ // by C++ compilers to call a function having many arguments. Each parameter to
+ // `_emit()` requires some code to pass it, which means that if we default to
+ // 5 arguments in `_emit()` and instId the C++ compiler would have to generate
+ // a virtual function call having 5 parameters and additional `this` argument,
+ // which is quite a lot. Since by default most instructions have 2 to 3 operands
+ // it's better to introduce helpers that pass from 0 to 6 operands that help to
+ // reduce the size of emit(...) function call.
+
+ //! Emits an instruction (internal).
+ ASMJIT_API Error _emitI(uint32_t instId);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
+
+ //! Emits an instruction `instId` with the given `operands`.
+ template<typename... Args>
+ ASMJIT_INLINE Error emit(uint32_t instId, Args&&... operands) {
+ return _emitI(instId, Support::ForwardOp<Args>::forward(operands)...);
+ }
+
+ inline Error emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
+ return _emitOpArray(instId, operands, opCount);
+ }
+
+ inline Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
+ setInstOptions(inst.options());
+ setExtraReg(inst.extraReg());
+ return _emitOpArray(inst.id(), operands, opCount);
+ }
+
+ //! \cond INTERNAL
+ //! Emits an instruction - all 6 operands must be defined.
+ virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) = 0;
+ //! Emits instruction having operands stored in array.
+ ASMJIT_API virtual Error _emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount);
+ //! \endcond
+
+ //! \}
+
+ //! \name Emit Utilities
+ //! \{
+
+ ASMJIT_API Error emitProlog(const FuncFrame& frame);
+ ASMJIT_API Error emitEpilog(const FuncFrame& frame);
+ ASMJIT_API Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
+
+ //! \}
+
+ //! \name Align
+ //! \{
+
+ //! Aligns the current CodeBuffer position to the `alignment` specified.
+ //!
+ //! The sequence that is used to fill the gap between the aligned location
+ //! and the current location depends on the align `mode`, see \ref AlignMode.
+ virtual Error align(uint32_t alignMode, uint32_t alignment) = 0;
+
+ //! \}
+
+ //! \name Embed
+ //! \{
+
+ //! Embeds raw data into the \ref CodeBuffer.
+ virtual Error embed(const void* data, size_t dataSize) = 0;
+
+ //! Embeds a typed data array.
+ //!
+ //! This is the most flexible function for embedding data as it allows to:
+ //! - Assign a `typeId` to the data, so the emitter knows the type of
+ //! items stored in `data`. Binary data should use \ref Type::kIdU8.
+ //! - Repeat the given data `repeatCount` times, so the data can be used
+ //! as a fill pattern for example, or as a pattern used by SIMD instructions.
+ virtual Error embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) = 0;
+
+ //! Embeds int8_t `value` repeated by `repeatCount`.
+ inline Error embedInt8(int8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI8, &value, 1, repeatCount); }
+ //! Embeds uint8_t `value` repeated by `repeatCount`.
+ inline Error embedUInt8(uint8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU8, &value, 1, repeatCount); }
+ //! Embeds int16_t `value` repeated by `repeatCount`.
+ inline Error embedInt16(int16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI16, &value, 1, repeatCount); }
+ //! Embeds uint16_t `value` repeated by `repeatCount`.
+ inline Error embedUInt16(uint16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU16, &value, 1, repeatCount); }
+ //! Embeds int32_t `value` repeated by `repeatCount`.
+ inline Error embedInt32(int32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI32, &value, 1, repeatCount); }
+ //! Embeds uint32_t `value` repeated by `repeatCount`.
+ inline Error embedUInt32(uint32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU32, &value, 1, repeatCount); }
+ //! Embeds int64_t `value` repeated by `repeatCount`.
+ inline Error embedInt64(int64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI64, &value, 1, repeatCount); }
+ //! Embeds uint64_t `value` repeated by `repeatCount`.
+ inline Error embedUInt64(uint64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU64, &value, 1, repeatCount); }
+ //! Embeds a floating point `value` repeated by `repeatCount`.
+ inline Error embedFloat(float value, size_t repeatCount = 1) { return embedDataArray(Type::kIdF32, &value, 1, repeatCount); }
+ //! Embeds a floating point `value` repeated by `repeatCount`.
+ inline Error embedDouble(double value, size_t repeatCount = 1) { return embedDataArray(Type::IdOfT<double>::kTypeId, &value, 1, repeatCount); }
+
+ //! Embeds a constant pool at the current offset by performing the following:
+ //! 1. Aligns by using kAlignData to the minimum `pool` alignment.
+ //! 2. Binds the ConstPool label so it's bound to an aligned location.
+ //! 3. Emits ConstPool content.
+ virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;
+
+ //! Embeds an absolute `label` address as data.
+ //!
+ //! The `dataSize` is an optional argument that can be used to specify the
+ //! size of the address data. If it's zero (default) the address size is
+ //! deduced from the target architecture (either 4 or 8 bytes).
+ virtual Error embedLabel(const Label& label, size_t dataSize = 0) = 0;
+
+ //! Embeds a delta (distance) between the `label` and `base` calculating it
+ //! as `label - base`. This function was designed to make it easier to embed
+ //! lookup tables where each index is a relative distance of two labels.
+ virtual Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) = 0;
+
+ //! \}
+
+ //! \name Comment
+ //! \{
+
+ //! Emits a comment stored in `data` with an optional `size` parameter.
+ virtual Error comment(const char* data, size_t size = SIZE_MAX) = 0;
+
+ //! Emits a formatted comment specified by `fmt` and variable number of arguments.
+ ASMJIT_API Error commentf(const char* fmt, ...);
+ //! Emits a formatted comment specified by `fmt` and `ap`.
+ ASMJIT_API Error commentv(const char* fmt, va_list ap);
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ //! Called after the emitter was attached to `CodeHolder`.
+ virtual Error onAttach(CodeHolder* code) noexcept = 0;
+ //! Called after the emitter was detached from `CodeHolder`.
+ virtual Error onDetach(CodeHolder* code) noexcept = 0;
+
+ //! Called when \ref CodeHolder has updated an important setting, which
+ //! involves the following:
+ //!
+ //! - \ref Logger has been changed (\ref CodeHolder::setLogger() has been
+ //! called).
+ //! - \ref ErrorHandler has been changed (\ref CodeHolder::setErrorHandler()
+ //! has been called).
+ //!
+ //! This function ensures that the settings are properly propagated from
+ //! \ref CodeHolder to the emitter.
+ //!
+ //! \note This function is virtual and can be overridden, however, if you
+ //! do so, always call \ref BaseEmitter::onSettingsUpdated() within your
+ //! own implementation to ensure that the emitter is in a consisten state.
+ ASMJIT_API virtual void onSettingsUpdated() noexcept;
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use environment() instead")
+ inline CodeInfo codeInfo() const noexcept {
+ return CodeInfo(_environment, _code ? _code->baseAddress() : Globals::kNoBaseAddress);
+ }
+
+ ASMJIT_DEPRECATED("Use arch() instead")
+ inline uint32_t archId() const noexcept { return arch(); }
+
+ ASMJIT_DEPRECATED("Use registerSize() instead")
+ inline uint32_t gpSize() const noexcept { return registerSize(); }
+
+ ASMJIT_DEPRECATED("Use encodingOptions() instead")
+ inline uint32_t emitterOptions() const noexcept { return encodingOptions(); }
+
+ ASMJIT_DEPRECATED("Use addEncodingOptions() instead")
+ inline void addEmitterOptions(uint32_t options) noexcept { addEncodingOptions(options); }
+
+ ASMJIT_DEPRECATED("Use clearEncodingOptions() instead")
+ inline void clearEmitterOptions(uint32_t options) noexcept { clearEncodingOptions(options); }
+
+ ASMJIT_DEPRECATED("Use forcedInstOptions() instead")
+ inline uint32_t globalInstOptions() const noexcept { return forcedInstOptions(); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_EMITTER_H_INCLUDED
diff --git a/client/asmjit/core/emitterutils.cpp b/client/asmjit/core/emitterutils.cpp
new file mode 100644
index 0000000..1115934
--- /dev/null
+++ b/client/asmjit/core/emitterutils.cpp
@@ -0,0 +1,150 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/assembler.h"
+#include "../core/emitterutils_p.h"
+#include "../core/formatter.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::EmitterUtils]
+// ============================================================================
+
+namespace EmitterUtils {
+
+#ifndef ASMJIT_NO_LOGGING
+
+Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept {
+ size_t currentSize = sb.size();
+ size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
+
+ ASMJIT_ASSERT(binSize >= dispSize);
+ const size_t kNoBinSize = SIZE_MAX;
+
+ if ((binSize != 0 && binSize != kNoBinSize) || commentSize) {
+ size_t align = kMaxInstLineSize;
+ char sep = ';';
+
+ for (size_t i = (binSize == kNoBinSize); i < 2; i++) {
+ size_t begin = sb.size();
+ ASMJIT_PROPAGATE(sb.padEnd(align));
+
+ if (sep) {
+ ASMJIT_PROPAGATE(sb.append(sep));
+ ASMJIT_PROPAGATE(sb.append(' '));
+ }
+
+ // Append binary data or comment.
+ if (i == 0) {
+ ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - dispSize - immSize));
+ ASMJIT_PROPAGATE(sb.appendChars('.', dispSize * 2));
+ ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize));
+ if (commentSize == 0) break;
+ }
+ else {
+ ASMJIT_PROPAGATE(sb.append(comment, commentSize));
+ }
+
+ currentSize += sb.size() - begin;
+ align += kMaxBinarySize;
+ sep = '|';
+ }
+ }
+
+ return sb.append('\n');
+}
+
+void logLabelBound(BaseAssembler* self, const Label& label) noexcept {
+ Logger* logger = self->logger();
+
+ StringTmp<512> sb;
+ size_t binSize = logger->hasFlag(FormatOptions::kFlagMachineCode) ? size_t(0) : SIZE_MAX;
+
+ sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationLabel));
+ Formatter::formatLabel(sb, logger->flags(), self, label.id());
+ sb.append(':');
+ EmitterUtils::formatLine(sb, nullptr, binSize, 0, 0, self->_inlineComment);
+ logger->log(sb.data(), sb.size());
+}
+
+void logInstructionEmitted(
+ BaseAssembler* self,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
+ uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) {
+
+ Logger* logger = self->logger();
+ ASMJIT_ASSERT(logger != nullptr);
+
+ StringTmp<256> sb;
+ uint32_t flags = logger->flags();
+
+ uint8_t* beforeCursor = self->bufferPtr();
+ intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
+
+ Operand_ opArray[Globals::kMaxOpCount];
+ EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
+
+ sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationCode));
+ Formatter::formatInstruction(sb, flags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
+
+ if ((flags & FormatOptions::kFlagMachineCode) != 0)
+ EmitterUtils::formatLine(sb, self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
+ else
+ EmitterUtils::formatLine(sb, nullptr, SIZE_MAX, 0, 0, self->inlineComment());
+ logger->log(sb);
+}
+
+Error logInstructionFailed(
+ BaseAssembler* self,
+ Error err,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
+
+ StringTmp<256> sb;
+ sb.append(DebugUtils::errorAsString(err));
+ sb.append(": ");
+
+ Operand_ opArray[Globals::kMaxOpCount];
+ EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
+
+ Formatter::formatInstruction(sb, 0, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
+
+ if (self->inlineComment()) {
+ sb.append(" ; ");
+ sb.append(self->inlineComment());
+ }
+
+ self->resetInstOptions();
+ self->resetExtraReg();
+ self->resetInlineComment();
+ return self->reportError(err, sb.data());
+}
+
+#endif
+
+} // {EmitterUtils}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/emitterutils_p.h b/client/asmjit/core/emitterutils_p.h
new file mode 100644
index 0000000..7e222d3
--- /dev/null
+++ b/client/asmjit/core/emitterutils_p.h
@@ -0,0 +1,109 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
+#define ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
+
+#include "../core/emitter.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+class BaseAssembler;
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::EmitterUtils]
+// ============================================================================
+
+namespace EmitterUtils {
+
+static const Operand_ noExt[3] {};
+
+enum kOpIndex {
+ kOp3 = 0,
+ kOp4 = 1,
+ kOp5 = 2
+};
+
+static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
+ uint32_t opCount = 0;
+
+ if (opExt[kOp3].isNone()) {
+ if (!o0.isNone()) opCount = 1;
+ if (!o1.isNone()) opCount = 2;
+ if (!o2.isNone()) opCount = 3;
+ }
+ else {
+ opCount = 4;
+ if (!opExt[kOp4].isNone()) {
+ opCount = 5 + uint32_t(!opExt[kOp5].isNone());
+ }
+ }
+
+ return opCount;
+}
+
+static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
+ dst[0].copyFrom(o0);
+ dst[1].copyFrom(o1);
+ dst[2].copyFrom(o2);
+ dst[3].copyFrom(opExt[kOp3]);
+ dst[4].copyFrom(opExt[kOp4]);
+ dst[5].copyFrom(opExt[kOp5]);
+}
+
+#ifndef ASMJIT_NO_LOGGING
+enum : uint32_t {
+ // Has to be big to be able to hold all metadata compiler can assign to a
+ // single instruction.
+ kMaxInstLineSize = 44,
+ kMaxBinarySize = 26
+};
+
+Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept;
+
+void logLabelBound(BaseAssembler* self, const Label& label) noexcept;
+
+void logInstructionEmitted(
+ BaseAssembler* self,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
+ uint32_t relSize, uint32_t immSize, uint8_t* afterCursor);
+
+Error logInstructionFailed(
+ BaseAssembler* self,
+ Error err, uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt);
+#endif
+
+}
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
+
diff --git a/client/asmjit/core/environment.cpp b/client/asmjit/core/environment.cpp
new file mode 100644
index 0000000..3be2b15
--- /dev/null
+++ b/client/asmjit/core/environment.cpp
@@ -0,0 +1,64 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/environment.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// X86 Target
+// ----------
+//
+// - 32-bit - Linux, OSX, BSD, and apparently also Haiku guarantee 16-byte
+// stack alignment. Other operating systems are assumed to have
+// 4-byte alignment by default for safety reasons.
+// - 64-bit - stack must be aligned to 16 bytes.
+//
+// ARM Target
+// ----------
+//
+// - 32-bit - Stack must be aligned to 8 bytes.
+// - 64-bit - Stack must be aligned to 16 bytes (hardware requirement).
+uint32_t Environment::stackAlignment() const noexcept {
+ if (is64Bit()) {
+ // Assume 16-byte alignment on any 64-bit target.
+ return 16;
+ }
+ else {
+ // The following platforms use 16-byte alignment in 32-bit mode.
+ if (isPlatformLinux() ||
+ isPlatformBSD() ||
+ isPlatformApple() ||
+ isPlatformHaiku()) {
+ return 16u;
+ }
+
+ if (isFamilyARM())
+ return 8;
+
+ // Bail to 4-byte alignment if we don't know.
+ return 4;
+ }
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/environment.h b/client/asmjit/core/environment.h
new file mode 100644
index 0000000..99b34ec
--- /dev/null
+++ b/client/asmjit/core/environment.h
@@ -0,0 +1,591 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
+#define ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
+
+#include "../core/globals.h"
+
+#if defined(__APPLE__)
+ #include <TargetConditionals.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::Environment]
+// ============================================================================
+
+//! Represents an environment, which is usually related to a \ref Target.
+//!
+//! Environment has usually an 'arch-subarch-vendor-os-abi' format, which is
+//! sometimes called "Triple" (historically it used to be 3 only parts) or
+//! "Tuple", which is a convention used by Debian Linux.
+//!
+//! AsmJit doesn't support all possible combinations or architectures and ABIs,
+//! however, it models the environment similarly to other compilers for future
+//! extensibility.
+class Environment {
+public:
+ //! Architecture type, see \ref Arch.
+ uint8_t _arch;
+ //! Sub-architecture type, see \ref SubArch.
+ uint8_t _subArch;
+ //! Vendor type, see \ref Vendor.
+ uint8_t _vendor;
+ //! Platform type, see \ref Platform.
+ uint8_t _platform;
+ //! ABI type, see \ref Abi.
+ uint8_t _abi;
+ //! Object format, see \ref Format.
+ uint8_t _format;
+ //! Reserved for future use, must be zero.
+ uint16_t _reserved;
+
+ //! Architecture.
+ enum Arch : uint32_t {
+ //! Unknown or uninitialized architecture.
+ kArchUnknown = 0,
+
+ //! Mask used by 32-bit architectures (odd are 32-bit, even are 64-bit).
+ kArch32BitMask = 0x01,
+ //! Mask used by big-endian architectures.
+ kArchBigEndianMask = 0x80u,
+
+ //! 32-bit X86 architecture.
+ kArchX86 = 1,
+ //! 64-bit X86 architecture also known as X86_64 and AMD64.
+ kArchX64 = 2,
+
+ //! 32-bit RISC-V architecture.
+ kArchRISCV32 = 3,
+ //! 64-bit RISC-V architecture.
+ kArchRISCV64 = 4,
+
+ //! 32-bit ARM architecture (little endian).
+ kArchARM = 5,
+ //! 32-bit ARM architecture (big endian).
+ kArchARM_BE = kArchARM | kArchBigEndianMask,
+    //! 64-bit ARM architecture (little endian).
+ kArchAArch64 = 6,
+    //! 64-bit ARM architecture (big endian).
+ kArchAArch64_BE = kArchAArch64 | kArchBigEndianMask,
+ //! 32-bit ARM in Thumb mode (little endian).
+ kArchThumb = 7,
+ //! 32-bit ARM in Thumb mode (big endian).
+ kArchThumb_BE = kArchThumb | kArchBigEndianMask,
+
+ // 8 is not used, even numbers are 64-bit architectures.
+
+    //! 32-bit MIPS architecture (little endian).
+ kArchMIPS_LE = 9,
+    //! 32-bit MIPS architecture (big endian).
+ kArchMIPS_BE = kArchMIPS_LE | kArchBigEndianMask,
+    //! 64-bit MIPS architecture (little endian).
+ kArchMIPS64_LE = 10,
+    //! 64-bit MIPS architecture (big endian).
+ kArchMIPS64_BE = kArchMIPS64_LE | kArchBigEndianMask,
+
+ //! Count of architectures.
+ kArchCount
+ };
+
+ //! Sub-architecture.
+ enum SubArch : uint32_t {
+ //! Unknown or uninitialized architecture sub-type.
+ kSubArchUnknown = 0,
+
+ //! Count of sub-architectures.
+ kSubArchCount
+ };
+
+ //! Vendor.
+ //!
+ //! \note AsmJit doesn't use vendor information at the moment. It's provided
+ //! for future use, if required.
+ enum Vendor : uint32_t {
+ //! Unknown or uninitialized vendor.
+ kVendorUnknown = 0,
+
+ //! Count of vendor identifiers.
+ kVendorCount
+ };
+
+ //! Platform / OS.
+ enum Platform : uint32_t {
+ //! Unknown or uninitialized platform.
+ kPlatformUnknown = 0,
+
+ //! Windows OS.
+ kPlatformWindows,
+
+ //! Other platform, most likely POSIX based.
+ kPlatformOther,
+
+ //! Linux OS.
+ kPlatformLinux,
+ //! GNU/Hurd OS.
+ kPlatformHurd,
+
+ //! FreeBSD OS.
+ kPlatformFreeBSD,
+ //! OpenBSD OS.
+ kPlatformOpenBSD,
+ //! NetBSD OS.
+ kPlatformNetBSD,
+ //! DragonFly BSD OS.
+ kPlatformDragonFlyBSD,
+
+ //! Haiku OS.
+ kPlatformHaiku,
+
+ //! Apple OSX.
+ kPlatformOSX,
+ //! Apple iOS.
+ kPlatformIOS,
+ //! Apple TVOS.
+ kPlatformTVOS,
+ //! Apple WatchOS.
+ kPlatformWatchOS,
+
+ //! Emscripten platform.
+ kPlatformEmscripten,
+
+ //! Count of platform identifiers.
+ kPlatformCount
+ };
+
+ //! ABI.
+ enum Abi : uint32_t {
+    //! Unknown or uninitialized environment.
+ kAbiUnknown = 0,
+ //! Microsoft ABI.
+ kAbiMSVC,
+ //! GNU ABI.
+ kAbiGNU,
+ //! Android Environment / ABI.
+ kAbiAndroid,
+ //! Cygwin ABI.
+ kAbiCygwin,
+
+ //! Count of known ABI types.
+ kAbiCount
+ };
+
+ //! Object format.
+ //!
+ //! \note AsmJit doesn't really use anything except \ref kFormatUnknown and
+ //! \ref kFormatJIT at the moment. Object file formats are provided for
+ //! future extensibility and a possibility to generate object files at some
+ //! point.
+ enum Format : uint32_t {
+ //! Unknown or uninitialized object format.
+ kFormatUnknown = 0,
+
+ //! JIT code generation object, most likely \ref JitRuntime or a custom
+ //! \ref Target implementation.
+ kFormatJIT,
+
+ //! Executable and linkable format (ELF).
+ kFormatELF,
+ //! Common object file format.
+ kFormatCOFF,
+ //! Extended COFF object format.
+ kFormatXCOFF,
+ //! Mach object file format.
+ kFormatMachO,
+
+ //! Count of object format types.
+ kFormatCount
+ };
+
+ //! \name Environment Detection
+ //! \{
+
+#ifdef _DOXYGEN
+ //! Architecture detected at compile-time (architecture of the host).
+ static constexpr Arch kArchHost = DETECTED_AT_COMPILE_TIME;
+ //! Sub-architecture detected at compile-time (sub-architecture of the host).
+ static constexpr SubArch kSubArchHost = DETECTED_AT_COMPILE_TIME;
+ //! Vendor detected at compile-time (vendor of the host).
+ static constexpr Vendor kVendorHost = DETECTED_AT_COMPILE_TIME;
+ //! Platform detected at compile-time (platform of the host).
+ static constexpr Platform kPlatformHost = DETECTED_AT_COMPILE_TIME;
+ //! ABI detected at compile-time (ABI of the host).
+ static constexpr Abi kAbiHost = DETECTED_AT_COMPILE_TIME;
+#else
+ static constexpr Arch kArchHost =
+ ASMJIT_ARCH_X86 == 32 ? kArchX86 :
+ ASMJIT_ARCH_X86 == 64 ? kArchX64 :
+
+ ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_LE ? kArchARM :
+ ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_BE ? kArchARM_BE :
+ ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_LE ? kArchAArch64 :
+ ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_BE ? kArchAArch64_BE :
+
+ ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_LE ? kArchMIPS_LE :
+ ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_BE ? kArchMIPS_BE :
+ ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_LE ? kArchMIPS64_LE :
+ ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_BE ? kArchMIPS64_BE :
+
+ kArchUnknown;
+
+ static constexpr SubArch kSubArchHost =
+ kSubArchUnknown;
+
+ static constexpr Vendor kVendorHost =
+ kVendorUnknown;
+
+ static constexpr Platform kPlatformHost =
+#if defined(__EMSCRIPTEN__)
+ kPlatformEmscripten
+#elif defined(_WIN32)
+ kPlatformWindows
+#elif defined(__linux__)
+ kPlatformLinux
+#elif defined(__gnu_hurd__)
+ kPlatformHurd
+#elif defined(__FreeBSD__)
+ kPlatformFreeBSD
+#elif defined(__OpenBSD__)
+ kPlatformOpenBSD
+#elif defined(__NetBSD__)
+ kPlatformNetBSD
+#elif defined(__DragonFly__)
+ kPlatformDragonFlyBSD
+#elif defined(__HAIKU__)
+ kPlatformHaiku
+#elif defined(__APPLE__) && TARGET_OS_OSX
+ kPlatformOSX
+#elif defined(__APPLE__) && TARGET_OS_TV
+ kPlatformTVOS
+#elif defined(__APPLE__) && TARGET_OS_WATCH
+ kPlatformWatchOS
+#elif defined(__APPLE__) && TARGET_OS_IPHONE
+ kPlatformIOS
+#else
+ kPlatformOther
+#endif
+ ;
+
+ static constexpr Abi kAbiHost =
+#if defined(_MSC_VER)
+ kAbiMSVC
+#elif defined(__CYGWIN__)
+ kAbiCygwin
+#elif defined(__MINGW32__) || defined(__GLIBC__)
+ kAbiGNU
+#elif defined(__ANDROID__)
+ kAbiAndroid
+#else
+ kAbiUnknown
+#endif
+ ;
+
+#endif
+
+ //! \}
+
+ //! \name Construction / Destruction
+ //! \{
+
+ inline Environment() noexcept :
+ _arch(uint8_t(kArchUnknown)),
+ _subArch(uint8_t(kSubArchUnknown)),
+ _vendor(uint8_t(kVendorUnknown)),
+ _platform(uint8_t(kPlatformUnknown)),
+ _abi(uint8_t(kAbiUnknown)),
+ _format(uint8_t(kFormatUnknown)),
+ _reserved(0) {}
+
+ inline Environment(const Environment& other) noexcept = default;
+
+ inline explicit Environment(uint32_t arch,
+ uint32_t subArch = kSubArchUnknown,
+ uint32_t vendor = kVendorUnknown,
+ uint32_t platform = kPlatformUnknown,
+ uint32_t abi = kAbiUnknown,
+ uint32_t format = kFormatUnknown) noexcept {
+ init(arch, subArch, vendor, platform, abi, format);
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline Environment& operator=(const Environment& other) noexcept = default;
+
+ inline bool operator==(const Environment& other) const noexcept { return equals(other); }
+ inline bool operator!=(const Environment& other) const noexcept { return !equals(other); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the environment is not set up.
+ //!
+ //! Returns true if all members are zero, and thus unknown.
+ inline bool empty() const noexcept {
+    // Unfortunately compilers won't optimize this well if fields are checked one by one...
+ return _packed() == 0;
+ }
+
+  //! Tests whether the environment is initialized, which means it must have
+ //! a valid architecture.
+ inline bool isInitialized() const noexcept {
+ return _arch != kArchUnknown;
+ }
+
+ inline uint64_t _packed() const noexcept {
+ uint64_t x;
+ memcpy(&x, this, 8);
+ return x;
+ }
+
+ //! Resets all members of the environment to zero / unknown.
+ inline void reset() noexcept {
+ _arch = uint8_t(kArchUnknown);
+ _subArch = uint8_t(kSubArchUnknown);
+ _vendor = uint8_t(kVendorUnknown);
+ _platform = uint8_t(kPlatformUnknown);
+ _abi = uint8_t(kAbiUnknown);
+ _format = uint8_t(kFormatUnknown);
+ _reserved = 0;
+ }
+
+ inline bool equals(const Environment& other) const noexcept {
+ return _packed() == other._packed();
+ }
+
+ //! Returns the architecture, see \ref Arch.
+ inline uint32_t arch() const noexcept { return _arch; }
+ //! Returns the sub-architecture, see \ref SubArch.
+ inline uint32_t subArch() const noexcept { return _subArch; }
+ //! Returns vendor, see \ref Vendor.
+ inline uint32_t vendor() const noexcept { return _vendor; }
+ //! Returns target's platform or operating system, see \ref Platform.
+ inline uint32_t platform() const noexcept { return _platform; }
+ //! Returns target's ABI, see \ref Abi.
+ inline uint32_t abi() const noexcept { return _abi; }
+ //! Returns target's object format, see \ref Format.
+ inline uint32_t format() const noexcept { return _format; }
+
+ inline void init(uint32_t arch,
+ uint32_t subArch = kSubArchUnknown,
+ uint32_t vendor = kVendorUnknown,
+ uint32_t platform = kPlatformUnknown,
+ uint32_t abi = kAbiUnknown,
+ uint32_t format = kFormatUnknown) noexcept {
+ _arch = uint8_t(arch);
+ _subArch = uint8_t(subArch);
+ _vendor = uint8_t(vendor);
+ _platform = uint8_t(platform);
+ _abi = uint8_t(abi);
+ _format = uint8_t(format);
+ _reserved = 0;
+ }
+
+ //! Tests whether the architecture is 32-bit.
+ inline bool is32Bit() const noexcept { return is32Bit(_arch); }
+ //! Tests whether the architecture is 64-bit.
+ inline bool is64Bit() const noexcept { return is64Bit(_arch); }
+
+ //! Tests whether the architecture is little endian.
+ inline bool isLittleEndian() const noexcept { return isLittleEndian(_arch); }
+ //! Tests whether the architecture is big endian.
+ inline bool isBigEndian() const noexcept { return isBigEndian(_arch); }
+
+ //! Tests whether this architecture is of X86 family.
+ inline bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
+ //! Tests whether this architecture is of ARM family.
+ inline bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
+ //! Tests whether this architecture is of ARM family.
+ inline bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
+ //! Tests whether this architecture is of ARM family.
+ inline bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
+
+ //! Tests whether the environment platform is Windows.
+ inline bool isPlatformWindows() const noexcept { return _platform == kPlatformWindows; }
+
+ //! Tests whether the environment platform is Linux.
+ inline bool isPlatformLinux() const noexcept { return _platform == kPlatformLinux; }
+
+ //! Tests whether the environment platform is Hurd.
+ inline bool isPlatformHurd() const noexcept { return _platform == kPlatformHurd; }
+
+ //! Tests whether the environment platform is Haiku.
+ inline bool isPlatformHaiku() const noexcept { return _platform == kPlatformHaiku; }
+
+ //! Tests whether the environment platform is any BSD.
+ inline bool isPlatformBSD() const noexcept {
+ return _platform == kPlatformFreeBSD ||
+ _platform == kPlatformOpenBSD ||
+ _platform == kPlatformNetBSD ||
+ _platform == kPlatformDragonFlyBSD;
+ }
+
+ //! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
+ inline bool isPlatformApple() const noexcept {
+ return _platform == kPlatformOSX ||
+ _platform == kPlatformIOS ||
+ _platform == kPlatformTVOS ||
+ _platform == kPlatformWatchOS;
+ }
+
+ //! Tests whether the ABI is MSVC.
+ inline bool isAbiMSVC() const noexcept { return _abi == kAbiMSVC; }
+ //! Tests whether the ABI is GNU.
+ inline bool isAbiGNU() const noexcept { return _abi == kAbiGNU; }
+
+ //! Returns a calculated stack alignment for this environment.
+ ASMJIT_API uint32_t stackAlignment() const noexcept;
+
+ //! Returns a native register size of this architecture.
+ uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
+
+ //! Sets the architecture to `arch`.
+ inline void setArch(uint32_t arch) noexcept { _arch = uint8_t(arch); }
+ //! Sets the sub-architecture to `subArch`.
+ inline void setSubArch(uint32_t subArch) noexcept { _subArch = uint8_t(subArch); }
+ //! Sets the vendor to `vendor`.
+ inline void setVendor(uint32_t vendor) noexcept { _vendor = uint8_t(vendor); }
+ //! Sets the platform to `platform`.
+ inline void setPlatform(uint32_t platform) noexcept { _platform = uint8_t(platform); }
+ //! Sets the ABI to `abi`.
+ inline void setAbi(uint32_t abi) noexcept { _abi = uint8_t(abi); }
+ //! Sets the object format to `format`.
+ inline void setFormat(uint32_t format) noexcept { _format = uint8_t(format); }
+
+ //! \}
+
+ //! \name Static Utilities
+ //! \{
+
+ //! Tests whether the given architecture `arch` is 32-bit.
+ static inline bool is32Bit(uint32_t arch) noexcept {
+ return (arch & kArch32BitMask) == kArch32BitMask;
+ }
+
+ //! Tests whether the given architecture `arch` is 64-bit.
+ static inline bool is64Bit(uint32_t arch) noexcept {
+ return (arch & kArch32BitMask) == 0;
+ }
+
+ //! Tests whether the given architecture `arch` is little endian.
+ static inline bool isLittleEndian(uint32_t arch) noexcept {
+ return (arch & kArchBigEndianMask) == 0;
+ }
+
+ //! Tests whether the given architecture `arch` is big endian.
+ static inline bool isBigEndian(uint32_t arch) noexcept {
+ return (arch & kArchBigEndianMask) == kArchBigEndianMask;
+ }
+
+ //! Tests whether the given architecture family is X86 or X64.
+ static inline bool isFamilyX86(uint32_t arch) noexcept {
+ return arch == kArchX86 ||
+ arch == kArchX64;
+ }
+
+ //! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
+ static inline bool isFamilyRISCV(uint32_t arch) noexcept {
+ return arch == kArchRISCV32 ||
+ arch == kArchRISCV64;
+ }
+
+ //! Tests whether the given architecture family is ARM, Thumb, or AArch64.
+ static inline bool isFamilyARM(uint32_t arch) noexcept {
+ arch &= ~kArchBigEndianMask;
+ return arch == kArchARM ||
+ arch == kArchAArch64 ||
+ arch == kArchThumb;
+ }
+
+  //! Tests whether the given architecture family is MIPS or MIPS64.
+ static inline bool isFamilyMIPS(uint32_t arch) noexcept {
+ arch &= ~kArchBigEndianMask;
+ return arch == kArchMIPS_LE ||
+ arch == kArchMIPS64_LE;
+ }
+
+ //! Returns a native general purpose register size from the given architecture.
+ static uint32_t registerSizeFromArch(uint32_t arch) noexcept {
+ return is32Bit(arch) ? 4u : 8u;
+ }
+
+ //! \}
+};
+
+//! Returns the host environment constructed from preprocessor macros defined
+//! by the compiler.
+//!
+//! The returned environment should precisely match the target host architecture,
+//! sub-architecture, platform, and ABI.
+static ASMJIT_INLINE Environment hostEnvironment() noexcept {
+ return Environment(Environment::kArchHost,
+ Environment::kSubArchHost,
+ Environment::kVendorHost,
+ Environment::kPlatformHost,
+ Environment::kAbiHost,
+ Environment::kFormatUnknown);
+}
+
+static_assert(sizeof(Environment) == 8,
+ "Environment must occupy exactly 8 bytes.");
+
+//! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+class ASMJIT_DEPRECATED_STRUCT("Use Environment instead") ArchInfo : public Environment {
+public:
+ inline ArchInfo() noexcept : Environment() {}
+
+ inline ArchInfo(const Environment& other) noexcept : Environment(other) {}
+ inline explicit ArchInfo(uint32_t arch, uint32_t subArch = kSubArchUnknown) noexcept
+ : Environment(arch, subArch) {}
+
+ enum Id : uint32_t {
+ kIdNone = Environment::kArchUnknown,
+ kIdX86 = Environment::kArchX86,
+ kIdX64 = Environment::kArchX64,
+ kIdA32 = Environment::kArchARM,
+ kIdA64 = Environment::kArchAArch64,
+ kIdHost = Environment::kArchHost
+ };
+
+ enum SubType : uint32_t {
+ kSubIdNone = Environment::kSubArchUnknown
+ };
+
+ static inline ArchInfo host() noexcept { return ArchInfo(hostEnvironment()); }
+};
+#endif // !ASMJIT_NO_DEPRECATED
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
diff --git a/client/asmjit/core/errorhandler.cpp b/client/asmjit/core/errorhandler.cpp
new file mode 100644
index 0000000..8372d75
--- /dev/null
+++ b/client/asmjit/core/errorhandler.cpp
@@ -0,0 +1,37 @@
+
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/errorhandler.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ErrorHandler]
+// ============================================================================
+
+ErrorHandler::ErrorHandler() noexcept {}
+ErrorHandler::~ErrorHandler() noexcept {}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/errorhandler.h b/client/asmjit/core/errorhandler.h
new file mode 100644
index 0000000..2337cd8
--- /dev/null
+++ b/client/asmjit/core/errorhandler.h
@@ -0,0 +1,267 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
+#define ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_error_handling
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseEmitter;
+
+// ============================================================================
+// [asmjit::ErrorHandler]
+// ============================================================================
+
+//! Error handler can be used to override the default behavior of error handling.
+//!
+//! It's available to all classes that inherit `BaseEmitter`. Override
+//! \ref ErrorHandler::handleError() to implement your own error handler.
+//!
+//! The following use-cases are supported:
+//!
+//! - Record the error and continue code generation. This is the simplest
+//! approach that can be used to at least log possible errors.
+//! - Throw an exception. AsmJit doesn't use exceptions and is completely
+//! exception-safe, but it's perfectly legal to throw an exception from
+//! the error handler.
+//! - Use plain old C's `setjmp()` and `longjmp()`. Asmjit always puts Assembler,
+//! Builder and Compiler to a consistent state before calling \ref handleError(),
+//! so `longjmp()` can be used without issues to cancel the code-generation if
+//! an error occurred. This method can be used if exception handling in your
+//! project is turned off and you still want some comfort. In most cases it
+//! should be safe as AsmJit uses \ref Zone memory and the ownership of memory
+//! it allocates always ends with the instance that allocated it. If using this
+//! approach please never jump outside the life-time of \ref CodeHolder and
+//! \ref BaseEmitter.
+//!
+//! \ref ErrorHandler can be attached to \ref CodeHolder or \ref BaseEmitter,
+//! which has a priority. The example below uses error handler that just prints
+//! the error, but lets AsmJit continue:
+//!
+//! ```
+//! // Error Handling #1 - Logging and returning Error.
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! // Error handler that just prints the error and lets AsmJit ignore it.
+//! class SimpleErrorHandler : public ErrorHandler {
+//! public:
+//! Error err;
+//!
+//! inline SimpleErrorHandler() : err(kErrorOk) {}
+//!
+//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
+//! this->err = err;
+//! fprintf(stderr, "ERROR: %s\n", message);
+//! }
+//! };
+//!
+//! int main() {
+//! JitRuntime rt;
+//! SimpleErrorHandler eh;
+//!
+//! CodeHolder code;
+//! code.init(rt.environment());
+//! code.setErrorHandler(&eh);
+//!
+//! // Try to emit instruction that doesn't exist.
+//! x86::Assembler a(&code);
+//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
+//!
+//! if (eh.err) {
+//! // Assembler failed!
+//! return 1;
+//! }
+//!
+//! return 0;
+//! }
+//! ```
+//!
+//! If error happens during instruction emitting / encoding the assembler behaves
+//! transactionally - the output buffer won't advance if encoding failed, thus
+//! either a fully encoded instruction or nothing is emitted. The error handling
+//! shown above is useful, but it's still not the best way of dealing with errors
+//! in AsmJit. The following example shows how to use exception handling to handle
+//! errors in a more C++ way:
+//!
+//! ```
+//! // Error Handling #2 - Throwing an exception.
+//! #include <asmjit/x86.h>
+//! #include <exception>
+//! #include <string>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! // Error handler that throws a user-defined `AsmJitException`.
+//! class AsmJitException : public std::exception {
+//! public:
+//! Error err;
+//! std::string message;
+//!
+//! AsmJitException(Error err, const char* message) noexcept
+//! : err(err),
+//! message(message) {}
+//!
+//! const char* what() const noexcept override { return message.c_str(); }
+//! };
+//!
+//! class ThrowableErrorHandler : public ErrorHandler {
+//! public:
+//! // Throw is possible, functions that use ErrorHandler are never 'noexcept'.
+//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
+//! throw AsmJitException(err, message);
+//! }
+//! };
+//!
+//! int main() {
+//! JitRuntime rt;
+//! ThrowableErrorHandler eh;
+//!
+//! CodeHolder code;
+//! code.init(rt.environment());
+//! code.setErrorHandler(&eh);
+//!
+//! x86::Assembler a(&code);
+//!
+//! // Try to emit instruction that doesn't exist.
+//! try {
+//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
+//! }
+//! catch (const AsmJitException& ex) {
+//! printf("EXCEPTION THROWN: %s\n", ex.what());
+//! return 1;
+//! }
+//!
+//! return 0;
+//! }
+//! ```
+//!
+//! If C++ exceptions are not what you like or your project turns off them
+//! completely there is still a way of reducing the error handling to a minimum
+//! by using a standard setjmp/longjmp approach. AsmJit is exception-safe and
+//! cleans up everything before calling the ErrorHandler, so any approach is
+//! safe. You can simply jump from the error handler without causing any
+//! side-effects or memory leaks. The following example demonstrates how it
+//! could be done:
+//!
+//! ```
+//! // Error Handling #3 - Using setjmp/longjmp if exceptions are not allowed.
+//! #include <asmjit/x86.h>
+//! #include <setjmp.h>
+//! #include <stdio.h>
+//!
+//! class LongJmpErrorHandler : public asmjit::ErrorHandler {
+//! public:
+//! inline LongJmpErrorHandler() : err(asmjit::kErrorOk) {}
+//!
+//! void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override {
+//! this->err = err;
+//! longjmp(state, 1);
+//! }
+//!
+//! jmp_buf state;
+//! asmjit::Error err;
+//! };
+//!
+//! int main(int argc, char* argv[]) {
+//! using namespace asmjit;
+//!
+//! JitRuntime rt;
+//! LongJmpErrorHandler eh;
+//!
+//! CodeHolder code;
+//!   code.init(rt.environment());
+//! code.setErrorHandler(&eh);
+//!
+//! x86::Assembler a(&code);
+//!
+//! if (!setjmp(eh.state)) {
+//! // Try to emit instruction that doesn't exist.
+//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
+//! }
+//! else {
+//! Error err = eh.err;
+//! printf("ASMJIT ERROR: 0x%08X [%s]\n", err, DebugUtils::errorAsString(err));
+//! }
+//!
+//! return 0;
+//! }
+//! ```
+class ASMJIT_VIRTAPI ErrorHandler {
+public:
+ ASMJIT_BASE_CLASS(ErrorHandler)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Creates a new `ErrorHandler` instance.
+ ASMJIT_API ErrorHandler() noexcept;
+ //! Destroys the `ErrorHandler` instance.
+ ASMJIT_API virtual ~ErrorHandler() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Handle Error]
+ // --------------------------------------------------------------------------
+
+ //! Error handler (must be reimplemented).
+ //!
+ //! Error handler is called after an error happened and before it's propagated
+ //! to the caller. There are multiple ways how the error handler can be used:
+ //!
+ //! 1. User-based error handling without throwing exception or using C's
+ //! `longjmp()`. This is for users that don't use exceptions and want
+ //! customized error handling.
+ //!
+ //! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely
+ //! exception-safe, but you can throw exception from your error handler if
+ //! this way is the preferred way of handling errors in your project.
+ //!
+ //! 3. Using plain old C's `setjmp()` and `longjmp()`. Asmjit always puts
+ //! `BaseEmitter` to a consistent state before calling `handleError()`
+ //! so `longjmp()` can be used without any issues to cancel the code
+ //! generation if an error occurred. There is no difference between
+ //! exceptions and `longjmp()` from AsmJit's perspective, however,
+ //! never jump outside of `CodeHolder` and `BaseEmitter` scope as you
+ //! would leak memory.
+ virtual void handleError(Error err, const char* message, BaseEmitter* origin) = 0;
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
+
diff --git a/client/asmjit/core/features.h b/client/asmjit/core/features.h
new file mode 100644
index 0000000..fd28472
--- /dev/null
+++ b/client/asmjit/core/features.h
@@ -0,0 +1,186 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_FEATURES_H_INCLUDED
+#define ASMJIT_CORE_FEATURES_H_INCLUDED
+
+#include "../core/globals.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::BaseFeatures]
+// ============================================================================
+
+//! Base class that provides information about CPU features.
+//!
+//! Internally each feature is represented by a single bit in an embedded
+//! bit-array, however, feature bits are defined by an architecture specific
+//! implementations, like \ref x86::Features.
+class BaseFeatures {
+public:
+  typedef Support::BitWord BitWord;
+  typedef Support::BitVectorIterator<BitWord> Iterator;
+
+  enum : uint32_t {
+    kMaxFeatures = 128,
+    kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits
+  };
+
+  BitWord _bits[kNumBitWords];
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline BaseFeatures() noexcept { reset(); }
+  inline BaseFeatures(const BaseFeatures& other) noexcept = default;
+  inline explicit BaseFeatures(Globals::NoInit_) noexcept {}
+
+  inline void reset() noexcept {
+    for (size_t i = 0; i < kNumBitWords; i++)
+      _bits[i] = 0;
+  }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  inline BaseFeatures& operator=(const BaseFeatures& other) noexcept = default;
+
+  inline bool operator==(const BaseFeatures& other) const noexcept { return eq(other); }
+  inline bool operator!=(const BaseFeatures& other) const noexcept { return !eq(other); }
+
+  //! \}
+
+  //! \name Cast
+  //! \{
+
+  //! Casts this base class into a derived type `T`.
+  template<typename T>
+  inline T& as() noexcept { return static_cast<T&>(*this); }
+
+  //! Casts this base class into a derived type `T` (const).
+  template<typename T>
+  inline const T& as() const noexcept { return static_cast<const T&>(*this); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  inline bool empty() const noexcept {
+    for (uint32_t i = 0; i < kNumBitWords; i++)
+      if (_bits[i])
+        return false;
+    return true;
+  }
+
+  //! Returns all features as array of bitwords (see \ref Support::BitWord).
+  inline BitWord* bits() noexcept { return _bits; }
+  //! Returns all features as array of bitwords (const).
+  inline const BitWord* bits() const noexcept { return _bits; }
+
+  //! Returns the number of BitWords returned by \ref bits().
+  inline size_t bitWordCount() const noexcept { return kNumBitWords; }
+
+  //! Returns \ref Support::BitVectorIterator, that can be used to iterate
+  //! over all features efficiently.
+  inline Iterator iterator() const noexcept {
+    return Iterator(_bits, kNumBitWords);
+  }
+
+  //! Tests whether the feature `featureId` is present.
+  inline bool has(uint32_t featureId) const noexcept {
+    ASMJIT_ASSERT(featureId < kMaxFeatures);
+
+    uint32_t idx = featureId / Support::kBitWordSizeInBits;
+    uint32_t bit = featureId % Support::kBitWordSizeInBits;
+
+    return bool((_bits[idx] >> bit) & 0x1);
+  }
+
+  //! Tests whether all features as defined by `other` are present.
+  inline bool hasAll(const BaseFeatures& other) const noexcept {
+    for (uint32_t i = 0; i < kNumBitWords; i++)
+      if ((_bits[i] & other._bits[i]) != other._bits[i])
+        return false;
+    return true;
+  }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Adds the given CPU `featureId` to the list of features.
+  inline void add(uint32_t featureId) noexcept {
+    ASMJIT_ASSERT(featureId < kMaxFeatures);
+
+    uint32_t idx = featureId / Support::kBitWordSizeInBits;
+    uint32_t bit = featureId % Support::kBitWordSizeInBits;
+
+    _bits[idx] |= BitWord(1) << bit;
+  }
+
+  template<typename... Args>
+  inline void add(uint32_t featureId, Args... otherIds) noexcept {
+    add(featureId);
+    add(otherIds...);
+  }
+
+  //! Removes the given CPU `featureId` from the list of features.
+  inline void remove(uint32_t featureId) noexcept {
+    ASMJIT_ASSERT(featureId < kMaxFeatures);
+
+    uint32_t idx = featureId / Support::kBitWordSizeInBits;
+    uint32_t bit = featureId % Support::kBitWordSizeInBits;
+
+    _bits[idx] &= ~(BitWord(1) << bit);
+  }
+
+  template<typename... Args>
+  inline void remove(uint32_t featureId, Args... otherIds) noexcept {
+    remove(featureId);
+    remove(otherIds...);
+  }
+
+  inline bool eq(const BaseFeatures& other) const noexcept {
+    for (size_t i = 0; i < kNumBitWords; i++)
+      if (_bits[i] != other._bits[i])
+        return false;
+    return true;
+  }
+
+  //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_FEATURES_H_INCLUDED
diff --git a/client/asmjit/core/formatter.cpp b/client/asmjit/core/formatter.cpp
new file mode 100644
index 0000000..404edaf
--- /dev/null
+++ b/client/asmjit/core/formatter.cpp
@@ -0,0 +1,469 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_LOGGING
+
+#include "../core/builder.h"
+#include "../core/codeholder.h"
+#include "../core/compiler.h"
+#include "../core/emitter.h"
+#include "../core/formatter.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/type.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86formatter_p.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/armformatter_p.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+#if defined(ASMJIT_NO_COMPILER)
+class VirtReg;
+#endif
+
+// ============================================================================
+// [asmjit::Formatter]
+// ============================================================================
+
+namespace Formatter {
+
+Error formatTypeId(String& sb, uint32_t typeId) noexcept {
+ if (typeId == Type::kIdVoid)
+ return sb.append("void");
+
+ if (!Type::isValid(typeId))
+ return sb.append("unknown");
+
+ const char* typeName = "unknown";
+ uint32_t typeSize = Type::sizeOf(typeId);
+
+ uint32_t baseId = Type::baseOf(typeId);
+ switch (baseId) {
+ case Type::kIdIntPtr : typeName = "iptr" ; break;
+ case Type::kIdUIntPtr: typeName = "uptr" ; break;
+ case Type::kIdI8 : typeName = "i8" ; break;
+ case Type::kIdU8 : typeName = "u8" ; break;
+ case Type::kIdI16 : typeName = "i16" ; break;
+ case Type::kIdU16 : typeName = "u16" ; break;
+ case Type::kIdI32 : typeName = "i32" ; break;
+ case Type::kIdU32 : typeName = "u32" ; break;
+ case Type::kIdI64 : typeName = "i64" ; break;
+ case Type::kIdU64 : typeName = "u64" ; break;
+ case Type::kIdF32 : typeName = "f32" ; break;
+ case Type::kIdF64 : typeName = "f64" ; break;
+ case Type::kIdF80 : typeName = "f80" ; break;
+ case Type::kIdMask8 : typeName = "mask8" ; break;
+ case Type::kIdMask16 : typeName = "mask16"; break;
+ case Type::kIdMask32 : typeName = "mask32"; break;
+ case Type::kIdMask64 : typeName = "mask64"; break;
+ case Type::kIdMmx32 : typeName = "mmx32" ; break;
+ case Type::kIdMmx64 : typeName = "mmx64" ; break;
+ }
+
+ uint32_t baseSize = Type::sizeOf(baseId);
+ if (typeSize > baseSize) {
+ uint32_t count = typeSize / baseSize;
+ return sb.appendFormat("%sx%u", typeName, unsigned(count));
+ }
+ else {
+ return sb.append(typeName);
+ }
+}
+
+Error formatFeature(
+ String& sb,
+ uint32_t arch,
+ uint32_t featureId) noexcept {
+
+#ifdef ASMJIT_BUILD_X86
+ if (Environment::isFamilyX86(arch))
+ return x86::FormatterInternal::formatFeature(sb, featureId);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (Environment::isFamilyARM(arch))
+ return arm::FormatterInternal::formatFeature(sb, featureId);
+#endif
+
+ return kErrorInvalidArch;
+}
+
+Error formatLabel(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ uint32_t labelId) noexcept {
+
+ DebugUtils::unused(formatFlags);
+
+ const LabelEntry* le = emitter->code()->labelEntry(labelId);
+ if (ASMJIT_UNLIKELY(!le))
+ return sb.appendFormat("<InvalidLabel:%u>", labelId);
+
+ if (le->hasName()) {
+ if (le->hasParent()) {
+ uint32_t parentId = le->parentId();
+ const LabelEntry* pe = emitter->code()->labelEntry(parentId);
+
+ if (ASMJIT_UNLIKELY(!pe))
+ ASMJIT_PROPAGATE(sb.appendFormat("<InvalidLabel:%u>", labelId));
+ else if (ASMJIT_UNLIKELY(!pe->hasName()))
+ ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId));
+ else
+ ASMJIT_PROPAGATE(sb.append(pe->name()));
+
+ ASMJIT_PROPAGATE(sb.append('.'));
+ }
+ return sb.append(le->name());
+ }
+ else {
+ return sb.appendFormat("L%u", labelId);
+ }
+}
+
+Error formatRegister(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ uint32_t arch,
+ uint32_t regType,
+ uint32_t regId) noexcept {
+
+#ifdef ASMJIT_BUILD_X86
+ if (Environment::isFamilyX86(arch))
+ return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (Environment::isFamilyARM(arch))
+ return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
+#endif
+
+ return kErrorInvalidArch;
+}
+
+Error formatOperand(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ uint32_t arch,
+ const Operand_& op) noexcept {
+
+#ifdef ASMJIT_BUILD_X86
+ if (Environment::isFamilyX86(arch))
+ return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (Environment::isFamilyARM(arch))
+ return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
+#endif
+
+ return kErrorInvalidArch;
+}
+
+Error formatInstruction(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ uint32_t arch,
+ const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
+
+#ifdef ASMJIT_BUILD_X86
+ if (Environment::isFamilyX86(arch))
+ return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (Environment::isFamilyARM(arch))
+ return arm::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
+#endif
+
+ return kErrorInvalidArch;
+}
+
+#ifndef ASMJIT_NO_BUILDER
+static Error formatFuncValue(String& sb, uint32_t formatFlags, const BaseEmitter* emitter, FuncValue value) noexcept {
+  uint32_t typeId = value.typeId();
+  ASMJIT_PROPAGATE(formatTypeId(sb, typeId));
+
+  if (value.isAssigned()) {
+    ASMJIT_PROPAGATE(sb.append('@'));
+
+    if (value.isIndirect())
+      ASMJIT_PROPAGATE(sb.append('['));
+
+    // NOTE: A FuncValue should be assigned to either a register or a stack
+    // slot, but never both. Two separate `if`s are used on purpose so that
+    // a value wrongly assigned to both would be visible in the logs.
+    if (value.isReg()) {
+      ASMJIT_PROPAGATE(formatRegister(sb, formatFlags, emitter, emitter->arch(), value.regType(), value.regId()));
+    }
+
+    if (value.isStack()) {
+      ASMJIT_PROPAGATE(sb.appendFormat("[%d]", int(value.stackOffset())));
+    }
+
+    if (value.isIndirect())
+      ASMJIT_PROPAGATE(sb.append(']'));
+  }
+
+  return kErrorOk;
+}
+
+static Error formatFuncRets(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ const FuncDetail& fd,
+ VirtReg* const* vRegs) noexcept {
+
+ if (!fd.hasRet())
+ return sb.append("void");
+
+ for (uint32_t i = 0; i < fd.retCount(); i++) {
+ if (i) ASMJIT_PROPAGATE(sb.append(", "));
+ ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, emitter, fd.ret(i)));
+
+#ifndef ASMJIT_NO_COMPILER
+ if (vRegs) {
+ static const char nullRet[] = "<none>";
+ ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[i] ? vRegs[i]->name() : nullRet));
+ }
+#else
+ DebugUtils::unused(vRegs);
+#endif
+ }
+
+ return kErrorOk;
+}
+
+static Error formatFuncArgs(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ const FuncDetail& fd,
+ VirtReg* const* vRegs) noexcept {
+
+ uint32_t count = fd.argCount();
+ if (!count)
+ return sb.append("void");
+
+ for (uint32_t i = 0; i < count; i++) {
+ if (i)
+ ASMJIT_PROPAGATE(sb.append(", "));
+
+ ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, emitter, fd.arg(i)));
+
+#ifndef ASMJIT_NO_COMPILER
+ if (vRegs) {
+ static const char nullArg[] = "<none>";
+ ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[i] ? vRegs[i]->name() : nullArg));
+ }
+#else
+ DebugUtils::unused(vRegs);
+#endif
+ }
+
+ return kErrorOk;
+}
+
+Error formatNode(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseBuilder* builder,
+ const BaseNode* node) noexcept {
+
+ if (node->hasPosition() && (formatFlags & FormatOptions::kFlagPositions) != 0)
+ ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position()));
+
+ switch (node->type()) {
+ case BaseNode::kNodeInst:
+ case BaseNode::kNodeJump: {
+ const InstNode* instNode = node->as<InstNode>();
+ ASMJIT_PROPAGATE(
+ formatInstruction(sb, formatFlags, builder,
+ builder->arch(),
+ instNode->baseInst(), instNode->operands(), instNode->opCount()));
+ break;
+ }
+
+ case BaseNode::kNodeSection: {
+ const SectionNode* sectionNode = node->as<SectionNode>();
+ if (builder->_code->isSectionValid(sectionNode->id())) {
+ const Section* section = builder->_code->sectionById(sectionNode->id());
+ ASMJIT_PROPAGATE(sb.appendFormat(".section %s", section->name()));
+ }
+ break;
+ }
+
+ case BaseNode::kNodeLabel: {
+ const LabelNode* labelNode = node->as<LabelNode>();
+ ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, labelNode->labelId()));
+ ASMJIT_PROPAGATE(sb.append(":"));
+ break;
+ }
+
+ case BaseNode::kNodeAlign: {
+ const AlignNode* alignNode = node->as<AlignNode>();
+ ASMJIT_PROPAGATE(
+ sb.appendFormat("align %u (%s)",
+ alignNode->alignment(),
+ alignNode->alignMode() == kAlignCode ? "code" : "data"));
+ break;
+ }
+
+ case BaseNode::kNodeEmbedData: {
+ const EmbedDataNode* embedNode = node->as<EmbedDataNode>();
+ ASMJIT_PROPAGATE(sb.append("embed "));
+ if (embedNode->repeatCount() != 1)
+ ASMJIT_PROPAGATE(sb.appendFormat("[repeat=%zu] ", size_t(embedNode->repeatCount())));
+ ASMJIT_PROPAGATE(sb.appendFormat("%u bytes", embedNode->dataSize()));
+ break;
+ }
+
+ case BaseNode::kNodeEmbedLabel: {
+ const EmbedLabelNode* embedNode = node->as<EmbedLabelNode>();
+ ASMJIT_PROPAGATE(sb.append(".label "));
+ ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->labelId()));
+ break;
+ }
+
+ case BaseNode::kNodeEmbedLabelDelta: {
+ const EmbedLabelDeltaNode* embedNode = node->as<EmbedLabelDeltaNode>();
+ ASMJIT_PROPAGATE(sb.append(".label ("));
+ ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->labelId()));
+ ASMJIT_PROPAGATE(sb.append(" - "));
+ ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->baseLabelId()));
+ ASMJIT_PROPAGATE(sb.append(")"));
+ break;
+ }
+
+ case BaseNode::kNodeComment: {
+ const CommentNode* commentNode = node->as<CommentNode>();
+ ASMJIT_PROPAGATE(sb.appendFormat("; %s", commentNode->inlineComment()));
+ break;
+ }
+
+ case BaseNode::kNodeSentinel: {
+ const SentinelNode* sentinelNode = node->as<SentinelNode>();
+ const char* sentinelName = nullptr;
+
+ switch (sentinelNode->sentinelType()) {
+ case SentinelNode::kSentinelFuncEnd:
+ sentinelName = "[FuncEnd]";
+ break;
+
+ default:
+ sentinelName = "[Sentinel]";
+ break;
+ }
+
+ ASMJIT_PROPAGATE(sb.append(sentinelName));
+ break;
+ }
+
+#ifndef ASMJIT_NO_COMPILER
+ case BaseNode::kNodeFunc: {
+ const FuncNode* funcNode = node->as<FuncNode>();
+
+ ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, funcNode->labelId()));
+ ASMJIT_PROPAGATE(sb.append(": "));
+
+ ASMJIT_PROPAGATE(formatFuncRets(sb, formatFlags, builder, funcNode->detail(), nullptr));
+ ASMJIT_PROPAGATE(sb.append(" Func("));
+ ASMJIT_PROPAGATE(formatFuncArgs(sb, formatFlags, builder, funcNode->detail(), funcNode->args()));
+ ASMJIT_PROPAGATE(sb.append(")"));
+ break;
+ }
+
+ case BaseNode::kNodeFuncRet: {
+ const FuncRetNode* retNode = node->as<FuncRetNode>();
+ ASMJIT_PROPAGATE(sb.append("[FuncRet]"));
+
+ for (uint32_t i = 0; i < 2; i++) {
+ const Operand_& op = retNode->_opArray[i];
+ if (!op.isNone()) {
+ ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
+ ASMJIT_PROPAGATE(formatOperand(sb, formatFlags, builder, builder->arch(), op));
+ }
+ }
+ break;
+ }
+
+ case BaseNode::kNodeInvoke: {
+ const InvokeNode* invokeNode = node->as<InvokeNode>();
+ ASMJIT_PROPAGATE(
+ formatInstruction(sb, formatFlags, builder,
+ builder->arch(),
+ invokeNode->baseInst(), invokeNode->operands(), invokeNode->opCount()));
+ break;
+ }
+#endif // !ASMJIT_NO_COMPILER
+
+ default: {
+ ASMJIT_PROPAGATE(sb.appendFormat("[UserNode:%u]", node->type()));
+ break;
+ }
+ }
+
+ return kErrorOk;
+}
+
+
+Error formatNodeList(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseBuilder* builder) noexcept {
+
+ return formatNodeList(sb, formatFlags, builder, builder->firstNode(), nullptr);
+}
+
+Error formatNodeList(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseBuilder* builder,
+ const BaseNode* begin,
+ const BaseNode* end) noexcept {
+
+ const BaseNode* node = begin;
+ while (node != end) {
+ ASMJIT_PROPAGATE(formatNode(sb, formatFlags, builder, node));
+ ASMJIT_PROPAGATE(sb.append('\n'));
+ node = node->next();
+ }
+ return kErrorOk;
+}
+#endif // !ASMJIT_NO_BUILDER
+
+} // {Formatter}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_LOGGING
diff --git a/client/asmjit/core/formatter.h b/client/asmjit/core/formatter.h
new file mode 100644
index 0000000..14934ba
--- /dev/null
+++ b/client/asmjit/core/formatter.h
@@ -0,0 +1,256 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_FORMATTER_H_INCLUDED
+#define ASMJIT_CORE_FORMATTER_H_INCLUDED
+
+#include "../core/inst.h"
+#include "../core/string.h"
+
+#ifndef ASMJIT_NO_LOGGING
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_logging
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseEmitter;
+struct Operand_;
+
+#ifndef ASMJIT_NO_BUILDER
+class BaseBuilder;
+class BaseNode;
+#endif
+
+#ifndef ASMJIT_NO_COMPILER
+class BaseCompiler;
+#endif
+
+// ============================================================================
+// [asmjit::FormatOptions]
+// ============================================================================
+
+//! Formatting options used by \ref Logger and \ref Formatter.
+class FormatOptions {
+public:
+ //! Format flags, see \ref Flags.
+ uint32_t _flags;
+ //! Indentation by type, see \ref IndentationType.
+ uint8_t _indentation[4];
+
+ //! Flags can enable a logging feature.
+ enum Flags : uint32_t {
+ //! No flags.
+ kNoFlags = 0u,
+
+ //! Show also binary form of each logged instruction (Assembler).
+ kFlagMachineCode = 0x00000001u,
+ //! Show a text explanation of some immediate values.
+ kFlagExplainImms = 0x00000002u,
+ //! Use hexadecimal notation of immediate values.
+ kFlagHexImms = 0x00000004u,
+ //! Use hexadecimal notation of address offsets.
+ kFlagHexOffsets = 0x00000008u,
+ //! Show casts between virtual register types (Compiler).
+ kFlagRegCasts = 0x00000010u,
+ //! Show positions associated with nodes (Compiler).
+ kFlagPositions = 0x00000020u,
+ //! Annotate nodes that are lowered by passes.
+ kFlagAnnotations = 0x00000040u,
+
+ // TODO: These must go, keep this only for formatting.
+ //! Show an additional output from passes.
+ kFlagDebugPasses = 0x00000080u,
+ //! Show an additional output from RA.
+ kFlagDebugRA = 0x00000100u
+ };
+
+ //! Describes indentation type of code, label, or comment in logger output.
+ enum IndentationType : uint32_t {
+ //! Indentation used for instructions and directives.
+ kIndentationCode = 0u,
+ //! Indentation used for labels and function nodes.
+ kIndentationLabel = 1u,
+ //! Indentation used for comments (not inline comments).
+ kIndentationComment = 2u,
+ //! \cond INTERNAL
+ //! Reserved for future use.
+ kIndentationReserved = 3u
+ //! \endcond
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a default-initialized FormatOptions.
+ constexpr FormatOptions() noexcept
+ : _flags(0),
+ _indentation { 0, 0, 0, 0 } {}
+
+ constexpr FormatOptions(const FormatOptions& other) noexcept = default;
+ inline FormatOptions& operator=(const FormatOptions& other) noexcept = default;
+
+ //! Resets FormatOptions to its default initialized state.
+ inline void reset() noexcept {
+ _flags = 0;
+ _indentation[0] = 0;
+ _indentation[1] = 0;
+ _indentation[2] = 0;
+ _indentation[3] = 0;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns format flags.
+ constexpr uint32_t flags() const noexcept { return _flags; }
+ //! Tests whether the given `flag` is set in format flags.
+ constexpr bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ //! Resets all format flags to `flags`.
+ inline void setFlags(uint32_t flags) noexcept { _flags = flags; }
+ //! Adds `flags` to format flags.
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+ //! Removes `flags` from format flags.
+ inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
+
+ //! Returns indentation for the given `type`, see \ref IndentationType.
+ constexpr uint8_t indentation(uint32_t type) const noexcept { return _indentation[type]; }
+ //! Sets indentation for the given `type`, see \ref IndentationType.
+ inline void setIndentation(uint32_t type, uint32_t n) noexcept { _indentation[type] = uint8_t(n); }
+ //! Resets indentation for the given `type` to zero.
+ inline void resetIndentation(uint32_t type) noexcept { _indentation[type] = uint8_t(0); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::Formatter]
+// ============================================================================
+
+//! Provides formatting functionality to format operands, instructions, and nodes.
+namespace Formatter {
+
+//! Appends a formatted `typeId` to the output string `sb`.
+ASMJIT_API Error formatTypeId(
+ String& sb,
+ uint32_t typeId) noexcept;
+
+//! Appends a formatted `featureId` to the output string `sb`.
+//!
+//! See \ref BaseFeatures.
+ASMJIT_API Error formatFeature(
+ String& sb,
+ uint32_t arch,
+ uint32_t featureId) noexcept;
+
+//! Appends a formatted register to the output string `sb`.
+//!
+//! \note Emitter is optional, but it's required to format virtual registers,
+//! which won't be formatted properly if the `emitter` is not provided.
+ASMJIT_API Error formatRegister(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ uint32_t arch,
+ uint32_t regType,
+ uint32_t regId) noexcept;
+
+//! Appends a formatted label to the output string `sb`.
+//!
+//! \note Emitter is optional, but it's required to format named labels
+//! properly; otherwise the label is formatted as an anonymous label.
+ASMJIT_API Error formatLabel(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ uint32_t labelId) noexcept;
+
+//! Appends a formatted operand to the output string `sb`.
+//!
+//! \note Emitter is optional, but it's required to format named labels and
+//! virtual registers. See \ref formatRegister() and \ref formatLabel() for
+//! more details.
+ASMJIT_API Error formatOperand(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ uint32_t arch,
+ const Operand_& op) noexcept;
+
+//! Appends a formatted instruction to the output string `sb`.
+//!
+//! \note Emitter is optional, but it's required to format named labels and
+//! virtual registers. See \ref formatRegister() and \ref formatLabel() for
+//! more details.
+ASMJIT_API Error formatInstruction(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseEmitter* emitter,
+ uint32_t arch,
+ const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
+
+#ifndef ASMJIT_NO_BUILDER
+//! Appends a formatted node to the output string `sb`.
+//!
+//! The `node` must belong to the provided `builder`.
+ASMJIT_API Error formatNode(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseBuilder* builder,
+ const BaseNode* node) noexcept;
+
+//! Appends formatted nodes to the output string `sb`.
+//!
+//! All nodes that are part of the given `builder` will be appended.
+ASMJIT_API Error formatNodeList(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseBuilder* builder) noexcept;
+
+//! Appends formatted nodes to the output string `sb`.
+//!
+//! This function works the same as \ref formatNode(), but appends more nodes
+//! to the output string, separating each node with a newline '\n' character.
+ASMJIT_API Error formatNodeList(
+ String& sb,
+ uint32_t formatFlags,
+ const BaseBuilder* builder,
+ const BaseNode* begin,
+ const BaseNode* end) noexcept;
+#endif // !ASMJIT_NO_BUILDER
+
+} // {Formatter}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_LOGGING
+
+#endif // ASMJIT_CORE_FORMATTER_H_INCLUDED
diff --git a/client/asmjit/core/func.cpp b/client/asmjit/core/func.cpp
new file mode 100644
index 0000000..514ae9d
--- /dev/null
+++ b/client/asmjit/core/func.cpp
@@ -0,0 +1,143 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/arch.h"
+#include "../core/func.h"
+#include "../core/type.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86internal_p.h"
+ #include "../x86/x86operand.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/arminternal_p.h"
+ #include "../arm/armoperand.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::FuncDetail - Init / Reset]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const Environment& environment) noexcept {
+  uint32_t ccId = signature.callConv();
+  uint32_t argCount = signature.argCount();
+
+  if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs))
+    return DebugUtils::errored(kErrorInvalidArgument);
+
+  CallConv& cc = _callConv;
+  ASMJIT_PROPAGATE(cc.init(ccId, environment));
+
+  uint32_t registerSize = Environment::registerSizeFromArch(cc.arch());
+  uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize);
+
+  const uint8_t* args = signature.args();
+  for (uint32_t i = 0; i < argCount; i++) {
+    FuncValue& arg = _args[i];
+    arg.initTypeId(Type::deabstract(args[i], deabstractDelta));
+  }
+  _argCount = uint8_t(argCount);
+  _vaIndex = uint8_t(signature.vaIndex());
+
+  uint32_t ret = signature.ret();
+  if (ret != Type::kIdVoid) {
+    _rets[0].initTypeId(Type::deabstract(ret, deabstractDelta));
+    _retCount = 1;
+  }
+
+#ifdef ASMJIT_BUILD_X86
+  if (environment.isFamilyX86())
+    return x86::X86Internal::initFuncDetail(*this, signature, registerSize);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  if (environment.isFamilyARM())
+    return arm::ArmInternal::initFuncDetail(*this, signature, registerSize);
+#endif
+
+  // We should never reach here: if `cc.init()` succeeded, an implementation
+  // for the current architecture must exist. However, stay safe.
+  return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+// ============================================================================
+// [asmjit::FuncFrame - Init / Reset / Finalize]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
+#ifdef ASMJIT_BUILD_X86
+ if (Environment::isFamilyX86(func.callConv().arch()))
+ return x86::X86Internal::initFuncFrame(*this, func);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (Environment::isFamilyARM(func.callConv().arch()))
+ return arm::ArmInternal::initFuncFrame(*this, func);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
+#ifdef ASMJIT_BUILD_X86
+ if (Environment::isFamilyX86(arch()))
+ return x86::X86Internal::finalizeFuncFrame(*this);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (Environment::isFamilyARM(arch()))
+ return arm::ArmInternal::finalizeFuncFrame(*this);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+// ============================================================================
+// [asmjit::FuncArgsAssignment]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) const noexcept {
+ uint32_t arch = frame.arch();
+ const FuncDetail* func = funcDetail();
+
+ if (!func)
+ return DebugUtils::errored(kErrorInvalidState);
+
+#ifdef ASMJIT_BUILD_X86
+ if (Environment::isFamilyX86(arch))
+ return x86::X86Internal::argsToFuncFrame(*this, frame);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (Environment::isFamilyARM(arch))
+ return arm::ArmInternal::argsToFuncFrame(*this, frame);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/func.h b/client/asmjit/core/func.h
new file mode 100644
index 0000000..9c45c1f
--- /dev/null
+++ b/client/asmjit/core/func.h
@@ -0,0 +1,976 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_FUNC_H_INCLUDED
+#define ASMJIT_CORE_FUNC_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/callconv.h"
+#include "../core/environment.h"
+#include "../core/operand.h"
+#include "../core/type.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_function
+//! \{
+
+// ============================================================================
+// [asmjit::FuncArgIndex]
+// ============================================================================
+
+//! Function argument index (lo/hi).
+enum FuncArgIndex : uint32_t {
+  //! Maximum number of function arguments supported by AsmJit.
+  kFuncArgCount = Globals::kMaxFuncArgs,
+  //! Extended maximum number of arguments (used internally).
+  //!
+  //! Argument arrays of this size store LO parts at [0, kFuncArgCount) and
+  //! HI parts at [kFuncArgCount, kFuncArgCountLoHi).
+  kFuncArgCountLoHi = kFuncArgCount * 2,
+
+  //! Index to the LO part of function argument (default).
+  //!
+  //! This value is typically omitted and added only if there is a HI argument
+  //! accessed.
+  kFuncArgLo = 0,
+
+  //! Index to the HI part of function argument.
+  //!
+  //! HI part of function argument depends on target architecture. On x86 it's
+  //! typically used to transfer 64-bit integers (they form a pair of 32-bit
+  //! integers).
+  kFuncArgHi = kFuncArgCount
+};
+
+// ============================================================================
+// [asmjit::FuncSignature]
+// ============================================================================
+
+//! Function signature.
+//!
+//! Contains information about function return type, count of arguments and
+//! their TypeIds. Function signature is a low level structure which doesn't
+//! contain platform specific or calling convention specific information.
+struct FuncSignature {
+  //! Calling convention id.
+  uint8_t _callConv;
+  //! Count of arguments.
+  uint8_t _argCount;
+  //! Index of a first VA or `kNoVarArgs`.
+  uint8_t _vaIndex;
+  //! Return value TypeId.
+  uint8_t _ret;
+  //! Function arguments TypeIds (not owned, must outlive the signature).
+  const uint8_t* _args;
+
+  enum : uint8_t {
+    //! Doesn't have variable number of arguments (`...`).
+    kNoVarArgs = 0xFF
+  };
+
+  //! \name Initialization & Reset
+  //! \{
+
+  //! Initializes the function signature.
+  //!
+  //! \param ccId Calling convention id (must fit into 8 bits).
+  //! \param vaIndex Index of the first variadic argument, or `kNoVarArgs`.
+  //! \param ret TypeId of the return value.
+  //! \param args Pointer to an array of argument TypeIds. The array is not
+  //!   copied and must outlive this signature.
+  //! \param argCount Number of entries in `args` (must fit into 8 bits).
+  inline void init(uint32_t ccId, uint32_t vaIndex, uint32_t ret, const uint8_t* args, uint32_t argCount) noexcept {
+    ASMJIT_ASSERT(ccId <= 0xFF);
+    ASMJIT_ASSERT(argCount <= 0xFF);
+
+    _callConv = uint8_t(ccId);
+    _argCount = uint8_t(argCount);
+    _vaIndex = uint8_t(vaIndex);
+    _ret = uint8_t(ret);
+    _args = args;
+  }
+
+  //! Resets the signature to a zero-initialized state.
+  inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the calling convention.
+  inline uint32_t callConv() const noexcept { return _callConv; }
+  //! Sets the calling convention to `ccId`.
+  inline void setCallConv(uint32_t ccId) noexcept { _callConv = uint8_t(ccId); }
+
+  //! Tests whether the function has variable number of arguments (...).
+  inline bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
+  //! Returns the variable arguments (...) index, `kNoVarArgs` if none.
+  inline uint32_t vaIndex() const noexcept { return _vaIndex; }
+  //! Sets the variable arguments (...) index to `index`.
+  inline void setVaIndex(uint32_t index) noexcept { _vaIndex = uint8_t(index); }
+  //! Resets the variable arguments index (making it a non-va function).
+  inline void resetVaIndex() noexcept { _vaIndex = kNoVarArgs; }
+
+  //! Returns the number of function arguments.
+  inline uint32_t argCount() const noexcept { return _argCount; }
+
+  //! Tests whether the function returns a value (return type is not void).
+  inline bool hasRet() const noexcept { return _ret != Type::kIdVoid; }
+  //! Returns the return value type.
+  inline uint32_t ret() const noexcept { return _ret; }
+
+  //! Returns the type of the argument at index `i`.
+  inline uint32_t arg(uint32_t i) const noexcept {
+    ASMJIT_ASSERT(i < _argCount);
+    return _args[i];
+  }
+  //! Returns the array of function arguments' types.
+  inline const uint8_t* args() const noexcept { return _args; }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncSignatureT]
+// ============================================================================
+
+//! Compile-time function signature.
+//!
+//! Expands the `RET_ARGS` template pack into a `FuncSignature`: the first
+//! type in the pack is the return type and the remaining types are argument
+//! types, e.g. `FuncSignatureT<int, int, int>` describes `int fn(int, int)`.
+//! At least the return type must be provided.
+template<typename... RET_ARGS>
+class FuncSignatureT : public FuncSignature {
+public:
+  //! Creates the signature; `ccId` selects the calling convention and
+  //! `vaIndex` is the index of the first variadic argument (if any).
+  inline FuncSignatureT(uint32_t ccId = CallConv::kIdHost, uint32_t vaIndex = kNoVarArgs) noexcept {
+    // `ret_args[0]` is the return type's TypeId, the rest are argument TypeIds.
+    static const uint8_t ret_args[] = { (uint8_t(Type::IdOfT<RET_ARGS>::kTypeId))... };
+    init(ccId, vaIndex, ret_args[0], ret_args + 1, uint32_t(ASMJIT_ARRAY_SIZE(ret_args) - 1));
+  }
+};
+
+// ============================================================================
+// [asmjit::FuncSignatureBuilder]
+// ============================================================================
+
+//! Function signature builder.
+//!
+//! Like `FuncSignature`, but the argument list is stored inline so the
+//! signature can be built incrementally at runtime via `addArg()` / `setArg()`.
+class FuncSignatureBuilder : public FuncSignature {
+public:
+  //! Inline storage for argument TypeIds (the inherited `_args` points here).
+  uint8_t _builderArgList[kFuncArgCount];
+
+  //! \name Initialization & Reset
+  //! \{
+
+  //! Creates an empty signature (void return, no arguments) with the given
+  //! calling convention `ccId` and variadic index `vaIndex`.
+  inline FuncSignatureBuilder(uint32_t ccId = CallConv::kIdHost, uint32_t vaIndex = kNoVarArgs) noexcept {
+    init(ccId, vaIndex, Type::kIdVoid, _builderArgList, 0);
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Sets the return type to `retType`.
+  inline void setRet(uint32_t retType) noexcept { _ret = uint8_t(retType); }
+  //! Sets the return type based on `T`.
+  template<typename T>
+  inline void setRetT() noexcept { setRet(Type::IdOfT<T>::kTypeId); }
+
+  //! Sets the argument at index `index` to `argType`.
+  inline void setArg(uint32_t index, uint32_t argType) noexcept {
+    ASMJIT_ASSERT(index < _argCount);
+    _builderArgList[index] = uint8_t(argType);
+  }
+  //! Sets the argument at index `index` to the type based on `T`.
+  template<typename T>
+  inline void setArgT(uint32_t index) noexcept { setArg(index, Type::IdOfT<T>::kTypeId); }
+
+  //! Appends an argument of `type` to the function prototype.
+  inline void addArg(uint32_t type) noexcept {
+    ASMJIT_ASSERT(_argCount < kFuncArgCount);
+    _builderArgList[_argCount++] = uint8_t(type);
+  }
+  //! Appends an argument of type based on `T` to the function prototype.
+  template<typename T>
+  inline void addArgT() noexcept { addArg(Type::IdOfT<T>::kTypeId); }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncValue]
+// ============================================================================
+
+//! Argument or return value as defined by `FuncSignature`, but with register
+//! or stack address (and other metadata) assigned to it.
+struct FuncValue {
+  //! Packed value data; see `Parts` for the layout of the individual fields.
+  uint32_t _data;
+
+  enum Parts : uint32_t {
+    kTypeIdShift      = 0,           //!< TypeId shift.
+    kTypeIdMask       = 0x000000FFu, //!< TypeId mask.
+
+    kFlagIsReg        = 0x00000100u, //!< Passed by register.
+    kFlagIsStack      = 0x00000200u, //!< Passed by stack.
+    kFlagIsIndirect   = 0x00000400u, //!< Passed indirectly by reference (internally a pointer).
+    kFlagIsDone       = 0x00000800u, //!< Used internally by arguments allocator.
+
+    kStackOffsetShift = 12,          //!< Stack offset shift.
+    kStackOffsetMask  = 0xFFFFF000u, //!< Stack offset mask (must occupy MSB bits).
+
+    kRegIdShift       = 16,          //!< RegId shift.
+    kRegIdMask        = 0x00FF0000u, //!< RegId mask.
+
+    kRegTypeShift     = 24,          //!< RegType shift.
+    kRegTypeMask      = 0xFF000000u  //!< RegType mask.
+  };
+
+  //! \name Initialization & Reset
+  //! \{
+
+  // These initialize the whole `FuncValue` to either register or stack. Useful
+  // when you know all of these properties and want to just set it up.
+
+  //! Initializes the `typeId` of this `FuncValue`.
+  inline void initTypeId(uint32_t typeId) noexcept {
+    _data = typeId << kTypeIdShift;
+  }
+
+  //! Initializes this value as a register of `regType` / `regId` holding `typeId`.
+  inline void initReg(uint32_t regType, uint32_t regId, uint32_t typeId, uint32_t flags = 0) noexcept {
+    _data = (regType << kRegTypeShift) | (regId << kRegIdShift) | (typeId << kTypeIdShift) | kFlagIsReg | flags;
+  }
+
+  //! Initializes this value as a stack slot at `offset` holding `typeId`.
+  inline void initStack(int32_t offset, uint32_t typeId) noexcept {
+    _data = (uint32_t(offset) << kStackOffsetShift) | (typeId << kTypeIdShift) | kFlagIsStack;
+  }
+
+  //! Resets the value to its unassigned state.
+  inline void reset() noexcept { _data = 0; }
+
+  //! \}
+
+  //! \name Assign
+  //! \{
+
+  // These initialize only part of `FuncValue`, useful when building `FuncValue`
+  // incrementally. The caller should first init the type-id by calling `initTypeId`
+  // and then continue building either register or stack.
+
+  //! Assigns register type and id; the value must not have one assigned yet.
+  inline void assignRegData(uint32_t regType, uint32_t regId) noexcept {
+    ASMJIT_ASSERT((_data & (kRegTypeMask | kRegIdMask)) == 0);
+    _data |= (regType << kRegTypeShift) | (regId << kRegIdShift) | kFlagIsReg;
+  }
+
+  //! Assigns a stack offset; the value must not have one assigned yet.
+  inline void assignStackOffset(int32_t offset) noexcept {
+    ASMJIT_ASSERT((_data & kStackOffsetMask) == 0);
+    _data |= (uint32_t(offset) << kStackOffsetShift) | kFlagIsStack;
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Replaces the bits selected by `mask` with `value` (already shifted into place).
+  inline void _replaceValue(uint32_t mask, uint32_t value) noexcept { _data = (_data & ~mask) | value; }
+
+  //! Tests whether the `FuncValue` has a flag `flag` set.
+  inline bool hasFlag(uint32_t flag) const noexcept { return (_data & flag) != 0; }
+  //! Adds `flags` to `FuncValue`.
+  inline void addFlags(uint32_t flags) noexcept { _data |= flags; }
+  //! Clears `flags` of `FuncValue`.
+  inline void clearFlags(uint32_t flags) noexcept { _data &= ~flags; }
+
+  //! Tests whether the value is initialized (i.e. contains a valid data).
+  inline bool isInitialized() const noexcept { return _data != 0; }
+  //! Tests whether the argument is passed by register.
+  inline bool isReg() const noexcept { return hasFlag(kFlagIsReg); }
+  //! Tests whether the argument is passed by stack.
+  inline bool isStack() const noexcept { return hasFlag(kFlagIsStack); }
+  //! Tests whether the argument is assigned (passed by either register or stack).
+  inline bool isAssigned() const noexcept { return hasFlag(kFlagIsReg | kFlagIsStack); }
+  //! Tests whether the argument is passed through a pointer (used by WIN64 to pass XMM|YMM|ZMM).
+  inline bool isIndirect() const noexcept { return hasFlag(kFlagIsIndirect); }
+
+  //! Tests whether the argument was already processed (used internally).
+  inline bool isDone() const noexcept { return hasFlag(kFlagIsDone); }
+
+  //! Returns a register type of the register used to pass function argument or return value.
+  inline uint32_t regType() const noexcept { return (_data & kRegTypeMask) >> kRegTypeShift; }
+  //! Sets a register type of the register used to pass function argument or return value.
+  inline void setRegType(uint32_t regType) noexcept { _replaceValue(kRegTypeMask, regType << kRegTypeShift); }
+
+  //! Returns a physical id of the register used to pass function argument or return value.
+  inline uint32_t regId() const noexcept { return (_data & kRegIdMask) >> kRegIdShift; }
+  //! Sets a physical id of the register used to pass function argument or return value.
+  inline void setRegId(uint32_t regId) noexcept { _replaceValue(kRegIdMask, regId << kRegIdShift); }
+
+  //! Returns a stack offset of this argument (sign-extended from the packed field).
+  inline int32_t stackOffset() const noexcept { return int32_t(_data & kStackOffsetMask) >> kStackOffsetShift; }
+  //! Sets a stack offset of this argument.
+  inline void setStackOffset(int32_t offset) noexcept { _replaceValue(kStackOffsetMask, uint32_t(offset) << kStackOffsetShift); }
+
+  //! Tests whether the argument or return value has associated `Type::Id`.
+  inline bool hasTypeId() const noexcept { return (_data & kTypeIdMask) != 0; }
+  //! Returns a TypeId of this argument or return value.
+  inline uint32_t typeId() const noexcept { return (_data & kTypeIdMask) >> kTypeIdShift; }
+  //! Sets a TypeId of this argument or return value.
+  inline void setTypeId(uint32_t typeId) noexcept { _replaceValue(kTypeIdMask, typeId << kTypeIdShift); }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncDetail]
+// ============================================================================
+
+//! Function detail - CallConv and expanded FuncSignature.
+//!
+//! Function detail is architecture and OS dependent representation of a function.
+//! It contains calling convention and expanded function signature so all
+//! arguments have assigned either register type & id or stack address.
+class FuncDetail {
+public:
+  //! Calling convention.
+  CallConv _callConv;
+  //! Number of function arguments.
+  uint8_t _argCount;
+  //! Number of function return values.
+  uint8_t _retCount;
+  //! Variable arguments index, or `kNoVarArgs`.
+  uint8_t _vaIndex;
+  //! Reserved for future use.
+  uint8_t _reserved;
+  //! Registers that contains arguments.
+  uint32_t _usedRegs[BaseReg::kGroupVirt];
+  //! Size of arguments passed by stack.
+  uint32_t _argStackSize;
+  //! Function return values.
+  FuncValue _rets[2];
+  //! Function arguments.
+  FuncValue _args[kFuncArgCountLoHi];
+
+  enum : uint8_t {
+    //! Doesn't have variable number of arguments (`...`).
+    kNoVarArgs = 0xFF
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline FuncDetail() noexcept { reset(); }
+  inline FuncDetail(const FuncDetail& other) noexcept = default;
+
+  //! Initializes this `FuncDetail` to the given signature.
+  ASMJIT_API Error init(const FuncSignature& signature, const Environment& environment) noexcept;
+  //! Resets this `FuncDetail` to a zero-initialized state.
+  inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the function's calling convention, see `CallConv`.
+  inline const CallConv& callConv() const noexcept { return _callConv; }
+
+  //! Returns the associated calling convention flags, see `CallConv::Flags`.
+  inline uint32_t flags() const noexcept { return _callConv.flags(); }
+  //! Checks whether a CallConv `flag` is set, see `CallConv::Flags`.
+  inline bool hasFlag(uint32_t ccFlag) const noexcept { return _callConv.hasFlag(ccFlag); }
+
+  //! Returns count of function return values.
+  inline uint32_t retCount() const noexcept { return _retCount; }
+  //! Returns the number of function arguments.
+  inline uint32_t argCount() const noexcept { return _argCount; }
+
+  //! Tests whether the function has a return value.
+  inline bool hasRet() const noexcept { return _retCount != 0; }
+  //! Returns function return value associated with the given `index`.
+  inline FuncValue& ret(uint32_t index = 0) noexcept {
+    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets));
+    return _rets[index];
+  }
+  //! Returns function return value associated with the given `index` (const).
+  inline const FuncValue& ret(uint32_t index = 0) const noexcept {
+    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets));
+    return _rets[index];
+  }
+
+  //! Returns function arguments array.
+  inline FuncValue* args() noexcept { return _args; }
+  //! Returns function arguments array (const).
+  inline const FuncValue* args() const noexcept { return _args; }
+
+  //! Tests whether the function argument at the given `index` is assigned.
+  inline bool hasArg(uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+    return _args[index].isInitialized();
+  }
+
+  //! Returns function argument at the given `index`.
+  inline FuncValue& arg(uint32_t index) noexcept {
+    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+    return _args[index];
+  }
+
+  //! Returns function argument at the given `index` (const).
+  inline const FuncValue& arg(uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+    return _args[index];
+  }
+
+  //! Resets the function argument at the given `index` to an unassigned state.
+  inline void resetArg(uint32_t index) noexcept {
+    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+    _args[index].reset();
+  }
+
+  //! Tests whether the function has variable number of arguments (...).
+  inline bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
+  //! Returns the variable arguments (...) index, `kNoVarArgs` if none.
+  inline uint32_t vaIndex() const noexcept { return _vaIndex; }
+
+  //! Tests whether the function passes one or more argument by stack.
+  inline bool hasStackArgs() const noexcept { return _argStackSize != 0; }
+  //! Returns stack size needed for function arguments passed on the stack.
+  inline uint32_t argStackSize() const noexcept { return _argStackSize; }
+
+  //! Returns the red zone size of the calling convention.
+  inline uint32_t redZoneSize() const noexcept { return _callConv.redZoneSize(); }
+  //! Returns the spill zone size of the calling convention.
+  inline uint32_t spillZoneSize() const noexcept { return _callConv.spillZoneSize(); }
+  //! Returns the natural stack alignment of the calling convention.
+  inline uint32_t naturalStackAlignment() const noexcept { return _callConv.naturalStackAlignment(); }
+
+  //! Returns the mask of registers of `group` used to pass arguments (from CallConv).
+  inline uint32_t passedRegs(uint32_t group) const noexcept { return _callConv.passedRegs(group); }
+  //! Returns the mask of registers of `group` preserved across calls (from CallConv).
+  inline uint32_t preservedRegs(uint32_t group) const noexcept { return _callConv.preservedRegs(group); }
+
+  //! Returns the mask of registers of `group` that hold arguments of this function.
+  inline uint32_t usedRegs(uint32_t group) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return _usedRegs[group];
+  }
+
+  //! Adds `regs` (mask) to the registers of `group` that hold arguments.
+  inline void addUsedRegs(uint32_t group, uint32_t regs) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    _usedRegs[group] |= regs;
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncFrame]
+// ============================================================================
+
+//! Function frame.
+//!
+//! Function frame is used directly by prolog and epilog insertion (PEI) utils.
+//! It provides information necessary to insert a proper and ABI-conforming
+//! prolog and epilog. Function frame calculation is based on `CallConv` and
+//! other function attributes.
+//!
+//! Function Frame Structure
+//! ------------------------
+//!
+//! Various properties can contribute to the size and structure of the function
+//! frame. The function frame in most cases won't use all of the properties
+//! illustrated (for example Spill Zone and Red Zone are never used together).
+//!
+//! ```
+//! +-----------------------------+
+//! | Arguments Passed by Stack |
+//! +-----------------------------+
+//! | Spill Zone |
+//! +-----------------------------+ <- Stack offset (args) starts from here.
+//! | Return Address, if Pushed |
+//! +-----------------------------+ <- Stack pointer (SP) upon entry.
+//! | Save/Restore Stack. |
+//! +-----------------------------+-----------------------------+
+//! | Local Stack | |
+//! +-----------------------------+ Final Stack |
+//! | Call Stack | |
+//! +-----------------------------+-----------------------------+ <- SP after prolog.
+//! | Red Zone |
+//! +-----------------------------+
+//! ```
+class FuncFrame {
+public:
+ enum Tag : uint32_t {
+ //! Tag used to inform that some offset is invalid.
+ kTagInvalidOffset = 0xFFFFFFFFu
+ };
+
+ //! Attributes are designed in a way that all are initially false, and user
+ //! or FuncFrame finalizer adds them when necessary.
+ enum Attributes : uint32_t {
+ //! Function has variable number of arguments.
+ kAttrHasVarArgs = 0x00000001u,
+ //! Preserve frame pointer (don't omit FP).
+ kAttrHasPreservedFP = 0x00000010u,
+ //! Function calls other functions (is not leaf).
+ kAttrHasFuncCalls = 0x00000020u,
+
+ //! Use AVX instead of SSE for all operations (X86).
+ kAttrX86AvxEnabled = 0x00010000u,
+ //! Emit VZEROUPPER instruction in epilog (X86).
+ kAttrX86AvxCleanup = 0x00020000u,
+ //! Emit EMMS instruction in epilog (X86).
+ kAttrX86MmxCleanup = 0x00040000u,
+
+ //! Function has aligned save/restore of vector registers.
+ kAttrAlignedVecSR = 0x40000000u,
+ //! FuncFrame is finalized and can be used by PEI.
+ kAttrIsFinalized = 0x80000000u
+ };
+
+ //! Function attributes.
+ uint32_t _attributes;
+
+ //! Architecture, see \ref Environment::Arch.
+ uint8_t _arch;
+ //! SP register ID (to access call stack and local stack).
+ uint8_t _spRegId;
+ //! SA register ID (to access stack arguments).
+ uint8_t _saRegId;
+
+ //! Red zone size (copied from CallConv).
+ uint8_t _redZoneSize;
+ //! Spill zone size (copied from CallConv).
+ uint8_t _spillZoneSize;
+ //! Natural stack alignment (copied from CallConv).
+ uint8_t _naturalStackAlignment;
+ //! Minimum stack alignment to turn on dynamic alignment.
+ uint8_t _minDynamicAlignment;
+
+ //! Call stack alignment.
+ uint8_t _callStackAlignment;
+ //! Local stack alignment.
+ uint8_t _localStackAlignment;
+ //! Final stack alignment.
+ uint8_t _finalStackAlignment;
+
+ //! Adjustment of the stack before returning (X86-STDCALL).
+ uint16_t _calleeStackCleanup;
+
+ //! Call stack size.
+ uint32_t _callStackSize;
+ //! Local stack size.
+ uint32_t _localStackSize;
+ //! Final stack size (sum of call stack and local stack).
+ uint32_t _finalStackSize;
+
+ //! Local stack offset (non-zero only if call stack is used).
+ uint32_t _localStackOffset;
+ //! Offset relative to SP that contains previous SP (before alignment).
+ uint32_t _daOffset;
+ //! Offset of the first stack argument relative to SP.
+ uint32_t _saOffsetFromSP;
+ //! Offset of the first stack argument relative to SA (_saRegId or FP).
+ uint32_t _saOffsetFromSA;
+
+ //! Local stack adjustment in prolog/epilog.
+ uint32_t _stackAdjustment;
+
+ //! Registers that are dirty.
+ uint32_t _dirtyRegs[BaseReg::kGroupVirt];
+ //! Registers that must be preserved (copied from CallConv).
+ uint32_t _preservedRegs[BaseReg::kGroupVirt];
+
+ //! Final stack size required to save GP regs.
+ uint16_t _gpSaveSize;
+ //! Final Stack size required to save other than GP regs.
+ uint16_t _nonGpSaveSize;
+ //! Final offset where saved GP regs are stored.
+ uint32_t _gpSaveOffset;
+ //! Final offset where saved other than GP regs are stored.
+ uint32_t _nonGpSaveOffset;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline FuncFrame() noexcept { reset(); }
+ inline FuncFrame(const FuncFrame& other) noexcept = default;
+
+ ASMJIT_API Error init(const FuncDetail& func) noexcept;
+
+ inline void reset() noexcept {
+ memset(this, 0, sizeof(FuncFrame));
+ _spRegId = BaseReg::kIdBad;
+ _saRegId = BaseReg::kIdBad;
+ _daOffset = kTagInvalidOffset;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the target architecture of the function frame.
+ inline uint32_t arch() const noexcept { return _arch; }
+
+ //! Returns function frame attributes, see `Attributes`.
+ inline uint32_t attributes() const noexcept { return _attributes; }
+  //! Checks whether the FuncFrame contains an attribute `attr`.
+ inline bool hasAttribute(uint32_t attr) const noexcept { return (_attributes & attr) != 0; }
+ //! Adds attributes `attrs` to the FuncFrame.
+ inline void addAttributes(uint32_t attrs) noexcept { _attributes |= attrs; }
+  //! Clears attributes `attrs` from the FuncFrame.
+ inline void clearAttributes(uint32_t attrs) noexcept { _attributes &= ~attrs; }
+
+ //! Tests whether the function has variable number of arguments.
+ inline bool hasVarArgs() const noexcept { return hasAttribute(kAttrHasVarArgs); }
+ //! Sets the variable arguments flag.
+ inline void setVarArgs() noexcept { addAttributes(kAttrHasVarArgs); }
+ //! Resets variable arguments flag.
+ inline void resetVarArgs() noexcept { clearAttributes(kAttrHasVarArgs); }
+
+ //! Tests whether the function preserves frame pointer (EBP|ESP on X86).
+ inline bool hasPreservedFP() const noexcept { return hasAttribute(kAttrHasPreservedFP); }
+ //! Enables preserved frame pointer.
+ inline void setPreservedFP() noexcept { addAttributes(kAttrHasPreservedFP); }
+ //! Disables preserved frame pointer.
+ inline void resetPreservedFP() noexcept { clearAttributes(kAttrHasPreservedFP); }
+
+ //! Tests whether the function calls other functions.
+ inline bool hasFuncCalls() const noexcept { return hasAttribute(kAttrHasFuncCalls); }
+ //! Sets `kFlagHasCalls` to true.
+ inline void setFuncCalls() noexcept { addAttributes(kAttrHasFuncCalls); }
+ //! Sets `kFlagHasCalls` to false.
+ inline void resetFuncCalls() noexcept { clearAttributes(kAttrHasFuncCalls); }
+
+ //! Tests whether the function contains AVX cleanup - 'vzeroupper' instruction in epilog.
+ inline bool hasAvxCleanup() const noexcept { return hasAttribute(kAttrX86AvxCleanup); }
+ //! Enables AVX cleanup.
+ inline void setAvxCleanup() noexcept { addAttributes(kAttrX86AvxCleanup); }
+ //! Disables AVX cleanup.
+ inline void resetAvxCleanup() noexcept { clearAttributes(kAttrX86AvxCleanup); }
+
+  //! Tests whether the function uses AVX instead of SSE for SIMD operations (X86).
+ inline bool isAvxEnabled() const noexcept { return hasAttribute(kAttrX86AvxEnabled); }
+  //! Enables the use of AVX (X86).
+ inline void setAvxEnabled() noexcept { addAttributes(kAttrX86AvxEnabled); }
+  //! Disables the use of AVX (X86).
+ inline void resetAvxEnabled() noexcept { clearAttributes(kAttrX86AvxEnabled); }
+
+ //! Tests whether the function contains MMX cleanup - 'emms' instruction in epilog.
+ inline bool hasMmxCleanup() const noexcept { return hasAttribute(kAttrX86MmxCleanup); }
+ //! Enables MMX cleanup.
+ inline void setMmxCleanup() noexcept { addAttributes(kAttrX86MmxCleanup); }
+ //! Disables MMX cleanup.
+ inline void resetMmxCleanup() noexcept { clearAttributes(kAttrX86MmxCleanup); }
+
+ //! Tests whether the function uses call stack.
+ inline bool hasCallStack() const noexcept { return _callStackSize != 0; }
+ //! Tests whether the function uses local stack.
+ inline bool hasLocalStack() const noexcept { return _localStackSize != 0; }
+ //! Tests whether vector registers can be saved and restored by using aligned reads and writes.
+ inline bool hasAlignedVecSR() const noexcept { return hasAttribute(kAttrAlignedVecSR); }
+ //! Tests whether the function has to align stack dynamically.
+ inline bool hasDynamicAlignment() const noexcept { return _finalStackAlignment >= _minDynamicAlignment; }
+
+ //! Tests whether the calling convention specifies 'RedZone'.
+ inline bool hasRedZone() const noexcept { return _redZoneSize != 0; }
+ //! Tests whether the calling convention specifies 'SpillZone'.
+ inline bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
+
+ //! Returns the size of 'RedZone'.
+ inline uint32_t redZoneSize() const noexcept { return _redZoneSize; }
+ //! Returns the size of 'SpillZone'.
+ inline uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
+ //! Returns natural stack alignment (guaranteed stack alignment upon entry).
+ inline uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
+  //! Returns the minimum stack alignment required to turn on dynamic stack alignment.
+ inline uint32_t minDynamicAlignment() const noexcept { return _minDynamicAlignment; }
+
+ //! Tests whether the callee must adjust SP before returning (X86-STDCALL only)
+ inline bool hasCalleeStackCleanup() const noexcept { return _calleeStackCleanup != 0; }
+  //! Returns how many bytes of the stack the callee must adjust before returning (X86-STDCALL only).
+ inline uint32_t calleeStackCleanup() const noexcept { return _calleeStackCleanup; }
+
+ //! Returns call stack alignment.
+ inline uint32_t callStackAlignment() const noexcept { return _callStackAlignment; }
+ //! Returns local stack alignment.
+ inline uint32_t localStackAlignment() const noexcept { return _localStackAlignment; }
+ //! Returns final stack alignment (the maximum value of call, local, and natural stack alignments).
+ inline uint32_t finalStackAlignment() const noexcept { return _finalStackAlignment; }
+
+ //! Sets call stack alignment.
+ //!
+ //! \note This also updates the final stack alignment.
+ inline void setCallStackAlignment(uint32_t alignment) noexcept {
+ _callStackAlignment = uint8_t(alignment);
+ _finalStackAlignment = Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment);
+ }
+
+ //! Sets local stack alignment.
+ //!
+ //! \note This also updates the final stack alignment.
+ inline void setLocalStackAlignment(uint32_t value) noexcept {
+ _localStackAlignment = uint8_t(value);
+ _finalStackAlignment = Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment);
+ }
+
+ //! Combines call stack alignment with `alignment`, updating it to the greater value.
+ //!
+ //! \note This also updates the final stack alignment.
+ inline void updateCallStackAlignment(uint32_t alignment) noexcept {
+ _callStackAlignment = uint8_t(Support::max<uint32_t>(_callStackAlignment, alignment));
+ _finalStackAlignment = Support::max(_finalStackAlignment, _callStackAlignment);
+ }
+
+ //! Combines local stack alignment with `alignment`, updating it to the greater value.
+ //!
+ //! \note This also updates the final stack alignment.
+ inline void updateLocalStackAlignment(uint32_t alignment) noexcept {
+ _localStackAlignment = uint8_t(Support::max<uint32_t>(_localStackAlignment, alignment));
+ _finalStackAlignment = Support::max(_finalStackAlignment, _localStackAlignment);
+ }
+
+ //! Returns call stack size.
+ inline uint32_t callStackSize() const noexcept { return _callStackSize; }
+ //! Returns local stack size.
+ inline uint32_t localStackSize() const noexcept { return _localStackSize; }
+
+ //! Sets call stack size.
+ inline void setCallStackSize(uint32_t size) noexcept { _callStackSize = size; }
+ //! Sets local stack size.
+ inline void setLocalStackSize(uint32_t size) noexcept { _localStackSize = size; }
+
+ //! Combines call stack size with `size`, updating it to the greater value.
+ inline void updateCallStackSize(uint32_t size) noexcept { _callStackSize = Support::max(_callStackSize, size); }
+  //! Combines local stack size with `size`, updating it to the greater value.
+  inline void updateLocalStackSize(uint32_t size) noexcept { _localStackSize = Support::max(_localStackSize, size); }
+
+  //! Returns final stack size (only valid after the FuncFrame is finalized).
+  inline uint32_t finalStackSize() const noexcept { return _finalStackSize; }
+
+  //! Returns an offset to access the local stack (non-zero only if call stack is used).
+  inline uint32_t localStackOffset() const noexcept { return _localStackOffset; }
+
+  //! Tests whether the function prolog/epilog requires a memory slot for storing unaligned SP.
+  inline bool hasDAOffset() const noexcept { return _daOffset != kTagInvalidOffset; }
+  //! Returns a memory offset used to store DA (dynamic alignment) slot (relative to SP).
+  inline uint32_t daOffset() const noexcept { return _daOffset; }
+
+  //! Returns an offset of the stack arguments zone - relative to SP when
+  //! `regId` is the stack pointer register, otherwise relative to the SA
+  //! register.
+  inline uint32_t saOffset(uint32_t regId) const noexcept {
+    return regId == _spRegId ? saOffsetFromSP()
+                             : saOffsetFromSA();
+  }
+
+  //! Returns an offset of the stack arguments zone relative to the stack pointer (SP).
+  inline uint32_t saOffsetFromSP() const noexcept { return _saOffsetFromSP; }
+  //! Returns an offset of the stack arguments zone relative to the SA register.
+  inline uint32_t saOffsetFromSA() const noexcept { return _saOffsetFromSA; }
+
+  //! Returns mask of registers of the given register `group` that are modified
+  //! by the function. The engine would then calculate which registers must be
+  //! saved & restored by the function by using the data provided by the calling
+  //! convention.
+  inline uint32_t dirtyRegs(uint32_t group) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return _dirtyRegs[group];
+  }
+
+  //! Sets which registers (as a mask) are modified by the function.
+  //!
+  //! \remarks Please note that this will completely overwrite the existing
+  //! register mask, use `addDirtyRegs()` to modify the existing register
+  //! mask.
+  inline void setDirtyRegs(uint32_t group, uint32_t regs) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    _dirtyRegs[group] = regs;
+  }
+
+  //! Adds which registers (as a mask) are modified by the function.
+  inline void addDirtyRegs(uint32_t group, uint32_t regs) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    _dirtyRegs[group] |= regs;
+  }
+
+  //! \overload
+  //!
+  //! Marks a single physical register `reg` (group taken from the register) as dirty.
+  inline void addDirtyRegs(const BaseReg& reg) noexcept {
+    ASMJIT_ASSERT(reg.id() < Globals::kMaxPhysRegs);
+    addDirtyRegs(reg.group(), Support::bitMask(reg.id()));
+  }
+
+  //! \overload
+  //!
+  //! Variadic overload - marks each register passed as dirty.
+  template<typename... Args>
+  ASMJIT_INLINE void addDirtyRegs(const BaseReg& reg, Args&&... args) noexcept {
+    addDirtyRegs(reg);
+    addDirtyRegs(std::forward<Args>(args)...);
+  }
+
+  //! Marks all registers of all register groups as dirty.
+  inline void setAllDirty() noexcept {
+    for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_dirtyRegs); i++)
+      _dirtyRegs[i] = 0xFFFFFFFFu;
+  }
+
+  //! Marks all registers of the given register `group` as dirty.
+  inline void setAllDirty(uint32_t group) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    _dirtyRegs[group] = 0xFFFFFFFFu;
+  }
+
+  //! Returns a calculated mask of registers of the given `group` that will be
+  //! saved and restored in the function's prolog and epilog, respectively. The
+  //! register mask is calculated from both `dirtyRegs` (provided by user) and
+  //! `preservedMask` (provided by the calling convention).
+  inline uint32_t savedRegs(uint32_t group) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return _dirtyRegs[group] & _preservedRegs[group];
+  }
+
+  //! Returns the mask of preserved registers of the given register `group`.
+  //!
+  //! Preserved registers are those that must survive the function call
+  //! unmodified. The function can only modify preserved registers if they
+  //! are saved and restored in function's prolog and epilog, respectively.
+  inline uint32_t preservedRegs(uint32_t group) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return _preservedRegs[group];
+  }
+  //! Tests whether the register used to access stack arguments (SA) is assigned.
+  inline bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; }
+  //! Returns the id of the register used to access stack arguments (SA).
+  inline uint32_t saRegId() const noexcept { return _saRegId; }
+  //! Sets the id of the register used to access stack arguments (SA).
+  //! (`noexcept` added for consistency with the surrounding accessors.)
+  inline void setSARegId(uint32_t regId) noexcept { _saRegId = uint8_t(regId); }
+  //! Resets the SA register id to `BaseReg::kIdBad` (unassigned).
+  inline void resetSARegId() noexcept { setSARegId(BaseReg::kIdBad); }
+
+  //! Returns stack size required to save GP registers.
+  inline uint32_t gpSaveSize() const noexcept { return _gpSaveSize; }
+  //! Returns stack size required to save other than GP registers (MM, XMM|YMM|ZMM, K, VFP, etc...).
+  inline uint32_t nonGpSaveSize() const noexcept { return _nonGpSaveSize; }
+
+  //! Returns an offset to the stack where general purpose registers are saved.
+  inline uint32_t gpSaveOffset() const noexcept { return _gpSaveOffset; }
+  //! Returns an offset to the stack where other than GP registers are saved.
+  inline uint32_t nonGpSaveOffset() const noexcept { return _nonGpSaveOffset; }
+
+  //! Tests whether the function contains stack adjustment.
+  inline bool hasStackAdjustment() const noexcept { return _stackAdjustment != 0; }
+  //! Returns function's stack adjustment used in function's prolog and epilog.
+  //!
+  //! If the returned value is zero it means that the stack is not adjusted.
+  //! This can mean both that the stack is not used and/or the stack is only
+  //! adjusted by instructions that push/pop registers into/from stack.
+  inline uint32_t stackAdjustment() const noexcept { return _stackAdjustment; }
+
+  //! \}
+
+  //! \name Finalization
+  //! \{
+
+  //! Finalizes the frame - computes the final stack size, offsets, and
+  //! save/restore masks. Must be called before the frame is used to emit
+  //! a function prolog/epilog (see `finalStackSize()` and related getters).
+  ASMJIT_API Error finalize() noexcept;
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncArgsAssignment]
+// ============================================================================
+
+//! A helper class that can be used to assign a physical register for each
+//! function argument. Use with `BaseEmitter::emitArgsAssignment()`.
+class FuncArgsAssignment {
+public:
+  //! Function detail.
+  const FuncDetail* _funcDetail;
+  //! Register that can be used to access arguments passed by stack.
+  uint8_t _saRegId;
+  //! Reserved for future use.
+  uint8_t _reserved[3];
+  //! Mapping of each function argument.
+  FuncValue _args[kFuncArgCountLoHi];
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates an assignment, optionally linked to the function detail `fd`.
+  inline explicit FuncArgsAssignment(const FuncDetail* fd = nullptr) noexcept { reset(fd); }
+
+  //! Creates a copy of `other` (all members are trivially copyable, so a raw
+  //! memory copy is sufficient).
+  inline FuncArgsAssignment(const FuncArgsAssignment& other) noexcept {
+    memcpy(this, &other, sizeof(*this));
+  }
+
+  //! Resets the assignment to the default state, optionally linking it to `fd`.
+  inline void reset(const FuncDetail* fd = nullptr) noexcept {
+    _funcDetail = fd;
+    _saRegId = uint8_t(BaseReg::kIdBad);
+    memset(_reserved, 0, sizeof(_reserved));
+    memset(_args, 0, sizeof(_args));
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the associated function detail (may be null).
+  inline const FuncDetail* funcDetail() const noexcept { return _funcDetail; }
+  //! Associates this assignment with the function detail `fd`.
+  inline void setFuncDetail(const FuncDetail* fd) noexcept { _funcDetail = fd; }
+
+  //! Tests whether the register used to access stack arguments (SA) is assigned.
+  inline bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; }
+  //! Returns the id of the register used to access stack arguments (SA).
+  inline uint32_t saRegId() const noexcept { return _saRegId; }
+  //! Sets the id of the register used to access stack arguments (SA).
+  //! (`noexcept` added for consistency with the rest of the class.)
+  inline void setSARegId(uint32_t regId) noexcept { _saRegId = uint8_t(regId); }
+  //! Resets the SA register id to `BaseReg::kIdBad` (unassigned).
+  inline void resetSARegId() noexcept { _saRegId = uint8_t(BaseReg::kIdBad); }
+
+  //! Returns the assignment of the argument at `index` (must be in range).
+  inline FuncValue& arg(uint32_t index) noexcept {
+    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args));
+    return _args[index];
+  }
+  //! \overload
+  inline const FuncValue& arg(uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args));
+    return _args[index];
+  }
+
+  //! Tests whether the argument at `argIndex` has been assigned.
+  inline bool isAssigned(uint32_t argIndex) const noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
+    return _args[argIndex].isAssigned();
+  }
+
+  //! Assigns the physical register `reg` to the argument at `argIndex`.
+  inline void assignReg(uint32_t argIndex, const BaseReg& reg, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
+    ASMJIT_ASSERT(reg.isPhysReg());
+    _args[argIndex].initReg(reg.type(), reg.id(), typeId);
+  }
+
+  //! \overload
+  inline void assignReg(uint32_t argIndex, uint32_t regType, uint32_t regId, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
+    _args[argIndex].initReg(regType, regId, typeId);
+  }
+
+  //! Assigns a stack location at `offset` to the argument at `argIndex`.
+  //! (`noexcept` added for consistency with the rest of the class.)
+  inline void assignStack(uint32_t argIndex, int32_t offset, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
+    _args[argIndex].initStack(offset, typeId);
+  }
+
+  // NOTE: All `assignAll()` methods are shortcuts to assign all arguments at
+  // once, however, since registers are passed all at once these initializers
+  // don't provide any way to pass TypeId and/or to keep any argument between
+  // the arguments passed unassigned.
+  inline void _assignAllInternal(uint32_t argIndex, const BaseReg& reg) noexcept {
+    assignReg(argIndex, reg);
+  }
+
+  template<typename... Args>
+  inline void _assignAllInternal(uint32_t argIndex, const BaseReg& reg, Args&&... args) noexcept {
+    assignReg(argIndex, reg);
+    _assignAllInternal(argIndex + 1, std::forward<Args>(args)...);
+  }
+
+  //! Assigns all argument registers at once, starting at argument index 0.
+  template<typename... Args>
+  inline void assignAll(Args&&... args) noexcept {
+    _assignAllInternal(0, std::forward<Args>(args)...);
+  }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Update `FuncFrame` based on function's arguments assignment.
+  //!
+  //! \note You MUST call this in order to use `BaseEmitter::emitArgsAssignment()`,
+  //! otherwise the FuncFrame would not contain the information necessary to
+  //! assign all arguments into the registers and/or stack specified.
+  ASMJIT_API Error updateFuncFrame(FuncFrame& frame) const noexcept;
+
+  //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_FUNC_H_INCLUDED
+
diff --git a/client/asmjit/core/globals.cpp b/client/asmjit/core/globals.cpp
new file mode 100644
index 0000000..7ec6628
--- /dev/null
+++ b/client/asmjit/core/globals.cpp
@@ -0,0 +1,144 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/globals.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::DebugUtils]
+// ============================================================================
+
+// Returns a human-readable name of the given error code. All names are packed
+// back-to-back in `sErrorString` (NUL separated) and `sErrorIndex` maps each
+// error code to the offset of its name in that table; both tables are
+// generated from the ErrorCode enum (see the @EnumString...@ markers).
+ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
+#ifndef ASMJIT_NO_TEXT
+  // @EnumStringBegin{"enum": "ErrorCode", "output": "sError", "strip": "kError"}@
+  static const char sErrorString[] =
+    "Ok\0"
+    "OutOfMemory\0"
+    "InvalidArgument\0"
+    "InvalidState\0"
+    "InvalidArch\0"
+    "NotInitialized\0"
+    "AlreadyInitialized\0"
+    "FeatureNotEnabled\0"
+    "TooManyHandles\0"
+    "TooLarge\0"
+    "NoCodeGenerated\0"
+    "InvalidDirective\0"
+    "InvalidLabel\0"
+    "TooManyLabels\0"
+    "LabelAlreadyBound\0"
+    "LabelAlreadyDefined\0"
+    "LabelNameTooLong\0"
+    "InvalidLabelName\0"
+    "InvalidParentLabel\0"
+    "NonLocalLabelCannotHaveParent\0"
+    "InvalidSection\0"
+    "TooManySections\0"
+    "InvalidSectionName\0"
+    "TooManyRelocations\0"
+    "InvalidRelocEntry\0"
+    "RelocOffsetOutOfRange\0"
+    "InvalidAssignment\0"
+    "InvalidInstruction\0"
+    "InvalidRegType\0"
+    "InvalidRegGroup\0"
+    "InvalidPhysId\0"
+    "InvalidVirtId\0"
+    "InvalidPrefixCombination\0"
+    "InvalidLockPrefix\0"
+    "InvalidXAcquirePrefix\0"
+    "InvalidXReleasePrefix\0"
+    "InvalidRepPrefix\0"
+    "InvalidRexPrefix\0"
+    "InvalidExtraReg\0"
+    "InvalidKMaskUse\0"
+    "InvalidKZeroUse\0"
+    "InvalidBroadcast\0"
+    "InvalidEROrSAE\0"
+    "InvalidAddress\0"
+    "InvalidAddressIndex\0"
+    "InvalidAddressScale\0"
+    "InvalidAddress64Bit\0"
+    "InvalidAddress64BitZeroExtension\0"
+    "InvalidDisplacement\0"
+    "InvalidSegment\0"
+    "InvalidImmediate\0"
+    "InvalidOperandSize\0"
+    "AmbiguousOperandSize\0"
+    "OperandSizeMismatch\0"
+    "InvalidOption\0"
+    "OptionAlreadyDefined\0"
+    "InvalidTypeId\0"
+    "InvalidUseOfGpbHi\0"
+    "InvalidUseOfGpq\0"
+    "InvalidUseOfF80\0"
+    "NotConsecutiveRegs\0"
+    "IllegalVirtReg\0"
+    "TooManyVirtRegs\0"
+    "NoMorePhysRegs\0"
+    "OverlappedRegs\0"
+    "OverlappingStackRegWithRegArg\0"
+    "ExpressionLabelNotBound\0"
+    "ExpressionOverflow\0"
+    "<Unknown>\0";
+
+  static const uint16_t sErrorIndex[] = {
+    0, 3, 15, 31, 44, 56, 71, 90, 108, 123, 132, 148, 165, 178, 192, 210, 230,
+    247, 264, 283, 313, 328, 344, 363, 382, 400, 422, 440, 459, 474, 490, 504,
+    518, 543, 561, 583, 605, 622, 639, 655, 671, 687, 704, 719, 734, 754, 774,
+    794, 827, 847, 862, 879, 898, 919, 939, 953, 974, 988, 1006, 1022, 1038,
+    1057, 1072, 1088, 1103, 1118, 1148, 1172, 1191
+  };
+  // @EnumStringEnd@
+
+  // Clamp `err` to `kErrorCount` so any out-of-range code maps to "<Unknown>".
+  return sErrorString + sErrorIndex[Support::min<Error>(err, kErrorCount)];
+#else
+  // Text support compiled out - return an empty string for every code.
+  DebugUtils::unused(err);
+  static const char noMessage[] = "";
+  return noMessage;
+#endif
+}
+
+// Writes `str` to the debug output channel - the debugger stream on Windows,
+// standard error everywhere else.
+ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
+#if !defined(_WIN32)
+  ::fputs(str, stderr);
+#else
+  ::OutputDebugStringA(str);
+#endif
+}
+
+// Formats the assertion failure message into a fixed buffer, routes it to
+// `debugOutput()` and aborts the process (this function never returns).
+ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
+  char str[1024];
+
+  // Use sizeof(str) instead of a duplicated literal so the size passed to
+  // snprintf can never silently diverge from the actual buffer size.
+  snprintf(str, sizeof(str),
+    "[asmjit] Assertion failed at %s (line %d):\n"
+    "[asmjit] %s\n", file, line, msg);
+
+  debugOutput(str);
+  ::abort();
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/globals.h b/client/asmjit/core/globals.h
new file mode 100644
index 0000000..67a8769
--- /dev/null
+++ b/client/asmjit/core/globals.h
@@ -0,0 +1,453 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_GLOBALS_H_INCLUDED
+#define ASMJIT_CORE_GLOBALS_H_INCLUDED
+
+#include "../core/api-config.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Support]
+// ============================================================================
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_utilities
+//! \{
+namespace Support {
+  //! Cast designed to cast between function and void* pointers.
+  //!
+  //! Implemented as a C-style cast because no single named C++ cast converts
+  //! between function pointers and `void*` portably.
+  template<typename Dst, typename Src>
+  static inline Dst ptr_cast_impl(Src p) noexcept { return (Dst)p; }
+} // {Support}
+
+#if defined(ASMJIT_NO_STDCXX)
+namespace Support {
+  //! Minimal `operator new` backend used when building without the C++ standard library.
+  ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
+  //! Minimal `operator delete` backend used when building without the C++ standard library.
+  ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
+} // {Support}
+
+//! Injects malloc/free based `new`/`delete` operators into a class when the
+//! C++ standard library is disabled; expands to nothing otherwise.
+#define ASMJIT_BASE_CLASS(TYPE) \
+  ASMJIT_INLINE void* operator new(size_t n) noexcept { \
+    return Support::operatorNew(n); \
+  } \
+  \
+  ASMJIT_INLINE void operator delete(void* p) noexcept { \
+    Support::operatorDelete(p); \
+  } \
+  \
+  ASMJIT_INLINE void* operator new(size_t, void* p) noexcept { return p; } \
+  ASMJIT_INLINE void operator delete(void*, void*) noexcept {}
+#else
+#define ASMJIT_BASE_CLASS(TYPE)
+#endif
+
+//! \}
+//! \endcond
+
+// ============================================================================
+// [asmjit::Globals]
+// ============================================================================
+
+//! \addtogroup asmjit_core
+//! \{
+
+//! Contains typedefs, constants, and variables used globally by AsmJit.
+namespace Globals {
+
+// ============================================================================
+// [asmjit::Globals::<global>]
+// ============================================================================
+
+//! Host memory allocator overhead.
+constexpr uint32_t kAllocOverhead = uint32_t(sizeof(intptr_t) * 4);
+
+//! Host memory allocator alignment.
+constexpr uint32_t kAllocAlignment = 8;
+
+//! Aggressive growing strategy threshold.
+constexpr uint32_t kGrowThreshold = 1024 * 1024 * 16;
+
+//! Maximum depth of RB-Tree is:
+//!
+//!   `2 * log2(n + 1)`
+//!
+//! Size of RB node is at least two pointers (without data),
+//! so a theoretical architecture limit would be:
+//!
+//!   `2 * log2(addressableMemorySize / sizeof(Node) + 1)`
+//!
+//! Which yields 30 on 32-bit arch and 61 on 64-bit arch.
+//! The final value was adjusted by +1 for safety reasons.
+constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1;
+
+//! Maximum number of operands of a single instruction.
+constexpr uint32_t kMaxOpCount = 6;
+
+//! Maximum arguments of a function supported by the Compiler / Function API.
+constexpr uint32_t kMaxFuncArgs = 16;
+
+//! Maximum number of physical registers AsmJit can use per register group.
+constexpr uint32_t kMaxPhysRegs = 32;
+
+//! Maximum alignment.
+constexpr uint32_t kMaxAlignment = 64;
+
+//! Maximum label or symbol size in bytes.
+constexpr uint32_t kMaxLabelNameSize = 2048;
+
+//! Maximum section name size.
+constexpr uint32_t kMaxSectionNameSize = 35;
+
+//! Maximum size of comment.
+constexpr uint32_t kMaxCommentSize = 1024;
+
+//! Invalid identifier.
+constexpr uint32_t kInvalidId = 0xFFFFFFFFu;
+
+//! Returned by `indexOf()` and similar when working with containers that use 32-bit index/size.
+constexpr uint32_t kNotFound = 0xFFFFFFFFu;
+
+//! Invalid base address.
+constexpr uint64_t kNoBaseAddress = ~uint64_t(0);
+
+// ============================================================================
+// [asmjit::Globals::ResetPolicy]
+// ============================================================================
+
+//! Reset policy used by most `reset()` functions.
+enum ResetPolicy : uint32_t {
+  //! Soft reset, doesn't deallocate memory (default).
+  kResetSoft = 0,
+  //! Hard reset, releases all memory used, if any.
+  kResetHard = 1
+};
+
+// ============================================================================
+// [asmjit::Globals::Link]
+// ============================================================================
+
+//! Link direction aliases used by linked-list and tree utilities - the same
+//! two slots are viewed as left/right, prev/next, or first/last.
+enum Link : uint32_t {
+  kLinkLeft = 0,
+  kLinkRight = 1,
+
+  kLinkPrev = 0,
+  kLinkNext = 1,
+
+  kLinkFirst = 0,
+  kLinkLast = 1,
+
+  kLinkCount = 2
+};
+
+//! Tag type selecting constructors that initialize the instance.
+struct Init_ {};
+//! Tag type selecting constructors that keep the instance uninitialized.
+struct NoInit_ {};
+
+//! Tag value passed to constructors that should initialize the instance.
+static const constexpr Init_ Init {};
+//! Tag value passed to constructors that should skip initialization.
+static const constexpr NoInit_ NoInit {};
+
+} // {Globals}
+
+// ============================================================================
+// [asmjit::ByteOrder]
+// ============================================================================
+
+//! Byte order.
+namespace ByteOrder {
+  enum : uint32_t {
+    //! Little-endian.
+    kLE = 0,
+    //! Big-endian.
+    kBE = 1,
+    //! Byte order of the host architecture.
+    kNative = ASMJIT_ARCH_LE ? kLE : kBE,
+    //! Byte order opposite to the host architecture.
+    kSwapped = ASMJIT_ARCH_LE ? kBE : kLE
+  };
+}
+
+// ============================================================================
+// [asmjit::ptr_as_func / func_as_ptr]
+// ============================================================================
+
+//! Casts a `void*` pointer `func` to a callable function pointer of type `Func`.
+template<typename Func>
+static inline Func ptr_as_func(void* func) noexcept { return Support::ptr_cast_impl<Func, void*>(func); }
+
+//! Casts a function pointer `func` to an opaque `void*` pointer.
+template<typename Func>
+static inline void* func_as_ptr(Func func) noexcept { return Support::ptr_cast_impl<void*, Func>(func); }
+
+//! \}
+
+// ============================================================================
+// [asmjit::Error]
+// ============================================================================
+
+//! \addtogroup asmjit_error_handling
+//! \{
+
+//! AsmJit error type (uint32_t).
+typedef uint32_t Error;
+
+//! AsmJit error codes.
+enum ErrorCode : uint32_t {
+  // @EnumValuesBegin{"enum": "ErrorCode"}@
+
+  //! No error (success).
+  kErrorOk = 0,
+
+  //! Out of memory.
+  kErrorOutOfMemory,
+
+  //! Invalid argument.
+  kErrorInvalidArgument,
+
+  //! Invalid state.
+  //!
+  //! If this error is returned it means that either you are doing something
+  //! wrong or AsmJit caught itself by doing something wrong. This error should
+  //! never be ignored.
+  kErrorInvalidState,
+
+  //! Invalid or incompatible architecture.
+  kErrorInvalidArch,
+
+  //! The object is not initialized.
+  kErrorNotInitialized,
+  //! The object is already initialized.
+  kErrorAlreadyInitialized,
+
+  //! Built-in feature was disabled at compile time and it's not available.
+  kErrorFeatureNotEnabled,
+
+  //! Too many handles (Windows) or file descriptors (Unix/Posix).
+  kErrorTooManyHandles,
+  //! Code generated is larger than allowed.
+  kErrorTooLarge,
+
+  //! No code generated.
+  //!
+  //! Returned by runtime if the \ref CodeHolder contains no code.
+  kErrorNoCodeGenerated,
+
+  //! Invalid directive.
+  kErrorInvalidDirective,
+  //! Attempt to use uninitialized label.
+  kErrorInvalidLabel,
+  //! Label index overflow - a single \ref BaseAssembler instance can hold
+  //! almost 2^32 (4 billion) labels. If there is an attempt to create more
+  //! labels then this error is returned.
+  kErrorTooManyLabels,
+  //! Label is already bound.
+  kErrorLabelAlreadyBound,
+  //! Label is already defined (named labels).
+  kErrorLabelAlreadyDefined,
+  //! Label name is too long.
+  kErrorLabelNameTooLong,
+  //! Label must always be local if it's anonymous (without a name).
+  kErrorInvalidLabelName,
+  //! Parent id passed to \ref CodeHolder::newNamedLabelEntry() was invalid.
+  kErrorInvalidParentLabel,
+  //! Parent id specified for a non-local (global) label.
+  kErrorNonLocalLabelCannotHaveParent,
+
+  //! Invalid section.
+  kErrorInvalidSection,
+  //! Too many sections (section index overflow).
+  kErrorTooManySections,
+  //! Invalid section name (most probably too long).
+  kErrorInvalidSectionName,
+
+  //! Relocation index overflow (too many relocations).
+  kErrorTooManyRelocations,
+  //! Invalid relocation entry.
+  kErrorInvalidRelocEntry,
+  //! Reloc entry contains address that is out of range (unencodable).
+  kErrorRelocOffsetOutOfRange,
+
+  //! Invalid assignment to a register, function argument, or function return value.
+  kErrorInvalidAssignment,
+  //! Invalid instruction.
+  kErrorInvalidInstruction,
+  //! Invalid register type.
+  kErrorInvalidRegType,
+  //! Invalid register group.
+  kErrorInvalidRegGroup,
+  //! Invalid physical register id.
+  kErrorInvalidPhysId,
+  //! Invalid virtual register id.
+  kErrorInvalidVirtId,
+  //! Invalid prefix combination.
+  kErrorInvalidPrefixCombination,
+  //! Invalid LOCK prefix.
+  kErrorInvalidLockPrefix,
+  //! Invalid XACQUIRE prefix.
+  kErrorInvalidXAcquirePrefix,
+  //! Invalid XRELEASE prefix.
+  kErrorInvalidXReleasePrefix,
+  //! Invalid REP prefix.
+  kErrorInvalidRepPrefix,
+  //! Invalid REX prefix.
+  kErrorInvalidRexPrefix,
+  //! Invalid {...} register.
+  kErrorInvalidExtraReg,
+  //! Invalid {k} use (not supported by the instruction).
+  kErrorInvalidKMaskUse,
+  //! Invalid {k}{z} use (not supported by the instruction).
+  kErrorInvalidKZeroUse,
+  //! Invalid broadcast - Currently only related to invalid use of AVX-512 {1tox}.
+  kErrorInvalidBroadcast,
+  //! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512).
+  kErrorInvalidEROrSAE,
+  //! Invalid address used (not encodable).
+  kErrorInvalidAddress,
+  //! Invalid index register used in memory address (not encodable).
+  kErrorInvalidAddressIndex,
+  //! Invalid address scale (not encodable).
+  kErrorInvalidAddressScale,
+  //! Invalid use of 64-bit address.
+  kErrorInvalidAddress64Bit,
+  //! Invalid use of 64-bit address that require 32-bit zero-extension (X64).
+  kErrorInvalidAddress64BitZeroExtension,
+  //! Invalid displacement (not encodable).
+  kErrorInvalidDisplacement,
+  //! Invalid segment (X86).
+  kErrorInvalidSegment,
+
+  //! Invalid immediate (out of bounds on X86 and invalid pattern on ARM).
+  kErrorInvalidImmediate,
+
+  //! Invalid operand size.
+  kErrorInvalidOperandSize,
+  //! Ambiguous operand size (memory has zero size while it's required to determine the operation type).
+  kErrorAmbiguousOperandSize,
+  //! Mismatching operand size (size of multiple operands doesn't match the operation size).
+  kErrorOperandSizeMismatch,
+
+  //! Invalid option.
+  kErrorInvalidOption,
+  //! Option already defined.
+  kErrorOptionAlreadyDefined,
+
+  //! Invalid TypeId.
+  kErrorInvalidTypeId,
+  //! Invalid use of a 8-bit GPB-HIGH register.
+  kErrorInvalidUseOfGpbHi,
+  //! Invalid use of a 64-bit GPQ register in 32-bit mode.
+  kErrorInvalidUseOfGpq,
+  //! Invalid use of an 80-bit float (\ref Type::kIdF80).
+  kErrorInvalidUseOfF80,
+  //! Some registers in the instruction must be consecutive (some ARM and AVX512
+  //! neural-net instructions).
+  kErrorNotConsecutiveRegs,
+
+  //! Illegal virtual register - reported by instruction validation.
+  kErrorIllegalVirtReg,
+  //! AsmJit cannot create more virtual registers.
+  kErrorTooManyVirtRegs,
+
+  //! AsmJit requires a physical register, but no one is available.
+  kErrorNoMorePhysRegs,
+  //! A variable has been assigned more than once to a function argument (BaseCompiler).
+  kErrorOverlappedRegs,
+  //! Invalid register to hold stack arguments offset.
+  kErrorOverlappingStackRegWithRegArg,
+
+  //! Unbound label cannot be evaluated by expression.
+  kErrorExpressionLabelNotBound,
+  //! Arithmetic overflow during expression evaluation.
+  kErrorExpressionOverflow,
+
+  // @EnumValuesEnd@
+
+  //! Count of AsmJit error codes.
+  kErrorCount
+};
+
+// ============================================================================
+// [asmjit::DebugUtils]
+// ============================================================================
+
+//! Debugging utilities.
+namespace DebugUtils {
+
+//! \cond INTERNAL
+//! Used to silence warnings about unused arguments or variables.
+template<typename... Args>
+static ASMJIT_INLINE void unused(Args&&...) noexcept {}
+//! \endcond
+
+//! Returns the error `err` passed.
+//!
+//! Provided for debugging purposes. Putting a breakpoint inside `errored` can
+//! help with tracing the origin of any error reported / returned by AsmJit.
+static constexpr Error errored(Error err) noexcept { return err; }
+
+//! Returns a printable version of `asmjit::Error` code.
+ASMJIT_API const char* errorAsString(Error err) noexcept;
+
+//! Called to output debugging message(s).
+//!
+//! Goes to the debugger stream on Windows and to `stderr` elsewhere (see the
+//! implementation in asmjit/core/globals.cpp).
+ASMJIT_API void debugOutput(const char* str) noexcept;
+
+//! Called on assertion failure.
+//!
+//! \param file Source file name where it happened.
+//! \param line Line in the source file.
+//! \param msg Message to display.
+//!
+//! If you have problems with assertion failures a breakpoint can be put
+//! at \ref assertionFailed() function (asmjit/core/globals.cpp). A call stack
+//! will be available when such assertion failure is triggered. AsmJit always
+//! returns errors on failures, assertions are a last resort and usually mean
+//! unrecoverable state due to out of range array access or totally invalid
+//! arguments like nullptr where a valid pointer should be provided, etc...
+ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
+
+} // {DebugUtils}
+
+//! \def ASMJIT_ASSERT(...)
+//!
+//! AsmJit's own assert macro used in AsmJit code-base.
+//!
+//! Active only in debug builds (ASMJIT_BUILD_DEBUG). In release builds it
+//! expands to a no-op, so the asserted expression is NOT evaluated there -
+//! never put side effects into an assertion.
+#if defined(ASMJIT_BUILD_DEBUG)
+#define ASMJIT_ASSERT(...) \
+  do { \
+    if (ASMJIT_LIKELY(__VA_ARGS__)) \
+      break; \
+    ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
+  } while (0)
+#else
+#define ASMJIT_ASSERT(...) ((void)0)
+#endif
+
+//! \def ASMJIT_PROPAGATE(...)
+//!
+//! Propagates a possible `Error` produced by `...` to the caller by returning
+//! the error immediately. Used by AsmJit internally, but kept public for users
+//! that want to use the same technique to propagate errors to the caller.
+//!
+//! The expression is evaluated exactly once; its result is returned only
+//! when non-zero (an error).
+#define ASMJIT_PROPAGATE(...) \
+  do { \
+    ::asmjit::Error _err = __VA_ARGS__; \
+    if (ASMJIT_UNLIKELY(_err)) \
+      return _err; \
+  } while (0)
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_GLOBALS_H_INCLUDED
diff --git a/client/asmjit/core/inst.cpp b/client/asmjit/core/inst.cpp
new file mode 100644
index 0000000..a233b93
--- /dev/null
+++ b/client/asmjit/core/inst.cpp
@@ -0,0 +1,139 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+// NOTE(review): this translation unit is guarded by ASMJIT_BUILD_X86 only,
+// which makes the ASMJIT_BUILD_ARM include (and the ARM dispatch paths below)
+// dead in an ARM-only build - confirm whether the guard should be
+// `#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_ARM)`.
+#ifdef ASMJIT_BUILD_X86
+
+#include "../core/arch.h"
+#include "../core/inst.h"
+
+#ifdef ASMJIT_BUILD_X86
+  #include "../x86/x86instapi_p.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  #include "../arm/arminstapi_p.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::InstAPI - Text]
+// ============================================================================
+
+#ifndef ASMJIT_NO_TEXT
+// Appends the name of instruction `instId` of the given `arch` to `output`,
+// dispatching to the architecture backend that was compiled in.
+Error InstAPI::instIdToString(uint32_t arch, uint32_t instId, String& output) noexcept {
+#ifdef ASMJIT_BUILD_X86
+  if (Environment::isFamilyX86(arch))
+    return x86::InstInternal::instIdToString(arch, instId, output);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  if (Environment::isFamilyARM(arch))
+    return arm::InstInternal::instIdToString(arch, instId, output);
+#endif
+
+  return DebugUtils::errored(kErrorInvalidArch);
+}
+
+// Translates an instruction name `s` of length `len` into an instruction id.
+// Returns 0 (invalid id) when the name is unknown or the `arch` backend is
+// not compiled in.
+uint32_t InstAPI::stringToInstId(uint32_t arch, const char* s, size_t len) noexcept {
+#ifdef ASMJIT_BUILD_X86
+  if (Environment::isFamilyX86(arch))
+    return x86::InstInternal::stringToInstId(arch, s, len);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  if (Environment::isFamilyARM(arch))
+    return arm::InstInternal::stringToInstId(arch, s, len);
+#endif
+
+  return 0;
+}
+#endif // !ASMJIT_NO_TEXT
+
+// ============================================================================
+// [asmjit::InstAPI - Validate]
+// ============================================================================
+
+#ifndef ASMJIT_NO_VALIDATION
+// Validates `inst` and its `operands` against the instruction database of the
+// given `arch`, dispatching to the architecture backend that was compiled in.
+Error InstAPI::validate(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, uint32_t validationFlags) noexcept {
+#ifdef ASMJIT_BUILD_X86
+  if (Environment::isFamilyX86(arch))
+    return x86::InstInternal::validate(arch, inst, operands, opCount, validationFlags);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  if (Environment::isFamilyARM(arch))
+    return arm::InstInternal::validate(arch, inst, operands, opCount, validationFlags);
+#endif
+
+  return DebugUtils::errored(kErrorInvalidArch);
+}
+#endif // !ASMJIT_NO_VALIDATION
+
+// ============================================================================
+// [asmjit::InstAPI - QueryRWInfo]
+// ============================================================================
+
+#ifndef ASMJIT_NO_INTROSPECTION
+// Queries read/write information of `inst` and `operands` into `out`.
+// Rejects operand counts above Globals::kMaxOpCount before dispatching to the
+// architecture backend that was compiled in.
+Error InstAPI::queryRWInfo(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
+  if (ASMJIT_UNLIKELY(opCount > Globals::kMaxOpCount))
+    return DebugUtils::errored(kErrorInvalidArgument);
+
+#ifdef ASMJIT_BUILD_X86
+  if (Environment::isFamilyX86(arch))
+    return x86::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  if (Environment::isFamilyARM(arch))
+    return arm::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
+#endif
+
+  return DebugUtils::errored(kErrorInvalidArch);
+}
+#endif // !ASMJIT_NO_INTROSPECTION
+
+// ============================================================================
+// [asmjit::InstAPI - QueryFeatures]
+// ============================================================================
+
+#ifndef ASMJIT_NO_INTROSPECTION
+// Queries CPU features required to execute `inst` with the given `operands`
+// into `out`, dispatching to the architecture backend that was compiled in.
+Error InstAPI::queryFeatures(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, BaseFeatures* out) noexcept {
+#ifdef ASMJIT_BUILD_X86
+  if (Environment::isFamilyX86(arch))
+    return x86::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+  if (Environment::isFamilyARM(arch))
+    return arm::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
+#endif
+
+  return DebugUtils::errored(kErrorInvalidArch);
+}
+#endif // !ASMJIT_NO_INTROSPECTION
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86
diff --git a/client/asmjit/core/inst.h b/client/asmjit/core/inst.h
new file mode 100644
index 0000000..79619ae
--- /dev/null
+++ b/client/asmjit/core/inst.h
@@ -0,0 +1,559 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_INST_H_INCLUDED
+#define ASMJIT_CORE_INST_H_INCLUDED
+
+#include "../core/cpuinfo.h"
+#include "../core/operand.h"
+#include "../core/string.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_instruction_db
+//! \{
+
+// ============================================================================
+// [asmjit::BaseInst]
+// ============================================================================
+
+//! Instruction id, options, and extraReg in a single structure. This structure
+//! exists mainly to simplify analysis and validation API that requires `BaseInst`
+//! and `Operand[]` array.
+class BaseInst {
+public:
+ //! Instruction id, see \ref BaseInst::Id or {arch-specific}::Inst::Id.
+ uint32_t _id;
+ //! Instruction options, see \ref BaseInst::Options or {arch-specific}::Inst::Options.
+ uint32_t _options;
+ //! Extra register used by instruction (either REP register or AVX-512 selector).
+ RegOnly _extraReg;
+
+ enum Id : uint32_t {
+ //! Invalid or uninitialized instruction id.
+ kIdNone = 0x00000000u,
+ //! Abstract instruction (BaseBuilder and BaseCompiler).
+ kIdAbstract = 0x80000000u
+ };
+
+ enum Options : uint32_t {
+ //! Used internally by emitters for handling errors and rare cases.
+ kOptionReserved = 0x00000001u,
+
+ //! Prevents following a jump during compilation (BaseCompiler).
+ kOptionUnfollow = 0x00000010u,
+
+ //! Overwrite the destination operand(s) (BaseCompiler).
+ //!
+ //! Hint that is important for register liveness analysis. It tells the
+ //! compiler that the destination operand will be overwritten now or by
+ //! adjacent instructions. BaseCompiler knows when a register is completely
+ //! overwritten by a single instruction, for example you don't have to
+ //! mark "movaps" or "pxor x, x", however, if a pair of instructions is
+ //! used and the first of them doesn't completely overwrite the content
+ //! of the destination, BaseCompiler fails to mark that register as dead.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+ //! - All instructions that always overwrite at least the size of the
+//!    register the virtual-register uses, for example "mov", "movq",
+ //! "movaps" don't need the overwrite option to be used - conversion,
+ //! shuffle, and other miscellaneous instructions included.
+ //!
+ //! - All instructions that clear the destination register if all operands
+ //! are the same, for example "xor x, x", "pcmpeqb x x", etc...
+ //!
+ //! - Consecutive instructions that partially overwrite the variable until
+ //! there is no old content require `BaseCompiler::overwrite()` to be used.
+//!    Some examples (not always the best use cases though):
+ //!
+ //! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
+ //! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
+ //! - `mov al, ?` followed by `and ax, 0xFF`
+ //! - `mov al, ?` followed by `mov ah, al`
+ //! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
+ //!
+ //! - If allocated variable is used temporarily for scalar operations. For
+ //! example if you allocate a full vector like `x86::Compiler::newXmm()`
+ //! and then use that vector for scalar operations you should use
+ //! `overwrite()` directive:
+ //!
+ //! - `sqrtss x, y` - only LO element of `x` is changed, if you don't
+ //! use HI elements, use `compiler.overwrite().sqrtss(x, y)`.
+ kOptionOverwrite = 0x00000020u,
+
+ //! Emit short-form of the instruction.
+ kOptionShortForm = 0x00000040u,
+ //! Emit long-form of the instruction.
+ kOptionLongForm = 0x00000080u,
+
+ //! Conditional jump is likely to be taken.
+ kOptionTaken = 0x00000100u,
+ //! Conditional jump is unlikely to be taken.
+ kOptionNotTaken = 0x00000200u
+ };
+
+ //! Control type.
+ enum ControlType : uint32_t {
+ //! No control type (doesn't jump).
+ kControlNone = 0u,
+ //! Unconditional jump.
+ kControlJump = 1u,
+ //! Conditional jump (branch).
+ kControlBranch = 2u,
+ //! Function call.
+ kControlCall = 3u,
+ //! Function return.
+ kControlReturn = 4u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new BaseInst instance with `id` and `options` set.
+ //!
+  //! Default values of `id` and `options` are zero, which means no instruction.
+ //! Such instruction is guaranteed to never exist for any architecture supported
+ //! by AsmJit.
+ inline explicit BaseInst(uint32_t id = 0, uint32_t options = 0) noexcept
+ : _id(id),
+ _options(options),
+ _extraReg() {}
+
+ inline BaseInst(uint32_t id, uint32_t options, const RegOnly& extraReg) noexcept
+ : _id(id),
+ _options(options),
+ _extraReg(extraReg) {}
+
+ inline BaseInst(uint32_t id, uint32_t options, const BaseReg& extraReg) noexcept
+ : _id(id),
+ _options(options),
+ _extraReg { extraReg.signature(), extraReg.id() } {}
+
+ //! \}
+
+ //! \name Instruction ID
+ //! \{
+
+ //! Returns the instruction id.
+ inline uint32_t id() const noexcept { return _id; }
+ //! Sets the instruction id to the given `id`.
+ inline void setId(uint32_t id) noexcept { _id = id; }
+ //! Resets the instruction id to zero, see \ref kIdNone.
+ inline void resetId() noexcept { _id = 0; }
+
+ //! \}
+
+ //! \name Instruction Options
+ //! \{
+
+ inline uint32_t options() const noexcept { return _options; }
+ inline bool hasOption(uint32_t option) const noexcept { return (_options & option) != 0; }
+ inline void setOptions(uint32_t options) noexcept { _options = options; }
+ inline void addOptions(uint32_t options) noexcept { _options |= options; }
+ inline void clearOptions(uint32_t options) noexcept { _options &= ~options; }
+ inline void resetOptions() noexcept { _options = 0; }
+
+ //! \}
+
+ //! \name Extra Register
+ //! \{
+
+ inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
+ inline RegOnly& extraReg() noexcept { return _extraReg; }
+ inline const RegOnly& extraReg() const noexcept { return _extraReg; }
+ inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
+ inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
+ inline void resetExtraReg() noexcept { _extraReg.reset(); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::OpRWInfo]
+// ============================================================================
+
+//! Read/Write information related to a single operand, used by \ref InstRWInfo.
+struct OpRWInfo {
+ //! Read/Write flags, see \ref OpRWInfo::Flags.
+ uint32_t _opFlags;
+ //! Physical register index, if required.
+ uint8_t _physId;
+ //! Size of a possible memory operand that can replace a register operand.
+ uint8_t _rmSize;
+ //! Reserved for future use.
+ uint8_t _reserved[2];
+ //! Read bit-mask where each bit represents one byte read from Reg/Mem.
+ uint64_t _readByteMask;
+ //! Write bit-mask where each bit represents one byte written to Reg/Mem.
+ uint64_t _writeByteMask;
+ //! Zero/Sign extend bit-mask where each bit represents one byte written to Reg/Mem.
+ uint64_t _extendByteMask;
+
+ //! Flags describe how the operand is accessed and some additional information.
+ enum Flags : uint32_t {
+ //! Operand is read.
+ kRead = 0x00000001u,
+
+ //! Operand is written.
+ kWrite = 0x00000002u,
+
+ //! Operand is both read and written.
+ kRW = 0x00000003u,
+
+ //! Register operand can be replaced by a memory operand.
+ kRegMem = 0x00000004u,
+
+ //! The `extendByteMask()` represents a zero extension.
+ kZExt = 0x00000010u,
+
+ //! Register operand must use \ref physId().
+ kRegPhysId = 0x00000100u,
+ //! Base register of a memory operand must use \ref physId().
+ kMemPhysId = 0x00000200u,
+
+ //! This memory operand is only used to encode registers and doesn't access memory.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+ //! Instructions that use such feature include BNDLDX, BNDSTX, and LEA.
+ kMemFake = 0x000000400u,
+
+ //! Base register of the memory operand will be read.
+ kMemBaseRead = 0x00001000u,
+ //! Base register of the memory operand will be written.
+ kMemBaseWrite = 0x00002000u,
+ //! Base register of the memory operand will be read & written.
+ kMemBaseRW = 0x00003000u,
+
+ //! Index register of the memory operand will be read.
+ kMemIndexRead = 0x00004000u,
+ //! Index register of the memory operand will be written.
+ kMemIndexWrite = 0x00008000u,
+ //! Index register of the memory operand will be read & written.
+ kMemIndexRW = 0x0000C000u,
+
+ //! Base register of the memory operand will be modified before the operation.
+ kMemBasePreModify = 0x00010000u,
+ //! Base register of the memory operand will be modified after the operation.
+ kMemBasePostModify = 0x00020000u
+ };
+
+ // Don't remove these asserts. Read/Write flags are used extensively
+ // by Compiler and they must always be compatible with constants below.
+ static_assert(kRead == 0x1, "OpRWInfo::kRead flag must be 0x1");
+ static_assert(kWrite == 0x2, "OpRWInfo::kWrite flag must be 0x2");
+ static_assert(kRegMem == 0x4, "OpRWInfo::kRegMem flag must be 0x4");
+
+ //! \name Reset
+ //! \{
+
+ //! Resets this operand information to all zeros.
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! Resets this operand info (resets all members) and set common information
+ //! to the given `opFlags`, `regSize`, and possibly `physId`.
+ inline void reset(uint32_t opFlags, uint32_t regSize, uint32_t physId = BaseReg::kIdBad) noexcept {
+ _opFlags = opFlags;
+ _physId = uint8_t(physId);
+ _rmSize = uint8_t((opFlags & kRegMem) ? regSize : uint32_t(0));
+ _resetReserved();
+
+ uint64_t mask = Support::lsbMask<uint64_t>(regSize);
+ _readByteMask = opFlags & kRead ? mask : uint64_t(0);
+ _writeByteMask = opFlags & kWrite ? mask : uint64_t(0);
+ _extendByteMask = 0;
+ }
+
+ inline void _resetReserved() noexcept {
+ memset(_reserved, 0, sizeof(_reserved));
+ }
+
+ //! \}
+
+ //! \name Operand Flags
+ //! \{
+
+ //! Returns operand flags, see \ref Flags.
+ inline uint32_t opFlags() const noexcept { return _opFlags; }
+ //! Tests whether operand flags contain the given `flag`.
+ inline bool hasOpFlag(uint32_t flag) const noexcept { return (_opFlags & flag) != 0; }
+
+ //! Adds the given `flags` to operand flags.
+ inline void addOpFlags(uint32_t flags) noexcept { _opFlags |= flags; }
+ //! Removes the given `flags` from operand flags.
+ inline void clearOpFlags(uint32_t flags) noexcept { _opFlags &= ~flags; }
+
+ //! Tests whether this operand is read from.
+ inline bool isRead() const noexcept { return hasOpFlag(kRead); }
+ //! Tests whether this operand is written to.
+ inline bool isWrite() const noexcept { return hasOpFlag(kWrite); }
+ //! Tests whether this operand is both read and write.
+ inline bool isReadWrite() const noexcept { return (_opFlags & kRW) == kRW; }
+ //! Tests whether this operand is read only.
+ inline bool isReadOnly() const noexcept { return (_opFlags & kRW) == kRead; }
+ //! Tests whether this operand is write only.
+ inline bool isWriteOnly() const noexcept { return (_opFlags & kRW) == kWrite; }
+
+ //! Tests whether this operand is Reg/Mem
+ //!
+ //! Reg/Mem operands can use either register or memory.
+ inline bool isRm() const noexcept { return hasOpFlag(kRegMem); }
+
+ //! Tests whether the operand will be zero extended.
+ inline bool isZExt() const noexcept { return hasOpFlag(kZExt); }
+
+ //! \}
+
+ //! \name Memory Flags
+ //! \{
+
+ //! Tests whether this is a fake memory operand, which is only used, because
+ //! of encoding. Fake memory operands do not access any memory, they are only
+ //! used to encode registers.
+ inline bool isMemFake() const noexcept { return hasOpFlag(kMemFake); }
+
+ //! Tests whether the instruction's memory BASE register is used.
+ inline bool isMemBaseUsed() const noexcept { return (_opFlags & kMemBaseRW) != 0; }
+ //! Tests whether the instruction reads from its BASE registers.
+ inline bool isMemBaseRead() const noexcept { return hasOpFlag(kMemBaseRead); }
+ //! Tests whether the instruction writes to its BASE registers.
+ inline bool isMemBaseWrite() const noexcept { return hasOpFlag(kMemBaseWrite); }
+ //! Tests whether the instruction reads and writes from/to its BASE registers.
+ inline bool isMemBaseReadWrite() const noexcept { return (_opFlags & kMemBaseRW) == kMemBaseRW; }
+ //! Tests whether the instruction only reads from its BASE registers.
+ inline bool isMemBaseReadOnly() const noexcept { return (_opFlags & kMemBaseRW) == kMemBaseRead; }
+ //! Tests whether the instruction only writes to its BASE registers.
+ inline bool isMemBaseWriteOnly() const noexcept { return (_opFlags & kMemBaseRW) == kMemBaseWrite; }
+
+ //! Tests whether the instruction modifies the BASE register before it uses
+ //! it to calculate the target address.
+ inline bool isMemBasePreModify() const noexcept { return hasOpFlag(kMemBasePreModify); }
+ //! Tests whether the instruction modifies the BASE register after it uses
+ //! it to calculate the target address.
+ inline bool isMemBasePostModify() const noexcept { return hasOpFlag(kMemBasePostModify); }
+
+ //! Tests whether the instruction's memory INDEX register is used.
+ inline bool isMemIndexUsed() const noexcept { return (_opFlags & kMemIndexRW) != 0; }
+ //! Tests whether the instruction reads the INDEX registers.
+ inline bool isMemIndexRead() const noexcept { return hasOpFlag(kMemIndexRead); }
+ //! Tests whether the instruction writes to its INDEX registers.
+ inline bool isMemIndexWrite() const noexcept { return hasOpFlag(kMemIndexWrite); }
+ //! Tests whether the instruction reads and writes from/to its INDEX registers.
+ inline bool isMemIndexReadWrite() const noexcept { return (_opFlags & kMemIndexRW) == kMemIndexRW; }
+ //! Tests whether the instruction only reads from its INDEX registers.
+ inline bool isMemIndexReadOnly() const noexcept { return (_opFlags & kMemIndexRW) == kMemIndexRead; }
+ //! Tests whether the instruction only writes to its INDEX registers.
+ inline bool isMemIndexWriteOnly() const noexcept { return (_opFlags & kMemIndexRW) == kMemIndexWrite; }
+
+ //! \}
+
+ //! \name Physical Register ID
+ //! \{
+
+ //! Returns a physical id of the register that is fixed for this operand.
+ //!
+ //! Returns \ref BaseReg::kIdBad if any register can be used.
+ inline uint32_t physId() const noexcept { return _physId; }
+ //! Tests whether \ref physId() would return a valid physical register id.
+ inline bool hasPhysId() const noexcept { return _physId != BaseReg::kIdBad; }
+ //! Sets physical register id, which would be fixed for this operand.
+ inline void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); }
+
+ //! \}
+
+ //! \name Reg/Mem Information
+ //! \{
+
+ //! Returns Reg/Mem size of the operand.
+ inline uint32_t rmSize() const noexcept { return _rmSize; }
+ //! Sets Reg/Mem size of the operand.
+ inline void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); }
+
+ //! \}
+
+ //! \name Read & Write Masks
+ //! \{
+
+ //! Returns read mask.
+ inline uint64_t readByteMask() const noexcept { return _readByteMask; }
+ //! Returns write mask.
+ inline uint64_t writeByteMask() const noexcept { return _writeByteMask; }
+ //! Returns extend mask.
+ inline uint64_t extendByteMask() const noexcept { return _extendByteMask; }
+
+ //! Sets read mask.
+ inline void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; }
+ //! Sets write mask.
+ inline void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; }
+  //! Sets extend mask.
+ inline void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InstRWInfo]
+// ============================================================================
+
+//! Read/Write information of an instruction.
+struct InstRWInfo {
+ //! Instruction flags (there are no flags at the moment, this field is reserved).
+ uint32_t _instFlags;
+ //! Mask of CPU flags read.
+ uint32_t _readFlags;
+ //! Mask of CPU flags written.
+ uint32_t _writeFlags;
+ //! Count of operands.
+ uint8_t _opCount;
+ //! CPU feature required for replacing register operand with memory operand.
+ uint8_t _rmFeature;
+ //! Reserved for future use.
+ uint8_t _reserved[18];
+  //! Read/Write info of extra register (rep{} or kz{}).
+ OpRWInfo _extraReg;
+ //! Read/Write info of instruction operands.
+ OpRWInfo _operands[Globals::kMaxOpCount];
+
+ //! \name Commons
+ //! \{
+
+ //! Resets this RW information to all zeros.
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! \}
+
+ //! \name Instruction Flags
+ //!
+ //! \{
+
+ inline uint32_t instFlags() const noexcept { return _instFlags; }
+ inline bool hasInstFlag(uint32_t flag) const noexcept { return (_instFlags & flag) != 0; }
+
+  //! \}
+
+ //! \name CPU Flags Read/Write Information
+ //! \{
+
+ //! Returns read flags of the instruction.
+ inline uint32_t readFlags() const noexcept { return _readFlags; }
+ //! Returns write flags of the instruction.
+ inline uint32_t writeFlags() const noexcept { return _writeFlags; }
+
+ //! \}
+
+ //! \name Reg/Mem Information
+ //! \{
+
+ //! Returns the CPU feature required to replace a register operand with memory
+ //! operand. If the returned feature is zero (none) then this instruction
+ //! either doesn't provide memory operand combination or there is no extra
+ //! CPU feature required.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+ //! Some AVX+ instructions may require extra features for replacing registers
+ //! with memory operands, for example VPSLLDQ instruction only supports
+ //! 'reg/reg/imm' combination on AVX/AVX2 capable CPUs and requires AVX-512 for
+ //! 'reg/mem/imm' combination.
+ inline uint32_t rmFeature() const noexcept { return _rmFeature; }
+
+ //! \}
+
+ //! \name Operand Read/Write Information
+ //! \{
+
+ //! Returns RW information of extra register operand (extraReg).
+ inline const OpRWInfo& extraReg() const noexcept { return _extraReg; }
+
+ //! Returns RW information of all instruction's operands.
+ inline const OpRWInfo* operands() const noexcept { return _operands; }
+
+ //! Returns RW information of the operand at the given `index`.
+ inline const OpRWInfo& operand(size_t index) const noexcept {
+ ASMJIT_ASSERT(index < Globals::kMaxOpCount);
+ return _operands[index];
+ }
+
+ //! Returns the number of operands this instruction has.
+ inline uint32_t opCount() const noexcept { return _opCount; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InstAPI]
+// ============================================================================
+
+//! Instruction API.
+namespace InstAPI {
+
+//! Validation flags that can be used with \ref InstAPI::validate().
+enum ValidationFlags : uint32_t {
+ //! Allow virtual registers in the instruction.
+ kValidationFlagVirtRegs = 0x01u
+};
+
+#ifndef ASMJIT_NO_TEXT
+//! Appends the name of the instruction specified by `instId` and `instOptions`
+//! into the `output` string.
+//!
+//! \note Instruction options would only affect instruction prefix & suffix,
+//! other options would be ignored. If `instOptions` is zero then only raw
+//! instruction name (without any additional text) will be appended.
+ASMJIT_API Error instIdToString(uint32_t arch, uint32_t instId, String& output) noexcept;
+
+//! Parses an instruction name in the given string `s`. Length is specified
+//! by `len` argument, which can be `SIZE_MAX` if `s` is known to be null
+//! terminated.
+//!
+//! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such
+//! instruction exists.
+ASMJIT_API uint32_t stringToInstId(uint32_t arch, const char* s, size_t len) noexcept;
+#endif // !ASMJIT_NO_TEXT
+
+#ifndef ASMJIT_NO_VALIDATION
+//! Validates the given instruction considering the validation `flags`, see
+//! \ref ValidationFlags.
+ASMJIT_API Error validate(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, uint32_t validationFlags = 0) noexcept;
+#endif // !ASMJIT_NO_VALIDATION
+
+#ifndef ASMJIT_NO_INTROSPECTION
+//! Gets Read/Write information of the given instruction.
+ASMJIT_API Error queryRWInfo(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
+
+//! Gets CPU features required by the given instruction.
+ASMJIT_API Error queryFeatures(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, BaseFeatures* out) noexcept;
+#endif // !ASMJIT_NO_INTROSPECTION
+
+} // {InstAPI}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_INST_H_INCLUDED
diff --git a/client/asmjit/core/jitallocator.cpp b/client/asmjit/core/jitallocator.cpp
new file mode 100644
index 0000000..b228511
--- /dev/null
+++ b/client/asmjit/core/jitallocator.cpp
@@ -0,0 +1,1152 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/arch.h"
+#include "../core/jitallocator.h"
+#include "../core/osutils_p.h"
+#include "../core/support.h"
+#include "../core/virtmem.h"
+#include "../core/zone.h"
+#include "../core/zonelist.h"
+#include "../core/zonetree.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::JitAllocator - Constants]
+// ============================================================================
+
+enum JitAllocatorConstants : uint32_t {
+ //! Number of pools to use when `JitAllocator::kOptionUseMultiplePools` is set.
+ //!
+ //! Each pool increases granularity twice to make memory management more
+ //! efficient. Ideal number of pools appears to be 3 to 4 as it distributes
+ //! small and large functions properly.
+ kJitAllocatorMultiPoolCount = 3,
+
+ //! Minimum granularity (and the default granularity for pool #0).
+ kJitAllocatorBaseGranularity = 64,
+
+ //! Maximum block size (16MB).
+ kJitAllocatorMaxBlockSize = 1024 * 1024 * 16
+};
+
+static inline uint32_t JitAllocator_defaultFillPattern() noexcept {
+ // X86 and X86_64 - 4x 'int3' instruction.
+ if (ASMJIT_ARCH_X86)
+ return 0xCCCCCCCCu;
+
+ // Unknown...
+ return 0u;
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - BitFlipIterator]
+// ============================================================================
+
+//! BitWord[] iterator used by `JitAllocator` that can flip the search pattern
+//! during iteration.
+template<typename T>
+class BitFlipIterator {
+public:
+ ASMJIT_INLINE BitFlipIterator(const T* data, size_t numBitWords, size_t start = 0, T xorMask = 0) noexcept {
+ init(data, numBitWords, start, xorMask);
+ }
+
+ ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start = 0, T xorMask = 0) noexcept {
+ const T* ptr = data + (start / Support::bitSizeOf<T>());
+ size_t idx = Support::alignDown(start, Support::bitSizeOf<T>());
+ size_t end = numBitWords * Support::bitSizeOf<T>();
+
+ T bitWord = T(0);
+ if (idx < end) {
+ bitWord = (*ptr++ ^ xorMask) & (Support::allOnes<T>() << (start % Support::bitSizeOf<T>()));
+ while (!bitWord && (idx += Support::bitSizeOf<T>()) < end)
+ bitWord = *ptr++ ^ xorMask;
+ }
+
+ _ptr = ptr;
+ _idx = idx;
+ _end = end;
+ _current = bitWord;
+ _xorMask = xorMask;
+ }
+
+ ASMJIT_INLINE bool hasNext() const noexcept {
+ return _current != T(0);
+ }
+
+ ASMJIT_INLINE size_t next() noexcept {
+ T bitWord = _current;
+ ASMJIT_ASSERT(bitWord != T(0));
+
+ uint32_t bit = Support::ctz(bitWord);
+ bitWord ^= T(1u) << bit;
+
+ size_t n = _idx + bit;
+ while (!bitWord && (_idx += Support::bitSizeOf<T>()) < _end)
+ bitWord = *_ptr++ ^ _xorMask;
+
+ _current = bitWord;
+ return n;
+ }
+
+ ASMJIT_INLINE size_t nextAndFlip() noexcept {
+ T bitWord = _current;
+ ASMJIT_ASSERT(bitWord != T(0));
+
+ uint32_t bit = Support::ctz(bitWord);
+ bitWord ^= Support::allOnes<T>() << bit;
+ _xorMask ^= Support::allOnes<T>();
+
+ size_t n = _idx + bit;
+ while (!bitWord && (_idx += Support::bitSizeOf<T>()) < _end)
+ bitWord = *_ptr++ ^ _xorMask;
+
+ _current = bitWord;
+ return n;
+ }
+
+ ASMJIT_INLINE size_t peekNext() const noexcept {
+ ASMJIT_ASSERT(_current != T(0));
+ return _idx + Support::ctz(_current);
+ }
+
+ const T* _ptr;
+ size_t _idx;
+ size_t _end;
+ T _current;
+ T _xorMask;
+};
+
+// ============================================================================
+// [asmjit::JitAllocator - Pool]
+// ============================================================================
+
+class JitAllocatorBlock;
+
+class JitAllocatorPool {
+public:
+ ASMJIT_NONCOPYABLE(JitAllocatorPool)
+
+ inline JitAllocatorPool(uint32_t granularity) noexcept
+ : blocks(),
+ cursor(nullptr),
+ blockCount(0),
+ granularity(uint16_t(granularity)),
+ granularityLog2(uint8_t(Support::ctz(granularity))),
+ emptyBlockCount(0),
+ totalAreaSize(0),
+ totalAreaUsed(0),
+ totalOverheadBytes(0) {}
+
+ inline void reset() noexcept {
+ blocks.reset();
+ cursor = nullptr;
+ blockCount = 0;
+ totalAreaSize = 0;
+ totalAreaUsed = 0;
+ totalOverheadBytes = 0;
+ }
+
+ inline size_t byteSizeFromAreaSize(uint32_t areaSize) const noexcept { return size_t(areaSize) * granularity; }
+ inline uint32_t areaSizeFromByteSize(size_t size) const noexcept { return uint32_t((size + granularity - 1) >> granularityLog2); }
+
+ inline size_t bitWordCountFromAreaSize(uint32_t areaSize) const noexcept {
+ using namespace Support;
+ return alignUp<size_t>(areaSize, kBitWordSizeInBits) / kBitWordSizeInBits;
+ }
+
+ //! Double linked list of blocks.
+ ZoneList<JitAllocatorBlock> blocks;
+ //! Where to start looking first.
+ JitAllocatorBlock* cursor;
+
+ //! Count of blocks.
+ uint32_t blockCount;
+ //! Allocation granularity.
+ uint16_t granularity;
+ //! Log2(granularity).
+ uint8_t granularityLog2;
+ //! Count of empty blocks (either 0 or 1 as we won't keep more blocks empty).
+ uint8_t emptyBlockCount;
+
+ //! Number of bits reserved across all blocks.
+ size_t totalAreaSize;
+ //! Number of bits used across all blocks.
+ size_t totalAreaUsed;
+ //! Overhead of all blocks (in bytes).
+ size_t totalOverheadBytes;
+};
+
+// ============================================================================
+// [asmjit::JitAllocator - Block]
+// ============================================================================
+
+class JitAllocatorBlock : public ZoneTreeNodeT<JitAllocatorBlock>,
+ public ZoneListNode<JitAllocatorBlock> {
+public:
+ ASMJIT_NONCOPYABLE(JitAllocatorBlock)
+
+ enum Flags : uint32_t {
+ //! Block is empty.
+ kFlagEmpty = 0x00000001u,
+ //! Block is dirty (largestUnusedArea, searchStart, searchEnd).
+ kFlagDirty = 0x00000002u,
+ //! Block is dual-mapped.
+ kFlagDualMapped = 0x00000004u
+ };
+
+ inline JitAllocatorBlock(
+ JitAllocatorPool* pool,
+ VirtMem::DualMapping mapping,
+ size_t blockSize,
+ uint32_t blockFlags,
+ Support::BitWord* usedBitVector,
+ Support::BitWord* stopBitVector,
+ uint32_t areaSize) noexcept
+ : ZoneTreeNodeT(),
+ pool(pool),
+ mapping(mapping),
+ blockSize(blockSize),
+ flags(blockFlags),
+ areaSize(areaSize),
+ areaUsed(0),
+ largestUnusedArea(areaSize),
+ searchStart(0),
+ searchEnd(areaSize),
+ usedBitVector(usedBitVector),
+ stopBitVector(stopBitVector) {}
+
+ inline uint8_t* roPtr() const noexcept { return static_cast<uint8_t*>(mapping.ro); }
+ inline uint8_t* rwPtr() const noexcept { return static_cast<uint8_t*>(mapping.rw); }
+
+ inline bool hasFlag(uint32_t f) const noexcept { return (flags & f) != 0; }
+ inline void addFlags(uint32_t f) noexcept { flags |= f; }
+ inline void clearFlags(uint32_t f) noexcept { flags &= ~f; }
+
+ inline uint32_t areaAvailable() const noexcept { return areaSize - areaUsed; }
+
+ inline void increaseUsedArea(uint32_t value) noexcept {
+ areaUsed += value;
+ pool->totalAreaUsed += value;
+ }
+
+ inline void decreaseUsedArea(uint32_t value) noexcept {
+ areaUsed -= value;
+ pool->totalAreaUsed -= value;
+ }
+
+ // RBTree default CMP uses '<' and '>' operators.
+ inline bool operator<(const JitAllocatorBlock& other) const noexcept { return roPtr() < other.roPtr(); }
+ inline bool operator>(const JitAllocatorBlock& other) const noexcept { return roPtr() > other.roPtr(); }
+
+ // Special implementation for querying blocks by `key`, which must be in `[BlockPtr, BlockPtr + BlockSize)` range.
+ inline bool operator<(const uint8_t* key) const noexcept { return roPtr() + blockSize <= key; }
+ inline bool operator>(const uint8_t* key) const noexcept { return roPtr() > key; }
+
+ //! Link to the pool that owns this block.
+ JitAllocatorPool* pool;
+ //! Virtual memory mapping - either single mapping (both pointers equal) or
+ //! dual mapping, where one pointer is Read+Execute and the second Read+Write.
+ VirtMem::DualMapping mapping;
+ //! Virtual memory size (block size) [bytes].
+ size_t blockSize;
+
+ //! Block flags.
+ uint32_t flags;
+ //! Size of the whole block area (bit-vector size).
+ uint32_t areaSize;
+ //! Used area (number of bits in bit-vector used).
+ uint32_t areaUsed;
+ //! The largest unused continuous area in the bit-vector (or `areaSize` to initiate rescan).
+ uint32_t largestUnusedArea;
+ //! Start of a search range (for unused bits).
+ uint32_t searchStart;
+ //! End of a search range (for unused bits).
+ uint32_t searchEnd;
+
+ //! Used bit-vector (0 = unused, 1 = used).
+ Support::BitWord* usedBitVector;
+ //! Stop bit-vector (0 = don't care, 1 = stop).
+ Support::BitWord* stopBitVector;
+};
+
+// ============================================================================
+// [asmjit::JitAllocator - PrivateImpl]
+// ============================================================================
+
//! Private implementation of `JitAllocator::Impl` - adds the lock, the
//! address-to-block RBTree, and the pool array, which is co-allocated right
//! behind this object by `JitAllocatorImpl_new()`.
class JitAllocatorPrivateImpl : public JitAllocator::Impl {
public:
  //! Wraps an externally constructed `pools` array of `poolCount` entries.
  inline JitAllocatorPrivateImpl(JitAllocatorPool* pools, size_t poolCount) noexcept
    : JitAllocator::Impl {},
      pools(pools),
      poolCount(poolCount) {}
  inline ~JitAllocatorPrivateImpl() noexcept {}

  //! Lock for thread safety.
  mutable Lock lock;
  //! System page size (also a minimum block size).
  uint32_t pageSize;

  //! Blocks from all pools in RBTree.
  ZoneTree<JitAllocatorBlock> tree;
  //! Allocator pools.
  JitAllocatorPool* pools;
  //! Number of allocator pools.
  size_t poolCount;
};
+
//! Shared immutable "null" impl - assigned to `_impl` when construction fails
//! so member functions never have to test for a null pointer.
static const JitAllocator::Impl JitAllocatorImpl_none {};
//! Defaults used when the user passes `nullptr` parameters to the constructor.
static const JitAllocator::CreateParams JitAllocatorParams_none {};
+
+// ============================================================================
+// [asmjit::JitAllocator - Utilities]
+// ============================================================================
+
+static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::CreateParams* params) noexcept {
+ VirtMem::Info vmInfo = VirtMem::info();
+
+ if (!params)
+ params = &JitAllocatorParams_none;
+
+ uint32_t options = params->options;
+ uint32_t blockSize = params->blockSize;
+ uint32_t granularity = params->granularity;
+ uint32_t fillPattern = params->fillPattern;
+
+ // Setup pool count to [1..3].
+ size_t poolCount = 1;
+ if (options & JitAllocator::kOptionUseMultiplePools)
+ poolCount = kJitAllocatorMultiPoolCount;;
+
+ // Setup block size [64kB..256MB].
+ if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize))
+ blockSize = vmInfo.pageGranularity;
+
+ // Setup granularity [64..256].
+ if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity))
+ granularity = kJitAllocatorBaseGranularity;
+
+ // Setup fill-pattern.
+ if (!(options & JitAllocator::kOptionCustomFillPattern))
+ fillPattern = JitAllocator_defaultFillPattern();
+
+ size_t size = sizeof(JitAllocatorPrivateImpl) + sizeof(JitAllocatorPool) * poolCount;
+ void* p = ::malloc(size);
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+
+ JitAllocatorPool* pools = reinterpret_cast<JitAllocatorPool*>((uint8_t*)p + sizeof(JitAllocatorPrivateImpl));
+ JitAllocatorPrivateImpl* impl = new(p) JitAllocatorPrivateImpl(pools, poolCount);
+
+ impl->options = options;
+ impl->blockSize = blockSize;
+ impl->granularity = granularity;
+ impl->fillPattern = fillPattern;
+ impl->pageSize = vmInfo.pageSize;
+
+ for (size_t poolId = 0; poolId < poolCount; poolId++)
+ new(&pools[poolId]) JitAllocatorPool(granularity << poolId);
+
+ return impl;
+}
+
//! Destroys `impl` created by `JitAllocatorImpl_new()`. The pools live inside
//! impl's own allocation, so a single `free()` releases everything (note that
//! pool destructors are not invoked explicitly here).
static inline void JitAllocatorImpl_destroy(JitAllocatorPrivateImpl* impl) noexcept {
  impl->~JitAllocatorPrivateImpl();
  ::free(impl);
}
+
+static inline size_t JitAllocatorImpl_sizeToPoolId(const JitAllocatorPrivateImpl* impl, size_t size) noexcept {
+ size_t poolId = impl->poolCount - 1;
+ size_t granularity = size_t(impl->granularity) << poolId;
+
+ while (poolId) {
+ if (Support::alignUp(size, granularity) == size)
+ break;
+ poolId--;
+ granularity >>= 1;
+ }
+
+ return poolId;
+}
+
+static inline size_t JitAllocatorImpl_bitVectorSizeToByteSize(uint32_t areaSize) noexcept {
+ using Support::kBitWordSizeInBits;
+ return ((areaSize + kBitWordSizeInBits - 1u) / kBitWordSizeInBits) * sizeof(Support::BitWord);
+}
+
+static inline size_t JitAllocatorImpl_calculateIdealBlockSize(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t allocationSize) noexcept {
+ JitAllocatorBlock* last = pool->blocks.last();
+ size_t blockSize = last ? last->blockSize : size_t(impl->blockSize);
+
+ if (blockSize < kJitAllocatorMaxBlockSize)
+ blockSize *= 2u;
+
+ if (allocationSize > blockSize) {
+ blockSize = Support::alignUp(allocationSize, impl->blockSize);
+ if (ASMJIT_UNLIKELY(blockSize < allocationSize))
+ return 0; // Overflown.
+ }
+
+ return blockSize;
+}
+
+ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t pattern, size_t sizeInBytes) noexcept {
+ size_t n = sizeInBytes / 4u;
+ uint32_t* p = static_cast<uint32_t*>(mem);
+
+ for (size_t i = 0; i < n; i++)
+ p[i] = pattern;
+}
+
// Allocate a new `JitAllocatorBlock` for the given `blockSize`.
//
// NOTE: The block doesn't have `kFlagEmpty` flag set, because the new block
// is only allocated when it's actually needed, so it would be cleared anyway.
//
// Returns nullptr if the block header, the bit-vectors, or the virtual memory
// mapping cannot be allocated; partial allocations are rolled back.
static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t blockSize) noexcept {
  using Support::BitWord;
  using Support::kBitWordSizeInBits;

  // Number of granularity-sized area units covering the block, rounded up.
  uint32_t areaSize = uint32_t((blockSize + pool->granularity - 1) >> pool->granularityLog2);
  uint32_t numBitWords = (areaSize + kBitWordSizeInBits - 1u) / kBitWordSizeInBits;

  JitAllocatorBlock* block = static_cast<JitAllocatorBlock*>(::malloc(sizeof(JitAllocatorBlock)));
  BitWord* bitWords = nullptr;
  VirtMem::DualMapping virtMem {};
  Error err = kErrorOutOfMemory;

  // One buffer backs both the `used` and `stop` bit-vectors (hence `* 2`).
  if (block != nullptr)
    bitWords = static_cast<BitWord*>(::malloc(size_t(numBitWords) * 2 * sizeof(BitWord)));

  uint32_t blockFlags = 0;
  if (bitWords != nullptr) {
    if (impl->options & JitAllocator::kOptionUseDualMapping) {
      err = VirtMem::allocDualMapping(&virtMem, blockSize, VirtMem::kAccessReadWrite | VirtMem::kAccessExecute);
      blockFlags |= JitAllocatorBlock::kFlagDualMapped;
    }
    else {
      // Single mapping - the RX and RW pointers alias the same region.
      err = VirtMem::alloc(&virtMem.ro, blockSize, VirtMem::kAccessReadWrite | VirtMem::kAccessExecute);
      virtMem.rw = virtMem.ro;
    }
  }

  // Out of memory.
  if (ASMJIT_UNLIKELY(!block || !bitWords || err != kErrorOk)) {
    if (bitWords) ::free(bitWords);
    if (block) ::free(block);
    return nullptr;
  }

  // Fill the memory if the secure mode is enabled.
  if (impl->options & JitAllocator::kOptionFillUnusedMemory)
    JitAllocatorImpl_fillPattern(virtMem.rw, impl->fillPattern, blockSize);

  // Both bit-vectors start zeroed (nothing used, no sentinels).
  memset(bitWords, 0, size_t(numBitWords) * 2 * sizeof(BitWord));
  return new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize);
}
+
//! Releases all resources owned by `block`: its virtual memory mapping, the
//! single heap buffer that holds both bit-vectors (`usedBitVector` is its
//! base pointer), and the malloc'ed block header itself.
static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
  DebugUtils::unused(impl);

  if (block->flags & JitAllocatorBlock::kFlagDualMapped)
    VirtMem::releaseDualMapping(&block->mapping, block->blockSize);
  else
    VirtMem::release(block->mapping.ro, block->blockSize);

  // `stopBitVector` shares the `usedBitVector` allocation - one free suffices.
  ::free(block->usedBitVector);
  ::free(block);
}
+
+static void JitAllocatorImpl_insertBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+ JitAllocatorPool* pool = block->pool;
+
+ if (!pool->cursor)
+ pool->cursor = block;
+
+ // Add to RBTree and List.
+ impl->tree.insert(block);
+ pool->blocks.append(block);
+
+ // Update statistics.
+ pool->blockCount++;
+ pool->totalAreaSize += block->areaSize;
+ pool->totalOverheadBytes += sizeof(JitAllocatorBlock) + JitAllocatorImpl_bitVectorSizeToByteSize(block->areaSize) * 2u;
+}
+
+static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+ JitAllocatorPool* pool = block->pool;
+
+ // Remove from RBTree and List.
+ if (pool->cursor == block)
+ pool->cursor = block->hasPrev() ? block->prev() : block->next();
+
+ impl->tree.remove(block);
+ pool->blocks.unlink(block);
+
+ // Update statistics.
+ pool->blockCount--;
+ pool->totalAreaSize -= block->areaSize;
+ pool->totalOverheadBytes -= sizeof(JitAllocatorBlock) + JitAllocatorImpl_bitVectorSizeToByteSize(block->areaSize) * 2u;
+}
+
//! Wipes `block` back to a pristine empty state: optionally re-fills all
//! previously used areas with the fill pattern, clears both bit-vectors,
//! resets the search hints, and marks the block empty (no-op if it already is).
static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
  JitAllocatorPool* pool = block->pool;

  if (block->hasFlag(JitAllocatorBlock::kFlagEmpty))
    return;

  uint32_t areaSize = block->areaSize;
  uint32_t granularity = pool->granularity;
  size_t numBitWords = pool->bitWordCountFromAreaSize(areaSize);

  if (impl->options & JitAllocator::kOptionFillUnusedMemory) {
    // Walk runs of set bits in the used bit-vector; each pair of flips
    // delimits one used region [start, end) to overwrite with the pattern.
    BitFlipIterator<Support::BitWord> it(block->usedBitVector, numBitWords);

    while (it.hasNext()) {
      uint32_t start = uint32_t(it.nextAndFlip());
      uint32_t end = areaSize;

      if (it.hasNext())
        end = uint32_t(it.nextAndFlip());

      JitAllocatorImpl_fillPattern(block->rwPtr() + start * granularity, impl->fillPattern, (end - start) * granularity);
    }
  }

  memset(block->usedBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord));
  memset(block->stopBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord));

  // Reset bookkeeping: fully free, search the whole block next time.
  block->areaUsed = 0;
  block->largestUnusedArea = areaSize;
  block->searchStart = 0;
  block->searchEnd = areaSize;
  block->addFlags(JitAllocatorBlock::kFlagEmpty);
  block->clearFlags(JitAllocatorBlock::kFlagDirty);
}
+
+// ============================================================================
+// [asmjit::JitAllocator - Construction / Destruction]
+// ============================================================================
+
+JitAllocator::JitAllocator(const CreateParams* params) noexcept {
+ _impl = JitAllocatorImpl_new(params);
+ if (ASMJIT_UNLIKELY(!_impl))
+ _impl = const_cast<JitAllocator::Impl*>(&JitAllocatorImpl_none);
+}
+
+JitAllocator::~JitAllocator() noexcept {
+ if (_impl == &JitAllocatorImpl_none)
+ return;
+
+ reset(Globals::kResetHard);
+ JitAllocatorImpl_destroy(static_cast<JitAllocatorPrivateImpl*>(_impl));
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - Reset]
+// ============================================================================
+
+void JitAllocator::reset(uint32_t resetPolicy) noexcept {
+ if (_impl == &JitAllocatorImpl_none)
+ return;
+
+ JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+ impl->tree.reset();
+ size_t poolCount = impl->poolCount;
+
+ for (size_t poolId = 0; poolId < poolCount; poolId++) {
+ JitAllocatorPool& pool = impl->pools[poolId];
+ JitAllocatorBlock* block = pool.blocks.first();
+
+ JitAllocatorBlock* blockToKeep = nullptr;
+ if (resetPolicy != Globals::kResetHard && !(impl->options & kOptionImmediateRelease)) {
+ blockToKeep = block;
+ block = block->next();
+ }
+
+ while (block) {
+ JitAllocatorBlock* next = block->next();
+ JitAllocatorImpl_deleteBlock(impl, block);
+ block = next;
+ }
+
+ pool.reset();
+
+ if (blockToKeep) {
+ blockToKeep->_listNodes[0] = nullptr;
+ blockToKeep->_listNodes[1] = nullptr;
+ JitAllocatorImpl_wipeOutBlock(impl, blockToKeep);
+ JitAllocatorImpl_insertBlock(impl, blockToKeep);
+ pool.emptyBlockCount = 1;
+ }
+ }
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - Statistics]
+// ============================================================================
+
+JitAllocator::Statistics JitAllocator::statistics() const noexcept {
+ Statistics statistics;
+ statistics.reset();
+
+ if (ASMJIT_LIKELY(_impl != &JitAllocatorImpl_none)) {
+ JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+ LockGuard guard(impl->lock);
+
+ size_t poolCount = impl->poolCount;
+ for (size_t poolId = 0; poolId < poolCount; poolId++) {
+ const JitAllocatorPool& pool = impl->pools[poolId];
+ statistics._blockCount += size_t(pool.blockCount);
+ statistics._reservedSize += size_t(pool.totalAreaSize) * pool.granularity;
+ statistics._usedSize += size_t(pool.totalAreaUsed) * pool.granularity;
+ statistics._overheadSize += size_t(pool.totalOverheadBytes);
+ }
+ }
+
+ return statistics;
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - Alloc / Release]
+// ============================================================================
+
//! Allocates `size` bytes of executable memory, returning both the
//! read(+execute) pointer and the read+write pointer (identical when dual
//! mapping is not used). The size is rounded up to the pool's granularity.
Error JitAllocator::alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept {
  if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
    return DebugUtils::errored(kErrorNotInitialized);

  JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
  constexpr uint32_t kNoIndex = std::numeric_limits<uint32_t>::max();

  *roPtrOut = nullptr;
  *rwPtrOut = nullptr;

  // Align to the minimum granularity by default.
  size = Support::alignUp<size_t>(size, impl->granularity);
  if (ASMJIT_UNLIKELY(size == 0))
    return DebugUtils::errored(kErrorInvalidArgument);

  if (ASMJIT_UNLIKELY(size > std::numeric_limits<uint32_t>::max() / 2))
    return DebugUtils::errored(kErrorTooLarge);

  LockGuard guard(impl->lock);
  JitAllocatorPool* pool = &impl->pools[JitAllocatorImpl_sizeToPoolId(impl, size)];

  uint32_t areaIndex = kNoIndex;
  uint32_t areaSize = uint32_t(pool->areaSizeFromByteSize(size));

  // Try to find the requested memory area in existing blocks.
  JitAllocatorBlock* block = pool->blocks.first();
  if (block) {
    JitAllocatorBlock* initial = block;
    do {
      // Circular traversal over the pool's block list.
      JitAllocatorBlock* next = block->hasNext() ? block->next() : pool->blocks.first();
      if (block->areaAvailable() >= areaSize) {
        // Only scan blocks that may contain a large-enough hole: either the
        // cached `largestUnusedArea` says so, or the cache is stale (dirty).
        if (block->hasFlag(JitAllocatorBlock::kFlagDirty) || block->largestUnusedArea >= areaSize) {
          uint32_t blockAreaSize = block->areaSize;
          uint32_t searchStart = block->searchStart;
          uint32_t searchEnd = block->searchEnd;

          // Iterates 0<->1 transitions of the used bit-vector; each pair of
          // flips delimits one unused hole within [searchStart, searchEnd).
          BitFlipIterator<Support::BitWord> it(
            block->usedBitVector,
            pool->bitWordCountFromAreaSize(searchEnd),
            searchStart,
            Support::allOnes<Support::BitWord>());

          // If there is unused area available then there has to be at least one match.
          ASMJIT_ASSERT(it.hasNext());

          // Best-fit scan: pick the smallest hole that still fits `areaSize`,
          // while remembering the largest hole that was not selected.
          uint32_t bestArea = blockAreaSize;
          uint32_t largestArea = 0;
          uint32_t holeIndex = uint32_t(it.peekNext());
          uint32_t holeEnd = holeIndex;

          searchStart = holeIndex;
          do {
            holeIndex = uint32_t(it.nextAndFlip());
            if (holeIndex >= searchEnd) break;

            holeEnd = it.hasNext() ? Support::min(searchEnd, uint32_t(it.nextAndFlip())) : searchEnd;
            uint32_t holeSize = holeEnd - holeIndex;

            if (holeSize >= areaSize && bestArea >= holeSize) {
              largestArea = Support::max(largestArea, bestArea);
              bestArea = holeSize;
              areaIndex = holeIndex;
            }
            else {
              largestArea = Support::max(largestArea, holeSize);
            }
          } while (it.hasNext());
          searchEnd = holeEnd;

          // Because we have traversed the entire block, we can now mark the
          // largest unused area that can be used to cache the next traversal.
          block->searchStart = searchStart;
          block->searchEnd = searchEnd;
          block->largestUnusedArea = largestArea;
          block->clearFlags(JitAllocatorBlock::kFlagDirty);

          if (areaIndex != kNoIndex) {
            if (searchStart == areaIndex)
              block->searchStart += areaSize;
            break;
          }
        }
      }

      block = next;
    } while (block != initial);
  }

  // Allocate a new block if there is no region of a required width.
  if (areaIndex == kNoIndex) {
    size_t blockSize = JitAllocatorImpl_calculateIdealBlockSize(impl, pool, size);
    if (ASMJIT_UNLIKELY(!blockSize))
      return DebugUtils::errored(kErrorOutOfMemory);

    block = JitAllocatorImpl_newBlock(impl, pool, blockSize);

    if (ASMJIT_UNLIKELY(!block))
      return DebugUtils::errored(kErrorOutOfMemory);

    JitAllocatorImpl_insertBlock(impl, block);
    // The fresh block is used from its beginning.
    areaIndex = 0;
    block->searchStart = areaSize;
    block->largestUnusedArea = block->areaSize - areaSize;
  }

  // Update statistics.
  block->increaseUsedArea(areaSize);

  // Handle special cases.
  if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) {
    pool->emptyBlockCount--;
    block->clearFlags(JitAllocatorBlock::kFlagEmpty);
  }

  if (block->areaAvailable() == 0) {
    // The whole block is filled.
    block->searchStart = block->areaSize;
    block->searchEnd = 0;
    block->largestUnusedArea = 0;
    block->clearFlags(JitAllocatorBlock::kFlagDirty);
  }

  // Mark the newly allocated space as occupied and also the sentinel.
  Support::bitVectorFill(block->usedBitVector, areaIndex, areaSize);
  Support::bitVectorSetBit(block->stopBitVector, areaIndex + areaSize - 1, true);

  // Return a pointer to the allocated memory.
  size_t offset = pool->byteSizeFromAreaSize(areaIndex);
  ASMJIT_ASSERT(offset <= block->blockSize - size);

  *roPtrOut = block->roPtr() + offset;
  *rwPtrOut = block->rwPtr() + offset;
  return kErrorOk;
}
+
//! Releases a memory region previously returned by `alloc()` and identified
//! by its read-only pointer `ro`.
Error JitAllocator::release(void* ro) noexcept {
  if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
    return DebugUtils::errored(kErrorNotInitialized);

  if (ASMJIT_UNLIKELY(!ro))
    return DebugUtils::errored(kErrorInvalidArgument);

  JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
  LockGuard guard(impl->lock);

  // The tree's key comparison maps any address inside a block's range back to it.
  JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(ro));
  if (ASMJIT_UNLIKELY(!block))
    return DebugUtils::errored(kErrorInvalidState);

  // Offset relative to the start of the block.
  JitAllocatorPool* pool = block->pool;
  size_t offset = (size_t)((uint8_t*)ro - block->roPtr());

  // The first bit representing the allocated area and its size.
  // The extent is recovered from the sentinel (stop bit) written by `alloc()`.
  uint32_t areaIndex = uint32_t(offset >> pool->granularityLog2);
  uint32_t areaLast = uint32_t(Support::bitVectorIndexOf(block->stopBitVector, areaIndex, true));
  uint32_t areaSize = areaLast - areaIndex + 1;

  // Update the search region and statistics.
  block->searchStart = Support::min(block->searchStart, areaIndex);
  block->searchEnd = Support::max(block->searchEnd, areaLast + 1);
  block->addFlags(JitAllocatorBlock::kFlagDirty);
  block->decreaseUsedArea(areaSize);

  // Clear all occupied bits and also the sentinel.
  Support::bitVectorClear(block->usedBitVector, areaIndex, areaSize);
  Support::bitVectorSetBit(block->stopBitVector, areaLast, false);

  // Fill the released memory if the secure mode is enabled.
  if (impl->options & kOptionFillUnusedMemory)
    JitAllocatorImpl_fillPattern(block->rwPtr() + areaIndex * pool->granularity, impl->fillPattern, areaSize * pool->granularity);

  // Release the whole block if it became empty.
  if (block->areaUsed == 0) {
    // Keep at most one empty block per pool as a cache for future allocations.
    if (pool->emptyBlockCount || (impl->options & kOptionImmediateRelease)) {
      JitAllocatorImpl_removeBlock(impl, block);
      JitAllocatorImpl_deleteBlock(impl, block);
    }
    else {
      pool->emptyBlockCount++;
      // NOTE(review): the block is now entirely free, yet the hints below use
      // the *released allocation's* `areaSize`, not `block->areaSize`; larger
      // follow-up allocations may therefore skip this empty block until it is
      // marked dirty again - confirm whether `block->areaSize` was intended.
      block->largestUnusedArea = areaSize;
      block->searchStart = 0;
      block->searchEnd = areaSize;
      block->addFlags(JitAllocatorBlock::kFlagEmpty);
      block->clearFlags(JitAllocatorBlock::kFlagDirty);
    }
  }

  return kErrorOk;
}
+
+Error JitAllocator::shrink(void* ro, size_t newSize) noexcept {
+ if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(!ro))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (ASMJIT_UNLIKELY(newSize == 0))
+ return release(ro);
+
+ JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+ LockGuard guard(impl->lock);
+ JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(ro));
+
+ if (ASMJIT_UNLIKELY(!block))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ // Offset relative to the start of the block.
+ JitAllocatorPool* pool = block->pool;
+ size_t offset = (size_t)((uint8_t*)ro - block->roPtr());
+
+ // The first bit representing the allocated area and its size.
+ uint32_t areaIndex = uint32_t(offset >> pool->granularityLog2);
+ uint32_t areaOldSize = uint32_t(Support::bitVectorIndexOf(block->stopBitVector, areaIndex, true)) + 1 - areaIndex;
+ uint32_t areaNewSize = pool->areaSizeFromByteSize(newSize);
+
+ if (ASMJIT_UNLIKELY(areaNewSize > areaOldSize))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ uint32_t areaDiff = areaOldSize - areaNewSize;
+ if (!areaDiff)
+ return kErrorOk;
+
+ // Update the search region and statistics.
+ block->searchStart = Support::min(block->searchStart, areaIndex + areaNewSize);
+ block->searchEnd = Support::max(block->searchEnd, areaIndex + areaOldSize);
+ block->addFlags(JitAllocatorBlock::kFlagDirty);
+ block->decreaseUsedArea(areaDiff);
+
+ // Unmark the released space and move the sentinel.
+ Support::bitVectorClear(block->usedBitVector, areaIndex + areaNewSize, areaDiff);
+ Support::bitVectorSetBit(block->stopBitVector, areaIndex + areaOldSize - 1, false);
+ Support::bitVectorSetBit(block->stopBitVector, areaIndex + areaNewSize - 1, true);
+
+ // Fill released memory if the secure mode is enabled.
+ if (impl->options & kOptionFillUnusedMemory)
+ JitAllocatorImpl_fillPattern(
+ block->rwPtr() + (areaIndex + areaOldSize) * pool->granularity,
+ fillPattern(),
+ areaDiff * pool->granularity);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+// A pseudo random number generator based on a paper by Sebastiano Vigna:
+// http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf
+class Random {
+public:
+ // Constants suggested as `23/18/5`.
+ enum Steps : uint32_t {
+ kStep1_SHL = 23,
+ kStep2_SHR = 18,
+ kStep3_SHR = 5
+ };
+
+ inline explicit Random(uint64_t seed = 0) noexcept { reset(seed); }
+ inline Random(const Random& other) noexcept = default;
+
+ inline void reset(uint64_t seed = 0) noexcept {
+ // The number is arbitrary, it means nothing.
+ constexpr uint64_t kZeroSeed = 0x1F0A2BE71D163FA0u;
+
+ // Generate the state data by using splitmix64.
+ for (uint32_t i = 0; i < 2; i++) {
+ seed += 0x9E3779B97F4A7C15u;
+ uint64_t x = seed;
+ x = (x ^ (x >> 30)) * 0xBF58476D1CE4E5B9u;
+ x = (x ^ (x >> 27)) * 0x94D049BB133111EBu;
+ x = (x ^ (x >> 31));
+ _state[i] = x != 0 ? x : kZeroSeed;
+ }
+ }
+
+ inline uint32_t nextUInt32() noexcept {
+ return uint32_t(nextUInt64() >> 32);
+ }
+
+ inline uint64_t nextUInt64() noexcept {
+ uint64_t x = _state[0];
+ uint64_t y = _state[1];
+
+ x ^= x << kStep1_SHL;
+ y ^= y >> kStep3_SHR;
+ x ^= x >> kStep2_SHR;
+ x ^= y;
+
+ _state[0] = y;
+ _state[1] = x;
+ return x + y;
+ }
+
+ uint64_t _state[2];
+};
+
// Helper class to verify that JitAllocator doesn't return addresses that overlap.
//
// Every live allocation is mirrored in an RBTree of [addr, addr + size)
// `Record`s; `_insert()` fails the test if a new allocation intersects any
// recorded range, `_remove()` fails it if an unknown address is released.
class JitAllocatorWrapper {
public:
  inline explicit JitAllocatorWrapper(const JitAllocator::CreateParams* params) noexcept
    : _zone(1024 * 1024),
      _heap(&_zone),
      _allocator(params) {}

  // Address to a memory region of a given size.
  class Range {
  public:
    inline Range(uint8_t* addr, size_t size) noexcept
      : addr(addr),
        size(size) {}
    uint8_t* addr;
    size_t size;
  };

  // Based on JitAllocator::Block, serves our purpose well...
  class Record : public ZoneTreeNodeT<Record>,
                 public Range {
  public:
    inline Record(uint8_t* addr, size_t size)
      : ZoneTreeNodeT<Record>(),
        Range(addr, size) {}

    inline bool operator<(const Record& other) const noexcept { return addr < other.addr; }
    inline bool operator>(const Record& other) const noexcept { return addr > other.addr; }

    // Key comparison - a record matches any address inside its range.
    inline bool operator<(const uint8_t* key) const noexcept { return addr + size <= key; }
    inline bool operator>(const uint8_t* key) const noexcept { return addr > key; }
  };

  // Records [p_, p_ + size); fails the test if either endpoint already
  // falls into an existing record (overlap detection).
  void _insert(void* p_, size_t size) noexcept {
    uint8_t* p = static_cast<uint8_t*>(p_);
    uint8_t* pEnd = p + size - 1;

    Record* record;

    record = _records.get(p);
    if (record)
      EXPECT(record == nullptr,
        "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);

    record = _records.get(pEnd);
    if (record)
      EXPECT(record == nullptr,
        "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);

    record = _heap.newT<Record>(p, size);
    EXPECT(record != nullptr,
      "Out of memory, cannot allocate 'Record'");

    _records.insert(record);
  }

  // Forgets the record that contains `p`; fails the test if none exists.
  void _remove(void* p) noexcept {
    Record* record = _records.get(static_cast<uint8_t*>(p));
    EXPECT(record != nullptr,
      "Address [%p] doesn't exist\n", p);

    _records.remove(record);
    _heap.release(record, sizeof(Record));
  }

  // Allocates through the wrapped JitAllocator and records the RO pointer.
  void* alloc(size_t size) noexcept {
    void* roPtr;
    void* rwPtr;

    Error err = _allocator.alloc(&roPtr, &rwPtr, size);
    EXPECT(err == kErrorOk,
      "JitAllocator failed to allocate '%u' bytes\n", unsigned(size));

    _insert(roPtr, size);
    return roPtr;
  }

  // Releases `p` through the wrapped JitAllocator and drops its record.
  void release(void* p) noexcept {
    _remove(p);
    EXPECT(_allocator.release(p) == kErrorOk,
      "JitAllocator failed to release '%p'\n", p);
  }

  Zone _zone;
  ZoneAllocator _heap;
  ZoneTree<Record> _records;
  JitAllocator _allocator;
};
+
+static void JitAllocatorTest_shuffle(void** ptrArray, size_t count, Random& prng) noexcept {
+ for (size_t i = 0; i < count; ++i)
+ std::swap(ptrArray[i], ptrArray[size_t(prng.nextUInt32() % count)]);
+}
+
// Prints the allocator's current statistics (block count, reserved/used
// virtual memory and heap overhead) via the test framework's INFO output.
static void JitAllocatorTest_usage(JitAllocator& allocator) noexcept {
  JitAllocator::Statistics stats = allocator.statistics();
  INFO("  Block Count       : %9llu [Blocks]" , (unsigned long long)(stats.blockCount()));
  INFO("  Reserved (VirtMem): %9llu [Bytes]" , (unsigned long long)(stats.reservedSize()));
  INFO("  Used     (VirtMem): %9llu [Bytes] (%.1f%%)", (unsigned long long)(stats.usedSize()), stats.usedSizeAsPercent());
  INFO("  Overhead (HeapMem): %9llu [Bytes] (%.1f%%)", (unsigned long long)(stats.overheadSize()), stats.overheadSizeAsPercent());
}
+
+UNIT(jit_allocator) {
+ size_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 100000;
+
+ struct TestParams {
+ const char* name;
+ uint32_t options;
+ uint32_t blockSize;
+ uint32_t granularity;
+ };
+
+ #define OPT(OPTION) JitAllocator::OPTION
+ static TestParams testParams[] = {
+ { "Default", 0, 0, 0 },
+ { "16MB blocks", 0, 16 * 1024 * 1024, 0 },
+ { "256B granularity", 0, 0, 256 },
+ { "kOptionUseDualMapping", OPT(kOptionUseDualMapping), 0, 0 },
+ { "kOptionUseMultiplePools", OPT(kOptionUseMultiplePools), 0, 0 },
+ { "kOptionFillUnusedMemory", OPT(kOptionFillUnusedMemory), 0, 0 },
+ { "kOptionImmediateRelease", OPT(kOptionImmediateRelease), 0, 0 },
+ { "kOptionUseDualMapping | kOptionFillUnusedMemory", OPT(kOptionUseDualMapping) | OPT(kOptionFillUnusedMemory), 0, 0 }
+ };
+ #undef OPT
+
+ INFO("BitFlipIterator<uint32_t>");
+ {
+ static const uint32_t bits[] = { 0x80000000u, 0x80000000u, 0x00000000u, 0x80000000u };
+ BitFlipIterator<uint32_t> it(bits, ASMJIT_ARRAY_SIZE(bits));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 31);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 32);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 63);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 64);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 127);
+ EXPECT(!it.hasNext());
+ }
+
+ INFO("BitFlipIterator<uint64_t>");
+ {
+ static const uint64_t bits[] = { 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFFFFFFFFF, 0, 0 };
+ BitFlipIterator<uint64_t> it(bits, ASMJIT_ARRAY_SIZE(bits));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 0);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 128);
+ EXPECT(!it.hasNext());
+ }
+
+ for (uint32_t testId = 0; testId < ASMJIT_ARRAY_SIZE(testParams); testId++) {
+ INFO("Testing JitAllocator: %s", testParams[testId].name);
+
+ JitAllocator::CreateParams params {};
+ params.options = testParams[testId].options;
+ params.blockSize = testParams[testId].blockSize;
+ params.granularity = testParams[testId].granularity;
+
+ JitAllocatorWrapper wrapper(&params);
+ Random prng(100);
+
+ size_t i;
+
+ INFO(" Memory alloc/release test - %d allocations", kCount);
+
+ void** ptrArray = (void**)::malloc(sizeof(void*) * size_t(kCount));
+ EXPECT(ptrArray != nullptr,
+ "Couldn't allocate '%u' bytes for pointer-array", unsigned(sizeof(void*) * size_t(kCount)));
+
+ INFO(" Allocating virtual memory...");
+ for (i = 0; i < kCount; i++)
+ ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Releasing virtual memory...");
+ for (i = 0; i < kCount; i++)
+ wrapper.release(ptrArray[i]);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Allocating virtual memory...", kCount);
+ for (i = 0; i < kCount; i++)
+ ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Shuffling...");
+ JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng);
+
+ INFO(" Releasing 50%% blocks...");
+ for (i = 0; i < kCount / 2; i++)
+ wrapper.release(ptrArray[i]);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Allocating 50%% blocks again...");
+ for (i = 0; i < kCount / 2; i++)
+ ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Releasing virtual memory...");
+ for (i = 0; i < kCount; i++)
+ wrapper.release(ptrArray[i]);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ ::free(ptrArray);
+ }
+}
+#endif
+
+ASMJIT_END_NAMESPACE
+
+#endif
diff --git a/client/asmjit/core/jitallocator.h b/client/asmjit/core/jitallocator.h
new file mode 100644
index 0000000..9cd0a1f
--- /dev/null
+++ b/client/asmjit/core/jitallocator.h
@@ -0,0 +1,278 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
+#define ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/globals.h"
+#include "../core/virtmem.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_virtual_memory
+//! \{
+
+// ============================================================================
+// [asmjit::JitAllocator]
+// ============================================================================
+
+//! A simple implementation of memory manager that uses `asmjit::VirtMem`
+//! functions to manage virtual memory for JIT compiled code.
+//!
+//! Implementation notes:
+//!
+//! - Granularity of allocated blocks is different than granularity for a typical
+//! C malloc. In addition, the allocator can use several memory pools having a
+//! different granularity to minimize the maintenance overhead. Multiple pools
+//! feature requires `kOptionUseMultiplePools` flag to be set.
+//!
+//! - The allocator doesn't store any information in executable memory, instead,
+//! the implementation uses two bit-vectors to manage allocated memory of each
+//! allocator-block. The first bit-vector called 'used' is used to track used
+//! memory (where each bit represents memory size defined by granularity) and
+//! the second bit vector called 'stop' is used as a sentinel to mark where
+//! the allocated area ends.
+//!
+//! - Internally, the allocator also uses RB tree to keep track of all blocks
+//! across all pools. Each inserted block is added to the tree so it can be
+//! matched fast during `release()` and `shrink()`.
+class JitAllocator {
+public:
+ ASMJIT_NONCOPYABLE(JitAllocator)
+
+ struct Impl {
+ //! Allocator options, see \ref JitAllocator::Options.
+ uint32_t options;
+ //! Base block size (0 if the allocator is not initialized).
+ uint32_t blockSize;
+ //! Base granularity (0 if the allocator is not initialized).
+ uint32_t granularity;
+ //! A pattern that is used to fill unused memory if secure mode is enabled.
+ uint32_t fillPattern;
+ };
+
+ //! Allocator implementation (private).
+ Impl* _impl;
+
+ enum Options : uint32_t {
+ //! Enables the use of an anonymous memory-mapped memory that is mapped into
+ //! two buffers having a different pointer. The first buffer has read and
+ //! execute permissions and the second buffer has read+write permissions.
+ //!
+ //! See \ref VirtMem::allocDualMapping() for more details about this feature.
+ kOptionUseDualMapping = 0x00000001u,
+
+ //! Enables the use of multiple pools with increasing granularity instead of
+ //! a single pool. This flag would enable 3 internal pools in total having
+ //! 64, 128, and 256 bytes granularity.
+ //!
+ //! This feature is only recommended for users that generate a lot of code
+ //! and would like to minimize the overhead of `JitAllocator` itself by
+ //! having blocks of different allocation granularities. Using this feature
+ //! only for few allocations won't pay off as the allocator may need to
+ //! create more blocks initially before it can take the advantage of
+ //! variable block granularity.
+ kOptionUseMultiplePools = 0x00000002u,
+
+ //! Always fill reserved memory by a fill-pattern.
+ //!
+ //! Causes a new block to be cleared by the fill pattern and freshly
+ //! released memory to be cleared before making it ready for another use.
+ kOptionFillUnusedMemory = 0x00000004u,
+
+ //! When this flag is set the allocator would immediately release unused
+ //! blocks during `release()` or `reset()`. When this flag is not set the
+ //! allocator would keep one empty block in each pool to prevent excessive
+ //! virtual memory allocations and deallocations in border cases, which
+ //! involve constantly allocating and deallocating a single block caused
+ //! by repetitive calling `alloc()` and `release()` when the allocator has
+ //! either no blocks or have all blocks fully occupied.
+ kOptionImmediateRelease = 0x00000008u,
+
+ //! Use a custom fill pattern, must be combined with `kOptionFillUnusedMemory`.
+ kOptionCustomFillPattern = 0x10000000u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Parameters that can be passed to `JitAllocator` constructor.
+ //!
+ //! Use it like this:
+ //!
+ //! ```
+ //! // Zero initialize (zero means the default value) and change what you need.
+ //! JitAllocator::CreateParams params {};
+ //! params.blockSize = 1024 * 1024;
+ //!
+ //! // Create the allocator.
+ //! JitAllocator allocator(&params);
+ //! ```
+ struct CreateParams {
+ //! Allocator options, see \ref JitAllocator::Options.
+ //!
+ //! No options are used by default.
+ uint32_t options;
+
+ //! Base size of a single block in bytes (default 64kB).
+ //!
+ //! \remarks Block size must be equal or greater to page size and must be
+ //! power of 2. If the input is not valid then the default block size will
+ //! be used instead.
+ uint32_t blockSize;
+
+ //! Base granularity (and also natural alignment) of allocations in bytes
+ //! (default 64).
+ //!
+ //! Since the `JitAllocator` uses bit-arrays to mark used memory the
+ //! granularity also specifies how many bytes correspond to a single bit in
+ //! such bit-array. Higher granularity means more waste of virtual memory
+ //! (as it increases the natural alignment), but smaller bit-arrays as less
+ //! bits would be required per a single block.
+ uint32_t granularity;
+
+ //! Pattern to use to fill unused memory.
+ //!
+ //! Only used if \ref kOptionCustomFillPattern is set.
+ uint32_t fillPattern;
+
+ //! Resets the content of `CreateParams` (all zeros means defaults).
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+ };
+
+ //! Creates a `JitAllocator` instance.
+ explicit ASMJIT_API JitAllocator(const CreateParams* params = nullptr) noexcept;
+ //! Destroys the `JitAllocator` instance and releases all blocks held.
+ ASMJIT_API ~JitAllocator() noexcept;
+
+ //! Tests whether the allocator is initialized (`Impl::blockSize` is non-zero once initialized).
+ inline bool isInitialized() const noexcept { return _impl->blockSize != 0; }
+
+ //! Free all allocated memory - makes all pointers returned by `alloc()` invalid.
+ //!
+ //! \remarks This function is not thread-safe as it's designed to be used when
+ //! nobody else is using allocator. The reason is that there is no point of
+ //! calling `reset()` when the allocator is still in use.
+ ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns allocator options, see `Options`.
+ inline uint32_t options() const noexcept { return _impl->options; }
+ //! Tests whether the allocator has the given `option` set.
+ inline bool hasOption(uint32_t option) const noexcept { return (_impl->options & option) != 0; }
+
+ //! Returns a base block size (a minimum size of block that the allocator would allocate).
+ inline uint32_t blockSize() const noexcept { return _impl->blockSize; }
+ //! Returns granularity of the allocator.
+ inline uint32_t granularity() const noexcept { return _impl->granularity; }
+ //! Returns pattern that is used to fill unused memory if `kOptionFillUnusedMemory` is set.
+ inline uint32_t fillPattern() const noexcept { return _impl->fillPattern; }
+
+ //! \}
+
+ //! \name Alloc & Release
+ //! \{
+
+ //! Allocate `size` bytes of virtual memory.
+ //!
+ //! \remarks This function is thread-safe.
+ ASMJIT_API Error alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept;
+
+ //! Release a memory returned by `alloc()`.
+ //!
+ //! \remarks This function is thread-safe.
+ ASMJIT_API Error release(void* ro) noexcept;
+
+ //! Free extra memory allocated with `p` by restricting it to `newSize` size.
+ //!
+ //! \remarks This function is thread-safe.
+ ASMJIT_API Error shrink(void* ro, size_t newSize) noexcept;
+
+ //! \}
+
+ //! \name Statistics
+ //! \{
+
+ //! Statistics about `JitAllocator`.
+ struct Statistics {
+ //! Number of blocks `JitAllocator` maintains.
+ size_t _blockCount;
+ //! How many bytes are currently used / allocated.
+ size_t _usedSize;
+ //! How many bytes are currently reserved by the allocator.
+ size_t _reservedSize;
+ //! Allocation overhead (in bytes) required to maintain all blocks.
+ size_t _overheadSize;
+
+ inline void reset() noexcept {
+ _blockCount = 0;
+ _usedSize = 0;
+ _reservedSize = 0;
+ _overheadSize = 0;
+ }
+
+ //! Returns count of blocks managed by `JitAllocator` at the moment.
+ inline size_t blockCount() const noexcept { return _blockCount; }
+
+ //! Returns how many bytes are currently used.
+ inline size_t usedSize() const noexcept { return _usedSize; }
+ //! Returns the number of bytes unused by the allocator at the moment.
+ inline size_t unusedSize() const noexcept { return _reservedSize - _usedSize; }
+ //! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks).
+ inline size_t reservedSize() const noexcept { return _reservedSize; }
+ //! Returns the number of bytes the allocator needs to manage the allocated memory.
+ inline size_t overheadSize() const noexcept { return _overheadSize; }
+
+ inline double usedSizeAsPercent() const noexcept {
+ return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
+ }
+
+ inline double unusedSizeAsPercent() const noexcept {
+ return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
+ }
+
+ inline double overheadSizeAsPercent() const noexcept {
+ return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
+ }
+ };
+
+ //! Returns JIT allocator statistics.
+ //!
+ //! \remarks This function is thread-safe.
+ ASMJIT_API Statistics statistics() const noexcept;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif
+#endif
diff --git a/client/asmjit/core/jitruntime.cpp b/client/asmjit/core/jitruntime.cpp
new file mode 100644
index 0000000..1127c86
--- /dev/null
+++ b/client/asmjit/core/jitruntime.cpp
@@ -0,0 +1,120 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/cpuinfo.h"
+#include "../core/jitruntime.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::JitRuntime - Utilities]
+// ============================================================================
+
+// Flushes the CPU instruction cache for [p, p + size); only useful on non-x86 architectures.
+static inline void JitRuntime_flushInstructionCache(const void* p, size_t size) noexcept {
+#if defined(_WIN32) && !ASMJIT_ARCH_X86
+ // Windows has a built-in support in `kernel32.dll`.
+ ::FlushInstructionCache(::GetCurrentProcess(), p, size);
+#else
+ DebugUtils::unused(p, size); // No-op elsewhere (x86/x64 instruction caches are coherent).
+#endif
+}
+
+// ============================================================================
+// [asmjit::JitRuntime - Construction / Destruction]
+// ============================================================================
+
+JitRuntime::JitRuntime(const JitAllocator::CreateParams* params) noexcept
+ : _allocator(params) { // `params` may be null - the allocator then uses its defaults.
+ _environment = hostEnvironment(); // JIT code always targets the host environment.
+ _environment.setFormat(Environment::kFormatJIT);
+}
+
+JitRuntime::~JitRuntime() noexcept {}
+
+// ============================================================================
+// [asmjit::JitRuntime - Interface]
+// ============================================================================
+
+Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
+ *dst = nullptr; // Null the output first so it's well defined on every error path.
+
+ ASMJIT_PROPAGATE(code->flatten());
+ ASMJIT_PROPAGATE(code->resolveUnresolvedLinks());
+
+ size_t estimatedCodeSize = code->codeSize();
+ if (ASMJIT_UNLIKELY(estimatedCodeSize == 0))
+ return DebugUtils::errored(kErrorNoCodeGenerated);
+
+ uint8_t* ro; // Read-only (executable) view of the allocation.
+ uint8_t* rw; // Writable view of the same memory (see JitAllocator::alloc()).
+ ASMJIT_PROPAGATE(_allocator.alloc((void**)&ro, (void**)&rw, estimatedCodeSize));
+
+ // Relocate the code.
+ Error err = code->relocateToBase(uintptr_t((void*)ro));
+ if (ASMJIT_UNLIKELY(err)) {
+ _allocator.release(ro); // Don't leak the allocation if relocation failed.
+ return err;
+ }
+
+ // Recalculate the final code size and shrink the memory we allocated for it
+ // in case that some relocations didn't require records in an address table.
+ size_t codeSize = code->codeSize();
+
+ for (Section* section : code->_sections) {
+ size_t offset = size_t(section->offset());
+ size_t bufferSize = size_t(section->bufferSize());
+ size_t virtualSize = size_t(section->virtualSize());
+
+ ASMJIT_ASSERT(offset + bufferSize <= codeSize);
+ memcpy(rw + offset, section->data(), bufferSize); // Copy through the writable view.
+
+ if (virtualSize > bufferSize) {
+ ASMJIT_ASSERT(offset + virtualSize <= codeSize);
+ memset(rw + offset + bufferSize, 0, virtualSize - bufferSize); // Zero the uninitialized tail.
+ }
+ }
+
+ if (codeSize < estimatedCodeSize)
+ _allocator.shrink(ro, codeSize);
+
+ flush(ro, codeSize); // Flush the instruction cache (no-op on x86, see flush()).
+ *dst = ro; // Callers receive the executable (read-only) pointer.
+
+ return kErrorOk;
+}
+
+Error JitRuntime::_release(void* p) noexcept {
+ return _allocator.release(p); // `p` must be a pointer previously produced by `_add()`.
+}
+
+void JitRuntime::flush(const void* p, size_t size) noexcept {
+ JitRuntime_flushInstructionCache(p, size); // Delegates to the platform-specific helper.
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif
diff --git a/client/asmjit/core/jitruntime.h b/client/asmjit/core/jitruntime.h
new file mode 100644
index 0000000..91880e6
--- /dev/null
+++ b/client/asmjit/core/jitruntime.h
@@ -0,0 +1,126 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_JITRUNTIME_H_INCLUDED
+#define ASMJIT_CORE_JITRUNTIME_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/codeholder.h"
+#include "../core/jitallocator.h"
+#include "../core/target.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+class CodeHolder;
+
+//! \addtogroup asmjit_virtual_memory
+//! \{
+
+// ============================================================================
+// [asmjit::JitRuntime]
+// ============================================================================
+
+//! JIT execution runtime is a special `Target` that is designed to store and
+//! execute the generated code.
+class ASMJIT_VIRTAPI JitRuntime : public Target {
+public:
+ ASMJIT_NONCOPYABLE(JitRuntime)
+
+ //! Virtual memory allocator.
+ JitAllocator _allocator;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a `JitRuntime` instance.
+ explicit ASMJIT_API JitRuntime(const JitAllocator::CreateParams* params = nullptr) noexcept;
+ //! Destroys the `JitRuntime` instance.
+ ASMJIT_API virtual ~JitRuntime() noexcept;
+
+ //! Resets the runtime - see \ref JitAllocator::reset() for `resetPolicy` semantics.
+ inline void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept {
+ _allocator.reset(resetPolicy);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the associated `JitAllocator`.
+ inline JitAllocator* allocator() const noexcept { return const_cast<JitAllocator*>(&_allocator); }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ // NOTE: To allow passing function pointers to `add()` and `release()` the
+ // virtual methods are prefixed with `_` and called from templates instead.
+
+ //! Allocates memory needed for a code stored in the `CodeHolder` and relocates
+ //! the code to the pointer allocated.
+ //!
+ //! The beginning of the memory allocated for the function is returned in `dst`.
+ //! If failed `Error` code is returned and `dst` is explicitly set to `nullptr`
+ //! (this means that you don't have to set it to null before calling `add()`).
+ template<typename Func>
+ inline Error add(Func* dst, CodeHolder* code) noexcept {
+ return _add(Support::ptr_cast_impl<void**, Func*>(dst), code);
+ }
+
+ //! Releases `p` which was obtained by calling `add()`.
+ template<typename Func>
+ inline Error release(Func p) noexcept {
+ return _release(Support::ptr_cast_impl<void*, Func>(p));
+ }
+
+ //! Type-unsafe version of `add()`.
+ ASMJIT_API virtual Error _add(void** dst, CodeHolder* code) noexcept;
+
+ //! Type-unsafe version of `release()`.
+ ASMJIT_API virtual Error _release(void* p) noexcept;
+
+ //! Flushes an instruction cache.
+ //!
+ //! This member function is called after the code has been copied to the
+ //! destination buffer. It is only useful for JIT code generation as it
+ //! causes a flush of the processor's cache.
+ //!
+ //! Flushing is basically a NOP under X86, but is needed by architectures
+ //! that do not have a transparent instruction cache like ARM.
+ //!
+ //! This function can also be overridden to improve compatibility with tools
+ //! such as Valgrind, however, it's not an official part of AsmJit.
+ ASMJIT_API virtual void flush(const void* p, size_t size) noexcept;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif
+#endif
diff --git a/client/asmjit/core/logger.cpp b/client/asmjit/core/logger.cpp
new file mode 100644
index 0000000..22e0b9a
--- /dev/null
+++ b/client/asmjit/core/logger.cpp
@@ -0,0 +1,124 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_LOGGING
+
+#include "../core/logger.h"
+#include "../core/string.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Logger - Construction / Destruction]
+// ============================================================================
+
+Logger::Logger() noexcept
+ : _options() {} // Starts with default (empty) format options.
+Logger::~Logger() noexcept {}
+
+// ============================================================================
+// [asmjit::Logger - Logging]
+// ============================================================================
+
+Error Logger::logf(const char* fmt, ...) noexcept {
+ Error err;
+ va_list ap;
+
+ va_start(ap, fmt);
+ err = logv(fmt, ap); // Forward to the va_list overload, which performs the formatting.
+ va_end(ap); // Always paired with va_start, even on error.
+
+ return err;
+}
+
+Error Logger::logv(const char* fmt, va_list ap) noexcept {
+ StringTmp<2048> sb; // Temporary string builder with 2048 bytes of embedded storage.
+ ASMJIT_PROPAGATE(sb.appendVFormat(fmt, ap));
+ return log(sb); // Hand the formatted text to the concrete `_log()` implementation.
+}
+
+Error Logger::logBinary(const void* data, size_t size) noexcept {
+ static const char prefix[] = "db ";
+
+ StringTmp<256> sb;
+ sb.append(prefix, ASMJIT_ARRAY_SIZE(prefix) - 1); // -1 drops the NUL terminator.
+
+ size_t i = size;
+ const uint8_t* s = static_cast<const uint8_t*>(data);
+
+ while (i) {
+ uint32_t n = uint32_t(Support::min<size_t>(i, 16)); // Emit at most 16 bytes per line.
+ sb.truncate(ASMJIT_ARRAY_SIZE(prefix) - 1); // Rewind to just after "db " to reuse the buffer.
+ sb.appendHex(s, n);
+ sb.append('\n');
+ ASMJIT_PROPAGATE(log(sb)); // One "db <hex>" line per iteration.
+ s += n;
+ i -= n;
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::FileLogger - Construction / Destruction]
+// ============================================================================
+
+FileLogger::FileLogger(FILE* file) noexcept
+ : _file(file) {} // A null file disables output, see `_log()`.
+FileLogger::~FileLogger() noexcept {}
+
+// ============================================================================
+// [asmjit::FileLogger - Logging]
+// ============================================================================
+
+Error FileLogger::_log(const char* data, size_t size) noexcept {
+ if (!_file)
+ return kErrorOk; // Logging is disabled when no file is attached.
+
+ if (size == SIZE_MAX)
+ size = strlen(data); // SIZE_MAX means `data` is null-terminated.
+
+ fwrite(data, 1, size, _file); // NOTE: fwrite() errors are not propagated.
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::StringLogger - Construction / Destruction]
+// ============================================================================
+
+StringLogger::StringLogger() noexcept {} // `_content` starts empty.
+StringLogger::~StringLogger() noexcept {}
+
+// ============================================================================
+// [asmjit::StringLogger - Logging]
+// ============================================================================
+
+Error StringLogger::_log(const char* data, size_t size) noexcept {
+ return _content.append(data, size); // Accumulates all logged text into `_content`.
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif
diff --git a/client/asmjit/core/logger.h b/client/asmjit/core/logger.h
new file mode 100644
index 0000000..2840869
--- /dev/null
+++ b/client/asmjit/core/logger.h
@@ -0,0 +1,228 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_LOGGING_H_INCLUDED
+#define ASMJIT_CORE_LOGGING_H_INCLUDED
+
+#include "../core/inst.h"
+#include "../core/string.h"
+#include "../core/formatter.h"
+
+#ifndef ASMJIT_NO_LOGGING
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_logging
+//! \{
+
+// ============================================================================
+// [asmjit::Logger]
+// ============================================================================
+
+//! Logging interface.
+//!
+//! This class can be inherited and reimplemented to fit into your own logging
+//! needs. When reimplementing a logger override the \ref Logger::_log() method
+//! to customize the output.
+//!
+//! There are two `Logger` implementations offered by AsmJit:
+//! - \ref FileLogger - logs into a `FILE*`.
+//! - \ref StringLogger - concatenates all logs into a \ref String.
+class ASMJIT_VIRTAPI Logger {
+public:
+ ASMJIT_BASE_CLASS(Logger)
+ ASMJIT_NONCOPYABLE(Logger)
+
+ //! Format options.
+ FormatOptions _options;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a `Logger` instance.
+ ASMJIT_API Logger() noexcept;
+ //! Destroys the `Logger` instance.
+ ASMJIT_API virtual ~Logger() noexcept;
+
+ //! \}
+
+ //! \name Format Options
+ //! \{
+
+ //! Returns \ref FormatOptions of this logger.
+ inline FormatOptions& options() noexcept { return _options; }
+ //! \overload
+ inline const FormatOptions& options() const noexcept { return _options; }
+
+ //! Returns formatting flags, see \ref FormatOptions::Flags.
+ inline uint32_t flags() const noexcept { return _options.flags(); }
+ //! Tests whether the logger has the given `flag` enabled.
+ inline bool hasFlag(uint32_t flag) const noexcept { return _options.hasFlag(flag); }
+ //! Sets formatting flags to `flags`, see \ref FormatOptions::Flags.
+ inline void setFlags(uint32_t flags) noexcept { _options.setFlags(flags); }
+ //! Enables the given formatting `flags`, see \ref FormatOptions::Flags.
+ inline void addFlags(uint32_t flags) noexcept { _options.addFlags(flags); }
+ //! Disables the given formatting `flags`, see \ref FormatOptions::Flags.
+ inline void clearFlags(uint32_t flags) noexcept { _options.clearFlags(flags); }
+
+ //! Returns indentation of `type`, see \ref FormatOptions::IndentationType.
+ inline uint32_t indentation(uint32_t type) const noexcept { return _options.indentation(type); }
+ //! Sets indentation of the given indentation `type` to `n` spaces, see \ref
+ //! FormatOptions::IndentationType.
+ inline void setIndentation(uint32_t type, uint32_t n) noexcept { _options.setIndentation(type, n); }
+ //! Resets indentation of the given indentation `type` to 0 spaces.
+ inline void resetIndentation(uint32_t type) noexcept { _options.resetIndentation(type); }
+
+ //! \}
+
+ //! \name Logging Interface
+ //! \{
+
+ //! Logs `data` of the given `size` - must be reimplemented.
+ //!
+ //! The function can accept either a null terminated string if `size` is
+ //! `SIZE_MAX` or a non-null terminated string of the given `size`. The
+ //! function cannot assume that the data is null terminated and must handle
+ //! non-null terminated inputs.
+ virtual Error _log(const char* data, size_t size) noexcept = 0;
+
+ //! Logs string `data`, which is either null terminated or having size `size`.
+ inline Error log(const char* data, size_t size = SIZE_MAX) noexcept { return _log(data, size); }
+ //! Logs content of a string `str`.
+ inline Error log(const String& str) noexcept { return _log(str.data(), str.size()); }
+
+ //! Formats the message by using `snprintf()` and then passes the formatted
+ //! string to \ref _log().
+ ASMJIT_API Error logf(const char* fmt, ...) noexcept;
+
+ //! Formats the message by using `vsnprintf()` and then passes the formatted
+ //! string to \ref _log().
+ ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept;
+
+ //! Logs binary `data` of the given `size`.
+ ASMJIT_API Error logBinary(const void* data, size_t size) noexcept;
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FileLogger]
+// ============================================================================
+
+//! Logger that can log to a `FILE*`.
+class ASMJIT_VIRTAPI FileLogger : public Logger {
+public:
+ ASMJIT_NONCOPYABLE(FileLogger)
+
+ //! Output stream (may be null, which disables output).
+ FILE* _file;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `FileLogger` that logs to `FILE*`.
+ ASMJIT_API FileLogger(FILE* file = nullptr) noexcept;
+ //! Destroys the `FileLogger`.
+ ASMJIT_API virtual ~FileLogger() noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the logging output stream or null if the logger has no output
+ //! stream.
+ inline FILE* file() const noexcept { return _file; }
+
+ //! Sets the logging output stream to `file` or null.
+ //!
+ //! \note If the `file` is null the logging will be disabled. When a logger
+ //! is attached to `CodeHolder` or any emitter the logging API will always
+ //! be called regardless of the output file. This means that if you really
+ //! want to disable logging at emitter level you must not attach a logger
+ //! to it.
+ inline void setFile(FILE* file) noexcept { _file = file; }
+
+ //! \}
+
+ ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
+
+// ============================================================================
+// [asmjit::StringLogger]
+// ============================================================================
+
+//! Logger that stores everything in an internal string buffer.
+class ASMJIT_VIRTAPI StringLogger : public Logger {
+public:
+ ASMJIT_NONCOPYABLE(StringLogger)
+
+ //! Logger data as string.
+ String _content;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `StringLogger`.
+ ASMJIT_API StringLogger() noexcept;
+ //! Destroys the `StringLogger`.
+ ASMJIT_API virtual ~StringLogger() noexcept;
+
+ //! \}
+
+ //! \name Logger Data Accessors
+ //! \{
+
+ //! Returns the content of the logger as \ref String.
+ //!
+ //! It can be moved, if desired.
+ inline String& content() noexcept { return _content; }
+ //! \overload
+ inline const String& content() const noexcept { return _content; }
+
+ //! Returns aggregated logger data as `char*` pointer.
+ //!
+ //! The pointer is owned by `StringLogger`, it can't be modified or freed.
+ inline const char* data() const noexcept { return _content.data(); }
+ //! Returns size of the data returned by `data()`.
+ inline size_t dataSize() const noexcept { return _content.size(); }
+
+ //! \}
+
+ //! \name Logger Data Manipulation
+ //! \{
+
+ //! Clears the accumulated logger data.
+ inline void clear() noexcept { _content.clear(); }
+
+ //! \}
+
+ ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif
+
+#endif // ASMJIT_CORE_LOGGING_H_INCLUDED
diff --git a/client/asmjit/core/misc_p.h b/client/asmjit/core/misc_p.h
new file mode 100644
index 0000000..b1056f4
--- /dev/null
+++ b/client/asmjit/core/misc_p.h
@@ -0,0 +1,49 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_MISC_P_H_INCLUDED
+#define ASMJIT_CORE_MISC_P_H_INCLUDED
+
+#include "../core/api-config.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_utilities
+//! \{
+
+#define ASMJIT_LOOKUP_TABLE_8(T, I) T((I)), T((I+1)), T((I+2)), T((I+3)), T((I+4)), T((I+5)), T((I+6)), T((I+7))
+#define ASMJIT_LOOKUP_TABLE_16(T, I) ASMJIT_LOOKUP_TABLE_8(T, I), ASMJIT_LOOKUP_TABLE_8(T, I + 8)
+#define ASMJIT_LOOKUP_TABLE_32(T, I) ASMJIT_LOOKUP_TABLE_16(T, I), ASMJIT_LOOKUP_TABLE_16(T, I + 16)
+#define ASMJIT_LOOKUP_TABLE_64(T, I) ASMJIT_LOOKUP_TABLE_32(T, I), ASMJIT_LOOKUP_TABLE_32(T, I + 32)
+#define ASMJIT_LOOKUP_TABLE_128(T, I) ASMJIT_LOOKUP_TABLE_64(T, I), ASMJIT_LOOKUP_TABLE_64(T, I + 64)
+#define ASMJIT_LOOKUP_TABLE_256(T, I) ASMJIT_LOOKUP_TABLE_128(T, I), ASMJIT_LOOKUP_TABLE_128(T, I + 128)
+#define ASMJIT_LOOKUP_TABLE_512(T, I) ASMJIT_LOOKUP_TABLE_256(T, I), ASMJIT_LOOKUP_TABLE_256(T, I + 256)
+#define ASMJIT_LOOKUP_TABLE_1024(T, I) ASMJIT_LOOKUP_TABLE_512(T, I), ASMJIT_LOOKUP_TABLE_512(T, I + 512)
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_MISC_P_H_INCLUDED
diff --git a/client/asmjit/core/operand.cpp b/client/asmjit/core/operand.cpp
new file mode 100644
index 0000000..46bb23a
--- /dev/null
+++ b/client/asmjit/core/operand.cpp
@@ -0,0 +1,136 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Operand - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(operand) {
+ INFO("Checking operand sizes");
+ EXPECT(sizeof(Operand) == 16);
+ EXPECT(sizeof(BaseReg) == 16);
+ EXPECT(sizeof(BaseMem) == 16);
+ EXPECT(sizeof(Imm) == 16);
+ EXPECT(sizeof(Label) == 16);
+
+ INFO("Checking basic functionality of Operand");
+ Operand a, b;
+ Operand dummy;
+
+ EXPECT(a.isNone() == true);
+ EXPECT(a.isReg() == false);
+ EXPECT(a.isMem() == false);
+ EXPECT(a.isImm() == false);
+ EXPECT(a.isLabel() == false);
+ EXPECT(a == b);
+ EXPECT(a._data[0] == 0);
+ EXPECT(a._data[1] == 0);
+
+ INFO("Checking basic functionality of Label");
+ Label label;
+ EXPECT(label.isValid() == false);
+ EXPECT(label.id() == Globals::kInvalidId);
+
+ INFO("Checking basic functionality of BaseReg");
+ EXPECT(BaseReg().isReg() == true);
+ EXPECT(BaseReg().isValid() == false);
+ EXPECT(BaseReg()._data[0] == 0);
+ EXPECT(BaseReg()._data[1] == 0);
+ EXPECT(dummy.as<BaseReg>().isValid() == false);
+
+ // Create some register (not specific to any architecture).
+ uint32_t rSig = Operand::kOpReg | (1 << Operand::kSignatureRegTypeShift ) |
+ (2 << Operand::kSignatureRegGroupShift) |
+ (8 << Operand::kSignatureSizeShift ) ;
+ BaseReg r1(rSig, 5);
+
+ EXPECT(r1.isValid() == true);
+ EXPECT(r1.isReg() == true);
+ EXPECT(r1.isReg(1) == true);
+ EXPECT(r1.isPhysReg() == true);
+ EXPECT(r1.isVirtReg() == false);
+ EXPECT(r1.signature() == rSig);
+ EXPECT(r1.type() == 1);
+ EXPECT(r1.group() == 2);
+ EXPECT(r1.size() == 8);
+ EXPECT(r1.id() == 5);
+ EXPECT(r1.isReg(1, 5) == true); // RegType and Id.
+ EXPECT(r1._data[0] == 0);
+ EXPECT(r1._data[1] == 0);
+
+ // The same type of register having different id.
+ BaseReg r2(r1, 6);
+ EXPECT(r2.isValid() == true);
+ EXPECT(r2.isReg() == true);
+ EXPECT(r2.isReg(1) == true);
+ EXPECT(r2.isPhysReg() == true);
+ EXPECT(r2.isVirtReg() == false);
+ EXPECT(r2.signature() == rSig);
+ EXPECT(r2.type() == r1.type());
+ EXPECT(r2.group() == r1.group());
+ EXPECT(r2.size() == r1.size());
+ EXPECT(r2.id() == 6);
+ EXPECT(r2.isReg(1, 6) == true);
+
+ r1.reset();
+ EXPECT(!r1.isReg());
+ EXPECT(!r1.isValid());
+
+ INFO("Checking basic functionality of BaseMem");
+ BaseMem m;
+ EXPECT(m.isMem());
+ EXPECT(m == BaseMem());
+ EXPECT(m.hasBase() == false);
+ EXPECT(m.hasIndex() == false);
+ EXPECT(m.hasOffset() == false);
+ EXPECT(m.isOffset64Bit() == true);
+ EXPECT(m.offset() == 0);
+
+ m.setOffset(-1);
+ EXPECT(m.offsetLo32() == -1);
+ EXPECT(m.offset() == -1);
+
+ int64_t x = int64_t(0xFF00FF0000000001u);
+ int32_t xHi = int32_t(0xFF00FF00u);
+
+ m.setOffset(x);
+ EXPECT(m.offset() == x);
+ EXPECT(m.offsetLo32() == 1);
+ EXPECT(m.offsetHi32() == xHi);
+
+ INFO("Checking basic functionality of Imm");
+ Imm immValue(-42);
+ EXPECT(Imm(-1).value() == -1);
+ EXPECT(imm(-1).value() == -1);
+ EXPECT(immValue.value() == -42);
+ EXPECT(imm(0xFFFFFFFF).value() == int64_t(0xFFFFFFFF));
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/operand.h b/client/asmjit/core/operand.h
new file mode 100644
index 0000000..1faaeca
--- /dev/null
+++ b/client/asmjit/core/operand.h
@@ -0,0 +1,1418 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_OPERAND_H_INCLUDED
+#define ASMJIT_CORE_OPERAND_H_INCLUDED
+
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [Macros]
+// ============================================================================
+
+//! Adds a template specialization for `REG_TYPE` into the local `RegTraits`.
+#define ASMJIT_DEFINE_REG_TRAITS(REG, REG_TYPE, GROUP, SIZE, COUNT, TYPE_ID) \
+template<> \
+struct RegTraits<REG_TYPE> { \
+ typedef REG RegT; \
+ \
+ static constexpr uint32_t kValid = 1; \
+ static constexpr uint32_t kCount = COUNT; \
+ static constexpr uint32_t kTypeId = TYPE_ID; \
+ \
+ static constexpr uint32_t kType = REG_TYPE; \
+ static constexpr uint32_t kGroup = GROUP; \
+ static constexpr uint32_t kSize = SIZE; \
+ \
+ static constexpr uint32_t kSignature = \
+ (Operand::kOpReg << Operand::kSignatureOpShift ) | \
+ (kType << Operand::kSignatureRegTypeShift ) | \
+ (kGroup << Operand::kSignatureRegGroupShift) | \
+ (kSize << Operand::kSignatureSizeShift ) ; \
+}
+
+//! Adds constructors and member functions to a class that implements abstract
+//! register. Abstract register is register that doesn't have type or signature
+//! yet, it's a base class like `x86::Reg` or `arm::Reg`.
+#define ASMJIT_DEFINE_ABSTRACT_REG(REG, BASE) \
+public: \
+  /*! Default constructor that only sets up the basics. */                     \
+ constexpr REG() noexcept \
+ : BASE(kSignature, kIdBad) {} \
+ \
+ /*! Makes a copy of the `other` register operand. */ \
+ constexpr REG(const REG& other) noexcept \
+ : BASE(other) {} \
+ \
+ /*! Makes a copy of the `other` register having id set to `rId` */ \
+ constexpr REG(const BaseReg& other, uint32_t rId) noexcept \
+ : BASE(other, rId) {} \
+ \
+ /*! Creates a register based on `signature` and `rId`. */ \
+ constexpr REG(uint32_t signature, uint32_t rId) noexcept \
+ : BASE(signature, rId) {} \
+ \
+ /*! Creates a completely uninitialized REG register operand (garbage). */ \
+ inline explicit REG(Globals::NoInit_) noexcept \
+ : BASE(Globals::NoInit) {} \
+ \
+ /*! Creates a new register from register type and id. */ \
+ static inline REG fromTypeAndId(uint32_t rType, uint32_t rId) noexcept { \
+ return REG(signatureOf(rType), rId); \
+ } \
+ \
+ /*! Clones the register operand. */ \
+ constexpr REG clone() const noexcept { return REG(*this); } \
+ \
+ inline REG& operator=(const REG& other) noexcept = default;
+
+//! Adds constructors and member functions to a class that implements final
+//! register. Final registers MUST HAVE a valid signature.
+#define ASMJIT_DEFINE_FINAL_REG(REG, BASE, TRAITS) \
+public: \
+ static constexpr uint32_t kThisType = TRAITS::kType; \
+ static constexpr uint32_t kThisGroup = TRAITS::kGroup; \
+ static constexpr uint32_t kThisSize = TRAITS::kSize; \
+ static constexpr uint32_t kSignature = TRAITS::kSignature; \
+ \
+ ASMJIT_DEFINE_ABSTRACT_REG(REG, BASE) \
+ \
+ /*! Creates a register operand having its id set to `rId`. */ \
+ constexpr explicit REG(uint32_t rId) noexcept \
+ : BASE(kSignature, rId) {}
+
+//! \addtogroup asmjit_assembler
+//! \{
+
+// ============================================================================
+// [asmjit::Operand_]
+// ============================================================================
+
+//! Constructor-less `Operand`.
+//!
+//! Contains no initialization code and can be used safely to define an array
+//! of operands that won't be initialized. This is an `Operand` compatible
+//! data structure designed to be statically initialized, static const, or to
+//! be used by the user to define an array of operands without having them
+//! default initialized.
+//!
+//! The key difference between `Operand` and `Operand_`:
+//!
+//! ```
+//! Operand_ xArray[10]; // Not initialized, contains garbage.
+//! Operand yArray[10]; // All operands initialized to none.
+//! ```
+struct Operand_ {
+ //! Operand's signature that provides operand type and additional information.
+ uint32_t _signature;
+ //! Either base id as used by memory operand or any id as used by others.
+ uint32_t _baseId;
+
+ //! Data specific to the operand type.
+ //!
+ //! The reason we don't use union is that we have `constexpr` constructors that
+  //! construct operands and other `constexpr` functions that return either another
+ //! Operand or something else. These cannot generally work with unions so we also
+ //! cannot use `union` if we want to be standard compliant.
+ uint32_t _data[2];
+
+ //! Indexes to `_data` array.
+ enum DataIndex : uint32_t {
+ kDataMemIndexId = 0,
+ kDataMemOffsetLo = 1,
+
+ kDataImmValueLo = ASMJIT_ARCH_LE ? 0 : 1,
+ kDataImmValueHi = ASMJIT_ARCH_LE ? 1 : 0
+ };
+
+ //! Operand types that can be encoded in `Operand`.
+ enum OpType : uint32_t {
+ //! Not an operand or not initialized.
+ kOpNone = 0,
+ //! Operand is a register.
+ kOpReg = 1,
+ //! Operand is a memory.
+ kOpMem = 2,
+ //! Operand is an immediate value.
+ kOpImm = 3,
+ //! Operand is a label.
+ kOpLabel = 4
+ };
+ static_assert(kOpMem == kOpReg + 1, "asmjit::Operand requires `kOpMem` to be `kOpReg+1`.");
+
+ //! Label tag.
+ enum LabelTag {
+ //! Label tag is used as a sub-type, forming a unique signature across all
+ //! operand types as 0x1 is never associated with any register type. This
+ //! means that a memory operand's BASE register can be constructed from
+ //! virtually any operand (register vs. label) by just assigning its type
+ //! (register type or label-tag) and operand id.
+ kLabelTag = 0x1
+ };
+
+  //! \cond INTERNAL
+ enum SignatureBits : uint32_t {
+ // Operand type (3 least significant bits).
+ // |........|........|........|.....XXX|
+ kSignatureOpShift = 0,
+ kSignatureOpMask = 0x07u << kSignatureOpShift,
+
+ // Register type (5 bits).
+ // |........|........|........|XXXXX...|
+ kSignatureRegTypeShift = 3,
+ kSignatureRegTypeMask = 0x1Fu << kSignatureRegTypeShift,
+
+ // Register group (4 bits).
+ // |........|........|....XXXX|........|
+ kSignatureRegGroupShift = 8,
+ kSignatureRegGroupMask = 0x0Fu << kSignatureRegGroupShift,
+
+ // Memory base type (5 bits).
+ // |........|........|........|XXXXX...|
+ kSignatureMemBaseTypeShift = 3,
+ kSignatureMemBaseTypeMask = 0x1Fu << kSignatureMemBaseTypeShift,
+
+ // Memory index type (5 bits).
+ // |........|........|...XXXXX|........|
+ kSignatureMemIndexTypeShift = 8,
+ kSignatureMemIndexTypeMask = 0x1Fu << kSignatureMemIndexTypeShift,
+
+ // Memory base+index combined (10 bits).
+ // |........|........|...XXXXX|XXXXX...|
+ kSignatureMemBaseIndexShift = 3,
+ kSignatureMemBaseIndexMask = 0x3FFu << kSignatureMemBaseIndexShift,
+
+ // Memory address type (2 bits).
+ // |........|........|.XX.....|........|
+ kSignatureMemAddrTypeShift = 13,
+ kSignatureMemAddrTypeMask = 0x03u << kSignatureMemAddrTypeShift,
+
+ // This memory operand represents a home-slot or stack (BaseCompiler).
+ // |........|........|X.......|........|
+ kSignatureMemRegHomeShift = 15,
+ kSignatureMemRegHomeFlag = 0x01u << kSignatureMemRegHomeShift,
+
+ // Operand size (8 most significant bits).
+ // |XXXXXXXX|........|........|........|
+ kSignatureSizeShift = 24,
+ kSignatureSizeMask = 0xFFu << kSignatureSizeShift
+ };
+ //! \endcond
+
+ //! Constants useful for VirtId <-> Index translation.
+ enum VirtIdConstants : uint32_t {
+ //! Minimum valid packed-id.
+ kVirtIdMin = 256,
+ //! Maximum valid packed-id, excludes Globals::kInvalidId.
+ kVirtIdMax = Globals::kInvalidId - 1,
+ //! Count of valid packed-ids.
+ kVirtIdCount = uint32_t(kVirtIdMax - kVirtIdMin + 1)
+ };
+
+ //! Tests whether the given `id` is a valid virtual register id. Since AsmJit
+ //! supports both physical and virtual registers it must be able to distinguish
+ //! between these two. The idea is that physical registers are always limited
+ //! in size, so virtual identifiers start from `kVirtIdMin` and end at `kVirtIdMax`.
+ static ASMJIT_INLINE bool isVirtId(uint32_t id) noexcept { return id - kVirtIdMin < uint32_t(kVirtIdCount); }
+ //! Converts a real-id into a packed-id that can be stored in Operand.
+ static ASMJIT_INLINE uint32_t indexToVirtId(uint32_t id) noexcept { return id + kVirtIdMin; }
+ //! Converts a packed-id back to real-id.
+ static ASMJIT_INLINE uint32_t virtIdToIndex(uint32_t id) noexcept { return id - kVirtIdMin; }
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! \cond INTERNAL
+ //! Initializes a `BaseReg` operand from `signature` and register `id`.
+ inline void _initReg(uint32_t signature, uint32_t id) noexcept {
+ _signature = signature;
+ _baseId = id;
+ _data[0] = 0;
+ _data[1] = 0;
+ }
+ //! \endcond
+
+ //! Initializes the operand from `other` operand (used by operator overloads).
+ inline void copyFrom(const Operand_& other) noexcept { memcpy(this, &other, sizeof(Operand_)); }
+
+ //! Resets the `Operand` to none.
+ //!
+ //! None operand is defined the following way:
+ //! - Its signature is zero (kOpNone, and the rest zero as well).
+ //! - Its id is `0`.
+ //! - The reserved8_4 field is set to `0`.
+ //! - The reserved12_4 field is set to zero.
+ //!
+ //! In other words, reset operands have all members set to zero. Reset operand
+ //! must match the Operand state right after its construction. Alternatively,
+ //! if you have an array of operands, you can simply use `memset()`.
+ //!
+ //! ```
+ //! using namespace asmjit;
+ //!
+ //! Operand a;
+ //! Operand b;
+ //! assert(a == b);
+ //!
+ //! b = x86::eax;
+ //! assert(a != b);
+ //!
+ //! b.reset();
+ //! assert(a == b);
+ //!
+ //! memset(&b, 0, sizeof(Operand));
+ //! assert(a == b);
+ //! ```
+ inline void reset() noexcept {
+ _signature = 0;
+ _baseId = 0;
+ _data[0] = 0;
+ _data[1] = 0;
+ }
+
+ //! \}
+
+ //! \name Operator Overloads
+ //! \{
+
+ //! Tests whether this operand is the same as `other`.
+ constexpr bool operator==(const Operand_& other) const noexcept { return equals(other); }
+ //! Tests whether this operand is not the same as `other`.
+ constexpr bool operator!=(const Operand_& other) const noexcept { return !equals(other); }
+
+ //! \}
+
+ //! \name Cast
+ //! \{
+
+ //! Casts this operand to `T` type.
+ template<typename T>
+ inline T& as() noexcept { return static_cast<T&>(*this); }
+
+ //! Casts this operand to `T` type (const).
+ template<typename T>
+ inline const T& as() const noexcept { return static_cast<const T&>(*this); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+  //! Tests whether the operand matches the given `signature`.
+ constexpr bool hasSignature(uint32_t signature) const noexcept { return _signature == signature; }
+ //! Tests whether the operand matches the signature of the `other` operand.
+ constexpr bool hasSignature(const Operand_& other) const noexcept { return _signature == other.signature(); }
+
+ //! Returns operand signature as unsigned 32-bit integer.
+ //!
+ //! Signature is first 4 bytes of the operand data. It's used mostly for
+ //! operand checking as it's much faster to check 4 bytes at once than having
+ //! to check these bytes individually.
+ constexpr uint32_t signature() const noexcept { return _signature; }
+
+ //! Sets the operand signature, see `signature()`.
+ //!
+ //! \note Improper use of `setSignature()` can lead to hard-to-debug errors.
+ inline void setSignature(uint32_t signature) noexcept { _signature = signature; }
+
+ //! \cond INTERNAL
+ template<uint32_t mask>
+ constexpr bool _hasSignaturePart() const noexcept {
+ return (_signature & mask) != 0;
+ }
+
+ template<uint32_t mask>
+ constexpr uint32_t _getSignaturePart() const noexcept {
+ return (_signature >> Support::constCtz(mask)) & (mask >> Support::constCtz(mask));
+ }
+
+ template<uint32_t mask>
+ inline void _setSignaturePart(uint32_t value) noexcept {
+ ASMJIT_ASSERT((value & ~(mask >> Support::constCtz(mask))) == 0);
+ _signature = (_signature & ~mask) | (value << Support::constCtz(mask));
+ }
+ //! \endcond
+
+ //! Returns the type of the operand, see `OpType`.
+ constexpr uint32_t opType() const noexcept { return _getSignaturePart<kSignatureOpMask>(); }
+ //! Tests whether the operand is none (`kOpNone`).
+ constexpr bool isNone() const noexcept { return _signature == 0; }
+ //! Tests whether the operand is a register (`kOpReg`).
+ constexpr bool isReg() const noexcept { return opType() == kOpReg; }
+ //! Tests whether the operand is a memory location (`kOpMem`).
+ constexpr bool isMem() const noexcept { return opType() == kOpMem; }
+ //! Tests whether the operand is an immediate (`kOpImm`).
+ constexpr bool isImm() const noexcept { return opType() == kOpImm; }
+ //! Tests whether the operand is a label (`kOpLabel`).
+ constexpr bool isLabel() const noexcept { return opType() == kOpLabel; }
+
+ //! Tests whether the operand is a physical register.
+ constexpr bool isPhysReg() const noexcept { return isReg() && _baseId < 0xFFu; }
+ //! Tests whether the operand is a virtual register.
+ constexpr bool isVirtReg() const noexcept { return isReg() && _baseId > 0xFFu; }
+
+ //! Tests whether the operand specifies a size (i.e. the size is not zero).
+ constexpr bool hasSize() const noexcept { return _hasSignaturePart<kSignatureSizeMask>(); }
+ //! Tests whether the size of the operand matches `size`.
+ constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; }
+
+ //! Returns the size of the operand in bytes.
+ //!
+ //! The value returned depends on the operand type:
+ //! * None - Should always return zero size.
+ //! * Reg - Should always return the size of the register. If the register
+ //! size depends on architecture (like `x86::CReg` and `x86::DReg`)
+ //! the size returned should be the greatest possible (so it should
+ //! return 64-bit size in such case).
+ //! * Mem - Size is optional and will be in most cases zero.
+ //! * Imm - Should always return zero size.
+ //! * Label - Should always return zero size.
+ constexpr uint32_t size() const noexcept { return _getSignaturePart<kSignatureSizeMask>(); }
+
+ //! Returns the operand id.
+ //!
+ //! The value returned should be interpreted accordingly to the operand type:
+ //! * None - Should be `0`.
+ //! * Reg - Physical or virtual register id.
+ //! * Mem - Multiple meanings - BASE address (register or label id), or
+ //! high value of a 64-bit absolute address.
+ //! * Imm - Should be `0`.
+ //! * Label - Label id if it was created by using `newLabel()` or
+ //! `Globals::kInvalidId` if the label is invalid or not
+ //! initialized.
+ constexpr uint32_t id() const noexcept { return _baseId; }
+
+ //! Tests whether the operand is 100% equal to `other` operand.
+ //!
+  //! \note This basically performs a binary comparison, if any bit is
+ //! different the operands are not equal.
+ constexpr bool equals(const Operand_& other) const noexcept {
+ return (_signature == other._signature) &
+ (_baseId == other._baseId ) &
+ (_data[0] == other._data[0] ) &
+ (_data[1] == other._data[1] ) ;
+ }
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use equals() instead")
+ constexpr bool isEqual(const Operand_& other) const noexcept { return equals(other); }
+#endif //!ASMJIT_NO_DEPRECATED
+
+ //! Tests whether the operand is a register matching `rType`.
+ constexpr bool isReg(uint32_t rType) const noexcept {
+ return (_signature & (kSignatureOpMask | kSignatureRegTypeMask)) ==
+ ((kOpReg << kSignatureOpShift) | (rType << kSignatureRegTypeShift));
+ }
+
+ //! Tests whether the operand is register and of `rType` and `rId`.
+ constexpr bool isReg(uint32_t rType, uint32_t rId) const noexcept {
+ return isReg(rType) && id() == rId;
+ }
+
+ //! Tests whether the operand is a register or memory.
+ constexpr bool isRegOrMem() const noexcept {
+ return Support::isBetween<uint32_t>(opType(), kOpReg, kOpMem);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::Operand]
+// ============================================================================
+
+//! Operand can contain register, memory location, immediate, or label.
+class Operand : public Operand_ {
+public:
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates `kOpNone` operand having all members initialized to zero.
+  constexpr Operand() noexcept
+    : Operand_{ kOpNone, 0u, { 0u, 0u }} {}
+
+  //! Creates a cloned `other` operand.
+  constexpr Operand(const Operand& other) noexcept = default;
+
+  //! Creates a cloned `other` operand.
+  constexpr explicit Operand(const Operand_& other) noexcept
+    : Operand_(other) {}
+
+  //! Creates an operand initialized to raw `[u0, u1, u2, u3]` values.
+  constexpr Operand(Globals::Init_, uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3) noexcept
+    : Operand_{ u0, u1, { u2, u3 }} {}
+
+  //! Creates an uninitialized operand (dangerous).
+  inline explicit Operand(Globals::NoInit_) noexcept {}
+
+  //! \}
+
+  //! \name Operator Overloads
+  //! \{
+
+  inline Operand& operator=(const Operand& other) noexcept = default;
+  inline Operand& operator=(const Operand_& other) noexcept { return operator=(static_cast<const Operand&>(other)); }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Clones this operand and returns its copy.
+  constexpr Operand clone() const noexcept { return Operand(*this); }
+
+  //! \}
+};
+
+static_assert(sizeof(Operand) == 16, "asmjit::Operand must be exactly 16 bytes long");
+
+// ============================================================================
+// [asmjit::Label]
+// ============================================================================
+
+//! Label (jump target or data location).
+//!
+//! Label represents a location in code typically used as a jump target, but
+//! may be also a reference to some data or a static variable. Label has to be
+//! explicitly created by BaseEmitter.
+//!
+//! Example of using labels:
+//!
+//! ```
+//! // Create some emitter (for example x86::Assembler).
+//! x86::Assembler a;
+//!
+//! // Create Label instance.
+//! Label L1 = a.newLabel();
+//!
+//! // ... your code ...
+//!
+//! // Using label.
+//! a.jump(L1);
+//!
+//! // ... your code ...
+//!
+//! // Bind label to the current position, see `BaseEmitter::bind()`.
+//! a.bind(L1);
+//! ```
+class Label : public Operand {
+public:
+ //! Type of the Label.
+ enum LabelType : uint32_t {
+ //! Anonymous (unnamed) label.
+ kTypeAnonymous = 0,
+ //! Local label (always has parentId).
+ kTypeLocal = 1,
+ //! Global label (never has parentId).
+ kTypeGlobal = 2,
+ //! Number of label types.
+ kTypeCount = 3
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a label operand without ID (you must set the ID to make it valid).
+ constexpr Label() noexcept
+ : Operand(Globals::Init, kOpLabel, Globals::kInvalidId, 0, 0) {}
+
+ //! Creates a cloned label operand of `other`.
+ constexpr Label(const Label& other) noexcept
+ : Operand(other) {}
+
+ //! Creates a label operand of the given `id`.
+ constexpr explicit Label(uint32_t id) noexcept
+ : Operand(Globals::Init, kOpLabel, id, 0, 0) {}
+
+ inline explicit Label(Globals::NoInit_) noexcept
+ : Operand(Globals::NoInit) {}
+
+ //! Resets the label, will reset all properties and set its ID to `Globals::kInvalidId`.
+ inline void reset() noexcept {
+ _signature = kOpLabel;
+ _baseId = Globals::kInvalidId;
+ _data[0] = 0;
+ _data[1] = 0;
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline Label& operator=(const Label& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the label was created by CodeHolder and/or an attached emitter.
+ constexpr bool isValid() const noexcept { return _baseId != Globals::kInvalidId; }
+ //! Sets the label `id`.
+ inline void setId(uint32_t id) noexcept { _baseId = id; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::BaseRegTraits]
+// ============================================================================
+
+//! \cond INTERNAL
+//! Default register traits.
+struct BaseRegTraits {
+ //! RegType is not valid by default.
+ static constexpr uint32_t kValid = 0;
+ //! Count of registers (0 if none).
+ static constexpr uint32_t kCount = 0;
+ //! Everything is void by default.
+ static constexpr uint32_t kTypeId = 0;
+
+ //! Zero type by default.
+ static constexpr uint32_t kType = 0;
+ //! Zero group by default.
+ static constexpr uint32_t kGroup = 0;
+ //! No size by default.
+ static constexpr uint32_t kSize = 0;
+
+ //! Empty signature by default.
+ static constexpr uint32_t kSignature = Operand::kOpReg;
+};
+//! \endcond
+
+// ============================================================================
+// [asmjit::BaseReg]
+// ============================================================================
+
+//! Structure that allows to extract a register information based on the signature.
+//!
+//! This information is compatible with operand's signature (32-bit integer)
+//! and `RegInfo` just provides easy way to access it.
+struct RegInfo {
+ inline void reset(uint32_t signature = 0) noexcept { _signature = signature; }
+ inline void setSignature(uint32_t signature) noexcept { _signature = signature; }
+
+ template<uint32_t mask>
+ constexpr uint32_t _getSignaturePart() const noexcept {
+ return (_signature >> Support::constCtz(mask)) & (mask >> Support::constCtz(mask));
+ }
+
+ constexpr bool isValid() const noexcept { return _signature != 0; }
+ constexpr uint32_t signature() const noexcept { return _signature; }
+ constexpr uint32_t opType() const noexcept { return _getSignaturePart<Operand::kSignatureOpMask>(); }
+ constexpr uint32_t group() const noexcept { return _getSignaturePart<Operand::kSignatureRegGroupMask>(); }
+ constexpr uint32_t type() const noexcept { return _getSignaturePart<Operand::kSignatureRegTypeMask>(); }
+ constexpr uint32_t size() const noexcept { return _getSignaturePart<Operand::kSignatureSizeMask>(); }
+
+ uint32_t _signature;
+};
+
+//! Physical or virtual register operand.
+class BaseReg : public Operand {
+public:
+  //! Architecture neutral register types.
+  //!
+  //! These must be reused by any platform that supports these register types.
+  //! All GP and VEC registers are also allowed by design to be part of a
+  //! BASE|INDEX of a memory operand.
+  enum RegType : uint32_t {
+    //! No register - unused, invalid, multiple meanings.
+    kTypeNone = 0,
+
+    // (1 is used as a LabelTag)
+
+    //! 8-bit low general purpose register (X86).
+    kTypeGp8Lo = 2,
+    //! 8-bit high general purpose register (X86).
+    kTypeGp8Hi = 3,
+    //! 16-bit general purpose register (X86).
+    kTypeGp16 = 4,
+    //! 32-bit general purpose register (X86|ARM).
+    kTypeGp32 = 5,
+    //! 64-bit general purpose register (X86|ARM).
+    kTypeGp64 = 6,
+    //! 32-bit view of a vector register (ARM).
+    kTypeVec32 = 7,
+    //! 64-bit view of a vector register (ARM).
+    kTypeVec64 = 8,
+    //! 128-bit view of a vector register (X86|ARM).
+    kTypeVec128 = 9,
+    //! 256-bit view of a vector register (X86).
+    kTypeVec256 = 10,
+    //! 512-bit view of a vector register (X86).
+    kTypeVec512 = 11,
+    //! 1024-bit view of a vector register (future).
+    kTypeVec1024 = 12,
+    //! Other0 register, should match `kOther0` group.
+    kTypeOther0 = 13,
+    //! Other1 register, should match `kOther1` group.
+    kTypeOther1 = 14,
+    //! Universal id of IP/PC register (if separate).
+    kTypeIP = 15,
+    //! Start of platform dependent register types (must be honored).
+    kTypeCustom = 16,
+    //! Maximum possible register type value.
+    kTypeMax = 31
+  };
+
+  //! Register group (architecture neutral), and some limits.
+  enum RegGroup : uint32_t {
+    //! General purpose register group compatible with all backends.
+    kGroupGp = 0,
+    //! Vector register group compatible with all backends.
+    kGroupVec = 1,
+    //! Group that is architecture dependent.
+    kGroupOther0 = 2,
+    //! Group that is architecture dependent.
+    kGroupOther1 = 3,
+    //! Count of register groups used by virtual registers.
+    kGroupVirt = 4,
+    //! Count of register groups used by physical registers.
+    kGroupCount = 16
+  };
+
+  //! Register id limits and special values.
+  enum Id : uint32_t {
+    //! None or any register (mostly internal).
+    kIdBad = 0xFFu
+  };
+
+  //! Operand signature of a default-constructed `BaseReg` (just `kOpReg`).
+  static constexpr uint32_t kSignature = kOpReg;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates a dummy register operand.
+  constexpr BaseReg() noexcept
+    : Operand(Globals::Init, kSignature, kIdBad, 0, 0) {}
+
+  //! Creates a new register operand which is the same as `other`.
+  constexpr BaseReg(const BaseReg& other) noexcept
+    : Operand(other) {}
+
+  //! Creates a new register operand compatible with `other`, but with a different `rId`.
+  constexpr BaseReg(const BaseReg& other, uint32_t rId) noexcept
+    : Operand(Globals::Init, other._signature, rId, 0, 0) {}
+
+  //! Creates a register initialized to `signature` and `rId`.
+  constexpr BaseReg(uint32_t signature, uint32_t rId) noexcept
+    : Operand(Globals::Init, signature, rId, 0, 0) {}
+
+  //! Creates a completely uninitialized register operand (unsafe - must be initialized before use).
+  inline explicit BaseReg(Globals::NoInit_) noexcept
+    : Operand(Globals::NoInit) {}
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  //! Copy-assigns the `other` register operand to this one.
+  inline BaseReg& operator=(const BaseReg& other) noexcept = default;
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Tests whether this register is the same as `other`.
+  //!
+  //! This is just an optimization. Registers by default only use the first
+  //! 8 bytes of Operand data, so this method takes advantage of this knowledge
+  //! and only compares these 8 bytes. If both operands were created correctly
+  //! both \ref equals() and \ref isSame() should give the same answer, however,
+  //! if any of these two contains garbage or other metadata in the upper 8
+  //! bytes then \ref isSame() may return `true` in cases in which \ref equals()
+  //! returns false.
+  constexpr bool isSame(const BaseReg& other) const noexcept {
+    // Bitwise `&` is intentional - evaluates both comparisons without a branch.
+    return (_signature == other._signature) & (_baseId == other._baseId);
+  }
+
+  //! Tests whether the register is valid (either virtual or physical).
+  constexpr bool isValid() const noexcept { return (_signature != 0) & (_baseId != kIdBad); }
+
+  //! Tests whether this is a physical register.
+  constexpr bool isPhysReg() const noexcept { return _baseId < kIdBad; }
+  //! Tests whether this is a virtual register.
+  constexpr bool isVirtReg() const noexcept { return _baseId > kIdBad; }
+
+  //! Tests whether the register type matches `type` - same as `isReg(type)`, provided for convenience.
+  constexpr bool isType(uint32_t type) const noexcept { return (_signature & kSignatureRegTypeMask) == (type << kSignatureRegTypeShift); }
+  //! Tests whether the register group matches `group`.
+  constexpr bool isGroup(uint32_t group) const noexcept { return (_signature & kSignatureRegGroupMask) == (group << kSignatureRegGroupShift); }
+
+  //! Tests whether the register is a general purpose register (any size).
+  constexpr bool isGp() const noexcept { return isGroup(kGroupGp); }
+  //! Tests whether the register is a vector register.
+  constexpr bool isVec() const noexcept { return isGroup(kGroupVec); }
+
+  // Bring base-class `isReg()` overloads into scope so they are not hidden
+  // by the overloads declared below.
+  using Operand_::isReg;
+
+  //! Same as `isType()`, provided for convenience.
+  constexpr bool isReg(uint32_t rType) const noexcept { return isType(rType); }
+  //! Tests whether the register type matches `rType` and register id matches `rId`.
+  constexpr bool isReg(uint32_t rType, uint32_t rId) const noexcept { return isType(rType) && id() == rId; }
+
+  //! Returns the type of the register.
+  constexpr uint32_t type() const noexcept { return _getSignaturePart<kSignatureRegTypeMask>(); }
+  //! Returns the register group.
+  constexpr uint32_t group() const noexcept { return _getSignaturePart<kSignatureRegGroupMask>(); }
+
+  //! Clones the register operand.
+  constexpr BaseReg clone() const noexcept { return BaseReg(*this); }
+
+  //! Casts this register to `RegT` by also changing its signature.
+  //!
+  //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors.
+  template<typename RegT>
+  constexpr RegT cloneAs() const noexcept { return RegT(RegT::kSignature, id()); }
+
+  //! Casts this register to `other` by also changing its signature.
+  //!
+  //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors.
+  template<typename RegT>
+  constexpr RegT cloneAs(const RegT& other) const noexcept { return RegT(other.signature(), id()); }
+
+  //! Sets the register id to `rId`.
+  inline void setId(uint32_t rId) noexcept { _baseId = rId; }
+
+  //! Sets a 32-bit operand signature based on traits of `RegT`.
+  template<typename RegT>
+  inline void setSignatureT() noexcept { _signature = RegT::kSignature; }
+
+  //! Sets the register `signature` and `rId`.
+  inline void setSignatureAndId(uint32_t signature, uint32_t rId) noexcept {
+    _signature = signature;
+    _baseId = rId;
+  }
+
+  //! \}
+
+  //! \name Static Functions
+  //! \{
+
+  //! Tests whether the `op` operand is a general purpose register.
+  static inline bool isGp(const Operand_& op) noexcept {
+    // Check operand type and register group. Not interested in register type and size.
+    const uint32_t kSgn = (kOpReg   << kSignatureOpShift      ) |
+                          (kGroupGp << kSignatureRegGroupShift) ;
+    return (op.signature() & (kSignatureOpMask | kSignatureRegGroupMask)) == kSgn;
+  }
+
+  //! Tests whether the `op` operand is a vector register.
+  static inline bool isVec(const Operand_& op) noexcept {
+    // Check operand type and register group. Not interested in register type and size.
+    const uint32_t kSgn = (kOpReg    << kSignatureOpShift      ) |
+                          (kGroupVec << kSignatureRegGroupShift) ;
+    return (op.signature() & (kSignatureOpMask | kSignatureRegGroupMask)) == kSgn;
+  }
+
+  // NOTE: The bitwise `&` below is intentional - both sides are evaluated branchlessly.
+  //! Tests whether the `op` is a general purpose register of the given `rId`.
+  static inline bool isGp(const Operand_& op, uint32_t rId) noexcept { return isGp(op) & (op.id() == rId); }
+  //! Tests whether the `op` is a vector register of the given `rId`.
+  static inline bool isVec(const Operand_& op, uint32_t rId) noexcept { return isVec(op) & (op.id() == rId); }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RegOnly]
+// ============================================================================
+
+//! RegOnly is an 8-byte version of `BaseReg` that can store either a register
+//! or nothing.
+//!
+//! This class was designed to decrease the space consumed by each extra "operand"
+//! in `BaseEmitter` and `InstNode` classes.
+struct RegOnly {
+  //! Type of the operand, either `kOpNone` or `kOpReg`.
+  uint32_t _signature;
+  //! Physical or virtual register id.
+  uint32_t _id;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Initializes the `RegOnly` instance to hold register `signature` and `id`.
+  inline void init(uint32_t signature, uint32_t id) noexcept {
+    _signature = signature;
+    _id = id;
+  }
+
+  //! Initializes the `RegOnly` instance from a full `BaseReg` operand.
+  inline void init(const BaseReg& reg) noexcept { init(reg.signature(), reg.id()); }
+  //! Initializes the `RegOnly` instance from another `RegOnly` instance.
+  inline void init(const RegOnly& reg) noexcept { init(reg.signature(), reg.id()); }
+
+  //! Resets the `RegOnly` members to zeros (none).
+  inline void reset() noexcept { init(0, 0); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Tests whether this RegOnly is none (same as calling `Operand_::isNone()`).
+  constexpr bool isNone() const noexcept { return _signature == 0; }
+  //! Tests whether the register is valid (either virtual or physical).
+  constexpr bool isReg() const noexcept { return _signature != 0; }
+
+  //! Tests whether this is a physical register.
+  constexpr bool isPhysReg() const noexcept { return _id < BaseReg::kIdBad; }
+  //! Tests whether this is a virtual register (used by `BaseCompiler`).
+  constexpr bool isVirtReg() const noexcept { return _id > BaseReg::kIdBad; }
+
+  //! Returns the register signature or 0 if no register is assigned.
+  constexpr uint32_t signature() const noexcept { return _signature; }
+  //! Returns the register id.
+  //!
+  //! \note Always check whether the register is assigned before using the
+  //! returned identifier as non-assigned `RegOnly` instance would return
+  //! zero id, which is still a valid register id.
+  constexpr uint32_t id() const noexcept { return _id; }
+
+  //! Sets the register id.
+  inline void setId(uint32_t id) noexcept { _id = id; }
+
+  //! \cond INTERNAL
+  //!
+  //! Extracts information from operand's signature.
+  template<uint32_t mask>
+  constexpr uint32_t _getSignaturePart() const noexcept {
+    return (_signature >> Support::constCtz(mask)) & (mask >> Support::constCtz(mask));
+  }
+  //! \endcond
+
+  //! Returns the type of the register.
+  constexpr uint32_t type() const noexcept { return _getSignaturePart<Operand::kSignatureRegTypeMask>(); }
+  //! Returns the register group.
+  constexpr uint32_t group() const noexcept { return _getSignaturePart<Operand::kSignatureRegGroupMask>(); }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Converts this RegOnly to a real `RegT` operand.
+  template<typename RegT>
+  constexpr RegT toReg() const noexcept { return RegT(_signature, _id); }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::BaseMem]
+// ============================================================================
+
+//! Base class for all memory operands.
+//!
+//! \note It's tricky to pack all possible cases that define a memory operand
+//! into just 16 bytes. The `BaseMem` splits data into the following parts:
+//!
+//! BASE - Base register or label - requires 36 bits total. 4 bits are used to
+//! encode the type of the BASE operand (label vs. register type) and
+//! the remaining 32 bits define the BASE id, which can be a physical or
+//! virtual register index. If BASE type is zero, which is never used as
+//! a register-type and label doesn't use it as well then BASE field
+//! contains a high DWORD of a possible 64-bit absolute address, which is
+//! possible on X64.
+//!
+//! INDEX - Index register (or theoretically Label, which doesn't make sense).
+//! Encoding is similar to BASE - it also requires 36 bits and splits
+//! the encoding to INDEX type (4 bits defining the register type) and
+//! id (32-bits).
+//!
+//! OFFSET - A relative offset of the address. Basically if BASE is specified
+//! the relative displacement adjusts BASE and an optional INDEX. if
+//! BASE is not specified then the OFFSET should be considered as ABSOLUTE
+//! address (at least on X86). In that case its low 32 bits are stored in
+//! DISPLACEMENT field and the remaining high 32 bits are stored in BASE.
+//!
+//! OTHER - There is rest 8 bits that can be used for whatever purpose. The
+//! x86::Mem operand uses these bits to store segment override prefix and
+//! index shift (scale).
+class BaseMem : public Operand {
+public:
+  //! Address type.
+  enum AddrType : uint32_t {
+    //! Default address type, Assembler will select the best type when necessary.
+    kAddrTypeDefault = 0,
+    //! Absolute address type.
+    kAddrTypeAbs = 1,
+    //! Relative address type.
+    kAddrTypeRel = 2
+  };
+
+  // Shortcuts - address type already shifted into its signature position.
+  enum SignatureMem : uint32_t {
+    kSignatureMemAbs = kAddrTypeAbs << kSignatureMemAddrTypeShift,
+    kSignatureMemRel = kAddrTypeRel << kSignatureMemAddrTypeShift
+  };
+
+  //! \cond INTERNAL
+  //! Used internally to construct `BaseMem` operand from decomposed data.
+  struct Decomposed {
+    //! BASE register type (or `Label::kLabelTag` when BASE is a label).
+    uint32_t baseType;
+    //! BASE register or label id.
+    uint32_t baseId;
+    //! INDEX register type (zero if there is no INDEX).
+    uint32_t indexType;
+    //! INDEX register id.
+    uint32_t indexId;
+    //! 32-bit displacement.
+    int32_t offset;
+    //! Size of the memory operand in bytes (zero if unspecified).
+    uint32_t size;
+    //! Additional signature flags merged into the operand signature.
+    uint32_t flags;
+  };
+  //! \endcond
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates a default `BaseMem` operand, that points to [0].
+  constexpr BaseMem() noexcept
+    : Operand(Globals::Init, kOpMem, 0, 0, 0) {}
+
+  //! Creates a `BaseMem` operand that is a clone of `other`.
+  constexpr BaseMem(const BaseMem& other) noexcept
+    : Operand(other) {}
+
+  //! \cond INTERNAL
+
+  //! Creates a `BaseMem` operand from 4 integers as used by `Operand_` struct.
+  constexpr BaseMem(Globals::Init_, uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3) noexcept
+    : Operand(Globals::Init, u0, u1, u2, u3) {}
+
+  //! Creates a `BaseMem` operand from decomposed data.
+  constexpr BaseMem(const Decomposed& d) noexcept
+    : Operand(Globals::Init,
+              kOpMem | (d.baseType  << kSignatureMemBaseTypeShift )
+                     | (d.indexType << kSignatureMemIndexTypeShift)
+                     | (d.size      << kSignatureSizeShift        )
+                     | d.flags,
+              d.baseId,
+              d.indexId,
+              uint32_t(d.offset)) {}
+
+  //! \endcond
+
+  //! Creates a completely uninitialized `BaseMem` operand.
+  inline explicit BaseMem(Globals::NoInit_) noexcept
+    : Operand(Globals::NoInit) {}
+
+  //! Resets the memory operand - after the reset the memory points to [0].
+  inline void reset() noexcept {
+    _signature = kOpMem;
+    _baseId = 0;
+    _data[0] = 0;
+    _data[1] = 0;
+  }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  //! Copy-assigns the `other` memory operand to this one.
+  inline BaseMem& operator=(const BaseMem& other) noexcept { copyFrom(other); return *this; }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Clones the memory operand.
+  constexpr BaseMem clone() const noexcept { return BaseMem(*this); }
+
+  //! Returns the address type (see \ref AddrType) of the memory operand.
+  //!
+  //! By default, address type of newly created memory operands is always \ref kAddrTypeDefault.
+  constexpr uint32_t addrType() const noexcept { return _getSignaturePart<kSignatureMemAddrTypeMask>(); }
+  //! Sets the address type to `addrType`, see \ref AddrType.
+  inline void setAddrType(uint32_t addrType) noexcept { _setSignaturePart<kSignatureMemAddrTypeMask>(addrType); }
+  //! Resets the address type to \ref kAddrTypeDefault.
+  inline void resetAddrType() noexcept { _setSignaturePart<kSignatureMemAddrTypeMask>(0); }
+
+  //! Tests whether the address type is \ref kAddrTypeAbs.
+  constexpr bool isAbs() const noexcept { return addrType() == kAddrTypeAbs; }
+  //! Sets the address type to \ref kAddrTypeAbs.
+  inline void setAbs() noexcept { setAddrType(kAddrTypeAbs); }
+
+  //! Tests whether the address type is \ref kAddrTypeRel.
+  constexpr bool isRel() const noexcept { return addrType() == kAddrTypeRel; }
+  //! Sets the address type to \ref kAddrTypeRel.
+  inline void setRel() noexcept { setAddrType(kAddrTypeRel); }
+
+  //! Tests whether this memory operand is a register home (only used by \ref asmjit_compiler)
+  constexpr bool isRegHome() const noexcept { return _hasSignaturePart<kSignatureMemRegHomeFlag>(); }
+  //! Mark this memory operand as register home (only used by \ref asmjit_compiler).
+  inline void setRegHome() noexcept { _signature |= kSignatureMemRegHomeFlag; }
+  //! Marks this operand to not be a register home (only used by \ref asmjit_compiler).
+  inline void clearRegHome() noexcept { _signature &= ~kSignatureMemRegHomeFlag; }
+
+  //! Tests whether the memory operand has a BASE register or label specified.
+  constexpr bool hasBase() const noexcept { return (_signature & kSignatureMemBaseTypeMask) != 0; }
+  //! Tests whether the memory operand has an INDEX register specified.
+  constexpr bool hasIndex() const noexcept { return (_signature & kSignatureMemIndexTypeMask) != 0; }
+  //! Tests whether the memory operand has BASE or INDEX register.
+  constexpr bool hasBaseOrIndex() const noexcept { return (_signature & kSignatureMemBaseIndexMask) != 0; }
+  //! Tests whether the memory operand has BASE and INDEX register.
+  constexpr bool hasBaseAndIndex() const noexcept { return (_signature & kSignatureMemBaseTypeMask) != 0 && (_signature & kSignatureMemIndexTypeMask) != 0; }
+
+  //! Tests whether the BASE operand is a register (registers start after `kLabelTag`).
+  constexpr bool hasBaseReg() const noexcept { return (_signature & kSignatureMemBaseTypeMask) > (Label::kLabelTag << kSignatureMemBaseTypeShift); }
+  //! Tests whether the BASE operand is a label.
+  constexpr bool hasBaseLabel() const noexcept { return (_signature & kSignatureMemBaseTypeMask) == (Label::kLabelTag << kSignatureMemBaseTypeShift); }
+  //! Tests whether the INDEX operand is a register (registers start after `kLabelTag`).
+  constexpr bool hasIndexReg() const noexcept { return (_signature & kSignatureMemIndexTypeMask) > (Label::kLabelTag << kSignatureMemIndexTypeShift); }
+
+  //! Returns the type of the BASE register (0 if this memory operand doesn't
+  //! use the BASE register).
+  //!
+  //! \note If the returned type is one (a value never associated to a register
+  //! type) the BASE is not register, but it's a label. One equals to `kLabelTag`.
+  //! You should always check `hasBaseLabel()` before using `baseId()` result.
+  constexpr uint32_t baseType() const noexcept { return _getSignaturePart<kSignatureMemBaseTypeMask>(); }
+
+  //! Returns the type of an INDEX register (0 if this memory operand doesn't
+  //! use the INDEX register).
+  constexpr uint32_t indexType() const noexcept { return _getSignaturePart<kSignatureMemIndexTypeMask>(); }
+
+  //! Returns both BASE (4:0 bits) and INDEX (9:5 bits) types combined into a
+  //! single value - used internally for BASE+INDEX validation.
+  constexpr uint32_t baseAndIndexTypes() const noexcept { return _getSignaturePart<kSignatureMemBaseIndexMask>(); }
+
+  //! Returns the id of the BASE register or label (if the BASE was specified
+  //! as label).
+  constexpr uint32_t baseId() const noexcept { return _baseId; }
+
+  //! Returns the id of the INDEX register.
+  constexpr uint32_t indexId() const noexcept { return _data[kDataMemIndexId]; }
+
+  //! Sets the id of the BASE register (without modifying its type).
+  inline void setBaseId(uint32_t rId) noexcept { _baseId = rId; }
+  //! Sets the id of the INDEX register (without modifying its type).
+  inline void setIndexId(uint32_t rId) noexcept { _data[kDataMemIndexId] = rId; }
+
+  //! Sets the base register to type and id of the given `base` operand.
+  inline void setBase(const BaseReg& base) noexcept { return _setBase(base.type(), base.id()); }
+  //! Sets the index register to type and id of the given `index` operand.
+  inline void setIndex(const BaseReg& index) noexcept { return _setIndex(index.type(), index.id()); }
+
+  //! \cond INTERNAL
+  inline void _setBase(uint32_t rType, uint32_t rId) noexcept {
+    _setSignaturePart<kSignatureMemBaseTypeMask>(rType);
+    _baseId = rId;
+  }
+
+  inline void _setIndex(uint32_t rType, uint32_t rId) noexcept {
+    _setSignaturePart<kSignatureMemIndexTypeMask>(rType);
+    _data[kDataMemIndexId] = rId;
+  }
+  //! \endcond
+
+  //! Resets the memory operand's BASE register or label.
+  inline void resetBase() noexcept { _setBase(0, 0); }
+  //! Resets the memory operand's INDEX register.
+  inline void resetIndex() noexcept { _setIndex(0, 0); }
+
+  //! Sets the memory operand size (in bytes).
+  inline void setSize(uint32_t size) noexcept { _setSignaturePart<kSignatureSizeMask>(size); }
+
+  //! Tests whether the memory operand has a 64-bit offset or absolute address.
+  //!
+  //! If this is true then `hasBase()` must always report false.
+  constexpr bool isOffset64Bit() const noexcept { return baseType() == 0; }
+
+  //! Tests whether the memory operand has a non-zero offset or absolute address.
+  constexpr bool hasOffset() const noexcept {
+    // The high DWORD (stored in `_baseId`) only participates when there is no BASE.
+    return (_data[kDataMemOffsetLo] | uint32_t(_baseId & Support::bitMaskFromBool<uint32_t>(isOffset64Bit()))) != 0;
+  }
+
+  //! Returns either relative offset or absolute address as 64-bit integer.
+  constexpr int64_t offset() const noexcept {
+    return isOffset64Bit() ? int64_t(uint64_t(_data[kDataMemOffsetLo]) | (uint64_t(_baseId) << 32))
+                           : int64_t(int32_t(_data[kDataMemOffsetLo])); // Sign extend 32-bit offset.
+  }
+
+  //! Returns a 32-bit low part of a 64-bit offset or absolute address.
+  constexpr int32_t offsetLo32() const noexcept { return int32_t(_data[kDataMemOffsetLo]); }
+  //! Returns a 32-bit high part of a 64-bit offset or absolute address.
+  //!
+  //! \note This function is UNSAFE and returns garbage if `isOffset64Bit()`
+  //! returns false. Never use it blindly without checking it first.
+  constexpr int32_t offsetHi32() const noexcept { return int32_t(_baseId); }
+
+  //! Sets a 64-bit offset or an absolute address to `offset`.
+  //!
+  //! \note This functions attempts to set both high and low parts of a 64-bit
+  //! offset, however, if the operand has a BASE register it will store only the
+  //! low 32 bits of the offset / address as there is no way to store both BASE
+  //! and 64-bit offset, and there is currently no architecture that has such
+  //! capability targeted by AsmJit.
+  inline void setOffset(int64_t offset) noexcept {
+    uint32_t lo = uint32_t(uint64_t(offset) & 0xFFFFFFFFu);
+    uint32_t hi = uint32_t(uint64_t(offset) >> 32);
+    uint32_t hiMsk = Support::bitMaskFromBool<uint32_t>(isOffset64Bit());
+
+    _data[kDataMemOffsetLo] = lo;
+    // Branchless: write `hi` into `_baseId` only when there is no BASE register.
+    _baseId = (hi & hiMsk) | (_baseId & ~hiMsk);
+  }
+  //! Sets a low 32-bit offset to `offset` (don't use without knowing how BaseMem works).
+  inline void setOffsetLo32(int32_t offset) noexcept { _data[kDataMemOffsetLo] = uint32_t(offset); }
+
+  //! Adjusts the memory operand offset by a `offset`.
+  inline void addOffset(int64_t offset) noexcept {
+    if (isOffset64Bit()) {
+      int64_t result = offset + int64_t(uint64_t(_data[kDataMemOffsetLo]) | (uint64_t(_baseId) << 32));
+      _data[kDataMemOffsetLo] = uint32_t(uint64_t(result) & 0xFFFFFFFFu);
+      _baseId = uint32_t(uint64_t(result) >> 32);
+    }
+    else {
+      _data[kDataMemOffsetLo] += uint32_t(uint64_t(offset) & 0xFFFFFFFFu);
+    }
+  }
+
+  //! Adds `offset` to a low 32-bit offset part (don't use without knowing how
+  //! BaseMem works).
+  //!
+  //! \note This is a fast function that doesn't use the HI 32-bits of a
+  //! 64-bit offset. Use it only if you know that there is a BASE register
+  //! and the offset is only 32 bits anyway.
+  inline void addOffsetLo32(int32_t offset) noexcept { _data[kDataMemOffsetLo] += uint32_t(offset); }
+
+  //! Resets the memory offset to zero.
+  inline void resetOffset() noexcept { setOffset(0); }
+
+  //! Resets the lo part of the memory offset to zero (don't use without knowing
+  //! how BaseMem works).
+  inline void resetOffsetLo32() noexcept { setOffsetLo32(0); }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::Imm]
+// ============================================================================
+
+//! Immediate operand.
+//!
+//! Immediate operand is usually part of instruction itself. It's inlined after
+//! or before the instruction opcode. Immediates can be only signed or unsigned
+//! integers.
+//!
+//! To create an immediate operand use `asmjit::imm()` helper, which can be used
+//! with any type, not just the default 64-bit int.
+class Imm : public Operand {
+public:
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates a new immediate value (initial value is 0).
+  constexpr Imm() noexcept
+    : Operand(Globals::Init, kOpImm, 0, 0, 0) {}
+
+  //! Creates a new immediate value from `other`.
+  constexpr Imm(const Imm& other) noexcept
+    : Operand(other) {}
+
+  //! Creates a new signed immediate value, assigning the value to `val`.
+  constexpr explicit Imm(int64_t val) noexcept
+    : Operand(Globals::Init, kOpImm, 0, Support::unpackU32At0(val), Support::unpackU32At1(val)) {}
+
+  //! Creates a completely uninitialized immediate operand (unsafe).
+  inline explicit Imm(Globals::NoInit_) noexcept
+    : Operand(Globals::NoInit) {}
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  //! Assigns the value of the `other` operand to this immediate.
+  inline Imm& operator=(const Imm& other) noexcept { copyFrom(other); return *this; }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the immediate value as `int64_t`, which is the internal format Imm uses.
+  constexpr int64_t value() const noexcept {
+    return int64_t((uint64_t(_data[kDataImmValueHi]) << 32) | _data[kDataImmValueLo]);
+  }
+
+  //! Tests whether the immediate can be casted to 8-bit signed integer.
+  constexpr bool isInt8() const noexcept { return Support::isInt8(value()); }
+  //! Tests whether the immediate can be casted to 8-bit unsigned integer.
+  constexpr bool isUInt8() const noexcept { return Support::isUInt8(value()); }
+  //! Tests whether the immediate can be casted to 16-bit signed integer.
+  constexpr bool isInt16() const noexcept { return Support::isInt16(value()); }
+  //! Tests whether the immediate can be casted to 16-bit unsigned integer.
+  constexpr bool isUInt16() const noexcept { return Support::isUInt16(value()); }
+  //! Tests whether the immediate can be casted to 32-bit signed integer.
+  constexpr bool isInt32() const noexcept { return Support::isInt32(value()); }
+  //! Tests whether the immediate can be casted to 32-bit unsigned integer.
+  constexpr bool isUInt32() const noexcept { return _data[kDataImmValueHi] == 0; }
+
+  //! Returns the immediate value casted to `T`.
+  //!
+  //! The value is masked before it's casted to `T` so the returned value is
+  //! simply the representation of `T` considering the original value's lowest
+  //! bits.
+  template<typename T>
+  constexpr T valueAs() const noexcept {
+    return T(uint64_t(value()) & Support::allOnes<typename std::make_unsigned<T>::type>());
+  }
+
+  //! Returns low 32-bit signed integer.
+  constexpr int32_t int32Lo() const noexcept { return int32_t(_data[kDataImmValueLo]); }
+  //! Returns high 32-bit signed integer.
+  constexpr int32_t int32Hi() const noexcept { return int32_t(_data[kDataImmValueHi]); }
+  //! Returns low 32-bit unsigned integer.
+  constexpr uint32_t uint32Lo() const noexcept { return _data[kDataImmValueLo]; }
+  //! Returns high 32-bit unsigned integer.
+  constexpr uint32_t uint32Hi() const noexcept { return _data[kDataImmValueHi]; }
+
+  //! Sets immediate value to `val`, the value is casted to a signed 64-bit integer.
+  template<typename T>
+  inline void setValue(const T& val) noexcept {
+    int64_t val64 = int64_t(Support::asNormalized(val));
+    _data[kDataImmValueHi] = uint32_t(uint64_t(val64) >> 32);
+    _data[kDataImmValueLo] = uint32_t(uint64_t(val64) & 0xFFFFFFFFu);
+  }
+
+  //! Sets the immediate to the raw bit pattern of the given `double` value.
+  inline void setDouble(double d) noexcept {
+    setValue(Support::bitCast<uint64_t>(d));
+  }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Clones the immediate operand.
+  constexpr Imm clone() const noexcept { return Imm(*this); }
+
+  //! Sign-extends the low 8 bits of the immediate to the full 64-bit value.
+  inline void signExtend8Bits() noexcept { setValue(int64_t(valueAs<int8_t>())); }
+  //! Sign-extends the low 16 bits of the immediate to the full 64-bit value.
+  inline void signExtend16Bits() noexcept { setValue(int64_t(valueAs<int16_t>())); }
+  //! Sign-extends the low 32 bits of the immediate to the full 64-bit value.
+  inline void signExtend32Bits() noexcept { setValue(int64_t(valueAs<int32_t>())); }
+
+  //! Zero-extends the low 8 bits of the immediate to the full 64-bit value.
+  inline void zeroExtend8Bits() noexcept { setValue(valueAs<uint8_t>()); }
+  //! Zero-extends the low 16 bits of the immediate to the full 64-bit value.
+  inline void zeroExtend16Bits() noexcept { setValue(valueAs<uint16_t>()); }
+  //! Zero-extends the low 32 bits of the immediate (clears the high DWORD).
+  inline void zeroExtend32Bits() noexcept { _data[kDataImmValueHi] = 0u; }
+
+  //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+  ASMJIT_DEPRECATED("Use valueAs<int8_t>() instead")
+  inline int8_t i8() const noexcept { return valueAs<int8_t>(); }
+
+  ASMJIT_DEPRECATED("Use valueAs<uint8_t>() instead")
+  inline uint8_t u8() const noexcept { return valueAs<uint8_t>(); }
+
+  ASMJIT_DEPRECATED("Use valueAs<int16_t>() instead")
+  inline int16_t i16() const noexcept { return valueAs<int16_t>(); }
+
+  ASMJIT_DEPRECATED("Use valueAs<uint16_t>() instead")
+  inline uint16_t u16() const noexcept { return valueAs<uint16_t>(); }
+
+  ASMJIT_DEPRECATED("Use valueAs<int32_t>() instead")
+  inline int32_t i32() const noexcept { return valueAs<int32_t>(); }
+
+  ASMJIT_DEPRECATED("Use valueAs<uint32_t>() instead")
+  inline uint32_t u32() const noexcept { return valueAs<uint32_t>(); }
+
+  ASMJIT_DEPRECATED("Use value() instead")
+  inline int64_t i64() const noexcept { return value(); }
+
+  ASMJIT_DEPRECATED("Use valueAs<uint64_t>() instead")
+  inline uint64_t u64() const noexcept { return valueAs<uint64_t>(); }
+
+  ASMJIT_DEPRECATED("Use valueAs<intptr_t>() instead")
+  inline intptr_t iptr() const noexcept { return valueAs<intptr_t>(); }
+
+  ASMJIT_DEPRECATED("Use valueAs<uintptr_t>() instead")
+  inline uintptr_t uptr() const noexcept { return valueAs<uintptr_t>(); }
+
+  ASMJIT_DEPRECATED("Use int32Lo() instead")
+  inline int32_t i32Lo() const noexcept { return int32Lo(); }
+
+  ASMJIT_DEPRECATED("Use uint32Lo() instead")
+  inline uint32_t u32Lo() const noexcept { return uint32Lo(); }
+
+  ASMJIT_DEPRECATED("Use int32Hi() instead")
+  inline int32_t i32Hi() const noexcept { return int32Hi(); }
+
+  ASMJIT_DEPRECATED("Use uint32Hi() instead")
+  inline uint32_t u32Hi() const noexcept { return uint32Hi(); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+//! Creates a new immediate operand.
+//!
+//! Using `imm(x)` is much nicer than using `Imm(x)` as this is a template
+//! which can accept any integer including pointers and function pointers.
+//!
+//! \note Unsigned values are converted through `uint64_t` first, so they are
+//! zero-extended (not sign-extended) into the 64-bit immediate storage.
+template<typename T>
+static constexpr Imm imm(T val) noexcept {
+  return Imm(std::is_signed<T>::value ? int64_t(val) : int64_t(uint64_t(val)));
+}
+
+//! \}
+
+// ============================================================================
+// [asmjit::Globals::none]
+// ============================================================================
+
+namespace Globals {
+  //! \ingroup asmjit_assembler
+  //!
+  //! A default-constructed operand of `Operand_::kOpNone` type.
+  //!
+  //! \note Declared `static constexpr` in a header, so every translation unit
+  //! that includes it gets its own (identical) copy of the constant.
+  static constexpr const Operand none;
+}
+
+// ============================================================================
+// [asmjit::Support::ForwardOp]
+// ============================================================================
+
+//! \cond INTERNAL
+namespace Support {
+
+//! Primary template - forwards the value unchanged (used for non-integral
+//! types, which are expected to already be operands).
+template<typename T, bool IsIntegral>
+struct ForwardOpImpl {
+  static ASMJIT_INLINE const T& forward(const T& value) noexcept { return value; }
+};
+
+//! Specialization for integral types - wraps the value into an \ref Imm operand.
+template<typename T>
+struct ForwardOpImpl<T, true> {
+  static ASMJIT_INLINE Imm forward(const T& value) noexcept { return Imm(value); }
+};
+
+//! Either forwards operand T or returns a new operand for T if T is a type
+//! convertible to operand. At the moment this is only used to convert integers
+//! to \ref Imm operands.
+template<typename T>
+struct ForwardOp : public ForwardOpImpl<T, std::is_integral<typename std::decay<T>::type>::value> {};
+
+}
+
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_OPERAND_H_INCLUDED
diff --git a/client/asmjit/core/osutils.cpp b/client/asmjit/core/osutils.cpp
new file mode 100644
index 0000000..e2f34ef
--- /dev/null
+++ b/client/asmjit/core/osutils.cpp
@@ -0,0 +1,106 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/osutils.h"
+#include "../core/support.h"
+
+#if defined(_WIN32)
+ #include <atomic>
+#elif defined(__APPLE__)
+ #include <mach/mach_time.h>
+#else
+ #include <time.h>
+ #include <unistd.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::OSUtils - GetTickCount]
+// ============================================================================
+
+// Returns a monotonically increasing millisecond tick count, truncated to
+// 32 bits. Used for benchmarking; zero is returned on failure.
+uint32_t OSUtils::getTickCount() noexcept {
+#if defined(_WIN32)
+  enum HiResStatus : uint32_t {
+    kHiResUnknown = 0,
+    kHiResAvailable = 1,
+    kHiResNotAvailable = 2
+  };
+
+  // Lazily-detected availability of QueryPerformanceCounter() and its
+  // frequency converted to ticks-per-millisecond.
+  //
+  // NOTE(review): `_hiResFreq` is written without synchronization; concurrent
+  // first calls can race on it (each racing thread writes the same value, but
+  // this is formally a data race) - confirm this is acceptable.
+  static std::atomic<uint32_t> _hiResStatus(kHiResUnknown);
+  static volatile double _hiResFreq(0);
+
+  uint32_t status = _hiResStatus.load();
+  LARGE_INTEGER now, qpf;
+
+  if (status != kHiResNotAvailable && ::QueryPerformanceCounter(&now)) {
+    double freq = _hiResFreq;
+    if (status == kHiResUnknown) {
+      // Detects the availability of high resolution counter.
+      if (::QueryPerformanceFrequency(&qpf)) {
+        // Convert counts-per-second to counts-per-millisecond.
+        freq = double(qpf.QuadPart) / 1000.0;
+        _hiResFreq = freq;
+        _hiResStatus.compare_exchange_strong(status, kHiResAvailable);
+        status = kHiResAvailable;
+      }
+      else {
+        // High resolution not available.
+        _hiResStatus.compare_exchange_strong(status, kHiResNotAvailable);
+      }
+    }
+
+    if (status == kHiResAvailable)
+      return uint32_t(uint64_t(int64_t(double(now.QuadPart) / freq)) & 0xFFFFFFFFu);
+  }
+
+  // Bail to `GetTickCount()` if we cannot use high resolution.
+  return ::GetTickCount();
+#elif defined(__APPLE__)
+  // See Apple's QA1398. `mach_absolute_time()` returns time in Mach timebase
+  // units; multiplying by `numer / denom` converts it to nanoseconds.
+  //
+  // NOTE(review): lazy init of `_machTime` is not synchronized; racing first
+  // calls would store identical values, but confirm this is intended.
+  static mach_timebase_info_data_t _machTime;
+
+  uint32_t denom = _machTime.denom;
+  if (ASMJIT_UNLIKELY(!denom)) {
+    if (mach_timebase_info(&_machTime) != KERN_SUCCESS || !(denom = _machTime.denom))
+      return 0;
+  }
+
+  // Divide by 1000000 first to keep the multiplication by `numer` below from
+  // overflowing; the result after the timebase conversion is in milliseconds.
+  uint64_t t = mach_absolute_time() / 1000000u;
+  t = (t * _machTime.numer) / _machTime.denom;
+  return uint32_t(t & 0xFFFFFFFFu);
+#elif defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
+  struct timespec ts;
+  if (ASMJIT_UNLIKELY(clock_gettime(CLOCK_MONOTONIC, &ts) != 0))
+    return 0;
+
+  // Combine seconds and nanoseconds into milliseconds.
+  uint64_t t = (uint64_t(ts.tv_sec ) * 1000u) + (uint64_t(ts.tv_nsec) / 1000000u);
+  return uint32_t(t & 0xFFFFFFFFu);
+#else
+  #pragma message("asmjit::OSUtils::getTickCount() doesn't have implementation for the target OS.")
+  return 0;
+#endif
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/osutils.h b/client/asmjit/core/osutils.h
new file mode 100644
index 0000000..a469129
--- /dev/null
+++ b/client/asmjit/core/osutils.h
@@ -0,0 +1,87 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_OSUTILS_H_INCLUDED
+#define ASMJIT_CORE_OSUTILS_H_INCLUDED
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_utilities
+//! \{
+
+// ============================================================================
+// [asmjit::OSUtils]
+// ============================================================================
+
+//! Operating system utilities.
+namespace OSUtils {
+ //! Gets the current CPU tick count, used for benchmarking (1ms resolution).
+ ASMJIT_API uint32_t getTickCount() noexcept;
+};
+
+// ============================================================================
+// [asmjit::Lock]
+// ============================================================================
+
+//! \cond INTERNAL
+
//! Lock.
//!
//! Lock is internal, it cannot be used outside of AsmJit, however, its internal
//! layout is exposed as it's used by some other classes, which are public.
class Lock {
public:
  ASMJIT_NONCOPYABLE(Lock)

#if defined(_WIN32)
#pragma pack(push, 8)
  //! Mirrors the layout of Windows CRITICAL_SECTION so this public header
  //! doesn't have to pull in <windows.h>. The layout and alignment are
  //! static_assert-checked against the real CRITICAL_SECTION where the
  //! inline implementation lives (osutils_p.h).
  struct ASMJIT_MAY_ALIAS Handle {
    void* DebugInfo;
    long LockCount;
    long RecursionCount;
    void* OwningThread;
    void* LockSemaphore;
    unsigned long* SpinCount;
  };
  Handle _handle;
#pragma pack(pop)
#elif !defined(__EMSCRIPTEN__)
  //! POSIX builds use a pthread mutex directly.
  typedef pthread_mutex_t Handle;
  Handle _handle;
#endif
  // Emscripten (and other unsupported targets) have no handle at all; the
  // lock operations compile to no-ops there (see osutils_p.h).

  // Definitions of all four operations are platform-specific and live in
  // osutils_p.h.
  inline Lock() noexcept;
  inline ~Lock() noexcept;

  inline void lock() noexcept;
  inline void unlock() noexcept;
};
+//! \endcond
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_OSUTILS_H_INCLUDED
diff --git a/client/asmjit/core/osutils_p.h b/client/asmjit/core/osutils_p.h
new file mode 100644
index 0000000..31db308
--- /dev/null
+++ b/client/asmjit/core/osutils_p.h
@@ -0,0 +1,94 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_OSUTILS_P_H_INCLUDED
+#define ASMJIT_CORE_OSUTILS_P_H_INCLUDED
+
+#include "../core/osutils.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_utilities
+//! \{
+
+// ============================================================================
+// [asmjit::Lock]
+// ============================================================================
+
#if defined(_WIN32)

// Windows implementation - `Lock::Handle` re-declares CRITICAL_SECTION so the
// public header doesn't include <windows.h>; verify the layout matches here.
static_assert(sizeof(Lock::Handle) == sizeof(CRITICAL_SECTION), "asmjit::Lock::Handle layout must match CRITICAL_SECTION");
static_assert(alignof(Lock::Handle) == alignof(CRITICAL_SECTION), "asmjit::Lock::Handle alignment must match CRITICAL_SECTION");

inline Lock::Lock() noexcept { InitializeCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
inline Lock::~Lock() noexcept { DeleteCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
inline void Lock::lock() noexcept { EnterCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
inline void Lock::unlock() noexcept { LeaveCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }

#elif !defined(__EMSCRIPTEN__)

// PThread implementation.
#ifdef PTHREAD_MUTEX_INITIALIZER
// Prefer static initialization when the platform provides the macro - it
// avoids a library call in the constructor.
inline Lock::Lock() noexcept : _handle(PTHREAD_MUTEX_INITIALIZER) {}
#else
inline Lock::Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
#endif
inline Lock::~Lock() noexcept { pthread_mutex_destroy(&_handle); }
inline void Lock::lock() noexcept { pthread_mutex_lock(&_handle); }
inline void Lock::unlock() noexcept { pthread_mutex_unlock(&_handle); }

#else

// Dummy implementation - Emscripten or other unsupported platform; all lock
// operations compile to no-ops.
inline Lock::Lock() noexcept {}
inline Lock::~Lock() noexcept {}
inline void Lock::lock() noexcept {}
inline void Lock::unlock() noexcept {}

#endif
+
+// ============================================================================
+// [asmjit::LockGuard]
+// ============================================================================
+
+//! Scoped lock.
+class LockGuard {
+public:
+ ASMJIT_NONCOPYABLE(LockGuard)
+
+ Lock& _target;
+
+ inline LockGuard(Lock& target) noexcept
+ : _target(target) { _target.lock(); }
+ inline ~LockGuard() noexcept { _target.unlock(); }
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_OSUTILS_P_H_INCLUDED
+
diff --git a/client/asmjit/core/raassignment_p.h b/client/asmjit/core/raassignment_p.h
new file mode 100644
index 0000000..2618afd
--- /dev/null
+++ b/client/asmjit/core/raassignment_p.h
@@ -0,0 +1,399 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
+#define ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/radefs_p.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RAAssignment]
+// ============================================================================
+
//! Tracks the current mapping between work (virtual) registers and physical
//! registers during register allocation.
//!
//! The mapping is kept in both directions - `WorkToPhysMap` (workId -> physId)
//! and `PhysToWorkMap` (physId -> workId, plus per-group `assigned` and `dirty`
//! masks) - and the two views must stay in sync; `verify()` checks that in
//! debug builds. The maps themselves are allocated externally and attached via
//! `initMaps()`; this class never owns their memory.
class RAAssignment {
  ASMJIT_NONCOPYABLE(RAAssignment)

public:
  enum Ids : uint32_t {
    kPhysNone = 0xFF,             // Sentinel: work register has no physical register.
    kWorkNone = RAWorkReg::kIdNone // Sentinel: physical register holds no work register.
  };

  enum DirtyBit : uint32_t {
    kClean = 0,
    kDirty = 1
  };

  //! Describes the physical register layout and the work registers the
  //! assignment operates on. Must be initialized (`initLayout()`) before
  //! the maps are attached.
  struct Layout {
    inline void reset() noexcept {
      physIndex.reset();
      physCount.reset();
      physTotal = 0;
      workCount = 0;
      workRegs = nullptr;
    }

    RARegIndex physIndex; //!< Index of architecture registers per group.
    RARegCount physCount; //!< Count of architecture registers per group.
    uint32_t physTotal; //!< Count of physical registers of all groups.
    uint32_t workCount; //!< Count of work registers.
    const RAWorkRegs* workRegs; //!< WorkRegs data (vector).
  };

  //! PhysId -> WorkId mapping plus `assigned`/`dirty` masks. Variable-sized:
  //! allocated with `sizeOf(count)` bytes so `workIds[]` holds `count`
  //! entries (struct-hack trailing array, hence the `[1 /* ... */]`).
  struct PhysToWorkMap {
    //! Returns the allocation size (in bytes) of a map holding `count` regs.
    static inline size_t sizeOf(uint32_t count) noexcept {
      return sizeof(PhysToWorkMap) - sizeof(uint32_t) + size_t(count) * sizeof(uint32_t);
    }

    //! Clears both masks and unassigns all `count` physical registers.
    inline void reset(uint32_t count) noexcept {
      assigned.reset();
      dirty.reset();

      for (uint32_t i = 0; i < count; i++)
        workIds[i] = kWorkNone;
    }

    inline void copyFrom(const PhysToWorkMap* other, uint32_t count) noexcept {
      size_t size = sizeOf(count);
      memcpy(this, other, size);
    }

    RARegMask assigned; //!< Assigned registers (each bit represents one physical reg).
    RARegMask dirty; //!< Dirty registers (spill slot out of sync or no spill slot).
    uint32_t workIds[1 /* ... */]; //!< PhysReg to WorkReg mapping.
  };

  //! WorkId -> PhysId mapping, one byte per work register. Variable-sized
  //! like `PhysToWorkMap`.
  struct WorkToPhysMap {
    //! Returns the allocation size (in bytes) of a map holding `count` regs.
    static inline size_t sizeOf(uint32_t count) noexcept {
      return size_t(count) * sizeof(uint8_t);
    }

    inline void reset(uint32_t count) noexcept {
      for (uint32_t i = 0; i < count; i++)
        physIds[i] = kPhysNone;
    }

    inline void copyFrom(const WorkToPhysMap* other, uint32_t count) noexcept {
      size_t size = sizeOf(count);
      // `size` is zero when there are no work registers - skip the copy in
      // that case.
      if (ASMJIT_LIKELY(size))
        memcpy(this, other, size);
    }

    uint8_t physIds[1 /* ... */]; //!< WorkReg to PhysReg mapping
  };

  //! Physical registers layout.
  Layout _layout;
  //! WorkReg to PhysReg mapping.
  WorkToPhysMap* _workToPhysMap;
  //! PhysReg to WorkReg mapping and assigned/dirty bits.
  PhysToWorkMap* _physToWorkMap;
  //! Optimization to translate PhysRegs to WorkRegs faster.
  //! Per-group pointers into `_physToWorkMap->workIds` (offset by the
  //! group's base index) so lookups avoid recomputing the offset.
  uint32_t* _physToWorkIds[BaseReg::kGroupVirt];

  //! \name Construction & Destruction
  //! \{

  inline RAAssignment() noexcept {
    _layout.reset();
    resetMaps();
  }

  //! Initializes the layout from the physical register counts and the work
  //! register vector. Must be called while no maps are attached.
  inline void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
    // Layout must be initialized before data.
    ASMJIT_ASSERT(_physToWorkMap == nullptr);
    ASMJIT_ASSERT(_workToPhysMap == nullptr);

    _layout.physIndex.buildIndexes(physCount);
    _layout.physCount = physCount;
    // Total = base index of the last group + its register count.
    _layout.physTotal = uint32_t(_layout.physIndex[BaseReg::kGroupVirt - 1]) +
                        uint32_t(_layout.physCount[BaseReg::kGroupVirt - 1]) ;
    _layout.workCount = workRegs.size();
    _layout.workRegs = &workRegs;
  }

  //! Attaches externally allocated maps and recomputes the per-group
  //! `_physToWorkIds` shortcut pointers.
  inline void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
    _physToWorkMap = physToWorkMap;
    _workToPhysMap = workToPhysMap;
    for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
      _physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group);
  }

  //! Detaches the maps (does not free them - they are owned elsewhere).
  inline void resetMaps() noexcept {
    _physToWorkMap = nullptr;
    _workToPhysMap = nullptr;
    for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
      _physToWorkIds[group] = nullptr;
  }

  //! \}

  //! \name Accessors
  //! \{

  inline PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
  inline WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }

  inline RARegMask& assigned() noexcept { return _physToWorkMap->assigned; }
  inline const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; }
  inline uint32_t assigned(uint32_t group) const noexcept { return _physToWorkMap->assigned[group]; }

  inline RARegMask& dirty() noexcept { return _physToWorkMap->dirty; }
  inline const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; }
  inline uint32_t dirty(uint32_t group) const noexcept { return _physToWorkMap->dirty[group]; }

  //! Returns the physical register assigned to `workId`, or `kPhysNone`.
  //! (`group` is unused here - work ids are global across groups.)
  inline uint32_t workToPhysId(uint32_t group, uint32_t workId) const noexcept {
    DebugUtils::unused(group);
    ASMJIT_ASSERT(workId != kWorkNone);
    ASMJIT_ASSERT(workId < _layout.workCount);
    return _workToPhysMap->physIds[workId];
  }

  //! Returns the work register held by `physId` in `group`, or `kWorkNone`.
  inline uint32_t physToWorkId(uint32_t group, uint32_t physId) const noexcept {
    ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
    return _physToWorkIds[group][physId];
  }

  inline bool isPhysAssigned(uint32_t group, uint32_t physId) const noexcept {
    ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
    return Support::bitTest(_physToWorkMap->assigned[group], physId);
  }

  inline bool isPhysDirty(uint32_t group, uint32_t physId) const noexcept {
    ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
    return Support::bitTest(_physToWorkMap->dirty[group], physId);
  }

  //! \}

  //! \name Assignment
  //! \{

  // These are low-level allocation helpers that are used to update the current
  // mappings between physical and virt/work registers and also to update masks
  // that represent allocated and dirty registers. These functions don't emit
  // any code; they are only used to update and keep all mappings in sync.

  //! Assign [VirtReg/WorkReg] to a physical register.
  ASMJIT_INLINE void assign(uint32_t group, uint32_t workId, uint32_t physId, uint32_t dirty) noexcept {
    // Both sides must be free - assigning over an existing mapping is a bug.
    ASMJIT_ASSERT(workToPhysId(group, workId) == kPhysNone);
    ASMJIT_ASSERT(physToWorkId(group, physId) == kWorkNone);
    ASMJIT_ASSERT(!isPhysAssigned(group, physId));
    ASMJIT_ASSERT(!isPhysDirty(group, physId));

    _workToPhysMap->physIds[workId] = uint8_t(physId);
    _physToWorkIds[group][physId] = workId;

    uint32_t regMask = Support::bitMask(physId);
    _physToWorkMap->assigned[group] |= regMask;
    // Branchless: sets the dirty bit only when `dirty` is non-zero.
    _physToWorkMap->dirty[group] |= regMask & Support::bitMaskFromBool<uint32_t>(dirty);

    verify();
  }

  //! Reassign [VirtReg/WorkReg] to `dstPhysId` from `srcPhysId`.
  ASMJIT_INLINE void reassign(uint32_t group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
    ASMJIT_ASSERT(dstPhysId != srcPhysId);
    ASMJIT_ASSERT(workToPhysId(group, workId) == srcPhysId);
    ASMJIT_ASSERT(physToWorkId(group, srcPhysId) == workId);
    ASMJIT_ASSERT(isPhysAssigned(group, srcPhysId) == true);
    ASMJIT_ASSERT(isPhysAssigned(group, dstPhysId) == false);

    _workToPhysMap->physIds[workId] = uint8_t(dstPhysId);
    _physToWorkIds[group][srcPhysId] = kWorkNone;
    _physToWorkIds[group][dstPhysId] = workId;

    uint32_t srcMask = Support::bitMask(srcPhysId);
    uint32_t dstMask = Support::bitMask(dstPhysId);

    uint32_t dirty = (_physToWorkMap->dirty[group] & srcMask) != 0;
    uint32_t regMask = dstMask | srcMask;

    // XOR flips both bits at once: src was assigned (clears), dst was not
    // (sets). The same trick moves the dirty bit from src to dst, but only
    // when src was dirty (mask is zero otherwise).
    _physToWorkMap->assigned[group] ^= regMask;
    _physToWorkMap->dirty[group] ^= regMask & Support::bitMaskFromBool<uint32_t>(dirty);

    verify();
  }

  //! Swaps the physical registers of two assigned work registers.
  ASMJIT_INLINE void swap(uint32_t group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
    ASMJIT_ASSERT(aPhysId != bPhysId);
    ASMJIT_ASSERT(workToPhysId(group, aWorkId) == aPhysId);
    ASMJIT_ASSERT(workToPhysId(group, bWorkId) == bPhysId);
    ASMJIT_ASSERT(physToWorkId(group, aPhysId) == aWorkId);
    ASMJIT_ASSERT(physToWorkId(group, bPhysId) == bWorkId);
    ASMJIT_ASSERT(isPhysAssigned(group, aPhysId));
    ASMJIT_ASSERT(isPhysAssigned(group, bPhysId));

    _workToPhysMap->physIds[aWorkId] = uint8_t(bPhysId);
    _workToPhysMap->physIds[bWorkId] = uint8_t(aPhysId);
    _physToWorkIds[group][aPhysId] = bWorkId;
    _physToWorkIds[group][bPhysId] = aWorkId;

    uint32_t aMask = Support::bitMask(aPhysId);
    uint32_t bMask = Support::bitMask(bPhysId);

    // The `assigned` mask is unchanged (both stay assigned). The dirty bits
    // must swap: if exactly one of the two registers is dirty, flip both
    // bits; if both or neither are dirty, a swap is a no-op.
    uint32_t flipMask = Support::bitMaskFromBool<uint32_t>(
      ((_physToWorkMap->dirty[group] & aMask) != 0) ^
      ((_physToWorkMap->dirty[group] & bMask) != 0));

    uint32_t regMask = aMask | bMask;
    _physToWorkMap->dirty[group] ^= regMask & flipMask;

    verify();
  }

  //! Unassign [VirtReg/WorkReg] from a physical register.
  ASMJIT_INLINE void unassign(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
    ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
    ASMJIT_ASSERT(workToPhysId(group, workId) == physId);
    ASMJIT_ASSERT(physToWorkId(group, physId) == workId);
    ASMJIT_ASSERT(isPhysAssigned(group, physId));

    _workToPhysMap->physIds[workId] = kPhysNone;
    _physToWorkIds[group][physId] = kWorkNone;

    uint32_t regMask = Support::bitMask(physId);
    _physToWorkMap->assigned[group] &= ~regMask;
    _physToWorkMap->dirty[group] &= ~regMask;

    verify();
  }

  //! Clears the dirty bit of `physId` (e.g. after its value was spilled).
  inline void makeClean(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
    DebugUtils::unused(workId);
    uint32_t regMask = Support::bitMask(physId);
    _physToWorkMap->dirty[group] &= ~regMask;
  }

  //! Sets the dirty bit of `physId` (register modified, spill slot stale).
  inline void makeDirty(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
    DebugUtils::unused(workId);
    uint32_t regMask = Support::bitMask(physId);
    _physToWorkMap->dirty[group] |= regMask;
  }

  //! \}

  //! \name Utilities
  //! \{

  //! Swaps the attached maps with `other` (pointer swap only - the layout is
  //! assumed to be compatible).
  inline void swap(RAAssignment& other) noexcept {
    std::swap(_workToPhysMap, other._workToPhysMap);
    std::swap(_physToWorkMap, other._physToWorkMap);

    for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
      std::swap(_physToWorkIds[group], other._physToWorkIds[group]);
  }

  inline void copyFrom(const PhysToWorkMap* physToWorkMap, const WorkToPhysMap* workToPhysMap) noexcept {
    memcpy(_physToWorkMap, physToWorkMap, PhysToWorkMap::sizeOf(_layout.physTotal));
    memcpy(_workToPhysMap, workToPhysMap, WorkToPhysMap::sizeOf(_layout.workCount));
  }

  inline void copyFrom(const RAAssignment& other) noexcept {
    copyFrom(other.physToWorkMap(), other.workToPhysMap());
  }

  // Not really useful outside of debugging.
  bool equals(const RAAssignment& other) const noexcept {
    // Layout should always match.
    if (_layout.physIndex != other._layout.physIndex ||
        _layout.physCount != other._layout.physCount ||
        _layout.physTotal != other._layout.physTotal ||
        _layout.workCount != other._layout.workCount ||
        _layout.workRegs != other._layout.workRegs)
      return false;

    uint32_t physTotal = _layout.physTotal;
    uint32_t workCount = _layout.workCount;

    for (uint32_t physId = 0; physId < physTotal; physId++) {
      uint32_t thisWorkId = _physToWorkMap->workIds[physId];
      uint32_t otherWorkId = other._physToWorkMap->workIds[physId];
      if (thisWorkId != otherWorkId)
        return false;
    }

    for (uint32_t workId = 0; workId < workCount; workId++) {
      uint32_t thisPhysId = _workToPhysMap->physIds[workId];
      uint32_t otherPhysId = other._workToPhysMap->physIds[workId];
      if (thisPhysId != otherPhysId)
        return false;
    }

    if (_physToWorkMap->assigned != other._physToWorkMap->assigned ||
        _physToWorkMap->dirty != other._physToWorkMap->dirty )
      return false;

    return true;
  }

#if defined(ASMJIT_BUILD_DEBUG)
  //! Debug-only consistency check: every entry in one map must have a
  //! matching entry in the other direction. No-op in release builds.
  ASMJIT_NOINLINE void verify() noexcept {
    // Verify WorkToPhysMap.
    {
      for (uint32_t workId = 0; workId < _layout.workCount; workId++) {
        uint32_t physId = _workToPhysMap->physIds[workId];
        if (physId != kPhysNone) {
          const RAWorkReg* workReg = _layout.workRegs->at(workId);
          uint32_t group = workReg->group();
          ASMJIT_ASSERT(_physToWorkIds[group][physId] == workId);
        }
      }
    }

    // Verify PhysToWorkMap.
    {
      for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
        uint32_t physCount = _layout.physCount[group];
        for (uint32_t physId = 0; physId < physCount; physId++) {
          uint32_t workId = _physToWorkIds[group][physId];
          if (workId != kWorkNone) {
            ASMJIT_ASSERT(_workToPhysMap->physIds[workId] == physId);
          }
        }
      }
    }
  }
#else
  inline void verify() noexcept {}
#endif

  //! \}
};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
diff --git a/client/asmjit/core/rabuilders_p.h b/client/asmjit/core/rabuilders_p.h
new file mode 100644
index 0000000..e14d47f
--- /dev/null
+++ b/client/asmjit/core/rabuilders_p.h
@@ -0,0 +1,644 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
+#define ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/formatter.h"
+#include "../core/rapass_p.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RACFGBuilder]
+// ============================================================================
+
+template<typename This>
+class RACFGBuilder {
+public:
+ RAPass* _pass;
+ BaseCompiler* _cc;
+
+ RABlock* _curBlock;
+ RABlock* _retBlock;
+ FuncNode* _funcNode;
+ RARegsStats _blockRegStats;
+ uint32_t _exitLabelId;
+ ZoneVector<uint32_t> _sharedAssignmentsMap;
+
+ // Only used by logging, it's fine to be here to prevent more #ifdefs...
+ bool _hasCode;
+ RABlock* _lastLoggedBlock;
+
+#ifndef ASMJIT_NO_LOGGING
+ Logger* _logger;
+ uint32_t _logFlags;
+ StringTmp<512> _sb;
+#endif
+
+ static constexpr uint32_t kRootIndentation = 2;
+ static constexpr uint32_t kCodeIndentation = 4;
+
+ // NOTE: This is a bit hacky. There are some nodes which are processed twice
+ // (see `onBeforeInvoke()` and `onBeforeRet()`) as they can insert some nodes
+ // around them. Since we don't have any flags to mark these we just use their
+ // position that is [at that time] unassigned.
+ static constexpr uint32_t kNodePositionDidOnBefore = 0xFFFFFFFFu;
+
  //! Creates a CFG builder for `pass` and caches the objects it uses on every
  //! node (`cc`, logger). Logging members are only initialized when logging
  //! is compiled in; the rest of the state starts empty and is populated by
  //! `run()`.
  inline RACFGBuilder(RAPass* pass) noexcept
    : _pass(pass),
      _cc(pass->cc()),
      _curBlock(nullptr),
      _retBlock(nullptr),
      _funcNode(nullptr),
      _blockRegStats{},
      _exitLabelId(Globals::kInvalidId),
      _hasCode(false),
      _lastLoggedBlock(nullptr) {
#ifndef ASMJIT_NO_LOGGING
    _logger = _pass->debugLogger();
    // Always show node positions; inherit the remaining flags from the logger.
    _logFlags = FormatOptions::kFlagPositions;

    if (_logger)
      _logFlags |= _logger->flags();
#endif
  }
+
  //! Returns the associated `BaseCompiler` (cached from the pass).
  inline BaseCompiler* cc() const noexcept { return _cc; }
+
+ // --------------------------------------------------------------------------
+ // [Run]
+ // --------------------------------------------------------------------------
+
+ //! Called per function by an architecture-specific CFG builder.
+ Error run() noexcept {
+ log("[RAPass::BuildCFG]\n");
+ ASMJIT_PROPAGATE(prepare());
+
+ logNode(_funcNode, kRootIndentation);
+ logBlock(_curBlock, kRootIndentation);
+
+ BaseNode* node = _funcNode->next();
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ _curBlock->setFirst(node);
+ _curBlock->setLast(node);
+
+ RAInstBuilder ib;
+ ZoneVector<RABlock*> blocksWithUnknownJumps;
+
+ for (;;) {
+ BaseNode* next = node->next();
+ ASMJIT_ASSERT(node->position() == 0 || node->position() == kNodePositionDidOnBefore);
+
+ if (node->isInst()) {
+ // Instruction | Jump | Invoke | Return
+ // ------------------------------------
+
+ // Handle `InstNode`, `InvokeNode`, and `FuncRetNode`. All of them
+ // share the same interface that provides operands that have read/write
+ // semantics.
+ if (ASMJIT_UNLIKELY(!_curBlock)) {
+ // Unreachable code has to be removed, we cannot allocate registers
+ // in such code as we cannot do proper liveness analysis in such case.
+ removeNode(node);
+ node = next;
+ continue;
+ }
+
+ _hasCode = true;
+
+ if (node->isInvoke() || node->isFuncRet()) {
+ if (node->position() != kNodePositionDidOnBefore) {
+ // Call and Reg are complicated as they may insert some surrounding
+ // code around them. The simplest approach is to get the previous
+ // node, call the `onBefore()` handlers and then check whether
+ // anything changed and restart if so. By restart we mean that the
+ // current `node` would go back to the first possible inserted node
+ // by `onBeforeInvoke()` or `onBeforeRet()`.
+ BaseNode* prev = node->prev();
+
+ if (node->type() == BaseNode::kNodeInvoke)
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeInvoke(node->as<InvokeNode>()));
+ else
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeRet(node->as<FuncRetNode>()));
+
+ if (prev != node->prev()) {
+ // If this was the first node in the block and something was
+ // inserted before it then we have to update the first block.
+ if (_curBlock->first() == node)
+ _curBlock->setFirst(prev->next());
+
+ node->setPosition(kNodePositionDidOnBefore);
+ node = prev->next();
+
+ // `onBeforeInvoke()` and `onBeforeRet()` can only insert instructions.
+ ASMJIT_ASSERT(node->isInst());
+ }
+
+ // Necessary if something was inserted after `node`, but nothing before.
+ next = node->next();
+ }
+ else {
+ // Change the position back to its original value.
+ node->setPosition(0);
+ }
+ }
+
+ InstNode* inst = node->as<InstNode>();
+ logNode(inst, kCodeIndentation);
+
+ uint32_t controlType = BaseInst::kControlNone;
+ ib.reset();
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onInst(inst, controlType, ib));
+
+ if (node->isInvoke()) {
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onInvoke(inst->as<InvokeNode>(), ib));
+ }
+
+ if (node->isFuncRet()) {
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onRet(inst->as<FuncRetNode>(), ib));
+ controlType = BaseInst::kControlReturn;
+ }
+
+ if (controlType == BaseInst::kControlJump) {
+ uint32_t fixedRegCount = 0;
+ for (RATiedReg& tiedReg : ib) {
+ RAWorkReg* workReg = _pass->workRegById(tiedReg.workId());
+ if (workReg->group() == BaseReg::kGroupGp) {
+ uint32_t useId = tiedReg.useId();
+ if (useId == BaseReg::kIdBad) {
+ useId = _pass->_scratchRegIndexes[fixedRegCount++];
+ tiedReg.setUseId(useId);
+ }
+ _curBlock->addExitScratchGpRegs(Support::bitMask<uint32_t>(useId));
+ }
+ }
+ }
+
+ ASMJIT_PROPAGATE(_pass->assignRAInst(inst, _curBlock, ib));
+ _blockRegStats.combineWith(ib._stats);
+
+ if (controlType != BaseInst::kControlNone) {
+ // Support for conditional and unconditional jumps.
+ if (controlType == BaseInst::kControlJump || controlType == BaseInst::kControlBranch) {
+ _curBlock->setLast(node);
+ _curBlock->addFlags(RABlock::kFlagHasTerminator);
+ _curBlock->makeConstructed(_blockRegStats);
+
+ if (!(inst->instOptions() & BaseInst::kOptionUnfollow)) {
+ // Jmp/Jcc/Call/Loop/etc...
+ uint32_t opCount = inst->opCount();
+ const Operand* opArray = inst->operands();
+
+ // Cannot jump anywhere without operands.
+ if (ASMJIT_UNLIKELY(!opCount))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ if (opArray[opCount - 1].isLabel()) {
+ // Labels are easy for constructing the control flow.
+ LabelNode* labelNode;
+ ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, opArray[opCount - 1].as<Label>()));
+
+ RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
+ if (ASMJIT_UNLIKELY(!targetBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ targetBlock->makeTargetable();
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
+ }
+ else {
+ // Not a label - could be jump with reg/mem operand, which
+ // means that it can go anywhere. Such jumps must either be
+ // annotated so the CFG can be properly constructed, otherwise
+ // we assume the worst case - can jump to any basic block.
+ JumpAnnotation* jumpAnnotation = nullptr;
+ _curBlock->addFlags(RABlock::kFlagHasJumpTable);
+
+ if (inst->type() == BaseNode::kNodeJump)
+ jumpAnnotation = inst->as<JumpNode>()->annotation();
+
+ if (jumpAnnotation) {
+ uint64_t timestamp = _pass->nextTimestamp();
+ for (uint32_t id : jumpAnnotation->labelIds()) {
+ LabelNode* labelNode;
+ ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, id));
+
+ RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
+ if (ASMJIT_UNLIKELY(!targetBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ // Prevents adding basic-block successors multiple times.
+ if (!targetBlock->hasTimestamp(timestamp)) {
+ targetBlock->setTimestamp(timestamp);
+ targetBlock->makeTargetable();
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
+ }
+ }
+ ASMJIT_PROPAGATE(shareAssignmentAcrossSuccessors(_curBlock));
+ }
+ else {
+ ASMJIT_PROPAGATE(blocksWithUnknownJumps.append(_pass->allocator(), _curBlock));
+ }
+ }
+ }
+
+ if (controlType == BaseInst::kControlJump) {
+ // Unconditional jump makes the code after the jump unreachable,
+ // which will be removed instantly during the CFG construction;
+ // as we cannot allocate registers for instructions that are not
+ // part of any block. Of course we can leave these instructions
+ // as they are, however, that would only postpone the problem as
+ // assemblers can't encode instructions that use virtual registers.
+ _curBlock = nullptr;
+ }
+ else {
+ node = next;
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ RABlock* consecutiveBlock;
+ if (node->type() == BaseNode::kNodeLabel) {
+ if (node->hasPassData()) {
+ consecutiveBlock = node->passData<RABlock>();
+ }
+ else {
+ consecutiveBlock = _pass->newBlock(node);
+ if (ASMJIT_UNLIKELY(!consecutiveBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ node->setPassData<RABlock>(consecutiveBlock);
+ }
+ }
+ else {
+ consecutiveBlock = _pass->newBlock(node);
+ if (ASMJIT_UNLIKELY(!consecutiveBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ }
+
+ _curBlock->addFlags(RABlock::kFlagHasConsecutive);
+ ASMJIT_PROPAGATE(_curBlock->prependSuccessor(consecutiveBlock));
+
+ _curBlock = consecutiveBlock;
+ _hasCode = false;
+ _blockRegStats.reset();
+
+ if (_curBlock->isConstructed())
+ break;
+ ASMJIT_PROPAGATE(_pass->addBlock(consecutiveBlock));
+
+ logBlock(_curBlock, kRootIndentation);
+ continue;
+ }
+ }
+
+ if (controlType == BaseInst::kControlReturn) {
+ _curBlock->setLast(node);
+ _curBlock->makeConstructed(_blockRegStats);
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(_retBlock));
+
+ _curBlock = nullptr;
+ }
+ }
+ }
+ else if (node->type() == BaseNode::kNodeLabel) {
+ // Label - Basic-Block Management
+ // ------------------------------
+
+ if (!_curBlock) {
+ // If the current code is unreachable the label makes it reachable
+ // again. We may remove the whole block in the future if it's not
+ // referenced though.
+ _curBlock = node->passData<RABlock>();
+
+ if (_curBlock) {
+ // If the label has a block assigned we can either continue with
+ // it or skip it if the block has been constructed already.
+ if (_curBlock->isConstructed())
+ break;
+ }
+ else {
+ // No block assigned - create a new one and assign it.
+ _curBlock = _pass->newBlock(node);
+ if (ASMJIT_UNLIKELY(!_curBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ node->setPassData<RABlock>(_curBlock);
+ }
+
+ _curBlock->makeTargetable();
+ _hasCode = false;
+ _blockRegStats.reset();
+ ASMJIT_PROPAGATE(_pass->addBlock(_curBlock));
+ }
+ else {
+ if (node->hasPassData()) {
+ RABlock* consecutive = node->passData<RABlock>();
+ consecutive->makeTargetable();
+
+ if (_curBlock == consecutive) {
+ // The label currently processed is part of the current block. This
+ // is only possible for multiple labels that are right next to each
+ // other or labels that are separated by non-code nodes like directives
+ // and comments.
+ if (ASMJIT_UNLIKELY(_hasCode))
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+ else {
+ // Label makes the current block constructed. There is a chance that the
+ // Label is not used, but we don't know that at this point. In the worst
+ // case there would be two blocks next to each other, it's just fine.
+ ASMJIT_ASSERT(_curBlock->last() != node);
+ _curBlock->setLast(node->prev());
+ _curBlock->addFlags(RABlock::kFlagHasConsecutive);
+ _curBlock->makeConstructed(_blockRegStats);
+
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
+ ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
+
+ _curBlock = consecutive;
+ _hasCode = false;
+ _blockRegStats.reset();
+ }
+ }
+ else {
+ // First time we see this label.
+ if (_hasCode) {
+ // Cannot continue the current block if it already contains some
+ // code. We need to create a new block and make it a successor.
+ ASMJIT_ASSERT(_curBlock->last() != node);
+ _curBlock->setLast(node->prev());
+ _curBlock->addFlags(RABlock::kFlagHasConsecutive);
+ _curBlock->makeConstructed(_blockRegStats);
+
+ RABlock* consecutive = _pass->newBlock(node);
+ if (ASMJIT_UNLIKELY(!consecutive))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ consecutive->makeTargetable();
+
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
+ ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
+
+ _curBlock = consecutive;
+ _hasCode = false;
+ _blockRegStats.reset();
+ }
+
+ node->setPassData<RABlock>(_curBlock);
+ }
+ }
+
+ if (_curBlock && _curBlock != _lastLoggedBlock)
+ logBlock(_curBlock, kRootIndentation);
+ logNode(node, kRootIndentation);
+
+ // Unlikely: Assume that the exit label is reached only once per function.
+ if (ASMJIT_UNLIKELY(node->as<LabelNode>()->labelId() == _exitLabelId)) {
+ _curBlock->setLast(node);
+ _curBlock->makeConstructed(_blockRegStats);
+ ASMJIT_PROPAGATE(_pass->addExitBlock(_curBlock));
+
+ _curBlock = nullptr;
+ }
+ }
+ else {
+ // Other Nodes | Function Exit
+ // ---------------------------
+
+ logNode(node, kCodeIndentation);
+
+ if (node->type() == BaseNode::kNodeSentinel) {
+ if (node == _funcNode->endNode()) {
+ // Make sure we didn't flow here if this is the end of the function sentinel.
+ if (ASMJIT_UNLIKELY(_curBlock))
+ return DebugUtils::errored(kErrorInvalidState);
+ break;
+ }
+ }
+ else if (node->type() == BaseNode::kNodeFunc) {
+ // RAPass can only compile a single function at a time. If we
+ // encountered a function it must be the current one, bail if not.
+ if (ASMJIT_UNLIKELY(node != _funcNode))
+ return DebugUtils::errored(kErrorInvalidState);
+ // PASS if this is the first node.
+ }
+ else {
+ // PASS if this is a non-interesting or unknown node.
+ }
+ }
+
+ // Advance to the next node.
+ node = next;
+
+ // NOTE: We cannot encounter a NULL node, because every function must be
+ // terminated by a sentinel (`stop`) node. If we encountered a NULL node it
+ // means that something went wrong and this node list is corrupted; bail in
+ // such case.
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ if (_pass->hasDanglingBlocks())
+ return DebugUtils::errored(kErrorInvalidState);
+
+ for (RABlock* block : blocksWithUnknownJumps)
+ handleBlockWithUnknownJump(block);
+
+ return _pass->initSharedAssignments(_sharedAssignmentsMap);
+ }
+
+ // --------------------------------------------------------------------------
+ // [Prepare]
+ // --------------------------------------------------------------------------
+
+  //! Prepares the CFG builder of the current function.
+  //!
+  //! Creates the exit (return) block and the entry block, registers both with
+  //! the pass, and resets per-block state. Returns `kErrorOk` on success or
+  //! `kErrorOutOfMemory` if a block could not be allocated.
+  Error prepare() noexcept {
+    FuncNode* func = _pass->func();
+    BaseNode* node = nullptr;
+
+    // Create entry and exit blocks.
+    _funcNode = func;
+    // `node` is an out-parameter of `newBlockOrExistingAt()` - presumably the
+    // node the returned block starts at; used below to detect an empty body.
+    _retBlock = _pass->newBlockOrExistingAt(func->exitNode(), &node);
+
+    if (ASMJIT_UNLIKELY(!_retBlock))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    _retBlock->makeTargetable();
+    ASMJIT_PROPAGATE(_pass->addExitBlock(_retBlock));
+
+    if (node != func) {
+      // There is something between the function node and its exit label, so
+      // the entry block must be distinct from the return block.
+      _curBlock = _pass->newBlock();
+      if (ASMJIT_UNLIKELY(!_curBlock))
+        return DebugUtils::errored(kErrorOutOfMemory);
+    }
+    else {
+      // Function that has no code at all.
+      _curBlock = _retBlock;
+    }
+
+    // Reset everything we may need.
+    _blockRegStats.reset();
+    _exitLabelId = func->exitNode()->labelId();
+
+    // Initially we assume there is no code in the function body.
+    _hasCode = false;
+
+    return _pass->addBlock(_curBlock);
+  }
+
+ // --------------------------------------------------------------------------
+ // [Utilities]
+ // --------------------------------------------------------------------------
+
+  //! Called when a `node` is removed, e.g. because of a dead code elimination.
+  //! Logs the removal (tagged "<Removed>") and detaches the node from the
+  //! compiler's node list.
+  void removeNode(BaseNode* node) noexcept {
+    logNode(node, kRootIndentation, "<Removed>");
+    cc()->removeNode(node);
+  }
+
+  //! Handles block with unknown jump, which could be a jump to a jump table.
+  //!
+  //! If we encounter such block we basically insert all existing blocks as
+  //! successors except the function entry block and a natural successor, if
+  //! such block exists.
+  Error handleBlockWithUnknownJump(RABlock* block) noexcept {
+    RABlocks& blocks = _pass->blocks();
+    size_t blockCount = blocks.size();
+
+    // NOTE: Iterate from `1` as the first block is the entry block, we don't
+    // allow the entry to be a successor of any block.
+    RABlock* consecutive = block->consecutive();
+    for (size_t i = 1; i < blockCount; i++) {
+      RABlock* candidate = blocks[i];
+      if (candidate == consecutive || !candidate->isTargetable())
+        continue;
+      // BUG FIX: `appendSuccessor()` returns `Error` (it's wrapped in
+      // ASMJIT_PROPAGATE everywhere else in this file); the original code
+      // ignored the result here, silently losing a CFG edge on allocation
+      // failure. Propagate it instead.
+      ASMJIT_PROPAGATE(block->appendSuccessor(candidate));
+    }
+
+    return shareAssignmentAcrossSuccessors(block);
+  }
+
+  //! Assigns a single shared-assignment id to all successors of `block` except
+  //! its natural consecutive successor, so those targets can share a register
+  //! assignment on entry. Groups that were already formed are merged through
+  //! `_sharedAssignmentsMap`.
+  Error shareAssignmentAcrossSuccessors(RABlock* block) noexcept {
+    // Nothing to share with zero or one successor.
+    if (block->successors().size() <= 1)
+      return kErrorOk;
+
+    RABlock* consecutive = block->consecutive();
+    uint32_t sharedAssignmentId = Globals::kInvalidId;
+
+    for (RABlock* successor : block->successors()) {
+      if (successor == consecutive)
+        continue;
+
+      if (successor->hasSharedAssignmentId()) {
+        // Successor already belongs to a group - adopt its id the first time,
+        // otherwise record in the map that its group merges into ours.
+        if (sharedAssignmentId == Globals::kInvalidId)
+          sharedAssignmentId = successor->sharedAssignmentId();
+        else
+          _sharedAssignmentsMap[successor->sharedAssignmentId()] = sharedAssignmentId;
+      }
+      else {
+        // Lazily allocate a fresh id for the first ungrouped successor.
+        if (sharedAssignmentId == Globals::kInvalidId)
+          ASMJIT_PROPAGATE(newSharedAssignmentId(&sharedAssignmentId));
+        successor->setSharedAssignmentId(sharedAssignmentId);
+      }
+    }
+    return kErrorOk;
+  }
+
+  //! Allocates a new shared-assignment id (the next index into
+  //! `_sharedAssignmentsMap`) and stores it in `out`.
+  Error newSharedAssignmentId(uint32_t* out) noexcept {
+    // The map initially maps each id to itself; merges rewrite entries later.
+    uint32_t id = _sharedAssignmentsMap.size();
+    ASMJIT_PROPAGATE(_sharedAssignmentsMap.append(_pass->allocator(), id));
+
+    *out = id;
+    return kErrorOk;
+  }
+
+ // --------------------------------------------------------------------------
+ // [Logging]
+ // --------------------------------------------------------------------------
+
+#ifndef ASMJIT_NO_LOGGING
+  // Logging front-ends - cheap guards that forward to the `_log*()` workers
+  // only when a logger is attached.
+  template<typename... Args>
+  inline void log(const char* fmt, Args&&... args) noexcept {
+    if (_logger)
+      _logger->logf(fmt, std::forward<Args>(args)...);
+  }
+
+  inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
+    if (_logger)
+      _logBlock(block, indentation);
+  }
+
+  inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
+    if (_logger)
+      _logNode(node, indentation, action);
+  }
+
+  // Logs "{#<blockId>}" at `indentation` and remembers the block so callers
+  // (see the `_curBlock != _lastLoggedBlock` check) don't log it twice in a row.
+  void _logBlock(RABlock* block, uint32_t indentation) noexcept {
+    _sb.clear();
+    _sb.appendChars(' ', indentation);
+    _sb.appendFormat("{#%u}\n", block->blockId());
+    _logger->log(_sb);
+    _lastLoggedBlock = block;
+  }
+
+  // Formats a single node, optionally prefixed by `action` (e.g. "<Removed>").
+  void _logNode(BaseNode* node, uint32_t indentation, const char* action) noexcept {
+    _sb.clear();
+    _sb.appendChars(' ', indentation);
+    if (action) {
+      _sb.append(action);
+      _sb.append(' ');
+    }
+    Formatter::formatNode(_sb, _logFlags, cc(), node);
+    _sb.append('\n');
+    _logger->log(_sb);
+  }
+#else
+  // No-op stubs compiled when logging support is disabled; `unused()` silences
+  // unused-parameter warnings.
+  template<typename... Args>
+  inline void log(const char* fmt, Args&&... args) noexcept {
+    DebugUtils::unused(fmt);
+    DebugUtils::unused(std::forward<Args>(args)...);
+  }
+
+  inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
+    DebugUtils::unused(block, indentation);
+  }
+
+  inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
+    DebugUtils::unused(node, indentation, action);
+  }
+#endif
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
diff --git a/client/asmjit/core/radefs_p.h b/client/asmjit/core/radefs_p.h
new file mode 100644
index 0000000..b77ed1b
--- /dev/null
+++ b/client/asmjit/core/radefs_p.h
@@ -0,0 +1,1091 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RADEFS_P_H_INCLUDED
+#define ASMJIT_CORE_RADEFS_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/compiler.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [Logging]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+// Both macros reference a local variable named `logger` that must be in scope
+// at the expansion site; they expand to no-ops when logging is compiled out.
+# define ASMJIT_RA_LOG_FORMAT(...) \
+  do { \
+    if (logger) \
+      logger->logf(__VA_ARGS__); \
+  } while (0)
+// Executes an arbitrary statement list only when a logger is attached.
+# define ASMJIT_RA_LOG_COMPLEX(...) \
+  do { \
+    if (logger) { \
+      __VA_ARGS__ \
+    } \
+  } while (0)
+#else
+# define ASMJIT_RA_LOG_FORMAT(...) ((void)0)
+# define ASMJIT_RA_LOG_COMPLEX(...) ((void)0)
+#endif
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class RAPass;
+class RABlock;
+struct RAStackSlot;
+
+typedef ZoneVector<RABlock*> RABlocks;
+typedef ZoneVector<RAWorkReg*> RAWorkRegs;
+
+// ============================================================================
+// [asmjit::RAStrategy]
+// ============================================================================
+
+//! Register-allocation strategy selector.
+struct RAStrategy {
+  //! Selected strategy, see `StrategyType`.
+  uint8_t _type;
+
+  enum StrategyType : uint32_t {
+    //! Simple strategy.
+    kStrategySimple = 0,
+    //! Complex strategy.
+    kStrategyComplex = 1
+  };
+
+  inline RAStrategy() noexcept { reset(); }
+  // Zeroes the whole struct (trivially-copyable, so memset is safe).
+  inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+  inline uint32_t type() const noexcept { return _type; }
+  inline void setType(uint32_t type) noexcept { _type = uint8_t(type); }
+
+  inline bool isSimple() const noexcept { return _type == kStrategySimple; }
+  inline bool isComplex() const noexcept { return _type >= kStrategyComplex; }
+};
+
+// ============================================================================
+// [asmjit::RAArchTraits]
+// ============================================================================
+
+//! Architecture traits used by the register allocator, one flag-set per
+//! register group.
+struct RAArchTraits {
+  enum Flags : uint32_t {
+    //! Registers can be swapped by a single instruction.
+    kHasSwap = 0x01u
+  };
+
+  //! Per register-group flags, see `Flags`.
+  uint8_t _flags[BaseReg::kGroupVirt];
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline RAArchTraits() noexcept { reset(); }
+  inline void reset() noexcept { memset(_flags, 0, sizeof(_flags)); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Tests whether the given register `group` has `flag` set.
+  inline bool hasFlag(uint32_t group, uint32_t flag) const noexcept { return (_flags[group] & flag) != 0; }
+  //! Tests whether registers of `group` can be swapped by a single instruction.
+  inline bool hasSwap(uint32_t group) const noexcept { return hasFlag(group, kHasSwap); }
+
+  inline uint8_t& operator[](uint32_t group) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return _flags[group];
+  }
+
+  inline const uint8_t& operator[](uint32_t group) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return _flags[group];
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RARegCount]
+// ============================================================================
+
+//! Count of virtual or physical registers per group.
+//!
+//! \note This class uses 8-bit integers to represent counters, it's only used
+//! in places where this is sufficient - for example total count of machine's
+//! physical registers, count of virtual registers per instruction, etc. There
+//! is also `RALiveCount`, which uses 32-bit integers and is indeed much safer.
+struct RARegCount {
+  union {
+    //! Per-group counters, one byte per register group.
+    uint8_t _regs[4];
+    //! All four counters packed into a single 32-bit word.
+    uint32_t _packed;
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Resets all counters to zero.
+  inline void reset() noexcept { _packed = 0; }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  inline uint8_t& operator[](uint32_t index) noexcept {
+    ASMJIT_ASSERT(index < BaseReg::kGroupVirt);
+    return _regs[index];
+  }
+
+  inline const uint8_t& operator[](uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < BaseReg::kGroupVirt);
+    return _regs[index];
+  }
+
+  inline RARegCount& operator=(const RARegCount& other) noexcept = default;
+
+  inline bool operator==(const RARegCount& other) const noexcept { return _packed == other._packed; }
+  inline bool operator!=(const RARegCount& other) const noexcept { return _packed != other._packed; }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Returns the count of registers by the given register `group`.
+  inline uint32_t get(uint32_t group) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+    uint32_t shift = Support::byteShiftOfDWordStruct(group);
+    return (_packed >> shift) & uint32_t(0xFF);
+  }
+
+  //! Sets the register count by a register `group`.
+  inline void set(uint32_t group, uint32_t n) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    ASMJIT_ASSERT(n <= 0xFF);
+
+    uint32_t shift = Support::byteShiftOfDWordStruct(group);
+    // BUG FIX: the mask was previously `~uint32_t(0xFF << shift)`, which
+    // left-shifts the *signed* literal 0xFF; for shift == 24 this shifts into
+    // the sign bit (UB in C++11, implementation-defined conversion before
+    // C++20). Convert to uint32_t first so the shift is always well-defined.
+    _packed = (_packed & ~(uint32_t(0xFF) << shift)) + (n << shift);
+  }
+
+  //! Adds the register count by a register `group`.
+  inline void add(uint32_t group, uint32_t n = 1) noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    ASMJIT_ASSERT(0xFF - uint32_t(_regs[group]) >= n);
+
+    uint32_t shift = Support::byteShiftOfDWordStruct(group);
+    _packed += n << shift;
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RARegIndex]
+// ============================================================================
+
+//! Starting index of each register group within a packed register array,
+//! computed as prefix sums of per-group counts.
+struct RARegIndex : public RARegCount {
+  //! Build register indexes based on the given `count` of registers.
+  inline void buildIndexes(const RARegCount& count) noexcept {
+    // Prefix sums: group 0 starts at 0, group 1 at x, group 2 at y, group 3 at z.
+    uint32_t x = uint32_t(count._regs[0]);
+    uint32_t y = uint32_t(count._regs[1]) + x;
+    uint32_t z = uint32_t(count._regs[2]) + y;
+
+    ASMJIT_ASSERT(y <= 0xFF);
+    ASMJIT_ASSERT(z <= 0xFF);
+    _packed = Support::bytepack32_4x8(0, x, y, z);
+  }
+};
+
+// ============================================================================
+// [asmjit::RARegMask]
+// ============================================================================
+
+//! Registers mask.
+struct RARegMask {
+  //! One 32-bit register mask per register group.
+  uint32_t _masks[BaseReg::kGroupVirt];
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Copies all masks from `other`.
+  inline void init(const RARegMask& other) noexcept {
+    for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++)
+      _masks[i] = other._masks[i];
+  }
+
+  //! Reset all register masks to zero.
+  inline void reset() noexcept {
+    for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++)
+      _masks[i] = 0;
+  }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  inline RARegMask& operator=(const RARegMask& other) noexcept = default;
+
+  // NOTE(review): unlike the loops above, this hardcodes four groups
+  // (indexes 0..3) and so relies on BaseReg::kGroupVirt being exactly 4.
+  inline bool operator==(const RARegMask& other) const noexcept {
+    return _masks[0] == other._masks[0] &&
+           _masks[1] == other._masks[1] &&
+           _masks[2] == other._masks[2] &&
+           _masks[3] == other._masks[3] ;
+  }
+
+  inline bool operator!=(const RARegMask& other) const noexcept {
+    return !operator==(other);
+  }
+
+  inline uint32_t& operator[](uint32_t index) noexcept {
+    ASMJIT_ASSERT(index < BaseReg::kGroupVirt);
+    return _masks[index];
+  }
+
+  inline const uint32_t& operator[](uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < BaseReg::kGroupVirt);
+    return _masks[index];
+  }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Tests whether all register masks are zero (empty).
+  inline bool empty() const noexcept {
+    uint32_t m = 0;
+    for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++)
+      m |= _masks[i];
+    return m == 0;
+  }
+
+  //! Tests whether `group` has any bit of `mask` set.
+  inline bool has(uint32_t group, uint32_t mask = 0xFFFFFFFFu) const noexcept {
+    ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+    return (_masks[group] & mask) != 0;
+  }
+
+  //! Applies `Operator::op()` element-wise against the masks of `other`.
+  template<class Operator>
+  inline void op(const RARegMask& other) noexcept {
+    for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++)
+      _masks[i] = Operator::op(_masks[i], other._masks[i]);
+  }
+
+  //! Applies `Operator::op()` to a single group's mask with `input`.
+  template<class Operator>
+  inline void op(uint32_t group, uint32_t input) noexcept {
+    _masks[group] = Operator::op(_masks[group], input);
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RARegsStats]
+// ============================================================================
+
+//! Information associated with each instruction, propagated to blocks, loops,
+//! and the whole function. This information can be used to do minor decisions
+//! before the register allocator tries to do its job. For example to use fast
+//! register allocation inside a block or loop it cannot have clobbered and/or
+//! fixed registers, etc...
+struct RARegsStats {
+  //! Three 8-bit fields (used/fixed/clobbered), one bit per register group.
+  uint32_t _packed;
+
+  //! Bit offsets of the three fields within `_packed`.
+  enum Index : uint32_t {
+    kIndexUsed = 0,
+    kIndexFixed = 8,
+    kIndexClobbered = 16
+  };
+
+  //! Masks covering each whole 8-bit field of `_packed`.
+  enum Mask : uint32_t {
+    kMaskUsed = 0xFFu << kIndexUsed,
+    kMaskFixed = 0xFFu << kIndexFixed,
+    kMaskClobbered = 0xFFu << kIndexClobbered
+  };
+
+  inline void reset() noexcept { _packed = 0; }
+  //! Merges (ORs) the stats of `other` into this one.
+  inline void combineWith(const RARegsStats& other) noexcept { _packed |= other._packed; }
+
+  inline bool hasUsed() const noexcept { return (_packed & kMaskUsed) != 0u; }
+  inline bool hasUsed(uint32_t group) const noexcept { return (_packed & Support::bitMask(kIndexUsed + group)) != 0u; }
+  inline void makeUsed(uint32_t group) noexcept { _packed |= Support::bitMask(kIndexUsed + group); }
+
+  inline bool hasFixed() const noexcept { return (_packed & kMaskFixed) != 0u; }
+  inline bool hasFixed(uint32_t group) const noexcept { return (_packed & Support::bitMask(kIndexFixed + group)) != 0u; }
+  inline void makeFixed(uint32_t group) noexcept { _packed |= Support::bitMask(kIndexFixed + group); }
+
+  inline bool hasClobbered() const noexcept { return (_packed & kMaskClobbered) != 0u; }
+  inline bool hasClobbered(uint32_t group) const noexcept { return (_packed & Support::bitMask(kIndexClobbered + group)) != 0u; }
+  inline void makeClobbered(uint32_t group) noexcept { _packed |= Support::bitMask(kIndexClobbered + group); }
+};
+
+// ============================================================================
+// [asmjit::RALiveCount]
+// ============================================================================
+
+//! Count of live registers, per group.
+class RALiveCount {
+public:
+  //! Per-group counters; 32-bit on purpose (see the note on `RARegCount`).
+  uint32_t n[BaseReg::kGroupVirt];
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline RALiveCount() noexcept { reset(); }
+  inline RALiveCount(const RALiveCount& other) noexcept = default;
+
+  //! Copies all counters from `other`.
+  inline void init(const RALiveCount& other) noexcept {
+    for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+      n[group] = other.n[group];
+  }
+
+  //! Resets all counters to zero.
+  inline void reset() noexcept {
+    for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+      n[group] = 0;
+  }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  inline RALiveCount& operator=(const RALiveCount& other) noexcept = default;
+
+  inline uint32_t& operator[](uint32_t group) noexcept { return n[group]; }
+  inline const uint32_t& operator[](uint32_t group) const noexcept { return n[group]; }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Applies `Operator::op()` element-wise against the counters of `other`.
+  template<class Operator>
+  inline void op(const RALiveCount& other) noexcept {
+    for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+      n[group] = Operator::op(n[group], other.n[group]);
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RALiveInterval]
+// ============================================================================
+
+//! A live interval `[a, b)` used by liveness analysis (the `<=` comparisons in
+//! `RALiveSpans::intersects()` treat `b` as exclusive).
+struct RALiveInterval {
+  //! Start and end positions of the interval.
+  uint32_t a, b;
+
+  enum Misc : uint32_t {
+    //! Zero / unset position.
+    kNaN = 0,
+    //! Infinite end position - marks a still-open interval (see
+    //! `RALiveSpans::isOpen()`).
+    kInf = 0xFFFFFFFFu
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline RALiveInterval() noexcept : a(0), b(0) {}
+  inline RALiveInterval(uint32_t a, uint32_t b) noexcept : a(a), b(b) {}
+  inline RALiveInterval(const RALiveInterval& other) noexcept : a(other.a), b(other.b) {}
+
+  inline void init(uint32_t aVal, uint32_t bVal) noexcept {
+    a = aVal;
+    b = bVal;
+  }
+  inline void init(const RALiveInterval& other) noexcept { init(other.a, other.b); }
+  inline void reset() noexcept { init(0, 0); }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  inline RALiveInterval& operator=(const RALiveInterval& other) = default;
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! An interval is valid only when it's non-empty (a < b).
+  inline bool isValid() const noexcept { return a < b; }
+  inline uint32_t width() const noexcept { return b - a; }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RALiveSpan<T>]
+// ============================================================================
+
+//! A live interval (`RALiveInterval`) combined with an extra payload `T`.
+template<typename T>
+class RALiveSpan : public RALiveInterval, public T {
+public:
+  typedef T DataType;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline RALiveSpan() noexcept : RALiveInterval(), T() {}
+  // NOTE(review): this copy constructor copies only the interval part and
+  // default-constructs the `T` payload (T() instead of T(other)) - looks
+  // deliberate, but verify against callers before relying on it.
+  inline RALiveSpan(const RALiveSpan<T>& other) noexcept : RALiveInterval(other), T() {}
+  inline RALiveSpan(const RALiveInterval& interval, const T& data) noexcept : RALiveInterval(interval), T(data) {}
+  inline RALiveSpan(uint32_t a, uint32_t b) noexcept : RALiveInterval(a, b), T() {}
+  inline RALiveSpan(uint32_t a, uint32_t b, const T& data) noexcept : RALiveInterval(a, b), T(data) {}
+
+  //! Copies both the interval and the payload from `other`.
+  inline void init(const RALiveSpan<T>& other) noexcept {
+    RALiveInterval::init(static_cast<const RALiveInterval&>(other));
+    T::init(static_cast<const T&>(other));
+  }
+
+  //! Copies the interval from `span` and sets the payload to `data`.
+  inline void init(const RALiveSpan<T>& span, const T& data) noexcept {
+    RALiveInterval::init(static_cast<const RALiveInterval&>(span));
+    T::init(data);
+  }
+
+  inline void init(const RALiveInterval& interval, const T& data) noexcept {
+    RALiveInterval::init(interval);
+    T::init(data);
+  }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  // Assignment copies both parts (unlike the copy constructor above).
+  inline RALiveSpan& operator=(const RALiveSpan& other) {
+    init(other);
+    return *this;
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RALiveSpans<T>]
+// ============================================================================
+
+//! An ordered, non-overlapping sequence of `RALiveSpan<T>` spans.
+template<typename T>
+class RALiveSpans {
+public:
+  ASMJIT_NONCOPYABLE(RALiveSpans<T>)
+
+  typedef typename T::DataType DataType;
+  //! Spans, kept sorted by position (appended in increasing order).
+  ZoneVector<T> _data;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline RALiveSpans() noexcept : _data() {}
+
+  inline void reset() noexcept { _data.reset(); }
+  inline void release(ZoneAllocator* allocator) noexcept { _data.release(allocator); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  inline bool empty() const noexcept { return _data.empty(); }
+  inline uint32_t size() const noexcept { return _data.size(); }
+
+  inline T* data() noexcept { return _data.data(); }
+  inline const T* data() const noexcept { return _data.data(); }
+
+  //! Tests whether the last span is still open (its end is `kInf`).
+  inline bool isOpen() const noexcept {
+    uint32_t size = _data.size();
+    return size > 0 && _data[size - 1].b == RALiveInterval::kInf;
+  }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  inline void swap(RALiveSpans<T>& other) noexcept { _data.swap(other._data); }
+
+  //! Open the current live span.
+  ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end) noexcept {
+    bool wasOpen;
+    return openAt(allocator, start, end, wasOpen);
+  }
+
+  //! Opens a span at `[start, end]`: if the last span reaches `start` it is
+  //! extended in place (`wasOpen` reports whether it already extended past
+  //! `start`), otherwise a new span is appended.
+  ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end, bool& wasOpen) noexcept {
+    uint32_t size = _data.size();
+    wasOpen = false;
+
+    if (size > 0) {
+      T& last = _data[size - 1];
+      if (last.b >= start) {
+        wasOpen = last.b > start;
+        last.b = end;
+        return kErrorOk;
+      }
+    }
+
+    return _data.append(allocator, T(start, end));
+  }
+
+  //! Closes the last (open) span at `end`.
+  inline void closeAt(uint32_t end) noexcept {
+    ASMJIT_ASSERT(!empty());
+
+    uint32_t size = _data.size();
+    _data[size - 1].b = end;
+  }
+
+  //! Returns the sum of width of all spans.
+  //!
+  //! \note Don't overuse, this iterates over all spans so it's O(N).
+  //! It should be only called once and then cached.
+  ASMJIT_INLINE uint32_t width() const noexcept {
+    uint32_t width = 0;
+    for (const T& span : _data)
+      width += span.width();
+    return width;
+  }
+
+  inline T& operator[](uint32_t index) noexcept { return _data[index]; }
+  inline const T& operator[](uint32_t index) const noexcept { return _data[index]; }
+
+  inline bool intersects(const RALiveSpans<T>& other) const noexcept {
+    return intersects(*this, other);
+  }
+
+  //! Merges `x` and `y` into this (empty) container, attaching `yData` to
+  //! every span taken from `y`. Both inputs must be sorted and must not
+  //! overlap each other.
+  //!
+  //! NOTE(review): on overlap this returns the sentinel `0xFFFFFFFFu`, which
+  //! is not a regular `Error` code - presumably callers test for it
+  //! explicitly; verify before changing.
+  ASMJIT_INLINE Error nonOverlappingUnionOf(ZoneAllocator* allocator, const RALiveSpans<T>& x, const RALiveSpans<T>& y, const DataType& yData) noexcept {
+    uint32_t finalSize = x.size() + y.size();
+    ASMJIT_PROPAGATE(_data.reserve(allocator, finalSize));
+
+    T* dstPtr = _data.data();
+    const T* xSpan = x.data();
+    const T* ySpan = y.data();
+
+    const T* xEnd = xSpan + x.size();
+    const T* yEnd = ySpan + y.size();
+
+    // Loop until we have intersection or either `xSpan == xEnd` or `ySpan == yEnd`,
+    // which means that there is no intersection. We advance either `xSpan` or `ySpan`
+    // depending on their ranges.
+    if (xSpan != xEnd && ySpan != yEnd) {
+      uint32_t xa, ya;
+      xa = xSpan->a;
+      for (;;) {
+        // Emit all `y` spans that end before the current `x` span starts.
+        while (ySpan->b <= xa) {
+          dstPtr->init(*ySpan, yData);
+          dstPtr++;
+          if (++ySpan == yEnd)
+            goto Done;
+        }
+
+        // Emit all `x` spans that end before the current `y` span starts.
+        ya = ySpan->a;
+        while (xSpan->b <= ya) {
+          *dstPtr++ = *xSpan;
+          if (++xSpan == xEnd)
+            goto Done;
+        }
+
+        // We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`.
+        xa = xSpan->a;
+        if (ySpan->b > xa)
+          return 0xFFFFFFFFu;
+      }
+    }
+
+  Done:
+    // One input is exhausted - copy the remainder of the other verbatim.
+    while (xSpan != xEnd) {
+      *dstPtr++ = *xSpan++;
+    }
+
+    while (ySpan != yEnd) {
+      dstPtr->init(*ySpan, yData);
+      dstPtr++;
+      ySpan++;
+    }
+
+    _data._setEndPtr(dstPtr);
+    return kErrorOk;
+  }
+
+  //! Tests whether the (sorted) span lists `x` and `y` overlap anywhere.
+  static ASMJIT_INLINE bool intersects(const RALiveSpans<T>& x, const RALiveSpans<T>& y) noexcept {
+    const T* xSpan = x.data();
+    const T* ySpan = y.data();
+
+    const T* xEnd = xSpan + x.size();
+    const T* yEnd = ySpan + y.size();
+
+    // Loop until we have intersection or either `xSpan == xEnd` or `ySpan == yEnd`,
+    // which means that there is no intersection. We advance either `xSpan` or `ySpan`
+    // depending on their end positions.
+    if (xSpan == xEnd || ySpan == yEnd)
+      return false;
+
+    uint32_t xa, ya;
+    xa = xSpan->a;
+
+    for (;;) {
+      // Skip all `y` spans that end at or before the current `x` start.
+      while (ySpan->b <= xa)
+        if (++ySpan == yEnd)
+          return false;
+
+      // Skip all `x` spans that end at or before the current `y` start.
+      ya = ySpan->a;
+      while (xSpan->b <= ya)
+        if (++xSpan == xEnd)
+          return false;
+
+      // We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`.
+      xa = xSpan->a;
+      if (ySpan->b > xa)
+        return true;
+    }
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RALiveStats]
+// ============================================================================
+
+//! Statistics about a register liveness.
+class RALiveStats {
+public:
+  //! Width of the live range (computed elsewhere; see `RALiveSpans::width()`).
+  uint32_t _width;
+  //! Frequency of use - set by the pass; TODO confirm exact formula.
+  float _freq;
+  //! Allocation priority - set by the pass; TODO confirm exact formula.
+  float _priority;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline RALiveStats()
+    : _width(0),
+      _freq(0.0f),
+      _priority(0.0f) {}
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  inline uint32_t width() const noexcept { return _width; }
+  inline float freq() const noexcept { return _freq; }
+  inline float priority() const noexcept { return _priority; }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::LiveRegData]
+// ============================================================================
+
+//! Payload carried by `LiveRegSpan` - a register id, `BaseReg::kIdBad` when
+//! not (yet) assigned.
+struct LiveRegData {
+  uint32_t id;
+
+  inline explicit LiveRegData(uint32_t id = BaseReg::kIdBad) noexcept : id(id) {}
+  inline LiveRegData(const LiveRegData& other) noexcept : id(other.id) {}
+
+  inline void init(const LiveRegData& other) noexcept { id = other.id; }
+
+  inline bool operator==(const LiveRegData& other) const noexcept { return id == other.id; }
+  inline bool operator!=(const LiveRegData& other) const noexcept { return id != other.id; }
+};
+
+typedef RALiveSpan<LiveRegData> LiveRegSpan;
+typedef RALiveSpans<LiveRegSpan> LiveRegSpans;
+
+// ============================================================================
+// [asmjit::RATiedReg]
+// ============================================================================
+
+//! Tied register merges one or more register operands into a single entity. It
+//! contains information about its access (Read|Write) and allocation slots
+//! (Use|Out) that are used by the register allocator and liveness analysis.
+struct RATiedReg {
+ //! WorkReg id.
+ uint32_t _workId;
+ //! Allocation flags.
+ uint32_t _flags;
+ //! Registers where input {R|X} can be allocated to.
+ uint32_t _allocableRegs;
+ //! Indexes used to rewrite USE regs.
+ uint32_t _useRewriteMask;
+ //! Indexes used to rewrite OUT regs.
+ uint32_t _outRewriteMask;
+
+ union {
+ struct {
+ //! How many times the VirtReg is referenced in all operands.
+ uint8_t _refCount;
+ //! Physical register for use operation (ReadOnly / ReadWrite).
+ uint8_t _useId;
+ //! Physical register for out operation (WriteOnly).
+ uint8_t _outId;
+ //! Reserved for future use (padding).
+ uint8_t _rmSize;
+ };
+ //! Packed data.
+ uint32_t _packed;
+ };
+
+ //! Flags.
+ //!
+ //! Register access information is encoded in 4 flags in total:
+ //!
+ //! - `kRead` - Register is Read (ReadWrite if combined with `kWrite`).
+ //! - `kWrite` - Register is Written (ReadWrite if combined with `kRead`).
+ //! - `kUse` - Encoded as Read or ReadWrite.
+ //! - `kOut` - Encoded as WriteOnly.
+ //!
+ //! Let's describe all of these on two X86 instructions:
+ //!
+ //! - ADD x{R|W|Use}, x{R|Use} -> {x:R|W|Use }
+ //! - LEA x{ W|Out}, [x{R|Use} + x{R|Out}] -> {x:R|W|Use|Out }
+ //! - ADD x{R|W|Use}, y{R|Use} -> {x:R|W|Use y:R|Use}
+ //! - LEA x{ W|Out}, [x{R|Use} + y{R|Out}] -> {x:R|W|Use|Out y:R|Use}
+ //!
+ //! It should be obvious from the example above how these flags get created.
+ //! Each operand contains READ/WRITE information, which is then merged to
+//! RATiedReg's flags. However, we also need to represent the possibility to
+//! see the operation as two independent operations - USE and OUT, because
+ //! the register allocator will first allocate USE registers, and then assign
+ //! OUT registers independently of USE registers.
+ enum Flags : uint32_t {
+ kRead = OpRWInfo::kRead, //!< Register is read.
+ kWrite = OpRWInfo::kWrite, //!< Register is written.
+ kRW = OpRWInfo::kRW, //!< Register both read and written.
+
+ kUse = 0x00000100u, //!< Register has a USE slot (read/rw).
+ kOut = 0x00000200u, //!< Register has an OUT slot (write-only).
+ kUseRM = 0x00000400u, //!< Register in USE slot can be patched to memory.
+ kOutRM = 0x00000800u, //!< Register in OUT slot can be patched to memory.
+
+ kUseFixed = 0x00001000u, //!< Register has a fixed USE slot.
+ kOutFixed = 0x00002000u, //!< Register has a fixed OUT slot.
+ kUseDone = 0x00004000u, //!< Register USE slot has been allocated.
+ kOutDone = 0x00008000u, //!< Register OUT slot has been allocated.
+
+ kDuplicate = 0x00010000u, //!< Register must be duplicated (function call only).
+ kLast = 0x00020000u, //!< Last occurrence of this VirtReg in basic block.
+ kKill = 0x00040000u, //!< Kill this VirtReg after use.
+
+ // Architecture specific flags are used during RATiedReg building to ensure
+ // that architecture-specific constraints are handled properly. These flags
+ // are not really needed after RATiedReg[] is built and copied to `RAInst`.
+
+ kX86Gpb = 0x01000000u //!< This RATiedReg references GPB-LO or GPB-HI.
+ };
+
+ static_assert(kRead == 0x1, "RATiedReg::kRead flag must be 0x1");
+ static_assert(kWrite == 0x2, "RATiedReg::kWrite flag must be 0x2");
+ static_assert(kRW == 0x3, "RATiedReg::kRW combination must be 0x3");
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_INLINE void init(uint32_t workId, uint32_t flags, uint32_t allocableRegs, uint32_t useId, uint32_t useRewriteMask, uint32_t outId, uint32_t outRewriteMask, uint32_t rmSize = 0) noexcept {
+ _workId = workId;
+ _flags = flags;
+ _allocableRegs = allocableRegs;
+ _useRewriteMask = useRewriteMask;
+ _outRewriteMask = outRewriteMask;
+ _refCount = 1;
+ _useId = uint8_t(useId);
+ _outId = uint8_t(outId);
+ _rmSize = uint8_t(rmSize);
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline RATiedReg& operator=(const RATiedReg& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the associated WorkReg id.
+ inline uint32_t workId() const noexcept { return _workId; }
+
+ //! Checks if the given `flag` is set, see `Flags`.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+
+ //! Returns TiedReg flags, see `RATiedReg::Flags`.
+ inline uint32_t flags() const noexcept { return _flags; }
+ //! Adds tied register flags, see `Flags`.
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+
+ //! Tests whether the register is read (writes `true` also if it's Read/Write).
+ inline bool isRead() const noexcept { return hasFlag(kRead); }
+ //! Tests whether the register is written (writes `true` also if it's Read/Write).
+ inline bool isWrite() const noexcept { return hasFlag(kWrite); }
+ //! Tests whether the register is read only.
+ inline bool isReadOnly() const noexcept { return (_flags & kRW) == kRead; }
+ //! Tests whether the register is write only.
+ inline bool isWriteOnly() const noexcept { return (_flags & kRW) == kWrite; }
+ //! Tests whether the register is read and written.
+ inline bool isReadWrite() const noexcept { return (_flags & kRW) == kRW; }
+
+ //! Tests whether the tied register has use operand (Read/ReadWrite).
+ inline bool isUse() const noexcept { return hasFlag(kUse); }
+ //! Tests whether the tied register has out operand (Write).
+ inline bool isOut() const noexcept { return hasFlag(kOut); }
+
+ //! Tests whether the USE slot can be patched to memory operand.
+ inline bool hasUseRM() const noexcept { return hasFlag(kUseRM); }
+ //! Tests whether the OUT slot can be patched to memory operand.
+ inline bool hasOutRM() const noexcept { return hasFlag(kOutRM); }
+
+ inline uint32_t rmSize() const noexcept { return _rmSize; }
+
+ inline void makeReadOnly() noexcept {
+ _flags = (_flags & ~(kOut | kWrite)) | kUse;
+ _useRewriteMask |= _outRewriteMask;
+ _outRewriteMask = 0;
+ }
+
+ inline void makeWriteOnly() noexcept {
+ _flags = (_flags & ~(kUse | kRead)) | kOut;
+ _outRewriteMask |= _useRewriteMask;
+ _useRewriteMask = 0;
+ }
+
+ //! Tests whether the register would duplicate.
+ inline bool isDuplicate() const noexcept { return hasFlag(kDuplicate); }
+
+ //! Tests whether the register (and the instruction it's part of) appears last in the basic block.
+ inline bool isLast() const noexcept { return hasFlag(kLast); }
+ //! Tests whether the register should be killed after USEd and/or OUTed.
+ inline bool isKill() const noexcept { return hasFlag(kKill); }
+
+ //! Tests whether the register is OUT or KILL (used internally by local register allocator).
+ inline bool isOutOrKill() const noexcept { return hasFlag(kOut | kKill); }
+
+ inline uint32_t allocableRegs() const noexcept { return _allocableRegs; }
+
+ inline uint32_t refCount() const noexcept { return _refCount; }
+ inline void addRefCount(uint32_t n = 1) noexcept { _refCount = uint8_t(_refCount + n); }
+
+ //! Tests whether the register must be allocated to a fixed physical register before it's used.
+ inline bool hasUseId() const noexcept { return _useId != BaseReg::kIdBad; }
+ //! Tests whether the register must be allocated to a fixed physical register before it's written.
+ inline bool hasOutId() const noexcept { return _outId != BaseReg::kIdBad; }
+
+ //! Returns a physical register id used for 'use' operation.
+ inline uint32_t useId() const noexcept { return _useId; }
+ //! Returns a physical register id used for 'out' operation.
+ inline uint32_t outId() const noexcept { return _outId; }
+
+ inline uint32_t useRewriteMask() const noexcept { return _useRewriteMask; }
+ inline uint32_t outRewriteMask() const noexcept { return _outRewriteMask; }
+
+ //! Sets a physical register used for 'use' operation.
+ inline void setUseId(uint32_t index) noexcept { _useId = uint8_t(index); }
+ //! Sets a physical register used for 'out' operation.
+ inline void setOutId(uint32_t index) noexcept { _outId = uint8_t(index); }
+
+ inline bool isUseDone() const noexcept { return hasFlag(kUseDone); }
+ inline bool isOutDone() const noexcept { return hasFlag(kUseDone); }
+
+ inline void markUseDone() noexcept { addFlags(kUseDone); }
+ inline void markOutDone() noexcept { addFlags(kUseDone); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RAWorkReg]
+// ============================================================================
+
+class RAWorkReg {
+public:
+ ASMJIT_NONCOPYABLE(RAWorkReg)
+
+ //! RAPass specific ID used during analysis and allocation.
+ uint32_t _workId;
+ //! Copy of ID used by `VirtReg`.
+ uint32_t _virtId;
+
+ //! Permanent association with `VirtReg`.
+ VirtReg* _virtReg;
+ //! Temporary association with `RATiedReg`.
+ RATiedReg* _tiedReg;
+ //! Stack slot associated with the register.
+ RAStackSlot* _stackSlot;
+
+ //! Copy of a signature used by `VirtReg`.
+ RegInfo _info;
+ //! RAPass specific flags used during analysis and allocation.
+ uint32_t _flags;
+ //! IDs of all physical registers this WorkReg has been allocated to.
+ uint32_t _allocatedMask;
+ //! IDs of all physical registers that are clobbered during the lifetime of
+ //! this WorkReg.
+ //!
+ //! This mask should be updated by `RAPass::buildLiveness()`, because it's
+ //! global and should be updated after unreachable code has been removed.
+ uint32_t _clobberSurvivalMask;
+
+ //! A byte-mask where each bit represents one valid byte of the register.
+ uint64_t _regByteMask;
+
+ //! Argument index (or `kNoArgIndex` if none).
+ uint8_t _argIndex;
+ //! Global home register ID (if any, assigned by RA).
+ uint8_t _homeRegId;
+ //! Global hint register ID (provided by RA or user).
+ uint8_t _hintRegId;
+
+ //! Live spans of the `VirtReg`.
+ LiveRegSpans _liveSpans;
+ //! Live statistics.
+ RALiveStats _liveStats;
+
+ //! All nodes that read/write this VirtReg/WorkReg.
+ ZoneVector<BaseNode*> _refs;
+ //! All nodes that write to this VirtReg/WorkReg.
+ ZoneVector<BaseNode*> _writes;
+
+ enum Ids : uint32_t {
+ kIdNone = 0xFFFFFFFFu
+ };
+
+ enum Flags : uint32_t {
+ //! Has been coalesced to another WorkReg.
+ kFlagCoalesced = 0x00000001u,
+ //! Stack slot has to be allocated.
+ kFlagStackUsed = 0x00000002u,
+ //! Stack allocation is preferred.
+ kFlagStackPreferred = 0x00000004u,
+ //! Marked for stack argument reassignment.
+ kFlagStackArgToStack = 0x00000008u
+ };
+
+ enum ArgIndex : uint32_t {
+ kNoArgIndex = 0xFFu
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_INLINE RAWorkReg(VirtReg* vReg, uint32_t workId) noexcept
+ : _workId(workId),
+ _virtId(vReg->id()),
+ _virtReg(vReg),
+ _tiedReg(nullptr),
+ _stackSlot(nullptr),
+ _info(vReg->info()),
+ _flags(0),
+ _allocatedMask(0),
+ _clobberSurvivalMask(0),
+ _regByteMask(0),
+ _argIndex(kNoArgIndex),
+ _homeRegId(BaseReg::kIdBad),
+ _hintRegId(BaseReg::kIdBad),
+ _liveSpans(),
+ _liveStats(),
+ _refs() {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t workId() const noexcept { return _workId; }
+ inline uint32_t virtId() const noexcept { return _virtId; }
+
+ inline const char* name() const noexcept { return _virtReg->name(); }
+ inline uint32_t nameSize() const noexcept { return _virtReg->nameSize(); }
+
+ inline uint32_t typeId() const noexcept { return _virtReg->typeId(); }
+
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ inline uint32_t flags() const noexcept { return _flags; }
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+
+ inline bool isStackUsed() const noexcept { return hasFlag(kFlagStackUsed); }
+ inline void markStackUsed() noexcept { addFlags(kFlagStackUsed); }
+
+ inline bool isStackPreferred() const noexcept { return hasFlag(kFlagStackPreferred); }
+ inline void markStackPreferred() noexcept { addFlags(kFlagStackPreferred); }
+
+ //! Tests whether this RAWorkReg has been coalesced with another one (cannot be used anymore).
+ inline bool isCoalesced() const noexcept { return hasFlag(kFlagCoalesced); }
+
+ inline const RegInfo& info() const noexcept { return _info; }
+ inline uint32_t group() const noexcept { return _info.group(); }
+ inline uint32_t signature() const noexcept { return _info.signature(); }
+
+ inline VirtReg* virtReg() const noexcept { return _virtReg; }
+
+ inline bool hasTiedReg() const noexcept { return _tiedReg != nullptr; }
+ inline RATiedReg* tiedReg() const noexcept { return _tiedReg; }
+ inline void setTiedReg(RATiedReg* tiedReg) noexcept { _tiedReg = tiedReg; }
+ inline void resetTiedReg() noexcept { _tiedReg = nullptr; }
+
+ inline bool hasStackSlot() const noexcept { return _stackSlot != nullptr; }
+ inline RAStackSlot* stackSlot() const noexcept { return _stackSlot; }
+
+ inline LiveRegSpans& liveSpans() noexcept { return _liveSpans; }
+ inline const LiveRegSpans& liveSpans() const noexcept { return _liveSpans; }
+
+ inline RALiveStats& liveStats() noexcept { return _liveStats; }
+ inline const RALiveStats& liveStats() const noexcept { return _liveStats; }
+
+ inline bool hasArgIndex() const noexcept { return _argIndex != kNoArgIndex; }
+ inline uint32_t argIndex() const noexcept { return _argIndex; }
+ inline void setArgIndex(uint32_t index) noexcept { _argIndex = uint8_t(index); }
+
+ inline bool hasHomeRegId() const noexcept { return _homeRegId != BaseReg::kIdBad; }
+ inline uint32_t homeRegId() const noexcept { return _homeRegId; }
+ inline void setHomeRegId(uint32_t physId) noexcept { _homeRegId = uint8_t(physId); }
+
+ inline bool hasHintRegId() const noexcept { return _hintRegId != BaseReg::kIdBad; }
+ inline uint32_t hintRegId() const noexcept { return _hintRegId; }
+ inline void setHintRegId(uint32_t physId) noexcept { _hintRegId = uint8_t(physId); }
+
+ inline uint32_t allocatedMask() const noexcept { return _allocatedMask; }
+ inline void addAllocatedMask(uint32_t mask) noexcept { _allocatedMask |= mask; }
+
+ inline uint32_t clobberSurvivalMask() const noexcept { return _clobberSurvivalMask; }
+ inline void addClobberSurvivalMask(uint32_t mask) noexcept { _clobberSurvivalMask |= mask; }
+
+ inline uint64_t regByteMask() const noexcept { return _regByteMask; }
+ inline void setRegByteMask(uint64_t mask) noexcept { _regByteMask = mask; }
+
+ //! \}
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RADEFS_P_H_INCLUDED
diff --git a/client/asmjit/core/ralocal.cpp b/client/asmjit/core/ralocal.cpp
new file mode 100644
index 0000000..e3a8a97
--- /dev/null
+++ b/client/asmjit/core/ralocal.cpp
@@ -0,0 +1,1039 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/ralocal_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Utilities]
+// ============================================================================
+
+static ASMJIT_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept {
+ for (size_t i = 0; i < count; i++)
+ if (tiedRegs[i].workId() == workId)
+ return &tiedRegs[i];
+ return nullptr;
+}
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Init / Reset]
+// ============================================================================
+
+Error RALocalAllocator::init() noexcept {
+ PhysToWorkMap* physToWorkMap;
+ WorkToPhysMap* workToPhysMap;
+
+ physToWorkMap = _pass->newPhysToWorkMap();
+ workToPhysMap = _pass->newWorkToPhysMap();
+ if (!physToWorkMap || !workToPhysMap)
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ _curAssignment.initLayout(_pass->_physRegCount, _pass->workRegs());
+ _curAssignment.initMaps(physToWorkMap, workToPhysMap);
+
+ physToWorkMap = _pass->newPhysToWorkMap();
+ workToPhysMap = _pass->newWorkToPhysMap();
+ if (!physToWorkMap || !workToPhysMap)
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ _tmpAssignment.initLayout(_pass->_physRegCount, _pass->workRegs());
+ _tmpAssignment.initMaps(physToWorkMap, workToPhysMap);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Assignment]
+// ============================================================================
+
// Builds the initial register assignment of the entry block from function
// arguments. Runs up to two iterations: the first one honors each argument's
// home register (if any); if some argument couldn't be assigned that way,
// `numIter` is bumped to 2 and the second iteration assigns any free register
// or falls back to a stack slot.
Error RALocalAllocator::makeInitialAssignment() noexcept {
  FuncNode* func = _pass->func();
  RABlock* entry = _pass->entryBlock();

  ZoneBitVector& liveIn = entry->liveIn();
  uint32_t argCount = func->argCount();
  uint32_t numIter = 1;

  for (uint32_t iter = 0; iter < numIter; iter++) {
    for (uint32_t i = 0; i < argCount; i++) {
      // Unassigned argument.
      VirtReg* virtReg = func->arg(i);
      if (!virtReg) continue;

      // Unreferenced argument.
      RAWorkReg* workReg = virtReg->workReg();
      if (!workReg) continue;

      // Overwritten argument - not live on entry, so nothing to assign.
      uint32_t workId = workReg->workId();
      if (!liveIn.bitAt(workId))
        continue;

      // Skip if already assigned (e.g. by the first iteration).
      uint32_t group = workReg->group();
      if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone)
        continue;

      uint32_t allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group);
      if (iter == 0) {
        // First iteration: Try to allocate to home RegId.
        if (workReg->hasHomeRegId()) {
          uint32_t physId = workReg->homeRegId();
          if (Support::bitTest(allocableRegs, physId)) {
            _curAssignment.assign(group, workId, physId, true);
            _pass->_argsAssignment.assignReg(i, workReg->info().type(), physId, workReg->typeId());
            continue;
          }
        }

        // This argument wasn't handled; request a second iteration.
        numIter = 2;
      }
      else {
        // Second iteration: Pick any unassigned register, otherwise assign to stack.
        if (allocableRegs) {
          uint32_t physId = Support::ctz(allocableRegs);
          _curAssignment.assign(group, workId, physId, true);
          _pass->_argsAssignment.assignReg(i, workReg->info().type(), physId, workReg->typeId());
        }
        else {
          // This register will definitely need stack, create the slot now and assign also `argIndex`
          // to it. We will patch `_argsAssignment` later after RAStackAllocator finishes.
          RAStackSlot* slot = _pass->getOrCreateStackSlot(workReg);
          if (ASMJIT_UNLIKELY(!slot))
            return DebugUtils::errored(kErrorOutOfMemory);

          // This means STACK_ARG may be moved to STACK.
          workReg->addFlags(RAWorkReg::kFlagStackArgToStack);
          _pass->_numStackArgsToStackSlots++;
        }
      }
    }
  }

  return kErrorOk;
}
+
+Error RALocalAllocator::replaceAssignment(
+ const PhysToWorkMap* physToWorkMap,
+ const WorkToPhysMap* workToPhysMap) noexcept {
+
+ _curAssignment.copyFrom(physToWorkMap, workToPhysMap);
+ return kErrorOk;
+}
+
// Transforms the current assignment (`_curAssignment`) into the destination
// assignment described by `dstPhysToWorkMap` / `dstWorkToPhysMap` by emitting
// KILL / SPILL / MOVE / SWAP / LOAD operations. `liveIn` restricts what must
// survive, `dstReadOnly` forbids flipping DST registers from CLEAN to DIRTY,
// and `tryMode` makes the switch best-effort.
Error RALocalAllocator::switchToAssignment(
  PhysToWorkMap* dstPhysToWorkMap,
  WorkToPhysMap* dstWorkToPhysMap,
  const ZoneBitVector& liveIn,
  bool dstReadOnly,
  bool tryMode) noexcept {

  RAAssignment dst;
  RAAssignment& cur = _curAssignment;

  dst.initLayout(_pass->_physRegCount, _pass->workRegs());
  dst.initMaps(dstPhysToWorkMap, dstWorkToPhysMap);

  // NOTE(review): this early return makes every later `tryMode` branch below
  // unreachable - `tryMode == true` never reaches the loops. Confirm whether
  // this is a deliberate short-circuit or a leftover.
  if (tryMode)
    return kErrorOk;

  for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
    // ------------------------------------------------------------------------
    // STEP 1:
    //   - KILL all registers that are not live at `dst`,
    //   - SPILL all registers that are not assigned at `dst`.
    // ------------------------------------------------------------------------

    if (!tryMode) {
      Support::BitWordIterator<uint32_t> it(cur.assigned(group));
      while (it.hasNext()) {
        uint32_t physId = it.next();
        uint32_t workId = cur.physToWorkId(group, physId);

        // Must be true as we iterate over assigned registers.
        ASMJIT_ASSERT(workId != RAAssignment::kWorkNone);

        // KILL if it's not live on entry.
        if (!liveIn.bitAt(workId)) {
          onKillReg(group, workId, physId);
          continue;
        }

        // SPILL if it's not assigned on entry.
        uint32_t altId = dst.workToPhysId(group, workId);
        if (altId == RAAssignment::kPhysNone) {
          ASMJIT_PROPAGATE(onSpillReg(group, workId, physId));
        }
      }
    }

    // ------------------------------------------------------------------------
    // STEP 2:
    //   - MOVE and SWAP registers from their current assignments into their
    //     DST assignments.
    //   - Build `willLoadRegs` mask of registers scheduled for `onLoadReg()`.
    // ------------------------------------------------------------------------

    // Current run-id (1 means more aggressive decisions). It's reset to -1
    // whenever progress is made, so reaching 2 means two full rounds without
    // any change (a cycle the allocator cannot break).
    int32_t runId = -1;
    // Remaining registers scheduled for `onLoadReg()`.
    uint32_t willLoadRegs = 0;
    // Remaining registers to be allocated in this loop.
    uint32_t affectedRegs = dst.assigned(group);

    while (affectedRegs) {
      if (++runId == 2) {
        if (!tryMode)
          return DebugUtils::errored(kErrorInvalidState);

        // Stop in `tryMode` if we haven't done anything in past two rounds.
        break;
      }

      Support::BitWordIterator<uint32_t> it(affectedRegs);
      while (it.hasNext()) {
        uint32_t physId = it.next();
        uint32_t physMask = Support::bitMask(physId);

        uint32_t curWorkId = cur.physToWorkId(group, physId);
        uint32_t dstWorkId = dst.physToWorkId(group, physId);

        // The register must have assigned `dstWorkId` as we only iterate over assigned regs.
        ASMJIT_ASSERT(dstWorkId != RAAssignment::kWorkNone);

        if (curWorkId != RAAssignment::kWorkNone) {
          // Both assigned.
          if (curWorkId != dstWorkId) {
            // Wait a bit if this is the first run, we may avoid this if `curWorkId` moves out.
            if (runId <= 0)
              continue;

            uint32_t altPhysId = cur.workToPhysId(group, dstWorkId);
            if (altPhysId == RAAssignment::kPhysNone)
              continue;

            // Reset as we will do some changes to the current assignment.
            runId = -1;

            if (_archTraits.hasSwap(group)) {
              ASMJIT_PROPAGATE(onSwapReg(group, curWorkId, physId, dstWorkId, altPhysId));
            }
            else {
              // SPILL the reg if it's not dirty in DST, otherwise try to MOVE.
              if (!cur.isPhysDirty(group, physId)) {
                ASMJIT_PROPAGATE(onKillReg(group, curWorkId, physId));
              }
              else {
                uint32_t allocableRegs = _pass->_availableRegs[group] & ~cur.assigned(group);

                // If possible don't conflict with assigned regs at DST.
                if (allocableRegs & ~dst.assigned(group))
                  allocableRegs &= ~dst.assigned(group);

                if (allocableRegs) {
                  // MOVE is possible, thus preferred.
                  uint32_t tmpPhysId = Support::ctz(allocableRegs);

                  ASMJIT_PROPAGATE(onMoveReg(group, curWorkId, tmpPhysId, physId));
                  _pass->_clobberedRegs[group] |= Support::bitMask(tmpPhysId);
                }
                else {
                  // MOVE is impossible, must SPILL.
                  ASMJIT_PROPAGATE(onSpillReg(group, curWorkId, physId));
                }
              }

              // `physId` is now free, fall through to the "CUR unassigned" path.
              goto Cleared;
            }
          }
        }
        else {
Cleared:
          // DST assigned, CUR unassigned.
          uint32_t altPhysId = cur.workToPhysId(group, dstWorkId);
          if (altPhysId == RAAssignment::kPhysNone) {
            if (liveIn.bitAt(dstWorkId))
              willLoadRegs |= physMask; // Scheduled for `onLoadReg()`.
            affectedRegs &= ~physMask;  // Unaffected from now.
            continue;
          }
          ASMJIT_PROPAGATE(onMoveReg(group, dstWorkId, physId, altPhysId));
        }

        // Both DST and CUR assigned to the same reg or CUR just moved to DST.
        if ((dst.dirty(group) & physMask) != (cur.dirty(group) & physMask)) {
          if ((dst.dirty(group) & physMask) == 0) {
            // CUR dirty, DST not dirty (the assert is just to visualize the condition).
            ASMJIT_ASSERT(!dst.isPhysDirty(group, physId) && cur.isPhysDirty(group, physId));

            // If `dstReadOnly` is true it means that that block was already
            // processed and we cannot change from CLEAN to DIRTY. In that case
            // the register has to be saved as it cannot enter the block DIRTY.
            if (dstReadOnly)
              ASMJIT_PROPAGATE(onSaveReg(group, dstWorkId, physId));
            else
              dst.makeDirty(group, dstWorkId, physId);
          }
          else {
            // DST dirty, CUR not dirty (the assert is just to visualize the condition).
            ASMJIT_ASSERT(dst.isPhysDirty(group, physId) && !cur.isPhysDirty(group, physId));

            cur.makeDirty(group, dstWorkId, physId);
          }
        }

        // Must match now...
        ASMJIT_ASSERT(dst.physToWorkId(group, physId) == cur.physToWorkId(group, physId));
        ASMJIT_ASSERT(dst.isPhysDirty(group, physId) == cur.isPhysDirty(group, physId));

        runId = -1;
        affectedRegs &= ~physMask;
      }
    }

    // ------------------------------------------------------------------------
    // STEP 3:
    //   - Load registers specified by `willLoadRegs`.
    // ------------------------------------------------------------------------

    {
      Support::BitWordIterator<uint32_t> it(willLoadRegs);
      while (it.hasNext()) {
        uint32_t physId = it.next();

        if (!cur.isPhysAssigned(group, physId)) {
          uint32_t workId = dst.physToWorkId(group, physId);

          // The algorithm is broken if it tries to load a register that is not in LIVE-IN.
          ASMJIT_ASSERT(liveIn.bitAt(workId) == true);

          ASMJIT_PROPAGATE(onLoadReg(group, workId, physId));
          if (dst.isPhysDirty(group, physId))
            cur.makeDirty(group, workId, physId);
          ASMJIT_ASSERT(dst.isPhysDirty(group, physId) == cur.isPhysDirty(group, physId));
        }
        else {
          // Not possible otherwise.
          ASMJIT_ASSERT(tryMode == true);
        }
      }
    }
  }

  if (!tryMode) {
    // Here is code that dumps the conflicting part if something fails here:
    // if (!dst.equals(cur)) {
    //   uint32_t physTotal = dst._layout.physTotal;
    //   uint32_t workCount = dst._layout.workCount;
    //
    //   for (uint32_t physId = 0; physId < physTotal; physId++) {
    //     uint32_t dstWorkId = dst._physToWorkMap->workIds[physId];
    //     uint32_t curWorkId = cur._physToWorkMap->workIds[physId];
    //     if (dstWorkId != curWorkId)
    //       fprintf(stderr, "[PhysIdWork] PhysId=%u WorkId[DST(%u) != CUR(%u)]\n", physId, dstWorkId, curWorkId);
    //   }
    //
    //   for (uint32_t workId = 0; workId < workCount; workId++) {
    //     uint32_t dstPhysId = dst._workToPhysMap->physIds[workId];
    //     uint32_t curPhysId = cur._workToPhysMap->physIds[workId];
    //     if (dstPhysId != curPhysId)
    //       fprintf(stderr, "[WorkToPhys] WorkId=%u PhysId[DST(%u) != CUR(%u)]\n", workId, dstPhysId, curPhysId);
    //   }
    // }
    ASMJIT_ASSERT(dst.equals(cur));
  }

  return kErrorOk;
}
+
+Error RALocalAllocator::spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept {
+ uint32_t group = BaseReg::kGroupGp;
+ Support::BitWordIterator<uint32_t> it(scratchRegs);
+
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+ if (_curAssignment.isPhysAssigned(group, physId)) {
+ uint32_t workId = _curAssignment.physToWorkId(group, physId);
+ ASMJIT_PROPAGATE(onSpillReg(group, workId, physId));
+ }
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Allocation]
+// ============================================================================
+
+Error RALocalAllocator::allocInst(InstNode* node) noexcept {
+ RAInst* raInst = node->passData<RAInst>();
+
+ RATiedReg* outTiedRegs[Globals::kMaxPhysRegs];
+ RATiedReg* dupTiedRegs[Globals::kMaxPhysRegs];
+
+ // The cursor must point to the previous instruction for a possible instruction insertion.
+ _cc->_setCursor(node->prev());
+
+ _node = node;
+ _raInst = raInst;
+ _tiedTotal = raInst->_tiedTotal;
+ _tiedCount = raInst->_tiedCount;
+
+ // Whether we already replaced register operand with memory operand.
+ bool rmAllocated = false;
+
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ uint32_t i, count = this->tiedCount(group);
+ RATiedReg* tiedRegs = this->tiedRegs(group);
+
+ uint32_t willUse = _raInst->_usedRegs[group];
+ uint32_t willOut = _raInst->_clobberedRegs[group];
+ uint32_t willFree = 0;
+ uint32_t usePending = count;
+
+ uint32_t outTiedCount = 0;
+ uint32_t dupTiedCount = 0;
+
+ // ------------------------------------------------------------------------
+ // STEP 1:
+ //
+ // Calculate `willUse` and `willFree` masks based on tied registers we have.
+ //
+ // We don't do any assignment decisions at this stage as we just need to
+ // collect some information first. Then, after we populate all masks needed
+ // we can finally make some decisions in the second loop. The main reason
+ // for this is that we really need `willFree` to make assignment decisions
+ // for `willUse`, because if we mark some registers that will be freed, we
+ // can consider them in decision making afterwards.
+ // ------------------------------------------------------------------------
+
+ for (i = 0; i < count; i++) {
+ RATiedReg* tiedReg = &tiedRegs[i];
+
+ // Add OUT and KILL to `outPending` for CLOBBERing and/or OUT assignment.
+ if (tiedReg->isOutOrKill())
+ outTiedRegs[outTiedCount++] = tiedReg;
+
+ if (tiedReg->isDuplicate())
+ dupTiedRegs[dupTiedCount++] = tiedReg;
+
+ if (!tiedReg->isUse()) {
+ tiedReg->markUseDone();
+ usePending--;
+ continue;
+ }
+
+ uint32_t workId = tiedReg->workId();
+ uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
+
+ if (tiedReg->hasUseId()) {
+ // If the register has `useId` it means it can only be allocated in that register.
+ uint32_t useMask = Support::bitMask(tiedReg->useId());
+
+ // RAInstBuilder must have collected `usedRegs` on-the-fly.
+ ASMJIT_ASSERT((willUse & useMask) != 0);
+
+ if (assignedId == tiedReg->useId()) {
+ // If the register is already allocated in this one, mark it done and continue.
+ tiedReg->markUseDone();
+ if (tiedReg->isWrite())
+ _curAssignment.makeDirty(group, workId, assignedId);
+ usePending--;
+ willUse |= useMask;
+ }
+ else {
+ willFree |= useMask & _curAssignment.assigned(group);
+ }
+ }
+ else {
+ // Check if the register must be moved to `allocableRegs`.
+ uint32_t allocableRegs = tiedReg->allocableRegs();
+ if (assignedId != RAAssignment::kPhysNone) {
+ uint32_t assignedMask = Support::bitMask(assignedId);
+ if ((allocableRegs & ~willUse) & assignedMask) {
+ tiedReg->setUseId(assignedId);
+ tiedReg->markUseDone();
+ if (tiedReg->isWrite())
+ _curAssignment.makeDirty(group, workId, assignedId);
+ usePending--;
+ willUse |= assignedMask;
+ }
+ else {
+ willFree |= assignedMask;
+ }
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 2:
+ //
+ // Do some decision making to find the best candidates of registers that
+ // need to be assigned, moved, and/or spilled. Only USE registers are
+ // considered here, OUT will be decided later after all CLOBBERed and OUT
+ // registers are unassigned.
+ // ------------------------------------------------------------------------
+
+ if (usePending) {
+ // TODO: Not sure `liveRegs` should be used, maybe willUse and willFree would be enough and much more clear.
+
+ // All registers that are currently alive without registers that will be freed.
+ uint32_t liveRegs = _curAssignment.assigned(group) & ~willFree;
+
+ for (i = 0; i < count; i++) {
+ RATiedReg* tiedReg = &tiedRegs[i];
+ if (tiedReg->isUseDone()) continue;
+
+ uint32_t workId = tiedReg->workId();
+ uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
+
+ // REG/MEM: Patch register operand to memory operand if not allocated.
+ if (!rmAllocated && tiedReg->hasUseRM()) {
+ if (assignedId == RAAssignment::kPhysNone && Support::isPowerOf2(tiedReg->useRewriteMask())) {
+ RAWorkReg* workReg = workRegById(tiedReg->workId());
+ uint32_t opIndex = Support::ctz(tiedReg->useRewriteMask()) / uint32_t(sizeof(Operand) / sizeof(uint32_t));
+ uint32_t rmSize = tiedReg->rmSize();
+
+ if (rmSize <= workReg->virtReg()->virtSize()) {
+ Operand& op = node->operands()[opIndex];
+ op = _pass->workRegAsMem(workReg);
+ op.as<BaseMem>().setSize(rmSize);
+ tiedReg->_useRewriteMask = 0;
+
+ tiedReg->markUseDone();
+ usePending--;
+
+ rmAllocated = true;
+ continue;
+ }
+ }
+ }
+
+ if (!tiedReg->hasUseId()) {
+ uint32_t allocableRegs = tiedReg->allocableRegs() & ~(willFree | willUse);
+
+ // DECIDE where to assign the USE register.
+ uint32_t useId = decideOnAssignment(group, workId, assignedId, allocableRegs);
+ uint32_t useMask = Support::bitMask(useId);
+
+ willUse |= useMask;
+ willFree |= useMask & liveRegs;
+ tiedReg->setUseId(useId);
+
+ if (assignedId != RAAssignment::kPhysNone) {
+ uint32_t assignedMask = Support::bitMask(assignedId);
+
+ willFree |= assignedMask;
+ liveRegs &= ~assignedMask;
+
+ // OPTIMIZATION: Assign the USE register here if it's possible.
+ if (!(liveRegs & useMask)) {
+ ASMJIT_PROPAGATE(onMoveReg(group, workId, useId, assignedId));
+ tiedReg->markUseDone();
+ if (tiedReg->isWrite())
+ _curAssignment.makeDirty(group, workId, useId);
+ usePending--;
+ }
+ }
+ else {
+ // OPTIMIZATION: Assign the USE register here if it's possible.
+ if (!(liveRegs & useMask)) {
+ ASMJIT_PROPAGATE(onLoadReg(group, workId, useId));
+ tiedReg->markUseDone();
+ if (tiedReg->isWrite())
+ _curAssignment.makeDirty(group, workId, useId);
+ usePending--;
+ }
+ }
+
+ liveRegs |= useMask;
+ }
+ }
+ }
+
+ // Initially all used regs will be marked clobbered.
+ uint32_t clobberedByInst = willUse | willOut;
+
+ // ------------------------------------------------------------------------
+ // STEP 3:
+ //
+ // Free all registers that we marked as `willFree`. Only registers that are not
+ // USEd by the instruction are considered as we don't want to free regs we need.
+ // ------------------------------------------------------------------------
+
+ if (willFree) {
+ uint32_t allocableRegs = _availableRegs[group] & ~(_curAssignment.assigned(group) | willFree | willUse | willOut);
+ Support::BitWordIterator<uint32_t> it(willFree);
+
+ do {
+ uint32_t assignedId = it.next();
+ if (_curAssignment.isPhysAssigned(group, assignedId)) {
+ uint32_t workId = _curAssignment.physToWorkId(group, assignedId);
+
+ // DECIDE whether to MOVE or SPILL.
+ if (allocableRegs) {
+ uint32_t reassignedId = decideOnReassignment(group, workId, assignedId, allocableRegs);
+ if (reassignedId != RAAssignment::kPhysNone) {
+ ASMJIT_PROPAGATE(onMoveReg(group, workId, reassignedId, assignedId));
+ allocableRegs ^= Support::bitMask(reassignedId);
+ continue;
+ }
+ }
+
+ ASMJIT_PROPAGATE(onSpillReg(group, workId, assignedId));
+ }
+ } while (it.hasNext());
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 4:
+ //
+ // ALLOCATE / SHUFFLE all registers that we marked as `willUse` and weren't
+ // allocated yet. This is a bit complicated as the allocation is iterative.
+ // In some cases we have to wait before allocating a particual physical
+ // register as it's still occupied by some other one, which we need to move
+ // before we can use it. In this case we skip it and allocate another some
+ // other instead (making it free for another iteration).
+ //
+ // NOTE: Iterations are mostly important for complicated allocations like
+ // function calls, where there can be up to N registers used at once. Asm
+ // instructions won't run the loop more than once in 99.9% of cases as they
+ // use 2..3 registers in average.
+ // ------------------------------------------------------------------------
+
+ if (usePending) {
+ bool mustSwap = false;
+ do {
+ uint32_t oldPending = usePending;
+
+ for (i = 0; i < count; i++) {
+ RATiedReg* thisTiedReg = &tiedRegs[i];
+ if (thisTiedReg->isUseDone()) continue;
+
+ uint32_t thisWorkId = thisTiedReg->workId();
+ uint32_t thisPhysId = _curAssignment.workToPhysId(group, thisWorkId);
+
+ // This would be a bug, fatal one!
+ uint32_t targetPhysId = thisTiedReg->useId();
+ ASMJIT_ASSERT(targetPhysId != thisPhysId);
+
+ uint32_t targetWorkId = _curAssignment.physToWorkId(group, targetPhysId);
+ if (targetWorkId != RAAssignment::kWorkNone) {
+ RAWorkReg* targetWorkReg = workRegById(targetWorkId);
+
+ // Swapping two registers can solve two allocation tasks by emitting
+ // just a single instruction. However, swap is only available on few
+ // architectures and it's definitely not available for each register
+ // group. Calling `onSwapReg()` before checking these would be fatal.
+ if (_archTraits.hasSwap(group) && thisPhysId != RAAssignment::kPhysNone) {
+ ASMJIT_PROPAGATE(onSwapReg(group, thisWorkId, thisPhysId, targetWorkId, targetPhysId));
+
+ thisTiedReg->markUseDone();
+ if (thisTiedReg->isWrite())
+ _curAssignment.makeDirty(group, thisWorkId, targetPhysId);
+ usePending--;
+
+ // Double-hit.
+ RATiedReg* targetTiedReg = RALocal_findTiedRegByWorkId(tiedRegs, count, targetWorkReg->workId());
+ if (targetTiedReg && targetTiedReg->useId() == thisPhysId) {
+ targetTiedReg->markUseDone();
+ if (targetTiedReg->isWrite())
+ _curAssignment.makeDirty(group, targetWorkId, thisPhysId);
+ usePending--;
+ }
+ continue;
+ }
+
+ if (!mustSwap)
+ continue;
+
+ // Only branched here if the previous iteration did nothing. This is
+ // essentially a SWAP operation without having a dedicated instruction
+ // for that purpose (vector registers, etc). The simplest way to
+ // handle such case is to SPILL the target register.
+ ASMJIT_PROPAGATE(onSpillReg(group, targetWorkId, targetPhysId));
+ }
+
+ if (thisPhysId != RAAssignment::kPhysNone) {
+ ASMJIT_PROPAGATE(onMoveReg(group, thisWorkId, targetPhysId, thisPhysId));
+
+ thisTiedReg->markUseDone();
+ if (thisTiedReg->isWrite())
+ _curAssignment.makeDirty(group, thisWorkId, targetPhysId);
+ usePending--;
+ }
+ else {
+ ASMJIT_PROPAGATE(onLoadReg(group, thisWorkId, targetPhysId));
+
+ thisTiedReg->markUseDone();
+ if (thisTiedReg->isWrite())
+ _curAssignment.makeDirty(group, thisWorkId, targetPhysId);
+ usePending--;
+ }
+ }
+
+ mustSwap = (oldPending == usePending);
+ } while (usePending);
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 5:
+ //
+ // KILL registers marked as KILL/OUT.
+ // ------------------------------------------------------------------------
+
+ uint32_t outPending = outTiedCount;
+ if (outTiedCount) {
+ for (i = 0; i < outTiedCount; i++) {
+ RATiedReg* tiedReg = outTiedRegs[i];
+
+ uint32_t workId = tiedReg->workId();
+ uint32_t physId = _curAssignment.workToPhysId(group, workId);
+
+ // Must check if it's allocated as KILL can be related to OUT (like KILL
+ // immediately after OUT, which could mean the register is not assigned).
+ if (physId != RAAssignment::kPhysNone) {
+ ASMJIT_PROPAGATE(onKillReg(group, workId, physId));
+ willOut &= ~Support::bitMask(physId);
+ }
+
+ // We still maintain number of pending registers for OUT assignment.
+ // So, if this is only KILL, not OUT, we can safely decrement it.
+ outPending -= !tiedReg->isOut();
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 6:
+ //
+ // SPILL registers that will be CLOBBERed. Since OUT and KILL were
+ // already processed this is used mostly to handle function CALLs.
+ // ------------------------------------------------------------------------
+
+ if (willOut) {
+ Support::BitWordIterator<uint32_t> it(willOut);
+ do {
+ uint32_t physId = it.next();
+ uint32_t workId = _curAssignment.physToWorkId(group, physId);
+
+ if (workId == RAAssignment::kWorkNone)
+ continue;
+
+ ASMJIT_PROPAGATE(onSpillReg(group, workId, physId));
+ } while (it.hasNext());
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 7:
+ //
+ // Duplication.
+ // ------------------------------------------------------------------------
+
+ for (i = 0; i < dupTiedCount; i++) {
+ RATiedReg* tiedReg = dupTiedRegs[i];
+ uint32_t workId = tiedReg->workId();
+ uint32_t srcId = tiedReg->useId();
+
+ Support::BitWordIterator<uint32_t> it(tiedReg->_allocableRegs);
+ while (it.hasNext()) {
+ uint32_t dstId = it.next();
+ if (dstId == srcId)
+ continue;
+ _pass->onEmitMove(workId, dstId, srcId);
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 8:
+ //
+ // Assign OUT registers.
+ // ------------------------------------------------------------------------
+
+ if (outPending) {
+ // Live registers, we need a separate variable (outside of `_curAssignment)
+ // to hold these because of KILLed registers. If we KILL a register here it
+ // will go out from `_curAssignment`, but we cannot assign to it in here.
+ uint32_t liveRegs = _curAssignment.assigned(group);
+
+ // Must avoid as they have been already OUTed (added during the loop).
+ uint32_t outRegs = 0;
+
+ // Must avoid as they collide with already allocated ones.
+ uint32_t avoidRegs = willUse & ~clobberedByInst;
+
+ for (i = 0; i < outTiedCount; i++) {
+ RATiedReg* tiedReg = outTiedRegs[i];
+ if (!tiedReg->isOut()) continue;
+
+ uint32_t workId = tiedReg->workId();
+ uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
+
+ if (assignedId != RAAssignment::kPhysNone)
+ ASMJIT_PROPAGATE(onKillReg(group, workId, assignedId));
+
+ uint32_t physId = tiedReg->outId();
+ if (physId == RAAssignment::kPhysNone) {
+ uint32_t allocableRegs = _availableRegs[group] & ~(outRegs | avoidRegs);
+
+ if (!(allocableRegs & ~liveRegs)) {
+ // There are no more registers, decide which one to spill.
+ uint32_t spillWorkId;
+ physId = decideOnSpillFor(group, workId, allocableRegs & liveRegs, &spillWorkId);
+ ASMJIT_PROPAGATE(onSpillReg(group, spillWorkId, physId));
+ }
+ else {
+ physId = decideOnAssignment(group, workId, RAAssignment::kPhysNone, allocableRegs & ~liveRegs);
+ }
+ }
+
+ // OUTs are CLOBBERed thus cannot be ASSIGNed right now.
+ ASMJIT_ASSERT(!_curAssignment.isPhysAssigned(group, physId));
+
+ if (!tiedReg->isKill())
+ ASMJIT_PROPAGATE(onAssignReg(group, workId, physId, true));
+
+ tiedReg->setOutId(physId);
+ tiedReg->markOutDone();
+
+ outRegs |= Support::bitMask(physId);
+ liveRegs &= ~Support::bitMask(physId);
+ outPending--;
+ }
+
+ clobberedByInst |= outRegs;
+ ASMJIT_ASSERT(outPending == 0);
+ }
+
+ _clobberedRegs[group] |= clobberedByInst;
+ }
+
+ return kErrorOk;
+}
+
+Error RALocalAllocator::spillAfterAllocation(InstNode* node) noexcept {
+ // This is experimental feature that would spill registers that don't have
+ // home-id and are last in this basic block. This prevents saving these regs
+ // in other basic blocks and then restoring them (mostly relevant for loops).
+ RAInst* raInst = node->passData<RAInst>();
+ uint32_t count = raInst->tiedCount();
+
+ for (uint32_t i = 0; i < count; i++) {
+ RATiedReg* tiedReg = raInst->tiedAt(i);
+ if (tiedReg->isLast()) {
+ uint32_t workId = tiedReg->workId();
+ RAWorkReg* workReg = workRegById(workId);
+ if (!workReg->hasHomeRegId()) {
+ uint32_t group = workReg->group();
+ uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
+ if (assignedId != RAAssignment::kPhysNone) {
+ _cc->_setCursor(node);
+ ASMJIT_PROPAGATE(onSpillReg(group, workId, assignedId));
+ }
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+Error RALocalAllocator::allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept {
+ // TODO: This should be used to make the branch allocation better.
+ DebugUtils::unused(cont);
+
+ // The cursor must point to the previous instruction for a possible instruction insertion.
+ _cc->_setCursor(node->prev());
+
+ // Use TryMode of `switchToAssignment()` if possible.
+ if (target->hasEntryAssignment()) {
+ ASMJIT_PROPAGATE(switchToAssignment(
+ target->entryPhysToWorkMap(),
+ target->entryWorkToPhysMap(),
+ target->liveIn(),
+ target->isAllocated(),
+ true));
+ }
+
+ ASMJIT_PROPAGATE(allocInst(node));
+ ASMJIT_PROPAGATE(spillRegsBeforeEntry(target));
+
+ if (target->hasEntryAssignment()) {
+ BaseNode* injectionPoint = _pass->extraBlock()->prev();
+ BaseNode* prevCursor = _cc->setCursor(injectionPoint);
+
+ _tmpAssignment.copyFrom(_curAssignment);
+ ASMJIT_PROPAGATE(switchToAssignment(
+ target->entryPhysToWorkMap(),
+ target->entryWorkToPhysMap(),
+ target->liveIn(),
+ target->isAllocated(),
+ false));
+
+ BaseNode* curCursor = _cc->cursor();
+ if (curCursor != injectionPoint) {
+ // Additional instructions emitted to switch from the current state to
+ // the `target` state. This means that we have to move these instructions
+ // into an independent code block and patch the jump location.
+ Operand& targetOp = node->op(node->opCount() - 1);
+ if (ASMJIT_UNLIKELY(!targetOp.isLabel()))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ Label trampoline = _cc->newLabel();
+ Label savedTarget = targetOp.as<Label>();
+
+ // Patch `target` to point to the `trampoline` we just created.
+ targetOp = trampoline;
+
+ // Clear a possible SHORT form as we have no clue now if the SHORT form would
+ // be encodable after patching the target to `trampoline` (X86 specific).
+ node->clearInstOptions(BaseInst::kOptionShortForm);
+
+ // Finalize the switch assignment sequence.
+ ASMJIT_PROPAGATE(_pass->onEmitJump(savedTarget));
+ _cc->_setCursor(injectionPoint);
+ _cc->bind(trampoline);
+ }
+
+ _cc->_setCursor(prevCursor);
+ _curAssignment.swap(_tmpAssignment);
+ }
+ else {
+ ASMJIT_PROPAGATE(_pass->setBlockEntryAssignment(target, block(), _curAssignment));
+ }
+
+ return kErrorOk;
+}
+
+Error RALocalAllocator::allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept {
+ if (targets.empty())
+ return DebugUtils::errored(kErrorInvalidState);
+
+ // The cursor must point to the previous instruction for a possible instruction insertion.
+ _cc->_setCursor(node->prev());
+
+ // All `targets` should have the same sharedAssignmentId, we just read the first.
+ RABlock* anyTarget = targets[0];
+ if (!anyTarget->hasSharedAssignmentId())
+ return DebugUtils::errored(kErrorInvalidState);
+
+ RASharedAssignment& sharedAssignment = _pass->_sharedAssignments[anyTarget->sharedAssignmentId()];
+
+ ASMJIT_PROPAGATE(allocInst(node));
+
+ if (!sharedAssignment.empty()) {
+ ASMJIT_PROPAGATE(switchToAssignment(
+ sharedAssignment.physToWorkMap(),
+ sharedAssignment.workToPhysMap(),
+ sharedAssignment.liveIn(),
+ true, // Read-only.
+ false // Try-mode.
+ ));
+ }
+
+ ASMJIT_PROPAGATE(spillRegsBeforeEntry(anyTarget));
+
+ if (sharedAssignment.empty()) {
+ ASMJIT_PROPAGATE(_pass->setBlockEntryAssignment(anyTarget, block(), _curAssignment));
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Decision Making]
+// ============================================================================
+
+uint32_t RALocalAllocator::decideOnAssignment(uint32_t group, uint32_t workId, uint32_t physId, uint32_t allocableRegs) const noexcept {
+ ASMJIT_ASSERT(allocableRegs != 0);
+ DebugUtils::unused(group, physId);
+
+ RAWorkReg* workReg = workRegById(workId);
+
+ // Prefer home register id, if possible.
+ if (workReg->hasHomeRegId()) {
+ uint32_t homeId = workReg->homeRegId();
+ if (Support::bitTest(allocableRegs, homeId))
+ return homeId;
+ }
+
+ // Prefer registers used upon block entries.
+ uint32_t previouslyAssignedRegs = workReg->allocatedMask();
+ if (allocableRegs & previouslyAssignedRegs)
+ allocableRegs &= previouslyAssignedRegs;
+
+ return Support::ctz(allocableRegs);
+}
+
+uint32_t RALocalAllocator::decideOnReassignment(uint32_t group, uint32_t workId, uint32_t physId, uint32_t allocableRegs) const noexcept {
+ ASMJIT_ASSERT(allocableRegs != 0);
+ DebugUtils::unused(group, physId);
+
+ RAWorkReg* workReg = workRegById(workId);
+
+ // Prefer allocating back to HomeId, if possible.
+ if (workReg->hasHomeRegId()) {
+ if (Support::bitTest(allocableRegs, workReg->homeRegId()))
+ return workReg->homeRegId();
+ }
+
+ // TODO: [Register Allocator] This could be improved.
+
+ // Decided to SPILL.
+ return RAAssignment::kPhysNone;
+}
+
+uint32_t RALocalAllocator::decideOnSpillFor(uint32_t group, uint32_t workId, uint32_t spillableRegs, uint32_t* spillWorkId) const noexcept {
+ // May be used in the future to decide which register would be best to spill so `workId` can be assigned.
+ DebugUtils::unused(workId);
+ ASMJIT_ASSERT(spillableRegs != 0);
+
+ Support::BitWordIterator<uint32_t> it(spillableRegs);
+ uint32_t bestPhysId = it.next();
+ uint32_t bestWorkId = _curAssignment.physToWorkId(group, bestPhysId);
+
+ // Avoid calculating the cost model if there is only one spillable register.
+ if (it.hasNext()) {
+ uint32_t bestCost = calculateSpillCost(group, bestWorkId, bestPhysId);
+ do {
+ uint32_t localPhysId = it.next();
+ uint32_t localWorkId = _curAssignment.physToWorkId(group, localPhysId);
+ uint32_t localCost = calculateSpillCost(group, localWorkId, localPhysId);
+
+ if (localCost < bestCost) {
+ bestCost = localCost;
+ bestPhysId = localPhysId;
+ bestWorkId = localWorkId;
+ }
+ } while (it.hasNext());
+ }
+
+ *spillWorkId = bestWorkId;
+ return bestPhysId;
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
diff --git a/client/asmjit/core/ralocal_p.h b/client/asmjit/core/ralocal_p.h
new file mode 100644
index 0000000..911acca
--- /dev/null
+++ b/client/asmjit/core/ralocal_p.h
@@ -0,0 +1,282 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RALOCAL_P_H_INCLUDED
+#define ASMJIT_CORE_RALOCAL_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/raassignment_p.h"
+#include "../core/radefs_p.h"
+#include "../core/rapass_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RALocalAllocator]
+// ============================================================================
+
+//! Local register allocator.
+class RALocalAllocator {
+public:
+ ASMJIT_NONCOPYABLE(RALocalAllocator)
+
+ typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
+ typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
+
+ //! Link to `RAPass`.
+ RAPass* _pass;
+ //! Link to `BaseCompiler`.
+ BaseCompiler* _cc;
+
+ //! Architecture traits.
+ RAArchTraits _archTraits;
+ //! Registers available to the allocator.
+ RARegMask _availableRegs;
+ //! Registers clobbered by the allocator.
+ RARegMask _clobberedRegs;
+
+ //! Register assignment (current).
+ RAAssignment _curAssignment;
+ //! Register assignment used temporarily during assignment switches.
+ RAAssignment _tmpAssignment;
+
+ //! Link to the current `RABlock`.
+ RABlock* _block;
+ //! InstNode.
+ InstNode* _node;
+ //! RA instruction.
+ RAInst* _raInst;
+
+ //! Count of all TiedReg's.
+ uint32_t _tiedTotal;
+ //! TiedReg's total counter.
+ RARegCount _tiedCount;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RALocalAllocator(RAPass* pass) noexcept
+ : _pass(pass),
+ _cc(pass->cc()),
+ _archTraits(pass->_archTraits),
+ _availableRegs(pass->_availableRegs),
+ _clobberedRegs(),
+ _curAssignment(),
+ _block(nullptr),
+ _node(nullptr),
+ _raInst(nullptr),
+ _tiedTotal(),
+ _tiedCount() {}
+
+ Error init() noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline RAWorkReg* workRegById(uint32_t workId) const noexcept { return _pass->workRegById(workId); }
+ inline PhysToWorkMap* physToWorkMap() const noexcept { return _curAssignment.physToWorkMap(); }
+ inline WorkToPhysMap* workToPhysMap() const noexcept { return _curAssignment.workToPhysMap(); }
+
+ //! Returns the currently processed block.
+ inline RABlock* block() const noexcept { return _block; }
+ //! Sets the currently processed block.
+ inline void setBlock(RABlock* block) noexcept { _block = block; }
+
+ //! Returns the currently processed `InstNode`.
+ inline InstNode* node() const noexcept { return _node; }
+ //! Returns the currently processed `RAInst`.
+ inline RAInst* raInst() const noexcept { return _raInst; }
+
+ //! Returns all tied regs as `RATiedReg` array.
+ inline RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); }
+ //! Returns tied registers grouped by the given `group`.
+ inline RATiedReg* tiedRegs(uint32_t group) const noexcept { return _raInst->tiedRegs(group); }
+
+ //! Returns count of all TiedRegs used by the instruction.
+ inline uint32_t tiedCount() const noexcept { return _tiedTotal; }
+ //! Returns count of TiedRegs used by the given register `group`.
+ inline uint32_t tiedCount(uint32_t group) const noexcept { return _tiedCount.get(group); }
+
+ inline bool isGroupUsed(uint32_t group) const noexcept { return _tiedCount[group] != 0; }
+
+ //! \}
+
+ //! \name Assignment
+ //! \{
+
+ Error makeInitialAssignment() noexcept;
+
+ Error replaceAssignment(
+ const PhysToWorkMap* physToWorkMap,
+ const WorkToPhysMap* workToPhysMap) noexcept;
+
+ //! Switch to the given assignment by reassigning all register and emitting
+ //! code that reassigns them. This is always used to switch to a previously
+ //! stored assignment.
+ //!
+ //! If `tryMode` is true then the final assignment doesn't have to be exactly
+ //! same as specified by `dstPhysToWorkMap` and `dstWorkToPhysMap`. This mode
+ //! is only used before conditional jumps that already have assignment to
+ //! generate a code sequence that is always executed regardless of the flow.
+ Error switchToAssignment(
+ PhysToWorkMap* dstPhysToWorkMap,
+ WorkToPhysMap* dstWorkToPhysMap,
+ const ZoneBitVector& liveIn,
+ bool dstReadOnly,
+ bool tryMode) noexcept;
+
+ inline Error spillRegsBeforeEntry(RABlock* block) noexcept {
+ return spillScratchGpRegsBeforeEntry(block->entryScratchGpRegs());
+ }
+
+ Error spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept;
+
+ //! \}
+
+ //! \name Allocation
+ //! \{
+
+ Error allocInst(InstNode* node) noexcept;
+ Error spillAfterAllocation(InstNode* node) noexcept;
+
+ Error allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept;
+ Error allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept;
+
+ //! \}
+
+ //! \name Decision Making
+ //! \{
+
+ enum CostModel : uint32_t {
+ kCostOfFrequency = 1048576,
+ kCostOfDirtyFlag = kCostOfFrequency / 4
+ };
+
+ inline uint32_t costByFrequency(float freq) const noexcept {
+ return uint32_t(int32_t(freq * float(kCostOfFrequency)));
+ }
+
+ inline uint32_t calculateSpillCost(uint32_t group, uint32_t workId, uint32_t assignedId) const noexcept {
+ RAWorkReg* workReg = workRegById(workId);
+ uint32_t cost = costByFrequency(workReg->liveStats().freq());
+
+ if (_curAssignment.isPhysDirty(group, assignedId))
+ cost += kCostOfDirtyFlag;
+
+ return cost;
+ }
+
+ //! Decides on register assignment.
+ uint32_t decideOnAssignment(uint32_t group, uint32_t workId, uint32_t assignedId, uint32_t allocableRegs) const noexcept;
+
+ //! Decides on whether to MOVE or SPILL the given WorkReg, because it's allocated
+ //! in a physical register that have to be used by another WorkReg.
+ //!
+ //! The function must return either `RAAssignment::kPhysNone`, which means that
+ //! the WorkReg of `workId` should be spilled, or a valid physical register ID,
+ //! which means that the register should be moved to that physical register instead.
+ uint32_t decideOnReassignment(uint32_t group, uint32_t workId, uint32_t assignedId, uint32_t allocableRegs) const noexcept;
+
+ //! Decides on best spill given a register mask `spillableRegs`
+ uint32_t decideOnSpillFor(uint32_t group, uint32_t workId, uint32_t spillableRegs, uint32_t* spillWorkId) const noexcept;
+
+ //! \}
+
+ //! \name Emit
+ //! \{
+
+ //! Emits a move between a destination and source register, and fixes the
+ //! register assignment.
+ inline Error onMoveReg(uint32_t group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
+ if (dstPhysId == srcPhysId) return kErrorOk;
+ _curAssignment.reassign(group, workId, dstPhysId, srcPhysId);
+ return _pass->onEmitMove(workId, dstPhysId, srcPhysId);
+ }
+
+ //! Emits a swap between two physical registers and fixes their assignment.
+ //!
+ //! \note Target must support this operation otherwise this would ASSERT.
+ inline Error onSwapReg(uint32_t group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
+ _curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId);
+ return _pass->onEmitSwap(aWorkId, aPhysId, bWorkId, bPhysId);
+ }
+
+ //! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register
+ //! and makes it assigned and clean.
+ inline Error onLoadReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ _curAssignment.assign(group, workId, physId, RAAssignment::kClean);
+ return _pass->onEmitLoad(workId, physId);
+ }
+
+ //! Emits a save a physical register to a [VirtReg/WorkReg]'s spill slot,
+ //! keeps it assigned, and makes it clean.
+ inline Error onSaveReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId);
+ ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId);
+
+ _curAssignment.makeClean(group, workId, physId);
+ return _pass->onEmitSave(workId, physId);
+ }
+
+ //! Assigns a register, the content of it is undefined at this point.
+ inline Error onAssignReg(uint32_t group, uint32_t workId, uint32_t physId, uint32_t dirty) noexcept {
+ _curAssignment.assign(group, workId, physId, dirty);
+ return kErrorOk;
+ }
+
+ //! Spills a variable/register, saves the content to the memory-home if modified.
+ inline Error onSpillReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ if (_curAssignment.isPhysDirty(group, physId))
+ ASMJIT_PROPAGATE(onSaveReg(group, workId, physId));
+ return onKillReg(group, workId, physId);
+ }
+
+ inline Error onDirtyReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ _curAssignment.makeDirty(group, workId, physId);
+ return kErrorOk;
+ }
+
+ inline Error onKillReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ _curAssignment.unassign(group, workId, physId);
+ return kErrorOk;
+ }
+
+ //! \}
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RALOCAL_P_H_INCLUDED
diff --git a/client/asmjit/core/rapass.cpp b/client/asmjit/core/rapass.cpp
new file mode 100644
index 0000000..0305369
--- /dev/null
+++ b/client/asmjit/core/rapass.cpp
@@ -0,0 +1,2010 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/formatter.h"
+#include "../core/ralocal_p.h"
+#include "../core/rapass_p.h"
+#include "../core/support.h"
+#include "../core/type.h"
+#include "../core/zonestack.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::RABlock - Control Flow]
+// ============================================================================
+
+Error RABlock::appendSuccessor(RABlock* successor) noexcept {
+ RABlock* predecessor = this;
+
+ if (predecessor->_successors.contains(successor))
+ return kErrorOk;
+ ASMJIT_ASSERT(!successor->_predecessors.contains(predecessor));
+
+ ASMJIT_PROPAGATE(successor->_predecessors.willGrow(allocator()));
+ ASMJIT_PROPAGATE(predecessor->_successors.willGrow(allocator()));
+
+ predecessor->_successors.appendUnsafe(successor);
+ successor->_predecessors.appendUnsafe(predecessor);
+
+ return kErrorOk;
+}
+
+Error RABlock::prependSuccessor(RABlock* successor) noexcept {
+ RABlock* predecessor = this;
+
+ if (predecessor->_successors.contains(successor))
+ return kErrorOk;
+ ASMJIT_ASSERT(!successor->_predecessors.contains(predecessor));
+
+ ASMJIT_PROPAGATE(successor->_predecessors.willGrow(allocator()));
+ ASMJIT_PROPAGATE(predecessor->_successors.willGrow(allocator()));
+
+ predecessor->_successors.prependUnsafe(successor);
+ successor->_predecessors.prependUnsafe(predecessor);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Construction / Destruction]
+// ============================================================================
+
+RAPass::RAPass() noexcept
+ : FuncPass("RAPass"),
+ _allocator(),
+ _logger(nullptr),
+ _debugLogger(nullptr),
+ _loggerFlags(0),
+ _func(nullptr),
+ _stop(nullptr),
+ _extraBlock(nullptr),
+ _blocks(),
+ _exits(),
+ _pov(),
+ _instructionCount(0),
+ _createdBlockCount(0),
+ _sharedAssignments(),
+ _lastTimestamp(0),
+ _archRegsInfo(nullptr),
+ _archTraits(),
+ _physRegIndex(),
+ _physRegCount(),
+ _physRegTotal(0),
+ _scratchRegIndexes{},
+ _availableRegs(),
+ _availableRegCount(),
+ _clobberedRegs(),
+ _globalMaxLiveCount(),
+ _globalLiveSpans {},
+ _temporaryMem(),
+ _sp(),
+ _fp(),
+ _stackAllocator(),
+ _argsAssignment(),
+ _numStackArgsToStackSlots(0),
+ _maxWorkRegNameSize(0) {}
+RAPass::~RAPass() noexcept {}
+
+// ============================================================================
+// [asmjit::RAPass - RunOnFunction]
+// ============================================================================
+
+static void RAPass_reset(RAPass* self, FuncDetail* funcDetail) noexcept {
+ ZoneAllocator* allocator = self->allocator();
+
+ self->_blocks.reset();
+ self->_exits.reset();
+ self->_pov.reset();
+ self->_workRegs.reset();
+ self->_instructionCount = 0;
+ self->_createdBlockCount = 0;
+
+ self->_sharedAssignments.reset();
+ self->_lastTimestamp = 0;
+
+ self->_archRegsInfo = nullptr;
+ self->_archTraits.reset();
+ self->_physRegIndex.reset();
+ self->_physRegCount.reset();
+ self->_physRegTotal = 0;
+
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(self->_scratchRegIndexes); i++)
+ self->_scratchRegIndexes[i] = BaseReg::kIdBad;
+
+ self->_availableRegs.reset();
+ self->_availableRegCount.reset();
+ self->_clobberedRegs.reset();
+
+ self->_workRegs.reset();
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ self->_workRegsOfGroup[group].reset();
+ self->_strategy[group].reset();
+ self->_globalLiveSpans[group] = nullptr;
+ }
+ self->_globalMaxLiveCount.reset();
+ self->_temporaryMem.reset();
+
+ self->_stackAllocator.reset(allocator);
+ self->_argsAssignment.reset(funcDetail);
+ self->_numStackArgsToStackSlots = 0;
+ self->_maxWorkRegNameSize = 0;
+}
+
+static void RAPass_resetVirtRegData(RAPass* self) noexcept {
+ // Zero everything so it cannot be used by accident.
+ for (RAWorkReg* wReg : self->_workRegs) {
+ VirtReg* vReg = wReg->virtReg();
+ vReg->_workReg = nullptr;
+ }
+}
+
+// Pass entry point: allocates registers for a single function `func`.
+//
+// Sets up pass state (allocator, loggers, function boundaries), runs all
+// allocation steps via `onPerformAllSteps()`, and then unconditionally tears
+// everything down again - cleanup must happen even on failure because the
+// `Zone` is reset before returning and nothing may persist between calls.
+Error RAPass::runOnFunction(Zone* zone, Logger* logger, FuncNode* func) {
+  _allocator.reset(zone);
+
+#ifndef ASMJIT_NO_LOGGING
+  _logger = logger;
+  _debugLogger = nullptr;
+
+  if (logger) {
+    _loggerFlags = logger->flags();
+    // Verbose per-pass debug output only goes out when explicitly requested.
+    if (_loggerFlags & FormatOptions::kFlagDebugPasses)
+      _debugLogger = logger;
+  }
+#else
+  DebugUtils::unused(logger);
+#endif
+
+  // Initialize all core structures to use `zone` and `func`.
+  BaseNode* end = func->endNode();
+  _func = func;
+  _stop = end->next();
+  _extraBlock = end;
+
+  RAPass_reset(this, &_func->_funcDetail);
+
+  // Initialize architecture-specific members.
+  onInit();
+
+  // Perform all allocation steps required.
+  Error err = onPerformAllSteps();
+
+  // Must be called regardless of the allocation status.
+  onDone();
+
+  // Reset possible connections introduced by the register allocator.
+  RAPass_resetVirtRegData(this);
+
+  // Reset all core structures and everything that depends on the passed `Zone`.
+  RAPass_reset(this, nullptr);
+  _allocator.reset(nullptr);
+
+#ifndef ASMJIT_NO_LOGGING
+  _logger = nullptr;
+  _debugLogger = nullptr;
+  _loggerFlags = 0;
+#endif
+
+  _func = nullptr;
+  _stop = nullptr;
+  _extraBlock = nullptr;
+
+  // Reset `Zone` as nothing should persist between `runOnFunction()` calls.
+  zone->reset();
+
+  // We alter the compiler cursor, because it doesn't make sense to reference
+  // it after the compilation - some nodes may disappear and the old cursor
+  // can go out anyway.
+  cc()->_setCursor(cc()->lastNode());
+
+  return err;
+}
+
+// Runs every register-allocation step in order, stopping at the first error
+// (each ASMJIT_PROPAGATE returns early on failure). The order matters: the
+// CFG must exist before views/dominators, liveness must precede allocation,
+// and rewriting virtual->physical registers is always the final step.
+Error RAPass::onPerformAllSteps() noexcept {
+  ASMJIT_PROPAGATE(buildCFG());
+  ASMJIT_PROPAGATE(buildViews());
+  ASMJIT_PROPAGATE(removeUnreachableBlocks());
+
+  ASMJIT_PROPAGATE(buildDominators());
+  ASMJIT_PROPAGATE(buildLiveness());
+  ASMJIT_PROPAGATE(assignArgIndexToWorkRegs());
+
+#ifndef ASMJIT_NO_LOGGING
+  // Annotations are only produced when the logger asks for them.
+  if (logger() && logger()->hasFlag(FormatOptions::kFlagAnnotations))
+    ASMJIT_PROPAGATE(annotateCode());
+#endif
+
+  // Global (whole-function) allocation first, then local (per-block) fixup.
+  ASMJIT_PROPAGATE(runGlobalAllocator());
+  ASMJIT_PROPAGATE(runLocalAllocator());
+
+  ASMJIT_PROPAGATE(updateStackFrame());
+  ASMJIT_PROPAGATE(insertPrologEpilog());
+
+  ASMJIT_PROPAGATE(rewrite());
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Basic Block Management]
+// ============================================================================
+
+// Creates a new basic block whose first and last node are both `initialNode`
+// (which may be null). Returns nullptr on out-of-memory. The block is only
+// created here, not registered - callers use `addBlock()` for that.
+RABlock* RAPass::newBlock(BaseNode* initialNode) noexcept {
+  RABlock* block = zone()->newT<RABlock>(this);
+  if (ASMJIT_UNLIKELY(!block))
+    return nullptr;
+
+  block->setFirst(initialNode);
+  block->setLast(initialNode);
+
+  _createdBlockCount++;
+  return block;
+}
+
+// Returns the basic block associated with `cbLabel`, creating one if needed.
+//
+// Consecutive labels (possibly separated by non-code nodes such as alignment
+// directives or comments) are coalesced into a single block, so this walks
+// backwards from `cbLabel` looking for an already-bound label to join. If
+// `stoppedAt` is non-null it receives the node where the backward walk ended.
+// Returns nullptr on out-of-memory.
+RABlock* RAPass::newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stoppedAt) noexcept {
+  // Fast path - label already belongs to a block.
+  if (cbLabel->hasPassData())
+    return cbLabel->passData<RABlock>();
+
+  FuncNode* func = this->func();
+  BaseNode* node = cbLabel->prev();
+  RABlock* block = nullptr;
+
+  // Try to find some label, but terminate the loop on any code. We try hard to
+  // coalesce code that contains two consecutive labels or a combination of
+  // non-code nodes between 2 or more labels.
+  //
+  // Possible cases that would share the same basic block:
+  //
+  //   1. Two or more consecutive labels:
+  //     Label1:
+  //     Label2:
+  //
+  //   2. Two or more labels separated by non-code nodes:
+  //     Label1:
+  //     ; Some comment...
+  //     .align 16
+  //     Label2:
+  size_t nPendingLabels = 0;
+
+  while (node) {
+    if (node->type() == BaseNode::kNodeLabel) {
+      // Function has a different NodeType, just make sure this was not messed
+      // up as we must never associate BasicBlock with a `func` itself.
+      ASMJIT_ASSERT(node != func);
+
+      block = node->passData<RABlock>();
+      if (block) {
+        // Exit node has always a block associated with it. If we went here it
+        // means that `cbLabel` passed here is after the end of the function
+        // and cannot be merged with the function exit block.
+        if (node == func->exitNode())
+          block = nullptr;
+        break;
+      }
+
+      // Unbound label found - it will join whatever block we end up with.
+      nPendingLabels++;
+    }
+    else if (node->type() == BaseNode::kNodeAlign) {
+      // Align node is fine.
+    }
+    else {
+      // Any other node (code, data, ...) terminates the search.
+      break;
+    }
+
+    node = node->prev();
+  }
+
+  if (stoppedAt)
+    *stoppedAt = node;
+
+  if (!block) {
+    block = newBlock();
+    if (ASMJIT_UNLIKELY(!block))
+      return nullptr;
+  }
+
+  cbLabel->setPassData<RABlock>(block);
+  node = cbLabel;
+
+  // Bind every pending label found above to the same block. Each outer
+  // iteration resumes the backward walk and binds exactly one label.
+  while (nPendingLabels) {
+    node = node->prev();
+    for (;;) {
+      if (node->type() == BaseNode::kNodeLabel) {
+        node->setPassData<RABlock>(block);
+        nPendingLabels--;
+        break;
+      }
+
+      node = node->prev();
+      ASMJIT_ASSERT(node != nullptr);
+    }
+  }
+
+  // A freshly created block gets its node range set to span all coalesced
+  // labels up to and including `cbLabel`.
+  if (!block->first()) {
+    block->setFirst(node);
+    block->setLast(cbLabel);
+  }
+
+  return block;
+}
+
+// Registers `block` with the pass, assigning it the next sequential block id.
+// `willGrow()` reserves capacity first so `appendUnsafe()` cannot fail.
+Error RAPass::addBlock(RABlock* block) noexcept {
+  ASMJIT_PROPAGATE(_blocks.willGrow(allocator()));
+
+  block->_blockId = blockCount();
+  _blocks.appendUnsafe(block);
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Build]
+// ============================================================================
+
+// Finalizes shared assignments: remaps each block's shared-assignment id
+// through `sharedAssignmentsMap`, sizes the `_sharedAssignments` vector, and
+// aggregates per-block entry scratch GP registers into the shared assignments.
+Error RAPass::initSharedAssignments(const ZoneVector<uint32_t>& sharedAssignmentsMap) noexcept {
+  if (sharedAssignmentsMap.empty())
+    return kErrorOk;
+
+  // Remap ids and compute how many distinct shared assignments exist.
+  uint32_t count = 0;
+  for (RABlock* block : _blocks) {
+    if (block->hasSharedAssignmentId()) {
+      uint32_t sharedAssignmentId = sharedAssignmentsMap[block->sharedAssignmentId()];
+      block->setSharedAssignmentId(sharedAssignmentId);
+      count = Support::max(count, sharedAssignmentId + 1);
+    }
+  }
+
+  ASMJIT_PROPAGATE(_sharedAssignments.resize(allocator(), count));
+
+  // Aggregate all entry scratch GP regs from blocks of the same assignment to
+  // the assignment itself. It will then be used instead of RABlock's own scratch
+  // regs mask, as shared assignments have precedence.
+  for (RABlock* block : _blocks) {
+    if (block->hasJumpTable()) {
+      const RABlocks& successors = block->successors();
+      if (!successors.empty()) {
+        RABlock* firstSuccessor = successors[0];
+        // NOTE: Shared assignments connect all possible successors so we only
+        // need the first to propagate exit scratch gp registers.
+        ASMJIT_ASSERT(firstSuccessor->hasSharedAssignmentId());
+        RASharedAssignment& sa = _sharedAssignments[firstSuccessor->sharedAssignmentId()];
+        sa.addEntryScratchGpRegs(block->exitScratchGpRegs());
+      }
+    }
+    if (block->hasSharedAssignmentId()) {
+      RASharedAssignment& sa = _sharedAssignments[block->sharedAssignmentId()];
+      sa.addEntryScratchGpRegs(block->_entryScratchGpRegs);
+    }
+  }
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Views Order]
+// ============================================================================
+
+// Stack frame used by the iterative DFS in `RAPass::buildViews()`. Stores a
+// block together with the index of the next successor to visit, so traversal
+// can resume where it left off after a child subtree is finished.
+class RABlockVisitItem {
+public:
+  inline RABlockVisitItem(RABlock* block, uint32_t index) noexcept
+    : _block(block),
+      _index(index) {}
+
+  inline RABlockVisitItem(const RABlockVisitItem& other) noexcept
+    : _block(other._block),
+      _index(other._index) {}
+
+  inline RABlockVisitItem& operator=(const RABlockVisitItem& other) noexcept = default;
+
+  inline RABlock* block() const noexcept { return _block; }
+  inline uint32_t index() const noexcept { return _index; }
+
+  // Block being visited.
+  RABlock* _block;
+  // Index of the next successor of `_block` to process.
+  uint32_t _index;
+};
+
+// Builds the post-order view (`_pov`) of the CFG using an iterative DFS with
+// an explicit stack (no recursion), marking every visited block reachable and
+// recording its post-order index for later dominator computation.
+Error RAPass::buildViews() noexcept {
+#ifndef ASMJIT_NO_LOGGING
+  Logger* logger = debugLogger();
+  ASMJIT_RA_LOG_FORMAT("[RAPass::BuildViews]\n");
+#endif
+
+  uint32_t count = blockCount();
+  if (ASMJIT_UNLIKELY(!count)) return kErrorOk;
+
+  // Reserved up-front so `appendUnsafe()` below cannot fail.
+  ASMJIT_PROPAGATE(_pov.reserve(allocator(), count));
+
+  ZoneStack<RABlockVisitItem> stack;
+  ASMJIT_PROPAGATE(stack.init(allocator()));
+
+  ZoneBitVector visited;
+  ASMJIT_PROPAGATE(visited.resize(allocator(), count));
+
+  // DFS starts at the entry block; `i` indexes the current block's successors.
+  RABlock* current = _blocks[0];
+  uint32_t i = 0;
+
+  for (;;) {
+    // Descend into the first unvisited successor, pushing the current
+    // position so we can resume after the child subtree is finished.
+    for (;;) {
+      if (i >= current->successors().size())
+        break;
+
+      // Skip if already visited.
+      RABlock* child = current->successors()[i++];
+      if (visited.bitAt(child->blockId()))
+        continue;
+
+      // Mark as visited to prevent visiting the same block multiple times.
+      visited.setBit(child->blockId(), true);
+
+      // Add the current block on the stack, we will get back to it later.
+      ASMJIT_PROPAGATE(stack.append(RABlockVisitItem(current, i)));
+      current = child;
+      i = 0;
+    }
+
+    // All successors processed - emit `current` in post-order.
+    current->makeReachable();
+    current->_povOrder = _pov.size();
+    _pov.appendUnsafe(current);
+
+    if (stack.empty())
+      break;
+
+    // Resume the parent at the successor index it was suspended on.
+    RABlockVisitItem top = stack.pop();
+    current = top.block();
+    i = top.index();
+  }
+
+  ASMJIT_RA_LOG_COMPLEX({
+    StringTmp<1024> sb;
+    for (RABlock* block : blocks()) {
+      sb.clear();
+      if (block->hasSuccessors()) {
+        sb.appendFormat("  #%u -> {", block->blockId());
+        _dumpBlockIds(sb, block->successors());
+        sb.append("}\n");
+      }
+      else {
+        sb.appendFormat("  #%u -> {Exit}\n", block->blockId());
+      }
+      logger->log(sb);
+    }
+  });
+
+  visited.release(allocator());
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Dominators]
+// ============================================================================
+
+// "intersect" helper from "A Simple, Fast Dominance Algorithm" (Cooper,
+// Harvey, Kennedy): walks both blocks up their immediate-dominator chains,
+// using post-order numbers for direction, until they meet at the nearest
+// common dominator.
+static ASMJIT_INLINE RABlock* intersectBlocks(RABlock* b1, RABlock* b2) noexcept {
+  while (b1 != b2) {
+    while (b2->povOrder() > b1->povOrder()) b1 = b1->iDom();
+    while (b1->povOrder() > b2->povOrder()) b2 = b2->iDom();
+  }
+  return b1;
+}
+
+// Based on "A Simple, Fast Dominance Algorithm".
+Error RAPass::buildDominators() noexcept {
+#ifndef ASMJIT_NO_LOGGING
+ Logger* logger = debugLogger();
+ ASMJIT_RA_LOG_FORMAT("[RAPass::BuildDominators]\n");
+#endif
+
+ if (_blocks.empty())
+ return kErrorOk;
+
+ RABlock* entryBlock = this->entryBlock();
+ entryBlock->setIDom(entryBlock);
+
+ bool changed = true;
+ uint32_t nIters = 0;
+
+ while (changed) {
+ nIters++;
+ changed = false;
+
+ uint32_t i = _pov.size();
+ while (i) {
+ RABlock* block = _pov[--i];
+ if (block == entryBlock)
+ continue;
+
+ RABlock* iDom = nullptr;
+ const RABlocks& preds = block->predecessors();
+
+ uint32_t j = preds.size();
+ while (j) {
+ RABlock* p = preds[--j];
+ if (!p->iDom()) continue;
+ iDom = !iDom ? p : intersectBlocks(iDom, p);
+ }
+
+ if (block->iDom() != iDom) {
+ ASMJIT_RA_LOG_FORMAT(" IDom of #%u -> #%u\n", block->blockId(), iDom->blockId());
+ block->setIDom(iDom);
+ changed = true;
+ }
+ }
+ }
+
+ ASMJIT_RA_LOG_FORMAT(" Done (%u iterations)\n", nIters);
+ return kErrorOk;
+}
+
+// Returns true if `a` strictly dominates `b` (a != b is guaranteed by the
+// public wrappers). Walks `b`'s iDom chain towards the entry block looking
+// for `a`; hitting the entry block first means `a` does not dominate `b`.
+bool RAPass::_strictlyDominates(const RABlock* a, const RABlock* b) const noexcept {
+  ASMJIT_ASSERT(a != nullptr); // There must be at least one block if this function is
+  ASMJIT_ASSERT(b != nullptr); // called, as both `a` and `b` must be valid blocks.
+  ASMJIT_ASSERT(a != b);       // Checked by `dominates()` and `strictlyDominates()`.
+
+  // Nothing strictly dominates the entry block.
+  const RABlock* entryBlock = this->entryBlock();
+  if (a == entryBlock)
+    return false;
+
+  const RABlock* iDom = b->iDom();
+  while (iDom != a && iDom != entryBlock)
+    iDom = iDom->iDom();
+
+  // NOTE: if `a` IS the entry block we returned false above, so reaching the
+  // entry block here can only mean `a` was not found on the chain.
+  return iDom != entryBlock;
+}
+
+// Returns the nearest common dominator of `a` and `b`. Fast paths handle the
+// case where one block strictly dominates the other; otherwise A's dominator
+// chain is stamped with a fresh timestamp and B's chain is scanned for the
+// first stamped block.
+const RABlock* RAPass::_nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept {
+  ASMJIT_ASSERT(a != nullptr); // There must be at least one block if this function is
+  ASMJIT_ASSERT(b != nullptr); // called, as both `a` and `b` must be valid blocks.
+  ASMJIT_ASSERT(a != b);       // Checked by `dominates()` and `properlyDominates()`.
+
+  // NOTE(review): this early return is unreachable given the assert above
+  // (a != b is a precondition); it appears to be a defensive release-build
+  // guard - confirm before removing.
+  if (a == b)
+    return a;
+
+  // If `a` strictly dominates `b` then `a` is the nearest common dominator.
+  if (_strictlyDominates(a, b))
+    return a;
+
+  // If `b` strictly dominates `a` then `b` is the nearest common dominator.
+  if (_strictlyDominates(b, a))
+    return b;
+
+  const RABlock* entryBlock = this->entryBlock();
+  uint64_t timestamp = nextTimestamp();
+
+  // Mark all A's dominators.
+  const RABlock* block = a->iDom();
+  while (block != entryBlock) {
+    block->setTimestamp(timestamp);
+    block = block->iDom();
+  }
+
+  // Check all B's dominators against marked dominators of A.
+  block = b->iDom();
+  while (block != entryBlock) {
+    if (block->hasTimestamp(timestamp))
+      return block;
+    block = block->iDom();
+  }
+
+  // No intermediate block dominates both - the entry block always does.
+  return entryBlock;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Utilities]
+// ============================================================================
+
+// Removes code and removable nodes from every block not marked reachable by
+// `buildViews()`. Blocks are not deleted - their node range is shrunk (or
+// cleared entirely) to whatever non-removable nodes remain.
+Error RAPass::removeUnreachableBlocks() noexcept {
+  uint32_t numAllBlocks = blockCount();
+  uint32_t numReachableBlocks = reachableBlockCount();
+
+  // All reachable -> nothing to do.
+  if (numAllBlocks == numReachableBlocks)
+    return kErrorOk;
+
+#ifndef ASMJIT_NO_LOGGING
+  Logger* logger = debugLogger();
+  ASMJIT_RA_LOG_FORMAT("[RAPass::RemoveUnreachableBlocks (%u of %u unreachable)]\n", numAllBlocks - numReachableBlocks, numAllBlocks);
+#endif
+
+  for (uint32_t i = 0; i < numAllBlocks; i++) {
+    RABlock* block = _blocks[i];
+    if (block->isReachable())
+      continue;
+
+    ASMJIT_RA_LOG_FORMAT("  Removing block {%u}\n", i);
+    BaseNode* first = block->first();
+    BaseNode* last = block->last();
+
+    // Anchors just outside the block - remain valid while nodes inside
+    // the block are being unlinked.
+    BaseNode* beforeFirst = first->prev();
+    BaseNode* afterLast = last->next();
+
+    BaseNode* node = first;
+    while (node != afterLast) {
+      BaseNode* next = node->next();
+
+      if (node->isCode() || node->isRemovable())
+        cc()->removeNode(node);
+      node = next;
+    }
+
+    // Everything removed -> empty block; otherwise re-anchor the block on
+    // the surviving (non-removable) nodes.
+    if (beforeFirst->next() == afterLast) {
+      block->setFirst(nullptr);
+      block->setLast(nullptr);
+    }
+    else {
+      block->setFirst(beforeFirst->next());
+      block->setLast(afterLast->prev());
+    }
+  }
+
+  return kErrorOk;
+}
+
+// Skips informative / no-effect nodes starting at `node` and returns the
+// first node that matters for successor resolution (or nullptr at the end).
+BaseNode* RAPass::findSuccessorStartingAt(BaseNode* node) noexcept {
+  while (node && (node->isInformative() || node->hasNoEffect()))
+    node = node->next();
+  return node;
+}
+
+// Returns true if `target` follows `node` with nothing but non-code,
+// non-data nodes in between (i.e. they are effectively adjacent).
+bool RAPass::isNextTo(BaseNode* node, BaseNode* target) noexcept {
+  for (;;) {
+    node = node->next();
+    if (node == target)
+      return true;
+
+    // End of the node list reached without finding `target`.
+    if (!node)
+      return false;
+
+    // Any code or data in between breaks adjacency.
+    if (node->isCode() || node->isData())
+      return false;
+  }
+}
+
+// ============================================================================
+// [asmjit::RAPass - ?]
+// ============================================================================
+
+// Creates an RAWorkReg for `vReg` and registers it in both the global
+// work-reg list and its group list. Slow path of `asWorkReg()`, which calls
+// this only when no RAWorkReg exists yet. On success `*out` receives the
+// new work register; returns kErrorOutOfMemory on allocation failure.
+Error RAPass::_asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept {
+  // Checked by `asWorkReg()` - must be true.
+  ASMJIT_ASSERT(vReg->_workReg == nullptr);
+
+  uint32_t group = vReg->group();
+  ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+  RAWorkRegs& wRegs = workRegs();
+  RAWorkRegs& wRegsByGroup = workRegs(group);
+
+  // Reserve capacity in both vectors first so the appends below cannot fail.
+  ASMJIT_PROPAGATE(wRegs.willGrow(allocator()));
+  ASMJIT_PROPAGATE(wRegsByGroup.willGrow(allocator()));
+
+  RAWorkReg* wReg = zone()->newT<RAWorkReg>(vReg, wRegs.size());
+  if (ASMJIT_UNLIKELY(!wReg))
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  vReg->setWorkReg(wReg);
+  if (!vReg->isStack())
+    wReg->setRegByteMask(Support::lsbMask<uint64_t>(vReg->virtSize()));
+  wRegs.appendUnsafe(wReg);
+  wRegsByGroup.appendUnsafe(wReg);
+
+  // Only used by RA logging.
+  _maxWorkRegNameSize = Support::max(_maxWorkRegNameSize, vReg->nameSize());
+
+  *out = wReg;
+  return kErrorOk;
+}
+
+// Allocates a WorkToPhysMap sized for the current work-reg count. When no
+// work registers exist the map size is zero, in which case a shared static
+// dummy is returned instead of nullptr so callers never see a failure for
+// the empty case. Returns nullptr only on real out-of-memory.
+RAAssignment::WorkToPhysMap* RAPass::newWorkToPhysMap() noexcept {
+  uint32_t count = workRegCount();
+  size_t size = WorkToPhysMap::sizeOf(count);
+
+  // If no registers are used it could be zero, in that case return a dummy
+  // map instead of NULL.
+  if (ASMJIT_UNLIKELY(!size)) {
+    static const RAAssignment::WorkToPhysMap nullMap = {{ 0 }};
+    return const_cast<RAAssignment::WorkToPhysMap*>(&nullMap);
+  }
+
+  WorkToPhysMap* map = zone()->allocT<WorkToPhysMap>(size);
+  if (ASMJIT_UNLIKELY(!map))
+    return nullptr;
+
+  map->reset(count);
+  return map;
+}
+
+// Allocates a PhysToWorkMap sized for the total number of physical registers.
+// Returns nullptr on out-of-memory.
+RAAssignment::PhysToWorkMap* RAPass::newPhysToWorkMap() noexcept {
+  uint32_t count = physRegTotal();
+  size_t size = PhysToWorkMap::sizeOf(count);
+
+  PhysToWorkMap* map = zone()->allocT<PhysToWorkMap>(size);
+  if (ASMJIT_UNLIKELY(!map))
+    return nullptr;
+
+  map->reset(count);
+  return map;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Registers - Liveness Analysis and Statistics]
+// ============================================================================
+
+// Bit-vector operators used by the liveness dataflow solver. Each `op`
+// overload applies `Operator` word-by-word over parallel bit vectors and
+// reports whether any destination word changed - the change flag drives the
+// fixed-point iteration in `buildLiveness()`.
+namespace LiveOps {
+  typedef ZoneBitVector::BitWord BitWord;
+
+  // Dataflow transfer function: IN = (OUT | GEN) & ~KILL.
+  struct In {
+    static ASMJIT_INLINE BitWord op(BitWord dst, BitWord out, BitWord gen, BitWord kill) noexcept {
+      DebugUtils::unused(dst);
+      return (out | gen) & ~kill;
+    }
+  };
+
+  // dst[i] = Operator(dst[i], a[i]); returns true if anything changed.
+  template<typename Operator>
+  static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, uint32_t n) noexcept {
+    BitWord changed = 0;
+
+    for (uint32_t i = 0; i < n; i++) {
+      BitWord before = dst[i];
+      BitWord after = Operator::op(before, a[i]);
+
+      dst[i] = after;
+      changed |= (before ^ after);
+    }
+
+    return changed != 0;
+  }
+
+  // dst[i] = Operator(dst[i], a[i], b[i]); returns true if anything changed.
+  template<typename Operator>
+  static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, uint32_t n) noexcept {
+    BitWord changed = 0;
+
+    for (uint32_t i = 0; i < n; i++) {
+      BitWord before = dst[i];
+      BitWord after = Operator::op(before, a[i], b[i]);
+
+      dst[i] = after;
+      changed |= (before ^ after);
+    }
+
+    return changed != 0;
+  }
+
+  // dst[i] = Operator(dst[i], a[i], b[i], c[i]); returns true if anything changed.
+  template<typename Operator>
+  static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, const BitWord* c, uint32_t n) noexcept {
+    BitWord changed = 0;
+
+    for (uint32_t i = 0; i < n; i++) {
+      BitWord before = dst[i];
+      BitWord after = Operator::op(before, a[i], b[i], c[i]);
+
+      dst[i] = after;
+      changed |= (before ^ after);
+    }
+
+    return changed != 0;
+  }
+
+  // Recomputes a block's liveOut (union of successors' liveIn) and then its
+  // liveIn via the `In` transfer function. `initial` forces the IN
+  // recomputation on the first visit even if OUT did not change.
+  static ASMJIT_INLINE bool recalcInOut(RABlock* block, uint32_t numBitWords, bool initial = false) noexcept {
+    bool changed = initial;
+
+    const RABlocks& successors = block->successors();
+    uint32_t numSuccessors = successors.size();
+
+    // Calculate `OUT` based on `IN` of all successors.
+    for (uint32_t i = 0; i < numSuccessors; i++)
+      changed |= op<Support::Or>(block->liveOut().data(), successors[i]->liveIn().data(), numBitWords);
+
+    // Calculate `IN` based on `OUT`, `GEN`, and `KILL` bits.
+    if (changed)
+      changed = op<In>(block->liveIn().data(), block->liveOut().data(), block->gen().data(), block->kill().data(), numBitWords);
+
+    return changed;
+  }
+}
+
+// Liveness analysis over all reachable blocks. Four phases, in order:
+//   1. Compute per-block GEN/KILL sets (backward scan of each block).
+//   2. Solve LIVE-IN/LIVE-OUT with a worklist until a fixed point.
+//   3. Assign instruction positions and build per-work-reg live spans and
+//      per-block/global live counts.
+//   4. Derive per-work-reg statistics (width, frequency, priority) used
+//      later to order bin-packing.
+ASMJIT_FAVOR_SPEED Error RAPass::buildLiveness() noexcept {
+#ifndef ASMJIT_NO_LOGGING
+  Logger* logger = debugLogger();
+  StringTmp<512> sb;
+#endif
+
+  ASMJIT_RA_LOG_FORMAT("[RAPass::BuildLiveness]\n");
+
+  uint32_t i;
+
+  uint32_t numAllBlocks = blockCount();
+  uint32_t numReachableBlocks = reachableBlockCount();
+
+  uint32_t numVisits = numReachableBlocks;
+  uint32_t numWorkRegs = workRegCount();
+  uint32_t numBitWords = ZoneBitVector::_wordsPerBits(numWorkRegs);
+
+  // No virtual registers -> nothing to analyze.
+  if (!numWorkRegs) {
+    ASMJIT_RA_LOG_FORMAT("  Done (no virtual registers)\n");
+    return kErrorOk;
+  }
+
+  ZoneVector<uint32_t> nUsesPerWorkReg; // Number of USEs of each RAWorkReg.
+  ZoneVector<uint32_t> nOutsPerWorkReg; // Number of OUTs of each RAWorkReg.
+  ZoneVector<uint32_t> nInstsPerBlock;  // Number of instructions of each RABlock.
+
+  ASMJIT_PROPAGATE(nUsesPerWorkReg.resize(allocator(), numWorkRegs));
+  ASMJIT_PROPAGATE(nOutsPerWorkReg.resize(allocator(), numWorkRegs));
+  ASMJIT_PROPAGATE(nInstsPerBlock.resize(allocator(), numAllBlocks));
+
+  // --------------------------------------------------------------------------
+  // Calculate GEN/KILL of each block.
+  // --------------------------------------------------------------------------
+
+  for (i = 0; i < numReachableBlocks; i++) {
+    RABlock* block = _pov[i];
+    ASMJIT_PROPAGATE(block->resizeLiveBits(numWorkRegs));
+
+    // Walk instructions backwards so GEN/KILL reflect the first access seen
+    // from the top of the block.
+    BaseNode* node = block->last();
+    BaseNode* stop = block->first();
+
+    uint32_t nInsts = 0;
+    for (;;) {
+      if (node->isInst()) {
+        InstNode* inst = node->as<InstNode>();
+        RAInst* raInst = inst->passData<RAInst>();
+        ASMJIT_ASSERT(raInst != nullptr);
+
+        RATiedReg* tiedRegs = raInst->tiedRegs();
+        uint32_t count = raInst->tiedCount();
+
+        for (uint32_t j = 0; j < count; j++) {
+          RATiedReg* tiedReg = &tiedRegs[j];
+          uint32_t workId = tiedReg->workId();
+
+          // Update `nUses` and `nOuts`.
+          nUsesPerWorkReg[workId] += 1u;
+          nOutsPerWorkReg[workId] += uint32_t(tiedReg->isWrite());
+
+          // Mark as:
+          //   KILL - if this VirtReg is killed afterwards.
+          //   LAST - if this VirtReg is last in this basic block.
+          if (block->kill().bitAt(workId))
+            tiedReg->addFlags(RATiedReg::kKill);
+          else if (!block->gen().bitAt(workId))
+            tiedReg->addFlags(RATiedReg::kLast);
+
+          if (tiedReg->isWriteOnly()) {
+            // KILL.
+            block->kill().setBit(workId, true);
+          }
+          else {
+            // GEN.
+            block->kill().setBit(workId, false);
+            block->gen().setBit(workId, true);
+          }
+        }
+
+        nInsts++;
+      }
+
+      if (node == stop)
+        break;
+
+      node = node->prev();
+      ASMJIT_ASSERT(node != nullptr);
+    }
+
+    nInstsPerBlock[block->blockId()] = nInsts;
+  }
+
+  // --------------------------------------------------------------------------
+  // Calculate IN/OUT of each block.
+  // --------------------------------------------------------------------------
+
+  {
+    // Worklist algorithm: whenever a block's IN changes, re-enqueue its
+    // predecessors; `workBits` prevents a block being queued twice.
+    ZoneStack<RABlock*> workList;
+    ZoneBitVector workBits;
+
+    ASMJIT_PROPAGATE(workList.init(allocator()));
+    ASMJIT_PROPAGATE(workBits.resize(allocator(), blockCount(), true));
+
+    for (i = 0; i < numReachableBlocks; i++) {
+      RABlock* block = _pov[i];
+      LiveOps::recalcInOut(block, numBitWords, true);
+      ASMJIT_PROPAGATE(workList.append(block));
+    }
+
+    while (!workList.empty()) {
+      RABlock* block = workList.popFirst();
+      uint32_t blockId = block->blockId();
+
+      workBits.setBit(blockId, false);
+      if (LiveOps::recalcInOut(block, numBitWords)) {
+        const RABlocks& predecessors = block->predecessors();
+        uint32_t numPredecessors = predecessors.size();
+
+        for (uint32_t j = 0; j < numPredecessors; j++) {
+          RABlock* pred = predecessors[j];
+          if (!workBits.bitAt(pred->blockId())) {
+            workBits.setBit(pred->blockId(), true);
+            ASMJIT_PROPAGATE(workList.append(pred));
+          }
+        }
+      }
+      numVisits++;
+    }
+
+    workList.reset();
+    workBits.release(allocator());
+  }
+
+  ASMJIT_RA_LOG_COMPLEX({
+    logger->logf("  LiveIn/Out Done (%u visits)\n", numVisits);
+    for (i = 0; i < numAllBlocks; i++) {
+      RABlock* block = _blocks[i];
+
+      ASMJIT_PROPAGATE(sb.assignFormat("  {#%u}\n", block->blockId()));
+      ASMJIT_PROPAGATE(_dumpBlockLiveness(sb, block));
+
+      logger->log(sb);
+    }
+  });
+
+  // --------------------------------------------------------------------------
+  // Reserve the space in each `RAWorkReg` for references.
+  // --------------------------------------------------------------------------
+
+  for (i = 0; i < numWorkRegs; i++) {
+    RAWorkReg* workReg = workRegById(i);
+    ASMJIT_PROPAGATE(workReg->_refs.reserve(allocator(), nUsesPerWorkReg[i]));
+    ASMJIT_PROPAGATE(workReg->_writes.reserve(allocator(), nOutsPerWorkReg[i]));
+  }
+
+  // --------------------------------------------------------------------------
+  // Assign block and instruction positions, build LiveCount and LiveSpans.
+  // --------------------------------------------------------------------------
+
+  // Positions advance by 2 per instruction so a read slot and a write slot
+  // can be distinguished (see `position + !tiedReg->isRead()` below).
+  uint32_t position = 2;
+  for (i = 0; i < numAllBlocks; i++) {
+    RABlock* block = _blocks[i];
+    if (!block->isReachable())
+      continue;
+
+    BaseNode* node = block->first();
+    BaseNode* stop = block->last();
+
+    uint32_t endPosition = position + nInstsPerBlock[i] * 2;
+    block->setFirstPosition(position);
+    block->setEndPosition(endPosition);
+
+    RALiveCount curLiveCount;
+    RALiveCount maxLiveCount;
+
+    // Process LIVE-IN.
+    ZoneBitVector::ForEachBitSet it(block->liveIn());
+    while (it.hasNext()) {
+      RAWorkReg* workReg = _workRegs[uint32_t(it.next())];
+      curLiveCount[workReg->group()]++;
+      ASMJIT_PROPAGATE(workReg->liveSpans().openAt(allocator(), position, endPosition));
+    }
+
+    for (;;) {
+      if (node->isInst()) {
+        InstNode* inst = node->as<InstNode>();
+        RAInst* raInst = inst->passData<RAInst>();
+        ASMJIT_ASSERT(raInst != nullptr);
+
+        RATiedReg* tiedRegs = raInst->tiedRegs();
+        uint32_t count = raInst->tiedCount();
+
+        inst->setPosition(position);
+        raInst->_liveCount = curLiveCount;
+
+        for (uint32_t j = 0; j < count; j++) {
+          RATiedReg* tiedReg = &tiedRegs[j];
+          uint32_t workId = tiedReg->workId();
+
+          // Create refs and writes.
+          RAWorkReg* workReg = workRegById(workId);
+          workReg->_refs.appendUnsafe(node);
+          if (tiedReg->isWrite())
+            workReg->_writes.appendUnsafe(node);
+
+          // We couldn't calculate this in previous steps, but since we know all LIVE-OUT
+          // at this point it becomes trivial. If this is the last instruction that uses
+          // this `workReg` and it's not LIVE-OUT then it is KILLed here.
+          if (tiedReg->isLast() && !block->liveOut().bitAt(workId))
+            tiedReg->addFlags(RATiedReg::kKill);
+
+          LiveRegSpans& liveSpans = workReg->liveSpans();
+          bool wasOpen;
+          ASMJIT_PROPAGATE(liveSpans.openAt(allocator(), position + !tiedReg->isRead(), endPosition, wasOpen));
+
+          uint32_t group = workReg->group();
+          if (!wasOpen) {
+            curLiveCount[group]++;
+            raInst->_liveCount[group]++;
+          }
+
+          if (tiedReg->isKill()) {
+            liveSpans.closeAt(position + !tiedReg->isRead() + 1);
+            curLiveCount[group]--;
+          }
+
+          // Update `RAWorkReg::hintRegId`.
+          if (tiedReg->hasUseId() && !workReg->hasHintRegId()) {
+            uint32_t useId = tiedReg->useId();
+            // Don't hint a register this instruction clobbers.
+            if (!(raInst->_clobberedRegs[group] & Support::bitMask(useId)))
+              workReg->setHintRegId(useId);
+          }
+
+          // Update `RAWorkReg::clobberedSurvivalMask`.
+          if (raInst->_clobberedRegs[group] && !tiedReg->isOutOrKill())
+            workReg->addClobberSurvivalMask(raInst->_clobberedRegs[group]);
+        }
+
+        position += 2;
+        maxLiveCount.op<Support::Max>(raInst->_liveCount);
+      }
+
+      if (node == stop)
+        break;
+
+      node = node->next();
+      ASMJIT_ASSERT(node != nullptr);
+    }
+
+    block->_maxLiveCount = maxLiveCount;
+    _globalMaxLiveCount.op<Support::Max>(maxLiveCount);
+    ASMJIT_ASSERT(position == block->endPosition());
+  }
+
+  // --------------------------------------------------------------------------
+  // Calculate WorkReg statistics.
+  // --------------------------------------------------------------------------
+
+  for (i = 0; i < numWorkRegs; i++) {
+    RAWorkReg* workReg = _workRegs[i];
+
+    LiveRegSpans& spans = workReg->liveSpans();
+    uint32_t width = spans.width();
+    // Frequency = references per unit of live range; priority additionally
+    // rewards the virtual register's weight.
+    float freq = width ? float(double(workReg->_refs.size()) / double(width)) : float(0);
+
+    RALiveStats& stats = workReg->liveStats();
+    stats._width = width;
+    stats._freq = freq;
+    stats._priority = freq + float(int(workReg->virtReg()->weight())) * 0.01f;
+  }
+
+  ASMJIT_RA_LOG_COMPLEX({
+    sb.clear();
+    _dumpLiveSpans(sb);
+    logger->log(sb);
+  });
+
+  nUsesPerWorkReg.release(allocator());
+  nOutsPerWorkReg.release(allocator());
+  nInstsPerBlock.release(allocator());
+
+  return kErrorOk;
+}
+
+// Links function arguments to their work registers: every argument that is
+// live on entry gets its argument index recorded, and register arguments of
+// a matching group get the incoming physical register as allocation hint.
+Error RAPass::assignArgIndexToWorkRegs() noexcept {
+  ZoneBitVector& liveIn = entryBlock()->liveIn();
+  uint32_t argCount = func()->argCount();
+
+  for (uint32_t i = 0; i < argCount; i++) {
+    // Unassigned argument.
+    VirtReg* virtReg = func()->arg(i);
+    if (!virtReg) continue;
+
+    // Unreferenced argument.
+    RAWorkReg* workReg = virtReg->workReg();
+    if (!workReg) continue;
+
+    // Overwritten argument.
+    uint32_t workId = workReg->workId();
+    if (!liveIn.bitAt(workId))
+      continue;
+
+    workReg->setArgIndex(i);
+
+    // Hint the incoming physical register if the groups match.
+    const FuncValue& arg = func()->detail().arg(i);
+    if (arg.isReg() && _archRegsInfo->regInfo[arg.regType()].group() == workReg->group()) {
+      workReg->setHintRegId(arg.regId());
+    }
+  }
+
+  return kErrorOk;
+}
+// ============================================================================
+// [asmjit::RAPass - Allocation - Global]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+// Appends a one-line dump of `liveSpans` assigned to physical register
+// `index` to `sb`, formatted as "[start:end@virtId]" entries.
+static void RAPass_dumpSpans(String& sb, uint32_t index, const LiveRegSpans& liveSpans) noexcept {
+  sb.appendFormat("  %02u: ", index);
+
+  for (uint32_t i = 0; i < liveSpans.size(); i++) {
+    const LiveRegSpan& liveSpan = liveSpans[i];
+    if (i) sb.append(", ");
+    sb.appendFormat("[%u:%u@%u]", liveSpan.a, liveSpan.b, liveSpan.id);
+  }
+
+  sb.append('\n');
+}
+#endif
+
+// Global allocation: initializes per-physical-register live spans and then
+// bin-packs work registers of every register group independently.
+Error RAPass::runGlobalAllocator() noexcept {
+  ASMJIT_PROPAGATE(initGlobalLiveSpans());
+
+  for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+    ASMJIT_PROPAGATE(binPack(group));
+  }
+
+  return kErrorOk;
+}
+
+// Allocates and default-constructs one LiveRegSpans per physical register in
+// each group; these accumulate the global assignment during bin-packing.
+ASMJIT_FAVOR_SPEED Error RAPass::initGlobalLiveSpans() noexcept {
+  for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+    size_t physCount = _physRegCount[group];
+    LiveRegSpans* liveSpans = allocator()->allocT<LiveRegSpans>(physCount * sizeof(LiveRegSpans));
+
+    if (ASMJIT_UNLIKELY(!liveSpans))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    // Raw allocation - construct each element in place.
+    for (size_t physId = 0; physId < physCount; physId++)
+      new(&liveSpans[physId]) LiveRegSpans();
+
+    _globalLiveSpans[group] = liveSpans;
+  }
+
+  return kErrorOk;
+}
+
+// Global bin-packing for one register group: work registers are sorted by
+// priority, then packed into physical registers in two rounds - first those
+// with a register-id hint (function arguments / precolored regs), then the
+// rest. Anything that cannot be packed without overlap is left for the
+// complex (local) allocation strategy and marked stack-preferred.
+ASMJIT_FAVOR_SPEED Error RAPass::binPack(uint32_t group) noexcept {
+  if (workRegCount(group) == 0)
+    return kErrorOk;
+
+#ifndef ASMJIT_NO_LOGGING
+  Logger* logger = debugLogger();
+  StringTmp<512> sb;
+
+  ASMJIT_RA_LOG_FORMAT("[RAPass::BinPack] Available=%u (0x%08X) Count=%u\n",
+                       Support::popcnt(_availableRegs[group]),
+                       _availableRegs[group],
+                       workRegCount(group));
+#endif
+
+  uint32_t i;
+  uint32_t physCount = _physRegCount[group];
+
+  // Local copy - entries are removed from `workRegs` as they get packed.
+  RAWorkRegs workRegs;
+  LiveRegSpans tmpSpans;
+
+  ASMJIT_PROPAGATE(workRegs.concat(allocator(), this->workRegs(group)));
+  // Highest priority first.
+  workRegs.sort([](const RAWorkReg* a, const RAWorkReg* b) noexcept {
+    return b->liveStats().priority() - a->liveStats().priority();
+  });
+
+  uint32_t numWorkRegs = workRegs.size();
+  uint32_t availableRegs = _availableRegs[group];
+
+  // First try to pack everything that provides register-id hint as these are
+  // most likely function arguments and fixed (precolored) virtual registers.
+  if (!workRegs.empty()) {
+    uint32_t dstIndex = 0;
+
+    for (i = 0; i < numWorkRegs; i++) {
+      RAWorkReg* workReg = workRegs[i];
+      if (workReg->hasHintRegId()) {
+        uint32_t physId = workReg->hintRegId();
+        if (availableRegs & Support::bitMask(physId)) {
+          LiveRegSpans& live = _globalLiveSpans[group][physId];
+          Error err = tmpSpans.nonOverlappingUnionOf(allocator(), live, workReg->liveSpans(), LiveRegData(workReg->virtId()));
+
+          if (err == kErrorOk) {
+            workReg->setHomeRegId(physId);
+            live.swap(tmpSpans);
+            continue;
+          }
+
+          // 0xFFFFFFFF signals an overlap (not a real error) - keep the
+          // work reg for the next round; any other error is propagated.
+          if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu))
+            return err;
+        }
+      }
+
+      // Compact unpacked regs to the front of the vector.
+      workRegs[dstIndex++] = workReg;
+    }
+
+    workRegs._setSize(dstIndex);
+    numWorkRegs = dstIndex;
+  }
+
+  // Try to pack the rest.
+  if (!workRegs.empty()) {
+    uint32_t dstIndex = 0;
+
+    for (i = 0; i < numWorkRegs; i++) {
+      RAWorkReg* workReg = workRegs[i];
+      uint32_t physRegs = availableRegs;
+
+      while (physRegs) {
+        uint32_t physId = Support::ctz(physRegs);
+        // Prefer registers the reg survives clobbering of, if any qualify.
+        if (workReg->clobberSurvivalMask()) {
+          uint32_t preferredMask = physRegs & workReg->clobberSurvivalMask();
+          if (preferredMask)
+            physId = Support::ctz(preferredMask);
+        }
+
+        LiveRegSpans& live = _globalLiveSpans[group][physId];
+        Error err = tmpSpans.nonOverlappingUnionOf(allocator(), live, workReg->liveSpans(), LiveRegData(workReg->virtId()));
+
+        if (err == kErrorOk) {
+          workReg->setHomeRegId(physId);
+          live.swap(tmpSpans);
+          break;
+        }
+
+        // As above: 0xFFFFFFFF means overlap, try the next candidate.
+        if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu))
+          return err;
+
+        physRegs ^= Support::bitMask(physId);
+      }
+
+      // Keep it in `workRegs` if it was not allocated.
+      if (!physRegs)
+        workRegs[dstIndex++] = workReg;
+    }
+
+    workRegs._setSize(dstIndex);
+    numWorkRegs = dstIndex;
+  }
+
+  ASMJIT_RA_LOG_COMPLEX({
+    for (uint32_t physId = 0; physId < physCount; physId++) {
+      LiveRegSpans& live = _globalLiveSpans[group][physId];
+      if (live.empty())
+        continue;
+
+      sb.clear();
+      RAPass_dumpSpans(sb, physId, live);
+      logger->log(sb);
+    }
+  });
+
+  // Maybe unused if logging is disabled.
+  DebugUtils::unused(physCount);
+
+  if (workRegs.empty()) {
+    ASMJIT_RA_LOG_FORMAT("  Completed.\n");
+  }
+  else {
+    // Leftovers require the complex strategy and may live on the stack.
+    _strategy[group].setType(RAStrategy::kStrategyComplex);
+    for (RAWorkReg* workReg : workRegs)
+      workReg->markStackPreferred();
+
+    ASMJIT_RA_LOG_COMPLEX({
+      uint32_t count = workRegs.size();
+      sb.clear();
+      sb.appendFormat("  Unassigned (%u): ", count);
+      for (i = 0; i < numWorkRegs; i++) {
+        RAWorkReg* workReg = workRegs[i];
+        if (i) sb.append(", ");
+        sb.append(workReg->name());
+      }
+      sb.append('\n');
+      logger->log(sb);
+    });
+  }
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Allocation - Local]
+// ============================================================================
+
// Runs local (per basic block) register allocation.
//
// Visits every reachable basic block and allocates registers instruction by
// instruction through `RALocalAllocator`. Entry assignments of blocks are
// created lazily via `setBlockEntryAssignment()` the first time the block's
// entry state is needed, and the allocator switches between block states as
// the iteration proceeds.
Error RAPass::runLocalAllocator() noexcept {
  RALocalAllocator lra(this);
  ASMJIT_PROPAGATE(lra.init());

  if (!blockCount())
    return kErrorOk;

  // The allocation is done when this reaches zero.
  uint32_t blocksRemaining = reachableBlockCount();

  // Current block.
  uint32_t blockId = 0;
  RABlock* block = _blocks[blockId];

  // The first block (entry) must always be reachable.
  ASMJIT_ASSERT(block->isReachable());

  // Assign function arguments for the initial block. The `lra` is valid now.
  lra.makeInitialAssignment();
  ASMJIT_PROPAGATE(setBlockEntryAssignment(block, block, lra._curAssignment));

  // The loop starts from the first block and iterates blocks in order, however,
  // the algorithm also allows to jump to any other block when finished if it's
  // a jump target. In-order iteration just makes sure that all blocks are visited.
  for (;;) {
    BaseNode* first = block->first();
    BaseNode* last = block->last();
    BaseNode* terminator = block->hasTerminator() ? last : nullptr;

    // Nodes surrounding the block - used to re-establish the block boundaries
    // afterwards, because allocation can insert nodes around any instruction
    // of the block.
    BaseNode* beforeFirst = first->prev();
    BaseNode* afterLast = last->next();

    bool unconditionalJump = false;
    RABlock* consecutive = nullptr;

    if (block->hasSuccessors())
      consecutive = block->successors()[0];

    lra.setBlock(block);
    block->makeAllocated();

    BaseNode* node = first;
    while (node != afterLast) {
      BaseNode* next = node->next();
      if (node->isInst()) {
        InstNode* inst = node->as<InstNode>();

        if (ASMJIT_UNLIKELY(inst == terminator)) {
          const RABlocks& successors = block->successors();
          if (block->hasConsecutive()) {
            // Conditional branch - the consecutive block is the first
            // successor, the branch target is the last one.
            ASMJIT_PROPAGATE(lra.allocBranch(inst, successors.last(), successors.first()));

            node = next;
            continue;
          }
          else if (successors.size() > 1) {
            // Jump table with multiple targets.
            // NOTE(review): `hasConsecutive()` is known to be false in this
            // branch, so `cont` is always null here - confirm intentional.
            RABlock* cont = block->hasConsecutive() ? successors.first() : nullptr;
            ASMJIT_PROPAGATE(lra.allocJumpTable(inst, successors, cont));

            node = next;
            continue;
          }
          else {
            // Otherwise this is an unconditional jump, special handling isn't required.
            unconditionalJump = true;
          }
        }

        ASMJIT_PROPAGATE(lra.allocInst(inst));
        if (inst->type() == BaseNode::kNodeInvoke)
          ASMJIT_PROPAGATE(onEmitPreCall(inst->as<InvokeNode>()));
        else
          ASMJIT_PROPAGATE(lra.spillAfterAllocation(inst));
      }
      node = next;
    }

    if (consecutive) {
      // Position the cursor so that register moves needed to match the
      // consecutive block's entry state are emitted at the end of this block
      // (before the unconditional jump, if any).
      BaseNode* prev = afterLast ? afterLast->prev() : cc()->lastNode();
      cc()->_setCursor(unconditionalJump ? prev->prev() : prev);

      if (consecutive->hasEntryAssignment()) {
        ASMJIT_PROPAGATE(
          lra.switchToAssignment(
            consecutive->entryPhysToWorkMap(),
            consecutive->entryWorkToPhysMap(),
            consecutive->liveIn(),
            consecutive->isAllocated(),
            false));
      }
      else {
        ASMJIT_PROPAGATE(lra.spillRegsBeforeEntry(consecutive));
        ASMJIT_PROPAGATE(setBlockEntryAssignment(consecutive, block, lra._curAssignment));
        lra._curAssignment.copyFrom(consecutive->entryPhysToWorkMap(), consecutive->entryWorkToPhysMap());
      }
    }

    // Important as the local allocator can insert instructions before
    // and after any instruction within the basic block.
    block->setFirst(beforeFirst->next());
    block->setLast(afterLast ? afterLast->prev() : cc()->lastNode());

    if (--blocksRemaining == 0)
      break;

    // Switch to the next consecutive block, if any.
    if (consecutive) {
      block = consecutive;
      if (!block->isAllocated())
        continue;
    }

    // Get the next block - wraps around and picks the first reachable,
    // not-yet-allocated block that already has an entry assignment.
    for (;;) {
      if (++blockId >= blockCount())
        blockId = 0;

      block = _blocks[blockId];
      if (!block->isReachable() || block->isAllocated() || !block->hasEntryAssignment())
        continue;

      break;
    }

    // If we switched to some block we have to update the local allocator.
    lra.replaceAssignment(block->entryPhysToWorkMap(), block->entryWorkToPhysMap());
  }

  // Accumulate registers clobbered by this function's body.
  _clobberedRegs.op<Support::Or>(lra._clobberedRegs);
  return kErrorOk;
}
+
// Records the register assignment at the entry of `block`, derived from
// `fromAssignment` (the exit state of `fromBlock`).
//
// If `block` participates in a shared assignment, the state is stored through
// `setSharedAssignment()` instead, which propagates it to all blocks sharing
// the same id. Registers that are LIVE-OUT of `fromBlock` but not LIVE-IN of
// `block` are unassigned from the cloned state.
Error RAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlock, const RAAssignment& fromAssignment) noexcept {
  if (block->hasSharedAssignmentId()) {
    uint32_t sharedAssignmentId = block->sharedAssignmentId();

    // Shouldn't happen. Entry assignment of a block that has a shared-state
    // will assign to all blocks with the same sharedAssignmentId. It's a bug if
    // the shared state has been already assigned.
    if (!_sharedAssignments[sharedAssignmentId].empty())
      return DebugUtils::errored(kErrorInvalidState);

    return setSharedAssignment(sharedAssignmentId, fromAssignment);
  }

  // Clone the incoming state - the block gets its own copy of both maps.
  PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
  WorkToPhysMap* workToPhysMap = cloneWorkToPhysMap(fromAssignment.workToPhysMap());

  if (ASMJIT_UNLIKELY(!physToWorkMap || !workToPhysMap))
    return DebugUtils::errored(kErrorOutOfMemory);

  block->setEntryAssignment(physToWorkMap, workToPhysMap);

  // True if this is the first (entry) block, nothing to do in this case.
  if (block == fromBlock) {
    // Entry block should never have a shared state.
    if (block->hasSharedAssignmentId())
      return DebugUtils::errored(kErrorInvalidState);

    return kErrorOk;
  }

  RAAssignment as;
  as.initLayout(_physRegCount, workRegs());
  as.initMaps(physToWorkMap, workToPhysMap);

  const ZoneBitVector& liveOut = fromBlock->liveOut();
  const ZoneBitVector& liveIn = block->liveIn();

  // It's possible that `fromBlock` has LIVE-OUT regs that `block` doesn't
  // have in LIVE-IN, these have to be unassigned.
  {
    // Iterates bits set in (liveOut & ~liveIn).
    ZoneBitVector::ForEachBitOp<Support::AndNot> it(liveOut, liveIn);
    while (it.hasNext()) {
      uint32_t workId = uint32_t(it.next());
      RAWorkReg* workReg = workRegById(workId);

      uint32_t group = workReg->group();
      uint32_t physId = as.workToPhysId(group, workId);

      if (physId != RAAssignment::kPhysNone)
        as.unassign(group, workId, physId);
    }
  }

  return blockEntryAssigned(as);
}
+
// Initializes the shared assignment `sharedAssignmentId` from `fromAssignment`
// and distributes a per-block copy of the state to every block that carries
// the same shared-assignment id.
//
// Per-block copies keep only registers LIVE-IN of that block; the shared copy
// keeps only registers assigned by at least one of the participating blocks.
Error RAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssignment& fromAssignment) noexcept {
  // Must only be called once per shared assignment (checked by the caller too).
  ASMJIT_ASSERT(_sharedAssignments[sharedAssignmentId].empty());

  PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
  WorkToPhysMap* workToPhysMap = cloneWorkToPhysMap(fromAssignment.workToPhysMap());

  if (ASMJIT_UNLIKELY(!physToWorkMap || !workToPhysMap))
    return DebugUtils::errored(kErrorOutOfMemory);

  _sharedAssignments[sharedAssignmentId].assignMaps(physToWorkMap, workToPhysMap);
  ZoneBitVector& sharedLiveIn = _sharedAssignments[sharedAssignmentId]._liveIn;
  ASMJIT_PROPAGATE(sharedLiveIn.resize(allocator(), workRegCount()));

  RAAssignment as;
  as.initLayout(_physRegCount, workRegs());

  // Physical registers assigned by at least one block sharing this state,
  // accumulated per register group.
  uint32_t sharedAssigned[BaseReg::kGroupVirt] {};

  for (RABlock* block : blocks()) {
    if (block->sharedAssignmentId() == sharedAssignmentId) {
      ASMJIT_ASSERT(!block->hasEntryAssignment());

      // Each participating block gets its own clone of the incoming state.
      PhysToWorkMap* entryPhysToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
      WorkToPhysMap* entryWorkToPhysMap = cloneWorkToPhysMap(fromAssignment.workToPhysMap());

      if (ASMJIT_UNLIKELY(!entryPhysToWorkMap || !entryWorkToPhysMap))
        return DebugUtils::errored(kErrorOutOfMemory);

      block->setEntryAssignment(entryPhysToWorkMap, entryWorkToPhysMap);
      as.initMaps(entryPhysToWorkMap, entryWorkToPhysMap);

      const ZoneBitVector& liveIn = block->liveIn();
      sharedLiveIn.or_(liveIn);

      for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
        sharedAssigned[group] |= entryPhysToWorkMap->assigned[group];
        Support::BitWordIterator<uint32_t> it(entryPhysToWorkMap->assigned[group]);

        while (it.hasNext()) {
          uint32_t physId = it.next();
          uint32_t workId = as.physToWorkId(group, physId);

          // Drop registers not LIVE-IN of this particular block.
          if (!liveIn.bitAt(workId))
            as.unassign(group, workId, physId);
        }
      }
    }
  }

  {
    // Finally trim the shared state itself: unassign available physical
    // registers that no participating block actually uses.
    as.initMaps(physToWorkMap, workToPhysMap);

    for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
      Support::BitWordIterator<uint32_t> it(_availableRegs[group] & ~sharedAssigned[group]);

      while (it.hasNext()) {
        uint32_t physId = it.next();
        if (as.isPhysAssigned(group, physId)) {
          uint32_t workId = as.physToWorkId(group, physId);
          as.unassign(group, workId, physId);
        }
      }
    }
  }

  return blockEntryAssigned(as);
}
+
+Error RAPass::blockEntryAssigned(const RAAssignment& as) noexcept {
+ // Complex allocation strategy requires to record register assignments upon
+ // block entry (or per shared state).
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ if (!_strategy[group].isComplex())
+ continue;
+
+ Support::BitWordIterator<uint32_t> it(as.assigned(group));
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+ uint32_t workId = as.physToWorkId(group, physId);
+
+ RAWorkReg* workReg = workRegById(workId);
+ workReg->addAllocatedMask(Support::bitMask(physId));
+ }
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Allocation - Utilities]
+// ============================================================================
+
+Error RAPass::useTemporaryMem(BaseMem& out, uint32_t size, uint32_t alignment) noexcept {
+ ASMJIT_ASSERT(alignment <= 64);
+
+ if (_temporaryMem.isNone()) {
+ ASMJIT_PROPAGATE(cc()->_newStack(&_temporaryMem.as<BaseMem>(), size, alignment));
+ }
+ else {
+ ASMJIT_ASSERT(_temporaryMem.as<BaseMem>().isRegHome());
+
+ uint32_t virtId = _temporaryMem.as<BaseMem>().baseId();
+ VirtReg* virtReg = cc()->virtRegById(virtId);
+
+ cc()->setStackSize(virtId, Support::max(virtReg->virtSize(), size),
+ Support::max(virtReg->alignment(), alignment));
+ }
+
+ out = _temporaryMem.as<BaseMem>();
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Allocation - Prolog / Epilog]
+// ============================================================================
+
// Finalizes the function's stack frame after register allocation.
//
// Propagates clobbered registers and stack-slot layout into `FuncFrame`,
// handles stack arguments that stay in memory, and applies the final offset
// adjustment to all stack slots.
Error RAPass::updateStackFrame() noexcept {
  // Update some StackFrame information that we updated during allocation. The
  // only information we don't have at the moment is final local stack size,
  // which is calculated last.
  FuncFrame& frame = func()->frame();
  for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
    frame.addDirtyRegs(group, _clobberedRegs[group]);
  frame.setLocalStackAlignment(_stackAllocator.alignment());

  // If there are stack arguments that are not assigned to registers upon entry
  // and the function doesn't require dynamic stack alignment we keep these
  // arguments where they are. This will also mark all stack slots that match
  // these arguments as allocated.
  if (_numStackArgsToStackSlots)
    ASMJIT_PROPAGATE(_markStackArgsToKeep());

  // Calculate offsets of all stack slots and update StackSize to reflect the calculated local stack size.
  ASMJIT_PROPAGATE(_stackAllocator.calculateStackFrame());
  frame.setLocalStackSize(_stackAllocator.stackSize());

  // Update the stack frame based on `_argsAssignment` and finalize it.
  // Finalization means to apply final calculation to the stack layout.
  ASMJIT_PROPAGATE(_argsAssignment.updateFuncFrame(frame));
  ASMJIT_PROPAGATE(frame.finalize());

  // StackAllocator allocates all slots starting from [0], adjust them when necessary.
  if (frame.localStackOffset() != 0)
    ASMJIT_PROPAGATE(_stackAllocator.adjustSlotOffsets(int32_t(frame.localStackOffset())));

  // Again, if there are stack arguments allocated in function's stack we have
  // to handle them. This handles all cases (either regular or dynamic stack
  // alignment).
  if (_numStackArgsToStackSlots)
    ASMJIT_PROPAGATE(_updateStackArgs());

  return kErrorOk;
}
+
// Marks stack arguments that can stay in their incoming stack location.
//
// An argument marked `kFlagStackArgToStack` whose incoming stack slot matches
// the required slot size is flagged `kFlagStackArg` and kept in place;
// otherwise its destination gets a placeholder stack offset so that
// `_argsAssignment.updateFuncFrame()` accounts for the move.
Error RAPass::_markStackArgsToKeep() noexcept {
  FuncFrame& frame = func()->frame();
  // A stable stack-argument base register exists when FP is preserved or
  // when no dynamic alignment is required.
  bool hasSAReg = frame.hasPreservedFP() || !frame.hasDynamicAlignment();

  RAWorkRegs& workRegs = _workRegs;
  uint32_t numWorkRegs = workRegCount();

  for (uint32_t workId = 0; workId < numWorkRegs; workId++) {
    RAWorkReg* workReg = workRegs[workId];
    if (workReg->hasFlag(RAWorkReg::kFlagStackArgToStack)) {
      ASMJIT_ASSERT(workReg->hasArgIndex());
      const FuncValue& srcArg = _func->detail().arg(workReg->argIndex());

      // If the register doesn't have stack slot then we failed. It doesn't
      // make much sense as it was marked as `kFlagStackArgToStack`, which
      // requires the WorkReg was live-in upon function entry.
      RAStackSlot* slot = workReg->stackSlot();
      if (ASMJIT_UNLIKELY(!slot))
        return DebugUtils::errored(kErrorInvalidState);

      if (hasSAReg && srcArg.isStack() && !srcArg.isIndirect()) {
        uint32_t typeSize = Type::sizeOf(srcArg.typeId());
        if (typeSize == slot->size()) {
          slot->addFlags(RAStackSlot::kFlagStackArg);
          continue;
        }
      }

      // NOTE: Update StackOffset here so when `_argsAssignment.updateFuncFrame()`
      // is called it will take into consideration moving to stack slots. Without
      // this we may miss some scratch registers later.
      FuncValue& dstArg = _argsAssignment.arg(workReg->argIndex());
      dstArg.assignStackOffset(0);
    }
  }

  return kErrorOk;
}
+
+Error RAPass::_updateStackArgs() noexcept {
+ FuncFrame& frame = func()->frame();
+ RAWorkRegs& workRegs = _workRegs;
+ uint32_t numWorkRegs = workRegCount();
+
+ for (uint32_t workId = 0; workId < numWorkRegs; workId++) {
+ RAWorkReg* workReg = workRegs[workId];
+ if (workReg->hasFlag(RAWorkReg::kFlagStackArgToStack)) {
+ ASMJIT_ASSERT(workReg->hasArgIndex());
+ RAStackSlot* slot = workReg->stackSlot();
+
+ if (ASMJIT_UNLIKELY(!slot))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ if (slot->isStackArg()) {
+ const FuncValue& srcArg = _func->detail().arg(workReg->argIndex());
+ if (frame.hasPreservedFP()) {
+ slot->setBaseRegId(_fp.id());
+ slot->setOffset(int32_t(frame.saOffsetFromSA()) + srcArg.stackOffset());
+ }
+ else {
+ slot->setOffset(int32_t(frame.saOffsetFromSP()) + srcArg.stackOffset());
+ }
+ }
+ else {
+ FuncValue& dstArg = _argsAssignment.arg(workReg->argIndex());
+ dstArg.setStackOffset(slot->offset());
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+Error RAPass::insertPrologEpilog() noexcept {
+ FuncFrame& frame = _func->frame();
+
+ cc()->_setCursor(func());
+ ASMJIT_PROPAGATE(cc()->emitProlog(frame));
+ ASMJIT_PROPAGATE(cc()->emitArgsAssignment(frame, _argsAssignment));
+
+ cc()->_setCursor(func()->exitNode());
+ ASMJIT_PROPAGATE(cc()->emitEpilog(frame));
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Rewriter]
+// ============================================================================
+
// Entry point of the rewrite phase - rewrites the whole function body
// (from the function node up to `_stop`).
Error RAPass::rewrite() noexcept {
#ifndef ASMJIT_NO_LOGGING
  // `logger` is consumed by the ASMJIT_RA_LOG_FORMAT macro.
  Logger* logger = debugLogger();
  ASMJIT_RA_LOG_FORMAT("[RAPass::Rewrite]\n");
#endif

  return _rewrite(_func, _stop);
}
+
// Rewrites nodes in the range [first, stop):
//   - virtual register ids in instructions are replaced by the allocated
//     physical register ids (driven by each tied-reg's rewrite masks),
//   - FuncRet nodes are replaced by a jump to the exit label (or removed),
//   - reg-home memory operands are turned into [sp/base + offset] addresses.
ASMJIT_FAVOR_SPEED Error RAPass::_rewrite(BaseNode* first, BaseNode* stop) noexcept {
  uint32_t virtCount = cc()->_vRegArray.size();

  BaseNode* node = first;
  while (node != stop) {
    // Fetch `next` first - the current node may be removed below.
    BaseNode* next = node->next();
    if (node->isInst()) {
      InstNode* inst = node->as<InstNode>();
      RAInst* raInst = node->passData<RAInst>();

      Operand* operands = inst->operands();
      uint32_t opCount = inst->opCount();
      uint32_t i;

      // Rewrite virtual registers into physical registers.
      if (ASMJIT_LIKELY(raInst)) {
        // If the instruction contains pass data (raInst) then it was a subject
        // for register allocation and must be rewritten to use physical regs.
        RATiedReg* tiedRegs = raInst->tiedRegs();
        uint32_t tiedCount = raInst->tiedCount();

        for (i = 0; i < tiedCount; i++) {
          RATiedReg* tiedReg = &tiedRegs[i];

          // Each set bit in the rewrite mask is an operand-id slot to patch.
          Support::BitWordIterator<uint32_t> useIt(tiedReg->useRewriteMask());
          uint32_t useId = tiedReg->useId();
          while (useIt.hasNext()) inst->rewriteIdAtIndex(useIt.next(), useId);

          Support::BitWordIterator<uint32_t> outIt(tiedReg->outRewriteMask());
          uint32_t outId = tiedReg->outId();
          while (outIt.hasNext()) inst->rewriteIdAtIndex(outIt.next(), outId);
        }

        // This data is allocated by Zone passed to `runOnFunction()`, which
        // will be reset after the RA pass finishes. So reset this data to
        // prevent having a dead pointer after RA pass is complete.
        node->resetPassData();

        if (ASMJIT_UNLIKELY(node->type() != BaseNode::kNodeInst)) {
          // FuncRet terminates the flow, it must either be removed if the exit
          // label is next to it (optimization) or patched to an architecture
          // dependent jump instruction that jumps to the function's exit before
          // the epilog.
          if (node->type() == BaseNode::kNodeFuncRet) {
            RABlock* block = raInst->block();
            if (!isNextTo(node, _func->exitNode())) {
              cc()->_setCursor(node->prev());
              ASMJIT_PROPAGATE(onEmitJump(_func->exitNode()->label()));
            }

            BaseNode* prev = node->prev();
            cc()->removeNode(node);
            block->setLast(prev);
          }
        }
      }

      // Rewrite stack slot addresses.
      for (i = 0; i < opCount; i++) {
        Operand& op = operands[i];
        if (op.isMem()) {
          BaseMem& mem = op.as<BaseMem>();
          if (mem.isRegHome()) {
            uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId());
            if (ASMJIT_UNLIKELY(virtIndex >= virtCount))
              return DebugUtils::errored(kErrorInvalidVirtId);

            VirtReg* virtReg = cc()->virtRegByIndex(virtIndex);
            RAWorkReg* workReg = virtReg->workReg();
            ASMJIT_ASSERT(workReg != nullptr);

            RAStackSlot* slot = workReg->stackSlot();
            int32_t offset = slot->offset();

            // Replace the reg-home base by the slot's base register and
            // add the slot's final offset.
            mem._setBase(_sp.type(), slot->baseRegId());
            mem.clearRegHome();
            mem.addOffsetLo32(offset);
          }
        }
      }
    }

    node = next;
  }

  return kErrorOk;
}
+
+// ============================================================================
+// [asmjit::RAPass - Logging]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+static void RAPass_dumpRAInst(RAPass* pass, String& sb, const RAInst* raInst) noexcept {
+ const RATiedReg* tiedRegs = raInst->tiedRegs();
+ uint32_t tiedCount = raInst->tiedCount();
+
+ for (uint32_t i = 0; i < tiedCount; i++) {
+ const RATiedReg& tiedReg = tiedRegs[i];
+
+ if (i != 0)
+ sb.append(' ');
+
+ sb.appendFormat("%s{", pass->workRegById(tiedReg.workId())->name());
+ sb.append(tiedReg.isReadWrite() ? 'X' :
+ tiedReg.isRead() ? 'R' :
+ tiedReg.isWrite() ? 'W' : '?');
+
+ if (tiedReg.hasUseId())
+ sb.appendFormat("|Use=%u", tiedReg.useId());
+ else if (tiedReg.isUse())
+ sb.append("|Use");
+
+ if (tiedReg.hasOutId())
+ sb.appendFormat("|Out=%u", tiedReg.outId());
+ else if (tiedReg.isOut())
+ sb.append("|Out");
+
+ if (tiedReg.isLast())
+ sb.append("|Last");
+
+ if (tiedReg.isKill())
+ sb.append("|Kill");
+
+ sb.append("}");
+ }
+}
+
// Attaches a formatted inline comment to every node of every block, used by
// loggers to annotate the final output. With `kFlagDebugRA` enabled the
// comment also contains the tied-register dump produced by the allocator.
ASMJIT_FAVOR_SIZE Error RAPass::annotateCode() noexcept {
  uint32_t loggerFlags = _loggerFlags;
  StringTmp<1024> sb;

  for (const RABlock* block : _blocks) {
    BaseNode* node = block->first();
    if (!node) continue;

    BaseNode* last = block->last();
    for (;;) {
      sb.clear();
      Formatter::formatNode(sb, loggerFlags, cc(), node);

      if ((loggerFlags & FormatOptions::kFlagDebugRA) != 0 && node->isInst() && node->hasPassData()) {
        const RAInst* raInst = node->passData<RAInst>();
        if (raInst->tiedCount() > 0) {
          // Align the RA annotation into a right-hand column.
          sb.padEnd(40);
          sb.append(" | ");
          RAPass_dumpRAInst(this, sb, raInst);
        }
      }

      // Duplicate the comment into the compiler's data zone so it outlives
      // the temporary string buffer.
      node->setInlineComment(
        static_cast<char*>(
          cc()->_dataZone.dup(sb.data(), sb.size(), true)));

      if (node == last)
        break;
      node = node->next();
    }
  }

  return kErrorOk;
}
+
+ASMJIT_FAVOR_SIZE Error RAPass::_dumpBlockIds(String& sb, const RABlocks& blocks) noexcept {
+ for (uint32_t i = 0, size = blocks.size(); i < size; i++) {
+ const RABlock* block = blocks[i];
+ if (i != 0)
+ ASMJIT_PROPAGATE(sb.appendFormat(", #%u", block->blockId()));
+ else
+ ASMJIT_PROPAGATE(sb.appendFormat("#%u", block->blockId()));
+ }
+ return kErrorOk;
+}
+
// Appends the block's liveness information (IN/OUT/GEN/KILL bit-vectors) into
// `sb`, one line per non-empty set, listing the names of live WorkRegs.
ASMJIT_FAVOR_SIZE Error RAPass::_dumpBlockLiveness(String& sb, const RABlock* block) noexcept {
  for (uint32_t liveType = 0; liveType < RABlock::kLiveCount; liveType++) {
    const char* bitsName = liveType == RABlock::kLiveIn  ? "IN  " :
                           liveType == RABlock::kLiveOut ? "OUT " :
                           liveType == RABlock::kLiveGen ? "GEN " : "KILL";

    const ZoneBitVector& bits = block->_liveBits[liveType];
    uint32_t size = bits.size();
    ASMJIT_ASSERT(size <= workRegCount());

    // `n` counts printed registers; it also tells whether the header was
    // already emitted for this live set.
    uint32_t n = 0;
    for (uint32_t workId = 0; workId < size; workId++) {
      if (bits.bitAt(workId)) {
        RAWorkReg* wReg = workRegById(workId);

        if (!n)
          sb.appendFormat("    %s [", bitsName);
        else
          sb.append(", ");

        sb.append(wReg->name());
        n++;
      }
    }

    // Close the bracket only if the set was non-empty.
    if (n)
      sb.append("]\n");
  }

  return kErrorOk;
}
+
// Appends live-span statistics of every WorkReg into `sb` - one line per
// register containing its id, width, frequency, priority, and the list of
// [from:to] live intervals.
ASMJIT_FAVOR_SIZE Error RAPass::_dumpLiveSpans(String& sb) noexcept {
  uint32_t numWorkRegs = _workRegs.size();
  // Longest register name - used to pad names into a fixed-width column.
  uint32_t maxSize = _maxWorkRegNameSize;

  for (uint32_t workId = 0; workId < numWorkRegs; workId++) {
    RAWorkReg* workReg = _workRegs[workId];

    sb.append("  ");

    size_t oldSize = sb.size();
    sb.append(workReg->name());
    sb.padEnd(oldSize + maxSize);

    RALiveStats& stats = workReg->liveStats();
    sb.appendFormat(" {id:%04u width: %-4u freq: %0.4f priority=%0.4f}",
      workReg->virtId(),
      stats.width(),
      stats.freq(),
      stats.priority());
    sb.append(": ");

    LiveRegSpans& liveSpans = workReg->liveSpans();
    for (uint32_t x = 0; x < liveSpans.size(); x++) {
      const LiveRegSpan& liveSpan = liveSpans[x];
      if (x)
        sb.append(", ");
      sb.appendFormat("[%u:%u]", liveSpan.a, liveSpan.b);
    }

    sb.append('\n');
  }

  return kErrorOk;
}
+#endif
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
diff --git a/client/asmjit/core/rapass_p.h b/client/asmjit/core/rapass_p.h
new file mode 100644
index 0000000..bedce96
--- /dev/null
+++ b/client/asmjit/core/rapass_p.h
@@ -0,0 +1,1196 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RAPASS_P_H_INCLUDED
+#define ASMJIT_CORE_RAPASS_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/raassignment_p.h"
+#include "../core/radefs_p.h"
+#include "../core/rastack_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RABlock]
+// ============================================================================
+
+//! Basic block used by register allocator pass.
//! Basic block used by register allocator pass.
class RABlock {
public:
  ASMJIT_NONCOPYABLE(RABlock)

  typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
  typedef RAAssignment::WorkToPhysMap WorkToPhysMap;

  enum Id : uint32_t {
    //! Marks a block that was not assigned an id yet.
    kUnassignedId = 0xFFFFFFFFu
  };

  //! Basic block flags.
  enum Flags : uint32_t {
    //! Block has been constructed from nodes.
    kFlagIsConstructed = 0x00000001u,
    //! Block is reachable (set by `buildViews()`).
    kFlagIsReachable = 0x00000002u,
    //! Block is a target (has an associated label or multiple labels).
    kFlagIsTargetable = 0x00000004u,
    //! Block has been allocated.
    kFlagIsAllocated = 0x00000008u,
    //! Block is a function-exit.
    kFlagIsFuncExit = 0x00000010u,

    //! Block has a terminator (jump, conditional jump, ret).
    kFlagHasTerminator = 0x00000100u,
    //! Block naturally flows to the next block.
    kFlagHasConsecutive = 0x00000200u,
    //! Block has a jump to a jump-table at the end.
    kFlagHasJumpTable = 0x00000400u,
    //! Block contains fixed registers (precolored).
    kFlagHasFixedRegs = 0x00000800u,
    //! Block contains function calls.
    kFlagHasFuncCalls = 0x00001000u
  };

  //! Register allocator pass.
  RAPass* _ra;

  //! Block id (indexed from zero).
  uint32_t _blockId;
  //! Block flags, see `Flags`.
  uint32_t _flags;

  //! First `BaseNode` of this block (inclusive).
  BaseNode* _first;
  //! Last `BaseNode` of this block (inclusive).
  BaseNode* _last;

  //! Initial position of this block (inclusive).
  uint32_t _firstPosition;
  //! End position of this block (exclusive).
  uint32_t _endPosition;

  //! Weight of this block (default 0, each loop adds one).
  uint32_t _weight;
  //! Post-order view order, used during POV construction.
  uint32_t _povOrder;

  //! Basic statistics about registers.
  RARegsStats _regsStats;
  //! Maximum live-count per register group.
  RALiveCount _maxLiveCount;

  //! Timestamp (used by block visitors).
  mutable uint64_t _timestamp;
  //! Immediate dominator of this block.
  RABlock* _idom;

  //! Block predecessors.
  RABlocks _predecessors;
  //! Block successors.
  RABlocks _successors;

  //! Liveness bit-vector kinds stored in `_liveBits`.
  enum LiveType : uint32_t {
    kLiveIn = 0,
    kLiveOut = 1,
    kLiveGen = 2,
    kLiveKill = 3,
    kLiveCount = 4
  };

  //! Liveness in/out/use/kill.
  ZoneBitVector _liveBits[kLiveCount];

  //! Shared assignment id or `Globals::kInvalidId` if this block doesn't
  //! have shared assignment. See `RASharedAssignment` for more details.
  uint32_t _sharedAssignmentId;
  //! Scratch registers that cannot be allocated upon block entry.
  uint32_t _entryScratchGpRegs;
  //! Scratch registers used at exit, by a terminator instruction.
  uint32_t _exitScratchGpRegs;

  //! Register assignment (PhysToWork) on entry.
  PhysToWorkMap* _entryPhysToWorkMap;
  //! Register assignment (WorkToPhys) on entry.
  WorkToPhysMap* _entryWorkToPhysMap;

  //! \name Construction & Destruction
  //! \{

  inline RABlock(RAPass* ra) noexcept
    : _ra(ra),
      _blockId(kUnassignedId),
      _flags(0),
      _first(nullptr),
      _last(nullptr),
      _firstPosition(0),
      _endPosition(0),
      _weight(0),
      _povOrder(kUnassignedId),
      _regsStats(),
      _maxLiveCount(),
      _timestamp(0),
      _idom(nullptr),
      _predecessors(),
      _successors(),
      _sharedAssignmentId(Globals::kInvalidId),
      _entryScratchGpRegs(0),
      _exitScratchGpRegs(0),
      _entryPhysToWorkMap(nullptr),
      _entryWorkToPhysMap(nullptr) {}

  //! \}

  //! \name Accessors
  //! \{

  inline RAPass* pass() const noexcept { return _ra; }
  inline ZoneAllocator* allocator() const noexcept;

  inline uint32_t blockId() const noexcept { return _blockId; }
  inline uint32_t flags() const noexcept { return _flags; }

  inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
  inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }

  //! Tests whether the block was assigned an id (not `kUnassignedId`).
  inline bool isAssigned() const noexcept { return _blockId != kUnassignedId; }

  inline bool isConstructed() const noexcept { return hasFlag(kFlagIsConstructed); }
  inline bool isReachable() const noexcept { return hasFlag(kFlagIsReachable); }
  inline bool isTargetable() const noexcept { return hasFlag(kFlagIsTargetable); }
  inline bool isAllocated() const noexcept { return hasFlag(kFlagIsAllocated); }
  inline bool isFuncExit() const noexcept { return hasFlag(kFlagIsFuncExit); }

  //! Marks the block constructed and merges `regStats` into its statistics.
  inline void makeConstructed(const RARegsStats& regStats) noexcept {
    _flags |= kFlagIsConstructed;
    _regsStats.combineWith(regStats);
  }

  inline void makeReachable() noexcept { _flags |= kFlagIsReachable; }
  inline void makeTargetable() noexcept { _flags |= kFlagIsTargetable; }
  inline void makeAllocated() noexcept { _flags |= kFlagIsAllocated; }

  inline const RARegsStats& regsStats() const noexcept { return _regsStats; }

  inline bool hasTerminator() const noexcept { return hasFlag(kFlagHasTerminator); }
  inline bool hasConsecutive() const noexcept { return hasFlag(kFlagHasConsecutive); }
  inline bool hasJumpTable() const noexcept { return hasFlag(kFlagHasJumpTable); }

  inline bool hasPredecessors() const noexcept { return !_predecessors.empty(); }
  inline bool hasSuccessors() const noexcept { return !_successors.empty(); }

  inline const RABlocks& predecessors() const noexcept { return _predecessors; }
  inline const RABlocks& successors() const noexcept { return _successors; }

  inline BaseNode* first() const noexcept { return _first; }
  inline BaseNode* last() const noexcept { return _last; }

  inline void setFirst(BaseNode* node) noexcept { _first = node; }
  inline void setLast(BaseNode* node) noexcept { _last = node; }

  inline uint32_t firstPosition() const noexcept { return _firstPosition; }
  inline void setFirstPosition(uint32_t position) noexcept { _firstPosition = position; }

  inline uint32_t endPosition() const noexcept { return _endPosition; }
  inline void setEndPosition(uint32_t position) noexcept { _endPosition = position; }

  inline uint32_t povOrder() const noexcept { return _povOrder; }

  inline uint32_t entryScratchGpRegs() const noexcept;
  inline uint32_t exitScratchGpRegs() const noexcept { return _exitScratchGpRegs; }

  inline void addEntryScratchGpRegs(uint32_t regMask) noexcept { _entryScratchGpRegs |= regMask; }
  inline void addExitScratchGpRegs(uint32_t regMask) noexcept { _exitScratchGpRegs |= regMask; }

  inline bool hasSharedAssignmentId() const noexcept { return _sharedAssignmentId != Globals::kInvalidId; }
  inline uint32_t sharedAssignmentId() const noexcept { return _sharedAssignmentId; }
  inline void setSharedAssignmentId(uint32_t id) noexcept { _sharedAssignmentId = id; }

  inline uint64_t timestamp() const noexcept { return _timestamp; }
  inline bool hasTimestamp(uint64_t ts) const noexcept { return _timestamp == ts; }
  inline void setTimestamp(uint64_t ts) const noexcept { _timestamp = ts; }
  inline void resetTimestamp() const noexcept { _timestamp = 0; }

  //! Returns the natural-flow successor or null if the block ends with a
  //! terminator that never falls through.
  inline RABlock* consecutive() const noexcept { return hasConsecutive() ? _successors[0] : nullptr; }

  inline RABlock* iDom() noexcept { return _idom; }
  inline const RABlock* iDom() const noexcept { return _idom; }
  inline void setIDom(RABlock* block) noexcept { _idom = block; }

  inline ZoneBitVector& liveIn() noexcept { return _liveBits[kLiveIn]; }
  inline const ZoneBitVector& liveIn() const noexcept { return _liveBits[kLiveIn]; }

  inline ZoneBitVector& liveOut() noexcept { return _liveBits[kLiveOut]; }
  inline const ZoneBitVector& liveOut() const noexcept { return _liveBits[kLiveOut]; }

  inline ZoneBitVector& gen() noexcept { return _liveBits[kLiveGen]; }
  inline const ZoneBitVector& gen() const noexcept { return _liveBits[kLiveGen]; }

  inline ZoneBitVector& kill() noexcept { return _liveBits[kLiveKill]; }
  inline const ZoneBitVector& kill() const noexcept { return _liveBits[kLiveKill]; }

  //! Resizes all four liveness bit-vectors to `size` bits.
  inline Error resizeLiveBits(uint32_t size) noexcept {
    ASMJIT_PROPAGATE(_liveBits[kLiveIn ].resize(allocator(), size));
    ASMJIT_PROPAGATE(_liveBits[kLiveOut ].resize(allocator(), size));
    ASMJIT_PROPAGATE(_liveBits[kLiveGen ].resize(allocator(), size));
    ASMJIT_PROPAGATE(_liveBits[kLiveKill].resize(allocator(), size));
    return kErrorOk;
  }

  inline bool hasEntryAssignment() const noexcept { return _entryPhysToWorkMap != nullptr; }
  inline WorkToPhysMap* entryWorkToPhysMap() const noexcept { return _entryWorkToPhysMap; }
  inline PhysToWorkMap* entryPhysToWorkMap() const noexcept { return _entryPhysToWorkMap; }

  //! Stores the entry assignment - the block takes (non-owning) pointers to
  //! both maps; their storage is managed by the pass's zone allocator.
  inline void setEntryAssignment(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
    _entryPhysToWorkMap = physToWorkMap;
    _entryWorkToPhysMap = workToPhysMap;
  }

  //! \}

  //! \name Utilities
  //! \{

  //! Adds a successor to this block, and predecessor to `successor`, making
  //! connection on both sides.
  //!
  //! This API must be used to manage successors and predecessors, never manage
  //! it manually.
  Error appendSuccessor(RABlock* successor) noexcept;

  //! Similar to `appendSuccessor()`, but prepends instead of appending.
  //!
  //! This function is used to add a natural flow (always first) to the block.
  Error prependSuccessor(RABlock* successor) noexcept;

  //! \}
};
+
+// ============================================================================
+// [asmjit::RAInst]
+// ============================================================================
+
+//! Register allocator's data associated with each `InstNode`.
+//!
+//! \note This is a variable-length structure: `_tiedRegs` is declared with a
+//! single element, but instances are allocated with `sizeOf(tiedRegCount)`
+//! bytes so the trailing array can hold `tiedRegCount` entries. See
+//! `RAPass::newRAInst()`.
+class RAInst {
+public:
+  ASMJIT_NONCOPYABLE(RAInst)
+
+  //! Parent block.
+  RABlock* _block;
+  //! Instruction flags.
+  uint32_t _flags;
+  //! Total count of RATiedReg's.
+  uint32_t _tiedTotal;
+  //! Index of RATiedReg's per register group.
+  RARegIndex _tiedIndex;
+  //! Count of RATiedReg's per register group.
+  RARegCount _tiedCount;
+  //! Number of live, and thus interfering VirtReg's at this point.
+  RALiveCount _liveCount;
+  //! Fixed physical registers used.
+  RARegMask _usedRegs;
+  //! Clobbered registers (by a function call).
+  RARegMask _clobberedRegs;
+  //! Tied registers (variable length in practice, see class note).
+  RATiedReg _tiedRegs[1];
+
+  enum Flags : uint32_t {
+    //! The instruction terminates its basic block.
+    kFlagIsTerminator = 0x00000001u
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Initializes the fixed-size header; the `_tiedRegs` array itself is not
+  //! initialized here — entries are filled later via `setTiedAt()` or by
+  //! direct writes (see `RAPass::assignRAInst()`).
+  ASMJIT_INLINE RAInst(RABlock* block, uint32_t flags, uint32_t tiedTotal, const RARegMask& clobberedRegs) noexcept {
+    _block = block;
+    _flags = flags;
+    _tiedTotal = tiedTotal;
+    _tiedIndex.reset();
+    _tiedCount.reset();
+    _liveCount.reset();
+    _usedRegs.reset();
+    _clobberedRegs = clobberedRegs;
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns the instruction flags.
+  inline uint32_t flags() const noexcept { return _flags; }
+  //! Tests whether the instruction has flag `flag`.
+  inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+  //! Replaces the existing instruction flags with `flags`.
+  inline void setFlags(uint32_t flags) noexcept { _flags = flags; }
+  //! Adds instruction `flags` to this RAInst.
+  inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+  //! Clears instruction `flags` from this RAInst.
+  inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
+
+  //! Returns whether the RAInst represents an instruction that terminates this basic block.
+  inline bool isTerminator() const noexcept { return hasFlag(kFlagIsTerminator); }
+
+  //! Returns the associated block with this RAInst.
+  inline RABlock* block() const noexcept { return _block; }
+
+  //! Returns tied registers (all).
+  inline RATiedReg* tiedRegs() const noexcept { return const_cast<RATiedReg*>(_tiedRegs); }
+  //! Returns tied registers for a given `group`.
+  inline RATiedReg* tiedRegs(uint32_t group) const noexcept { return const_cast<RATiedReg*>(_tiedRegs) + _tiedIndex.get(group); }
+
+  //! Returns count of all tied registers.
+  inline uint32_t tiedCount() const noexcept { return _tiedTotal; }
+  //! Returns count of tied registers of a given `group`.
+  inline uint32_t tiedCount(uint32_t group) const noexcept { return _tiedCount[group]; }
+
+  //! Returns `RATiedReg` at the given `index`.
+  inline RATiedReg* tiedAt(uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < _tiedTotal);
+    return tiedRegs() + index;
+  }
+
+  //! Returns `RATiedReg` at the given `index` of the given register `group`.
+  inline RATiedReg* tiedOf(uint32_t group, uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < _tiedCount._regs[group]);
+    return tiedRegs(group) + index;
+  }
+
+  //! Stores `tied` at the given `index` of the tied-register array.
+  inline void setTiedAt(uint32_t index, RATiedReg& tied) noexcept {
+    ASMJIT_ASSERT(index < _tiedTotal);
+    _tiedRegs[index] = tied;
+  }
+
+  //! \}
+
+  //! \name Static Functions
+  //! \{
+
+  //! Returns the number of bytes needed for a `RAInst` holding `tiedRegCount`
+  //! tied registers (compensates for the single in-struct array element).
+  static inline size_t sizeOf(uint32_t tiedRegCount) noexcept {
+    return sizeof(RAInst) - sizeof(RATiedReg) + tiedRegCount * sizeof(RATiedReg);
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RAInstBuilder]
+// ============================================================================
+
+//! A helper class that is used to build an array of RATiedReg items that are
+//! then copied to `RAInst`.
+//!
+//! The builder is reused across instructions; `reset()` must be called after
+//! each instruction (the `!tiedReg` assertions below rely on it).
+class RAInstBuilder {
+public:
+  ASMJIT_NONCOPYABLE(RAInstBuilder)
+
+  //! Flags combined from all RATiedReg's.
+  uint32_t _aggregatedFlags;
+  //! Flags that will be cleared before storing the aggregated flags to `RAInst`.
+  uint32_t _forbiddenFlags;
+  //! Count of tied registers per register group.
+  RARegCount _count;
+  //! Aggregated fixed/used statistics per register group.
+  RARegsStats _stats;
+
+  //! Fixed physical registers used (per group).
+  RARegMask _used;
+  //! Physical registers clobbered (per group).
+  RARegMask _clobbered;
+
+  //! Current tied register in `_tiedRegs`.
+  RATiedReg* _cur;
+  //! Array of temporary tied registers (fixed capacity, guarded by asserts).
+  RATiedReg _tiedRegs[128];
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline RAInstBuilder() noexcept { reset(); }
+
+  //! Same as `reset()` — provided for symmetry with other init/reset APIs.
+  inline void init() noexcept { reset(); }
+  //! Clears all aggregated state and rewinds `_cur` to the array start.
+  inline void reset() noexcept {
+    _aggregatedFlags = 0;
+    _forbiddenFlags = 0;
+    _count.reset();
+    _stats.reset();
+    _used.reset();
+    _clobbered.reset();
+    _cur = _tiedRegs;
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  inline uint32_t aggregatedFlags() const noexcept { return _aggregatedFlags; }
+  inline uint32_t forbiddenFlags() const noexcept { return _forbiddenFlags; }
+
+  inline void addAggregatedFlags(uint32_t flags) noexcept { _aggregatedFlags |= flags; }
+  inline void addForbiddenFlags(uint32_t flags) noexcept { _forbiddenFlags |= flags; }
+
+  //! Returns the number of tied registers added to the builder.
+  inline uint32_t tiedRegCount() const noexcept { return uint32_t((size_t)(_cur - _tiedRegs)); }
+
+  inline RATiedReg* begin() noexcept { return _tiedRegs; }
+  inline RATiedReg* end() noexcept { return _cur; }
+
+  inline const RATiedReg* begin() const noexcept { return _tiedRegs; }
+  inline const RATiedReg* end() const noexcept { return _cur; }
+
+  //! Returns `RATiedReg` at the given `index`.
+  inline RATiedReg* operator[](uint32_t index) noexcept {
+    ASMJIT_ASSERT(index < tiedRegCount());
+    return &_tiedRegs[index];
+  }
+
+  //! Returns `RATiedReg` at the given `index`. (const).
+  inline const RATiedReg* operator[](uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < tiedRegCount());
+    return &_tiedRegs[index];
+  }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Adds (or merges) a tied register for `workReg` with the given use/out
+  //! constraints.
+  //!
+  //! If `workReg` is not yet tied by the current instruction a new `RATiedReg`
+  //! is appended; otherwise the constraints are merged into the existing entry.
+  //! Returns `kErrorOverlappedRegs` when a second fixed use-id or out-id is
+  //! requested for an already-tied register.
+  Error add(RAWorkReg* workReg, uint32_t flags, uint32_t allocable, uint32_t useId, uint32_t useRewriteMask, uint32_t outId, uint32_t outRewriteMask, uint32_t rmSize = 0) noexcept {
+    uint32_t group = workReg->group();
+    RATiedReg* tiedReg = workReg->tiedReg();
+
+    if (useId != BaseReg::kIdBad) {
+      _stats.makeFixed(group);
+      _used[group] |= Support::bitMask(useId);
+      flags |= RATiedReg::kUseFixed;
+    }
+
+    if (outId != BaseReg::kIdBad) {
+      _clobbered[group] |= Support::bitMask(outId);
+      flags |= RATiedReg::kOutFixed;
+    }
+
+    _aggregatedFlags |= flags;
+    _stats.makeUsed(group);
+
+    if (!tiedReg) {
+      // Could happen when the builder is not reset properly after each instruction.
+      ASMJIT_ASSERT(tiedRegCount() < ASMJIT_ARRAY_SIZE(_tiedRegs));
+
+      tiedReg = _cur++;
+      tiedReg->init(workReg->workId(), flags, allocable, useId, useRewriteMask, outId, outRewriteMask, rmSize);
+      workReg->setTiedReg(tiedReg);
+
+      _count.add(group);
+      return kErrorOk;
+    }
+    else {
+      if (useId != BaseReg::kIdBad) {
+        if (ASMJIT_UNLIKELY(tiedReg->hasUseId()))
+          return DebugUtils::errored(kErrorOverlappedRegs);
+        tiedReg->setUseId(useId);
+      }
+
+      if (outId != BaseReg::kIdBad) {
+        if (ASMJIT_UNLIKELY(tiedReg->hasOutId()))
+          return DebugUtils::errored(kErrorOverlappedRegs);
+        tiedReg->setOutId(outId);
+      }
+
+      // Merge: intersect allocable registers, union rewrite masks, keep the
+      // larger rm-size.
+      tiedReg->addRefCount();
+      tiedReg->addFlags(flags);
+      tiedReg->_allocableRegs &= allocable;
+      tiedReg->_useRewriteMask |= useRewriteMask;
+      tiedReg->_outRewriteMask |= outRewriteMask;
+      tiedReg->_rmSize = uint8_t(Support::max<uint32_t>(tiedReg->rmSize(), rmSize));
+      return kErrorOk;
+    }
+  }
+
+  //! Adds a fixed USE of `workReg` in physical register `useId` (function-call
+  //! argument). Unlike `add()`, a second fixed use is not an error — the entry
+  //! is marked `kDuplicate` and the allocable mask is widened instead.
+  Error addCallArg(RAWorkReg* workReg, uint32_t useId) noexcept {
+    ASMJIT_ASSERT(useId != BaseReg::kIdBad);
+
+    uint32_t flags = RATiedReg::kUse | RATiedReg::kRead | RATiedReg::kUseFixed;
+    uint32_t group = workReg->group();
+    uint32_t allocable = Support::bitMask(useId);
+
+    _aggregatedFlags |= flags;
+    _used[group] |= allocable;
+    _stats.makeFixed(group);
+    _stats.makeUsed(group);
+
+    RATiedReg* tiedReg = workReg->tiedReg();
+    if (!tiedReg) {
+      // Could happen when the builder is not reset properly after each instruction.
+      ASMJIT_ASSERT(tiedRegCount() < ASMJIT_ARRAY_SIZE(_tiedRegs));
+
+      tiedReg = _cur++;
+      tiedReg->init(workReg->workId(), flags, allocable, useId, 0, BaseReg::kIdBad, 0);
+      workReg->setTiedReg(tiedReg);
+
+      _count.add(group);
+      return kErrorOk;
+    }
+    else {
+      if (tiedReg->hasUseId()) {
+        flags |= RATiedReg::kDuplicate;
+        tiedReg->_allocableRegs |= allocable;
+      }
+      else {
+        tiedReg->setUseId(useId);
+        tiedReg->_allocableRegs &= allocable;
+      }
+
+      tiedReg->addRefCount();
+      tiedReg->addFlags(flags);
+      return kErrorOk;
+    }
+  }
+
+  //! Adds a fixed OUT of `workReg` in physical register `outId` (function-call
+  //! return value). A second fixed out-id is an error (`kErrorOverlappedRegs`).
+  Error addCallRet(RAWorkReg* workReg, uint32_t outId) noexcept {
+    ASMJIT_ASSERT(outId != BaseReg::kIdBad);
+
+    uint32_t flags = RATiedReg::kOut | RATiedReg::kWrite | RATiedReg::kOutFixed;
+    uint32_t group = workReg->group();
+    uint32_t allocable = Support::bitMask(outId);
+
+    _aggregatedFlags |= flags;
+    _used[group] |= allocable;
+    _stats.makeFixed(group);
+    _stats.makeUsed(group);
+
+    RATiedReg* tiedReg = workReg->tiedReg();
+    if (!tiedReg) {
+      // Could happen when the builder is not reset properly after each instruction.
+      ASMJIT_ASSERT(tiedRegCount() < ASMJIT_ARRAY_SIZE(_tiedRegs));
+
+      tiedReg = _cur++;
+      tiedReg->init(workReg->workId(), flags, allocable, BaseReg::kIdBad, 0, outId, 0);
+      workReg->setTiedReg(tiedReg);
+
+      _count.add(group);
+      return kErrorOk;
+    }
+    else {
+      if (tiedReg->hasOutId())
+        return DebugUtils::errored(kErrorOverlappedRegs);
+
+      tiedReg->addRefCount();
+      tiedReg->addFlags(flags);
+      tiedReg->setOutId(outId);
+      return kErrorOk;
+    }
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::RASharedAssignment]
+// ============================================================================
+
+//! A register assignment shared by a group of basic blocks — each block that
+//! carries this shared assignment must enter with the same register state.
+class RASharedAssignment {
+public:
+  typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
+  typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
+
+  //! Bit-mask of registers that cannot be used upon a block entry, for each
+  //! block that has this shared assignment. Scratch registers can come from
+  //! ISA limits (like jecx/loop instructions on x86) or because the registers
+  //! are used by jump/branch instruction that uses registers to perform an
+  //! indirect jump.
+  uint32_t _entryScratchGpRegs;
+  //! Union of all live-in registers.
+  ZoneBitVector _liveIn;
+  //! Register assignment (PhysToWork).
+  PhysToWorkMap* _physToWorkMap;
+  //! Register assignment (WorkToPhys).
+  WorkToPhysMap* _workToPhysMap;
+
+  //! Provided for clarity, most likely never called as we initialize a vector
+  //! of shared assignments to zero.
+  inline RASharedAssignment() noexcept
+    : _entryScratchGpRegs(0),
+      _liveIn(),
+      _physToWorkMap(nullptr),
+      _workToPhysMap(nullptr) {}
+
+  //! Returns the scratch GP register mask forbidden upon entry.
+  inline uint32_t entryScratchGpRegs() const noexcept { return _entryScratchGpRegs; }
+  //! Merges `mask` into the entry scratch GP register mask.
+  inline void addEntryScratchGpRegs(uint32_t mask) noexcept { _entryScratchGpRegs |= mask; }
+
+  //! Returns the union of live-in registers of all participating blocks.
+  inline const ZoneBitVector& liveIn() const noexcept { return _liveIn; }
+
+  inline PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
+  inline WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }
+
+  //! Tests whether no assignment maps were attached yet.
+  inline bool empty() const noexcept {
+    return _physToWorkMap == nullptr;
+  }
+
+  //! Attaches both assignment maps (stored as raw, externally-owned pointers).
+  inline void assignMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
+    _physToWorkMap = physToWorkMap;
+    _workToPhysMap = workToPhysMap;
+  }
+};
+
+// ============================================================================
+// [asmjit::RAPass]
+// ============================================================================
+
+//! Register allocation pass used by `BaseCompiler`.
+class RAPass : public FuncPass {
+public:
+  ASMJIT_NONCOPYABLE(RAPass)
+  typedef FuncPass Base;
+
+  enum Weights : uint32_t {
+    //! Weight constant associated with call arguments (consumers of this
+    //! constant are outside this chunk).
+    kCallArgWeight = 80
+  };
+
+  typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
+  typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
+
+  //! Allocator that uses zone passed to `runOnFunction()`.
+  ZoneAllocator _allocator;
+  //! Logger, disabled if null.
+  Logger* _logger;
+  //! Debug logger, non-null only if `kOptionDebugPasses` option is set.
+  Logger* _debugLogger;
+  //! Logger flags.
+  uint32_t _loggerFlags;
+
+  //! Function being processed.
+  FuncNode* _func;
+  //! Stop node.
+  BaseNode* _stop;
+  //! Node that is used to insert extra code after the function body.
+  BaseNode* _extraBlock;
+
+  //! Blocks (first block is the entry, always exists).
+  RABlocks _blocks;
+  //! Function exit blocks (usually one, but can contain more).
+  RABlocks _exits;
+  //! Post order view (POV).
+  RABlocks _pov;
+
+  //! Number of instruction nodes.
+  uint32_t _instructionCount;
+  //! Number of created blocks (internal).
+  uint32_t _createdBlockCount;
+
+  //! SharedState blocks.
+  ZoneVector<RASharedAssignment> _sharedAssignments;
+
+  //! Timestamp generator (incremental).
+  mutable uint64_t _lastTimestamp;
+
+  //! Architecture registers information.
+  const ArchRegs* _archRegsInfo;
+  //! Architecture traits.
+  RAArchTraits _archTraits;
+  //! Index to physical registers in `RAAssignment::PhysToWorkMap`.
+  RARegIndex _physRegIndex;
+  //! Count of physical registers in `RAAssignment::PhysToWorkMap`.
+  RARegCount _physRegCount;
+  //! Total number of physical registers.
+  uint32_t _physRegTotal;
+  //! Indexes of a possible scratch registers that can be selected if necessary.
+  uint8_t _scratchRegIndexes[2];
+
+  //! Registers available for allocation.
+  RARegMask _availableRegs;
+  //! Count of physical registers per group.
+  RARegCount _availableRegCount;
+  //! Registers clobbered by the function.
+  RARegMask _clobberedRegs;
+
+  //! Work registers (registers used by the function).
+  RAWorkRegs _workRegs;
+  //! Work registers per register group.
+  RAWorkRegs _workRegsOfGroup[BaseReg::kGroupVirt];
+
+  //! Register allocation strategy per register group.
+  RAStrategy _strategy[BaseReg::kGroupVirt];
+  //! Global max live-count (from all blocks) per register group.
+  RALiveCount _globalMaxLiveCount;
+  //! Global live spans per register group.
+  LiveRegSpans* _globalLiveSpans[BaseReg::kGroupVirt];
+  //! Temporary stack slot.
+  Operand _temporaryMem;
+
+  //! Stack pointer.
+  BaseReg _sp;
+  //! Frame pointer.
+  BaseReg _fp;
+  //! Stack manager.
+  RAStackAllocator _stackAllocator;
+  //! Function arguments assignment.
+  FuncArgsAssignment _argsAssignment;
+  //! Some StackArgs have to be assigned to StackSlots.
+  uint32_t _numStackArgsToStackSlots;
+
+  //! Maximum name-size computed from all WorkRegs.
+  uint32_t _maxWorkRegNameSize;
+  //! Temporary string builder used to format comments.
+  StringTmp<80> _tmpString;
+
+  //! \name Construction & Reset
+  //! \{
+
+  RAPass() noexcept;
+  virtual ~RAPass() noexcept;
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns \ref Logger passed to \ref runOnFunction().
+  inline Logger* logger() const noexcept { return _logger; }
+  //! Returns \ref Logger passed to \ref runOnFunction() or null if `kOptionDebugPasses` is not set.
+  inline Logger* debugLogger() const noexcept { return _debugLogger; }
+
+  //! Returns \ref Zone passed to \ref runOnFunction().
+  inline Zone* zone() const noexcept { return _allocator.zone(); }
+  //! Returns \ref ZoneAllocator used by the register allocator.
+  inline ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); }
+
+  inline const ZoneVector<RASharedAssignment>& sharedAssignments() const { return _sharedAssignments; }
+  inline uint32_t sharedAssignmentCount() const noexcept { return _sharedAssignments.size(); }
+
+  //! Returns the current function node.
+  inline FuncNode* func() const noexcept { return _func; }
+  //! Returns the stop of the current function.
+  inline BaseNode* stop() const noexcept { return _stop; }
+
+  //! Returns an extra block used by the current function being processed.
+  inline BaseNode* extraBlock() const noexcept { return _extraBlock; }
+  //! Sets an extra block, see `extraBlock()`.
+  inline void setExtraBlock(BaseNode* node) noexcept { _extraBlock = node; }
+
+  //! Returns the end position (two positions per instruction node).
+  inline uint32_t endPosition() const noexcept { return _instructionCount * 2; }
+
+  inline const RARegMask& availableRegs() const noexcept { return _availableRegs; }
+  // NOTE(review): "cloberredRegs" is misspelled ("clobberedRegs"), but it is a
+  // public accessor name — kept as-is for API compatibility.
+  inline const RARegMask& cloberredRegs() const noexcept { return _clobberedRegs; }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Removes `regId` of `group` from the allocatable set and decrements the
+  //! available-register count of that group.
+  inline void makeUnavailable(uint32_t group, uint32_t regId) noexcept {
+    _availableRegs[group] &= ~Support::bitMask(regId);
+    _availableRegCount[group]--;
+  }
+
+  //! Runs the register allocator for the given `func`.
+  Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) override;
+
+  //! Performs all allocation steps sequentially, called by `runOnFunction()`.
+  Error onPerformAllSteps() noexcept;
+
+  //! \}
+
+  //! \name Events
+  //! \{
+
+  //! Called by \ref runOnFunction() before the register allocation to initialize
+  //! architecture-specific data and constraints.
+  virtual void onInit() noexcept = 0;
+
+  //! Called by \ref runOnFunction() after register allocation to clean everything
+  //! up. Called even if the register allocation failed.
+  virtual void onDone() noexcept = 0;
+
+  //! \}
+
+  //! \name CFG - Basic-Block Management
+  //! \{
+
+  //! Returns the function's entry block.
+  inline RABlock* entryBlock() noexcept {
+    ASMJIT_ASSERT(!_blocks.empty());
+    return _blocks[0];
+  }
+
+  //! \overload
+  inline const RABlock* entryBlock() const noexcept {
+    ASMJIT_ASSERT(!_blocks.empty());
+    return _blocks[0];
+  }
+
+  //! Returns all basic blocks of this function.
+  inline RABlocks& blocks() noexcept { return _blocks; }
+  //! \overload
+  inline const RABlocks& blocks() const noexcept { return _blocks; }
+
+  //! Returns the count of basic blocks (returns size of `_blocks` array).
+  inline uint32_t blockCount() const noexcept { return _blocks.size(); }
+  //! Returns the count of reachable basic blocks (returns size of `_pov` array).
+  inline uint32_t reachableBlockCount() const noexcept { return _pov.size(); }
+
+  //! Tests whether the CFG has dangling blocks - these were created by `newBlock()`,
+  //! but not added to CFG through `addBlocks()`. If `true` is returned and the
+  //! CFG is constructed it means that something is missing and it's incomplete.
+  //!
+  //! \note This is only used to check if the number of created blocks matches
+  //! the number of added blocks.
+  inline bool hasDanglingBlocks() const noexcept { return _createdBlockCount != blockCount(); }
+
+  //! Gets the next timestamp to be used to mark CFG blocks.
+  inline uint64_t nextTimestamp() const noexcept { return ++_lastTimestamp; }
+
+  //! Creates a new `RABlock` instance.
+  //!
+  //! \note New blocks don't have ID assigned until they are added to the block
+  //! array by calling `addBlock()`.
+  RABlock* newBlock(BaseNode* initialNode = nullptr) noexcept;
+
+  //! Tries to find a neighboring LabelNode (without going through code) that is
+  //! already connected with `RABlock`. If no label is found then a new RABlock
+  //! is created and assigned to all possible labels in a backward direction.
+  RABlock* newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stoppedAt = nullptr) noexcept;
+
+  //! Adds the given `block` to the block list and assign it a unique block id.
+  Error addBlock(RABlock* block) noexcept;
+
+  //! Marks `block` as a function exit and appends it to `_exits`.
+  inline Error addExitBlock(RABlock* block) noexcept {
+    block->addFlags(RABlock::kFlagIsFuncExit);
+    return _exits.append(allocator(), block);
+  }
+
+  //! Allocates (from the zone) and constructs a `RAInst` sized for
+  //! `tiedRegCount` tied registers; returns null on out-of-memory.
+  ASMJIT_INLINE RAInst* newRAInst(RABlock* block, uint32_t flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept {
+    void* p = zone()->alloc(RAInst::sizeOf(tiedRegCount));
+    if (ASMJIT_UNLIKELY(!p))
+      return nullptr;
+    return new(p) RAInst(block, flags, tiedRegCount, clobberedRegs);
+  }
+
+  //! Creates a `RAInst` from the state aggregated in `ib`, copies the tied
+  //! registers into it (grouped by register group, flags filtered by
+  //! `ib.forbiddenFlags()`), and attaches it to `node` as pass data.
+  ASMJIT_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept {
+    uint32_t tiedRegCount = ib.tiedRegCount();
+    RAInst* raInst = newRAInst(block, ib.aggregatedFlags(), tiedRegCount, ib._clobbered);
+
+    if (ASMJIT_UNLIKELY(!raInst))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    RARegIndex index;
+    uint32_t flagsFilter = ~ib.forbiddenFlags();
+
+    index.buildIndexes(ib._count);
+    raInst->_tiedIndex = index;
+    raInst->_tiedCount = ib._count;
+
+    for (uint32_t i = 0; i < tiedRegCount; i++) {
+      RATiedReg* tiedReg = ib[i];
+      RAWorkReg* workReg = workRegById(tiedReg->workId());
+
+      // Detach the tied reg from the work reg so the builder can be reused.
+      workReg->resetTiedReg();
+      uint32_t group = workReg->group();
+
+      if (tiedReg->hasUseId()) {
+        block->addFlags(RABlock::kFlagHasFixedRegs);
+        raInst->_usedRegs[group] |= Support::bitMask(tiedReg->useId());
+      }
+
+      if (tiedReg->hasOutId()) {
+        block->addFlags(RABlock::kFlagHasFixedRegs);
+      }
+
+      RATiedReg& dst = raInst->_tiedRegs[index[group]++];
+      dst = *tiedReg;
+      dst._flags &= flagsFilter;
+
+      // Non-duplicates must not be allocated into registers already fixed-used
+      // by this instruction.
+      if (!tiedReg->isDuplicate())
+        dst._allocableRegs &= ~ib._used[group];
+    }
+
+    node->setPassData<RAInst>(raInst);
+    return kErrorOk;
+  }
+
+  //! \}
+
+  //! \name CFG - Build CFG
+  //! \{
+
+  //! Traverse the whole function and do the following:
+  //!
+  //!   1. Construct CFG (represented by `RABlock`) by populating `_blocks` and
+  //!      `_exits`. Blocks describe the control flow of the function and contain
+  //!      some additional information that is used by the register allocator.
+  //!
+  //!   2. Remove unreachable code immediately. This is not strictly necessary
+  //!      for BaseCompiler itself as the register allocator cannot reach such
+  //!      nodes, but keeping instructions that use virtual registers would fail
+  //!      during instruction encoding phase (Assembler).
+  //!
+  //!   3. `RAInst` is created for each `InstNode` or compatible. It contains
+  //!      information that is essential for further analysis and register
+  //!      allocation.
+  //!
+  //! Use `RACFGBuilder` template that provides the necessary boilerplate.
+  virtual Error buildCFG() noexcept = 0;
+
+  //! Called after the CFG is built.
+  Error initSharedAssignments(const ZoneVector<uint32_t>& sharedAssignmentsMap) noexcept;
+
+  //! \}
+
+  //! \name CFG - Views Order
+  //! \{
+
+  //! Constructs CFG views (only POV at the moment).
+  Error buildViews() noexcept;
+
+  //! \}
+
+  //! \name CFG - Dominators
+  //! \{
+
+  // Terminology:
+  //   - A node `X` dominates a node `Z` if any path from the entry point to
+  //     `Z` has to go through `X`.
+  //   - A node `Z` post-dominates a node `X` if any path from `X` to the end
+  //     of the graph has to go through `Z`.
+
+  //! Constructs a dominator-tree from CFG.
+  Error buildDominators() noexcept;
+
+  bool _strictlyDominates(const RABlock* a, const RABlock* b) const noexcept;
+  const RABlock* _nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept;
+
+  //! Tests whether the basic block `a` dominates `b` - non-strict, returns true when `a == b`.
+  inline bool dominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? true : _strictlyDominates(a, b); }
+  //! Tests whether the basic block `a` dominates `b` - strict dominance check, returns false when `a == b`.
+  inline bool strictlyDominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? false : _strictlyDominates(a, b); }
+
+  //! Returns a nearest common dominator of `a` and `b`.
+  inline RABlock* nearestCommonDominator(RABlock* a, RABlock* b) const noexcept { return const_cast<RABlock*>(_nearestCommonDominator(a, b)); }
+  //! Returns a nearest common dominator of `a` and `b` (const).
+  inline const RABlock* nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept { return _nearestCommonDominator(a, b); }
+
+  //! \}
+
+  //! \name CFG - Utilities
+  //! \{
+
+  Error removeUnreachableBlocks() noexcept;
+
+  //! Returns `node` or some node after that is ideal for beginning a new block.
+  //! This function is mostly used after a conditional or unconditional jump to
+  //! select the successor node. In some cases the next node could be a label,
+  //! which means it could have assigned some block already.
+  BaseNode* findSuccessorStartingAt(BaseNode* node) noexcept;
+
+  //! Returns `true` of the `node` can flow to `target` without reaching code
+  //! nor data. It's used to eliminate jumps to labels that are next right to
+  //! them.
+  bool isNextTo(BaseNode* node, BaseNode* target) noexcept;
+
+  //! \}
+
+  //! \name Virtual Register Management
+  //! \{
+
+  //! Returns a native size of the general-purpose register of the target architecture.
+  inline uint32_t registerSize() const noexcept { return _sp.size(); }
+  inline uint32_t availableRegCount(uint32_t group) const noexcept { return _availableRegCount[group]; }
+
+  inline RAWorkReg* workRegById(uint32_t workId) const noexcept { return _workRegs[workId]; }
+
+  inline RAWorkRegs& workRegs() noexcept { return _workRegs; }
+  inline RAWorkRegs& workRegs(uint32_t group) noexcept { return _workRegsOfGroup[group]; }
+
+  inline const RAWorkRegs& workRegs() const noexcept { return _workRegs; }
+  inline const RAWorkRegs& workRegs(uint32_t group) const noexcept { return _workRegsOfGroup[group]; }
+
+  inline uint32_t workRegCount() const noexcept { return _workRegs.size(); }
+  inline uint32_t workRegCount(uint32_t group) const noexcept { return _workRegsOfGroup[group].size(); }
+
+  //! Builds `_physRegIndex` from `_physRegCount` and computes the total number
+  //! of physical registers across all groups.
+  inline void _buildPhysIndex() noexcept {
+    _physRegIndex.buildIndexes(_physRegCount);
+    _physRegTotal = uint32_t(_physRegIndex[BaseReg::kGroupVirt - 1]) +
+                    uint32_t(_physRegCount[BaseReg::kGroupVirt - 1]) ;
+  }
+  inline uint32_t physRegIndex(uint32_t group) const noexcept { return _physRegIndex[group]; }
+  inline uint32_t physRegTotal() const noexcept { return _physRegTotal; }
+
+  Error _asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept;
+
+  //! Creates `RAWorkReg` data for the given `vReg`. The function does nothing
+  //! if `vReg` already contains link to `RAWorkReg`. Called by `constructBlocks()`.
+  inline Error asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept {
+    *out = vReg->workReg();
+    return *out ? kErrorOk : _asWorkReg(vReg, out);
+  }
+
+  //! Like `asWorkReg()`, but looks the virtual register up by its index and
+  //! validates the index first.
+  inline Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept {
+    const ZoneVector<VirtReg*>& virtRegs = cc()->virtRegs();
+    if (ASMJIT_UNLIKELY(vIndex >= virtRegs.size()))
+      return DebugUtils::errored(kErrorInvalidVirtId);
+    return asWorkReg(virtRegs[vIndex], out);
+  }
+
+  //! Returns the stack slot of `workReg`, creating one (sized/aligned from the
+  //! underlying VirtReg) and marking the register stack-used on first call.
+  inline RAStackSlot* getOrCreateStackSlot(RAWorkReg* workReg) noexcept {
+    RAStackSlot* slot = workReg->stackSlot();
+
+    if (slot)
+      return slot;
+
+    slot = _stackAllocator.newSlot(_sp.id(), workReg->virtReg()->virtSize(), workReg->virtReg()->alignment(), RAStackSlot::kFlagRegHome);
+    workReg->_stackSlot = slot;
+    workReg->markStackUsed();
+    return slot;
+  }
+
+  //! Returns a memory operand referring to `workReg`'s home stack slot
+  //! (ensures the slot exists first).
+  inline BaseMem workRegAsMem(RAWorkReg* workReg) noexcept {
+    getOrCreateStackSlot(workReg);
+    return BaseMem(BaseMem::Decomposed { _sp.type(), workReg->virtId(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
+  }
+
+  WorkToPhysMap* newWorkToPhysMap() noexcept;
+  PhysToWorkMap* newPhysToWorkMap() noexcept;
+
+  //! Duplicates `map` into the zone (sized by `_physRegTotal`).
+  inline PhysToWorkMap* clonePhysToWorkMap(const PhysToWorkMap* map) noexcept {
+    size_t size = PhysToWorkMap::sizeOf(_physRegTotal);
+    return static_cast<PhysToWorkMap*>(zone()->dupAligned(map, size, sizeof(uint32_t)));
+  }
+
+  //! Duplicates `map` into the zone (sized by the work-register count); when
+  //! there are no work registers the input map is returned as-is.
+  inline WorkToPhysMap* cloneWorkToPhysMap(const WorkToPhysMap* map) noexcept {
+    size_t size = WorkToPhysMap::sizeOf(_workRegs.size());
+    if (ASMJIT_UNLIKELY(size == 0))
+      return const_cast<WorkToPhysMap*>(map);
+    return static_cast<WorkToPhysMap*>(zone()->dup(map, size));
+  }
+
+  //! \}
+
+  //! \name Liveness Analysis & Statistics
+  //! \{
+
+  //! 1. Calculates GEN/KILL/IN/OUT of each block.
+  //! 2. Calculates live spans and basic statistics of each work register.
+  Error buildLiveness() noexcept;
+
+  //! Assigns argIndex to WorkRegs. Must be called after the liveness analysis
+  //! finishes as it checks whether the argument is live upon entry.
+  Error assignArgIndexToWorkRegs() noexcept;
+
+  //! \}
+
+  //! \name Register Allocation - Global
+  //! \{
+
+  //! Runs a global register allocator.
+  Error runGlobalAllocator() noexcept;
+
+  //! Initializes data structures used for global live spans.
+  Error initGlobalLiveSpans() noexcept;
+
+  Error binPack(uint32_t group) noexcept;
+
+  //! \}
+
+  //! \name Register Allocation - Local
+  //! \{
+
+  //! Runs a local register allocator.
+  Error runLocalAllocator() noexcept;
+  Error setBlockEntryAssignment(RABlock* block, const RABlock* fromBlock, const RAAssignment& fromAssignment) noexcept;
+  Error setSharedAssignment(uint32_t sharedAssignmentId, const RAAssignment& fromAssignment) noexcept;
+
+  //! Called after the RA assignment has been assigned to a block.
+  //!
+  //! This cannot change the assignment, but can examine it.
+  Error blockEntryAssigned(const RAAssignment& as) noexcept;
+
+  //! \}
+
+  //! \name Register Allocation Utilities
+  //! \{
+
+  Error useTemporaryMem(BaseMem& out, uint32_t size, uint32_t alignment) noexcept;
+
+  //! \}
+
+  //! \name Function Prolog & Epilog
+  //! \{
+
+  Error updateStackFrame() noexcept;
+  Error _markStackArgsToKeep() noexcept;
+  Error _updateStackArgs() noexcept;
+  Error insertPrologEpilog() noexcept;
+
+  //! \}
+
+  //! \name Instruction Rewriter
+  //! \{
+
+  Error rewrite() noexcept;
+  Error _rewrite(BaseNode* first, BaseNode* stop) noexcept;
+
+  //! \}
+
+#ifndef ASMJIT_NO_LOGGING
+  //! \name Logging
+  //! \{
+
+  Error annotateCode() noexcept;
+
+  Error _dumpBlockIds(String& sb, const RABlocks& blocks) noexcept;
+  Error _dumpBlockLiveness(String& sb, const RABlock* block) noexcept;
+  Error _dumpLiveSpans(String& sb) noexcept;
+
+  //! \}
+#endif
+
+  //! \name Emit
+  //! \{
+
+  virtual Error onEmitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept = 0;
+  virtual Error onEmitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept = 0;
+
+  virtual Error onEmitLoad(uint32_t workId, uint32_t dstPhysId) noexcept = 0;
+  virtual Error onEmitSave(uint32_t workId, uint32_t srcPhysId) noexcept = 0;
+
+  virtual Error onEmitJump(const Label& label) noexcept = 0;
+  virtual Error onEmitPreCall(InvokeNode* invokeNode) noexcept = 0;
+
+  //! \}
+};
+
+//! Returns the `ZoneAllocator` owned by the `RAPass` this block belongs to.
+inline ZoneAllocator* RABlock::allocator() const noexcept { return _ra->allocator(); }
+
+//! Returns the scratch GP registers that cannot be used upon this block's
+//! entry; when the block participates in a shared assignment the mask comes
+//! from the shared assignment record instead of the block itself.
+inline uint32_t RABlock::entryScratchGpRegs() const noexcept {
+  uint32_t regs = _entryScratchGpRegs;
+  if (hasSharedAssignmentId())
+    regs = _ra->_sharedAssignments[_sharedAssignmentId].entryScratchGpRegs();
+  return regs;
+}
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RAPASS_P_H_INCLUDED
diff --git a/client/asmjit/core/rastack.cpp b/client/asmjit/core/rastack.cpp
new file mode 100644
index 0000000..b886279
--- /dev/null
+++ b/client/asmjit/core/rastack.cpp
@@ -0,0 +1,206 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/rastack_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::RAStackAllocator - Slots]
+// ============================================================================
+
+// Creates a new stack slot of the given `size`/`alignment`, addressed via
+// `baseRegId`, and appends it to `_slots`. Returns nullptr on allocation
+// failure (callers must check).
+RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept {
+  // Reserve vector capacity first so `appendUnsafe()` below cannot fail.
+  if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk))
+    return nullptr;
+
+  RAStackSlot* slot = allocator()->allocT<RAStackSlot>();
+  if (ASMJIT_UNLIKELY(!slot))
+    return nullptr;
+
+  slot->_baseRegId = uint8_t(baseRegId);
+  // Clamp to a minimum alignment of 1 so alignment math never divides by zero.
+  slot->_alignment = uint8_t(Support::max<uint32_t>(alignment, 1));
+  slot->_flags = uint16_t(flags);
+  slot->_useCount = 0;
+  slot->_size = size;
+
+  // Weight and offset are computed later by `calculateStackFrame()`.
+  slot->_weight = 0;
+  slot->_offset = 0;
+
+  // Keep the allocator-wide alignment at the maximum any slot requires.
+  _alignment = Support::max<uint32_t>(_alignment, alignment);
+  _slots.appendUnsafe(slot);
+  return slot;
+}
+
+// ============================================================================
+// [asmjit::RAStackAllocator - Utilities]
+// ============================================================================
+
+//! A contiguous unused region of the stack frame (offset + size), produced
+//! while laying out slots in `calculateStackFrame()` and recycled to place
+//! smaller slots without growing the frame.
+struct RAStackGap {
+  inline RAStackGap() noexcept
+    : offset(0),
+      size(0) {}
+
+  inline RAStackGap(uint32_t offset, uint32_t size) noexcept
+    : offset(offset),
+      size(size) {}
+
+  inline RAStackGap(const RAStackGap& other) noexcept
+    : offset(other.offset),
+      size(other.size) {}
+
+  //! Offset of the gap from the start of the frame area being laid out.
+  uint32_t offset;
+  //! Size of the gap in bytes.
+  uint32_t size;
+};
+
+// Assigns a frame offset to every slot and computes `_stackSize`.
+// Slots are placed in decreasing weight order (hot slots first) and alignment
+// padding is recycled through power-of-two gap lists instead of being wasted.
+Error RAStackAllocator::calculateStackFrame() noexcept {
+  // Base weight added to all registers regardless of their size and alignment.
+  uint32_t kBaseRegWeight = 16;
+
+  // STEP 1:
+  //
+  // Update usage based on the size of the slot. We boost smaller slots in a way
+  // that 32-bit register has higher priority than a 128-bit register, however,
+  // if one 128-bit register is used 4 times more than some other 32-bit register
+  // it will overweight it.
+  for (RAStackSlot* slot : _slots) {
+    uint32_t alignment = slot->alignment();
+    ASMJIT_ASSERT(alignment > 0);
+
+    // `power` is log2 of the alignment, capped at 6 (64-byte alignment).
+    uint32_t power = Support::min<uint32_t>(Support::ctz(alignment), 6);
+    uint64_t weight;
+
+    if (slot->isRegHome())
+      weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power));
+    else
+      weight = power;
+
+    // If overflown, which has less chance of winning a lottery, just use max
+    // possible weight. In such case it probably doesn't matter at all.
+    if (weight > 0xFFFFFFFFu)
+      weight = 0xFFFFFFFFu;
+
+    slot->setWeight(uint32_t(weight));
+  }
+
+  // STEP 2:
+  //
+  // Sort stack slots based on their newly calculated weight (in descending order).
+  _slots.sort([](const RAStackSlot* a, const RAStackSlot* b) noexcept {
+    return a->weight() > b->weight() ? 1 :
+           a->weight() == b->weight() ? 0 : -1;
+  });
+
+  // STEP 3:
+  //
+  // Calculate offset of each slot. We start from the slot that has the highest
+  // weight and advance to slots with lower weight. It could look that offsets
+  // start from the first slot in our list and then simply increase, but it's
+  // not always the case as we also try to fill all gaps introduced by the fact
+  // that slots are sorted by weight and not by size & alignment, so when we need
+  // to align some slot we distribute the gap caused by the alignment to `gaps`.
+  uint32_t offset = 0;
+  // One free-list per power-of-two size class (1..64 bytes), see `Size`.
+  ZoneVector<RAStackGap> gaps[kSizeCount - 1];
+
+  for (RAStackSlot* slot : _slots) {
+    // Stack arguments have fixed positions dictated by the caller; skip them.
+    if (slot->isStackArg())
+      continue;
+
+    uint32_t slotAlignment = slot->alignment();
+    uint32_t alignedOffset = Support::alignUp(offset, slotAlignment);
+
+    // Try to find a slot within gaps first, before advancing the `offset`.
+    bool foundGap = false;
+    uint32_t gapSize = 0;
+    uint32_t gapOffset = 0;
+
+    {
+      uint32_t slotSize = slot->size();
+      if (slotSize < (1u << uint32_t(ASMJIT_ARRAY_SIZE(gaps)))) {
+        // Iterate from the lowest to the highest possible.
+        uint32_t index = Support::ctz(slotSize);
+        do {
+          if (!gaps[index].empty()) {
+            RAStackGap gap = gaps[index].pop();
+
+            // Gaps are always aligned to their own power-of-two size, which is
+            // >= `slotSize` here, so the slot's alignment is satisfied.
+            ASMJIT_ASSERT(Support::isAligned(gap.offset, slotAlignment));
+            slot->setOffset(int32_t(gap.offset));
+
+            // Whatever remains of the gap is redistributed below.
+            gapSize = gap.size - slotSize;
+            gapOffset = gap.offset - slotSize;
+
+            foundGap = true;
+            break;
+          }
+        } while (++index < uint32_t(ASMJIT_ARRAY_SIZE(gaps)));
+      }
+    }
+
+    // No gap found, we may create a new one(s) if the current offset is not aligned.
+    if (!foundGap && offset != alignedOffset) {
+      gapSize = alignedOffset - offset;
+      gapOffset = alignedOffset;
+
+      offset = alignedOffset;
+    }
+
+    // True if we have found a gap and not filled all of it or we aligned the current offset.
+    if (gapSize) {
+      uint32_t gapEnd = gapSize + gapOffset;
+      while (gapOffset < gapEnd) {
+        // Split the leftover into naturally-aligned power-of-two chunks and
+        // file each into the matching size-class free-list.
+        uint32_t index = Support::ctz(gapOffset);
+        uint32_t slotSize = 1u << index;
+
+        // Weird case, better to bail...
+        if (gapEnd - gapOffset < slotSize)
+          break;
+
+        ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize)));
+        gapOffset += slotSize;
+      }
+    }
+
+    if (!foundGap) {
+      ASMJIT_ASSERT(Support::isAligned(offset, slotAlignment));
+      slot->setOffset(int32_t(offset));
+      offset += slot->size();
+    }
+  }
+
+  // Round the final size up to the frame alignment accumulated in `newSlot()`.
+  _stackSize = Support::alignUp(offset, _alignment);
+  return kErrorOk;
+}
+
+// Shifts every computed slot offset by `offset` (e.g. to rebase slots onto the
+// final frame layout). Stack-argument slots keep their caller-defined position.
+Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept {
+  for (RAStackSlot* slot : _slots)
+    if (!slot->isStackArg())
+      slot->_offset += offset;
+  return kErrorOk;
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
diff --git a/client/asmjit/core/rastack_p.h b/client/asmjit/core/rastack_p.h
new file mode 100644
index 0000000..33d4e1d
--- /dev/null
+++ b/client/asmjit/core/rastack_p.h
@@ -0,0 +1,187 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RASTACK_P_H_INCLUDED
+#define ASMJIT_CORE_RASTACK_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/radefs_p.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RAStackSlot]
+// ============================================================================
+
+//! Stack slot.
+struct RAStackSlot {
+ //! Stack slot flags.
+ //!
+ //! TODO: kFlagStackArg is not used by the current implementation, do we need to keep it?
+ enum Flags : uint32_t {
+ //! Stack slot is register home slot.
+ kFlagRegHome = 0x0001u,
+ //! Stack slot position matches argument passed via stack.
+ kFlagStackArg = 0x0002u
+ };
+
+ enum ArgIndex : uint32_t {
+ kNoArgIndex = 0xFF
+ };
+
+ //! Base register used to address the stack.
+ uint8_t _baseRegId;
+ //! Minimum alignment required by the slot.
+ uint8_t _alignment;
+ //! Reserved for future use.
+ uint16_t _flags;
+ //! Size of memory required by the slot.
+ uint32_t _size;
+
+ //! Usage counter (one unit equals one memory access).
+ uint32_t _useCount;
+ //! Weight of the slot, calculated by \ref RAStackAllocator::calculateStackFrame().
+ uint32_t _weight;
+ //! Stack offset, calculated by \ref RAStackAllocator::calculateStackFrame().
+ int32_t _offset;
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t baseRegId() const noexcept { return _baseRegId; }
+ inline void setBaseRegId(uint32_t id) noexcept { _baseRegId = uint8_t(id); }
+
+ inline uint32_t size() const noexcept { return _size; }
+ inline uint32_t alignment() const noexcept { return _alignment; }
+
+ inline uint32_t flags() const noexcept { return _flags; }
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ inline void addFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags | flags); }
+
+ inline bool isRegHome() const noexcept { return hasFlag(kFlagRegHome); }
+ inline bool isStackArg() const noexcept { return hasFlag(kFlagStackArg); }
+
+ inline uint32_t useCount() const noexcept { return _useCount; }
+ inline void addUseCount(uint32_t n = 1) noexcept { _useCount += n; }
+
+ inline uint32_t weight() const noexcept { return _weight; }
+ inline void setWeight(uint32_t weight) noexcept { _weight = weight; }
+
+ inline int32_t offset() const noexcept { return _offset; }
+ inline void setOffset(int32_t offset) noexcept { _offset = offset; }
+
+ //! \}
+};
+
+typedef ZoneVector<RAStackSlot*> RAStackSlots;
+
+// ============================================================================
+// [asmjit::RAStackAllocator]
+// ============================================================================
+
+//! Stack allocator.
+//! Stack allocator.
+//!
+//! Owns the list of \ref RAStackSlot records and computes the frame layout
+//! (see `calculateStackFrame()` in rastack.cpp).
+class RAStackAllocator {
+public:
+  ASMJIT_NONCOPYABLE(RAStackAllocator)
+
+  //! Power-of-two size classes (log2 of the byte size) used to bucket
+  //! free gaps during frame layout.
+  enum Size : uint32_t {
+    kSize1 = 0,
+    kSize2 = 1,
+    kSize4 = 2,
+    kSize8 = 3,
+    kSize16 = 4,
+    kSize32 = 5,
+    kSize64 = 6,
+    kSizeCount = 7
+  };
+
+  //! Allocator used to allocate internal data.
+  ZoneAllocator* _allocator;
+  //! Count of bytes used by all slots.
+  uint32_t _bytesUsed;
+  //! Calculated stack size (can be a bit greater than `_bytesUsed`).
+  uint32_t _stackSize;
+  //! Minimum stack alignment (maximum alignment required by any slot).
+  uint32_t _alignment;
+  //! Stack slots vector.
+  RAStackSlots _slots;
+
+  //! \name Construction / Destruction
+  //! \{
+
+  inline RAStackAllocator() noexcept
+    : _allocator(nullptr),
+      _bytesUsed(0),
+      _stackSize(0),
+      _alignment(1),
+      _slots() {}
+
+  //! Resets all state and attaches the given zone `allocator`.
+  inline void reset(ZoneAllocator* allocator) noexcept {
+    _allocator = allocator;
+    _bytesUsed = 0;
+    _stackSize = 0;
+    _alignment = 1;
+    _slots.reset();
+  }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  inline ZoneAllocator* allocator() const noexcept { return _allocator; }
+
+  inline uint32_t bytesUsed() const noexcept { return _bytesUsed; }
+  inline uint32_t stackSize() const noexcept { return _stackSize; }
+  inline uint32_t alignment() const noexcept { return _alignment; }
+
+  inline RAStackSlots& slots() noexcept { return _slots; }
+  inline const RAStackSlots& slots() const noexcept { return _slots; }
+  inline uint32_t slotCount() const noexcept { return _slots.size(); }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  RAStackSlot* newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags = 0) noexcept;
+
+  Error calculateStackFrame() noexcept;
+  Error adjustSlotOffsets(int32_t offset) noexcept;
+
+  //! \}
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RASTACK_P_H_INCLUDED
diff --git a/client/asmjit/core/string.cpp b/client/asmjit/core/string.cpp
new file mode 100644
index 0000000..e059884
--- /dev/null
+++ b/client/asmjit/core/string.cpp
@@ -0,0 +1,551 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/string.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::String - Globals]
+// ============================================================================
+
+static const char String_baseN[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+constexpr size_t kMinAllocSize = 64;
+constexpr size_t kMaxAllocSize = SIZE_MAX - Globals::kGrowThreshold;
+
+// ============================================================================
+// [asmjit::String]
+// ============================================================================
+
+// Releases heap storage (only when the string is in the dynamically allocated
+// `kTypeLarge` state - external buffers are not owned) and resets to empty SSO.
+Error String::reset() noexcept {
+  if (_type == kTypeLarge)
+    ::free(_large.data);
+
+  _resetInternal();
+  return kErrorOk;
+}
+
+// Empties the string while keeping its current storage/capacity.
+Error String::clear() noexcept {
+  if (isLarge()) {
+    _large.size = 0;
+    _large.data[0] = '\0';
+  }
+  else {
+    // SSO representation: zeroing the first word clears both the stored size
+    // (kept in `type`) and the first character.
+    _raw.uptr[0] = 0;
+  }
+
+  return kErrorOk;
+}
+
+// Makes room for `size` bytes and returns the position to write them:
+//   - kOpAssign: buffer start, string size becomes `size`.
+//   - append op: end of the current content, size grows by `size`.
+// Returns nullptr on allocation failure or arithmetic overflow. The returned
+// region is not initialized, but a NUL terminator is already placed after it.
+char* String::prepare(uint32_t op, size_t size) noexcept {
+  char* curData;
+  size_t curSize;
+  size_t curCapacity;
+
+  if (isLarge()) {
+    curData = this->_large.data;
+    curSize = this->_large.size;
+    curCapacity = this->_large.capacity;
+  }
+  else {
+    curData = this->_small.data;
+    // SSO strings store their size in the `type` field (see `assign()`).
+    curSize = this->_small.type;
+    curCapacity = kSSOCapacity;
+  }
+
+  if (op == kOpAssign) {
+    if (size > curCapacity) {
+      // Prevent arithmetic overflow.
+      if (ASMJIT_UNLIKELY(size >= kMaxAllocSize))
+        return nullptr;
+
+      size_t newCapacity = Support::alignUp<size_t>(size + 1, kMinAllocSize);
+      char* newData = static_cast<char*>(::malloc(newCapacity));
+
+      if (ASMJIT_UNLIKELY(!newData))
+        return nullptr;
+
+      if (_type == kTypeLarge)
+        ::free(curData);
+
+      // `capacity` excludes the NUL terminator, hence `newCapacity - 1`.
+      _large.type = kTypeLarge;
+      _large.size = size;
+      _large.capacity = newCapacity - 1;
+      _large.data = newData;
+
+      newData[size] = '\0';
+      return newData;
+    }
+    else {
+      _setSize(size);
+      curData[size] = '\0';
+      return curData;
+    }
+  }
+  else {
+    // Prevent arithmetic overflow.
+    if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize))
+      return nullptr;
+
+    size_t newSize = size + curSize;
+    size_t newSizePlusOne = newSize + 1;
+
+    if (newSizePlusOne > curCapacity) {
+      size_t newCapacity = Support::max<size_t>(curCapacity + 1, kMinAllocSize);
+
+      // Grow geometrically (power of two) below the threshold, linearly above.
+      if (newCapacity < newSizePlusOne && newCapacity < Globals::kGrowThreshold)
+        newCapacity = Support::alignUpPowerOf2(newCapacity);
+
+      if (newCapacity < newSizePlusOne)
+        newCapacity = Support::alignUp(newSizePlusOne, Globals::kGrowThreshold);
+
+      if (ASMJIT_UNLIKELY(newCapacity < newSizePlusOne))
+        return nullptr;
+
+      char* newData = static_cast<char*>(::malloc(newCapacity));
+      if (ASMJIT_UNLIKELY(!newData))
+        return nullptr;
+
+      memcpy(newData, curData, curSize);
+
+      if (_type == kTypeLarge)
+        ::free(curData);
+
+      _large.type = kTypeLarge;
+      _large.size = newSize;
+      _large.capacity = newCapacity - 1;
+      _large.data = newData;
+
+      newData[newSize] = '\0';
+      return newData + curSize;
+    }
+    else {
+      _setSize(newSize);
+      curData[newSize] = '\0';
+      return curData + curSize;
+    }
+  }
+}
+
+// Replaces the content with `data[0..size)`. Passing `size == SIZE_MAX`
+// means `data` is NUL-terminated and its length is computed via strlen().
+Error String::assign(const char* data, size_t size) noexcept {
+  char* dst = nullptr;
+
+  // Null terminated string without `size` specified.
+  if (size == SIZE_MAX)
+    size = data ? strlen(data) : size_t(0);
+
+  if (isLarge()) {
+    if (size <= _large.capacity) {
+      // Existing buffer is big enough - reuse it.
+      dst = _large.data;
+      _large.size = size;
+    }
+    else {
+      size_t capacityPlusOne = Support::alignUp(size + 1, 32);
+      if (ASMJIT_UNLIKELY(capacityPlusOne < size))
+        return DebugUtils::errored(kErrorOutOfMemory);
+
+      dst = static_cast<char*>(::malloc(capacityPlusOne));
+      if (ASMJIT_UNLIKELY(!dst))
+        return DebugUtils::errored(kErrorOutOfMemory);
+
+      // External buffers are not owned by the string, so never freed here.
+      if (!isExternal())
+        ::free(_large.data);
+
+      _large.type = kTypeLarge;
+      _large.data = dst;
+      _large.size = size;
+      _large.capacity = capacityPlusOne - 1;
+    }
+  }
+  else {
+    if (size <= kSSOCapacity) {
+      ASMJIT_ASSERT(size < 0xFFu);
+
+      // SSO: the size is stored in the `type` field.
+      dst = _small.data;
+      _small.type = uint8_t(size);
+    }
+    else {
+      dst = static_cast<char*>(::malloc(size + 1));
+      if (ASMJIT_UNLIKELY(!dst))
+        return DebugUtils::errored(kErrorOutOfMemory);
+
+      _large.type = kTypeLarge;
+      _large.data = dst;
+      _large.size = size;
+      _large.capacity = size;
+    }
+  }
+
+  // Optionally copy data from `data` and null-terminate.
+  if (data && size) {
+    // NOTE: It's better to use `memmove()`. If, for any reason, somebody uses
+    // this function to substring the same string it would work as expected.
+    ::memmove(dst, data, size);
+  }
+
+  dst[size] = '\0';
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::String - Operations]
+// ============================================================================
+
+// Assigns or appends `str[0..size)` depending on `op`; `size == SIZE_MAX`
+// means NUL-terminated input.
+Error String::_opString(uint32_t op, const char* str, size_t size) noexcept {
+  if (size == SIZE_MAX)
+    size = str ? strlen(str) : size_t(0);
+
+  if (!size)
+    return kErrorOk;
+
+  char* p = prepare(op, size);
+  if (!p)
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  memcpy(p, str, size);
+  return kErrorOk;
+}
+
+// Assigns or appends a single character depending on `op`.
+Error String::_opChar(uint32_t op, char c) noexcept {
+  char* p = prepare(op, 1);
+  if (!p)
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  *p = c;
+  return kErrorOk;
+}
+
+// Assigns or appends `n` copies of character `c` depending on `op`.
+Error String::_opChars(uint32_t op, char c, size_t n) noexcept {
+  if (!n)
+    return kErrorOk;
+
+  char* p = prepare(op, n);
+  if (!p)
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  memset(p, c, n);
+  return kErrorOk;
+}
+
+// Appends `c` until the string is at least `n` characters long; no-op if the
+// string is already that long.
+Error String::padEnd(size_t n, char c) noexcept {
+  size_t size = this->size();
+  return n > size ? appendChars(c, n - size) : kErrorOk;
+}
+
+// Formats integer `i` in the given `base` (2..36, falls back to 10), honoring
+// sign/alternate-form/width flags, and assigns or appends the result per `op`.
+Error String::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept {
+  if (base < 2 || base > 36)
+    base = 10;
+
+  // Digits are produced backwards into `buf`; `p` walks down from the end.
+  char buf[128];
+  char* p = buf + ASMJIT_ARRAY_SIZE(buf);
+
+  uint64_t orig = i;
+  char sign = '\0';
+
+  // --------------------------------------------------------------------------
+  // [Sign]
+  // --------------------------------------------------------------------------
+
+  if ((flags & kFormatSigned) != 0 && int64_t(i) < 0) {
+    i = uint64_t(-int64_t(i));
+    sign = '-';
+  }
+  else if ((flags & kFormatShowSign) != 0) {
+    sign = '+';
+  }
+  else if ((flags & kFormatShowSpace) != 0) {
+    sign = ' ';
+  }
+
+  // --------------------------------------------------------------------------
+  // [Number]
+  // --------------------------------------------------------------------------
+
+  do {
+    uint64_t d = i / base;
+    uint64_t r = i % base;
+
+    *--p = String_baseN[r];
+    i = d;
+  } while (i);
+
+  size_t numberSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p);
+
+  // --------------------------------------------------------------------------
+  // [Alternate Form]
+  // --------------------------------------------------------------------------
+
+  if ((flags & kFormatAlternate) != 0) {
+    if (base == 8) {
+      // Octal: prefix "0", but only for non-zero values (like printf's "%#o").
+      if (orig != 0)
+        *--p = '0';
+    }
+    if (base == 16) {
+      *--p = 'x';
+      *--p = '0';
+    }
+  }
+
+  // --------------------------------------------------------------------------
+  // [Width]
+  // --------------------------------------------------------------------------
+
+  if (sign != 0)
+    *--p = sign;
+
+  // Sanity cap on zero-padding.
+  if (width > 256)
+    width = 256;
+
+  if (width <= numberSize)
+    width = 0;
+  else
+    width -= numberSize;
+
+  // --------------------------------------------------------------------------
+  // [Write]
+  // --------------------------------------------------------------------------
+
+  // Output layout: prefix (sign + "0x"/"0") | zero padding | digits.
+  size_t prefixSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberSize;
+  char* data = prepare(op, prefixSize + width + numberSize);
+
+  if (!data)
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  memcpy(data, p, prefixSize);
+  data += prefixSize;
+
+  memset(data, '0', width);
+  data += width;
+
+  memcpy(data, p + prefixSize, numberSize);
+  return kErrorOk;
+}
+
+// Assigns or appends the hexadecimal representation of `size` bytes at `data`,
+// two uppercase digits per byte, optionally separated by `separator`.
+Error String::_opHex(uint32_t op, const void* data, size_t size, char separator) noexcept {
+  char* dst;
+  const uint8_t* src = static_cast<const uint8_t*>(data);
+
+  if (!size)
+    return kErrorOk;
+
+  if (separator) {
+    // Output is 3 chars per byte minus the trailing separator; guard the
+    // multiplication against overflow.
+    if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 3))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    dst = prepare(op, size * 3 - 1);
+    if (ASMJIT_UNLIKELY(!dst))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    size_t i = 0;
+    for (;;) {
+      dst[0] = String_baseN[(src[0] >> 4) & 0xF];
+      dst[1] = String_baseN[(src[0]     ) & 0xF];
+      if (++i == size)
+        break;
+      // This makes sure that the separator is only put between two hexadecimal bytes.
+      dst[2] = separator;
+      dst += 3;
+      src++;
+    }
+  }
+  else {
+    if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 2))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    dst = prepare(op, size * 2);
+    if (ASMJIT_UNLIKELY(!dst))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    for (size_t i = 0; i < size; i++, dst += 2, src++) {
+      dst[0] = String_baseN[(src[0] >> 4) & 0xF];
+      dst[1] = String_baseN[(src[0]     ) & 0xF];
+    }
+  }
+
+  return kErrorOk;
+}
+
+// printf-style front-end; packs the variadic arguments and forwards to
+// `_opVFormat()`.
+Error String::_opFormat(uint32_t op, const char* fmt, ...) noexcept {
+  Error err;
+  va_list ap;
+
+  va_start(ap, fmt);
+  err = _opVFormat(op, fmt, ap);
+  va_end(ap);
+
+  return err;
+}
+
+// vprintf-style formatter. Fast path formats directly into the existing
+// buffer (or a stack buffer); otherwise it sizes the output via the first
+// vsnprintf() call and re-runs it into a freshly prepared buffer.
+Error String::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept {
+  size_t startAt = (op == kOpAssign) ? size_t(0) : size();
+  size_t remainingCapacity = capacity() - startAt;
+
+  char buf[1024];
+  int fmtResult;
+  size_t outputSize;
+
+  // `ap` is consumed by the first vsnprintf() call; keep a copy for the retry.
+  va_list apCopy;
+  va_copy(apCopy, ap);
+
+  if (remainingCapacity >= 128) {
+    fmtResult = vsnprintf(data() + startAt, remainingCapacity, fmt, ap);
+    outputSize = size_t(fmtResult);
+
+    // NOTE(review): vsnprintf() writes at most `remainingCapacity - 1` chars
+    // plus NUL, so `outputSize == remainingCapacity` looks like it accepts a
+    // truncated result - unless `capacity()` excludes the NUL byte so the
+    // buffer really holds `remainingCapacity + 1` bytes. Verify the invariant.
+    if (ASMJIT_LIKELY(outputSize <= remainingCapacity)) {
+      _setSize(startAt + outputSize);
+      return kErrorOk;
+    }
+  }
+  else {
+    fmtResult = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
+    outputSize = size_t(fmtResult);
+
+    if (ASMJIT_LIKELY(outputSize < ASMJIT_ARRAY_SIZE(buf)))
+      return _opString(op, buf, outputSize);
+  }
+
+  // A negative result is an encoding/format error, not a size.
+  if (ASMJIT_UNLIKELY(fmtResult < 0))
+    return DebugUtils::errored(kErrorInvalidState);
+
+  char* p = prepare(op, outputSize);
+  if (ASMJIT_UNLIKELY(!p))
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  fmtResult = vsnprintf(p, outputSize + 1, fmt, apCopy);
+  ASMJIT_ASSERT(size_t(fmtResult) == outputSize);
+
+  return kErrorOk;
+}
+
+// Shortens the string to `newSize` characters; a no-op if the string is
+// already shorter. Capacity/storage is never released.
+Error String::truncate(size_t newSize) noexcept {
+  if (isLarge()) {
+    if (newSize < _large.size) {
+      _large.data[newSize] = '\0';
+      _large.size = newSize;
+    }
+  }
+  else {
+    // SSO: `_type` holds the current size.
+    if (newSize < _type) {
+      _small.data[newSize] = '\0';
+      _small.type = uint8_t(newSize);
+    }
+  }
+
+  return kErrorOk;
+}
+
+// Compares the string with `other[0..size)`. Passing `size == SIZE_MAX`
+// compares against a NUL-terminated string without calling strlen() first.
+bool String::eq(const char* other, size_t size) const noexcept {
+  const char* aData = data();
+  const char* bData = other;
+
+  size_t aSize = this->size();
+  size_t bSize = size;
+
+  if (bSize == SIZE_MAX) {
+    // NUL-terminated comparison: the `bData[i] == 0` test also stops before
+    // reading past a `b` that is shorter than `a`.
+    size_t i;
+    for (i = 0; i < aSize; i++)
+      if (aData[i] != bData[i] || bData[i] == 0)
+        return false;
+    return bData[i] == 0;
+  }
+  else {
+    if (aSize != bSize)
+      return false;
+    return ::memcmp(aData, bData, aSize) == 0;
+  }
+}
+
+// ============================================================================
+// [asmjit::Support - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+// Unit test covering SSO vs. large representation, assignment, append,
+// clearing, number formatting, and external (StringTmp) buffers.
+UNIT(core_string) {
+  String s;
+
+  EXPECT(s.isLarge() == false);
+  EXPECT(s.isExternal() == false);
+
+  EXPECT(s.assign('a') == kErrorOk);
+  EXPECT(s.size() == 1);
+  EXPECT(s.capacity() == String::kSSOCapacity);
+  EXPECT(s.data()[0] == 'a');
+  EXPECT(s.data()[1] == '\0');
+  EXPECT(s.eq("a") == true);
+  EXPECT(s.eq("a", 1) == true);
+
+  EXPECT(s.assignChars('b', 4) == kErrorOk);
+  EXPECT(s.size() == 4);
+  EXPECT(s.capacity() == String::kSSOCapacity);
+  EXPECT(s.data()[0] == 'b');
+  EXPECT(s.data()[1] == 'b');
+  EXPECT(s.data()[2] == 'b');
+  EXPECT(s.data()[3] == 'b');
+  EXPECT(s.data()[4] == '\0');
+  EXPECT(s.eq("bbbb") == true);
+  EXPECT(s.eq("bbbb", 4) == true);
+
+  EXPECT(s.assign("abc") == kErrorOk);
+  EXPECT(s.size() == 3);
+  EXPECT(s.capacity() == String::kSSOCapacity);
+  EXPECT(s.data()[0] == 'a');
+  EXPECT(s.data()[1] == 'b');
+  EXPECT(s.data()[2] == 'c');
+  EXPECT(s.data()[3] == '\0');
+  EXPECT(s.eq("abc") == true);
+  EXPECT(s.eq("abc", 3) == true);
+
+  // Exceeding kSSOCapacity must switch to the heap-allocated representation.
+  const char* large = "Large string that will not fit into SSO buffer";
+  EXPECT(s.assign(large) == kErrorOk);
+  EXPECT(s.isLarge() == true);
+  EXPECT(s.size() == strlen(large));
+  EXPECT(s.capacity() > String::kSSOCapacity);
+  EXPECT(s.eq(large) == true);
+  EXPECT(s.eq(large, strlen(large)) == true);
+
+  const char* additional = " (additional content)";
+  EXPECT(s.isLarge() == true);
+  EXPECT(s.append(additional) == kErrorOk);
+  EXPECT(s.size() == strlen(large) + strlen(additional));
+
+  EXPECT(s.clear() == kErrorOk);
+  EXPECT(s.size() == 0);
+  EXPECT(s.empty() == true);
+  EXPECT(s.data()[0] == '\0');
+  EXPECT(s.isLarge() == true); // Clear should never release the memory.
+
+  EXPECT(s.appendUInt(1234) == kErrorOk);
+  EXPECT(s.eq("1234") == true);
+
+  // StringTmp starts with an external fixed buffer and must migrate to an
+  // owned heap buffer once it overflows.
+  StringTmp<64> sTmp;
+  EXPECT(sTmp.isLarge());
+  EXPECT(sTmp.isExternal());
+  EXPECT(sTmp.appendChars(' ', 1000) == kErrorOk);
+  EXPECT(!sTmp.isExternal());
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/string.h b/client/asmjit/core/string.h
new file mode 100644
index 0000000..4c490d8
--- /dev/null
+++ b/client/asmjit/core/string.h
@@ -0,0 +1,400 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_STRING_H_INCLUDED
+#define ASMJIT_CORE_STRING_H_INCLUDED
+
+#include "../core/support.h"
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_utilities
+//! \{
+
+// ============================================================================
+// [asmjit::FixedString]
+// ============================================================================
+
+//! A fixed string - only useful for strings that would never exceed `N - 1`
+//! characters; always null-terminated.
+template<size_t N>
+union FixedString {
+ enum : uint32_t {
+ kNumU32 = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t))
+ };
+
+ char str[kNumU32 * sizeof(uint32_t)];
+ uint32_t u32[kNumU32];
+
+ //! \name Utilities
+ //! \{
+
+ inline bool eq(const char* other) const noexcept {
+ return strcmp(str, other) == 0;
+ }
+
+ //! \}
+};
+// ============================================================================
+// [asmjit::String]
+// ============================================================================
+
+//! A simple non-reference counted string that uses small string optimization (SSO).
+//!
+//! This string has 3 allocation possibilities:
+//!
+//! 1. Small - embedded buffer is used for up to `kSSOCapacity` characters.
+//! This should handle most small strings and thus avoid dynamic
+//! memory allocation for most use-cases.
+//!
+//! 2. Large - string that doesn't fit into an embedded buffer (or string
+//! that was truncated from a larger buffer) and is owned by
+//! AsmJit. When you destroy the string AsmJit would automatically
+//! release the large buffer.
+//!
+//! 3. External - like Large (2), however, the large buffer is not owned by
+//! AsmJit and won't be released when the string is destroyed
+//! or reallocated. This is mostly useful for working with
+//! larger temporary strings allocated on stack or with immutable
+//! strings.
+class String {
+public:
+ ASMJIT_NONCOPYABLE(String)
+
+ //! String operation.
+ enum Op : uint32_t {
+ //! Assignment - a new content replaces the current one.
+ kOpAssign = 0,
+ //! Append - a new content is appended to the string.
+ kOpAppend = 1
+ };
+
+ //! String format flags.
+ enum FormatFlags : uint32_t {
+ kFormatShowSign = 0x00000001u,
+ kFormatShowSpace = 0x00000002u,
+ kFormatAlternate = 0x00000004u,
+ kFormatSigned = 0x80000000u
+ };
+
+ //! \cond INTERNAL
+ enum : uint32_t {
+ kLayoutSize = 32,
+ kSSOCapacity = kLayoutSize - 2
+ };
+
+ //! String type.
+ enum Type : uint8_t {
+ kTypeLarge = 0x1Fu, //!< Large string (owned by String).
+ kTypeExternal = 0x20u //!< External string (zone allocated or not owned by String).
+ };
+
+ union Raw {
+ uint8_t u8[kLayoutSize];
+ uint64_t u64[kLayoutSize / sizeof(uint64_t)];
+ uintptr_t uptr[kLayoutSize / sizeof(uintptr_t)];
+ };
+
+ struct Small {
+ uint8_t type;
+ char data[kSSOCapacity + 1u];
+ };
+
+ struct Large {
+ uint8_t type;
+ uint8_t reserved[sizeof(uintptr_t) - 1];
+ size_t size;
+ size_t capacity;
+ char* data;
+ };
+
+ union {
+ uint8_t _type;
+ Raw _raw;
+ Small _small;
+ Large _large;
+ };
+ //! \endcond
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a default-initialized string if zero length.
+ inline String() noexcept
+ : _small {} {}
+
+ //! Creates a string that takes ownership of the content of the `other` string.
+ inline String(String&& other) noexcept {
+ _raw = other._raw;
+ other._resetInternal();
+ }
+
+ inline ~String() noexcept {
+ reset();
+ }
+
+ //! Reset the string into a construction state.
+ ASMJIT_API Error reset() noexcept;
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline String& operator=(String&& other) noexcept {
+ swap(other);
+ other.reset();
+ return *this;
+ }
+
+ inline bool operator==(const char* other) const noexcept { return eq(other); }
+ inline bool operator!=(const char* other) const noexcept { return !eq(other); }
+
+ inline bool operator==(const String& other) const noexcept { return eq(other); }
+ inline bool operator!=(const String& other) const noexcept { return !eq(other); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool isLarge() const noexcept { return _type >= kTypeLarge; }
+ inline bool isExternal() const noexcept { return _type == kTypeExternal; }
+
+ //! Tests whether the string is empty.
+ inline bool empty() const noexcept { return size() == 0; }
+ //! Returns the size of the string.
+ inline size_t size() const noexcept { return isLarge() ? size_t(_large.size) : size_t(_type); }
+ //! Returns the capacity of the string.
+ inline size_t capacity() const noexcept { return isLarge() ? _large.capacity : size_t(kSSOCapacity); }
+
+ //! Returns the data of the string.
+ inline char* data() noexcept { return isLarge() ? _large.data : _small.data; }
+ //! \overload
+ inline const char* data() const noexcept { return isLarge() ? _large.data : _small.data; }
+
+ inline char* start() noexcept { return data(); }
+ inline const char* start() const noexcept { return data(); }
+
+ inline char* end() noexcept { return data() + size(); }
+ inline const char* end() const noexcept { return data() + size(); }
+
+ //! \}
+
+ //! \name String Operations
+ //! \{
+
+ //! Swaps the content of this string with `other`.
+ inline void swap(String& other) noexcept {
+ std::swap(_raw, other._raw);
+ }
+
+ //! Clears the content of the string.
+ ASMJIT_API Error clear() noexcept;
+
+ ASMJIT_API char* prepare(uint32_t op, size_t size) noexcept;
+
+ ASMJIT_API Error _opString(uint32_t op, const char* str, size_t size = SIZE_MAX) noexcept;
+ ASMJIT_API Error _opChar(uint32_t op, char c) noexcept;
+ ASMJIT_API Error _opChars(uint32_t op, char c, size_t n) noexcept;
+ ASMJIT_API Error _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept;
+ ASMJIT_API Error _opHex(uint32_t op, const void* data, size_t size, char separator = '\0') noexcept;
+ ASMJIT_API Error _opFormat(uint32_t op, const char* fmt, ...) noexcept;
+ ASMJIT_API Error _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept;
+
+ //! Replaces the current of the string with `data` of the given `size`.
+ //!
+ //! Null terminated strings can set `size` to `SIZE_MAX`.
+ ASMJIT_API Error assign(const char* data, size_t size = SIZE_MAX) noexcept;
+
+ //! Replaces the current of the string with `other` string.
+ inline Error assign(const String& other) noexcept {
+ return assign(other.data(), other.size());
+ }
+
+ //! Replaces the current of the string by a single `c` character.
+ inline Error assign(char c) noexcept {
+ return _opChar(kOpAssign, c);
+ }
+
+ //! Replaces the current of the string by a `c` character, repeated `n` times.
+ inline Error assignChars(char c, size_t n) noexcept {
+ return _opChars(kOpAssign, c, n);
+ }
+
+ //! Replaces the current of the string by a formatted integer `i` (signed).
+ inline Error assignInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
+ return _opNumber(kOpAssign, uint64_t(i), base, width, flags | kFormatSigned);
+ }
+
+ //! Replaces the current of the string by a formatted integer `i` (unsigned).
+ inline Error assignUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
+ return _opNumber(kOpAssign, i, base, width, flags);
+ }
+
+ //! Replaces the current of the string by the given `data` converted to a HEX string.
+ inline Error assignHex(const void* data, size_t size, char separator = '\0') noexcept {
+ return _opHex(kOpAssign, data, size, separator);
+ }
+
+ //! Replaces the current of the string by a formatted string `fmt`.
+ template<typename... Args>
+ inline Error assignFormat(const char* fmt, Args&&... args) noexcept {
+ return _opFormat(kOpAssign, fmt, std::forward<Args>(args)...);
+ }
+
+ //! Replaces the current of the string by a formatted string `fmt` (va_list version).
+ inline Error assignVFormat(const char* fmt, va_list ap) noexcept {
+ return _opVFormat(kOpAssign, fmt, ap);
+ }
+
+ //! Appends `str` having the given size `size` to the string.
+ //!
+ //! Null terminated strings can set `size` to `SIZE_MAX`.
+ inline Error append(const char* str, size_t size = SIZE_MAX) noexcept {
+ return _opString(kOpAppend, str, size);
+ }
+
+ //! Appends `other` string to this string.
+ inline Error append(const String& other) noexcept {
+ return append(other.data(), other.size());
+ }
+
+ //! Appends a single `c` character.
+ inline Error append(char c) noexcept {
+ return _opChar(kOpAppend, c);
+ }
+
+ //! Appends `c` character repeated `n` times.
+ inline Error appendChars(char c, size_t n) noexcept {
+ return _opChars(kOpAppend, c, n);
+ }
+
+ //! Appends a formatted integer `i` (signed).
+ inline Error appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
+ return _opNumber(kOpAppend, uint64_t(i), base, width, flags | kFormatSigned);
+ }
+
+ //! Appends a formatted integer `i` (unsigned).
+ inline Error appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
+ return _opNumber(kOpAppend, i, base, width, flags);
+ }
+
+ //! Appends the given `data` converted to a HEX string.
+ inline Error appendHex(const void* data, size_t size, char separator = '\0') noexcept {
+ return _opHex(kOpAppend, data, size, separator);
+ }
+
+ //! Appends a formatted string `fmt` with `args`.
+ template<typename... Args>
+ inline Error appendFormat(const char* fmt, Args&&... args) noexcept {
+ return _opFormat(kOpAppend, fmt, std::forward<Args>(args)...);
+ }
+
+ //! Appends a formatted string `fmt` (va_list version).
+ inline Error appendVFormat(const char* fmt, va_list ap) noexcept {
+ return _opVFormat(kOpAppend, fmt, ap);
+ }
+
+ ASMJIT_API Error padEnd(size_t n, char c = ' ') noexcept;
+
+ //! Truncate the string length into `newSize`.
+ ASMJIT_API Error truncate(size_t newSize) noexcept;
+
+ ASMJIT_API bool eq(const char* other, size_t size = SIZE_MAX) const noexcept;
+ inline bool eq(const String& other) const noexcept { return eq(other.data(), other.size()); }
+
+ //! \}
+
+ //! \name Internal Functions
+ //! \{
+
+ //! Resets string to embedded and makes it empty (zero length, zero first char)
+ //!
+ //! \note This is always called internally after an external buffer was released
+ //! as it zeroes all bytes used by String's embedded storage.
+ inline void _resetInternal() noexcept {
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++)
+ _raw.uptr[i] = 0;
+ }
+
+ inline void _setSize(size_t newSize) noexcept {
+ if (isLarge())
+ _large.size = newSize;
+ else
+ _small.type = uint8_t(newSize);
+ }
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use assign() instead of assignString()")
+ inline Error assignString(const char* data, size_t size = SIZE_MAX) noexcept { return assign(data, size); }
+
+ ASMJIT_DEPRECATED("Use assign() instead of assignChar()")
+ inline Error assignChar(char c) noexcept { return assign(c); }
+
+ ASMJIT_DEPRECATED("Use append() instead of appendString()")
+ inline Error appendString(const char* data, size_t size = SIZE_MAX) noexcept { return append(data, size); }
+
+ ASMJIT_DEPRECATED("Use append() instead of appendChar()")
+ inline Error appendChar(char c) noexcept { return append(c); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+// ============================================================================
+// [asmjit::StringTmp]
+// ============================================================================
+
+//! Temporary string builder, has statically allocated `N` bytes.
+template<size_t N>
+class StringTmp : public String {
+public:
+ ASMJIT_NONCOPYABLE(StringTmp<N>)
+
+ //! Embedded data.
+ char _embeddedData[Support::alignUp(N + 1, sizeof(size_t))];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline StringTmp() noexcept {
+ _resetToTemporary();
+ }
+
+ inline void _resetToTemporary() noexcept {
+ _large.type = kTypeExternal;
+ _large.capacity = ASMJIT_ARRAY_SIZE(_embeddedData) - 1;
+ _large.data = _embeddedData;
+ _embeddedData[0] = '\0';
+ }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_STRING_H_INCLUDED
diff --git a/client/asmjit/core/support.cpp b/client/asmjit/core/support.cpp
new file mode 100644
index 0000000..a99477d
--- /dev/null
+++ b/client/asmjit/core/support.cpp
@@ -0,0 +1,507 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Support - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+template<typename T>
+static void testArrays(const T* a, const T* b, size_t size) noexcept {
+ for (size_t i = 0; i < size; i++)
+ EXPECT(a[i] == b[i], "Mismatch at %u", unsigned(i));
+}
+
+static void testAlignment() noexcept {
+ INFO("Support::isAligned()");
+ EXPECT(Support::isAligned<size_t>(0xFFFF, 4) == false);
+ EXPECT(Support::isAligned<size_t>(0xFFF4, 4) == true);
+ EXPECT(Support::isAligned<size_t>(0xFFF8, 8) == true);
+ EXPECT(Support::isAligned<size_t>(0xFFF0, 16) == true);
+
+ INFO("Support::alignUp()");
+ EXPECT(Support::alignUp<size_t>(0xFFFF, 4) == 0x10000);
+ EXPECT(Support::alignUp<size_t>(0xFFF4, 4) == 0x0FFF4);
+ EXPECT(Support::alignUp<size_t>(0xFFF8, 8) == 0x0FFF8);
+ EXPECT(Support::alignUp<size_t>(0xFFF0, 16) == 0x0FFF0);
+ EXPECT(Support::alignUp<size_t>(0xFFF0, 32) == 0x10000);
+
+ INFO("Support::alignUpDiff()");
+ EXPECT(Support::alignUpDiff<size_t>(0xFFFF, 4) == 1);
+ EXPECT(Support::alignUpDiff<size_t>(0xFFF4, 4) == 0);
+ EXPECT(Support::alignUpDiff<size_t>(0xFFF8, 8) == 0);
+ EXPECT(Support::alignUpDiff<size_t>(0xFFF0, 16) == 0);
+ EXPECT(Support::alignUpDiff<size_t>(0xFFF0, 32) == 16);
+
+ INFO("Support::alignUpPowerOf2()");
+ EXPECT(Support::alignUpPowerOf2<size_t>(0x0000) == 0x00000);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0xFFFF) == 0x10000);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0xF123) == 0x10000);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0x0F00) == 0x01000);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0x0100) == 0x00100);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0x1001) == 0x02000);
+}
+
+static void testBitUtils() noexcept {
+ uint32_t i;
+
+ INFO("Support::shl() / shr()");
+ EXPECT(Support::shl(int32_t(0x00001111), 16) == int32_t(0x11110000u));
+ EXPECT(Support::shl(uint32_t(0x00001111), 16) == uint32_t(0x11110000u));
+ EXPECT(Support::shr(int32_t(0x11110000u), 16) == int32_t(0x00001111u));
+ EXPECT(Support::shr(uint32_t(0x11110000u), 16) == uint32_t(0x00001111u));
+ EXPECT(Support::sar(int32_t(0xFFFF0000u), 16) == int32_t(0xFFFFFFFFu));
+ EXPECT(Support::sar(uint32_t(0xFFFF0000u), 16) == uint32_t(0xFFFFFFFFu));
+
+ INFO("Support::blsi()");
+ for (i = 0; i < 32; i++) EXPECT(Support::blsi(uint32_t(1) << i) == uint32_t(1) << i);
+ for (i = 0; i < 31; i++) EXPECT(Support::blsi(uint32_t(3) << i) == uint32_t(1) << i);
+ for (i = 0; i < 64; i++) EXPECT(Support::blsi(uint64_t(1) << i) == uint64_t(1) << i);
+ for (i = 0; i < 63; i++) EXPECT(Support::blsi(uint64_t(3) << i) == uint64_t(1) << i);
+
+ INFO("Support::ctz()");
+ for (i = 0; i < 32; i++) EXPECT(Support::ctz(uint32_t(1) << i) == i);
+ for (i = 0; i < 64; i++) EXPECT(Support::ctz(uint64_t(1) << i) == i);
+ for (i = 0; i < 32; i++) EXPECT(Support::constCtz(uint32_t(1) << i) == i);
+ for (i = 0; i < 64; i++) EXPECT(Support::constCtz(uint64_t(1) << i) == i);
+
+ INFO("Support::bitMask()");
+ EXPECT(Support::bitMask(0, 1, 7) == 0x83u);
+ for (i = 0; i < 32; i++)
+ EXPECT(Support::bitMask(i) == (1u << i));
+
+ INFO("Support::bitTest()");
+ for (i = 0; i < 32; i++) {
+ EXPECT(Support::bitTest((1 << i), i) == true, "Support::bitTest(%X, %u) should return true", (1 << i), i);
+ }
+
+ INFO("Support::lsbMask<uint32_t>()");
+ for (i = 0; i < 32; i++) {
+ uint32_t expectedBits = 0;
+ for (uint32_t b = 0; b < i; b++)
+ expectedBits |= uint32_t(1) << b;
+ EXPECT(Support::lsbMask<uint32_t>(i) == expectedBits);
+ }
+
+ INFO("Support::lsbMask<uint64_t>()");
+ for (i = 0; i < 64; i++) {
+ uint64_t expectedBits = 0;
+ for (uint32_t b = 0; b < i; b++)
+ expectedBits |= uint64_t(1) << b;
+ EXPECT(Support::lsbMask<uint64_t>(i) == expectedBits);
+ }
+
+ INFO("Support::popcnt()");
+ for (i = 0; i < 32; i++) EXPECT(Support::popcnt((uint32_t(1) << i)) == 1);
+ for (i = 0; i < 64; i++) EXPECT(Support::popcnt((uint64_t(1) << i)) == 1);
+ EXPECT(Support::popcnt(0x000000F0) == 4);
+ EXPECT(Support::popcnt(0x10101010) == 4);
+ EXPECT(Support::popcnt(0xFF000000) == 8);
+ EXPECT(Support::popcnt(0xFFFFFFF7) == 31);
+ EXPECT(Support::popcnt(0x7FFFFFFF) == 31);
+
+ INFO("Support::isPowerOf2()");
+ for (i = 0; i < 64; i++) {
+ EXPECT(Support::isPowerOf2(uint64_t(1) << i) == true);
+ EXPECT(Support::isPowerOf2((uint64_t(1) << i) ^ 0x001101) == false);
+ }
+}
+
+static void testIntUtils() noexcept {
+ INFO("Support::byteswap()");
+ EXPECT(Support::byteswap32(int32_t(0x01020304)) == int32_t(0x04030201));
+ EXPECT(Support::byteswap32(uint32_t(0x01020304)) == uint32_t(0x04030201));
+
+ INFO("Support::bytepack()");
+ union BytePackData {
+ uint8_t bytes[4];
+ uint32_t u32;
+ } bpdata;
+
+ bpdata.u32 = Support::bytepack32_4x8(0x00, 0x11, 0x22, 0x33);
+ EXPECT(bpdata.bytes[0] == 0x00);
+ EXPECT(bpdata.bytes[1] == 0x11);
+ EXPECT(bpdata.bytes[2] == 0x22);
+ EXPECT(bpdata.bytes[3] == 0x33);
+
+ INFO("Support::isBetween()");
+ EXPECT(Support::isBetween<int>(10 , 10, 20) == true);
+ EXPECT(Support::isBetween<int>(11 , 10, 20) == true);
+ EXPECT(Support::isBetween<int>(20 , 10, 20) == true);
+ EXPECT(Support::isBetween<int>(9 , 10, 20) == false);
+ EXPECT(Support::isBetween<int>(21 , 10, 20) == false);
+ EXPECT(Support::isBetween<int>(101, 10, 20) == false);
+
+ INFO("Support::isInt8()");
+ EXPECT(Support::isInt8(-128) == true);
+ EXPECT(Support::isInt8( 127) == true);
+ EXPECT(Support::isInt8(-129) == false);
+ EXPECT(Support::isInt8( 128) == false);
+
+ INFO("Support::isInt16()");
+ EXPECT(Support::isInt16(-32768) == true);
+ EXPECT(Support::isInt16( 32767) == true);
+ EXPECT(Support::isInt16(-32769) == false);
+ EXPECT(Support::isInt16( 32768) == false);
+
+ INFO("Support::isInt32()");
+ EXPECT(Support::isInt32( 2147483647 ) == true);
+ EXPECT(Support::isInt32(-2147483647 - 1) == true);
+ EXPECT(Support::isInt32(uint64_t(2147483648u)) == false);
+ EXPECT(Support::isInt32(uint64_t(0xFFFFFFFFu)) == false);
+ EXPECT(Support::isInt32(uint64_t(0xFFFFFFFFu) + 1) == false);
+
+ INFO("Support::isUInt8()");
+ EXPECT(Support::isUInt8(0) == true);
+ EXPECT(Support::isUInt8(255) == true);
+ EXPECT(Support::isUInt8(256) == false);
+ EXPECT(Support::isUInt8(-1) == false);
+
+ INFO("Support::isUInt12()");
+ EXPECT(Support::isUInt12(0) == true);
+ EXPECT(Support::isUInt12(4095) == true);
+ EXPECT(Support::isUInt12(4096) == false);
+ EXPECT(Support::isUInt12(-1) == false);
+
+ INFO("Support::isUInt16()");
+ EXPECT(Support::isUInt16(0) == true);
+ EXPECT(Support::isUInt16(65535) == true);
+ EXPECT(Support::isUInt16(65536) == false);
+ EXPECT(Support::isUInt16(-1) == false);
+
+ INFO("Support::isUInt32()");
+ EXPECT(Support::isUInt32(uint64_t(0xFFFFFFFF)) == true);
+ EXPECT(Support::isUInt32(uint64_t(0xFFFFFFFF) + 1) == false);
+ EXPECT(Support::isUInt32(-1) == false);
+}
+
+static void testReadWrite() noexcept {
+ INFO("Support::readX() / writeX()");
+
+ uint8_t arr[32] = { 0 };
+
+ Support::writeU16uBE(arr + 1, 0x0102u);
+ Support::writeU16uBE(arr + 3, 0x0304u);
+ EXPECT(Support::readU32uBE(arr + 1) == 0x01020304u);
+ EXPECT(Support::readU32uLE(arr + 1) == 0x04030201u);
+ EXPECT(Support::readU32uBE(arr + 2) == 0x02030400u);
+ EXPECT(Support::readU32uLE(arr + 2) == 0x00040302u);
+
+ Support::writeU32uLE(arr + 5, 0x05060708u);
+ EXPECT(Support::readU64uBE(arr + 1) == 0x0102030408070605u);
+ EXPECT(Support::readU64uLE(arr + 1) == 0x0506070804030201u);
+
+ Support::writeU64uLE(arr + 7, 0x1122334455667788u);
+ EXPECT(Support::readU32uBE(arr + 8) == 0x77665544u);
+}
+
+static void testBitVector() noexcept {
+ INFO("Support::bitVectorOp");
+ {
+ uint32_t vec[3] = { 0 };
+ Support::bitVectorFill(vec, 1, 64);
+ EXPECT(vec[0] == 0xFFFFFFFEu);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorClear(vec, 1, 1);
+ EXPECT(vec[0] == 0xFFFFFFFCu);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorFill(vec, 0, 32);
+ EXPECT(vec[0] == 0xFFFFFFFFu);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorClear(vec, 0, 32);
+ EXPECT(vec[0] == 0x00000000u);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorFill(vec, 1, 30);
+ EXPECT(vec[0] == 0x7FFFFFFEu);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorClear(vec, 1, 95);
+ EXPECT(vec[0] == 0x00000000u);
+ EXPECT(vec[1] == 0x00000000u);
+ EXPECT(vec[2] == 0x00000000u);
+
+ Support::bitVectorFill(vec, 32, 64);
+ EXPECT(vec[0] == 0x00000000u);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0xFFFFFFFFu);
+
+ Support::bitVectorSetBit(vec, 1, true);
+ EXPECT(vec[0] == 0x00000002u);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0xFFFFFFFFu);
+
+ Support::bitVectorSetBit(vec, 95, false);
+ EXPECT(vec[0] == 0x00000002u);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x7FFFFFFFu);
+
+ Support::bitVectorClear(vec, 33, 32);
+ EXPECT(vec[0] == 0x00000002u);
+ EXPECT(vec[1] == 0x00000001u);
+ EXPECT(vec[2] == 0x7FFFFFFEu);
+ }
+
+ INFO("Support::bitVectorIndexOf");
+ {
+ uint32_t vec1[1] = { 0x80000000 };
+ EXPECT(Support::bitVectorIndexOf(vec1, 0, true) == 31);
+ EXPECT(Support::bitVectorIndexOf(vec1, 1, true) == 31);
+ EXPECT(Support::bitVectorIndexOf(vec1, 31, true) == 31);
+
+ uint32_t vec2[2] = { 0x00000000, 0x80000000 };
+ EXPECT(Support::bitVectorIndexOf(vec2, 0, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 1, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 31, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 32, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 33, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 63, true) == 63);
+
+ uint32_t vec3[3] = { 0x00000001, 0x00000000, 0x80000000 };
+ EXPECT(Support::bitVectorIndexOf(vec3, 0, true) == 0);
+ EXPECT(Support::bitVectorIndexOf(vec3, 1, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 2, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 31, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 32, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 63, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 64, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 95, true) == 95);
+
+ uint32_t vec4[3] = { ~vec3[0], ~vec3[1], ~vec3[2] };
+ EXPECT(Support::bitVectorIndexOf(vec4, 0, false) == 0);
+ EXPECT(Support::bitVectorIndexOf(vec4, 1, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 2, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 31, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 32, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 63, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 64, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 95, false) == 95);
+ }
+
+ INFO("Support::BitWordIterator<uint32_t>");
+ {
+ Support::BitWordIterator<uint32_t> it(0x80000F01u);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 0);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 8);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 9);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 10);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 11);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+ EXPECT(!it.hasNext());
+
+ // No bits set.
+ it.init(0x00000000u);
+ ASMJIT_ASSERT(!it.hasNext());
+
+ // Only first bit set.
+ it.init(0x00000001u);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 0);
+ ASMJIT_ASSERT(!it.hasNext());
+
+ // Only last bit set (special case).
+ it.init(0x80000000u);
+ ASMJIT_ASSERT(it.hasNext());
+ ASMJIT_ASSERT(it.next() == 31);
+ ASMJIT_ASSERT(!it.hasNext());
+ }
+
+ INFO("Support::BitWordIterator<uint64_t>");
+ {
+ Support::BitWordIterator<uint64_t> it(uint64_t(1) << 63);
+ ASMJIT_ASSERT(it.hasNext());
+ ASMJIT_ASSERT(it.next() == 63);
+ ASMJIT_ASSERT(!it.hasNext());
+ }
+
+ INFO("Support::BitVectorIterator<uint32_t>");
+ {
+ // Border cases.
+ static const uint32_t bitsNone[] = { 0xFFFFFFFFu };
+ Support::BitVectorIterator<uint32_t> it(bitsNone, 0);
+
+ EXPECT(!it.hasNext());
+ it.init(bitsNone, 0, 1);
+ EXPECT(!it.hasNext());
+ it.init(bitsNone, 0, 128);
+ EXPECT(!it.hasNext());
+
+ static const uint32_t bits1[] = { 0x80000008u, 0x80000001u, 0x00000000u, 0x80000000u, 0x00000000u, 0x00000000u, 0x00003000u };
+ it.init(bits1, ASMJIT_ARRAY_SIZE(bits1));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 3);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 32);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 63);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 204);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 205);
+ EXPECT(!it.hasNext());
+
+ it.init(bits1, ASMJIT_ARRAY_SIZE(bits1), 4);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+
+ it.init(bits1, ASMJIT_ARRAY_SIZE(bits1), 64);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+
+ it.init(bits1, ASMJIT_ARRAY_SIZE(bits1), 127);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+
+ static const uint32_t bits2[] = { 0x80000000u, 0x80000000u, 0x00000000u, 0x80000000u };
+ it.init(bits2, ASMJIT_ARRAY_SIZE(bits2));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 63);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+ EXPECT(!it.hasNext());
+
+ static const uint32_t bits3[] = { 0x00000000u, 0x00000000u, 0x00000000u, 0x00000000u };
+ it.init(bits3, ASMJIT_ARRAY_SIZE(bits3));
+ EXPECT(!it.hasNext());
+
+ static const uint32_t bits4[] = { 0x00000000u, 0x00000000u, 0x00000000u, 0x80000000u };
+ it.init(bits4, ASMJIT_ARRAY_SIZE(bits4));
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+ EXPECT(!it.hasNext());
+ }
+
+ INFO("Support::BitVectorIterator<uint64_t>");
+ {
+ static const uint64_t bits1[] = { 0x80000000u, 0x80000000u, 0x00000000u, 0x80000000u };
+ Support::BitVectorIterator<uint64_t> it(bits1, ASMJIT_ARRAY_SIZE(bits1));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 95);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 223);
+ EXPECT(!it.hasNext());
+
+ static const uint64_t bits2[] = { 0x8000000000000000u, 0, 0, 0 };
+ it.init(bits2, ASMJIT_ARRAY_SIZE(bits2));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 63);
+ EXPECT(!it.hasNext());
+ }
+}
+
+static void testSorting() noexcept {
+ INFO("Support::qSort() - Testing qsort and isort of predefined arrays");
+ {
+ constexpr size_t kArraySize = 11;
+
+ int ref_[kArraySize] = { -4, -2, -1, 0, 1, 9, 12, 13, 14, 19, 22 };
+ int arr1[kArraySize] = { 0, 1, -1, 19, 22, 14, -4, 9, 12, 13, -2 };
+ int arr2[kArraySize];
+
+ memcpy(arr2, arr1, kArraySize * sizeof(int));
+
+ Support::iSort(arr1, kArraySize);
+ Support::qSort(arr2, kArraySize);
+ testArrays(arr1, ref_, kArraySize);
+ testArrays(arr2, ref_, kArraySize);
+ }
+
+ INFO("Support::qSort() - Testing qsort and isort of artificial arrays");
+ {
+ constexpr size_t kArraySize = 200;
+
+ int arr1[kArraySize];
+ int arr2[kArraySize];
+ int ref_[kArraySize];
+
+ for (size_t size = 2; size < kArraySize; size++) {
+ for (size_t i = 0; i < size; i++) {
+ arr1[i] = int(size - 1 - i);
+ arr2[i] = int(size - 1 - i);
+ ref_[i] = int(i);
+ }
+
+ Support::iSort(arr1, size);
+ Support::qSort(arr2, size);
+ testArrays(arr1, ref_, size);
+ testArrays(arr2, ref_, size);
+ }
+ }
+
+ INFO("Support::qSort() - Testing qsort and isort with an unstable compare function");
+ {
+ constexpr size_t kArraySize = 5;
+
+ float arr1[kArraySize] = { 1.0f, 0.0f, 3.0f, -1.0f, std::numeric_limits<float>::quiet_NaN() };
+ float arr2[kArraySize] = { };
+
+ memcpy(arr2, arr1, kArraySize * sizeof(float));
+
+ // We don't test as it's undefined where the NaN would be.
+ Support::iSort(arr1, kArraySize);
+ Support::qSort(arr2, kArraySize);
+ }
+}
+
+UNIT(support) {
+ testAlignment();
+ testBitUtils();
+ testIntUtils();
+ testReadWrite();
+ testBitVector();
+ testSorting();
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/support.h b/client/asmjit/core/support.h
new file mode 100644
index 0000000..9e2bee4
--- /dev/null
+++ b/client/asmjit/core/support.h
@@ -0,0 +1,1516 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_SUPPORT_H_INCLUDED
+#define ASMJIT_CORE_SUPPORT_H_INCLUDED
+
+#include "../core/globals.h"
+
+#if defined(_MSC_VER)
+ #include <intrin.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_utilities
+//! \{
+
+//! Contains support classes and functions that may be used by AsmJit source
+//! and header files. Anything defined here is considered internal and should
+//! not be used outside of AsmJit and related projects like AsmTK.
+namespace Support {
+
+// ============================================================================
+// [asmjit::Support - Architecture Features & Constraints]
+// ============================================================================
+
//! \cond INTERNAL
// Whether unaligned 16/32/64-bit memory access is permitted on the target
// architecture. Currently only enabled on X86/X64 (ASMJIT_ARCH_X86 != 0).
static constexpr bool kUnalignedAccess16 = ASMJIT_ARCH_X86 != 0;
static constexpr bool kUnalignedAccess32 = ASMJIT_ARCH_X86 != 0;
static constexpr bool kUnalignedAccess64 = ASMJIT_ARCH_X86 != 0;
//! \endcond

// ============================================================================
// [asmjit::Support - Internal]
// ============================================================================

//! \cond INTERNAL
namespace Internal {
  // AlignedInt<T, Alignment>::T is an integer type identical to `T` but
  // annotated with an explicit alignment attribute, so the compiler emits
  // correct loads/stores for under-aligned pointers. The primary template is
  // intentionally empty so unsupported combinations fail to compile.
  template<typename T, size_t Alignment>
  struct AlignedInt {};

  template<> struct AlignedInt<uint16_t, 1> { typedef uint16_t ASMJIT_ALIGN_TYPE(T, 1); };
  template<> struct AlignedInt<uint16_t, 2> { typedef uint16_t T; };
  template<> struct AlignedInt<uint32_t, 1> { typedef uint32_t ASMJIT_ALIGN_TYPE(T, 1); };
  template<> struct AlignedInt<uint32_t, 2> { typedef uint32_t ASMJIT_ALIGN_TYPE(T, 2); };
  template<> struct AlignedInt<uint32_t, 4> { typedef uint32_t T; };
  template<> struct AlignedInt<uint64_t, 1> { typedef uint64_t ASMJIT_ALIGN_TYPE(T, 1); };
  template<> struct AlignedInt<uint64_t, 2> { typedef uint64_t ASMJIT_ALIGN_TYPE(T, 2); };
  template<> struct AlignedInt<uint64_t, 4> { typedef uint64_t ASMJIT_ALIGN_TYPE(T, 4); };
  template<> struct AlignedInt<uint64_t, 8> { typedef uint64_t T; };

  // StdInt - Make an int-type by size (signed or unsigned) that is the
  // same as types defined by <stdint.h>.
  // Int32Or64 - Make an int-type that has at least 32 bits: [u]int[32|64]_t.

  template<size_t Size, unsigned Unsigned>
  struct StdInt {}; // Fail if not specialized.

  template<> struct StdInt<1, 0> { typedef int8_t Type; };
  template<> struct StdInt<1, 1> { typedef uint8_t Type; };
  template<> struct StdInt<2, 0> { typedef int16_t Type; };
  template<> struct StdInt<2, 1> { typedef uint16_t Type; };
  template<> struct StdInt<4, 0> { typedef int32_t Type; };
  template<> struct StdInt<4, 1> { typedef uint32_t Type; };
  template<> struct StdInt<8, 0> { typedef int64_t Type; };
  template<> struct StdInt<8, 1> { typedef uint64_t Type; };

  // Widens sub-32-bit types to 32 bits; 64-bit types stay 64-bit. Signedness
  // follows `Unsigned` (defaults to the signedness of `T`).
  template<typename T, int Unsigned = std::is_unsigned<T>::value>
  struct Int32Or64 : public StdInt<sizeof(T) <= 4 ? size_t(4) : sizeof(T), Unsigned> {};
}
//! \endcond
+
+// ============================================================================
+// [asmjit::Support - Basic Traits]
+// ============================================================================
+
//! Returns true if `T` is an unsigned type (wrapper over `std::is_unsigned`).
template<typename T>
static constexpr bool isUnsigned() noexcept { return std::is_unsigned<T>::value; }

// ============================================================================
// [asmjit::Support - FastUInt8]
// ============================================================================

// Fastest unsigned type able to hold an 8-bit value: plain `uint8_t` on X86
// (byte operations are cheap there), otherwise a native `unsigned int`.
#if ASMJIT_ARCH_X86
typedef uint8_t FastUInt8;
#else
typedef unsigned int FastUInt8;
#endif
+
+// ============================================================================
+// [asmjit::Support - asInt / asUInt / asNormalized]
+// ============================================================================
+
//! Casts an integer `x` to either `int32_t` or `int64_t` depending on `T`.
template<typename T>
static constexpr typename Internal::Int32Or64<T, 0>::Type asInt(const T& x) noexcept {
  return (typename Internal::Int32Or64<T, 0>::Type)x;
}

//! Casts an integer `x` to either `uint32_t` or `uint64_t` depending on `T`.
template<typename T>
static constexpr typename Internal::Int32Or64<T, 1>::Type asUInt(const T& x) noexcept {
  return (typename Internal::Int32Or64<T, 1>::Type)x;
}

//! Casts an integer `x` to either `int32_t`, uint32_t`, `int64_t`, or `uint64_t` depending on `T`.
//! Signedness of the result follows the signedness of `T`.
template<typename T>
static constexpr typename Internal::Int32Or64<T>::Type asNormalized(const T& x) noexcept {
  return (typename Internal::Int32Or64<T>::Type)x;
}

//! Casts an integer `x` to the same type as defined by `<stdint.h>`.
//! Size and signedness are preserved, only the spelling is canonicalized.
template<typename T>
static constexpr typename Internal::StdInt<sizeof(T), isUnsigned<T>()>::Type asStdInt(const T& x) noexcept {
  return (typename Internal::StdInt<sizeof(T), isUnsigned<T>()>::Type)x;
}
+
+// ============================================================================
+// [asmjit::Support - BitCast]
+// ============================================================================
+
//! \cond
namespace Internal {
  // Union used to reinterpret the bit pattern of `SrcT` as `DstT`.
  // NOTE(review): reading the non-active union member is technically UB in
  // strict ISO C++, but this is the pre-C++20 idiom that all major compilers
  // define; `std::bit_cast` would be the standard replacement if C++20 were
  // required - confirm the project's minimum standard before changing.
  template<typename DstT, typename SrcT>
  union BitCastUnion {
    ASMJIT_INLINE BitCastUnion(SrcT src) noexcept : src(src) {}
    SrcT src;
    DstT dst;
  };
}
//! \endcond

//! Bit-casts from `Src` type to `Dst` type.
//!
//! Useful to bit-cast between integers and floating points.
template<typename Dst, typename Src>
static inline Dst bitCast(const Src& x) noexcept { return Internal::BitCastUnion<Dst, Src>(x).dst; }
+
+// ============================================================================
+// [asmjit::Support - BitOps]
+// ============================================================================
+
//! Storage used to store a pack of bits (should be compatible with a machine word).
typedef Internal::StdInt<sizeof(uintptr_t), 1>::Type BitWord;

//! Returns the size of `T` in bits.
template<typename T>
static constexpr uint32_t bitSizeOf() noexcept { return uint32_t(sizeof(T) * 8u); }

//! Number of bits stored in a single `BitWord`.
static constexpr uint32_t kBitWordSizeInBits = bitSizeOf<BitWord>();
+
//! Computes `0 - x` without invoking signed-overflow UB: the negation is done
//! in the matching unsigned type and cast back, so it is well-defined for
//! both signed and unsigned `T`.
template<typename T>
static constexpr T neg(const T& value) noexcept {
  typedef typename std::make_unsigned<T>::type Unsigned;
  return T(Unsigned(0) - Unsigned(value));
}

//! Returns a `T` with every bit set (`-1` for signed types, max for unsigned).
template<typename T>
static constexpr T allOnes() noexcept { return neg<T>(T(1)); }
+
//! Logical left shift: casts `value` to its unsigned counterpart before
//! shifting, avoiding UB on signed operands, then casts back to `X`.
template<typename X, typename Y>
static constexpr X shl(const X& value, const Y& amount) noexcept {
  typedef typename std::make_unsigned<X>::type Unsigned;
  return X(Unsigned(value) << amount);
}

//! Logical (zero-filling) right shift, performed in the unsigned domain.
template<typename X, typename Y>
static constexpr X shr(const X& value, const Y& amount) noexcept {
  typedef typename std::make_unsigned<X>::type Unsigned;
  return X(Unsigned(value) >> amount);
}

//! Arithmetic (sign-extending) right shift, performed in the signed domain.
template<typename X, typename Y>
static constexpr X sar(const X& value, const Y& amount) noexcept {
  typedef typename std::make_signed<X>::type Signed;
  return X(Signed(value) >> amount);
}

//! Computes `value | (value >> amount)` - building block of bit-fill helpers.
template<typename X, typename Y>
static constexpr X or_shr(const X& value, const Y& amount) noexcept { return X(value | shr(value, amount)); }

//! Isolates the lowest set bit of `x` (`x & -x`, like the BLSI instruction).
template<typename T>
static constexpr T blsi(T x) noexcept {
  typedef typename std::make_unsigned<T>::type Unsigned;
  return T(Unsigned(x) & neg(Unsigned(x)));
}
+}
+
//! Generate a trailing bit-mask that has `n` least significant (trailing) bits set.
template<typename T, typename CountT>
static constexpr T lsbMask(const CountT& n) noexcept {
  typedef typename std::make_unsigned<T>::type U;
  return (sizeof(U) < sizeof(uintptr_t))
    // Prevent undefined behavior by using a larger type than T.
    ? T(U((uintptr_t(1) << n) - uintptr_t(1)))
    // Prevent undefined behavior by performing `n & (nBits - 1)` so it's always within the range.
    // For n == 0 the `sar` of `-0` produces an all-zero mask; for n > 0 the
    // sign bit of `-n` is replicated to all bits and then shifted right by
    // `bitSizeOf<T>() - n`, yielding exactly `n` trailing ones.
    : shr(sar(neg(T(n)), bitSizeOf<T>() - 1u),
          neg(T(n)) & CountT(bitSizeOf<T>() - 1u));
}

//! Tests whether the given value `x` has `n`th bit set.
template<typename T, typename IndexT>
static constexpr bool bitTest(T x, IndexT n) noexcept {
  typedef typename std::make_unsigned<T>::type U;
  return (U(x) & (U(1) << n)) != 0;
}

//! Returns a bit-mask that has `x` bit set.
//! NOTE(review): shifts `1u`, so `x` must be < 32 or the shift is UB - the
//! callers appear to guarantee this, but confirm before widening use.
template<typename T>
static constexpr uint32_t bitMask(T x) noexcept { return (1u << x); }

//! Returns a bit-mask that has `x` bit set (multiple arguments).
template<typename T, typename... Args>
static constexpr uint32_t bitMask(T x, Args... args) noexcept { return bitMask(x) | bitMask(args...); }

//! Converts a boolean value `b` to zero or full mask (all bits set).
template<typename DstT, typename SrcT>
static constexpr DstT bitMaskFromBool(SrcT b) noexcept {
  typedef typename std::make_unsigned<DstT>::type U;
  return DstT(U(0) - U(b));
}
+
//! \cond
namespace Internal {
  // Fills all trailing bits right from the first most significant bit set.
  // Implemented as a log2(width) cascade of `x |= x >> k` steps.
  static constexpr uint8_t fillTrailingBitsImpl(uint8_t x) noexcept { return or_shr(or_shr(or_shr(x, 1), 2), 4); }
  // Fills all trailing bits right from the first most significant bit set.
  static constexpr uint16_t fillTrailingBitsImpl(uint16_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8); }
  // Fills all trailing bits right from the first most significant bit set.
  static constexpr uint32_t fillTrailingBitsImpl(uint32_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8), 16); }
  // Fills all trailing bits right from the first most significant bit set.
  static constexpr uint64_t fillTrailingBitsImpl(uint64_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8), 16), 32); }
}
//! \endcond

// Fills all trailing bits right from the first most significant bit set,
// e.g. 0b0100'1000 -> 0b0111'1111. Dispatches by width via overload resolution.
template<typename T>
static constexpr T fillTrailingBits(const T& x) noexcept {
  typedef typename std::make_unsigned<T>::type U;
  return T(Internal::fillTrailingBitsImpl(U(x)));
}
+
+// ============================================================================
+// [asmjit::Support - CTZ]
+// ============================================================================
+
//! \cond
namespace Internal {
  // Constexpr fallback: the caller passes `x & -x` (lowest set bit isolated),
  // so exactly one bit is set and its index is derived by binary search over
  // alternating masks.
  static constexpr uint32_t constCtzImpl(uint32_t xAndNegX) noexcept {
    return 31 - ((xAndNegX & 0x0000FFFFu) ? 16 : 0)
              - ((xAndNegX & 0x00FF00FFu) ? 8 : 0)
              - ((xAndNegX & 0x0F0F0F0Fu) ? 4 : 0)
              - ((xAndNegX & 0x33333333u) ? 2 : 0)
              - ((xAndNegX & 0x55555555u) ? 1 : 0);
  }

  static constexpr uint32_t constCtzImpl(uint64_t xAndNegX) noexcept {
    return 63 - ((xAndNegX & 0x00000000FFFFFFFFu) ? 32 : 0)
              - ((xAndNegX & 0x0000FFFF0000FFFFu) ? 16 : 0)
              - ((xAndNegX & 0x00FF00FF00FF00FFu) ? 8 : 0)
              - ((xAndNegX & 0x0F0F0F0F0F0F0F0Fu) ? 4 : 0)
              - ((xAndNegX & 0x3333333333333333u) ? 2 : 0)
              - ((xAndNegX & 0x5555555555555555u) ? 1 : 0);
  }

  template<typename T>
  static constexpr uint32_t constCtz(T x) noexcept {
    return constCtzImpl(x & neg(x));
  }

  // Runtime CTZ - uses a single-instruction intrinsic when available,
  // otherwise falls back to the constexpr implementation.
  static ASMJIT_INLINE uint32_t ctz(uint32_t x) noexcept {
  #if defined(__GNUC__)
    return uint32_t(__builtin_ctz(x));
  #elif defined(_MSC_VER) && (ASMJIT_ARCH_X86 || ASMJIT_ARCH_ARM)
    unsigned long i;
    _BitScanForward(&i, x);
    return uint32_t(i);
  #else
    return constCtz(x);
  #endif
  }

  static ASMJIT_INLINE uint32_t ctz(uint64_t x) noexcept {
  #if defined(__GNUC__)
    return uint32_t(__builtin_ctzll(x));
  #elif defined(_MSC_VER) && (ASMJIT_ARCH_X86 == 64 || ASMJIT_ARCH_ARM == 64)
    unsigned long i;
    _BitScanForward64(&i, x);
    return uint32_t(i);
  #else
    return constCtz(x);
  #endif
  }
}
//! \endcond

//! Count trailing zeros in `x` (returns a position of a first bit set in `x`).
//!
//! \note The input MUST NOT be zero, otherwise the result is undefined.
template<typename T>
static inline uint32_t ctz(T x) noexcept { return Internal::ctz(asUInt(x)); }

//! Count trailing zeros in `x` (constant expression).
//!
//! \note The input MUST NOT be zero, otherwise the result is undefined.
template<typename T>
static constexpr uint32_t constCtz(T x) noexcept { return Internal::constCtz(asUInt(x)); }
+
+// ============================================================================
+// [asmjit::Support - PopCnt]
+// ============================================================================
+
+// Based on the following resource:
+// http://graphics.stanford.edu/~seander/bithacks.html
+//
+// Alternatively, for a very small number of bits in `x`:
+// uint32_t n = 0;
+// while (x) {
+// x &= x - 1;
+// n++;
+// }
+// return n;
+
//! \cond
namespace Internal {
  // SWAR (SIMD-within-a-register) popcount: sums bits in 2/4/8-bit groups,
  // then multiplies to accumulate all byte sums into the top byte.
  static inline uint32_t constPopcntImpl(uint32_t x) noexcept {
    x = x - ((x >> 1) & 0x55555555u);
    x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
    return (((x + (x >> 4)) & 0x0F0F0F0Fu) * 0x01010101u) >> 24;
  }

  static inline uint32_t constPopcntImpl(uint64_t x) noexcept {
    // On 32-bit targets a 64-bit multiply is expensive, so split into halves.
    if (ASMJIT_ARCH_BITS >= 64) {
      x = x - ((x >> 1) & 0x5555555555555555u);
      x = (x & 0x3333333333333333u) + ((x >> 2) & 0x3333333333333333u);
      return uint32_t((((x + (x >> 4)) & 0x0F0F0F0F0F0F0F0Fu) * 0x0101010101010101u) >> 56);
    }
    else {
      return constPopcntImpl(uint32_t(x >> 32)) +
             constPopcntImpl(uint32_t(x & 0xFFFFFFFFu));
    }
  }

  static inline uint32_t popcntImpl(uint32_t x) noexcept {
  #if defined(__GNUC__)
    return uint32_t(__builtin_popcount(x));
  #else
    return constPopcntImpl(asUInt(x));
  #endif
  }

  static inline uint32_t popcntImpl(uint64_t x) noexcept {
  #if defined(__GNUC__)
    return uint32_t(__builtin_popcountll(x));
  #else
    return constPopcntImpl(asUInt(x));
  #endif
  }
}
//! \endcond

//! Calculates count of bits in `x`.
template<typename T>
static inline uint32_t popcnt(T x) noexcept { return Internal::popcntImpl(asUInt(x)); }

//! Calculates count of bits in `x` (useful in constant expressions).
//! NOTE(review): despite the name and the doc, this is declared `inline`,
//! not `constexpr`, so it cannot actually appear in a constant expression -
//! confirm whether upgrading to `constexpr` fits the supported C++ standard.
template<typename T>
static inline uint32_t constPopcnt(T x) noexcept { return Internal::constPopcntImpl(asUInt(x)); }
+
+// ============================================================================
+// [asmjit::Support - Min/Max]
+// ============================================================================
+
+// NOTE: These are constexpr `min()` and `max()` implementations that are not
+// exactly the same as `std::min()` and `std::max()`. The return value is not
+// a reference to `a` or `b` but it's a new value instead.
+
//! Returns the smaller of `lhs` and `rhs` by value. Unlike `std::min()` this
//! does not return a reference to an argument (keeps the exact `rhs < lhs`
//! comparison so tie-breaking matches `std::min`'s "first wins" rule).
template<typename T>
static constexpr T min(const T& lhs, const T& rhs) noexcept { return rhs < lhs ? rhs : lhs; }

//! Variadic overload - returns the smallest of all passed values by value.
template<typename T, typename... Args>
static constexpr T min(const T& lhs, const T& rhs, Args&&... rest) noexcept { return min(min(lhs, rhs), std::forward<Args>(rest)...); }

//! Returns the larger of `lhs` and `rhs` by value (not a reference).
template<typename T>
static constexpr T max(const T& lhs, const T& rhs) noexcept { return lhs < rhs ? rhs : lhs; }

//! Variadic overload - returns the largest of all passed values by value.
template<typename T, typename... Args>
static constexpr T max(const T& lhs, const T& rhs, Args&&... rest) noexcept { return max(max(lhs, rhs), std::forward<Args>(rest)...); }
+
+// ============================================================================
+// [asmjit::Support - Overflow Arithmetic]
+// ============================================================================
+
//! \cond
namespace Internal {
  // Portable overflow-detecting add: performed in the unsigned domain (no UB);
  // for unsigned T overflow is a wrapped result smaller than an operand, for
  // signed T it is detected from the sign bits of operands vs result.
  // The detected flag is OR-ed into `*of` so checks can be chained.
  template<typename T>
  ASMJIT_INLINE T addOverflowFallback(T x, T y, FastUInt8* of) noexcept {
    typedef typename std::make_unsigned<T>::type U;

    U result = U(x) + U(y);
    *of = FastUInt8(*of | FastUInt8(isUnsigned<T>() ? result < U(x) : T((U(x) ^ ~U(y)) & (U(x) ^ result)) < 0));
    return T(result);
  }

  // Portable overflow-detecting subtract; mirror of addOverflowFallback.
  template<typename T>
  ASMJIT_INLINE T subOverflowFallback(T x, T y, FastUInt8* of) noexcept {
    typedef typename std::make_unsigned<T>::type U;

    U result = U(x) - U(y);
    *of = FastUInt8(*of | FastUInt8(isUnsigned<T>() ? result > U(x) : T((U(x) ^ U(y)) & (U(x) ^ result)) < 0));
    return T(result);
  }

  // Overflow-detecting multiply for types up to 32 bits: widens to a type of
  // double the size, multiplies exactly, and range-checks the product.
  template<typename T>
  ASMJIT_INLINE T mulOverflowFallback(T x, T y, FastUInt8* of) noexcept {
    typedef typename Internal::StdInt<sizeof(T) * 2, isUnsigned<T>()>::Type I;
    typedef typename std::make_unsigned<I>::type U;

    U mask = allOnes<U>();
    if (std::is_signed<T>::value) {
      U prod = U(I(x)) * U(I(y));
      *of = FastUInt8(*of | FastUInt8(I(prod) < I(std::numeric_limits<T>::lowest()) || I(prod) > I(std::numeric_limits<T>::max())));
      return T(I(prod & mask));
    }
    else {
      U prod = U(x) * U(y);
      *of = FastUInt8(*of | FastUInt8((prod & ~mask) != 0));
      return T(prod & mask);
    }
  }

  // 64-bit multiply cannot be widened, so overflow is detected by dividing
  // the product back.
  // NOTE(review): `result / x` with x == -1 and result == INT64_MIN (from
  // x == -1, y == INT64_MIN) overflows the division itself - presumably
  // unreachable for the values AsmJit feeds in, but confirm.
  template<>
  ASMJIT_INLINE int64_t mulOverflowFallback(int64_t x, int64_t y, FastUInt8* of) noexcept {
    int64_t result = int64_t(uint64_t(x) * uint64_t(y));
    *of = FastUInt8(*of | FastUInt8(x && (result / x != y)));
    return result;
  }

  template<>
  ASMJIT_INLINE uint64_t mulOverflowFallback(uint64_t x, uint64_t y, FastUInt8* of) noexcept {
    uint64_t result = x * y;
    *of = FastUInt8(*of | FastUInt8(y != 0 && allOnes<uint64_t>() / y < x));
    return result;
  }

  // These can be specialized.
  template<typename T> ASMJIT_INLINE T addOverflowImpl(const T& x, const T& y, FastUInt8* of) noexcept { return addOverflowFallback(x, y, of); }
  template<typename T> ASMJIT_INLINE T subOverflowImpl(const T& x, const T& y, FastUInt8* of) noexcept { return subOverflowFallback(x, y, of); }
  template<typename T> ASMJIT_INLINE T mulOverflowImpl(const T& x, const T& y, FastUInt8* of) noexcept { return mulOverflowFallback(x, y, of); }

  // GCC >= 5 and Clang provide checked-arithmetic builtins that compile to a
  // single instruction plus a flags read - specialize the Impl functions.
  #if defined(__GNUC__) && !defined(ASMJIT_NO_INTRINSICS)
  #if defined(__clang__) || __GNUC__ >= 5
  #define ASMJIT_ARITH_OVERFLOW_SPECIALIZE(FUNC, T, RESULT_T, BUILTIN) \
    template<> \
    ASMJIT_INLINE T FUNC(const T& x, const T& y, FastUInt8* of) noexcept { \
      RESULT_T result; \
      *of = FastUInt8(*of | (BUILTIN((RESULT_T)x, (RESULT_T)y, &result))); \
      return T(result); \
    }
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(addOverflowImpl, int32_t , int               , __builtin_sadd_overflow  )
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(addOverflowImpl, uint32_t, unsigned int      , __builtin_uadd_overflow  )
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(addOverflowImpl, int64_t , long long         , __builtin_saddll_overflow)
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(addOverflowImpl, uint64_t, unsigned long long, __builtin_uaddll_overflow)
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(subOverflowImpl, int32_t , int               , __builtin_ssub_overflow  )
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(subOverflowImpl, uint32_t, unsigned int      , __builtin_usub_overflow  )
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(subOverflowImpl, int64_t , long long         , __builtin_ssubll_overflow)
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(subOverflowImpl, uint64_t, unsigned long long, __builtin_usubll_overflow)
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(mulOverflowImpl, int32_t , int               , __builtin_smul_overflow  )
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(mulOverflowImpl, uint32_t, unsigned int      , __builtin_umul_overflow  )
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(mulOverflowImpl, int64_t , long long         , __builtin_smulll_overflow)
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(mulOverflowImpl, uint64_t, unsigned long long, __builtin_umulll_overflow)
  #undef ASMJIT_ARITH_OVERFLOW_SPECIALIZE
  #endif
  #endif

  // There is a bug in MSVC that makes these specializations unusable, maybe in the future...
  #if defined(_MSC_VER) && 0
  #define ASMJIT_ARITH_OVERFLOW_SPECIALIZE(FUNC, T, ALT_T, BUILTIN) \
    template<> \
    ASMJIT_INLINE T FUNC(T x, T y, FastUInt8* of) noexcept { \
      ALT_T result; \
      *of = FastUInt8(*of | BUILTIN(0, (ALT_T)x, (ALT_T)y, &result)); \
      return T(result); \
    }
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(addOverflowImpl, uint32_t, unsigned int     , _addcarry_u32 )
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(subOverflowImpl, uint32_t, unsigned int     , _subborrow_u32)
  #if ARCH_BITS >= 64
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(addOverflowImpl, uint64_t, unsigned __int64 , _addcarry_u64 )
  ASMJIT_ARITH_OVERFLOW_SPECIALIZE(subOverflowImpl, uint64_t, unsigned __int64 , _subborrow_u64)
  #endif
  #undef ASMJIT_ARITH_OVERFLOW_SPECIALIZE
  #endif
} // {Internal}
//! \endcond

//! Returns `x + y`; sets `*of` to non-zero if the addition overflowed (sticky - OR-ed in).
template<typename T>
static ASMJIT_INLINE T addOverflow(const T& x, const T& y, FastUInt8* of) noexcept { return T(Internal::addOverflowImpl(asStdInt(x), asStdInt(y), of)); }

//! Returns `x - y`; sets `*of` to non-zero if the subtraction overflowed (sticky - OR-ed in).
template<typename T>
static ASMJIT_INLINE T subOverflow(const T& x, const T& y, FastUInt8* of) noexcept { return T(Internal::subOverflowImpl(asStdInt(x), asStdInt(y), of)); }

//! Returns `x * y`; sets `*of` to non-zero if the multiplication overflowed (sticky - OR-ed in).
template<typename T>
static ASMJIT_INLINE T mulOverflow(const T& x, const T& y, FastUInt8* of) noexcept { return T(Internal::mulOverflowImpl(asStdInt(x), asStdInt(y), of)); }
+
+// ============================================================================
+// [asmjit::Support - Alignment]
+// ============================================================================
+
//! Tests whether `base` is a multiple of `alignment` (done via an unsigned
//! modulo, so it works for pointers-as-integers and any alignment value).
template<typename X, typename Y>
static constexpr bool isAligned(X base, Y alignment) noexcept {
  typedef typename Internal::StdInt<sizeof(X), 1>::Type U;
  return ((U)base % (U)alignment) == 0;
}
+
//! Tests whether `value` is a non-zero power of two (exactly one bit set).
//!
//! Clearing the lowest set bit via `v & (v - 1)` yields zero only when at
//! most one bit was set; the explicit zero check rejects `value == 0`.
template<typename T>
static constexpr bool isPowerOf2(T value) noexcept {
  typedef typename std::make_unsigned<T>::type U;
  return value != T(0) && (U(value) & (U(value) - U(1))) == U(0);
}
+
//! Rounds `x` up to the nearest multiple of `alignment`.
//! `alignment` must be a power of two (the mask trick relies on it).
template<typename X, typename Y>
static constexpr X alignUp(X x, Y alignment) noexcept {
  typedef typename Internal::StdInt<sizeof(X), 1>::Type U;
  return (X)( ((U)x + ((U)(alignment) - 1u)) & ~((U)(alignment) - 1u) );
}

//! Rounds `x` up to the nearest power of two (returns `x` if it already is one).
template<typename T>
static constexpr T alignUpPowerOf2(T x) noexcept {
  typedef typename Internal::StdInt<sizeof(T), 1>::Type U;
  return (T)(fillTrailingBits(U(x) - 1u) + 1u);
}

//! Returns either zero or a positive difference between `base` and `base`
//! aligned up to `alignment` (i.e. how many units must be added to `base`
//! to make it aligned).
template<typename X, typename Y>
static constexpr typename Internal::StdInt<sizeof(X), 1>::Type alignUpDiff(X base, Y alignment) noexcept {
  typedef typename Internal::StdInt<sizeof(X), 1>::Type U;
  return alignUp(U(base), alignment) - U(base);
}

//! Rounds `x` down to the nearest multiple of `alignment` (power of two).
template<typename X, typename Y>
static constexpr X alignDown(X x, Y alignment) noexcept {
  typedef typename Internal::StdInt<sizeof(X), 1>::Type U;
  return (X)( (U)x & ~((U)(alignment) - 1u) );
}
+
+// ============================================================================
+// [asmjit::Support - NumGranularized]
+// ============================================================================
+
//! Calculates the number of elements that would be required if `base` is
//! granularized by `granularity`. This function can be used to calculate
//! the number of BitWords to represent N bits, for example.
//! (Ceiling division performed in the unsigned domain.)
template<typename X, typename Y>
static constexpr X numGranularized(X base, Y granularity) noexcept {
  typedef typename Internal::StdInt<sizeof(X), 1>::Type U;
  return X((U(base) + U(granularity) - 1) / U(granularity));
}
+
+// ============================================================================
+// [asmjit::Support - IsBetween]
+// ============================================================================
+
//! Checks whether `x` lies within the closed interval `[lo, hi]`,
//! i.e. `lo <= x` and `x <= hi`.
template<typename T>
static constexpr bool isBetween(const T& x, const T& lo, const T& hi) noexcept {
  return x >= lo && x <= hi;
}
+
+// ============================================================================
+// [asmjit::Support - IsInt / IsUInt]
+// ============================================================================
+
//! Checks whether the given integer `x` can be casted to a 4-bit signed integer.
template<typename T>
static constexpr bool isInt4(T x) noexcept {
  typedef typename std::make_signed<T>::type S;
  typedef typename std::make_unsigned<T>::type U;

  return std::is_signed<T>::value ? isBetween<S>(S(x), -8, 7)
                                  : U(x) <= U(7u);
}

//! Checks whether the given integer `x` can be casted to an 8-bit signed integer.
//! The `sizeof(T) <= N` short-circuits avoid range checks (and tautological-
//! comparison warnings) when `T` cannot exceed the target range at all.
template<typename T>
static constexpr bool isInt8(T x) noexcept {
  typedef typename std::make_signed<T>::type S;
  typedef typename std::make_unsigned<T>::type U;

  return std::is_signed<T>::value ? sizeof(T) <= 1 || isBetween<S>(S(x), -128, 127)
                                  : U(x) <= U(127u);
}

//! Checks whether the given integer `x` can be casted to a 16-bit signed integer.
template<typename T>
static constexpr bool isInt16(T x) noexcept {
  typedef typename std::make_signed<T>::type S;
  typedef typename std::make_unsigned<T>::type U;

  return std::is_signed<T>::value ? sizeof(T) <= 2 || isBetween<S>(S(x), -32768, 32767)
                                  : sizeof(T) <= 1 || U(x) <= U(32767u);
}

//! Checks whether the given integer `x` can be casted to a 32-bit signed integer.
template<typename T>
static constexpr bool isInt32(T x) noexcept {
  typedef typename std::make_signed<T>::type S;
  typedef typename std::make_unsigned<T>::type U;

  // `-2147483647 - 1` avoids parsing 2147483648 as an out-of-range literal.
  return std::is_signed<T>::value ? sizeof(T) <= 4 || isBetween<S>(S(x), -2147483647 - 1, 2147483647)
                                  : sizeof(T) <= 2 || U(x) <= U(2147483647u);
}

//! Checks whether the given integer `x` can be casted to a 4-bit unsigned integer.
template<typename T>
static constexpr bool isUInt4(T x) noexcept {
  typedef typename std::make_unsigned<T>::type U;

  return std::is_signed<T>::value ? x >= T(0) && x <= T(15)
                                  : U(x) <= U(15u);
}

//! Checks whether the given integer `x` can be casted to an 8-bit unsigned integer.
template<typename T>
static constexpr bool isUInt8(T x) noexcept {
  typedef typename std::make_unsigned<T>::type U;

  return std::is_signed<T>::value ? (sizeof(T) <= 1 || T(x) <= T(255)) && x >= T(0)
                                  : (sizeof(T) <= 1 || U(x) <= U(255u));
}

//! Checks whether the given integer `x` can be casted to a 12-bit unsigned integer (ARM specific).
template<typename T>
static constexpr bool isUInt12(T x) noexcept {
  typedef typename std::make_unsigned<T>::type U;

  return std::is_signed<T>::value ? (sizeof(T) <= 1 || T(x) <= T(4095)) && x >= T(0)
                                  : (sizeof(T) <= 1 || U(x) <= U(4095u));
}

//! Checks whether the given integer `x` can be casted to a 16-bit unsigned integer.
template<typename T>
static constexpr bool isUInt16(T x) noexcept {
  typedef typename std::make_unsigned<T>::type U;

  return std::is_signed<T>::value ? (sizeof(T) <= 2 || T(x) <= T(65535)) && x >= T(0)
                                  : (sizeof(T) <= 2 || U(x) <= U(65535u));
}

//! Checks whether the given integer `x` can be casted to a 32-bit unsigned integer.
template<typename T>
static constexpr bool isUInt32(T x) noexcept {
  typedef typename std::make_unsigned<T>::type U;

  return std::is_signed<T>::value ? (sizeof(T) <= 4 || T(x) <= T(4294967295u)) && x >= T(0)
                                  : (sizeof(T) <= 4 || U(x) <= U(4294967295u));
}

//! Checks whether the given integer `x` can be casted to either a 32-bit
//! signed or a 32-bit unsigned integer (the upper 32 bits are all zeros or
//! all ones, i.e. a plain sign/zero extension of a 32-bit value).
template<typename T>
static constexpr bool isIntOrUInt32(T x) noexcept {
  return sizeof(T) <= 4 ? true : (uint32_t(uint64_t(x) >> 32) + 1u) <= 1u;
}
+
+// ============================================================================
+// [asmjit::Support - ByteSwap]
+// ============================================================================
+
//! Reverses the byte order of a 32-bit integer (endian swap):
//! `AA BB CC DD` -> `DD CC BB AA`.
static constexpr uint32_t byteswap32(uint32_t x) noexcept {
  return ((x & 0x000000FFu) << 24) |
         ((x & 0x0000FF00u) <<  8) |
         ((x & 0x00FF0000u) >>  8) |
         ((x & 0xFF000000u) >> 24);
}
+
+// ============================================================================
+// [asmjit::Support - BytePack / Unpack]
+// ============================================================================
+
//! Pack four 8-bit integer into a 32-bit integer as it is an array of `{b0,b1,b2,b3}`.
//! Byte `a` occupies the lowest memory address regardless of host endianness.
static constexpr uint32_t bytepack32_4x8(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept {
  return ASMJIT_ARCH_LE ? (a | (b << 8) | (c << 16) | (d << 24))
                        : (d | (c << 8) | (b << 16) | (a << 24));
}

//! Extracts the 32-bit half of `x` that sits at the lower memory address.
template<typename T>
static constexpr uint32_t unpackU32At0(T x) noexcept { return ASMJIT_ARCH_LE ? uint32_t(uint64_t(x) & 0xFFFFFFFFu) : uint32_t(uint64_t(x) >> 32); }
//! Extracts the 32-bit half of `x` that sits at the higher memory address.
template<typename T>
static constexpr uint32_t unpackU32At1(T x) noexcept { return ASMJIT_ARCH_BE ? uint32_t(uint64_t(x) & 0xFFFFFFFFu) : uint32_t(uint64_t(x) >> 32); }

// ============================================================================
// [asmjit::Support - Position of byte (in bit-shift)]
// ============================================================================

// Returns the bit-shift of the byte at memory offset `index` within a
// `uint32_t`, taking host endianness into account.
static inline uint32_t byteShiftOfDWordStruct(uint32_t index) noexcept {
  return ASMJIT_ARCH_LE ? index * 8 : (uint32_t(sizeof(uint32_t)) - 1u - index) * 8;
}
+
+// ============================================================================
+// [asmjit::Support - String Utilities]
+// ============================================================================
+
//! Converts an ASCII uppercase letter `c` to lowercase; anything else is
//! returned unchanged. ASCII cases differ only in bit 5 (0x20), so the
//! conversion is a conditional XOR of that bit.
template<typename T>
static constexpr T asciiToLower(T c) noexcept { return (c >= T('A') && c <= T('Z')) ? T(c ^ T(0x20)) : c; }

//! Converts an ASCII lowercase letter `c` to uppercase; anything else is
//! returned unchanged (same bit-5 XOR as `asciiToLower`).
template<typename T>
static constexpr T asciiToUpper(T c) noexcept { return (c >= T('a') && c <= T('z')) ? T(c ^ T(0x20)) : c; }
+
+static ASMJIT_INLINE size_t strLen(const char* s, size_t maxSize) noexcept {
+ size_t i = 0;
+ while (i < maxSize && s[i] != '\0')
+ i++;
+ return i;
+}
+
//! Combines a previous `hash` value with one more byte/character `c`
//! (SDBM-style multiplicative rolling hash, multiplier 65599).
static constexpr uint32_t hashRound(uint32_t hash, uint32_t c) noexcept { return hash * 65599 + c; }

// Gets a hash of the given string `data` of size `size`. Size must be valid
// as this function doesn't check for a null terminator and allows it in the
// middle of the string.
static inline uint32_t hashString(const char* data, size_t size) noexcept {
  uint32_t hashCode = 0;
  // Counter must be `size_t`: the previous `uint32_t` counter silently
  // narrowed the comparison against `size` and would never terminate
  // correctly for sizes above UINT32_MAX.
  for (size_t i = 0; i < size; i++)
    hashCode = hashRound(hashCode, uint8_t(data[i]));
  return hashCode;
}
+
+static ASMJIT_INLINE const char* findPackedString(const char* p, uint32_t id) noexcept {
+ uint32_t i = 0;
+ while (i < id) {
+ while (p[0])
+ p++;
+ p++;
+ i++;
+ }
+ return p;
+}
+
//! Compares two instruction names.
//!
//! `a` is a null terminated instruction name from arch-specific `nameData[]`
//! table. `b` is a possibly non-null terminated instruction name passed to
//! `InstAPI::stringToInstId()`.
//!
//! Returns a <0/0/>0 strcmp-style result. The final return handles the case
//! where `b` is a strict prefix of `a`: a non-zero `a[size]` means `a` is
//! longer and therefore compares greater.
static ASMJIT_INLINE int cmpInstName(const char* a, const char* b, size_t size) noexcept {
  for (size_t i = 0; i < size; i++) {
    int c = int(uint8_t(a[i])) - int(uint8_t(b[i]));
    if (c != 0) return c;
  }
  return int(uint8_t(a[size]));
}
+
+// ============================================================================
+// [asmjit::Support - Read / Write]
+// ============================================================================
+
// Reads a single byte from `p`, zero-extended / sign-extended respectively.
static inline uint32_t readU8(const void* p) noexcept { return uint32_t(static_cast<const uint8_t*>(p)[0]); }
static inline int32_t readI8(const void* p) noexcept { return int32_t(static_cast<const int8_t*>(p)[0]); }
+
//! Reads a 16-bit value from `p` with byte-order `BO`, zero-extended to
//! 32 bits. `Alignment` is the guaranteed alignment of `p` in bytes - when
//! the byte order is native and either unaligned 16-bit access is supported
//! or `p` is sufficiently aligned, a single load is used; otherwise the
//! value is composed from two byte loads.
template<uint32_t BO, size_t Alignment>
static inline uint32_t readU16x(const void* p) noexcept {
  if (BO == ByteOrder::kNative && (kUnalignedAccess16 || Alignment >= 2)) {
    typedef typename Internal::AlignedInt<uint16_t, Alignment>::T U16AlignedToN;
    return uint32_t(static_cast<const U16AlignedToN*>(p)[0]);
  }
  else {
    // Byte-wise fallback - also handles the non-native byte order.
    uint32_t hi = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 1 : 0));
    uint32_t lo = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 1));
    return shl(hi, 8) | lo;
  }
}
+
//! Reads a 16-bit value from `p` with byte-order `BO`, sign-extended to
//! 32 bits. See `readU16x` for the meaning of `Alignment`.
template<uint32_t BO, size_t Alignment>
static inline int32_t readI16x(const void* p) noexcept {
  if (BO == ByteOrder::kNative && (kUnalignedAccess16 || Alignment >= 2)) {
    typedef typename Internal::AlignedInt<uint16_t, Alignment>::T U16AlignedToN;
    return int32_t(int16_t(static_cast<const U16AlignedToN*>(p)[0]));
  }
  else {
    // The high byte is read signed so the sign bit propagates to the result.
    int32_t hi = readI8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 1 : 0));
    uint32_t lo = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 1));
    return shl(hi, 8) | int32_t(lo);
  }
}
+
//! Reads a 24-bit unsigned value from `p` with byte-order `BO`. Always uses
//! three byte loads, so alignment doesn't matter.
template<uint32_t BO = ByteOrder::kNative>
static inline uint32_t readU24u(const void* p) noexcept {
  uint32_t b0 = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 2 : 0));
  // The middle byte sits at index 1 regardless of the byte order - the
  // `?1:1` keeps the expression parallel with its neighbors on purpose.
  uint32_t b1 = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 1 : 1));
  uint32_t b2 = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 2));
  return shl(b0, 16) | shl(b1, 8) | b2;
}
+
//! Reads a 32-bit value from `p` with byte-order `BO` and guaranteed
//! `Alignment`. Unlike the 16-bit variant, a direct load is also used for
//! the non-native byte order - the loaded word is byte-swapped afterwards.
template<uint32_t BO, size_t Alignment>
static inline uint32_t readU32x(const void* p) noexcept {
  if (kUnalignedAccess32 || Alignment >= 4) {
    typedef typename Internal::AlignedInt<uint32_t, Alignment>::T U32AlignedToN;
    uint32_t x = static_cast<const U32AlignedToN*>(p)[0];
    return BO == ByteOrder::kNative ? x : byteswap32(x);
  }
  else {
    // Compose from two 16-bit reads, clamping the alignment passed down so
    // it never exceeds the sub-read's natural alignment.
    uint32_t hi = readU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 2 : 0));
    uint32_t lo = readU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 2));
    return shl(hi, 16) | lo;
  }
}
+
//! Reads a 64-bit value from `p` with byte-order `BO` and guaranteed
//! `Alignment`; composed from two 32-bit reads when a direct native load
//! isn't possible.
template<uint32_t BO, size_t Alignment>
static inline uint64_t readU64x(const void* p) noexcept {
  if (BO == ByteOrder::kNative && (kUnalignedAccess64 || Alignment >= 8)) {
    typedef typename Internal::AlignedInt<uint64_t, Alignment>::T U64AlignedToN;
    return static_cast<const U64AlignedToN*>(p)[0];
  }
  else {
    uint32_t hi = readU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 4 : 0));
    uint32_t lo = readU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 4));
    return shl(uint64_t(hi), 32) | lo;
  }
}
+
// Signed 32/64-bit reads implemented on top of the unsigned variants.
template<uint32_t BO, size_t Alignment>
static inline int32_t readI32x(const void* p) noexcept { return int32_t(readU32x<BO, Alignment>(p)); }

template<uint32_t BO, size_t Alignment>
static inline int64_t readI64x(const void* p) noexcept { return int64_t(readU64x<BO, Alignment>(p)); }

// Explicit little-endian (LE) / big-endian (BE) aliases of the templates above.
template<size_t Alignment> static inline int32_t readI16xLE(const void* p) noexcept { return readI16x<ByteOrder::kLE, Alignment>(p); }
template<size_t Alignment> static inline int32_t readI16xBE(const void* p) noexcept { return readI16x<ByteOrder::kBE, Alignment>(p); }
template<size_t Alignment> static inline uint32_t readU16xLE(const void* p) noexcept { return readU16x<ByteOrder::kLE, Alignment>(p); }
template<size_t Alignment> static inline uint32_t readU16xBE(const void* p) noexcept { return readU16x<ByteOrder::kBE, Alignment>(p); }
template<size_t Alignment> static inline int32_t readI32xLE(const void* p) noexcept { return readI32x<ByteOrder::kLE, Alignment>(p); }
template<size_t Alignment> static inline int32_t readI32xBE(const void* p) noexcept { return readI32x<ByteOrder::kBE, Alignment>(p); }
template<size_t Alignment> static inline uint32_t readU32xLE(const void* p) noexcept { return readU32x<ByteOrder::kLE, Alignment>(p); }
template<size_t Alignment> static inline uint32_t readU32xBE(const void* p) noexcept { return readU32x<ByteOrder::kBE, Alignment>(p); }
template<size_t Alignment> static inline int64_t readI64xLE(const void* p) noexcept { return readI64x<ByteOrder::kLE, Alignment>(p); }
template<size_t Alignment> static inline int64_t readI64xBE(const void* p) noexcept { return readI64x<ByteOrder::kBE, Alignment>(p); }
template<size_t Alignment> static inline uint64_t readU64xLE(const void* p) noexcept { return readU64x<ByteOrder::kLE, Alignment>(p); }
template<size_t Alignment> static inline uint64_t readU64xBE(const void* p) noexcept { return readU64x<ByteOrder::kBE, Alignment>(p); }
+
// Convenience wrappers - the suffix encodes alignment and byte order:
//   `a`  - aligned to the value's natural alignment.
//   `u`  - unaligned (alignment of 1).
//   `LE` / `BE` - explicit byte order; no suffix means native byte order.
static inline int32_t readI16a(const void* p) noexcept { return readI16x<ByteOrder::kNative, 2>(p); }
static inline int32_t readI16u(const void* p) noexcept { return readI16x<ByteOrder::kNative, 1>(p); }
static inline uint32_t readU16a(const void* p) noexcept { return readU16x<ByteOrder::kNative, 2>(p); }
static inline uint32_t readU16u(const void* p) noexcept { return readU16x<ByteOrder::kNative, 1>(p); }

static inline int32_t readI16aLE(const void* p) noexcept { return readI16xLE<2>(p); }
static inline int32_t readI16uLE(const void* p) noexcept { return readI16xLE<1>(p); }
static inline uint32_t readU16aLE(const void* p) noexcept { return readU16xLE<2>(p); }
static inline uint32_t readU16uLE(const void* p) noexcept { return readU16xLE<1>(p); }

static inline int32_t readI16aBE(const void* p) noexcept { return readI16xBE<2>(p); }
static inline int32_t readI16uBE(const void* p) noexcept { return readI16xBE<1>(p); }
static inline uint32_t readU16aBE(const void* p) noexcept { return readU16xBE<2>(p); }
static inline uint32_t readU16uBE(const void* p) noexcept { return readU16xBE<1>(p); }

static inline uint32_t readU24uLE(const void* p) noexcept { return readU24u<ByteOrder::kLE>(p); }
static inline uint32_t readU24uBE(const void* p) noexcept { return readU24u<ByteOrder::kBE>(p); }

static inline int32_t readI32a(const void* p) noexcept { return readI32x<ByteOrder::kNative, 4>(p); }
static inline int32_t readI32u(const void* p) noexcept { return readI32x<ByteOrder::kNative, 1>(p); }
static inline uint32_t readU32a(const void* p) noexcept { return readU32x<ByteOrder::kNative, 4>(p); }
static inline uint32_t readU32u(const void* p) noexcept { return readU32x<ByteOrder::kNative, 1>(p); }

static inline int32_t readI32aLE(const void* p) noexcept { return readI32xLE<4>(p); }
static inline int32_t readI32uLE(const void* p) noexcept { return readI32xLE<1>(p); }
static inline uint32_t readU32aLE(const void* p) noexcept { return readU32xLE<4>(p); }
static inline uint32_t readU32uLE(const void* p) noexcept { return readU32xLE<1>(p); }

static inline int32_t readI32aBE(const void* p) noexcept { return readI32xBE<4>(p); }
static inline int32_t readI32uBE(const void* p) noexcept { return readI32xBE<1>(p); }
static inline uint32_t readU32aBE(const void* p) noexcept { return readU32xBE<4>(p); }
static inline uint32_t readU32uBE(const void* p) noexcept { return readU32xBE<1>(p); }

static inline int64_t readI64a(const void* p) noexcept { return readI64x<ByteOrder::kNative, 8>(p); }
static inline int64_t readI64u(const void* p) noexcept { return readI64x<ByteOrder::kNative, 1>(p); }
static inline uint64_t readU64a(const void* p) noexcept { return readU64x<ByteOrder::kNative, 8>(p); }
static inline uint64_t readU64u(const void* p) noexcept { return readU64x<ByteOrder::kNative, 1>(p); }

static inline int64_t readI64aLE(const void* p) noexcept { return readI64xLE<8>(p); }
static inline int64_t readI64uLE(const void* p) noexcept { return readI64xLE<1>(p); }
static inline uint64_t readU64aLE(const void* p) noexcept { return readU64xLE<8>(p); }
static inline uint64_t readU64uLE(const void* p) noexcept { return readU64xLE<1>(p); }

static inline int64_t readI64aBE(const void* p) noexcept { return readI64xBE<8>(p); }
static inline int64_t readI64uBE(const void* p) noexcept { return readI64xBE<1>(p); }
static inline uint64_t readU64aBE(const void* p) noexcept { return readU64xBE<8>(p); }
static inline uint64_t readU64uBE(const void* p) noexcept { return readU64xBE<1>(p); }
+
//! Writes the low 8 bits of `x` to `p`.
static inline void writeU8(void* p, uint32_t x) noexcept { static_cast<uint8_t*>(p)[0] = uint8_t(x & 0xFFu); }
//! Writes the low 8 bits of `x` to `p` (signed input).
static inline void writeI8(void* p, int32_t x) noexcept { static_cast<uint8_t*>(p)[0] = uint8_t(x & 0xFF); }
+
//! Writes the low 16 bits of `x` to `p` with byte-order `BO`. A single
//! store is used when the byte order is native and either unaligned 16-bit
//! access is supported or `Alignment >= 2`; otherwise two byte stores.
template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1>
static inline void writeU16x(void* p, uint32_t x) noexcept {
  if (BO == ByteOrder::kNative && (kUnalignedAccess16 || Alignment >= 2)) {
    typedef typename Internal::AlignedInt<uint16_t, Alignment>::T U16AlignedToN;
    static_cast<U16AlignedToN*>(p)[0] = uint16_t(x & 0xFFFFu);
  }
  else {
    // Byte-wise fallback - also handles the non-native byte order.
    static_cast<uint8_t*>(p)[0] = uint8_t((x >> (BO == ByteOrder::kLE ? 0 : 8)) & 0xFFu);
    static_cast<uint8_t*>(p)[1] = uint8_t((x >> (BO == ByteOrder::kLE ? 8 : 0)) & 0xFFu);
  }
}
+
//! Writes the low 24 bits of `v` to `p` with byte-order `BO` (always three
//! byte stores, so alignment doesn't matter).
template<uint32_t BO = ByteOrder::kNative>
static inline void writeU24u(void* p, uint32_t v) noexcept {
  static_cast<uint8_t*>(p)[0] = uint8_t((v >> (BO == ByteOrder::kLE ? 0 : 16)) & 0xFFu);
  // The middle byte uses shift 8 in both byte orders - `?8:8` is intentional.
  static_cast<uint8_t*>(p)[1] = uint8_t((v >> (BO == ByteOrder::kLE ? 8 : 8)) & 0xFFu);
  static_cast<uint8_t*>(p)[2] = uint8_t((v >> (BO == ByteOrder::kLE ? 16 : 0)) & 0xFFu);
}
+
//! Writes `x` (32 bits) to `p` with byte-order `BO`. A direct store (with a
//! byte swap for the non-native order) is used when possible, otherwise the
//! value is written as two 16-bit halves.
template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1>
static inline void writeU32x(void* p, uint32_t x) noexcept {
  if (kUnalignedAccess32 || Alignment >= 4) {
    typedef typename Internal::AlignedInt<uint32_t, Alignment>::T U32AlignedToN;
    static_cast<U32AlignedToN*>(p)[0] = (BO == ByteOrder::kNative) ? x : Support::byteswap32(x);
  }
  else {
    // Clamp the alignment passed down to the sub-write's natural alignment.
    writeU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<uint8_t*>(p) + 0, x >> (BO == ByteOrder::kLE ? 0 : 16));
    writeU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<uint8_t*>(p) + 2, x >> (BO == ByteOrder::kLE ? 16 : 0));
  }
}
+
//! Writes `x` (64 bits) to `p` with byte-order `BO`; falls back to two
//! 32-bit writes when a direct native store isn't possible.
template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1>
static inline void writeU64x(void* p, uint64_t x) noexcept {
  if (BO == ByteOrder::kNative && (kUnalignedAccess64 || Alignment >= 8)) {
    typedef typename Internal::AlignedInt<uint64_t, Alignment>::T U64AlignedToN;
    static_cast<U64AlignedToN*>(p)[0] = x;
  }
  else {
    writeU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<uint8_t*>(p) + 0, uint32_t((x >> (BO == ByteOrder::kLE ? 0 : 32)) & 0xFFFFFFFFu));
    writeU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<uint8_t*>(p) + 4, uint32_t((x >> (BO == ByteOrder::kLE ? 32 : 0)) & 0xFFFFFFFFu));
  }
}
+
// Signed write variants forwarding to the unsigned ones, plus explicit
// little-endian (LE) / big-endian (BE) aliases.
template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1> static inline void writeI16x(void* p, int32_t x) noexcept { writeU16x<BO, Alignment>(p, uint32_t(x)); }
template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1> static inline void writeI32x(void* p, int32_t x) noexcept { writeU32x<BO, Alignment>(p, uint32_t(x)); }
template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1> static inline void writeI64x(void* p, int64_t x) noexcept { writeU64x<BO, Alignment>(p, uint64_t(x)); }

template<size_t Alignment = 1> static inline void writeI16xLE(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kLE, Alignment>(p, x); }
template<size_t Alignment = 1> static inline void writeI16xBE(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kBE, Alignment>(p, x); }
template<size_t Alignment = 1> static inline void writeU16xLE(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kLE, Alignment>(p, x); }
template<size_t Alignment = 1> static inline void writeU16xBE(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kBE, Alignment>(p, x); }

template<size_t Alignment = 1> static inline void writeI32xLE(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kLE, Alignment>(p, x); }
template<size_t Alignment = 1> static inline void writeI32xBE(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kBE, Alignment>(p, x); }
template<size_t Alignment = 1> static inline void writeU32xLE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kLE, Alignment>(p, x); }
template<size_t Alignment = 1> static inline void writeU32xBE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kBE, Alignment>(p, x); }

template<size_t Alignment = 1> static inline void writeI64xLE(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kLE, Alignment>(p, x); }
template<size_t Alignment = 1> static inline void writeI64xBE(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kBE, Alignment>(p, x); }
template<size_t Alignment = 1> static inline void writeU64xLE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kLE, Alignment>(p, x); }
template<size_t Alignment = 1> static inline void writeU64xBE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kBE, Alignment>(p, x); }
+
// Convenience wrappers - same suffix convention as the read helpers:
//   `a` aligned, `u` unaligned, `LE`/`BE` explicit byte order
//   (no suffix = native byte order).
static inline void writeI16a(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kNative, 2>(p, x); }
static inline void writeI16u(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kNative, 1>(p, x); }
static inline void writeU16a(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kNative, 2>(p, x); }
static inline void writeU16u(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kNative, 1>(p, x); }

static inline void writeI16aLE(void* p, int32_t x) noexcept { writeI16xLE<2>(p, x); }
static inline void writeI16uLE(void* p, int32_t x) noexcept { writeI16xLE<1>(p, x); }
static inline void writeU16aLE(void* p, uint32_t x) noexcept { writeU16xLE<2>(p, x); }
static inline void writeU16uLE(void* p, uint32_t x) noexcept { writeU16xLE<1>(p, x); }

static inline void writeI16aBE(void* p, int32_t x) noexcept { writeI16xBE<2>(p, x); }
static inline void writeI16uBE(void* p, int32_t x) noexcept { writeI16xBE<1>(p, x); }
static inline void writeU16aBE(void* p, uint32_t x) noexcept { writeU16xBE<2>(p, x); }
static inline void writeU16uBE(void* p, uint32_t x) noexcept { writeU16xBE<1>(p, x); }

static inline void writeU24uLE(void* p, uint32_t v) noexcept { writeU24u<ByteOrder::kLE>(p, v); }
static inline void writeU24uBE(void* p, uint32_t v) noexcept { writeU24u<ByteOrder::kBE>(p, v); }

static inline void writeI32a(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kNative, 4>(p, x); }
static inline void writeI32u(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kNative, 1>(p, x); }
static inline void writeU32a(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kNative, 4>(p, x); }
static inline void writeU32u(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kNative, 1>(p, x); }

static inline void writeI32aLE(void* p, int32_t x) noexcept { writeI32xLE<4>(p, x); }
static inline void writeI32uLE(void* p, int32_t x) noexcept { writeI32xLE<1>(p, x); }
static inline void writeU32aLE(void* p, uint32_t x) noexcept { writeU32xLE<4>(p, x); }
static inline void writeU32uLE(void* p, uint32_t x) noexcept { writeU32xLE<1>(p, x); }

static inline void writeI32aBE(void* p, int32_t x) noexcept { writeI32xBE<4>(p, x); }
static inline void writeI32uBE(void* p, int32_t x) noexcept { writeI32xBE<1>(p, x); }
static inline void writeU32aBE(void* p, uint32_t x) noexcept { writeU32xBE<4>(p, x); }
static inline void writeU32uBE(void* p, uint32_t x) noexcept { writeU32xBE<1>(p, x); }

static inline void writeI64a(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kNative, 8>(p, x); }
static inline void writeI64u(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kNative, 1>(p, x); }
static inline void writeU64a(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kNative, 8>(p, x); }
static inline void writeU64u(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kNative, 1>(p, x); }

static inline void writeI64aLE(void* p, int64_t x) noexcept { writeI64xLE<8>(p, x); }
static inline void writeI64uLE(void* p, int64_t x) noexcept { writeI64xLE<1>(p, x); }
static inline void writeU64aLE(void* p, uint64_t x) noexcept { writeU64xLE<8>(p, x); }
static inline void writeU64uLE(void* p, uint64_t x) noexcept { writeU64xLE<1>(p, x); }

static inline void writeI64aBE(void* p, int64_t x) noexcept { writeI64xBE<8>(p, x); }
static inline void writeI64uBE(void* p, int64_t x) noexcept { writeI64xBE<1>(p, x); }
static inline void writeU64aBE(void* p, uint64_t x) noexcept { writeU64xBE<8>(p, x); }
static inline void writeU64uBE(void* p, uint64_t x) noexcept { writeU64xBE<1>(p, x); }
+
+// ============================================================================
+// [asmjit::Support - Operators]
+// ============================================================================
+
//! \cond INTERNAL
// Binary operator functors used by the bit-vector utilities below. Each
// exposes a static `op(x, y)`. `Set` and `SetNot` ignore the destination
// operand `x` and produce `y` (or its complement) - used for whole-word
// fills where no masking is needed.
struct Set    { template<typename T> static inline T op(T x, T y) noexcept { DebugUtils::unused(x); return  y; } };
struct SetNot { template<typename T> static inline T op(T x, T y) noexcept { DebugUtils::unused(x); return ~y; } };
struct And    { template<typename T> static inline T op(T x, T y) noexcept { return  x &  y; } };
struct AndNot { template<typename T> static inline T op(T x, T y) noexcept { return  x & ~y; } };
struct NotAnd { template<typename T> static inline T op(T x, T y) noexcept { return ~x &  y; } };
struct Or     { template<typename T> static inline T op(T x, T y) noexcept { return  x |  y; } };
struct Xor    { template<typename T> static inline T op(T x, T y) noexcept { return  x ^  y; } };
struct Add    { template<typename T> static inline T op(T x, T y) noexcept { return  x +  y; } };
struct Sub    { template<typename T> static inline T op(T x, T y) noexcept { return  x -  y; } };
struct Min    { template<typename T> static inline T op(T x, T y) noexcept { return min<T>(x, y); } };
struct Max    { template<typename T> static inline T op(T x, T y) noexcept { return max<T>(x, y); } };
//! \endcond
+
+// ============================================================================
+// [asmjit::Support - BitWordIterator]
+// ============================================================================
+
//! Iterates over each bit in a number which is set to 1.
//!
//! Example of use:
//!
//! ```
//! uint32_t bitsToIterate = 0x110F;
//! Support::BitWordIterator<uint32_t> it(bitsToIterate);
//!
//! while (it.hasNext()) {
//!   uint32_t bitIndex = it.next();
//!   std::printf("Bit at %u is set\n", unsigned(bitIndex));
//! }
//! ```
template<typename T>
class BitWordIterator {
public:
  inline explicit BitWordIterator(T bitWord) noexcept
    : _bitWord(bitWord) {}

  //! Restarts the iteration with a new bit-word.
  inline void init(T bitWord) noexcept { _bitWord = bitWord; }
  //! Returns true if there is at least one more set bit to visit.
  inline bool hasNext() const noexcept { return _bitWord != 0; }

  //! Returns the index of the lowest remaining set bit and clears it so
  //! the iteration advances. Must not be called when `hasNext()` is false.
  inline uint32_t next() noexcept {
    ASMJIT_ASSERT(_bitWord != 0);
    uint32_t index = ctz(_bitWord);
    _bitWord ^= T(1u) << index;
    return index;
  }

  T _bitWord;
};
+
+// ============================================================================
+// [asmjit::Support - BitVectorOps]
+// ============================================================================
+
//! \cond
namespace Internal {
  // Applies `OperatorT::op` to `count` bits of bit-vector `buf` starting at
  // bit-index `index`. `FullWordOpT::op` is used for BitWords that are fully
  // covered by the range (e.g. `Set` for fills), avoiding per-bit masking.
  template<typename T, class OperatorT, class FullWordOpT>
  static inline void bitVectorOp(T* buf, size_t index, size_t count) noexcept {
    if (count == 0)
      return;

    const size_t kTSizeInBits = bitSizeOf<T>();
    size_t vecIndex = index / kTSizeInBits; // T[]
    size_t bitIndex = index % kTSizeInBits; // T[][]

    buf += vecIndex;

    // The first BitWord requires special handling to preserve bits outside the fill region.
    const T kFillMask = allOnes<T>();
    size_t firstNBits = min<size_t>(kTSizeInBits - bitIndex, count);

    buf[0] = OperatorT::op(buf[0], (kFillMask >> (kTSizeInBits - firstNBits)) << bitIndex);
    buf++;
    count -= firstNBits;

    // All bits between the first and last affected BitWords can be just filled.
    while (count >= kTSizeInBits) {
      buf[0] = FullWordOpT::op(buf[0], kFillMask);
      buf++;
      count -= kTSizeInBits;
    }

    // The last BitWord requires special handling as well
    if (count)
      buf[0] = OperatorT::op(buf[0], kFillMask >> (kTSizeInBits - count));
  }
}
//! \endcond
+
//! Returns the bit in a bit-vector `buf` at `index` (the original comment
//! said "Sets", but this function only reads).
template<typename T>
static inline bool bitVectorGetBit(T* buf, size_t index) noexcept {
  const size_t kTSizeInBits = bitSizeOf<T>();

  size_t vecIndex = index / kTSizeInBits;
  size_t bitIndex = index % kTSizeInBits;

  return bool((buf[vecIndex] >> bitIndex) & 0x1u);
}
+
//! Sets bit in a bit-vector `buf` at `index` to `value`.
template<typename T>
static inline void bitVectorSetBit(T* buf, size_t index, bool value) noexcept {
  const size_t kTSizeInBits = bitSizeOf<T>();

  size_t vecIndex = index / kTSizeInBits;
  size_t bitIndex = index % kTSizeInBits;

  T bitMask = T(1u) << bitIndex;
  if (value)
    buf[vecIndex] |= bitMask;
  else
    buf[vecIndex] &= ~bitMask;
}
+
//! Flips (toggles) bit in a bit-vector `buf` at `index` (the original
//! comment was copied from `bitVectorSetBit` and was misleading).
template<typename T>
static inline void bitVectorFlipBit(T* buf, size_t index) noexcept {
  const size_t kTSizeInBits = bitSizeOf<T>();

  size_t vecIndex = index / kTSizeInBits;
  size_t bitIndex = index % kTSizeInBits;

  T bitMask = T(1u) << bitIndex;
  buf[vecIndex] ^= bitMask;
}
+
//! Fills `count` bits in bit-vector `buf` starting at bit-index `index`.
template<typename T>
static inline void bitVectorFill(T* buf, size_t index, size_t count) noexcept { Internal::bitVectorOp<T, Or, Set>(buf, index, count); }

//! Clears `count` bits in bit-vector `buf` starting at bit-index `index`.
template<typename T>
static inline void bitVectorClear(T* buf, size_t index, size_t count) noexcept { Internal::bitVectorOp<T, AndNot, SetNot>(buf, index, count); }
+
//! Returns the bit-index of the first bit equal to `value` in bit-vector
//! `buf`, starting the search at bit-index `start`.
//!
//! NOTE(review): the search loop has no upper bound - if no matching bit
//! exists past `start`, the function keeps reading successive words, so
//! callers must guarantee that a match is present.
template<typename T>
static inline size_t bitVectorIndexOf(T* buf, size_t start, bool value) noexcept {
  const size_t kTSizeInBits = bitSizeOf<T>();
  size_t vecIndex = start / kTSizeInBits; // T[]
  size_t bitIndex = start % kTSizeInBits; // T[][]

  T* p = buf + vecIndex;

  // Normalize so the loop always searches for set bits - when `value` is
  // `false` each word is complemented before the scan.
  const T kFillMask = allOnes<T>();
  const T kFlipMask = value ? T(0) : kFillMask;

  // The first BitWord requires special handling as there are some bits we want to ignore.
  T bits = (*p ^ kFlipMask) & (kFillMask << bitIndex);
  for (;;) {
    if (bits)
      return (size_t)(p - buf) * kTSizeInBits + ctz(bits);
    bits = *++p ^ kFlipMask;
  }
}
+
+// ============================================================================
+// [asmjit::Support - BitVectorIterator]
+// ============================================================================
+
//! Iterates over all set bits of a bit-vector (an array of `numBitWords`
//! BitWords of type `T`) in ascending bit-index order, optionally starting
//! at bit-index `start`.
template<typename T>
class BitVectorIterator {
public:
  const T* _ptr;      // Next word to load (one past the word cached in `_current`).
  size_t _idx;        // Bit-index of bit 0 of the cached word.
  size_t _end;        // Total number of bits in the vector.
  T _current;         // Remaining set bits of the current word; 0 => exhausted.

  ASMJIT_INLINE BitVectorIterator(const BitVectorIterator& other) noexcept = default;

  ASMJIT_INLINE BitVectorIterator(const T* data, size_t numBitWords, size_t start = 0) noexcept {
    init(data, numBitWords, start);
  }

  ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start = 0) noexcept {
    const T* ptr = data + (start / bitSizeOf<T>());
    size_t idx = alignDown(start, bitSizeOf<T>());
    size_t end = numBitWords * bitSizeOf<T>();

    T bitWord = T(0);
    if (idx < end) {
      // Mask off bits below `start`, then skip ahead to the first non-zero word.
      bitWord = *ptr++ & (allOnes<T>() << (start % bitSizeOf<T>()));
      while (!bitWord && (idx += bitSizeOf<T>()) < end)
        bitWord = *ptr++;
    }

    _ptr = ptr;
    _idx = idx;
    _end = end;
    _current = bitWord;
  }

  ASMJIT_INLINE bool hasNext() const noexcept {
    return _current != T(0);
  }

  //! Returns the next set bit-index; `hasNext()` must be true.
  ASMJIT_INLINE size_t next() noexcept {
    T bitWord = _current;
    ASMJIT_ASSERT(bitWord != T(0));

    uint32_t bit = ctz(bitWord);
    bitWord ^= T(1u) << bit;

    // Prefetch the following non-zero word so `hasNext()` stays a test of `_current`.
    size_t n = _idx + bit;
    while (!bitWord && (_idx += bitSizeOf<T>()) < _end)
      bitWord = *_ptr++;

    _current = bitWord;
    return n;
  }

  //! Returns the next set bit-index without consuming it.
  ASMJIT_INLINE size_t peekNext() const noexcept {
    ASMJIT_ASSERT(_current != T(0));
    return _idx + ctz(_current);
  }
};
+
+// ============================================================================
+// [asmjit::Support - BitVectorOpIterator]
+// ============================================================================
+
//! Iterates over set bits of the expression `OperatorT::op(a, b)` applied
//! word-by-word to two bit-vectors, without materializing the combined
//! vector (e.g. `And` iterates the intersection of two bit-sets).
template<typename T, class OperatorT>
class BitVectorOpIterator {
public:
  static constexpr uint32_t kTSizeInBits = bitSizeOf<T>();

  const T* _aPtr;     // Next word of the first vector.
  const T* _bPtr;     // Next word of the second vector.
  size_t _idx;        // Bit-index of bit 0 of the cached combined word.
  size_t _end;        // Total number of bits.
  T _current;         // Remaining set bits of the combined word; 0 => exhausted.

  ASMJIT_INLINE BitVectorOpIterator(const T* aData, const T* bData, size_t numBitWords, size_t start = 0) noexcept {
    init(aData, bData, numBitWords, start);
  }

  ASMJIT_INLINE void init(const T* aData, const T* bData, size_t numBitWords, size_t start = 0) noexcept {
    const T* aPtr = aData + (start / bitSizeOf<T>());
    const T* bPtr = bData + (start / bitSizeOf<T>());
    size_t idx = alignDown(start, bitSizeOf<T>());
    size_t end = numBitWords * bitSizeOf<T>();

    T bitWord = T(0);
    if (idx < end) {
      // Mask off bits below `start`, then skip to the first non-zero combined word.
      bitWord = OperatorT::op(*aPtr++, *bPtr++) & (allOnes<T>() << (start % bitSizeOf<T>()));
      while (!bitWord && (idx += kTSizeInBits) < end)
        bitWord = OperatorT::op(*aPtr++, *bPtr++);
    }

    _aPtr = aPtr;
    _bPtr = bPtr;
    _idx = idx;
    _end = end;
    _current = bitWord;
  }

  ASMJIT_INLINE bool hasNext() noexcept {
    return _current != T(0);
  }

  //! Returns the next set bit-index of the combined vector; `hasNext()` must be true.
  ASMJIT_INLINE size_t next() noexcept {
    T bitWord = _current;
    ASMJIT_ASSERT(bitWord != T(0));

    uint32_t bit = ctz(bitWord);
    bitWord ^= T(1u) << bit;

    size_t n = _idx + bit;
    while (!bitWord && (_idx += kTSizeInBits) < _end)
      bitWord = OperatorT::op(*_aPtr++, *_bPtr++);

    _current = bitWord;
    return n;
  }
};
+
+// ============================================================================
+// [asmjit::Support - Sorting]
+// ============================================================================
+
//! Sort order.
enum SortOrder : uint32_t {
  kSortAscending = 0, //!< Ascending.
  kSortDescending = 1 //!< Descending.
};

//! A helper class that provides comparison of any user-defined type that
//! implements `<` and `>` operators (primitive types are supported as well).
template<uint32_t Order = kSortAscending>
struct Compare {
  //! Three-way comparison: negative / zero / positive as `a` sorts
  //! before / equal-to / after `b` under the requested `Order`.
  template<typename A, typename B>
  inline int operator()(const A& a, const B& b) const noexcept {
    int result = int(a > b) - int(a < b);
    return Order == kSortAscending ? result : -result;
  }
};
+
+//! Insertion sort.
+template<typename T, typename CompareT = Compare<kSortAscending>>
+static inline void iSort(T* base, size_t size, const CompareT& cmp = CompareT()) noexcept {
+ for (T* pm = base + 1; pm < base + size; pm++)
+ for (T* pl = pm; pl > base && cmp(pl[-1], pl[0]) > 0; pl--)
+ std::swap(pl[-1], pl[0]);
+}
+
//! \cond
namespace Internal {
  //! Quick-sort implementation.
  template<typename T, class CompareT>
  struct QSortImpl {
    // Explicit work stack (base/end pairs) instead of recursion - bounds
    // memory use and avoids any heap allocation.
    static constexpr size_t kStackSize = 64 * 2;
    // Subranges at or below this size are finished with insertion sort.
    static constexpr size_t kISortThreshold = 7;

    // Based on "PDCLib - Public Domain C Library" and rewritten to C++.
    static void sort(T* base, size_t size, const CompareT& cmp) noexcept {
      T* end = base + size;
      T* stack[kStackSize];
      T** stackptr = stack;

      for (;;) {
        if ((size_t)(end - base) > kISortThreshold) {
          // We work from second to last - first will be pivot element.
          T* pi = base + 1;
          T* pj = end - 1;
          // Median-of-three: move the middle element first, then order
          // {second, first, last} so `*base` holds the median as pivot.
          std::swap(base[(size_t)(end - base) / 2], base[0]);

          if (cmp(*pi  , *pj  ) > 0) std::swap(*pi  , *pj  );
          if (cmp(*base, *pj  ) > 0) std::swap(*base, *pj  );
          if (cmp(*pi  , *base) > 0) std::swap(*pi  , *base);

          // Now we have the median for pivot element, entering main loop.
          for (;;) {
            while (pi < pj   && cmp(*++pi, *base) < 0) continue; // Move `i` right until `*i >= pivot`.
            while (pj > base && cmp(*--pj, *base) > 0) continue; // Move `j` left until `*j <= pivot`.

            if (pi > pj) break;
            std::swap(*pi, *pj);
          }

          // Move pivot into correct place.
          std::swap(*base, *pj);

          // Larger subfile base / end to stack, sort smaller.
          if (pj - base > end - pi) {
            // Left is larger.
            *stackptr++ = base;
            *stackptr++ = pj;
            base = pi;
          }
          else {
            // Right is larger.
            *stackptr++ = pi;
            *stackptr++ = end;
            end = pj;
          }
          ASMJIT_ASSERT(stackptr <= stack + kStackSize);
        }
        else {
          // UB sanitizer doesn't like applying offset to a nullptr base.
          if (base != end)
            iSort(base, (size_t)(end - base), cmp);

          if (stackptr == stack)
            break;

          end = *--stackptr;
          base = *--stackptr;
        }
      }
    }
  };
}
//! \endcond
+
//! Quick sort implementation.
//!
//! The main reason to provide a custom qsort implementation is that we needed
//! something that will never throw `bad_alloc` exception. This implementation
//! doesn't use dynamic memory allocation - pending subranges are kept on a
//! fixed-size explicit stack (see `Internal::QSortImpl`).
template<typename T, class CompareT = Compare<kSortAscending>>
static inline void qSort(T* base, size_t size, const CompareT& cmp = CompareT()) noexcept {
  Internal::QSortImpl<T, CompareT>::sort(base, size, cmp);
}
+
+// ============================================================================
+// [asmjit::Support - Iterators]
+// ============================================================================
+
//! A simple pointer-based forward iterator over a contiguous array of `T`.
//!
//! Dereferencing yields a reference to the element the internal pointer
//! points at; arithmetic moves the pointer by `n` elements.
template<typename T>
class Iterator {
public:
  constexpr Iterator(T* p) noexcept : _p(p) {}
  constexpr Iterator(const Iterator& other) noexcept = default;

  inline Iterator& operator=(const Iterator& other) noexcept = default;

  inline Iterator operator+(size_t n) const noexcept { return Iterator(_p + n); }
  inline Iterator operator-(size_t n) const noexcept { return Iterator(_p - n); }

  inline Iterator& operator+=(size_t n) noexcept { _p += n; return *this; }
  inline Iterator& operator-=(size_t n) noexcept { _p -= n; return *this; }

  inline Iterator& operator++() noexcept { return operator+=(1); }
  inline Iterator& operator--() noexcept { return operator-=(1); }

  inline Iterator operator++(int) noexcept { T* prev = _p; operator+=(1); return Iterator(prev); }
  inline Iterator operator--(int) noexcept { T* prev = _p; operator-=(1); return Iterator(prev); }

  // Comparisons are `const` - they don't modify the iterator (the previous
  // non-const signatures prevented comparing through a const reference).
  inline bool operator==(const Iterator& other) const noexcept { return _p == other._p; }
  inline bool operator!=(const Iterator& other) const noexcept { return _p != other._p; }

  inline T& operator*() const noexcept { return _p[0]; }

  T* _p;
};
+
//! A pointer-based reverse iterator over a contiguous array of `T`.
//!
//! Constructed from a one-past-the-end pointer; dereferencing yields
//! `_p[-1]` and advancing moves the pointer towards the beginning.
template<typename T>
class ReverseIterator {
public:
  constexpr ReverseIterator(T* p) noexcept : _p(p) {}
  constexpr ReverseIterator(const ReverseIterator& other) noexcept = default;

  inline ReverseIterator& operator=(const ReverseIterator& other) noexcept = default;

  // Fixed: `it + n` previously moved the pointer FORWARD (`_p + n`) while
  // `it += n` moved it backward, so `it + 1` and `it += 1` disagreed.
  // Advancing a reverse iterator must decrease the pointer.
  inline ReverseIterator operator+(size_t n) const noexcept { return ReverseIterator(_p - n); }
  inline ReverseIterator operator-(size_t n) const noexcept { return ReverseIterator(_p + n); }

  inline ReverseIterator& operator+=(size_t n) noexcept { _p -= n; return *this; }
  inline ReverseIterator& operator-=(size_t n) noexcept { _p += n; return *this; }

  inline ReverseIterator& operator++() noexcept { return operator+=(1); }
  inline ReverseIterator& operator--() noexcept { return operator-=(1); }

  inline ReverseIterator operator++(int) noexcept { T* prev = _p; operator+=(1); return ReverseIterator(prev); }
  inline ReverseIterator operator--(int) noexcept { T* prev = _p; operator-=(1); return ReverseIterator(prev); }

  // Comparisons are `const` - they don't modify the iterator.
  inline bool operator==(const ReverseIterator& other) const noexcept { return _p == other._p; }
  inline bool operator!=(const ReverseIterator& other) const noexcept { return _p != other._p; }

  inline T& operator*() const noexcept { return _p[-1]; }

  T* _p;
};
+
+// ============================================================================
+// [asmjit::Support::Temporary]
+// ============================================================================
+
//! A non-owning view of caller-provided scratch memory.
//!
//! Used to pass a temporary buffer to:
//!
//!   - Containers that use user-passed buffer as an initial storage (still can grow).
//!   - Zone allocator that would use the temporary buffer as a first block.
struct Temporary {
  void* _data;   //!< Pointer to the buffer (not owned).
  size_t _size;  //!< Buffer size in bytes.

  //! \name Construction & Destruction
  //! \{

  constexpr Temporary(const Temporary& other) noexcept = default;
  constexpr Temporary(void* data, size_t size) noexcept
    : _data(data), _size(size) {}

  //! \}

  //! \name Overloaded Operators
  //! \{

  inline Temporary& operator=(const Temporary& other) noexcept = default;

  //! \}

  //! \name Accessors
  //! \{

  //! Returns the buffer, optionally cast to `T*`.
  template<typename T = void>
  constexpr T* data() const noexcept { return static_cast<T*>(_data); }

  //! Returns the buffer size in bytes.
  constexpr size_t size() const noexcept { return _size; }

  //! \}
};
+
+} // {Support}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_SUPPORT_H_INCLUDED
diff --git a/client/asmjit/core/target.cpp b/client/asmjit/core/target.cpp
new file mode 100644
index 0000000..9ce94f3
--- /dev/null
+++ b/client/asmjit/core/target.cpp
@@ -0,0 +1,37 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/target.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Target - Construction / Destruction]
+// ============================================================================
+
// Constructs a target with a default (empty) environment; presumably concrete
// targets fill `_environment` themselves - subclasses are not visible here.
Target::Target() noexcept
  : _environment() {}
// Out-of-line definition of the virtual destructor keeps the class' emitted
// vtable anchored in this translation unit.
Target::~Target() noexcept {}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/target.h b/client/asmjit/core/target.h
new file mode 100644
index 0000000..4d144c6
--- /dev/null
+++ b/client/asmjit/core/target.h
@@ -0,0 +1,175 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_TARGET_H_INCLUDED
+#define ASMJIT_CORE_TARGET_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/func.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::CodeInfo]
+// ============================================================================
+
+#ifndef ASMJIT_NO_DEPRECATED
+//! Basic information about a code (or target). It describes its architecture,
+//! code generation mode (or optimization level), and base address.
+class ASMJIT_DEPRECATED_STRUCT("Use Environment instead of CodeInfo") CodeInfo {
+public:
+ //!< Environment information.
+ Environment _environment;
+ //! Base address.
+ uint64_t _baseAddress;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline CodeInfo() noexcept
+ : _environment(),
+ _baseAddress(Globals::kNoBaseAddress) {}
+
+ inline explicit CodeInfo(uint32_t arch, uint32_t subArch = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
+ : _environment(arch, subArch),
+ _baseAddress(baseAddress) {}
+
+ inline explicit CodeInfo(const Environment& environment, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
+ : _environment(environment),
+ _baseAddress(baseAddress) {}
+
+
+ inline CodeInfo(const CodeInfo& other) noexcept { init(other); }
+
+ inline bool isInitialized() const noexcept {
+ return _environment.arch() != Environment::kArchUnknown;
+ }
+
+ inline void init(const CodeInfo& other) noexcept {
+ *this = other;
+ }
+
+ inline void init(uint32_t arch, uint32_t subArch = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept {
+ _environment.init(arch, subArch);
+ _baseAddress = baseAddress;
+ }
+
+ inline void reset() noexcept {
+ _environment.reset();
+ _baseAddress = Globals::kNoBaseAddress;
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline CodeInfo& operator=(const CodeInfo& other) noexcept = default;
+
+ inline bool operator==(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) == 0; }
+ inline bool operator!=(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) != 0; }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the target environment information, see \ref Environment.
+ inline const Environment& environment() const noexcept { return _environment; }
+
+ //! Returns the target architecture, see \ref Environment::Arch.
+ inline uint32_t arch() const noexcept { return _environment.arch(); }
+ //! Returns the target sub-architecture, see \ref Environment::SubArch.
+ inline uint32_t subArch() const noexcept { return _environment.subArch(); }
+ //! Returns the native size of the target's architecture GP register.
+ inline uint32_t gpSize() const noexcept { return _environment.registerSize(); }
+
+ //! Tests whether this CodeInfo has a base address set.
+ inline bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
+ //! Returns the base address or \ref Globals::kNoBaseAddress if it's not set.
+ inline uint64_t baseAddress() const noexcept { return _baseAddress; }
+ //! Sets base address to `p`.
+ inline void setBaseAddress(uint64_t p) noexcept { _baseAddress = p; }
+ //! Resets base address (implicitly sets it to \ref Globals::kNoBaseAddress).
+ inline void resetBaseAddress() noexcept { _baseAddress = Globals::kNoBaseAddress; }
+
+ //! \}
+};
+#endif // !ASMJIT_NO_DEPRECATED
+
+// ============================================================================
+// [asmjit::Target]
+// ============================================================================
+
//! Target is an abstract class that describes a machine code target.
class ASMJIT_VIRTAPI Target {
public:
  ASMJIT_BASE_CLASS(Target)
  ASMJIT_NONCOPYABLE(Target)

  //! Target environment information (architecture, sub-architecture, format).
  Environment _environment;

  //! \name Construction & Destruction
  //! \{

  //! Creates a `Target` instance.
  ASMJIT_API Target() noexcept;
  //! Destroys the `Target` instance.
  ASMJIT_API virtual ~Target() noexcept;

  //! \}

  //! \name Accessors
  //! \{

  //! Returns the environment of this target, see \ref Environment.
  //!
  //! The environment can be used to setup a CodeHolder in case you plan to
  //! generate code compatible with, and executable by, this target.
  inline const Environment& environment() const noexcept { return _environment; }

  //! Returns the target architecture, see \ref Environment::Arch.
  inline uint32_t arch() const noexcept { return _environment.arch(); }
  //! Returns the target sub-architecture, see \ref Environment::SubArch.
  inline uint32_t subArch() const noexcept { return _environment.subArch(); }

#ifndef ASMJIT_NO_DEPRECATED
  //! Compatibility shim that wraps the environment in a deprecated \ref CodeInfo.
  ASMJIT_DEPRECATED("Use environment() instead")
  inline CodeInfo codeInfo() const noexcept { return CodeInfo(_environment); }

  //! Compatibility shim that forwards to the environment's format.
  ASMJIT_DEPRECATED("Use environment().format() instead")
  inline uint32_t targetType() const noexcept { return _environment.format(); }
#endif // !ASMJIT_NO_DEPRECATED

  //! \}
};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_TARGET_H_INCLUDED
diff --git a/client/asmjit/core/type.cpp b/client/asmjit/core/type.cpp
new file mode 100644
index 0000000..97892d9
--- /dev/null
+++ b/client/asmjit/core/type.cpp
@@ -0,0 +1,92 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/misc_p.h"
+#include "../core/type.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Type]
+// ============================================================================
+
+namespace Type {
+
//! Maps TYPE_ID to the type-id of its base (scalar element) type at compile
//! time: scalar base types map to themselves, masks and MMX types map to an
//! integer of the same width, and vector types map to their element type by
//! exploiting the identical slot layout of every vector group (slot k in each
//! group corresponds to element kIdI8 + k). Anything else maps to 0.
template<uint32_t TYPE_ID>
struct BaseOfTypeId {
  static constexpr uint32_t kTypeId =
    isBase (TYPE_ID) ? TYPE_ID :
    isMask8 (TYPE_ID) ? kIdU8 :
    isMask16(TYPE_ID) ? kIdU16 :
    isMask32(TYPE_ID) ? kIdU32 :
    isMask64(TYPE_ID) ? kIdU64 :
    isMmx32 (TYPE_ID) ? kIdI32 :
    isMmx64 (TYPE_ID) ? kIdI64 :
    isVec32 (TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec32Start :
    isVec64 (TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec64Start :
    isVec128(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec128Start :
    isVec256(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec256Start :
    isVec512(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec512Start : 0;
};
+
//! Maps TYPE_ID to its size in bytes at compile time; unknown or void ids map
//! to 0. Vector sizes are the total vector width (e.g. any Vec128 id is 16).
template<uint32_t TYPE_ID>
struct SizeOfTypeId {
  static constexpr uint32_t kTypeSize =
    isInt8 (TYPE_ID) ? 1 :
    isUInt8 (TYPE_ID) ? 1 :
    isInt16 (TYPE_ID) ? 2 :
    isUInt16 (TYPE_ID) ? 2 :
    isInt32 (TYPE_ID) ? 4 :
    isUInt32 (TYPE_ID) ? 4 :
    isInt64 (TYPE_ID) ? 8 :
    isUInt64 (TYPE_ID) ? 8 :
    isFloat32(TYPE_ID) ? 4 :
    isFloat64(TYPE_ID) ? 8 :
    isFloat80(TYPE_ID) ? 10 :
    isMask8 (TYPE_ID) ? 1 :
    isMask16 (TYPE_ID) ? 2 :
    isMask32 (TYPE_ID) ? 4 :
    isMask64 (TYPE_ID) ? 8 :
    isMmx32 (TYPE_ID) ? 4 :
    isMmx64 (TYPE_ID) ? 8 :
    isVec32 (TYPE_ID) ? 4 :
    isVec64 (TYPE_ID) ? 8 :
    isVec128 (TYPE_ID) ? 16 :
    isVec256 (TYPE_ID) ? 32 :
    isVec512 (TYPE_ID) ? 64 : 0;
};
+
// Global lookup tables with one entry per possible type-id (256 entries each):
// the first maps a type-id to its base (element) type-id, the second to its
// size in bytes. Both are expanded at compile time over all 256 ids by the
// ASMJIT_LOOKUP_TABLE_256 macro using the templates above.
const TypeData _typeData = {
  #define VALUE(X) BaseOfTypeId<X>::kTypeId
  { ASMJIT_LOOKUP_TABLE_256(VALUE, 0) },
  #undef VALUE

  #define VALUE(X) SizeOfTypeId<X>::kTypeSize
  { ASMJIT_LOOKUP_TABLE_256(VALUE, 0) }
  #undef VALUE
};
+
+} // {Type}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/type.h b/client/asmjit/core/type.h
new file mode 100644
index 0000000..ef03ecb
--- /dev/null
+++ b/client/asmjit/core/type.h
@@ -0,0 +1,375 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_TYPE_H_INCLUDED
+#define ASMJIT_CORE_TYPE_H_INCLUDED
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::Type]
+// ============================================================================
+
+//! Provides a minimalist type-system that is used by Asmjit library.
+namespace Type {
+
//! TypeId.
//!
//! This is an additional information that can be used to describe a value-type
//! of physical or virtual register. It's used mostly by BaseCompiler to describe
//! register representation (the group of data stored in the register and the
//! width used) and it's also used by APIs that allow to describe and work with
//! function signatures.
//!
//! \note Enumerators prefixed by an underscore (like `_kIdIntStart`) are range
//! markers used by the `isXXX()` predicates below; they are not type ids on
//! their own.
enum Id : uint32_t {
  kIdVoid = 0,          //!< Void type.

  _kIdBaseStart = 32,
  _kIdBaseEnd = 44,

  _kIdIntStart = 32,
  _kIdIntEnd = 41,

  kIdIntPtr = 32,       //!< Abstract signed integer type that has a native size.
  kIdUIntPtr = 33,      //!< Abstract unsigned integer type that has a native size.

  kIdI8 = 34,           //!< 8-bit signed integer type.
  kIdU8 = 35,           //!< 8-bit unsigned integer type.
  kIdI16 = 36,          //!< 16-bit signed integer type.
  kIdU16 = 37,          //!< 16-bit unsigned integer type.
  kIdI32 = 38,          //!< 32-bit signed integer type.
  kIdU32 = 39,          //!< 32-bit unsigned integer type.
  kIdI64 = 40,          //!< 64-bit signed integer type.
  kIdU64 = 41,          //!< 64-bit unsigned integer type.

  _kIdFloatStart = 42,
  _kIdFloatEnd = 44,

  kIdF32 = 42,          //!< 32-bit floating point type.
  kIdF64 = 43,          //!< 64-bit floating point type.
  kIdF80 = 44,          //!< 80-bit floating point type.

  _kIdMaskStart = 45,
  _kIdMaskEnd = 48,

  kIdMask8 = 45,        //!< 8-bit opmask register (K).
  kIdMask16 = 46,       //!< 16-bit opmask register (K).
  kIdMask32 = 47,       //!< 32-bit opmask register (K).
  kIdMask64 = 48,       //!< 64-bit opmask register (K).

  _kIdMmxStart = 49,
  _kIdMmxEnd = 50,

  kIdMmx32 = 49,        //!< 64-bit MMX register only used for 32 bits.
  kIdMmx64 = 50,        //!< 64-bit MMX register.

  // Each vector group below uses the same slot layout (slot k holds the
  // vector whose element type is kIdI8 + k); `BaseOfTypeId` relies on that.

  _kIdVec32Start = 51,
  _kIdVec32End = 60,

  kIdI8x4 = 51,         //!< 32-bit vector of 4 x int8.
  kIdU8x4 = 52,         //!< 32-bit vector of 4 x uint8.
  kIdI16x2 = 53,        //!< 32-bit vector of 2 x int16.
  kIdU16x2 = 54,        //!< 32-bit vector of 2 x uint16.
  kIdI32x1 = 55,        //!< 32-bit vector of 1 x int32.
  kIdU32x1 = 56,        //!< 32-bit vector of 1 x uint32.
  kIdF32x1 = 59,        //!< 32-bit vector of 1 x float32.
                        // Slots 57, 58, and 60 are unassigned - 64-bit element
                        // types cannot form a 32-bit vector.

  _kIdVec64Start = 61,
  _kIdVec64End = 70,

  kIdI8x8 = 61,         //!< 64-bit vector of 8 x int8.
  kIdU8x8 = 62,         //!< 64-bit vector of 8 x uint8.
  kIdI16x4 = 63,        //!< 64-bit vector of 4 x int16.
  kIdU16x4 = 64,        //!< 64-bit vector of 4 x uint16.
  kIdI32x2 = 65,        //!< 64-bit vector of 2 x int32.
  kIdU32x2 = 66,        //!< 64-bit vector of 2 x uint32.
  kIdI64x1 = 67,        //!< 64-bit vector of 1 x int64.
  kIdU64x1 = 68,        //!< 64-bit vector of 1 x uint64.
  kIdF32x2 = 69,        //!< 64-bit vector of 2 x float32.
  kIdF64x1 = 70,        //!< 64-bit vector of 1 x float64.

  _kIdVec128Start = 71,
  _kIdVec128End = 80,

  kIdI8x16 = 71,        //!< 128-bit vector of 16 x int8.
  kIdU8x16 = 72,        //!< 128-bit vector of 16 x uint8.
  kIdI16x8 = 73,        //!< 128-bit vector of 8 x int16.
  kIdU16x8 = 74,        //!< 128-bit vector of 8 x uint16.
  kIdI32x4 = 75,        //!< 128-bit vector of 4 x int32.
  kIdU32x4 = 76,        //!< 128-bit vector of 4 x uint32.
  kIdI64x2 = 77,        //!< 128-bit vector of 2 x int64.
  kIdU64x2 = 78,        //!< 128-bit vector of 2 x uint64.
  kIdF32x4 = 79,        //!< 128-bit vector of 4 x float32.
  kIdF64x2 = 80,        //!< 128-bit vector of 2 x float64.

  _kIdVec256Start = 81,
  _kIdVec256End = 90,

  kIdI8x32 = 81,        //!< 256-bit vector of 32 x int8.
  kIdU8x32 = 82,        //!< 256-bit vector of 32 x uint8.
  kIdI16x16 = 83,       //!< 256-bit vector of 16 x int16.
  kIdU16x16 = 84,       //!< 256-bit vector of 16 x uint16.
  kIdI32x8 = 85,        //!< 256-bit vector of 8 x int32.
  kIdU32x8 = 86,        //!< 256-bit vector of 8 x uint32.
  kIdI64x4 = 87,        //!< 256-bit vector of 4 x int64.
  kIdU64x4 = 88,        //!< 256-bit vector of 4 x uint64.
  kIdF32x8 = 89,        //!< 256-bit vector of 8 x float32.
  kIdF64x4 = 90,        //!< 256-bit vector of 4 x float64.

  _kIdVec512Start = 91,
  _kIdVec512End = 100,

  kIdI8x64 = 91,        //!< 512-bit vector of 64 x int8.
  kIdU8x64 = 92,        //!< 512-bit vector of 64 x uint8.
  kIdI16x32 = 93,       //!< 512-bit vector of 32 x int16.
  kIdU16x32 = 94,       //!< 512-bit vector of 32 x uint16.
  kIdI32x16 = 95,       //!< 512-bit vector of 16 x int32.
  kIdU32x16 = 96,       //!< 512-bit vector of 16 x uint32.
  kIdI64x8 = 97,        //!< 512-bit vector of 8 x int64.
  kIdU64x8 = 98,        //!< 512-bit vector of 8 x uint64.
  kIdF32x16 = 99,       //!< 512-bit vector of 16 x float32.
  kIdF64x8 = 100,       //!< 512-bit vector of 8 x float64.

  kIdCount = 101,       //!< Count of assigned type ids.
  kIdMax = 255          //!< Highest representable type id (lookup-table size minus one).
};
+
//! Lookup tables that describe every representable type-id (0..kIdMax).
struct TypeData {
  uint8_t baseOf[kIdMax + 1];   //!< Maps a type-id to its base (scalar element) type-id, 0 if none.
  uint8_t sizeOf[kIdMax + 1];   //!< Maps a type-id to its size in bytes, 0 if unknown/void.
};
//! Global type tables (defined in type.cpp).
ASMJIT_VARAPI const TypeData _typeData;
+
// Basic classification.

//! Tests whether `typeId` is void.
static constexpr bool isVoid(uint32_t typeId) noexcept { return typeId == 0; }
//! Tests whether `typeId` is a valid (assigned, non-void) type id.
static constexpr bool isValid(uint32_t typeId) noexcept { return typeId >= _kIdIntStart && typeId <= _kIdVec512End; }
//! Tests whether `typeId` is a scalar base type (integer, float, or abstract pointer).
static constexpr bool isBase(uint32_t typeId) noexcept { return typeId >= _kIdBaseStart && typeId <= _kIdBaseEnd; }
//! Tests whether `typeId` is abstract (kIdIntPtr / kIdUIntPtr), i.e. must be
//! translated to a concrete type by `deabstract()` before its size is known.
static constexpr bool isAbstract(uint32_t typeId) noexcept { return typeId >= kIdIntPtr && typeId <= kIdUIntPtr; }

// Scalar integer checks (exact type id, except `isInt`, which covers the range).
static constexpr bool isInt(uint32_t typeId) noexcept { return typeId >= _kIdIntStart && typeId <= _kIdIntEnd; }
static constexpr bool isInt8(uint32_t typeId) noexcept { return typeId == kIdI8; }
static constexpr bool isUInt8(uint32_t typeId) noexcept { return typeId == kIdU8; }
static constexpr bool isInt16(uint32_t typeId) noexcept { return typeId == kIdI16; }
static constexpr bool isUInt16(uint32_t typeId) noexcept { return typeId == kIdU16; }
static constexpr bool isInt32(uint32_t typeId) noexcept { return typeId == kIdI32; }
static constexpr bool isUInt32(uint32_t typeId) noexcept { return typeId == kIdU32; }
static constexpr bool isInt64(uint32_t typeId) noexcept { return typeId == kIdI64; }
static constexpr bool isUInt64(uint32_t typeId) noexcept { return typeId == kIdU64; }

// GP register width checks (match either signedness of the given width).
static constexpr bool isGp8(uint32_t typeId) noexcept { return typeId >= kIdI8 && typeId <= kIdU8; }
static constexpr bool isGp16(uint32_t typeId) noexcept { return typeId >= kIdI16 && typeId <= kIdU16; }
static constexpr bool isGp32(uint32_t typeId) noexcept { return typeId >= kIdI32 && typeId <= kIdU32; }
static constexpr bool isGp64(uint32_t typeId) noexcept { return typeId >= kIdI64 && typeId <= kIdU64; }

// Floating point checks.
static constexpr bool isFloat(uint32_t typeId) noexcept { return typeId >= _kIdFloatStart && typeId <= _kIdFloatEnd; }
static constexpr bool isFloat32(uint32_t typeId) noexcept { return typeId == kIdF32; }
static constexpr bool isFloat64(uint32_t typeId) noexcept { return typeId == kIdF64; }
static constexpr bool isFloat80(uint32_t typeId) noexcept { return typeId == kIdF80; }

// Opmask (K register) checks.
static constexpr bool isMask(uint32_t typeId) noexcept { return typeId >= _kIdMaskStart && typeId <= _kIdMaskEnd; }
static constexpr bool isMask8(uint32_t typeId) noexcept { return typeId == kIdMask8; }
static constexpr bool isMask16(uint32_t typeId) noexcept { return typeId == kIdMask16; }
static constexpr bool isMask32(uint32_t typeId) noexcept { return typeId == kIdMask32; }
static constexpr bool isMask64(uint32_t typeId) noexcept { return typeId == kIdMask64; }

// MMX checks.
static constexpr bool isMmx(uint32_t typeId) noexcept { return typeId >= _kIdMmxStart && typeId <= _kIdMmxEnd; }
static constexpr bool isMmx32(uint32_t typeId) noexcept { return typeId == kIdMmx32; }
static constexpr bool isMmx64(uint32_t typeId) noexcept { return typeId == kIdMmx64; }

// Vector checks, by total vector width in bits.
static constexpr bool isVec(uint32_t typeId) noexcept { return typeId >= _kIdVec32Start && typeId <= _kIdVec512End; }
static constexpr bool isVec32(uint32_t typeId) noexcept { return typeId >= _kIdVec32Start && typeId <= _kIdVec32End; }
static constexpr bool isVec64(uint32_t typeId) noexcept { return typeId >= _kIdVec64Start && typeId <= _kIdVec64End; }
static constexpr bool isVec128(uint32_t typeId) noexcept { return typeId >= _kIdVec128Start && typeId <= _kIdVec128End; }
static constexpr bool isVec256(uint32_t typeId) noexcept { return typeId >= _kIdVec256Start && typeId <= _kIdVec256End; }
static constexpr bool isVec512(uint32_t typeId) noexcept { return typeId >= _kIdVec512Start && typeId <= _kIdVec512End; }
+
//! \cond
//! Coarse category of a C++ type; selects the matching `IdOfT_ByCategory`
//! specialization below.
enum TypeCategory : uint32_t {
  kTypeCategoryUnknown = 0,
  kTypeCategoryEnum = 1,
  kTypeCategoryIntegral = 2,
  kTypeCategoryFloatingPoint = 3,
  kTypeCategoryFunction = 4
};

//! Maps C++ type `T` belonging to `Category` to a TypeId.
template<typename T, uint32_t Category>
struct IdOfT_ByCategory {}; // Fails if not specialized.

//! Integral types map by size and signedness; sizes other than 1/2/4/8 bytes
//! map to `kIdVoid`.
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryIntegral> {
  enum : uint32_t {
    kTypeId = (sizeof(T) == 1 && std::is_signed<T>::value) ? kIdI8 :
              (sizeof(T) == 1 && !std::is_signed<T>::value) ? kIdU8 :
              (sizeof(T) == 2 && std::is_signed<T>::value) ? kIdI16 :
              (sizeof(T) == 2 && !std::is_signed<T>::value) ? kIdU16 :
              (sizeof(T) == 4 && std::is_signed<T>::value) ? kIdI32 :
              (sizeof(T) == 4 && !std::is_signed<T>::value) ? kIdU32 :
              (sizeof(T) == 8 && std::is_signed<T>::value) ? kIdI64 :
              (sizeof(T) == 8 && !std::is_signed<T>::value) ? kIdU64 : kIdVoid
  };
};

//! Floating point types map by size; any type of 10+ bytes maps to `kIdF80`.
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryFloatingPoint> {
  enum : uint32_t {
    kTypeId = (sizeof(T) == 4 ) ? kIdF32 :
              (sizeof(T) == 8 ) ? kIdF64 :
              (sizeof(T) >= 10) ? kIdF80 : kIdVoid
  };
};

//! Enums map the same way as their underlying integral type.
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryEnum>
  : public IdOfT_ByCategory<typename std::underlying_type<T>::type, kTypeCategoryIntegral> {};

//! Function types map to a pointer-sized unsigned integer.
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryFunction> {
  enum: uint32_t { kTypeId = kIdUIntPtr };
};
//! \endcond

//! IdOfT<> template allows to get a TypeId from a C++ type `T`.
template<typename T>
struct IdOfT
#ifdef _DOXYGEN
  //! TypeId of C++ type `T`.
  static constexpr uint32_t kTypeId = _TypeIdDeducedAtCompileTime_;
#else
  : public IdOfT_ByCategory<T,
    std::is_enum<T>::value ? kTypeCategoryEnum :
    std::is_integral<T>::value ? kTypeCategoryIntegral :
    std::is_floating_point<T>::value ? kTypeCategoryFloatingPoint :
    std::is_function<T>::value ? kTypeCategoryFunction : kTypeCategoryUnknown>
#endif
{};

//! \cond
//! Pointer and reference types always map to a pointer-sized unsigned integer.
template<typename T>
struct IdOfT<T*> { enum : uint32_t { kTypeId = kIdUIntPtr }; };

template<typename T>
struct IdOfT<T&> { enum : uint32_t { kTypeId = kIdUIntPtr }; };
//! \endcond
+
//! Returns the base (scalar element) type-id of `typeId` via the global
//! lookup table (`_typeData.baseOf`); 0 means the id has no base type.
static inline uint32_t baseOf(uint32_t typeId) noexcept {
  ASMJIT_ASSERT(typeId <= kIdMax);
  return _typeData.baseOf[typeId];
}

//! Returns the size of `typeId` in bytes via the global lookup table
//! (`_typeData.sizeOf`); 0 means unknown or void.
static inline uint32_t sizeOf(uint32_t typeId) noexcept {
  ASMJIT_ASSERT(typeId <= kIdMax);
  return _typeData.sizeOf[typeId];
}
+
//! Returns offset needed to convert a `kIntPtr` and `kUIntPtr` TypeId
//! into a type that matches `registerSize` (general-purpose register size).
//! If you find such TypeId it's then only about adding the offset to it.
//!
//! For example:
//!
//! ```
//! uint32_t registerSize = '4' or '8';
//! uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize);
//!
//! uint32_t typeId = 'some type-id';
//!
//! // Normalize some typeId into a non-abstract typeId.
//! if (Type::isAbstract(typeId)) typeId += deabstractDelta;
//!
//! // The same, but by using Type::deabstract() function.
//! typeId = Type::deabstract(typeId, deabstractDelta);
//! ```
static constexpr uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept {
  // kIdIntPtr/kIdUIntPtr are adjacent to each other exactly like kIdI32/kIdU32
  // and kIdI64/kIdU64, so one delta translates both the signed and unsigned id.
  return registerSize >= 8 ? kIdI64 - kIdIntPtr : kIdI32 - kIdIntPtr;
}

//! Translates an abstract `typeId` by `deabstractDelta` (as computed by
//! \ref deabstractDeltaOfSize); non-abstract ids are returned unchanged.
static constexpr uint32_t deabstract(uint32_t typeId, uint32_t deabstractDelta) noexcept {
  return isAbstract(typeId) ? typeId + deabstractDelta : typeId;
}
+
// Empty tag types that name C-level types in template contexts; each is
// mapped to its TypeId by an ASMJIT_DEFINE_TYPE_ID specialization below.

//! bool as C++ type-name.
struct Bool {};
//! int8_t as C++ type-name.
struct I8 {};
//! uint8_t as C++ type-name.
struct U8 {};
//! int16_t as C++ type-name.
struct I16 {};
//! uint16_t as C++ type-name.
struct U16 {};
//! int32_t as C++ type-name.
struct I32 {};
//! uint32_t as C++ type-name.
struct U32 {};
//! int64_t as C++ type-name.
struct I64 {};
//! uint64_t as C++ type-name.
struct U64 {};
//! intptr_t as C++ type-name.
struct IPtr {};
//! uintptr_t as C++ type-name.
struct UPtr {};
//! float as C++ type-name.
struct F32 {};
//! double as C++ type-name.
struct F64 {};
+
+} // {Type}
+
+// ============================================================================
+// [ASMJIT_DEFINE_TYPE_ID]
+// ============================================================================
+
//! \cond
//! Specializes `Type::IdOfT<T>` so that the C++ type `T` maps to `TYPE_ID`.
#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \
namespace Type { \
  template<> \
  struct IdOfT<T> { \
    enum : uint32_t { kTypeId = TYPE_ID }; \
  }; \
}

// Mappings for `void` and the tag types declared above. Note that `Bool` maps
// to `kIdU8` as there is no dedicated boolean type id.
ASMJIT_DEFINE_TYPE_ID(void, kIdVoid);
ASMJIT_DEFINE_TYPE_ID(Bool, kIdU8);
ASMJIT_DEFINE_TYPE_ID(I8  , kIdI8);
ASMJIT_DEFINE_TYPE_ID(U8  , kIdU8);
ASMJIT_DEFINE_TYPE_ID(I16 , kIdI16);
ASMJIT_DEFINE_TYPE_ID(U16 , kIdU16);
ASMJIT_DEFINE_TYPE_ID(I32 , kIdI32);
ASMJIT_DEFINE_TYPE_ID(U32 , kIdU32);
ASMJIT_DEFINE_TYPE_ID(I64 , kIdI64);
ASMJIT_DEFINE_TYPE_ID(U64 , kIdU64);
ASMJIT_DEFINE_TYPE_ID(IPtr, kIdIntPtr);
ASMJIT_DEFINE_TYPE_ID(UPtr, kIdUIntPtr);
ASMJIT_DEFINE_TYPE_ID(F32 , kIdF32);
ASMJIT_DEFINE_TYPE_ID(F64 , kIdF64);
//! \endcond
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_TYPE_H_INCLUDED
diff --git a/client/asmjit/core/virtmem.cpp b/client/asmjit/core/virtmem.cpp
new file mode 100644
index 0000000..0606748
--- /dev/null
+++ b/client/asmjit/core/virtmem.cpp
@@ -0,0 +1,589 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/osutils.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/virtmem.h"
+
+#if !defined(_WIN32)
+ #include <errno.h>
+ #include <fcntl.h>
+ #include <sys/mman.h>
+ #include <sys/stat.h>
+ #include <sys/types.h>
+ #include <unistd.h>
+
+ // Linux has a `memfd_create` syscall that we would like to use, if available.
+ #if defined(__linux__)
+ #include <sys/syscall.h>
+ #endif
+
+ // Apple recently introduced MAP_JIT flag, which we want to use.
+ #if defined(__APPLE__)
+ #include <TargetConditionals.h>
+ #if TARGET_OS_OSX
+ #include <sys/utsname.h>
+ #endif
+ // Older SDK doesn't define `MAP_JIT`.
+ #ifndef MAP_JIT
+ #define MAP_JIT 0x800
+ #endif
+ #endif
+
+ // BSD/OSX: `MAP_ANONYMOUS` is not defined, `MAP_ANON` is.
+ #if !defined(MAP_ANONYMOUS)
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+#endif
+
+#include <atomic>
+
+#if defined(__APPLE__)
+ #define ASMJIT_VM_SHM_DETECT 0
+#else
+ #define ASMJIT_VM_SHM_DETECT 1
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::VirtMem - Utilities]
+// ============================================================================
+
// Access flags stripped from each of the two views of a dual mapping:
// the first view ([0], `ro`) never receives WRITE access and the second
// view ([1], `rw`) never receives EXECUTE access.
static const uint32_t VirtMem_dualMappingFilter[2] = {
  VirtMem::kAccessWrite,
  VirtMem::kAccessExecute
};
+
+// ============================================================================
+// [asmjit::VirtMem - Virtual Memory [Windows]]
+// ============================================================================
+
+#if defined(_WIN32)
+struct ScopedHandle {
+ inline ScopedHandle() noexcept
+ : value(nullptr) {}
+
+ inline ~ScopedHandle() noexcept {
+ if (value != nullptr)
+ ::CloseHandle(value);
+ }
+
+ HANDLE value;
+};
+
// Queries page size and allocation granularity from the OS via `GetSystemInfo()`.
static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
  SYSTEM_INFO systemInfo;

  ::GetSystemInfo(&systemInfo);
  // Defensively round the reported page size up to a power of 2.
  vmInfo.pageSize = Support::alignUpPowerOf2<uint32_t>(systemInfo.dwPageSize);
  vmInfo.pageGranularity = systemInfo.dwAllocationGranularity;
}
+
// Windows specific implementation that uses `VirtualAlloc` and `VirtualFree`.
// Translates VirtMem access flags into a single `PAGE_*` protection constant.
static DWORD VirtMem_accessToWinProtectFlags(uint32_t flags) noexcept {
  DWORD protectFlags;

  // READ|WRITE|EXECUTE. Windows protections are not independent bits, so the
  // strongest combination present in `flags` selects the constant.
  if (flags & VirtMem::kAccessExecute)
    protectFlags = (flags & VirtMem::kAccessWrite) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
  else if (flags & VirtMem::kAccessReadWrite)
    protectFlags = (flags & VirtMem::kAccessWrite) ? PAGE_READWRITE : PAGE_READONLY;
  else
    protectFlags = PAGE_NOACCESS;

  // Any other flags to consider?
  return protectFlags;
}
+
// Translates VirtMem access flags into `FILE_MAP_*` flags for `MapViewOfFile()`.
// FILE_MAP_WRITE implies read access, hence only one of WRITE/READ is chosen.
static DWORD VirtMem_accessToWinDesiredAccess(uint32_t flags) noexcept {
  DWORD access = (flags & VirtMem::kAccessWrite) ? FILE_MAP_WRITE : FILE_MAP_READ;
  if (flags & VirtMem::kAccessExecute)
    access |= FILE_MAP_EXECUTE;
  return access;
}
+
// Allocates `size` bytes of virtual memory with the access given by `flags`.
// On success `*p` receives the base address; on failure it stays nullptr.
Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
  *p = nullptr;
  // A zero-sized allocation is rejected up-front instead of being passed to the OS.
  if (size == 0)
    return DebugUtils::errored(kErrorInvalidArgument);

  DWORD protectFlags = VirtMem_accessToWinProtectFlags(flags);
  // Reserve and commit the region in a single call.
  void* result = ::VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, protectFlags);

  if (!result)
    return DebugUtils::errored(kErrorOutOfMemory);

  *p = result;
  return kErrorOk;
}
+
// Releases a region previously allocated by `VirtMem::alloc()`. The `size`
// argument is unused on Windows - `MEM_RELEASE` with size 0 frees the whole
// reservation identified by the base pointer.
Error VirtMem::release(void* p, size_t size) noexcept {
  DebugUtils::unused(size);
  if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, MEM_RELEASE)))
    return DebugUtils::errored(kErrorInvalidArgument);
  return kErrorOk;
}
+
// Changes the protection of pages covering `[p, p + size)` to match `flags`.
Error VirtMem::protect(void* p, size_t size, uint32_t flags) noexcept {
  DWORD protectFlags = VirtMem_accessToWinProtectFlags(flags);
  DWORD oldFlags;

  if (::VirtualProtect(p, size, protectFlags, &oldFlags))
    return kErrorOk;

  return DebugUtils::errored(kErrorInvalidArgument);
}
+
+Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept {
+ dm->ro = nullptr;
+ dm->rw = nullptr;
+
+ if (size == 0)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ ScopedHandle handle;
+ handle.value = ::CreateFileMappingW(
+ INVALID_HANDLE_VALUE,
+ nullptr,
+ PAGE_EXECUTE_READWRITE,
+ (DWORD)(uint64_t(size) >> 32),
+ (DWORD)(size & 0xFFFFFFFFu),
+ nullptr);
+
+ if (ASMJIT_UNLIKELY(!handle.value))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ void* ptr[2];
+ for (uint32_t i = 0; i < 2; i++) {
+ DWORD desiredAccess = VirtMem_accessToWinDesiredAccess(flags & ~VirtMem_dualMappingFilter[i]);
+ ptr[i] = ::MapViewOfFile(handle.value, desiredAccess, 0, 0, size);
+
+ if (ptr[i] == nullptr) {
+ if (i == 0)
+ ::UnmapViewOfFile(ptr[0]);
+ return DebugUtils::errored(kErrorOutOfMemory);
+ }
+ }
+
+ dm->ro = ptr[0];
+ dm->rw = ptr[1];
+ return kErrorOk;
+}
+
// Unmaps both views of a dual mapping. Both unmaps are attempted even if the
// first one fails; a single error is reported at the end and the pointers are
// cleared only on full success.
Error VirtMem::releaseDualMapping(DualMapping* dm, size_t size) noexcept {
  DebugUtils::unused(size);
  bool failed = false;

  if (!::UnmapViewOfFile(dm->ro))
    failed = true;

  // Guards against `ro` and `rw` referring to the same view - in that case
  // only a single unmap is performed.
  if (dm->ro != dm->rw && !UnmapViewOfFile(dm->rw))
    failed = true;

  if (failed)
    return DebugUtils::errored(kErrorInvalidArgument);

  dm->ro = nullptr;
  dm->rw = nullptr;
  return kErrorOk;
}
+#endif
+
+// ============================================================================
+// [asmjit::VirtMem - Virtual Memory [Posix]]
+// ============================================================================
+
+#if !defined(_WIN32)
// RAII wrapper that closes a POSIX file descriptor (if one was acquired) when
// the scope is left.
struct ScopedFD {
  int value = -1;

  inline ScopedFD() noexcept = default;

  inline ~ScopedFD() noexcept {
    if (value != -1)
      close(value);
  }
};
+
// Queries the OS page size; the allocation granularity is the page size
// clamped to a minimum of 64kB.
static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
  uint32_t pageSize = uint32_t(::getpagesize());

  vmInfo.pageSize = pageSize;
  vmInfo.pageGranularity = Support::max<uint32_t>(pageSize, 65536);
}
+
// Some operating systems don't allow /dev/shm to be executable. On Linux this
// happens when /dev/shm is mounted with 'noexec', which is enforced by systemd.
// Other operating systems like OSX also restrict executable permissions regarding
// /dev/shm, so we use a runtime detection before trying to allocate the requested
// memory by the user. Sometimes we don't need the detection as we know it would
// always result in 'kShmStrategyTmpDir'.
enum ShmStrategy : uint32_t {
  kShmStrategyUnknown = 0, //!< Strategy not detected yet.
  kShmStrategyDevShm = 1,  //!< POSIX shared memory (/dev/shm) is usable (executable).
  kShmStrategyTmpDir = 2   //!< Fall back to a file created in a temporary directory.
};
+
// Posix specific implementation that uses `mmap()` and `munmap()`.
// Translates VirtMem access flags into POSIX `PROT_*` flags. Note that both
// WRITE and EXECUTE also set PROT_READ here - write-only or execute-only
// mappings are never requested.
static int VirtMem_accessToPosixProtection(uint32_t flags) noexcept {
  int protection = 0;
  if (flags & VirtMem::kAccessRead   ) protection |= PROT_READ;
  if (flags & VirtMem::kAccessWrite  ) protection |= PROT_READ | PROT_WRITE;
  if (flags & VirtMem::kAccessExecute) protection |= PROT_READ | PROT_EXEC;
  return protection;
}
+
+// Translates libc errors specific to VirtualMemory mapping to `asmjit::Error`.
+static Error VirtMem_makeErrorFromErrno(int e) noexcept {
+ switch (e) {
+ case EACCES:
+ case EAGAIN:
+ case ENODEV:
+ case EPERM:
+ return kErrorInvalidState;
+
+ case EFBIG:
+ case ENOMEM:
+ case EOVERFLOW:
+ return kErrorOutOfMemory;
+
+ case EMFILE:
+ case ENFILE:
+ return kErrorTooManyHandles;
+
+ default:
+ return kErrorInvalidArgument;
+ }
+}
+
+#if defined(__APPLE__)
// Detects whether the current process is hardened, which means that pages that
// have WRITE and EXECUTABLE flags cannot be allocated without MAP_JIT flag.
static ASMJIT_INLINE bool VirtMem_isHardened() noexcept {
  // Cached detection result. `volatile` only forces re-reads; the race here
  // is benign - concurrent first callers just run the probe twice and store
  // the same value.
  static volatile uint32_t globalHardenedFlag;

  enum HardenedFlag : uint32_t {
    kHardenedFlagUnknown = 0,
    kHardenedFlagDisabled = 1,
    kHardenedFlagEnabled = 2
  };

  uint32_t flag = globalHardenedFlag;
  if (flag == kHardenedFlagUnknown) {
    VirtMem::Info memInfo;
    VirtMem_getInfo(memInfo);

    // Probe: try to map a single W+X page without MAP_JIT. If the kernel
    // refuses, the process is hardened.
    void* ptr = mmap(nullptr, memInfo.pageSize, PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED) {
      flag = kHardenedFlagEnabled;
    }
    else {
      flag = kHardenedFlagDisabled;
      munmap(ptr, memInfo.pageSize);
    }
    globalHardenedFlag = flag;
  }

  return flag == kHardenedFlagEnabled;
}
+
// MAP_JIT flag required to run unsigned JIT code is only supported by kernel
// version 10.14+ (Mojave) and IOS.
static ASMJIT_INLINE bool VirtMem_hasMapJitSupport() noexcept {
#if TARGET_OS_OSX
  // Cached Darwin major version; zero means "not detected yet". Benign race,
  // same as in `VirtMem_isHardened()`.
  static volatile int globalVersion;

  int ver = globalVersion;
  if (!ver) {
    struct utsname osname;
    uname(&osname);
    ver = atoi(osname.release);
    globalVersion = ver;
  }
  // Darwin 18 corresponds to macOS 10.14 (Mojave).
  return ver >= 18;
#else
  // Assume it's available.
  return true;
#endif
}
+
+static ASMJIT_INLINE int VirtMem_appleSpecificMMapFlags(uint32_t flags) {
+ // Always use MAP_JIT flag if user asked for it (could be used for testing
+ // on non-hardened processes) and detect whether it must be used when the
+ // process is actually hardened (in that case it doesn't make sense to rely
+ // on user `flags`).
+ bool useMapJit = ((flags & VirtMem::kMMapEnableMapJit) != 0) || VirtMem_isHardened();
+ if (useMapJit)
+ return VirtMem_hasMapJitSupport() ? int(MAP_JIT) : 0;
+ else
+ return 0;
+}
+#else
+static ASMJIT_INLINE int VirtMem_appleSpecificMMapFlags(uint32_t flags) {
+ DebugUtils::unused(flags);
+ return 0;
+}
+#endif
+
// Returns the temporary directory honoring the `TMPDIR` environment
// variable, falling back to "/tmp" when it's not set.
static const char* VirtMem_getTmpDir() noexcept {
  if (const char* tmpDir = getenv("TMPDIR"))
    return tmpDir;
  return "/tmp";
}
+
+static Error VirtMem_openAnonymousMemory(int* fd, bool preferTmpOverDevShm) noexcept {
+#if defined(SYS_memfd_create)
+ // Linux specific 'memfd_create' - if the syscall returns `ENOSYS` it means
+ // it's not available and we will never call it again (would be pointless).
+
+ // Zero initialized, if ever changed to '1' that would mean the syscall is not
+ // available and we must use `shm_open()` and `shm_unlink()`.
+ static volatile uint32_t memfd_create_not_supported;
+
+ if (!memfd_create_not_supported) {
+ *fd = (int)syscall(SYS_memfd_create, "vmem", 0);
+ if (ASMJIT_LIKELY(*fd >= 0))
+ return kErrorOk;
+
+ int e = errno;
+ if (e == ENOSYS)
+ memfd_create_not_supported = 1;
+ else
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(e));
+ }
+#endif
+
+#if defined(SHM_ANON)
+ // Originally FreeBSD extension, apparently works in other BSDs too.
+ DebugUtils::unused(preferTmpOverDevShm);
+ *fd = shm_open(SHM_ANON, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
+
+ if (ASMJIT_LIKELY(*fd >= 0))
+ return kErrorOk;
+ else
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(errno));
+#else
+ // POSIX API. We have to generate somehow a unique name. This is nothing
+ // cryptographic, just using a bit from the stack address to always have
+ // a different base for different threads (as threads have their own stack)
+ // and retries for avoiding collisions. We use `shm_open()` with flags that
+ // require creation of the file so we never open an existing shared memory.
+ static std::atomic<uint32_t> internalCounter;
+
+ StringTmp<128> uniqueName;
+ const char* kShmFormat = "/shm-id-%08llX";
+
+ uint32_t kRetryCount = 100;
+ uint64_t bits = ((uintptr_t)(void*)&uniqueName) & 0x55555555u;
+
+ for (uint32_t i = 0; i < kRetryCount; i++) {
+ bits -= uint64_t(OSUtils::getTickCount()) * 773703683;
+ bits = ((bits >> 14) ^ (bits << 6)) + uint64_t(++internalCounter) * 10619863;
+
+ if (!ASMJIT_VM_SHM_DETECT || preferTmpOverDevShm) {
+ uniqueName.assign(VirtMem_getTmpDir());
+ uniqueName.appendFormat(kShmFormat, (unsigned long long)bits);
+ *fd = open(uniqueName.data(), O_RDWR | O_CREAT | O_EXCL, 0);
+ if (ASMJIT_LIKELY(*fd >= 0)) {
+ unlink(uniqueName.data());
+ return kErrorOk;
+ }
+ }
+ else {
+ uniqueName.assignFormat(kShmFormat, (unsigned long long)bits);
+ *fd = shm_open(uniqueName.data(), O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
+ if (ASMJIT_LIKELY(*fd >= 0)) {
+ shm_unlink(uniqueName.data());
+ return kErrorOk;
+ }
+ }
+
+ int e = errno;
+ if (e == EEXIST)
+ continue;
+ else
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(e));
+ }
+ return kErrorOk;
+#endif
+}
+
+#if ASMJIT_VM_SHM_DETECT
// Detects whether an executable mapping backed by anonymous shared memory
// works on this system: creates a one-page object and tries to map it with
// PROT_READ | PROT_EXEC. An EINVAL failure selects the 'tmp dir' strategy
// (the code treats it as "executable mapping rejected here"); success
// selects '/dev/shm'.
static Error VirtMem_detectShmStrategy(uint32_t* strategyOut) noexcept {
  // ScopedFD closes the probe descriptor on all return paths.
  ScopedFD fd;
  VirtMem::Info vmInfo = VirtMem::info();

  ASMJIT_PROPAGATE(VirtMem_openAnonymousMemory(&fd.value, false));
  if (ftruncate(fd.value, off_t(vmInfo.pageSize)) != 0)
    return DebugUtils::errored(VirtMem_makeErrorFromErrno(errno));

  void* ptr = mmap(nullptr, vmInfo.pageSize, PROT_READ | PROT_EXEC, MAP_SHARED, fd.value, 0);
  if (ptr == MAP_FAILED) {
    int e = errno;
    if (e == EINVAL) {
      *strategyOut = kShmStrategyTmpDir;
      return kErrorOk;
    }
    return DebugUtils::errored(VirtMem_makeErrorFromErrno(e));
  }
  else {
    // The probe mapping is only needed for detection - release it.
    munmap(ptr, vmInfo.pageSize);
    *strategyOut = kShmStrategyDevShm;
    return kErrorOk;
  }
}
+#endif
+
+#if ASMJIT_VM_SHM_DETECT
// Returns the shared memory strategy, running the runtime detection once and
// caching its result.
static Error VirtMem_getShmStrategy(uint32_t* strategyOut) noexcept {
  // Initially don't assume anything. It has to be tested whether
  // '/dev/shm' was mounted with 'noexec' flag or not.
  // NOTE(review): benign race - concurrent first callers may both run the
  // detection and store the same result.
  static volatile uint32_t globalShmStrategy = kShmStrategyUnknown;

  uint32_t strategy = globalShmStrategy;
  if (strategy == kShmStrategyUnknown) {
    ASMJIT_PROPAGATE(VirtMem_detectShmStrategy(&strategy));
    globalShmStrategy = strategy;
  }

  *strategyOut = strategy;
  return kErrorOk;
}
+#else
// Detection disabled at compile time (ASMJIT_VM_SHM_DETECT == 0) - the
// temporary-directory strategy is always used.
static Error VirtMem_getShmStrategy(uint32_t* strategyOut) noexcept {
  *strategyOut = kShmStrategyTmpDir;
  return kErrorOk;
}
+#endif
+
+Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
+ *p = nullptr;
+
+ if (size == 0)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ int protection = VirtMem_accessToPosixProtection(flags);
+ int mmFlags = MAP_PRIVATE | MAP_ANONYMOUS | VirtMem_appleSpecificMMapFlags(flags);
+ void* ptr = mmap(nullptr, size, protection, mmFlags, -1, 0);
+
+ if (ptr == MAP_FAILED)
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ *p = ptr;
+ return kErrorOk;
+}
+
+Error VirtMem::release(void* p, size_t size) noexcept {
+ if (ASMJIT_UNLIKELY(munmap(p, size) != 0))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ return kErrorOk;
+}
+
+
+Error VirtMem::protect(void* p, size_t size, uint32_t flags) noexcept {
+ int protection = VirtMem_accessToPosixProtection(flags);
+ if (mprotect(p, size, protection) == 0)
+ return kErrorOk;
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
// Allocates a dual mapping - two views of the same anonymous shared memory
// object, mapped with different access masks (see the loop below). Both
// output pointers are nullptr on failure.
Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept {
  dm->ro = nullptr;
  dm->rw = nullptr;

  // `ftruncate()` below takes `off_t`, so reject sizes that don't fit into it.
  if (off_t(size) <= 0)
    return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge);

  bool preferTmpOverDevShm = (flags & kMappingPreferTmp) != 0;
  if (!preferTmpOverDevShm) {
    uint32_t strategy;
    ASMJIT_PROPAGATE(VirtMem_getShmStrategy(&strategy));
    preferTmpOverDevShm = (strategy == kShmStrategyTmpDir);
  }

  // ScopedFD will automatically close the file descriptor in its destructor.
  ScopedFD fd;
  ASMJIT_PROPAGATE(VirtMem_openAnonymousMemory(&fd.value, preferTmpOverDevShm));
  if (ftruncate(fd.value, off_t(size)) != 0)
    return DebugUtils::errored(VirtMem_makeErrorFromErrno(errno));

  // Map the object twice: ptr[0] becomes `ro`, ptr[1] becomes `rw`. Each
  // mapping masks some access bits via `VirtMem_dualMappingFilter` (defined
  // elsewhere; presumably strips write access from [0] and execute from [1]
  // - TODO confirm against its definition).
  void* ptr[2];
  for (uint32_t i = 0; i < 2; i++) {
    ptr[i] = mmap(nullptr, size, VirtMem_accessToPosixProtection(flags & ~VirtMem_dualMappingFilter[i]), MAP_SHARED, fd.value, 0);
    if (ptr[i] == MAP_FAILED) {
      // Get the error now before `munmap` has a chance to clobber it.
      int e = errno;
      if (i == 1)
        munmap(ptr[0], size);
      return DebugUtils::errored(VirtMem_makeErrorFromErrno(e));
    }
  }

  dm->ro = ptr[0];
  dm->rw = ptr[1];
  return kErrorOk;
}
+
+Error VirtMem::releaseDualMapping(DualMapping* dm, size_t size) noexcept {
+ Error err = release(dm->ro, size);
+ if (dm->ro != dm->rw)
+ err |= release(dm->rw, size);
+
+ if (err)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ dm->ro = nullptr;
+ dm->rw = nullptr;
+ return kErrorOk;
+}
+#endif
+
+// ============================================================================
+// [asmjit::VirtMem - Virtual Memory [Memory Info]]
+// ============================================================================
+
// Returns cached virtual memory information; the OS query runs only once.
VirtMem::Info VirtMem::info() noexcept {
  static VirtMem::Info vmInfo;
  static std::atomic<uint32_t> vmInfoInitialized;

  if (!vmInfoInitialized.load()) {
    // NOTE(review): two threads may both run the query concurrently on first
    // use; both would store identical values, so the race is benign.
    VirtMem::Info localMemInfo;
    VirtMem_getInfo(localMemInfo);

    vmInfo = localMemInfo;
    vmInfoInitialized.store(1u);
  }

  return vmInfo;
}
+
+ASMJIT_END_NAMESPACE
+
+#endif
diff --git a/client/asmjit/core/virtmem.h b/client/asmjit/core/virtmem.h
new file mode 100644
index 0000000..8d3ee01
--- /dev/null
+++ b/client/asmjit/core/virtmem.h
@@ -0,0 +1,145 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_VIRTMEM_H_INCLUDED
+#define ASMJIT_CORE_VIRTMEM_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_virtual_memory
+//! \{
+
+// ============================================================================
+// [asmjit::VirtMem]
+// ============================================================================
+
+//! Virtual memory management.
+namespace VirtMem {
+
//! Virtual memory and memory mapping flags.
//!
//! Access flags can be combined; `kAccessWrite` and `kAccessExecute` both
//! imply read access in the underlying implementation.
enum Flags : uint32_t {
  //! No access flags.
  kAccessNone = 0x00000000u,
  //! Memory is readable.
  kAccessRead = 0x00000001u,
  //! Memory is writable (implies read access).
  kAccessWrite = 0x00000002u,
  //! Memory is executable (implies read access).
  kAccessExecute = 0x00000004u,

  //! A combination of `kAccessRead | kAccessWrite`.
  kAccessReadWrite = 0x00000003u,

  //! Use a `MAP_JIT` flag available on Apple platforms (OSX Mojave+), which
  //! allows JIT code to be executed in OSX bundles. This flag is not turned
  //! on by default, because when a process uses `fork()` the child process
  //! has no access to the pages mapped with `MAP_JIT`, which could break code
  //! that doesn't expect this behavior.
  kMMapEnableMapJit = 0x00000010u,

  //! Not an access flag, only used by `allocDualMapping()` to override the
  //! default allocation strategy to always use a 'tmp' directory instead of
  //! "/dev/shm" (on POSIX platforms). Please note that this flag will be
  //! ignored if the operating system allows to allocate an executable memory
  //! by a different API than `open()` or `shm_open()`. For example on Linux
  //! `memfd_create()` is preferred and on BSDs `shm_open(SHM_ANON, ...)` is
  //! used if SHM_ANON is defined.
  kMappingPreferTmp = 0x80000000u
};
+
//! Virtual memory information.
struct Info {
  //! Virtual memory page size (in bytes).
  uint32_t pageSize;
  //! Virtual memory page granularity (in bytes).
  uint32_t pageGranularity;
};
+
//! Dual memory mapping used to map an anonymous memory into two memory regions
//! where one region is read-only, but executable, and the second region is
//! read+write, but not executable. Please see \ref VirtMem::allocDualMapping()
//! for more details.
//!
//! \note Both views are backed by the same underlying memory object, so data
//! written through `rw` is observable through `ro`.
struct DualMapping {
  //! Pointer to data with 'Read' or 'Read+Execute' access.
  void* ro;
  //! Pointer to data with 'Read-Write' access, but never 'Write+Execute'.
  void* rw;
};
+
+//! Returns virtual memory information, see `VirtMem::Info` for more details.
+ASMJIT_API Info info() noexcept;
+
+//! Allocates virtual memory by either using `VirtualAlloc()` (Windows)
+//! or `mmap()` (POSIX).
+//!
+//! \note `size` should be aligned to a page size, use \ref VirtMem::info()
+//! to obtain it. Invalid size will not be corrected by the implementation
+//! and the allocation would not succeed in such case.
+ASMJIT_API Error alloc(void** p, size_t size, uint32_t flags) noexcept;
+
+//! Releases virtual memory previously allocated by \ref VirtMem::alloc() or
+//! \ref VirtMem::allocDualMapping().
+//!
+//! \note The size must be the same as used by \ref VirtMem::alloc(). If the
//! size is not the same value the call will fail on any POSIX system, but
//! will succeed on Windows, because of implementation differences.
+ASMJIT_API Error release(void* p, size_t size) noexcept;
+
+//! A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect`
+//! (Windows).
+ASMJIT_API Error protect(void* p, size_t size, uint32_t flags) noexcept;
+
+//! Allocates virtual memory and creates two views of it where the first view
+//! has no write access. This is an addition to the API that should be used
+//! in cases in which the operating system either enforces W^X security policy
+//! or the application wants to use this policy by default to improve security
//! and to prevent accidental (or purposeful) self-modifying code.
+//!
+//! The memory returned in the `dm` are two independent mappings of the same
+//! shared memory region. You must use \ref VirtMem::releaseDualMapping() to
+//! release it when it's no longer needed. Never use `VirtMem::release()` to
+//! release the memory returned by `allocDualMapping()` as that would fail on
+//! Windows.
+//!
+//! \remarks Both pointers in `dm` would be set to `nullptr` if the function fails.
+ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept;
+
+//! Releases the virtual memory mapping previously allocated by
+//! \ref VirtMem::allocDualMapping().
+//!
+//! \remarks Both pointers in `dm` would be set to `nullptr` if the function succeeds.
+ASMJIT_API Error releaseDualMapping(DualMapping* dm, size_t size) noexcept;
+
+} // VirtMem
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif
+#endif // ASMJIT_CORE_VIRTMEM_H_INCLUDED
diff --git a/client/asmjit/core/zone.cpp b/client/asmjit/core/zone.cpp
new file mode 100644
index 0000000..61f7cec
--- /dev/null
+++ b/client/asmjit/core/zone.cpp
@@ -0,0 +1,382 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Zone - Statics]
+// ============================================================================
+
// Zero size block used by `Zone` that doesn't have any memory allocated.
// Should be allocated in read-only memory and should never be modified.
// NOTE(review): the initializer presumably corresponds to { prev, next, size }
// - confirm against `Zone::Block` in zone.h.
const Zone::Block Zone::_zeroBlock = { nullptr, nullptr, 0 };
+
+// ============================================================================
+// [asmjit::Zone - Init / Reset]
+// ============================================================================
+
// Initializes the zone: stores the default block size and alignment, resets
// the state to the shared zero block, and optionally adopts a caller-provided
// `temporary` buffer as the first block.
void Zone::_init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept {
  ASMJIT_ASSERT(blockSize >= kMinBlockSize);
  ASMJIT_ASSERT(blockSize <= kMaxBlockSize);
  ASMJIT_ASSERT(blockAlignment <= 64);

  // Just to make the compiler happy...
  constexpr size_t kBlockSizeMask = (Support::allOnes<size_t>() >> 4);
  constexpr size_t kBlockAlignmentShiftMask = 0x7u;

  _assignZeroBlock();
  // The masks only clamp the values into their storage ranges; the asserts
  // above already guarantee they fit.
  _blockSize = blockSize & kBlockSizeMask;
  _isTemporary = temporary != nullptr;
  // Alignment is stored as a shift (log2 via ctz); assumes `blockAlignment`
  // is a power of 2 - TODO confirm with callers.
  _blockAlignmentShift = Support::ctz(blockAlignment) & kBlockAlignmentShiftMask;

  // Setup the first [temporary] block, if necessary.
  if (temporary) {
    Block* block = temporary->data<Block>();
    block->prev = nullptr;
    block->next = nullptr;

    // The buffer must be large enough to hold the block header itself.
    ASMJIT_ASSERT(temporary->size() >= kBlockSize);
    block->size = temporary->size() - kBlockSize;

    _assignBlock(block);
  }
}
+
// Resets the zone. `Globals::kResetHard` frees all dynamically allocated
// blocks (keeping a statically provided temporary first block, if any);
// any other policy performs a soft reset that rewinds to the first block so
// its memory can be reused.
void Zone::reset(uint32_t resetPolicy) noexcept {
  Block* cur = _block;

  // Can't be altered.
  if (cur == &_zeroBlock)
    return;

  if (resetPolicy == Globals::kResetHard) {
    Block* initial = const_cast<Zone::Block*>(&_zeroBlock);
    _ptr = initial->data();
    _end = initial->data();
    _block = initial;

    // Since cur can be in the middle of the double-linked list, we have to
    // traverse both directions (`prev` and `next`) separately to visit all.
    Block* next = cur->next;
    do {
      Block* prev = cur->prev;

      // If this is the first block and this ZoneTmp is temporary then the
      // first block is statically allocated. We cannot free it and it makes
      // sense to keep it even when this is hard reset.
      if (prev == nullptr && _isTemporary) {
        cur->prev = nullptr;
        cur->next = nullptr;
        _assignBlock(cur);
        break;
      }

      ::free(cur);
      cur = prev;
    } while (cur);

    // Now release the forward direction (blocks after the starting one).
    cur = next;
    while (cur) {
      next = cur->next;
      ::free(cur);
      cur = next;
    }
  }
  else {
    // Soft reset - rewind to the head of the block list and reuse its memory.
    while (cur->prev)
      cur = cur->prev;
    _assignBlock(cur);
  }
}
+
+// ============================================================================
+// [asmjit::Zone - Alloc]
+// ============================================================================
+
// Slow path of `Zone::alloc()` - called when the current block doesn't have
// enough space. Either advances into an already-allocated `next` block (left
// over after a soft reset) or mallocs a new block and links it into the
// double-linked list. Returns nullptr on out-of-memory or size overflow.
void* Zone::_alloc(size_t size, size_t alignment) noexcept {
  Block* curBlock = _block;
  Block* next = curBlock->next;

  size_t rawBlockAlignment = blockAlignment();
  size_t minimumAlignment = Support::max<size_t>(alignment, rawBlockAlignment);

  // If the `Zone` has been cleared the current block doesn't have to be the
  // last one. Check if there is a block that can be used instead of allocating
  // a new one. If there is a `next` block it's completely unused, we don't have
  // to check for remaining bytes in that case.
  if (next) {
    uint8_t* ptr = Support::alignUp(next->data(), minimumAlignment);
    uint8_t* end = Support::alignDown(next->data() + next->size, rawBlockAlignment);

    if (size <= (size_t)(end - ptr)) {
      _block = next;
      _ptr = ptr + size;
      _end = Support::alignDown(next->data() + next->size, rawBlockAlignment);
      return static_cast<void*>(ptr);
    }
  }

  // Worst-case extra bytes needed to satisfy `alignment` beyond what malloc
  // already guarantees (`Globals::kAllocAlignment`).
  size_t blockAlignmentOverhead = alignment - Support::min<size_t>(alignment, Globals::kAllocAlignment);
  size_t newSize = Support::max(blockSize(), size);

  // Prevent arithmetic overflow.
  if (ASMJIT_UNLIKELY(newSize > SIZE_MAX - kBlockSize - blockAlignmentOverhead))
    return nullptr;

  // Allocate new block - we add alignment overhead to `newSize`, which becomes the
  // new block size, and we also add `kBlockOverhead` to the allocator as it includes
  // members of `Zone::Block` structure.
  newSize += blockAlignmentOverhead;
  Block* newBlock = static_cast<Block*>(::malloc(newSize + kBlockSize));

  if (ASMJIT_UNLIKELY(!newBlock))
    return nullptr;

  // Align the pointer to `minimumAlignment` and adjust the size of this block
  // accordingly. It's the same as using `minimumAlignment - Support::alignUpDiff()`,
  // just written differently.
  {
    newBlock->prev = nullptr;
    newBlock->next = nullptr;
    newBlock->size = newSize;

    // Link the new block after the current one (unless the zone was still
    // using the shared zero block, which must never be modified).
    if (curBlock != &_zeroBlock) {
      newBlock->prev = curBlock;
      curBlock->next = newBlock;

      // Does only happen if there is a next block, but the requested memory
      // can't fit into it. In this case a new buffer is allocated and inserted
      // between the current block and the next one.
      if (next) {
        newBlock->next = next;
        next->prev = newBlock;
      }
    }

    uint8_t* ptr = Support::alignUp(newBlock->data(), minimumAlignment);
    uint8_t* end = Support::alignDown(newBlock->data() + newSize, rawBlockAlignment);

    _ptr = ptr + size;
    _end = end;
    _block = newBlock;

    ASMJIT_ASSERT(_ptr <= _end);
    return static_cast<void*>(ptr);
  }
}
+
+void* Zone::allocZeroed(size_t size, size_t alignment) noexcept {
+ void* p = alloc(size, alignment);
+ if (ASMJIT_UNLIKELY(!p))
+ return p;
+ return memset(p, 0, size);
+}
+
+void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
+ if (ASMJIT_UNLIKELY(!data || !size))
+ return nullptr;
+
+ ASMJIT_ASSERT(size != SIZE_MAX);
+ uint8_t* m = allocT<uint8_t>(size + nullTerminate);
+ if (ASMJIT_UNLIKELY(!m)) return nullptr;
+
+ memcpy(m, data, size);
+ if (nullTerminate) m[size] = '\0';
+
+ return static_cast<void*>(m);
+}
+
+char* Zone::sformat(const char* fmt, ...) noexcept {
+ if (ASMJIT_UNLIKELY(!fmt))
+ return nullptr;
+
+ char buf[512];
+ size_t size;
+ va_list ap;
+
+ va_start(ap, fmt);
+ size = unsigned(vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap));
+ va_end(ap);
+
+ buf[size++] = 0;
+ return static_cast<char*>(dup(buf, size));
+}
+
+// ============================================================================
+// [asmjit::ZoneAllocator - Helpers]
+// ============================================================================
+
+#if defined(ASMJIT_BUILD_DEBUG)
+static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept {
+ ZoneAllocator::DynamicBlock* cur = self->_dynamicBlocks;
+ while (cur) {
+ if (cur == block)
+ return true;
+ cur = cur->next;
+ }
+ return false;
+}
+#endif
+
+// ============================================================================
+// [asmjit::ZoneAllocator - Init / Reset]
+// ============================================================================
+
// Releases all dynamically allocated blocks and re-initializes the allocator
// to serve allocations from `zone`.
void ZoneAllocator::reset(Zone* zone) noexcept {
  // Free dynamic blocks.
  DynamicBlock* block = _dynamicBlocks;
  while (block) {
    DynamicBlock* next = block->next;
    ::free(block);
    block = next;
  }

  // Zero the entire class and initialize to the given `zone`.
  // NOTE(review): memset over `this` assumes ZoneAllocator has only trivially
  // copyable members - confirm if members are ever added.
  memset(this, 0, sizeof(*this));
  _zone = zone;
}
+
+// ============================================================================
+// [asmjit::ZoneAllocator - Alloc / Release]
+// ============================================================================
+
// Allocates `size` bytes. Small sizes are served from per-size-class free
// lists ("slots") backed by the zone; sizes that don't map to a slot get a
// dedicated malloc'd dynamic block. `allocatedSize` receives the real
// (possibly rounded-up) size of the returned chunk.
void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
  ASMJIT_ASSERT(isInitialized());

  // Use the memory pool only if the requested block has a reasonable size.
  uint32_t slot;
  if (_getSlotIndex(size, slot, allocatedSize)) {
    // Slot reuse - pop the head of this size class's free list if non-empty.
    uint8_t* p = reinterpret_cast<uint8_t*>(_slots[slot]);
    size = allocatedSize;

    if (p) {
      _slots[slot] = reinterpret_cast<Slot*>(p)->next;
      return p;
    }

    // No free chunk - carve one directly from the zone's current block.
    _zone->align(kBlockAlignment);
    p = _zone->ptr();
    size_t remain = (size_t)(_zone->end() - p);

    if (ASMJIT_LIKELY(remain >= size)) {
      _zone->setPtr(p + size);
      return p;
    }
    else {
      // Distribute the remaining memory to suitable slots, if possible.
      if (remain >= kLoGranularity) {
        do {
          size_t distSize = Support::min<size_t>(remain, kLoMaxSize);
          uint32_t distSlot = uint32_t((distSize - kLoGranularity) / kLoGranularity);
          ASMJIT_ASSERT(distSlot < kLoCount);

          reinterpret_cast<Slot*>(p)->next = _slots[distSlot];
          _slots[distSlot] = reinterpret_cast<Slot*>(p);

          p += distSize;
          remain -= distSize;
        } while (remain >= kLoGranularity);
        _zone->setPtr(p);
      }

      // Ask the zone for a fresh block and serve the chunk from it.
      p = static_cast<uint8_t*>(_zone->_alloc(size, kBlockAlignment));
      if (ASMJIT_UNLIKELY(!p)) {
        allocatedSize = 0;
        return nullptr;
      }

      return p;
    }
  }
  else {
    // Allocate a dynamic block.
    size_t kBlockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;

    // Handle a possible overflow.
    if (ASMJIT_UNLIKELY(kBlockOverhead >= SIZE_MAX - size))
      return nullptr;

    void* p = ::malloc(size + kBlockOverhead);
    if (ASMJIT_UNLIKELY(!p)) {
      allocatedSize = 0;
      return nullptr;
    }

    // Link as first in `_dynamicBlocks` double-linked list.
    DynamicBlock* block = static_cast<DynamicBlock*>(p);
    DynamicBlock* next = _dynamicBlocks;

    if (next)
      next->prev = block;

    block->prev = nullptr;
    block->next = next;
    _dynamicBlocks = block;

    // Align the pointer to the guaranteed alignment and store `DynamicBlock`
    // at the beginning of the memory block, so `_releaseDynamic()` can find it.
    p = Support::alignUp(static_cast<uint8_t*>(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kBlockAlignment);
    reinterpret_cast<DynamicBlock**>(p)[-1] = block;

    allocatedSize = size;
    return p;
  }
}
+
+void* ZoneAllocator::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+
+ void* p = _alloc(size, allocatedSize);
+ if (ASMJIT_UNLIKELY(!p)) return p;
+ return memset(p, 0, allocatedSize);
+}
+
// Returns a dynamically allocated chunk (one that didn't map to a slot) back
// to the system. `size` is unused - the owning `DynamicBlock` is recovered
// from the hidden pointer stored just before `p` by `_alloc()`.
void ZoneAllocator::_releaseDynamic(void* p, size_t size) noexcept {
  DebugUtils::unused(size);
  ASMJIT_ASSERT(isInitialized());

  // Pointer to `DynamicBlock` is stored at [-1].
  DynamicBlock* block = reinterpret_cast<DynamicBlock**>(p)[-1];
  ASMJIT_ASSERT(ZoneAllocator_hasDynamicBlock(this, block));

  // Unlink and free.
  DynamicBlock* prev = block->prev;
  DynamicBlock* next = block->next;

  if (prev)
    prev->next = next;
  else
    _dynamicBlocks = next;

  if (next)
    next->prev = prev;

  ::free(block);
}
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/zone.h b/client/asmjit/core/zone.h
new file mode 100644
index 0000000..52e9f12
--- /dev/null
+++ b/client/asmjit/core/zone.h
@@ -0,0 +1,649 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONE_H_INCLUDED
+#define ASMJIT_CORE_ZONE_H_INCLUDED
+
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::Zone]
+// ============================================================================
+
+//! Zone memory.
+//!
+//! Zone is an incremental memory allocator that allocates memory by simply
+//! incrementing a pointer. It allocates blocks of memory by using C's `malloc()`,
+//! but divides these blocks into smaller segments requested by calling
+//! `Zone::alloc()` and friends.
+//!
+//! Zone has no function to release the allocated memory. It has to be released
+//! all at once by calling `reset()`. If you need a more friendly allocator that
+//! also supports `release()`, consider using `Zone` with `ZoneAllocator`.
+class Zone {
+public:
+ ASMJIT_NONCOPYABLE(Zone)
+
+ //! \cond INTERNAL
+
+ //! A single block of memory managed by `Zone`.
+ struct Block {
+ inline uint8_t* data() const noexcept {
+ return const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(this) + sizeof(*this));
+ }
+
+ //! Link to the previous block.
+ Block* prev;
+ //! Link to the next block.
+ Block* next;
+ //! Size of the block.
+ size_t size;
+ };
+
+ enum Limits : size_t {
+ kBlockSize = sizeof(Block),
+ kBlockOverhead = Globals::kAllocOverhead + kBlockSize,
+
+ kMinBlockSize = 64, // The number is ridiculously small, but still possible.
+ kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 4 - 1),
+ kMinAlignment = 1,
+ kMaxAlignment = 64
+ };
+
+ //! Pointer in the current block.
+ uint8_t* _ptr;
+ //! End of the current block.
+ uint8_t* _end;
+ //! Current block.
+ Block* _block;
+
+ union {
+ struct {
+ //! Default block size.
+ size_t _blockSize : Support::bitSizeOf<size_t>() - 4;
+ //! First block is temporary (ZoneTmp).
+ size_t _isTemporary : 1;
+ //! Block alignment (1 << alignment).
+ size_t _blockAlignmentShift : 3;
+ };
+ size_t _packedData;
+ };
+
+ static ASMJIT_API const Block _zeroBlock;
+
+ //! \endcond
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new Zone.
+ //!
+ //! The `blockSize` parameter describes the default size of the block. If the
+ //! `size` parameter passed to `alloc()` is greater than the default size
+ //! `Zone` will allocate and use a larger block, but it will not change the
+ //! default `blockSize`.
+ //!
+ //! It's not required, but it's good practice to set `blockSize` to a
+ //! reasonable value that depends on the usage of `Zone`. Greater block sizes
+ //! are generally safer and perform better than unreasonably low block sizes.
+ ASMJIT_INLINE explicit Zone(size_t blockSize, size_t blockAlignment = 1) noexcept {
+ _init(blockSize, blockAlignment, nullptr);
+ }
+
+ ASMJIT_INLINE Zone(size_t blockSize, size_t blockAlignment, const Support::Temporary& temporary) noexcept {
+ _init(blockSize, blockAlignment, &temporary);
+ }
+
+ //! Moves an existing `Zone`.
+ //!
+ //! \note You cannot move an existing `ZoneTmp` as it uses embedded storage.
+ //! Attempting to move `ZoneTmp` would result in assertion failure in debug
+ //! mode and undefined behavior in release mode.
+ ASMJIT_INLINE Zone(Zone&& other) noexcept
+ : _ptr(other._ptr),
+ _end(other._end),
+ _block(other._block),
+ _packedData(other._packedData) {
+ ASMJIT_ASSERT(!other.isTemporary());
+ other._block = const_cast<Block*>(&_zeroBlock);
+ other._ptr = other._block->data();
+ other._end = other._block->data();
+ }
+
+ //! Destroys the `Zone` instance.
+ //!
+ //! This will destroy the `Zone` instance and release all blocks of memory
+ //! allocated by it. It performs implicit `reset(Globals::kResetHard)`.
+ ASMJIT_INLINE ~Zone() noexcept { reset(Globals::kResetHard); }
+
+ ASMJIT_API void _init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept;
+
+ //! Resets the `Zone` invalidating all blocks allocated.
+ //!
+ //! See `Globals::ResetPolicy` for more details.
+ ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory.
+ ASMJIT_INLINE bool isTemporary() const noexcept { return _isTemporary != 0; }
+
+ //! Returns the default block size.
+ ASMJIT_INLINE size_t blockSize() const noexcept { return _blockSize; }
+ //! Returns the default block alignment.
+ ASMJIT_INLINE size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; }
+ //! Returns remaining size of the current block.
+ ASMJIT_INLINE size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); }
+
+ //! Returns the current zone cursor (dangerous).
+ //!
+ //! This is a function that can be used to get exclusive access to the current
+ //! block's memory buffer.
+ template<typename T = uint8_t>
+ ASMJIT_INLINE T* ptr() noexcept { return reinterpret_cast<T*>(_ptr); }
+
+ //! Returns the end of the current zone block, only useful if you use `ptr()`.
+ template<typename T = uint8_t>
+ ASMJIT_INLINE T* end() noexcept { return reinterpret_cast<T*>(_end); }
+
+ //! Sets the current zone pointer to `ptr` (must be within the current block).
+ template<typename T>
+ ASMJIT_INLINE void setPtr(T* ptr) noexcept {
+ uint8_t* p = reinterpret_cast<uint8_t*>(ptr);
+ ASMJIT_ASSERT(p >= _ptr && p <= _end);
+ _ptr = p;
+ }
+
+ //! Sets the end zone pointer to `end` (must be within the current block).
+ template<typename T>
+ ASMJIT_INLINE void setEnd(T* end) noexcept {
+ uint8_t* p = reinterpret_cast<uint8_t*>(end);
+ ASMJIT_ASSERT(p >= _ptr && p <= _end);
+ _end = p;
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ ASMJIT_INLINE void swap(Zone& other) noexcept {
+ // This could lead to a disaster.
+ ASMJIT_ASSERT(!this->isTemporary());
+ ASMJIT_ASSERT(!other.isTemporary());
+
+ std::swap(_ptr, other._ptr);
+ std::swap(_end, other._end);
+ std::swap(_block, other._block);
+ std::swap(_packedData, other._packedData);
+ }
+
+ //! Aligns the current pointer to `alignment`.
+ ASMJIT_INLINE void align(size_t alignment) noexcept {
+ _ptr = Support::min(Support::alignUp(_ptr, alignment), _end);
+ }
+
+ //! Ensures the remaining size is at least equal or greater than `size`.
+ //!
+ //! \note This function doesn't respect any alignment. If you need to ensure
+ //! there is enough room for an aligned allocation you need to call `align()`
+ //! before calling `ensure()`.
+ ASMJIT_INLINE Error ensure(size_t size) noexcept {
+ if (size <= remainingSize())
+ return kErrorOk;
+ else
+ return _alloc(0, 1) ? kErrorOk : DebugUtils::errored(kErrorOutOfMemory);
+ }
+
+ ASMJIT_INLINE void _assignBlock(Block* block) noexcept {
+ size_t alignment = blockAlignment();
+ _ptr = Support::alignUp(block->data(), alignment);
+ _end = Support::alignDown(block->data() + block->size, alignment);
+ _block = block;
+ }
+
+ ASMJIT_INLINE void _assignZeroBlock() noexcept {
+ Block* block = const_cast<Block*>(&_zeroBlock);
+ _ptr = block->data();
+ _end = block->data();
+ _block = block;
+ }
+
+ //! \}
+
+ //! \name Allocation
+ //! \{
+
+ //! Allocates the requested memory specified by `size`.
+ //!
+ //! Pointer returned is valid until the `Zone` instance is destroyed or reset
+ //! by calling `reset()`. If you plan to make an instance of C++ from the
+ //! given pointer use placement `new` and `delete` operators:
+ //!
+ //! ```
+ //! using namespace asmjit;
+ //!
+ //! class Object { ... };
+ //!
+ //! // Create Zone with default block size of approximately 65536 bytes.
+ //! Zone zone(65536 - Zone::kBlockOverhead);
+ //!
+ //! // Create your objects using zone object allocating, for example:
+ //! Object* obj = static_cast<Object*>( zone.alloc(sizeof(Object)) );
+ //!
+ //! if (!obj) {
+ //! // Handle out of memory error.
+ //! }
+ //!
+ //! // Placement `new` and `delete` operators can be used to instantiate it.
+ //! new(obj) Object();
+ //!
+ //! // ... lifetime of your objects ...
+ //!
+ //! // To destroy the instance (if required).
+ //! obj->~Object();
+ //!
+ //! // Reset or destroy `Zone`.
+ //! zone.reset();
+ //! ```
+ ASMJIT_INLINE void* alloc(size_t size) noexcept {
+ if (ASMJIT_UNLIKELY(size > remainingSize()))
+ return _alloc(size, 1);
+
+ uint8_t* ptr = _ptr;
+ _ptr += size;
+ return static_cast<void*>(ptr);
+ }
+
+ //! Allocates the requested memory specified by `size` and `alignment`.
+ ASMJIT_INLINE void* alloc(size_t size, size_t alignment) noexcept {
+ ASMJIT_ASSERT(Support::isPowerOf2(alignment));
+ uint8_t* ptr = Support::alignUp(_ptr, alignment);
+
+ if (ptr >= _end || size > (size_t)(_end - ptr))
+ return _alloc(size, alignment);
+
+ _ptr = ptr + size;
+ return static_cast<void*>(ptr);
+ }
+
+ //! Allocates the requested memory specified by `size` without doing any checks.
+ //!
+ //! Can only be called if `remainingSize()` returns size at least equal to `size`.
+ ASMJIT_INLINE void* allocNoCheck(size_t size) noexcept {
+ ASMJIT_ASSERT(remainingSize() >= size);
+
+ uint8_t* ptr = _ptr;
+ _ptr += size;
+ return static_cast<void*>(ptr);
+ }
+
+ //! Allocates the requested memory specified by `size` and `alignment` without doing any checks.
+ //!
+ //! Performs the same operation as `Zone::allocNoCheck(size)` with `alignment` applied.
+ ASMJIT_INLINE void* allocNoCheck(size_t size, size_t alignment) noexcept {
+ ASMJIT_ASSERT(Support::isPowerOf2(alignment));
+
+ uint8_t* ptr = Support::alignUp(_ptr, alignment);
+ ASMJIT_ASSERT(size <= (size_t)(_end - ptr));
+
+ _ptr = ptr + size;
+ return static_cast<void*>(ptr);
+ }
+
+ //! Allocates `size` bytes of zeroed memory. See `alloc()` for more details.
+ ASMJIT_API void* allocZeroed(size_t size, size_t alignment = 1) noexcept;
+
+ //! Like `alloc()`, but the return pointer is casted to `T*`.
+ template<typename T>
+ ASMJIT_INLINE T* allocT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
+ return static_cast<T*>(alloc(size, alignment));
+ }
+
+ //! Like `allocNoCheck()`, but the return pointer is casted to `T*`.
+ template<typename T>
+ ASMJIT_INLINE T* allocNoCheckT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
+ return static_cast<T*>(allocNoCheck(size, alignment));
+ }
+
+ //! Like `allocZeroed()`, but the return pointer is casted to `T*`.
+ template<typename T>
+ ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
+ return static_cast<T*>(allocZeroed(size, alignment));
+ }
+
+ //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
+ template<typename T>
+ ASMJIT_INLINE T* newT() noexcept {
+ void* p = alloc(sizeof(T), alignof(T));
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+ return new(p) T();
+ }
+
+ //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
+ template<typename T, typename... Args>
+ ASMJIT_INLINE T* newT(Args&&... args) noexcept {
+ void* p = alloc(sizeof(T), alignof(T));
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+ return new(p) T(std::forward<Args>(args)...);
+ }
+
+ //! \cond INTERNAL
+ //!
+ //! Internal alloc function used by other inlines.
+ ASMJIT_API void* _alloc(size_t size, size_t alignment) noexcept;
+ //! \endcond
+
+ //! Helper to duplicate data.
+ ASMJIT_API void* dup(const void* data, size_t size, bool nullTerminate = false) noexcept;
+
+ //! Helper to duplicate data.
+ ASMJIT_INLINE void* dupAligned(const void* data, size_t size, size_t alignment, bool nullTerminate = false) noexcept {
+ align(alignment);
+ return dup(data, size, nullTerminate);
+ }
+
+ //! Helper to duplicate a formatted string, maximum size is 256 bytes.
+ ASMJIT_API char* sformat(const char* str, ...) noexcept;
+
+ //! \}
+};
+
+// ============================================================================
+// [b2d::ZoneTmp]
+// ============================================================================
+
+//! \ref Zone with `N` bytes of a static storage, used for the initial block.
+//!
+//! Temporary zones are used in cases where it's known that some memory will be
+//! required, but in many cases it won't exceed N bytes, so the whole operation
+//! can be performed without a dynamic memory allocation.
+template<size_t N>
+class ZoneTmp : public Zone {
+public:
+ ASMJIT_NONCOPYABLE(ZoneTmp<N>)
+
+ //! Temporary storage, embedded after \ref Zone.
+ struct Storage {
+ char data[N];
+ } _storage;
+
+ //! Creates a temporary zone. Dynamic block size is specified by `blockSize`.
+ ASMJIT_INLINE explicit ZoneTmp(size_t blockSize, size_t blockAlignment = 1) noexcept
+ : Zone(blockSize, blockAlignment, Support::Temporary(_storage.data, N)) {}
+};
+
+// ============================================================================
+// [asmjit::ZoneAllocator]
+// ============================================================================
+
+//! Zone-based memory allocator that uses an existing `Zone` and provides a
+//! `release()` functionality on top of it. It uses `Zone` only for chunks
+//! that can be pooled, and uses libc `malloc()` for chunks that are large.
+//!
+//! The advantage of ZoneAllocator is that it can allocate small chunks of memory
+//! really fast, and these chunks, when released, will be reused by consecutive
+//! calls to `alloc()`. Also, since ZoneAllocator uses `Zone`, you can turn any
+//! `Zone` into a `ZoneAllocator`, and use it in your `Pass` when necessary.
+//!
+//! ZoneAllocator is used by AsmJit containers to make containers having only
+//! few elements fast (and lightweight) and to allow them to grow and use
+//! dynamic blocks when require more storage.
+class ZoneAllocator {
+public:
+ ASMJIT_NONCOPYABLE(ZoneAllocator)
+
+ //! \cond INTERNAL
+ enum {
+ // In short, we pool chunks of these sizes:
+ // [32, 64, 96, 128, 192, 256, 320, 384, 448, 512]
+
+ //! How many bytes per a low granularity pool (has to be at least 16).
+ kLoGranularity = 32,
+ //! Number of slots of a low granularity pool.
+ kLoCount = 4,
+ //! Maximum size of a block that can be allocated in a low granularity pool.
+ kLoMaxSize = kLoGranularity * kLoCount,
+
+ //! How many bytes per a high granularity pool.
+ kHiGranularity = 64,
+ //! Number of slots of a high granularity pool.
+ kHiCount = 6,
+ //! Maximum size of a block that can be allocated in a high granularity pool.
+ kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount,
+
+ //! Alignment of every pointer returned by `alloc()`.
+ kBlockAlignment = kLoGranularity
+ };
+
+ //! Single-linked list used to store unused chunks.
+ struct Slot {
+ //! Link to a next slot in a single-linked list.
+ Slot* next;
+ };
+
+ //! A block of memory that has been allocated dynamically and is not part of
+ //! block-list used by the allocator. This is used to keep track of all these
+ //! blocks so they can be freed by `reset()` if not freed explicitly.
+ struct DynamicBlock {
+ DynamicBlock* prev;
+ DynamicBlock* next;
+ };
+
+ //! \endcond
+
+ //! Zone used to allocate memory that fits into slots.
+ Zone* _zone;
+ //! Indexed slots containing released memory.
+ Slot* _slots[kLoCount + kHiCount];
+ //! Dynamic blocks for larger allocations (no slots).
+ DynamicBlock* _dynamicBlocks;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `ZoneAllocator`.
+ //!
+ //! \note To use it, you must first `init()` it.
+ inline ZoneAllocator() noexcept {
+ memset(this, 0, sizeof(*this));
+ }
+
+ //! Creates a new `ZoneAllocator` initialized to use `zone`.
+ inline explicit ZoneAllocator(Zone* zone) noexcept {
+ memset(this, 0, sizeof(*this));
+ _zone = zone;
+ }
+
+ //! Destroys the `ZoneAllocator`.
+ inline ~ZoneAllocator() noexcept { reset(); }
+
+ //! Tests whether the `ZoneAllocator` is initialized (i.e. has `Zone`).
+ inline bool isInitialized() const noexcept { return _zone != nullptr; }
+
+ //! Convenience function to initialize the `ZoneAllocator` with `zone`.
+ //!
+ //! It's the same as calling `reset(zone)`.
+ inline void init(Zone* zone) noexcept { reset(zone); }
+
+ //! Resets this `ZoneAllocator` and also forget about the current `Zone` which
+ //! is attached (if any). Reset optionally attaches a new `zone` passed, or
+ //! keeps the `ZoneAllocator` in an uninitialized state, if `zone` is null.
+ ASMJIT_API void reset(Zone* zone = nullptr) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the assigned `Zone` of this allocator or null if this `ZoneAllocator`
+ //! is not initialized.
+ inline Zone* zone() const noexcept { return _zone; }
+
+ //! \}
+
+ //! \cond
+ //! \name Internals
+ //! \{
+
+ //! Returns the slot index to be used for `size`. Returns `true` if a valid slot
+ //! has been written to `slot` and `allocatedSize` has been filled with slot
+ //! exact size (`allocatedSize` can be equal or slightly greater than `size`).
+ static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot) noexcept {
+ ASMJIT_ASSERT(size > 0);
+ if (size > kHiMaxSize)
+ return false;
+
+ if (size <= kLoMaxSize)
+ slot = uint32_t((size - 1) / kLoGranularity);
+ else
+ slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount;
+
+ return true;
+ }
+
+ //! \overload
+ static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept {
+ ASMJIT_ASSERT(size > 0);
+ if (size > kHiMaxSize)
+ return false;
+
+ if (size <= kLoMaxSize) {
+ slot = uint32_t((size - 1) / kLoGranularity);
+ allocatedSize = Support::alignUp(size, kLoGranularity);
+ }
+ else {
+ slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount;
+ allocatedSize = Support::alignUp(size, kHiGranularity);
+ }
+
+ return true;
+ }
+
+ //! \}
+ //! \endcond
+
+ //! \name Allocation
+ //! \{
+
+ //! \cond INTERNAL
+ ASMJIT_API void* _alloc(size_t size, size_t& allocatedSize) noexcept;
+ ASMJIT_API void* _allocZeroed(size_t size, size_t& allocatedSize) noexcept;
+ ASMJIT_API void _releaseDynamic(void* p, size_t size) noexcept;
+ //! \endcond
+
+ //! Allocates `size` bytes of memory, ideally from an available pool.
+ //!
+ //! \note `size` can't be zero, it will assert in debug mode in such case.
+ inline void* alloc(size_t size) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ size_t allocatedSize;
+ return _alloc(size, allocatedSize);
+ }
+
+ //! Like `alloc(size)`, but provides a second argument `allocatedSize` that
+ //! provides a way to know how big the block returned actually is. This is
+ //! useful for containers to prevent growing too early.
+ inline void* alloc(size_t size, size_t& allocatedSize) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ return _alloc(size, allocatedSize);
+ }
+
+ //! Like `alloc()`, but the return pointer is casted to `T*`.
+ template<typename T>
+ inline T* allocT(size_t size = sizeof(T)) noexcept {
+ return static_cast<T*>(alloc(size));
+ }
+
+ //! Like `alloc(size)`, but returns zeroed memory.
+ inline void* allocZeroed(size_t size) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ size_t allocatedSize;
+ return _allocZeroed(size, allocatedSize);
+ }
+
+ //! Like `alloc(size, allocatedSize)`, but returns zeroed memory.
+ inline void* allocZeroed(size_t size, size_t& allocatedSize) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ return _allocZeroed(size, allocatedSize);
+ }
+
+ //! Like `allocZeroed()`, but the return pointer is casted to `T*`.
+ template<typename T>
+ inline T* allocZeroedT(size_t size = sizeof(T)) noexcept {
+ return static_cast<T*>(allocZeroed(size));
+ }
+
+ //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
+ template<typename T>
+ inline T* newT() noexcept {
+ void* p = allocT<T>();
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+ return new(p) T();
+ }
+ //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
+ template<typename T, typename... Args>
+ inline T* newT(Args&&... args) noexcept {
+ void* p = allocT<T>();
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+ return new(p) T(std::forward<Args>(args)...);
+ }
+
+ //! Releases the memory previously allocated by `alloc()`. The `size` argument
+ //! has to be the same as used to call `alloc()` or `allocatedSize` returned
+ //! by `alloc()`.
+ inline void release(void* p, size_t size) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ ASMJIT_ASSERT(p != nullptr);
+ ASMJIT_ASSERT(size != 0);
+
+ uint32_t slot;
+ if (_getSlotIndex(size, slot)) {
+ static_cast<Slot*>(p)->next = static_cast<Slot*>(_slots[slot]);
+ _slots[slot] = static_cast<Slot*>(p);
+ }
+ else {
+ _releaseDynamic(p, size);
+ }
+ }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONE_H_INCLUDED
diff --git a/client/asmjit/core/zonehash.cpp b/client/asmjit/core/zonehash.cpp
new file mode 100644
index 0000000..fb48d85
--- /dev/null
+++ b/client/asmjit/core/zonehash.cpp
@@ -0,0 +1,331 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonehash.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneHashBase - Helpers]
+// ============================================================================
+
+#define ASMJIT_POPULATE_PRIMES(ENTRY) \
+ ENTRY(2 , 0x80000000, 32), /* [N * 0x80000000 >> 32] (rcp=2147483648) */ \
+ ENTRY(11 , 0xBA2E8BA3, 35), /* [N * 0xBA2E8BA3 >> 35] (rcp=3123612579) */ \
+ ENTRY(29 , 0x8D3DCB09, 36), /* [N * 0x8D3DCB09 >> 36] (rcp=2369637129) */ \
+ ENTRY(41 , 0xC7CE0C7D, 37), /* [N * 0xC7CE0C7D >> 37] (rcp=3352169597) */ \
+ ENTRY(59 , 0x8AD8F2FC, 37), /* [N * 0x8AD8F2FC >> 37] (rcp=2329473788) */ \
+ ENTRY(83 , 0xC565C87C, 38), /* [N * 0xC565C87C >> 38] (rcp=3311782012) */ \
+ ENTRY(131 , 0xFA232CF3, 39), /* [N * 0xFA232CF3 >> 39] (rcp=4196609267) */ \
+ ENTRY(191 , 0xAB8F69E3, 39), /* [N * 0xAB8F69E3 >> 39] (rcp=2878302691) */ \
+ ENTRY(269 , 0xF3A0D52D, 40), /* [N * 0xF3A0D52D >> 40] (rcp=4087403821) */ \
+ ENTRY(383 , 0xAB1CBDD4, 40), /* [N * 0xAB1CBDD4 >> 40] (rcp=2870787540) */ \
+ ENTRY(541 , 0xF246FACC, 41), /* [N * 0xF246FACC >> 41] (rcp=4064737996) */ \
+ ENTRY(757 , 0xAD2589A4, 41), /* [N * 0xAD2589A4 >> 41] (rcp=2904918436) */ \
+ ENTRY(1061 , 0xF7129426, 42), /* [N * 0xF7129426 >> 42] (rcp=4145189926) */ \
+ ENTRY(1499 , 0xAEE116B7, 42), /* [N * 0xAEE116B7 >> 42] (rcp=2933986999) */ \
+ ENTRY(2099 , 0xF9C7A737, 43), /* [N * 0xF9C7A737 >> 43] (rcp=4190611255) */ \
+ ENTRY(2939 , 0xB263D25C, 43), /* [N * 0xB263D25C >> 43] (rcp=2992886364) */ \
+ ENTRY(4111 , 0xFF10E02E, 44), /* [N * 0xFF10E02E >> 44] (rcp=4279296046) */ \
+ ENTRY(5779 , 0xB5722823, 44), /* [N * 0xB5722823 >> 44] (rcp=3044157475) */ \
+ ENTRY(8087 , 0x81A97405, 44), /* [N * 0x81A97405 >> 44] (rcp=2175366149) */ \
+ ENTRY(11321 , 0xB93E91DB, 45), /* [N * 0xB93E91DB >> 45] (rcp=3107885531) */ \
+ ENTRY(15859 , 0x843CC26B, 45), /* [N * 0x843CC26B >> 45] (rcp=2218574443) */ \
+ ENTRY(22189 , 0xBD06B9EA, 46), /* [N * 0xBD06B9EA >> 46] (rcp=3171334634) */ \
+ ENTRY(31051 , 0x8713F186, 46), /* [N * 0x8713F186 >> 46] (rcp=2266231174) */ \
+ ENTRY(43451 , 0xC10F1CB9, 47), /* [N * 0xC10F1CB9 >> 47] (rcp=3238993081) */ \
+ ENTRY(60869 , 0x89D06A86, 47), /* [N * 0x89D06A86 >> 47] (rcp=2312137350) */ \
+ ENTRY(85159 , 0xC502AF3B, 48), /* [N * 0xC502AF3B >> 48] (rcp=3305287483) */ \
+ ENTRY(102107 , 0xA44F65AE, 48), /* [N * 0xA44F65AE >> 48] (rcp=2756666798) */ \
+ ENTRY(122449 , 0x89038F77, 48), /* [N * 0x89038F77 >> 48] (rcp=2298711927) */ \
+ ENTRY(146819 , 0xE48AF7E9, 49), /* [N * 0xE48AF7E9 >> 49] (rcp=3834312681) */ \
+ ENTRY(176041 , 0xBE9B145B, 49), /* [N * 0xBE9B145B >> 49] (rcp=3197834331) */ \
+ ENTRY(211073 , 0x9EF882BA, 49), /* [N * 0x9EF882BA >> 49] (rcp=2667086522) */ \
+ ENTRY(253081 , 0x849571AB, 49), /* [N * 0x849571AB >> 49] (rcp=2224386475) */ \
+ ENTRY(303469 , 0xDD239C97, 50), /* [N * 0xDD239C97 >> 50] (rcp=3710098583) */ \
+ ENTRY(363887 , 0xB86C196D, 50), /* [N * 0xB86C196D >> 50] (rcp=3094092141) */ \
+ ENTRY(436307 , 0x99CFA4E9, 50), /* [N * 0x99CFA4E9 >> 50] (rcp=2580522217) */ \
+ ENTRY(523177 , 0x804595C0, 50), /* [N * 0x804595C0 >> 50] (rcp=2152043968) */ \
+ ENTRY(627293 , 0xD5F69FCF, 51), /* [N * 0xD5F69FCF >> 51] (rcp=3589709775) */ \
+ ENTRY(752177 , 0xB27063BA, 51), /* [N * 0xB27063BA >> 51] (rcp=2993710010) */ \
+ ENTRY(901891 , 0x94D170AC, 51), /* [N * 0x94D170AC >> 51] (rcp=2496753836) */ \
+ ENTRY(1081369 , 0xF83C9767, 52), /* [N * 0xF83C9767 >> 52] (rcp=4164720487) */ \
+ ENTRY(1296563 , 0xCF09435D, 52), /* [N * 0xCF09435D >> 52] (rcp=3473490781) */ \
+ ENTRY(1554583 , 0xACAC7198, 52), /* [N * 0xACAC7198 >> 52] (rcp=2896982424) */ \
+ ENTRY(1863971 , 0x90033EE3, 52), /* [N * 0x90033EE3 >> 52] (rcp=2416131811) */ \
+ ENTRY(2234923 , 0xF0380EBD, 53), /* [N * 0xF0380EBD >> 53] (rcp=4030205629) */ \
+ ENTRY(2679673 , 0xC859731E, 53), /* [N * 0xC859731E >> 53] (rcp=3361305374) */ \
+ ENTRY(3212927 , 0xA718DE27, 53), /* [N * 0xA718DE27 >> 53] (rcp=2803424807) */ \
+ ENTRY(3852301 , 0x8B5D1B4B, 53), /* [N * 0x8B5D1B4B >> 53] (rcp=2338134859) */ \
+ ENTRY(4618921 , 0xE8774804, 54), /* [N * 0xE8774804 >> 54] (rcp=3900131332) */ \
+ ENTRY(5076199 , 0xD386574E, 54), /* [N * 0xD386574E >> 54] (rcp=3548796750) */ \
+ ENTRY(5578757 , 0xC0783FE1, 54), /* [N * 0xC0783FE1 >> 54] (rcp=3229106145) */ \
+ ENTRY(6131057 , 0xAF21B08F, 54), /* [N * 0xAF21B08F >> 54] (rcp=2938220687) */ \
+ ENTRY(6738031 , 0x9F5AFD6E, 54), /* [N * 0x9F5AFD6E >> 54] (rcp=2673540462) */ \
+ ENTRY(7405163 , 0x90FFC3B9, 54), /* [N * 0x90FFC3B9 >> 54] (rcp=2432680889) */ \
+ ENTRY(8138279 , 0x83EFECFC, 54), /* [N * 0x83EFECFC >> 54] (rcp=2213539068) */ \
+ ENTRY(8943971 , 0xF01AA2EF, 55), /* [N * 0xF01AA2EF >> 55] (rcp=4028277487) */ \
+ ENTRY(9829447 , 0xDA7979B2, 55), /* [N * 0xDA7979B2 >> 55] (rcp=3665394098) */ \
+ ENTRY(10802581 , 0xC6CB2771, 55), /* [N * 0xC6CB2771 >> 55] (rcp=3335202673) */ \
+ ENTRY(11872037 , 0xB4E2C7DD, 55), /* [N * 0xB4E2C7DD >> 55] (rcp=3034761181) */ \
+ ENTRY(13047407 , 0xA4974124, 55), /* [N * 0xA4974124 >> 55] (rcp=2761376036) */ \
+ ENTRY(14339107 , 0x95C39CF1, 55), /* [N * 0x95C39CF1 >> 55] (rcp=2512624881) */ \
+ ENTRY(15758737 , 0x8845C763, 55), /* [N * 0x8845C763 >> 55] (rcp=2286274403) */ \
+ ENTRY(17318867 , 0xF7FE593F, 56), /* [N * 0xF7FE593F >> 56] (rcp=4160641343) */ \
+ ENTRY(19033439 , 0xE1A75D93, 56), /* [N * 0xE1A75D93 >> 56] (rcp=3785842067) */ \
+ ENTRY(20917763 , 0xCD5389B3, 56), /* [N * 0xCD5389B3 >> 56] (rcp=3444804019) */ \
+ ENTRY(22988621 , 0xBAD4841A, 56), /* [N * 0xBAD4841A >> 56] (rcp=3134489626) */ \
+ ENTRY(25264543 , 0xA9FFF2FF, 56), /* [N * 0xA9FFF2FF >> 56] (rcp=2852123391) */ \
+ ENTRY(27765763 , 0x9AAF8BF3, 56), /* [N * 0x9AAF8BF3 >> 56] (rcp=2595195891) */ \
+ ENTRY(30514607 , 0x8CC04E18, 56), /* [N * 0x8CC04E18 >> 56] (rcp=2361413144) */ \
+ ENTRY(33535561 , 0x80127068, 56), /* [N * 0x80127068 >> 56] (rcp=2148692072) */ \
+ ENTRY(36855587 , 0xE911F0BB, 57), /* [N * 0xE911F0BB >> 57] (rcp=3910267067) */ \
+ ENTRY(38661533 , 0xDE2ED7BE, 57), /* [N * 0xDE2ED7BE >> 57] (rcp=3727611838) */ \
+ ENTRY(40555961 , 0xD3CDF2FD, 57), /* [N * 0xD3CDF2FD >> 57] (rcp=3553489661) */ \
+ ENTRY(42543269 , 0xC9E9196C, 57), /* [N * 0xC9E9196C >> 57] (rcp=3387496812) */ \
+ ENTRY(44627909 , 0xC07A9EB6, 57), /* [N * 0xC07A9EB6 >> 57] (rcp=3229261494) */ \
+ ENTRY(46814687 , 0xB77CEF65, 57), /* [N * 0xB77CEF65 >> 57] (rcp=3078418277) */ \
+ ENTRY(49108607 , 0xAEEAC65C, 57), /* [N * 0xAEEAC65C >> 57] (rcp=2934621788) */ \
+ ENTRY(51514987 , 0xA6BF0EF0, 57), /* [N * 0xA6BF0EF0 >> 57] (rcp=2797539056) */ \
+ ENTRY(54039263 , 0x9EF510B5, 57), /* [N * 0x9EF510B5 >> 57] (rcp=2666860725) */ \
+ ENTRY(56687207 , 0x97883B42, 57), /* [N * 0x97883B42 >> 57] (rcp=2542287682) */ \
+ ENTRY(59464897 , 0x907430ED, 57), /* [N * 0x907430ED >> 57] (rcp=2423533805) */ \
+ ENTRY(62378699 , 0x89B4CA91, 57), /* [N * 0x89B4CA91 >> 57] (rcp=2310326929) */ \
+ ENTRY(65435273 , 0x83461568, 57), /* [N * 0x83461568 >> 57] (rcp=2202408296) */ \
+ ENTRY(68641607 , 0xFA489AA8, 58), /* [N * 0xFA489AA8 >> 58] (rcp=4199062184) */ \
+ ENTRY(72005051 , 0xEE97B1C5, 58), /* [N * 0xEE97B1C5 >> 58] (rcp=4002918853) */ \
+ ENTRY(75533323 , 0xE3729293, 58), /* [N * 0xE3729293 >> 58] (rcp=3815936659) */ \
+ ENTRY(79234469 , 0xD8D2BBA3, 58), /* [N * 0xD8D2BBA3 >> 58] (rcp=3637689251) */ \
+ ENTRY(83116967 , 0xCEB1F196, 58), /* [N * 0xCEB1F196 >> 58] (rcp=3467768214) */ \
+ ENTRY(87189709 , 0xC50A4426, 58), /* [N * 0xC50A4426 >> 58] (rcp=3305784358) */ \
+ ENTRY(91462061 , 0xBBD6052B, 58), /* [N * 0xBBD6052B >> 58] (rcp=3151365419) */ \
+ ENTRY(95943737 , 0xB30FD999, 58), /* [N * 0xB30FD999 >> 58] (rcp=3004160409) */ \
+ ENTRY(100644991 , 0xAAB29CED, 58), /* [N * 0xAAB29CED >> 58] (rcp=2863832301) */ \
+ ENTRY(105576619 , 0xA2B96421, 58), /* [N * 0xA2B96421 >> 58] (rcp=2730058785) */ \
+ ENTRY(110749901 , 0x9B1F8434, 58), /* [N * 0x9B1F8434 >> 58] (rcp=2602533940) */ \
+ ENTRY(116176651 , 0x93E08B4A, 58), /* [N * 0x93E08B4A >> 58] (rcp=2480966474) */ \
+ ENTRY(121869317 , 0x8CF837E0, 58), /* [N * 0x8CF837E0 >> 58] (rcp=2365077472) */ \
+ ENTRY(127840913 , 0x86627F01, 58), /* [N * 0x86627F01 >> 58] (rcp=2254601985) */ \
+ ENTRY(134105159 , 0x801B8178, 58), /* [N * 0x801B8178 >> 58] (rcp=2149286264) */ \
+ ENTRY(140676353 , 0xF43F294F, 59), /* [N * 0xF43F294F >> 59] (rcp=4097780047) */ \
+ ENTRY(147569509 , 0xE8D67089, 59), /* [N * 0xE8D67089 >> 59] (rcp=3906367625) */ \
+ ENTRY(154800449 , 0xDDF6243C, 59), /* [N * 0xDDF6243C >> 59] (rcp=3723895868) */ \
+ ENTRY(162385709 , 0xD397E6AE, 59), /* [N * 0xD397E6AE >> 59] (rcp=3549947566) */ \
+ ENTRY(170342629 , 0xC9B5A65A, 59), /* [N * 0xC9B5A65A >> 59] (rcp=3384125018) */ \
+ ENTRY(178689419 , 0xC0499865, 59), /* [N * 0xC0499865 >> 59] (rcp=3226048613) */ \
+ ENTRY(187445201 , 0xB74E35FA, 59), /* [N * 0xB74E35FA >> 59] (rcp=3075356154) */ \
+ ENTRY(196630033 , 0xAEBE3AC1, 59), /* [N * 0xAEBE3AC1 >> 59] (rcp=2931702465) */ \
+ ENTRY(206264921 , 0xA694A37F, 59), /* [N * 0xA694A37F >> 59] (rcp=2794759039) */ \
+ ENTRY(216371963 , 0x9ECCA59F, 59), /* [N * 0x9ECCA59F >> 59] (rcp=2664211871) */ \
+ ENTRY(226974197 , 0x9761B6AE, 59), /* [N * 0x9761B6AE >> 59] (rcp=2539763374) */ \
+ ENTRY(238095983 , 0x904F79A1, 59), /* [N * 0x904F79A1 >> 59] (rcp=2421127585) */ \
+ ENTRY(249762697 , 0x8991CD1F, 59), /* [N * 0x8991CD1F >> 59] (rcp=2308033823) */ \
+ ENTRY(262001071 , 0x8324BCA5, 59), /* [N * 0x8324BCA5 >> 59] (rcp=2200222885) */ \
+ ENTRY(274839137 , 0xFA090732, 60), /* [N * 0xFA090732 >> 60] (rcp=4194895666) */ \
+ ENTRY(288306269 , 0xEE5B16ED, 60), /* [N * 0xEE5B16ED >> 60] (rcp=3998947053) */ \
+ ENTRY(302433337 , 0xE338CE49, 60), /* [N * 0xE338CE49 >> 60] (rcp=3812150857) */ \
+ ENTRY(317252587 , 0xD89BABC0, 60), /* [N * 0xD89BABC0 >> 60] (rcp=3634080704) */ \
+ ENTRY(374358107 , 0xB790EF43, 60), /* [N * 0xB790EF43 >> 60] (rcp=3079728963) */ \
+ ENTRY(441742621 , 0x9B908414, 60), /* [N * 0x9B908414 >> 60] (rcp=2609939476) */ \
+ ENTRY(521256293 , 0x83D596FA, 60), /* [N * 0x83D596FA >> 60] (rcp=2211813114) */ \
+ ENTRY(615082441 , 0xDF72B16E, 61), /* [N * 0xDF72B16E >> 61] (rcp=3748835694) */ \
+ ENTRY(725797313 , 0xBD5CDB3B, 61), /* [N * 0xBD5CDB3B >> 61] (rcp=3176979259) */ \
+ ENTRY(856440829 , 0xA07A14E9, 61), /* [N * 0xA07A14E9 >> 61] (rcp=2692355305) */ \
+ ENTRY(1010600209, 0x87FF5289, 61), /* [N * 0x87FF5289 >> 61] (rcp=2281656969) */ \
+ ENTRY(1192508257, 0xE6810540, 62), /* [N * 0xE6810540 >> 62] (rcp=3867215168) */ \
+ ENTRY(1407159797, 0xC357A480, 62), /* [N * 0xC357A480 >> 62] (rcp=3277300864) */ \
+ ENTRY(1660448617, 0xA58B5B4F, 62), /* [N * 0xA58B5B4F >> 62] (rcp=2777373519) */ \
+ ENTRY(1959329399, 0x8C4AB55F, 62), /* [N * 0x8C4AB55F >> 62] (rcp=2353706335) */ \
+ ENTRY(2312008693, 0xEDC86320, 63), /* [N * 0xEDC86320 >> 63] (rcp=3989332768) */ \
+ ENTRY(2728170257, 0xC982C4D2, 63), /* [N * 0xC982C4D2 >> 63] (rcp=3380790482) */ \
+ ENTRY(3219240923, 0xAAC599B6, 63) /* [N * 0xAAC599B6 >> 63] (rcp=2865076662) */
+
+
+struct HashPrime {
+ //! Prime number
+ uint32_t prime;
+ //! Reciprocal to turn division into multiplication.
+ uint32_t rcp;
+};
+
+static const HashPrime ZoneHash_primeArray[] = {
+ #define E(PRIME, RCP, SHIFT) { PRIME, RCP }
+ ASMJIT_POPULATE_PRIMES(E)
+ #undef E
+};
+
+static const uint8_t ZoneHash_primeShift[] = {
+ #define E(PRIME, RCP, SHIFT) uint8_t(SHIFT)
+ ASMJIT_POPULATE_PRIMES(E)
+ #undef E
+};
+
+// ============================================================================
+// [asmjit::ZoneHashBase - Rehash]
+// ============================================================================
+
+void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexcept {
+ ASMJIT_ASSERT(primeIndex < ASMJIT_ARRAY_SIZE(ZoneHash_primeArray));
+ uint32_t newCount = ZoneHash_primeArray[primeIndex].prime;
+
+ ZoneHashNode** oldData = _data;
+ ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(
+ allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*)));
+
+ // We can still store nodes into the table, but it will degrade.
+ if (ASMJIT_UNLIKELY(newData == nullptr))
+ return;
+
+ uint32_t i;
+ uint32_t oldCount = _bucketsCount;
+
+ _data = newData;
+ _bucketsCount = newCount;
+ _bucketsGrow = uint32_t(newCount * 0.9);
+ _rcpValue = ZoneHash_primeArray[primeIndex].rcp;
+ _rcpShift = ZoneHash_primeShift[primeIndex];
+ _primeIndex = uint8_t(primeIndex);
+
+ for (i = 0; i < oldCount; i++) {
+ ZoneHashNode* node = oldData[i];
+ while (node) {
+ ZoneHashNode* next = node->_hashNext;
+ uint32_t hashMod = _calcMod(node->_hashCode);
+
+ node->_hashNext = newData[hashMod];
+ newData[hashMod] = node;
+ node = next;
+ }
+ }
+
+ if (oldData != _embedded)
+ allocator->release(oldData, oldCount * sizeof(ZoneHashNode*));
+}
+
+// ============================================================================
+// [asmjit::ZoneHashBase - Ops]
+// ============================================================================
+
+ZoneHashNode* ZoneHashBase::_insert(ZoneAllocator* allocator, ZoneHashNode* node) noexcept {
+ uint32_t hashMod = _calcMod(node->_hashCode);
+ ZoneHashNode* next = _data[hashMod];
+
+ node->_hashNext = next;
+ _data[hashMod] = node;
+
+ if (++_size > _bucketsGrow) {
+ uint32_t primeIndex = Support::min<uint32_t>(_primeIndex + 2, ASMJIT_ARRAY_SIZE(ZoneHash_primeArray) - 1);
+ if (primeIndex > _primeIndex)
+ _rehash(allocator, primeIndex);
+ }
+
+ return node;
+}
+
+ZoneHashNode* ZoneHashBase::_remove(ZoneAllocator* allocator, ZoneHashNode* node) noexcept {
+ DebugUtils::unused(allocator);
+ uint32_t hashMod = _calcMod(node->_hashCode);
+
+ ZoneHashNode** pPrev = &_data[hashMod];
+ ZoneHashNode* p = *pPrev;
+
+ while (p) {
+ if (p == node) {
+ *pPrev = p->_hashNext;
+ _size--;
+ return node;
+ }
+
+ pPrev = &p->_hashNext;
+ p = *pPrev;
+ }
+
+ return nullptr;
+}
+
+// ============================================================================
+// [asmjit::ZoneHash - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+struct MyHashNode : public ZoneHashNode {
+ inline MyHashNode(uint32_t key) noexcept
+ : ZoneHashNode(key),
+ _key(key) {}
+
+ uint32_t _key;
+};
+
+struct MyKeyMatcher {
+ inline MyKeyMatcher(uint32_t key) noexcept
+ : _key(key) {}
+
+ inline uint32_t hashCode() const noexcept { return _key; }
+ inline bool matches(const MyHashNode* node) const noexcept { return node->_key == _key; }
+
+ uint32_t _key;
+};
+
+UNIT(zone_hash) {
+ uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 10000;
+
+ Zone zone(4096);
+ ZoneAllocator allocator(&zone);
+
+ ZoneHash<MyHashNode> hashTable;
+
+ uint32_t key;
+ INFO("Inserting %u elements to HashTable", unsigned(kCount));
+ for (key = 0; key < kCount; key++) {
+ hashTable.insert(&allocator, zone.newT<MyHashNode>(key));
+ }
+
+ uint32_t count = kCount;
+ INFO("Removing %u elements from HashTable and validating each operation", unsigned(kCount));
+ do {
+ MyHashNode* node;
+
+ for (key = 0; key < count; key++) {
+ node = hashTable.get(MyKeyMatcher(key));
+ EXPECT(node != nullptr);
+ EXPECT(node->_key == key);
+ }
+
+ {
+ count--;
+ node = hashTable.get(MyKeyMatcher(count));
+ hashTable.remove(&allocator, node);
+
+ node = hashTable.get(MyKeyMatcher(count));
+ EXPECT(node == nullptr);
+ }
+ } while (count);
+
+ EXPECT(hashTable.empty());
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/zonehash.h b/client/asmjit/core/zonehash.h
new file mode 100644
index 0000000..0f1f21f
--- /dev/null
+++ b/client/asmjit/core/zonehash.h
@@ -0,0 +1,218 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONEHASH_H_INCLUDED
+#define ASMJIT_CORE_ZONEHASH_H_INCLUDED
+
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneHashNode]
+// ============================================================================
+
+//! Node used by \ref ZoneHash template.
+//!
+//! You must provide function `bool eq(const Key& key)` in order to make
+//! `ZoneHash::get()` working.
+class ZoneHashNode {
+public:
+ ASMJIT_NONCOPYABLE(ZoneHashNode)
+
+ inline ZoneHashNode(uint32_t hashCode = 0) noexcept
+ : _hashNext(nullptr),
+ _hashCode(hashCode),
+ _customData(0) {}
+
+ //! Next node in the chain, null if it terminates the chain.
+ ZoneHashNode* _hashNext;
+ //! Precalculated hash-code of key.
+ uint32_t _hashCode;
+ //! Padding, can be reused by any Node that inherits `ZoneHashNode`.
+ uint32_t _customData;
+};
+
+// ============================================================================
+// [asmjit::ZoneHashBase]
+// ============================================================================
+
+//! Base class used by \ref ZoneHash template
+class ZoneHashBase {
+public:
+ ASMJIT_NONCOPYABLE(ZoneHashBase)
+
+ //! Buckets data.
+ ZoneHashNode** _data;
+ //! Count of records inserted into the hash table.
+ size_t _size;
+ //! Count of hash buckets.
+ uint32_t _bucketsCount;
+ //! When buckets array should grow (only checked after insertion).
+ uint32_t _bucketsGrow;
+ //! Reciprocal value of `_bucketsCount`.
+ uint32_t _rcpValue;
+ //! How many bits to shift right when hash is multiplied with `_rcpValue`.
+ uint8_t _rcpShift;
+ //! Prime value index in internal prime array.
+ uint8_t _primeIndex;
+
+ //! Embedded data, used by empty hash tables.
+ ZoneHashNode* _embedded[1];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneHashBase() noexcept {
+ reset();
+ }
+
+ inline ZoneHashBase(ZoneHashBase&& other) noexcept {
+ _data = other._data;
+ _size = other._size;
+ _bucketsCount = other._bucketsCount;
+ _bucketsGrow = other._bucketsGrow;
+ _rcpValue = other._rcpValue;
+ _rcpShift = other._rcpShift;
+ _primeIndex = other._primeIndex;
+ _embedded[0] = other._embedded[0];
+
+ if (_data == other._embedded) _data = _embedded;
+ }
+
+ inline void reset() noexcept {
+ _data = _embedded;
+ _size = 0;
+ _bucketsCount = 1;
+ _bucketsGrow = 1;
+ _rcpValue = 1;
+ _rcpShift = 0;
+ _primeIndex = 0;
+ _embedded[0] = nullptr;
+ }
+
+ inline void release(ZoneAllocator* allocator) noexcept {
+ ZoneHashNode** oldData = _data;
+ if (oldData != _embedded)
+ allocator->release(oldData, _bucketsCount * sizeof(ZoneHashNode*));
+ reset();
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool empty() const noexcept { return _size == 0; }
+ inline size_t size() const noexcept { return _size; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void _swap(ZoneHashBase& other) noexcept {
+ std::swap(_data, other._data);
+ std::swap(_size, other._size);
+ std::swap(_bucketsCount, other._bucketsCount);
+ std::swap(_bucketsGrow, other._bucketsGrow);
+ std::swap(_rcpValue, other._rcpValue);
+ std::swap(_rcpShift, other._rcpShift);
+ std::swap(_primeIndex, other._primeIndex);
+ std::swap(_embedded[0], other._embedded[0]);
+
+ if (_data == other._embedded) _data = _embedded;
+ if (other._data == _embedded) other._data = other._embedded;
+ }
+
+ //! \cond INTERNAL
+ inline uint32_t _calcMod(uint32_t hash) const noexcept {
+ uint32_t x = uint32_t((uint64_t(hash) * _rcpValue) >> _rcpShift);
+ return hash - x * _bucketsCount;
+ }
+
+ ASMJIT_API void _rehash(ZoneAllocator* allocator, uint32_t newCount) noexcept;
+ ASMJIT_API ZoneHashNode* _insert(ZoneAllocator* allocator, ZoneHashNode* node) noexcept;
+ ASMJIT_API ZoneHashNode* _remove(ZoneAllocator* allocator, ZoneHashNode* node) noexcept;
+ //! \endcond
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneHash]
+// ============================================================================
+
+//! Low-level hash table specialized for storing string keys and POD values.
+//!
+//! This hash table allows duplicates to be inserted (the API is so low
+//! level that it's up to you if you allow it or not, as you should first
+//! `get()` the node and then modify it or insert a new node by using `insert()`,
+//! depending on the intention).
+template<typename NodeT>
+class ZoneHash : public ZoneHashBase {
+public:
+ ASMJIT_NONCOPYABLE(ZoneHash<NodeT>)
+
+ typedef NodeT Node;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneHash() noexcept
+ : ZoneHashBase() {}
+
+ inline ZoneHash(ZoneHash&& other) noexcept
+ : ZoneHash(other) {}
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void swap(ZoneHash& other) noexcept { ZoneHashBase::_swap(other); }
+
+ template<typename KeyT>
+ inline NodeT* get(const KeyT& key) const noexcept {
+ uint32_t hashMod = _calcMod(key.hashCode());
+ NodeT* node = static_cast<NodeT*>(_data[hashMod]);
+
+ while (node && !key.matches(node))
+ node = static_cast<NodeT*>(node->_hashNext);
+ return node;
+ }
+
+ inline NodeT* insert(ZoneAllocator* allocator, NodeT* node) noexcept { return static_cast<NodeT*>(_insert(allocator, node)); }
+ inline NodeT* remove(ZoneAllocator* allocator, NodeT* node) noexcept { return static_cast<NodeT*>(_remove(allocator, node)); }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONEHASH_H_INCLUDED
diff --git a/client/asmjit/core/zonelist.cpp b/client/asmjit/core/zonelist.cpp
new file mode 100644
index 0000000..3496aa8
--- /dev/null
+++ b/client/asmjit/core/zonelist.cpp
@@ -0,0 +1,182 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/zone.h"
+#include "../core/zonelist.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneList - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+class MyListNode : public ZoneListNode<MyListNode> {};
+
+UNIT(zone_list) {
+ Zone zone(4096);
+ ZoneList<MyListNode> list;
+
+ MyListNode* a = zone.newT<MyListNode>();
+ MyListNode* b = zone.newT<MyListNode>();
+ MyListNode* c = zone.newT<MyListNode>();
+ MyListNode* d = zone.newT<MyListNode>();
+
+ INFO("Append / Unlink");
+
+ // []
+ EXPECT(list.empty() == true);
+
+ // [A]
+ list.append(a);
+ EXPECT(list.empty() == false);
+ EXPECT(list.first() == a);
+ EXPECT(list.last() == a);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == nullptr);
+
+ // [A, B]
+ list.append(b);
+ EXPECT(list.first() == a);
+ EXPECT(list.last() == b);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == b);
+ EXPECT(b->prev() == a);
+ EXPECT(b->next() == nullptr);
+
+ // [A, B, C]
+ list.append(c);
+ EXPECT(list.first() == a);
+ EXPECT(list.last() == c);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == b);
+ EXPECT(b->prev() == a);
+ EXPECT(b->next() == c);
+ EXPECT(c->prev() == b);
+ EXPECT(c->next() == nullptr);
+
+ // [B, C]
+ list.unlink(a);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == c);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == nullptr);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == c);
+ EXPECT(c->prev() == b);
+ EXPECT(c->next() == nullptr);
+
+ // [B]
+ list.unlink(c);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == b);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == nullptr);
+ EXPECT(c->prev() == nullptr);
+ EXPECT(c->next() == nullptr);
+
+ // []
+ list.unlink(b);
+ EXPECT(list.empty() == true);
+ EXPECT(list.first() == nullptr);
+ EXPECT(list.last() == nullptr);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == nullptr);
+
+ INFO("Prepend / Unlink");
+
+ // [A]
+ list.prepend(a);
+ EXPECT(list.empty() == false);
+ EXPECT(list.first() == a);
+ EXPECT(list.last() == a);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == nullptr);
+
+ // [B, A]
+ list.prepend(b);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == a);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == a);
+ EXPECT(a->prev() == b);
+ EXPECT(a->next() == nullptr);
+
+ INFO("InsertAfter / InsertBefore");
+
+ // [B, A, C]
+ list.insertAfter(a, c);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == c);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == a);
+ EXPECT(a->prev() == b);
+ EXPECT(a->next() == c);
+ EXPECT(c->prev() == a);
+ EXPECT(c->next() == nullptr);
+
+ // [B, D, A, C]
+ list.insertBefore(a, d);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == c);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == d);
+ EXPECT(d->prev() == b);
+ EXPECT(d->next() == a);
+ EXPECT(a->prev() == d);
+ EXPECT(a->next() == c);
+ EXPECT(c->prev() == a);
+ EXPECT(c->next() == nullptr);
+
+ INFO("PopFirst / Pop");
+
+ // [D, A, C]
+ EXPECT(list.popFirst() == b);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == nullptr);
+
+ EXPECT(list.first() == d);
+ EXPECT(list.last() == c);
+ EXPECT(d->prev() == nullptr);
+ EXPECT(d->next() == a);
+ EXPECT(a->prev() == d);
+ EXPECT(a->next() == c);
+ EXPECT(c->prev() == a);
+ EXPECT(c->next() == nullptr);
+
+ // [D, A]
+ EXPECT(list.pop() == c);
+ EXPECT(c->prev() == nullptr);
+ EXPECT(c->next() == nullptr);
+
+ EXPECT(list.first() == d);
+ EXPECT(list.last() == a);
+ EXPECT(d->prev() == nullptr);
+ EXPECT(d->next() == a);
+ EXPECT(a->prev() == d);
+ EXPECT(a->next() == nullptr);
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/zonelist.h b/client/asmjit/core/zonelist.h
new file mode 100644
index 0000000..d7fb1dd
--- /dev/null
+++ b/client/asmjit/core/zonelist.h
@@ -0,0 +1,205 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONELIST_H_INCLUDED
+#define ASMJIT_CORE_ZONELIST_H_INCLUDED
+
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneListNode]
+// ============================================================================
+
+//! Node used by \ref ZoneList template.
+template<typename NodeT>
+class ZoneListNode {
+public:
+ ASMJIT_NONCOPYABLE(ZoneListNode)
+
+ NodeT* _listNodes[Globals::kLinkCount];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneListNode() noexcept
+ : _listNodes { nullptr, nullptr } {}
+
+ inline ZoneListNode(ZoneListNode&& other) noexcept
+ : _listNodes { other._listNodes[0], other._listNodes[1] } {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool hasPrev() const noexcept { return _listNodes[Globals::kLinkPrev] != nullptr; }
+ inline bool hasNext() const noexcept { return _listNodes[Globals::kLinkNext] != nullptr; }
+
+ inline NodeT* prev() const noexcept { return _listNodes[Globals::kLinkPrev]; }
+ inline NodeT* next() const noexcept { return _listNodes[Globals::kLinkNext]; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneList<T>]
+// ============================================================================
+
+//! Zone allocated list container that uses nodes of `NodeT` type.
+template <typename NodeT>
+class ZoneList {
+public:
+ ASMJIT_NONCOPYABLE(ZoneList)
+
+ NodeT* _nodes[Globals::kLinkCount];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneList() noexcept
+ : _nodes { nullptr, nullptr } {}
+
+ inline ZoneList(ZoneList&& other) noexcept
+ : _nodes { other._nodes[0], other._nodes[1] } {}
+
+ inline void reset() noexcept {
+ _nodes[0] = nullptr;
+ _nodes[1] = nullptr;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool empty() const noexcept { return _nodes[0] == nullptr; }
+ inline NodeT* first() const noexcept { return _nodes[Globals::kLinkFirst]; }
+ inline NodeT* last() const noexcept { return _nodes[Globals::kLinkLast]; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void swap(ZoneList& other) noexcept {
+ std::swap(_nodes[0], other._nodes[0]);
+ std::swap(_nodes[1], other._nodes[1]);
+ }
+
+ // Can be used to both append and prepend.
+ inline void _addNode(NodeT* node, size_t dir) noexcept {
+ NodeT* prev = _nodes[dir];
+
+ node->_listNodes[!dir] = prev;
+ _nodes[dir] = node;
+ if (prev)
+ prev->_listNodes[dir] = node;
+ else
+ _nodes[!dir] = node;
+ }
+
+ // Can be used to both append and prepend.
+ inline void _insertNode(NodeT* ref, NodeT* node, size_t dir) noexcept {
+ ASMJIT_ASSERT(ref != nullptr);
+
+ NodeT* prev = ref;
+ NodeT* next = ref->_listNodes[dir];
+
+ prev->_listNodes[dir] = node;
+ if (next)
+ next->_listNodes[!dir] = node;
+ else
+ _nodes[dir] = node;
+
+ node->_listNodes[!dir] = prev;
+ node->_listNodes[ dir] = next;
+ }
+
+ inline void append(NodeT* node) noexcept { _addNode(node, Globals::kLinkLast); }
+ inline void prepend(NodeT* node) noexcept { _addNode(node, Globals::kLinkFirst); }
+
+ inline void insertAfter(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, Globals::kLinkNext); }
+ inline void insertBefore(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, Globals::kLinkPrev); }
+
+ inline NodeT* unlink(NodeT* node) noexcept {
+ NodeT* prev = node->prev();
+ NodeT* next = node->next();
+
+ if (prev) { prev->_listNodes[1] = next; node->_listNodes[0] = nullptr; } else { _nodes[0] = next; }
+ if (next) { next->_listNodes[0] = prev; node->_listNodes[1] = nullptr; } else { _nodes[1] = prev; }
+
+ node->_listNodes[0] = nullptr;
+ node->_listNodes[1] = nullptr;
+
+ return node;
+ }
+
+ inline NodeT* popFirst() noexcept {
+ NodeT* node = _nodes[0];
+ ASMJIT_ASSERT(node != nullptr);
+
+ NodeT* next = node->next();
+ _nodes[0] = next;
+
+ if (next) {
+ next->_listNodes[0] = nullptr;
+ node->_listNodes[1] = nullptr;
+ }
+ else {
+ _nodes[1] = nullptr;
+ }
+
+ return node;
+ }
+
+ inline NodeT* pop() noexcept {
+ NodeT* node = _nodes[1];
+ ASMJIT_ASSERT(node != nullptr);
+
+ NodeT* prev = node->prev();
+ _nodes[1] = prev;
+
+ if (prev) {
+ prev->_listNodes[1] = nullptr;
+ node->_listNodes[0] = nullptr;
+ }
+ else {
+ _nodes[0] = nullptr;
+ }
+
+ return node;
+ }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONELIST_H_INCLUDED
diff --git a/client/asmjit/core/zonestack.cpp b/client/asmjit/core/zonestack.cpp
new file mode 100644
index 0000000..52841b5
--- /dev/null
+++ b/client/asmjit/core/zonestack.cpp
@@ -0,0 +1,197 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/zone.h"
+#include "../core/zonestack.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneStackBase - Init / Reset]
+// ============================================================================
+
+// Initializes or resets the container. A non-null `allocator` (re)initializes
+// the stack with a single block whose start/end both point at `middleIndex`,
+// so it can grow in either direction; a null `allocator` releases all blocks
+// and leaves the container uninitialized (this is what `reset()` does).
+Error ZoneStackBase::_init(ZoneAllocator* allocator, size_t middleIndex) noexcept {
+  ZoneAllocator* oldAllocator = _allocator;
+
+  if (oldAllocator) {
+    // Release every block owned by the previous allocator, first to last.
+    Block* block = _block[Globals::kLinkFirst];
+    while (block) {
+      Block* next = block->next();
+      oldAllocator->release(block, kBlockSize);
+      block = next;
+    }
+
+    _allocator = nullptr;
+    _block[Globals::kLinkLeft] = nullptr;
+    _block[Globals::kLinkRight] = nullptr;
+  }
+
+  if (allocator) {
+    Block* block = static_cast<Block*>(allocator->alloc(kBlockSize));
+    if (ASMJIT_UNLIKELY(!block))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    // A single empty block centered at `middleIndex` serves as both the
+    // first and the last block.
+    block->_link[Globals::kLinkLeft] = nullptr;
+    block->_link[Globals::kLinkRight] = nullptr;
+    block->_start = (uint8_t*)block + middleIndex;
+    block->_end = (uint8_t*)block + middleIndex;
+
+    _allocator = allocator;
+    _block[Globals::kLinkLeft] = block;
+    _block[Globals::kLinkRight] = block;
+  }
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::ZoneStackBase - Ops]
+// ============================================================================
+
+// Allocates and links a fresh block on the given `side` (kLinkFirst or
+// kLinkLast) when the current boundary block is full. `initialIndex` positions
+// the new block's start/end so items can grow toward the interior (callers
+// pass kEndBlockIndex when prepending and kStartBlockIndex when appending).
+Error ZoneStackBase::_prepareBlock(uint32_t side, size_t initialIndex) noexcept {
+  ASMJIT_ASSERT(isInitialized());
+
+  Block* prev = _block[side];
+  ASMJIT_ASSERT(!prev->empty());
+
+  Block* block = _allocator->allocT<Block>(kBlockSize);
+  if (ASMJIT_UNLIKELY(!block))
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  // Link the new block outward on `side`; it starts empty at `initialIndex`.
+  block->_link[ side] = nullptr;
+  block->_link[!side] = prev;
+  block->_start = (uint8_t*)block + initialIndex;
+  block->_end = (uint8_t*)block + initialIndex;
+
+  prev->_link[side] = block;
+  _block[side] = block;
+
+  return kErrorOk;
+}
+
+// Releases the boundary block on `side` after it became empty, making its
+// neighbor the new boundary. If the empty block is the only block, it is kept
+// and re-centered at `middleIndex` instead of being released.
+void ZoneStackBase::_cleanupBlock(uint32_t side, size_t middleIndex) noexcept {
+  Block* block = _block[side];
+  ASMJIT_ASSERT(block->empty());
+
+  Block* prev = block->_link[!side];
+  if (prev) {
+    ASMJIT_ASSERT(prev->_link[side] == block);
+    _allocator->release(block, kBlockSize);
+
+    prev->_link[side] = nullptr;
+    _block[side] = prev;
+  }
+  else if (_block[!side] == block) {
+    // If the container becomes empty center both pointers in the remaining block.
+    block->_start = (uint8_t*)block + middleIndex;
+    block->_end = (uint8_t*)block + middleIndex;
+  }
+}
+
+// ============================================================================
+// [asmjit::ZoneStack - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+// Exercises ZoneStack<T> across block boundaries: single-item append/prepend,
+// then bulk sequences large enough (±100000) to force many block allocations,
+// validating FIFO order via popFirst() and LIFO order via pop().
+template<typename T>
+static void test_zone_stack(ZoneAllocator* allocator, const char* typeName) {
+  ZoneStack<T> stack;
+
+  INFO("Testing ZoneStack<%s>", typeName);
+  INFO("  (%d items per one Block)", ZoneStack<T>::kNumBlockItems);
+
+  EXPECT(stack.init(allocator) == kErrorOk);
+  EXPECT(stack.empty(), "Stack must be empty after `init()`");
+
+  EXPECT(stack.append(42) == kErrorOk);
+  EXPECT(!stack.empty()        , "Stack must not be empty after an item has been appended");
+  EXPECT(stack.pop() == 42     , "Stack.pop() must return the item that has been appended last");
+  EXPECT(stack.empty()         , "Stack must be empty after the last item has been removed");
+
+  EXPECT(stack.prepend(43) == kErrorOk);
+  EXPECT(!stack.empty()        , "Stack must not be empty after an item has been prepended");
+  EXPECT(stack.popFirst() == 43, "Stack.popFirst() must return the item that has been prepended last");
+  EXPECT(stack.empty()         , "Stack must be empty after the last item has been removed");
+
+  int i;
+  int iMin =-100000;
+  int iMax = 100000;
+
+  INFO("Validating prepend() & popFirst()");
+  for (i = iMax; i >= 0; i--) stack.prepend(T(i));
+  for (i = 0; i <= iMax; i++) {
+    T item = stack.popFirst();
+    EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
+    if (!stack.empty()) {
+      // Peek at the next item (pop + re-prepend) to verify ordering holds.
+      item = stack.popFirst();
+      EXPECT(i + 1 == item, "Item '%d' didn't match the item '%lld' popped", i + 1, (long long)item);
+      stack.prepend(item);
+    }
+  }
+  EXPECT(stack.empty());
+
+  INFO("Validating append() & pop()");
+  for (i = 0; i <= iMax; i++) stack.append(T(i));
+  for (i = iMax; i >= 0; i--) {
+    T item = stack.pop();
+    EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
+    if (!stack.empty()) {
+      // Peek at the next item (pop + re-append) to verify ordering holds.
+      item = stack.pop();
+      EXPECT(i - 1 == item, "Item '%d' didn't match the item '%lld' popped", i - 1, (long long)item);
+      stack.append(item);
+    }
+  }
+  EXPECT(stack.empty());
+
+  INFO("Validating append()/prepend() & popFirst()");
+  for (i = 1; i <= iMax; i++) stack.append(T(i));
+  for (i = 0; i >= iMin; i--) stack.prepend(T(i));
+
+  for (i = iMin; i <= iMax; i++) {
+    T item = stack.popFirst();
+    EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
+  }
+  EXPECT(stack.empty());
+
+  INFO("Validating append()/prepend() & pop()");
+  for (i = 0; i >= iMin; i--) stack.prepend(T(i));
+  for (i = 1; i <= iMax; i++) stack.append(T(i));
+
+  for (i = iMax; i >= iMin; i--) {
+    T item = stack.pop();
+    EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
+  }
+  EXPECT(stack.empty());
+}
+
+// Runs the ZoneStack test for two element sizes so both full-item and
+// half-item block layouts are exercised.
+UNIT(zone_stack) {
+  Zone zone(8096 - Zone::kBlockOverhead);
+  ZoneAllocator allocator(&zone);
+
+  test_zone_stack<int>(&allocator, "int");
+  test_zone_stack<int64_t>(&allocator, "int64_t");
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/zonestack.h b/client/asmjit/core/zonestack.h
new file mode 100644
index 0000000..2de6cdc
--- /dev/null
+++ b/client/asmjit/core/zonestack.h
@@ -0,0 +1,234 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONESTACK_H_INCLUDED
+#define ASMJIT_CORE_ZONESTACK_H_INCLUDED
+
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneStackBase]
+// ============================================================================
+
+//! Base class used by \ref ZoneStack.
+class ZoneStackBase {
+public:
+  ASMJIT_NONCOPYABLE(ZoneStackBase)
+
+  //! Size of a single storage block, including the `Block` header.
+  static constexpr uint32_t kBlockSize = ZoneAllocator::kHiMaxSize;
+
+  //! A doubly-linked storage block; items live in the bytes following the
+  //! header, between `_start` (inclusive) and `_end` (exclusive).
+  struct Block {
+    inline bool empty() const noexcept { return _start == _end; }
+    inline Block* prev() const noexcept { return _link[Globals::kLinkLeft]; }
+    inline Block* next() const noexcept { return _link[Globals::kLinkRight]; }
+
+    inline void setPrev(Block* block) noexcept { _link[Globals::kLinkLeft] = block; }
+    inline void setNext(Block* block) noexcept { _link[Globals::kLinkRight] = block; }
+
+    template<typename T>
+    inline T* start() const noexcept { return static_cast<T*>(_start); }
+    template<typename T>
+    inline void setStart(T* start) noexcept { _start = static_cast<void*>(start); }
+
+    template<typename T>
+    inline T* end() const noexcept { return (T*)_end; }
+    template<typename T>
+    inline void setEnd(T* end) noexcept { _end = (void*)end; }
+
+    //! Returns the first usable byte of the block (just past the header).
+    template<typename T>
+    inline T* data() const noexcept { return (T*)((uint8_t*)(this) + sizeof(Block)); }
+
+    //! Tests whether there is room before `_start` for one more item.
+    //! NOTE: `T` is unused here; the comparison is done on raw byte addresses.
+    template<typename T>
+    inline bool canPrepend() const noexcept { return _start > data<void>(); }
+
+    //! Tests whether there is room after `_end` for one more item of type `T`.
+    template<typename T>
+    inline bool canAppend() const noexcept {
+      size_t kNumBlockItems = (kBlockSize - sizeof(Block)) / sizeof(T);
+      size_t kStartBlockIndex = sizeof(Block);
+      size_t kEndBlockIndex = kStartBlockIndex + kNumBlockItems * sizeof(T);
+
+      return (uintptr_t)_end <= ((uintptr_t)this + kEndBlockIndex - sizeof(T));
+    }
+
+    Block* _link[Globals::kLinkCount]; //!< Next and previous blocks.
+    void* _start; //!< Pointer to the start of the array.
+    void* _end; //!< Pointer to the end of the array.
+  };
+
+  //! Allocator used to allocate data.
+  ZoneAllocator* _allocator;
+  //! First and last blocks.
+  Block* _block[Globals::kLinkCount];
+
+  //! \name Construction / Destruction
+  //! \{
+
+  inline ZoneStackBase() noexcept {
+    _allocator = nullptr;
+    _block[0] = nullptr;
+    _block[1] = nullptr;
+  }
+  inline ~ZoneStackBase() noexcept { reset(); }
+
+  inline bool isInitialized() const noexcept { return _allocator != nullptr; }
+  ASMJIT_API Error _init(ZoneAllocator* allocator, size_t middleIndex) noexcept;
+  //! Releases all blocks and detaches the allocator.
+  inline Error reset() noexcept { return _init(nullptr, 0); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns `ZoneAllocator` attached to this container.
+  inline ZoneAllocator* allocator() const noexcept { return _allocator; }
+
+  //! Tests whether the stack holds no items; only valid after `init()`.
+  inline bool empty() const noexcept {
+    ASMJIT_ASSERT(isInitialized());
+    return _block[0]->start<void>() == _block[1]->end<void>();
+  }
+
+  //! \}
+
+  //! \cond INTERNAL
+  //! \name Internal
+  //! \{
+
+  ASMJIT_API Error _prepareBlock(uint32_t side, size_t initialIndex) noexcept;
+  ASMJIT_API void _cleanupBlock(uint32_t side, size_t middleIndex) noexcept;
+
+  //! \}
+  //! \endcond
+};
+
+// ============================================================================
+// [asmjit::ZoneStack<T>]
+// ============================================================================
+
+//! Zone allocated stack container.
+template<typename T>
+class ZoneStack : public ZoneStackBase {
+public:
+  ASMJIT_NONCOPYABLE(ZoneStack<T>)
+
+  enum : uint32_t {
+    //! Number of items of `T` that fit into one block after the header.
+    kNumBlockItems   = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T)),
+    //! Byte offset of the first item slot (just past the `Block` header).
+    kStartBlockIndex = uint32_t(sizeof(Block)),
+    //! Byte offset of the middle item slot; a fresh stack starts here so it
+    //! can grow in both directions.
+    kMidBlockIndex   = uint32_t(kStartBlockIndex + (kNumBlockItems / 2) * sizeof(T)),
+    //! Byte offset one past the last item slot.
+    kEndBlockIndex   = uint32_t(kStartBlockIndex + (kNumBlockItems    ) * sizeof(T))
+  };
+
+  //! \name Construction / Destruction
+  //! \{
+
+  inline ZoneStack() noexcept {}
+  inline ~ZoneStack() noexcept {}
+
+  inline Error init(ZoneAllocator* allocator) noexcept { return _init(allocator, kMidBlockIndex); }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Pushes `item` at the front, allocating a new first block if necessary.
+  ASMJIT_INLINE Error prepend(T item) noexcept {
+    ASMJIT_ASSERT(isInitialized());
+    Block* block = _block[Globals::kLinkFirst];
+
+    if (!block->canPrepend<T>()) {
+      // New first block starts full-at-the-end so items grow backwards.
+      ASMJIT_PROPAGATE(_prepareBlock(Globals::kLinkFirst, kEndBlockIndex));
+      block = _block[Globals::kLinkFirst];
+    }
+
+    T* ptr = block->start<T>() - 1;
+    ASMJIT_ASSERT(ptr >= block->data<T>() && ptr <= block->data<T>() + (kNumBlockItems - 1));
+    *ptr = item;
+    block->setStart<T>(ptr);
+    return kErrorOk;
+  }
+
+  //! Pushes `item` at the back, allocating a new last block if necessary.
+  ASMJIT_INLINE Error append(T item) noexcept {
+    ASMJIT_ASSERT(isInitialized());
+    Block* block = _block[Globals::kLinkLast];
+
+    if (!block->canAppend<T>()) {
+      // New last block starts empty-at-the-beginning so items grow forwards.
+      ASMJIT_PROPAGATE(_prepareBlock(Globals::kLinkLast, kStartBlockIndex));
+      block = _block[Globals::kLinkLast];
+    }
+
+    T* ptr = block->end<T>();
+    ASMJIT_ASSERT(ptr >= block->data<T>() && ptr <= block->data<T>() + (kNumBlockItems - 1));
+
+    *ptr++ = item;
+    block->setEnd(ptr);
+    return kErrorOk;
+  }
+
+  //! Removes and returns the front item; the stack must not be empty.
+  ASMJIT_INLINE T popFirst() noexcept {
+    ASMJIT_ASSERT(isInitialized());
+    ASMJIT_ASSERT(!empty());
+
+    Block* block = _block[Globals::kLinkFirst];
+    ASMJIT_ASSERT(!block->empty());
+
+    T* ptr = block->start<T>();
+    T item = *ptr++;
+
+    block->setStart(ptr);
+    if (block->empty())
+      _cleanupBlock(Globals::kLinkFirst, kMidBlockIndex);
+
+    return item;
+  }
+
+  //! Removes and returns the back item; the stack must not be empty.
+  ASMJIT_INLINE T pop() noexcept {
+    ASMJIT_ASSERT(isInitialized());
+    ASMJIT_ASSERT(!empty());
+
+    Block* block = _block[Globals::kLinkLast];
+    ASMJIT_ASSERT(!block->empty());
+
+    T* ptr = block->end<T>();
+    T item = *--ptr;
+    ASMJIT_ASSERT(ptr >= block->data<T>());
+    ASMJIT_ASSERT(ptr >= block->start<T>());
+
+    block->setEnd(ptr);
+    if (block->empty())
+      _cleanupBlock(Globals::kLinkLast, kMidBlockIndex);
+
+    return item;
+  }
+
+  //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONESTACK_H_INCLUDED
diff --git a/client/asmjit/core/zonestring.h b/client/asmjit/core/zonestring.h
new file mode 100644
index 0000000..cb25b29
--- /dev/null
+++ b/client/asmjit/core/zonestring.h
@@ -0,0 +1,137 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_SMALLSTRING_H_INCLUDED
+#define ASMJIT_CORE_SMALLSTRING_H_INCLUDED
+
+#include "../core/globals.h"
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneStringBase]
+// ============================================================================
+
+//! A helper class used by \ref ZoneString implementation.
+struct ZoneStringBase {
+  // Storage is a union: short strings live in `_embedded` (with `_size`
+  // doubling as the discriminator); longer strings point to zone-allocated
+  // memory via `_external`.
+  union {
+    struct {
+      uint32_t _size;
+      char _embedded[sizeof(void*) * 2 - 4];
+    };
+    struct {
+      void* _dummy;
+      char* _external;
+    };
+  };
+
+  //! Clears the whole storage, resetting `_size` to zero via the union.
+  inline void reset() noexcept {
+    _dummy = nullptr;
+    _external = nullptr;
+  }
+
+  //! Copies `size` bytes of `str` into embedded storage when it fits within
+  //! `maxEmbeddedSize`, otherwise duplicates it into `zone`. Passing
+  //! `size == SIZE_MAX` means `str` is NUL-terminated and its length is measured.
+  Error setData(Zone* zone, uint32_t maxEmbeddedSize, const char* str, size_t size) noexcept {
+    if (size == SIZE_MAX)
+      size = strlen(str);
+
+    if (size <= maxEmbeddedSize) {
+      memcpy(_embedded, str, size);
+      _embedded[size] = '\0';
+    }
+    else {
+      // Zone::dup() with `true` duplicates the data NUL-terminated.
+      char* external = static_cast<char*>(zone->dup(str, size, true));
+      if (ASMJIT_UNLIKELY(!external))
+        return DebugUtils::errored(kErrorOutOfMemory);
+      _external = external;
+    }
+
+    _size = uint32_t(size);
+    return kErrorOk;
+  }
+};
+
+// ============================================================================
+// [asmjit::ZoneString<N>]
+// ============================================================================
+
+//! A string template that can be zone allocated.
+//!
+//! Helps with creating strings that can be either statically allocated if they
+//! are small, or externally allocated in case their size exceeds the limit.
+//! The `N` represents the size of the whole `ZoneString` structure, based on
+//! that size the maximum size of the internal buffer is determined.
+template<size_t N>
+class ZoneString {
+public:
+  //! Total storage size - at least as large as `ZoneStringBase`.
+  static constexpr uint32_t kWholeSize =
+    (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase));
+  //! Maximum embedded length: whole size minus the 4-byte `_size` field and
+  //! one byte for the NUL terminator.
+  static constexpr uint32_t kMaxEmbeddedSize = kWholeSize - 5;
+
+  union {
+    ZoneStringBase _base;
+    char _wholeData[kWholeSize];
+  };
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline ZoneString() noexcept { reset(); }
+  inline void reset() noexcept { _base.reset(); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Tests whether the string is empty.
+  inline bool empty() const noexcept { return _base._size == 0; }
+
+  //! Returns the string data.
+  inline const char* data() const noexcept { return _base._size <= kMaxEmbeddedSize ? _base._embedded : _base._external; }
+  //! Returns the string size.
+  inline uint32_t size() const noexcept { return _base._size; }
+
+  //! Tests whether the string is embedded (e.g. no dynamically allocated).
+  inline bool isEmbedded() const noexcept { return _base._size <= kMaxEmbeddedSize; }
+
+  //! Copies a new `data` of the given `size` to the string.
+  //!
+  //! If the `size` exceeds the internal buffer the given `zone` will be
+  //! used to duplicate the data, otherwise the internal buffer will be
+  //! used as a storage.
+  inline Error setData(Zone* zone, const char* data, size_t size) noexcept {
+    return _base.setData(zone, kMaxEmbeddedSize, data, size);
+  }
+
+  //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_SMALLSTRING_H_INCLUDED
diff --git a/client/asmjit/core/zonetree.cpp b/client/asmjit/core/zonetree.cpp
new file mode 100644
index 0000000..a16f092
--- /dev/null
+++ b/client/asmjit/core/zonetree.cpp
@@ -0,0 +1,118 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonetree.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneTree - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+// Test helper that validates red-black tree invariants (ordering, no
+// consecutive red nodes, equal black height) by recursive traversal.
+template<typename NodeT>
+struct ZoneRBUnit {
+  typedef ZoneTree<NodeT> Tree;
+
+  static void verifyTree(Tree& tree) noexcept {
+    EXPECT(checkHeight(static_cast<NodeT*>(tree._root)) > 0);
+  }
+
+  // Check whether the Red-Black tree is valid.
+  // Returns the black height of the subtree, or 0 when a violation is found.
+  static int checkHeight(NodeT* node) noexcept {
+    if (!node) return 1;
+
+    NodeT* ln = node->left();
+    NodeT* rn = node->right();
+
+    // Invalid tree.
+    EXPECT(ln == nullptr || *ln < *node);
+    EXPECT(rn == nullptr || *rn > *node);
+
+    // Red violation (a red node must not have a red child).
+    EXPECT(!node->isRed() ||
+          (!ZoneTreeNode::_isValidRed(ln) && !ZoneTreeNode::_isValidRed(rn)));
+
+    // Black violation (both subtrees must have the same black height).
+    int lh = checkHeight(ln);
+    int rh = checkHeight(rn);
+    EXPECT(!lh || !rh || lh == rh);
+
+    // Only count black links.
+    return (lh && rh) ? lh + !node->isRed() : 0;
+  }
+};
+
+// Minimal node type for the RB-tree unit test - ordered by an integer key;
+// the uint32_t overloads enable key-based lookup via ZoneTree::get().
+class MyRBNode : public ZoneTreeNodeT<MyRBNode> {
+public:
+  ASMJIT_NONCOPYABLE(MyRBNode)
+
+  inline explicit MyRBNode(uint32_t key) noexcept
+    : _key(key) {}
+
+  inline bool operator<(const MyRBNode& other) const noexcept { return _key < other._key; }
+  inline bool operator>(const MyRBNode& other) const noexcept { return _key > other._key; }
+
+  inline bool operator<(uint32_t queryKey) const noexcept { return _key < queryKey; }
+  inline bool operator>(uint32_t queryKey) const noexcept { return _key > queryKey; }
+
+  uint32_t _key;
+};
+
+// Inserts sequential keys, verifying RB invariants after every insert, then
+// removes keys from the highest down while re-checking every remaining key is
+// still reachable after each removal.
+UNIT(zone_rbtree) {
+  uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 10000;
+
+  Zone zone(4096);
+  ZoneTree<MyRBNode> rbTree;
+
+  uint32_t key;
+  INFO("Inserting %u elements to RBTree and validating each operation", unsigned(kCount));
+  for (key = 0; key < kCount; key++) {
+    rbTree.insert(zone.newT<MyRBNode>(key));
+    ZoneRBUnit<MyRBNode>::verifyTree(rbTree);
+  }
+
+  uint32_t count = kCount;
+  INFO("Removing %u elements from RBTree and validating each operation", unsigned(kCount));
+  do {
+    MyRBNode* node;
+
+    // All remaining keys must still be present before removing the largest.
+    for (key = 0; key < count; key++) {
+      node = rbTree.get(key);
+      EXPECT(node != nullptr);
+      EXPECT(node->_key == key);
+    }
+
+    node = rbTree.get(--count);
+    rbTree.remove(node);
+    ZoneRBUnit<MyRBNode>::verifyTree(rbTree);
+  } while (count);
+
+  EXPECT(rbTree.empty());
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/zonetree.h b/client/asmjit/core/zonetree.h
new file mode 100644
index 0000000..1877919
--- /dev/null
+++ b/client/asmjit/core/zonetree.h
@@ -0,0 +1,385 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONETREE_H_INCLUDED
+#define ASMJIT_CORE_ZONETREE_H_INCLUDED
+
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneTreeNode]
+// ============================================================================
+
+//! RB-Tree node.
+//!
+//! The color is stored in a least significant bit of the `left` node.
+//!
+//! WARNING: Always use accessors to access left and right children.
+class ZoneTreeNode {
+public:
+  ASMJIT_NONCOPYABLE(ZoneTreeNode)
+
+  enum : uintptr_t {
+    kRedMask = 0x1,
+    kPtrMask = ~kRedMask
+  };
+
+  //! [0] = left child pointer with the RED bit in its LSB; [1] = right child.
+  uintptr_t _rbNodeData[Globals::kLinkCount];
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline ZoneTreeNode() noexcept
+    : _rbNodeData { 0, 0 } {}
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  inline bool isRed() const noexcept { return static_cast<bool>(_rbNodeData[0] & kRedMask); }
+
+  // NOTE: The color bit lives only in slot [0], which is why slot checks are
+  // `> kRedMask` (non-null after masking out the bit) while `hasRight()` can
+  // simply compare against zero.
+  inline bool hasChild(size_t i) const noexcept { return _rbNodeData[i] > kRedMask; }
+  inline bool hasLeft() const noexcept { return _rbNodeData[0] > kRedMask; }
+  inline bool hasRight() const noexcept { return _rbNodeData[1] != 0; }
+
+  template<typename T = ZoneTreeNode>
+  inline T* child(size_t i) const noexcept { return static_cast<T*>(_getChild(i)); }
+  template<typename T = ZoneTreeNode>
+  inline T* left() const noexcept { return static_cast<T*>(_getLeft()); }
+  template<typename T = ZoneTreeNode>
+  inline T* right() const noexcept { return static_cast<T*>(_getRight()); }
+
+  //! \}
+
+  //! \cond INTERNAL
+  //! \name Internal
+  //! \{
+
+  inline ZoneTreeNode* _getChild(size_t i) const noexcept { return (ZoneTreeNode*)(_rbNodeData[i] & kPtrMask); }
+  inline ZoneTreeNode* _getLeft() const noexcept { return (ZoneTreeNode*)(_rbNodeData[0] & kPtrMask); }
+  inline ZoneTreeNode* _getRight() const noexcept { return (ZoneTreeNode*)(_rbNodeData[1]); }
+
+  // Setters preserve the color bit stored in slot [0].
+  inline void _setChild(size_t i, ZoneTreeNode* node) noexcept { _rbNodeData[i] = (_rbNodeData[i] & kRedMask) | (uintptr_t)node; }
+  inline void _setLeft(ZoneTreeNode* node) noexcept { _rbNodeData[0] = (_rbNodeData[0] & kRedMask) | (uintptr_t)node; }
+  inline void _setRight(ZoneTreeNode* node) noexcept { _rbNodeData[1] = (uintptr_t)node; }
+
+  inline void _makeRed() noexcept { _rbNodeData[0] |= kRedMask; }
+  inline void _makeBlack() noexcept { _rbNodeData[0] &= kPtrMask; }
+
+  //! Tests whether the node is RED (RED node must be non-null and must have RED flag set).
+  static inline bool _isValidRed(ZoneTreeNode* node) noexcept { return node && node->isRed(); }
+
+  //! \}
+  //! \endcond
+};
+
+//! RB-Tree node casted to `NodeT`.
+template<typename NodeT>
+class ZoneTreeNodeT : public ZoneTreeNode {
+public:
+  ASMJIT_NONCOPYABLE(ZoneTreeNodeT)
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline ZoneTreeNodeT() noexcept
+    : ZoneTreeNode() {}
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Typed accessors shadowing the base-class ones, returning `NodeT*`.
+  inline NodeT* child(size_t i) const noexcept { return static_cast<NodeT*>(_getChild(i)); }
+  inline NodeT* left() const noexcept { return static_cast<NodeT*>(_getLeft()); }
+  inline NodeT* right() const noexcept { return static_cast<NodeT*>(_getRight()); }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneTree]
+// ============================================================================
+
+//! RB-Tree.
+//! Intrusive red-black tree using top-down (single-pass) insertion and
+//! deletion, so no parent pointers or recursion are needed.
+template<typename NodeT>
+class ZoneTree {
+public:
+  ASMJIT_NONCOPYABLE(ZoneTree)
+
+  typedef NodeT Node;
+  NodeT* _root;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline ZoneTree() noexcept
+    : _root(nullptr) {}
+
+  inline ZoneTree(ZoneTree&& other) noexcept
+    : _root(other._root) {}
+
+  inline void reset() noexcept { _root = nullptr; }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  inline bool empty() const noexcept { return _root == nullptr; }
+  inline NodeT* root() const noexcept { return static_cast<NodeT*>(_root); }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  inline void swap(ZoneTree& other) noexcept {
+    std::swap(_root, other._root);
+  }
+
+  //! Inserts `node` into the tree; `node` must be freshly constructed
+  //! (no children, black). Top-down insertion with color flips and
+  //! rotations performed on the way down.
+  template<typename CompareT = Support::Compare<Support::kSortAscending>>
+  void insert(NodeT* node, const CompareT& cmp = CompareT()) noexcept {
+    // Node to insert must not contain garbage.
+    ASMJIT_ASSERT(!node->hasLeft());
+    ASMJIT_ASSERT(!node->hasRight());
+    ASMJIT_ASSERT(!node->isRed());
+
+    if (!_root) {
+      _root = node;
+      return;
+    }
+
+    ZoneTreeNode head;           // False root node,
+    head._setRight(_root);       // having root on the right.
+
+    ZoneTreeNode* g = nullptr;   // Grandparent.
+    ZoneTreeNode* p = nullptr;   // Parent.
+    ZoneTreeNode* t = &head;     // Iterator.
+    ZoneTreeNode* q = _root;     // Query.
+
+    size_t dir = 0;              // Direction for accessing child nodes.
+    size_t last = 0;             // Not needed to initialize, but makes some tools happy.
+
+    node->_makeRed();            // New nodes are always red and violations fixed appropriately.
+
+    // Search down the tree.
+    for (;;) {
+      if (!q) {
+        // Insert new node at the bottom.
+        q = node;
+        p->_setChild(dir, node);
+      }
+      else if (_isValidRed(q->_getLeft()) && _isValidRed(q->_getRight())) {
+        // Color flip.
+        q->_makeRed();
+        q->_getLeft()->_makeBlack();
+        q->_getRight()->_makeBlack();
+      }
+
+      // Fix red violation.
+      if (_isValidRed(q) && _isValidRed(p))
+        t->_setChild(t->_getRight() == g,
+                     q == p->_getChild(last) ? _singleRotate(g, !last) : _doubleRotate(g, !last));
+
+      // Stop if found.
+      if (q == node)
+        break;
+
+      last = dir;
+      dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
+
+      // Update helpers.
+      if (g) t = g;
+
+      g = p;
+      p = q;
+      q = q->_getChild(dir);
+    }
+
+    // Update root and make it black.
+    _root = static_cast<NodeT*>(head._getRight());
+    _root->_makeBlack();
+  }
+
+  //! Remove node from RBTree.
+  //!
+  //! Top-down deletion: a red node is pushed down toward the target so the
+  //! removed node is effectively red and no bottom-up rebalancing is needed.
+  template<typename CompareT = Support::Compare<Support::kSortAscending>>
+  void remove(ZoneTreeNode* node, const CompareT& cmp = CompareT()) noexcept {
+    ZoneTreeNode head;           // False root node,
+    head._setRight(_root);       // having root on the right.
+
+    ZoneTreeNode* g = nullptr;   // Grandparent.
+    ZoneTreeNode* p = nullptr;   // Parent.
+    ZoneTreeNode* q = &head;     // Query.
+
+    ZoneTreeNode* f = nullptr;   // Found item.
+    ZoneTreeNode* gf = nullptr;  // Found grandparent.
+    size_t dir = 1;              // Direction (0 or 1).
+
+    // Search and push a red down.
+    while (q->hasChild(dir)) {
+      size_t last = dir;
+
+      // Update helpers.
+      g = p;
+      p = q;
+      q = q->_getChild(dir);
+      dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
+
+      // Save found node.
+      if (q == node) {
+        f = q;
+        gf = g;
+      }
+
+      // Push the red node down.
+      if (!_isValidRed(q) && !_isValidRed(q->_getChild(dir))) {
+        if (_isValidRed(q->_getChild(!dir))) {
+          ZoneTreeNode* child = _singleRotate(q, dir);
+          p->_setChild(last, child);
+          p = child;
+        }
+        else if (!_isValidRed(q->_getChild(!dir)) && p->_getChild(!last)) {
+          ZoneTreeNode* s = p->_getChild(!last);
+          if (!_isValidRed(s->_getChild(!last)) && !_isValidRed(s->_getChild(last))) {
+            // Color flip.
+            p->_makeBlack();
+            s->_makeRed();
+            q->_makeRed();
+          }
+          else {
+            size_t dir2 = g->_getRight() == p;
+            ZoneTreeNode* child = g->_getChild(dir2);
+
+            if (_isValidRed(s->_getChild(last))) {
+              child = _doubleRotate(p, last);
+              g->_setChild(dir2, child);
+            }
+            else if (_isValidRed(s->_getChild(!last))) {
+              child = _singleRotate(p, last);
+              g->_setChild(dir2, child);
+            }
+
+            // Ensure correct coloring.
+            q->_makeRed();
+            child->_makeRed();
+            child->_getLeft()->_makeBlack();
+            child->_getRight()->_makeBlack();
+          }
+        }
+      }
+    }
+
+    // Replace and remove.
+    ASMJIT_ASSERT(f != nullptr);
+    ASMJIT_ASSERT(f != &head);
+    ASMJIT_ASSERT(q != &head);
+
+    p->_setChild(p->_getRight() == q,
+                 q->_getChild(q->_getLeft() == nullptr));
+
+    // NOTE: The original algorithm used a trick to just copy 'key/value' to
+    // `f` and mark `q` for deletion. But this is unacceptable here as we
+    // really want to destroy the passed `node`. So, we have to make sure that
+    // we have really removed `f` and not `q`.
+    if (f != q) {
+      ASMJIT_ASSERT(f != &head);
+      ASMJIT_ASSERT(f != gf);
+
+      // Walk down from `f`'s grandparent to relink `q` (the in-order
+      // successor that survived) into `f`'s position.
+      ZoneTreeNode* n = gf ? gf : &head;
+      dir = (n == &head) ? 1 : cmp(*static_cast<NodeT*>(n), *static_cast<NodeT*>(node)) < 0;
+
+      for (;;) {
+        if (n->_getChild(dir) == f) {
+          n->_setChild(dir, q);
+          // RAW copy, including the color.
+          q->_rbNodeData[0] = f->_rbNodeData[0];
+          q->_rbNodeData[1] = f->_rbNodeData[1];
+          break;
+        }
+
+        n = n->_getChild(dir);
+
+        // Cannot be true as we know that the walk must reach `f` in a few iterations.
+        ASMJIT_ASSERT(n != nullptr);
+        dir = cmp(*static_cast<NodeT*>(n), *static_cast<NodeT*>(node)) < 0;
+      }
+    }
+
+    // Update root and make it black.
+    _root = static_cast<NodeT*>(head._getRight());
+    if (_root) _root->_makeBlack();
+  }
+
+  //! Finds a node equal to `key` per `cmp`, or nullptr when not present.
+  template<typename KeyT, typename CompareT = Support::Compare<Support::kSortAscending>>
+  ASMJIT_INLINE NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept {
+    ZoneTreeNode* node = _root;
+    while (node) {
+      auto result = cmp(*static_cast<const NodeT*>(node), key);
+      if (result == 0) break;
+
+      // Go left or right depending on the `result`.
+      node = node->_getChild(result < 0);
+    }
+    return static_cast<NodeT*>(node);
+  }
+
+  //! \}
+
+  //! \cond INTERNAL
+  //! \name Internal
+  //! \{
+
+  static inline bool _isValidRed(ZoneTreeNode* node) noexcept { return ZoneTreeNode::_isValidRed(node); }
+
+  //! Single rotation.
+  static ASMJIT_INLINE ZoneTreeNode* _singleRotate(ZoneTreeNode* root, size_t dir) noexcept {
+    ZoneTreeNode* save = root->_getChild(!dir);
+    root->_setChild(!dir, save->_getChild(dir));
+    save->_setChild( dir, root);
+    root->_makeRed();
+    save->_makeBlack();
+    return save;
+  }
+
+  //! Double rotation.
+  static ASMJIT_INLINE ZoneTreeNode* _doubleRotate(ZoneTreeNode* root, size_t dir) noexcept {
+    root->_setChild(!dir, _singleRotate(root->_getChild(!dir), !dir));
+    return _singleRotate(root, dir);
+  }
+
+  //! \}
+  //! \endcond
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONETREE_H_INCLUDED
diff --git a/client/asmjit/core/zonevector.cpp b/client/asmjit/core/zonevector.cpp
new file mode 100644
index 0000000..7ab53bf
--- /dev/null
+++ b/client/asmjit/core/zonevector.cpp
@@ -0,0 +1,375 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneVectorBase - Helpers]
+// ============================================================================
+
+// Ensures capacity for `_size + n` elements of `sizeOfT` bytes each.
+//
+// Capacity is first bumped through fixed steps (4, 8, 16, 64, 256 elements),
+// then doubled until `Globals::kGrowThreshold / sizeOfT` elements is reached,
+// after which it grows linearly by that threshold. Returns `kErrorOutOfMemory`
+// when `_size + n` would overflow uint32_t, otherwise delegates to `_reserve()`.
+Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
+  uint32_t threshold = Globals::kGrowThreshold / sizeOfT;
+  uint32_t capacity = _capacity;
+  uint32_t after = _size;
+
+  // Overflow check: `_size + n` must fit in uint32_t.
+  if (ASMJIT_UNLIKELY(std::numeric_limits<uint32_t>::max() - n < after))
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  after += n;
+  if (capacity >= after)
+    return kErrorOk;
+
+  // ZoneVector is used as an array to hold short-lived data structures used
+  // during code generation. The growing strategy is simple - use small capacity
+  // at the beginning (very good for ZoneAllocator) and then grow quicker to
+  // prevent successive reallocations.
+  if (capacity < 4)
+    capacity = 4;
+  else if (capacity < 8)
+    capacity = 8;
+  else if (capacity < 16)
+    capacity = 16;
+  else if (capacity < 64)
+    capacity = 64;
+  else if (capacity < 256)
+    capacity = 256;
+
+  while (capacity < after) {
+    if (capacity < threshold)
+      capacity *= 2;
+    else
+      capacity += threshold;
+  }
+
+  return _reserve(allocator, sizeOfT, capacity);
+}
+
+// Reallocates storage to hold at least `n` elements of `sizeOfT` bytes,
+// copying the existing `_size` elements into the new buffer and releasing the
+// old buffer back to `allocator`. No-op when the current capacity suffices.
+Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
+  uint32_t oldCapacity = _capacity;
+  if (oldCapacity >= n) return kErrorOk;
+
+  // Overflow check of `n * sizeOfT` - a wrapped product is smaller than `n`.
+  uint32_t nBytes = n * sizeOfT;
+  if (ASMJIT_UNLIKELY(nBytes < n))
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  size_t allocatedBytes;
+  uint8_t* newData = static_cast<uint8_t*>(allocator->alloc(nBytes, allocatedBytes));
+
+  if (ASMJIT_UNLIKELY(!newData))
+    return DebugUtils::errored(kErrorOutOfMemory);
+
+  void* oldData = _data;
+  if (_size)
+    memcpy(newData, oldData, size_t(_size) * sizeOfT);
+
+  if (oldData)
+    allocator->release(oldData, size_t(oldCapacity) * sizeOfT);
+
+  // The allocator may return more than requested - keep the real capacity.
+  _capacity = uint32_t(allocatedBytes / sizeOfT);
+  ASMJIT_ASSERT(_capacity >= n);
+
+  _data = newData;
+  return kErrorOk;
+}
+
+// Resizes the vector to exactly `n` elements, growing when necessary and
+// zero-filling any newly exposed elements. Shrinking just lowers `_size`
+// without releasing or clearing storage.
+Error ZoneVectorBase::_resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
+  uint32_t size = _size;
+
+  if (_capacity < n) {
+    ASMJIT_PROPAGATE(_grow(allocator, sizeOfT, n - size));
+    ASMJIT_ASSERT(_capacity >= n);
+  }
+
+  // Zero-initialize the region between the old size and the new size.
+  if (size < n)
+    memset(static_cast<uint8_t*>(_data) + size_t(size) * sizeOfT, 0, size_t(n - size) * sizeOfT);
+
+  _size = n;
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::ZoneBitVector - Ops]
+// ============================================================================
+
+// Makes this bit-vector an exact copy of `other` (same size and bit content).
+// Reallocates when the current capacity (in bits) is smaller than the new
+// size; an empty `other` only resets `_size` and keeps the current buffer.
+Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept {
+  BitWord* data = _data;
+  uint32_t newSize = other.size();
+
+  if (!newSize) {
+    _size = 0;
+    return kErrorOk;
+  }
+
+  if (newSize > _capacity) {
+    // Realloc needed... Calculate the minimum capacity (in bytes) required.
+    uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(newSize, kBitWordSizeInBits);
+    if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    // Normalize to bytes.
+    uint32_t minimumCapacity = minimumCapacityInBits / 8;
+    size_t allocatedCapacity;
+
+    BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
+    if (ASMJIT_UNLIKELY(!newData))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    // `allocatedCapacity` now contains number in bytes, we need bits.
+    size_t allocatedCapacityInBits = allocatedCapacity * 8;
+
+    // Arithmetic overflow should normally not happen. If it happens we just
+    // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
+    // this value is still safe to be used to call `_allocator->release(...)`.
+    if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
+      allocatedCapacityInBits = minimumCapacityInBits;
+
+    // `_capacity` is always bit-word aligned, so `_capacity / 8` is the exact
+    // byte count of the previous allocation.
+    if (data)
+      allocator->release(data, _capacity / 8);
+    data = newData;
+
+    _data = data;
+    _capacity = uint32_t(allocatedCapacityInBits);
+  }
+
+  _size = newSize;
+  _copyBits(data, other.data(), _wordsPerBits(newSize));
+
+  return kErrorOk;
+}
+
+// Resizes the bit-vector to `newSize` bits, reallocating to at least
+// `idealCapacity` bits when the current capacity is too small. New bits are
+// initialized to `newBitsValue`; shrinking clears the now-unused bits of the
+// last word to keep the "bits above `_size` are zero" invariant.
+Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_t idealCapacity, bool newBitsValue) noexcept {
+  ASMJIT_ASSERT(idealCapacity >= newSize);
+
+  if (newSize <= _size) {
+    // The size after the resize is lesser than or equal to the current size.
+    uint32_t idx = newSize / kBitWordSizeInBits;
+    uint32_t bit = newSize % kBitWordSizeInBits;
+
+    // Just set all bits outside of the new size in the last word to zero.
+    // There is a case that there are not bits to set if `bit` is zero. This
+    // happens when `newSize` is a multiple of `kBitWordSizeInBits` like 64, 128,
+    // and so on. In that case don't change anything as that would mean setting
+    // bits outside of the `_size`.
+    if (bit)
+      _data[idx] &= (BitWord(1) << bit) - 1u;
+
+    _size = newSize;
+    return kErrorOk;
+  }
+
+  uint32_t oldSize = _size;
+  BitWord* data = _data;
+
+  if (newSize > _capacity) {
+    // Realloc needed, calculate the minimum capacity (in bytes) required.
+    uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(idealCapacity, kBitWordSizeInBits);
+
+    if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    // Normalize to bytes.
+    uint32_t minimumCapacity = minimumCapacityInBits / 8;
+    size_t allocatedCapacity;
+
+    BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
+    if (ASMJIT_UNLIKELY(!newData))
+      return DebugUtils::errored(kErrorOutOfMemory);
+
+    // `allocatedCapacity` now contains number in bytes, we need bits.
+    size_t allocatedCapacityInBits = allocatedCapacity * 8;
+
+    // Arithmetic overflow should normally not happen. If it happens we just
+    // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
+    // this value is still safe to be used to call `_allocator->release(...)`.
+    if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
+      allocatedCapacityInBits = minimumCapacityInBits;
+
+    _copyBits(newData, data, _wordsPerBits(oldSize));
+
+    if (data)
+      allocator->release(data, _capacity / 8);
+    data = newData;
+
+    _data = data;
+    _capacity = uint32_t(allocatedCapacityInBits);
+  }
+
+  // Start (of the old size) and end (of the new size) bits
+  uint32_t idx = oldSize / kBitWordSizeInBits;
+  uint32_t startBit = oldSize % kBitWordSizeInBits;
+  uint32_t endBit = newSize % kBitWordSizeInBits;
+
+  // Set new bits to either 0 or 1. The `pattern` is used to set multiple
+  // bits per bit-word and contains either all zeros or all ones.
+  BitWord pattern = Support::bitMaskFromBool<BitWord>(newBitsValue);
+
+  // First initialize the last bit-word of the old size.
+  if (startBit) {
+    uint32_t nBits = 0;
+
+    if (idx == (newSize / kBitWordSizeInBits)) {
+      // The number of bit-words is the same after the resize. In that case
+      // we need to set only bits necessary in the current last bit-word.
+      ASMJIT_ASSERT(startBit < endBit);
+      nBits = endBit - startBit;
+    }
+    else {
+      // There will be more bit-words after the resize. In that case we don't
+      // have to be extra careful about the last bit-word of the old size.
+      nBits = kBitWordSizeInBits - startBit;
+    }
+
+    // NOTE(review): for an all-ones `pattern` this sets bits [nBits, W) rather
+    // than [startBit, startBit + nBits) - confirm this is intended; see also
+    // the last-word assignment below.
+    data[idx++] |= pattern << nBits;
+  }
+
+  // Initialize all bit-words after the last bit-word of the old size.
+  uint32_t endIdx = _wordsPerBits(newSize);
+  while (idx < endIdx) data[idx++] = pattern;
+
+  // Clear unused bits of the last bit-word.
+  // NOTE(review): when the partially-used last word of the old size is also
+  // the last word of the new size (startBit != 0 and endIdx - 1 equals the old
+  // last word), this assignment replaces the whole word with `pattern`,
+  // discarding pre-existing bits below `startBit` - verify that callers only
+  // grow from word-aligned (or zero) sizes, as the unit tests do.
+  if (endBit)
+    data[endIdx - 1] = pattern & ((BitWord(1) << endBit) - 1);
+
+  _size = newSize;
+  return kErrorOk;
+}
+
+// Slow path of `append()` taken when the capacity is exhausted. Chooses an
+// ideal capacity (at least 128 bits, doubling up to `kGrowThreshold * 8` bits,
+// then growing linearly) and delegates to `_resize()` with one new bit set to
+// `value`.
+Error ZoneBitVector::_append(ZoneAllocator* allocator, bool value) noexcept {
+  uint32_t kThreshold = Globals::kGrowThreshold * 8;
+  uint32_t newSize = _size + 1;
+  uint32_t idealCapacity = _capacity;
+
+  if (idealCapacity < 128)
+    idealCapacity = 128;
+  else if (idealCapacity <= kThreshold)
+    idealCapacity *= 2;
+  else
+    idealCapacity += kThreshold;
+
+  // If the ideal capacity wrapped around, fall back to the exact new size
+  // (unless the size itself is already at the uint32_t limit).
+  if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) {
+    if (ASMJIT_UNLIKELY(_size == std::numeric_limits<uint32_t>::max()))
+      return DebugUtils::errored(kErrorOutOfMemory);
+    idealCapacity = newSize;
+  }
+
+  return _resize(allocator, newSize, idealCapacity, value);
+}
+
+// ============================================================================
+// [asmjit::ZoneVector / ZoneBitVector - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+// Exercises ZoneVector<T>: append / clear / indexOf semantics and repeated
+// growth while appending `kMax` items.
+template<typename T>
+static void test_zone_vector(ZoneAllocator* allocator, const char* typeName) {
+  int i;
+  int kMax = 100000;
+
+  ZoneVector<T> vec;
+
+  INFO("ZoneVector<%s> basic tests", typeName);
+  EXPECT(vec.append(allocator, 0) == kErrorOk);
+  EXPECT(vec.empty() == false);
+  EXPECT(vec.size() == 1);
+  EXPECT(vec.capacity() >= 1);
+  EXPECT(vec.indexOf(0) == 0);
+  EXPECT(vec.indexOf(-11) == Globals::kNotFound);
+
+  vec.clear();
+  EXPECT(vec.empty());
+  EXPECT(vec.size() == 0);
+  EXPECT(vec.indexOf(0) == Globals::kNotFound);
+
+  for (i = 0; i < kMax; i++) {
+    EXPECT(vec.append(allocator, T(i)) == kErrorOk);
+  }
+  EXPECT(vec.empty() == false);
+  EXPECT(vec.size() == uint32_t(kMax));
+  EXPECT(vec.indexOf(T(kMax - 1)) == uint32_t(kMax - 1));
+
+  vec.release(allocator);
+}
+
+// Exercises ZoneBitVector: resize() with both fill values and
+// fillBits()/clearBits() over interleaved ranges.
+static void test_zone_bitvector(ZoneAllocator* allocator) {
+  // NOTE(review): this local `zone` is unused - the vector allocates from the
+  // `allocator` argument; consider removing it.
+  Zone zone(8096 - Zone::kBlockOverhead);
+
+  uint32_t i, count;
+  uint32_t kMaxCount = 100;
+
+  ZoneBitVector vec;
+  EXPECT(vec.empty());
+  EXPECT(vec.size() == 0);
+
+  INFO("ZoneBitVector::resize()");
+  for (count = 1; count < kMaxCount; count++) {
+    vec.clear();
+    EXPECT(vec.resize(allocator, count, false) == kErrorOk);
+    EXPECT(vec.size() == count);
+
+    for (i = 0; i < count; i++)
+      EXPECT(vec.bitAt(i) == false);
+
+    vec.clear();
+    EXPECT(vec.resize(allocator, count, true) == kErrorOk);
+    EXPECT(vec.size() == count);
+
+    for (i = 0; i < count; i++)
+      EXPECT(vec.bitAt(i) == true);
+  }
+
+  INFO("ZoneBitVector::fillBits() / clearBits()");
+  for (count = 1; count < kMaxCount; count += 2) {
+    vec.clear();
+    EXPECT(vec.resize(allocator, count) == kErrorOk);
+    EXPECT(vec.size() == count);
+
+    for (i = 0; i < (count + 1) / 2; i++) {
+      bool value = bool(i & 1);
+      if (value)
+        vec.fillBits(i, count - i * 2);
+      else
+        vec.clearBits(i, count - i * 2);
+    }
+
+    for (i = 0; i < count; i++) {
+      EXPECT(vec.bitAt(i) == bool(i & 1));
+    }
+  }
+}
+
+// Unit test entry point - runs both vector tests against one shared
+// zone-backed allocator.
+UNIT(zone_vector) {
+  Zone zone(8096 - Zone::kBlockOverhead);
+  ZoneAllocator allocator(&zone);
+
+  test_zone_vector<int>(&allocator, "int");
+  test_zone_vector<int64_t>(&allocator, "int64_t");
+  test_zone_bitvector(&allocator);
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/client/asmjit/core/zonevector.h b/client/asmjit/core/zonevector.h
new file mode 100644
index 0000000..770543b
--- /dev/null
+++ b/client/asmjit/core/zonevector.h
@@ -0,0 +1,710 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONEVECTOR_H_INCLUDED
+#define ASMJIT_CORE_ZONEVECTOR_H_INCLUDED
+
+#include "../core/support.h"
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneVectorBase]
+// ============================================================================
+
+//! Base class used by \ref ZoneVector template.
+//! Base class used by \ref ZoneVector template.
+class ZoneVectorBase {
+public:
+  ASMJIT_NONCOPYABLE(ZoneVectorBase)
+
+  // STL compatibility:
+  typedef uint32_t size_type;
+  typedef ptrdiff_t difference_type;
+
+  //! Vector data (untyped).
+  void* _data;
+  //! Size of the vector.
+  size_type _size;
+  //! Capacity of the vector.
+  size_type _capacity;
+
+protected:
+  //! \name Construction & Destruction
+  //! \{
+
+  //! Creates a new instance of `ZoneVectorBase`.
+  inline ZoneVectorBase() noexcept
+    : _data(nullptr),
+      _size(0),
+      _capacity(0) {}
+
+  //! Moves `other` into this vector and resets `other`, so the moved-from
+  //! vector no longer points to storage it doesn't own (otherwise releasing
+  //! both vectors would return the same buffer to the allocator twice).
+  inline ZoneVectorBase(ZoneVectorBase&& other) noexcept
+    : _data(other._data),
+      _size(other._size),
+      _capacity(other._capacity) { other.reset(); }
+
+  //! \}
+
+  //! \cond INTERNAL
+  //! \name Internal
+  //! \{
+
+  //! Releases the storage (if any) back to `allocator` and resets the vector.
+  inline void _release(ZoneAllocator* allocator, uint32_t sizeOfT) noexcept {
+    if (_data != nullptr) {
+      allocator->release(_data, _capacity * sizeOfT);
+      reset();
+    }
+  }
+
+  ASMJIT_API Error _grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
+  ASMJIT_API Error _resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
+  ASMJIT_API Error _reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
+
+  //! Swaps all members with `other`; used by derived `swap()` helpers.
+  inline void _swap(ZoneVectorBase& other) noexcept {
+    std::swap(_data, other._data);
+    std::swap(_size, other._size);
+    std::swap(_capacity, other._capacity);
+  }
+
+  //! \}
+  //! \endcond
+
+public:
+  //! \name Accessors
+  //! \{
+
+  //! Tests whether the vector is empty.
+  inline bool empty() const noexcept { return _size == 0; }
+  //! Returns the vector size.
+  inline size_type size() const noexcept { return _size; }
+  //! Returns the vector capacity.
+  inline size_type capacity() const noexcept { return _capacity; }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Makes the vector empty (won't change the capacity or data pointer).
+  inline void clear() noexcept { _size = 0; }
+  //! Resets the vector data and set its `size` to zero.
+  inline void reset() noexcept {
+    _data = nullptr;
+    _size = 0;
+    _capacity = 0;
+  }
+
+  //! Truncates the vector to at most `n` items.
+  inline void truncate(size_type n) noexcept {
+    _size = Support::min(_size, n);
+  }
+
+  //! Sets size of the vector to `n`. Used internally by some algorithms.
+  inline void _setSize(size_type n) noexcept {
+    ASMJIT_ASSERT(n <= _capacity);
+    _size = n;
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneVector<T>]
+// ============================================================================
+
+//! Template used to store and manage array of Zone allocated data.
+//!
+//! This template has these advantages over other std::vector<>:
+//! - Always non-copyable (designed to be non-copyable, we want it).
+//! - Optimized for working only with POD types.
+//! - Uses ZoneAllocator, thus small vectors are almost for free.
+//! - Explicit allocation, ZoneAllocator is not part of the data.
+template <typename T>
+class ZoneVector : public ZoneVectorBase {
+public:
+  ASMJIT_NONCOPYABLE(ZoneVector<T>)
+
+  // STL compatibility:
+  typedef T value_type;
+  typedef T* pointer;
+  typedef const T* const_pointer;
+  typedef T& reference;
+  typedef const T& const_reference;
+
+  typedef Support::Iterator<T> iterator;
+  typedef Support::Iterator<const T> const_iterator;
+  typedef Support::ReverseIterator<T> reverse_iterator;
+  typedef Support::ReverseIterator<const T> const_reverse_iterator;
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline ZoneVector() noexcept : ZoneVectorBase() {}
+  //! Moves `other` into this vector by delegating to the `ZoneVectorBase`
+  //! move constructor. Delegating to `ZoneVector(other)` would select the
+  //! deleted copy constructor (`other` is an lvalue inside this constructor)
+  //! and fail to compile when the move constructor is instantiated.
+  inline ZoneVector(ZoneVector&& other) noexcept : ZoneVectorBase(std::move(other)) {}
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Returns vector data.
+  inline T* data() noexcept { return static_cast<T*>(_data); }
+  //! Returns vector data (const)
+  inline const T* data() const noexcept { return static_cast<const T*>(_data); }
+
+  //! Returns item at the given index `i` (const).
+  inline const T& at(size_t i) const noexcept {
+    ASMJIT_ASSERT(i < _size);
+    return data()[i];
+  }
+
+  //! Sets the size from an end pointer `p` (must point into the buffer).
+  inline void _setEndPtr(T* p) noexcept {
+    ASMJIT_ASSERT(p >= data() && p <= data() + _capacity);
+    _setSize(uint32_t((uintptr_t)(p - data())));
+  }
+
+  //! \}
+
+  //! \name STL Compatibility (Iterators)
+  //! \{
+
+  inline iterator begin() noexcept { return iterator(data()); };
+  inline const_iterator begin() const noexcept { return const_iterator(data()); };
+
+  inline iterator end() noexcept { return iterator(data() + _size); };
+  inline const_iterator end() const noexcept { return const_iterator(data() + _size); };
+
+  inline reverse_iterator rbegin() noexcept { return reverse_iterator(data()); };
+  inline const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(data()); };
+
+  inline reverse_iterator rend() noexcept { return reverse_iterator(data() + _size); };
+  inline const_reverse_iterator rend() const noexcept { return const_reverse_iterator(data() + _size); };
+
+  inline const_iterator cbegin() const noexcept { return const_iterator(data()); };
+  inline const_iterator cend() const noexcept { return const_iterator(data() + _size); };
+
+  inline const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(data()); };
+  inline const_reverse_iterator crend() const noexcept { return const_reverse_iterator(data() + _size); };
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  //! Swaps this vector with `other`.
+  inline void swap(ZoneVector<T>& other) noexcept { _swap(other); }
+
+  //! Prepends `item` to the vector.
+  inline Error prepend(ZoneAllocator* allocator, const T& item) noexcept {
+    if (ASMJIT_UNLIKELY(_size == _capacity))
+      ASMJIT_PROPAGATE(grow(allocator, 1));
+
+    ::memmove(static_cast<T*>(_data) + 1, _data, size_t(_size) * sizeof(T));
+    memcpy(_data, &item, sizeof(T));
+
+    _size++;
+    return kErrorOk;
+  }
+
+  //! Inserts an `item` at the specified `index`.
+  inline Error insert(ZoneAllocator* allocator, uint32_t index, const T& item) noexcept {
+    ASMJIT_ASSERT(index <= _size);
+
+    if (ASMJIT_UNLIKELY(_size == _capacity))
+      ASMJIT_PROPAGATE(grow(allocator, 1));
+
+    T* dst = static_cast<T*>(_data) + index;
+    ::memmove(dst + 1, dst, size_t(_size - index) * sizeof(T));
+    memcpy(dst, &item, sizeof(T));
+    _size++;
+
+    return kErrorOk;
+  }
+
+  //! Appends `item` to the vector.
+  inline Error append(ZoneAllocator* allocator, const T& item) noexcept {
+    if (ASMJIT_UNLIKELY(_size == _capacity))
+      ASMJIT_PROPAGATE(grow(allocator, 1));
+
+    memcpy(static_cast<T*>(_data) + _size, &item, sizeof(T));
+    _size++;
+
+    return kErrorOk;
+  }
+
+  //! Appends `other` vector at the end of this vector.
+  inline Error concat(ZoneAllocator* allocator, const ZoneVector<T>& other) noexcept {
+    uint32_t size = other._size;
+    if (_capacity - _size < size)
+      ASMJIT_PROPAGATE(grow(allocator, size));
+
+    if (size) {
+      memcpy(static_cast<T*>(_data) + _size, other._data, size_t(size) * sizeof(T));
+      _size += size;
+    }
+
+    return kErrorOk;
+  }
+
+  //! Prepends `item` to the vector (unsafe case).
+  //!
+  //! Can only be used together with `willGrow()`. If `willGrow(N)` returns
+  //! `kErrorOk` then N elements can be added to the vector without checking
+  //! if there is a place for them. Used mostly internally.
+  inline void prependUnsafe(const T& item) noexcept {
+    ASMJIT_ASSERT(_size < _capacity);
+    T* data = static_cast<T*>(_data);
+
+    if (_size)
+      ::memmove(data + 1, data, size_t(_size) * sizeof(T));
+
+    memcpy(data, &item, sizeof(T));
+    _size++;
+  }
+
+  //! Appends `item` to the vector (unsafe case).
+  //!
+  //! Can only be used together with `willGrow()`. If `willGrow(N)` returns
+  //! `kErrorOk` then N elements can be added to the vector without checking
+  //! if there is a place for them. Used mostly internally.
+  inline void appendUnsafe(const T& item) noexcept {
+    ASMJIT_ASSERT(_size < _capacity);
+
+    memcpy(static_cast<T*>(_data) + _size, &item, sizeof(T));
+    _size++;
+  }
+
+  //! Concatenates all items of `other` at the end of the vector.
+  inline void concatUnsafe(const ZoneVector<T>& other) noexcept {
+    uint32_t size = other._size;
+    ASMJIT_ASSERT(_capacity - _size >= size);
+
+    if (size) {
+      memcpy(static_cast<T*>(_data) + _size, other._data, size_t(size) * sizeof(T));
+      _size += size;
+    }
+  }
+
+  //! Returns index of the given `val` or `Globals::kNotFound` if it doesn't exist.
+  inline uint32_t indexOf(const T& val) const noexcept {
+    const T* data = static_cast<const T*>(_data);
+    uint32_t size = _size;
+
+    for (uint32_t i = 0; i < size; i++)
+      if (data[i] == val)
+        return i;
+    return Globals::kNotFound;
+  }
+
+  //! Tests whether the vector contains `val`.
+  inline bool contains(const T& val) const noexcept {
+    return indexOf(val) != Globals::kNotFound;
+  }
+
+  //! Removes item at index `i`.
+  inline void removeAt(uint32_t i) noexcept {
+    ASMJIT_ASSERT(i < _size);
+
+    T* data = static_cast<T*>(_data) + i;
+    uint32_t size = --_size - i;
+
+    if (size)
+      ::memmove(data, data + 1, size_t(size) * sizeof(T));
+  }
+
+  //! Pops the last element from the vector and returns it.
+  inline T pop() noexcept {
+    ASMJIT_ASSERT(_size > 0);
+
+    uint32_t index = --_size;
+    return data()[index];
+  }
+
+  //! Sorts the vector in-place using `cmp`.
+  template<typename CompareT = Support::Compare<Support::kSortAscending>>
+  inline void sort(const CompareT& cmp = CompareT()) noexcept {
+    Support::qSort<T, CompareT>(data(), size(), cmp);
+  }
+
+  //! Returns item at index `i`.
+  inline T& operator[](size_t i) noexcept {
+    ASMJIT_ASSERT(i < _size);
+    return data()[i];
+  }
+
+  //! Returns item at index `i`.
+  inline const T& operator[](size_t i) const noexcept {
+    ASMJIT_ASSERT(i < _size);
+    return data()[i];
+  }
+
+  //! Returns a reference to the first element of the vector.
+  //!
+  //! \note The vector must have at least one element. Attempting to use
+  //! `first()` on empty vector will trigger an assertion failure in debug
+  //! builds.
+  inline T& first() noexcept { return operator[](0); }
+  //! \overload
+  inline const T& first() const noexcept { return operator[](0); }
+
+  //! Returns a reference to the last element of the vector.
+  //!
+  //! \note The vector must have at least one element. Attempting to use
+  //! `last()` on empty vector will trigger an assertion failure in debug
+  //! builds.
+  inline T& last() noexcept { return operator[](_size - 1); }
+  //! \overload
+  inline const T& last() const noexcept { return operator[](_size - 1); }
+
+  //! \}
+
+  //! \name Memory Management
+  //! \{
+
+  //! Releases the memory held by `ZoneVector<T>` back to the `allocator`.
+  inline void release(ZoneAllocator* allocator) noexcept {
+    _release(allocator, sizeof(T));
+  }
+
+  //! Called to grow the buffer to fit at least `n` elements more.
+  inline Error grow(ZoneAllocator* allocator, uint32_t n) noexcept {
+    return ZoneVectorBase::_grow(allocator, sizeof(T), n);
+  }
+
+  //! Resizes the vector to hold `n` elements.
+  //!
+  //! If `n` is greater than the current size then the additional elements'
+  //! content will be initialized to zero. If `n` is less than the current
+  //! size then the vector will be truncated to exactly `n` elements.
+  inline Error resize(ZoneAllocator* allocator, uint32_t n) noexcept {
+    return ZoneVectorBase::_resize(allocator, sizeof(T), n);
+  }
+
+  //! Reallocates the internal array to fit at least `n` items.
+  inline Error reserve(ZoneAllocator* allocator, uint32_t n) noexcept {
+    return n > _capacity ? ZoneVectorBase::_reserve(allocator, sizeof(T), n) : Error(kErrorOk);
+  }
+
+  //! Ensures capacity for `n` more items so `appendUnsafe()` & co. are safe.
+  inline Error willGrow(ZoneAllocator* allocator, uint32_t n = 1) noexcept {
+    return _capacity - _size < n ? grow(allocator, n) : Error(kErrorOk);
+  }
+
+  //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneBitVector]
+// ============================================================================
+
+//! Zone-allocated bit vector.
+//! Zone-allocated bit vector.
+class ZoneBitVector {
+public:
+  typedef Support::BitWord BitWord;
+  static constexpr uint32_t kBitWordSizeInBits = Support::kBitWordSizeInBits;
+
+  //! Bits.
+  BitWord* _data;
+  //! Size of the bit-vector (in bits).
+  uint32_t _size;
+  //! Capacity of the bit-vector (in bits).
+  uint32_t _capacity;
+
+  ASMJIT_NONCOPYABLE(ZoneBitVector)
+
+  //! \cond INTERNAL
+  //! \name Internal
+  //! \{
+
+  //! Returns the number of `BitWord`s needed to store `nBits` bits.
+  static inline uint32_t _wordsPerBits(uint32_t nBits) noexcept {
+    return ((nBits + kBitWordSizeInBits - 1) / kBitWordSizeInBits);
+  }
+
+  static inline void _zeroBits(BitWord* dst, uint32_t nBitWords) noexcept {
+    for (uint32_t i = 0; i < nBitWords; i++)
+      dst[i] = 0;
+  }
+
+  static inline void _fillBits(BitWord* dst, uint32_t nBitWords) noexcept {
+    for (uint32_t i = 0; i < nBitWords; i++)
+      dst[i] = ~BitWord(0);
+  }
+
+  static inline void _copyBits(BitWord* dst, const BitWord* src, uint32_t nBitWords) noexcept {
+    for (uint32_t i = 0; i < nBitWords; i++)
+      dst[i] = src[i];
+  }
+
+  //! \}
+  //! \endcond
+
+  //! \name Construction & Destruction
+  //! \{
+
+  inline ZoneBitVector() noexcept
+    : _data(nullptr),
+      _size(0),
+      _capacity(0) {}
+
+  //! Moves `other` into this bit-vector and resets `other`, so the moved-from
+  //! instance no longer points to storage it doesn't own (otherwise releasing
+  //! both vectors would return the same buffer to the allocator twice).
+  inline ZoneBitVector(ZoneBitVector&& other) noexcept
+    : _data(other._data),
+      _size(other._size),
+      _capacity(other._capacity) { other.reset(); }
+
+  //! \}
+
+  //! \name Overloaded Operators
+  //! \{
+
+  inline bool operator==(const ZoneBitVector& other) const noexcept { return eq(other); }
+  inline bool operator!=(const ZoneBitVector& other) const noexcept { return !eq(other); }
+
+  //! \}
+
+  //! \name Accessors
+  //! \{
+
+  //! Tests whether the bit-vector is empty (has no bits).
+  inline bool empty() const noexcept { return _size == 0; }
+  //! Returns the size of this bit-vector (in bits).
+  inline uint32_t size() const noexcept { return _size; }
+  //! Returns the capacity of this bit-vector (in bits).
+  inline uint32_t capacity() const noexcept { return _capacity; }
+
+  //! Returns the size of the `BitWord[]` array in `BitWord` units.
+  inline uint32_t sizeInBitWords() const noexcept { return _wordsPerBits(_size); }
+  //! Returns the capacity of the `BitWord[]` array in `BitWord` units.
+  inline uint32_t capacityInBitWords() const noexcept { return _wordsPerBits(_capacity); }
+
+  //! Returns bit-vector data as `BitWord[]`.
+  inline BitWord* data() noexcept { return _data; }
+  //! \overload
+  inline const BitWord* data() const noexcept { return _data; }
+
+  //! \}
+
+  //! \name Utilities
+  //! \{
+
+  inline void swap(ZoneBitVector& other) noexcept {
+    std::swap(_data, other._data);
+    std::swap(_size, other._size);
+    std::swap(_capacity, other._capacity);
+  }
+
+  inline void clear() noexcept {
+    _size = 0;
+  }
+
+  inline void reset() noexcept {
+    _data = nullptr;
+    _size = 0;
+    _capacity = 0;
+  }
+
+  inline void truncate(uint32_t newSize) noexcept {
+    _size = Support::min(_size, newSize);
+    _clearUnusedBits();
+  }
+
+  inline bool bitAt(uint32_t index) const noexcept {
+    ASMJIT_ASSERT(index < _size);
+    return Support::bitVectorGetBit(_data, index);
+  }
+
+  inline void setBit(uint32_t index, bool value) noexcept {
+    ASMJIT_ASSERT(index < _size);
+    Support::bitVectorSetBit(_data, index, value);
+  }
+
+  inline void flipBit(uint32_t index) noexcept {
+    ASMJIT_ASSERT(index < _size);
+    Support::bitVectorFlipBit(_data, index);
+  }
+
+  ASMJIT_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept {
+    uint32_t index = _size;
+    if (ASMJIT_UNLIKELY(index >= _capacity))
+      return _append(allocator, value);
+
+    uint32_t idx = index / kBitWordSizeInBits;
+    uint32_t bit = index % kBitWordSizeInBits;
+
+    // The first bit of a new word initializes the whole word; subsequent
+    // bits are OR'ed into it.
+    if (bit == 0)
+      _data[idx] = BitWord(value) << bit;
+    else
+      _data[idx] |= BitWord(value) << bit;
+
+    _size++;
+    return kErrorOk;
+  }
+
+  ASMJIT_API Error copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept;
+
+  inline void clearAll() noexcept {
+    _zeroBits(_data, _wordsPerBits(_size));
+  }
+
+  inline void fillAll() noexcept {
+    _fillBits(_data, _wordsPerBits(_size));
+    _clearUnusedBits();
+  }
+
+  inline void clearBits(uint32_t start, uint32_t count) noexcept {
+    ASMJIT_ASSERT(start <= _size);
+    ASMJIT_ASSERT(_size - start >= count);
+
+    Support::bitVectorClear(_data, start, count);
+  }
+
+  inline void fillBits(uint32_t start, uint32_t count) noexcept {
+    ASMJIT_ASSERT(start <= _size);
+    ASMJIT_ASSERT(_size - start >= count);
+
+    Support::bitVectorFill(_data, start, count);
+  }
+
+  //! Performs a logical bitwise AND between bits specified in this array and bits
+  //! in `other`. If `other` has less bits than `this` then all remaining bits are
+  //! set to zero.
+  //!
+  //! \note The size of the BitVector is unaffected by this operation.
+  inline void and_(const ZoneBitVector& other) noexcept {
+    BitWord* dst = _data;
+    const BitWord* src = other._data;
+
+    uint32_t thisBitWordCount = sizeInBitWords();
+    uint32_t otherBitWordCount = other.sizeInBitWords();
+    uint32_t commonBitWordCount = Support::min(thisBitWordCount, otherBitWordCount);
+
+    uint32_t i = 0;
+    while (i < commonBitWordCount) {
+      dst[i] = dst[i] & src[i];
+      i++;
+    }
+
+    while (i < thisBitWordCount) {
+      dst[i] = 0;
+      i++;
+    }
+  }
+
+  //! Performs a logical bitwise AND between bits specified in this array and
+  //! negated bits in `other`. If `other` has less bits than `this` then all
+  //! remaining bits are kept intact.
+  //!
+  //! \note The size of the BitVector is unaffected by this operation.
+  inline void andNot(const ZoneBitVector& other) noexcept {
+    BitWord* dst = _data;
+    const BitWord* src = other._data;
+
+    uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
+    for (uint32_t i = 0; i < commonBitWordCount; i++)
+      dst[i] = dst[i] & ~src[i];
+  }
+
+  //! Performs a logical bitwise OP between bits specified in this array and bits
+  //! in `other`. If `other` has less bits than `this` then all remaining bits
+  //! are kept intact.
+  //!
+  //! \note The size of the BitVector is unaffected by this operation.
+  inline void or_(const ZoneBitVector& other) noexcept {
+    BitWord* dst = _data;
+    const BitWord* src = other._data;
+
+    uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
+    for (uint32_t i = 0; i < commonBitWordCount; i++)
+      dst[i] = dst[i] | src[i];
+    _clearUnusedBits();
+  }
+
+  //! Zeroes all bits of the last word that are above `_size` (invariant kept
+  //! by all mutating operations).
+  inline void _clearUnusedBits() noexcept {
+    uint32_t idx = _size / kBitWordSizeInBits;
+    uint32_t bit = _size % kBitWordSizeInBits;
+
+    if (!bit) return;
+    _data[idx] &= (BitWord(1) << bit) - 1u;
+  }
+
+  inline bool eq(const ZoneBitVector& other) const noexcept {
+    if (_size != other._size)
+      return false;
+
+    const BitWord* aData = _data;
+    const BitWord* bData = other._data;
+    uint32_t numBitWords = _wordsPerBits(_size);
+
+    for (uint32_t i = 0; i < numBitWords; i++)
+      if (aData[i] != bData[i])
+        return false;
+    return true;
+  }
+
+  //! \}
+
+  //! \name Memory Management
+  //! \{
+
+  inline void release(ZoneAllocator* allocator) noexcept {
+    if (!_data) return;
+    allocator->release(_data, _capacity / 8);
+    reset();
+  }
+
+  inline Error resize(ZoneAllocator* allocator, uint32_t newSize, bool newBitsValue = false) noexcept {
+    return _resize(allocator, newSize, newSize, newBitsValue);
+  }
+
+  ASMJIT_API Error _resize(ZoneAllocator* allocator, uint32_t newSize, uint32_t idealCapacity, bool newBitsValue) noexcept;
+  ASMJIT_API Error _append(ZoneAllocator* allocator, bool value) noexcept;
+
+  //! \}
+
+  //! \name Iterators
+  //! \{
+
+  class ForEachBitSet : public Support::BitVectorIterator<BitWord> {
+  public:
+    ASMJIT_INLINE explicit ForEachBitSet(const ZoneBitVector& bitVector) noexcept
+      : Support::BitVectorIterator<BitWord>(bitVector.data(), bitVector.sizeInBitWords()) {}
+  };
+
+  template<class Operator>
+  class ForEachBitOp : public Support::BitVectorOpIterator<BitWord, Operator> {
+  public:
+    ASMJIT_INLINE ForEachBitOp(const ZoneBitVector& a, const ZoneBitVector& b) noexcept
+      : Support::BitVectorOpIterator<BitWord, Operator>(a.data(), b.data(), a.sizeInBitWords()) {
+      ASMJIT_ASSERT(a.size() == b.size());
+    }
+  };
+
+  //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONEVECTOR_H_INCLUDED