diff --git a/.gitignore b/.gitignore
index 8a46e0f54..5db70c460 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,6 +42,15 @@ src/noit_config.h
src/scripts/noit-config
src/udns/udns_codes.c
src/utils/dtrace_probes.h
+src/LuaJIT/lib/vmdef.lua
+src/LuaJIT/src/buildvm
+src/LuaJIT/src/lj_bcdef.h
+src/LuaJIT/src/lj_ffdef.h
+src/LuaJIT/src/lj_folddef.h
+src/LuaJIT/src/lj_libdef.h
+src/LuaJIT/src/lj_recdef.h
+src/LuaJIT/src/lj_vm.s
+src/LuaJIT/src/luajit
test/00.pem
test/01.pem
test/02.pem
diff --git a/configure.in b/configure.in
index a01533815..7173a80c1 100755
--- a/configure.in
+++ b/configure.in
@@ -39,12 +39,15 @@ AC_PATH_PROG(NROFF, nroff)
AC_SUBST(PERL)
if test "x$GCC" = "xyes" ; then
- CFLAGS="$CFLAGS -g -std=c99"
+ C99FLAG="-std=c99"
+ CFLAGS="$CFLAGS -g $C99FLAG"
DEPFLAGS="-MM"
else
- CFLAGS="$CFLAGS -g -xc99=all"
+ C99FLAG="-xc99=all"
+ CFLAGS="$CFLAGS -g $C99FLAG"
DEPFLAGS="-xM1"
fi
+SHLDFLAGS="$LDFLAGS"
CPPFLAGS="$CPPFLAGS -D_XOPEN_SOURCE=600 -D_XOPEN_SOURCE_EXTENDED -D_POSIX_SOURCE -D_POSIX_C_SOURCE=200112L "'-I$(top_srcdir)/src'
DTRACEHDR=dtrace_probes.h
@@ -59,6 +62,8 @@ case $host in
SHLD="$CC -dynamiclib -flat_namespace -undefined suppress"
MODULEEXT=bundle
RLDFLAG="-Wl,--rpath="
+ # This is needed for luajit on Mac OS X
+ LDFLAGS="$LDFLAGS -pagezero_size 10000 -image_base 100000000"
;;
*-*-solaris*)
DTRACE=/usr/sbin/dtrace
@@ -203,6 +208,7 @@ AC_CHECK_LIB(curses, clear, , [AC_MSG_ERROR([curses not found, but required])])
CPPFLAGS="$CPPFLAGS `pcre-config --cflags`"
LDFLAGS="$LDFLAGS `pcre-config --libs`"
+SHLDFLAGS="$SHLDFLAGS `pcre-config --libs`"
AC_CHECK_LIB(pcre, pcre_compile, ,
[
AC_MSG_ERROR([libpcre not found, but required])
@@ -686,6 +692,8 @@ AC_SUBST(docdir)
AC_SUBST(mansubdir)
SHCFLAGS="$PICFLAGS $CFLAGS"
AC_SUBST(SHCFLAGS)
+AC_SUBST(LD)
+AC_SUBST(SHLDFLAGS)
SUBPREFIX="$prefix"
if test "x$prefix" = "xNONE"; then
@@ -741,6 +749,9 @@ else
fi
fi
+LUACFLAGS=`echo $CFLAGS | sed -e "s#${C99FLAG}##g;"`
+AC_SUBST(LUACFLAGS)
+
AC_OUTPUT([
Makefile
src/Makefile
@@ -755,7 +766,6 @@ src/modules/Makefile
src/modules-lua/Makefile
src/utils/Makefile
src/noitedit/Makefile
-src/lua/Makefile
src/java/Makefile
src/java/run-iep.sh
src/java/jezebel
diff --git a/src/LuaJIT/COPYRIGHT b/src/LuaJIT/COPYRIGHT
new file mode 100644
index 000000000..f2f9a18a7
--- /dev/null
+++ b/src/LuaJIT/COPYRIGHT
@@ -0,0 +1,56 @@
+===============================================================================
+LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/
+
+Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+[ MIT license: http://www.opensource.org/licenses/mit-license.php ]
+
+===============================================================================
+[ LuaJIT includes code from Lua 5.1/5.2, which has this license statement: ]
+
+Copyright (C) 1994-2012 Lua.org, PUC-Rio.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+===============================================================================
+[ LuaJIT includes code from dlmalloc, which has this license statement: ]
+
+This is a version (aka dlmalloc) of malloc/free/realloc written by
+Doug Lea and released to the public domain, as explained at
+http://creativecommons.org/licenses/publicdomain
+
+===============================================================================
diff --git a/src/LuaJIT/README b/src/LuaJIT/README
new file mode 100644
index 000000000..56dd07f27
--- /dev/null
+++ b/src/LuaJIT/README
@@ -0,0 +1,16 @@
+README for LuaJIT 2.0.0-beta10
+------------------------------
+
+LuaJIT is a Just-In-Time (JIT) compiler for the Lua programming language.
+
+Project Homepage: http://luajit.org/
+
+LuaJIT is Copyright (C) 2005-2012 Mike Pall.
+LuaJIT is free software, released under the MIT license.
+See full Copyright Notice in the COPYRIGHT file or in luajit.h.
+
+Documentation for LuaJIT is available in HTML format.
+Please point your favorite browser to:
+
+ doc/luajit.html
+
diff --git a/src/LuaJIT/doc/bluequad-print.css b/src/LuaJIT/doc/bluequad-print.css
new file mode 100644
index 000000000..16a6a72a3
--- /dev/null
+++ b/src/LuaJIT/doc/bluequad-print.css
@@ -0,0 +1,166 @@
+/* Copyright (C) 2004-2012 Mike Pall.
+ *
+ * You are welcome to use the general ideas of this design for your own sites.
+ * But please do not steal the stylesheet, the layout or the color scheme.
+ */
+body {
+ font-family: serif;
+ font-size: 11pt;
+ margin: 0 3em;
+ padding: 0;
+ border: none;
+}
+a:link, a:visited, a:hover, a:active {
+ text-decoration: none;
+ background: transparent;
+ color: #0000ff;
+}
+h1, h2, h3 {
+ font-family: sans-serif;
+ font-weight: bold;
+ text-align: left;
+ margin: 0.5em 0;
+ padding: 0;
+}
+h1 {
+ font-size: 200%;
+}
+h2 {
+ font-size: 150%;
+}
+h3 {
+ font-size: 125%;
+}
+p {
+ margin: 0 0 0.5em 0;
+ padding: 0;
+}
+ul, ol {
+ margin: 0.5em 0;
+ padding: 0 0 0 2em;
+}
+ul {
+ list-style: outside square;
+}
+ol {
+ list-style: outside decimal;
+}
+li {
+ margin: 0;
+ padding: 0;
+}
+dl {
+ margin: 1em 0;
+ padding: 1em;
+ border: 1px solid black;
+}
+dt {
+ font-weight: bold;
+ margin: 0;
+ padding: 0;
+}
+dt sup {
+ float: right;
+ margin-left: 1em;
+}
+dd {
+ margin: 0.5em 0 0 2em;
+ padding: 0;
+}
+table {
+ table-layout: fixed;
+ width: 100%;
+ margin: 1em 0;
+ padding: 0;
+ border: 1px solid black;
+ border-spacing: 0;
+ border-collapse: collapse;
+}
+tr {
+ margin: 0;
+ padding: 0;
+ border: none;
+}
+td {
+ text-align: left;
+ margin: 0;
+ padding: 0.2em 0.5em;
+ border-top: 1px solid black;
+ border-bottom: 1px solid black;
+}
+tr.separate td {
+ border-top: double;
+}
+tt, pre, code, kbd, samp {
+ font-family: monospace;
+ font-size: 75%;
+}
+kbd {
+ font-weight: bolder;
+}
+blockquote, pre {
+ margin: 1em 2em;
+ padding: 0;
+}
+img {
+ border: none;
+ vertical-align: baseline;
+ margin: 0;
+ padding: 0;
+}
+img.left {
+ float: left;
+ margin: 0.5em 1em 0.5em 0;
+}
+img.right {
+ float: right;
+ margin: 0.5em 0 0.5em 1em;
+}
+.flush {
+ clear: both;
+ visibility: hidden;
+}
+.hide, .noprint, #nav {
+ display: none !important;
+}
+.pagebreak {
+ page-break-before: always;
+}
+#site {
+ text-align: right;
+ font-family: sans-serif;
+ font-weight: bold;
+ margin: 0 1em;
+ border-bottom: 1pt solid black;
+}
+#site a {
+ font-size: 1.2em;
+}
+#site a:link, #site a:visited {
+ text-decoration: none;
+ font-weight: bold;
+ background: transparent;
+ color: #ffffff;
+}
+#logo {
+ color: #ff8000;
+}
+#head {
+ clear: both;
+ margin: 0 1em;
+}
+#main {
+ line-height: 1.3;
+ text-align: justify;
+ margin: 1em;
+}
+#foot {
+ clear: both;
+ font-size: 80%;
+ text-align: center;
+ margin: 0 1.25em;
+ padding: 0.5em 0 0 0;
+ border-top: 1pt solid black;
+ page-break-before: avoid;
+ page-break-after: avoid;
+}
diff --git a/src/LuaJIT/doc/bluequad.css b/src/LuaJIT/doc/bluequad.css
new file mode 100644
index 000000000..ffd32019d
--- /dev/null
+++ b/src/LuaJIT/doc/bluequad.css
@@ -0,0 +1,306 @@
+/* Copyright (C) 2004-2012 Mike Pall.
+ *
+ * You are welcome to use the general ideas of this design for your own sites.
+ * But please do not steal the stylesheet, the layout or the color scheme.
+ */
+/* colorscheme:
+ *
+ * site | head #4162bf/white | #6078bf/#e6ecff
+ * ------+------ ----------------+-------------------
+ * nav | main #bfcfff | #e6ecff/black
+ *
+ * nav: hiback loback #c5d5ff #b9c9f9
+ * hiborder loborder #e6ecff #97a7d7
+ * link hover #2142bf #ff0000
+ *
+ * link: link visited hover #2142bf #8122bf #ff0000
+ *
+ * main: boxback boxborder #f0f4ff #bfcfff
+ */
+body {
+ font-family: Verdana, Arial, Helvetica, sans-serif;
+ font-size: 10pt;
+ margin: 0;
+ padding: 0;
+ border: none;
+ background: #e0e0e0;
+ color: #000000;
+}
+a:link {
+ text-decoration: none;
+ background: transparent;
+ color: #2142bf;
+}
+a:visited {
+ text-decoration: none;
+ background: transparent;
+ color: #8122bf;
+}
+a:hover, a:active {
+ text-decoration: underline;
+ background: transparent;
+ color: #ff0000;
+}
+h1, h2, h3 {
+ font-weight: bold;
+ text-align: left;
+ margin: 0.5em 0;
+ padding: 0;
+ background: transparent;
+}
+h1 {
+ font-size: 200%;
+ line-height: 3em; /* really 6em relative to body, match #site span */
+ margin: 0;
+}
+h2 {
+ font-size: 150%;
+ color: #606060;
+}
+h3 {
+ font-size: 125%;
+ color: #404040;
+}
+p {
+ max-width: 600px;
+ margin: 0 0 0.5em 0;
+ padding: 0;
+}
+b {
+ color: #404040;
+}
+ul, ol {
+ max-width: 600px;
+ margin: 0.5em 0;
+ padding: 0 0 0 2em;
+}
+ul {
+ list-style: outside square;
+}
+ol {
+ list-style: outside decimal;
+}
+li {
+ margin: 0;
+ padding: 0;
+}
+dl {
+ max-width: 600px;
+ margin: 1em 0;
+ padding: 1em;
+ border: 1px solid #bfcfff;
+ background: #f0f4ff;
+}
+dt {
+ font-weight: bold;
+ margin: 0;
+ padding: 0;
+}
+dt sup {
+ float: right;
+ margin-left: 1em;
+ color: #808080;
+}
+dt a:visited {
+ text-decoration: none;
+ color: #2142bf;
+}
+dt a:hover, dt a:active {
+ text-decoration: none;
+ color: #ff0000;
+}
+dd {
+ margin: 0.5em 0 0 2em;
+ padding: 0;
+}
+div.tablewrap { /* for IE *sigh* */
+ max-width: 600px;
+}
+table {
+ table-layout: fixed;
+ border-spacing: 0;
+ border-collapse: collapse;
+ max-width: 600px;
+ width: 100%;
+ margin: 1em 0;
+ padding: 0;
+ border: 1px solid #bfcfff;
+}
+tr {
+ margin: 0;
+ padding: 0;
+ border: none;
+}
+tr.odd {
+ background: #f0f4ff;
+}
+tr.separate td {
+ border-top: 1px solid #bfcfff;
+}
+td {
+ text-align: left;
+ margin: 0;
+ padding: 0.2em 0.5em;
+ border: none;
+}
+tt, code, kbd, samp {
+ font-family: Courier New, Courier, monospace;
+ line-height: 1.2;
+ font-size: 110%;
+}
+kbd {
+ font-weight: bolder;
+}
+blockquote, pre {
+ max-width: 600px;
+ margin: 1em 2em;
+ padding: 0;
+}
+pre {
+ line-height: 1.1;
+}
+pre.code {
+ line-height: 1.4;
+ margin: 0.5em 0 1em 0.5em;
+ padding: 0.5em 1em;
+ border: 1px solid #bfcfff;
+ background: #f0f4ff;
+}
+img {
+ border: none;
+ vertical-align: baseline;
+ margin: 0;
+ padding: 0;
+}
+img.left {
+ float: left;
+ margin: 0.5em 1em 0.5em 0;
+}
+img.right {
+ float: right;
+ margin: 0.5em 0 0.5em 1em;
+}
+.indent {
+ padding-left: 1em;
+}
+.flush {
+ clear: both;
+ visibility: hidden;
+}
+.hide, .noscreen {
+ display: none !important;
+}
+.ext {
+ color: #ff8000;
+}
+#site {
+ clear: both;
+ float: left;
+ width: 13em;
+ text-align: center;
+ font-weight: bold;
+ margin: 0;
+ padding: 0;
+ background: transparent;
+ color: #ffffff;
+}
+#site a {
+ font-size: 200%;
+}
+#site a:link, #site a:visited {
+ text-decoration: none;
+ font-weight: bold;
+ background: transparent;
+ color: #ffffff;
+}
+#site span {
+ line-height: 3em; /* really 6em relative to body, match h1 */
+}
+#logo {
+ color: #ffb380;
+}
+#head {
+ margin: 0;
+ padding: 0 0 0 2em;
+ border-left: solid 13em #4162bf;
+ border-right: solid 3em #6078bf;
+ background: #6078bf;
+ color: #e6ecff;
+}
+#nav {
+ clear: both;
+ float: left;
+ overflow: hidden;
+ text-align: left;
+ line-height: 1.5;
+ width: 13em;
+ padding-top: 1em;
+ background: transparent;
+}
+#nav ul {
+ list-style: none outside;
+ margin: 0;
+ padding: 0;
+}
+#nav li {
+ margin: 0;
+ padding: 0;
+}
+#nav a {
+ display: block;
+ text-decoration: none;
+ font-weight: bold;
+ margin: 0;
+ padding: 2px 1em;
+ border-top: 1px solid transparent;
+ border-bottom: 1px solid transparent;
+ background: transparent;
+ color: #2142bf;
+}
+#nav a:hover, #nav a:active {
+ text-decoration: none;
+ border-top: 1px solid #97a7d7;
+ border-bottom: 1px solid #e6ecff;
+ background: #b9c9f9;
+ color: #ff0000;
+}
+#nav a.current, #nav a.current:hover, #nav a.current:active {
+ border-top: 1px solid #e6ecff;
+ border-bottom: 1px solid #97a7d7;
+ background: #c5d5ff;
+ color: #2142bf;
+}
+#nav ul ul a {
+ padding: 0 1em 0 1.7em;
+}
+#nav ul ul ul a {
+ padding: 0 0.5em 0 2.4em;
+}
+#main {
+ line-height: 1.5;
+ text-align: left;
+ margin: 0;
+ padding: 1em 2em;
+ border-left: solid 13em #bfcfff;
+ border-right: solid 3em #e6ecff;
+ background: #e6ecff;
+}
+#foot {
+ clear: both;
+ font-size: 80%;
+ text-align: center;
+ margin: 0;
+ padding: 0.5em;
+ background: #6078bf;
+ color: #ffffff;
+}
+#foot a:link, #foot a:visited {
+ text-decoration: underline;
+ background: transparent;
+ color: #ffffff;
+}
+#foot a:hover, #foot a:active {
+ text-decoration: underline;
+ background: transparent;
+ color: #bfcfff;
+}
diff --git a/src/LuaJIT/doc/changes.html b/src/LuaJIT/doc/changes.html
new file mode 100644
index 000000000..95ada4fb1
--- /dev/null
+++ b/src/LuaJIT/doc/changes.html
@@ -0,0 +1,720 @@
+
+
+
+LuaJIT Change History
+
+
+
+
+
+
+
+
+
+
+
+
+LuaJIT Change History
+
+
+
+
+This is a list of changes between the released versions of LuaJIT.
+The current development version is LuaJIT 2.0.0-beta10 .
+The current stable version is LuaJIT 1.1.8 .
+
+
+Please check the
+» Online Change History
+to see whether newer versions are available.
+
+
+
+
+LuaJIT 2.0.0-beta10 — 2012-05-09
+
+New features:
+
+The MIPS port of LuaJIT is complete. It requires a CPU conforming to the
+MIPS32 R1 architecture with hardware FPU. O32 hard-fp ABI,
+little-endian or big-endian.
+Auto-detect target arch via cross-compiler. No need for
+TARGET=arch anymore.
+Make DynASM compatible with Lua 5.2.
+From Lua 5.2: Try __tostring metamethod on non-string error
+messages.
+
+Correctness and completeness:
+
+Fix parsing of hex literals with exponents.
+Fix bytecode dump for certain number constants.
+Fix argument type in error message for relative arguments.
+Fix argument error handling on Lua stacks without a frame.
+Add missing mcode limit check in assembler backend.
+Fix compilation on OpenBSD.
+Avoid recursive GC steps after GC-triggered trace exit.
+Replace <unwind.h> definitions with our own.
+Fix OSX build issues. Bump minimum required OSX version to 10.4.
+Fix discharge order of comparisons in Lua parser.
+Ensure running __gc of userdata created in __gc
+at state close.
+Limit number of userdata __gc separations at state close.
+Fix bytecode JMP slot range when optimizing
+and/or with constant LHS.
+Fix DSE of USTORE .
+Make lua_concat() work from C hook with partial frame.
+Add required PHIs for implicit conversions, e.g. via XREF
+forwarding.
+Add more comparison variants to Valgrind suppressions file.
+Disable loading bytecode with an extra header (BOM or #! ).
+Fix PHI stack slot syncing.
+ARM: Reorder type/value tests to silence Valgrind.
+ARM: Fix register allocation for ldrd -optimized
+HREFK .
+ARM: Fix conditional branch fixup for OBAR .
+ARM: Invoke SPLIT pass for double args in FFI call.
+ARM: Handle all CALL* ops with double results in
+SPLIT pass.
+ARM: Fix rejoin of POW in SPLIT pass.
+ARM: Fix compilation of math.sinh , math.cosh ,
+math.tanh .
+ARM, PPC: Avoid pointless arg clearing in BC_IFUNCF .
+PPC: Fix resume after yield from hook.
+PPC: Fix argument checking for rawget() .
+PPC: Fix fusion of floating-point XLOAD /XSTORE .
+PPC: Fix HREFK code generation for huge tables.
+PPC: Use builtin D-Cache/I-Cache sync code.
+
+FFI library:
+
+Ignore empty statements in ffi.cdef() .
+Ignore number parsing errors while skipping definitions.
+Don't touch frame in callbacks with tailcalls to fast functions.
+Fix library unloading on POSIX systems.
+Finalize cdata before userdata when closing the state.
+Change ffi.load() library name resolution for Cygwin.
+Fix resolving of function name redirects on Windows/x86.
+Fix symbol resolving error messages on Windows.
+Fix blacklisting of C functions calling callbacks.
+Fix result type of pointer difference.
+Use correct PC in FFI metamethod error message.
+Allow 'typedef _Bool int BOOL;' for the Windows API.
+Don't record test for bool result of call, if ignored.
+
+
+
+
+LuaJIT 2.0.0-beta9 — 2011-12-14
+
+New features:
+
+PPC port of LuaJIT is complete. Default is the dual-number port
+(usually faster). Single-number port selectable via src/Makefile
+at build time.
+Add FFI callback support.
+Extend -b to generate .c , .h or .obj/.o
+files with embedded bytecode.
+Allow loading embedded bytecode with require() .
+From Lua 5.2: Change to '\z' escape. Reject undefined escape
+sequences.
+
+Correctness and completeness:
+
+Fix OSX 10.7 build. Fix install_name and versioning on OSX.
+Fix iOS build.
+Install dis_arm.lua , too.
+Mark installed shared library as executable.
+Add debug option to msvcbuild.bat and improve error handling.
+Fix data-flow analysis for iterators.
+Fix forced unwinding triggered by external unwinder.
+Record missing for loop slot loads (return to lower frame).
+Always use ANSI variants of Windows system functions.
+Fix GC barrier for multi-result table constructor (TSETM ).
+Fix/add various FOLD rules.
+Add potential PHI for number conversions due to type instability.
+Do not eliminate PHIs only referenced from other PHIs.
+Correctly anchor implicit number to string conversions in Lua/C API.
+Fix various stack limit checks.
+x64: Use thread-safe exceptions for external unwinding (GCC platforms).
+x64: Fix result type of cdata index conversions.
+x64: Fix math.random() and bit.bswap() code generation.
+x64: Fix lightuserdata comparisons.
+x64: Always extend stack-passed arguments to pointer size.
+ARM: Many fixes to code generation backend.
+PPC/e500: Fix dispatch for binop metamethods.
+PPC/e500: Save/restore condition registers when entering/leaving the VM.
+PPC/e500: Fix write barrier in stores of strings to upvalues.
+
+FFI library:
+
+Fix C comment parsing.
+Fix snapshot optimization for cdata comparisons.
+Fix recording of const/enum lookups in namespaces.
+Fix call argument and return handling for I8/U8/I16/U16 types.
+Fix unfused loads of float fields.
+Fix ffi.string() recording.
+Save GetLastError() around ffi.load() and symbol
+resolving, too.
+Improve ld script detection in ffi.load() .
+Record loads/stores to external variables in namespaces.
+Compile calls to stdcall, fastcall and vararg functions.
+Treat function ctypes like pointers in comparisons.
+Resolve __call metamethod for pointers, too.
+Record C function calls with bool return values.
+Record ffi.errno() .
+x86: Fix number to uint32_t conversion rounding.
+x86: Fix 64 bit arithmetic in assembler backend.
+x64: Fix struct-by-value calling conventions.
+ARM: Ensure invocation of SPLIT pass for float conversions.
+
+Structural and performance enhancements:
+
+Display trace types with -jv and -jdump .
+Record isolated calls. But prefer recording loops over calls.
+Specialize to prototype for non-monomorphic functions. Solves the
+trace-explosion problem for closure-heavy programming styles.
+Always generate a portable vmdef.lua . Easier for distros.
+
+
+
+
+LuaJIT 2.0.0-beta8 — 2011-06-23
+
+New features:
+
+Soft-float ARM port of LuaJIT is complete.
+Add support for bytecode loading/saving and -b command line
+option.
+From Lua 5.2: __len metamethod for tables
+(disabled by default).
+
+Correctness and completeness:
+
+ARM: Misc. fixes for interpreter.
+x86/x64: Fix bit.* argument checking in interpreter.
+Catch early out-of-memory in memory allocator initialization.
+Fix data-flow analysis for paths leading to an upvalue close.
+Fix check for missing arguments in string.format() .
+Fix Solaris/x86 build (note: not a supported target).
+Fix recording of loops with instable directions in side traces.
+x86/x64: Fix fusion of comparisons with u8 /u16
+XLOAD .
+x86/x64: Fix register allocation for variable shifts.
+
+FFI library:
+
+Add ffi.errno() . Save errno /GetLastError()
+around allocations etc.
+Fix __gc for VLA/VLS cdata objects.
+Fix recording of casts from 32 bit cdata pointers to integers.
+tonumber(cdata) returns nil for non-numbers.
+Show address pointed to for tostring(pointer) .
+Print NULL pointers as "cdata<... *>: NULL" .
+Support __tostring metamethod for pointers to structs, too.
+
+Structural and performance enhancements:
+
+More tuning for loop unrolling heuristics.
+Flatten and compress in-memory debug info (saves ~70%).
+
+
+
+
+LuaJIT 2.0.0-beta7 — 2011-05-05
+
+New features:
+
+ARM port of the LuaJIT interpreter is complete.
+FFI library: Add ffi.gc() , ffi.metatype() ,
+ffi.istype() .
+FFI library: Resolve ld script redirection in ffi.load() .
+From Lua 5.2: package.searchpath() , fp:read("*L") ,
+load(string) .
+From Lua 5.2, disabled by default: empty statement,
+table.unpack() , modified coroutine.running() .
+
+Correctness and completeness:
+
+FFI library: numerous fixes.
+Fix type mismatches in store-to-load forwarding.
+Fix error handling within metamethods.
+Fix table.maxn() .
+Improve accuracy of x^-k on x64.
+Fix code generation for Intel Atom in x64 mode.
+Fix narrowing of POW.
+Fix recording of retried fast functions.
+Fix code generation for bit.bnot() and multiplies.
+Fix error location within cpcall frames.
+Add workaround for old libgcc unwind bug.
+Fix lua_yield() and getmetatable(lightuserdata) on x64.
+Misc. fixes for PPC/e500 interpreter.
+Fix stack slot updates for down-recursion.
+
+Structural and performance enhancements:
+
+Add dual-number mode (int/double) for the VM. Enabled for ARM.
+Improve narrowing of arithmetic operators and for loops.
+Tune loop unrolling heuristics and increase trace recorder limits.
+Eliminate dead slots in snapshots using bytecode data-flow analysis.
+Avoid phantom stores to proxy tables.
+Optimize lookups in empty proxy tables.
+Improve bytecode optimization of and/or operators.
+
+
+
+
+LuaJIT 2.0.0-beta6 — 2011-02-11
+
+New features:
+
+PowerPC/e500v2 port of the LuaJIT interpreter is complete.
+Various minor features from Lua 5.2: Hex escapes in literals,
+'\*' escape, reversible string.format("%q",s) ,
+"%g" pattern, table.sort checks callbacks,
+os.exit(status|true|false[,close]) .
+Lua 5.2 __pairs and __ipairs metamethods
+(disabled by default).
+Initial release of the FFI library.
+
+Correctness and completeness:
+
+Fix string.format() for non-finite numbers.
+Fix memory leak when compiled to use the built-in allocator.
+x86/x64: Fix unnecessary resize in TSETM bytecode.
+Fix various GC issues with traces and jit.flush() .
+x64: Fix fusion of indexes for array references.
+x86/x64: Fix stack overflow handling for coroutine results.
+Enable low-2GB memory allocation on FreeBSD/x64.
+Fix collectgarbage("count") result if more than 2GB is in use.
+Fix parsing of hex floats.
+x86/x64: Fix loop branch inversion with trailing
+HREF+NE/EQ .
+Add jit.os string.
+coroutine.create() permits running C functions, too.
+Fix OSX build to work with newer ld64 versions.
+Fix bytecode optimization of and/or operators.
+
+Structural and performance enhancements:
+
+Emit specialized bytecode for pairs() /next() .
+Improve bytecode coalescing of nil constants.
+Compile calls to vararg functions.
+Compile select() .
+Improve alias analysis, esp. for loads from allocations.
+Tuning of various compiler heuristics.
+Refactor and extend IR conversion instructions.
+x86/x64: Various backend enhancements related to the FFI.
+Add SPLIT pass to split 64 bit IR instructions for 32 bit CPUs.
+
+
+
+
+LuaJIT 2.0.0-beta5 — 2010-08-24
+
+Correctness and completeness:
+
+Fix trace exit dispatch to function headers.
+Fix Windows and OSX builds with LUAJIT_DISABLE_JIT.
+Reorganize and fix placement of generated machine code on x64.
+Fix TNEW in x64 interpreter.
+Do not eliminate PHIs for values only referenced from side exits.
+OS-independent canonicalization of strings for non-finite numbers.
+Fix string.char() range check on x64.
+Fix tostring() resolving within print() .
+Fix error handling for next() .
+Fix passing of constant arguments to external calls on x64.
+Fix interpreter argument check for two-argument SSE math functions.
+Fix C frame chain corruption caused by lua_cpcall() .
+Fix return from pcall() within active hook.
+
+Structural and performance enhancements:
+
+Replace on-trace GC frame syncing with interpreter exit.
+Improve hash lookup specialization by not removing dead keys during GC.
+Turn traces into true GC objects.
+Avoid starting a GC cycle immediately after library init.
+Add weak guards to improve dead-code elimination.
+Speed up string interning.
+
+
+
+
+LuaJIT 2.0.0-beta4 — 2010-03-28
+
+Correctness and completeness:
+
+Fix precondition for on-trace creation of table keys.
+Fix {f()} on x64 when table is resized.
+Fix folding of ordered comparisons with same references.
+Fix snapshot restores for multi-result bytecodes.
+Fix potential hang when recording bytecode with nested closures.
+Fix recording of getmetatable() , tonumber() and bad argument types.
+Fix SLOAD fusion across returns to lower frames.
+
+Structural and performance enhancements:
+
+Add array bounds check elimination. -Oabc is enabled by default.
+More tuning for x64, e.g. smaller table objects.
+
+
+
+
+LuaJIT 2.0.0-beta3 — 2010-03-07
+
+LuaJIT x64 port:
+
+Port integrated memory allocator to Linux/x64, Windows/x64 and OSX/x64.
+Port interpreter and JIT compiler to x64.
+Port DynASM to x64.
+Many 32/64 bit cleanups in the VM.
+Allow building the interpreter with either x87 or SSE2 arithmetics.
+Add external unwinding and C++ exception interop (default on x64).
+
+Correctness and completeness:
+
+Fix constructor bytecode generation for certain conditional values.
+Fix some cases of ordered string comparisons.
+Fix lua_tocfunction() .
+Fix cutoff register in JMP bytecode for some conditional expressions.
+Fix PHI marking algorithm for references from variant slots.
+Fix package.cpath for non-default PREFIX.
+Fix DWARF2 frame unwind information for interpreter on OSX.
+Drive the GC forward on string allocations in the parser.
+Implement call/return hooks (zero-cost if disabled).
+Implement yield from C hooks.
+Disable JIT compiler on older non-SSE2 CPUs instead of aborting.
+
+Structural and performance enhancements:
+
+Compile recursive code (tail-, up- and down-recursion).
+Improve heuristics for bytecode penalties and blacklisting.
+Split CALL/FUNC recording and clean up fast function call semantics.
+Major redesign of internal function call handling.
+Improve FOR loop const specialization and integerness checks.
+Switch to pre-initialized stacks. Avoid frame-clearing.
+Colocation of prototypes and related data: bytecode, constants, debug info.
+Cleanup parser and streamline bytecode generation.
+Add support for weak IR references to register allocator.
+Switch to compressed, extensible snapshots.
+Compile returns to frames below the start frame.
+Improve alias analysis of upvalues using a disambiguation hash value.
+Compile floor/ceil/trunc to SSE2 helper calls or SSE4.1 instructions.
+Add generic C call handling to IR and backend.
+Improve KNUM fuse vs. load heuristics.
+Compile various io.*() functions.
+Compile math.sinh() , math.cosh() , math.tanh()
+and math.random() .
+
+
+
+
+LuaJIT 2.0.0-beta2 — 2009-11-09
+
+Reorganize build system. Build static+shared library on POSIX.
+Allow C++ exception conversion on all platforms
+using a wrapper function.
+Automatically catch C++ exceptions and rethrow Lua error
+(DWARF2 only).
+Check for the correct x87 FPU precision at strategic points.
+Always use wrappers for libm functions.
+Resurrect metamethod name strings before copying them.
+Mark current trace, even if compiler is idle.
+Ensure FILE metatable is created only once.
+Fix type comparisons when different integer types are involved.
+Fix getmetatable() recording.
+Fix TDUP with dead keys in template table.
+jit.flush(tr) returns status.
+Prevent manual flush of a trace that's still linked.
+Improve register allocation heuristics for invariant references.
+Compile the push/pop variants of table.insert() and
+table.remove() .
+Compatibility with MSVC link /debug .
+Fix lua_iscfunction() .
+Fix math.random() when compiled with -fpic (OSX).
+Fix table.maxn() .
+Bump MACOSX_DEPLOYMENT_TARGET to 10.4
+luaL_check*() and luaL_opt*() now support
+negative arguments, too.
+This matches the behavior of Lua 5.1, but not the specification.
+
+
+
+LuaJIT 2.0.0-beta1 — 2009-10-31
+
+This is the first public release of LuaJIT 2.0.
+The whole VM has been rewritten from the ground up, so there's
+no point in listing differences over earlier versions.
+
+
+
+
+
+LuaJIT 1.1.8 — 2012-04-16
+
+
+
+LuaJIT 1.1.7 — 2011-05-05
+
+
+
+LuaJIT 1.1.6 — 2010-03-28
+
+Added fixes for the
+» currently known bugs in Lua 5.1.4 .
+Removed wrong GC check in jit_createstate() .
+Thanks to Tim Mensch.
+Fixed bad assertions while compiling table.insert() and
+table.remove() .
+
+
+
+LuaJIT 1.1.5 — 2008-10-25
+
+
+
+LuaJIT 1.1.4 — 2008-02-05
+
+Merged with Lua 5.1.3. Fixes all
+» known bugs in Lua 5.1.2 .
+Fixed possible (but unlikely) stack corruption while compiling
+k^x expressions.
+Fixed DynASM template for cmpss instruction.
+
+
+
+LuaJIT 1.1.3 — 2007-05-24
+
+Merged with Lua 5.1.2. Fixes all
+» known bugs in Lua 5.1.1 .
+Merged pending Lua 5.1.x fixes: "return -nil" bug, spurious count hook call.
+Remove a (sometimes) wrong assertion in luaJIT_findpc() .
+DynASM now allows labels for displacements and .aword .
+Fix some compiler warnings for DynASM glue (internal API change).
+Correct naming for SSSE3 (temporarily known as SSE4) in DynASM and x86 disassembler.
+The loadable debug modules now handle redirection to stdout
+(e.g. -j trace=- ).
+
+
+
+LuaJIT 1.1.2 — 2006-06-24
+
+Fix MSVC inline assembly: use only local variables with
+lua_number2int() .
+Fix "attempt to call a thread value" bug on Mac OS X:
+make values of consts used as lightuserdata keys unique
+to avoid joining by the compiler/linker.
+
+
+
+LuaJIT 1.1.1 — 2006-06-20
+
+Merged with Lua 5.1.1. Fixes all
+» known bugs in Lua 5.1 .
+Enforce (dynamic) linker error for EXE/DLL version mismatches.
+Minor changes to DynASM: faster pre-processing, smaller encoding
+for some immediates.
+
+
+This release is in sync with Coco 1.1.1 (see the
+» Coco Change History ).
+
+
+
+LuaJIT 1.1.0 — 2006-03-13
+
+Merged with Lua 5.1 (final).
+
+New JIT call frame setup:
+
+The C stack is kept 16 byte aligned (faster).
+Mandatory for Mac OS X on Intel, too.
+Faster calling conventions for internal C helper functions.
+Better instruction scheduling for function prologue, OP_CALL and
+OP_RETURN.
+
+
+Miscellaneous optimizations:
+
+Faster loads of FP constants. Remove narrow-to-wide store-to-load
+forwarding stalls.
+Use (scalar) SSE2 ops (if the CPU supports it) to speed up slot moves
+and FP to integer conversions.
+Optimized the two-argument form of OP_CONCAT (a..b ).
+Inlined OP_MOD (a%b ).
+With better accuracy than the C variant, too.
+Inlined OP_POW (a^b ). Unroll x^k or
+use k^x = 2^(log2(k)*x) or call pow() .
+
+
+Changes in the optimizer:
+
+Improved hinting for table keys derived from table values
+(t1[t2[x]] ).
+Lookup hinting now works with arbitrary object types and
+supports index chains, too.
+Generate type hints for arithmetic and comparison operators,
+OP_LEN, OP_CONCAT and OP_FORPREP.
+Remove several hint definitions in favour of a generic COMBINE hint.
+Complete rewrite of jit.opt_inline module
+(ex jit.opt_lib ).
+
+
+Use adaptive deoptimization:
+
+If runtime verification of a contract fails, the affected
+instruction is recompiled and patched on-the-fly.
+Regular programs will trigger deoptimization only occasionally.
+This avoids generating code for uncommon fallback cases
+most of the time. Generated code is up to 30% smaller compared to
+LuaJIT 1.0.3.
+Deoptimization is used for many opcodes and contracts:
+
+OP_CALL, OP_TAILCALL: type mismatch for callable.
+Inlined calls: closure mismatch, parameter number and type mismatches.
+OP_GETTABLE, OP_SETTABLE: table or key type and range mismatches.
+All arithmetic and comparison operators, OP_LEN, OP_CONCAT,
+OP_FORPREP: operand type and range mismatches.
+
+Complete redesign of the debug and traceback info
+(bytecode ↔ mcode) to support deoptimization.
+Much more flexible and needs only 50% of the space.
+The modules jit.trace , jit.dumphints and
+jit.dump handle deoptimization.
+
+
+Inlined many popular library functions
+(for commonly used arguments only):
+
+Most math.* functions (the 18 most used ones)
+[2x-10x faster].
+string.len , string.sub and string.char
+[2x-10x faster].
+table.insert , table.remove and table.getn
+[3x-5x faster].
+coroutine.yield and coroutine.resume
+[3x-5x faster].
+pairs , ipairs and the corresponding iterators
+[8x-15x faster].
+
+
+Changes in the core and loadable modules and the stand-alone executable:
+
+Added jit.version , jit.version_num
+and jit.arch .
+Reorganized some internal API functions (jit.util.*mcode* ).
+The -j dump output now shows JSUB names, too.
+New x86 disassembler module written in pure Lua. No dependency
+on ndisasm anymore. Flexible API, very compact (500 lines)
+and complete (x87, MMX, SSE, SSE2, SSE3, SSSE3, privileged instructions).
+luajit -v prints the LuaJIT version and copyright
+on a separate line.
+
+
+Added SSE, SSE2, SSE3 and SSSE3 support to DynASM.
+Miscellaneous doc changes. Added a section about
+embedding LuaJIT .
+
+
+This release is in sync with Coco 1.1.0 (see the
+» Coco Change History ).
+
+
+
+
+
+LuaJIT 1.0.3 — 2005-09-08
+
+Even more docs.
+Unified closure checks in jit.* .
+Fixed some range checks in jit.util.* .
+Fixed __newindex call originating from jit_settable_str() .
+Merged with Lua 5.1 alpha (including early bug fixes).
+
+
+This is the first public release of LuaJIT.
+
+
+
+LuaJIT 1.0.2 — 2005-09-02
+
+Add support for flushing the Valgrind translation cache
+(MYCFLAGS= -DUSE_VALGRIND ).
+Add support for freeing executable mcode memory to the mmap() -based
+variant for POSIX systems.
+Reorganized the C function signature handling in
+jit.opt_lib .
+Changed to index-based hints for inlining C functions.
+Still no support in the backend for inlining.
+Hardcode HEAP_CREATE_ENABLE_EXECUTE value if undefined.
+Misc. changes to the jit.* modules.
+Misc. changes to the Makefiles.
+Lots of new docs.
+Complete doc reorg.
+
+
+Not released because Lua 5.1 alpha came out today.
+
+
+
+LuaJIT 1.0.1 — 2005-08-31
+
+Missing GC step in OP_CONCAT .
+Fix result handling for C –> JIT calls.
+Detect CPU feature bits.
+Encode conditional moves (fucomip ) only when supported.
+Add fallback instructions for FP compares.
+Add support for LUA_COMPAT_VARARG . Still disabled by default.
+MSVC needs a specific place for the CALLBACK attribute
+(David Burgess).
+Misc. doc updates.
+
+
+Interim non-public release.
+Special thanks to Adam D. Moss for reporting most of the bugs.
+
+
+
+LuaJIT 1.0.0 — 2005-08-29
+
+This is the initial non-public release of LuaJIT.
+
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/contact.html b/src/LuaJIT/doc/contact.html
new file mode 100644
index 000000000..e70deb971
--- /dev/null
+++ b/src/LuaJIT/doc/contact.html
@@ -0,0 +1,100 @@
+
+
+
+Contact
+
+
+
+
+
+
+
+
+
+
+
+Contact
+
+
+
+
+Please send general questions to the
+» LuaJIT mailing list .
+You can also send any questions you have directly to me:
+
+
+
+
+
+
+
+
+
+
+Copyright
+
+All documentation is
+Copyright © 2005-2012 Mike Pall.
+
+
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/ext_c_api.html b/src/LuaJIT/doc/ext_c_api.html
new file mode 100644
index 000000000..e8bc0ce71
--- /dev/null
+++ b/src/LuaJIT/doc/ext_c_api.html
@@ -0,0 +1,185 @@
+
+
+
+Lua/C API Extensions
+
+
+
+
+
+
+
+
+
+
+
+Lua/C API Extensions
+
+
+
+
+LuaJIT adds some extensions to the standard Lua/C API. The LuaJIT include
+directory must be in the compiler search path (-Ipath )
+to be able to include the required header for C code:
+
+
+#include "luajit.h"
+
+
+Or for C++ code:
+
+
+#include "lua.hpp"
+
+
+
+luaJIT_setmode(L, idx, mode)
+— Control VM
+
+This is a C API extension to allow control of the VM from C code. The
+full prototype of luaJIT_setmode is:
+
+
+LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
+
+
+The returned status is either success (1 ) or failure (0 ).
+The second argument is either 0 or a stack index (similar to the
+other Lua/C API functions).
+
+
+The third argument specifies the mode, which is 'or'ed with a flag.
+The flag can be LUAJIT_MODE_OFF to turn a feature off,
+LUAJIT_MODE_ON to turn a feature on, or
+LUAJIT_MODE_FLUSH to flush cached code.
+
+
+The following modes are defined:
+
+
+
+luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|flag)
+
+Turn the whole JIT compiler on or off or flush the whole cache of compiled code.
+
+
+
+luaJIT_setmode(L, idx, LUAJIT_MODE_FUNC|flag)
+luaJIT_setmode(L, idx, LUAJIT_MODE_ALLFUNC|flag)
+luaJIT_setmode(L, idx, LUAJIT_MODE_ALLSUBFUNC|flag)
+
+This sets the mode for the function at the stack index idx or
+the parent of the calling function (idx = 0 ). It either
+enables JIT compilation for a function, disables it and flushes any
+already compiled code or only flushes already compiled code. This
+applies recursively to all sub-functions of the function with
+LUAJIT_MODE_ALLFUNC or only to the sub-functions with
+LUAJIT_MODE_ALLSUBFUNC .
+
+
+
+luaJIT_setmode(L, trace,
+ LUAJIT_MODE_TRACE|LUAJIT_MODE_FLUSH)
+
+Flushes the specified root trace and all of its side traces from the cache.
+The code for the trace will be retained as long as there are any other
+traces which link to it.
+
+
+
+luaJIT_setmode(L, idx, LUAJIT_MODE_WRAPCFUNC|flag)
+
+This mode defines a wrapper function for calls to C functions. If
+called with LUAJIT_MODE_ON , the stack index at idx
+must be a lightuserdata object holding a pointer to the wrapper
+function. From now on all C functions are called through the wrapper
+function. If called with LUAJIT_MODE_OFF this mode is turned
+off and all C functions are directly called.
+
+
+The wrapper function can be used for debugging purposes or to catch
+and convert foreign exceptions. But please read the section on
+C++ exception interoperability
+first. Recommended usage can be seen in this C++ code excerpt:
+
+
+#include <exception>
+#include "lua.hpp"
+
+// Catch C++ exceptions and convert them to Lua error messages.
+// Customize as needed for your own exception classes.
+static int wrap_exceptions(lua_State *L, lua_CFunction f)
+{
+ try {
+ return f(L); // Call wrapped function and return result.
+ } catch (const char *s) { // Catch and convert exceptions.
+ lua_pushstring(L, s);
+ } catch (std::exception& e) {
+ lua_pushstring(L, e.what());
+ } catch (...) {
+ lua_pushliteral(L, "caught (...)");
+ }
+ return lua_error(L); // Rethrow as a Lua error.
+}
+
+static int myinit(lua_State *L)
+{
+ ...
+ // Define wrapper function and enable it.
+ lua_pushlightuserdata(L, (void *)wrap_exceptions);
+ luaJIT_setmode(L, -1, LUAJIT_MODE_WRAPCFUNC|LUAJIT_MODE_ON);
+ lua_pop(L, 1);
+ ...
+}
+
+
+Note that you can only define a single global wrapper function ,
+so be careful when using this mechanism from multiple C++ modules.
+Also note that this mechanism is not without overhead.
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/ext_ffi.html b/src/LuaJIT/doc/ext_ffi.html
new file mode 100644
index 000000000..e6f0bf287
--- /dev/null
+++ b/src/LuaJIT/doc/ext_ffi.html
@@ -0,0 +1,334 @@
+
+
+
+FFI Library
+
+
+
+
+
+
+
+
+
+
+
+
+FFI Library
+
+
+
+
+
+The FFI library allows calling external C functions and
+using C data structures from pure Lua code.
+
+
+
+
+The FFI library largely obviates the need to write tedious manual
+Lua/C bindings in C. No need to learn a separate binding language
+— it parses plain C declarations! These can be
+cut-n-pasted from C header files or reference manuals. It's up to
+the task of binding large libraries without the need for dealing with
+fragile binding generators.
+
+
+
+The FFI library is tightly integrated into LuaJIT (it's not available
+as a separate module). The code generated by the JIT-compiler for
+accesses to C data structures from Lua code is on par with the
+code a C compiler would generate. Calls to C functions can
+be inlined in JIT-compiled code, unlike calls to functions bound via
+the classic Lua/C API.
+
+
+This page gives a short introduction to the usage of the FFI library.
+Please use the FFI sub-topics in the navigation bar to learn more.
+
+
+
Motivating Example: Calling External C Functions
+
+It's really easy to call an external C library function:
+
+
+① local ffi = require("ffi")
+② ffi.cdef[[
+int printf(const char *fmt, ...);
+]]
+③ ffi.C.printf("Hello %s!", "world")
+
+
+So, let's pick that apart:
+
+
+① Load the FFI library.
+
+
+② Add a C declaration
+for the function. The part inside the double-brackets (in green) is
+just standard C syntax.
+
+
+③ Call the named
+C function — Yes, it's that simple!
+
+
+Actually, what goes on behind the scenes is far from simple: ③ makes use of the standard
+C library namespace ffi.C . Indexing this namespace with
+a symbol name ("printf" ) automatically binds it to the
+standard C library. The result is a special kind of object which,
+when called, runs the printf function. The arguments passed
+to this function are automatically converted from Lua objects to the
+corresponding C types.
+
+
+Ok, so maybe the use of printf() wasn't such a spectacular
+example. You could have done that with io.write() and
+string.format() , too. But you get the idea ...
+
+
+So here's something to pop up a message box on Windows:
+
+
+local ffi = require("ffi")
+ffi.cdef[[
+int MessageBoxA(void *w, const char *txt, const char *cap, int type);
+]]
+ffi.C.MessageBoxA(nil, "Hello world!", "Test", 0)
+
+
+Bing! Again, that was far too easy, no?
+
+
+Compare this with the effort required to bind that function using the
+classic Lua/C API: create an extra C file, add a C function
+that retrieves and checks the argument types passed from Lua and calls
+the actual C function, add a list of module functions and their
+names, add a luaopen_* function and register all module
+functions, compile and link it into a shared library (DLL), move it to
+the proper path, add Lua code that loads the module aaaand ... finally
+call the binding function. Phew!
+
+
+
Motivating Example: Using C Data Structures
+
+The FFI library allows you to create and access C data
+structures. Of course the main use for this is for interfacing with
+C functions. But they can be used stand-alone, too.
+
+
+Lua is built upon high-level data types. They are flexible, extensible
+and dynamic. That's why we all love Lua so much. Alas, this can be
+inefficient for certain tasks, where you'd really want a low-level
+data type. E.g. a large array of a fixed structure needs to be
+implemented with a big table holding lots of tiny tables. This imposes
+both a substantial memory overhead as well as a performance overhead.
+
+
+Here's a sketch of a library that operates on color images plus a
+simple benchmark. First, the plain Lua version:
+
+
+local floor = math.floor
+
+local function image_ramp_green(n)
+ local img = {}
+ local f = 255/(n-1)
+ for i=1,n do
+ img[i] = { red = 0, green = floor((i-1)*f), blue = 0, alpha = 255 }
+ end
+ return img
+end
+
+local function image_to_grey(img, n)
+ for i=1,n do
+ local y = floor(0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue)
+ img[i].red = y; img[i].green = y; img[i].blue = y
+ end
+end
+
+local N = 400*400
+local img = image_ramp_green(N)
+for i=1,1000 do
+ image_to_grey(img, N)
+end
+
+
+This creates a table with 160,000 pixels, each of which is a table
+holding four number values in the range of 0-255. First an image with
+a green ramp is created (1D for simplicity), then the image is
+converted to greyscale 1000 times. Yes, that's silly, but I was in
+need of a simple example ...
+
+
+And here's the FFI version. The modified parts have been marked in
+bold:
+
+
+① local ffi = require("ffi")
+ffi.cdef[[
+ typedef struct { uint8_t red, green, blue, alpha; } rgba_pixel;
+]]
+
+local function image_ramp_green(n)
+② local img = ffi.new("rgba_pixel[?]", n)
+ local f = 255/(n-1)
+③ for i=0,n-1 do
+④ img[i].green = i*f
+ img[i].alpha = 255
+ end
+ return img
+end
+
+local function image_to_grey(img, n)
+③ for i=0,n-1 do
+⑤ local y = 0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue
+ img[i].red = y; img[i].green = y; img[i].blue = y
+ end
+end
+
+local N = 400*400
+local img = image_ramp_green(N)
+for i=1,1000 do
+ image_to_grey(img, N)
+end
+
+
+Ok, so that wasn't too difficult:
+
+
+① First, load the FFI
+library and declare the low-level data type. Here we choose a
+struct which holds four byte fields, one for each component
+of a 4x8 bit RGBA pixel.
+
+
+② Creating the data
+structure with ffi.new() is straightforward — the
+'?' is a placeholder for the number of elements of a
+variable-length array.
+
+
+③ C arrays are
+zero-based, so the indexes have to run from 0 to
+n-1 . One might want to allocate one more element instead to
+simplify converting legacy code.
+
+
+④ Since ffi.new()
+zero-fills the array by default, we only need to set the green and the
+alpha fields.
+
+
+⑤ The calls to
+math.floor() can be omitted here, because floating-point
+numbers are already truncated towards zero when converting them to an
+integer. This happens implicitly when the number is stored in the
+fields of each pixel.
+
+
+Now let's have a look at the impact of the changes: first, memory
+consumption for the image is down from 22 Megabytes to
+640 Kilobytes (400*400*4 bytes). That's a factor of 35x less! So,
+yes, tables do have a noticeable overhead. BTW: The original program
+would consume 40 Megabytes in plain Lua (on x64).
+
+
+Next, performance: the pure Lua version runs in 9.57 seconds (52.9
+seconds with the Lua interpreter) and the FFI version runs in 0.48
+seconds on my machine (YMMV). That's a factor of 20x faster (110x
+faster than the Lua interpreter).
+
+
+The avid reader may notice that converting the pure Lua version over
+to use array indexes for the colors ([1] instead of
+.red , [2] instead of .green etc.) ought to
+be more compact and faster. This is certainly true (by a factor of
+~1.7x). Switching to a struct-of-arrays would help, too.
+
+
+However the resulting code would be less idiomatic and rather
+error-prone. And it still doesn't get even close to the performance of
+the FFI version of the code. Also, high-level data structures cannot
+be easily passed to other C functions, especially I/O functions,
+without undue conversion penalties.
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/ext_ffi_api.html b/src/LuaJIT/doc/ext_ffi_api.html
new file mode 100644
index 000000000..ae2c6cc8d
--- /dev/null
+++ b/src/LuaJIT/doc/ext_ffi_api.html
@@ -0,0 +1,555 @@
+
+
+
+ffi.* API Functions
+
+
+
+
+
+
+
+
+
+
+
+
+ffi.* API Functions
+
+
+
+
+This page describes the API functions provided by the FFI library in
+detail. It's recommended to read through the
+introduction and the
+FFI tutorial first.
+
+
+
Glossary
+
+cdecl — An abstract C type declaration (a Lua
+string).
+ctype — A C type object. This is a special kind of
+cdata returned by ffi.typeof() . It serves as a
+cdata constructor when called.
+cdata — A C data object. It holds a value of the
+corresponding ctype .
+ct — A C type specification which can be used for
+most of the API functions. Either a cdecl , a ctype or a
+cdata serving as a template type.
+cb — A callback object. This is a C data object
+holding a special function pointer. Calling this function from
+C code runs an associated Lua function.
+VLA — A variable-length array is declared with a
+? instead of the number of elements, e.g. "int[?]" .
+The number of elements (nelem ) must be given when it's
+created .
+VLS — A variable-length struct is a struct C
+type where the last element is a VLA . The same rules for
+declaration and creation apply.
+
+
+
Declaring and Accessing External Symbols
+
+External symbols must be declared first and can then be accessed by
+indexing a C library
+namespace , which automatically binds the symbol to a specific
+library.
+
+
+
ffi.cdef(def)
+
+Adds multiple C declarations for types or external symbols (named
+variables or functions). def must be a Lua string. It's
+recommended to use the syntactic sugar for string arguments as
+follows:
+
+
+ffi.cdef[[
+typedef struct foo { int a, b; } foo_t; // Declare a struct and typedef.
+int dofoo(foo_t *f, int n); /* Declare an external C function. */
+]]
+
+
+The contents of the string (the part in green above) must be a
+sequence of
+C declarations ,
+separated by semicolons. The trailing semicolon for a single
+declaration may be omitted.
+
+
+Please note that external symbols are only declared , but they
+are not bound to any specific address, yet. Binding is
+achieved with C library namespaces (see below).
+
+
+C declarations are not passed through a C pre-processor,
+yet. No pre-processor tokens are allowed, except for
+#pragma pack . Replace #define in existing
+C header files with enum , static const
+or typedef and/or pass the files through an external
+C pre-processor (once). Be careful not to include unneeded or
+redundant declarations from unrelated header files.
+
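+For illustration, a minimal sketch of such a rewrite (the names MY_BUFSIZE,
+MY_MAX_RETRY and my_flag_t are made up for this example):
+
+local ffi = require("ffi")
+ffi.cdef[[
+enum { MY_BUFSIZE = 4096 };         /* instead of: #define MY_BUFSIZE 4096 */
+static const int MY_MAX_RETRY = 3;  /* alternative style for integer constants */
+typedef unsigned int my_flag_t;     /* instead of a #define used as a type alias */
+]]
+print(ffi.C.MY_BUFSIZE)             -- declared constants are readable via a namespace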
+
+
ffi.C
+
+This is the default C library namespace — note the
+uppercase 'C' . It binds to the default set of symbols or
+libraries on the target system. These are more or less the same as a
+C compiler would offer by default, without specifying extra link
+libraries.
+
+
+On POSIX systems, this binds to symbols in the default or global
+namespace. This includes all exported symbols from the executable and
+any libraries loaded into the global namespace. This includes at least
+libc , libm , libdl (on Linux),
+libgcc (if compiled with GCC), as well as any exported
+symbols from the Lua/C API provided by LuaJIT itself.
+
+
+On Windows systems, this binds to symbols exported from the
+*.exe , the lua51.dll (i.e. the Lua/C API
+provided by LuaJIT itself), the C runtime library LuaJIT was linked
+with (msvcrt*.dll ), kernel32.dll ,
+user32.dll and gdi32.dll .
+
+
+
clib = ffi.load(name [,global])
+
+This loads the dynamic library given by name and returns
+a new C library namespace which binds to its symbols. On POSIX
+systems, if global is true , the library symbols are
+loaded into the global namespace, too.
+
+
+If name is a path, the library is loaded from this path.
+Otherwise name is canonicalized in a system-dependent way and
+searched in the default search path for dynamic libraries:
+
+
+On POSIX systems, if the name contains no dot, the extension
+.so is appended. Also, the lib prefix is prepended
+if necessary. So ffi.load("z") looks for "libz.so"
+in the default shared library search path.
+
+
+On Windows systems, if the name contains no dot, the extension
+.dll is appended. So ffi.load("ws2_32") looks for
+"ws2_32.dll" in the default DLL search path.
+
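+As a hedged example, a minimal sketch that assumes a POSIX system with zlib
+installed (on other systems the library name and extension differ):
+
+local ffi = require("ffi")
+ffi.cdef[[
+const char *zlibVersion(void);
+]]
+local zlib = ffi.load("z")             -- looks for "libz.so" as described above
+print(ffi.string(zlib.zlibVersion()))  -- prints the installed zlib version, e.g. "1.2.8"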
+
+
Creating cdata Objects
+
+The following API functions create cdata objects (type()
+returns "cdata" ). All created cdata objects are
+garbage collected .
+
+
+
cdata = ffi.new(ct [,nelem] [,init...])
+cdata = ctype ([nelem,] [init...])
+
+Creates a cdata object for the given ct . VLA/VLS types
+require the nelem argument. The second syntax uses a ctype as
+a constructor and is otherwise fully equivalent.
+
+
+The cdata object is initialized according to the
+rules for initializers ,
+using the optional init arguments. Excess initializers cause
+an error.
+
+
+Performance notice: if you want to create many objects of one kind,
+parse the cdecl only once and get its ctype with
+ffi.typeof() . Then use the ctype as a constructor repeatedly.
+
+
+Please note that an anonymous struct declaration implicitly
+creates a new and distinguished ctype every time you use it for
+ffi.new() . This is probably not what you want,
+especially if you create more than one cdata object. Different anonymous
+structs are not considered assignment-compatible by the
+C standard, even though they may have the same fields! Also, they
+are considered different types by the JIT-compiler, which may cause an
+excessive number of traces. It's strongly suggested to either declare
+a named struct or typedef with ffi.cdef()
+or to create a single ctype object for an anonymous struct
+with ffi.typeof() .
+
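+A brief sketch of typical creations (the element types are arbitrary examples):
+
+local ffi = require("ffi")
+local vec = ffi.new("double[3]", 1, 2, 3)  -- fixed-size array with element initializers
+local arr = ffi.new("int[?]", 10)          -- VLA: nelem is required; zero-filled by default
+print(vec[0], arr[9])                      --> 1    0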
+
+
ctype = ffi.typeof(ct)
+
+Creates a ctype object for the given ct .
+
+
+This function is especially useful to parse a cdecl only once and then
+use the resulting ctype object as a constructor .
+
+
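+A minimal sketch of this pattern (point_t is an invented example type):
+
+local ffi = require("ffi")
+ffi.cdef[[ typedef struct { double x, y; } point_t; ]]
+local point = ffi.typeof("point_t")  -- parse the declaration once
+local p = point(3, 4)                -- then reuse the ctype as a constructor
+print(p.x, p.y)                      --> 3    4
+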
+
cdata = ffi.cast(ct, init)
+
+Creates a scalar cdata object for the given ct . The cdata
+object is initialized with init using the "cast" variant of
+the C type conversion
+rules .
+
+
+This function is mainly useful to override the pointer compatibility
+checks or to convert pointers to addresses or vice versa.
+
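+An illustrative sketch of both uses (uintptr_t is one of the pre-defined
+fixed-size integer types):
+
+local ffi = require("ffi")
+local s = ffi.cast("const char *", "hello")  -- Lua string to a char pointer (valid while the string lives)
+local addr = ffi.cast("uintptr_t", s)        -- pointer converted to an integer address
+print(tonumber(addr) ~= 0)                   --> true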
+
+
+ctype = ffi.metatype(ct, metatable)
+Creates a ctype object for the given ct and associates it with
+a metatable. Only struct /union types, complex numbers
+and vectors are allowed. Other types may be wrapped in a
+struct , if needed.
+
+
+The association with a metatable is permanent and cannot be changed
+afterwards. Neither the contents of the metatable nor the
+contents of an __index table (if any) may be modified
+afterwards. The associated metatable automatically applies to all uses
+of this type, no matter how the objects are created or where they
+originate from. Note that pre-defined operations on types have
+precedence (e.g. declared field names cannot be overridden).
+
+
+All standard Lua metamethods are implemented. These are called directly,
+without shortcuts and on any mix of types. For binary operations, the
+left operand is checked first for a valid ctype metamethod. The
+__gc metamethod only applies to struct /union
+types and performs an implicit ffi.gc()
+call during creation of an instance.
+
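+A compact, illustrative sketch (vec2_t and its metamethods are invented for
+this example):
+
+local ffi = require("ffi")
+ffi.cdef[[ typedef struct { double x, y; } vec2_t; ]]
+local vec2
+vec2 = ffi.metatype("vec2_t", {
+  __add = function(a, b) return vec2(a.x + b.x, a.y + b.y) end,
+  __index = {
+    length = function(v) return math.sqrt(v.x * v.x + v.y * v.y) end,
+  },
+})
+local v = vec2(3, 4) + vec2(1, 2)  -- uses the __add metamethod
+print(v:length())                  -- method resolved via the __index table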
+
+
cdata = ffi.gc(cdata, finalizer)
+
+Associates a finalizer with a pointer or aggregate cdata object. The
+cdata object is returned unchanged.
+
+
+This function allows safe integration of unmanaged resources into the
+automatic memory management of the LuaJIT garbage collector. Typical
+usage:
+
+
+local p = ffi.gc(ffi.C.malloc(n), ffi.C.free)
+...
+p = nil -- Last reference to p is gone.
+-- GC will eventually run finalizer: ffi.C.free(p)
+
+
+A cdata finalizer works like the __gc metamethod for userdata
+objects: when the last reference to a cdata object is gone, the
+associated finalizer is called with the cdata object as an argument. The
+finalizer can be a Lua function or a cdata function or cdata function
+pointer. An existing finalizer can be removed by setting a nil
+finalizer, e.g. right before explicitly deleting a resource:
+
+
+ffi.C.free(ffi.gc(p, nil)) -- Manually free the memory.
+
+
+
C Type Information
+
+The following API functions return information about C types.
+They are most useful for inspecting cdata objects.
+
+
+
size = ffi.sizeof(ct [,nelem])
+
+Returns the size of ct in bytes. Returns nil if
+the size is not known (e.g. for "void" or function types).
+Requires nelem for VLA/VLS types, except for cdata objects.
+
+
+
align = ffi.alignof(ct)
+
+Returns the minimum required alignment for ct in bytes.
+
+
+
ofs [,bpos,bsize] = ffi.offsetof(ct, field)
+
+Returns the offset (in bytes) of field relative to the start
+of ct , which must be a struct . Additionally returns
+the position and the field size (in bits) for bit fields.
+
+
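+A small sketch combining the three functions above (struct pair is a hypothetical declaration;
+the printed numbers assume a common x64 ABI):
+
+local ffi = require("ffi")
+ffi.cdef[[ struct pair { char tag; double value; }; ]]
+print(ffi.sizeof("struct pair"))             --> 16 (includes padding)
+print(ffi.alignof("struct pair"))            --> 8
+print(ffi.offsetof("struct pair", "value"))  --> 8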
+
status = ffi.istype(ct, obj)
+
+Returns true if obj has the C type given by
+ct . Returns false otherwise.
+
+
+C type qualifiers (const etc.) are ignored. Pointers are
+checked with the standard pointer compatibility rules, but without any
+special treatment for void * . If ct specifies a
+struct /union , then a pointer to this type is accepted,
+too. Otherwise the types must match exactly.
+
+
+Note: this function accepts all kinds of Lua objects for the
+obj argument, but always returns false for non-cdata
+objects.
+
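+A quick sketch:
+
+local ffi = require("ffi")
+local int_t = ffi.typeof("int")
+local x = ffi.new("int", 7)
+print(ffi.istype(int_t, x))      --> true
+print(ffi.istype("double", x))   --> false
+print(ffi.istype(int_t, 7))      --> false (plain Lua number, not a cdata object)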
+
+
Utility Functions
+
+
err = ffi.errno([newerr])
+
+Returns the error number set by the last C function call which
+indicated an error condition. If the optional newerr argument
+is present, the error number is set to the new value and the previous
+value is returned.
+
+
+This function offers a portable and OS-independent way to get and set the
+error number. Note that only some C functions set the error
+number. And it's only significant if the function actually indicated an
+error condition (e.g. with a return value of -1 or
+NULL ). Otherwise, it may or may not contain any previously set
+value.
+
+
+You're advised to call this function only when needed and as close as
+possible after the return of the related C function. The
+errno value is preserved across hooks, memory allocations,
+invocations of the JIT compiler and other internal VM activity. The same
+applies to the value returned by GetLastError() on Windows, but
+you need to declare and call it yourself.
+
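+A POSIX-only sketch (the chdir() declaration is our own assumption, it is not pre-declared
+by the FFI; the errno value is platform-dependent):
+
+local ffi = require("ffi")
+ffi.cdef[[ int chdir(const char *path); ]]
+if ffi.C.chdir("/no/such/directory") == -1 then
+  print("errno =", ffi.errno())   -- e.g. 2 (ENOENT) on Linux
+end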
+
+
str = ffi.string(ptr [,len])
+
+Creates an interned Lua string from the data pointed to by
+ptr .
+
+
+If the optional argument len is missing, ptr is
+converted to a "char *" and the data is assumed to be
+zero-terminated. The length of the string is computed with
+strlen() .
+
+
+Otherwise ptr is converted to a "void *" and
+len gives the length of the data. The data may contain
+embedded zeros and need not be byte-oriented (though this may cause
+endianness issues).
+
+
+This function is mainly useful to convert (temporary)
+"const char *" pointers returned by
+C functions to Lua strings and store them or pass them to other
+functions expecting a Lua string. The Lua string is an (interned) copy
+of the data and bears no relation to the original data area anymore.
+Lua strings are 8 bit clean and may be used to hold arbitrary,
+non-character data.
+
+
+Performance notice: it's faster to pass the length of the string, if
+it's known. E.g. when the length is returned by a C call like
+sprintf() .
+
+
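+For example, converting a "const char *" style result to a Lua string (the getenv()
+declaration is our own assumption, not pre-declared by the FFI):
+
+local ffi = require("ffi")
+ffi.cdef[[ char *getenv(const char *name); ]]
+local p = ffi.C.getenv("HOME")
+local home = p ~= nil and ffi.string(p) or nil   -- interned copy of the C string
+print(home)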
+
ffi.copy(dst, src, len)
+ffi.copy(dst, str)
+
+Copies the data pointed to by src to dst .
+dst is converted to a "void *" and src
+is converted to a "const void *" .
+
+
+In the first syntax, len gives the number of bytes to copy.
+Caveat: if src is a Lua string, then len must not
+exceed #src+1 .
+
+
+In the second syntax, the source of the copy must be a Lua string. All
+bytes of the string plus a zero-terminator are copied to
+dst (i.e. #src+1 bytes).
+
+
+Performance notice: ffi.copy() may be used as a faster
+(inlinable) replacement for the C library functions
+memcpy() , strcpy() and strncpy() .
+
+
+
ffi.fill(dst, len [,c])
+
+Fills the data pointed to by dst with len constant
+bytes, given by c . If c is omitted, the data is
+zero-filled.
+
+
+Performance notice: ffi.fill() may be used as a faster
+(inlinable) replacement for the C library function
+memset(dst, c, len) . Please note the different
+order of arguments!
+
+
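+A combined sketch of ffi.fill() and ffi.copy():
+
+local ffi = require("ffi")
+local buf = ffi.new("char[64]")
+ffi.fill(buf, 64)            -- zero-fill (ffi.new() already zero-fills; shown for illustration)
+ffi.copy(buf, "hello")       -- copies the string plus its zero-terminator
+print(ffi.string(buf))       --> hello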
+
Target-specific Information
+
+
status = ffi.abi(param)
+
+Returns true if param (a Lua string) applies for the
+target ABI (Application Binary Interface). Returns false
+otherwise. The following parameters are currently defined:
+
+
+
+Parameter   Description
+
+32bit       32 bit architecture
+64bit       64 bit architecture
+le          Little-endian architecture
+be          Big-endian architecture
+fpu         Target has a hardware FPU
+softfp      softfp calling conventions
+hardfp      hardfp calling conventions
+eabi        EABI variant of the standard ABI
+win         Windows variant of the standard ABI
+
+
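+For example, selecting an address-sized integer type (a sketch):
+
+local ffi = require("ffi")
+local addr_t = ffi.abi("64bit") and "uint64_t" or "uint32_t"
+print(addr_t, "little-endian:", ffi.abi("le"))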
+
ffi.os
+
+Contains the target OS name. Same contents as
+jit.os .
+
+
+
ffi.arch
+
+Contains the target architecture name. Same contents as
+jit.arch .
+
+
+
Methods for Callbacks
+
+The C types for callbacks
+have some extra methods:
+
+
+
cb:free()
+
+Free the resources associated with a callback. The associated Lua
+function is unanchored and may be garbage collected. The callback
+function pointer is no longer valid and must not be called anymore
+(it may be reused by a subsequently created callback).
+
+
+
cb:set(func)
+
+Associate a new Lua function with a callback. The C type of the
+callback and the callback function pointer are unchanged.
+
+
+This method is useful to dynamically switch the receiver of callbacks
+without creating a new callback each time and registering it again (e.g.
+with a GUI library).
+
+
+
Extended Standard Library Functions
+
+The following standard library functions have been extended to work
+with cdata objects:
+
+
+
n = tonumber(cdata)
+
+Converts a number cdata object to a double and returns it as
+a Lua number. This is particularly useful for boxed 64 bit
+integer values. Caveat: this conversion may incur a precision loss.
+
+
+
s = tostring(cdata)
+
+Returns a string representation of the value of 64 bit integers
+(" nnnLL" or " nnnULL" ) or
+complex numbers (" re±imi" ). Otherwise
+returns a string representation of the C type of a ctype object
+("ctype< type>" ) or a cdata object
+("cdata< type>: address" ).
+
+
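+A sketch of both extensions (output formatting is approximate):
+
+local x = 9007199254740993LL   -- 2^53+1, not exactly representable as a double
+print(tostring(x))             --> 9007199254740993LL
+print(tonumber(x))             --> 9.007199254741e+15 (precision loss: rounded to 2^53)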
+
Extensions to the Lua Parser
+
+The parser for Lua source code treats numeric literals with the
+suffixes LL or ULL as signed or unsigned 64 bit
+integers. Case doesn't matter, but uppercase is recommended for
+readability. It handles both decimal (42LL ) and hexadecimal
+(0x2aLL ) literals.
+
+
+The imaginary part of complex numbers can be specified by suffixing
+number literals with i or I , e.g. 12.5i .
+Caveat: you'll need to use 1i to get an imaginary part with
+the value one, since i itself still refers to a variable
+named i .
+
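+A short sketch of the literal forms (note that arithmetic on complex numbers is still limited,
+see the status section of the FFI semantics page):
+
+print(42LL, 0x2aLL)   --> 42LL    42LL   (decimal and hexadecimal 64 bit literals)
+print(40ULL + 2)      --> 42ULL          (64 bit results stay boxed)
+local j = 1i          -- imaginary unit as a complex cdata value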
+
+
+
+
+
diff --git a/src/LuaJIT/doc/ext_ffi_semantics.html b/src/LuaJIT/doc/ext_ffi_semantics.html
new file mode 100644
index 000000000..033318a17
--- /dev/null
+++ b/src/LuaJIT/doc/ext_ffi_semantics.html
@@ -0,0 +1,1148 @@
+
FFI Semantics
+
+
+
+
+This page describes the detailed semantics underlying the FFI library
+and its interaction with both Lua and C code.
+
+
+Given that the FFI library is designed to interface with C code
+and that declarations can be written in plain C syntax, it
+closely follows the C language semantics , wherever possible.
+Some minor concessions are needed for smoother interoperation with Lua
+language semantics.
+
+
+Please don't be overwhelmed by the contents of this page — this
+is a reference and you may need to consult it, if in doubt. It doesn't
+hurt to skim this page, but most of the semantics "just work" as you'd
+expect them to work. It should be straightforward to write
+applications using the LuaJIT FFI for developers with a C or C++
+background.
+
+
+Please note: this doesn't comprise the final specification for the FFI
+semantics, yet. Some semantics may need to be changed, based on your
+feedback. Please report any problems you may
+encounter or any improvements you'd like to see — thank you!
+
+
+
C Language Support
+
+The FFI library has a built-in C parser with a minimal memory
+footprint. It's used by the ffi.* library
+functions to declare C types or external symbols.
+
+
+Its only purpose is to parse C declarations, as found e.g. in
+C header files. Although it does evaluate constant expressions,
+it's not a C compiler. The body of inline
+C function definitions is simply ignored.
+
+
+Also, this is not a validating C parser. It expects and
+accepts correctly formed C declarations, but it may choose to
+ignore bad declarations or show rather generic error messages. If in
+doubt, please check the input against your favorite C compiler.
+
+
+The C parser complies to the C99 language standard plus
+the following extensions:
+
+
+
+The '\e' escape in character and string literals.
+
+The C99/C++ boolean type, declared with the keywords bool
+or _Bool .
+
+Complex numbers, declared with the keywords complex or
+_Complex .
+
+Two complex number types: complex (aka
+complex double ) and complex float .
+
+Vector types, declared with the GCC mode or
+vector_size attribute.
+
+Unnamed ('transparent') struct /union fields
+inside a struct /union .
+
+Incomplete enum declarations, handled like incomplete
+struct declarations.
+
+Unnamed enum fields inside a
+struct /union . This is similar to a scoped C++
+enum , except that declared constants are visible in the
+global namespace, too.
+
+Scoped static const declarations inside a
+struct /union (from C++).
+
+Zero-length arrays ([0] ), empty
+struct /union , variable-length arrays (VLA,
+[?] ) and variable-length structs (VLS, with a trailing
+VLA).
+
+C++ reference types (int &x ).
+
+Alternate GCC keywords with '__ ', e.g.
+__const__ .
+
+GCC __attribute__ with the following attributes:
+aligned , packed , mode ,
+vector_size , cdecl , fastcall ,
+stdcall .
+
+The GCC __extension__ keyword and the GCC
+__alignof__ operator.
+
+GCC __asm__("symname") symbol name redirection for
+function declarations.
+
+MSVC keywords for fixed-length types: __int8 ,
+__int16 , __int32 and __int64 .
+
+MSVC __cdecl , __fastcall , __stdcall ,
+__ptr32 , __ptr64 , __declspec(align(n))
+and #pragma pack .
+
+All other GCC/MSVC-specific attributes are ignored.
+
+
+
+The following C types are pre-defined by the C parser (like
+a typedef , except re-declarations will be ignored):
+
+
+
+Vararg handling: va_list , __builtin_va_list ,
+__gnuc_va_list .
+
+From <stddef.h> : ptrdiff_t ,
+size_t , wchar_t .
+
+From <stdint.h> : int8_t , int16_t ,
+int32_t , int64_t , uint8_t ,
+uint16_t , uint32_t , uint64_t ,
+intptr_t , uintptr_t .
+
+
+
+You're encouraged to use these types in preference to the
+compiler-specific extensions or the target-dependent standard types.
+E.g. char differs in signedness and long differs in
+size, depending on the target architecture and platform ABI.
+
+
+The following C features are not supported:
+
+
+
+A declaration must always have a type specifier; it doesn't
+default to an int type.
+
+Old-style empty function declarations (K&R) are not allowed.
+All C functions must have a proper prototype declaration. A
+function declared without parameters (int foo(); ) is
+treated as a function taking zero arguments, like in C++.
+
+The long double C type is parsed correctly, but
+there's no support for the related conversions, accesses or arithmetic
+operations.
+
+Wide character strings and character literals are not
+supported.
+
+See below for features that are currently
+not implemented.
+
+
+
+
C Type Conversion Rules
+
+
Conversions from C types to Lua objects
+
+These conversion rules apply for read accesses to
+C types: indexing pointers, arrays or
+struct /union types; reading external variables or
+constant values; retrieving return values from C calls:
+
+
+
+Input              Conversion                       Output
+
+int8_t, int16_t    →sign-ext int32_t → double       number
+uint8_t, uint16_t  →zero-ext int32_t → double       number
+int32_t, uint32_t  → double                         number
+int64_t, uint64_t  boxed value                      64 bit int cdata
+double, float      → double                         number
+bool               0 → false, otherwise true        boolean
+Complex number     boxed value                      complex cdata
+Vector             boxed value                      vector cdata
+Pointer            boxed value                      pointer cdata
+Array              boxed reference                  reference cdata
+struct/union       boxed reference                  reference cdata
+
+
+Bitfields or enum types are treated like their underlying
+type.
+
+
+Reference types are dereferenced before a conversion can take
+place — the conversion is applied to the C type pointed to
+by the reference.
+
+
+
Conversions from Lua objects to C types
+
+These conversion rules apply for write accesses to
+C types: indexing pointers, arrays or
+struct /union types; initializing cdata objects;
+casts to C types; writing to external variables; passing
+arguments to C calls:
+
+
+
+Input            Conversion                         Output
+
+number           →                                  double
+boolean          false → 0, true → 1                bool
+nil              NULL →                             (void *)
+userdata         userdata payload →                 (void *)
+lightuserdata    lightuserdata address →            (void *)
+string           match against enum constant        enum
+string           copy string data + zero-byte       int8_t[], uint8_t[]
+string           string data →                      const char[]
+function         create callback →                  C function type
+table            table initializer                  Array
+table            table initializer                  struct/union
+cdata            cdata payload →                    C type
+
+
+If the result type of this conversion doesn't match the
+C type of the destination, the
+conversion rules between C types
+are applied.
+
+
+Reference types are immutable after initialization ("no re-seating of
+references"). For initialization purposes or when passing values to
+reference parameters, they are treated like pointers. Note that unlike
+in C++, there's no way to implement automatic reference generation of
+variables under the Lua language semantics. If you want to call a
+function with a reference parameter, you need to explicitly pass a
+one-element array.
+
+
+
Conversions between C types
+
+These conversion rules are more or less the same as the standard
+C conversion rules. Some rules only apply to casts, or require
+pointer or type compatibility:
+
+
+
+Input              Conversion                              Output
+
+Signed integer     →narrow or sign-extend                  Integer
+Unsigned integer   →narrow or zero-extend                  Integer
+Integer            →round                                  double, float
+double, float      →trunc int32_t →narrow                  (u)int8_t, (u)int16_t
+double, float      →trunc                                  (u)int32_t, (u)int64_t
+double, float      →round                                  float, double
+Number             n == 0 → 0, otherwise 1                 bool
+bool               false → 0, true → 1                     Number
+Complex number     convert real part                       Number
+Number             convert real part, imag = 0             Complex number
+Complex number     convert real and imag part              Complex number
+Number             convert scalar and replicate            Vector
+Vector             copy (same size)                        Vector
+struct/union       take base address (compat)              Pointer
+Array              take base address (compat)              Pointer
+Function           take function address                   Function pointer
+Number             convert via uintptr_t (cast)            Pointer
+Pointer            convert address (compat/cast)           Pointer
+Pointer            convert address (cast)                  Integer
+Array              convert base address (cast)             Integer
+Array              copy (compat)                           Array
+struct/union       copy (identical type)                   struct/union
+
+
+Bitfields or enum types are treated like their underlying
+type.
+
+
+Conversions not listed above will raise an error. E.g. it's not
+possible to convert a pointer to a complex number or vice versa.
+
+
+
Conversions for vararg C function arguments
+
+The following default conversion rules apply when passing Lua objects
+to the variable argument part of vararg C functions:
+
+
+
+Input                 Conversion                   Output
+
+number                →                            double
+boolean               false → 0, true → 1          bool
+nil                   NULL →                       (void *)
+userdata              userdata payload →           (void *)
+lightuserdata         lightuserdata address →      (void *)
+string                string data →                const char *
+float cdata           →                            double
+Array cdata           take base address            Element pointer
+struct/union cdata    take base address            struct/union pointer
+Function cdata        take function address        Function pointer
+Any other cdata       no conversion                C type
+
+
+To pass a Lua object, other than a cdata object, as a specific type,
+you need to override the conversion rules: create a temporary cdata
+object with a constructor or a cast and initialize it with the value
+to pass:
+
+
+Assuming x is a Lua number, here's how to pass it as an
+integer to a vararg function:
+
+
+ffi.cdef[[
+int printf(const char *fmt, ...);
+]]
+ffi.C.printf("integer value: %d\n", ffi.new("int", x))
+
+
+If you don't do this, the default Lua number → double
+conversion rule applies. A vararg C function expecting an integer
+will see a garbled or uninitialized value.
+
+
+
Initializers
+
+Creating a cdata object with
+ffi.new() or the
+equivalent constructor syntax always initializes its contents, too.
+Different rules apply, depending on the number of optional
+initializers and the C types involved:
+
+
+If no initializers are given, the object is filled with zero bytes.
+
+Scalar types (numbers and pointers) accept a single initializer.
+The Lua object is converted to the scalar
+C type .
+
+Valarrays (complex numbers and vectors) are treated like scalars
+when a single initializer is given. Otherwise they are treated like
+regular arrays.
+
+Aggregate types (arrays and structs) accept either a single
+table initializer or a flat list of
+initializers.
+
+The elements of an array are initialized, starting at index zero.
+If a single initializer is given for an array, it's repeated for all
+remaining elements. This doesn't happen if two or more initializers
+are given: all remaining uninitialized elements are filled with zero
+bytes.
+
+Byte arrays may also be initialized with a Lua string. This copies
+the whole string plus a terminating zero-byte. The copy stops early only
+if the array has a known, fixed size.
+
+The fields of a struct are initialized in the order of
+their declaration. Uninitialized fields are filled with zero
+bytes.
+
+Only the first field of a union can be initialized with a
+flat initializer.
+
+Elements or fields which are aggregates themselves are initialized
+with a single initializer, but this may be a table
+initializer or a compatible aggregate.
+
+Excess initializers cause an error.
+
+
+
+
Table Initializers
+
+The following rules apply if a Lua table is used to initialize an
+Array or a struct /union :
+
+
+
+If the table index [0] is non-nil , then the
+table is assumed to be zero-based. Otherwise it's assumed to be
+one-based.
+
+Array elements, starting at index zero, are initialized one-by-one
+with the consecutive table elements, starting at either index
+[0] or [1] . This process stops at the first
+nil table element.
+
+If exactly one array element was initialized, it's repeated for
+all the remaining elements. Otherwise all remaining uninitialized
+elements are filled with zero bytes.
+
+The above logic only applies to arrays with a known fixed size.
+A VLA is only initialized with the element(s) given in the table.
+Depending on the use case, you may need to explicitly add a
+NULL or 0 terminator to a VLA.
+
+If the table has a non-empty hash part, a
+struct /union is initialized by looking up each field
+name (as a string key) in the table. Each non-nil value is
+used to initialize the corresponding field.
+
+Otherwise a struct /union is initialized in the
+order of the declaration of its fields. Each field is initialized with
+the consecutive table elements, starting at either index [0]
+or [1] . This process stops at the first nil table
+element.
+
+Uninitialized fields of a struct are filled with zero
+bytes, except for the trailing VLA of a VLS.
+
+Initialization of a union stops after one field has been
+initialized. If no field has been initialized, the union is
+filled with zero bytes.
+
+Elements or fields which are aggregates themselves are initialized
+with a single initializer, but this may be a nested table
+initializer (or a compatible aggregate).
+
+Excess initializers for an array cause an error. Excess
+initializers for a struct /union are ignored.
+Unrelated table entries are ignored, too.
+
+
+
+Example:
+
+
+local ffi = require("ffi")
+
+ffi.cdef[[
+struct foo { int a, b; };
+union bar { int i; double d; };
+struct nested { int x; struct foo y; };
+]]
+
+ffi.new("int[3]", {}) --> 0, 0, 0
+ffi.new("int[3]", {1}) --> 1, 1, 1
+ffi.new("int[3]", {1,2}) --> 1, 2, 0
+ffi.new("int[3]", {1,2,3}) --> 1, 2, 3
+ffi.new("int[3]", {[0]=1}) --> 1, 1, 1
+ffi.new("int[3]", {[0]=1,2}) --> 1, 2, 0
+ffi.new("int[3]", {[0]=1,2,3}) --> 1, 2, 3
+ffi.new("int[3]", {[0]=1,2,3,4}) --> error: too many initializers
+
+ffi.new("struct foo", {}) --> a = 0, b = 0
+ffi.new("struct foo", {1}) --> a = 1, b = 0
+ffi.new("struct foo", {1,2}) --> a = 1, b = 2
+ffi.new("struct foo", {[0]=1,2}) --> a = 1, b = 2
+ffi.new("struct foo", {b=2}) --> a = 0, b = 2
+ffi.new("struct foo", {a=1,b=2,c=3}) --> a = 1, b = 2 'c' is ignored
+
+ffi.new("union bar", {}) --> i = 0, d = 0.0
+ffi.new("union bar", {1}) --> i = 1, d = ?
+ffi.new("union bar", {[0]=1,2}) --> i = 1, d = ? '2' is ignored
+ffi.new("union bar", {d=2}) --> i = ?, d = 2.0
+
+ffi.new("struct nested", {1,{2,3}}) --> x = 1, y.a = 2, y.b = 3
+ffi.new("struct nested", {x=1,y={2,3}}) --> x = 1, y.a = 2, y.b = 3
+
+
+
Operations on cdata Objects
+
+All of the standard Lua operators can be applied to cdata objects or a
+mix of a cdata object and another Lua object. The following list shows
+the valid combinations. All other combinations currently raise an
+error.
+
+
+Reference types are dereferenced before performing each of
+the operations below — the operation is applied to the
+C type pointed to by the reference.
+
+
+The pre-defined operations are always tried first before deferring to a
+metamethod for a ctype (if defined).
+
+
+
Indexing a cdata object
+
+
+Indexing a pointer/array : a cdata pointer/array can be
+indexed by a cdata number or a Lua number. The element address is
+computed as the base address plus the number value multiplied by the
+element size in bytes. A read access loads the element value and
+converts it to a Lua object . A write
+access converts a Lua object to the element
+type and stores the converted value to the element. An error is
+raised if the element size is undefined or a write access to a
+constant element is attempted.
+
+Dereferencing a struct /union field : a
+cdata struct /union or a pointer to a
+struct /union can be dereferenced by a string key,
+giving the field name. The field address is computed as the base
+address plus the relative offset of the field. A read access loads the
+field value and converts it to a Lua
+object . A write access converts a Lua
+object to the field type and stores the converted value to the
+field. An error is raised if a write access to a constant
+struct /union or a constant field is attempted.
+
+Indexing a complex number : a complex number can be indexed
+either by a cdata number or a Lua number with the values 0 or 1, or by
+the strings "re" or "im" . A read access loads the
+real part ([0] , .re ) or the imaginary part
+([1] , .im ) of a complex number and
+converts it to a Lua number . The
+sub-parts of a complex number are immutable — assigning to an
+index of a complex number raises an error. Accessing out-of-bound
+indexes returns unspecified results, but is guaranteed not to trigger
+memory access violations.
+
+Indexing a vector : a vector is treated like an array for
+indexing purposes, except the vector elements are immutable —
+assigning to an index of a vector raises an error.
+
+
+
+Note: since there's (deliberately) no address-of operator, a cdata
+object holding a value type is effectively immutable after
+initialization. The JIT compiler benefits from this fact when applying
+certain optimizations.
+
+
+As a consequence of this, the elements of complex numbers and
+vectors are immutable. But the elements of an aggregate holding these
+types may be modified of course. I.e. you cannot assign to
+foo.c.im , but you can assign a (newly created) complex number
+to foo.c .
+
+
+
Calling a cdata object
+
+
+Constructor : a ctype object can be called and used as a
+constructor .
+
+C function call : a cdata function or cdata function
+pointer can be called. The passed arguments are
+converted to the C types of the
+parameters given by the function declaration. Arguments passed to the
+variable argument part of vararg C functions use
+special conversion rules . This
+C function is called and the return value (if any) is
+converted to a Lua object .
+On Windows/x86 systems, __stdcall functions are automatically
+detected and a function declared as __cdecl (the default) is
+silently fixed up after the first call.
+
+
+
+
Arithmetic on cdata objects
+
+
+Pointer arithmetic : a cdata pointer/array and a cdata
+number or a Lua number can be added or subtracted. The number must be
+on the right hand side for a subtraction. The result is a pointer of
+the same type with an address plus or minus the number value
+multiplied by the element size in bytes. An error is raised if the
+element size is undefined.
+
+Pointer difference : two compatible cdata pointers/arrays
+can be subtracted. The result is the difference between their
+addresses, divided by the element size in bytes. An error is raised if
+the element size is undefined or zero.
+
+64 bit integer arithmetic : the standard arithmetic
+operators (+ - * / % ^ and unary
+minus) can be applied to two cdata numbers, or a cdata number and a
+Lua number. If one of them is an uint64_t , the other side is
+converted to an uint64_t and an unsigned arithmetic operation
+is performed. Otherwise both sides are converted to an
+int64_t and a signed arithmetic operation is performed. The
+result is a boxed 64 bit cdata object.
+
+These rules ensure that 64 bit integers are "sticky". Any
+expression involving at least one 64 bit integer operand results
+in another one. The undefined cases for the division, modulo and power
+operators return 2LL ^ 63 or
+2ULL ^ 63 .
+
+You'll have to explicitly convert a 64 bit integer to a Lua
+number (e.g. for regular floating-point calculations) with
+tonumber() . But note this may incur a precision loss.
+
+
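+A short sketch of the 64 bit arithmetic rules described above:
+
+print(10LL + 5)             --> 15LL                       (both sides signed)
+print(10ULL - 20)           --> 18446744073709551606ULL    (unsigned wrap-around)
+print(2LL ^ 10)             --> 1024LL
+print(tonumber(10LL) / 4)   --> 2.5  (explicit conversion to a Lua number)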
+
+
Comparisons of cdata objects
+
+
+Pointer comparison : two compatible cdata pointers/arrays
+can be compared. The result is the same as an unsigned comparison of
+their addresses. nil is treated like a NULL pointer,
+which is compatible with any other pointer type.
+
+64 bit integer comparison : two cdata numbers, or a
+cdata number and a Lua number can be compared with each other. If one
+of them is an uint64_t , the other side is converted to an
+uint64_t and an unsigned comparison is performed. Otherwise
+both sides are converted to an int64_t and a signed
+comparison is performed.
+
+
+
+
cdata objects as table keys
+
+Lua tables may be indexed by cdata objects, but this doesn't provide
+any useful semantics — cdata objects are unsuitable as table
+keys!
+
+
+A cdata object is treated like any other garbage-collected object and
+is hashed and compared by its address for table indexing. Since
+there's no interning for cdata value types, the same value may be
+boxed in different cdata objects with different addresses. Thus
+t[1LL+1LL] and t[2LL] usually do not point to
+the same hash slot and they certainly do not point to the same
+hash slot as t[2] .
+
+
+It would seriously drive up implementation complexity and slow down
+the common case, if one were to add extra handling for by-value
+hashing and comparisons to Lua tables. Given the ubiquity of their use
+inside the VM, this is not acceptable.
+
+
+There are three viable alternatives, if you really need to use cdata
+objects as keys:
+
+
+
+If you can get by with the precision of Lua numbers
+(52 bits), then use tonumber() on a cdata number or
+combine multiple fields of a cdata aggregate to a Lua number. Then use
+the resulting Lua number as a key when indexing tables.
+One obvious benefit: t[tonumber(2LL)] does point to
+the same slot as t[2] .
+
+Otherwise use either tostring() on 64 bit integers
+or complex numbers or combine multiple fields of a cdata aggregate to
+a Lua string (e.g. with
+ffi.string() ). Then
+use the resulting Lua string as a key when indexing tables.
+
+Create your own specialized hash table implementation using the
+C types provided by the FFI library, just like you would in
+C code. Ultimately this may give much better performance than the
+other alternatives or what a generic by-value hash table could
+possibly provide.
+
+
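+A sketch of the first two alternatives above:
+
+local t = {}
+t[tonumber(2LL)] = "two"      -- same hash slot as t[2]
+print(t[2])                   --> two
+t[tostring(3LL)] = "three"    -- the key is the Lua string "3LL"
+print(t["3LL"])               --> three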
+
+
Garbage Collection of cdata Objects
+
+All explicitly (ffi.new() , ffi.cast() etc.) or
+implicitly (accessors) created cdata objects are garbage collected.
+You need to ensure to retain valid references to cdata objects
+somewhere on a Lua stack, an upvalue or in a Lua table while they are
+still in use. Once the last reference to a cdata object is gone, the
+garbage collector will automatically free the memory used by it (at
+the end of the next GC cycle).
+
+
+Please note that pointers themselves are cdata objects, however they
+are not followed by the garbage collector. So e.g. if you
+assign a cdata array to a pointer, you must keep the cdata object
+holding the array alive as long as the pointer is still in use:
+
+
+ffi.cdef[[
+typedef struct { int *a; } foo_t;
+]]
+
+local s = ffi.new("foo_t", ffi.new("int[10]")) -- WRONG!
+
+local a = ffi.new("int[10]") -- OK
+local s = ffi.new("foo_t", a)
+-- Now do something with 's', but keep 'a' alive until you're done.
+
+
+Similar rules apply for Lua strings which are implicitly converted to
+"const char *" : the string object itself must be
+referenced somewhere or it'll be garbage collected eventually. The
+pointer will then point to stale data, which may have already been
+overwritten. Note that string literals are automatically kept
+alive as long as the function containing them (actually its prototype)
+is not garbage collected.
+
+
+Objects which are passed as an argument to an external C function
+are kept alive until the call returns. So it's generally safe to
+create temporary cdata objects in argument lists. This is a common
+idiom for passing specific C types to
+vararg functions .
+
+
+Memory areas returned by C functions (e.g. from malloc() )
+must be manually managed, of course (or use
+ffi.gc() ). Pointers to
+cdata objects are indistinguishable from pointers returned by C
+functions (which is one of the reasons why the GC cannot follow them).
+
+
+
Callbacks
+
+The LuaJIT FFI automatically generates special callback functions
+whenever a Lua function is converted to a C function pointer. This
+associates the generated callback function pointer with the C type
+of the function pointer and the Lua function object (closure).
+
+
+This can happen implicitly due to the usual conversions, e.g. when
+passing a Lua function to a function pointer argument. Or you can use
+ffi.cast() to explicitly cast a Lua function to a
+C function pointer.
+
+
+Currently only certain C function types can be used as callback
+functions. Neither C vararg functions nor functions with
+pass-by-value aggregate argument or result types are supported. There
+are no restrictions for the kind of Lua functions that can be called
+from the callback — no checks for the proper number of arguments
+are made. The return value of the Lua function will be converted to the
+result type and an error will be thrown for invalid conversions.
+
+
+It's allowed to throw errors across a callback invocation, but it's not
+advisable in general. Do this only if you know that the C function which
+called the callback copes with the forced stack unwinding and doesn't
+leak resources.
+
+
+
Callback resource handling
+
+Callbacks take up resources — you can only have a limited number
+of them at the same time (500 - 1000, depending on the
+architecture). The associated Lua functions are anchored to prevent
+garbage collection, too.
+
+
+Callbacks due to implicit conversions are permanent! There is no
+way to guess their lifetime, since the C side might store the
+function pointer for later use (typical for GUI toolkits). The associated
+resources cannot be reclaimed until termination:
+
+
+ffi.cdef[[
+typedef int (__stdcall *WNDENUMPROC)(void *hwnd, intptr_t l);
+int EnumWindows(WNDENUMPROC func, intptr_t l);
+]]
+
+-- Implicit conversion to a callback via function pointer argument.
+local count = 0
+ffi.C.EnumWindows(function(hwnd, l)
+ count = count + 1
+ return true
+end, 0)
+-- The callback is permanent and its resources cannot be reclaimed!
+-- Ok, so this may not be a problem, if you do this only once.
+
+
+Note: this example shows that you must properly declare
+__stdcall callbacks on Windows/x86 systems. The calling
+convention cannot be automatically detected, unlike for
+__stdcall calls to Windows functions.
+
+
+For some use cases it's necessary to free up the resources or to
+dynamically redirect callbacks. Use an explicit cast to a
+C function pointer and keep the resulting cdata object. Then use
+the cb:free()
+or cb:set() methods
+on the cdata object:
+
+
+-- Explicitly convert to a callback via cast.
+local count = 0
+local cb = ffi.cast("WNDENUMPROC", function(hwnd, l)
+ count = count + 1
+ return true
+end)
+
+-- Pass it to a C function.
+ffi.C.EnumWindows(cb, 0)
+-- EnumWindows doesn't need the callback after it returns, so free it.
+
+cb:free()
+-- The callback function pointer is no longer valid and its resources
+-- will be reclaimed. The created Lua closure will be garbage collected.
+
+
+
+
+Callbacks are slow! First, the C to Lua transition itself
+has an unavoidable cost, similar to a lua_call() or
+lua_pcall() . Argument and result marshalling add to that cost.
+And finally, neither the C compiler nor LuaJIT can inline or
+optimize across the language barrier and hoist repeated computations out
+of a callback function.
+
+
+Do not use callbacks for performance-sensitive work: e.g. consider a
+numerical integration routine which takes a user-defined function to
+integrate over. It's a bad idea to call a user-defined Lua function from
+C code millions of times. The callback overhead will be absolutely
+detrimental for performance.
+
+
+It's considerably faster to write the numerical integration routine
+itself in Lua — the JIT compiler will be able to inline the
+user-defined function and optimize it together with its calling context,
+with very competitive performance.
+
+
+As a general guideline: use callbacks only when you must , because
+of existing C APIs. E.g. callback performance is irrelevant for a
+GUI application, which waits for user input most of the time, anyway.
+
+
+For new designs avoid push-style APIs (C function repeatedly
+calling a callback for each result). Instead use pull-style APIs
+(call a C function repeatedly to get a new result). Calls from Lua
+to C via the FFI are much faster than the other way round. Most well-designed
+libraries already use pull-style APIs (read/write, get/put).
+
+
+
C Library Namespaces
+
+A C library namespace is a special kind of object which allows
+access to the symbols contained in shared libraries or the default
+symbol namespace. The default
+ffi.C namespace is
+automatically created when the FFI library is loaded. C library
+namespaces for specific shared libraries may be created with the
+ffi.load() API
+function.
+
+
+Indexing a C library namespace object with a symbol name (a Lua
+string) automatically binds it to the library. First the symbol type
+is resolved — it must have been declared with
+ffi.cdef . Then the
+symbol address is resolved by searching for the symbol name in the
+associated shared libraries or the default symbol namespace. Finally,
+the resulting binding between the symbol name, the symbol type and its
+address is cached. Missing symbol declarations or nonexistent symbol
+names cause an error.
+
+
+This is what happens on a read access for the different kinds of
+symbols:
+
+
+
+External functions: a cdata object with the type of the function
+and its address is returned.
+
+External variables: the symbol address is dereferenced and the
+loaded value is converted to a Lua object
+and returned.
+
+Constant values (static const or enum
+constants): the constant is converted to a
+Lua object and returned.
+
+
+
+This is what happens on a write access :
+
+
+
+External variables: the value to be written is
+converted to the C type of the
+variable and then stored at the symbol address.
+
+Writing to constant variables or to any other symbol type causes
+an error, like any other attempted write to a constant location.
+
+
+
+C library namespaces themselves are garbage collected objects. If
+the last reference to the namespace object is gone, the garbage
+collector will eventually release the shared library reference and
+remove all memory associated with the namespace. Since this may
+trigger the removal of the shared library from the memory of the
+running process, it's generally not safe to use function
+cdata objects obtained from a library if the namespace object may be
+unreferenced.
+
+
+Performance notice: the JIT compiler specializes to the identity of
+namespace objects and to the strings used to index it. This
+effectively turns function cdata objects into constants. It's not
+useful and actually counter-productive to explicitly cache these
+function objects, e.g. local strlen = ffi.C.strlen . OTOH it
+is useful to cache the namespace itself, e.g. local C =
+ffi.C .
+
+
+
No Hand-holding!
+
+The FFI library has been designed as a low-level library . The
+goal is to interface with C code and C data types with a
+minimum of overhead. This means you can do anything you can do
+from C : access all memory, overwrite anything in memory, call
+machine code at any memory address and so on.
+
+
+The FFI library provides no memory safety , unlike regular Lua
+code. It will happily allow you to dereference a NULL
+pointer, to access arrays out of bounds or to misdeclare
+C functions. If you make a mistake, your application might crash,
+just like equivalent C code would.
+
+
+This behavior is inevitable, since the goal is to provide full
+interoperability with C code. Adding extra safety measures, like
+bounds checks, would be futile. There's no way to detect
+misdeclarations of C functions, since shared libraries only
+provide symbol names, but no type information. Likewise there's no way
+to infer the valid range of indexes for a returned pointer.
+
+
+Again: the FFI library is a low-level library. This implies it needs
+to be used with care, but its flexibility and performance often
+outweigh this concern. If you're a C or C++ developer, it'll be easy
+to apply your existing knowledge. OTOH writing code for the FFI
+library is not for the faint of heart and probably shouldn't be the
+first exercise for someone with little experience in Lua, C or C++.
+
+
+As a corollary of the above, the FFI library is not safe for use by
+untrusted Lua code . If you're sandboxing untrusted Lua code, you
+definitely don't want to give this code access to the FFI library or
+to any cdata object (except 64 bit integers or complex
+numbers). Any properly engineered Lua sandbox needs to provide safety
+wrappers for many of the standard Lua library functions —
+similar wrappers need to be written for high-level operations on FFI
+data types, too.
+
+
+
Current Status
+
+The initial release of the FFI library has some limitations and is
+missing some features. Most of these will be fixed in future releases.
+
+
+C language support is
+currently incomplete:
+
+
+C declarations are not passed through a C pre-processor,
+yet.
+The C parser is able to evaluate most constant expressions
+commonly found in C header files. However it doesn't handle the
+full range of C expression semantics and may fail for some
+obscure constructs.
+static const declarations only work for integer types
+up to 32 bits. Neither declaring string constants nor
+floating-point constants is supported.
+Packed struct bitfields that cross container boundaries
+are not implemented.
+Native vector types may be defined with the GCC mode or
+vector_size attribute. But no operations other than loading,
+storing and initializing them are supported, yet.
+The volatile type qualifier is currently ignored by
+compiled code.
+ffi.cdef silently
+ignores all re-declarations.
+
+
+The JIT compiler already handles a large subset of all FFI operations.
+It automatically falls back to the interpreter for unimplemented
+operations (you can check for this with the
+-jv command line option).
+The following operations are currently not compiled and may exhibit
+suboptimal performance, especially when used in inner loops:
+
+
+Array/struct copies and bulk initializations.
+Bitfield accesses and initializations.
+Vector operations.
+Table initializers.
+Initialization of nested struct /union types.
+Allocations of variable-length arrays or structs.
+Allocations of C types with a size > 64 bytes or an
+alignment > 8 bytes.
+Conversions from lightuserdata to void * .
+Pointer differences for element sizes that are not a power of
+two.
+Calls to C functions with aggregates passed or returned by
+value.
+Calls to ctype metamethods which are not plain functions.
+ctype __newindex tables and non-string lookups in ctype
+__index tables.
+tostring() for cdata types.
+Calls to the following ffi.* API
+functions: cdef , load , typeof ,
+metatype , gc , sizeof , alignof ,
+offsetof .
+
+
+Other missing features:
+
+
+Bit operations for 64 bit types.
+Arithmetic for complex numbers.
+Passing structs by value to vararg C functions.
+C++ exception interoperability
+does not extend to C functions called via the FFI, if the call is
+compiled.
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/ext_ffi_tutorial.html b/src/LuaJIT/doc/ext_ffi_tutorial.html
new file mode 100644
index 000000000..69b4e6c50
--- /dev/null
+++ b/src/LuaJIT/doc/ext_ffi_tutorial.html
@@ -0,0 +1,605 @@
+
FFI Tutorial
+
+
+
+
+This page is intended to give you an overview of the features of the FFI
+library by presenting a few use cases and guidelines.
+
+
+This page makes no attempt to explain all of the FFI library, though.
+You'll want to have a look at the ffi.* API
+function reference and the FFI
+semantics to learn more.
+
+
+
Loading the FFI Library
+
+The FFI library is built into LuaJIT by default, but it's not loaded
+and initialized by default. The suggested way to use the FFI library
+is to add the following to the start of every Lua file that needs one
+of its functions:
+
+
+local ffi = require("ffi")
+
+
+Please note this doesn't define an ffi variable in the table
+of globals — you really need to use the local variable. The
+require function ensures the library is only loaded once.
+
+
+Note: If you want to experiment with the FFI from the interactive prompt
+of the command line executable, omit the local , as it doesn't
+preserve local variables across lines.
+
+
+
Accessing Standard System Functions
+
+The following code explains how to access standard system functions.
+We slowly print two lines of dots by sleeping for 10 milliseconds
+after each dot:
+
+
+
+local ffi = require("ffi")
+ffi.cdef[[
+void Sleep(int ms);
+int poll(struct pollfd *fds, unsigned long nfds, int timeout);
+]]
+
+local sleep
+if ffi.os == "Windows" then
+ function sleep(s)
+ ffi.C.Sleep(s*1000)
+ end
+else
+ function sleep(s)
+ ffi.C.poll(nil, 0, s*1000)
+ end
+end
+
+for i=1,160 do
+ io.write("."); io.flush()
+ sleep(0.01)
+end
+io.write("\n")
+
+
+Here's the step-by-step explanation:
+
+
+① This defines the
+C library functions we're going to use. The part inside the
+double-brackets (in green) is just standard C syntax. You can
+usually get this info from the C header files or the
+documentation provided by each C library or C compiler.
+
+
+② The difficulty we're
+facing here is that there are different standards to choose from.
+Windows has a simple Sleep() function. On other systems there
+are a variety of functions available to achieve sub-second sleeps, but
+with no clear consensus. Thankfully poll() can be used for
+this task, too, and it's present on most non-Windows systems. The
+check for ffi.os makes sure we use the Windows-specific
+function only on Windows systems.
+
+
+③ Here we're wrapping the
+call to the C function in a Lua function. This isn't strictly
+necessary, but it's helpful to deal with system-specific issues only
+in one part of the code. The way we're wrapping it ensures the check
+for the OS is only done during initialization and not for every call.
+
+
+④ A more subtle point is
+that we defined our sleep() function (for the sake of this
+example) as taking the number of seconds, but accepting fractional
+seconds. Multiplying this by 1000 gets us milliseconds, but that still
+leaves it a Lua number, which is a floating-point value. Alas, the
+Sleep() function only accepts an integer value. Luckily for
+us, the FFI library automatically performs the conversion when calling
+the function (truncating the FP value towards zero, like in C).
+
+
+Some readers will notice that Sleep() is part of
+KERNEL32.DLL and is also a stdcall function. So how
+can this possibly work? The FFI library provides the ffi.C
+default C library namespace, which allows calling functions from
+the default set of libraries, like a C compiler would. Also, the
+FFI library automatically detects stdcall functions, so you
+don't need to declare them as such.
+
+
+⑤ The poll()
+function takes a couple more arguments we're not going to use. You can
+simply use nil to pass a NULL pointer and 0
+for the nfds parameter. Please note that the
+number 0 does not convert to a pointer value ,
+unlike in C++. You really have to pass pointers to pointer arguments
+and numbers to number arguments.
+
+
+The page on FFI semantics has all
+of the gory details about
+conversions between Lua
+objects and C types . For the most part you don't have to deal
+with this, as it's performed automatically and it's carefully designed
+to bridge the semantic differences between Lua and C.
+
+
+⑥ Now that we have defined
+our own sleep() function, we can just call it from plain Lua
+code. That wasn't so bad, huh? Turning these boring animated dots into
+a fascinating best-selling game is left as an exercise for the reader.
+:-)
+
+
+
Accessing the zlib Compression Library
+
+The following code shows how to access the zlib compression library from Lua code.
+We'll define two convenience wrapper functions that take a string and
+compress or uncompress it to another string:
+
+
+
+local ffi = require("ffi")
+ffi.cdef[[
+unsigned long compressBound(unsigned long sourceLen);
+int compress2(uint8_t *dest, unsigned long *destLen,
+ const uint8_t *source, unsigned long sourceLen, int level);
+int uncompress(uint8_t *dest, unsigned long *destLen,
+ const uint8_t *source, unsigned long sourceLen);
+]]
+local zlib = ffi.load(ffi.os == "Windows" and "zlib1" or "z")
+
+local function compress(txt)
+ local n = zlib.compressBound(#txt)
+ local buf = ffi.new("uint8_t[?]", n)
+ local buflen = ffi.new("unsigned long[1]", n)
+ local res = zlib.compress2(buf, buflen, txt, #txt, 9)
+ assert(res == 0)
+ return ffi.string(buf, buflen[0])
+end
+
+local function uncompress(comp, n)
+ local buf = ffi.new("uint8_t[?]", n)
+ local buflen = ffi.new("unsigned long[1]", n)
+ local res = zlib.uncompress(buf, buflen, comp, #comp)
+ assert(res == 0)
+ return ffi.string(buf, buflen[0])
+end
+
+-- Simple test code.
+local txt = string.rep("abcd", 1000)
+print("Uncompressed size: ", #txt)
+local c = compress(txt)
+print("Compressed size: ", #c)
+local txt2 = uncompress(c, #txt)
+assert(txt2 == txt)
+
+
+Here's the step-by-step explanation:
+
+
+① This defines some of the
+C functions provided by zlib. For the sake of this example, some
+type indirections have been reduced and it uses the pre-defined
+fixed-size integer types, while still adhering to the zlib API/ABI.
+
+
+② This loads the zlib shared
+library. On POSIX systems it's named libz.so and usually
+comes pre-installed. Since ffi.load() automatically adds any
+missing standard prefixes/suffixes, we can simply load the
+"z" library. On Windows it's named zlib1.dll and
+you'll have to download it first from the
+» zlib site . The check for
+ffi.os makes sure we pass the right name to
+ffi.load() .
+
+
+③ First, the maximum size of
+the compression buffer is obtained by calling the
+zlib.compressBound function with the length of the
+uncompressed string. The next line allocates a byte buffer of this
+size. The [?] in the type specification indicates a
+variable-length array (VLA). The actual number of elements of this
+array is given as the 2nd argument to ffi.new() .
+
+
+④ This may look strange at
+first, but have a look at the declaration of the compress2
+function from zlib: the destination length is defined as a pointer!
+This is because you pass in the maximum buffer size and get back the
+actual length that was used.
+
+
+In C you'd pass in the address of a local variable
+(&buflen ). But since there's no address-of operator in
+Lua, we'll just pass in a one-element array. Conveniently it can be
+initialized with the maximum buffer size in one step. Calling the
+actual zlib.compress2 function is then straightforward.
+
+
+⑤ We want to return the
+compressed data as a Lua string, so we'll use ffi.string() .
+It needs a pointer to the start of the data and the actual length. The
+length has been returned in the buflen array, so we'll just
+get it from there.
+
+
+Note that since the function returns now, the buf and
+buflen variables will eventually be garbage collected. This
+is fine, because ffi.string() has copied the contents to a
+newly created (interned) Lua string. If you plan to call this function
+lots of times, consider reusing the buffers and/or handing back the
+results in buffers instead of strings. This will reduce the overhead
+for garbage collection and string interning.
+
+
+⑥ The uncompress
+function does the exact opposite of the compress function.
+The compressed data doesn't include the size of the original string,
+so this needs to be passed in. Otherwise no surprises here.
+
+
+⑦ The code that makes use
+of the functions we just defined is just plain Lua code. It doesn't
+need to know anything about the LuaJIT FFI — the convenience
+wrapper functions completely hide it.
+
+
+One major advantage of the LuaJIT FFI is that you are now able to
+write those wrappers in Lua . And at a fraction of the time it
+would cost you to create an extra C module using the Lua/C API.
+Many of the simpler C functions can probably be used directly
+from your Lua code, without any wrappers.
+
+
+Side note: the zlib API uses the long type for passing
+lengths and sizes around. But all those zlib functions actually only
+deal with 32 bit values. This is an unfortunate choice for a
+public API, but may be explained by zlib's history — we'll just
+have to deal with it.
+
+
+First, you should know that a long is a 64 bit type e.g.
+on POSIX/x64 systems, but a 32 bit type on Windows/x64 and on
+32 bit systems. Thus a long result can be either a plain
+Lua number or a boxed 64 bit integer cdata object, depending on
+the target system.
+
+
+Ok, so the ffi.* functions generally accept cdata objects
+wherever you'd want to use a number. That's why we get away with
+passing n to ffi.string() above. But other Lua
+library functions or modules don't know how to deal with this. So for
+maximum portability one needs to use tonumber() on returned
+long results before passing them on. Otherwise the
+application might work on some systems, but would fail in a POSIX/x64
+environment.
+
+
+
Defining Metamethods for a C Type
+
+The following code explains how to define metamethods for a C type.
+We define a simple point type and add some operations to it:
+
+
+
+local ffi = require("ffi")
+ffi.cdef[[
+typedef struct { double x, y; } point_t;
+]]
+
+local point
+local mt = {
+ __add = function(a, b) return point(a.x+b.x, a.y+b.y) end,
+ __len = function(a) return math.sqrt(a.x*a.x + a.y*a.y) end,
+ __index = {
+ area = function(a) return a.x*a.x + a.y*a.y end,
+ },
+}
+point = ffi.metatype("point_t", mt)
+
+local a = point(3, 4)
+print(a.x, a.y) --> 3 4
+print(#a) --> 5
+print(a:area()) --> 25
+local b = a + point(0.5, 8)
+print(#b) --> 12.5
+
+
+Here's the step-by-step explanation:
+
+
+① This defines the C type for a
+two-dimensional point object.
+
+
+② We have to declare the variable
+holding the point constructor first, because it's used inside of a
+metamethod.
+
+
+③ Let's define an __add
+metamethod which adds the coordinates of two points and creates a new
+point object. For simplicity, this function assumes that both arguments
+are points. But it could be any mix of objects, if at least one operand
+is of the required type (e.g. adding a point plus a number or vice
+versa). Our __len metamethod returns the distance of a point to
+the origin.
+
+
+④ If we run out of operators, we can
+define named methods, too. Here the __index table defines an
+area function. For custom indexing needs, one might want to
+define __index and __newindex functions instead.
+
+
+⑤ This associates the metamethods with
+our C type. This only needs to be done once. For convenience, a
+constructor is returned by
+ffi.metatype() .
+We're not required to use it, though. The original C type can still
+be used e.g. to create an array of points. The metamethods automatically
+apply to any and all uses of this type.
+
+
+Please note that the association with a metatable is permanent and
+the metatable must not be modified afterwards! Ditto for the
+__index table.
+
+
+⑥ Here are some simple usage examples
+for the point type and their expected results. The pre-defined
+operations (such as a.x ) can be freely mixed with the newly
+defined metamethods. Note that area is a method and must be
+called with the Lua syntax for methods: a:area() , not
+a.area() .
+
+
+The C type metamethod mechanism is most useful when used in
+conjunction with C libraries that are written in an object-oriented
+style. Creators return a pointer to a new instance and methods take an
+instance pointer as the first argument. Sometimes you can just point
+__index to the library namespace and __gc to the
+destructor and you're done. But often enough you'll want to add
+convenience wrappers, e.g. to return actual Lua strings or when
+returning multiple values.
+
+
+Some C libraries only declare instance pointers as an opaque
+void * type. In this case you can use a fake type for all
+declarations, e.g. a pointer to a named (incomplete) struct will do:
+typedef struct foo_type *foo_handle . The C side doesn't
+know what you declare with the LuaJIT FFI, but as long as the underlying
+types are compatible, everything still works.
+
+
+
Translating C Idioms
+
+Here's a list of common C idioms and their translation to the
+LuaJIT FFI:
+
+
+
+Idiom: Pointer dereference
+  C:   int *p;  x = *p;  *p = y;
+  Lua: x = p[0]  p[0] = y
+
+Idiom: Pointer indexing
+  C:   int i, *p;  x = p[i];  p[i+1] = y;
+  Lua: x = p[i]  p[i+1] = y
+
+Idiom: Array indexing
+  C:   int i, a[];  x = a[i];  a[i+1] = y;
+  Lua: x = a[i]  a[i+1] = y
+
+Idiom: struct/union dereference
+  C:   struct foo s;  x = s.field;  s.field = y;
+  Lua: x = s.field  s.field = y
+
+Idiom: struct/union pointer dereference
+  C:   struct foo *sp;  x = sp->field;  sp->field = y;
+  Lua: x = sp.field  sp.field = y
+
+Idiom: Pointer arithmetic
+  C:   int i, *p;  x = p + i;  y = p - i;
+  Lua: x = p + i  y = p - i
+
+Idiom: Pointer difference
+  C:   int *p1, *p2;  x = p1 - p2;
+  Lua: x = p1 - p2
+
+Idiom: Array element pointer
+  C:   int i, a[];  x = &a[i];
+  Lua: x = a+i
+
+Idiom: Cast pointer to address
+  C:   int *p;  x = (intptr_t)p;
+  Lua: x = tonumber(ffi.cast("intptr_t", p))
+
+Idiom: Functions with outargs
+  C:   void foo(int *inoutlen);  int len = x;  foo(&len);  y = len;
+  Lua: local len = ffi.new("int[1]", x)  foo(len)  y = len[0]
+
+Idiom: Vararg conversions
+  C:   int printf(char *fmt, ...);  printf("%g", 1.0);  printf("%d", 1);
+  Lua: printf("%g", 1)  printf("%d", ffi.new("int", 1))
+
+
+
To Cache or Not to Cache
+
+It's a common Lua idiom to cache library functions in local variables
+or upvalues, e.g.:
+
+
+local byte, char = string.byte, string.char
+local function foo(x)
+ return char(byte(x)+1)
+end
+
+
+This replaces several hash-table lookups with a (faster) direct use of
+a local or an upvalue. This is less important with LuaJIT, since the
+JIT compiler optimizes hash-table lookups a lot and is even able to
+hoist most of them out of the inner loops. It can't eliminate
+all of them, though, and it saves some typing for often-used
+functions. So there's still a place for this, even with LuaJIT.
+
+
+The situation is a bit different with C function calls via the
+FFI library. The JIT compiler has special logic to eliminate all
+of the lookup overhead for functions resolved from a
+C library namespace !
+Thus it's not helpful and actually counter-productive to cache
+individual C functions like this:
+
+
+local funca, funcb = ffi.C.funca, ffi.C.funcb -- Not helpful!
+local function foo(x, n)
+  for i=1,n do funcb(funca(x, i), 1) end
+end
+
+
+This turns them into indirect calls and generates bigger and slower
+machine code. Instead you'll want to cache the namespace itself and
+rely on the JIT compiler to eliminate the lookups:
+
+
+local C = ffi.C -- Instead use this!
+local function foo(x, n)
+  for i=1,n do C.funcb(C.funca(x, i), 1) end
+end
+
+
+This generates both shorter and faster code. So don't cache
+C functions , but do cache namespaces! Most often the
+namespace is already in a local variable at an outer scope, e.g. from
+local lib = ffi.load(...) . Note that copying
+it to a local variable in the function scope is unnecessary.
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/ext_jit.html b/src/LuaJIT/doc/ext_jit.html
new file mode 100644
index 000000000..9985cd049
--- /dev/null
+++ b/src/LuaJIT/doc/ext_jit.html
@@ -0,0 +1,197 @@
+
+
+
+jit.* Library
+
+
+
+
+
+
+
+
+
+
+
jit.* Library
+
+
+
+
+The functions in this built-in module control the behavior of the JIT
+compiler engine. Note that JIT-compilation is fully automatic —
+you probably won't need to use any of the following functions unless
+you have special needs.
+
+
+
jit.on()
+jit.off()
+
+Turns the whole JIT compiler on (default) or off.
+
+
+These functions are typically used with the command line options
+-j on or -j off .
+
+
+
jit.flush()
+
+Flushes the whole cache of compiled code.
+
+
+
jit.on(func|true [,true|false])
+jit.off(func|true [,true|false])
+jit.flush(func|true [,true|false])
+
+jit.on enables JIT compilation for a Lua function (this is
+the default).
+
+
+jit.off disables JIT compilation for a Lua function and
+flushes any already compiled code from the code cache.
+
+
+jit.flush flushes the code, but doesn't affect the
+enable/disable status.
+
+
+The current function, i.e. the Lua function calling this library
+function, can also be specified by passing true as the first
+argument.
+
+
+If the second argument is true , JIT compilation is also
+enabled, disabled or flushed recursively for all sub-functions of a
+function. With false only the sub-functions are affected.
+
+
+The jit.on and jit.off functions only set a flag
+which is checked when the function is about to be compiled. They do
+not trigger immediate compilation.
+
+
+Typical usage is jit.off(true, true) in the main chunk
+of a module to turn off JIT compilation for the whole module for
+debugging purposes.
+
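+For example, a module might start like this (a minimal sketch with a
+made-up module):
+
+-- mymodule.lua
+if jit then jit.off(true, true) end  -- interpret this module while debugging
+
+local M = {}
+function M.double(x) return x * 2 end
+return M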
+
+
jit.flush(tr)
+
+Flushes the root trace, specified by its number, and all of its side
+traces from the cache. The code for the trace will be retained as long
+as there are any other traces which link to it.
+
+
+
status, ... = jit.status()
+
+Returns the current status of the JIT compiler. The first result is
+either true or false if the JIT compiler is turned
+on or off. The remaining results are strings for CPU-specific features
+and enabled optimizations.
+
+
+
jit.version
+
+Contains the LuaJIT version string.
+
+
+
jit.version_num
+
+Contains the version number of the LuaJIT core. Version xx.yy.zz
+is represented by the decimal number xxyyzz.
+
+
+
jit.os
+
+Contains the target OS name:
+"Windows", "Linux", "OSX", "BSD", "POSIX" or "Other".
+
+
+
jit.arch
+
+Contains the target architecture name:
+"x86", "x64" or "ppcspe".
+
+
+
jit.opt.* — JIT compiler optimization control
+
+This sub-module provides the backend for the -O command line
+option.
+
+
+You can also use it programmatically, e.g.:
+
+
+jit.opt.start(2) -- same as -O2
+jit.opt.start("-dce")
+jit.opt.start("hotloop=10", "hotexit=2")
+
+
+Unlike in LuaJIT 1.x, the module is built-in and
+optimization is turned on by default!
+It's no longer necessary to run require("jit.opt").start() ,
+which was one of the ways to enable optimization.
+
+
+
jit.util.* — JIT compiler introspection
+
+This sub-module holds functions to introspect the bytecode, generated
+traces, the IR and the generated machine code. The functionality
+provided by this module is still in flux and therefore undocumented.
+
+
+The debug modules -jbc , -jv and -jdump make
+extensive use of these functions. Please check out their source code,
+if you want to know more.
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/extensions.html b/src/LuaJIT/doc/extensions.html
new file mode 100644
index 000000000..e03d71dc4
--- /dev/null
+++ b/src/LuaJIT/doc/extensions.html
@@ -0,0 +1,338 @@
+
+
+
+Extensions
+
+
+
+
+
+
+
+
+
+
+
+
Extensions
+
+
+
+
+LuaJIT is fully upwards-compatible with Lua 5.1. It supports all
+» standard Lua
+library functions and the full set of
+» Lua/C API
+functions .
+
+
+LuaJIT is also fully ABI-compatible to Lua 5.1 at the linker/dynamic
+loader level. This means you can compile a C module against the
+standard Lua headers and load the same shared library from either Lua
+or LuaJIT.
+
+
+LuaJIT extends the standard Lua VM with new functionality and adds
+several extension modules. Please note that this page is only about
+functional enhancements and not about performance enhancements,
+such as the optimized VM, the faster interpreter or the JIT compiler.
+
+
+
Extensions Modules
+
+LuaJIT comes with several built-in extension modules:
+
+
+
bit.* — Bitwise operations
+
+LuaJIT supports all bitwise operations as defined by
+» Lua BitOp :
+
+
+bit.tobit bit.tohex bit.bnot bit.band bit.bor bit.bxor
+bit.lshift bit.rshift bit.arshift bit.rol bit.ror bit.bswap
+
+
+This module is a LuaJIT built-in — you don't need to download or
+install Lua BitOp. The Lua BitOp site has full documentation for all
+» Lua BitOp API functions .
+
+
+Please make sure to require the module before using any of
+its functions:
+
+
+local bit = require("bit")
+
+
+An already installed Lua BitOp module is ignored by LuaJIT.
+This way you can use bit operations from both Lua and LuaJIT on a
+shared installation.
+
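+A short usage sketch (results shown in comments):
+
+local bit = require("bit")
+print(bit.tohex(bit.band(0xffffffff, 0x12345678)))  --> 12345678
+print(bit.lshift(1, 4), bit.bxor(0xff, 0x0f))       --> 16      240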
+
+
ffi.* — FFI library
+
+The FFI library allows calling external
+C functions and the use of C data structures from pure Lua
+code.
+
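+A minimal sketch of what this looks like:
+
+local ffi = require("ffi")
+ffi.cdef[[
+int printf(const char *fmt, ...);
+]]
+ffi.C.printf("Hello %s, %d\n", "world", ffi.new("int", 42))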
+
+
jit.* — JIT compiler control
+
+The functions in this module
+control the behavior of the JIT compiler engine .
+
+
+
C API extensions
+
+LuaJIT adds some
+extra functions to the Lua/C API .
+
+
+
Enhanced Standard Library Functions
+
+
xpcall(f, err [,args...]) passes arguments
+
+Unlike the standard implementation in Lua 5.1, xpcall()
+passes any arguments after the error function to the function
+which is called in a protected context.
+
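+For example (the extra arguments 1 and 2 are passed on to the
+protected function):
+
+local function add(a, b) return a + b end
+local ok, sum = xpcall(add, debug.traceback, 1, 2)
+print(ok, sum)  --> true    3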
+
+
loadfile() etc. handle UTF-8 source code
+
+Non-ASCII characters are handled transparently by the Lua source code parser.
+This allows the use of UTF-8 characters in identifiers and strings.
+A UTF-8 BOM is skipped at the start of the source code.
+
+
+
tostring() etc. canonicalize NaN and ±Inf
+
+All number-to-string conversions consistently convert non-finite numbers
+to the same strings on all platforms. NaN results in "nan" ,
+positive infinity results in "inf" and negative infinity results
+in "-inf" .
+
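+For example:
+
+print(tostring(0/0), tostring(1/0), tostring(-1/0))  --> nan  inf  -inf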
+
+
string.dump(f [,strip]) generates portable bytecode
+
+An extra argument has been added to string.dump() . If set to
+true , 'stripped' bytecode without debug information is
+generated. This speeds up later bytecode loading and reduces memory
+usage. See also the
+-b command line option .
+
+
+The generated bytecode is portable and can be loaded on any architecture
+that LuaJIT supports, independent of word size or endianness. However, the
+bytecode compatibility versions must match. Bytecode stays compatible
+for dot releases (x.y.0 → x.y.1), but may change with major or
+minor releases (2.0 → 2.1) or between beta releases. Foreign
+bytecode (e.g. from Lua 5.1) is incompatible and cannot be loaded.
+
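+A short sketch:
+
+local f = loadstring("return 2 + 3")
+local bc = string.dump(f, true)   -- stripped, portable bytecode
+print(loadstring(bc)())           -- raw bytecode is auto-detected --> 5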
+
+
Enhanced PRNG for math.random()
+
+LuaJIT uses a Tausworthe PRNG with period 2^223 to implement
+math.random() and math.randomseed() . The quality of
+the PRNG results is much superior compared to the standard Lua
+implementation which uses the platform-specific ANSI rand().
+
+
+The PRNG generates the same sequences from the same seeds on all
+platforms and makes use of all bits in the seed argument.
+math.random() without arguments generates 52 pseudo-random bits
+for every call. The result is uniformly distributed between 0 and 1.
+It's correctly scaled up and rounded for math.random(n [,m]) to
+preserve uniformity.
+
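+For example:
+
+math.randomseed(42)
+print(math.random())       -- uniform float in [0, 1)
+print(math.random(6))      -- uniform integer in 1..6
+print(math.random(10, 20)) -- uniform integer in 10..20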
+
+
io.* functions handle 64 bit file offsets
+
+The file I/O functions in the standard io.* library handle
+64 bit file offsets. In particular this means it's possible
+to open files larger than 2 Gigabytes and to reposition or obtain
+the current file position for offsets beyond 2 GB
+(fp:seek() method).
+
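+A short sketch, assuming a (hypothetical) file larger than 2 GB:
+
+local fp = assert(io.open("huge.bin", "rb"))
+fp:seek("set", 3 * 2^30)   -- reposition beyond the 2 GB mark
+print(fp:seek())           -- current offset, e.g. 3221225472
+fp:close()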
+
+
+
+debug.getinfo() and lua_getinfo() also return information
+about invoked metamethods. The namewhat field is set to
+"metamethod" and the name field has the name of
+the corresponding metamethod (e.g. "__index" ).
+
+
+
Fully Resumable VM
+
+The LuaJIT 2.x VM is fully resumable. This means you can yield from a
+coroutine even across contexts, where this would not be possible with
+the standard Lua 5.1 VM: e.g. you can yield across pcall()
+and xpcall() , across iterators and across metamethods.
+
+
+Note however that LuaJIT 2.x doesn't use
+» Coco anymore. This means the
+overhead for creating coroutines is much smaller and no extra
+C stacks need to be allocated. OTOH you can no longer yield
+across arbitrary C functions. Keep this in mind when
+upgrading from LuaJIT 1.x.
+
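+For example, yielding across pcall() works as expected:
+
+local co = coroutine.create(function()
+  local ok = pcall(function() coroutine.yield("from inside pcall") end)
+  return "pcall returned " .. tostring(ok)
+end)
+print(coroutine.resume(co))  --> true    from inside pcall
+print(coroutine.resume(co))  --> true    pcall returned true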
+
+
C++ Exception Interoperability
+
+LuaJIT has built-in support for interoperating with C++ exceptions.
+The available range of features depends on the target platform and
+the toolchain used to compile LuaJIT:
+
+
+
+Platform                            Compiler          Interoperability
+
+POSIX/x64, DWARF2 unwinding         GCC 4.3+          Full
+Other platforms, DWARF2 unwinding   GCC               Limited
+Windows/x64                         MSVC or WinSDK    Full
+Windows/x86                         Any               No
+Other platforms                     Other compilers   No
+
+
+
+Full interoperability means:
+
+
+C++ exceptions can be caught on the Lua side with pcall() ,
+lua_pcall() etc.
+C++ exceptions will be converted to the generic Lua error
+"C++ exception" , unless you use the
+C call wrapper feature.
+It's safe to throw C++ exceptions across non-protected Lua frames
+on the C stack. The contents of the C++ exception object
+pass through unmodified.
+Lua errors can be caught on the C++ side with catch(...) .
+The corresponding Lua error message can be retrieved from the Lua stack.
+Throwing Lua errors across C++ frames is safe. C++ destructors
+will be called.
+
+
+Limited interoperability means:
+
+
+C++ exceptions can be caught on the Lua side with pcall() ,
+lua_pcall() etc.
+C++ exceptions will be converted to the generic Lua error
+"C++ exception" , unless you use the
+C call wrapper feature.
+C++ exceptions will be caught by non-protected Lua frames and
+are rethrown as a generic Lua error. The C++ exception object will
+be destroyed.
+Lua errors cannot be caught on the C++ side.
+Throwing Lua errors across C++ frames will not call
+C++ destructors.
+
+
+
+No interoperability means:
+
+
+It's not safe to throw C++ exceptions across Lua frames.
+C++ exceptions cannot be caught on the Lua side.
+Lua errors cannot be caught on the C++ side.
+Throwing Lua errors across C++ frames will not call
+C++ destructors.
+Additionally, on Windows/x86 with SEH-based C++ exceptions:
+it's not safe to throw a Lua error across any frames containing
+a C++ function with any try/catch construct or using variables with
+(implicit) destructors. This also applies to any functions which may be
+inlined in such a function. It doesn't matter whether lua_error()
+is called inside or outside of a try/catch or whether any object actually
+needs to be destroyed: the SEH chain is corrupted and this will eventually
+lead to the termination of the process.
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/faq.html b/src/LuaJIT/doc/faq.html
new file mode 100644
index 000000000..e96fdd1ff
--- /dev/null
+++ b/src/LuaJIT/doc/faq.html
@@ -0,0 +1,180 @@
+
+
+
+Frequently Asked Questions (FAQ)
+
+
+
+
+
+
+
+
+
+
+
+
Frequently Asked Questions (FAQ)
+
+
+
+
+Q: Where can I learn more about LuaJIT and Lua?
+
+
+
+
+
+Q: Where can I learn more about the compiler technology used by LuaJIT?
+
+I'm planning to write more documentation about the internals of LuaJIT.
+In the meantime, please use the following Google Scholar searches
+to find relevant papers:
+Search for: » Trace Compiler
+Search for: » JIT Compiler
+Search for: » Dynamic Language Optimizations
+Search for: » SSA Form
+Search for: » Linear Scan Register Allocation
+Here is a list of the » innovative features in LuaJIT .
+And, you know, reading the source is of course the only way to enlightenment. :-)
+
+
+
+
+Q: Why do I get this error: "attempt to index global 'arg' (a nil value)"?
+Q: My vararg functions fail after switching to LuaJIT!
+LuaJIT is compatible to the Lua 5.1 language standard. It doesn't
+support the implicit arg parameter for old-style vararg
+functions from Lua 5.0. Please convert your code to the
+» Lua 5.1
+vararg syntax .
+
+
+
+Q: Why do I get this error: "bad FPU precision"?
+Q: I get weird behavior after initializing Direct3D.
+Q: Some FPU operations crash after I load a Delphi DLL.
+
+
+
+DirectX/Direct3D (up to version 9) sets the x87 FPU to single-precision
+mode by default. This violates the Windows ABI and interferes with the
+operation of many programs — LuaJIT is affected, too. Please make
+sure you always use the D3DCREATE_FPU_PRESERVE flag when
+initializing Direct3D.
+
+Direct3D version 10 or higher does not show this behavior anymore.
+Consider testing your application with older versions, too.
+
+Similarly, the Borland/Delphi runtime modifies the FPU control word and
+enables FP exceptions. Of course this violates the Windows ABI, too.
+Please check the Delphi docs for the Set8087CW method.
+
+
+
+
+Q: Sometimes Ctrl-C fails to stop my Lua program. Why?
+The interrupt signal handler sets a Lua debug hook. But this is
+currently ignored by compiled code (this will eventually be fixed). If
+your program is running in a tight loop and never falls back to the
+interpreter, the debug hook never runs and can't throw the
+"interrupted!" error. In the meantime you have to press Ctrl-C
+twice to stop your program. That's similar to when it's stuck
+running inside a C function under the Lua interpreter.
+
+
+
+Q: Why doesn't my favorite power-patch for Lua apply against LuaJIT?
+Because it's a completely redesigned VM and has very little code
+in common with Lua anymore. Also, if the patch introduces changes to
+the Lua semantics, these would need to be reflected everywhere in the
+VM, from the interpreter up to all stages of the compiler. Please
+use only standard Lua language constructs. For many common needs you
+can use source transformations or use wrapper or proxy functions.
+The compiler will happily optimize away such indirections.
+
+
+
+Q: Lua runs everywhere. Why doesn't LuaJIT support my CPU?
+Because it's a compiler — it needs to generate native
+machine code. This means the code generator must be ported to each
+architecture. And the fast interpreter is written in assembler and
+must be ported, too. This is quite an undertaking.
+The install documentation shows the supported
+architectures. Other architectures will follow based on sufficient user
+demand and/or sponsoring.
+
+
+
+Q: When will feature X be added? When will the next version be released?
+When it's ready.
+C'mon, it's open source — I'm doing it on my own time and you're
+getting it for free. You can either contribute a patch or sponsor
+the development of certain features, if they are important to you.
+
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/img/contact.png b/src/LuaJIT/doc/img/contact.png
new file mode 100644
index 000000000..9c73dc594
Binary files /dev/null and b/src/LuaJIT/doc/img/contact.png differ
diff --git a/src/LuaJIT/doc/install.html b/src/LuaJIT/doc/install.html
new file mode 100644
index 000000000..501e3e569
--- /dev/null
+++ b/src/LuaJIT/doc/install.html
@@ -0,0 +1,567 @@
+
+
+
+Installation
+
+
+
+
+
+
+
+
+
+
+
+
Installation
+
+
+
+
+LuaJIT is only distributed as a source package. This page explains
+how to build and install LuaJIT with different operating systems
+and C compilers.
+
+
+For the impatient (on POSIX systems):
+
+
+make && sudo make install
+
+
+LuaJIT currently builds out-of-the box on most systems.
+Here's the compatibility matrix for the supported combinations of
+operating systems, CPUs and compilers:
+
+
+
+
Configuring LuaJIT
+
+The standard configuration should work fine for most installations.
+Usually there is no need to tweak the settings. The following files
+hold all user-configurable settings:
+
+
+src/luaconf.h sets some configuration variables.
+Makefile has settings for installing LuaJIT (POSIX
+only).
+src/Makefile has settings for compiling LuaJIT
+under POSIX, MinGW or Cygwin.
+src/msvcbuild.bat has settings for compiling LuaJIT with
+MSVC or WinSDK.
+
+
+Please read the instructions given in these files, before changing
+any settings.
+
+
+
POSIX Systems (Linux, OSX, *BSD etc.)
+
Prerequisites
+
+Depending on your distribution, you may need to install a package for
+GCC, the development headers and/or a complete SDK. E.g. on a current
+Debian/Ubuntu, install libc6-dev with the package manager.
+
+
+Download the current source package of LuaJIT (pick the .tar.gz),
+if you haven't already done so. Move it to a directory of your choice,
+open a terminal window and change to this directory. Now unpack the archive
+and change to the newly created directory:
+
+
+tar zxf LuaJIT-2.0.0-beta10.tar.gz
+cd LuaJIT-2.0.0-beta10
+
Building LuaJIT
+
+The supplied Makefiles try to auto-detect the settings needed for your
+operating system and your compiler. They need to be run with GNU Make,
+which is probably the default on your system, anyway. Simply run:
+
+
+make
+
+
+This always builds a native x86, x64 or PPC binary, depending on the host OS
+you're running this command on. Check the section on
+cross-compilation for more options.
+
+
+By default, modules are only searched under the prefix /usr/local .
+You can add an extra prefix to the search paths by appending the
+PREFIX option, e.g.:
+
+
+make PREFIX=/home/myself/lj2
+
+
+Note for OSX: if the MACOSX_DEPLOYMENT_TARGET environment
+variable is not set, then it's forced to 10.4 .
+
+
Installing LuaJIT
+
+The top-level Makefile installs LuaJIT by default under
+/usr/local , i.e. the executable ends up in
+/usr/local/bin and so on. You need root privileges
+to write to this path. So, assuming sudo is installed on your system,
+run the following command and enter your sudo password:
+
+
+sudo make install
+
+
+Otherwise specify the directory prefix as an absolute path, e.g.:
+
+
+make install PREFIX=/home/myself/lj2
+
+
+Obviously the prefixes given during build and installation need to be the same.
+
+
+Note: to avoid overwriting a previous version, the beta test releases
+only install the LuaJIT executable under the versioned name (i.e.
+luajit-2.0.0-beta10 ). You probably want to create a symlink
+for convenience, with a command like this:
+
+
+sudo ln -sf luajit-2.0.0-beta10 /usr/local/bin/luajit
+
+
+
Windows Systems
+
Prerequisites
+
+Either install one of the open source SDKs
+(» MinGW or
+» Cygwin ), which come with a modified
+GCC plus the required development headers.
+
+
+Or install Microsoft's Visual C++ (MSVC). The freely downloadable
+» Express Edition
+works just fine, but only contains an x86 compiler.
+
+
+The freely downloadable
+» Windows SDK
+only comes with command line tools, but this is all you need to build LuaJIT.
+It contains x86 and x64 compilers.
+
+
+Next, download the source package and unpack it using an archive manager
+(e.g. the Windows Explorer) to a directory of your choice.
+
+
Building with MSVC
+
+Open a "Visual Studio .NET Command Prompt", cd to the
+directory where you've unpacked the sources and run these commands:
+
+
+cd src
+msvcbuild
+
+
+Then follow the installation instructions below.
+
+
Building with the Windows SDK
+
+Open a "Windows SDK Command Shell" and select the x86 compiler:
+
+
+setenv /release /x86
+
+
+Or select the x64 compiler:
+
+
+setenv /release /x64
+
+
+Then cd to the directory where you've unpacked the sources
+and run these commands:
+
+
+cd src
+msvcbuild
+
+
+Then follow the installation instructions below.
+
+
Building with MinGW or Cygwin
+
+Open a command prompt window and make sure the MinGW or Cygwin programs
+are in your path. Then cd to the directory where
+you've unpacked the sources and run this command for MinGW:
+
+
+mingw32-make
+
+
+Or this command for Cygwin:
+
+
+make
+
+
+Then follow the installation instructions below.
+
+
Installing LuaJIT
+
+Copy luajit.exe and lua51.dll (built in the src
+directory) to a newly created directory (any location is ok).
+Add lua and lua\jit directories below it and copy
+all Lua files from the lib directory of the distribution
+to the latter directory.
+
+
+There are no hardcoded
+absolute path names — all modules are loaded relative to the
+directory where luajit.exe is installed
+(see src/luaconf.h ).
+
+
+
Cross-compiling LuaJIT
+
+The build system has limited support for cross-compilation. For details
+check the comments in src/Makefile . Here are some popular examples:
+
+
+You can cross-compile to a 32 bit binary on a multilib x64 OS by
+installing the multilib development packages (e.g. libc6-dev-i386
+on Debian/Ubuntu) and running:
+
+
+make CC="gcc -m32"
+
+
+You can cross-compile for a Windows target on Debian/Ubuntu by
+installing the mingw32 package and running:
+
+
+make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows
+
+
+You can cross-compile for an ARM target on an x86 or x64 host
+system using a standard GNU cross-compile toolchain (Binutils, GCC,
+EGLIBC). The CROSS prefix may vary depending on the
+--target of the toolchain:
+
+
+make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabi-
+
+
+You can cross-compile for Android (ARM) using the » Android NDK .
+The environment variables need to match the install locations and the
+desired target platform. E.g. Android 2.2 corresponds to ABI level 8:
+
+
+NDK=/opt/android/ndk
+NDKABI=8
+NDKVER=$NDK/toolchains/arm-linux-androideabi-4.4.3
+NDKP=$NDKVER/prebuilt/linux-x86/bin/arm-linux-androideabi-
+NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-arm"
+make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"
+
+
+You can cross-compile for iOS 3.0+ (iPhone/iPad) using the » iOS SDK .
+The environment variables need to match the iOS SDK version:
+
+
+Note: the JIT compiler is disabled for iOS , because regular iOS Apps
+are not allowed to generate code at runtime. You'll only get the performance
+of the LuaJIT interpreter on iOS. This is still faster than plain Lua, but
+much slower than the JIT compiler. Please complain to Apple, not me.
+Or use Android. :-p
+
+
+ISDK=/Developer/Platforms/iPhoneOS.platform/Developer
+ISDKVER=iPhoneOS4.3.sdk
+ISDKP=$ISDK/usr/bin/
+ISDKF="-arch armv6 -isysroot $ISDK/SDKs/$ISDKVER"
+make HOST_CC="gcc -m32 -arch i386" CROSS=$ISDKP TARGET_FLAGS="$ISDKF" \
+ TARGET_SYS=iOS
+
+
+You can cross-compile for a PPC target or a
+PPC/e500v2 target on x86 or x64 host systems using a standard
+GNU cross-compile toolchain (Binutils, GCC, EGLIBC).
+The CROSS prefix may vary depending on the --target
+of the toolchain:
+
+
+# PPC
+make HOST_CC="gcc -m32" CROSS=powerpc-linux-gnu-
+
+
+# PPC/e500v2
+make HOST_CC="gcc -m32" CROSS=powerpc-e500v2-linux-gnuspe-
+
+
+You can cross-compile for a big-endian or little-endian
+MIPS target on x86 or x64 host systems using a standard
+GNU cross-compile toolchain (Binutils, GCC, EGLIBC).
+The CROSS prefix may vary depending on the --target
+of the toolchain:
+
+
+# MIPS big-endian
+make HOST_CC="gcc -m32" CROSS=mips-linux-
+
+
+# MIPS little-endian
+make HOST_CC="gcc -m32" CROSS=mipsel-linux-
+
+
+Whenever the host OS and the target OS differ , you need to specify
+TARGET_SYS or you'll get assembler or linker errors. E.g. if
+you're compiling on a Windows or OSX host for embedded Linux or Android,
+you need to add TARGET_SYS=Linux to the examples above. For a
+minimal target OS, you may need to disable the built-in allocator in
+src/Makefile and use TARGET_SYS=Other .
+
+
+
Embedding LuaJIT
+
+LuaJIT is API-compatible with Lua 5.1. If you've already embedded Lua
+into your application, you probably don't need to do anything to switch
+to LuaJIT, except link with a different library:
+
+
+
Additional hints for initializing LuaJIT using the C API functions:
+
+Here's a
+» simple example
+for embedding Lua or LuaJIT into your application.
+Make sure you use luaL_newstate . Avoid using
+lua_newstate , since this uses the (slower) default memory
+allocator from your system (no support for this on x64).
+Make sure you use luaL_openlibs and not the old Lua 5.0 style
+of calling luaopen_base etc. directly.
+To change or extend the list of standard libraries to load, copy
+src/lib_init.c to your project and modify it accordingly.
+Make sure the jit library is loaded or the JIT compiler
+will not be activated.
+The bit.* module for bitwise operations
+is already built-in. There's no need to statically link
+» Lua BitOp to your application.
+
+
+
Hints for Distribution Maintainers
+
+The LuaJIT build system has extra provisions for the needs of most
+POSIX-based distributions. If you're a package maintainer for
+a distribution, please make use of these features and
+avoid patching, subverting, autotoolizing or messing up the build system
+in unspeakable ways.
+
+
+There should be absolutely no need to patch luaconf.h or any
+of the Makefiles. And please do not hand-pick files for your packages —
+simply use whatever make install creates. There's a reason
+for all of the files and directories it creates.
+
+
+The build system uses GNU make and auto-detects most settings based on
+the host you're building it on. This should work fine for native builds,
+even when sandboxed. You may need to pass some of the following flags to
+both the make and the make install command lines
+for a regular distribution build:
+
+
+PREFIX overrides the installation path and should usually
+be set to /usr . Setting this also changes the module paths and
+the -rpath of the shared library.
+DESTDIR is an absolute path which allows you to install
+to a shadow tree instead of the root tree of the build system.
+Have a look at the top-level Makefile and src/Makefile
+for additional variables to tweak. The following variables may be
+overridden, but it's not recommended, except for special needs
+like cross-builds:
+BUILDMODE, CC, HOST_CC, STATIC_CC, DYNAMIC_CC, CFLAGS, HOST_CFLAGS,
+TARGET_CFLAGS, LDFLAGS, HOST_LDFLAGS, TARGET_LDFLAGS, TARGET_SHLDFLAGS,
+TARGET_FLAGS, LIBS, HOST_LIBS, TARGET_LIBS, CROSS, HOST_SYS, TARGET_SYS
+
+
+
+The build system has a special target for an amalgamated build, i.e.
+make amalg . This compiles the LuaJIT core as one huge C file
+and allows GCC to generate faster and shorter code. Alas, this requires
+lots of memory during the build. This may be a problem for some users,
+which is why it's not enabled by default. But it shouldn't be a problem for
+most build farms. It's recommended that binary distributions use this
+target for their LuaJIT builds.
+
+
+The tl;dr version of the above:
+
+
+make amalg PREFIX=/usr && \
+make install PREFIX=/usr DESTDIR=/tmp/buildroot
+
+
+Finally, if you encounter any difficulties, please
+contact me first, instead of releasing a broken
+package onto unsuspecting users. Because they'll usually complain
+to me (the upstream) and not to you (the package maintainer), anyway.
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/luajit.html b/src/LuaJIT/doc/luajit.html
new file mode 100644
index 000000000..ac4982146
--- /dev/null
+++ b/src/LuaJIT/doc/luajit.html
@@ -0,0 +1,144 @@
+
+
+
+LuaJIT
+
+
+
+
+
+
+
+
+
+
+
+
LuaJIT
+
+
+
+
+LuaJIT is a Just-In-Time Compiler for the Lua*
+programming language.
+
+
+LuaJIT is Copyright © 2005-2012 Mike Pall.
+LuaJIT is open source software, released under the
+» MIT license .
+
+
+* Lua is a powerful, dynamic and light-weight programming language
+designed for extending applications. Lua is also frequently used as a
+general-purpose, stand-alone language. More information about
+Lua can be found at: » http://www.lua.org/
+
+
Compatibility
+
+LuaJIT implements the full set of language features defined by Lua 5.1.
+The virtual machine (VM) is API- and ABI-compatible to the
+standard Lua interpreter and can be deployed as a drop-in replacement.
+
+
+LuaJIT offers more performance, at the expense of portability. It
+currently runs on all popular operating systems based on
+x86 or x64 CPUs (Linux, Windows, OSX etc.) or embedded
+systems based on ARM (Android, iOS), PPC or MIPS CPUs.
+Other platforms will be supported in the future, based on user demand
+and sponsoring.
+
+
+
Overview
+
+LuaJIT has been successfully used as a scripting middleware in
+games, 3D modellers, numerical simulations, trading platforms and many
+other specialty applications. It combines high flexibility with high
+performance and an unmatched low memory footprint : less than
+125K for the VM plus less than 85K for the JIT compiler (on x86).
+
+
+LuaJIT has been in continuous development since 2005. It's widely
+considered to be one of the fastest dynamic language
+implementations . It has outperformed other dynamic languages on many
+cross-language benchmarks since its first release — often by a
+substantial margin. In 2009 other dynamic language VMs started to catch up
+with the performance of LuaJIT 1.x. Well, I couldn't let that slide. ;-)
+
+
+2009 also marks the first release of the long-awaited LuaJIT 2.0 .
+The whole VM has been rewritten from the ground up and relentlessly
+optimized for performance. It combines a high-speed interpreter,
+written in assembler, with a state-of-the-art JIT compiler.
+
+
+An innovative trace compiler is integrated with advanced,
+SSA-based optimizations and a highly tuned code generation backend. This
+allows a substantial reduction of the overhead associated with dynamic
+language features.
+
+
+It's destined to break into the » performance
+range traditionally reserved for offline, static language compilers.
+
+
+
More ...
+
+Click on the LuaJIT sub-topics in the navigation bar to learn more
+about LuaJIT.
+
+
+Click on the Logo in the upper left corner to visit
+the LuaJIT project page on the web. All other links to online
+resources are marked with a '» '.
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/running.html b/src/LuaJIT/doc/running.html
new file mode 100644
index 000000000..6a3538e83
--- /dev/null
+++ b/src/LuaJIT/doc/running.html
@@ -0,0 +1,315 @@
+
+
+
+Running LuaJIT
+
+
+
+
+
+
+
+
+
+
+
+
Running LuaJIT
+
+
+
+
+LuaJIT has only a single stand-alone executable, called luajit on
+POSIX systems or luajit.exe on Windows. It can be used to run simple
+Lua statements or whole Lua applications from the command line. It has an
+interactive mode, too.
+
+
+Note: the beta test releases only install under the versioned name on
+POSIX systems (to avoid overwriting a previous version). You either need
+to type luajit-2.0.0-beta10 to start it or create a symlink
+with a command like this:
+
+
+sudo ln -sf luajit-2.0.0-beta10 /usr/local/bin/luajit
+
+
+Unlike previous versions, optimization is turned on by default in
+LuaJIT 2.0! It's no longer necessary to use luajit -O .
+
+
+
Command Line Options
+
+The luajit stand-alone executable is just a slightly modified
+version of the regular lua stand-alone executable.
+It supports the same basic options, too. luajit -h
+prints a short list of the available options. Please have a look at the
+» Lua manual
+for details.
+
+
+LuaJIT has some additional options:
+
+
+
-b[options] input output
+
+This option saves or lists bytecode. The following additional options
+are accepted:
+
+
+-l — Only list bytecode.
+-s — Strip debug info (this is the default).
+-g — Keep debug info.
+-n name — Set module name (default: auto-detect from input name)
+-t type — Set output file type (default: auto-detect from output name).
+-a arch — Override architecture for object files (default: native).
+-o os — Override OS for object files (default: native).
+-e chunk — Use chunk string as input.
+- (a single minus sign) — Use stdin as input and/or stdout as output.
+
+
+The output file type is auto-detected from the extension of the output
+file name:
+
+
+c — C source file, exported bytecode data.
+h — C header file, static bytecode data.
+obj or o — Object file, exported bytecode data
+(OS- and architecture-specific).
+raw or any other extension — Raw bytecode file (portable).
+
+
+Notes:
+
+
+See also string.dump()
+for information on bytecode portability and compatibility.
+A file in raw bytecode format is auto-detected and can be loaded like
+any Lua source file. E.g. directly from the command line or with
+loadfile() , dofile() etc.
+To statically embed the bytecode of a module in your application,
+generate an object file and just link it with your application.
+On most ELF-based systems (e.g. Linux) you need to explicitly export the
+global symbols when linking your application, e.g. with: -Wl,-E
+require() tries to load embedded bytecode data from exported
+symbols (in *.exe or lua51.dll on Windows) and from
+shared libraries in package.cpath .
+
+
+Typical usage examples:
+
+
+luajit -b test.lua test.out # Save bytecode to test.out
+luajit -bg test.lua test.out # Keep debug info
+luajit -be "print('hello world')" test.out # Save cmdline script
+
+luajit -bl test.lua # List to stdout
+luajit -bl test.lua test.txt # List to test.txt
+luajit -ble "print('hello world')" # List cmdline script
+
+luajit -b test.lua test.obj # Generate object file
+# Link test.obj with your application and load it with require("test")
+
+
+
-j cmd[=arg[,arg...]]
+
+This option performs a LuaJIT control command or activates one of the
+loadable extension modules. The command is first looked up in the
+jit.* library. If no matching function is found, a module
+named jit.<cmd> is loaded and the start()
+function of the module is called with the specified arguments (if
+any). The space between -j and cmd is optional.
+
+
+Here are the available LuaJIT control commands:
+
+
+-jon — Turns the JIT compiler on (default).
+-joff — Turns the JIT compiler off (only use the interpreter).
+-jflush — Flushes the whole cache of compiled code.
+-jv — Shows verbose information about the progress of the JIT compiler.
+-jdump — Dumps the code and structures used in various compiler stages.
+
+
+The -jv and -jdump commands are extension modules
+written in Lua. They are mainly used for debugging the JIT compiler
+itself. For a description of their options and output format, please
+read the comment block at the start of their source.
+They can be found in the lib directory of the source
+distribution or installed under the jit directory. By default
+this is /usr/local/share/luajit-2.0.0-beta10/jit on POSIX
+systems.
+
+
+
-O[level]
+-O[+]flag -O-flag
+-Oparam=value
+
+This option allows fine-tuned control of the optimizations used by
+the JIT compiler. This is mainly intended for debugging LuaJIT itself.
+Please note that the JIT compiler is extremely fast (we are talking
+about the microsecond to millisecond range). Disabling optimizations
+doesn't have any visible impact on its overhead, but usually generates
+code that runs slower.
+
+
+The first form sets an optimization level — this enables a
+specific mix of optimization flags. -O0 turns off all
+optimizations and higher numbers enable more optimizations. Omitting
+the level (i.e. just -O ) sets the default optimization level,
+which is -O3 in the current version.
+
+
+The second form adds or removes individual optimization flags.
+The third form sets a parameter for the VM or the JIT compiler
+to a specific value.
+
+
+You can either use this option multiple times (like -Ocse
+-O-dce -Ohotloop=10 ) or separate several settings with a comma
+(like -O+cse,-dce,hotloop=10 ). The settings are applied from
+left to right and later settings override earlier ones. You can freely
+mix the three forms, but note that setting an optimization level
+overrides all earlier flags.
+
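+The same settings can also be applied from Lua code via the
+jit.opt sub-module, e.g. a rough equivalent of -Ocse,-dce,hotloop=10 :
+
+jit.opt.start("cse", "-dce", "hotloop=10")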
+
+Here are the available flags and at what optimization levels they
+are enabled:
+
+
+
+Flag     -O1  -O2  -O3  Description
+
+fold      •    •    •   Constant Folding, Simplifications and Reassociation
+cse       •    •    •   Common-Subexpression Elimination
+dce       •    •    •   Dead-Code Elimination
+narrow         •    •   Narrowing of numbers to integers
+loop           •    •   Loop Optimizations (code hoisting)
+fwd                 •   Load Forwarding (L2L) and Store Forwarding (S2L)
+dse                 •   Dead-Store Elimination
+abc                 •   Array Bounds Check Elimination
+fuse                •   Fusion of operands into instructions
+
+
+Here are the parameters and their default settings:
+
+
+
+Parameter   Default  Description
+
+maxtrace    1000     Max. number of traces in the cache
+maxrecord   4000     Max. number of recorded IR instructions
+maxirconst  500      Max. number of IR constants of a trace
+maxside     100      Max. number of side traces of a root trace
+maxsnap     500      Max. number of snapshots for a trace
+hotloop     56       Number of iterations to detect a hot loop or hot call
+hotexit     10       Number of taken exits to start a side trace
+tryside     4        Number of attempts to compile a side trace
+instunroll  4        Max. unroll factor for unstable loops
+loopunroll  15       Max. unroll factor for loop ops in side traces
+callunroll  3        Max. unroll factor for pseudo-recursive calls
+recunroll   2        Min. unroll factor for true recursion
+sizemcode   32       Size of each machine code area in KBytes (Windows: 64K)
+maxmcode    512      Max. total size of all machine code areas in KBytes
+
+
+
+
+
+
diff --git a/src/LuaJIT/doc/status.html b/src/LuaJIT/doc/status.html
new file mode 100644
index 000000000..b6853d62a
--- /dev/null
+++ b/src/LuaJIT/doc/status.html
@@ -0,0 +1,241 @@
+
+
+
+Status & Roadmap
+
+
+
+
+
+
+
+
+
+
+
+
Status & Roadmap
+
+
+
+
+The LuaJIT 1.x series represents
+the current stable branch .
+Only a single bug has been discovered in the last three years. So, if
+you need a rock-solid VM, you are encouraged to fetch the latest
+release of LuaJIT 1.x from the » Download
+page.
+
+
+LuaJIT 2.0 is the currently active
+development branch .
+It still has Beta Test status, but it's not undergoing substantial
+changes anymore.
+It has » much better performance than LuaJIT 1.x.
+It's nearly feature-complete, so you should definitely
+start to evaluate it for new projects right now.
+
+
+
Current Status
+
+This is a list of the things you should know about the LuaJIT 2.0 beta test:
+
+
+
+Obviously there will be some bugs in a VM which has been
+rewritten from the ground up. Please report your findings together with
+the circumstances needed to reproduce the bug. If possible, reduce the
+problem down to a simple test case.
+There is no formal bug tracker at the moment. The best place for
+discussion is the » LuaJIT mailing list . Of course
+you may also send your bug reports directly to me ,
+especially when they contain lengthy debug output or if you require
+confidentiality.
+
+
+The x86 JIT compiler only generates code for CPUs with support for
+SSE2 instructions. I.e. you need at least a P4, Core 2/i3/i5/i7,
+Atom or K8/K10 to get the full benefit.
+If you run LuaJIT on older CPUs without SSE2 support, the JIT compiler
+is disabled and the VM falls back to the LuaJIT interpreter. This is faster
+than the Lua interpreter, but not nearly as fast as the JIT compiler of course.
+Run the command line executable without arguments to show the current status
+(JIT: ON or JIT: OFF ).
+
+
+The VM is complete in the sense that it should run all Lua code
+just fine. It's considered a serious bug if the VM crashes or produces
+unexpected results — please report this. There are only very few
+known incompatibilities with standard Lua:
+
+
+The Lua debug API is missing a couple of features (return
+hooks for non-Lua functions) and shows slightly different behavior
+(no per-coroutine hooks, no tail call counting).
+
+
+Some of the configuration options of Lua 5.1 are not supported:
+
+The number type cannot be changed (it's always a double ).
+The stand-alone executable cannot be linked with readline
+to enable line editing. It's planned to add support for loading it
+on-demand.
+
+
+
+Most other issues you're likely to find (e.g. with the existing test
+suites) are differences in the implementation-defined behavior.
+These either have a good reason (like early tail call resolving which
+may cause differences in error reporting), are arbitrary design choices
+or are due to quirks in the VM. The latter cases may get fixed if a
+demonstrable need is shown.
+
+
+
+
+The JIT compiler falls back to the
+interpreter in some cases. All of this works transparently, so unless
+you use -jv , you'll probably never notice (the interpreter is
+» quite fast , too). Here are the known issues:
+
+
+Most known issues cause a NYI (not yet implemented) trace abort
+message. E.g. for calls to some internal library
+functions. Reporting these is only mildly useful, except if you have good
+example code that shows the problem. Obviously, reports accompanied with
+a patch to fix the issue are more than welcome. But please check back
+with me, before writing major improvements, to avoid duplication of
+effort.
+
+
+Some checks are missing in the JIT-compiled code for obscure situations
+with open upvalues aliasing one of the SSA slots later on (or
+vice versa). Bonus points, if you can find a real world test case for
+this.
+
+
+Currently some out-of-memory errors from on-trace code are not
+handled correctly. The error may fall through an on-trace
+pcall (x86) or it may be passed on to the function set with
+lua_atpanic (x64).
+
+
+
+
+
+
Roadmap
+
+Please refer to the
+» LuaJIT
+Roadmap 2011 for the latest release plan. Here's the general
+project plan for LuaJIT 2.0:
+
+
+
+The main goal right now is to stabilize LuaJIT 2.0 and get it out of
+beta test. Correctness has priority over completeness. This
+implies the first stable release will certainly NOT compile every
+library function call and will fall back to the interpreter from time
+to time. This is perfectly ok, since it still executes all Lua code,
+just not at the highest possible speed.
+
+
+The next step is to get it to compile more library functions and handle
+more cases where the compiler currently bails out. This doesn't mean it
+will compile every corner case. It's much more important that it
+performs well in a majority of use cases. Every compiler has to make
+these trade-offs — completeness just cannot be the
+overriding goal for a low-footprint, low-overhead JIT compiler.
+
+
+More optimizations will be added in parallel to the last step on
+an as-needed basis. Sinking of stores
+to aggregates and sinking of allocations are high on the list.
+More complex optimizations with less pay-off, such as value-range-propagation
+(VRP) will have to wait.
+
+
+LuaJIT 2.0 has been designed with portability in mind.
+Nonetheless, it compiles to native code and needs to be adapted to each
+architecture. The two major work items are porting the fast interpreter,
+which is written in assembler, and porting the compiler backend.
+Most other portability issues like endianness or 32 vs. 64 bit CPUs
+have already been taken care of.
+Several ports are already available, thanks to the
+» LuaJIT sponsorship program .
+More ports will follow in the future — companies which are
+interested in sponsoring a port to a particular architecture, please
+use the given contact address.
+
+
+Documentation about the internals of LuaJIT is still sorely
+missing. Although the source code is included and is IMHO well
+commented, many basic design decisions are in need of an explanation.
+The rather un-traditional compiler architecture and the many highly
+optimized data structures are a barrier for outside participation in
+the development. Alas, as I've repeatedly stated, I'm better at
+writing code than papers and I'm not in need of any academic merits.
+Someday I will find the time for it. :-)
+
+
+Producing good code for unbiased branches is a key problem for trace
+compilers. This is the main cause for "trace explosion".
+Hyperblock scheduling promises to solve this nicely at the
+price of a major redesign of the compiler. This would also pave the
+way for emitting predicated instructions, which is a prerequisite
+for efficient vectorization .
+
+
+
+
+
+
+
diff --git a/src/LuaJIT/dynasm/dasm_arm.h b/src/LuaJIT/dynasm/dasm_arm.h
new file mode 100644
index 000000000..b770c2df3
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_arm.h
@@ -0,0 +1,448 @@
+/*
+** DynASM ARM encoding engine.
+** Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define DASM_ARCH "arm"
+
+#ifndef DASM_EXTERN
+#define DASM_EXTERN(a,b,c,d) 0
+#endif
+
+/* Action definitions. */
+enum {
+ DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
+ /* The following actions need a buffer position. */
+ DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
+ /* The following actions also have an argument. */
+ DASM_REL_PC, DASM_LABEL_PC,
+ DASM_IMM, DASM_IMM12, DASM_IMM16, DASM_IMML8, DASM_IMML12,
+ DASM__MAX
+};
+
+/* Maximum number of section buffer positions for a single dasm_put() call. */
+#define DASM_MAXSECPOS 25
+
+/* DynASM encoder status codes. Action list offset or number are or'ed in. */
+#define DASM_S_OK 0x00000000
+#define DASM_S_NOMEM 0x01000000
+#define DASM_S_PHASE 0x02000000
+#define DASM_S_MATCH_SEC 0x03000000
+#define DASM_S_RANGE_I 0x11000000
+#define DASM_S_RANGE_SEC 0x12000000
+#define DASM_S_RANGE_LG 0x13000000
+#define DASM_S_RANGE_PC 0x14000000
+#define DASM_S_RANGE_REL 0x15000000
+#define DASM_S_UNDEF_LG 0x21000000
+#define DASM_S_UNDEF_PC 0x22000000
+
+/* Macros to convert positions (8 bit section + 24 bit index). */
+#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
+#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
+#define DASM_SEC2POS(sec) ((sec)<<24)
+#define DASM_POS2SEC(pos) ((pos)>>24)
+#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
+
+/* Action list type. */
+typedef const unsigned int *dasm_ActList;
+
+/* Per-section structure. */
+typedef struct dasm_Section {
+ int *rbuf; /* Biased buffer pointer (negative section bias). */
+ int *buf; /* True buffer pointer. */
+ size_t bsize; /* Buffer size in bytes. */
+ int pos; /* Biased buffer position. */
+ int epos; /* End of biased buffer position - max single put. */
+ int ofs; /* Byte offset into section. */
+} dasm_Section;
+
+/* Core structure holding the DynASM encoding state. */
+struct dasm_State {
+ size_t psize; /* Allocated size of this structure. */
+ dasm_ActList actionlist; /* Current actionlist pointer. */
+ int *lglabels; /* Local/global chain/pos ptrs. */
+ size_t lgsize;
+ int *pclabels; /* PC label chains/pos ptrs. */
+ size_t pcsize;
+ void **globals; /* Array of globals (bias -10). */
+ dasm_Section *section; /* Pointer to active section. */
+ size_t codesize; /* Total size of all code sections. */
+ int maxsection; /* 0 <= sectionidx < maxsection. */
+ int status; /* Status code. */
+ dasm_Section sections[1]; /* All sections. Alloc-extended. */
+};
+
+/* The size of the core structure depends on the max. number of sections. */
+#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
+
+
+/* Initialize DynASM state. */
+void dasm_init(Dst_DECL, int maxsection)
+{
+ dasm_State *D;
+ size_t psz = 0;
+ int i;
+ Dst_REF = NULL;
+ DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
+ D = Dst_REF;
+ D->psize = psz;
+ D->lglabels = NULL;
+ D->lgsize = 0;
+ D->pclabels = NULL;
+ D->pcsize = 0;
+ D->globals = NULL;
+ D->maxsection = maxsection;
+ for (i = 0; i < maxsection; i++) {
+ D->sections[i].buf = NULL; /* Need this for pass3. */
+ D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
+ D->sections[i].bsize = 0;
+ D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
+ }
+}
+
+/* Free DynASM state. */
+void dasm_free(Dst_DECL)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ for (i = 0; i < D->maxsection; i++)
+ if (D->sections[i].buf)
+ DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
+ if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
+ if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
+ DASM_M_FREE(Dst, D, D->psize);
+}
+
+/* Setup global label array. Must be called before dasm_setup(). */
+void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
+{
+ dasm_State *D = Dst_REF;
+ D->globals = gl - 10; /* Negative bias to compensate for locals. */
+ DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
+}
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+void dasm_growpc(Dst_DECL, unsigned int maxpc)
+{
+ dasm_State *D = Dst_REF;
+ size_t osz = D->pcsize;
+ DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
+ memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
+}
+
+/* Setup encoder. */
+void dasm_setup(Dst_DECL, const void *actionlist)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ D->actionlist = (dasm_ActList)actionlist;
+ D->status = DASM_S_OK;
+ D->section = &D->sections[0];
+ memset((void *)D->lglabels, 0, D->lgsize);
+ if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
+ for (i = 0; i < D->maxsection; i++) {
+ D->sections[i].pos = DASM_SEC2POS(i);
+ D->sections[i].ofs = 0;
+ }
+}
+
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) { \
+ D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0)
+#define CKPL(kind, st) \
+ do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
+ D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0)
+#else
+#define CK(x, st) ((void)0)
+#define CKPL(kind, st) ((void)0)
+#endif
+
+static int dasm_imm12(unsigned int n)
+{
+ int i;
+ for (i = 0; i < 16; i++, n = (n << 2) | (n >> 30))
+ if (n <= 255) return (int)(n + (i << 8));
+ return -1;
+}
+
+/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
+void dasm_put(Dst_DECL, int start, ...)
+{
+ va_list ap;
+ dasm_State *D = Dst_REF;
+ dasm_ActList p = D->actionlist + start;
+ dasm_Section *sec = D->section;
+ int pos = sec->pos, ofs = sec->ofs;
+ int *b;
+
+ if (pos >= sec->epos) {
+ DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
+ sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
+ sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
+ sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
+ }
+
+ b = sec->rbuf;
+ b[pos++] = start;
+
+ va_start(ap, start);
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ if (action >= DASM__MAX) {
+ ofs += 4;
+ } else {
+ int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0;
+ switch (action) {
+ case DASM_STOP: goto stop;
+ case DASM_SECTION:
+ n = (ins & 255); CK(n < D->maxsection, RANGE_SEC);
+ D->section = &D->sections[n]; goto stop;
+ case DASM_ESC: p++; ofs += 4; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break;
+ case DASM_REL_LG:
+ n = (ins & 2047) - 10; pl = D->lglabels + n;
+ if (n >= 0) { CKPL(lg, LG); goto putrel; } /* Bkwd rel or global. */
+ pl += 10; n = *pl;
+ if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
+ goto linkrel;
+ case DASM_REL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putrel:
+ n = *pl;
+ if (n < 0) { /* Label exists. Get label pos and store it. */
+ b[pos] = -n;
+ } else {
+ linkrel:
+ b[pos] = n; /* Else link to rel chain, anchored at label. */
+ *pl = pos;
+ }
+ pos++;
+ break;
+ case DASM_LABEL_LG:
+ pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel;
+ case DASM_LABEL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putlabel:
+ n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos;
+ }
+ *pl = -pos; /* Label exists now. */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_IMM:
+ case DASM_IMM16:
+#ifdef DASM_CHECKS
+ CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I);
+ if ((ins & 0x8000))
+ CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I);
+ else
+ CK((n>>((ins>>5)&31)) == 0, RANGE_I);
+#endif
+ b[pos++] = n;
+ break;
+ case DASM_IMML8:
+ case DASM_IMML12:
+ CK(n >= 0 ? ((n>>((ins>>5)&31)) == 0) :
+ (((-n)>>((ins>>5)&31)) == 0), RANGE_I);
+ b[pos++] = n;
+ break;
+ case DASM_IMM12:
+ CK(dasm_imm12((unsigned int)n) != -1, RANGE_I);
+ b[pos++] = n;
+ break;
+ }
+ }
+ }
+stop:
+ va_end(ap);
+ sec->pos = pos;
+ sec->ofs = ofs;
+}
+#undef CK
+
+/* Pass 2: Link sections, shrink aligns, fix label offsets. */
+int dasm_link(Dst_DECL, size_t *szp)
+{
+ dasm_State *D = Dst_REF;
+ int secnum;
+ int ofs = 0;
+
+#ifdef DASM_CHECKS
+ *szp = 0;
+ if (D->status != DASM_S_OK) return D->status;
+ {
+ int pc;
+ for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
+ if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
+ }
+#endif
+
+ { /* Handle globals not defined in this translation unit. */
+ int idx;
+ for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) {
+ int n = D->lglabels[idx];
+ /* Undefined label: Collapse rel chain and replace with marker (< 0). */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
+ }
+ }
+
+ /* Combine all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->rbuf;
+ int pos = DASM_SEC2POS(secnum);
+ int lastpos = sec->pos;
+
+ while (pos != lastpos) {
+ dasm_ActList p = D->actionlist + b[pos++];
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: p++; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break;
+ case DASM_REL_LG: case DASM_REL_PC: pos++; break;
+ case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break;
+ case DASM_IMM: case DASM_IMM12: case DASM_IMM16:
+ case DASM_IMML8: case DASM_IMML12: pos++; break;
+ }
+ }
+ stop: (void)0;
+ }
+ ofs += sec->ofs; /* Next section starts right after current section. */
+ }
+
+ D->codesize = ofs; /* Total size of all code sections */
+ *szp = ofs;
+ return DASM_S_OK;
+}
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0)
+#else
+#define CK(x, st) ((void)0)
+#endif
+
+/* Pass 3: Encode sections. */
+int dasm_encode(Dst_DECL, void *buffer)
+{
+ dasm_State *D = Dst_REF;
+ char *base = (char *)buffer;
+ unsigned int *cp = (unsigned int *)buffer;
+ int secnum;
+
+ /* Encode all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->buf;
+ int *endb = sec->rbuf + sec->pos;
+
+ while (b != endb) {
+ dasm_ActList p = D->actionlist + *b++;
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0;
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: *cp++ = *p++; break;
+ case DASM_REL_EXT:
+ n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins&2047), !(ins&2048));
+ goto patchrel;
+ case DASM_ALIGN:
+ ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0xe1a00000;
+ break;
+ case DASM_REL_LG:
+ CK(n >= 0, UNDEF_LG);
+ case DASM_REL_PC:
+ CK(n >= 0, UNDEF_PC);
+ n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base) - 4;
+ patchrel:
+ if ((ins & 0x800) == 0) {
+ CK((n & 3) == 0 && ((n+0x02000000) >> 26) == 0, RANGE_REL);
+ cp[-1] |= ((n >> 2) & 0x00ffffff);
+ } else if ((ins & 0x1000)) {
+ CK((n & 3) == 0 && -256 <= n && n <= 256, RANGE_REL);
+ goto patchimml8;
+ } else {
+ CK((n & 3) == 0 && -4096 <= n && n <= 4096, RANGE_REL);
+ goto patchimml12;
+ }
+ break;
+ case DASM_LABEL_LG:
+ ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n);
+ break;
+ case DASM_LABEL_PC: break;
+ case DASM_IMM:
+ cp[-1] |= ((n>>((ins>>10)&31)) & ((1<<((ins>>5)&31))-1)) << (ins&31);
+ break;
+ case DASM_IMM12:
+ cp[-1] |= dasm_imm12((unsigned int)n);
+ break;
+ case DASM_IMM16:
+ cp[-1] |= ((n & 0xf000) << 4) | (n & 0x0fff);
+ break;
+ case DASM_IMML8: patchimml8:
+ cp[-1] |= n >= 0 ? (0x00800000 | (n & 0x0f) | ((n & 0xf0) << 4)) :
+ ((-n & 0x0f) | ((-n & 0xf0) << 4));
+ break;
+ case DASM_IMML12: patchimml12:
+ cp[-1] |= n >= 0 ? (0x00800000 | n) : (-n);
+ break;
+ default: *cp++ = ins; break;
+ }
+ }
+ stop: (void)0;
+ }
+ }
+
+ if (base + D->codesize != (char *)cp) /* Check for phase errors. */
+ return DASM_S_PHASE;
+ return DASM_S_OK;
+}
+#undef CK
+
+/* Get PC label offset. */
+int dasm_getpclabel(Dst_DECL, unsigned int pc)
+{
+ dasm_State *D = Dst_REF;
+ if (pc*sizeof(int) < D->pcsize) {
+ int pos = D->pclabels[pc];
+ if (pos < 0) return *DASM_POS2PTR(D, -pos);
+ if (pos > 0) return -1; /* Undefined. */
+ }
+ return -2; /* Unused or out of range. */
+}
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+int dasm_checkstep(Dst_DECL, int secmatch)
+{
+ dasm_State *D = Dst_REF;
+ if (D->status == DASM_S_OK) {
+ int i;
+ for (i = 1; i <= 9; i++) {
+ if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; }
+ D->lglabels[i] = 0;
+ }
+ }
+ if (D->status == DASM_S_OK && secmatch >= 0 &&
+ D->section != &D->sections[secmatch])
+ D->status = DASM_S_MATCH_SEC|(D->section-D->sections);
+ return D->status;
+}
+#endif
+
diff --git a/src/LuaJIT/dynasm/dasm_arm.lua b/src/LuaJIT/dynasm/dasm_arm.lua
new file mode 100644
index 000000000..cc4fa1774
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_arm.lua
@@ -0,0 +1,949 @@
+------------------------------------------------------------------------------
+-- DynASM ARM module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- See dynasm.lua for full copyright notice.
+------------------------------------------------------------------------------
+
+-- Module information:
+local _info = {
+ arch = "arm",
+ description = "DynASM ARM module",
+ version = "1.3.0",
+ vernum = 10300,
+ release = "2011-05-05",
+ author = "Mike Pall",
+ license = "MIT",
+}
+
+-- Exported glue functions for the arch-specific module.
+local _M = { _info = _info }
+
+-- Cache library functions.
+local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
+local assert, setmetatable, rawget = assert, setmetatable, rawget
+local _s = string
+local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
+local match, gmatch, gsub = _s.match, _s.gmatch, _s.gsub
+local concat, sort, insert = table.concat, table.sort, table.insert
+
+-- Inherited tables and callbacks.
+local g_opt, g_arch
+local wline, werror, wfatal, wwarn
+
+-- Action name list.
+-- CHECK: Keep this in sync with the C code!
+local action_names = {
+ "STOP", "SECTION", "ESC", "REL_EXT",
+ "ALIGN", "REL_LG", "LABEL_LG",
+ "REL_PC", "LABEL_PC", "IMM", "IMM12", "IMM16", "IMML8", "IMML12",
+}
+
+-- Maximum number of section buffer positions for dasm_put().
+-- CHECK: Keep this in sync with the C code!
+local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
+
+-- Action name -> action number.
+local map_action = {}
+for n,name in ipairs(action_names) do
+ map_action[name] = n-1
+end
+
+-- Action list buffer.
+local actlist = {}
+
+-- Argument list for next dasm_put(). Start with offset 0 into action list.
+local actargs = { 0 }
+
+-- Current number of section buffer positions for dasm_put().
+local secpos = 1
+
+------------------------------------------------------------------------------
+
+-- Return 8 digit hex number.
+local function tohex(x)
+ return sub(format("%08x", x), -8) -- Avoid 64 bit portability problem in Lua.
+end
+
+-- Dump action names and numbers.
+local function dumpactions(out)
+ out:write("DynASM encoding engine action codes:\n")
+ for n,name in ipairs(action_names) do
+ local num = map_action[name]
+ out:write(format(" %-10s %02X %d\n", name, num, num))
+ end
+ out:write("\n")
+end
+
+-- Write action list buffer as a huge static C array.
+local function writeactions(out, name)
+ local nn = #actlist
+ if nn == 0 then nn = 1; actlist[0] = map_action.STOP end
+ out:write("static const unsigned int ", name, "[", nn, "] = {\n")
+ for i = 1,nn-1 do
+ assert(out:write("0x", tohex(actlist[i]), ",\n"))
+ end
+ assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n"))
+end
+
+------------------------------------------------------------------------------
+
+-- Add word to action list.
+local function wputxw(n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[#actlist+1] = n
+end
+
+-- Add action to list with optional arg. Advance buffer pos, too.
+local function waction(action, val, a, num)
+ local w = assert(map_action[action], "bad action name `"..action.."'")
+ wputxw(w * 0x10000 + (val or 0))
+ if a then actargs[#actargs+1] = a end
+ if a or num then secpos = secpos + (num or 1) end
+end
+
+-- Flush action list (intervening C code or buffer pos overflow).
+local function wflush(term)
+ if #actlist == actargs[1] then return end -- Nothing to flush.
+ if not term then waction("STOP") end -- Terminate action list.
+ wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true)
+ actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
+ secpos = 1 -- The actionlist offset occupies a buffer position, too.
+end
+
+-- Put escaped word.
+local function wputw(n)
+ if n <= 0x000fffff then waction("ESC") end
+ wputxw(n)
+end
+
+-- Reserve position for word.
+local function wpos()
+ local pos = #actlist+1
+ actlist[pos] = ""
+ return pos
+end
+
+-- Store word to reserved position.
+local function wputpos(pos, n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ if n <= 0x000fffff then
+ insert(actlist, pos+1, n)
+ n = map_action.ESC * 0x10000
+ end
+ actlist[pos] = n
+end
+
+------------------------------------------------------------------------------
+
+-- Global label name -> global label number. With auto assignment on 1st use.
+local next_global = 20
+local map_global = setmetatable({}, { __index = function(t, name)
+ if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end
+ local n = next_global
+ if n > 2047 then werror("too many global labels") end
+ next_global = n + 1
+ t[name] = n
+ return n
+end})
+
+-- Dump global labels.
+local function dumpglobals(out, lvl)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("Global labels:\n")
+ for i=20,next_global-1 do
+ out:write(format(" %s\n", t[i]))
+ end
+ out:write("\n")
+end
+
+-- Write global label enum.
+local function writeglobals(out, prefix)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("enum {\n")
+ for i=20,next_global-1 do
+ out:write(" ", prefix, t[i], ",\n")
+ end
+ out:write(" ", prefix, "_MAX\n};\n")
+end
+
+-- Write global label names.
+local function writeglobalnames(out, name)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=20,next_global-1 do
+ out:write(" \"", t[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Extern label name -> extern label number. With auto assignment on 1st use.
+local next_extern = 0
+local map_extern_ = {}
+local map_extern = setmetatable({}, { __index = function(t, name)
+ -- No restrictions on the name for now.
+ local n = next_extern
+ if n > 2047 then werror("too many extern labels") end
+ next_extern = n + 1
+ t[name] = n
+ map_extern_[n] = name
+ return n
+end})
+
+-- Dump extern labels.
+local function dumpexterns(out, lvl)
+ out:write("Extern labels:\n")
+ for i=0,next_extern-1 do
+ out:write(format(" %s\n", map_extern_[i]))
+ end
+ out:write("\n")
+end
+
+-- Write extern label names.
+local function writeexternnames(out, name)
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=0,next_extern-1 do
+ out:write(" \"", map_extern_[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Arch-specific maps.
+
+-- Ext. register name -> int. name.
+local map_archdef = { sp = "r13", lr = "r14", pc = "r15", }
+
+-- Int. register name -> ext. name.
+local map_reg_rev = { r13 = "sp", r14 = "lr", r15 = "pc", }
+
+local map_type = {} -- Type name -> { ctype, reg }
+local ctypenum = 0 -- Type number (for Dt... macros).
+
+-- Reverse defines for registers.
+function _M.revdef(s)
+ return map_reg_rev[s] or s
+end
+
+local map_shift = { lsl = 0, lsr = 1, asr = 2, ror = 3, }
+
+local map_cond = {
+ eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7,
+ hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14,
+ hs = 2, lo = 3,
+}
+
+------------------------------------------------------------------------------
+
+-- Template strings for ARM instructions.
+local map_op = {
+ -- Basic data processing instructions.
+ and_3 = "e0000000DNPs",
+ eor_3 = "e0200000DNPs",
+ sub_3 = "e0400000DNPs",
+ rsb_3 = "e0600000DNPs",
+ add_3 = "e0800000DNPs",
+ adc_3 = "e0a00000DNPs",
+ sbc_3 = "e0c00000DNPs",
+ rsc_3 = "e0e00000DNPs",
+ tst_2 = "e1100000NP",
+ teq_2 = "e1300000NP",
+ cmp_2 = "e1500000NP",
+ cmn_2 = "e1700000NP",
+ orr_3 = "e1800000DNPs",
+ mov_2 = "e1a00000DPs",
+ bic_3 = "e1c00000DNPs",
+ mvn_2 = "e1e00000DPs",
+
+ and_4 = "e0000000DNMps",
+ eor_4 = "e0200000DNMps",
+ sub_4 = "e0400000DNMps",
+ rsb_4 = "e0600000DNMps",
+ add_4 = "e0800000DNMps",
+ adc_4 = "e0a00000DNMps",
+ sbc_4 = "e0c00000DNMps",
+ rsc_4 = "e0e00000DNMps",
+ tst_3 = "e1100000NMp",
+ teq_3 = "e1300000NMp",
+ cmp_3 = "e1500000NMp",
+ cmn_3 = "e1700000NMp",
+ orr_4 = "e1800000DNMps",
+ mov_3 = "e1a00000DMps",
+ bic_4 = "e1c00000DNMps",
+ mvn_3 = "e1e00000DMps",
+
+ lsl_3 = "e1a00000DMws",
+ lsr_3 = "e1a00020DMws",
+ asr_3 = "e1a00040DMws",
+ ror_3 = "e1a00060DMws",
+ rrx_2 = "e1a00060DMs",
+
+ -- Multiply and multiply-accumulate.
+ mul_3 = "e0000090NMSs",
+ mla_4 = "e0200090NMSDs",
+ umaal_4 = "e0400090DNMSs", -- v6
+ mls_4 = "e0600090DNMSs", -- v6T2
+ umull_4 = "e0800090DNMSs",
+ umlal_4 = "e0a00090DNMSs",
+ smull_4 = "e0c00090DNMSs",
+ smlal_4 = "e0e00090DNMSs",
+
+ -- Halfword multiply and multiply-accumulate.
+ smlabb_4 = "e1000080NMSD", -- v5TE
+ smlatb_4 = "e10000a0NMSD", -- v5TE
+ smlabt_4 = "e10000c0NMSD", -- v5TE
+ smlatt_4 = "e10000e0NMSD", -- v5TE
+ smlawb_4 = "e1200080NMSD", -- v5TE
+ smulwb_3 = "e12000a0NMS", -- v5TE
+ smlawt_4 = "e12000c0NMSD", -- v5TE
+ smulwt_3 = "e12000e0NMS", -- v5TE
+ smlalbb_4 = "e1400080NMSD", -- v5TE
+ smlaltb_4 = "e14000a0NMSD", -- v5TE
+ smlalbt_4 = "e14000c0NMSD", -- v5TE
+ smlaltt_4 = "e14000e0NMSD", -- v5TE
+ smulbb_3 = "e1600080NMS", -- v5TE
+ smultb_3 = "e16000a0NMS", -- v5TE
+ smulbt_3 = "e16000c0NMS", -- v5TE
+ smultt_3 = "e16000e0NMS", -- v5TE
+
+ -- Miscellaneous data processing instructions.
+ clz_2 = "e16f0f10DM", -- v5T
+ rev_2 = "e6bf0f30DM", -- v6
+ rev16_2 = "e6bf0fb0DM", -- v6
+ revsh_2 = "e6ff0fb0DM", -- v6
+ sel_3 = "e6800fb0DNM", -- v6
+ usad8_3 = "e780f010NMS", -- v6
+ usada8_4 = "e7800010NMSD", -- v6
+ rbit_2 = "e6ff0f30DM", -- v6T2
+ movw_2 = "e3000000DW", -- v6T2
+ movt_2 = "e3400000DW", -- v6T2
+ -- Note: the X encodes width-1, not width.
+ sbfx_4 = "e7a00050DMvX", -- v6T2
+ ubfx_4 = "e7e00050DMvX", -- v6T2
+ -- Note: the X encodes the msb field, not the width.
+ bfc_3 = "e7c0001fDvX", -- v6T2
+ bfi_4 = "e7c00010DMvX", -- v6T2
+
+ -- Packing and unpacking instructions.
+ pkhbt_3 = "e6800010DNM", pkhbt_4 = "e6800010DNMv", -- v6
+ pkhtb_3 = "e6800050DNM", pkhtb_4 = "e6800050DNMv", -- v6
+ sxtab_3 = "e6a00070DNM", sxtab_4 = "e6a00070DNMv", -- v6
+ sxtab16_3 = "e6800070DNM", sxtab16_4 = "e6800070DNMv", -- v6
+ sxtah_3 = "e6b00070DNM", sxtah_4 = "e6b00070DNMv", -- v6
+ sxtb_2 = "e6af0070DM", sxtb_3 = "e6af0070DMv", -- v6
+ sxtb16_2 = "e68f0070DM", sxtb16_3 = "e68f0070DMv", -- v6
+ sxth_2 = "e6bf0070DM", sxth_3 = "e6bf0070DMv", -- v6
+ uxtab_3 = "e6e00070DNM", uxtab_4 = "e6e00070DNMv", -- v6
+ uxtab16_3 = "e6c00070DNM", uxtab16_4 = "e6c00070DNMv", -- v6
+ uxtah_3 = "e6f00070DNM", uxtah_4 = "e6f00070DNMv", -- v6
+ uxtb_2 = "e6ef0070DM", uxtb_3 = "e6ef0070DMv", -- v6
+ uxtb16_2 = "e6cf0070DM", uxtb16_3 = "e6cf0070DMv", -- v6
+ uxth_2 = "e6ff0070DM", uxth_3 = "e6ff0070DMv", -- v6
+
+ -- Saturating instructions.
+ qadd_3 = "e1000050DMN", -- v5TE
+ qsub_3 = "e1200050DMN", -- v5TE
+ qdadd_3 = "e1400050DMN", -- v5TE
+ qdsub_3 = "e1600050DMN", -- v5TE
+ -- Note: the X for ssat* encodes sat_imm-1, not sat_imm.
+ ssat_3 = "e6a00010DXM", ssat_4 = "e6a00010DXMp", -- v6
+ usat_3 = "e6e00010DXM", usat_4 = "e6e00010DXMp", -- v6
+ ssat16_3 = "e6a00f30DXM", -- v6
+ usat16_3 = "e6e00f30DXM", -- v6
+
+ -- Parallel addition and subtraction.
+ sadd16_3 = "e6100f10DNM", -- v6
+ sasx_3 = "e6100f30DNM", -- v6
+ ssax_3 = "e6100f50DNM", -- v6
+ ssub16_3 = "e6100f70DNM", -- v6
+ sadd8_3 = "e6100f90DNM", -- v6
+ ssub8_3 = "e6100ff0DNM", -- v6
+ qadd16_3 = "e6200f10DNM", -- v6
+ qasx_3 = "e6200f30DNM", -- v6
+ qsax_3 = "e6200f50DNM", -- v6
+ qsub16_3 = "e6200f70DNM", -- v6
+ qadd8_3 = "e6200f90DNM", -- v6
+ qsub8_3 = "e6200ff0DNM", -- v6
+ shadd16_3 = "e6300f10DNM", -- v6
+ shasx_3 = "e6300f30DNM", -- v6
+ shsax_3 = "e6300f50DNM", -- v6
+ shsub16_3 = "e6300f70DNM", -- v6
+ shadd8_3 = "e6300f90DNM", -- v6
+ shsub8_3 = "e6300ff0DNM", -- v6
+ uadd16_3 = "e6500f10DNM", -- v6
+ uasx_3 = "e6500f30DNM", -- v6
+ usax_3 = "e6500f50DNM", -- v6
+ usub16_3 = "e6500f70DNM", -- v6
+ uadd8_3 = "e6500f90DNM", -- v6
+ usub8_3 = "e6500ff0DNM", -- v6
+ uqadd16_3 = "e6600f10DNM", -- v6
+ uqasx_3 = "e6600f30DNM", -- v6
+ uqsax_3 = "e6600f50DNM", -- v6
+ uqsub16_3 = "e6600f70DNM", -- v6
+ uqadd8_3 = "e6600f90DNM", -- v6
+ uqsub8_3 = "e6600ff0DNM", -- v6
+ uhadd16_3 = "e6700f10DNM", -- v6
+ uhasx_3 = "e6700f30DNM", -- v6
+ uhsax_3 = "e6700f50DNM", -- v6
+ uhsub16_3 = "e6700f70DNM", -- v6
+ uhadd8_3 = "e6700f90DNM", -- v6
+ uhsub8_3 = "e6700ff0DNM", -- v6
+
+ -- Load/store instructions.
+ str_2 = "e4000000DL", str_3 = "e4000000DL", str_4 = "e4000000DL",
+ strb_2 = "e4400000DL", strb_3 = "e4400000DL", strb_4 = "e4400000DL",
+ ldr_2 = "e4100000DL", ldr_3 = "e4100000DL", ldr_4 = "e4100000DL",
+ ldrb_2 = "e4500000DL", ldrb_3 = "e4500000DL", ldrb_4 = "e4500000DL",
+ strh_2 = "e00000b0DL", strh_3 = "e00000b0DL",
+ ldrh_2 = "e01000b0DL", ldrh_3 = "e01000b0DL",
+ ldrd_2 = "e00000d0DL", ldrd_3 = "e00000d0DL", -- v5TE
+ ldrsb_2 = "e01000d0DL", ldrsb_3 = "e01000d0DL",
+ strd_2 = "e00000f0DL", strd_3 = "e00000f0DL", -- v5TE
+ ldrsh_2 = "e01000f0DL", ldrsh_3 = "e01000f0DL",
+
+ ldm_2 = "e8900000nR", ldmia_2 = "e8900000nR", ldmfd_2 = "e8900000nR",
+ ldmda_2 = "e8100000nR", ldmfa_2 = "e8100000nR",
+ ldmdb_2 = "e9100000nR", ldmea_2 = "e9100000nR",
+ ldmib_2 = "e9900000nR", ldmed_2 = "e9900000nR",
+ stm_2 = "e8800000nR", stmia_2 = "e8800000nR", stmfd_2 = "e8800000nR",
+ stmda_2 = "e8000000nR", stmfa_2 = "e8000000nR",
+ stmdb_2 = "e9000000nR", stmea_2 = "e9000000nR",
+ stmib_2 = "e9800000nR", stmed_2 = "e9800000nR",
+ pop_1 = "e8bd0000R", push_1 = "e92d0000R",
+
+ -- Branch instructions.
+ b_1 = "ea000000B",
+ bl_1 = "eb000000B",
+ blx_1 = "e12fff30C",
+ bx_1 = "e12fff10M",
+
+ -- Miscellaneous instructions.
+ nop_0 = "e1a00000",
+ mrs_1 = "e10f0000D",
+ bkpt_1 = "e1200070K", -- v5T
+ svc_1 = "ef000000T", swi_1 = "ef000000T",
+ ud_0 = "e7f001f0",
+
+ -- NYI: Advanced SIMD and VFP instructions.
+
+ -- NYI instructions, since I have no need for them right now:
+ -- swp, swpb, strex, ldrex, strexd, ldrexd, strexb, ldrexb, strexh, ldrexh
+ -- msr, nopv6, yield, wfe, wfi, sev, dbg, bxj, smc, srs, rfe
+ -- cps, setend, pli, pld, pldw, clrex, dsb, dmb, isb
+ -- stc, ldc, mcr, mcr2, mrc, mrc2, mcrr, mcrr2, mrrc, mrrc2, cdp, cdp2
+}
+
+-- Add mnemonics for "s" variants.
+do
+ local t = {}
+ for k,v in pairs(map_op) do
+ if sub(v, -1) == "s" then
+ local v2 = sub(v, 1, 2)..char(byte(v, 3)+1)..sub(v, 4, -2)
+ t[sub(k, 1, -3).."s"..sub(k, -2)] = v2
+ end
+ end
+ for k,v in pairs(t) do
+ map_op[k] = v
+ end
+end
+
+------------------------------------------------------------------------------
+
+local function parse_gpr(expr)
+ local tname, ovreg = match(expr, "^([%w_]+):(r1?[0-9])$")
+ local tp = map_type[tname or expr]
+ if tp then
+ local reg = ovreg or tp.reg
+ if not reg then
+ werror("type `"..(tname or expr).."' needs a register override")
+ end
+ expr = reg
+ end
+ local r = match(expr, "^r(1?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 15 then return r, tp end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_gpr_pm(expr)
+ local pm, expr2 = match(expr, "^([+-]?)(.*)$")
+ return parse_gpr(expr2), (pm == "-")
+end
+
+local function parse_reglist(reglist)
+ reglist = match(reglist, "^{%s*([^}]*)}$")
+ if not reglist then werror("register list expected") end
+ local rr = 0
+ for p in gmatch(reglist..",", "%s*([^,]*),") do
+ local rbit = 2^parse_gpr(gsub(p, "%s+$", ""))
+ if ((rr - (rr % rbit)) / rbit) % 2 ~= 0 then
+ werror("duplicate register `"..p.."'")
+ end
+ rr = rr + rbit
+ end
+ return rr
+end
+
+local function parse_imm(imm, bits, shift, scale, signed)
+ imm = match(imm, "^#(.*)$")
+ if not imm then werror("expected immediate operand") end
+ local n = tonumber(imm)
+ if n then
+ if n % 2^scale == 0 then
+ n = n / 2^scale
+ if signed then
+ if n >= 0 then
+ if n < 2^(bits-1) then return n*2^shift end
+ else
+ if n >= -(2^(bits-1))-1 then return (n+2^bits)*2^shift end
+ end
+ else
+ if n >= 0 and n <= 2^bits-1 then return n*2^shift end
+ end
+ end
+ werror("out of range immediate `"..imm.."'")
+ else
+ waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm)
+ return 0
+ end
+end
+
+local function parse_imm12(imm)
+ local n = tonumber(imm)
+ if n then
+ local m = n
+ for i=0,-15,-1 do
+ if m >= 0 and m <= 255 and n % 1 == 0 then return m + (i%16) * 256 end
+ local t = m % 4
+ m = (m - t) / 4 + t * 2^30
+ end
+ werror("out of range immediate `"..imm.."'")
+ else
+ waction("IMM12", 0, imm)
+ return 0
+ end
+end
+
+local function parse_imm16(imm)
+ imm = match(imm, "^#(.*)$")
+ if not imm then werror("expected immediate operand") end
+ local n = tonumber(imm)
+ if n then
+ if n >= 0 and n <= 65535 and n % 1 == 0 then
+ local t = n % 4096
+ return (n - t) * 16 + t
+ end
+ werror("out of range immediate `"..imm.."'")
+ else
+ waction("IMM16", 32*16, imm)
+ return 0
+ end
+end
+
+local function parse_imm_load(imm, ext)
+ local n = tonumber(imm)
+ if n then
+ if ext then
+ if n >= -255 and n <= 255 then
+ local up = 0x00800000
+ if n < 0 then n = -n; up = 0 end
+ return (n-(n%16))*16+(n%16) + up
+ end
+ else
+ if n >= -4095 and n <= 4095 then
+ if n >= 0 then return n+0x00800000 end
+ return -n
+ end
+ end
+ werror("out of range immediate `"..imm.."'")
+ else
+ waction(ext and "IMML8" or "IMML12", 32768 + 32*(ext and 8 or 12), imm)
+ return 0
+ end
+end
+
+local function parse_shift(shift, gprok)
+ if shift == "rrx" then
+ return 3 * 32
+ else
+ local s, s2 = match(shift, "^(%S+)%s*(.*)$")
+ s = map_shift[s]
+ if not s then werror("expected shift operand") end
+ if sub(s2, 1, 1) == "#" then
+ return parse_imm(s2, 5, 7, 0, false) + s * 32
+ else
+ if not gprok then werror("expected immediate shift operand") end
+ return parse_gpr(s2) * 256 + s * 32 + 16
+ end
+ end
+end
+
+local function parse_label(label, def)
+ local prefix = sub(label, 1, 2)
+ -- =>label (pc label reference)
+ if prefix == "=>" then
+ return "PC", 0, sub(label, 3)
+ end
+ -- ->name (global label reference)
+ if prefix == "->" then
+ return "LG", map_global[sub(label, 3)]
+ end
+ if def then
+ -- [1-9] (local label definition)
+ if match(label, "^[1-9]$") then
+ return "LG", 10+tonumber(label)
+ end
+ else
+ -- [<>][1-9] (local label reference)
+ local dir, lnum = match(label, "^([<>])([1-9])$")
+ if dir then -- Fwd: 1-9, Bkwd: 11-19.
+ return "LG", lnum + (dir == ">" and 0 or 10)
+ end
+ -- extern label (extern label reference)
+ local extname = match(label, "^extern%s+(%S+)$")
+ if extname then
+ return "EXT", map_extern[extname]
+ end
+ end
+ werror("bad label `"..label.."'")
+end
+
+local function parse_load(params, nparams, n, op)
+ local oplo = op % 256
+ local ext, ldrd = (oplo ~= 0), (oplo == 208)
+ local d
+ if (ldrd or oplo == 240) then
+ d = ((op - (op % 4096)) / 4096) % 16
+ if d % 2 ~= 0 then werror("odd destination register") end
+ end
+ local pn = params[n]
+ local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$")
+ local p2 = params[n+1]
+ if not p1 then
+ if not p2 then
+ if match(pn, "^[<>=%-]") or match(pn, "^extern%s+") then
+ local mode, n, s = parse_label(pn, false)
+ waction("REL_"..mode, n + (ext and 0x1800 or 0x0800), s, 1)
+ return op + 15 * 65536 + 0x01000000 + (ext and 0x00400000 or 0)
+ end
+ local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$")
+ if reg and tailr ~= "" then
+ local d, tp = parse_gpr(reg)
+ if tp then
+ waction(ext and "IMML8" or "IMML12", 32768 + 32*(ext and 8 or 12),
+ format(tp.ctypefmt, tailr))
+ return op + d * 65536 + 0x01000000 + (ext and 0x00400000 or 0)
+ end
+ end
+ end
+ werror("expected address operand")
+ end
+ if wb == "!" then op = op + 0x00200000 end
+ if p2 then
+ if wb == "!" then werror("bad use of '!'") end
+ local p3 = params[n+2]
+ op = op + parse_gpr(p1) * 65536
+ local imm = match(p2, "^#(.*)$")
+ if imm then
+ local m = parse_imm_load(imm, ext)
+ if p3 then werror("too many parameters") end
+ op = op + m + (ext and 0x00400000 or 0)
+ else
+ local m, neg = parse_gpr_pm(p2)
+ if ldrd and (m == d or m-1 == d) then werror("register conflict") end
+ op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000)
+ if p3 then op = op + parse_shift(p3) end
+ end
+ else
+ local p1a, p2 = match(p1, "^([^,%s]*)%s*(.*)$")
+ op = op + parse_gpr(p1a) * 65536 + 0x01000000
+ if p2 ~= "" then
+ local imm = match(p2, "^,%s*#(.*)$")
+ if imm then
+ local m = parse_imm_load(imm, ext)
+ op = op + m + (ext and 0x00400000 or 0)
+ else
+ local p2a, p3 = match(p2, "^,%s*([^,%s]*)%s*,?%s*(.*)$")
+ local m, neg = parse_gpr_pm(p2a)
+ if ldrd and (m == d or m-1 == d) then werror("register conflict") end
+ op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000)
+ if p3 ~= "" then
+ if ext then werror("too many parameters") end
+ op = op + parse_shift(p3)
+ end
+ end
+ else
+ if wb == "!" then werror("bad use of '!'") end
+ op = op + (ext and 0x00c00000 or 0x00800000)
+ end
+ end
+ return op
+end
+
+------------------------------------------------------------------------------
+
+-- Handle opcodes defined with template strings.
+map_op[".template__"] = function(params, template, nparams)
+ if not params then return sub(template, 9) end
+ local op = tonumber(sub(template, 1, 8), 16)
+ local n = 1
+
+ -- Limit number of section buffer positions used by a single dasm_put().
+ -- A single opcode needs a maximum of 3 positions.
+ if secpos+3 > maxsecpos then wflush() end
+ local pos = wpos()
+
+ -- Process each character.
+ for p in gmatch(sub(template, 9), ".") do
+ if p == "D" then
+ op = op + parse_gpr(params[n]) * 4096; n = n + 1
+ elseif p == "N" then
+ op = op + parse_gpr(params[n]) * 65536; n = n + 1
+ elseif p == "S" then
+ op = op + parse_gpr(params[n]) * 256; n = n + 1
+ elseif p == "M" then
+ op = op + parse_gpr(params[n]); n = n + 1
+ elseif p == "P" then
+ local imm = match(params[n], "^#(.*)$")
+ if imm then
+ op = op + parse_imm12(imm) + 0x02000000
+ else
+ op = op + parse_gpr(params[n])
+ end
+ n = n + 1
+ elseif p == "p" then
+ op = op + parse_shift(params[n], true); n = n + 1
+ elseif p == "L" then
+ op = parse_load(params, nparams, n, op)
+ elseif p == "B" then
+ local mode, n, s = parse_label(params[n], false)
+ waction("REL_"..mode, n, s, 1)
+ elseif p == "C" then -- blx gpr vs. blx label.
+ local p = params[n]
+ if match(p, "^([%w_]+):(r1?[0-9])$") or match(p, "^r(1?[0-9])$") then
+ op = op + parse_gpr(p)
+ else
+ if op < 0xe0000000 then werror("unconditional instruction") end
+ local mode, n, s = parse_label(p, false)
+ waction("REL_"..mode, n, s, 1)
+ op = 0xfa000000
+ end
+ elseif p == "n" then
+ local r, wb = match(params[n], "^([^!]*)(!?)$")
+ op = op + parse_gpr(r) * 65536 + (wb == "!" and 0x00200000 or 0)
+ n = n + 1
+ elseif p == "R" then
+ op = op + parse_reglist(params[n]); n = n + 1
+ elseif p == "W" then
+ op = op + parse_imm16(params[n]); n = n + 1
+ elseif p == "v" then
+ op = op + parse_imm(params[n], 5, 7, 0, false); n = n + 1
+ elseif p == "w" then
+ local imm = match(params[n], "^#(.*)$")
+ if imm then
+ op = op + parse_imm(params[n], 5, 7, 0, false); n = n + 1
+ else
+ op = op + parse_gpr(params[n]) * 256 + 16
+ end
+ elseif p == "X" then
+ op = op + parse_imm(params[n], 5, 16, 0, false); n = n + 1
+ elseif p == "K" then
+ local imm = tonumber(match(params[n], "^#(.*)$")); n = n + 1
+ if not imm or imm % 1 ~= 0 or imm < 0 or imm > 0xffff then
+ werror("bad immediate operand")
+ end
+ local t = imm % 16
+ op = op + (imm - t) * 16 + t
+ elseif p == "T" then
+ op = op + parse_imm(params[n], 24, 0, 0, false); n = n + 1
+ elseif p == "s" then
+ -- Ignored.
+ else
+ assert(false)
+ end
+ end
+ wputpos(pos, op)
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode to mark the position where the action list is to be emitted.
+map_op[".actionlist_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeactions(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the global enum is to be emitted.
+map_op[".globals_1"] = function(params)
+ if not params then return "prefix" end
+ local prefix = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobals(out, prefix) end)
+end
+
+-- Pseudo-opcode to mark the position where the global names are to be emitted.
+map_op[".globalnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobalnames(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the extern names are to be emitted.
+map_op[".externnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeexternnames(out, name) end)
+end
+
+------------------------------------------------------------------------------
+
+-- Label pseudo-opcode (converted from trailing colon form).
+map_op[".label_1"] = function(params)
+ if not params then return "[1-9] | ->global | =>pcexpr" end
+ if secpos+1 > maxsecpos then wflush() end
+ local mode, n, s = parse_label(params[1], true)
+ if mode == "EXT" then werror("bad label definition") end
+ waction("LABEL_"..mode, n, s, 1)
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcodes for data storage.
+map_op[".long_*"] = function(params)
+ if not params then return "imm..." end
+ for _,p in ipairs(params) do
+ local n = tonumber(p)
+ if not n then werror("bad immediate `"..p.."'") end
+ if n < 0 then n = n + 2^32 end
+ wputw(n)
+ if secpos+2 > maxsecpos then wflush() end
+ end
+end
+
+-- Alignment pseudo-opcode.
+map_op[".align_1"] = function(params)
+ if not params then return "numpow2" end
+ if secpos+1 > maxsecpos then wflush() end
+ local align = tonumber(params[1])
+ if align then
+ local x = align
+ -- Must be a power of 2 in the range (2 ... 256).
+ for i=1,8 do
+ x = x / 2
+ if x == 1 then
+ waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1.
+ return
+ end
+ end
+ end
+ werror("bad alignment")
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode for (primitive) type definitions (map to C types).
+map_op[".type_3"] = function(params, nparams)
+ if not params then
+ return nparams == 2 and "name, ctype" or "name, ctype, reg"
+ end
+ local name, ctype, reg = params[1], params[2], params[3]
+ if not match(name, "^[%a_][%w_]*$") then
+ werror("bad type name `"..name.."'")
+ end
+ local tp = map_type[name]
+ if tp then
+ werror("duplicate type `"..name.."'")
+ end
+ -- Add #type to defines. A bit unclean to put it in map_archdef.
+ map_archdef["#"..name] = "sizeof("..ctype..")"
+ -- Add new type and emit shortcut define.
+ local num = ctypenum + 1
+ map_type[name] = {
+ ctype = ctype,
+ ctypefmt = format("Dt%X(%%s)", num),
+ reg = reg,
+ }
+ wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
+ ctypenum = num
+end
+map_op[".type_2"] = map_op[".type_3"]
+
+-- Dump type definitions.
+local function dumptypes(out, lvl)
+ local t = {}
+ for name in pairs(map_type) do t[#t+1] = name end
+ sort(t)
+ out:write("Type definitions:\n")
+ for _,name in ipairs(t) do
+ local tp = map_type[name]
+ local reg = tp.reg or ""
+ out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Set the current section.
+function _M.section(num)
+ waction("SECTION", num)
+ wflush(true) -- SECTION is a terminal action.
+end
+
+------------------------------------------------------------------------------
+
+-- Dump architecture description.
+function _M.dumparch(out)
+ out:write(format("DynASM %s version %s, released %s\n\n",
+ _info.arch, _info.version, _info.release))
+ dumpactions(out)
+end
+
+-- Dump all user defined elements.
+function _M.dumpdef(out, lvl)
+ dumptypes(out, lvl)
+ dumpglobals(out, lvl)
+ dumpexterns(out, lvl)
+end
+
+------------------------------------------------------------------------------
+
+-- Pass callbacks from/to the DynASM core.
+function _M.passcb(wl, we, wf, ww)
+ wline, werror, wfatal, wwarn = wl, we, wf, ww
+ return wflush
+end
+
+-- Setup the arch-specific module.
+function _M.setup(arch, opt)
+ g_arch, g_opt = arch, opt
+end
+
+-- Merge the core maps and the arch-specific maps.
+function _M.mergemaps(map_coreop, map_def)
+ setmetatable(map_op, { __index = function(t, k)
+ local v = map_coreop[k]
+ if v then return v end
+ local cc = sub(k, -4, -3)
+ local cv = map_cond[cc]
+ if cv then
+ local v = rawget(t, sub(k, 1, -5)..sub(k, -2))
+ if type(v) == "string" then return format("%x%s", cv, sub(v, 2)) end
+ end
+ end })
+ setmetatable(map_def, { __index = map_archdef })
+ return map_op, map_def
+end
+
+return _M
+
+------------------------------------------------------------------------------
+
diff --git a/src/LuaJIT/dynasm/dasm_mips.h b/src/LuaJIT/dynasm/dasm_mips.h
new file mode 100644
index 000000000..af87d99a7
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_mips.h
@@ -0,0 +1,415 @@
+/*
+** DynASM MIPS encoding engine.
+** Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define DASM_ARCH "mips"
+
+#ifndef DASM_EXTERN
+#define DASM_EXTERN(a,b,c,d) 0
+#endif
+
+/* Action definitions. */
+enum {
+ DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
+ /* The following actions need a buffer position. */
+ DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
+ /* The following actions also have an argument. */
+ DASM_REL_PC, DASM_LABEL_PC, DASM_IMM,
+ DASM__MAX
+};
+
+/* Maximum number of section buffer positions for a single dasm_put() call. */
+#define DASM_MAXSECPOS 25
+
+/* DynASM encoder status codes. Action list offset or number are or'ed in. */
+#define DASM_S_OK 0x00000000
+#define DASM_S_NOMEM 0x01000000
+#define DASM_S_PHASE 0x02000000
+#define DASM_S_MATCH_SEC 0x03000000
+#define DASM_S_RANGE_I 0x11000000
+#define DASM_S_RANGE_SEC 0x12000000
+#define DASM_S_RANGE_LG 0x13000000
+#define DASM_S_RANGE_PC 0x14000000
+#define DASM_S_RANGE_REL 0x15000000
+#define DASM_S_UNDEF_LG 0x21000000
+#define DASM_S_UNDEF_PC 0x22000000
+
+/* Macros to convert positions (8 bit section + 24 bit index). */
+#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
+#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
+#define DASM_SEC2POS(sec) ((sec)<<24)
+#define DASM_POS2SEC(pos) ((pos)>>24)
+#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
+
+/* Action list type. */
+typedef const unsigned int *dasm_ActList;
+
+/* Per-section structure. */
+typedef struct dasm_Section {
+ int *rbuf; /* Biased buffer pointer (negative section bias). */
+ int *buf; /* True buffer pointer. */
+ size_t bsize; /* Buffer size in bytes. */
+ int pos; /* Biased buffer position. */
+ int epos; /* End of biased buffer position - max single put. */
+ int ofs; /* Byte offset into section. */
+} dasm_Section;
+
+/* Core structure holding the DynASM encoding state. */
+struct dasm_State {
+ size_t psize; /* Allocated size of this structure. */
+ dasm_ActList actionlist; /* Current actionlist pointer. */
+ int *lglabels; /* Local/global chain/pos ptrs. */
+ size_t lgsize;
+ int *pclabels; /* PC label chains/pos ptrs. */
+ size_t pcsize;
+ void **globals; /* Array of globals (bias -10). */
+ dasm_Section *section; /* Pointer to active section. */
+ size_t codesize; /* Total size of all code sections. */
+ int maxsection; /* 0 <= sectionidx < maxsection. */
+ int status; /* Status code. */
+ dasm_Section sections[1]; /* All sections. Alloc-extended. */
+};
+
+/* The size of the core structure depends on the max. number of sections. */
+#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
+
+
+/* Initialize DynASM state. */
+void dasm_init(Dst_DECL, int maxsection)
+{
+ dasm_State *D;
+ size_t psz = 0;
+ int i;
+ Dst_REF = NULL;
+ DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
+ D = Dst_REF;
+ D->psize = psz;
+ D->lglabels = NULL;
+ D->lgsize = 0;
+ D->pclabels = NULL;
+ D->pcsize = 0;
+ D->globals = NULL;
+ D->maxsection = maxsection;
+ for (i = 0; i < maxsection; i++) {
+ D->sections[i].buf = NULL; /* Need this for pass3. */
+ D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
+ D->sections[i].bsize = 0;
+ D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
+ }
+}
+
+/* Free DynASM state. */
+void dasm_free(Dst_DECL)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ for (i = 0; i < D->maxsection; i++)
+ if (D->sections[i].buf)
+ DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
+ if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
+ if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
+ DASM_M_FREE(Dst, D, D->psize);
+}
+
+/* Setup global label array. Must be called before dasm_setup(). */
+void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
+{
+ dasm_State *D = Dst_REF;
+ D->globals = gl - 10; /* Negative bias to compensate for locals. */
+ DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
+}
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+void dasm_growpc(Dst_DECL, unsigned int maxpc)
+{
+ dasm_State *D = Dst_REF;
+ size_t osz = D->pcsize;
+ DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
+ memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
+}
+
+/* Setup encoder. */
+void dasm_setup(Dst_DECL, const void *actionlist)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ D->actionlist = (dasm_ActList)actionlist;
+ D->status = DASM_S_OK;
+ D->section = &D->sections[0];
+ memset((void *)D->lglabels, 0, D->lgsize);
+ if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
+ for (i = 0; i < D->maxsection; i++) {
+ D->sections[i].pos = DASM_SEC2POS(i);
+ D->sections[i].ofs = 0;
+ }
+}
+
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) { \
+ D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0)
+#define CKPL(kind, st) \
+ do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
+ D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0)
+#else
+#define CK(x, st) ((void)0)
+#define CKPL(kind, st) ((void)0)
+#endif
+
+/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
+void dasm_put(Dst_DECL, int start, ...)
+{
+ va_list ap;
+ dasm_State *D = Dst_REF;
+ dasm_ActList p = D->actionlist + start;
+ dasm_Section *sec = D->section;
+ int pos = sec->pos, ofs = sec->ofs;
+ int *b;
+
+ if (pos >= sec->epos) {
+ DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
+ sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
+ sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
+ sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
+ }
+
+ b = sec->rbuf;
+ b[pos++] = start;
+
+ va_start(ap, start);
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16) - 0xff00;
+ if (action >= DASM__MAX) {
+ ofs += 4;
+ } else {
+ int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0;
+ switch (action) {
+ case DASM_STOP: goto stop;
+ case DASM_SECTION:
+ n = (ins & 255); CK(n < D->maxsection, RANGE_SEC);
+ D->section = &D->sections[n]; goto stop;
+ case DASM_ESC: p++; ofs += 4; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break;
+ case DASM_REL_LG:
+ n = (ins & 2047) - 10; pl = D->lglabels + n;
+ if (n >= 0) { CKPL(lg, LG); goto putrel; } /* Bkwd rel or global. */
+ pl += 10; n = *pl;
+ if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
+ goto linkrel;
+ case DASM_REL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putrel:
+ n = *pl;
+ if (n < 0) { /* Label exists. Get label pos and store it. */
+ b[pos] = -n;
+ } else {
+ linkrel:
+ b[pos] = n; /* Else link to rel chain, anchored at label. */
+ *pl = pos;
+ }
+ pos++;
+ break;
+ case DASM_LABEL_LG:
+ pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel;
+ case DASM_LABEL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putlabel:
+ n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos;
+ }
+ *pl = -pos; /* Label exists now. */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_IMM:
+#ifdef DASM_CHECKS
+ CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I);
+#endif
+ n >>= ((ins>>10)&31);
+#ifdef DASM_CHECKS
+ if (ins & 0x8000)
+ CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I);
+ else
+ CK((n>>((ins>>5)&31)) == 0, RANGE_I);
+#endif
+ b[pos++] = n;
+ break;
+ }
+ }
+ }
+stop:
+ va_end(ap);
+ sec->pos = pos;
+ sec->ofs = ofs;
+}
+#undef CK
+
+/* Pass 2: Link sections, shrink aligns, fix label offsets. */
+int dasm_link(Dst_DECL, size_t *szp)
+{
+ dasm_State *D = Dst_REF;
+ int secnum;
+ int ofs = 0;
+
+#ifdef DASM_CHECKS
+ *szp = 0;
+ if (D->status != DASM_S_OK) return D->status;
+ {
+ int pc;
+ for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
+ if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
+ }
+#endif
+
+ { /* Handle globals not defined in this translation unit. */
+ int idx;
+ for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) {
+ int n = D->lglabels[idx];
+ /* Undefined label: Collapse rel chain and replace with marker (< 0). */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
+ }
+ }
+
+ /* Combine all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->rbuf;
+ int pos = DASM_SEC2POS(secnum);
+ int lastpos = sec->pos;
+
+ while (pos != lastpos) {
+ dasm_ActList p = D->actionlist + b[pos++];
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16) - 0xff00;
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: p++; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break;
+ case DASM_REL_LG: case DASM_REL_PC: pos++; break;
+ case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break;
+ case DASM_IMM: pos++; break;
+ }
+ }
+ stop: (void)0;
+ }
+ ofs += sec->ofs; /* Next section starts right after current section. */
+ }
+
+ D->codesize = ofs; /* Total size of all code sections */
+ *szp = ofs;
+ return DASM_S_OK;
+}
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0)
+#else
+#define CK(x, st) ((void)0)
+#endif
+
+/* Pass 3: Encode sections. */
+int dasm_encode(Dst_DECL, void *buffer)
+{
+ dasm_State *D = Dst_REF;
+ char *base = (char *)buffer;
+ unsigned int *cp = (unsigned int *)buffer;
+ int secnum;
+
+ /* Encode all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->buf;
+ int *endb = sec->rbuf + sec->pos;
+
+ while (b != endb) {
+ dasm_ActList p = D->actionlist + *b++;
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16) - 0xff00;
+ int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0;
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: *cp++ = *p++; break;
+ case DASM_REL_EXT:
+ n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1);
+ goto patchrel;
+ case DASM_ALIGN:
+ ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000;
+ break;
+ case DASM_REL_LG:
+ CK(n >= 0, UNDEF_LG);
+ case DASM_REL_PC:
+ CK(n >= 0, UNDEF_PC);
+ n = *DASM_POS2PTR(D, n);
+ if (ins & 2048)
+ n = n - (int)((char *)cp - base);
+ else
+ n = (n + (int)base) & 0x0fffffff;
+ patchrel:
+ CK((n & 3) == 0 &&
+ ((n + ((ins & 2048) ? 0x00020000 : 0)) >>
+ ((ins & 2048) ? 18 : 28)) == 0, RANGE_REL);
+ cp[-1] |= ((n>>2) & ((ins & 2048) ? 0x0000ffff: 0x03ffffff));
+ break;
+ case DASM_LABEL_LG:
+ ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n);
+ break;
+ case DASM_LABEL_PC: break;
+ case DASM_IMM:
+ cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31);
+ break;
+ default: *cp++ = ins; break;
+ }
+ }
+ stop: (void)0;
+ }
+ }
+
+ if (base + D->codesize != (char *)cp) /* Check for phase errors. */
+ return DASM_S_PHASE;
+ return DASM_S_OK;
+}
+#undef CK
+
+/* Get PC label offset. */
+int dasm_getpclabel(Dst_DECL, unsigned int pc)
+{
+ dasm_State *D = Dst_REF;
+ if (pc*sizeof(int) < D->pcsize) {
+ int pos = D->pclabels[pc];
+ if (pos < 0) return *DASM_POS2PTR(D, -pos);
+ if (pos > 0) return -1; /* Undefined. */
+ }
+ return -2; /* Unused or out of range. */
+}
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+int dasm_checkstep(Dst_DECL, int secmatch)
+{
+ dasm_State *D = Dst_REF;
+ if (D->status == DASM_S_OK) {
+ int i;
+ for (i = 1; i <= 9; i++) {
+ if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; }
+ D->lglabels[i] = 0;
+ }
+ }
+ if (D->status == DASM_S_OK && secmatch >= 0 &&
+ D->section != &D->sections[secmatch])
+ D->status = DASM_S_MATCH_SEC|(D->section-D->sections);
+ return D->status;
+}
+#endif
+
diff --git a/src/LuaJIT/dynasm/dasm_mips.lua b/src/LuaJIT/dynasm/dasm_mips.lua
new file mode 100644
index 000000000..aa33f0ccc
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_mips.lua
@@ -0,0 +1,959 @@
+------------------------------------------------------------------------------
+-- DynASM MIPS module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- See dynasm.lua for full copyright notice.
+------------------------------------------------------------------------------
+
+-- Module information:
+local _info = {
+ arch = "mips",
+ description = "DynASM MIPS module",
+ version = "1.3.0",
+ vernum = 10300,
+ release = "2012-01-23",
+ author = "Mike Pall",
+ license = "MIT",
+}
+
+-- Exported glue functions for the arch-specific module.
+local _M = { _info = _info }
+
+-- Cache library functions.
+local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
+local assert, setmetatable = assert, setmetatable
+local _s = string
+local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
+local match, gmatch = _s.match, _s.gmatch
+local concat, sort = table.concat, table.sort
+
+-- Inherited tables and callbacks.
+local g_opt, g_arch
+local wline, werror, wfatal, wwarn
+
+-- Action name list.
+-- CHECK: Keep this in sync with the C code!
+local action_names = {
+ "STOP", "SECTION", "ESC", "REL_EXT",
+ "ALIGN", "REL_LG", "LABEL_LG",
+ "REL_PC", "LABEL_PC", "IMM",
+}
+
+-- Maximum number of section buffer positions for dasm_put().
+-- CHECK: Keep this in sync with the C code!
+local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
+
+-- Action name -> action number.
+local map_action = {}
+for n,name in ipairs(action_names) do
+ map_action[name] = n-1
+end
+
+-- Action list buffer.
+local actlist = {}
+
+-- Argument list for next dasm_put(). Start with offset 0 into action list.
+local actargs = { 0 }
+
+-- Current number of section buffer positions for dasm_put().
+local secpos = 1
+
+------------------------------------------------------------------------------
+
+-- Return 8 digit hex number.
+local function tohex(x)
+ return sub(format("%08x", x), -8) -- Avoid 64 bit portability problem in Lua.
+end
+
+-- Dump action names and numbers.
+local function dumpactions(out)
+ out:write("DynASM encoding engine action codes:\n")
+ for n,name in ipairs(action_names) do
+ local num = map_action[name]
+ out:write(format(" %-10s %02X %d\n", name, num, num))
+ end
+ out:write("\n")
+end
+
+-- Write action list buffer as a huge static C array.
+local function writeactions(out, name)
+ local nn = #actlist
+ if nn == 0 then nn = 1; actlist[0] = map_action.STOP end
+ out:write("static const unsigned int ", name, "[", nn, "] = {\n")
+ for i = 1,nn-1 do
+ assert(out:write("0x", tohex(actlist[i]), ",\n"))
+ end
+ assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n"))
+end
+
+------------------------------------------------------------------------------
+
+-- Add word to action list.
+local function wputxw(n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[#actlist+1] = n
+end
+
+-- Add action to list with optional arg. Advance buffer pos, too.
+local function waction(action, val, a, num)
+ local w = assert(map_action[action], "bad action name `"..action.."'")
+ wputxw(0xff000000 + w * 0x10000 + (val or 0))
+ if a then actargs[#actargs+1] = a end
+ if a or num then secpos = secpos + (num or 1) end
+end
+
+-- Flush action list (intervening C code or buffer pos overflow).
+local function wflush(term)
+ if #actlist == actargs[1] then return end -- Nothing to flush.
+ if not term then waction("STOP") end -- Terminate action list.
+ wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true)
+ actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
+ secpos = 1 -- The actionlist offset occupies a buffer position, too.
+end
+
+-- Put escaped word.
+local function wputw(n)
+ if n >= 0xff000000 then waction("ESC") end
+ wputxw(n)
+end
+
+-- Reserve position for word.
+local function wpos()
+ local pos = #actlist+1
+ actlist[pos] = ""
+ return pos
+end
+
+-- Store word to reserved position.
+local function wputpos(pos, n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[pos] = n
+end
+
+------------------------------------------------------------------------------
+
+-- Global label name -> global label number. With auto assignment on 1st use.
+local next_global = 20
+local map_global = setmetatable({}, { __index = function(t, name)
+ if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end
+ local n = next_global
+ if n > 2047 then werror("too many global labels") end
+ next_global = n + 1
+ t[name] = n
+ return n
+end})
+
+-- Dump global labels.
+local function dumpglobals(out, lvl)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("Global labels:\n")
+ for i=20,next_global-1 do
+ out:write(format(" %s\n", t[i]))
+ end
+ out:write("\n")
+end
+
+-- Write global label enum.
+local function writeglobals(out, prefix)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("enum {\n")
+ for i=20,next_global-1 do
+ out:write(" ", prefix, t[i], ",\n")
+ end
+ out:write(" ", prefix, "_MAX\n};\n")
+end
+
+-- Write global label names.
+local function writeglobalnames(out, name)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=20,next_global-1 do
+ out:write(" \"", t[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Extern label name -> extern label number. With auto assignment on 1st use.
+local next_extern = 0
+local map_extern_ = {}
+local map_extern = setmetatable({}, { __index = function(t, name)
+ -- No restrictions on the name for now.
+ local n = next_extern
+ if n > 2047 then werror("too many extern labels") end
+ next_extern = n + 1
+ t[name] = n
+ map_extern_[n] = name
+ return n
+end})
+
+-- Dump extern labels.
+local function dumpexterns(out, lvl)
+ out:write("Extern labels:\n")
+ for i=0,next_extern-1 do
+ out:write(format(" %s\n", map_extern_[i]))
+ end
+ out:write("\n")
+end
+
+-- Write extern label names.
+local function writeexternnames(out, name)
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=0,next_extern-1 do
+ out:write(" \"", map_extern_[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Arch-specific maps.
+local map_archdef = { sp="r29", ra="r31" } -- Ext. register name -> int. name.
+
+local map_type = {} -- Type name -> { ctype, reg }
+local ctypenum = 0 -- Type number (for Dt... macros).
+
+-- Reverse defines for registers.
+function _M.revdef(s)
+ if s == "r29" then return "sp"
+ elseif s == "r31" then return "ra" end
+ return s
+end
+
+------------------------------------------------------------------------------
+
+-- Template strings for MIPS instructions.
+local map_op = {
+ -- First-level opcodes.
+ j_1 = "08000000J",
+ jal_1 = "0c000000J",
+ b_1 = "10000000B",
+ beqz_2 = "10000000SB",
+ beq_3 = "10000000STB",
+ bnez_2 = "14000000SB",
+ bne_3 = "14000000STB",
+ blez_2 = "18000000SB",
+ bgtz_2 = "1c000000SB",
+ addi_3 = "20000000TSI",
+ li_2 = "24000000TI",
+ addiu_3 = "24000000TSI",
+ slti_3 = "28000000TSI",
+ sltiu_3 = "2c000000TSI",
+ andi_3 = "30000000TSU",
+ lu_2 = "34000000TU",
+ ori_3 = "34000000TSU",
+ xori_3 = "38000000TSU",
+ lui_2 = "3c000000TU",
+ beqzl_2 = "50000000SB",
+ beql_3 = "50000000STB",
+ bnezl_2 = "54000000SB",
+ bnel_3 = "54000000STB",
+ blezl_2 = "58000000SB",
+ bgtzl_2 = "5c000000SB",
+ lb_2 = "80000000TO",
+ lh_2 = "84000000TO",
+ lwl_2 = "88000000TO",
+ lw_2 = "8c000000TO",
+ lbu_2 = "90000000TO",
+ lhu_2 = "94000000TO",
+ lwr_2 = "98000000TO",
+ sb_2 = "a0000000TO",
+ sh_2 = "a4000000TO",
+ swl_2 = "a8000000TO",
+ sw_2 = "ac000000TO",
+ swr_2 = "b8000000TO",
+ cache_2 = "bc000000NO",
+ ll_2 = "c0000000TO",
+ lwc1_2 = "c4000000HO",
+ pref_2 = "cc000000NO",
+ ldc1_2 = "d4000000HO",
+ sc_2 = "e0000000TO",
+ swc1_2 = "e4000000HO",
+ sdc1_2 = "f4000000HO",
+
+ -- Opcode SPECIAL.
+ nop_0 = "00000000",
+ sll_3 = "00000000DTA",
+ movf_2 = "00000001DS",
+ movf_3 = "00000001DSC",
+ movt_2 = "00010001DS",
+ movt_3 = "00010001DSC",
+ srl_3 = "00000002DTA",
+ rotr_3 = "00200002DTA",
+ sra_3 = "00000003DTA",
+ sllv_3 = "00000004DTS",
+ srlv_3 = "00000006DTS",
+ rotrv_3 = "00000046DTS",
+ srav_3 = "00000007DTS",
+ jr_1 = "00000008S",
+ jalr_1 = "0000f809S",
+ jalr_2 = "00000009DS",
+ movz_3 = "0000000aDST",
+ movn_3 = "0000000bDST",
+ syscall_0 = "0000000c",
+ syscall_1 = "0000000cY",
+ break_0 = "0000000d",
+ break_1 = "0000000dY",
+ sync_0 = "0000000f",
+ mfhi_1 = "00000010D",
+ mthi_1 = "00000011S",
+ mflo_1 = "00000012D",
+ mtlo_1 = "00000013S",
+ mult_2 = "00000018ST",
+ multu_2 = "00000019ST",
+ div_2 = "0000001aST",
+ divu_2 = "0000001bST",
+ add_3 = "00000020DST",
+ move_2 = "00000021DS",
+ addu_3 = "00000021DST",
+ sub_3 = "00000022DST",
+ negu_2 = "00000023DT",
+ subu_3 = "00000023DST",
+ and_3 = "00000024DST",
+ or_3 = "00000025DST",
+ xor_3 = "00000026DST",
+ not_2 = "00000027DS",
+ nor_3 = "00000027DST",
+ slt_3 = "0000002aDST",
+ sltu_3 = "0000002bDST",
+ tge_2 = "00000030ST",
+ tge_3 = "00000030STZ",
+ tgeu_2 = "00000031ST",
+ tgeu_3 = "00000031STZ",
+ tlt_2 = "00000032ST",
+ tlt_3 = "00000032STZ",
+ tltu_2 = "00000033ST",
+ tltu_3 = "00000033STZ",
+ teq_2 = "00000034ST",
+ teq_3 = "00000034STZ",
+ tne_2 = "00000036ST",
+ tne_3 = "00000036STZ",
+
+ -- Opcode REGIMM.
+ bltz_2 = "04000000SB",
+ bgez_2 = "04010000SB",
+ bltzl_2 = "04020000SB",
+ bgezl_2 = "04030000SB",
+ tgei_2 = "04080000SI",
+ tgeiu_2 = "04090000SI",
+ tlti_2 = "040a0000SI",
+ tltiu_2 = "040b0000SI",
+ teqi_2 = "040c0000SI",
+ tnei_2 = "040e0000SI",
+ bltzal_2 = "04100000SB",
+ bal_1 = "04110000B",
+ bgezal_2 = "04110000SB",
+ bltzall_2 = "04120000SB",
+ bgezall_2 = "04130000SB",
+ synci_1 = "041f0000O",
+
+ -- Opcode SPECIAL2.
+ madd_2 = "70000000ST",
+ maddu_2 = "70000001ST",
+ mul_3 = "70000002DST",
+ msub_2 = "70000004ST",
+ msubu_2 = "70000005ST",
+ clz_2 = "70000020DS=",
+ clo_2 = "70000021DS=",
+ sdbbp_0 = "7000003f",
+ sdbbp_1 = "7000003fY",
+
+ -- Opcode SPECIAL3.
+ ext_4 = "7c000000TSAM", -- Note: last arg is msbd = size-1
+ ins_4 = "7c000004TSAM", -- Note: last arg is msb = pos+size-1
+ wsbh_2 = "7c0000a0DT",
+ seb_2 = "7c000420DT",
+ seh_2 = "7c000620DT",
+ rdhwr_2 = "7c00003bTD",
+
+ -- Opcode COP0.
+ mfc0_2 = "40000000TD",
+ mfc0_3 = "40000000TDW",
+ mtc0_2 = "40800000TD",
+ mtc0_3 = "40800000TDW",
+ rdpgpr_2 = "41400000DT",
+ di_0 = "41606000",
+ di_1 = "41606000T",
+ ei_0 = "41606020",
+ ei_1 = "41606020T",
+ wrpgpr_2 = "41c00000DT",
+ tlbr_0 = "42000001",
+ tlbwi_0 = "42000002",
+ tlbwr_0 = "42000006",
+ tlbp_0 = "42000008",
+ eret_0 = "42000018",
+ deret_0 = "4200001f",
+ wait_0 = "42000020",
+
+ -- Opcode COP1.
+ mfc1_2 = "44000000TG",
+ cfc1_2 = "44400000TG",
+ mfhc1_2 = "44600000TG",
+ mtc1_2 = "44800000TG",
+ ctc1_2 = "44c00000TG",
+ mthc1_2 = "44e00000TG",
+
+ bc1f_1 = "45000000B",
+ bc1f_2 = "45000000CB",
+ bc1t_1 = "45010000B",
+ bc1t_2 = "45010000CB",
+ bc1fl_1 = "45020000B",
+ bc1fl_2 = "45020000CB",
+ bc1tl_1 = "45030000B",
+ bc1tl_2 = "45030000CB",
+
+ ["add.s_3"] = "46000000FGH",
+ ["sub.s_3"] = "46000001FGH",
+ ["mul.s_3"] = "46000002FGH",
+ ["div.s_3"] = "46000003FGH",
+ ["sqrt.s_2"] = "46000004FG",
+ ["abs.s_2"] = "46000005FG",
+ ["mov.s_2"] = "46000006FG",
+ ["neg.s_2"] = "46000007FG",
+ ["round.l.s_2"] = "46000008FG",
+ ["trunc.l.s_2"] = "46000009FG",
+ ["ceil.l.s_2"] = "4600000aFG",
+ ["floor.l.s_2"] = "4600000bFG",
+ ["round.w.s_2"] = "4600000cFG",
+ ["trunc.w.s_2"] = "4600000dFG",
+ ["ceil.w.s_2"] = "4600000eFG",
+ ["floor.w.s_2"] = "4600000fFG",
+ ["movf.s_2"] = "46000011FG",
+ ["movf.s_3"] = "46000011FGC",
+ ["movt.s_2"] = "46010011FG",
+ ["movt.s_3"] = "46010011FGC",
+ ["movz.s_3"] = "46000012FGT",
+ ["movn.s_3"] = "46000013FGT",
+ ["recip.s_2"] = "46000015FG",
+ ["rsqrt.s_2"] = "46000016FG",
+ ["cvt.d.s_2"] = "46000021FG",
+ ["cvt.w.s_2"] = "46000024FG",
+ ["cvt.l.s_2"] = "46000025FG",
+ ["cvt.ps.s_3"] = "46000026FGH",
+ ["c.f.s_2"] = "46000030GH",
+ ["c.f.s_3"] = "46000030VGH",
+ ["c.un.s_2"] = "46000031GH",
+ ["c.un.s_3"] = "46000031VGH",
+ ["c.eq.s_2"] = "46000032GH",
+ ["c.eq.s_3"] = "46000032VGH",
+ ["c.ueq.s_2"] = "46000033GH",
+ ["c.ueq.s_3"] = "46000033VGH",
+ ["c.olt.s_2"] = "46000034GH",
+ ["c.olt.s_3"] = "46000034VGH",
+ ["c.ult.s_2"] = "46000035GH",
+ ["c.ult.s_3"] = "46000035VGH",
+ ["c.ole.s_2"] = "46000036GH",
+ ["c.ole.s_3"] = "46000036VGH",
+ ["c.ule.s_2"] = "46000037GH",
+ ["c.ule.s_3"] = "46000037VGH",
+ ["c.sf.s_2"] = "46000038GH",
+ ["c.sf.s_3"] = "46000038VGH",
+ ["c.ngle.s_2"] = "46000039GH",
+ ["c.ngle.s_3"] = "46000039VGH",
+ ["c.seq.s_2"] = "4600003aGH",
+ ["c.seq.s_3"] = "4600003aVGH",
+ ["c.ngl.s_2"] = "4600003bGH",
+ ["c.ngl.s_3"] = "4600003bVGH",
+ ["c.lt.s_2"] = "4600003cGH",
+ ["c.lt.s_3"] = "4600003cVGH",
+ ["c.nge.s_2"] = "4600003dGH",
+ ["c.nge.s_3"] = "4600003dVGH",
+ ["c.le.s_2"] = "4600003eGH",
+ ["c.le.s_3"] = "4600003eVGH",
+ ["c.ngt.s_2"] = "4600003fGH",
+ ["c.ngt.s_3"] = "4600003fVGH",
+
+ ["add.d_3"] = "46200000FGH",
+ ["sub.d_3"] = "46200001FGH",
+ ["mul.d_3"] = "46200002FGH",
+ ["div.d_3"] = "46200003FGH",
+ ["sqrt.d_2"] = "46200004FG",
+ ["abs.d_2"] = "46200005FG",
+ ["mov.d_2"] = "46200006FG",
+ ["neg.d_2"] = "46200007FG",
+ ["round.l.d_2"] = "46200008FG",
+ ["trunc.l.d_2"] = "46200009FG",
+ ["ceil.l.d_2"] = "4620000aFG",
+ ["floor.l.d_2"] = "4620000bFG",
+ ["round.w.d_2"] = "4620000cFG",
+ ["trunc.w.d_2"] = "4620000dFG",
+ ["ceil.w.d_2"] = "4620000eFG",
+ ["floor.w.d_2"] = "4620000fFG",
+ ["movf.d_2"] = "46200011FG",
+ ["movf.d_3"] = "46200011FGC",
+ ["movt.d_2"] = "46210011FG",
+ ["movt.d_3"] = "46210011FGC",
+ ["movz.d_3"] = "46200012FGT",
+ ["movn.d_3"] = "46200013FGT",
+ ["recip.d_2"] = "46200015FG",
+ ["rsqrt.d_2"] = "46200016FG",
+ ["cvt.s.d_2"] = "46200020FG",
+ ["cvt.w.d_2"] = "46200024FG",
+ ["cvt.l.d_2"] = "46200025FG",
+ ["c.f.d_2"] = "46200030GH",
+ ["c.f.d_3"] = "46200030VGH",
+ ["c.un.d_2"] = "46200031GH",
+ ["c.un.d_3"] = "46200031VGH",
+ ["c.eq.d_2"] = "46200032GH",
+ ["c.eq.d_3"] = "46200032VGH",
+ ["c.ueq.d_2"] = "46200033GH",
+ ["c.ueq.d_3"] = "46200033VGH",
+ ["c.olt.d_2"] = "46200034GH",
+ ["c.olt.d_3"] = "46200034VGH",
+ ["c.ult.d_2"] = "46200035GH",
+ ["c.ult.d_3"] = "46200035VGH",
+ ["c.ole.d_2"] = "46200036GH",
+ ["c.ole.d_3"] = "46200036VGH",
+ ["c.ule.d_2"] = "46200037GH",
+ ["c.ule.d_3"] = "46200037VGH",
+ ["c.sf.d_2"] = "46200038GH",
+ ["c.sf.d_3"] = "46200038VGH",
+ ["c.ngle.d_2"] = "46200039GH",
+ ["c.ngle.d_3"] = "46200039VGH",
+ ["c.seq.d_2"] = "4620003aGH",
+ ["c.seq.d_3"] = "4620003aVGH",
+ ["c.ngl.d_2"] = "4620003bGH",
+ ["c.ngl.d_3"] = "4620003bVGH",
+ ["c.lt.d_2"] = "4620003cGH",
+ ["c.lt.d_3"] = "4620003cVGH",
+ ["c.nge.d_2"] = "4620003dGH",
+ ["c.nge.d_3"] = "4620003dVGH",
+ ["c.le.d_2"] = "4620003eGH",
+ ["c.le.d_3"] = "4620003eVGH",
+ ["c.ngt.d_2"] = "4620003fGH",
+ ["c.ngt.d_3"] = "4620003fVGH",
+
+ ["add.ps_3"] = "46c00000FGH",
+ ["sub.ps_3"] = "46c00001FGH",
+ ["mul.ps_3"] = "46c00002FGH",
+ ["abs.ps_2"] = "46c00005FG",
+ ["mov.ps_2"] = "46c00006FG",
+ ["neg.ps_2"] = "46c00007FG",
+ ["movf.ps_2"] = "46c00011FG",
+ ["movf.ps_3"] = "46c00011FGC",
+ ["movt.ps_2"] = "46c10011FG",
+ ["movt.ps_3"] = "46c10011FGC",
+ ["movz.ps_3"] = "46c00012FGT",
+ ["movn.ps_3"] = "46c00013FGT",
+ ["cvt.s.pu_2"] = "46c00020FG",
+ ["cvt.s.pl_2"] = "46c00028FG",
+ ["pll.ps_3"] = "46c0002cFGH",
+ ["plu.ps_3"] = "46c0002dFGH",
+ ["pul.ps_3"] = "46c0002eFGH",
+ ["puu.ps_3"] = "46c0002fFGH",
+ ["c.f.ps_2"] = "46c00030GH",
+ ["c.f.ps_3"] = "46c00030VGH",
+ ["c.un.ps_2"] = "46c00031GH",
+ ["c.un.ps_3"] = "46c00031VGH",
+ ["c.eq.ps_2"] = "46c00032GH",
+ ["c.eq.ps_3"] = "46c00032VGH",
+ ["c.ueq.ps_2"] = "46c00033GH",
+ ["c.ueq.ps_3"] = "46c00033VGH",
+ ["c.olt.ps_2"] = "46c00034GH",
+ ["c.olt.ps_3"] = "46c00034VGH",
+ ["c.ult.ps_2"] = "46c00035GH",
+ ["c.ult.ps_3"] = "46c00035VGH",
+ ["c.ole.ps_2"] = "46c00036GH",
+ ["c.ole.ps_3"] = "46c00036VGH",
+ ["c.ule.ps_2"] = "46c00037GH",
+ ["c.ule.ps_3"] = "46c00037VGH",
+ ["c.sf.ps_2"] = "46c00038GH",
+ ["c.sf.ps_3"] = "46c00038VGH",
+ ["c.ngle.ps_2"] = "46c00039GH",
+ ["c.ngle.ps_3"] = "46c00039VGH",
+ ["c.seq.ps_2"] = "46c0003aGH",
+ ["c.seq.ps_3"] = "46c0003aVGH",
+ ["c.ngl.ps_2"] = "46c0003bGH",
+ ["c.ngl.ps_3"] = "46c0003bVGH",
+ ["c.lt.ps_2"] = "46c0003cGH",
+ ["c.lt.ps_3"] = "46c0003cVGH",
+ ["c.nge.ps_2"] = "46c0003dGH",
+ ["c.nge.ps_3"] = "46c0003dVGH",
+ ["c.le.ps_2"] = "46c0003eGH",
+ ["c.le.ps_3"] = "46c0003eVGH",
+ ["c.ngt.ps_2"] = "46c0003fGH",
+ ["c.ngt.ps_3"] = "46c0003fVGH",
+
+ ["cvt.s.w_2"] = "46800020FG",
+ ["cvt.d.w_2"] = "46800021FG",
+
+ ["cvt.s.l_2"] = "46a00020FG",
+ ["cvt.d.l_2"] = "46a00021FG",
+
+ -- Opcode COP1X.
+ lwxc1_2 = "4c000000FX",
+ ldxc1_2 = "4c000001FX",
+ luxc1_2 = "4c000005FX",
+ swxc1_2 = "4c000008FX",
+ sdxc1_2 = "4c000009FX",
+ suxc1_2 = "4c00000dFX",
+ prefx_2 = "4c00000fMX",
+ ["alnv.ps_4"] = "4c00001eFGHS",
+ ["madd.s_4"] = "4c000020FRGH",
+ ["madd.d_4"] = "4c000021FRGH",
+ ["madd.ps_4"] = "4c000026FRGH",
+ ["msub.s_4"] = "4c000028FRGH",
+ ["msub.d_4"] = "4c000029FRGH",
+ ["msub.ps_4"] = "4c00002eFRGH",
+ ["nmadd.s_4"] = "4c000030FRGH",
+ ["nmadd.d_4"] = "4c000031FRGH",
+ ["nmadd.ps_4"] = "4c000036FRGH",
+ ["nmsub.s_4"] = "4c000038FRGH",
+ ["nmsub.d_4"] = "4c000039FRGH",
+ ["nmsub.ps_4"] = "4c00003eFRGH",
+}
+
+------------------------------------------------------------------------------
+
+local function parse_gpr(expr)
+ local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$")
+ local tp = map_type[tname or expr]
+ if tp then
+ local reg = ovreg or tp.reg
+ if not reg then
+ werror("type `"..(tname or expr).."' needs a register override")
+ end
+ expr = reg
+ end
+ local r = match(expr, "^r([1-3]?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 31 then return r, tp end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_fpr(expr)
+ local r = match(expr, "^f([1-3]?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 31 then return r end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_imm(imm, bits, shift, scale, signed)
+ local n = tonumber(imm)
+ if n then
+ if n % 2^scale == 0 then
+ n = n / 2^scale
+ if signed then
+ if n >= 0 then
+ if n < 2^(bits-1) then return n*2^shift end
+ else
+ if n >= -(2^(bits-1))-1 then return (n+2^bits)*2^shift end
+ end
+ else
+ if n >= 0 and n <= 2^bits-1 then return n*2^shift end
+ end
+ end
+ werror("out of range immediate `"..imm.."'")
+ elseif match(imm, "^[rf]([1-3]?[0-9])$") or
+ match(imm, "^([%w_]+):([rf][1-3]?[0-9])$") then
+ werror("expected immediate operand, got register")
+ else
+ waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm)
+ return 0
+ end
+end
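+-- Editorial sketch, not part of the upstream source: with bits=16, shift=0,
+-- scale=0 and signed=true, parse_imm("4", 16, 0, 0, true) returns 4, a
+-- negative literal like -4 wraps to (-4 + 2^16) = 65532, and 65536 exceeds
+-- 2^15-1 and raises "out of range immediate". Non-numeric expressions are
+-- deferred to runtime via the IMM action instead.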
+
+local function parse_disp(disp)
+ local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$")
+ if imm then
+ local r = parse_gpr(reg)*2^21
+ local extname = match(imm, "^extern%s+(%S+)$")
+ if extname then
+ waction("REL_EXT", map_extern[extname], nil, 1)
+ return r
+ else
+ return r + parse_imm(imm, 16, 0, 0, true)
+ end
+ end
+ local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$")
+ if reg and tailr ~= "" then
+ local r, tp = parse_gpr(reg)
+ if tp then
+ waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr))
+ return r*2^21
+ end
+ end
+ werror("bad displacement `"..disp.."'")
+end
+
+local function parse_index(idx)
+ local rt, rs = match(idx, "^(.*)%(([%w_:]+)%)$")
+ if rt then
+ rt = parse_gpr(rt)
+ rs = parse_gpr(rs)
+ return rt*2^16 + rs*2^21
+ end
+ werror("bad index `"..idx.."'")
+end
+
+local function parse_label(label, def)
+ local prefix = sub(label, 1, 2)
+ -- =>label (pc label reference)
+ if prefix == "=>" then
+ return "PC", 0, sub(label, 3)
+ end
+ -- ->name (global label reference)
+ if prefix == "->" then
+ return "LG", map_global[sub(label, 3)]
+ end
+ if def then
+ -- [1-9] (local label definition)
+ if match(label, "^[1-9]$") then
+ return "LG", 10+tonumber(label)
+ end
+ else
+ -- [<>][1-9] (local label reference)
+ local dir, lnum = match(label, "^([<>])([1-9])$")
+ if dir then -- Fwd: 1-9, Bkwd: 11-19.
+ return "LG", lnum + (dir == ">" and 0 or 10)
+ end
+ -- extern label (extern label reference)
+ local extname = match(label, "^extern%s+(%S+)$")
+ if extname then
+ return "EXT", map_extern[extname]
+ end
+ end
+ werror("bad label `"..label.."'")
+end
+
+------------------------------------------------------------------------------
+
+-- Handle opcodes defined with template strings.
+map_op[".template__"] = function(params, template, nparams)
+ if not params then return sub(template, 9) end
+ local op = tonumber(sub(template, 1, 8), 16)
+ local n = 1
+
+ -- Limit number of section buffer positions used by a single dasm_put().
+ -- A single opcode needs a maximum of 2 positions (ins/ext).
+ if secpos+2 > maxsecpos then wflush() end
+ local pos = wpos()
+
+ -- Process each character.
+ for p in gmatch(sub(template, 9), ".") do
+ if p == "D" then
+ op = op + parse_gpr(params[n]) * 2^11; n = n + 1
+ elseif p == "T" then
+ op = op + parse_gpr(params[n]) * 2^16; n = n + 1
+ elseif p == "S" then
+ op = op + parse_gpr(params[n]) * 2^21; n = n + 1
+ elseif p == "F" then
+ op = op + parse_fpr(params[n]) * 2^6; n = n + 1
+ elseif p == "G" then
+ op = op + parse_fpr(params[n]) * 2^11; n = n + 1
+ elseif p == "H" then
+ op = op + parse_fpr(params[n]) * 2^16; n = n + 1
+ elseif p == "R" then
+ op = op + parse_fpr(params[n]) * 2^21; n = n + 1
+ elseif p == "I" then
+ op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1
+ elseif p == "U" then
+ op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1
+ elseif p == "O" then
+ op = op + parse_disp(params[n]); n = n + 1
+ elseif p == "X" then
+ op = op + parse_index(params[n]); n = n + 1
+ elseif p == "B" or p == "J" then
+ local mode, n, s = parse_label(params[n], false)
+ if p == "B" then n = n + 2048 end
+ waction("REL_"..mode, n, s, 1)
+ n = n + 1
+ elseif p == "A" then
+ op = op + parse_imm(params[n], 5, 6, 0, false); n = n + 1
+ elseif p == "M" then
+ op = op + parse_imm(params[n], 5, 11, 0, false); n = n + 1
+ elseif p == "N" then
+ op = op + parse_imm(params[n], 5, 16, 0, false); n = n + 1
+ elseif p == "C" then
+ op = op + parse_imm(params[n], 3, 18, 0, false); n = n + 1
+ elseif p == "V" then
+ op = op + parse_imm(params[n], 3, 8, 0, false); n = n + 1
+ elseif p == "W" then
+ op = op + parse_imm(params[n], 3, 0, 0, false); n = n + 1
+ elseif p == "Y" then
+ op = op + parse_imm(params[n], 20, 6, 0, false); n = n + 1
+ elseif p == "Z" then
+ op = op + parse_imm(params[n], 10, 6, 0, false); n = n + 1
+ elseif p == "=" then
+ local d = ((op - op % 2^11) / 2^11) % 32
+ op = op + d * 2^16 -- Copy D to T for clz, clo.
+ else
+ assert(false)
+ end
+ end
+ wputpos(pos, op)
+end
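+-- Editorial sketch, not part of the upstream source: the template
+-- "46000005FG" (abs.s) starts from the base word 0x46000005; "F" adds the
+-- destination FPR at bit 6 and "G" the source FPR at bit 11, so
+-- `abs.s f2, f4` encodes as 0x46000005 + 2*2^6 + 4*2^11 = 0x46002085.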
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode to mark the position where the action list is to be emitted.
+map_op[".actionlist_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeactions(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the global enum is to be emitted.
+map_op[".globals_1"] = function(params)
+ if not params then return "prefix" end
+ local prefix = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobals(out, prefix) end)
+end
+
+-- Pseudo-opcode to mark the position where the global names are to be emitted.
+map_op[".globalnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobalnames(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the extern names are to be emitted.
+map_op[".externnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeexternnames(out, name) end)
+end
+
+------------------------------------------------------------------------------
+
+-- Label pseudo-opcode (converted from trailing colon form).
+map_op[".label_1"] = function(params)
+ if not params then return "[1-9] | ->global | =>pcexpr" end
+ if secpos+1 > maxsecpos then wflush() end
+ local mode, n, s = parse_label(params[1], true)
+ if mode == "EXT" then werror("bad label definition") end
+ waction("LABEL_"..mode, n, s, 1)
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcodes for data storage.
+map_op[".long_*"] = function(params)
+ if not params then return "imm..." end
+ for _,p in ipairs(params) do
+ local n = tonumber(p)
+ if not n then werror("bad immediate `"..p.."'") end
+ if n < 0 then n = n + 2^32 end
+ wputw(n)
+ if secpos+2 > maxsecpos then wflush() end
+ end
+end
+
+-- Alignment pseudo-opcode.
+map_op[".align_1"] = function(params)
+ if not params then return "numpow2" end
+ if secpos+1 > maxsecpos then wflush() end
+ local align = tonumber(params[1])
+ if align then
+ local x = align
+ -- Must be a power of 2 in the range (2 ... 256).
+ for i=1,8 do
+ x = x / 2
+ if x == 1 then
+ waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1.
+ return
+ end
+ end
+ end
+ werror("bad alignment")
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode for (primitive) type definitions (map to C types).
+map_op[".type_3"] = function(params, nparams)
+ if not params then
+ return nparams == 2 and "name, ctype" or "name, ctype, reg"
+ end
+ local name, ctype, reg = params[1], params[2], params[3]
+ if not match(name, "^[%a_][%w_]*$") then
+ werror("bad type name `"..name.."'")
+ end
+ local tp = map_type[name]
+ if tp then
+ werror("duplicate type `"..name.."'")
+ end
+ -- Add #type to defines. A bit unclean to put it in map_archdef.
+ map_archdef["#"..name] = "sizeof("..ctype..")"
+ -- Add new type and emit shortcut define.
+ local num = ctypenum + 1
+ map_type[name] = {
+ ctype = ctype,
+ ctypefmt = format("Dt%X(%%s)", num),
+ reg = reg,
+ }
+ wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
+ ctypenum = num
+end
+map_op[".type_2"] = map_op[".type_3"]
+
+-- Dump type definitions.
+local function dumptypes(out, lvl)
+ local t = {}
+ for name in pairs(map_type) do t[#t+1] = name end
+ sort(t)
+ out:write("Type definitions:\n")
+ for _,name in ipairs(t) do
+ local tp = map_type[name]
+ local reg = tp.reg or ""
+ out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Set the current section.
+function _M.section(num)
+ waction("SECTION", num)
+ wflush(true) -- SECTION is a terminal action.
+end
+
+------------------------------------------------------------------------------
+
+-- Dump architecture description.
+function _M.dumparch(out)
+ out:write(format("DynASM %s version %s, released %s\n\n",
+ _info.arch, _info.version, _info.release))
+ dumpactions(out)
+end
+
+-- Dump all user defined elements.
+function _M.dumpdef(out, lvl)
+ dumptypes(out, lvl)
+ dumpglobals(out, lvl)
+ dumpexterns(out, lvl)
+end
+
+------------------------------------------------------------------------------
+
+-- Pass callbacks from/to the DynASM core.
+function _M.passcb(wl, we, wf, ww)
+ wline, werror, wfatal, wwarn = wl, we, wf, ww
+ return wflush
+end
+
+-- Setup the arch-specific module.
+function _M.setup(arch, opt)
+ g_arch, g_opt = arch, opt
+end
+
+-- Merge the core maps and the arch-specific maps.
+function _M.mergemaps(map_coreop, map_def)
+ setmetatable(map_op, { __index = map_coreop })
+ setmetatable(map_def, { __index = map_archdef })
+ return map_op, map_def
+end
+
+return _M
+
+------------------------------------------------------------------------------
+
diff --git a/src/LuaJIT/dynasm/dasm_ppc.h b/src/LuaJIT/dynasm/dasm_ppc.h
new file mode 100644
index 000000000..bf5957e72
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_ppc.h
@@ -0,0 +1,411 @@
+/*
+** DynASM PPC encoding engine.
+** Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define DASM_ARCH "ppc"
+
+#ifndef DASM_EXTERN
+#define DASM_EXTERN(a,b,c,d) 0
+#endif
+
+/* Action definitions. */
+enum {
+ DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
+ /* The following actions need a buffer position. */
+ DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
+ /* The following actions also have an argument. */
+ DASM_REL_PC, DASM_LABEL_PC, DASM_IMM,
+ DASM__MAX
+};
+
+/* Maximum number of section buffer positions for a single dasm_put() call. */
+#define DASM_MAXSECPOS 25
+
+/* DynASM encoder status codes. Action list offset or number are or'ed in. */
+#define DASM_S_OK 0x00000000
+#define DASM_S_NOMEM 0x01000000
+#define DASM_S_PHASE 0x02000000
+#define DASM_S_MATCH_SEC 0x03000000
+#define DASM_S_RANGE_I 0x11000000
+#define DASM_S_RANGE_SEC 0x12000000
+#define DASM_S_RANGE_LG 0x13000000
+#define DASM_S_RANGE_PC 0x14000000
+#define DASM_S_RANGE_REL 0x15000000
+#define DASM_S_UNDEF_LG 0x21000000
+#define DASM_S_UNDEF_PC 0x22000000
+
+/* Macros to convert positions (8 bit section + 24 bit index). */
+#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
+#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
+#define DASM_SEC2POS(sec) ((sec)<<24)
+#define DASM_POS2SEC(pos) ((pos)>>24)
+#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
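+/* Editorial sketch, not in the upstream source: a position like 0x01000005
+** means index 5 in section 1, so DASM_POS2IDX() yields 5, DASM_POS2SEC()
+** yields 1, and DASM_POS2PTR() indexes that section's biased buffer (rbuf).
+*/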
+
+/* Action list type. */
+typedef const unsigned int *dasm_ActList;
+
+/* Per-section structure. */
+typedef struct dasm_Section {
+ int *rbuf; /* Biased buffer pointer (negative section bias). */
+ int *buf; /* True buffer pointer. */
+ size_t bsize; /* Buffer size in bytes. */
+ int pos; /* Biased buffer position. */
+ int epos; /* End of biased buffer position - max single put. */
+ int ofs; /* Byte offset into section. */
+} dasm_Section;
+
+/* Core structure holding the DynASM encoding state. */
+struct dasm_State {
+ size_t psize; /* Allocated size of this structure. */
+ dasm_ActList actionlist; /* Current actionlist pointer. */
+ int *lglabels; /* Local/global chain/pos ptrs. */
+ size_t lgsize;
+ int *pclabels; /* PC label chains/pos ptrs. */
+ size_t pcsize;
+ void **globals; /* Array of globals (bias -10). */
+ dasm_Section *section; /* Pointer to active section. */
+ size_t codesize; /* Total size of all code sections. */
+ int maxsection; /* 0 <= sectionidx < maxsection. */
+ int status; /* Status code. */
+ dasm_Section sections[1]; /* All sections. Alloc-extended. */
+};
+
+/* The size of the core structure depends on the max. number of sections. */
+#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
+
+
+/* Initialize DynASM state. */
+void dasm_init(Dst_DECL, int maxsection)
+{
+ dasm_State *D;
+ size_t psz = 0;
+ int i;
+ Dst_REF = NULL;
+ DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
+ D = Dst_REF;
+ D->psize = psz;
+ D->lglabels = NULL;
+ D->lgsize = 0;
+ D->pclabels = NULL;
+ D->pcsize = 0;
+ D->globals = NULL;
+ D->maxsection = maxsection;
+ for (i = 0; i < maxsection; i++) {
+ D->sections[i].buf = NULL; /* Need this for pass3. */
+ D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
+ D->sections[i].bsize = 0;
+ D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
+ }
+}
+
+/* Free DynASM state. */
+void dasm_free(Dst_DECL)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ for (i = 0; i < D->maxsection; i++)
+ if (D->sections[i].buf)
+ DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
+ if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
+ if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
+ DASM_M_FREE(Dst, D, D->psize);
+}
+
+/* Setup global label array. Must be called before dasm_setup(). */
+void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
+{
+ dasm_State *D = Dst_REF;
+ D->globals = gl - 10; /* Negative bias to compensate for locals. */
+ DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
+}
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+void dasm_growpc(Dst_DECL, unsigned int maxpc)
+{
+ dasm_State *D = Dst_REF;
+ size_t osz = D->pcsize;
+ DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
+ memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
+}
+
+/* Setup encoder. */
+void dasm_setup(Dst_DECL, const void *actionlist)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ D->actionlist = (dasm_ActList)actionlist;
+ D->status = DASM_S_OK;
+ D->section = &D->sections[0];
+ memset((void *)D->lglabels, 0, D->lgsize);
+ if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
+ for (i = 0; i < D->maxsection; i++) {
+ D->sections[i].pos = DASM_SEC2POS(i);
+ D->sections[i].ofs = 0;
+ }
+}
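+
+/* Editorial note, not in the upstream source: a typical embedding calls
+** dasm_init() and dasm_setupglobal() once, dasm_setup() per code generation
+** run, then any number of dasm_put() calls (pass 1), dasm_link() to compute
+** the code size (pass 2), dasm_encode() into a caller-provided buffer
+** (pass 3), and finally dasm_free(). dasm_growpc() may also be called after
+** dasm_setup() if more PC labels are needed.
+*/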
+
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) { \
+ D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0)
+#define CKPL(kind, st) \
+ do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
+ D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0)
+#else
+#define CK(x, st) ((void)0)
+#define CKPL(kind, st) ((void)0)
+#endif
+
+/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
+void dasm_put(Dst_DECL, int start, ...)
+{
+ va_list ap;
+ dasm_State *D = Dst_REF;
+ dasm_ActList p = D->actionlist + start;
+ dasm_Section *sec = D->section;
+ int pos = sec->pos, ofs = sec->ofs;
+ int *b;
+
+ if (pos >= sec->epos) {
+ DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
+ sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
+ sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
+ sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
+ }
+
+ b = sec->rbuf;
+ b[pos++] = start;
+
+ va_start(ap, start);
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ if (action >= DASM__MAX) {
+ ofs += 4;
+ } else {
+ int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0;
+ switch (action) {
+ case DASM_STOP: goto stop;
+ case DASM_SECTION:
+ n = (ins & 255); CK(n < D->maxsection, RANGE_SEC);
+ D->section = &D->sections[n]; goto stop;
+ case DASM_ESC: p++; ofs += 4; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break;
+ case DASM_REL_LG:
+ n = (ins & 2047) - 10; pl = D->lglabels + n;
+ if (n >= 0) { CKPL(lg, LG); goto putrel; } /* Bkwd rel or global. */
+ pl += 10; n = *pl;
+ if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
+ goto linkrel;
+ case DASM_REL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putrel:
+ n = *pl;
+ if (n < 0) { /* Label exists. Get label pos and store it. */
+ b[pos] = -n;
+ } else {
+ linkrel:
+ b[pos] = n; /* Else link to rel chain, anchored at label. */
+ *pl = pos;
+ }
+ pos++;
+ break;
+ case DASM_LABEL_LG:
+ pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel;
+ case DASM_LABEL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putlabel:
+ n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos;
+ }
+ *pl = -pos; /* Label exists now. */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_IMM:
+#ifdef DASM_CHECKS
+ CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I);
+#endif
+ n >>= ((ins>>10)&31);
+#ifdef DASM_CHECKS
+ if (ins & 0x8000)
+ CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I);
+ else
+ CK((n>>((ins>>5)&31)) == 0, RANGE_I);
+#endif
+ b[pos++] = n;
+ break;
+ }
+ }
+ }
+stop:
+ va_end(ap);
+ sec->pos = pos;
+ sec->ofs = ofs;
+}
+#undef CK
+
+/* Pass 2: Link sections, shrink aligns, fix label offsets. */
+int dasm_link(Dst_DECL, size_t *szp)
+{
+ dasm_State *D = Dst_REF;
+ int secnum;
+ int ofs = 0;
+
+#ifdef DASM_CHECKS
+ *szp = 0;
+ if (D->status != DASM_S_OK) return D->status;
+ {
+ int pc;
+ for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
+ if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
+ }
+#endif
+
+ { /* Handle globals not defined in this translation unit. */
+ int idx;
+ for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) {
+ int n = D->lglabels[idx];
+ /* Undefined label: Collapse rel chain and replace with marker (< 0). */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
+ }
+ }
+
+ /* Combine all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->rbuf;
+ int pos = DASM_SEC2POS(secnum);
+ int lastpos = sec->pos;
+
+ while (pos != lastpos) {
+ dasm_ActList p = D->actionlist + b[pos++];
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: p++; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break;
+ case DASM_REL_LG: case DASM_REL_PC: pos++; break;
+ case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break;
+ case DASM_IMM: pos++; break;
+ }
+ }
+ stop: (void)0;
+ }
+ ofs += sec->ofs; /* Next section starts right after current section. */
+ }
+
+ D->codesize = ofs; /* Total size of all code sections */
+ *szp = ofs;
+ return DASM_S_OK;
+}
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0)
+#else
+#define CK(x, st) ((void)0)
+#endif
+
+/* Pass 3: Encode sections. */
+int dasm_encode(Dst_DECL, void *buffer)
+{
+ dasm_State *D = Dst_REF;
+ char *base = (char *)buffer;
+ unsigned int *cp = (unsigned int *)buffer;
+ int secnum;
+
+ /* Encode all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->buf;
+ int *endb = sec->rbuf + sec->pos;
+
+ while (b != endb) {
+ dasm_ActList p = D->actionlist + *b++;
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0;
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: *cp++ = *p++; break;
+ case DASM_REL_EXT:
+ n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1) - 4;
+ goto patchrel;
+ case DASM_ALIGN:
+ ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000;
+ break;
+ case DASM_REL_LG:
+ CK(n >= 0, UNDEF_LG);
+ case DASM_REL_PC:
+ CK(n >= 0, UNDEF_PC);
+ n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base);
+ patchrel:
+ CK((n & 3) == 0 &&
+ (((n+4) + ((ins & 2048) ? 0x00008000 : 0x02000000)) >>
+ ((ins & 2048) ? 16 : 26)) == 0, RANGE_REL);
+ cp[-1] |= ((n+4) & ((ins & 2048) ? 0x0000fffc: 0x03fffffc));
+ break;
+ case DASM_LABEL_LG:
+ ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n);
+ break;
+ case DASM_LABEL_PC: break;
+ case DASM_IMM:
+ cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31);
+ break;
+ default: *cp++ = ins; break;
+ }
+ }
+ stop: (void)0;
+ }
+ }
+
+ if (base + D->codesize != (char *)cp) /* Check for phase errors. */
+ return DASM_S_PHASE;
+ return DASM_S_OK;
+}
+#undef CK
+
+/* Get PC label offset. */
+int dasm_getpclabel(Dst_DECL, unsigned int pc)
+{
+ dasm_State *D = Dst_REF;
+ if (pc*sizeof(int) < D->pcsize) {
+ int pos = D->pclabels[pc];
+ if (pos < 0) return *DASM_POS2PTR(D, -pos);
+ if (pos > 0) return -1; /* Undefined. */
+ }
+ return -2; /* Unused or out of range. */
+}
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+int dasm_checkstep(Dst_DECL, int secmatch)
+{
+ dasm_State *D = Dst_REF;
+ if (D->status == DASM_S_OK) {
+ int i;
+ for (i = 1; i <= 9; i++) {
+ if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; }
+ D->lglabels[i] = 0;
+ }
+ }
+ if (D->status == DASM_S_OK && secmatch >= 0 &&
+ D->section != &D->sections[secmatch])
+ D->status = DASM_S_MATCH_SEC|(D->section-D->sections);
+ return D->status;
+}
+#endif
+
diff --git a/src/LuaJIT/dynasm/dasm_ppc.lua b/src/LuaJIT/dynasm/dasm_ppc.lua
new file mode 100644
index 000000000..dc2af69e7
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_ppc.lua
@@ -0,0 +1,1230 @@
+------------------------------------------------------------------------------
+-- DynASM PPC module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- See dynasm.lua for full copyright notice.
+------------------------------------------------------------------------------
+
+-- Module information:
+local _info = {
+ arch = "ppc",
+ description = "DynASM PPC module",
+ version = "1.3.0",
+ vernum = 10300,
+ release = "2011-05-05",
+ author = "Mike Pall",
+ license = "MIT",
+}
+
+-- Exported glue functions for the arch-specific module.
+local _M = { _info = _info }
+
+-- Cache library functions.
+local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
+local assert, setmetatable = assert, setmetatable
+local _s = string
+local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
+local match, gmatch = _s.match, _s.gmatch
+local concat, sort = table.concat, table.sort
+
+-- Inherited tables and callbacks.
+local g_opt, g_arch
+local wline, werror, wfatal, wwarn
+
+-- Action name list.
+-- CHECK: Keep this in sync with the C code!
+local action_names = {
+ "STOP", "SECTION", "ESC", "REL_EXT",
+ "ALIGN", "REL_LG", "LABEL_LG",
+ "REL_PC", "LABEL_PC", "IMM",
+}
+
+-- Maximum number of section buffer positions for dasm_put().
+-- CHECK: Keep this in sync with the C code!
+local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
+
+-- Action name -> action number.
+local map_action = {}
+for n,name in ipairs(action_names) do
+ map_action[name] = n-1
+end
+
+-- Action list buffer.
+local actlist = {}
+
+-- Argument list for next dasm_put(). Start with offset 0 into action list.
+local actargs = { 0 }
+
+-- Current number of section buffer positions for dasm_put().
+local secpos = 1
+
+------------------------------------------------------------------------------
+
+-- Return 8 digit hex number.
+local function tohex(x)
+ return sub(format("%08x", x), -8) -- Avoid 64 bit portability problem in Lua.
+end
+
+-- Dump action names and numbers.
+local function dumpactions(out)
+ out:write("DynASM encoding engine action codes:\n")
+ for n,name in ipairs(action_names) do
+ local num = map_action[name]
+ out:write(format(" %-10s %02X %d\n", name, num, num))
+ end
+ out:write("\n")
+end
+
+-- Write action list buffer as a huge static C array.
+local function writeactions(out, name)
+ local nn = #actlist
+ if nn == 0 then nn = 1; actlist[0] = map_action.STOP end
+ out:write("static const unsigned int ", name, "[", nn, "] = {\n")
+ for i = 1,nn-1 do
+ assert(out:write("0x", tohex(actlist[i]), ",\n"))
+ end
+ assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n"))
+end
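+-- Editorial sketch, not part of the upstream source: for a C variable name
+-- "actions" this emits roughly
+--   static const unsigned int actions[N] = {
+--   0x........,
+--   0x........
+--   };
+-- where each entry is either a raw instruction word or an action word whose
+-- upper 16 bits hold the action code.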
+
+------------------------------------------------------------------------------
+
+-- Add word to action list.
+local function wputxw(n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[#actlist+1] = n
+end
+
+-- Add action to list with optional arg. Advance buffer pos, too.
+local function waction(action, val, a, num)
+ local w = assert(map_action[action], "bad action name `"..action.."'")
+ wputxw(w * 0x10000 + (val or 0))
+ if a then actargs[#actargs+1] = a end
+ if a or num then secpos = secpos + (num or 1) end
+end
+
+-- Flush action list (intervening C code or buffer pos overflow).
+local function wflush(term)
+ if #actlist == actargs[1] then return end -- Nothing to flush.
+ if not term then waction("STOP") end -- Terminate action list.
+ wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true)
+ actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
+ secpos = 1 -- The actionlist offset occupies a buffer position, too.
+end
+
+-- Put escaped word.
+local function wputw(n)
+ if n <= 0xffffff then waction("ESC") end
+ wputxw(n)
+end
+
+-- Reserve position for word.
+local function wpos()
+ local pos = #actlist+1
+ actlist[pos] = ""
+ return pos
+end
+
+-- Store word to reserved position.
+local function wputpos(pos, n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[pos] = n
+end
+
+------------------------------------------------------------------------------
+
+-- Global label name -> global label number. With auto assignment on 1st use.
+local next_global = 20
+local map_global = setmetatable({}, { __index = function(t, name)
+ if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end
+ local n = next_global
+ if n > 2047 then werror("too many global labels") end
+ next_global = n + 1
+ t[name] = n
+ return n
+end})
+
+-- Dump global labels.
+local function dumpglobals(out, lvl)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("Global labels:\n")
+ for i=20,next_global-1 do
+ out:write(format(" %s\n", t[i]))
+ end
+ out:write("\n")
+end
+
+-- Write global label enum.
+local function writeglobals(out, prefix)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("enum {\n")
+ for i=20,next_global-1 do
+ out:write(" ", prefix, t[i], ",\n")
+ end
+ out:write(" ", prefix, "_MAX\n};\n")
+end
+
+-- Write global label names.
+local function writeglobalnames(out, name)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=20,next_global-1 do
+ out:write(" \"", t[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Extern label name -> extern label number. With auto assignment on 1st use.
+local next_extern = 0
+local map_extern_ = {}
+local map_extern = setmetatable({}, { __index = function(t, name)
+ -- No restrictions on the name for now.
+ local n = next_extern
+ if n > 2047 then werror("too many extern labels") end
+ next_extern = n + 1
+ t[name] = n
+ map_extern_[n] = name
+ return n
+end})
+
+-- Dump extern labels.
+local function dumpexterns(out, lvl)
+ out:write("Extern labels:\n")
+ for i=0,next_extern-1 do
+ out:write(format(" %s\n", map_extern_[i]))
+ end
+ out:write("\n")
+end
+
+-- Write extern label names.
+local function writeexternnames(out, name)
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=0,next_extern-1 do
+ out:write(" \"", map_extern_[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Arch-specific maps.
+local map_archdef = { sp = "r1" } -- Ext. register name -> int. name.
+
+local map_type = {} -- Type name -> { ctype, reg }
+local ctypenum = 0 -- Type number (for Dt... macros).
+
+-- Reverse defines for registers.
+function _M.revdef(s)
+ if s == "r1" then return "sp" end
+ return s
+end
+
+local map_cond = {
+ lt = 0, gt = 1, eq = 2, so = 3,
+ ge = 4, le = 5, ne = 6, ns = 7,
+}
+
+------------------------------------------------------------------------------
+
+-- Template strings for PPC instructions.
+local map_op = {
+ tdi_3 = "08000000ARI",
+ twi_3 = "0c000000ARI",
+ mulli_3 = "1c000000RRI",
+ subfic_3 = "20000000RRI",
+ cmplwi_3 = "28000000XRU",
+ cmplwi_2 = "28000000-RU",
+ cmpldi_3 = "28200000XRU",
+ cmpldi_2 = "28200000-RU",
+ cmpwi_3 = "2c000000XRI",
+ cmpwi_2 = "2c000000-RI",
+ cmpdi_3 = "2c200000XRI",
+ cmpdi_2 = "2c200000-RI",
+ addic_3 = "30000000RRI",
+ ["addic._3"] = "34000000RRI",
+ addi_3 = "38000000RR0I",
+ li_2 = "38000000RI",
+ la_2 = "38000000RD",
+ addis_3 = "3c000000RR0I",
+ lis_2 = "3c000000RI",
+ lus_2 = "3c000000RU",
+ bc_3 = "40000000AAK",
+ bcl_3 = "40000001AAK",
+ bdnz_1 = "42000000K",
+ bdz_1 = "42400000K",
+ sc_0 = "44000000",
+ b_1 = "48000000J",
+ bl_1 = "48000001J",
+ rlwimi_5 = "50000000RR~AAA.",
+ rlwinm_5 = "54000000RR~AAA.",
+ rlwnm_5 = "5c000000RR~RAA.",
+ ori_3 = "60000000RR~U",
+ nop_0 = "60000000",
+ oris_3 = "64000000RR~U",
+ xori_3 = "68000000RR~U",
+ xoris_3 = "6c000000RR~U",
+ ["andi._3"] = "70000000RR~U",
+ ["andis._3"] = "74000000RR~U",
+ lwz_2 = "80000000RD",
+ lwzu_2 = "84000000RD",
+ lbz_2 = "88000000RD",
+ lbzu_2 = "8c000000RD",
+ stw_2 = "90000000RD",
+ stwu_2 = "94000000RD",
+ stb_2 = "98000000RD",
+ stbu_2 = "9c000000RD",
+ lhz_2 = "a0000000RD",
+ lhzu_2 = "a4000000RD",
+ lha_2 = "a8000000RD",
+ lhau_2 = "ac000000RD",
+ sth_2 = "b0000000RD",
+ sthu_2 = "b4000000RD",
+ lmw_2 = "b8000000RD",
+ stmw_2 = "bc000000RD",
+ lfs_2 = "c0000000FD",
+ lfsu_2 = "c4000000FD",
+ lfd_2 = "c8000000FD",
+ lfdu_2 = "cc000000FD",
+ stfs_2 = "d0000000FD",
+ stfsu_2 = "d4000000FD",
+ stfd_2 = "d8000000FD",
+ stfdu_2 = "dc000000FD",
+ ld_2 = "e8000000RD", -- NYI: displacement must be divisible by 4.
+ ldu_2 = "e8000001RD",
+ lwa_2 = "e8000002RD",
+ std_2 = "f8000000RD",
+ stdu_2 = "f8000001RD",
+
+ -- Primary opcode 19:
+ mcrf_2 = "4c000000XX",
+ isync_0 = "4c00012c",
+ crnor_3 = "4c000042CCC",
+ crnot_2 = "4c000042CC=",
+ crandc_3 = "4c000102CCC",
+ crxor_3 = "4c000182CCC",
+ crclr_1 = "4c000182C==",
+ crnand_3 = "4c0001c2CCC",
+ crand_3 = "4c000202CCC",
+ creqv_3 = "4c000242CCC",
+ crset_1 = "4c000242C==",
+ crorc_3 = "4c000342CCC",
+ cror_3 = "4c000382CCC",
+ crmove_2 = "4c000382CC=",
+ bclr_2 = "4c000020AA",
+ bclrl_2 = "4c000021AA",
+ bcctr_2 = "4c000420AA",
+ bcctrl_2 = "4c000421AA",
+ blr_0 = "4e800020",
+ blrl_0 = "4e800021",
+ bctr_0 = "4e800420",
+ bctrl_0 = "4e800421",
+
+ -- Primary opcode 31:
+ cmpw_3 = "7c000000XRR",
+ cmpw_2 = "7c000000-RR",
+ cmpd_3 = "7c200000XRR",
+ cmpd_2 = "7c200000-RR",
+ tw_3 = "7c000008ARR",
+ subfc_3 = "7c000010RRR.",
+ subc_3 = "7c000010RRR~.",
+ mulhdu_3 = "7c000012RRR.",
+ addc_3 = "7c000014RRR.",
+ mulhwu_3 = "7c000016RRR.",
+ isel_4 = "7c00001eRRRC",
+ isellt_3 = "7c00001eRRR",
+ iselgt_3 = "7c00005eRRR",
+ iseleq_3 = "7c00009eRRR",
+ mfcr_1 = "7c000026R",
+ mtcrf_2 = "7c000120GR",
+ -- NYI: mtocrf, mfocrf
+ lwarx_3 = "7c000028RR0R",
+ ldx_3 = "7c00002aRR0R",
+ lwzx_3 = "7c00002eRR0R",
+ slw_3 = "7c000030RR~R.",
+ cntlzw_2 = "7c000034RR~",
+ sld_3 = "7c000036RR~R.",
+ and_3 = "7c000038RR~R.",
+ cmplw_3 = "7c000040XRR",
+ cmplw_2 = "7c000040-RR",
+ cmpld_3 = "7c200040XRR",
+ cmpld_2 = "7c200040-RR",
+ subf_3 = "7c000050RRR.",
+ sub_3 = "7c000050RRR~.",
+ ldux_3 = "7c00006aRR0R",
+ dcbst_2 = "7c00006c-RR",
+ lwzux_3 = "7c00006eRR0R",
+ cntlzd_2 = "7c000074RR~",
+ andc_3 = "7c000078RR~R.",
+ td_3 = "7c000088ARR",
+ mulhd_3 = "7c000092RRR.",
+ mulhw_3 = "7c000096RRR.",
+ ldarx_3 = "7c0000a8RR0R",
+ dcbf_2 = "7c0000ac-RR",
+ lbzx_3 = "7c0000aeRR0R",
+ neg_2 = "7c0000d0RR.",
+ lbzux_3 = "7c0000eeRR0R",
+ popcntb_2 = "7c0000f4RR~",
+ not_2 = "7c0000f8RR~%.",
+ nor_3 = "7c0000f8RR~R.",
+ subfe_3 = "7c000110RRR.",
+ sube_3 = "7c000110RRR~.",
+ adde_3 = "7c000114RRR.",
+ stdx_3 = "7c00012aRR0R",
+ stwcx_3 = "7c00012cRR0R.",
+ stwx_3 = "7c00012eRR0R",
+ prtyw_2 = "7c000134RR~",
+ stdux_3 = "7c00016aRR0R",
+ stwux_3 = "7c00016eRR0R",
+ prtyd_2 = "7c000174RR~",
+ subfze_2 = "7c000190RR.",
+ addze_2 = "7c000194RR.",
+ stdcx_3 = "7c0001acRR0R.",
+ stbx_3 = "7c0001aeRR0R",
+ subfme_2 = "7c0001d0RR.",
+ mulld_3 = "7c0001d2RRR.",
+ addme_2 = "7c0001d4RR.",
+ mullw_3 = "7c0001d6RRR.",
+ dcbtst_2 = "7c0001ec-RR",
+ stbux_3 = "7c0001eeRR0R",
+ add_3 = "7c000214RRR.",
+ dcbt_2 = "7c00022c-RR",
+ lhzx_3 = "7c00022eRR0R",
+ eqv_3 = "7c000238RR~R.",
+ eciwx_3 = "7c00026cRR0R",
+ lhzux_3 = "7c00026eRR0R",
+ xor_3 = "7c000278RR~R.",
+ mfspefscr_1 = "7c0082a6R",
+ mfxer_1 = "7c0102a6R",
+ mflr_1 = "7c0802a6R",
+ mfctr_1 = "7c0902a6R",
+ lwax_3 = "7c0002aaRR0R",
+ lhax_3 = "7c0002aeRR0R",
+ mftb_1 = "7c0c42e6R",
+ mftbu_1 = "7c0d42e6R",
+ lwaux_3 = "7c0002eaRR0R",
+ lhaux_3 = "7c0002eeRR0R",
+ sthx_3 = "7c00032eRR0R",
+ orc_3 = "7c000338RR~R.",
+ ecowx_3 = "7c00036cRR0R",
+ sthux_3 = "7c00036eRR0R",
+ or_3 = "7c000378RR~R.",
+ mr_2 = "7c000378RR~%.",
+ divdu_3 = "7c000392RRR.",
+ divwu_3 = "7c000396RRR.",
+ mtspefscr_1 = "7c0083a6R",
+ mtxer_1 = "7c0103a6R",
+ mtlr_1 = "7c0803a6R",
+ mtctr_1 = "7c0903a6R",
+ dcbi_2 = "7c0003ac-RR",
+ nand_3 = "7c0003b8RR~R.",
+ divd_3 = "7c0003d2RRR.",
+ divw_3 = "7c0003d6RRR.",
+ cmpb_3 = "7c0003f8RR~R.",
+ mcrxr_1 = "7c000400X",
+ subfco_3 = "7c000410RRR.",
+ subco_3 = "7c000410RRR~.",
+ addco_3 = "7c000414RRR.",
+ ldbrx_3 = "7c000428RR0R",
+ lswx_3 = "7c00042aRR0R",
+ lwbrx_3 = "7c00042cRR0R",
+ lfsx_3 = "7c00042eFR0R",
+ srw_3 = "7c000430RR~R.",
+ srd_3 = "7c000436RR~R.",
+ subfo_3 = "7c000450RRR.",
+ subo_3 = "7c000450RRR~.",
+ lfsux_3 = "7c00046eFR0R",
+ lswi_3 = "7c0004aaRR0A",
+ sync_0 = "7c0004ac",
+ lwsync_0 = "7c2004ac",
+ ptesync_0 = "7c4004ac",
+ lfdx_3 = "7c0004aeFR0R",
+ nego_2 = "7c0004d0RR.",
+ lfdux_3 = "7c0004eeFR0R",
+ subfeo_3 = "7c000510RRR.",
+ subeo_3 = "7c000510RRR~.",
+ addeo_3 = "7c000514RRR.",
+ stdbrx_3 = "7c000528RR0R",
+ stswx_3 = "7c00052aRR0R",
+ stwbrx_3 = "7c00052cRR0R",
+ stfsx_3 = "7c00052eFR0R",
+ stfsux_3 = "7c00056eFR0R",
+ subfzeo_2 = "7c000590RR.",
+ addzeo_2 = "7c000594RR.",
+ stswi_3 = "7c0005aaRR0A",
+ stfdx_3 = "7c0005aeFR0R",
+ subfmeo_2 = "7c0005d0RR.",
+ mulldo_3 = "7c0005d2RRR.",
+ addmeo_2 = "7c0005d4RR.",
+ mullwo_3 = "7c0005d6RRR.",
+ dcba_2 = "7c0005ec-RR",
+ stfdux_3 = "7c0005eeFR0R",
+ addo_3 = "7c000614RRR.",
+ lhbrx_3 = "7c00062cRR0R",
+ sraw_3 = "7c000630RR~R.",
+ srad_3 = "7c000634RR~R.",
+ srawi_3 = "7c000670RR~A.",
+ eieio_0 = "7c0006ac",
+ lfiwax_3 = "7c0006aeFR0R",
+ sthbrx_3 = "7c00072cRR0R",
+ extsh_2 = "7c000734RR~.",
+ extsb_2 = "7c000774RR~.",
+ divduo_3 = "7c000792RRR.",
+ divwou_3 = "7c000796RRR.",
+ icbi_2 = "7c0007ac-RR",
+ stfiwx_3 = "7c0007aeFR0R",
+ extsw_2 = "7c0007b4RR~.",
+ divdo_3 = "7c0007d2RRR.",
+ divwo_3 = "7c0007d6RRR.",
+ dcbz_2 = "7c0007ec-RR",
+
+ -- Primary opcode 59:
+ fdivs_3 = "ec000024FFF.",
+ fsubs_3 = "ec000028FFF.",
+ fadds_3 = "ec00002aFFF.",
+ fsqrts_2 = "ec00002cF-F.",
+ fres_2 = "ec000030F-F.",
+ fmuls_3 = "ec000032FF-F.",
+ frsqrtes_2 = "ec000034F-F.",
+ fmsubs_4 = "ec000038FFFF~.",
+ fmadds_4 = "ec00003aFFFF~.",
+ fnmsubs_4 = "ec00003cFFFF~.",
+ fnmadds_4 = "ec00003eFFFF~.",
+
+ -- Primary opcode 63:
+ fdiv_3 = "fc000024FFF.",
+ fsub_3 = "fc000028FFF.",
+ fadd_3 = "fc00002aFFF.",
+ fsqrt_2 = "fc00002cF-F.",
+ fsel_4 = "fc00002eFFFF~.",
+ fre_2 = "fc000030F-F.",
+ fmul_3 = "fc000032FF-F.",
+ frsqrte_2 = "fc000034F-F.",
+ fmsub_4 = "fc000038FFFF~.",
+ fmadd_4 = "fc00003aFFFF~.",
+ fnmsub_4 = "fc00003cFFFF~.",
+ fnmadd_4 = "fc00003eFFFF~.",
+ fcmpu_3 = "fc000000XFF",
+ fcpsgn_3 = "fc000010FFF.",
+ fcmpo_3 = "fc000040XFF",
+ mtfsb1_1 = "fc00004cA",
+ fneg_2 = "fc000050F-F.",
+ mcrfs_2 = "fc000080XX",
+ mtfsb0_1 = "fc00008cA",
+ fmr_2 = "fc000090F-F.",
+ frsp_2 = "fc000018F-F.",
+ fctiw_2 = "fc00001cF-F.",
+ fctiwz_2 = "fc00001eF-F.",
+ mtfsfi_2 = "fc00010cAA", -- NYI: upshift.
+ fnabs_2 = "fc000110F-F.",
+ fabs_2 = "fc000210F-F.",
+ frin_2 = "fc000310F-F.",
+ friz_2 = "fc000350F-F.",
+ frip_2 = "fc000390F-F.",
+ frim_2 = "fc0003d0F-F.",
+ mffs_1 = "fc00048eF.",
+ -- NYI: mtfsf, mtfsb0, mtfsb1.
+ fctid_2 = "fc00065cF-F.",
+ fctidz_2 = "fc00065eF-F.",
+ fcfid_2 = "fc00069cF-F.",
+
+ -- Primary opcode 4, SPE APU extension:
+ evaddw_3 = "10000200RRR",
+ evaddiw_3 = "10000202RAR~",
+ evsubw_3 = "10000204RRR~",
+ evsubiw_3 = "10000206RAR~",
+ evabs_2 = "10000208RR",
+ evneg_2 = "10000209RR",
+ evextsb_2 = "1000020aRR",
+ evextsh_2 = "1000020bRR",
+ evrndw_2 = "1000020cRR",
+ evcntlzw_2 = "1000020dRR",
+ evcntlsw_2 = "1000020eRR",
+ brinc_3 = "1000020fRRR",
+ evand_3 = "10000211RRR",
+ evandc_3 = "10000212RRR",
+ evxor_3 = "10000216RRR",
+ evor_3 = "10000217RRR",
+ evmr_2 = "10000217RR=",
+ evnor_3 = "10000218RRR",
+ evnot_2 = "10000218RR=",
+ eveqv_3 = "10000219RRR",
+ evorc_3 = "1000021bRRR",
+ evnand_3 = "1000021eRRR",
+ evsrwu_3 = "10000220RRR",
+ evsrws_3 = "10000221RRR",
+ evsrwiu_3 = "10000222RRA",
+ evsrwis_3 = "10000223RRA",
+ evslw_3 = "10000224RRR",
+ evslwi_3 = "10000226RRA",
+ evrlw_3 = "10000228RRR",
+ evsplati_2 = "10000229RS",
+ evrlwi_3 = "1000022aRRA",
+ evsplatfi_2 = "1000022bRS",
+ evmergehi_3 = "1000022cRRR",
+ evmergelo_3 = "1000022dRRR",
+ evcmpgtu_3 = "10000230XRR",
+ evcmpgtu_2 = "10000230-RR",
+ evcmpgts_3 = "10000231XRR",
+ evcmpgts_2 = "10000231-RR",
+ evcmpltu_3 = "10000232XRR",
+ evcmpltu_2 = "10000232-RR",
+ evcmplts_3 = "10000233XRR",
+ evcmplts_2 = "10000233-RR",
+ evcmpeq_3 = "10000234XRR",
+ evcmpeq_2 = "10000234-RR",
+ evsel_4 = "10000278RRRW",
+ evsel_3 = "10000278RRR",
+ evfsadd_3 = "10000280RRR",
+ evfssub_3 = "10000281RRR",
+ evfsabs_2 = "10000284RR",
+ evfsnabs_2 = "10000285RR",
+ evfsneg_2 = "10000286RR",
+ evfsmul_3 = "10000288RRR",
+ evfsdiv_3 = "10000289RRR",
+ evfscmpgt_3 = "1000028cXRR",
+ evfscmpgt_2 = "1000028c-RR",
+ evfscmplt_3 = "1000028dXRR",
+ evfscmplt_2 = "1000028d-RR",
+ evfscmpeq_3 = "1000028eXRR",
+ evfscmpeq_2 = "1000028e-RR",
+ evfscfui_2 = "10000290R-R",
+ evfscfsi_2 = "10000291R-R",
+ evfscfuf_2 = "10000292R-R",
+ evfscfsf_2 = "10000293R-R",
+ evfsctui_2 = "10000294R-R",
+ evfsctsi_2 = "10000295R-R",
+ evfsctuf_2 = "10000296R-R",
+ evfsctsf_2 = "10000297R-R",
+ evfsctuiz_2 = "10000298R-R",
+ evfsctsiz_2 = "1000029aR-R",
+ evfststgt_3 = "1000029cXRR",
+ evfststgt_2 = "1000029c-RR",
+ evfststlt_3 = "1000029dXRR",
+ evfststlt_2 = "1000029d-RR",
+ evfststeq_3 = "1000029eXRR",
+ evfststeq_2 = "1000029e-RR",
+ efsadd_3 = "100002c0RRR",
+ efssub_3 = "100002c1RRR",
+ efsabs_2 = "100002c4RR",
+ efsnabs_2 = "100002c5RR",
+ efsneg_2 = "100002c6RR",
+ efsmul_3 = "100002c8RRR",
+ efsdiv_3 = "100002c9RRR",
+ efscmpgt_3 = "100002ccXRR",
+ efscmpgt_2 = "100002cc-RR",
+ efscmplt_3 = "100002cdXRR",
+ efscmplt_2 = "100002cd-RR",
+ efscmpeq_3 = "100002ceXRR",
+ efscmpeq_2 = "100002ce-RR",
+ efscfd_2 = "100002cfR-R",
+ efscfui_2 = "100002d0R-R",
+ efscfsi_2 = "100002d1R-R",
+ efscfuf_2 = "100002d2R-R",
+ efscfsf_2 = "100002d3R-R",
+ efsctui_2 = "100002d4R-R",
+ efsctsi_2 = "100002d5R-R",
+ efsctuf_2 = "100002d6R-R",
+ efsctsf_2 = "100002d7R-R",
+ efsctuiz_2 = "100002d8R-R",
+ efsctsiz_2 = "100002daR-R",
+ efststgt_3 = "100002dcXRR",
+ efststgt_2 = "100002dc-RR",
+ efststlt_3 = "100002ddXRR",
+ efststlt_2 = "100002dd-RR",
+ efststeq_3 = "100002deXRR",
+ efststeq_2 = "100002de-RR",
+ efdadd_3 = "100002e0RRR",
+ efdsub_3 = "100002e1RRR",
+ efdcfuid_2 = "100002e2R-R",
+ efdcfsid_2 = "100002e3R-R",
+ efdabs_2 = "100002e4RR",
+ efdnabs_2 = "100002e5RR",
+ efdneg_2 = "100002e6RR",
+ efdmul_3 = "100002e8RRR",
+ efddiv_3 = "100002e9RRR",
+ efdctuidz_2 = "100002eaR-R",
+ efdctsidz_2 = "100002ebR-R",
+ efdcmpgt_3 = "100002ecXRR",
+ efdcmpgt_2 = "100002ec-RR",
+ efdcmplt_3 = "100002edXRR",
+ efdcmplt_2 = "100002ed-RR",
+ efdcmpeq_3 = "100002eeXRR",
+ efdcmpeq_2 = "100002ee-RR",
+ efdcfs_2 = "100002efR-R",
+ efdcfui_2 = "100002f0R-R",
+ efdcfsi_2 = "100002f1R-R",
+ efdcfuf_2 = "100002f2R-R",
+ efdcfsf_2 = "100002f3R-R",
+ efdctui_2 = "100002f4R-R",
+ efdctsi_2 = "100002f5R-R",
+ efdctuf_2 = "100002f6R-R",
+ efdctsf_2 = "100002f7R-R",
+ efdctuiz_2 = "100002f8R-R",
+ efdctsiz_2 = "100002faR-R",
+ efdtstgt_3 = "100002fcXRR",
+ efdtstgt_2 = "100002fc-RR",
+ efdtstlt_3 = "100002fdXRR",
+ efdtstlt_2 = "100002fd-RR",
+ efdtsteq_3 = "100002feXRR",
+ efdtsteq_2 = "100002fe-RR",
+ evlddx_3 = "10000300RR0R",
+ evldd_2 = "10000301R8",
+ evldwx_3 = "10000302RR0R",
+ evldw_2 = "10000303R8",
+ evldhx_3 = "10000304RR0R",
+ evldh_2 = "10000305R8",
+ evlwhex_3 = "10000310RR0R",
+ evlwhe_2 = "10000311R4",
+ evlwhoux_3 = "10000314RR0R",
+ evlwhou_2 = "10000315R4",
+ evlwhosx_3 = "10000316RR0R",
+ evlwhos_2 = "10000317R4",
+ evstddx_3 = "10000320RR0R",
+ evstdd_2 = "10000321R8",
+ evstdwx_3 = "10000322RR0R",
+ evstdw_2 = "10000323R8",
+ evstdhx_3 = "10000324RR0R",
+ evstdh_2 = "10000325R8",
+ evstwhex_3 = "10000330RR0R",
+ evstwhe_2 = "10000331R4",
+ evstwhox_3 = "10000334RR0R",
+ evstwho_2 = "10000335R4",
+ evstwwex_3 = "10000338RR0R",
+ evstwwe_2 = "10000339R4",
+ evstwwox_3 = "1000033cRR0R",
+ evstwwo_2 = "1000033dR4",
+ evmhessf_3 = "10000403RRR",
+ evmhossf_3 = "10000407RRR",
+ evmheumi_3 = "10000408RRR",
+ evmhesmi_3 = "10000409RRR",
+ evmhesmf_3 = "1000040bRRR",
+ evmhoumi_3 = "1000040cRRR",
+ evmhosmi_3 = "1000040dRRR",
+ evmhosmf_3 = "1000040fRRR",
+ evmhessfa_3 = "10000423RRR",
+ evmhossfa_3 = "10000427RRR",
+ evmheumia_3 = "10000428RRR",
+ evmhesmia_3 = "10000429RRR",
+ evmhesmfa_3 = "1000042bRRR",
+ evmhoumia_3 = "1000042cRRR",
+ evmhosmia_3 = "1000042dRRR",
+ evmhosmfa_3 = "1000042fRRR",
+ evmwhssf_3 = "10000447RRR",
+ evmwlumi_3 = "10000448RRR",
+ evmwhumi_3 = "1000044cRRR",
+ evmwhsmi_3 = "1000044dRRR",
+ evmwhsmf_3 = "1000044fRRR",
+ evmwssf_3 = "10000453RRR",
+ evmwumi_3 = "10000458RRR",
+ evmwsmi_3 = "10000459RRR",
+ evmwsmf_3 = "1000045bRRR",
+ evmwhssfa_3 = "10000467RRR",
+ evmwlumia_3 = "10000468RRR",
+ evmwhumia_3 = "1000046cRRR",
+ evmwhsmia_3 = "1000046dRRR",
+ evmwhsmfa_3 = "1000046fRRR",
+ evmwssfa_3 = "10000473RRR",
+ evmwumia_3 = "10000478RRR",
+ evmwsmia_3 = "10000479RRR",
+ evmwsmfa_3 = "1000047bRRR",
+ evmra_2 = "100004c4RR",
+ evdivws_3 = "100004c6RRR",
+ evdivwu_3 = "100004c7RRR",
+ evmwssfaa_3 = "10000553RRR",
+ evmwumiaa_3 = "10000558RRR",
+ evmwsmiaa_3 = "10000559RRR",
+ evmwsmfaa_3 = "1000055bRRR",
+ evmwssfan_3 = "100005d3RRR",
+ evmwumian_3 = "100005d8RRR",
+ evmwsmian_3 = "100005d9RRR",
+ evmwsmfan_3 = "100005dbRRR",
+ evmergehilo_3 = "1000022eRRR",
+ evmergelohi_3 = "1000022fRRR",
+ evlhhesplatx_3 = "10000308RR0R",
+ evlhhesplat_2 = "10000309R2",
+ evlhhousplatx_3 = "1000030cRR0R",
+ evlhhousplat_2 = "1000030dR2",
+ evlhhossplatx_3 = "1000030eRR0R",
+ evlhhossplat_2 = "1000030fR2",
+ evlwwsplatx_3 = "10000318RR0R",
+ evlwwsplat_2 = "10000319R4",
+ evlwhsplatx_3 = "1000031cRR0R",
+ evlwhsplat_2 = "1000031dR4",
+ evaddusiaaw_2 = "100004c0RR",
+ evaddssiaaw_2 = "100004c1RR",
+ evsubfusiaaw_2 = "100004c2RR",
+ evsubfssiaaw_2 = "100004c3RR",
+ evaddumiaaw_2 = "100004c8RR",
+ evaddsmiaaw_2 = "100004c9RR",
+ evsubfumiaaw_2 = "100004caRR",
+ evsubfsmiaaw_2 = "100004cbRR",
+ evmheusiaaw_3 = "10000500RRR",
+ evmhessiaaw_3 = "10000501RRR",
+ evmhessfaaw_3 = "10000503RRR",
+ evmhousiaaw_3 = "10000504RRR",
+ evmhossiaaw_3 = "10000505RRR",
+ evmhossfaaw_3 = "10000507RRR",
+ evmheumiaaw_3 = "10000508RRR",
+ evmhesmiaaw_3 = "10000509RRR",
+ evmhesmfaaw_3 = "1000050bRRR",
+ evmhoumiaaw_3 = "1000050cRRR",
+ evmhosmiaaw_3 = "1000050dRRR",
+ evmhosmfaaw_3 = "1000050fRRR",
+ evmhegumiaa_3 = "10000528RRR",
+ evmhegsmiaa_3 = "10000529RRR",
+ evmhegsmfaa_3 = "1000052bRRR",
+ evmhogumiaa_3 = "1000052cRRR",
+ evmhogsmiaa_3 = "1000052dRRR",
+ evmhogsmfaa_3 = "1000052fRRR",
+ evmwlusiaaw_3 = "10000540RRR",
+ evmwlssiaaw_3 = "10000541RRR",
+ evmwlumiaaw_3 = "10000548RRR",
+ evmwlsmiaaw_3 = "10000549RRR",
+ evmheusianw_3 = "10000580RRR",
+ evmhessianw_3 = "10000581RRR",
+ evmhessfanw_3 = "10000583RRR",
+ evmhousianw_3 = "10000584RRR",
+ evmhossianw_3 = "10000585RRR",
+ evmhossfanw_3 = "10000587RRR",
+ evmheumianw_3 = "10000588RRR",
+ evmhesmianw_3 = "10000589RRR",
+ evmhesmfanw_3 = "1000058bRRR",
+ evmhoumianw_3 = "1000058cRRR",
+ evmhosmianw_3 = "1000058dRRR",
+ evmhosmfanw_3 = "1000058fRRR",
+ evmhegumian_3 = "100005a8RRR",
+ evmhegsmian_3 = "100005a9RRR",
+ evmhegsmfan_3 = "100005abRRR",
+ evmhogumian_3 = "100005acRRR",
+ evmhogsmian_3 = "100005adRRR",
+ evmhogsmfan_3 = "100005afRRR",
+ evmwlusianw_3 = "100005c0RRR",
+ evmwlssianw_3 = "100005c1RRR",
+ evmwlumianw_3 = "100005c8RRR",
+ evmwlsmianw_3 = "100005c9RRR",
+
+ -- NYI: some 64 bit PowerPC and Book E instructions:
+ -- rldicl, rldicr, rldic, rldimi, rldcl, rldcr, sradi, 64 bit ext. add/sub,
+ -- extended addressing branches, cache management, loads and stores
+}
+
+-- Add mnemonics for "." variants.
+do
+ local t = {}
+ for k,v in pairs(map_op) do
+ if sub(v, -1) == "." then
+ local v2 = sub(v, 1, 7)..char(byte(v, 8)+1)..sub(v, 9, -2)
+ t[sub(k, 1, -3).."."..sub(k, -2)] = v2
+ end
+ end
+ for k,v in pairs(t) do
+ map_op[k] = v
+ end
+end
+
+-- Add more branch mnemonics.
+for cond,c in pairs(map_cond) do
+ local b1 = "b"..cond
+ local c1 = (c%4)*0x00010000 + (c < 4 and 0x01000000 or 0)
+ -- bX[l]
+ map_op[b1.."_1"] = tohex(0x40800000 + c1).."K"
+ map_op[b1.."y_1"] = tohex(0x40a00000 + c1).."K"
+ map_op[b1.."l_1"] = tohex(0x40800001 + c1).."K"
+ map_op[b1.."_2"] = tohex(0x40800000 + c1).."-XK"
+ map_op[b1.."y_2"] = tohex(0x40a00000 + c1).."-XK"
+ map_op[b1.."l_2"] = tohex(0x40800001 + c1).."-XK"
+ -- bXlr[l]
+ map_op[b1.."lr_0"] = tohex(0x4c800020 + c1)
+ map_op[b1.."lrl_0"] = tohex(0x4c800021 + c1)
+ map_op[b1.."ctr_0"] = tohex(0x4c800420 + c1)
+ map_op[b1.."ctrl_0"] = tohex(0x4c800421 + c1)
+ -- bXctr[l]
+ map_op[b1.."lr_1"] = tohex(0x4c800020 + c1).."-X"
+ map_op[b1.."lrl_1"] = tohex(0x4c800021 + c1).."-X"
+ map_op[b1.."ctr_1"] = tohex(0x4c800420 + c1).."-X"
+ map_op[b1.."ctrl_1"] = tohex(0x4c800421 + c1).."-X"
+end
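+-- Editorial sketch, not part of the upstream source: for cond = "eq" (c = 2)
+-- the loop above derives map_op["beq_1"] = "41820000K" from
+-- 0x40800000 + 2*0x00010000 + 0x01000000, along with beql, beqlr, beqctr
+-- and the cr-qualified two-operand forms.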
+
+------------------------------------------------------------------------------
+
+local function parse_gpr(expr)
+ local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$")
+ local tp = map_type[tname or expr]
+ if tp then
+ local reg = ovreg or tp.reg
+ if not reg then
+ werror("type `"..(tname or expr).."' needs a register override")
+ end
+ expr = reg
+ end
+ local r = match(expr, "^r([1-3]?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 31 then return r, tp end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_fpr(expr)
+ local r = match(expr, "^f([1-3]?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 31 then return r end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_cr(expr)
+ local r = match(expr, "^cr([0-7])$")
+ if r then return tonumber(r) end
+ werror("bad condition register name `"..expr.."'")
+end
+
+local function parse_cond(expr)
+ local r, cond = match(expr, "^4%*cr([0-7])%+(%w%w)$")
+ if r then
+ r = tonumber(r)
+ local c = map_cond[cond]
+ if c and c < 4 then return r*4+c end
+ end
+ werror("bad condition bit name `"..expr.."'")
+end
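+-- Editorial sketch, not part of the upstream source: parse_cond("4*cr7+eq")
+-- returns 7*4 + map_cond.eq = 30, the CR bit number consumed by the "C"
+-- template code; conditions with map_cond values >= 4 (ge, le, ne, ns) are
+-- rejected here because they do not name a single CR bit.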
+
+local function parse_imm(imm, bits, shift, scale, signed)
+ local n = tonumber(imm)
+ if n then
+ if n % 2^scale == 0 then
+ n = n / 2^scale
+ if signed then
+ if n >= 0 then
+ if n < 2^(bits-1) then return n*2^shift end
+ else
+ if n >= -(2^(bits-1))-1 then return (n+2^bits)*2^shift end
+ end
+ else
+ if n >= 0 and n <= 2^bits-1 then return n*2^shift end
+ end
+ end
+ werror("out of range immediate `"..imm.."'")
+ elseif match(imm, "^r([1-3]?[0-9])$") or
+ match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then
+ werror("expected immediate operand, got register")
+ else
+ waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm)
+ return 0
+ end
+end
+
+local function parse_disp(disp)
+ local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$")
+ if imm then
+ local r = parse_gpr(reg)
+ if r == 0 then werror("cannot use r0 in displacement") end
+ return r*65536 + parse_imm(imm, 16, 0, 0, true)
+ end
+ local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$")
+ if reg and tailr ~= "" then
+ local r, tp = parse_gpr(reg)
+ if r == 0 then werror("cannot use r0 in displacement") end
+ if tp then
+ waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr))
+ return r*65536
+ end
+ end
+ werror("bad displacement `"..disp.."'")
+end
+
+local function parse_u5disp(disp, scale)
+ local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$")
+ if imm then
+ local r = parse_gpr(reg)
+ if r == 0 then werror("cannot use r0 in displacement") end
+ return r*65536 + parse_imm(imm, 5, 11, scale, false)
+ end
+ local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$")
+ if reg and tailr ~= "" then
+ local r, tp = parse_gpr(reg)
+ if r == 0 then werror("cannot use r0 in displacement") end
+ if tp then
+ waction("IMM", scale*1024+5*32+11, format(tp.ctypefmt, tailr))
+ return r*65536
+ end
+ end
+ werror("bad displacement `"..disp.."'")
+end
+
+local function parse_label(label, def)
+ local prefix = sub(label, 1, 2)
+ -- =>label (pc label reference)
+ if prefix == "=>" then
+ return "PC", 0, sub(label, 3)
+ end
+ -- ->name (global label reference)
+ if prefix == "->" then
+ return "LG", map_global[sub(label, 3)]
+ end
+ if def then
+ -- [1-9] (local label definition)
+ if match(label, "^[1-9]$") then
+ return "LG", 10+tonumber(label)
+ end
+ else
+ -- [<>][1-9] (local label reference)
+ local dir, lnum = match(label, "^([<>])([1-9])$")
+ if dir then -- Fwd: 1-9, Bkwd: 11-19.
+ return "LG", lnum + (dir == ">" and 0 or 10)
+ end
+ -- extern label (extern label reference)
+ local extname = match(label, "^extern%s+(%S+)$")
+ if extname then
+ return "EXT", map_extern[extname]
+ end
+ end
+ werror("bad label `"..label.."'")
+end
+
+------------------------------------------------------------------------------
+
+-- Handle opcodes defined with template strings.
+map_op[".template__"] = function(params, template, nparams)
+ if not params then return sub(template, 9) end
+ local op = tonumber(sub(template, 1, 8), 16)
+ local n, rs = 1, 26
+
+ -- Limit number of section buffer positions used by a single dasm_put().
+ -- A single opcode needs a maximum of 3 positions (rlwinm).
+ if secpos+3 > maxsecpos then wflush() end
+ local pos = wpos()
+
+ -- Process each character.
+ for p in gmatch(sub(template, 9), ".") do
+ if p == "R" then
+ rs = rs - 5; op = op + parse_gpr(params[n]) * 2^rs; n = n + 1
+ elseif p == "F" then
+ rs = rs - 5; op = op + parse_fpr(params[n]) * 2^rs; n = n + 1
+ elseif p == "A" then
+ rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, false); n = n + 1
+ elseif p == "S" then
+ rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, true); n = n + 1
+ elseif p == "I" then
+ op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1
+ elseif p == "U" then
+ op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1
+ elseif p == "D" then
+ op = op + parse_disp(params[n]); n = n + 1
+ elseif p == "2" then
+ op = op + parse_u5disp(params[n], 1); n = n + 1
+ elseif p == "4" then
+ op = op + parse_u5disp(params[n], 2); n = n + 1
+ elseif p == "8" then
+ op = op + parse_u5disp(params[n], 3); n = n + 1
+ elseif p == "C" then
+ rs = rs - 5; op = op + parse_cond(params[n]) * 2^rs; n = n + 1
+ elseif p == "X" then
+ rs = rs - 5; op = op + parse_cr(params[n]) * 2^(rs+2); n = n + 1
+ elseif p == "W" then
+ op = op + parse_cr(params[n]); n = n + 1
+ elseif p == "G" then
+ op = op + parse_imm(params[n], 8, 12, 0, false); n = n + 1
+ elseif p == "J" or p == "K" then
+ local mode, n, s = parse_label(params[n], false)
+ if p == "K" then n = n + 2048 end
+ waction("REL_"..mode, n, s, 1)
+ n = n + 1
+ elseif p == "0" then
+ local mm = 2^rs
+ local t = op % mm
+ if ((op - t) / mm) % 32 == 0 then werror("cannot use r0") end
+ elseif p == "=" or p == "%" then
+ local mm = 2^(rs + (p == "%" and 5 or 0))
+ local t = ((op - op % mm) / mm) % 32
+ rs = rs - 5
+ op = op + t * 2^rs
+ elseif p == "~" then
+ local mm = 2^rs
+ local t1l = op % mm
+ local t1h = (op - t1l) / mm
+ local t2l = t1h % 32
+ local t2h = (t1h - t2l) / 32
+ local t3l = t2h % 32
+ op = ((t2h - t3l + t2l)*32 + t3l)*mm + t1l
+ elseif p == "-" then
+ rs = rs - 5
+ elseif p == "." then
+ -- Ignored.
+ else
+ assert(false)
+ end
+ end
+ wputpos(pos, op)
+end
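+-- Editorial sketch, not part of the upstream source: for add_3 the template
+-- "7c000214RRR." starts from 0x7c000214 and the three "R" codes place rD,
+-- rA and rB at bits 21, 16 and 11, so `add r3, r4, r5` encodes as
+-- 0x7c000214 + 3*2^21 + 4*2^16 + 5*2^11 = 0x7c642a14; the trailing "."
+-- only marks that a dot (Rc=1) variant was generated automatically above.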
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode to mark the position where the action list is to be emitted.
+map_op[".actionlist_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeactions(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the global enum is to be emitted.
+map_op[".globals_1"] = function(params)
+ if not params then return "prefix" end
+ local prefix = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobals(out, prefix) end)
+end
+
+-- Pseudo-opcode to mark the position where the global names are to be emitted.
+map_op[".globalnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobalnames(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the extern names are to be emitted.
+map_op[".externnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeexternnames(out, name) end)
+end
+
+------------------------------------------------------------------------------
+
+-- Label pseudo-opcode (converted from trailing colon form).
+map_op[".label_1"] = function(params)
+ if not params then return "[1-9] | ->global | =>pcexpr" end
+ if secpos+1 > maxsecpos then wflush() end
+ local mode, n, s = parse_label(params[1], true)
+ if mode == "EXT" then werror("bad label definition") end
+ waction("LABEL_"..mode, n, s, 1)
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcodes for data storage.
+map_op[".long_*"] = function(params)
+ if not params then return "imm..." end
+ for _,p in ipairs(params) do
+ local n = tonumber(p)
+ if not n then werror("bad immediate `"..p.."'") end
+ if n < 0 then n = n + 2^32 end
+ wputw(n)
+ if secpos+2 > maxsecpos then wflush() end
+ end
+end
+
+-- Alignment pseudo-opcode.
+map_op[".align_1"] = function(params)
+ if not params then return "numpow2" end
+ if secpos+1 > maxsecpos then wflush() end
+ local align = tonumber(params[1])
+ if align then
+ local x = align
+ -- Must be a power of 2 in the range (2 ... 256).
+ for i=1,8 do
+ x = x / 2
+ if x == 1 then
+ waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1.
+ return
+ end
+ end
+ end
+ werror("bad alignment")
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode for (primitive) type definitions (map to C types).
+map_op[".type_3"] = function(params, nparams)
+ if not params then
+ return nparams == 2 and "name, ctype" or "name, ctype, reg"
+ end
+ local name, ctype, reg = params[1], params[2], params[3]
+ if not match(name, "^[%a_][%w_]*$") then
+ werror("bad type name `"..name.."'")
+ end
+ local tp = map_type[name]
+ if tp then
+ werror("duplicate type `"..name.."'")
+ end
+ -- Add #type to defines. A bit unclean to put it in map_archdef.
+ map_archdef["#"..name] = "sizeof("..ctype..")"
+ -- Add new type and emit shortcut define.
+ local num = ctypenum + 1
+ map_type[name] = {
+ ctype = ctype,
+ ctypefmt = format("Dt%X(%%s)", num),
+ reg = reg,
+ }
+ wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
+ ctypenum = num
+end
+map_op[".type_2"] = map_op[".type_3"]
+
+-- Dump type definitions.
+local function dumptypes(out, lvl)
+ local t = {}
+ for name in pairs(map_type) do t[#t+1] = name end
+ sort(t)
+ out:write("Type definitions:\n")
+ for _,name in ipairs(t) do
+ local tp = map_type[name]
+ local reg = tp.reg or ""
+ out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Set the current section.
+function _M.section(num)
+ waction("SECTION", num)
+ wflush(true) -- SECTION is a terminal action.
+end
+
+------------------------------------------------------------------------------
+
+-- Dump architecture description.
+function _M.dumparch(out)
+ out:write(format("DynASM %s version %s, released %s\n\n",
+ _info.arch, _info.version, _info.release))
+ dumpactions(out)
+end
+
+-- Dump all user defined elements.
+function _M.dumpdef(out, lvl)
+ dumptypes(out, lvl)
+ dumpglobals(out, lvl)
+ dumpexterns(out, lvl)
+end
+
+------------------------------------------------------------------------------
+
+-- Pass callbacks from/to the DynASM core.
+function _M.passcb(wl, we, wf, ww)
+ wline, werror, wfatal, wwarn = wl, we, wf, ww
+ return wflush
+end
+
+-- Setup the arch-specific module.
+function _M.setup(arch, opt)
+ g_arch, g_opt = arch, opt
+end
+
+-- Merge the core maps and the arch-specific maps.
+function _M.mergemaps(map_coreop, map_def)
+ setmetatable(map_op, { __index = map_coreop })
+ setmetatable(map_def, { __index = map_archdef })
+ return map_op, map_def
+end
+
+return _M
+
+------------------------------------------------------------------------------
+
diff --git a/src/LuaJIT/dynasm/dasm_proto.h b/src/LuaJIT/dynasm/dasm_proto.h
new file mode 100644
index 000000000..30028116d
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_proto.h
@@ -0,0 +1,83 @@
+/*
+** DynASM encoding engine prototypes.
+** Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#ifndef _DASM_PROTO_H
+#define _DASM_PROTO_H
+
+#include <stddef.h>
+#include <stdarg.h>
+
+#define DASM_IDENT "DynASM 1.3.0"
+#define DASM_VERSION 10300 /* 1.3.0 */
+
+#ifndef Dst_DECL
+#define Dst_DECL dasm_State **Dst
+#endif
+
+#ifndef Dst_REF
+#define Dst_REF (*Dst)
+#endif
+
+#ifndef DASM_FDEF
+#define DASM_FDEF extern
+#endif
+
+#ifndef DASM_M_GROW
+#define DASM_M_GROW(ctx, t, p, sz, need) \
+ do { \
+ size_t _sz = (sz), _need = (need); \
+ if (_sz < _need) { \
+ if (_sz < 16) _sz = 16; \
+ while (_sz < _need) _sz += _sz; \
+ (p) = (t *)realloc((p), _sz); \
+ if ((p) == NULL) exit(1); \
+ (sz) = _sz; \
+ } \
+ } while(0)
+#endif
+
+#ifndef DASM_M_FREE
+#define DASM_M_FREE(ctx, p, sz) free(p)
+#endif
+
+/* Internal DynASM encoder state. */
+typedef struct dasm_State dasm_State;
+
+
+/* Initialize and free DynASM state. */
+DASM_FDEF void dasm_init(Dst_DECL, int maxsection);
+DASM_FDEF void dasm_free(Dst_DECL);
+
+/* Setup global array. Must be called before dasm_setup(). */
+DASM_FDEF void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl);
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+DASM_FDEF void dasm_growpc(Dst_DECL, unsigned int maxpc);
+
+/* Setup encoder. */
+DASM_FDEF void dasm_setup(Dst_DECL, const void *actionlist);
+
+/* Feed encoder with actions. Calls are generated by pre-processor. */
+DASM_FDEF void dasm_put(Dst_DECL, int start, ...);
+
+/* Link sections and return the resulting size. */
+DASM_FDEF int dasm_link(Dst_DECL, size_t *szp);
+
+/* Encode sections into buffer. */
+DASM_FDEF int dasm_encode(Dst_DECL, void *buffer);
+
+/* Get PC label offset. */
+DASM_FDEF int dasm_getpclabel(Dst_DECL, unsigned int pc);
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+DASM_FDEF int dasm_checkstep(Dst_DECL, int secmatch);
+#else
+#define dasm_checkstep(a, b) 0
+#endif
+
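+/* Illustrative call sequence (editor's sketch, not part of the upstream
+** header): `actionlist' and `lglabel__MAX' stand in for whatever the DynASM
+** pre-processor emitted for .actionlist and .globals in the .dasc file, and
+** alloc_exec_mem() is a hypothetical helper returning executable memory.
+**
+**   dasm_State *state;
+**   dasm_State **Dst = &state;
+**   void *labels[lglabel__MAX];
+**   dasm_init(Dst, 1);                        (one code section)
+**   dasm_setupglobal(Dst, labels, lglabel__MAX);
+**   dasm_setup(Dst, actionlist);
+**   ...                                       (pre-processed code emits dasm_put() calls)
+**   size_t sz;
+**   dasm_link(Dst, &sz);
+**   void *mem = alloc_exec_mem(sz);
+**   dasm_encode(Dst, mem);
+**   dasm_free(Dst);
+*/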
+
+#endif /* _DASM_PROTO_H */
diff --git a/src/LuaJIT/dynasm/dasm_x64.lua b/src/LuaJIT/dynasm/dasm_x64.lua
new file mode 100644
index 000000000..bae72ac96
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_x64.lua
@@ -0,0 +1,12 @@
+------------------------------------------------------------------------------
+-- DynASM x64 module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- See dynasm.lua for full copyright notice.
+------------------------------------------------------------------------------
+-- This module just sets 64 bit mode for the combined x86/x64 module.
+-- All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+x64 = true -- Using a global is an ugly, but effective solution.
+return require("dasm_x86")
diff --git a/src/LuaJIT/dynasm/dasm_x86.h b/src/LuaJIT/dynasm/dasm_x86.h
new file mode 100644
index 000000000..7c6dcd3d4
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_x86.h
@@ -0,0 +1,470 @@
+/*
+** DynASM x86 encoding engine.
+** Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define DASM_ARCH "x86"
+
+#ifndef DASM_EXTERN
+#define DASM_EXTERN(a,b,c,d) 0
+#endif
+
+/* Action definitions. DASM_STOP must be 255. */
+enum {
+ DASM_DISP = 233,
+ DASM_IMM_S, DASM_IMM_B, DASM_IMM_W, DASM_IMM_D, DASM_IMM_WB, DASM_IMM_DB,
+ DASM_VREG, DASM_SPACE, DASM_SETLABEL, DASM_REL_A, DASM_REL_LG, DASM_REL_PC,
+ DASM_IMM_LG, DASM_IMM_PC, DASM_LABEL_LG, DASM_LABEL_PC, DASM_ALIGN,
+ DASM_EXTERN, DASM_ESC, DASM_MARK, DASM_SECTION, DASM_STOP
+};
+
+/* Maximum number of section buffer positions for a single dasm_put() call. */
+#define DASM_MAXSECPOS 25
+
+/* DynASM encoder status codes. Action list offset or number are or'ed in. */
+#define DASM_S_OK 0x00000000
+#define DASM_S_NOMEM 0x01000000
+#define DASM_S_PHASE 0x02000000
+#define DASM_S_MATCH_SEC 0x03000000
+#define DASM_S_RANGE_I 0x11000000
+#define DASM_S_RANGE_SEC 0x12000000
+#define DASM_S_RANGE_LG 0x13000000
+#define DASM_S_RANGE_PC 0x14000000
+#define DASM_S_RANGE_VREG 0x15000000
+#define DASM_S_UNDEF_L 0x21000000
+#define DASM_S_UNDEF_PC 0x22000000
+
+/* Macros to convert positions (8 bit section + 24 bit index). */
+#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
+#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
+#define DASM_SEC2POS(sec) ((sec)<<24)
+#define DASM_POS2SEC(pos) ((pos)>>24)
+#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
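+/* For example (editor's note): pos = 0x02000005 decodes to section 2
+** (DASM_POS2SEC) and index 5 (DASM_POS2IDX); DASM_SEC2POS(2) == 0x02000000. */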
+
+/* Action list type. */
+typedef const unsigned char *dasm_ActList;
+
+/* Per-section structure. */
+typedef struct dasm_Section {
+ int *rbuf; /* Biased buffer pointer (negative section bias). */
+ int *buf; /* True buffer pointer. */
+ size_t bsize; /* Buffer size in bytes. */
+ int pos; /* Biased buffer position. */
+ int epos; /* End of biased buffer position - max single put. */
+ int ofs; /* Byte offset into section. */
+} dasm_Section;
+
+/* Core structure holding the DynASM encoding state. */
+struct dasm_State {
+ size_t psize; /* Allocated size of this structure. */
+ dasm_ActList actionlist; /* Current actionlist pointer. */
+ int *lglabels; /* Local/global chain/pos ptrs. */
+ size_t lgsize;
+ int *pclabels; /* PC label chains/pos ptrs. */
+ size_t pcsize;
+ void **globals; /* Array of globals (bias -10). */
+ dasm_Section *section; /* Pointer to active section. */
+ size_t codesize; /* Total size of all code sections. */
+ int maxsection; /* 0 <= sectionidx < maxsection. */
+ int status; /* Status code. */
+ dasm_Section sections[1]; /* All sections. Alloc-extended. */
+};
+
+/* The size of the core structure depends on the max. number of sections. */
+#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
+
+
+/* Initialize DynASM state. */
+void dasm_init(Dst_DECL, int maxsection)
+{
+ dasm_State *D;
+ size_t psz = 0;
+ int i;
+ Dst_REF = NULL;
+ DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
+ D = Dst_REF;
+ D->psize = psz;
+ D->lglabels = NULL;
+ D->lgsize = 0;
+ D->pclabels = NULL;
+ D->pcsize = 0;
+ D->globals = NULL;
+ D->maxsection = maxsection;
+ for (i = 0; i < maxsection; i++) {
+ D->sections[i].buf = NULL; /* Need this for pass3. */
+ D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
+ D->sections[i].bsize = 0;
+ D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
+ }
+}
+
+/* Free DynASM state. */
+void dasm_free(Dst_DECL)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ for (i = 0; i < D->maxsection; i++)
+ if (D->sections[i].buf)
+ DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
+ if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
+ if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
+ DASM_M_FREE(Dst, D, D->psize);
+}
+
+/* Setup global label array. Must be called before dasm_setup(). */
+void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
+{
+ dasm_State *D = Dst_REF;
+ D->globals = gl - 10; /* Negative bias to compensate for locals. */
+ DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
+}
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+void dasm_growpc(Dst_DECL, unsigned int maxpc)
+{
+ dasm_State *D = Dst_REF;
+ size_t osz = D->pcsize;
+ DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
+ memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
+}
+
+/* Setup encoder. */
+void dasm_setup(Dst_DECL, const void *actionlist)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ D->actionlist = (dasm_ActList)actionlist;
+ D->status = DASM_S_OK;
+ D->section = &D->sections[0];
+ memset((void *)D->lglabels, 0, D->lgsize);
+ if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
+ for (i = 0; i < D->maxsection; i++) {
+ D->sections[i].pos = DASM_SEC2POS(i);
+ D->sections[i].ofs = 0;
+ }
+}
+
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) { \
+ D->status = DASM_S_##st|(int)(p-D->actionlist-1); return; } } while (0)
+#define CKPL(kind, st) \
+ do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
+ D->status=DASM_S_RANGE_##st|(int)(p-D->actionlist-1); return; } } while (0)
+#else
+#define CK(x, st) ((void)0)
+#define CKPL(kind, st) ((void)0)
+#endif
+
+/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
+void dasm_put(Dst_DECL, int start, ...)
+{
+ va_list ap;
+ dasm_State *D = Dst_REF;
+ dasm_ActList p = D->actionlist + start;
+ dasm_Section *sec = D->section;
+ int pos = sec->pos, ofs = sec->ofs, mrm = 4;
+ int *b;
+
+ if (pos >= sec->epos) {
+ DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
+ sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
+ sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
+ sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
+ }
+
+ b = sec->rbuf;
+ b[pos++] = start;
+
+ va_start(ap, start);
+ while (1) {
+ int action = *p++;
+ if (action < DASM_DISP) {
+ ofs++;
+ } else if (action <= DASM_REL_A) {
+ int n = va_arg(ap, int);
+ b[pos++] = n;
+ switch (action) {
+ case DASM_DISP:
+ if (n == 0) { if ((mrm&7) == 4) mrm = p[-2]; if ((mrm&7) != 5) break; }
+ case DASM_IMM_DB: if (((n+128)&-256) == 0) goto ob;
+ case DASM_REL_A: /* Assumes ptrdiff_t is int. !x64 */
+ case DASM_IMM_D: ofs += 4; break;
+ case DASM_IMM_S: CK(((n+128)&-256) == 0, RANGE_I); goto ob;
+ case DASM_IMM_B: CK((n&-256) == 0, RANGE_I); ob: ofs++; break;
+ case DASM_IMM_WB: if (((n+128)&-256) == 0) goto ob;
+ case DASM_IMM_W: CK((n&-65536) == 0, RANGE_I); ofs += 2; break;
+ case DASM_SPACE: p++; ofs += n; break;
+ case DASM_SETLABEL: b[pos-2] = -0x40000000; break; /* Neg. label ofs. */
+ case DASM_VREG: CK((n&-8) == 0 && (n != 4 || (*p&1) == 0), RANGE_VREG);
+ if (*p++ == 1 && *p == DASM_DISP) mrm = n; continue;
+ }
+ mrm = 4;
+ } else {
+ int *pl, n;
+ switch (action) {
+ case DASM_REL_LG:
+ case DASM_IMM_LG:
+ n = *p++; pl = D->lglabels + n;
+ if (n <= 246) { CKPL(lg, LG); goto putrel; } /* Bkwd rel or global. */
+ pl -= 246; n = *pl;
+ if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
+ goto linkrel;
+ case DASM_REL_PC:
+ case DASM_IMM_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC);
+ putrel:
+ n = *pl;
+ if (n < 0) { /* Label exists. Get label pos and store it. */
+ b[pos] = -n;
+ } else {
+ linkrel:
+ b[pos] = n; /* Else link to rel chain, anchored at label. */
+ *pl = pos;
+ }
+ pos++;
+ ofs += 4; /* Maximum offset needed. */
+ if (action == DASM_REL_LG || action == DASM_REL_PC)
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_LABEL_LG: pl = D->lglabels + *p++; CKPL(lg, LG); goto putlabel;
+ case DASM_LABEL_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC);
+ putlabel:
+ n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; }
+ *pl = -pos; /* Label exists now. */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_ALIGN:
+ ofs += *p++; /* Maximum alignment needed (arg is 2**n-1). */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_EXTERN: p += 2; ofs += 4; break;
+ case DASM_ESC: p++; ofs++; break;
+ case DASM_MARK: mrm = p[-2]; break;
+ case DASM_SECTION:
+ n = *p; CK(n < D->maxsection, RANGE_SEC); D->section = &D->sections[n];
+ case DASM_STOP: goto stop;
+ }
+ }
+ }
+stop:
+ va_end(ap);
+ sec->pos = pos;
+ sec->ofs = ofs;
+}
+#undef CK
+
+/* Pass 2: Link sections, shrink branches/aligns, fix label offsets. */
+int dasm_link(Dst_DECL, size_t *szp)
+{
+ dasm_State *D = Dst_REF;
+ int secnum;
+ int ofs = 0;
+
+#ifdef DASM_CHECKS
+ *szp = 0;
+ if (D->status != DASM_S_OK) return D->status;
+ {
+ int pc;
+ for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
+ if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
+ }
+#endif
+
+ { /* Handle globals not defined in this translation unit. */
+ int idx;
+ for (idx = 10; idx*sizeof(int) < D->lgsize; idx++) {
+ int n = D->lglabels[idx];
+ /* Undefined label: Collapse rel chain and replace with marker (< 0). */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
+ }
+ }
+
+ /* Combine all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->rbuf;
+ int pos = DASM_SEC2POS(secnum);
+ int lastpos = sec->pos;
+
+ while (pos != lastpos) {
+ dasm_ActList p = D->actionlist + b[pos++];
+ while (1) {
+ int op, action = *p++;
+ switch (action) {
+ case DASM_REL_LG: p++; op = p[-3]; goto rel_pc;
+ case DASM_REL_PC: op = p[-2]; rel_pc: {
+ int shrink = op == 0xe9 ? 3 : ((op&0xf0) == 0x80 ? 4 : 0);
+ if (shrink) { /* Shrinkable branch opcode? */
+ int lofs, lpos = b[pos];
+ if (lpos < 0) goto noshrink; /* Ext global? */
+ lofs = *DASM_POS2PTR(D, lpos);
+ if (lpos > pos) { /* Fwd label: add cumulative section offsets. */
+ int i;
+ for (i = secnum; i < DASM_POS2SEC(lpos); i++)
+ lofs += D->sections[i].ofs;
+ } else {
+ lofs -= ofs; /* Bkwd label: unfix offset. */
+ }
+ lofs -= b[pos+1]; /* Short branch ok? */
+ if (lofs >= -128-shrink && lofs <= 127) ofs -= shrink; /* Yes. */
+ else { noshrink: shrink = 0; } /* No, cannot shrink op. */
+ }
+ b[pos+1] = shrink;
+ pos += 2;
+ break;
+ }
+ case DASM_SPACE: case DASM_IMM_LG: case DASM_VREG: p++;
+ case DASM_DISP: case DASM_IMM_S: case DASM_IMM_B: case DASM_IMM_W:
+ case DASM_IMM_D: case DASM_IMM_WB: case DASM_IMM_DB:
+ case DASM_SETLABEL: case DASM_REL_A: case DASM_IMM_PC: pos++; break;
+ case DASM_LABEL_LG: p++;
+ case DASM_LABEL_PC: b[pos++] += ofs; break; /* Fix label offset. */
+ case DASM_ALIGN: ofs -= (b[pos++]+ofs)&*p++; break; /* Adjust ofs. */
+ case DASM_EXTERN: p += 2; break;
+ case DASM_ESC: p++; break;
+ case DASM_MARK: break;
+ case DASM_SECTION: case DASM_STOP: goto stop;
+ }
+ }
+ stop: (void)0;
+ }
+ ofs += sec->ofs; /* Next section starts right after current section. */
+ }
+
+ D->codesize = ofs; /* Total size of all code sections */
+ *szp = ofs;
+ return DASM_S_OK;
+}
+
+#define dasmb(x) *cp++ = (unsigned char)(x)
+#ifndef DASM_ALIGNED_WRITES
+#define dasmw(x) \
+ do { *((unsigned short *)cp) = (unsigned short)(x); cp+=2; } while (0)
+#define dasmd(x) \
+ do { *((unsigned int *)cp) = (unsigned int)(x); cp+=4; } while (0)
+#else
+#define dasmw(x) do { dasmb(x); dasmb((x)>>8); } while (0)
+#define dasmd(x) do { dasmw(x); dasmw((x)>>16); } while (0)
+#endif
+
+/* Pass 3: Encode sections. */
+int dasm_encode(Dst_DECL, void *buffer)
+{
+ dasm_State *D = Dst_REF;
+ unsigned char *base = (unsigned char *)buffer;
+ unsigned char *cp = base;
+ int secnum;
+
+ /* Encode all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->buf;
+ int *endb = sec->rbuf + sec->pos;
+
+ while (b != endb) {
+ dasm_ActList p = D->actionlist + *b++;
+ unsigned char *mark = NULL;
+ while (1) {
+ int action = *p++;
+ int n = (action >= DASM_DISP && action <= DASM_ALIGN) ? *b++ : 0;
+ switch (action) {
+ case DASM_DISP: if (!mark) mark = cp; {
+ unsigned char *mm = mark;
+ if (*p != DASM_IMM_DB && *p != DASM_IMM_WB) mark = NULL;
+ if (n == 0) { int mrm = mm[-1]&7; if (mrm == 4) mrm = mm[0]&7;
+ if (mrm != 5) { mm[-1] -= 0x80; break; } }
+ if (((n+128) & -256) != 0) goto wd; else mm[-1] -= 0x40;
+ }
+ case DASM_IMM_S: case DASM_IMM_B: wb: dasmb(n); break;
+ case DASM_IMM_DB: if (((n+128)&-256) == 0) {
+ db: if (!mark) mark = cp; mark[-2] += 2; mark = NULL; goto wb;
+ } else mark = NULL;
+ case DASM_IMM_D: wd: dasmd(n); break;
+ case DASM_IMM_WB: if (((n+128)&-256) == 0) goto db; else mark = NULL;
+ case DASM_IMM_W: dasmw(n); break;
+ case DASM_VREG: { int t = *p++; if (t >= 2) n<<=3; cp[-1] |= n; break; }
+ case DASM_REL_LG: p++; if (n >= 0) goto rel_pc;
+ b++; n = (int)(ptrdiff_t)D->globals[-n];
+ case DASM_REL_A: rel_a: n -= (int)(ptrdiff_t)(cp+4); goto wd; /* !x64 */
+ case DASM_REL_PC: rel_pc: {
+ int shrink = *b++;
+ int *pb = DASM_POS2PTR(D, n); if (*pb < 0) { n = pb[1]; goto rel_a; }
+ n = *pb - ((int)(cp-base) + 4-shrink);
+ if (shrink == 0) goto wd;
+ if (shrink == 4) { cp--; cp[-1] = *cp-0x10; } else cp[-1] = 0xeb;
+ goto wb;
+ }
+ case DASM_IMM_LG:
+ p++; if (n < 0) { n = (int)(ptrdiff_t)D->globals[-n]; goto wd; }
+ case DASM_IMM_PC: {
+ int *pb = DASM_POS2PTR(D, n);
+ n = *pb < 0 ? pb[1] : (*pb + (int)(ptrdiff_t)base);
+ goto wd;
+ }
+ case DASM_LABEL_LG: {
+ int idx = *p++;
+ if (idx >= 10)
+ D->globals[idx] = (void *)(base + (*p == DASM_SETLABEL ? *b : n));
+ break;
+ }
+ case DASM_LABEL_PC: case DASM_SETLABEL: break;
+ case DASM_SPACE: { int fill = *p++; while (n--) *cp++ = fill; break; }
+ case DASM_ALIGN:
+ n = *p++;
+ while (((cp-base) & n)) *cp++ = 0x90; /* nop */
+ break;
+ case DASM_EXTERN: n = DASM_EXTERN(Dst, cp, p[1], *p); p += 2; goto wd;
+ case DASM_MARK: mark = cp; break;
+ case DASM_ESC: action = *p++;
+ default: *cp++ = action; break;
+ case DASM_SECTION: case DASM_STOP: goto stop;
+ }
+ }
+ stop: (void)0;
+ }
+ }
+
+ if (base + D->codesize != cp) /* Check for phase errors. */
+ return DASM_S_PHASE;
+ return DASM_S_OK;
+}
+
+/* Get PC label offset. */
+int dasm_getpclabel(Dst_DECL, unsigned int pc)
+{
+ dasm_State *D = Dst_REF;
+ if (pc*sizeof(int) < D->pcsize) {
+ int pos = D->pclabels[pc];
+ if (pos < 0) return *DASM_POS2PTR(D, -pos);
+ if (pos > 0) return -1; /* Undefined. */
+ }
+ return -2; /* Unused or out of range. */
+}
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+int dasm_checkstep(Dst_DECL, int secmatch)
+{
+ dasm_State *D = Dst_REF;
+ if (D->status == DASM_S_OK) {
+ int i;
+ for (i = 1; i <= 9; i++) {
+ if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_L|i; break; }
+ D->lglabels[i] = 0;
+ }
+ }
+ if (D->status == DASM_S_OK && secmatch >= 0 &&
+ D->section != &D->sections[secmatch])
+ D->status = DASM_S_MATCH_SEC|(int)(D->section-D->sections);
+ return D->status;
+}
+#endif
+
diff --git a/src/LuaJIT/dynasm/dasm_x86.lua b/src/LuaJIT/dynasm/dasm_x86.lua
new file mode 100644
index 000000000..3bebb83cf
--- /dev/null
+++ b/src/LuaJIT/dynasm/dasm_x86.lua
@@ -0,0 +1,1931 @@
+------------------------------------------------------------------------------
+-- DynASM x86/x64 module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- See dynasm.lua for full copyright notice.
+------------------------------------------------------------------------------
+
+local x64 = x64
+
+-- Module information:
+local _info = {
+ arch = x64 and "x64" or "x86",
+ description = "DynASM x86/x64 module",
+ version = "1.3.0",
+ vernum = 10300,
+ release = "2011-05-05",
+ author = "Mike Pall",
+ license = "MIT",
+}
+
+-- Exported glue functions for the arch-specific module.
+local _M = { _info = _info }
+
+-- Cache library functions.
+local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
+local assert, unpack, setmetatable = assert, unpack or table.unpack, setmetatable
+local _s = string
+local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
+local find, match, gmatch, gsub = _s.find, _s.match, _s.gmatch, _s.gsub
+local concat, sort = table.concat, table.sort
+
+-- Inherited tables and callbacks.
+local g_opt, g_arch
+local wline, werror, wfatal, wwarn
+
+-- Action name list.
+-- CHECK: Keep this in sync with the C code!
+local action_names = {
+ -- int arg, 1 buffer pos:
+ "DISP", "IMM_S", "IMM_B", "IMM_W", "IMM_D", "IMM_WB", "IMM_DB",
+ -- action arg (1 byte), int arg, 1 buffer pos (reg/num):
+ "VREG", "SPACE", -- !x64: VREG support NYI.
+ -- ptrdiff_t arg, 1 buffer pos (address): !x64
+ "SETLABEL", "REL_A",
+ -- action arg (1 byte) or int arg, 2 buffer pos (link, offset):
+ "REL_LG", "REL_PC",
+ -- action arg (1 byte) or int arg, 1 buffer pos (link):
+ "IMM_LG", "IMM_PC",
+ -- action arg (1 byte) or int arg, 1 buffer pos (offset):
+ "LABEL_LG", "LABEL_PC",
+ -- action arg (1 byte), 1 buffer pos (offset):
+ "ALIGN",
+ -- action args (2 bytes), no buffer pos.
+ "EXTERN",
+ -- action arg (1 byte), no buffer pos.
+ "ESC",
+ -- no action arg, no buffer pos.
+ "MARK",
+ -- action arg (1 byte), no buffer pos, terminal action:
+ "SECTION",
+ -- no args, no buffer pos, terminal action:
+ "STOP"
+}
+
+-- Maximum number of section buffer positions for dasm_put().
+-- CHECK: Keep this in sync with the C code!
+local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
+
+-- Action name -> action number (dynamically generated below).
+local map_action = {}
+-- First action number. Everything below does not need to be escaped.
+local actfirst = 256-#action_names
+
+-- Action list buffer and string (only used to remove dupes).
+local actlist = {}
+local actstr = ""
+
+-- Argument list for next dasm_put(). Start with offset 0 into action list.
+local actargs = { 0 }
+
+-- Current number of section buffer positions for dasm_put().
+local secpos = 1
+
+------------------------------------------------------------------------------
+
+-- Compute action numbers for action names.
+for n,name in ipairs(action_names) do
+ local num = actfirst + n - 1
+ map_action[name] = num
+end
+
+-- Dump action names and numbers.
+local function dumpactions(out)
+ out:write("DynASM encoding engine action codes:\n")
+ for n,name in ipairs(action_names) do
+ local num = map_action[name]
+ out:write(format(" %-10s %02X %d\n", name, num, num))
+ end
+ out:write("\n")
+end
+
+-- Write action list buffer as a huge static C array.
+local function writeactions(out, name)
+ local nn = #actlist
+ local last = actlist[nn] or 255
+ actlist[nn] = nil -- Remove last byte.
+ if nn == 0 then nn = 1 end
+ out:write("static const unsigned char ", name, "[", nn, "] = {\n")
+ local s = " "
+ for n,b in ipairs(actlist) do
+ s = s..b..","
+ if #s >= 75 then
+ assert(out:write(s, "\n"))
+ s = " "
+ end
+ end
+ out:write(s, last, "\n};\n\n") -- Add last byte back.
+end
+
+------------------------------------------------------------------------------
+
+-- Add byte to action list.
+local function wputxb(n)
+ assert(n >= 0 and n <= 255 and n % 1 == 0, "byte out of range")
+ actlist[#actlist+1] = n
+end
+
+-- Add action to list with optional arg. Advance buffer pos, too.
+local function waction(action, a, num)
+ wputxb(assert(map_action[action], "bad action name `"..action.."'"))
+ if a then actargs[#actargs+1] = a end
+ if a or num then secpos = secpos + (num or 1) end
+end
+
+-- Add call to embedded DynASM C code.
+local function wcall(func, args)
+ wline(format("dasm_%s(Dst, %s);", func, concat(args, ", ")), true)
+end
+
+-- Delete duplicate action list chunks. A tad slow, but so what.
+local function dedupechunk(offset)
+ local al, as = actlist, actstr
+ local chunk = char(unpack(al, offset+1, #al))
+ local orig = find(as, chunk, 1, true)
+ if orig then
+ actargs[1] = orig-1 -- Replace with original offset.
+ for i=offset+1,#al do al[i] = nil end -- Kill dupe.
+ else
+ actstr = as..chunk
+ end
+end
+
+-- Flush action list (intervening C code or buffer pos overflow).
+local function wflush(term)
+ local offset = actargs[1]
+ if #actlist == offset then return end -- Nothing to flush.
+ if not term then waction("STOP") end -- Terminate action list.
+ dedupechunk(offset)
+ wcall("put", actargs) -- Add call to dasm_put().
+ actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
+ secpos = 1 -- The actionlist offset occupies a buffer position, too.
+end
+
+-- Put escaped byte.
+local function wputb(n)
+ if n >= actfirst then waction("ESC") end -- Need to escape byte.
+ wputxb(n)
+end
+
+------------------------------------------------------------------------------
+
+-- Global label name -> global label number. With auto assignment on 1st use.
+local next_global = 10
+local map_global = setmetatable({}, { __index = function(t, name)
+ if not match(name, "^[%a_][%w_@]*$") then werror("bad global label") end
+ local n = next_global
+ if n > 246 then werror("too many global labels") end
+ next_global = n + 1
+ t[name] = n
+ return n
+end})
+
+-- Dump global labels.
+local function dumpglobals(out, lvl)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("Global labels:\n")
+ for i=10,next_global-1 do
+ out:write(format(" %s\n", t[i]))
+ end
+ out:write("\n")
+end
+
+-- Write global label enum.
+local function writeglobals(out, prefix)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("enum {\n")
+ for i=10,next_global-1 do
+ out:write(" ", prefix, gsub(t[i], "@.*", ""), ",\n")
+ end
+ out:write(" ", prefix, "_MAX\n};\n")
+end
+
+-- Write global label names.
+local function writeglobalnames(out, name)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=10,next_global-1 do
+ out:write(" \"", t[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Extern label name -> extern label number. With auto assignment on 1st use.
+local next_extern = -1
+local map_extern = setmetatable({}, { __index = function(t, name)
+ -- No restrictions on the name for now.
+ local n = next_extern
+ if n < -256 then werror("too many extern labels") end
+ next_extern = n - 1
+ t[name] = n
+ return n
+end})
+
+-- Dump extern labels.
+local function dumpexterns(out, lvl)
+ local t = {}
+ for name, n in pairs(map_extern) do t[-n] = name end
+ out:write("Extern labels:\n")
+ for i=1,-next_extern-1 do
+ out:write(format(" %s\n", t[i]))
+ end
+ out:write("\n")
+end
+
+-- Write extern label names.
+local function writeexternnames(out, name)
+ local t = {}
+ for name, n in pairs(map_extern) do t[-n] = name end
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=1,-next_extern-1 do
+ out:write(" \"", t[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Arch-specific maps.
+local map_archdef = {} -- Ext. register name -> int. name.
+local map_reg_rev = {} -- Int. register name -> ext. name.
+local map_reg_num = {} -- Int. register name -> register number.
+local map_reg_opsize = {} -- Int. register name -> operand size.
+local map_reg_valid_base = {} -- Int. register name -> valid base register?
+local map_reg_valid_index = {} -- Int. register name -> valid index register?
+local map_reg_needrex = {} -- Int. register name -> need rex vs. no rex.
+local reg_list = {} -- Canonical list of int. register names.
+
+local map_type = {} -- Type name -> { ctype, reg }
+local ctypenum = 0 -- Type number (for _PTx macros).
+
+local addrsize = x64 and "q" or "d" -- Size for address operands.
+
+-- Helper functions to fill register maps.
+local function mkrmap(sz, cl, names)
+ local cname = format("@%s", sz)
+ reg_list[#reg_list+1] = cname
+ map_archdef[cl] = cname
+ map_reg_rev[cname] = cl
+ map_reg_num[cname] = -1
+ map_reg_opsize[cname] = sz
+ if sz == addrsize or sz == "d" then
+ map_reg_valid_base[cname] = true
+ map_reg_valid_index[cname] = true
+ end
+ if names then
+ for n,name in ipairs(names) do
+ local iname = format("@%s%x", sz, n-1)
+ reg_list[#reg_list+1] = iname
+ map_archdef[name] = iname
+ map_reg_rev[iname] = name
+ map_reg_num[iname] = n-1
+ map_reg_opsize[iname] = sz
+ if sz == "b" and n > 4 then map_reg_needrex[iname] = false end
+ if sz == addrsize or sz == "d" then
+ map_reg_valid_base[iname] = true
+ map_reg_valid_index[iname] = true
+ end
+ end
+ end
+ for i=0,(x64 and sz ~= "f") and 15 or 7 do
+ local needrex = sz == "b" and i > 3
+ local iname = format("@%s%x%s", sz, i, needrex and "R" or "")
+ if needrex then map_reg_needrex[iname] = true end
+ local name
+ if sz == "o" then name = format("xmm%d", i)
+ elseif sz == "f" then name = format("st%d", i)
+ else name = format("r%d%s", i, sz == addrsize and "" or sz) end
+ map_archdef[name] = iname
+ if not map_reg_rev[iname] then
+ reg_list[#reg_list+1] = iname
+ map_reg_rev[iname] = name
+ map_reg_num[iname] = i
+ map_reg_opsize[iname] = sz
+ if sz == addrsize or sz == "d" then
+ map_reg_valid_base[iname] = true
+ map_reg_valid_index[iname] = true
+ end
+ end
+ end
+ reg_list[#reg_list+1] = ""
+end
+
+-- Integer registers (qword, dword, word and byte sized).
+if x64 then
+ mkrmap("q", "Rq", {"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi"})
+end
+mkrmap("d", "Rd", {"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"})
+mkrmap("w", "Rw", {"ax", "cx", "dx", "bx", "sp", "bp", "si", "di"})
+mkrmap("b", "Rb", {"al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"})
+map_reg_valid_index[map_archdef.esp] = false
+if x64 then map_reg_valid_index[map_archdef.rsp] = false end
+map_archdef["Ra"] = "@"..addrsize
+
+-- FP registers (internally tword sized, but use "f" as operand size).
+mkrmap("f", "Rf")
+
+-- SSE registers (oword sized, but qword and dword accessible).
+mkrmap("o", "xmm")
+
+-- Operand size prefixes to codes.
+local map_opsize = {
+ byte = "b", word = "w", dword = "d", qword = "q", oword = "o", tword = "t",
+ aword = addrsize,
+}
+
+-- Operand size code to number.
+local map_opsizenum = {
+ b = 1, w = 2, d = 4, q = 8, o = 16, t = 10,
+}
+
+-- Operand size code to name.
+local map_opsizename = {
+ b = "byte", w = "word", d = "dword", q = "qword", o = "oword", t = "tword",
+ f = "fpword",
+}
+
+-- Valid index register scale factors.
+local map_xsc = {
+ ["1"] = 0, ["2"] = 1, ["4"] = 2, ["8"] = 3,
+}
+
+-- Condition codes.
+local map_cc = {
+ o = 0, no = 1, b = 2, nb = 3, e = 4, ne = 5, be = 6, nbe = 7,
+ s = 8, ns = 9, p = 10, np = 11, l = 12, nl = 13, le = 14, nle = 15,
+ c = 2, nae = 2, nc = 3, ae = 3, z = 4, nz = 5, na = 6, a = 7,
+ pe = 10, po = 11, nge = 12, ge = 13, ng = 14, g = 15,
+}
+
+
+-- Reverse defines for registers.
+function _M.revdef(s)
+ return gsub(s, "@%w+", map_reg_rev)
+end
+
+-- Dump register names and numbers
+local function dumpregs(out)
+ out:write("Register names, sizes and internal numbers:\n")
+ for _,reg in ipairs(reg_list) do
+ if reg == "" then
+ out:write("\n")
+ else
+ local name = map_reg_rev[reg]
+ local num = map_reg_num[reg]
+ local opsize = map_opsizename[map_reg_opsize[reg]]
+ out:write(format(" %-5s %-8s %s\n", name, opsize,
+ num < 0 and "(variable)" or num))
+ end
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Put action for label arg (IMM_LG, IMM_PC, REL_LG, REL_PC).
+local function wputlabel(aprefix, imm, num)
+ if type(imm) == "number" then
+ if imm < 0 then
+ waction("EXTERN")
+ wputxb(aprefix == "IMM_" and 0 or 1)
+ imm = -imm-1
+ else
+ waction(aprefix.."LG", nil, num);
+ end
+ wputxb(imm)
+ else
+ waction(aprefix.."PC", imm, num)
+ end
+end
+
+-- Put signed byte or arg.
+local function wputsbarg(n)
+ if type(n) == "number" then
+ if n < -128 or n > 127 then
+ werror("signed immediate byte out of range")
+ end
+ if n < 0 then n = n + 256 end
+ wputb(n)
+ else waction("IMM_S", n) end
+end
+
+-- Put unsigned byte or arg.
+local function wputbarg(n)
+ if type(n) == "number" then
+ if n < 0 or n > 255 then
+ werror("unsigned immediate byte out of range")
+ end
+ wputb(n)
+ else waction("IMM_B", n) end
+end
+
+-- Put unsigned word or arg.
+local function wputwarg(n)
+ if type(n) == "number" then
+ if n < 0 or n > 65535 then
+ werror("unsigned immediate word out of range")
+ end
+ local r = n%256; n = (n-r)/256; wputb(r); wputb(n);
+ else waction("IMM_W", n) end
+end
+
+-- Put signed or unsigned dword or arg.
+local function wputdarg(n)
+ local tn = type(n)
+ if tn == "number" then
+ if n < 0 then n = n + 4294967296 end
+ local r = n%256; n = (n-r)/256; wputb(r);
+ r = n%256; n = (n-r)/256; wputb(r);
+ r = n%256; n = (n-r)/256; wputb(r); wputb(n);
+ elseif tn == "table" then
+ wputlabel("IMM_", n[1], 1)
+ else
+ waction("IMM_D", n)
+ end
+end
+
+-- Put operand-size dependent number or arg (defaults to dword).
+local function wputszarg(sz, n)
+ if not sz or sz == "d" or sz == "q" then wputdarg(n)
+ elseif sz == "w" then wputwarg(n)
+ elseif sz == "b" then wputbarg(n)
+ elseif sz == "s" then wputsbarg(n)
+ else werror("bad operand size") end
+end
+
+-- Put multi-byte opcode with operand-size dependent modifications.
+local function wputop(sz, op, rex)
+ local r
+ if rex ~= 0 and not x64 then werror("bad operand size") end
+ if sz == "w" then wputb(102) end
+ -- Needs >32 bit numbers, but only for crc32 eax, word [ebx]
+ if op >= 4294967296 then r = op%4294967296 wputb((op-r)/4294967296) op = r end
+ if op >= 16777216 then r = op % 16777216 wputb((op-r) / 16777216) op = r end
+ if op >= 65536 then
+ if rex ~= 0 then
+ local opc3 = op - op % 256
+ if opc3 == 0x0f3a00 or opc3 == 0x0f3800 then
+ wputb(64 + rex % 16); rex = 0
+ end
+ end
+ r = op % 65536 wputb((op-r) / 65536) op = r
+ end
+ if op >= 256 then
+ r = op % 256
+ local b = (op-r) / 256
+ if b == 15 and rex ~= 0 then wputb(64 + rex % 16); rex = 0 end
+ wputb(b)
+ op = r
+ end
+ if rex ~= 0 then wputb(64 + rex % 16) end
+ if sz == "b" then op = op - 1 end
+ wputb(op)
+end
+
+-- Put ModRM or SIB formatted byte.
+local function wputmodrm(m, s, rm, vs, vrm)
+ assert(m < 4 and s < 16 and rm < 16, "bad modrm operands")
+ wputb(64*m + 8*(s%8) + (rm%8))
+end
+
+-- Put ModRM/SIB plus optional displacement.
+local function wputmrmsib(t, imark, s, vsreg)
+ local vreg, vxreg
+ local reg, xreg = t.reg, t.xreg
+ if reg and reg < 0 then reg = 0; vreg = t.vreg end
+ if xreg and xreg < 0 then xreg = 0; vxreg = t.vxreg end
+ if s < 0 then s = 0 end
+
+ -- Register mode.
+ if sub(t.mode, 1, 1) == "r" then
+ wputmodrm(3, s, reg)
+ if vsreg then waction("VREG", vsreg); wputxb(2) end
+ if vreg then waction("VREG", vreg); wputxb(0) end
+ return
+ end
+
+ local disp = t.disp
+ local tdisp = type(disp)
+ -- No base register?
+ if not reg then
+ local riprel = false
+ if xreg then
+ -- Indexed mode with index register only.
+ -- [xreg*xsc+disp] -> (0, s, esp) (xsc, xreg, ebp)
+ wputmodrm(0, s, 4)
+ if imark == "I" then waction("MARK") end
+ if vsreg then waction("VREG", vsreg); wputxb(2) end
+ wputmodrm(t.xsc, xreg, 5)
+ if vxreg then waction("VREG", vxreg); wputxb(3) end
+ else
+ -- Pure 32 bit displacement.
+ if x64 and tdisp ~= "table" then
+ wputmodrm(0, s, 4) -- [disp] -> (0, s, esp) (0, esp, ebp)
+ if imark == "I" then waction("MARK") end
+ wputmodrm(0, 4, 5)
+ else
+ riprel = x64
+ wputmodrm(0, s, 5) -- [disp|rip-label] -> (0, s, ebp)
+ if imark == "I" then waction("MARK") end
+ end
+ if vsreg then waction("VREG", vsreg); wputxb(2) end
+ end
+ if riprel then -- Emit rip-relative displacement.
+ if match("UWSiI", imark) then
+ werror("NYI: rip-relative displacement followed by immediate")
+ end
+ -- The previous byte in the action buffer cannot be 0xe9 or 0x80-0x8f.
+ wputlabel("REL_", disp[1], 2)
+ else
+ wputdarg(disp)
+ end
+ return
+ end
+
+ local m
+ if tdisp == "number" then -- Check displacement size at assembly time.
+ if disp == 0 and (reg%8) ~= 5 then -- [ebp] -> [ebp+0] (in SIB, too)
+ if not vreg then m = 0 end -- Force DISP to allow [Rd(5)] -> [ebp+0]
+ elseif disp >= -128 and disp <= 127 then m = 1
+ else m = 2 end
+ elseif tdisp == "table" then
+ m = 2
+ end
+
+ -- Index register present or esp as base register: need SIB encoding.
+ if xreg or (reg%8) == 4 then
+ wputmodrm(m or 2, s, 4) -- ModRM.
+ if m == nil or imark == "I" then waction("MARK") end
+ if vsreg then waction("VREG", vsreg); wputxb(2) end
+ wputmodrm(t.xsc or 0, xreg or 4, reg) -- SIB.
+ if vxreg then waction("VREG", vxreg); wputxb(3) end
+ if vreg then waction("VREG", vreg); wputxb(1) end
+ else
+ wputmodrm(m or 2, s, reg) -- ModRM.
+ if (imark == "I" and (m == 1 or m == 2)) or
+ (m == nil and (vsreg or vreg)) then waction("MARK") end
+ if vsreg then waction("VREG", vsreg); wputxb(2) end
+ if vreg then waction("VREG", vreg); wputxb(1) end
+ end
+
+ -- Put displacement.
+ if m == 1 then wputsbarg(disp)
+ elseif m == 2 then wputdarg(disp)
+ elseif m == nil then waction("DISP", disp) end
+end
+
+------------------------------------------------------------------------------
+
+-- Return human-readable operand mode string.
+local function opmodestr(op, args)
+ local m = {}
+ for i=1,#args do
+ local a = args[i]
+ m[#m+1] = sub(a.mode, 1, 1)..(a.opsize or "?")
+ end
+ return op.." "..concat(m, ",")
+end
+
+-- Convert number to valid integer or nil.
+local function toint(expr)
+ local n = tonumber(expr)
+ if n then
+ if n % 1 ~= 0 or n < -2147483648 or n > 4294967295 then
+ werror("bad integer number `"..expr.."'")
+ end
+ return n
+ end
+end
+
+-- Parse immediate expression.
+local function immexpr(expr)
+ -- &expr (pointer)
+ if sub(expr, 1, 1) == "&" then
+ return "iPJ", format("(ptrdiff_t)(%s)", sub(expr,2))
+ end
+
+ local prefix = sub(expr, 1, 2)
+ -- =>expr (pc label reference)
+ if prefix == "=>" then
+ return "iJ", sub(expr, 3)
+ end
+ -- ->name (global label reference)
+ if prefix == "->" then
+ return "iJ", map_global[sub(expr, 3)]
+ end
+
+ -- [<>][1-9] (local label reference)
+ local dir, lnum = match(expr, "^([<>])([1-9])$")
+ if dir then -- Fwd: 247-255, Bkwd: 1-9.
+ return "iJ", lnum + (dir == ">" and 246 or 0)
+ end
+
+ local extname = match(expr, "^extern%s+(%S+)$")
+ if extname then
+ return "iJ", map_extern[extname]
+ end
+
+ -- expr (interpreted as immediate)
+ return "iI", expr
+end
+
+-- Parse displacement expression: +-num, +-expr, +-opsize*num
+local function dispexpr(expr)
+ local disp = expr == "" and 0 or toint(expr)
+ if disp then return disp end
+ local c, dispt = match(expr, "^([+-])%s*(.+)$")
+ if c == "+" then
+ expr = dispt
+ elseif not c then
+ werror("bad displacement expression `"..expr.."'")
+ end
+ local opsize, tailops = match(dispt, "^(%w+)%s*%*%s*(.+)$")
+ local ops, imm = map_opsize[opsize], toint(tailops)
+ if ops and imm then
+ if c == "-" then imm = -imm end
+ return imm*map_opsizenum[ops]
+ end
+ local mode, iexpr = immexpr(dispt)
+ if mode == "iJ" then
+ if c == "-" then werror("cannot invert label reference") end
+ return { iexpr }
+ end
+ return expr -- Need to return original signed expression.
+end
+
+-- Parse register or type expression.
+local function rtexpr(expr)
+ if not expr then return end
+ local tname, ovreg = match(expr, "^([%w_]+):(@[%w_]+)$")
+ local tp = map_type[tname or expr]
+ if tp then
+ local reg = ovreg or tp.reg
+ local rnum = map_reg_num[reg]
+ if not rnum then
+ werror("type `"..(tname or expr).."' needs a register override")
+ end
+ if not map_reg_valid_base[reg] then
+ werror("bad base register override `"..(map_reg_rev[reg] or reg).."'")
+ end
+ return reg, rnum, tp
+ end
+ return expr, map_reg_num[expr]
+end
+
+-- Parse operand and return { mode, opsize, reg, xreg, xsc, disp, imm }.
+local function parseoperand(param)
+ local t = {}
+
+ local expr = param
+ local opsize, tailops = match(param, "^(%w+)%s*(.+)$")
+ if opsize then
+ t.opsize = map_opsize[opsize]
+ if t.opsize then expr = tailops end
+ end
+
+ local br = match(expr, "^%[%s*(.-)%s*%]$")
+ repeat
+ if br then
+ t.mode = "xm"
+
+ -- [disp]
+ t.disp = toint(br)
+ if t.disp then
+ t.mode = x64 and "xm" or "xmO"
+ break
+ end
+
+ -- [reg...]
+ local tp
+ local reg, tailr = match(br, "^([@%w_:]+)%s*(.*)$")
+ reg, t.reg, tp = rtexpr(reg)
+ if not t.reg then
+ -- [expr]
+ t.mode = x64 and "xm" or "xmO"
+ t.disp = dispexpr("+"..br)
+ break
+ end
+
+ if t.reg == -1 then
+ t.vreg, tailr = match(tailr, "^(%b())(.*)$")
+ if not t.vreg then werror("bad variable register expression") end
+ end
+
+ -- [xreg*xsc] or [xreg*xsc+-disp] or [xreg*xsc+-expr]
+ local xsc, tailsc = match(tailr, "^%*%s*([1248])%s*(.*)$")
+ if xsc then
+ if not map_reg_valid_index[reg] then
+ werror("bad index register `"..map_reg_rev[reg].."'")
+ end
+ t.xsc = map_xsc[xsc]
+ t.xreg = t.reg
+ t.vxreg = t.vreg
+ t.reg = nil
+ t.vreg = nil
+ t.disp = dispexpr(tailsc)
+ break
+ end
+ if not map_reg_valid_base[reg] then
+ werror("bad base register `"..map_reg_rev[reg].."'")
+ end
+
+ -- [reg] or [reg+-disp]
+ t.disp = toint(tailr) or (tailr == "" and 0)
+ if t.disp then break end
+
+ -- [reg+xreg...]
+ local xreg, tailx = match(tailr, "^+%s*([@%w_:]+)%s*(.*)$")
+ xreg, t.xreg, tp = rtexpr(xreg)
+ if not t.xreg then
+ -- [reg+-expr]
+ t.disp = dispexpr(tailr)
+ break
+ end
+ if not map_reg_valid_index[xreg] then
+ werror("bad index register `"..map_reg_rev[xreg].."'")
+ end
+
+ if t.xreg == -1 then
+ t.vxreg, tailx = match(tailx, "^(%b())(.*)$")
+ if not t.vxreg then werror("bad variable register expression") end
+ end
+
+ -- [reg+xreg*xsc...]
+ local xsc, tailsc = match(tailx, "^%*%s*([1248])%s*(.*)$")
+ if xsc then
+ t.xsc = map_xsc[xsc]
+ tailx = tailsc
+ end
+
+ -- [...] or [...+-disp] or [...+-expr]
+ t.disp = dispexpr(tailx)
+ else
+ -- imm or opsize*imm
+ local imm = toint(expr)
+ if not imm and sub(expr, 1, 1) == "*" and t.opsize then
+ imm = toint(sub(expr, 2))
+ if imm then
+ imm = imm * map_opsizenum[t.opsize]
+ t.opsize = nil
+ end
+ end
+ if imm then
+ if t.opsize then werror("bad operand size override") end
+ local m = "i"
+ if imm == 1 then m = m.."1" end
+ if imm >= 4294967168 and imm <= 4294967295 then imm = imm-4294967296 end
+ if imm >= -128 and imm <= 127 then m = m.."S" end
+ t.imm = imm
+ t.mode = m
+ break
+ end
+
+ local tp
+ local reg, tailr = match(expr, "^([@%w_:]+)%s*(.*)$")
+ reg, t.reg, tp = rtexpr(reg)
+ if t.reg then
+ if t.reg == -1 then
+ t.vreg, tailr = match(tailr, "^(%b())(.*)$")
+ if not t.vreg then werror("bad variable register expression") end
+ end
+ -- reg
+ if tailr == "" then
+ if t.opsize then werror("bad operand size override") end
+ t.opsize = map_reg_opsize[reg]
+ if t.opsize == "f" then
+ t.mode = t.reg == 0 and "fF" or "f"
+ else
+ if reg == "@w4" or (x64 and reg == "@d4") then
+ wwarn("bad idea, try again with `"..(x64 and "rsp'" or "esp'"))
+ end
+ t.mode = t.reg == 0 and "rmR" or (reg == "@b1" and "rmC" or "rm")
+ end
+ t.needrex = map_reg_needrex[reg]
+ break
+ end
+
+ -- type[idx], type[idx].field, type->field -> [reg+offset_expr]
+ if not tp then werror("bad operand `"..param.."'") end
+ t.mode = "xm"
+ t.disp = format(tp.ctypefmt, tailr)
+ else
+ t.mode, t.imm = immexpr(expr)
+ if sub(t.mode, -1) == "J" then
+ if t.opsize and t.opsize ~= addrsize then
+ werror("bad operand size override")
+ end
+ t.opsize = addrsize
+ end
+ end
+ end
+ until true
+ return t
+end
+
+------------------------------------------------------------------------------
+-- x86 Template String Description
+-- ===============================
+--
+-- Each template string is a list of [match:]pattern pairs,
+-- separated by "|". The first match wins. No match means a
+-- bad or unsupported combination of operand modes or sizes.
+--
+-- The match part and the ":" is omitted if the operation has
+-- no operands. Otherwise the first N characters are matched
+-- against the mode strings of each of the N operands.
+--
+-- The mode string for each operand type is (see parseoperand()):
+-- Integer register: "rm", +"R" for eax, ax, al, +"C" for cl
+-- FP register: "f", +"F" for st0
+-- Index operand: "xm", +"O" for [disp] (pure offset)
+-- Immediate: "i", +"S" for signed 8 bit, +"1" for 1,
+-- +"I" for arg, +"P" for pointer
+-- Any: +"J" for valid jump targets
+--
+-- So a match character "m" (mixed) matches both an integer register
+-- and an index operand (to be encoded with the ModRM/SIB scheme).
+-- But "r" matches only a register and "x" only an index operand
+-- (e.g. for FP memory access operations).
+--
+-- The operand size match string starts right after the mode match
+-- characters and ends before the ":". "dwb" or "qdwb" is assumed, if empty.
+-- The effective data size of the operation is matched against this list.
+--
+-- If only the regular "b", "w", "d", "q", "t" operand sizes are
+-- present, then all operands must be the same size. Unspecified sizes
+-- are ignored, but at least one operand must have a size or the pattern
+-- won't match (use the "byte", "word", "dword", "qword", "tword"
+-- operand size overrides. E.g.: mov dword [eax], 1).
+--
+-- If the list has a "1" or "2" prefix, the operand size is taken
+-- from the respective operand and any other operand sizes are ignored.
+-- If the list contains only ".", all operand sizes are ignored.
+-- If the list has a "/" prefix, the concatenated (mixed) operand sizes
+-- are compared to the match.
+--
+-- E.g. "rrdw" matches for either two dword registers or two word
+-- registers. "Fx2dq" matches an st0 operand plus an index operand
+-- pointing to a dword (float) or qword (double).
+--
+-- Every character after the ":" is part of the pattern string:
+-- Hex chars are accumulated to form the opcode (left to right).
+-- "n" disables the standard opcode mods
+-- (otherwise: -1 for "b", o16 prefix for "w", rex.w for "q")
+-- "X" Force REX.W.
+-- "r"/"R" adds the reg. number from the 1st/2nd operand to the opcode.
+-- "m"/"M" generates ModRM/SIB from the 1st/2nd operand.
+-- The spare 3 bits are either filled with the last hex digit or
+-- the result from a previous "r"/"R". The opcode is restored.
+--
+-- All of the following characters force a flush of the opcode:
+-- "o"/"O" stores a pure 32 bit disp (offset) from the 1st/2nd operand.
+-- "S" stores a signed 8 bit immediate from the last operand.
+-- "U" stores an unsigned 8 bit immediate from the last operand.
+-- "W" stores an unsigned 16 bit immediate from the last operand.
+-- "i" stores an operand sized immediate from the last operand.
+-- "I" dito, but generates an action code to optionally modify
+-- the opcode (+2) for a signed 8 bit immediate.
+-- "J" generates one of the REL action codes from the last operand.
+--
+------------------------------------------------------------------------------
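+-- Worked example (editor's note, not part of the upstream module): the
+-- entry  imul_2 = "rmqdw:0FAFrM|..."  defined below matches a register
+-- first operand ("r") and a register-or-memory second operand ("m") of a
+-- common q, d or w size. In the pattern, the hex chars accumulate the
+-- opcode bytes 0F AF, "r" adds the first operand's register number to the
+-- opcode, and "M" emits ModRM/SIB for the second operand, filling the
+-- spare reg bits with that number. So `imul eax, dword [ebx+8]' assembles
+-- to 0F AF 43 08 (opcode, ModRM, disp8).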
+
+-- Template strings for x86 instructions. Ordered by first opcode byte.
+-- Unimplemented opcodes (deliberate omissions) are marked with *.
+local map_op = {
+ -- 00-05: add...
+ -- 06: *push es
+ -- 07: *pop es
+ -- 08-0D: or...
+ -- 0E: *push cs
+ -- 0F: two byte opcode prefix
+ -- 10-15: adc...
+ -- 16: *push ss
+ -- 17: *pop ss
+ -- 18-1D: sbb...
+ -- 1E: *push ds
+ -- 1F: *pop ds
+ -- 20-25: and...
+ es_0 = "26",
+ -- 27: *daa
+ -- 28-2D: sub...
+ cs_0 = "2E",
+ -- 2F: *das
+ -- 30-35: xor...
+ ss_0 = "36",
+ -- 37: *aaa
+ -- 38-3D: cmp...
+ ds_0 = "3E",
+ -- 3F: *aas
+ inc_1 = x64 and "m:FF0m" or "rdw:40r|m:FF0m",
+ dec_1 = x64 and "m:FF1m" or "rdw:48r|m:FF1m",
+ push_1 = (x64 and "rq:n50r|rw:50r|mq:nFF6m|mw:FF6m" or
+ "rdw:50r|mdw:FF6m").."|S.:6AS|ib:n6Ai|i.:68i",
+ pop_1 = x64 and "rq:n58r|rw:58r|mq:n8F0m|mw:8F0m" or "rdw:58r|mdw:8F0m",
+ -- 60: *pusha, *pushad, *pushaw
+ -- 61: *popa, *popad, *popaw
+ -- 62: *bound rdw,x
+ -- 63: x86: *arpl mw,rw
+ movsxd_2 = x64 and "rm/qd:63rM",
+ fs_0 = "64",
+ gs_0 = "65",
+ o16_0 = "66",
+ a16_0 = not x64 and "67" or nil,
+ a32_0 = x64 and "67",
+ -- 68: push idw
+ -- 69: imul rdw,mdw,idw
+ -- 6A: push ib
+ -- 6B: imul rdw,mdw,S
+ -- 6C: *insb
+ -- 6D: *insd, *insw
+ -- 6E: *outsb
+ -- 6F: *outsd, *outsw
+ -- 70-7F: jcc lb
+ -- 80: add... mb,i
+ -- 81: add... mdw,i
+ -- 82: *undefined
+ -- 83: add... mdw,S
+ test_2 = "mr:85Rm|rm:85rM|Ri:A9ri|mi:F70mi",
+ -- 86: xchg rb,mb
+ -- 87: xchg rdw,mdw
+ -- 88: mov mb,r
+ -- 89: mov mdw,r
+ -- 8A: mov r,mb
+ -- 8B: mov r,mdw
+ -- 8C: *mov mdw,seg
+ lea_2 = "rx1dq:8DrM",
+ -- 8E: *mov seg,mdw
+ -- 8F: pop mdw
+ nop_0 = "90",
+ xchg_2 = "Rrqdw:90R|rRqdw:90r|rm:87rM|mr:87Rm",
+ cbw_0 = "6698",
+ cwde_0 = "98",
+ cdqe_0 = "4898",
+ cwd_0 = "6699",
+ cdq_0 = "99",
+ cqo_0 = "4899",
+ -- 9A: *call iw:idw
+ wait_0 = "9B",
+ fwait_0 = "9B",
+ pushf_0 = "9C",
+ pushfd_0 = not x64 and "9C",
+ pushfq_0 = x64 and "9C",
+ popf_0 = "9D",
+ popfd_0 = not x64 and "9D",
+ popfq_0 = x64 and "9D",
+ sahf_0 = "9E",
+ lahf_0 = "9F",
+ mov_2 = "OR:A3o|RO:A1O|mr:89Rm|rm:8BrM|rib:nB0ri|ridw:B8ri|mi:C70mi",
+ movsb_0 = "A4",
+ movsw_0 = "66A5",
+ movsd_0 = "A5",
+ cmpsb_0 = "A6",
+ cmpsw_0 = "66A7",
+ cmpsd_0 = "A7",
+ -- A8: test Rb,i
+ -- A9: test Rdw,i
+ stosb_0 = "AA",
+ stosw_0 = "66AB",
+ stosd_0 = "AB",
+ lodsb_0 = "AC",
+ lodsw_0 = "66AD",
+ lodsd_0 = "AD",
+ scasb_0 = "AE",
+ scasw_0 = "66AF",
+ scasd_0 = "AF",
+ -- B0-B7: mov rb,i
+ -- B8-BF: mov rdw,i
+ -- C0: rol... mb,i
+ -- C1: rol... mdw,i
+ ret_1 = "i.:nC2W",
+ ret_0 = "C3",
+ -- C4: *les rdw,mq
+ -- C5: *lds rdw,mq
+ -- C6: mov mb,i
+ -- C7: mov mdw,i
+ -- C8: *enter iw,ib
+ leave_0 = "C9",
+ -- CA: *retf iw
+ -- CB: *retf
+ int3_0 = "CC",
+ int_1 = "i.:nCDU",
+ into_0 = "CE",
+ -- CF: *iret
+ -- D0: rol... mb,1
+ -- D1: rol... mdw,1
+ -- D2: rol... mb,cl
+ -- D3: rol... mb,cl
+ -- D4: *aam ib
+ -- D5: *aad ib
+ -- D6: *salc
+ -- D7: *xlat
+ -- D8-DF: floating point ops
+ -- E0: *loopne
+ -- E1: *loope
+ -- E2: *loop
+ -- E3: *jcxz, *jecxz
+ -- E4: *in Rb,ib
+ -- E5: *in Rdw,ib
+ -- E6: *out ib,Rb
+ -- E7: *out ib,Rdw
+ call_1 = x64 and "mq:nFF2m|J.:E8nJ" or "md:FF2m|J.:E8J",
+ jmp_1 = x64 and "mq:nFF4m|J.:E9nJ" or "md:FF4m|J.:E9J", -- short: EB
+ -- EA: *jmp iw:idw
+ -- EB: jmp ib
+ -- EC: *in Rb,dx
+ -- ED: *in Rdw,dx
+ -- EE: *out dx,Rb
+ -- EF: *out dx,Rdw
+ -- F0: *lock
+ int1_0 = "F1",
+ repne_0 = "F2",
+ repnz_0 = "F2",
+ rep_0 = "F3",
+ repe_0 = "F3",
+ repz_0 = "F3",
+ -- F4: *hlt
+ cmc_0 = "F5",
+ -- F6: test... mb,i; div... mb
+ -- F7: test... mdw,i; div... mdw
+ clc_0 = "F8",
+ stc_0 = "F9",
+ -- FA: *cli
+ cld_0 = "FC",
+ std_0 = "FD",
+ -- FE: inc... mb
+ -- FF: inc... mdw
+
+ -- misc ops
+ not_1 = "m:F72m",
+ neg_1 = "m:F73m",
+ mul_1 = "m:F74m",
+ imul_1 = "m:F75m",
+ div_1 = "m:F76m",
+ idiv_1 = "m:F77m",
+
+ imul_2 = "rmqdw:0FAFrM|rIqdw:69rmI|rSqdw:6BrmS|riqdw:69rmi",
+ imul_3 = "rmIqdw:69rMI|rmSqdw:6BrMS|rmiqdw:69rMi",
+
+ movzx_2 = "rm/db:0FB6rM|rm/qb:|rm/wb:0FB6rM|rm/dw:0FB7rM|rm/qw:",
+ movsx_2 = "rm/db:0FBErM|rm/qb:|rm/wb:0FBErM|rm/dw:0FBFrM|rm/qw:",
+
+ bswap_1 = "rqd:0FC8r",
+ bsf_2 = "rmqdw:0FBCrM",
+ bsr_2 = "rmqdw:0FBDrM",
+ bt_2 = "mrqdw:0FA3Rm|miqdw:0FBA4mU",
+ btc_2 = "mrqdw:0FBBRm|miqdw:0FBA7mU",
+ btr_2 = "mrqdw:0FB3Rm|miqdw:0FBA6mU",
+ bts_2 = "mrqdw:0FABRm|miqdw:0FBA5mU",
+
+ rdtsc_0 = "0F31", -- P1+
+ cpuid_0 = "0FA2", -- P1+
+
+ -- floating point ops
+ fst_1 = "ff:DDD0r|xd:D92m|xq:nDD2m",
+ fstp_1 = "ff:DDD8r|xd:D93m|xq:nDD3m|xt:DB7m",
+ fld_1 = "ff:D9C0r|xd:D90m|xq:nDD0m|xt:DB5m",
+
+ fpop_0 = "DDD8", -- Alias for fstp st0.
+
+ fist_1 = "xw:nDF2m|xd:DB2m",
+ fistp_1 = "xw:nDF3m|xd:DB3m|xq:nDF7m",
+ fild_1 = "xw:nDF0m|xd:DB0m|xq:nDF5m",
+
+ fxch_0 = "D9C9",
+ fxch_1 = "ff:D9C8r",
+ fxch_2 = "fFf:D9C8r|Fff:D9C8R",
+
+ fucom_1 = "ff:DDE0r",
+ fucom_2 = "Fff:DDE0R",
+ fucomp_1 = "ff:DDE8r",
+ fucomp_2 = "Fff:DDE8R",
+ fucomi_1 = "ff:DBE8r", -- P6+
+ fucomi_2 = "Fff:DBE8R", -- P6+
+ fucomip_1 = "ff:DFE8r", -- P6+
+ fucomip_2 = "Fff:DFE8R", -- P6+
+ fcomi_1 = "ff:DBF0r", -- P6+
+ fcomi_2 = "Fff:DBF0R", -- P6+
+ fcomip_1 = "ff:DFF0r", -- P6+
+ fcomip_2 = "Fff:DFF0R", -- P6+
+ fucompp_0 = "DAE9",
+ fcompp_0 = "DED9",
+
+ fldcw_1 = "xw:nD95m",
+ fstcw_1 = "xw:n9BD97m",
+ fnstcw_1 = "xw:nD97m",
+ fstsw_1 = "Rw:n9BDFE0|xw:n9BDD7m",
+ fnstsw_1 = "Rw:nDFE0|xw:nDD7m",
+ fclex_0 = "9BDBE2",
+ fnclex_0 = "DBE2",
+
+ fnop_0 = "D9D0",
+ -- D9D1-D9DF: unassigned
+
+ fchs_0 = "D9E0",
+ fabs_0 = "D9E1",
+ -- D9E2: unassigned
+ -- D9E3: unassigned
+ ftst_0 = "D9E4",
+ fxam_0 = "D9E5",
+ -- D9E6: unassigned
+ -- D9E7: unassigned
+ fld1_0 = "D9E8",
+ fldl2t_0 = "D9E9",
+ fldl2e_0 = "D9EA",
+ fldpi_0 = "D9EB",
+ fldlg2_0 = "D9EC",
+ fldln2_0 = "D9ED",
+ fldz_0 = "D9EE",
+ -- D9EF: unassigned
+
+ f2xm1_0 = "D9F0",
+ fyl2x_0 = "D9F1",
+ fptan_0 = "D9F2",
+ fpatan_0 = "D9F3",
+ fxtract_0 = "D9F4",
+ fprem1_0 = "D9F5",
+ fdecstp_0 = "D9F6",
+ fincstp_0 = "D9F7",
+ fprem_0 = "D9F8",
+ fyl2xp1_0 = "D9F9",
+ fsqrt_0 = "D9FA",
+ fsincos_0 = "D9FB",
+ frndint_0 = "D9FC",
+ fscale_0 = "D9FD",
+ fsin_0 = "D9FE",
+ fcos_0 = "D9FF",
+
+ -- SSE, SSE2
+ andnpd_2 = "rmo:660F55rM",
+ andnps_2 = "rmo:0F55rM",
+ andpd_2 = "rmo:660F54rM",
+ andps_2 = "rmo:0F54rM",
+ clflush_1 = "x.:0FAE7m",
+ cmppd_3 = "rmio:660FC2rMU",
+ cmpps_3 = "rmio:0FC2rMU",
+ cmpsd_3 = "rrio:F20FC2rMU|rxi/oq:",
+ cmpss_3 = "rrio:F30FC2rMU|rxi/od:",
+ comisd_2 = "rro:660F2FrM|rx/oq:",
+ comiss_2 = "rro:0F2FrM|rx/od:",
+ cvtdq2pd_2 = "rro:F30FE6rM|rx/oq:",
+ cvtdq2ps_2 = "rmo:0F5BrM",
+ cvtpd2dq_2 = "rmo:F20FE6rM",
+ cvtpd2ps_2 = "rmo:660F5ArM",
+ cvtpi2pd_2 = "rx/oq:660F2ArM",
+ cvtpi2ps_2 = "rx/oq:0F2ArM",
+ cvtps2dq_2 = "rmo:660F5BrM",
+ cvtps2pd_2 = "rro:0F5ArM|rx/oq:",
+ cvtsd2si_2 = "rr/do:F20F2DrM|rr/qo:|rx/dq:|rxq:",
+ cvtsd2ss_2 = "rro:F20F5ArM|rx/oq:",
+ cvtsi2sd_2 = "rm/od:F20F2ArM|rm/oq:F20F2ArXM",
+ cvtsi2ss_2 = "rm/od:F30F2ArM|rm/oq:F30F2ArXM",
+ cvtss2sd_2 = "rro:F30F5ArM|rx/od:",
+ cvtss2si_2 = "rr/do:F20F2CrM|rr/qo:|rxd:|rx/qd:",
+ cvttpd2dq_2 = "rmo:660FE6rM",
+ cvttps2dq_2 = "rmo:F30F5BrM",
+ cvttsd2si_2 = "rr/do:F20F2CrM|rr/qo:|rx/dq:|rxq:",
+ cvttss2si_2 = "rr/do:F30F2CrM|rr/qo:|rxd:|rx/qd:",
+ ldmxcsr_1 = "xd:0FAE2m",
+ lfence_0 = "0FAEE8",
+ maskmovdqu_2 = "rro:660FF7rM",
+ mfence_0 = "0FAEF0",
+ movapd_2 = "rmo:660F28rM|mro:660F29Rm",
+ movaps_2 = "rmo:0F28rM|mro:0F29Rm",
+ movd_2 = "rm/od:660F6ErM|rm/oq:660F6ErXM|mr/do:660F7ERm|mr/qo:",
+ movdqa_2 = "rmo:660F6FrM|mro:660F7FRm",
+ movdqu_2 = "rmo:F30F6FrM|mro:F30F7FRm",
+ movhlps_2 = "rro:0F12rM",
+ movhpd_2 = "rx/oq:660F16rM|xr/qo:n660F17Rm",
+ movhps_2 = "rx/oq:0F16rM|xr/qo:n0F17Rm",
+ movlhps_2 = "rro:0F16rM",
+ movlpd_2 = "rx/oq:660F12rM|xr/qo:n660F13Rm",
+ movlps_2 = "rx/oq:0F12rM|xr/qo:n0F13Rm",
+ movmskpd_2 = "rr/do:660F50rM",
+ movmskps_2 = "rr/do:0F50rM",
+ movntdq_2 = "xro:660FE7Rm",
+ movnti_2 = "xrqd:0FC3Rm",
+ movntpd_2 = "xro:660F2BRm",
+ movntps_2 = "xro:0F2BRm",
+ movq_2 = "rro:F30F7ErM|rx/oq:|xr/qo:n660FD6Rm",
+ movsd_2 = "rro:F20F10rM|rx/oq:|xr/qo:nF20F11Rm",
+ movss_2 = "rro:F30F10rM|rx/od:|xr/do:F30F11Rm",
+ movupd_2 = "rmo:660F10rM|mro:660F11Rm",
+ movups_2 = "rmo:0F10rM|mro:0F11Rm",
+ orpd_2 = "rmo:660F56rM",
+ orps_2 = "rmo:0F56rM",
+ packssdw_2 = "rmo:660F6BrM",
+ packsswb_2 = "rmo:660F63rM",
+ packuswb_2 = "rmo:660F67rM",
+ paddb_2 = "rmo:660FFCrM",
+ paddd_2 = "rmo:660FFErM",
+ paddq_2 = "rmo:660FD4rM",
+ paddsb_2 = "rmo:660FECrM",
+ paddsw_2 = "rmo:660FEDrM",
+ paddusb_2 = "rmo:660FDCrM",
+ paddusw_2 = "rmo:660FDDrM",
+ paddw_2 = "rmo:660FFDrM",
+ pand_2 = "rmo:660FDBrM",
+ pandn_2 = "rmo:660FDFrM",
+ pause_0 = "F390",
+ pavgb_2 = "rmo:660FE0rM",
+ pavgw_2 = "rmo:660FE3rM",
+ pcmpeqb_2 = "rmo:660F74rM",
+ pcmpeqd_2 = "rmo:660F76rM",
+ pcmpeqw_2 = "rmo:660F75rM",
+ pcmpgtb_2 = "rmo:660F64rM",
+ pcmpgtd_2 = "rmo:660F66rM",
+ pcmpgtw_2 = "rmo:660F65rM",
+ pextrw_3 = "rri/do:660FC5rMU|xri/wo:660F3A15nrMU", -- Mem op: SSE4.1 only.
+ pinsrw_3 = "rri/od:660FC4rMU|rxi/ow:",
+ pmaddwd_2 = "rmo:660FF5rM",
+ pmaxsw_2 = "rmo:660FEErM",
+ pmaxub_2 = "rmo:660FDErM",
+ pminsw_2 = "rmo:660FEArM",
+ pminub_2 = "rmo:660FDArM",
+ pmovmskb_2 = "rr/do:660FD7rM",
+ pmulhuw_2 = "rmo:660FE4rM",
+ pmulhw_2 = "rmo:660FE5rM",
+ pmullw_2 = "rmo:660FD5rM",
+ pmuludq_2 = "rmo:660FF4rM",
+ por_2 = "rmo:660FEBrM",
+ prefetchnta_1 = "xb:n0F180m",
+ prefetcht0_1 = "xb:n0F181m",
+ prefetcht1_1 = "xb:n0F182m",
+ prefetcht2_1 = "xb:n0F183m",
+ psadbw_2 = "rmo:660FF6rM",
+ pshufd_3 = "rmio:660F70rMU",
+ pshufhw_3 = "rmio:F30F70rMU",
+ pshuflw_3 = "rmio:F20F70rMU",
+ pslld_2 = "rmo:660FF2rM|rio:660F726mU",
+ pslldq_2 = "rio:660F737mU",
+ psllq_2 = "rmo:660FF3rM|rio:660F736mU",
+ psllw_2 = "rmo:660FF1rM|rio:660F716mU",
+ psrad_2 = "rmo:660FE2rM|rio:660F724mU",
+ psraw_2 = "rmo:660FE1rM|rio:660F714mU",
+ psrld_2 = "rmo:660FD2rM|rio:660F722mU",
+ psrldq_2 = "rio:660F733mU",
+ psrlq_2 = "rmo:660FD3rM|rio:660F732mU",
+ psrlw_2 = "rmo:660FD1rM|rio:660F712mU",
+ psubb_2 = "rmo:660FF8rM",
+ psubd_2 = "rmo:660FFArM",
+ psubq_2 = "rmo:660FFBrM",
+ psubsb_2 = "rmo:660FE8rM",
+ psubsw_2 = "rmo:660FE9rM",
+ psubusb_2 = "rmo:660FD8rM",
+ psubusw_2 = "rmo:660FD9rM",
+ psubw_2 = "rmo:660FF9rM",
+ punpckhbw_2 = "rmo:660F68rM",
+ punpckhdq_2 = "rmo:660F6ArM",
+ punpckhqdq_2 = "rmo:660F6DrM",
+ punpckhwd_2 = "rmo:660F69rM",
+ punpcklbw_2 = "rmo:660F60rM",
+ punpckldq_2 = "rmo:660F62rM",
+ punpcklqdq_2 = "rmo:660F6CrM",
+ punpcklwd_2 = "rmo:660F61rM",
+ pxor_2 = "rmo:660FEFrM",
+ rcpps_2 = "rmo:0F53rM",
+ rcpss_2 = "rro:F30F53rM|rx/od:",
+ rsqrtps_2 = "rmo:0F52rM",
+ rsqrtss_2 = "rmo:F30F52rM",
+ sfence_0 = "0FAEF8",
+ shufpd_3 = "rmio:660FC6rMU",
+ shufps_3 = "rmio:0FC6rMU",
+ stmxcsr_1 = "xd:0FAE3m",
+ ucomisd_2 = "rro:660F2ErM|rx/oq:",
+ ucomiss_2 = "rro:0F2ErM|rx/od:",
+ unpckhpd_2 = "rmo:660F15rM",
+ unpckhps_2 = "rmo:0F15rM",
+ unpcklpd_2 = "rmo:660F14rM",
+ unpcklps_2 = "rmo:0F14rM",
+ xorpd_2 = "rmo:660F57rM",
+ xorps_2 = "rmo:0F57rM",
+
+ -- SSE3 ops
+ fisttp_1 = "xw:nDF1m|xd:DB1m|xq:nDD1m",
+ addsubpd_2 = "rmo:660FD0rM",
+ addsubps_2 = "rmo:F20FD0rM",
+ haddpd_2 = "rmo:660F7CrM",
+ haddps_2 = "rmo:F20F7CrM",
+ hsubpd_2 = "rmo:660F7DrM",
+ hsubps_2 = "rmo:F20F7DrM",
+ lddqu_2 = "rxo:F20FF0rM",
+ movddup_2 = "rmo:F20F12rM",
+ movshdup_2 = "rmo:F30F16rM",
+ movsldup_2 = "rmo:F30F12rM",
+
+ -- SSSE3 ops
+ pabsb_2 = "rmo:660F381CrM",
+ pabsd_2 = "rmo:660F381ErM",
+ pabsw_2 = "rmo:660F381DrM",
+ palignr_3 = "rmio:660F3A0FrMU",
+ phaddd_2 = "rmo:660F3802rM",
+ phaddsw_2 = "rmo:660F3803rM",
+ phaddw_2 = "rmo:660F3801rM",
+ phsubd_2 = "rmo:660F3806rM",
+ phsubsw_2 = "rmo:660F3807rM",
+ phsubw_2 = "rmo:660F3805rM",
+ pmaddubsw_2 = "rmo:660F3804rM",
+ pmulhrsw_2 = "rmo:660F380BrM",
+ pshufb_2 = "rmo:660F3800rM",
+ psignb_2 = "rmo:660F3808rM",
+ psignd_2 = "rmo:660F380ArM",
+ psignw_2 = "rmo:660F3809rM",
+
+ -- SSE4.1 ops
+ blendpd_3 = "rmio:660F3A0DrMU",
+ blendps_3 = "rmio:660F3A0CrMU",
+ blendvpd_3 = "rmRo:660F3815rM",
+ blendvps_3 = "rmRo:660F3814rM",
+ dppd_3 = "rmio:660F3A41rMU",
+ dpps_3 = "rmio:660F3A40rMU",
+ extractps_3 = "mri/do:660F3A17RmU|rri/qo:660F3A17RXmU",
+ insertps_3 = "rrio:660F3A41rMU|rxi/od:",
+ movntdqa_2 = "rmo:660F382ArM",
+ mpsadbw_3 = "rmio:660F3A42rMU",
+ packusdw_2 = "rmo:660F382BrM",
+ pblendvb_3 = "rmRo:660F3810rM",
+ pblendw_3 = "rmio:660F3A0ErMU",
+ pcmpeqq_2 = "rmo:660F3829rM",
+ pextrb_3 = "rri/do:660F3A14nRmU|rri/qo:|xri/bo:",
+ pextrd_3 = "mri/do:660F3A16RmU",
+ pextrq_3 = "mri/qo:660F3A16RmU",
+ -- pextrw is SSE2, mem operand is SSE4.1 only
+ phminposuw_2 = "rmo:660F3841rM",
+ pinsrb_3 = "rri/od:660F3A20nrMU|rxi/ob:",
+ pinsrd_3 = "rmi/od:660F3A22rMU",
+ pinsrq_3 = "rmi/oq:660F3A22rXMU",
+ pmaxsb_2 = "rmo:660F383CrM",
+ pmaxsd_2 = "rmo:660F383DrM",
+ pmaxud_2 = "rmo:660F383FrM",
+ pmaxuw_2 = "rmo:660F383ErM",
+ pminsb_2 = "rmo:660F3838rM",
+ pminsd_2 = "rmo:660F3839rM",
+ pminud_2 = "rmo:660F383BrM",
+ pminuw_2 = "rmo:660F383ArM",
+ pmovsxbd_2 = "rro:660F3821rM|rx/od:",
+ pmovsxbq_2 = "rro:660F3822rM|rx/ow:",
+ pmovsxbw_2 = "rro:660F3820rM|rx/oq:",
+ pmovsxdq_2 = "rro:660F3825rM|rx/oq:",
+ pmovsxwd_2 = "rro:660F3823rM|rx/oq:",
+ pmovsxwq_2 = "rro:660F3824rM|rx/od:",
+ pmovzxbd_2 = "rro:660F3831rM|rx/od:",
+ pmovzxbq_2 = "rro:660F3832rM|rx/ow:",
+ pmovzxbw_2 = "rro:660F3830rM|rx/oq:",
+ pmovzxdq_2 = "rro:660F3835rM|rx/oq:",
+ pmovzxwd_2 = "rro:660F3833rM|rx/oq:",
+ pmovzxwq_2 = "rro:660F3834rM|rx/od:",
+ pmuldq_2 = "rmo:660F3828rM",
+ pmulld_2 = "rmo:660F3840rM",
+ ptest_2 = "rmo:660F3817rM",
+ roundpd_3 = "rmio:660F3A09rMU",
+ roundps_3 = "rmio:660F3A08rMU",
+ roundsd_3 = "rrio:660F3A0BrMU|rxi/oq:",
+ roundss_3 = "rrio:660F3A0ArMU|rxi/od:",
+
+ -- SSE4.2 ops
+ crc32_2 = "rmqd:F20F38F1rM|rm/dw:66F20F38F1rM|rm/db:F20F38F0rM|rm/qb:",
+ pcmpestri_3 = "rmio:660F3A61rMU",
+ pcmpestrm_3 = "rmio:660F3A60rMU",
+ pcmpgtq_2 = "rmo:660F3837rM",
+ pcmpistri_3 = "rmio:660F3A63rMU",
+ pcmpistrm_3 = "rmio:660F3A62rMU",
+ popcnt_2 = "rmqdw:F30FB8rM",
+
+ -- SSE4a
+ extrq_2 = "rro:660F79rM",
+ extrq_3 = "riio:660F780mUU",
+ insertq_2 = "rro:F20F79rM",
+ insertq_4 = "rriio:F20F78rMUU",
+ lzcnt_2 = "rmqdw:F30FBDrM",
+ movntsd_2 = "xr/qo:nF20F2BRm",
+ movntss_2 = "xr/do:F30F2BRm",
+ -- popcnt is also in SSE4.2
+}
+
+------------------------------------------------------------------------------
+
+-- Arithmetic ops.
+for name,n in pairs{ add = 0, ["or"] = 1, adc = 2, sbb = 3,
+ ["and"] = 4, sub = 5, xor = 6, cmp = 7 } do
+ local n8 = n * 8
+ map_op[name.."_2"] = format(
+ "mr:%02XRm|rm:%02XrM|mI1qdw:81%XmI|mS1qdw:83%XmS|Ri1qdwb:%02Xri|mi1qdwb:81%Xmi",
+ 1+n8, 3+n8, n, n, 5+n8, n)
+end
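+
+-- For illustration, the add entry (n = 0) generated by the loop above is:
+--   add_2 = "mr:01Rm|rm:03rM|mI1qdw:810mI|mS1qdw:830mS|Ri1qdwb:05ri|mi1qdwb:810mi"
+-- i.e. 01 for r/m,r; 03 for r,r/m; 81 /0 and sign-extended 83 /0 for the
+-- immediate forms; and the short accumulator form 05.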
+
+-- Shift ops.
+for name,n in pairs{ rol = 0, ror = 1, rcl = 2, rcr = 3,
+ shl = 4, shr = 5, sar = 7, sal = 4 } do
+ map_op[name.."_2"] = format("m1:D1%Xm|mC1qdwb:D3%Xm|mi:C1%XmU", n, n, n)
+end
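+
+-- For illustration, the shr entry (n = 5) generated by the loop above is:
+--   shr_2 = "m1:D15m|mC1qdwb:D35m|mi:C15mU"
+-- i.e. D1 /5 for shift-by-1, D3 /5 for shift-by-cl and C1 /5 ib for
+-- shift-by-imm8.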
+
+-- Conditional ops.
+for cc,n in pairs(map_cc) do
+ map_op["j"..cc.."_1"] = format("J.:n0F8%XJ", n) -- short: 7%X
+ map_op["set"..cc.."_1"] = format("mb:n0F9%X2m", n)
+ map_op["cmov"..cc.."_2"] = format("rmqdw:0F4%XrM", n) -- P6+
+end
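+
+-- For illustration, assuming map_cc.e == 4 (defined earlier in this file),
+-- the loop above generates:
+--   je_1    = "J.:n0F84J"      (JE rel32 = 0F 84)
+--   sete_1  = "mb:n0F942m"     (SETE r/m8 = 0F 94)
+--   cmove_2 = "rmqdw:0F44rM"   (CMOVE = 0F 44, P6+)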
+
+-- FP arithmetic ops.
+for name,n in pairs{ add = 0, mul = 1, com = 2, comp = 3,
+ sub = 4, subr = 5, div = 6, divr = 7 } do
+ local nc = 192 + n * 8
+ local nr = nc + (n < 4 and 0 or (n % 2 == 0 and 8 or -8))
+ local fn = "f"..name
+ map_op[fn.."_1"] = format("ff:D8%02Xr|xd:D8%Xm|xq:nDC%Xm", nc, n, n)
+ if n == 2 or n == 3 then
+ map_op[fn.."_2"] = format("Fff:D8%02XR|Fx2d:D8%XM|Fx2q:nDC%XM", nc, n, n)
+ else
+ map_op[fn.."_2"] = format("Fff:D8%02XR|fFf:DC%02Xr|Fx2d:D8%XM|Fx2q:nDC%XM", nc, nr, n, n)
+ map_op[fn.."p_1"] = format("ff:DE%02Xr", nr)
+ map_op[fn.."p_2"] = format("fFf:DE%02Xr", nr)
+ end
+ map_op["fi"..name.."_1"] = format("xd:DA%Xm|xw:nDE%Xm", n, n)
+end
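+
+-- For illustration, the fadd entries (n = 0, nc = 0xC0) generated above are:
+--   fadd_1  = "ff:D8C0r|xd:D80m|xq:nDC0m"
+--   fadd_2  = "Fff:D8C0R|fFf:DCC0r|Fx2d:D80M|Fx2q:nDC0M"
+--   faddp_1 = "ff:DEC0r", faddp_2 = "fFf:DEC0r"
+--   fiadd_1 = "xd:DA0m|xw:nDE0m"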
+
+-- FP conditional moves.
+for cc,n in pairs{ b=0, e=1, be=2, u=3, nb=4, ne=5, nbe=6, nu=7 } do
+ local n4 = n % 4
+ local nc = 56000 + n4 * 8 + (n-n4) * 64
+ map_op["fcmov"..cc.."_1"] = format("ff:%04Xr", nc) -- P6+
+ map_op["fcmov"..cc.."_2"] = format("Fff:%04XR", nc) -- P6+
+end
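+
+-- For illustration, nc = 56000 is 0xDAC0: the low two condition bits select
+-- C0/C8/D0/D8 and the remaining bit switches to opcode byte DB, so e.g.
+--   fcmovb_1  = "ff:DAC0r"   (FCMOVB  ST0,ST(i) = DA C0+i)
+--   fcmovnb_1 = "ff:DBC0r"   (FCMOVNB ST0,ST(i) = DB C0+i)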
+
+-- SSE FP arithmetic ops.
+for name,n in pairs{ sqrt = 1, add = 8, mul = 9,
+ sub = 12, min = 13, div = 14, max = 15 } do
+ map_op[name.."ps_2"] = format("rmo:0F5%XrM", n)
+ map_op[name.."ss_2"] = format("rro:F30F5%XrM|rx/od:", n)
+ map_op[name.."pd_2"] = format("rmo:660F5%XrM", n)
+ map_op[name.."sd_2"] = format("rro:F20F5%XrM|rx/oq:", n)
+end
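+
+-- For illustration, the add entries (n = 8) generated by the loop above are:
+--   addps_2 = "rmo:0F58rM"
+--   addss_2 = "rro:F30F58rM|rx/od:"
+--   addpd_2 = "rmo:660F58rM"
+--   addsd_2 = "rro:F20F58rM|rx/oq:"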
+
+------------------------------------------------------------------------------
+
+-- Process pattern string.
+local function dopattern(pat, args, sz, op, needrex)
+ local digit, addin
+ local opcode = 0
+ local szov = sz
+ local narg = 1
+ local rex = 0
+
+ -- Limit number of section buffer positions used by a single dasm_put().
+ -- A single opcode needs a maximum of 5 positions.
+ if secpos+5 > maxsecpos then wflush() end
+
+ -- Process each character.
+ for c in gmatch(pat.."|", ".") do
+ if match(c, "%x") then -- Hex digit.
+ digit = byte(c) - 48
+ if digit > 48 then digit = digit - 39
+ elseif digit > 16 then digit = digit - 7 end
+ opcode = opcode*16 + digit
+ addin = nil
+ elseif c == "n" then -- Disable operand size mods for opcode.
+ szov = nil
+ elseif c == "X" then -- Force REX.W.
+ rex = 8
+ elseif c == "r" then -- Merge 1st operand regno. into opcode.
+ addin = args[1]; opcode = opcode + (addin.reg % 8)
+ if narg < 2 then narg = 2 end
+ elseif c == "R" then -- Merge 2nd operand regno. into opcode.
+ addin = args[2]; opcode = opcode + (addin.reg % 8)
+ narg = 3
+ elseif c == "m" or c == "M" then -- Encode ModRM/SIB.
+ local s
+ if addin then
+ s = addin.reg
+ opcode = opcode - (s%8) -- Undo regno opcode merge.
+ else
+ s = opcode % 16 -- Undo last digit.
+ opcode = (opcode - s) / 16
+ end
+ local nn = c == "m" and 1 or 2
+ local t = args[nn]
+ if narg <= nn then narg = nn + 1 end
+ if szov == "q" and rex == 0 then rex = rex + 8 end
+ if t.reg and t.reg > 7 then rex = rex + 1 end
+ if t.xreg and t.xreg > 7 then rex = rex + 2 end
+ if s > 7 then rex = rex + 4 end
+ if needrex then rex = rex + 16 end
+ wputop(szov, opcode, rex); opcode = nil
+ local imark = sub(pat, -1) -- Force a mark (ugly).
+ -- Put ModRM/SIB with regno/last digit as spare.
+ wputmrmsib(t, imark, s, addin and addin.vreg)
+ addin = nil
+ else
+ if opcode then -- Flush opcode.
+ if szov == "q" and rex == 0 then rex = rex + 8 end
+ if needrex then rex = rex + 16 end
+ if addin and addin.reg == -1 then
+ wputop(szov, opcode - 7, rex)
+ waction("VREG", addin.vreg); wputxb(0)
+ else
+ if addin and addin.reg > 7 then rex = rex + 1 end
+ wputop(szov, opcode, rex)
+ end
+ opcode = nil
+ end
+ if c == "|" then break end
+ if c == "o" then -- Offset (pure 32 bit displacement).
+ wputdarg(args[1].disp); if narg < 2 then narg = 2 end
+ elseif c == "O" then
+ wputdarg(args[2].disp); narg = 3
+ else
+ -- Anything else is an immediate operand.
+ local a = args[narg]
+ narg = narg + 1
+ local mode, imm = a.mode, a.imm
+ if mode == "iJ" and not match("iIJ", c) then
+ werror("bad operand size for label")
+ end
+ if c == "S" then
+ wputsbarg(imm)
+ elseif c == "U" then
+ wputbarg(imm)
+ elseif c == "W" then
+ wputwarg(imm)
+ elseif c == "i" or c == "I" then
+ if mode == "iJ" then
+ wputlabel("IMM_", imm, 1)
+ elseif mode == "iI" and c == "I" then
+ waction(sz == "w" and "IMM_WB" or "IMM_DB", imm)
+ else
+ wputszarg(sz, imm)
+ end
+ elseif c == "J" then
+ if mode == "iPJ" then
+ waction("REL_A", imm) -- !x64 (secpos)
+ else
+ wputlabel("REL_", imm, 2)
+ end
+ else
+ werror("bad char `"..c.."' in pattern `"..pat.."' for `"..op.."'")
+ end
+ end
+ end
+ end
+end
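+
+-- A rough walkthrough: for the pattern "01Rm" (from add_2 above), the hex
+-- digits accumulate opcode 0x01, 'R' merges the second operand's register
+-- number into it as the ModRM spare, and 'm' undoes that merge, writes the
+-- opcode (plus any REX prefix) and encodes the first operand as ModRM/SIB
+-- with the second operand's register in the spare field.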
+
+------------------------------------------------------------------------------
+
+-- Mapping of operand modes to short names. Suppress output with '#'.
+local map_modename = {
+ r = "reg", R = "eax", C = "cl", x = "mem", m = "mrm", i = "imm",
+ f = "stx", F = "st0", J = "lbl", ["1"] = "1",
+ I = "#", S = "#", O = "#",
+}
+
+-- Return a table/string showing all possible operand modes.
+local function templatehelp(template, nparams)
+ if nparams == 0 then return "" end
+ local t = {}
+ for tm in gmatch(template, "[^%|]+") do
+ local s = map_modename[sub(tm, 1, 1)]
+ s = s..gsub(sub(tm, 2, nparams), ".", function(c)
+ return ", "..map_modename[c]
+ end)
+ if not match(s, "#") then t[#t+1] = s end
+ end
+ return t
+end
+
+-- Match operand modes against mode match part of template.
+local function matchtm(tm, args)
+ for i=1,#args do
+ if not match(args[i].mode, sub(tm, i, i)) then return end
+ end
+ return true
+end
+
+-- Handle opcodes defined with template strings.
+map_op[".template__"] = function(params, template, nparams)
+ if not params then return templatehelp(template, nparams) end
+ local args = {}
+
+ -- Zero-operand opcodes have no match part.
+ if #params == 0 then
+ dopattern(template, args, "d", params.op, nil)
+ return
+ end
+
+ -- Determine common operand size (coerce undefined size) or flag as mixed.
+ local sz, szmix, needrex
+ for i,p in ipairs(params) do
+ args[i] = parseoperand(p)
+ local nsz = args[i].opsize
+ if nsz then
+ if sz and sz ~= nsz then szmix = true else sz = nsz end
+ end
+ local nrex = args[i].needrex
+ if nrex ~= nil then
+ if needrex == nil then
+ needrex = nrex
+ elseif needrex ~= nrex then
+ werror("bad mix of byte-addressable registers")
+ end
+ end
+ end
+
+ -- Try all match:pattern pairs (separated by '|').
+ local gotmatch, lastpat
+ for tm in gmatch(template, "[^%|]+") do
+ -- Split off size match (starts after mode match) and pattern string.
+ local szm, pat = match(tm, "^(.-):(.*)$", #args+1)
+ if pat == "" then pat = lastpat else lastpat = pat end
+ if matchtm(tm, args) then
+ local prefix = sub(szm, 1, 1)
+ if prefix == "/" then -- Match both operand sizes.
+ if args[1].opsize == sub(szm, 2, 2) and
+ args[2].opsize == sub(szm, 3, 3) then
+ dopattern(pat, args, sz, params.op, needrex) -- Process pattern.
+ return
+ end
+ else -- Match common operand size.
+ local szp = sz
+ if szm == "" then szm = x64 and "qdwb" or "dwb" end -- Default sizes.
+ if prefix == "1" then szp = args[1].opsize; szmix = nil
+ elseif prefix == "2" then szp = args[2].opsize; szmix = nil end
+ if not szmix and (prefix == "." or match(szm, szp or "#")) then
+ dopattern(pat, args, szp, params.op, needrex) -- Process pattern.
+ return
+ end
+ end
+ gotmatch = true
+ end
+ end
+
+ local msg = "bad operand mode"
+ if gotmatch then
+ if szmix then
+ msg = "mixed operand size"
+ else
+ msg = sz and "bad operand size" or "missing operand size"
+ end
+ end
+
+ werror(msg.." in `"..opmodestr(params.op, args).."'")
+end
+
+------------------------------------------------------------------------------
+
+-- x64-specific opcode for 64 bit immediates and displacements.
+if x64 then
+ function map_op.mov64_2(params)
+ if not params then return { "reg, imm", "reg, [disp]", "[disp], reg" } end
+ if secpos+2 > maxsecpos then wflush() end
+ local opcode, op64, sz, rex
+ local op64 = match(params[1], "^%[%s*(.-)%s*%]$")
+ if op64 then
+ local a = parseoperand(params[2])
+ if a.mode ~= "rmR" then werror("bad operand mode") end
+ sz = a.opsize
+ rex = sz == "q" and 8 or 0
+ opcode = 0xa3
+ else
+ op64 = match(params[2], "^%[%s*(.-)%s*%]$")
+ local a = parseoperand(params[1])
+ if op64 then
+ if a.mode ~= "rmR" then werror("bad operand mode") end
+ sz = a.opsize
+ rex = sz == "q" and 8 or 0
+ opcode = 0xa1
+ else
+ if sub(a.mode, 1, 1) ~= "r" or a.opsize ~= "q" then
+ werror("bad operand mode")
+ end
+ op64 = params[2]
+ opcode = 0xb8 + (a.reg%8) -- !x64: no VREG support.
+ rex = a.reg > 7 and 9 or 8
+ end
+ end
+ wputop(sz, opcode, rex)
+ waction("IMM_D", format("(unsigned int)(%s)", op64))
+ waction("IMM_D", format("(unsigned int)((%s)>>32)", op64))
+ end
+end
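+
+-- For illustration: `| mov64 rax, (uintptr_t)p` (p being any C expression)
+-- emits REX.W B8+r followed by two IMM_D actions for the low and high
+-- 32 bits; the `reg, [disp]` and `[disp], reg` forms use opcodes A1/A3.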
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcodes for data storage.
+local function op_data(params)
+ if not params then return "imm..." end
+ local sz = sub(params.op, 2, 2)
+ if sz == "a" then sz = addrsize end
+ for _,p in ipairs(params) do
+ local a = parseoperand(p)
+ if sub(a.mode, 1, 1) ~= "i" or (a.opsize and a.opsize ~= sz) then
+ werror("bad mode or size in `"..p.."'")
+ end
+ if a.mode == "iJ" then
+ wputlabel("IMM_", a.imm, 1)
+ else
+ wputszarg(sz, a.imm)
+ end
+ if secpos+2 > maxsecpos then wflush() end
+ end
+end
+
+map_op[".byte_*"] = op_data
+map_op[".sbyte_*"] = op_data
+map_op[".word_*"] = op_data
+map_op[".dword_*"] = op_data
+map_op[".aword_*"] = op_data
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode to mark the position where the action list is to be emitted.
+map_op[".actionlist_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeactions(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the global enum is to be emitted.
+map_op[".globals_1"] = function(params)
+ if not params then return "prefix" end
+ local prefix = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobals(out, prefix) end)
+end
+
+-- Pseudo-opcode to mark the position where the global names are to be emitted.
+map_op[".globalnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobalnames(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the extern names are to be emitted.
+map_op[".externnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeexternnames(out, name) end)
+end
+
+------------------------------------------------------------------------------
+
+-- Label pseudo-opcode (converted from trailing colon form).
+map_op[".label_2"] = function(params)
+ if not params then return "[1-9] | ->global | =>pcexpr [, addr]" end
+ if secpos+2 > maxsecpos then wflush() end
+ local a = parseoperand(params[1])
+ local mode, imm = a.mode, a.imm
+ if type(imm) == "number" and (mode == "iJ" or (imm >= 1 and imm <= 9)) then
+ -- Local label (1: ... 9:) or global label (->global:).
+ waction("LABEL_LG", nil, 1)
+ wputxb(imm)
+ elseif mode == "iJ" then
+ -- PC label (=>pcexpr:).
+ waction("LABEL_PC", imm)
+ else
+ werror("bad label definition")
+ end
+ -- SETLABEL must immediately follow LABEL_LG/LABEL_PC.
+ local addr = params[2]
+ if addr then
+ local a = parseoperand(addr)
+ if a.mode == "iPJ" then
+ waction("SETLABEL", a.imm)
+ else
+ werror("bad label assignment")
+ end
+ end
+end
+map_op[".label_1"] = map_op[".label_2"]
+
+------------------------------------------------------------------------------
+
+-- Alignment pseudo-opcode.
+map_op[".align_1"] = function(params)
+ if not params then return "numpow2" end
+ if secpos+1 > maxsecpos then wflush() end
+ local align = tonumber(params[1]) or map_opsizenum[map_opsize[params[1]]]
+ if align then
+ local x = align
+ -- Must be a power of 2 in the range (2 ... 256).
+ for i=1,8 do
+ x = x / 2
+ if x == 1 then
+ waction("ALIGN", nil, 1)
+ wputxb(align-1) -- Action byte is 2**n-1.
+ return
+ end
+ end
+ end
+ werror("bad alignment")
+end
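+
+-- For illustration: `|.align 16` emits an ALIGN action with action byte 15
+-- (2^n-1), while `|.align 3` fails since 3 is not a power of 2.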
+
+-- Spacing pseudo-opcode.
+map_op[".space_2"] = function(params)
+ if not params then return "num [, filler]" end
+ if secpos+1 > maxsecpos then wflush() end
+ waction("SPACE", params[1])
+ local fill = params[2]
+ if fill then
+ fill = tonumber(fill)
+ if not fill or fill < 0 or fill > 255 then werror("bad filler") end
+ end
+ wputxb(fill or 0)
+end
+map_op[".space_1"] = map_op[".space_2"]
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode for (primitive) type definitions (map to C types).
+map_op[".type_3"] = function(params, nparams)
+ if not params then
+ return nparams == 2 and "name, ctype" or "name, ctype, reg"
+ end
+ local name, ctype, reg = params[1], params[2], params[3]
+ if not match(name, "^[%a_][%w_]*$") then
+ werror("bad type name `"..name.."'")
+ end
+ local tp = map_type[name]
+ if tp then
+ werror("duplicate type `"..name.."'")
+ end
+ if reg and not map_reg_valid_base[reg] then
+ werror("bad base register `"..(map_reg_rev[reg] or reg).."'")
+ end
+ -- Add #type to defines. A bit unclean to put it in map_archdef.
+ map_archdef["#"..name] = "sizeof("..ctype..")"
+ -- Add new type and emit shortcut define.
+ local num = ctypenum + 1
+ map_type[name] = {
+ ctype = ctype,
+ ctypefmt = format("Dt%X(%%s)", num),
+ reg = reg,
+ }
+ wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
+ ctypenum = num
+end
+map_op[".type_2"] = map_op[".type_3"]
+
+-- Dump type definitions.
+local function dumptypes(out, lvl)
+ local t = {}
+ for name in pairs(map_type) do t[#t+1] = name end
+ sort(t)
+ out:write("Type definitions:\n")
+ for _,name in ipairs(t) do
+ local tp = map_type[name]
+ local reg = tp.reg and map_reg_rev[tp.reg] or ""
+ out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Set the current section.
+function _M.section(num)
+ waction("SECTION")
+ wputxb(num)
+ wflush(true) -- SECTION is a terminal action.
+end
+
+------------------------------------------------------------------------------
+
+-- Dump architecture description.
+function _M.dumparch(out)
+ out:write(format("DynASM %s version %s, released %s\n\n",
+ _info.arch, _info.version, _info.release))
+ dumpregs(out)
+ dumpactions(out)
+end
+
+-- Dump all user defined elements.
+function _M.dumpdef(out, lvl)
+ dumptypes(out, lvl)
+ dumpglobals(out, lvl)
+ dumpexterns(out, lvl)
+end
+
+------------------------------------------------------------------------------
+
+-- Pass callbacks from/to the DynASM core.
+function _M.passcb(wl, we, wf, ww)
+ wline, werror, wfatal, wwarn = wl, we, wf, ww
+ return wflush
+end
+
+-- Setup the arch-specific module.
+function _M.setup(arch, opt)
+ g_arch, g_opt = arch, opt
+end
+
+-- Merge the core maps and the arch-specific maps.
+function _M.mergemaps(map_coreop, map_def)
+ setmetatable(map_op, { __index = map_coreop })
+ setmetatable(map_def, { __index = map_archdef })
+ return map_op, map_def
+end
+
+return _M
+
+------------------------------------------------------------------------------
+
diff --git a/src/LuaJIT/dynasm/dynasm.lua b/src/LuaJIT/dynasm/dynasm.lua
new file mode 100644
index 000000000..2ef816477
--- /dev/null
+++ b/src/LuaJIT/dynasm/dynasm.lua
@@ -0,0 +1,1084 @@
+------------------------------------------------------------------------------
+-- DynASM. A dynamic assembler for code generation engines.
+-- Originally designed and implemented for LuaJIT.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- See below for full copyright notice.
+------------------------------------------------------------------------------
+
+-- Application information.
+local _info = {
+ name = "DynASM",
+ description = "A dynamic assembler for code generation engines",
+ version = "1.3.0",
+ vernum = 10300,
+ release = "2011-05-05",
+ author = "Mike Pall",
+ url = "http://luajit.org/dynasm.html",
+ license = "MIT",
+ copyright = [[
+Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+[ MIT license: http://www.opensource.org/licenses/mit-license.php ]
+]],
+}
+
+-- Cache library functions.
+local type, pairs, ipairs = type, pairs, ipairs
+local pcall, error, assert = pcall, error, assert
+local _s = string
+local sub, match, gmatch, gsub = _s.sub, _s.match, _s.gmatch, _s.gsub
+local format, rep, upper = _s.format, _s.rep, _s.upper
+local _t = table
+local insert, remove, concat, sort = _t.insert, _t.remove, _t.concat, _t.sort
+local exit = os.exit
+local io = io
+local stdin, stdout, stderr = io.stdin, io.stdout, io.stderr
+
+------------------------------------------------------------------------------
+
+-- Program options.
+local g_opt = {}
+
+-- Global state for current file.
+local g_fname, g_curline, g_indent, g_lineno, g_synclineno, g_arch
+local g_errcount = 0
+
+-- Write buffer for output file.
+local g_wbuffer, g_capbuffer
+
+------------------------------------------------------------------------------
+
+-- Write an output line (or callback function) to the buffer.
+local function wline(line, needindent)
+ local buf = g_capbuffer or g_wbuffer
+ buf[#buf+1] = needindent and g_indent..line or line
+ g_synclineno = g_synclineno + 1
+end
+
+-- Write assembler line as a comment, if requested.
+local function wcomment(aline)
+ if g_opt.comment then
+ wline(g_opt.comment..aline..g_opt.endcomment, true)
+ end
+end
+
+-- Resync CPP line numbers.
+local function wsync()
+ if g_synclineno ~= g_lineno and g_opt.cpp then
+ wline("# "..g_lineno..' "'..g_fname..'"')
+ g_synclineno = g_lineno
+ end
+end
+
+-- Dummy action flush function. Replaced with arch-specific function later.
+local function wflush(term)
+end
+
+-- Dump all buffered output lines.
+local function wdumplines(out, buf)
+ for _,line in ipairs(buf) do
+ if type(line) == "string" then
+ assert(out:write(line, "\n"))
+ else
+ -- Special callback to dynamically insert lines after end of processing.
+ line(out)
+ end
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Emit an error. Processing continues with next statement.
+local function werror(msg)
+ error(format("%s:%s: error: %s:\n%s", g_fname, g_lineno, msg, g_curline), 0)
+end
+
+-- Emit a fatal error. Processing stops.
+local function wfatal(msg)
+ g_errcount = "fatal"
+ werror(msg)
+end
+
+-- Print a warning. Processing continues.
+local function wwarn(msg)
+ stderr:write(format("%s:%s: warning: %s:\n%s\n",
+ g_fname, g_lineno, msg, g_curline))
+end
+
+-- Print caught error message. But suppress excessive errors.
+local function wprinterr(...)
+ if type(g_errcount) == "number" then
+ -- Regular error.
+ g_errcount = g_errcount + 1
+ if g_errcount < 21 then -- Seems to be a reasonable limit.
+ stderr:write(...)
+ elseif g_errcount == 21 then
+ stderr:write(g_fname,
+ ":*: warning: too many errors (suppressed further messages).\n")
+ end
+ else
+ -- Fatal error.
+ stderr:write(...)
+ return true -- Stop processing.
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Map holding all option handlers.
+local opt_map = {}
+local opt_current
+
+-- Print error and exit with error status.
+local function opterror(...)
+ stderr:write("dynasm.lua: ERROR: ", ...)
+ stderr:write("\n")
+ exit(1)
+end
+
+-- Get option parameter.
+local function optparam(args)
+ local argn = args.argn
+ local p = args[argn]
+ if not p then
+ opterror("missing parameter for option `", opt_current, "'.")
+ end
+ args.argn = argn + 1
+ return p
+end
+
+------------------------------------------------------------------------------
+
+-- Core pseudo-opcodes.
+local map_coreop = {}
+-- Dummy opcode map. Replaced by arch-specific map.
+local map_op = {}
+
+-- Forward declarations.
+local dostmt
+local readfile
+
+------------------------------------------------------------------------------
+
+-- Map for defines (initially empty, chains to arch-specific map).
+local map_def = {}
+
+-- Pseudo-opcode to define a substitution.
+map_coreop[".define_2"] = function(params, nparams)
+ if not params then return nparams == 1 and "name" or "name, subst" end
+ local name, def = params[1], params[2] or "1"
+ if not match(name, "^[%a_][%w_]*$") then werror("bad or duplicate define") end
+ map_def[name] = def
+end
+map_coreop[".define_1"] = map_coreop[".define_2"]
+
+-- Define a substitution on the command line.
+function opt_map.D(args)
+ local namesubst = optparam(args)
+ local name, subst = match(namesubst, "^([%a_][%w_]*)=(.*)$")
+ if name then
+ map_def[name] = subst
+ elseif match(namesubst, "^[%a_][%w_]*$") then
+ map_def[namesubst] = "1"
+ else
+ opterror("bad define")
+ end
+end
+
+-- Undefine a substitution on the command line.
+function opt_map.U(args)
+ local name = optparam(args)
+ if match(name, "^[%a_][%w_]*$") then
+ map_def[name] = nil
+ else
+ opterror("bad define")
+ end
+end
+
+-- Helper for definesubst.
+local gotsubst
+
+local function definesubst_one(word)
+ local subst = map_def[word]
+ if subst then gotsubst = word; return subst else return word end
+end
+
+-- Iteratively substitute defines.
+local function definesubst(stmt)
+ -- Limit number of iterations.
+ for i=1,100 do
+ gotsubst = false
+ stmt = gsub(stmt, "#?[%w_]+", definesubst_one)
+ if not gotsubst then break end
+ end
+ if gotsubst then wfatal("recursive define involving `"..gotsubst.."'") end
+ return stmt
+end
+
+-- Dump all defines.
+local function dumpdefines(out, lvl)
+ local t = {}
+ for name in pairs(map_def) do
+ t[#t+1] = name
+ end
+ sort(t)
+ out:write("Defines:\n")
+ for _,name in ipairs(t) do
+ local subst = map_def[name]
+ if g_arch then subst = g_arch.revdef(subst) end
+ out:write(format(" %-20s %s\n", name, subst))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Support variables for conditional assembly.
+local condlevel = 0
+local condstack = {}
+
+-- Evaluate condition with a Lua expression. Substitutions already performed.
+local function cond_eval(cond)
+ local func, err
+ if setfenv then
+ func, err = loadstring("return "..cond, "=expr")
+ else
+ -- No globals. All unknown identifiers evaluate to nil.
+ func, err = load("return "..cond, "=expr", "t", {})
+ end
+ if func then
+ if setfenv then
+ setfenv(func, {}) -- No globals. All unknown identifiers evaluate to nil.
+ end
+ local ok, res = pcall(func)
+ if ok then
+ if res == 0 then return false end -- Oh well.
+ return not not res
+ end
+ err = res
+ end
+ wfatal("bad condition: "..err)
+end
+
+-- Skip statements until next conditional pseudo-opcode at the same level.
+local function stmtskip()
+ local dostmt_save = dostmt
+ local lvl = 0
+ dostmt = function(stmt)
+ local op = match(stmt, "^%s*(%S+)")
+ if op == ".if" then
+ lvl = lvl + 1
+ elseif lvl ~= 0 then
+ if op == ".endif" then lvl = lvl - 1 end
+ elseif op == ".elif" or op == ".else" or op == ".endif" then
+ dostmt = dostmt_save
+ dostmt(stmt)
+ end
+ end
+end
+
+-- Pseudo-opcodes for conditional assembly.
+map_coreop[".if_1"] = function(params)
+ if not params then return "condition" end
+ local lvl = condlevel + 1
+ local res = cond_eval(params[1])
+ condlevel = lvl
+ condstack[lvl] = res
+ if not res then stmtskip() end
+end
+
+map_coreop[".elif_1"] = function(params)
+ if not params then return "condition" end
+ if condlevel == 0 then wfatal(".elif without .if") end
+ local lvl = condlevel
+ local res = condstack[lvl]
+ if res then
+ if res == "else" then wfatal(".elif after .else") end
+ else
+ res = cond_eval(params[1])
+ if res then
+ condstack[lvl] = res
+ return
+ end
+ end
+ stmtskip()
+end
+
+map_coreop[".else_0"] = function(params)
+ if condlevel == 0 then wfatal(".else without .if") end
+ local lvl = condlevel
+ local res = condstack[lvl]
+ condstack[lvl] = "else"
+ if res then
+ if res == "else" then wfatal(".else after .else") end
+ stmtskip()
+ end
+end
+
+map_coreop[".endif_0"] = function(params)
+ local lvl = condlevel
+ if lvl == 0 then wfatal(".endif without .if") end
+ condlevel = lvl - 1
+end
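+
+-- For illustration:
+--   |.if X64
+--   |  ..64 bit code..
+--   |.else
+--   |  ..32 bit code..
+--   |.endif
+-- The condition is a Lua expression evaluated after define substitution,
+-- so e.g. `-D X64` selects the first branch.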
+
+-- Check for unfinished conditionals.
+local function checkconds()
+ if g_errcount ~= "fatal" and condlevel ~= 0 then
+ wprinterr(g_fname, ":*: error: unbalanced conditional\n")
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Search for a file in the given path and open it for reading.
+local function pathopen(path, name)
+ local dirsep = match(package.path, "\\") and "\\" or "/"
+ for _,p in ipairs(path) do
+ local fullname = p == "" and name or p..dirsep..name
+ local fin = io.open(fullname, "r")
+ if fin then
+ g_fname = fullname
+ return fin
+ end
+ end
+end
+
+-- Include a file.
+map_coreop[".include_1"] = function(params)
+ if not params then return "filename" end
+ local name = params[1]
+  -- Save state. Ugly, I know, but upvalues are fast.
+ local gf, gl, gcl, gi = g_fname, g_lineno, g_curline, g_indent
+ -- Read the included file.
+ local fatal = readfile(pathopen(g_opt.include, name) or
+ wfatal("include file `"..name.."' not found"))
+ -- Restore state.
+ g_synclineno = -1
+ g_fname, g_lineno, g_curline, g_indent = gf, gl, gcl, gi
+ if fatal then wfatal("in include file") end
+end
+
+-- Make .include and conditionals initially available, too.
+map_op[".include_1"] = map_coreop[".include_1"]
+map_op[".if_1"] = map_coreop[".if_1"]
+map_op[".elif_1"] = map_coreop[".elif_1"]
+map_op[".else_0"] = map_coreop[".else_0"]
+map_op[".endif_0"] = map_coreop[".endif_0"]
+
+------------------------------------------------------------------------------
+
+-- Support variables for macros.
+local mac_capture, mac_lineno, mac_name
+local mac_active = {}
+local mac_list = {}
+
+-- Pseudo-opcode to define a macro.
+map_coreop[".macro_*"] = function(mparams)
+ if not mparams then return "name [, params...]" end
+ -- Split off and validate macro name.
+ local name = remove(mparams, 1)
+ if not name then werror("missing macro name") end
+ if not (match(name, "^[%a_][%w_%.]*$") or match(name, "^%.[%w_%.]*$")) then
+ wfatal("bad macro name `"..name.."'")
+ end
+ -- Validate macro parameter names.
+ local mdup = {}
+ for _,mp in ipairs(mparams) do
+ if not match(mp, "^[%a_][%w_]*$") then
+ wfatal("bad macro parameter name `"..mp.."'")
+ end
+ if mdup[mp] then wfatal("duplicate macro parameter name `"..mp.."'") end
+ mdup[mp] = true
+ end
+ -- Check for duplicate or recursive macro definitions.
+ local opname = name.."_"..#mparams
+ if map_op[opname] or map_op[name.."_*"] then
+ wfatal("duplicate macro `"..name.."' ("..#mparams.." parameters)")
+ end
+ if mac_capture then wfatal("recursive macro definition") end
+
+ -- Enable statement capture.
+ local lines = {}
+ mac_lineno = g_lineno
+ mac_name = name
+ mac_capture = function(stmt) -- Statement capture function.
+ -- Stop macro definition with .endmacro pseudo-opcode.
+ if not match(stmt, "^%s*.endmacro%s*$") then
+ lines[#lines+1] = stmt
+ return
+ end
+ mac_capture = nil
+ mac_lineno = nil
+ mac_name = nil
+ mac_list[#mac_list+1] = opname
+ -- Add macro-op definition.
+ map_op[opname] = function(params)
+ if not params then return mparams, lines end
+ -- Protect against recursive macro invocation.
+ if mac_active[opname] then wfatal("recursive macro invocation") end
+ mac_active[opname] = true
+ -- Setup substitution map.
+ local subst = {}
+ for i,mp in ipairs(mparams) do subst[mp] = params[i] end
+ local mcom
+ if g_opt.maccomment and g_opt.comment then
+ mcom = " MACRO "..name.." ("..#mparams..")"
+ wcomment("{"..mcom)
+ end
+    -- Loop through all captured statements.
+ for _,stmt in ipairs(lines) do
+ -- Substitute macro parameters.
+ local st = gsub(stmt, "[%w_]+", subst)
+ st = definesubst(st)
+ st = gsub(st, "%s*%.%.%s*", "") -- Token paste a..b.
+ if mcom and sub(st, 1, 1) ~= "|" then wcomment(st) end
+ -- Emit statement. Use a protected call for better diagnostics.
+ local ok, err = pcall(dostmt, st)
+ if not ok then
+ -- Add the captured statement to the error.
+ wprinterr(err, "\n", g_indent, "| ", stmt,
+ "\t[MACRO ", name, " (", #mparams, ")]\n")
+ end
+ end
+ if mcom then wcomment("}"..mcom) end
+ mac_active[opname] = nil
+ end
+ end
+end
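+
+-- For illustration (the names are made up):
+--   |.macro checkzero, reg, target
+--   |  cmp reg, 0
+--   |  je target
+--   |.endmacro
+-- A later `| checkzero eax, ->fail` substitutes the parameters into the
+-- captured lines and feeds them back through dostmt.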
+
+-- An .endmacro pseudo-opcode outside of a macro definition is an error.
+map_coreop[".endmacro_0"] = function(params)
+ wfatal(".endmacro without .macro")
+end
+
+-- Dump all macros and their contents (with -PP only).
+local function dumpmacros(out, lvl)
+ sort(mac_list)
+ out:write("Macros:\n")
+ for _,opname in ipairs(mac_list) do
+ local name = sub(opname, 1, -3)
+ local params, lines = map_op[opname]()
+ out:write(format(" %-20s %s\n", name, concat(params, ", ")))
+ if lvl > 1 then
+ for _,line in ipairs(lines) do
+ out:write(" |", line, "\n")
+ end
+ out:write("\n")
+ end
+ end
+ out:write("\n")
+end
+
+-- Check for unfinished macro definitions.
+local function checkmacros()
+ if mac_capture then
+ wprinterr(g_fname, ":", mac_lineno,
+ ": error: unfinished .macro `", mac_name ,"'\n")
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Support variables for captures.
+local cap_lineno, cap_name
+local cap_buffers = {}
+local cap_used = {}
+
+-- Start a capture.
+map_coreop[".capture_1"] = function(params)
+ if not params then return "name" end
+ wflush()
+ local name = params[1]
+ if not match(name, "^[%a_][%w_]*$") then
+ wfatal("bad capture name `"..name.."'")
+ end
+ if cap_name then
+ wfatal("already capturing to `"..cap_name.."' since line "..cap_lineno)
+ end
+ cap_name = name
+ cap_lineno = g_lineno
+ -- Create or continue a capture buffer and start the output line capture.
+ local buf = cap_buffers[name]
+ if not buf then buf = {}; cap_buffers[name] = buf end
+ g_capbuffer = buf
+ g_synclineno = 0
+end
+
+-- Stop a capture.
+map_coreop[".endcapture_0"] = function(params)
+ wflush()
+ if not cap_name then wfatal(".endcapture without a valid .capture") end
+ cap_name = nil
+ cap_lineno = nil
+ g_capbuffer = nil
+ g_synclineno = 0
+end
+
+-- Dump a capture buffer.
+map_coreop[".dumpcapture_1"] = function(params)
+ if not params then return "name" end
+ wflush()
+ local name = params[1]
+ if not match(name, "^[%a_][%w_]*$") then
+ wfatal("bad capture name `"..name.."'")
+ end
+ cap_used[name] = true
+ wline(function(out)
+ local buf = cap_buffers[name]
+ if buf then wdumplines(out, buf) end
+ end)
+ g_synclineno = 0
+end
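+
+-- For illustration (SLOWPATH is an arbitrary name): statements between
+-- `|.capture SLOWPATH` and `|.endcapture` are diverted into a named buffer;
+-- a later `|.dumpcapture SLOWPATH` emits the buffered lines at that point
+-- in the output file.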
+
+-- Dump all captures and their buffers (with -PP only).
+local function dumpcaptures(out, lvl)
+ out:write("Captures:\n")
+ for name,buf in pairs(cap_buffers) do
+ out:write(format(" %-20s %4s)\n", name, "("..#buf))
+ if lvl > 1 then
+ local bar = rep("=", 76)
+ out:write(" ", bar, "\n")
+ for _,line in ipairs(buf) do
+ out:write(" ", line, "\n")
+ end
+ out:write(" ", bar, "\n\n")
+ end
+ end
+ out:write("\n")
+end
+
+-- Check for unfinished or unused captures.
+local function checkcaptures()
+ if cap_name then
+ wprinterr(g_fname, ":", cap_lineno,
+ ": error: unfinished .capture `", cap_name,"'\n")
+ return
+ end
+ for name in pairs(cap_buffers) do
+ if not cap_used[name] then
+ wprinterr(g_fname, ":*: error: missing .dumpcapture ", name ,"\n")
+ end
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Section names.
+local map_sections = {}
+
+-- Pseudo-opcode to define code sections.
+-- TODO: Data sections, BSS sections. Needs extra C code and API.
+map_coreop[".section_*"] = function(params)
+ if not params then return "name..." end
+ if #map_sections > 0 then werror("duplicate section definition") end
+ wflush()
+ for sn,name in ipairs(params) do
+ local opname = "."..name.."_0"
+ if not match(name, "^[%a][%w_]*$") or
+ map_op[opname] or map_op["."..name.."_*"] then
+ werror("bad section name `"..name.."'")
+ end
+ map_sections[#map_sections+1] = name
+ wline(format("#define DASM_SECTION_%s\t%d", upper(name), sn-1))
+ map_op[opname] = function(params) g_arch.section(sn-1) end
+ end
+ wline(format("#define DASM_MAXSECTION\t\t%d", #map_sections))
+end
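+
+-- For illustration: `|.section code, data` emits DASM_SECTION_CODE=0,
+-- DASM_SECTION_DATA=1 and DASM_MAXSECTION=2, and defines `|.code` and
+-- `|.data` pseudo-opcodes that switch the active section via g_arch.section().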
+
+-- Dump all sections.
+local function dumpsections(out, lvl)
+ out:write("Sections:\n")
+ for _,name in ipairs(map_sections) do
+ out:write(format(" %s\n", name))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Load architecture-specific module.
+local function loadarch(arch)
+ if not match(arch, "^[%w_]+$") then return "bad arch name" end
+ local ok, m_arch = pcall(require, "dasm_"..arch)
+ if not ok then return "cannot load module: "..m_arch end
+ g_arch = m_arch
+ wflush = m_arch.passcb(wline, werror, wfatal, wwarn)
+ m_arch.setup(arch, g_opt)
+ map_op, map_def = m_arch.mergemaps(map_coreop, map_def)
+end
+
+-- Dump architecture description.
+function opt_map.dumparch(args)
+ local name = optparam(args)
+ if not g_arch then
+ local err = loadarch(name)
+ if err then opterror(err) end
+ end
+
+ local t = {}
+ for name in pairs(map_coreop) do t[#t+1] = name end
+ for name in pairs(map_op) do t[#t+1] = name end
+ sort(t)
+
+ local out = stdout
+ local _arch = g_arch._info
+ out:write(format("%s version %s, released %s, %s\n",
+ _info.name, _info.version, _info.release, _info.url))
+ g_arch.dumparch(out)
+
+ local pseudo = true
+ out:write("Pseudo-Opcodes:\n")
+ for _,sname in ipairs(t) do
+ local name, nparam = match(sname, "^(.+)_([0-9%*])$")
+ if name then
+ if pseudo and sub(name, 1, 1) ~= "." then
+ out:write("\nOpcodes:\n")
+ pseudo = false
+ end
+ local f = map_op[sname]
+ local s
+ if nparam ~= "*" then nparam = nparam + 0 end
+ if nparam == 0 then
+ s = ""
+ elseif type(f) == "string" then
+ s = map_op[".template__"](nil, f, nparam)
+ else
+ s = f(nil, nparam)
+ end
+ if type(s) == "table" then
+ for _,s2 in ipairs(s) do
+ out:write(format(" %-12s %s\n", name, s2))
+ end
+ else
+ out:write(format(" %-12s %s\n", name, s))
+ end
+ end
+ end
+ out:write("\n")
+ exit(0)
+end
+
+-- Pseudo-opcode to set the architecture.
+-- Only initially available (map_op is replaced when called).
+map_op[".arch_1"] = function(params)
+ if not params then return "name" end
+ local err = loadarch(params[1])
+ if err then wfatal(err) end
+end
+
+-- Dummy .arch pseudo-opcode to improve the error report.
+map_coreop[".arch_1"] = function(params)
+ if not params then return "name" end
+ wfatal("duplicate .arch statement")
+end
+
+------------------------------------------------------------------------------
+
+-- Dummy pseudo-opcode. Don't confuse '.nop' with 'nop'.
+map_coreop[".nop_*"] = function(params)
+ if not params then return "[ignored...]" end
+end
+
+-- Pseudo-opcodes to raise errors.
+map_coreop[".error_1"] = function(params)
+ if not params then return "message" end
+ werror(params[1])
+end
+
+map_coreop[".fatal_1"] = function(params)
+ if not params then return "message" end
+ wfatal(params[1])
+end
+
+-- Dump all user defined elements.
+local function dumpdef(out)
+ local lvl = g_opt.dumpdef
+ if lvl == 0 then return end
+ dumpsections(out, lvl)
+ dumpdefines(out, lvl)
+ if g_arch then g_arch.dumpdef(out, lvl) end
+ dumpmacros(out, lvl)
+ dumpcaptures(out, lvl)
+end
+
+------------------------------------------------------------------------------
+
+-- Helper for splitstmt.
+local splitlvl
+
+local function splitstmt_one(c)
+ if c == "(" then
+ splitlvl = ")"..splitlvl
+ elseif c == "[" then
+ splitlvl = "]"..splitlvl
+ elseif c == "{" then
+ splitlvl = "}"..splitlvl
+ elseif c == ")" or c == "]" or c == "}" then
+ if sub(splitlvl, 1, 1) ~= c then werror("unbalanced (), [] or {}") end
+ splitlvl = sub(splitlvl, 2)
+ elseif splitlvl == "" then
+ return " \0 "
+ end
+ return c
+end
+
+-- Split statement into (pseudo-)opcode and params.
+local function splitstmt(stmt)
+ -- Convert label with trailing-colon into .label statement.
+ local label = match(stmt, "^%s*(.+):%s*$")
+ if label then return ".label", {label} end
+
+ -- Split at commas and equal signs, but obey parentheses and brackets.
+ splitlvl = ""
+ stmt = gsub(stmt, "[,%(%)%[%]{}]", splitstmt_one)
+ if splitlvl ~= "" then werror("unbalanced () or []") end
+
+ -- Split off opcode.
+ local op, other = match(stmt, "^%s*([^%s%z]+)%s*(.*)$")
+ if not op then werror("bad statement syntax") end
+
+ -- Split parameters.
+ local params = {}
+ for p in gmatch(other, "%s*(%Z+)%z?") do
+ params[#params+1] = gsub(p, "%s+$", "")
+ end
+ if #params > 16 then werror("too many parameters") end
+
+ params.op = op
+ return op, params
+end
+
+-- Process a single statement.
+dostmt = function(stmt)
+ -- Ignore empty statements.
+ if match(stmt, "^%s*$") then return end
+
+ -- Capture macro defs before substitution.
+ if mac_capture then return mac_capture(stmt) end
+ stmt = definesubst(stmt)
+
+ -- Emit C code without parsing the line.
+ if sub(stmt, 1, 1) == "|" then
+ local tail = sub(stmt, 2)
+ wflush()
+ if sub(tail, 1, 2) == "//" then wcomment(tail) else wline(tail, true) end
+ return
+ end
+
+ -- Split into (pseudo-)opcode and params.
+ local op, params = splitstmt(stmt)
+
+ -- Get opcode handler (matching # of parameters or generic handler).
+ local f = map_op[op.."_"..#params] or map_op[op.."_*"]
+ if not f then
+ if not g_arch then wfatal("first statement must be .arch") end
+ -- Improve error report.
+ for i=0,9 do
+ if map_op[op.."_"..i] then
+ werror("wrong number of parameters for `"..op.."'")
+ end
+ end
+ werror("unknown statement `"..op.."'")
+ end
+
+ -- Call opcode handler or special handler for template strings.
+ if type(f) == "string" then
+ map_op[".template__"](params, f)
+ else
+ f(params)
+ end
+end
+
+-- Process a single line.
+local function doline(line)
+ if g_opt.flushline then wflush() end
+
+ -- Assembler line?
+ local indent, aline = match(line, "^(%s*)%|(.*)$")
+ if not aline then
+ -- No, plain C code line, need to flush first.
+ wflush()
+ wsync()
+ wline(line, false)
+ return
+ end
+
+ g_indent = indent -- Remember current line indentation.
+
+ -- Emit C code (even from macros). Avoids echo and line parsing.
+ if sub(aline, 1, 1) == "|" then
+ if not mac_capture then
+ wsync()
+ elseif g_opt.comment then
+ wsync()
+ wcomment(aline)
+ end
+ dostmt(aline)
+ return
+ end
+
+ -- Echo assembler line as a comment.
+ if g_opt.comment then
+ wsync()
+ wcomment(aline)
+ end
+
+ -- Strip assembler comments.
+ aline = gsub(aline, "//.*$", "")
+
+ -- Split line into statements at semicolons.
+ if match(aline, ";") then
+ for stmt in gmatch(aline, "[^;]+") do dostmt(stmt) end
+ else
+ dostmt(aline)
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Write DynASM header.
+local function dasmhead(out)
+ out:write(format([[
+/*
+** This file has been pre-processed with DynASM.
+** %s
+** DynASM version %s, DynASM %s version %s
+** DO NOT EDIT! The original file is in "%s".
+*/
+
+#if DASM_VERSION != %d
+#error "Version mismatch between DynASM and included encoding engine"
+#endif
+
+]], _info.url,
+ _info.version, g_arch._info.arch, g_arch._info.version,
+ g_fname, _info.vernum))
+end
+
+-- Read input file.
+readfile = function(fin)
+ g_indent = ""
+ g_lineno = 0
+ g_synclineno = -1
+
+ -- Process all lines.
+ for line in fin:lines() do
+ g_lineno = g_lineno + 1
+ g_curline = line
+ local ok, err = pcall(doline, line)
+ if not ok and wprinterr(err, "\n") then return true end
+ end
+ wflush()
+
+ -- Close input file.
+ assert(fin == stdin or fin:close())
+end
+
+-- Write output file.
+local function writefile(outfile)
+ local fout
+
+ -- Open output file.
+ if outfile == nil or outfile == "-" then
+ fout = stdout
+ else
+ fout = assert(io.open(outfile, "w"))
+ end
+
+  -- Write all buffered lines.
+ wdumplines(fout, g_wbuffer)
+
+ -- Close output file.
+ assert(fout == stdout or fout:close())
+
+ -- Optionally dump definitions.
+ dumpdef(fout == stdout and stderr or stdout)
+end
+
+-- Translate an input file to an output file.
+local function translate(infile, outfile)
+ g_wbuffer = {}
+ g_indent = ""
+ g_lineno = 0
+ g_synclineno = -1
+
+ -- Put header.
+ wline(dasmhead)
+
+ -- Read input file.
+ local fin
+ if infile == "-" then
+ g_fname = "(stdin)"
+ fin = stdin
+ else
+ g_fname = infile
+ fin = assert(io.open(infile, "r"))
+ end
+ readfile(fin)
+
+ -- Check for errors.
+ if not g_arch then
+ wprinterr(g_fname, ":*: error: missing .arch directive\n")
+ end
+ checkconds()
+ checkmacros()
+ checkcaptures()
+
+ if g_errcount ~= 0 then
+ stderr:write(g_fname, ":*: info: ", g_errcount, " error",
+ (type(g_errcount) == "number" and g_errcount > 1) and "s" or "",
+ " in input file -- no output file generated.\n")
+ dumpdef(stderr)
+ exit(1)
+ end
+
+ -- Write output file.
+ writefile(outfile)
+end
+
+------------------------------------------------------------------------------
+
+-- Print help text.
+function opt_map.help()
+ stdout:write("DynASM -- ", _info.description, ".\n")
+ stdout:write("DynASM ", _info.version, " ", _info.release, " ", _info.url, "\n")
+ stdout:write[[
+
+Usage: dynasm [OPTION]... INFILE.dasc|-
+
+ -h, --help Display this help text.
+ -V, --version Display version and copyright information.
+
+ -o, --outfile FILE Output file name (default is stdout).
+ -I, --include DIR Add directory to the include search path.
+
+ -c, --ccomment Use /* */ comments for assembler lines.
+ -C, --cppcomment Use // comments for assembler lines (default).
+ -N, --nocomment Suppress assembler lines in output.
+ -M, --maccomment Show macro expansions as comments (default off).
+
+ -L, --nolineno Suppress CPP line number information in output.
+ -F, --flushline Flush action list for every line.
+
+ -D NAME[=SUBST] Define a substitution.
+ -U NAME Undefine a substitution.
+
+ -P, --dumpdef Dump defines, macros, etc. Repeat for more output.
+ -A, --dumparch ARCH Load architecture ARCH and dump description.
+]]
+ exit(0)
+end
+
+-- Print version information.
+function opt_map.version()
+ stdout:write(format("%s version %s, released %s\n%s\n\n%s",
+ _info.name, _info.version, _info.release, _info.url, _info.copyright))
+ exit(0)
+end
+
+-- Misc. options.
+function opt_map.outfile(args) g_opt.outfile = optparam(args) end
+function opt_map.include(args) insert(g_opt.include, 1, optparam(args)) end
+function opt_map.ccomment() g_opt.comment = "/*|"; g_opt.endcomment = " */" end
+function opt_map.cppcomment() g_opt.comment = "//|"; g_opt.endcomment = "" end
+function opt_map.nocomment() g_opt.comment = false end
+function opt_map.maccomment() g_opt.maccomment = true end
+function opt_map.nolineno() g_opt.cpp = false end
+function opt_map.flushline() g_opt.flushline = true end
+function opt_map.dumpdef() g_opt.dumpdef = g_opt.dumpdef + 1 end
+
+------------------------------------------------------------------------------
+
+-- Short aliases for long options.
+local opt_alias = {
+ h = "help", ["?"] = "help", V = "version",
+ o = "outfile", I = "include",
+ c = "ccomment", C = "cppcomment", N = "nocomment", M = "maccomment",
+ L = "nolineno", F = "flushline",
+ P = "dumpdef", A = "dumparch",
+}
+
+-- Parse single option.
+local function parseopt(opt, args)
+ opt_current = #opt == 1 and "-"..opt or "--"..opt
+ local f = opt_map[opt] or opt_map[opt_alias[opt]]
+ if not f then
+ opterror("unrecognized option `", opt_current, "'. Try `--help'.\n")
+ end
+ f(args)
+end
+
+-- Parse arguments.
+local function parseargs(args)
+ -- Default options.
+ g_opt.comment = "//|"
+ g_opt.endcomment = ""
+ g_opt.cpp = true
+ g_opt.dumpdef = 0
+ g_opt.include = { "" }
+
+ -- Process all option arguments.
+ args.argn = 1
+ repeat
+ local a = args[args.argn]
+ if not a then break end
+ local lopt, opt = match(a, "^%-(%-?)(.+)")
+ if not opt then break end
+ args.argn = args.argn + 1
+ if lopt == "" then
+ -- Loop through short options.
+ for o in gmatch(opt, ".") do parseopt(o, args) end
+ else
+ -- Long option.
+ parseopt(opt, args)
+ end
+ until false
+
+ -- Check for proper number of arguments.
+ local nargs = #args - args.argn + 1
+ if nargs ~= 1 then
+ if nargs == 0 then
+ if g_opt.dumpdef > 0 then return dumpdef(stdout) end
+ end
+ opt_map.help()
+ end
+
+ -- Translate a single input file to a single output file
+ -- TODO: Handle multiple files?
+ translate(args[args.argn], g_opt.outfile)
+end
+
+------------------------------------------------------------------------------
+
+-- Add the directory dynasm.lua resides in to the Lua module search path.
+local arg = arg
+if arg and arg[0] then
+ local prefix = match(arg[0], "^(.*[/\\])")
+ if prefix then package.path = prefix.."?.lua;"..package.path end
+end
+
+-- Start DynASM.
+parseargs{...}
+
+------------------------------------------------------------------------------
+
diff --git a/src/LuaJIT/etc/luajit.1 b/src/LuaJIT/etc/luajit.1
new file mode 100644
index 000000000..bd1074a04
--- /dev/null
+++ b/src/LuaJIT/etc/luajit.1
@@ -0,0 +1,85 @@
+.TH luajit 1 "" "" "LuaJIT documentation"
+.SH NAME
+luajit \- Just-In-Time Compiler for the Lua Language
+.SH SYNOPSIS
+.B luajit
+[\fIoptions\fR]... [\fIscript\fR [\fIargs\fR]...]
+.SH "WEB SITE"
+.IR http://luajit.org
+.SH DESCRIPTION
+.PP
+This is the command-line program to run Lua programs with \fBLuaJIT\fR.
+.PP
+\fBLuaJIT\fR is a just-in-time (JIT) compiler for the Lua language.
+The virtual machine (VM) is based on a fast interpreter combined with
+a trace compiler. It can significantly improve the performance of Lua programs.
+.PP
+\fBLuaJIT\fR is API\- and ABI-compatible with the VM of the standard
+Lua\ 5.1 interpreter. When embedding the VM into an application,
+the built library can be used as a drop-in replacement.
+.SH OPTIONS
+.TP
+.BI "\-e " chunk
+Run the given chunk of Lua code.
+.TP
+.BI "\-l " library
+Load the named library, just like \fBrequire("\fR\fIlibrary\fR\fB")\fR.
+.TP
+.BI "\-b " ...
+Save or list bytecode. Run without arguments to get help on options.
+.TP
+.BI "\-j " command
+Perform LuaJIT control command (optional space after \fB\-j\fR).
+.TP
+.BI "\-O" [opt]
+Control LuaJIT optimizations.
+.TP
+.B "\-i"
+Run in interactive mode.
+.TP
+.B "\-v"
+Show \fBLuaJIT\fR version.
+.TP
+.B "\-\-"
+Stop processing options.
+.TP
+.B "\-"
+Read script from stdin instead.
+.PP
+After all options are processed, the given \fIscript\fR is run.
+The arguments are passed in the global \fIarg\fR table.
+.PP
+Interactive mode is only entered if no \fIscript\fR and no \fB\-e\fR
+option is given. Interactive mode can be left with EOF (\fICtrl\-Z\fR).
+.SH EXAMPLES
+.TP
+luajit hello.lua world
+
+Prints "Hello world", assuming \fIhello.lua\fR contains:
+.br
+ print("Hello", arg[1])
+.TP
+luajit \-e "local x=0; for i=1,1e9 do x=x+i end; print(x)"
+
+Calculates the sum of the numbers from 1 to 1000000000.
+.br
+And finishes in a reasonable amount of time, too.
+.TP
+luajit \-jv \-e "for i=1,10 do for j=1,10 do for k=1,100 do end end end"
+
+Runs some nested loops and shows the resulting traces.
+.SH COPYRIGHT
+.PP
+\fBLuaJIT\fR is Copyright \(co 2005-2012 Mike Pall.
+.br
+\fBLuaJIT\fR is open source software, released under the MIT license.
+.SH SEE ALSO
+.PP
+More details in the provided HTML docs or at:
+.IR http://luajit.org
+.br
+More about the Lua language can be found at:
+.IR http://lua.org/docs.html
+.PP
+lua(1)
diff --git a/src/LuaJIT/etc/luajit.pc b/src/LuaJIT/etc/luajit.pc
new file mode 100644
index 000000000..9fc4754a4
--- /dev/null
+++ b/src/LuaJIT/etc/luajit.pc
@@ -0,0 +1,24 @@
+# Package information for LuaJIT to be used by pkg-config.
+majver=2
+minver=0
+relver=0
+version=${majver}.${minver}.${relver}-beta10
+abiver=5.1
+
+prefix=/usr/local
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+libname=luajit-${abiver}
+includedir=${prefix}/include/luajit-${majver}.${minver}
+
+INSTALL_LMOD=${prefix}/share/lua/${abiver}
+INSTALL_CMOD=${prefix}/lib/lua/${abiver}
+
+Name: LuaJIT
+Description: Just-in-time compiler for Lua
+URL: http://luajit.org
+Version: ${version}
+Requires:
+Libs: -L${libdir} -l${libname}
+Libs.private: -Wl,-E -lm -ldl
+Cflags: -I${includedir}
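+
+# A brief usage sketch (assuming this file is installed on the pkg-config
+# search path under the name "luajit.pc"):
+#
+#   cc -o app app.c $(pkg-config --cflags --libs luajit)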
diff --git a/src/lua/etc/strict.lua b/src/LuaJIT/etc/strict.lua
similarity index 99%
rename from src/lua/etc/strict.lua
rename to src/LuaJIT/etc/strict.lua
index 604619dd2..e57b0cc18 100644
--- a/src/lua/etc/strict.lua
+++ b/src/LuaJIT/etc/strict.lua
@@ -31,7 +31,7 @@ mt.__newindex = function (t, n, v)
end
rawset(t, n, v)
end
-
+
mt.__index = function (t, n)
if not mt.__declared[n] and what() ~= "C" then
error("variable '"..n.."' is not declared", 2)
diff --git a/src/LuaJIT/lib/bc.lua b/src/LuaJIT/lib/bc.lua
new file mode 100644
index 000000000..15317bcd0
--- /dev/null
+++ b/src/LuaJIT/lib/bc.lua
@@ -0,0 +1,192 @@
+----------------------------------------------------------------------------
+-- LuaJIT bytecode listing module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module lists the bytecode of a Lua function. If it's loaded by -jbc
+-- it hooks into the parser and lists all functions of a chunk as they
+-- are parsed.
+--
+-- Example usage:
+--
+-- luajit -jbc -e 'local x=0; for i=1,1e6 do x=x+i end; print(x)'
+-- luajit -jbc=- foo.lua
+-- luajit -jbc=foo.list foo.lua
+--
+-- Default output is to stderr. To redirect the output to a file, pass a
+-- filename as an argument (use '-' for stdout) or set the environment
+-- variable LUAJIT_LISTFILE. The file is overwritten every time the module
+-- is started.
+--
+-- This module can also be used programmatically:
+--
+-- local bc = require("jit.bc")
+--
+-- local function foo() print("hello") end
+--
+-- bc.dump(foo) --> -- BYTECODE -- [...]
+-- print(bc.line(foo, 2)) --> 0002 KSTR 1 1 ; "hello"
+--
+-- local out = {
+-- -- Do something with each line:
+-- write = function(t, ...) io.write(...) end,
+-- close = function(t) end,
+-- flush = function(t) end,
+-- }
+-- bc.dump(foo, out)
+--
+------------------------------------------------------------------------------
+
+-- Cache some library functions and objects.
+local jit = require("jit")
+assert(jit.version_num == 20000, "LuaJIT core/library version mismatch")
+local jutil = require("jit.util")
+local vmdef = require("jit.vmdef")
+local bit = require("bit")
+local sub, gsub, format = string.sub, string.gsub, string.format
+local byte, band, shr = string.byte, bit.band, bit.rshift
+local funcinfo, funcbc, funck = jutil.funcinfo, jutil.funcbc, jutil.funck
+local funcuvname = jutil.funcuvname
+local bcnames = vmdef.bcnames
+local stdout, stderr = io.stdout, io.stderr
+
+------------------------------------------------------------------------------
+
+local function ctlsub(c)
+ if c == "\n" then return "\\n"
+ elseif c == "\r" then return "\\r"
+ elseif c == "\t" then return "\\t"
+ elseif c == "\r" then return "\\r"
+ else return format("\\%03d", byte(c))
+ end
+end
+
+-- Return one bytecode line.
+local function bcline(func, pc, prefix)
+ local ins, m = funcbc(func, pc)
+ if not ins then return end
+ local ma, mb, mc = band(m, 7), band(m, 15*8), band(m, 15*128)
+ local a = band(shr(ins, 8), 0xff)
+ local oidx = 6*band(ins, 0xff)
+ local op = sub(bcnames, oidx+1, oidx+6)
+ local s = format("%04d %s %-6s %3s ",
+ pc, prefix or " ", op, ma == 0 and "" or a)
+ local d = shr(ins, 16)
+ if mc == 13*128 then -- BCMjump
+ return format("%s=> %04d\n", s, pc+d-0x7fff)
+ end
+ if mb ~= 0 then
+ d = band(d, 0xff)
+ elseif mc == 0 then
+ return s.."\n"
+ end
+ local kc
+ if mc == 10*128 then -- BCMstr
+ kc = funck(func, -d-1)
+ kc = format(#kc > 40 and '"%.40s"~' or '"%s"', gsub(kc, "%c", ctlsub))
+ elseif mc == 9*128 then -- BCMnum
+ kc = funck(func, d)
+ if op == "TSETM " then kc = kc - 2^52 end
+ elseif mc == 12*128 then -- BCMfunc
+ local fi = funcinfo(funck(func, -d-1))
+ if fi.ffid then
+ kc = vmdef.ffnames[fi.ffid]
+ else
+ kc = fi.loc
+ end
+ elseif mc == 5*128 then -- BCMuv
+ kc = funcuvname(func, d)
+ end
+ if ma == 5 then -- BCMuv
+ local ka = funcuvname(func, a)
+ if kc then kc = ka.." ; "..kc else kc = ka end
+ end
+ if mb ~= 0 then
+ local b = shr(ins, 24)
+ if kc then return format("%s%3d %3d ; %s\n", s, b, d, kc) end
+ return format("%s%3d %3d\n", s, b, d)
+ end
+ if kc then return format("%s%3d ; %s\n", s, d, kc) end
+ if mc == 7*128 and d > 32767 then d = d - 65536 end -- BCMlits
+ return format("%s%3d\n", s, d)
+end
+
+-- Collect branch targets of a function.
+local function bctargets(func)
+ local target = {}
+ for pc=1,1000000000 do
+ local ins, m = funcbc(func, pc)
+ if not ins then break end
+ if band(m, 15*128) == 13*128 then target[pc+shr(ins, 16)-0x7fff] = true end
+ end
+ return target
+end
+
+-- Dump bytecode instructions of a function.
+local function bcdump(func, out, all)
+ if not out then out = stdout end
+ local fi = funcinfo(func)
+ if all and fi.children then
+ for n=-1,-1000000000,-1 do
+ local k = funck(func, n)
+ if not k then break end
+ if type(k) == "proto" then bcdump(k, out, true) end
+ end
+ end
+ out:write(format("-- BYTECODE -- %s-%d\n", fi.loc, fi.lastlinedefined))
+ local target = bctargets(func)
+ for pc=1,1000000000 do
+ local s = bcline(func, pc, target[pc] and "=>")
+ if not s then break end
+ out:write(s)
+ end
+ out:write("\n")
+ out:flush()
+end
+
+------------------------------------------------------------------------------
+
+-- Active flag and output file handle.
+local active, out
+
+-- List handler.
+local function h_list(func)
+ return bcdump(func, out)
+end
+
+-- Detach list handler.
+local function bclistoff()
+ if active then
+ active = false
+ jit.attach(h_list)
+ if out and out ~= stdout and out ~= stderr then out:close() end
+ out = nil
+ end
+end
+
+-- Open the output file and attach list handler.
+local function bcliston(outfile)
+ if active then bclistoff() end
+ if not outfile then outfile = os.getenv("LUAJIT_LISTFILE") end
+ if outfile then
+ out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
+ else
+ out = stderr
+ end
+ jit.attach(h_list, "bc")
+ active = true
+end
+
+-- Public module functions.
+module(...)
+
+line = bcline
+dump = bcdump
+targets = bctargets
+
+on = bcliston
+off = bclistoff
+start = bcliston -- For -j command line option.
+
diff --git a/src/LuaJIT/lib/bcsave.lua b/src/LuaJIT/lib/bcsave.lua
new file mode 100644
index 000000000..7ac0ce356
--- /dev/null
+++ b/src/LuaJIT/lib/bcsave.lua
@@ -0,0 +1,496 @@
+----------------------------------------------------------------------------
+-- LuaJIT module to save/list bytecode.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module saves or lists the bytecode for an input file.
+-- It's run by the -b command line option.
+--
+------------------------------------------------------------------------------
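+
+-- A few hedged example invocations, derived from the usage() text below
+-- (the file names are placeholders):
+--
+--   luajit -b input.lua output.raw    save raw bytecode
+--   luajit -bg input.lua output.c     embed bytecode as C source, keep debug info
+--   luajit -bl input.lua              list bytecode to stdout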
+
+local jit = require("jit")
+assert(jit.version_num == 20000, "LuaJIT core/library version mismatch")
+
+-- Symbol name prefix for LuaJIT bytecode.
+local LJBC_PREFIX = "luaJIT_BC_"
+
+------------------------------------------------------------------------------
+
+local function usage()
+ io.stderr:write[[
+Save LuaJIT bytecode: luajit -b[options] input output
+ -l Only list bytecode.
+ -s Strip debug info (default).
+ -g Keep debug info.
+ -n name Set module name (default: auto-detect from input name).
+ -t type Set output file type (default: auto-detect from output name).
+ -a arch Override architecture for object files (default: native).
+ -o os Override OS for object files (default: native).
+ -e chunk Use chunk string as input.
+ -- Stop handling options.
+ - Use stdin as input and/or stdout as output.
+
+File types: c h obj o raw (default)
+]]
+ os.exit(1)
+end
+
+local function check(ok, ...)
+ if ok then return ok, ... end
+ io.stderr:write("luajit: ", ...)
+ io.stderr:write("\n")
+ os.exit(1)
+end
+
+local function readfile(input)
+ if type(input) == "function" then return input end
+ if input == "-" then input = nil end
+ return check(loadfile(input))
+end
+
+local function savefile(name, mode)
+ if name == "-" then return io.stdout end
+ return check(io.open(name, mode))
+end
+
+------------------------------------------------------------------------------
+
+local map_type = {
+ raw = "raw", c = "c", h = "h", o = "obj", obj = "obj",
+}
+
+local map_arch = {
+ x86 = true, x64 = true, arm = true, ppc = true, ppcspe = true,
+}
+
+local map_os = {
+ linux = true, windows = true, osx = true, freebsd = true, netbsd = true,
+ openbsd = true, solaris = true,
+}
+
+local function checkarg(str, map, err)
+ str = string.lower(str)
+ local s = check(map[str], "unknown ", err)
+ return s == true and str or s
+end
+
+local function detecttype(str)
+ local ext = string.match(string.lower(str), "%.(%a+)$")
+ return map_type[ext] or "raw"
+end
+
+local function checkmodname(str)
+ check(string.match(str, "^[%w_.%-]+$"), "bad module name")
+ return string.gsub(str, "[%.%-]", "_")
+end
+
+local function detectmodname(str)
+ if type(str) == "string" then
+ local tail = string.match(str, "[^/\\]+$")
+ if tail then str = tail end
+ local head = string.match(str, "^(.*)%.[^.]*$")
+ if head then str = head end
+ str = string.match(str, "^[%w_.%-]+")
+ else
+ str = nil
+ end
+ check(str, "cannot derive module name, use -n name")
+ return string.gsub(str, "[%.%-]", "_")
+end
+
+------------------------------------------------------------------------------
+
+local function bcsave_tail(fp, output, s)
+ local ok, err = fp:write(s)
+ if ok and output ~= "-" then ok, err = fp:close() end
+ check(ok, "cannot write ", output, ": ", err)
+end
+
+local function bcsave_raw(output, s)
+ local fp = savefile(output, "wb")
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_c(ctx, output, s)
+ local fp = savefile(output, "w")
+ if ctx.type == "c" then
+ fp:write(string.format([[
+#ifdef __cplusplus
+extern "C"
+#endif
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+const char %s%s[] = {
+]], LJBC_PREFIX, ctx.modname))
+ else
+ fp:write(string.format([[
+#define %s%s_SIZE %d
+static const char %s%s[] = {
+]], LJBC_PREFIX, ctx.modname, #s, LJBC_PREFIX, ctx.modname))
+ end
+ local t, n, m = {}, 0, 0
+ for i=1,#s do
+ local b = tostring(string.byte(s, i))
+ m = m + #b + 1
+ if m > 78 then
+ fp:write(table.concat(t, ",", 1, n), ",\n")
+ n, m = 0, #b + 1
+ end
+ n = n + 1
+ t[n] = b
+ end
+ bcsave_tail(fp, output, table.concat(t, ",", 1, n).."\n};\n")
+end
+
+local function bcsave_elfobj(ctx, output, s, ffi)
+ ffi.cdef[[
+typedef struct {
+ uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7];
+ uint16_t type, machine;
+ uint32_t version;
+ uint32_t entry, phofs, shofs;
+ uint32_t flags;
+ uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx;
+} ELF32header;
+typedef struct {
+ uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7];
+ uint16_t type, machine;
+ uint32_t version;
+ uint64_t entry, phofs, shofs;
+ uint32_t flags;
+ uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx;
+} ELF64header;
+typedef struct {
+ uint32_t name, type, flags, addr, ofs, size, link, info, align, entsize;
+} ELF32sectheader;
+typedef struct {
+ uint32_t name, type;
+ uint64_t flags, addr, ofs, size;
+ uint32_t link, info;
+ uint64_t align, entsize;
+} ELF64sectheader;
+typedef struct {
+ uint32_t name, value, size;
+ uint8_t info, other;
+ uint16_t sectidx;
+} ELF32symbol;
+typedef struct {
+ uint32_t name;
+ uint8_t info, other;
+ uint16_t sectidx;
+ uint64_t value, size;
+} ELF64symbol;
+typedef struct {
+ ELF32header hdr;
+ ELF32sectheader sect[6];
+ ELF32symbol sym[2];
+ uint8_t space[4096];
+} ELF32obj;
+typedef struct {
+ ELF64header hdr;
+ ELF64sectheader sect[6];
+ ELF64symbol sym[2];
+ uint8_t space[4096];
+} ELF64obj;
+]]
+ local symname = LJBC_PREFIX..ctx.modname
+ local is64, isbe = false, false
+ if ctx.arch == "x64" then
+ is64 = true
+ elseif ctx.arch == "ppc" or ctx.arch == "ppcspe" then
+ isbe = true
+ end
+
+  -- Handle different host/target endianness.
+ local function f32(x) return x end
+ local f16, fofs = f32, f32
+ if ffi.abi("be") ~= isbe then
+ f32 = bit.bswap
+ function f16(x) return bit.rshift(bit.bswap(x), 16) end
+ if is64 then
+ function fofs(x) return bit.bswap(x)*(2ll^32) end
+ else
+ fofs = f32
+ end
+ end
+
+ -- Create ELF object and fill in header.
+ local o = ffi.new(is64 and "ELF64obj" or "ELF32obj")
+ local hdr = o.hdr
+ if ctx.os == "bsd" or ctx.os == "other" then -- Determine native hdr.eosabi.
+ local bf = assert(io.open("/bin/ls", "rb"))
+ local bs = bf:read(9)
+ bf:close()
+ ffi.copy(o, bs, 9)
+ check(hdr.emagic[0] == 127, "no support for writing native object files")
+ else
+ hdr.emagic = "\127ELF"
+ hdr.eosabi = ({ freebsd=9, netbsd=2, openbsd=12, solaris=6 })[ctx.os] or 0
+ end
+ hdr.eclass = is64 and 2 or 1
+ hdr.eendian = isbe and 2 or 1
+ hdr.eversion = 1
+ hdr.type = f16(1)
+ hdr.machine = f16(({ x86=3, x64=62, arm=40, ppc=20, ppcspe=20 })[ctx.arch])
+ hdr.version = f32(1)
+ hdr.shofs = fofs(ffi.offsetof(o, "sect"))
+ hdr.ehsize = f16(ffi.sizeof(hdr))
+ hdr.shentsize = f16(ffi.sizeof(o.sect[0]))
+ hdr.shnum = f16(6)
+ hdr.shstridx = f16(2)
+
+ -- Fill in sections and symbols.
+ local sofs, ofs = ffi.offsetof(o, "space"), 1
+ for i,name in ipairs{
+ ".symtab", ".shstrtab", ".strtab", ".rodata", ".note.GNU-stack",
+ } do
+ local sect = o.sect[i]
+ sect.align = fofs(1)
+ sect.name = f32(ofs)
+ ffi.copy(o.space+ofs, name)
+ ofs = ofs + #name+1
+ end
+ o.sect[1].type = f32(2) -- .symtab
+ o.sect[1].link = f32(3)
+ o.sect[1].info = f32(1)
+ o.sect[1].align = fofs(8)
+ o.sect[1].ofs = fofs(ffi.offsetof(o, "sym"))
+ o.sect[1].entsize = fofs(ffi.sizeof(o.sym[0]))
+ o.sect[1].size = fofs(ffi.sizeof(o.sym))
+ o.sym[1].name = f32(1)
+ o.sym[1].sectidx = f16(4)
+ o.sym[1].size = fofs(#s)
+ o.sym[1].info = 17
+ o.sect[2].type = f32(3) -- .shstrtab
+ o.sect[2].ofs = fofs(sofs)
+ o.sect[2].size = fofs(ofs)
+ o.sect[3].type = f32(3) -- .strtab
+ o.sect[3].ofs = fofs(sofs + ofs)
+ o.sect[3].size = fofs(#symname+1)
+ ffi.copy(o.space+ofs+1, symname)
+ ofs = ofs + #symname + 2
+ o.sect[4].type = f32(1) -- .rodata
+ o.sect[4].flags = fofs(2)
+ o.sect[4].ofs = fofs(sofs + ofs)
+ o.sect[4].size = fofs(#s)
+ o.sect[5].type = f32(1) -- .note.GNU-stack
+ o.sect[5].ofs = fofs(sofs + ofs + #s)
+
+ -- Write ELF object file.
+ local fp = savefile(output, "wb")
+ fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs))
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_peobj(ctx, output, s, ffi)
+ ffi.cdef[[
+typedef struct {
+ uint16_t arch, nsects;
+ uint32_t time, symtabofs, nsyms;
+ uint16_t opthdrsz, flags;
+} PEheader;
+typedef struct {
+ char name[8];
+ uint32_t vsize, vaddr, size, ofs, relocofs, lineofs;
+ uint16_t nreloc, nline;
+ uint32_t flags;
+} PEsection;
+typedef struct __attribute((packed)) {
+ union {
+ char name[8];
+ uint32_t nameref[2];
+ };
+ uint32_t value;
+ int16_t sect;
+ uint16_t type;
+ uint8_t scl, naux;
+} PEsym;
+typedef struct __attribute((packed)) {
+ uint32_t size;
+ uint16_t nreloc, nline;
+ uint32_t cksum;
+ uint16_t assoc;
+ uint8_t comdatsel, unused[3];
+} PEsymaux;
+typedef struct {
+ PEheader hdr;
+ PEsection sect[2];
+ // Must be an even number of symbol structs.
+ PEsym sym0;
+ PEsymaux sym0aux;
+ PEsym sym1;
+ PEsymaux sym1aux;
+ PEsym sym2;
+ PEsym sym3;
+ uint32_t strtabsize;
+ uint8_t space[4096];
+} PEobj;
+]]
+ local symname = LJBC_PREFIX..ctx.modname
+ local is64 = false
+ if ctx.arch == "x86" then
+ symname = "_"..symname
+ elseif ctx.arch == "x64" then
+ is64 = true
+ end
+ local symexport = " /EXPORT:"..symname..",DATA "
+
+ -- The file format is always little-endian. Swap if the host is big-endian.
+ local function f32(x) return x end
+ local f16 = f32
+ if ffi.abi("be") then
+ f32 = bit.bswap
+ function f16(x) return bit.rshift(bit.bswap(x), 16) end
+ end
+
+ -- Create PE object and fill in header.
+ local o = ffi.new("PEobj")
+ local hdr = o.hdr
+ hdr.arch = f16(({ x86=0x14c, x64=0x8664, arm=0x1c0, ppc=0x1f2 })[ctx.arch])
+ hdr.nsects = f16(2)
+ hdr.symtabofs = f32(ffi.offsetof(o, "sym0"))
+ hdr.nsyms = f32(6)
+
+ -- Fill in sections and symbols.
+ o.sect[0].name = ".drectve"
+ o.sect[0].size = f32(#symexport)
+ o.sect[0].flags = f32(0x00100a00)
+ o.sym0.sect = f16(1)
+ o.sym0.scl = 3
+ o.sym0.name = ".drectve"
+ o.sym0.naux = 1
+ o.sym0aux.size = f32(#symexport)
+ o.sect[1].name = ".rdata"
+ o.sect[1].size = f32(#s)
+ o.sect[1].flags = f32(0x40300040)
+ o.sym1.sect = f16(2)
+ o.sym1.scl = 3
+ o.sym1.name = ".rdata"
+ o.sym1.naux = 1
+ o.sym1aux.size = f32(#s)
+ o.sym2.sect = f16(2)
+ o.sym2.scl = 2
+ o.sym2.nameref[1] = f32(4)
+ o.sym3.sect = f16(-1)
+ o.sym3.scl = 2
+ o.sym3.value = f32(1)
+ o.sym3.name = "@feat.00" -- Mark as SafeSEH compliant.
+ ffi.copy(o.space, symname)
+ local ofs = #symname + 1
+ o.strtabsize = f32(ofs + 4)
+ o.sect[0].ofs = f32(ffi.offsetof(o, "space") + ofs)
+ ffi.copy(o.space + ofs, symexport)
+ ofs = ofs + #symexport
+ o.sect[1].ofs = f32(ffi.offsetof(o, "space") + ofs)
+
+ -- Write PE object file.
+ local fp = savefile(output, "wb")
+ fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs))
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_machobj(ctx, output, s, ffi)
+ check(false, "NYI: no support for writing OSX object files")
+end
+
+local function bcsave_obj(ctx, output, s)
+ local ok, ffi = pcall(require, "ffi")
+ check(ok, "FFI library required to write this file type")
+ if ctx.os == "windows" then
+ return bcsave_peobj(ctx, output, s, ffi)
+ elseif ctx.os == "osx" then
+ return bcsave_machobj(ctx, output, s, ffi)
+ else
+ return bcsave_elfobj(ctx, output, s, ffi)
+ end
+end
+
+------------------------------------------------------------------------------
+
+local function bclist(input, output)
+ local f = readfile(input)
+ require("jit.bc").dump(f, savefile(output, "w"), true)
+end
+
+local function bcsave(ctx, input, output)
+ local f = readfile(input)
+ local s = string.dump(f, ctx.strip)
+ local t = ctx.type
+ if not t then
+ t = detecttype(output)
+ ctx.type = t
+ end
+ if t == "raw" then
+ bcsave_raw(output, s)
+ else
+ if not ctx.modname then ctx.modname = detectmodname(input) end
+ if t == "obj" then
+ bcsave_obj(ctx, output, s)
+ else
+ bcsave_c(ctx, output, s)
+ end
+ end
+end
+
+local function docmd(...)
+ local arg = {...}
+ local n = 1
+ local list = false
+ local ctx = {
+ strip = true, arch = jit.arch, os = string.lower(jit.os),
+ type = false, modname = false,
+ }
+ while n <= #arg do
+ local a = arg[n]
+ if type(a) == "string" and string.sub(a, 1, 1) == "-" and a ~= "-" then
+ table.remove(arg, n)
+ if a == "--" then break end
+ for m=2,#a do
+ local opt = string.sub(a, m, m)
+ if opt == "l" then
+ list = true
+ elseif opt == "s" then
+ ctx.strip = true
+ elseif opt == "g" then
+ ctx.strip = false
+ else
+ if arg[n] == nil or m ~= #a then usage() end
+ if opt == "e" then
+ if n ~= 1 then usage() end
+ arg[1] = check(loadstring(arg[1]))
+ elseif opt == "n" then
+ ctx.modname = checkmodname(table.remove(arg, n))
+ elseif opt == "t" then
+ ctx.type = checkarg(table.remove(arg, n), map_type, "file type")
+ elseif opt == "a" then
+ ctx.arch = checkarg(table.remove(arg, n), map_arch, "architecture")
+ elseif opt == "o" then
+ ctx.os = checkarg(table.remove(arg, n), map_os, "OS name")
+ else
+ usage()
+ end
+ end
+ end
+ else
+ n = n + 1
+ end
+ end
+ if list then
+ if #arg == 0 or #arg > 2 then usage() end
+ bclist(arg[1], arg[2] or "-")
+ else
+ if #arg ~= 2 then usage() end
+ bcsave(ctx, arg[1], arg[2])
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Public module functions.
+module(...)
+
+start = docmd -- Process -b command line option.
+
diff --git a/src/LuaJIT/lib/dis_arm.lua b/src/LuaJIT/lib/dis_arm.lua
new file mode 100644
index 000000000..0fcd1bed6
--- /dev/null
+++ b/src/LuaJIT/lib/dis_arm.lua
@@ -0,0 +1,543 @@
+----------------------------------------------------------------------------
+-- LuaJIT ARM disassembler module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles most user-mode ARMv7 instructions.
+-- NYI: Advanced SIMD and VFP instructions.
+------------------------------------------------------------------------------
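+
+-- A minimal usage sketch of the public functions exported at the end of this
+-- file; "mcode" and "addr" are placeholders for a machine-code string and its
+-- start address, and the jit.* install path is assumed (as for jit.bc):
+--
+--   local dis = require("jit.dis_arm")
+--   dis.disass(mcode, addr)            -- simple API, output via io.write
+--   local ctx = dis.create(mcode, addr, io.write)
+--   ctx:disass(0, #mcode)              -- extended API: ctx:disass(ofs, len)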
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local concat = table.concat
+local bit = require("bit")
+local band, bor, ror, tohex = bit.band, bit.bor, bit.ror, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+
+------------------------------------------------------------------------------
+-- Opcode maps
+------------------------------------------------------------------------------
+
+local map_loadc = {
+ shift = 9, mask = 7,
+ [5] = {
+ shift = 0, mask = 0 -- NYI VFP load/store.
+ },
+ _ = {
+ shift = 0, mask = 0 -- NYI ldc, mcrr, mrrc.
+ },
+}
+
+local map_datac = {
+ shift = 24, mask = 1,
+ [0] = {
+ shift = 9, mask = 7,
+ [5] = {
+ shift = 0, mask = 0 -- NYI VFP data.
+ },
+ _ = {
+ shift = 0, mask = 0 -- NYI cdp, mcr, mrc.
+ },
+ },
+ "svcT",
+}
+
+local map_loadcu = {
+ shift = 0, mask = 0, -- NYI unconditional CP load/store.
+}
+
+local map_datacu = {
+ shift = 0, mask = 0, -- NYI unconditional CP data.
+}
+
+local map_simddata = {
+ shift = 0, mask = 0, -- NYI SIMD data.
+}
+
+local map_simdload = {
+ shift = 0, mask = 0, -- NYI SIMD load/store, preload.
+}
+
+local map_preload = {
+ shift = 0, mask = 0, -- NYI preload.
+}
+
+local map_media = {
+ shift = 20, mask = 31,
+ [0] = false,
+ { --01
+ shift = 5, mask = 7,
+ [0] = "sadd16DNM", "sasxDNM", "ssaxDNM", "ssub16DNM",
+ "sadd8DNM", false, false, "ssub8DNM",
+ },
+ { --02
+ shift = 5, mask = 7,
+ [0] = "qadd16DNM", "qasxDNM", "qsaxDNM", "qsub16DNM",
+ "qadd8DNM", false, false, "qsub8DNM",
+ },
+ { --03
+ shift = 5, mask = 7,
+ [0] = "shadd16DNM", "shasxDNM", "shsaxDNM", "shsub16DNM",
+ "shadd8DNM", false, false, "shsub8DNM",
+ },
+ false,
+ { --05
+ shift = 5, mask = 7,
+ [0] = "uadd16DNM", "uasxDNM", "usaxDNM", "usub16DNM",
+ "uadd8DNM", false, false, "usub8DNM",
+ },
+ { --06
+ shift = 5, mask = 7,
+ [0] = "uqadd16DNM", "uqasxDNM", "uqsaxDNM", "uqsub16DNM",
+ "uqadd8DNM", false, false, "uqsub8DNM",
+ },
+ { --07
+ shift = 5, mask = 7,
+ [0] = "uhadd16DNM", "uhasxDNM", "uhsaxDNM", "uhsub16DNM",
+ "uhadd8DNM", false, false, "uhsub8DNM",
+ },
+ { --08
+ shift = 5, mask = 7,
+ [0] = "pkhbtDNMU", false, "pkhtbDNMU",
+ { shift = 16, mask = 15, [15] = "sxtb16DMU", _ = "sxtab16DNMU", },
+ "pkhbtDNMU", "selDNM", "pkhtbDNMU",
+ },
+ false,
+ { --0a
+ shift = 5, mask = 7,
+ [0] = "ssatDxMu", "ssat16DxM", "ssatDxMu",
+ { shift = 16, mask = 15, [15] = "sxtbDMU", _ = "sxtabDNMU", },
+ "ssatDxMu", false, "ssatDxMu",
+ },
+ { --0b
+ shift = 5, mask = 7,
+ [0] = "ssatDxMu", "revDM", "ssatDxMu",
+ { shift = 16, mask = 15, [15] = "sxthDMU", _ = "sxtahDNMU", },
+ "ssatDxMu", "rev16DM", "ssatDxMu",
+ },
+ { --0c
+ shift = 5, mask = 7,
+ [3] = { shift = 16, mask = 15, [15] = "uxtb16DMU", _ = "uxtab16DNMU", },
+ },
+ false,
+ { --0e
+ shift = 5, mask = 7,
+ [0] = "usatDwMu", "usat16DwM", "usatDwMu",
+ { shift = 16, mask = 15, [15] = "uxtbDMU", _ = "uxtabDNMU", },
+ "usatDwMu", false, "usatDwMu",
+ },
+ { --0f
+ shift = 5, mask = 7,
+ [0] = "usatDwMu", "rbitDM", "usatDwMu",
+ { shift = 16, mask = 15, [15] = "uxthDMU", _ = "uxtahDNMU", },
+ "usatDwMu", "revshDM", "usatDwMu",
+ },
+ { --10
+ shift = 12, mask = 15,
+ [15] = {
+ shift = 5, mask = 7,
+ "smuadNMS", "smuadxNMS", "smusdNMS", "smusdxNMS",
+ },
+ _ = {
+ shift = 5, mask = 7,
+ [0] = "smladNMSD", "smladxNMSD", "smlsdNMSD", "smlsdxNMSD",
+ },
+ },
+ false, false, false,
+ { --14
+ shift = 5, mask = 7,
+ [0] = "smlaldDNMS", "smlaldxDNMS", "smlsldDNMS", "smlsldxDNMS",
+ },
+ { --15
+ shift = 5, mask = 7,
+ [0] = { shift = 12, mask = 15, [15] = "smmulNMS", _ = "smmlaNMSD", },
+ { shift = 12, mask = 15, [15] = "smmulrNMS", _ = "smmlarNMSD", },
+ false, false, false, false,
+ "smmlsNMSD", "smmlsrNMSD",
+ },
+ false, false,
+ { --18
+ shift = 5, mask = 7,
+ [0] = { shift = 12, mask = 15, [15] = "usad8NMS", _ = "usada8NMSD", },
+ },
+ false,
+ { --1a
+ shift = 5, mask = 3, [2] = "sbfxDMvw",
+ },
+ { --1b
+ shift = 5, mask = 3, [2] = "sbfxDMvw",
+ },
+ { --1c
+ shift = 5, mask = 3,
+ [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", },
+ },
+ { --1d
+ shift = 5, mask = 3,
+ [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", },
+ },
+ { --1e
+ shift = 5, mask = 3, [2] = "ubfxDMvw",
+ },
+ { --1f
+ shift = 5, mask = 3, [2] = "ubfxDMvw",
+ },
+}
+
+local map_load = {
+ shift = 21, mask = 9,
+ {
+ shift = 20, mask = 5,
+ [0] = "strtDL", "ldrtDL", [4] = "strbtDL", [5] = "ldrbtDL",
+ },
+ _ = {
+ shift = 20, mask = 5,
+ [0] = "strDL", "ldrDL", [4] = "strbDL", [5] = "ldrbDL",
+ }
+}
+
+local map_load1 = {
+ shift = 4, mask = 1,
+ [0] = map_load, map_media,
+}
+
+local map_loadm = {
+ shift = 20, mask = 1,
+ [0] = {
+ shift = 23, mask = 3,
+ [0] = "stmdaNR", "stmNR",
+ { shift = 16, mask = 63, [45] = "pushR", _ = "stmdbNR", }, "stmibNR",
+ },
+ {
+ shift = 23, mask = 3,
+ [0] = "ldmdaNR", { shift = 16, mask = 63, [61] = "popR", _ = "ldmNR", },
+ "ldmdbNR", "ldmibNR",
+ },
+}
+
+local map_data = {
+ shift = 21, mask = 15,
+ [0] = "andDNPs", "eorDNPs", "subDNPs", "rsbDNPs",
+ "addDNPs", "adcDNPs", "sbcDNPs", "rscDNPs",
+ "tstNP", "teqNP", "cmpNP", "cmnNP",
+ "orrDNPs", "movDPs", "bicDNPs", "mvnDPs",
+}
+
+local map_mul = {
+ shift = 21, mask = 7,
+ [0] = "mulNMSs", "mlaNMSDs", "umaalDNMS", "mlsDNMS",
+ "umullDNMSs", "umlalDNMSs", "smullDNMSs", "smlalDNMSs",
+}
+
+local map_sync = {
+ shift = 20, mask = 15, -- NYI: brackets around N. R(D+1) for ldrexd/strexd.
+ [0] = "swpDMN", false, false, false,
+ "swpbDMN", false, false, false,
+ "strexDMN", "ldrexDN", "strexdDN", "ldrexdDN",
+ "strexbDMN", "ldrexbDN", "strexhDN", "ldrexhDN",
+}
+
+local map_mulh = {
+ shift = 21, mask = 3,
+ [0] = { shift = 5, mask = 3,
+ [0] = "smlabbNMSD", "smlatbNMSD", "smlabtNMSD", "smlattNMSD", },
+ { shift = 5, mask = 3,
+ [0] = "smlawbNMSD", "smulwbNMS", "smlawtNMSD", "smulwtNMS", },
+ { shift = 5, mask = 3,
+ [0] = "smlalbbDNMS", "smlaltbDNMS", "smlalbtDNMS", "smlalttDNMS", },
+ { shift = 5, mask = 3,
+ [0] = "smulbbNMS", "smultbNMS", "smulbtNMS", "smulttNMS", },
+}
+
+local map_misc = {
+ shift = 4, mask = 7,
+ -- NYI: decode PSR bits of msr.
+ [0] = { shift = 21, mask = 1, [0] = "mrsD", "msrM", },
+ { shift = 21, mask = 3, "bxM", false, "clzDM", },
+ { shift = 21, mask = 3, "bxjM", },
+ { shift = 21, mask = 3, "blxM", },
+ false,
+ { shift = 21, mask = 3, [0] = "qaddDMN", "qsubDMN", "qdaddDMN", "qdsubDMN", },
+ false,
+ { shift = 21, mask = 3, "bkptK", },
+}
+
+local map_datar = {
+ shift = 4, mask = 9,
+ [9] = {
+ shift = 5, mask = 3,
+ [0] = { shift = 24, mask = 1, [0] = map_mul, map_sync, },
+ { shift = 20, mask = 1, [0] = "strhDL", "ldrhDL", },
+ { shift = 20, mask = 1, [0] = "ldrdDL", "ldrsbDL", },
+ { shift = 20, mask = 1, [0] = "strdDL", "ldrshDL", },
+ },
+ _ = {
+ shift = 20, mask = 25,
+ [16] = { shift = 7, mask = 1, [0] = map_misc, map_mulh, },
+ _ = {
+ shift = 0, mask = 0xffffffff,
+ [bor(0xe1a00000)] = "nop",
+ _ = map_data,
+ }
+ },
+}
+
+local map_datai = {
+ shift = 20, mask = 31, -- NYI: decode PSR bits of msr. Decode imm12.
+ [16] = "movwDW", [20] = "movtDW",
+ [18] = { shift = 0, mask = 0xf00ff, [0] = "nopv6", _ = "msrNW", },
+ [22] = "msrNW",
+ _ = map_data,
+}
+
+local map_branch = {
+ shift = 24, mask = 1,
+ [0] = "bB", "blB"
+}
+
+local map_condins = {
+ [0] = map_datar, map_datai, map_load, map_load1,
+ map_loadm, map_branch, map_loadc, map_datac
+}
+
+-- NYI: setend.
+local map_uncondins = {
+ [0] = false, map_simddata, map_simdload, map_preload,
+ false, "blxB", map_loadcu, map_datacu,
+}
+
+------------------------------------------------------------------------------
+
+local map_gpr = {
+ [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc",
+}
+
+local map_cond = {
+ [0] = "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al",
+}
+
+local map_shift = { [0] = "lsl", "lsr", "asr", "ror", }
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then
+ extra = "\t->"..sym
+ elseif band(ctx.op, 0x0e000000) ~= 0x0a000000 then
+ extra = "\t; 0x"..tohex(ctx.rel)
+ end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-5s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-5s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+-- Format operand 2 of load/store opcodes.
+local function fmtload(ctx, op, pos)
+ local base = map_gpr[band(rshift(op, 16), 15)]
+ local x, ofs
+ local ext = (band(op, 0x04000000) == 0)
+ if not ext and band(op, 0x02000000) == 0 then
+ ofs = band(op, 4095)
+ if band(op, 0x00800000) == 0 then ofs = -ofs end
+ if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
+ ofs = "#"..ofs
+ elseif ext and band(op, 0x00400000) ~= 0 then
+ ofs = band(op, 15) + band(rshift(op, 4), 0xf0)
+ if band(op, 0x00800000) == 0 then ofs = -ofs end
+ if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
+ ofs = "#"..ofs
+ else
+ ofs = map_gpr[band(op, 15)]
+ if ext or band(op, 0xfe0) == 0 then
+ elseif band(op, 0xfe0) == 0x60 then
+ ofs = format("%s, rrx", ofs)
+ else
+ local sh = band(rshift(op, 7), 31)
+ if sh == 0 then sh = 32 end
+ ofs = format("%s, %s #%d", ofs, map_shift[band(rshift(op, 5), 3)], sh)
+ end
+ if band(op, 0x00800000) == 0 then ofs = "-"..ofs end
+ end
+ if ofs == "#0" then
+ x = format("[%s]", base)
+ elseif band(op, 0x01000000) == 0 then
+ x = format("[%s], %s", base, ofs)
+ else
+ x = format("[%s, %s]", base, ofs)
+ end
+ if band(op, 0x01200000) == 0x01200000 then x = x.."!" end
+ return x
+end
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ local op = bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
+ local operands = {}
+ local suffix = ""
+ local last, name, pat
+ ctx.op = op
+ ctx.rel = nil
+
+ local cond = rshift(op, 28)
+ local opat
+ if cond == 15 then
+ opat = map_uncondins[band(rshift(op, 25), 7)]
+ else
+ if cond ~= 14 then suffix = map_cond[cond] end
+ opat = map_condins[band(rshift(op, 25), 7)]
+ end
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
+ end
+ name, pat = match(opat, "^([a-z0-9]*)(.*)")
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "D" then
+ x = map_gpr[band(rshift(op, 12), 15)]
+ elseif p == "N" then
+ x = map_gpr[band(rshift(op, 16), 15)]
+ elseif p == "S" then
+ x = map_gpr[band(rshift(op, 8), 15)]
+ elseif p == "M" then
+ x = map_gpr[band(op, 15)]
+ elseif p == "P" then
+ if band(op, 0x02000000) ~= 0 then
+ x = ror(band(op, 255), 2*band(rshift(op, 8), 15))
+ else
+ x = map_gpr[band(op, 15)]
+ if band(op, 0xff0) ~= 0 then
+ operands[#operands+1] = x
+ local s = map_shift[band(rshift(op, 5), 3)]
+ local r = nil
+ if band(op, 0xf90) == 0 then
+ if s == "ror" then s = "rrx" else r = "#32" end
+ elseif band(op, 0x10) == 0 then
+ r = "#"..band(rshift(op, 7), 31)
+ else
+ r = map_gpr[band(rshift(op, 8), 15)]
+ end
+ if name == "mov" then name = s; x = r
+ elseif r then x = format("%s %s", s, r)
+ else x = s end
+ end
+ end
+ elseif p == "L" then
+ x = fmtload(ctx, op, pos, false)
+ elseif p == "B" then
+ local addr = ctx.addr + pos + 8 + arshift(lshift(op, 8), 6)
+ if cond == 15 then addr = addr + band(rshift(op, 23), 2) end
+ ctx.rel = addr
+ x = "0x"..tohex(addr)
+ elseif p == "R" then
+ if band(op, 0x00200000) ~= 0 and #operands == 1 then
+ operands[1] = operands[1].."!"
+ end
+ local t = {}
+ for i=0,15 do
+ if band(rshift(op, i), 1) == 1 then t[#t+1] = map_gpr[i] end
+ end
+ x = "{"..concat(t, ", ").."}"
+ elseif p == "W" then
+ x = band(op, 0x0fff) + band(rshift(op, 4), 0xf000)
+ elseif p == "T" then
+ x = "#0x"..tohex(band(op, 0x00ffffff), 6)
+ elseif p == "U" then
+ x = band(rshift(op, 7), 31)
+ if x == 0 then x = nil end
+ elseif p == "u" then
+ x = band(rshift(op, 7), 31)
+ if band(op, 0x40) == 0 then
+ if x == 0 then x = nil else x = "lsl #"..x end
+ else
+ if x == 0 then x = "asr #32" else x = "asr #"..x end
+ end
+ elseif p == "v" then
+ x = band(rshift(op, 7), 31)
+ elseif p == "w" then
+ x = band(rshift(op, 16), 31)
+ elseif p == "x" then
+ x = band(rshift(op, 16), 31) + 1
+ elseif p == "X" then
+ x = band(rshift(op, 16), 31) - last + 1
+ elseif p == "K" then
+ x = "#0x"..tohex(band(rshift(op, 4), 0x0000fff0) + band(op, 15), 4)
+ elseif p == "s" then
+ if band(op, 0x00100000) ~= 0 then suffix = "s"..suffix end
+ else
+ assert(false)
+ end
+ if x then
+ last = x
+ if type(x) == "number" then x = "#"..x end
+ operands[#operands+1] = x
+ end
+ end
+
+ return putop(ctx, name..suffix, operands)
+end
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ ctx.pos = ofs
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create_(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass_(code, addr, out)
+ create_(code, addr, out):disass()
+end
+
+-- Return register name for RID.
+local function regname_(r)
+ return map_gpr[r]
+end
+
+-- Public module functions.
+module(...)
+
+create = create_
+disass = disass_
+regname = regname_
+
diff --git a/src/LuaJIT/lib/dis_mips.lua b/src/LuaJIT/lib/dis_mips.lua
new file mode 100644
index 000000000..2edfdf40d
--- /dev/null
+++ b/src/LuaJIT/lib/dis_mips.lua
@@ -0,0 +1,428 @@
+----------------------------------------------------------------------------
+-- LuaJIT MIPS disassembler module.
+--
+-- Copyright (C) 2005-2011 Mike Pall. All rights reserved.
+-- Released under the MIT/X license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles all standard MIPS32R1/R2 instructions.
+-- Default mode is big-endian, but see: dis_mipsel.lua
+------------------------------------------------------------------------------
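+
+-- A minimal usage sketch ("mcode" and "addr" are placeholders; the jit.*
+-- install path is assumed, as for the other lib/ modules):
+--
+--   local dis = require("jit.dis_mips")
+--   dis.disass(mcode, addr)       -- big-endian decoding
+--   dis.disass_el(mcode, addr)    -- little-endian decoding (cf. dis_mipsel.lua)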
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local concat = table.concat
+local bit = require("bit")
+local band, bor, tohex = bit.band, bit.bor, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+
+------------------------------------------------------------------------------
+-- Primary and extended opcode maps
+------------------------------------------------------------------------------
+
+local map_movci = { shift = 16, mask = 1, [0] = "movfDSC", "movtDSC", }
+local map_srl = { shift = 21, mask = 1, [0] = "srlDTA", "rotrDTA", }
+local map_srlv = { shift = 6, mask = 1, [0] = "srlvDTS", "rotrvDTS", }
+
+local map_special = {
+ shift = 0, mask = 63,
+ [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" },
+ map_movci, map_srl, "sraDTA",
+ "sllvDTS", false, map_srlv, "sravDTS",
+ "jrS", "jalrD1S", "movzDST", "movnDST",
+ "syscallY", "breakY", false, "sync",
+ "mfhiD", "mthiS", "mfloD", "mtloS",
+ false, false, false, false,
+ "multST", "multuST", "divST", "divuST",
+ false, false, false, false,
+ "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T",
+ "andDST", "orDST", "xorDST", "nor|notDST0",
+ false, false, "sltDST", "sltuDST",
+ false, false, false, false,
+ "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ",
+ "teqSTZ", false, "tneSTZ",
+}
+
+local map_special2 = {
+ shift = 0, mask = 63,
+ [0] = "maddST", "madduST", "mulDST", false,
+ "msubST", "msubuST",
+ [32] = "clzDS", [33] = "cloDS",
+ [63] = "sdbbpY",
+}
+
+local map_bshfl = {
+ shift = 6, mask = 31,
+ [2] = "wsbhDT",
+ [16] = "sebDT",
+ [24] = "sehDT",
+}
+
+local map_special3 = {
+ shift = 0, mask = 63,
+ [0] = "extTSAK", [4] = "insTSAL",
+ [32] = map_bshfl,
+ [59] = "rdhwrTD",
+}
+
+local map_regimm = {
+ shift = 16, mask = 31,
+ [0] = "bltzSB", "bgezSB", "bltzlSB", "bgezlSB",
+ false, false, false, false,
+ "tgeiSI", "tgeiuSI", "tltiSI", "tltiuSI",
+ "teqiSI", false, "tneiSI", false,
+ "bltzalSB", "bgezalSB", "bltzallSB", "bgezallSB",
+ false, false, false, false,
+ false, false, false, false,
+ false, false, false, "synciSO",
+}
+
+local map_cop0 = {
+ shift = 25, mask = 1,
+ [0] = {
+ shift = 21, mask = 15,
+ [0] = "mfc0TDW", [4] = "mtc0TDW",
+ [10] = "rdpgprDT",
+ [11] = { shift = 5, mask = 1, [0] = "diT0", "eiT0", },
+ [14] = "wrpgprDT",
+ }, {
+ shift = 0, mask = 63,
+ [1] = "tlbr", [2] = "tlbwi", [6] = "tlbwr", [8] = "tlbp",
+ [24] = "eret", [31] = "deret",
+ [32] = "wait",
+ },
+}
+
+local map_cop1s = {
+ shift = 0, mask = 63,
+ [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH",
+ "sqrt.sFG", "abs.sFG", "mov.sFG", "neg.sFG",
+ "round.l.sFG", "trunc.l.sFG", "ceil.l.sFG", "floor.l.sFG",
+ "round.w.sFG", "trunc.w.sFG", "ceil.w.sFG", "floor.w.sFG",
+ false,
+ { shift = 16, mask = 1, [0] = "movf.sFGC", "movt.sFGC" },
+ "movz.sFGT", "movn.sFGT",
+ false, "recip.sFG", "rsqrt.sFG", false,
+ false, false, false, false,
+ false, false, false, false,
+ false, "cvt.d.sFG", false, false,
+ "cvt.w.sFG", "cvt.l.sFG", "cvt.ps.sFGH", false,
+ false, false, false, false,
+ false, false, false, false,
+ "c.f.sVGH", "c.un.sVGH", "c.eq.sVGH", "c.ueq.sVGH",
+ "c.olt.sVGH", "c.ult.sVGH", "c.ole.sVGH", "c.ule.sVGH",
+ "c.sf.sVGH", "c.ngle.sVGH", "c.seq.sVGH", "c.ngl.sVGH",
+ "c.lt.sVGH", "c.nge.sVGH", "c.le.sVGH", "c.ngt.sVGH",
+}
+
+local map_cop1d = {
+ shift = 0, mask = 63,
+ [0] = "add.dFGH", "sub.dFGH", "mul.dFGH", "div.dFGH",
+ "sqrt.dFG", "abs.dFG", "mov.dFG", "neg.dFG",
+ "round.l.dFG", "trunc.l.dFG", "ceil.l.dFG", "floor.l.dFG",
+ "round.w.dFG", "trunc.w.dFG", "ceil.w.dFG", "floor.w.dFG",
+ false,
+ { shift = 16, mask = 1, [0] = "movf.dFGC", "movt.dFGC" },
+ "movz.dFGT", "movn.dFGT",
+ false, "recip.dFG", "rsqrt.dFG", false,
+ false, false, false, false,
+ false, false, false, false,
+ "cvt.s.dFG", false, false, false,
+ "cvt.w.dFG", "cvt.l.dFG", false, false,
+ false, false, false, false,
+ false, false, false, false,
+ "c.f.dVGH", "c.un.dVGH", "c.eq.dVGH", "c.ueq.dVGH",
+ "c.olt.dVGH", "c.ult.dVGH", "c.ole.dVGH", "c.ule.dVGH",
+ "c.df.dVGH", "c.ngle.dVGH", "c.deq.dVGH", "c.ngl.dVGH",
+ "c.lt.dVGH", "c.nge.dVGH", "c.le.dVGH", "c.ngt.dVGH",
+}
+
+local map_cop1ps = {
+ shift = 0, mask = 63,
+ [0] = "add.psFGH", "sub.psFGH", "mul.psFGH", false,
+ false, "abs.psFG", "mov.psFG", "neg.psFG",
+ false, false, false, false,
+ false, false, false, false,
+ false,
+ { shift = 16, mask = 1, [0] = "movf.psFGC", "movt.psFGC" },
+ "movz.psFGT", "movn.psFGT",
+ false, false, false, false,
+ false, false, false, false,
+ false, false, false, false,
+ "cvt.s.puFG", false, false, false,
+ false, false, false, false,
+ "cvt.s.plFG", false, false, false,
+ "pll.psFGH", "plu.psFGH", "pul.psFGH", "puu.psFGH",
+ "c.f.psVGH", "c.un.psVGH", "c.eq.psVGH", "c.ueq.psVGH",
+ "c.olt.psVGH", "c.ult.psVGH", "c.ole.psVGH", "c.ule.psVGH",
+ "c.psf.psVGH", "c.ngle.psVGH", "c.pseq.psVGH", "c.ngl.psVGH",
+ "c.lt.psVGH", "c.nge.psVGH", "c.le.psVGH", "c.ngt.psVGH",
+}
+
+local map_cop1w = {
+ shift = 0, mask = 63,
+ [32] = "cvt.s.wFG", [33] = "cvt.d.wFG",
+}
+
+local map_cop1l = {
+ shift = 0, mask = 63,
+ [32] = "cvt.s.lFG", [33] = "cvt.d.lFG",
+}
+
+local map_cop1bc = {
+ shift = 16, mask = 3,
+ [0] = "bc1fCB", "bc1tCB", "bc1flCB", "bc1tlCB",
+}
+
+local map_cop1 = {
+ shift = 21, mask = 31,
+ [0] = "mfc1TG", false, "cfc1TG", "mfhc1TG",
+ "mtc1TG", false, "ctc1TG", "mthc1TG",
+ map_cop1bc, false, false, false,
+ false, false, false, false,
+ map_cop1s, map_cop1d, false, false,
+ map_cop1w, map_cop1l, map_cop1ps,
+}
+
+local map_cop1x = {
+ shift = 0, mask = 63,
+ [0] = "lwxc1FSX", "ldxc1FSX", false, false,
+ false, "luxc1FSX", false, false,
+ "swxc1FSX", "sdxc1FSX", false, false,
+ false, "suxc1FSX", false, "prefxMSX",
+ false, false, false, false,
+ false, false, false, false,
+ false, false, false, false,
+ false, false, "alnv.psFGHS", false,
+ "madd.sFRGH", "madd.dFRGH", false, false,
+ false, false, "madd.psFRGH", false,
+ "msub.sFRGH", "msub.dFRGH", false, false,
+ false, false, "msub.psFRGH", false,
+ "nmadd.sFRGH", "nmadd.dFRGH", false, false,
+ false, false, "nmadd.psFRGH", false,
+ "nmsub.sFRGH", "nmsub.dFRGH", false, false,
+ false, false, "nmsub.psFRGH", false,
+}
+
+local map_pri = {
+ [0] = map_special, map_regimm, "jJ", "jalJ",
+ "beq|beqz|bST00B", "bne|bnezST0B", "blezSB", "bgtzSB",
+ "addiTSI", "addiu|liTS0I", "sltiTSI", "sltiuTSI",
+ "andiTSU", "ori|liTS0U", "xoriTSU", "luiTU",
+ map_cop0, map_cop1, false, map_cop1x,
+ "beql|beqzlST0B", "bnel|bnezlST0B", "blezlSB", "bgtzlSB",
+ false, false, false, false,
+ map_special2, false, false, map_special3,
+ "lbTSO", "lhTSO", "lwlTSO", "lwTSO",
+ "lbuTSO", "lhuTSO", "lwrTSO", false,
+ "sbTSO", "shTSO", "swlTSO", "swTSO",
+ false, false, "swrTSO", "cacheNSO",
+ "llTSO", "lwc1HSO", "lwc2TSO", "prefNSO",
+ false, "ldc1HSO", "ldc2TSO", false,
+ "scTSO", "swc1HSO", "swc2TSO", false,
+ false, "sdc1HSO", "sdc2TSO", false,
+}
+
+------------------------------------------------------------------------------
+
+local map_gpr = {
+ [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "sp", "r30", "ra",
+}
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then extra = "\t->"..sym end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-7s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-7s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+local function get_be(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ return bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3)
+end
+
+local function get_le(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ return bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
+end
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local op = ctx:get()
+ local operands = {}
+ local last = nil
+ ctx.op = op
+ ctx.rel = nil
+
+ local opat = map_pri[rshift(op, 26)]
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
+ end
+ local name, pat = match(opat, "^([a-z0-9_.]*)(.*)")
+ local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)")
+ if altname then pat = pat2 end
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "S" then
+ x = map_gpr[band(rshift(op, 21), 31)]
+ elseif p == "T" then
+ x = map_gpr[band(rshift(op, 16), 31)]
+ elseif p == "D" then
+ x = map_gpr[band(rshift(op, 11), 31)]
+ elseif p == "F" then
+ x = "f"..band(rshift(op, 6), 31)
+ elseif p == "G" then
+ x = "f"..band(rshift(op, 11), 31)
+ elseif p == "H" then
+ x = "f"..band(rshift(op, 16), 31)
+ elseif p == "R" then
+ x = "f"..band(rshift(op, 21), 31)
+ elseif p == "A" then
+ x = band(rshift(op, 6), 31)
+ elseif p == "M" then
+ x = band(rshift(op, 11), 31)
+ elseif p == "N" then
+ x = band(rshift(op, 16), 31)
+ elseif p == "C" then
+ x = band(rshift(op, 18), 7)
+ if x == 0 then x = nil end
+ elseif p == "K" then
+ x = band(rshift(op, 11), 31) + 1
+ elseif p == "L" then
+ x = band(rshift(op, 11), 31) - last + 1
+ elseif p == "I" then
+ x = arshift(lshift(op, 16), 16)
+ elseif p == "U" then
+ x = band(op, 0xffff)
+ elseif p == "O" then
+ local disp = arshift(lshift(op, 16), 16)
+ operands[#operands] = format("%d(%s)", disp, last)
+ elseif p == "X" then
+ local index = map_gpr[band(rshift(op, 16), 31)]
+ operands[#operands] = format("%s(%s)", index, last)
+ elseif p == "B" then
+ x = ctx.addr + ctx.pos + arshift(lshift(op, 16), 16)*4 + 4
+ ctx.rel = x
+ x = "0x"..tohex(x)
+ elseif p == "J" then
+ x = band(ctx.addr + ctx.pos, 0xf0000000) + band(op, 0x03ffffff)*4
+ ctx.rel = x
+ x = "0x"..tohex(x)
+ elseif p == "V" then
+ x = band(rshift(op, 8), 7)
+ if x == 0 then x = nil end
+ elseif p == "W" then
+ x = band(op, 7)
+ if x == 0 then x = nil end
+ elseif p == "Y" then
+ x = band(rshift(op, 6), 0x000fffff)
+ if x == 0 then x = nil end
+ elseif p == "Z" then
+ x = band(rshift(op, 6), 1023)
+ if x == 0 then x = nil end
+ elseif p == "0" then
+ if last == "r0" or last == 0 then
+ local n = #operands
+ operands[n] = nil
+ last = operands[n-1]
+ if altname then
+ local a1, a2 = match(altname, "([^|]*)|(.*)")
+ if a1 then name, altname = a1, a2
+ else name = altname end
+ end
+ end
+ elseif p == "1" then
+ if last == "ra" then
+ operands[#operands] = nil
+ end
+ else
+ assert(false)
+ end
+ if x then operands[#operands+1] = x; last = x end
+ end
+
+ return putop(ctx, name, operands)
+end
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ stop = stop - stop % 4
+ ctx.pos = ofs - ofs % 4
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create_(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ ctx.get = get_be
+ return ctx
+end
+
+local function create_el_(code, addr, out)
+ local ctx = create_(code, addr, out)
+ ctx.get = get_le
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass_(code, addr, out)
+ create_(code, addr, out):disass()
+end
+
+local function disass_el_(code, addr, out)
+ create_el_(code, addr, out):disass()
+end
+
+-- Return register name for RID.
+local function regname_(r)
+ if r < 32 then return map_gpr[r] end
+ return "f"..(r-32)
+end
+
+-- Public module functions.
+module(...)
+
+create = create_
+create_el = create_el_
+disass = disass_
+disass_el = disass_el_
+regname = regname_
+
diff --git a/src/LuaJIT/lib/dis_mipsel.lua b/src/LuaJIT/lib/dis_mipsel.lua
new file mode 100644
index 000000000..4c5a65155
--- /dev/null
+++ b/src/LuaJIT/lib/dis_mipsel.lua
@@ -0,0 +1,20 @@
+----------------------------------------------------------------------------
+-- LuaJIT MIPSEL disassembler wrapper module.
+--
+-- Copyright (C) 2005-2011 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the little-endian functions from the
+-- MIPS disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local require = require
+
+module(...)
+
+local dis_mips = require(_PACKAGE.."dis_mips")
+
+create = dis_mips.create_el
+disass = dis_mips.disass_el
+regname = dis_mips.regname
+
diff --git a/src/LuaJIT/lib/dis_ppc.lua b/src/LuaJIT/lib/dis_ppc.lua
new file mode 100644
index 000000000..ad8599fcc
--- /dev/null
+++ b/src/LuaJIT/lib/dis_ppc.lua
@@ -0,0 +1,591 @@
+----------------------------------------------------------------------------
+-- LuaJIT PPC disassembler module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- Released under the MIT/X license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles all common, non-privileged 32/64 bit PowerPC instructions
+-- plus the e500 SPE instructions and some Cell/Xenon extensions.
+--
+-- NYI: VMX, VMX128
+------------------------------------------------------------------------------
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local concat = table.concat
+local bit = require("bit")
+local band, bor, tohex = bit.band, bit.bor, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+
+------------------------------------------------------------------------------
+-- Primary and extended opcode maps
+------------------------------------------------------------------------------
+
+local map_crops = {
+ shift = 1, mask = 1023,
+ [0] = "mcrfXX",
+ [33] = "crnor|crnotCCC=", [129] = "crandcCCC",
+ [193] = "crxor|crclrCCC%", [225] = "crnandCCC",
+ [257] = "crandCCC", [289] = "creqv|crsetCCC%",
+ [417] = "crorcCCC", [449] = "cror|crmoveCCC=",
+ [16] = "b_lrKB", [528] = "b_ctrKB",
+ [150] = "isync",
+}
+
+local map_rlwinm = setmetatable({
+ shift = 0, mask = -1,
+},
+{ __index = function(t, x)
+ local rot = band(rshift(x, 11), 31)
+ local mb = band(rshift(x, 6), 31)
+ local me = band(rshift(x, 1), 31)
+ if mb == 0 and me == 31-rot then
+ return "slwiRR~A."
+ elseif me == 31 and mb == 32-rot then
+ return "srwiRR~-A."
+ else
+ return "rlwinmRR~AAA."
+ end
+ end
+})
+
+local map_rld = {
+ shift = 2, mask = 7,
+ [0] = "rldiclRR~HM.", "rldicrRR~HM.", "rldicRR~HM.", "rldimiRR~HM.",
+ {
+ shift = 1, mask = 1,
+ [0] = "rldclRR~HM.", "rldcrRR~HM.",
+ },
+}
+
+local map_ext = setmetatable({
+ shift = 1, mask = 1023,
+
+ [0] = "cmp_YLRR", [32] = "cmpl_YLRR",
+ [4] = "twARR", [68] = "tdARR",
+
+ [8] = "subfcRRR.", [40] = "subfRRR.",
+ [104] = "negRR.", [136] = "subfeRRR.",
+ [200] = "subfzeRR.", [232] = "subfmeRR.",
+ [520] = "subfcoRRR.", [552] = "subfoRRR.",
+ [616] = "negoRR.", [648] = "subfeoRRR.",
+ [712] = "subfzeoRR.", [744] = "subfmeoRR.",
+
+ [9] = "mulhduRRR.", [73] = "mulhdRRR.", [233] = "mulldRRR.",
+ [457] = "divduRRR.", [489] = "divdRRR.",
+ [745] = "mulldoRRR.",
+ [969] = "divduoRRR.", [1001] = "divdoRRR.",
+
+ [10] = "addcRRR.", [138] = "addeRRR.",
+ [202] = "addzeRR.", [234] = "addmeRR.", [266] = "addRRR.",
+ [522] = "addcoRRR.", [650] = "addeoRRR.",
+ [714] = "addzeoRR.", [746] = "addmeoRR.", [778] = "addoRRR.",
+
+ [11] = "mulhwuRRR.", [75] = "mulhwRRR.", [235] = "mullwRRR.",
+ [459] = "divwuRRR.", [491] = "divwRRR.",
+ [747] = "mullwoRRR.",
+ [971] = "divwouRRR.", [1003] = "divwoRRR.",
+
+ [15] = "iselltRRR", [47] = "iselgtRRR", [79] = "iseleqRRR",
+
+ [144] = { shift = 20, mask = 1, [0] = "mtcrfRZ~", "mtocrfRZ~", },
+ [19] = { shift = 20, mask = 1, [0] = "mfcrR", "mfocrfRZ", },
+ [371] = { shift = 11, mask = 1023, [392] = "mftbR", [424] = "mftbuR", },
+ [339] = {
+ shift = 11, mask = 1023,
+ [32] = "mferR", [256] = "mflrR", [288] = "mfctrR", [16] = "mfspefscrR",
+ },
+ [467] = {
+ shift = 11, mask = 1023,
+ [32] = "mtxerR", [256] = "mtlrR", [288] = "mtctrR", [16] = "mtspefscrR",
+ },
+
+ [20] = "lwarxRR0R", [84] = "ldarxRR0R",
+
+ [21] = "ldxRR0R", [53] = "lduxRRR",
+ [149] = "stdxRR0R", [181] = "stduxRRR",
+ [341] = "lwaxRR0R", [373] = "lwauxRRR",
+
+ [23] = "lwzxRR0R", [55] = "lwzuxRRR",
+ [87] = "lbzxRR0R", [119] = "lbzuxRRR",
+ [151] = "stwxRR0R", [183] = "stwuxRRR",
+ [215] = "stbxRR0R", [247] = "stbuxRRR",
+ [279] = "lhzxRR0R", [311] = "lhzuxRRR",
+ [343] = "lhaxRR0R", [375] = "lhauxRRR",
+ [407] = "sthxRR0R", [439] = "sthuxRRR",
+
+ [54] = "dcbst-R0R", [86] = "dcbf-R0R",
+ [150] = "stwcxRR0R.", [214] = "stdcxRR0R.",
+ [246] = "dcbtst-R0R", [278] = "dcbt-R0R",
+ [310] = "eciwxRR0R", [438] = "ecowxRR0R",
+ [470] = "dcbi-RR",
+
+ [598] = {
+ shift = 21, mask = 3,
+ [0] = "sync", "lwsync", "ptesync",
+ },
+ [758] = "dcba-RR",
+ [854] = "eieio", [982] = "icbi-R0R", [1014] = "dcbz-R0R",
+
+ [26] = "cntlzwRR~", [58] = "cntlzdRR~",
+ [122] = "popcntbRR~",
+ [154] = "prtywRR~", [186] = "prtydRR~",
+
+ [28] = "andRR~R.", [60] = "andcRR~R.", [124] = "nor|notRR~R=.",
+ [284] = "eqvRR~R.", [316] = "xorRR~R.",
+ [412] = "orcRR~R.", [444] = "or|mrRR~R=.", [476] = "nandRR~R.",
+ [508] = "cmpbRR~R",
+
+ [512] = "mcrxrX",
+
+ [532] = "ldbrxRR0R", [660] = "stdbrxRR0R",
+
+ [533] = "lswxRR0R", [597] = "lswiRR0A",
+ [661] = "stswxRR0R", [725] = "stswiRR0A",
+
+ [534] = "lwbrxRR0R", [662] = "stwbrxRR0R",
+ [790] = "lhbrxRR0R", [918] = "sthbrxRR0R",
+
+ [535] = "lfsxFR0R", [567] = "lfsuxFRR",
+ [599] = "lfdxFR0R", [631] = "lfduxFRR",
+ [663] = "stfsxFR0R", [695] = "stfsuxFRR",
+ [727] = "stfdxFR0R", [759] = "stfduxFR0R",
+ [855] = "lfiwaxFR0R",
+ [983] = "stfiwxFR0R",
+
+ [24] = "slwRR~R.",
+
+ [27] = "sldRR~R.", [536] = "srwRR~R.",
+ [792] = "srawRR~R.", [824] = "srawiRR~A.",
+
+ [794] = "sradRR~R.", [826] = "sradiRR~H.", [827] = "sradiRR~H.",
+ [922] = "extshRR~.", [954] = "extsbRR~.", [986] = "extswRR~.",
+
+ [539] = "srdRR~R.",
+},
+{ __index = function(t, x)
+ if band(x, 31) == 15 then return "iselRRRC" end
+ end
+})
+
+local map_ld = {
+ shift = 0, mask = 3,
+ [0] = "ldRRE", "lduRRE", "lwaRRE",
+}
+
+local map_std = {
+ shift = 0, mask = 3,
+ [0] = "stdRRE", "stduRRE",
+}
+
+local map_fps = {
+ shift = 5, mask = 1,
+ {
+ shift = 1, mask = 15,
+ [0] = false, false, "fdivsFFF.", false,
+ "fsubsFFF.", "faddsFFF.", "fsqrtsF-F.", false,
+ "fresF-F.", "fmulsFF-F.", "frsqrtesF-F.", false,
+ "fmsubsFFFF~.", "fmaddsFFFF~.", "fnmsubsFFFF~.", "fnmaddsFFFF~.",
+ }
+}
+
+local map_fpd = {
+ shift = 5, mask = 1,
+ [0] = {
+ shift = 1, mask = 1023,
+ [0] = "fcmpuXFF", [32] = "fcmpoXFF", [64] = "mcrfsXX",
+ [38] = "mtfsb1A.", [70] = "mtfsb0A.", [134] = "mtfsfiA>>-A>",
+ [8] = "fcpsgnFFF.", [40] = "fnegF-F.", [72] = "fmrF-F.",
+ [136] = "fnabsF-F.", [264] = "fabsF-F.",
+ [12] = "frspF-F.",
+ [14] = "fctiwF-F.", [15] = "fctiwzF-F.",
+ [583] = "mffsF.", [711] = "mtfsfZF.",
+ [392] = "frinF-F.", [424] = "frizF-F.",
+ [456] = "fripF-F.", [488] = "frimF-F.",
+ [814] = "fctidF-F.", [815] = "fctidzF-F.", [846] = "fcfidF-F.",
+ },
+ {
+ shift = 1, mask = 15,
+ [0] = false, false, "fdivFFF.", false,
+ "fsubFFF.", "faddFFF.", "fsqrtF-F.", "fselFFFF~.",
+ "freF-F.", "fmulFF-F.", "frsqrteF-F.", false,
+ "fmsubFFFF~.", "fmaddFFFF~.", "fnmsubFFFF~.", "fnmaddFFFF~.",
+ }
+}
+
+local map_spe = {
+ shift = 0, mask = 2047,
+
+ [512] = "evaddwRRR", [514] = "evaddiwRAR~",
+ [516] = "evsubwRRR~", [518] = "evsubiwRAR~",
+ [520] = "evabsRR", [521] = "evnegRR",
+ [522] = "evextsbRR", [523] = "evextshRR", [524] = "evrndwRR",
+ [525] = "evcntlzwRR", [526] = "evcntlswRR",
+
+ [527] = "brincRRR",
+
+ [529] = "evandRRR", [530] = "evandcRRR", [534] = "evxorRRR",
+ [535] = "evor|evmrRRR=", [536] = "evnor|evnotRRR=",
+ [537] = "eveqvRRR", [539] = "evorcRRR", [542] = "evnandRRR",
+
+ [544] = "evsrwuRRR", [545] = "evsrwsRRR",
+ [546] = "evsrwiuRRA", [547] = "evsrwisRRA",
+ [548] = "evslwRRR", [550] = "evslwiRRA",
+ [552] = "evrlwRRR", [553] = "evsplatiRS",
+ [554] = "evrlwiRRA", [555] = "evsplatfiRS",
+ [556] = "evmergehiRRR", [557] = "evmergeloRRR",
+ [558] = "evmergehiloRRR", [559] = "evmergelohiRRR",
+
+ [560] = "evcmpgtuYRR", [561] = "evcmpgtsYRR",
+ [562] = "evcmpltuYRR", [563] = "evcmpltsYRR",
+ [564] = "evcmpeqYRR",
+
+ [632] = "evselRRR", [633] = "evselRRRW",
+ [634] = "evselRRRW", [635] = "evselRRRW",
+ [636] = "evselRRRW", [637] = "evselRRRW",
+ [638] = "evselRRRW", [639] = "evselRRRW",
+
+ [640] = "evfsaddRRR", [641] = "evfssubRRR",
+ [644] = "evfsabsRR", [645] = "evfsnabsRR", [646] = "evfsnegRR",
+ [648] = "evfsmulRRR", [649] = "evfsdivRRR",
+ [652] = "evfscmpgtYRR", [653] = "evfscmpltYRR", [654] = "evfscmpeqYRR",
+ [656] = "evfscfuiR-R", [657] = "evfscfsiR-R",
+ [658] = "evfscfufR-R", [659] = "evfscfsfR-R",
+ [660] = "evfsctuiR-R", [661] = "evfsctsiR-R",
+ [662] = "evfsctufR-R", [663] = "evfsctsfR-R",
+ [664] = "evfsctuizR-R", [666] = "evfsctsizR-R",
+ [668] = "evfststgtYRR", [669] = "evfststltYRR", [670] = "evfststeqYRR",
+
+ [704] = "efsaddRRR", [705] = "efssubRRR",
+ [708] = "efsabsRR", [709] = "efsnabsRR", [710] = "efsnegRR",
+ [712] = "efsmulRRR", [713] = "efsdivRRR",
+ [716] = "efscmpgtYRR", [717] = "efscmpltYRR", [718] = "efscmpeqYRR",
+ [719] = "efscfdR-R",
+ [720] = "efscfuiR-R", [721] = "efscfsiR-R",
+ [722] = "efscfufR-R", [723] = "efscfsfR-R",
+ [724] = "efsctuiR-R", [725] = "efsctsiR-R",
+ [726] = "efsctufR-R", [727] = "efsctsfR-R",
+ [728] = "efsctuizR-R", [730] = "efsctsizR-R",
+ [732] = "efststgtYRR", [733] = "efststltYRR", [734] = "efststeqYRR",
+
+ [736] = "efdaddRRR", [737] = "efdsubRRR",
+ [738] = "efdcfuidR-R", [739] = "efdcfsidR-R",
+ [740] = "efdabsRR", [741] = "efdnabsRR", [742] = "efdnegRR",
+ [744] = "efdmulRRR", [745] = "efddivRRR",
+ [746] = "efdctuidzR-R", [747] = "efdctsidzR-R",
+ [748] = "efdcmpgtYRR", [749] = "efdcmpltYRR", [750] = "efdcmpeqYRR",
+ [751] = "efdcfsR-R",
+ [752] = "efdcfuiR-R", [753] = "efdcfsiR-R",
+ [754] = "efdcfufR-R", [755] = "efdcfsfR-R",
+ [756] = "efdctuiR-R", [757] = "efdctsiR-R",
+ [758] = "efdctufR-R", [759] = "efdctsfR-R",
+ [760] = "efdctuizR-R", [762] = "efdctsizR-R",
+ [764] = "efdtstgtYRR", [765] = "efdtstltYRR", [766] = "efdtsteqYRR",
+
+ [768] = "evlddxRR0R", [769] = "evlddRR8",
+ [770] = "evldwxRR0R", [771] = "evldwRR8",
+ [772] = "evldhxRR0R", [773] = "evldhRR8",
+ [776] = "evlhhesplatxRR0R", [777] = "evlhhesplatRR2",
+ [780] = "evlhhousplatxRR0R", [781] = "evlhhousplatRR2",
+ [782] = "evlhhossplatxRR0R", [783] = "evlhhossplatRR2",
+ [784] = "evlwhexRR0R", [785] = "evlwheRR4",
+ [788] = "evlwhouxRR0R", [789] = "evlwhouRR4",
+ [790] = "evlwhosxRR0R", [791] = "evlwhosRR4",
+ [792] = "evlwwsplatxRR0R", [793] = "evlwwsplatRR4",
+ [796] = "evlwhsplatxRR0R", [797] = "evlwhsplatRR4",
+
+ [800] = "evstddxRR0R", [801] = "evstddRR8",
+ [802] = "evstdwxRR0R", [803] = "evstdwRR8",
+ [804] = "evstdhxRR0R", [805] = "evstdhRR8",
+ [816] = "evstwhexRR0R", [817] = "evstwheRR4",
+ [820] = "evstwhoxRR0R", [821] = "evstwhoRR4",
+ [824] = "evstwwexRR0R", [825] = "evstwweRR4",
+ [828] = "evstwwoxRR0R", [829] = "evstwwoRR4",
+
+ [1027] = "evmhessfRRR", [1031] = "evmhossfRRR", [1032] = "evmheumiRRR",
+ [1033] = "evmhesmiRRR", [1035] = "evmhesmfRRR", [1036] = "evmhoumiRRR",
+ [1037] = "evmhosmiRRR", [1039] = "evmhosmfRRR", [1059] = "evmhessfaRRR",
+ [1063] = "evmhossfaRRR", [1064] = "evmheumiaRRR", [1065] = "evmhesmiaRRR",
+ [1067] = "evmhesmfaRRR", [1068] = "evmhoumiaRRR", [1069] = "evmhosmiaRRR",
+ [1071] = "evmhosmfaRRR", [1095] = "evmwhssfRRR", [1096] = "evmwlumiRRR",
+ [1100] = "evmwhumiRRR", [1101] = "evmwhsmiRRR", [1103] = "evmwhsmfRRR",
+ [1107] = "evmwssfRRR", [1112] = "evmwumiRRR", [1113] = "evmwsmiRRR",
+ [1115] = "evmwsmfRRR", [1127] = "evmwhssfaRRR", [1128] = "evmwlumiaRRR",
+ [1132] = "evmwhumiaRRR", [1133] = "evmwhsmiaRRR", [1135] = "evmwhsmfaRRR",
+ [1139] = "evmwssfaRRR", [1144] = "evmwumiaRRR", [1145] = "evmwsmiaRRR",
+ [1147] = "evmwsmfaRRR",
+
+ [1216] = "evaddusiaawRR", [1217] = "evaddssiaawRR",
+ [1218] = "evsubfusiaawRR", [1219] = "evsubfssiaawRR",
+ [1220] = "evmraRR",
+ [1222] = "evdivwsRRR", [1223] = "evdivwuRRR",
+ [1224] = "evaddumiaawRR", [1225] = "evaddsmiaawRR",
+ [1226] = "evsubfumiaawRR", [1227] = "evsubfsmiaawRR",
+
+ [1280] = "evmheusiaawRRR", [1281] = "evmhessiaawRRR",
+ [1283] = "evmhessfaawRRR", [1284] = "evmhousiaawRRR",
+ [1285] = "evmhossiaawRRR", [1287] = "evmhossfaawRRR",
+ [1288] = "evmheumiaawRRR", [1289] = "evmhesmiaawRRR",
+ [1291] = "evmhesmfaawRRR", [1292] = "evmhoumiaawRRR",
+ [1293] = "evmhosmiaawRRR", [1295] = "evmhosmfaawRRR",
+ [1320] = "evmhegumiaaRRR", [1321] = "evmhegsmiaaRRR",
+ [1323] = "evmhegsmfaaRRR", [1324] = "evmhogumiaaRRR",
+ [1325] = "evmhogsmiaaRRR", [1327] = "evmhogsmfaaRRR",
+ [1344] = "evmwlusiaawRRR", [1345] = "evmwlssiaawRRR",
+ [1352] = "evmwlumiaawRRR", [1353] = "evmwlsmiaawRRR",
+ [1363] = "evmwssfaaRRR", [1368] = "evmwumiaaRRR",
+ [1369] = "evmwsmiaaRRR", [1371] = "evmwsmfaaRRR",
+ [1408] = "evmheusianwRRR", [1409] = "evmhessianwRRR",
+ [1411] = "evmhessfanwRRR", [1412] = "evmhousianwRRR",
+ [1413] = "evmhossianwRRR", [1415] = "evmhossfanwRRR",
+ [1416] = "evmheumianwRRR", [1417] = "evmhesmianwRRR",
+ [1419] = "evmhesmfanwRRR", [1420] = "evmhoumianwRRR",
+ [1421] = "evmhosmianwRRR", [1423] = "evmhosmfanwRRR",
+ [1448] = "evmhegumianRRR", [1449] = "evmhegsmianRRR",
+ [1451] = "evmhegsmfanRRR", [1452] = "evmhogumianRRR",
+ [1453] = "evmhogsmianRRR", [1455] = "evmhogsmfanRRR",
+ [1472] = "evmwlusianwRRR", [1473] = "evmwlssianwRRR",
+ [1480] = "evmwlumianwRRR", [1481] = "evmwlsmianwRRR",
+ [1491] = "evmwssfanRRR", [1496] = "evmwumianRRR",
+ [1497] = "evmwsmianRRR", [1499] = "evmwsmfanRRR",
+}
+
+local map_pri = {
+ [0] = false, false, "tdiARI", "twiARI",
+ map_spe, false, false, "mulliRRI",
+ "subficRRI", false, "cmpl_iYLRU", "cmp_iYLRI",
+ "addicRRI", "addic.RRI", "addi|liRR0I", "addis|lisRR0I",
+ "b_KBJ", "sc", "bKJ", map_crops,
+ "rlwimiRR~AAA.", map_rlwinm, false, "rlwnmRR~RAA.",
+ "oriNRR~U", "orisRR~U", "xoriRR~U", "xorisRR~U",
+ "andi.RR~U", "andis.RR~U", map_rld, map_ext,
+ "lwzRRD", "lwzuRRD", "lbzRRD", "lbzuRRD",
+ "stwRRD", "stwuRRD", "stbRRD", "stbuRRD",
+ "lhzRRD", "lhzuRRD", "lhaRRD", "lhauRRD",
+ "sthRRD", "sthuRRD", "lmwRRD", "stmwRRD",
+ "lfsFRD", "lfsuFRD", "lfdFRD", "lfduFRD",
+ "stfsFRD", "stfsuFRD", "stfdFRD", "stfduFRD",
+ false, false, map_ld, map_fps,
+ false, false, map_std, map_fpd,
+}
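+
+-- map_pri is indexed with the 6-bit primary opcode (the top six bits of the
+-- instruction word, rshift(b0, 2) in disass_ins below); e.g. a first byte of
+-- 0x38 gives primary opcode 14, which selects "addi|liRR0I".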
+
+------------------------------------------------------------------------------
+
+local map_gpr = {
+ [0] = "r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+}
+
+local map_cond = { [0] = "lt", "gt", "eq", "so", "ge", "le", "ne", "ns", }
+
+-- Format a condition bit.
+local function condfmt(cond)
+ if cond <= 3 then
+ return map_cond[band(cond, 3)]
+ else
+ return format("4*cr%d+%s", rshift(cond, 2), map_cond[band(cond, 3)])
+ end
+end
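+-- E.g. condfmt(2) yields "eq" (a bit of cr0), while condfmt(6) yields
+-- "4*cr1+eq" (the eq bit of cr1).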
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then extra = "\t->"..sym end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-7s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-7s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ local op = bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3)
+ local operands = {}
+ local last = nil
+ local rs = 21
+ ctx.op = op
+ ctx.rel = nil
+
+ local opat = map_pri[rshift(b0, 2)]
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ opat = opat[band(rshift(op, opat.shift), opat.mask)]
+ end
+ local name, pat = match(opat, "^([a-z0-9_.]*)(.*)")
+ local altname, pat2 = match(pat, "|([a-z0-9_.]*)(.*)")
+ if altname then pat = pat2 end
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "R" then
+ x = map_gpr[band(rshift(op, rs), 31)]
+ rs = rs - 5
+ elseif p == "F" then
+ x = "f"..band(rshift(op, rs), 31)
+ rs = rs - 5
+ elseif p == "A" then
+ x = band(rshift(op, rs), 31)
+ rs = rs - 5
+ elseif p == "S" then
+ x = arshift(lshift(op, 27-rs), 27)
+ rs = rs - 5
+ elseif p == "I" then
+ x = arshift(lshift(op, 16), 16)
+ elseif p == "U" then
+ x = band(op, 0xffff)
+ elseif p == "D" or p == "E" then
+ local disp = arshift(lshift(op, 16), 16)
+ if p == "E" then disp = band(disp, -4) end
+ if last == "r0" then last = "0" end
+ operands[#operands] = format("%d(%s)", disp, last)
+ elseif p >= "2" and p <= "8" then
+ local disp = band(rshift(op, rs), 31) * p
+ if last == "r0" then last = "0" end
+ operands[#operands] = format("%d(%s)", disp, last)
+ elseif p == "H" then
+ x = band(rshift(op, rs), 31) + lshift(band(op, 2), 4)
+ rs = rs - 5
+ elseif p == "M" then
+ x = band(rshift(op, rs), 31) + band(op, 0x20)
+ elseif p == "C" then
+ x = condfmt(band(rshift(op, rs), 31))
+ rs = rs - 5
+ elseif p == "B" then
+ local bo = rshift(op, 21)
+ local cond = band(rshift(op, 16), 31)
+ local cn = ""
+ rs = rs - 10
+ if band(bo, 4) == 0 then
+ cn = band(bo, 2) == 0 and "dnz" or "dz"
+ if band(bo, 0x10) == 0 then
+ cn = cn..(band(bo, 8) == 0 and "f" or "t")
+ end
+ if band(bo, 0x10) == 0 then x = condfmt(cond) end
+ name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+")
+ elseif band(bo, 0x10) == 0 then
+ cn = map_cond[band(cond, 3) + (band(bo, 8) == 0 and 4 or 0)]
+ if cond > 3 then x = "cr"..rshift(cond, 2) end
+ name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+")
+ end
+ name = gsub(name, "_", cn)
+ elseif p == "J" then
+ x = arshift(lshift(op, 27-rs), 29-rs)*4
+ if band(op, 2) == 0 then x = ctx.addr + pos + x end
+ ctx.rel = x
+ x = "0x"..tohex(x)
+ elseif p == "K" then
+ if band(op, 1) ~= 0 then name = name.."l" end
+ if band(op, 2) ~= 0 then name = name.."a" end
+ elseif p == "X" or p == "Y" then
+ x = band(rshift(op, rs+2), 7)
+ if x == 0 and p == "Y" then x = nil else x = "cr"..x end
+ rs = rs - 5
+ elseif p == "W" then
+ x = "cr"..band(op, 7)
+ elseif p == "Z" then
+ x = band(rshift(op, rs-4), 255)
+ rs = rs - 10
+ elseif p == ">" then
+ operands[#operands] = rshift(operands[#operands], 1)
+ elseif p == "0" then
+ if last == "r0" then
+ operands[#operands] = nil
+ if altname then name = altname end
+ end
+ elseif p == "L" then
+ name = gsub(name, "_", band(op, 0x00200000) ~= 0 and "d" or "w")
+ elseif p == "." then
+ if band(op, 1) == 1 then name = name.."." end
+ elseif p == "N" then
+ if op == 0x60000000 then name = "nop"; break end
+ elseif p == "~" then
+ local n = #operands
+ operands[n-1], operands[n] = operands[n], operands[n-1]
+ elseif p == "=" then
+ local n = #operands
+ if last == operands[n-1] then
+ operands[n] = nil
+ name = altname
+ end
+ elseif p == "%" then
+ local n = #operands
+ if last == operands[n-1] and last == operands[n-2] then
+ operands[n] = nil
+ operands[n-1] = nil
+ name = altname
+ end
+ elseif p == "-" then
+ rs = rs - 5
+ else
+ assert(false)
+ end
+ if x then operands[#operands+1] = x; last = x end
+ end
+
+ return putop(ctx, name, operands)
+end
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ stop = stop - stop % 4
+ ctx.pos = ofs - ofs % 4
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create_(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass_(code, addr, out)
+ create_(code, addr, out):disass()
+end
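+
+-- Usage sketch (illustrative; assumes this file is installed as jit.dis_ppc,
+-- and that `mcode` and `addr` hold a machine code string and its address):
+--
+--   local dis = require("jit.dis_ppc")
+--   dis.disass("\56\32\0\1", 0x10000)   -- should print something like "li r1, 1"
+--
+--   local ctx = dis.create(mcode, addr, io.write)
+--   ctx.hexdump = 0                     -- omit the hex column
+--   ctx:disass(0, #mcode)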
+
+-- Return register name for RID.
+local function regname_(r)
+ if r < 32 then return map_gpr[r] end
+ return "f"..(r-32)
+end
+
+-- Public module functions.
+module(...)
+
+create = create_
+disass = disass_
+regname = regname_
+
diff --git a/src/LuaJIT/lib/dis_x64.lua b/src/LuaJIT/lib/dis_x64.lua
new file mode 100644
index 000000000..d95d7ded6
--- /dev/null
+++ b/src/LuaJIT/lib/dis_x64.lua
@@ -0,0 +1,20 @@
+----------------------------------------------------------------------------
+-- LuaJIT x64 disassembler wrapper module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the 64 bit functions from the combined
+-- x86/x64 disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local require = require
+
+module(...)
+
+local dis_x86 = require(_PACKAGE.."dis_x86")
+
+create = dis_x86.create64
+disass = dis_x86.disass64
+regname = dis_x86.regname64
+
diff --git a/src/LuaJIT/lib/dis_x86.lua b/src/LuaJIT/lib/dis_x86.lua
new file mode 100644
index 000000000..5aeeb4498
--- /dev/null
+++ b/src/LuaJIT/lib/dis_x86.lua
@@ -0,0 +1,836 @@
+----------------------------------------------------------------------------
+-- LuaJIT x86/x64 disassembler module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- Sending small code snippets to an external disassembler and mixing the
+-- output with our own stuff was too fragile. So I had to bite the bullet
+-- and write yet another x86 disassembler. Oh well ...
+--
+-- The output format is very similar to what ndisasm generates. But it has
+-- been developed independently by looking at the opcode tables from the
+-- Intel and AMD manuals. The supported instruction set is quite extensive
+-- and reflects what a current generation Intel or AMD CPU implements in
+-- 32 bit and 64 bit mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3,
+-- SSE4.1, SSE4.2, SSE4a and even privileged and hypervisor (VMX/SVM)
+-- instructions.
+--
+-- Notes:
+-- * The (useless) a16 prefix, 3DNow and pre-586 opcodes are unsupported.
+-- * No attempt at optimization has been made -- it's fast enough for my needs.
+-- * The public API may change when more architectures are added.
+------------------------------------------------------------------------------
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local lower, rep = string.lower, string.rep
+
+-- Map for 1st opcode byte in 32 bit mode. Ugly? Well ... read on.
+local map_opc1_32 = {
+--0x
+[0]="addBmr","addVmr","addBrm","addVrm","addBai","addVai","push es","pop es",
+"orBmr","orVmr","orBrm","orVrm","orBai","orVai","push cs","opc2*",
+--1x
+"adcBmr","adcVmr","adcBrm","adcVrm","adcBai","adcVai","push ss","pop ss",
+"sbbBmr","sbbVmr","sbbBrm","sbbVrm","sbbBai","sbbVai","push ds","pop ds",
+--2x
+"andBmr","andVmr","andBrm","andVrm","andBai","andVai","es:seg","daa",
+"subBmr","subVmr","subBrm","subVrm","subBai","subVai","cs:seg","das",
+--3x
+"xorBmr","xorVmr","xorBrm","xorVrm","xorBai","xorVai","ss:seg","aaa",
+"cmpBmr","cmpVmr","cmpBrm","cmpVrm","cmpBai","cmpVai","ds:seg","aas",
+--4x
+"incVR","incVR","incVR","incVR","incVR","incVR","incVR","incVR",
+"decVR","decVR","decVR","decVR","decVR","decVR","decVR","decVR",
+--5x
+"pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR",
+"popUR","popUR","popUR","popUR","popUR","popUR","popUR","popUR",
+--6x
+"sz*pushaw,pusha","sz*popaw,popa","boundVrm","arplWmr",
+"fs:seg","gs:seg","o16:","a16",
+"pushUi","imulVrmi","pushBs","imulVrms",
+"insb","insVS","outsb","outsVS",
+--7x
+"joBj","jnoBj","jbBj","jnbBj","jzBj","jnzBj","jbeBj","jaBj",
+"jsBj","jnsBj","jpeBj","jpoBj","jlBj","jgeBj","jleBj","jgBj",
+--8x
+"arith!Bmi","arith!Vmi","arith!Bmi","arith!Vms",
+"testBmr","testVmr","xchgBrm","xchgVrm",
+"movBmr","movVmr","movBrm","movVrm",
+"movVmg","leaVrm","movWgm","popUm",
+--9x
+"nop*xchgVaR|pause|xchgWaR|repne nop","xchgVaR","xchgVaR","xchgVaR",
+"xchgVaR","xchgVaR","xchgVaR","xchgVaR",
+"sz*cbw,cwde,cdqe","sz*cwd,cdq,cqo","call farViw","wait",
+"sz*pushfw,pushf","sz*popfw,popf","sahf","lahf",
+--Ax
+"movBao","movVao","movBoa","movVoa",
+"movsb","movsVS","cmpsb","cmpsVS",
+"testBai","testVai","stosb","stosVS",
+"lodsb","lodsVS","scasb","scasVS",
+--Bx
+"movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi",
+"movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI",
+--Cx
+"shift!Bmu","shift!Vmu","retBw","ret","$lesVrm","$ldsVrm","movBmi","movVmi",
+"enterBwu","leave","retfBw","retf","int3","intBu","into","iretVS",
+--Dx
+"shift!Bm1","shift!Vm1","shift!Bmc","shift!Vmc","aamBu","aadBu","salc","xlatb",
+"fp*0","fp*1","fp*2","fp*3","fp*4","fp*5","fp*6","fp*7",
+--Ex
+"loopneBj","loopeBj","loopBj","sz*jcxzBj,jecxzBj,jrcxzBj",
+"inBau","inVau","outBua","outVua",
+"callVj","jmpVj","jmp farViw","jmpBj","inBad","inVad","outBda","outVda",
+--Fx
+"lock:","int1","repne:rep","rep:","hlt","cmc","testb!Bm","testv!Vm",
+"clc","stc","cli","sti","cld","std","incb!Bm","incd!Vm",
+}
+assert(#map_opc1_32 == 255)
+
+-- Map for 1st opcode byte in 64 bit mode (overrides only).
+local map_opc1_64 = setmetatable({
+ [0x06]=false, [0x07]=false, [0x0e]=false,
+ [0x16]=false, [0x17]=false, [0x1e]=false, [0x1f]=false,
+ [0x27]=false, [0x2f]=false, [0x37]=false, [0x3f]=false,
+ [0x60]=false, [0x61]=false, [0x62]=false, [0x63]="movsxdVrDmt", [0x67]="a32:",
+ [0x40]="rex*", [0x41]="rex*b", [0x42]="rex*x", [0x43]="rex*xb",
+ [0x44]="rex*r", [0x45]="rex*rb", [0x46]="rex*rx", [0x47]="rex*rxb",
+ [0x48]="rex*w", [0x49]="rex*wb", [0x4a]="rex*wx", [0x4b]="rex*wxb",
+ [0x4c]="rex*wr", [0x4d]="rex*wrb", [0x4e]="rex*wrx", [0x4f]="rex*wrxb",
+ [0x82]=false, [0x9a]=false, [0xc4]=false, [0xc5]=false, [0xce]=false,
+ [0xd4]=false, [0xd5]=false, [0xd6]=false, [0xea]=false,
+}, { __index = map_opc1_32 })
+
+-- Map for 2nd opcode byte (0F xx). True CISC hell. Hey, I told you.
+-- Prefix dependent MMX/SSE opcodes: (none)|rep|o16|repne, -|F3|66|F2
+local map_opc2 = {
+--0x
+[0]="sldt!Dmp","sgdt!Ump","larVrm","lslVrm",nil,"syscall","clts","sysret",
+"invd","wbinvd",nil,"ud1",nil,"$prefetch!Bm","femms","3dnowMrmu",
+--1x
+"movupsXrm|movssXrm|movupdXrm|movsdXrm",
+"movupsXmr|movssXmr|movupdXmr|movsdXmr",
+"movhlpsXrm$movlpsXrm|movsldupXrm|movlpdXrm|movddupXrm",
+"movlpsXmr||movlpdXmr",
+"unpcklpsXrm||unpcklpdXrm",
+"unpckhpsXrm||unpckhpdXrm",
+"movlhpsXrm$movhpsXrm|movshdupXrm|movhpdXrm",
+"movhpsXmr||movhpdXmr",
+"$prefetcht!Bm","hintnopVm","hintnopVm","hintnopVm",
+"hintnopVm","hintnopVm","hintnopVm","hintnopVm",
+--2x
+"movUmx$","movUmy$","movUxm$","movUym$","movUmz$",nil,"movUzm$",nil,
+"movapsXrm||movapdXrm",
+"movapsXmr||movapdXmr",
+"cvtpi2psXrMm|cvtsi2ssXrVmt|cvtpi2pdXrMm|cvtsi2sdXrVmt",
+"movntpsXmr|movntssXmr|movntpdXmr|movntsdXmr",
+"cvttps2piMrXm|cvttss2siVrXm|cvttpd2piMrXm|cvttsd2siVrXm",
+"cvtps2piMrXm|cvtss2siVrXm|cvtpd2piMrXm|cvtsd2siVrXm",
+"ucomissXrm||ucomisdXrm",
+"comissXrm||comisdXrm",
+--3x
+"wrmsr","rdtsc","rdmsr","rdpmc","sysenter","sysexit",nil,"getsec",
+"opc3*38",nil,"opc3*3a",nil,nil,nil,nil,nil,
+--4x
+"cmovoVrm","cmovnoVrm","cmovbVrm","cmovnbVrm",
+"cmovzVrm","cmovnzVrm","cmovbeVrm","cmovaVrm",
+"cmovsVrm","cmovnsVrm","cmovpeVrm","cmovpoVrm",
+"cmovlVrm","cmovgeVrm","cmovleVrm","cmovgVrm",
+--5x
+"movmskpsVrXm$||movmskpdVrXm$","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm",
+"rsqrtpsXrm|rsqrtssXrm","rcppsXrm|rcpssXrm",
+"andpsXrm||andpdXrm","andnpsXrm||andnpdXrm",
+"orpsXrm||orpdXrm","xorpsXrm||xorpdXrm",
+"addpsXrm|addssXrm|addpdXrm|addsdXrm","mulpsXrm|mulssXrm|mulpdXrm|mulsdXrm",
+"cvtps2pdXrm|cvtss2sdXrm|cvtpd2psXrm|cvtsd2ssXrm",
+"cvtdq2psXrm|cvttps2dqXrm|cvtps2dqXrm",
+"subpsXrm|subssXrm|subpdXrm|subsdXrm","minpsXrm|minssXrm|minpdXrm|minsdXrm",
+"divpsXrm|divssXrm|divpdXrm|divsdXrm","maxpsXrm|maxssXrm|maxpdXrm|maxsdXrm",
+--6x
+"punpcklbwPrm","punpcklwdPrm","punpckldqPrm","packsswbPrm",
+"pcmpgtbPrm","pcmpgtwPrm","pcmpgtdPrm","packuswbPrm",
+"punpckhbwPrm","punpckhwdPrm","punpckhdqPrm","packssdwPrm",
+"||punpcklqdqXrm","||punpckhqdqXrm",
+"movPrVSm","movqMrm|movdquXrm|movdqaXrm",
+--7x
+"pshufwMrmu|pshufhwXrmu|pshufdXrmu|pshuflwXrmu","pshiftw!Pmu",
+"pshiftd!Pmu","pshiftq!Mmu||pshiftdq!Xmu",
+"pcmpeqbPrm","pcmpeqwPrm","pcmpeqdPrm","emms|",
+"vmreadUmr||extrqXmuu$|insertqXrmuu$","vmwriteUrm||extrqXrm$|insertqXrm$",
+nil,nil,
+"||haddpdXrm|haddpsXrm","||hsubpdXrm|hsubpsXrm",
+"movVSmMr|movqXrm|movVSmXr","movqMmr|movdquXmr|movdqaXmr",
+--8x
+"joVj","jnoVj","jbVj","jnbVj","jzVj","jnzVj","jbeVj","jaVj",
+"jsVj","jnsVj","jpeVj","jpoVj","jlVj","jgeVj","jleVj","jgVj",
+--9x
+"setoBm","setnoBm","setbBm","setnbBm","setzBm","setnzBm","setbeBm","setaBm",
+"setsBm","setnsBm","setpeBm","setpoBm","setlBm","setgeBm","setleBm","setgBm",
+--Ax
+"push fs","pop fs","cpuid","btVmr","shldVmru","shldVmrc",nil,nil,
+"push gs","pop gs","rsm","btsVmr","shrdVmru","shrdVmrc","fxsave!Dmp","imulVrm",
+--Bx
+"cmpxchgBmr","cmpxchgVmr","$lssVrm","btrVmr",
+"$lfsVrm","$lgsVrm","movzxVrBmt","movzxVrWmt",
+"|popcntVrm","ud2Dp","bt!Vmu","btcVmr",
+"bsfVrm","bsrVrm|lzcntVrm|bsrWrm","movsxVrBmt","movsxVrWmt",
+--Cx
+"xaddBmr","xaddVmr",
+"cmppsXrmu|cmpssXrmu|cmppdXrmu|cmpsdXrmu","$movntiVmr|",
+"pinsrwPrWmu","pextrwDrPmu",
+"shufpsXrmu||shufpdXrmu","$cmpxchg!Qmp",
+"bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR",
+--Dx
+"||addsubpdXrm|addsubpsXrm","psrlwPrm","psrldPrm","psrlqPrm",
+"paddqPrm","pmullwPrm",
+"|movq2dqXrMm|movqXmr|movdq2qMrXm$","pmovmskbVrMm||pmovmskbVrXm",
+"psubusbPrm","psubuswPrm","pminubPrm","pandPrm",
+"paddusbPrm","padduswPrm","pmaxubPrm","pandnPrm",
+--Ex
+"pavgbPrm","psrawPrm","psradPrm","pavgwPrm",
+"pmulhuwPrm","pmulhwPrm",
+"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","$movntqMmr||$movntdqXmr",
+"psubsbPrm","psubswPrm","pminswPrm","porPrm",
+"paddsbPrm","paddswPrm","pmaxswPrm","pxorPrm",
+--Fx
+"|||lddquXrm","psllwPrm","pslldPrm","psllqPrm",
+"pmuludqPrm","pmaddwdPrm","psadbwPrm","maskmovqMrm||maskmovdquXrm$",
+"psubbPrm","psubwPrm","psubdPrm","psubqPrm",
+"paddbPrm","paddwPrm","padddPrm","ud",
+}
+assert(map_opc2[255] == "ud")
+
+-- Map for three-byte opcodes. Can't wait for their next invention.
+local map_opc3 = {
+["38"] = { -- [66] 0f 38 xx
+--0x
+[0]="pshufbPrm","phaddwPrm","phadddPrm","phaddswPrm",
+"pmaddubswPrm","phsubwPrm","phsubdPrm","phsubswPrm",
+"psignbPrm","psignwPrm","psigndPrm","pmulhrswPrm",
+nil,nil,nil,nil,
+--1x
+"||pblendvbXrma",nil,nil,nil,
+"||blendvpsXrma","||blendvpdXrma",nil,"||ptestXrm",
+nil,nil,nil,nil,
+"pabsbPrm","pabswPrm","pabsdPrm",nil,
+--2x
+"||pmovsxbwXrm","||pmovsxbdXrm","||pmovsxbqXrm","||pmovsxwdXrm",
+"||pmovsxwqXrm","||pmovsxdqXrm",nil,nil,
+"||pmuldqXrm","||pcmpeqqXrm","||$movntdqaXrm","||packusdwXrm",
+nil,nil,nil,nil,
+--3x
+"||pmovzxbwXrm","||pmovzxbdXrm","||pmovzxbqXrm","||pmovzxwdXrm",
+"||pmovzxwqXrm","||pmovzxdqXrm",nil,"||pcmpgtqXrm",
+"||pminsbXrm","||pminsdXrm","||pminuwXrm","||pminudXrm",
+"||pmaxsbXrm","||pmaxsdXrm","||pmaxuwXrm","||pmaxudXrm",
+--4x
+"||pmulddXrm","||phminposuwXrm",
+--Fx
+[0xf0] = "|||crc32TrBmt",[0xf1] = "|||crc32TrVmt",
+},
+
+["3a"] = { -- [66] 0f 3a xx
+--0x
+[0x00]=nil,nil,nil,nil,nil,nil,nil,nil,
+"||roundpsXrmu","||roundpdXrmu","||roundssXrmu","||roundsdXrmu",
+"||blendpsXrmu","||blendpdXrmu","||pblendwXrmu","palignrPrmu",
+--1x
+nil,nil,nil,nil,
+"||pextrbVmXru","||pextrwVmXru","||pextrVmSXru","||extractpsVmXru",
+nil,nil,nil,nil,nil,nil,nil,nil,
+--2x
+"||pinsrbXrVmu","||insertpsXrmu","||pinsrXrVmuS",nil,
+--4x
+[0x40] = "||dppsXrmu",
+[0x41] = "||dppdXrmu",
+[0x42] = "||mpsadbwXrmu",
+--6x
+[0x60] = "||pcmpestrmXrmu",[0x61] = "||pcmpestriXrmu",
+[0x62] = "||pcmpistrmXrmu",[0x63] = "||pcmpistriXrmu",
+},
+}
+
+-- Map for VMX/SVM opcodes 0F 01 C0-FF (sgdt group with register operands).
+local map_opcvm = {
+[0xc1]="vmcall",[0xc2]="vmlaunch",[0xc3]="vmresume",[0xc4]="vmxoff",
+[0xc8]="monitor",[0xc9]="mwait",
+[0xd8]="vmrun",[0xd9]="vmmcall",[0xda]="vmload",[0xdb]="vmsave",
+[0xdc]="stgi",[0xdd]="clgi",[0xde]="skinit",[0xdf]="invlpga",
+[0xf8]="swapgs",[0xf9]="rdtscp",
+}
+
+-- Map for FP opcodes. And you thought stack machines are simple?
+local map_opcfp = {
+-- D8-DF 00-BF: opcodes with a memory operand.
+-- D8
+[0]="faddFm","fmulFm","fcomFm","fcompFm","fsubFm","fsubrFm","fdivFm","fdivrFm",
+"fldFm",nil,"fstFm","fstpFm","fldenvVm","fldcwWm","fnstenvVm","fnstcwWm",
+-- DA
+"fiaddDm","fimulDm","ficomDm","ficompDm",
+"fisubDm","fisubrDm","fidivDm","fidivrDm",
+-- DB
+"fildDm","fisttpDm","fistDm","fistpDm",nil,"fld twordFmp",nil,"fstp twordFmp",
+-- DC
+"faddGm","fmulGm","fcomGm","fcompGm","fsubGm","fsubrGm","fdivGm","fdivrGm",
+-- DD
+"fldGm","fisttpQm","fstGm","fstpGm","frstorDmp",nil,"fnsaveDmp","fnstswWm",
+-- DE
+"fiaddWm","fimulWm","ficomWm","ficompWm",
+"fisubWm","fisubrWm","fidivWm","fidivrWm",
+-- DF
+"fildWm","fisttpWm","fistWm","fistpWm",
+"fbld twordFmp","fildQm","fbstp twordFmp","fistpQm",
+-- xx C0-FF: opcodes with a pseudo-register operand.
+-- D8
+"faddFf","fmulFf","fcomFf","fcompFf","fsubFf","fsubrFf","fdivFf","fdivrFf",
+-- D9
+"fldFf","fxchFf",{"fnop"},nil,
+{"fchs","fabs",nil,nil,"ftst","fxam"},
+{"fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz"},
+{"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp"},
+{"fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"},
+-- DA
+"fcmovbFf","fcmoveFf","fcmovbeFf","fcmovuFf",nil,{nil,"fucompp"},nil,nil,
+-- DB
+"fcmovnbFf","fcmovneFf","fcmovnbeFf","fcmovnuFf",
+{nil,nil,"fnclex","fninit"},"fucomiFf","fcomiFf",nil,
+-- DC
+"fadd toFf","fmul toFf",nil,nil,
+"fsub toFf","fsubr toFf","fdivr toFf","fdiv toFf",
+-- DD
+"ffreeFf",nil,"fstFf","fstpFf","fucomFf","fucompFf",nil,nil,
+-- DE
+"faddpFf","fmulpFf",nil,{nil,"fcompp"},
+"fsubrpFf","fsubpFf","fdivrpFf","fdivpFf",
+-- DF
+nil,nil,nil,nil,{"fnstsw ax"},"fucomipFf","fcomipFf",nil,
+}
+assert(map_opcfp[126] == "fcomipFf")
+
+-- Map for opcode groups. The subkey is sp from the ModRM byte.
+local map_opcgroup = {
+ arith = { "add", "or", "adc", "sbb", "and", "sub", "xor", "cmp" },
+ shift = { "rol", "ror", "rcl", "rcr", "shl", "shr", "sal", "sar" },
+ testb = { "testBmi", "testBmi", "not", "neg", "mul", "imul", "div", "idiv" },
+ testv = { "testVmi", "testVmi", "not", "neg", "mul", "imul", "div", "idiv" },
+ incb = { "inc", "dec" },
+ incd = { "inc", "dec", "callUmp", "$call farDmp",
+ "jmpUmp", "$jmp farDmp", "pushUm" },
+ sldt = { "sldt", "str", "lldt", "ltr", "verr", "verw" },
+ sgdt = { "vm*$sgdt", "vm*$sidt", "$lgdt", "vm*$lidt",
+ "smsw", nil, "lmsw", "vm*$invlpg" },
+ bt = { nil, nil, nil, nil, "bt", "bts", "btr", "btc" },
+ cmpxchg = { nil, "sz*,cmpxchg8bQmp,cmpxchg16bXmp", nil, nil,
+ nil, nil, "vmptrld|vmxon|vmclear", "vmptrst" },
+ pshiftw = { nil, nil, "psrlw", nil, "psraw", nil, "psllw" },
+ pshiftd = { nil, nil, "psrld", nil, "psrad", nil, "pslld" },
+ pshiftq = { nil, nil, "psrlq", nil, nil, nil, "psllq" },
+ pshiftdq = { nil, nil, "psrlq", "psrldq", nil, nil, "psllq", "pslldq" },
+ fxsave = { "$fxsave", "$fxrstor", "$ldmxcsr", "$stmxcsr",
+ nil, "lfenceDp$", "mfenceDp$", "sfenceDp$clflush" },
+ prefetch = { "prefetch", "prefetchw" },
+ prefetcht = { "prefetchnta", "prefetcht0", "prefetcht1", "prefetcht2" },
+}
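+
+-- E.g. primary opcode 0x80 is "arith!Bmi": the '!' action (see map_act below)
+-- indexes map_opcgroup.arith with the sp field of the following ModRM byte,
+-- so sp == 7 decodes as "cmp" with a byte memory operand and an 8-bit
+-- immediate.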
+
+------------------------------------------------------------------------------
+
+-- Maps for register names.
+local map_regs = {
+ B = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh",
+ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" },
+ B64 = { "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
+ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" },
+ W = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di",
+ "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" },
+ D = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
+ "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" },
+ Q = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" },
+ M = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, -- No x64 ext!
+ X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" },
+}
+local map_segregs = { "es", "cs", "ss", "ds", "fs", "gs", "segr6", "segr7" }
+
+-- Maps for size names.
+local map_sz2n = {
+ B = 1, W = 2, D = 4, Q = 8, M = 8, X = 16,
+}
+local map_sz2prefix = {
+ B = "byte", W = "word", D = "dword",
+ Q = "qword",
+ M = "qword", X = "xword",
+ F = "dword", G = "qword", -- No need for sizes/register names for these two.
+}
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local code, pos, hex = ctx.code, ctx.pos, ""
+ local hmax = ctx.hexdump
+ if hmax > 0 then
+ for i=ctx.start,pos-1 do
+ hex = hex..format("%02X", byte(code, i, i))
+ end
+ if #hex > hmax then hex = sub(hex, 1, hmax)..". "
+ else hex = hex..rep(" ", hmax-#hex+2) end
+ end
+ if operands then text = text.." "..operands end
+ if ctx.o16 then text = "o16 "..text; ctx.o16 = false end
+ if ctx.a32 then text = "a32 "..text; ctx.a32 = false end
+ if ctx.rep then text = ctx.rep.." "..text; ctx.rep = false end
+ if ctx.rex then
+ local t = (ctx.rexw and "w" or "")..(ctx.rexr and "r" or "")..
+ (ctx.rexx and "x" or "")..(ctx.rexb and "b" or "")
+ if t ~= "" then text = "rex."..t.." "..text end
+ ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
+ ctx.rex = false
+ end
+ if ctx.seg then
+ local text2, n = gsub(text, "%[", "["..ctx.seg..":")
+ if n == 0 then text = ctx.seg.." "..text else text = text2 end
+ ctx.seg = false
+ end
+ if ctx.lock then text = "lock "..text; ctx.lock = false end
+ local imm = ctx.imm
+ if imm then
+ local sym = ctx.symtab[imm]
+ if sym then text = text.."\t->"..sym end
+ end
+ ctx.out(format("%08x %s%s\n", ctx.addr+ctx.start, hex, text))
+ ctx.mrm = false
+ ctx.start = pos
+ ctx.imm = nil
+end
+
+-- Clear all prefix flags.
+local function clearprefixes(ctx)
+ ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false
+ ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
+ ctx.rex = false; ctx.a32 = false
+end
+
+-- Fallback for incomplete opcodes at the end.
+local function incomplete(ctx)
+ ctx.pos = ctx.stop+1
+ clearprefixes(ctx)
+ return putop(ctx, "(incomplete)")
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ clearprefixes(ctx)
+ return putop(ctx, "(unknown)")
+end
+
+-- Return an immediate of the specified size.
+local function getimm(ctx, pos, n)
+ if pos+n-1 > ctx.stop then return incomplete(ctx) end
+ local code = ctx.code
+ if n == 1 then
+ local b1 = byte(code, pos, pos)
+ return b1
+ elseif n == 2 then
+ local b1, b2 = byte(code, pos, pos+1)
+ return b1+b2*256
+ else
+ local b1, b2, b3, b4 = byte(code, pos, pos+3)
+ local imm = b1+b2*256+b3*65536+b4*16777216
+ ctx.imm = imm
+ return imm
+ end
+end
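+
+-- E.g. the bytes 0x34 0x12 read with n == 2 give the little-endian immediate
+-- 0x1234; 4-byte immediates are also recorded in ctx.imm so putop can look
+-- them up in the symbol table.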
+
+-- Process pattern string and generate the operands.
+local function putpat(ctx, name, pat)
+ local operands, regs, sz, mode, sp, rm, sc, rx, sdisp
+ local code, pos, stop = ctx.code, ctx.pos, ctx.stop
+
+ -- Chars used: 1DFGIMPQRSTUVWXacdfgijmoprstuwxyz
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "V" or p == "U" then
+ if ctx.rexw then sz = "Q"; ctx.rexw = false
+ elseif ctx.o16 then sz = "W"; ctx.o16 = false
+ elseif p == "U" and ctx.x64 then sz = "Q"
+ else sz = "D" end
+ regs = map_regs[sz]
+ elseif p == "T" then
+ if ctx.rexw then sz = "Q"; ctx.rexw = false else sz = "D" end
+ regs = map_regs[sz]
+ elseif p == "B" then
+ sz = "B"
+ regs = ctx.rex and map_regs.B64 or map_regs.B
+ elseif match(p, "[WDQMXFG]") then
+ sz = p
+ regs = map_regs[sz]
+ elseif p == "P" then
+ sz = ctx.o16 and "X" or "M"; ctx.o16 = false
+ regs = map_regs[sz]
+ elseif p == "S" then
+ name = name..lower(sz)
+ elseif p == "s" then
+ local imm = getimm(ctx, pos, 1); if not imm then return end
+ x = imm <= 127 and format("+0x%02x", imm)
+ or format("-0x%02x", 256-imm)
+ pos = pos+1
+ elseif p == "u" then
+ local imm = getimm(ctx, pos, 1); if not imm then return end
+ x = format("0x%02x", imm)
+ pos = pos+1
+ elseif p == "w" then
+ local imm = getimm(ctx, pos, 2); if not imm then return end
+ x = format("0x%x", imm)
+ pos = pos+2
+ elseif p == "o" then -- [offset]
+ if ctx.x64 then
+ local imm1 = getimm(ctx, pos, 4); if not imm1 then return end
+ local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end
+ x = format("[0x%08x%08x]", imm2, imm1)
+ pos = pos+8
+ else
+ local imm = getimm(ctx, pos, 4); if not imm then return end
+ x = format("[0x%08x]", imm)
+ pos = pos+4
+ end
+ elseif p == "i" or p == "I" then
+ local n = map_sz2n[sz]
+ if n == 8 and ctx.x64 and p == "I" then
+ local imm1 = getimm(ctx, pos, 4); if not imm1 then return end
+ local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end
+ x = format("0x%08x%08x", imm2, imm1)
+ else
+ if n == 8 then n = 4 end
+ local imm = getimm(ctx, pos, n); if not imm then return end
+ if sz == "Q" and (imm < 0 or imm > 0x7fffffff) then
+ imm = (0xffffffff+1)-imm
+ x = format(imm > 65535 and "-0x%08x" or "-0x%x", imm)
+ else
+ x = format(imm > 65535 and "0x%08x" or "0x%x", imm)
+ end
+ end
+ pos = pos+n
+ elseif p == "j" then
+ local n = map_sz2n[sz]
+ if n == 8 then n = 4 end
+ local imm = getimm(ctx, pos, n); if not imm then return end
+ if sz == "B" and imm > 127 then imm = imm-256
+ elseif imm > 2147483647 then imm = imm-4294967296 end
+ pos = pos+n
+ imm = imm + pos + ctx.addr
+ if imm > 4294967295 and not ctx.x64 then imm = imm-4294967296 end
+ ctx.imm = imm
+ if sz == "W" then
+ x = format("word 0x%04x", imm%65536)
+ elseif ctx.x64 then
+ local lo = imm % 0x1000000
+ x = format("0x%02x%06x", (imm-lo) / 0x1000000, lo)
+ else
+ x = format("0x%08x", imm)
+ end
+ elseif p == "R" then
+ local r = byte(code, pos-1, pos-1)%8
+ if ctx.rexb then r = r + 8; ctx.rexb = false end
+ x = regs[r+1]
+ elseif p == "a" then x = regs[1]
+ elseif p == "c" then x = "cl"
+ elseif p == "d" then x = "dx"
+ elseif p == "1" then x = "1"
+ else
+ if not mode then
+ mode = ctx.mrm
+ if not mode then
+ if pos > stop then return incomplete(ctx) end
+ mode = byte(code, pos, pos)
+ pos = pos+1
+ end
+ rm = mode%8; mode = (mode-rm)/8
+ sp = mode%8; mode = (mode-sp)/8
+ sdisp = ""
+ if mode < 3 then
+ if rm == 4 then
+ if pos > stop then return incomplete(ctx) end
+ sc = byte(code, pos, pos)
+ pos = pos+1
+ rm = sc%8; sc = (sc-rm)/8
+ rx = sc%8; sc = (sc-rx)/8
+ if ctx.rexx then rx = rx + 8; ctx.rexx = false end
+ if rx == 4 then rx = nil end
+ end
+ if mode > 0 or rm == 5 then
+ local dsz = mode
+ if dsz ~= 1 then dsz = 4 end
+ local disp = getimm(ctx, pos, dsz); if not disp then return end
+ if mode == 0 then rm = nil end
+ if rm or rx or (not sc and ctx.x64 and not ctx.a32) then
+ if dsz == 1 and disp > 127 then
+ sdisp = format("-0x%x", 256-disp)
+ elseif disp >= 0 and disp <= 0x7fffffff then
+ sdisp = format("+0x%x", disp)
+ else
+ sdisp = format("-0x%x", (0xffffffff+1)-disp)
+ end
+ else
+ sdisp = format(ctx.x64 and not ctx.a32 and
+ not (disp >= 0 and disp <= 0x7fffffff)
+ and "0xffffffff%08x" or "0x%08x", disp)
+ end
+ pos = pos+dsz
+ end
+ end
+ if rm and ctx.rexb then rm = rm + 8; ctx.rexb = false end
+ if ctx.rexr then sp = sp + 8; ctx.rexr = false end
+ end
+ if p == "m" then
+ if mode == 3 then x = regs[rm+1]
+ else
+ local aregs = ctx.a32 and map_regs.D or ctx.aregs
+ local srm, srx = "", ""
+ if rm then srm = aregs[rm+1]
+ elseif not sc and ctx.x64 and not ctx.a32 then srm = "rip" end
+ ctx.a32 = false
+ if rx then
+ if rm then srm = srm.."+" end
+ srx = aregs[rx+1]
+ if sc > 0 then srx = srx.."*"..(2^sc) end
+ end
+ x = format("[%s%s%s]", srm, srx, sdisp)
+ end
+ if mode < 3 and
+ (not match(pat, "[aRrgp]") or match(pat, "t")) then -- Yuck.
+ x = map_sz2prefix[sz].." "..x
+ end
+ elseif p == "r" then x = regs[sp+1]
+ elseif p == "g" then x = map_segregs[sp+1]
+ elseif p == "p" then -- Suppress prefix.
+ elseif p == "f" then x = "st"..rm
+ elseif p == "x" then
+ if sp == 0 and ctx.lock and not ctx.x64 then
+ x = "CR8"; ctx.lock = false
+ else
+ x = "CR"..sp
+ end
+ elseif p == "y" then x = "DR"..sp
+ elseif p == "z" then x = "TR"..sp
+ elseif p == "t" then
+ else
+ error("bad pattern `"..pat.."'")
+ end
+ end
+ if x then operands = operands and operands..", "..x or x end
+ end
+ ctx.pos = pos
+ return putop(ctx, name, operands)
+end
+
+-- Forward declaration.
+local map_act
+
+-- Fetch and cache MRM byte.
+local function getmrm(ctx)
+ local mrm = ctx.mrm
+ if not mrm then
+ local pos = ctx.pos
+ if pos > ctx.stop then return nil end
+ mrm = byte(ctx.code, pos, pos)
+ ctx.pos = pos+1
+ ctx.mrm = mrm
+ end
+ return mrm
+end
+
+-- Dispatch to handler depending on pattern.
+local function dispatch(ctx, opat, patgrp)
+ if not opat then return unknown(ctx) end
+ if match(opat, "%|") then -- MMX/SSE variants depending on prefix.
+ local p
+ if ctx.rep then
+ p = ctx.rep=="rep" and "%|([^%|]*)" or "%|[^%|]*%|[^%|]*%|([^%|]*)"
+ ctx.rep = false
+ elseif ctx.o16 then p = "%|[^%|]*%|([^%|]*)"; ctx.o16 = false
+ else p = "^[^%|]*" end
+ opat = match(opat, p)
+ if not opat then return unknown(ctx) end
+-- ctx.rep = false; ctx.o16 = false
+ --XXX fails for 66 f2 0f 38 f1 06 crc32 eax,WORD PTR [esi]
+ --XXX remove in branches?
+ end
+ if match(opat, "%$") then -- reg$mem variants.
+ local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
+ opat = match(opat, mrm >= 192 and "^[^%$]*" or "%$(.*)")
+ if opat == "" then return unknown(ctx) end
+ end
+ if opat == "" then return unknown(ctx) end
+ local name, pat = match(opat, "^([a-z0-9 ]*)(.*)")
+ if pat == "" and patgrp then pat = patgrp end
+ return map_act[sub(pat, 1, 1)](ctx, name, pat)
+end
+
+-- Get a pattern from an opcode map and dispatch to handler.
+local function dispatchmap(ctx, opcmap)
+ local pos = ctx.pos
+ local opat = opcmap[byte(ctx.code, pos, pos)]
+ pos = pos + 1
+ ctx.pos = pos
+ return dispatch(ctx, opat)
+end
+
+-- Map for action codes. The key is the first char after the name.
+map_act = {
+ -- Simple opcodes without operands.
+ [""] = function(ctx, name, pat)
+ return putop(ctx, name)
+ end,
+
+ -- Operand size chars fall right through.
+ B = putpat, W = putpat, D = putpat, Q = putpat,
+ V = putpat, U = putpat, T = putpat,
+ M = putpat, X = putpat, P = putpat,
+ F = putpat, G = putpat,
+
+ -- Collect prefixes.
+ [":"] = function(ctx, name, pat)
+ ctx[pat == ":" and name or sub(pat, 2)] = name
+ if ctx.pos - ctx.start > 5 then return unknown(ctx) end -- Limit #prefixes.
+ end,
+
+ -- Chain to special handler specified by name.
+ ["*"] = function(ctx, name, pat)
+ return map_act[name](ctx, name, sub(pat, 2))
+ end,
+
+ -- Use named subtable for opcode group.
+ ["!"] = function(ctx, name, pat)
+ local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
+ return dispatch(ctx, map_opcgroup[name][((mrm-(mrm%8))/8)%8+1], sub(pat, 2))
+ end,
+
+ -- o16,o32[,o64] variants.
+ sz = function(ctx, name, pat)
+ if ctx.o16 then ctx.o16 = false
+ else
+ pat = match(pat, ",(.*)")
+ if ctx.rexw then
+ local p = match(pat, ",(.*)")
+ if p then pat = p; ctx.rexw = false end
+ end
+ end
+ pat = match(pat, "^[^,]*")
+ return dispatch(ctx, pat)
+ end,
+
+ -- Two-byte opcode dispatch.
+ opc2 = function(ctx, name, pat)
+ return dispatchmap(ctx, map_opc2)
+ end,
+
+ -- Three-byte opcode dispatch.
+ opc3 = function(ctx, name, pat)
+ return dispatchmap(ctx, map_opc3[pat])
+ end,
+
+ -- VMX/SVM dispatch.
+ vm = function(ctx, name, pat)
+ return dispatch(ctx, map_opcvm[ctx.mrm])
+ end,
+
+ -- Floating point opcode dispatch.
+ fp = function(ctx, name, pat)
+ local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
+ local rm = mrm%8
+ local idx = pat*8 + ((mrm-rm)/8)%8
+ if mrm >= 192 then idx = idx + 64 end
+ local opat = map_opcfp[idx]
+ if type(opat) == "table" then opat = opat[rm+1] end
+ return dispatch(ctx, opat)
+ end,
+
+ -- REX prefix.
+ rex = function(ctx, name, pat)
+ if ctx.rex then return unknown(ctx) end -- Only 1 REX prefix allowed.
+ for p in gmatch(pat, ".") do ctx["rex"..p] = true end
+ ctx.rex = true
+ end,
+
+ -- Special case for nop with REX prefix.
+ nop = function(ctx, name, pat)
+ return dispatch(ctx, ctx.rex and pat or "nop")
+ end,
+}
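+
+-- E.g. the pattern "movVmr" splits into the name "mov" and the pattern "Vmr";
+-- its first char 'V' selects putpat above, which sizes the operands and then
+-- decodes 'm' (the ModRM register/memory operand) and 'r' (the ModRM register
+-- field).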
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ ofs = ofs + 1
+ ctx.start = ofs
+ ctx.pos = ofs
+ ctx.stop = stop
+ ctx.imm = nil
+ ctx.mrm = false
+ clearprefixes(ctx)
+ while ctx.pos <= stop do dispatchmap(ctx, ctx.map1) end
+ if ctx.pos ~= ctx.start then incomplete(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create_(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = (addr or 0) - 1
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 16
+ ctx.x64 = false
+ ctx.map1 = map_opc1_32
+ ctx.aregs = map_regs.D
+ return ctx
+end
+
+local function create64_(code, addr, out)
+ local ctx = create_(code, addr, out)
+ ctx.x64 = true
+ ctx.map1 = map_opc1_64
+ ctx.aregs = map_regs.Q
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass_(code, addr, out)
+ create_(code, addr, out):disass()
+end
+
+local function disass64_(code, addr, out)
+ create64_(code, addr, out):disass()
+end
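+
+-- Usage sketch (illustrative; assumes the modules are installed as
+-- jit.dis_x86 / jit.dis_x64, and that `code` and `addr` hold a machine code
+-- string and its address):
+--
+--   local dis = require("jit.dis_x64")
+--   dis.disass("\85\72\137\229", 0)   -- should print "push rbp" / "mov rbp, rsp"
+--
+--   local ctx = dis.create(code, addr, io.write)
+--   ctx.hexdump = 0
+--   ctx:disass(0, #code)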
+
+-- Return register name for RID.
+local function regname_(r)
+ if r < 8 then return map_regs.D[r+1] end
+ return map_regs.X[r-7]
+end
+
+local function regname64_(r)
+ if r < 16 then return map_regs.Q[r+1] end
+ return map_regs.X[r-15]
+end
+
+-- Public module functions.
+module(...)
+
+create = create_
+create64 = create64_
+disass = disass_
+disass64 = disass64_
+regname = regname_
+regname64 = regname64_
+
diff --git a/src/LuaJIT/lib/dump.lua b/src/LuaJIT/lib/dump.lua
new file mode 100644
index 000000000..3d62c4eae
--- /dev/null
+++ b/src/LuaJIT/lib/dump.lua
@@ -0,0 +1,696 @@
+----------------------------------------------------------------------------
+-- LuaJIT compiler dump module.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module can be used to debug the JIT compiler itself. It dumps the
+-- code representations and structures used in various compiler stages.
+--
+-- Example usage:
+--
+-- luajit -jdump -e "local x=0; for i=1,1e6 do x=x+i end; print(x)"
+-- luajit -jdump=im -e "for i=1,1000 do for j=1,1000 do end end" | less -R
+-- luajit -jdump=is myapp.lua | less -R
+-- luajit -jdump=-b myapp.lua
+-- luajit -jdump=+aH,myapp.html myapp.lua
+-- luajit -jdump=ixT,myapp.dump myapp.lua
+--
+-- The first argument specifies the dump mode. The second argument gives
+-- the output file name. Default output is to stdout, unless the environment
+-- variable LUAJIT_DUMPFILE is set. The file is overwritten every time the
+-- module is started.
+--
+-- Different features can be turned on or off with the dump mode. If the
+-- mode starts with a '+', the following features are added to the default
+-- set of features; a '-' removes them. Otherwise the features are replaced.
+--
+-- The following dump features are available (* marks the default):
+--
+-- * t Print a line for each started, ended or aborted trace (see also -jv).
+-- * b Dump the traced bytecode.
+-- * i Dump the IR (intermediate representation).
+-- r Augment the IR with register/stack slots.
+-- s Dump the snapshot map.
+-- * m Dump the generated machine code.
+-- x Print each taken trace exit.
+-- X Print each taken trace exit and the contents of all registers.
+--
+-- The output format can be set with the following characters:
+--
+-- T Plain text output.
+-- A ANSI-colored text output
+-- H Colorized HTML + CSS output.
+--
+-- The default output format is plain text. It's set to ANSI-colored text
+-- if the COLORTERM variable is set. Note: this is independent of any output
+-- redirection, which is actually considered a feature.
+--
+-- You probably want to use less -R to enjoy viewing ANSI-colored text from
+-- a pipe or a file. Add this to your ~/.bashrc: export LESS="-R"
+--
+------------------------------------------------------------------------------
+
+-- Cache some library functions and objects.
+local jit = require("jit")
+assert(jit.version_num == 20000, "LuaJIT core/library version mismatch")
+local jutil = require("jit.util")
+local vmdef = require("jit.vmdef")
+local funcinfo, funcbc = jutil.funcinfo, jutil.funcbc
+local traceinfo, traceir, tracek = jutil.traceinfo, jutil.traceir, jutil.tracek
+local tracemc, tracesnap = jutil.tracemc, jutil.tracesnap
+local traceexitstub, ircalladdr = jutil.traceexitstub, jutil.ircalladdr
+local bit = require("bit")
+local band, shl, shr = bit.band, bit.lshift, bit.rshift
+local sub, gsub, format = string.sub, string.gsub, string.format
+local byte, char, rep = string.byte, string.char, string.rep
+local type, tostring = type, tostring
+local stdout, stderr = io.stdout, io.stderr
+
+-- Load other modules on-demand.
+local bcline, disass
+
+-- Active flag, output file handle and dump mode.
+local active, out, dumpmode
+
+------------------------------------------------------------------------------
+
+local symtabmt = { __index = false }
+local symtab = {}
+local nexitsym = 0
+
+-- Fill nested symbol table with per-trace exit stub addresses.
+local function fillsymtab_tr(tr, nexit)
+ local t = {}
+ symtabmt.__index = t
+ if jit.arch == "mips" or jit.arch == "mipsel" then
+ t[traceexitstub(tr, 0)] = "exit"
+ return
+ end
+ for i=0,nexit-1 do
+ local addr = traceexitstub(tr, i)
+ t[addr] = tostring(i)
+ end
+ local addr = traceexitstub(tr, nexit)
+ if addr then t[addr] = "stack_check" end
+end
+
+-- Fill symbol table with trace exit stub addresses.
+local function fillsymtab(tr, nexit)
+ local t = symtab
+ if nexitsym == 0 then
+ local ircall = vmdef.ircall
+ for i=0,#ircall do
+ local addr = ircalladdr(i)
+ if addr ~= 0 then t[addr] = ircall[i] end
+ end
+ end
+ if nexitsym == 1000000 then -- Per-trace exit stubs.
+ fillsymtab_tr(tr, nexit)
+ elseif nexit > nexitsym then -- Shared exit stubs.
+ for i=nexitsym,nexit-1 do
+ local addr = traceexitstub(i)
+ if addr == nil then -- Fall back to per-trace exit stubs.
+ fillsymtab_tr(tr, nexit)
+ setmetatable(symtab, symtabmt)
+ nexit = 1000000
+ break
+ end
+ t[addr] = tostring(i)
+ end
+ nexitsym = nexit
+ end
+ return t
+end
+
+local function dumpwrite(s)
+ out:write(s)
+end
+
+-- Disassemble machine code.
+local function dump_mcode(tr)
+ local info = traceinfo(tr)
+ if not info then return end
+ local mcode, addr, loop = tracemc(tr)
+ if not mcode then return end
+ if not disass then disass = require("jit.dis_"..jit.arch) end
+ out:write("---- TRACE ", tr, " mcode ", #mcode, "\n")
+ local ctx = disass.create(mcode, addr, dumpwrite)
+ ctx.hexdump = 0
+ ctx.symtab = fillsymtab(tr, info.nexit)
+ if loop ~= 0 then
+ symtab[addr+loop] = "LOOP"
+ ctx:disass(0, loop)
+ out:write("->LOOP:\n")
+ ctx:disass(loop, #mcode-loop)
+ symtab[addr+loop] = nil
+ else
+ ctx:disass(0, #mcode)
+ end
+end
+
+------------------------------------------------------------------------------
+
+local irtype_text = {
+ [0] = "nil",
+ "fal",
+ "tru",
+ "lud",
+ "str",
+ "p32",
+ "thr",
+ "pro",
+ "fun",
+ "p64",
+ "cdt",
+ "tab",
+ "udt",
+ "flt",
+ "num",
+ "i8 ",
+ "u8 ",
+ "i16",
+ "u16",
+ "int",
+ "u32",
+ "i64",
+ "u64",
+ "sfp",
+}
+
+local colortype_ansi = {
+ [0] = "%s",
+ "%s",
+ "%s",
+ "\027[36m%s\027[m",
+ "\027[32m%s\027[m",
+ "%s",
+ "\027[1m%s\027[m",
+ "%s",
+ "\027[1m%s\027[m",
+ "%s",
+ "\027[33m%s\027[m",
+ "\027[31m%s\027[m",
+ "\027[36m%s\027[m",
+ "\027[34m%s\027[m",
+ "\027[34m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+}
+
+local function colorize_text(s, t)
+ return s
+end
+
+local function colorize_ansi(s, t)
+ return format(colortype_ansi[t], s)
+end
+
+local irtype_ansi = setmetatable({},
+ { __index = function(tab, t)
+ local s = colorize_ansi(irtype_text[t], t); tab[t] = s; return s; end })
+
+local html_escape = { ["<"] = "&lt;", [">"] = "&gt;", ["&"] = "&amp;", }
+
+local function colorize_html(s, t)
+ s = gsub(s, "[<>&]", html_escape)
+  return format('<span class="irt_%s">%s</span>', irtype_text[t], s)
+end
+
+local irtype_html = setmetatable({},
+ { __index = function(tab, t)
+ local s = colorize_html(irtype_text[t], t); tab[t] = s; return s; end })
+
+local header_html = [[
+
+]]
+
+local colorize, irtype
+
+-- Lookup tables to convert some literals into names.
+local litname = {
+ ["SLOAD "] = setmetatable({}, { __index = function(t, mode)
+ local s = ""
+ if band(mode, 1) ~= 0 then s = s.."P" end
+ if band(mode, 2) ~= 0 then s = s.."F" end
+ if band(mode, 4) ~= 0 then s = s.."T" end
+ if band(mode, 8) ~= 0 then s = s.."C" end
+ if band(mode, 16) ~= 0 then s = s.."R" end
+ if band(mode, 32) ~= 0 then s = s.."I" end
+ t[mode] = s
+ return s
+ end}),
+ ["XLOAD "] = { [0] = "", "R", "V", "RV", "U", "RU", "VU", "RVU", },
+ ["CONV "] = setmetatable({}, { __index = function(t, mode)
+ local s = irtype[band(mode, 31)]
+ s = irtype[band(shr(mode, 5), 31)].."."..s
+ if band(mode, 0x400) ~= 0 then s = s.." trunc"
+ elseif band(mode, 0x800) ~= 0 then s = s.." sext" end
+ local c = shr(mode, 14)
+ if c == 2 then s = s.." index" elseif c == 3 then s = s.." check" end
+ t[mode] = s
+ return s
+ end}),
+ ["FLOAD "] = vmdef.irfield,
+ ["FREF "] = vmdef.irfield,
+ ["FPMATH"] = vmdef.irfpm,
+}
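+
+-- E.g. an "SLOAD " literal of 5 is rendered as "PT" (bits 1 and 4 set), an
+-- "XLOAD " literal of 3 as "RV"; "FLOAD " and "FREF " literals are resolved
+-- via vmdef.irfield.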
+
+local function ctlsub(c)
+ if c == "\n" then return "\\n"
+ elseif c == "\r" then return "\\r"
+ elseif c == "\t" then return "\\t"
+ elseif c == "\r" then return "\\r"
+ else return format("\\%03d", byte(c))
+ end
+end
+
+local function fmtfunc(func, pc)
+ local fi = funcinfo(func, pc)
+ if fi.loc then
+ return fi.loc
+ elseif fi.ffid then
+ return vmdef.ffnames[fi.ffid]
+ elseif fi.addr then
+ return format("C:%x", fi.addr)
+ else
+ return "(?)"
+ end
+end
+
+local function formatk(tr, idx)
+ local k, t, slot = tracek(tr, idx)
+ local tn = type(k)
+ local s
+ if tn == "number" then
+ if k == 2^52+2^51 then
+ s = "bias"
+ else
+ s = format("%+.14g", k)
+ end
+ elseif tn == "string" then
+ s = format(#k > 20 and '"%.20s"~' or '"%s"', gsub(k, "%c", ctlsub))
+ elseif tn == "function" then
+ s = fmtfunc(k)
+ elseif tn == "table" then
+ s = format("{%p}", k)
+ elseif tn == "userdata" then
+ if t == 12 then
+ s = format("userdata:%p", k)
+ else
+ s = format("[%p]", k)
+ if s == "[0x00000000]" then s = "NULL" end
+ end
+ elseif t == 21 then -- int64_t
+ s = sub(tostring(k), 1, -3)
+ if sub(s, 1, 1) ~= "-" then s = "+"..s end
+ else
+ s = tostring(k) -- For primitives.
+ end
+ s = colorize(format("%-4s", s), t)
+ if slot then
+ s = format("%s @%d", s, slot)
+ end
+ return s
+end
+
+local function printsnap(tr, snap)
+ local n = 2
+ for s=0,snap[1]-1 do
+ local sn = snap[n]
+ if shr(sn, 24) == s then
+ n = n + 1
+ local ref = band(sn, 0xffff) - 0x8000 -- REF_BIAS
+ if ref < 0 then
+ out:write(formatk(tr, ref))
+ elseif band(sn, 0x80000) ~= 0 then -- SNAP_SOFTFPNUM
+ out:write(colorize(format("%04d/%04d", ref, ref+1), 14))
+ else
+ local m, ot, op1, op2 = traceir(tr, ref)
+ out:write(colorize(format("%04d", ref), band(ot, 31)))
+ end
+ out:write(band(sn, 0x10000) == 0 and " " or "|") -- SNAP_FRAME
+ else
+ out:write("---- ")
+ end
+ end
+ out:write("]\n")
+end
+
+-- Dump snapshots (not interleaved with IR).
+local function dump_snap(tr)
+ out:write("---- TRACE ", tr, " snapshots\n")
+ for i=0,1000000000 do
+ local snap = tracesnap(tr, i)
+ if not snap then break end
+ out:write(format("#%-3d %04d [ ", i, snap[0]))
+ printsnap(tr, snap)
+ end
+end
+
+-- Return a register name or stack slot for a rid/sp location.
+local function ridsp_name(ridsp)
+ if not disass then disass = require("jit.dis_"..jit.arch) end
+ local rid = band(ridsp, 0xff)
+ if ridsp > 255 then return format("[%x]", shr(ridsp, 8)*4) end
+ if rid < 128 then return disass.regname(rid) end
+ return ""
+end
+
+-- Dump CALL* function ref and return optional ctype.
+local function dumpcallfunc(tr, ins)
+ local ctype
+ if ins > 0 then
+ local m, ot, op1, op2 = traceir(tr, ins)
+ if band(ot, 31) == 0 then -- nil type means CARG(func, ctype).
+ ins = op1
+ ctype = formatk(tr, op2)
+ end
+ end
+ if ins < 0 then
+ out:write(format("[0x%x](", tonumber((tracek(tr, ins)))))
+ else
+ out:write(format("%04d (", ins))
+ end
+ return ctype
+end
+
+-- Recursively gather CALL* args and dump them.
+local function dumpcallargs(tr, ins)
+ if ins < 0 then
+ out:write(formatk(tr, ins))
+ else
+ local m, ot, op1, op2 = traceir(tr, ins)
+ local oidx = 6*shr(ot, 8)
+ local op = sub(vmdef.irnames, oidx+1, oidx+6)
+ if op == "CARG " then
+ dumpcallargs(tr, op1)
+ if op2 < 0 then
+ out:write(" ", formatk(tr, op2))
+ else
+ out:write(" ", format("%04d", op2))
+ end
+ else
+ out:write(format("%04d", ins))
+ end
+ end
+end
+
+-- Dump IR and interleaved snapshots.
+local function dump_ir(tr, dumpsnap, dumpreg)
+ local info = traceinfo(tr)
+ if not info then return end
+ local nins = info.nins
+ out:write("---- TRACE ", tr, " IR\n")
+ local irnames = vmdef.irnames
+ local snapref = 65536
+ local snap, snapno
+ if dumpsnap then
+ snap = tracesnap(tr, 0)
+ snapref = snap[0]
+ snapno = 0
+ end
+ for ins=1,nins do
+ if ins >= snapref then
+ if dumpreg then
+ out:write(format(".... SNAP #%-3d [ ", snapno))
+ else
+ out:write(format(".... SNAP #%-3d [ ", snapno))
+ end
+ printsnap(tr, snap)
+ snapno = snapno + 1
+ snap = tracesnap(tr, snapno)
+ snapref = snap and snap[0] or 65536
+ end
+ local m, ot, op1, op2, ridsp = traceir(tr, ins)
+ local oidx, t = 6*shr(ot, 8), band(ot, 31)
+ local op = sub(irnames, oidx+1, oidx+6)
+ if op == "LOOP " then
+ if dumpreg then
+ out:write(format("%04d ------------ LOOP ------------\n", ins))
+ else
+ out:write(format("%04d ------ LOOP ------------\n", ins))
+ end
+ elseif op ~= "NOP " and op ~= "CARG " and
+ (dumpreg or op ~= "RENAME") then
+ if dumpreg then
+ out:write(format("%04d %-5s ", ins, ridsp_name(ridsp)))
+ else
+ out:write(format("%04d ", ins))
+ end
+ out:write(format("%s%s %s %s ",
+ band(ot, 128) == 0 and " " or ">",
+ band(ot, 64) == 0 and " " or "+",
+ irtype[t], op))
+ local m1, m2 = band(m, 3), band(m, 3*4)
+ if sub(op, 1, 4) == "CALL" then
+ local ctype
+ if m2 == 1*4 then -- op2 == IRMlit
+ out:write(format("%-10s (", vmdef.ircall[op2]))
+ else
+ ctype = dumpcallfunc(tr, op2)
+ end
+ if op1 ~= -1 then dumpcallargs(tr, op1) end
+ out:write(")")
+ if ctype then out:write(" ctype ", ctype) end
+ elseif op == "CNEW " and op2 == -1 then
+ out:write(formatk(tr, op1))
+ elseif m1 ~= 3 then -- op1 != IRMnone
+ if op1 < 0 then
+ out:write(formatk(tr, op1))
+ else
+ out:write(format(m1 == 0 and "%04d" or "#%-3d", op1))
+ end
+ if m2 ~= 3*4 then -- op2 != IRMnone
+ if m2 == 1*4 then -- op2 == IRMlit
+ local litn = litname[op]
+ if litn and litn[op2] then
+ out:write(" ", litn[op2])
+ elseif op == "UREFO " or op == "UREFC " then
+ out:write(format(" #%-3d", shr(op2, 8)))
+ else
+ out:write(format(" #%-3d", op2))
+ end
+ elseif op2 < 0 then
+ out:write(" ", formatk(tr, op2))
+ else
+ out:write(format(" %04d", op2))
+ end
+ end
+ end
+ out:write("\n")
+ end
+ end
+ if snap then
+ if dumpreg then
+ out:write(format(".... SNAP #%-3d [ ", snapno))
+ else
+ out:write(format(".... SNAP #%-3d [ ", snapno))
+ end
+ printsnap(tr, snap)
+ end
+end
+
+------------------------------------------------------------------------------
+
+local recprefix = ""
+local recdepth = 0
+
+-- Format trace error message.
+local function fmterr(err, info)
+ if type(err) == "number" then
+ if type(info) == "function" then info = fmtfunc(info) end
+ err = format(vmdef.traceerr[err], info)
+ end
+ return err
+end
+
+-- Dump trace states.
+local function dump_trace(what, tr, func, pc, otr, oex)
+ if what == "stop" or (what == "abort" and dumpmode.a) then
+ if dumpmode.i then dump_ir(tr, dumpmode.s, dumpmode.r and what == "stop")
+ elseif dumpmode.s then dump_snap(tr) end
+ if dumpmode.m then dump_mcode(tr) end
+ end
+ if what == "start" then
+ if dumpmode.H then out:write('<pre class="ljdump">\n') end
+ out:write("---- TRACE ", tr, " ", what)
+ if otr then out:write(" ", otr, "/", oex) end
+ out:write(" ", fmtfunc(func, pc), "\n")
+ recprefix = ""
+ elseif what == "stop" or what == "abort" then
+ out:write("---- TRACE ", tr, " ", what)
+ recprefix = nil
+ if what == "abort" then
+ out:write(" ", fmtfunc(func, pc), " -- ", fmterr(otr, oex), "\n")
+ else
+ local info = traceinfo(tr)
+ local link, ltype = info.link, info.linktype
+ if link == tr or link == 0 then
+ out:write(" -> ", ltype, "\n")
+ elseif ltype == "root" then
+ out:write(" -> ", link, "\n")
+ else
+ out:write(" -> ", link, " ", ltype, "\n")
+ end
+ end
+ if dumpmode.H then out:write("</pre>\n\n") else out:write("\n") end
+ else
+ out:write("---- TRACE ", what, "\n\n")
+ end
+ out:flush()
+end
+
+-- Dump recorded bytecode.
+local function dump_record(tr, func, pc, depth, callee)
+ if depth ~= recdepth then
+ recdepth = depth
+ recprefix = rep(" .", depth)
+ end
+ local line
+ if pc >= 0 then
+ line = bcline(func, pc, recprefix)
+ if dumpmode.H then line = gsub(line, "[<>&]", html_escape) end
+ else
+ line = "0000 "..recprefix.." FUNCC \n"
+ callee = func
+ end
+ if pc <= 0 then
+ out:write(sub(line, 1, -2), " ; ", fmtfunc(func), "\n")
+ else
+ out:write(line)
+ end
+ if pc >= 0 and band(funcbc(func, pc), 0xff) < 16 then -- ORDER BC
+ out:write(bcline(func, pc+1, recprefix)) -- Write JMP for cond.
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Dump taken trace exits.
+local function dump_texit(tr, ex, ngpr, nfpr, ...)
+ out:write("---- TRACE ", tr, " exit ", ex, "\n")
+ if dumpmode.X then
+ local regs = {...}
+ if jit.arch == "x64" then
+ for i=1,ngpr do
+ out:write(format(" %016x", regs[i]))
+ if i % 4 == 0 then out:write("\n") end
+ end
+ else
+ for i=1,ngpr do
+ out:write(format(" %08x", regs[i]))
+ if i % 8 == 0 then out:write("\n") end
+ end
+ end
+ if jit.arch == "mips" or jit.arch == "mipsel" then
+ for i=1,nfpr,2 do
+ out:write(format(" %+17.14g", regs[ngpr+i]))
+ if i % 8 == 7 then out:write("\n") end
+ end
+ else
+ for i=1,nfpr do
+ out:write(format(" %+17.14g", regs[ngpr+i]))
+ if i % 4 == 0 then out:write("\n") end
+ end
+ end
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Detach dump handlers.
+local function dumpoff()
+ if active then
+ active = false
+ jit.attach(dump_texit)
+ jit.attach(dump_record)
+ jit.attach(dump_trace)
+ if out and out ~= stdout and out ~= stderr then out:close() end
+ out = nil
+ end
+end
+
+-- Open the output file and attach dump handlers.
+local function dumpon(opt, outfile)
+ if active then dumpoff() end
+
+ local colormode = os.getenv("COLORTERM") and "A" or "T"
+ if opt then
+ opt = gsub(opt, "[TAH]", function(mode) colormode = mode; return ""; end)
+ end
+
+ local m = { t=true, b=true, i=true, m=true, }
+ if opt and opt ~= "" then
+ local o = sub(opt, 1, 1)
+ if o ~= "+" and o ~= "-" then m = {} end
+ for i=1,#opt do m[sub(opt, i, i)] = (o ~= "-") end
+ end
+ dumpmode = m
+
+ if m.t or m.b or m.i or m.s or m.m then
+ jit.attach(dump_trace, "trace")
+ end
+ if m.b then
+ jit.attach(dump_record, "record")
+ if not bcline then bcline = require("jit.bc").line end
+ end
+ if m.x or m.X then
+ jit.attach(dump_texit, "texit")
+ end
+
+ if not outfile then outfile = os.getenv("LUAJIT_DUMPFILE") end
+ if outfile then
+ out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
+ else
+ out = stdout
+ end
+
+ m[colormode] = true
+ if colormode == "A" then
+ colorize = colorize_ansi
+ irtype = irtype_ansi
+ elseif colormode == "H" then
+ colorize = colorize_html
+ irtype = irtype_html
+ out:write(header_html)
+ else
+ colorize = colorize_text
+ irtype = irtype_text
+ end
+
+ active = true
+end
+
+-- Public module functions.
+module(...)
+
+on = dumpon
+off = dumpoff
+start = dumpon -- For -j command line option.
+
diff --git a/src/LuaJIT/lib/v.lua b/src/LuaJIT/lib/v.lua
new file mode 100644
index 000000000..8f58fe3b7
--- /dev/null
+++ b/src/LuaJIT/lib/v.lua
@@ -0,0 +1,167 @@
+----------------------------------------------------------------------------
+-- Verbose mode of the LuaJIT compiler.
+--
+-- Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module shows verbose information about the progress of the
+-- JIT compiler. It prints one line for each generated trace. This module
+-- is useful to see which code has been compiled or where the compiler
+-- punts and falls back to the interpreter.
+--
+-- Example usage:
+--
+-- luajit -jv -e "for i=1,1000 do for j=1,1000 do end end"
+-- luajit -jv=myapp.out myapp.lua
+--
+-- Default output is to stderr. To redirect the output to a file, pass a
+-- filename as an argument (use '-' for stdout) or set the environment
+-- variable LUAJIT_VERBOSEFILE. The file is overwritten every time the
+-- module is started.
+--
+-- The output from the first example should look like this:
+--
+-- [TRACE 1 (command line):1 loop]
+-- [TRACE 2 (1/3) (command line):1 -> 1]
+--
+-- The first number in each line is the internal trace number. Next are
+-- the file name ('(command line)') and the line number (':1') where the
+-- trace has started. Side traces also show the parent trace number and
+-- the exit number they are attached to, in parentheses ('(1/3)').
+-- An arrow at the end shows where the trace links to ('-> 1'), unless
+-- it loops to itself.
+--
+-- In this case the inner loop gets hot and is traced first, generating
+-- a root trace. Then the last exit from the 1st trace gets hot, too,
+-- and triggers generation of the 2nd trace. The side trace follows the
+-- path along the outer loop and *around* the inner loop, back to its
+-- start, and then links to the 1st trace. Yes, this may seem unusual,
+-- if you know how traditional compilers work. Trace compilers are full
+-- of surprises like this -- have fun! :-)
+--
+-- Aborted traces are shown like this:
+--
+-- [TRACE --- foo.lua:44 -- leaving loop in root trace at foo.lua:50]
+--
+-- Don't worry -- trace aborts are quite common, even in programs which
+-- can be fully compiled. The compiler may retry several times until it
+-- finds a suitable trace.
+--
+-- Of course this doesn't work with features that are not-yet-implemented
+-- (NYI error messages). The VM simply falls back to the interpreter. This
+-- may not matter at all if the particular trace is not very high up in
+-- the CPU usage profile. Oh, and the interpreter is quite fast, too.
+--
+-- Also check out the -jdump module, which prints all the gory details.
+--
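+-- The module can also be attached from Lua code instead of the -jv flag.
+-- A minimal sketch, assuming this file is installed as jit/v.lua so that
+-- require("jit.v") returns the on/off functions exported at the end of
+-- this file:
+--
+--   local verbose = require("jit.v")
+--   verbose.on("myapp.out")   -- or "-" for stdout; omit for stderr
+--   ... run the code to be observed ...
+--   verbose.off()
+--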
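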
+------------------------------------------------------------------------------
+
+-- Cache some library functions and objects.
+local jit = require("jit")
+assert(jit.version_num == 20000, "LuaJIT core/library version mismatch")
+local jutil = require("jit.util")
+local vmdef = require("jit.vmdef")
+local funcinfo, traceinfo = jutil.funcinfo, jutil.traceinfo
+local type, format = type, string.format
+local stdout, stderr = io.stdout, io.stderr
+
+-- Active flag and output file handle.
+local active, out
+
+------------------------------------------------------------------------------
+
+local startloc, startex
+
+local function fmtfunc(func, pc)
+ local fi = funcinfo(func, pc)
+ if fi.loc then
+ return fi.loc
+ elseif fi.ffid then
+ return vmdef.ffnames[fi.ffid]
+ elseif fi.addr then
+ return format("C:%x", fi.addr)
+ else
+ return "(?)"
+ end
+end
+
+-- Format trace error message.
+local function fmterr(err, info)
+ if type(err) == "number" then
+ if type(info) == "function" then info = fmtfunc(info) end
+ err = format(vmdef.traceerr[err], info)
+ end
+ return err
+end
+
+-- Dump trace states.
+local function dump_trace(what, tr, func, pc, otr, oex)
+ if what == "start" then
+ startloc = fmtfunc(func, pc)
+ startex = otr and "("..otr.."/"..oex..") " or ""
+ else
+ if what == "abort" then
+ local loc = fmtfunc(func, pc)
+ if loc ~= startloc then
+ out:write(format("[TRACE --- %s%s -- %s at %s]\n",
+ startex, startloc, fmterr(otr, oex), loc))
+ else
+ out:write(format("[TRACE --- %s%s -- %s]\n",
+ startex, startloc, fmterr(otr, oex)))
+ end
+ elseif what == "stop" then
+ local info = traceinfo(tr)
+ local link, ltype = info.link, info.linktype
+ if ltype == "interpreter" then
+ out:write(format("[TRACE %3s %s%s -- fallback to interpreter]\n",
+ tr, startex, startloc))
+ elseif link == tr or link == 0 then
+ out:write(format("[TRACE %3s %s%s %s]\n",
+ tr, startex, startloc, ltype))
+ elseif ltype == "root" then
+ out:write(format("[TRACE %3s %s%s -> %d]\n",
+ tr, startex, startloc, link))
+ else
+ out:write(format("[TRACE %3s %s%s -> %d %s]\n",
+ tr, startex, startloc, link, ltype))
+ end
+ else
+ out:write(format("[TRACE %s]\n", what))
+ end
+ out:flush()
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Detach dump handlers.
+local function dumpoff()
+ if active then
+ active = false
+ jit.attach(dump_trace)
+ if out and out ~= stdout and out ~= stderr then out:close() end
+ out = nil
+ end
+end
+
+-- Open the output file and attach dump handlers.
+local function dumpon(outfile)
+ if active then dumpoff() end
+ if not outfile then outfile = os.getenv("LUAJIT_VERBOSEFILE") end
+ if outfile then
+ out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
+ else
+ out = stderr
+ end
+ jit.attach(dump_trace, "trace")
+ active = true
+end
+
+-- Public module functions.
+module(...)
+
+on = dumpon
+off = dumpoff
+start = dumpon -- For -j command line option.
+
diff --git a/src/LuaJIT/src/Makefile.dep b/src/LuaJIT/src/Makefile.dep
new file mode 100644
index 000000000..82cdc0d7c
--- /dev/null
+++ b/src/LuaJIT/src/Makefile.dep
@@ -0,0 +1,209 @@
+buildvm.o: buildvm.c buildvm.h lj_def.h lua.h luaconf.h lj_arch.h \
+ lj_obj.h lj_gc.h lj_bc.h lj_ir.h lj_ircall.h lj_jit.h lj_frame.h \
+ lj_dispatch.h lj_ctype.h lj_ccall.h luajit.h \
+ lj_traceerr.h
+buildvm_asm.o: buildvm_asm.c buildvm.h lj_def.h lua.h luaconf.h lj_arch.h \
+ lj_bc.h
+buildvm_fold.o: buildvm_fold.c buildvm.h lj_def.h lua.h luaconf.h \
+ lj_arch.h lj_obj.h lj_ir.h
+buildvm_lib.o: buildvm_lib.c buildvm.h lj_def.h lua.h luaconf.h lj_arch.h \
+ lj_obj.h lj_lib.h
+buildvm_peobj.o: buildvm_peobj.c buildvm.h lj_def.h lua.h luaconf.h \
+ lj_arch.h lj_bc.h
+lib_aux.o: lib_aux.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_state.h lj_lib.h lj_alloc.h
+lib_base.o: lib_base.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h \
+ lj_tab.h lj_meta.h lj_state.h lj_ctype.h lj_cconv.h lj_bc.h lj_ff.h \
+ lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h lj_lib.h lj_libdef.h
+lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_str.h lj_lib.h lj_libdef.h
+lib_debug.o: lib_debug.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_err.h lj_errmsg.h lj_lib.h lj_libdef.h
+lib_ffi.o: lib_ffi.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h \
+ lj_ctype.h lj_cparse.h lj_cdata.h lj_cconv.h lj_carith.h lj_ccall.h \
+ lj_ccallback.h lj_clib.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h
+lib_init.o: lib_init.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h
+lib_io.o: lib_io.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_ff.h lj_ffdef.h \
+ lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_lib.h \
+ lj_libdef.h
+lib_jit.o: lib_jit.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h \
+ lj_obj.h lj_def.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h \
+ lj_bc.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_target.h \
+ lj_target_*.h lj_dispatch.h lj_vm.h lj_vmevent.h lj_lib.h luajit.h \
+ lj_libdef.h
+lib_math.o: lib_math.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_lib.h lj_vm.h lj_libdef.h
+lib_os.o: lib_os.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_lib.h lj_libdef.h
+lib_package.o: lib_package.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_err.h lj_errmsg.h lj_lib.h
+lib_string.o: lib_string.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h \
+ lj_state.h lj_ff.h lj_ffdef.h lj_bcdump.h lj_lex.h lj_char.h lj_lib.h \
+ lj_libdef.h
+lib_table.o: lib_table.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_lib.h \
+ lj_libdef.h
+lj_alloc.o: lj_alloc.c lj_def.h lua.h luaconf.h lj_arch.h lj_alloc.h
+lj_api.o: lj_api.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h lj_udata.h \
+ lj_meta.h lj_state.h lj_bc.h lj_frame.h lj_trace.h lj_jit.h lj_ir.h \
+ lj_dispatch.h lj_traceerr.h lj_vm.h lj_lex.h lj_bcdump.h lj_parse.h
+lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h \
+ lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h \
+ lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h \
+ lj_asm_*.h
+lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \
+ lj_bcdef.h
+lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_bc.h lj_ctype.h \
+ lj_cdata.h lj_lex.h lj_bcdump.h lj_state.h
+lj_bcwrite.o: lj_bcwrite.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_str.h lj_bc.h lj_ctype.h lj_dispatch.h lj_jit.h lj_ir.h \
+ lj_bcdump.h lj_lex.h lj_err.h lj_errmsg.h lj_vm.h
+lj_carith.o: lj_carith.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_meta.h lj_ctype.h lj_cconv.h \
+ lj_cdata.h lj_carith.h
+lj_ccall.o: lj_ccall.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cconv.h \
+ lj_cdata.h lj_ccall.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
+ lj_traceerr.h
+lj_ccallback.o: lj_ccallback.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_state.h lj_frame.h \
+ lj_bc.h lj_ctype.h lj_cconv.h lj_ccall.h lj_ccallback.h lj_target.h \
+ lj_target_*.h lj_mcode.h lj_jit.h lj_ir.h lj_trace.h lj_dispatch.h \
+ lj_traceerr.h lj_vm.h
+lj_cconv.o: lj_cconv.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_gc.h lj_cdata.h lj_cconv.h \
+ lj_ccallback.h
+lj_cdata.o: lj_cdata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cconv.h \
+ lj_cdata.h
+lj_char.o: lj_char.c lj_char.h lj_def.h lua.h luaconf.h
+lj_clib.o: lj_clib.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_tab.h lj_str.h lj_udata.h lj_ctype.h lj_cconv.h \
+ lj_cdata.h lj_clib.h
+lj_cparse.o: lj_cparse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_ctype.h lj_cparse.h lj_frame.h \
+ lj_bc.h lj_vm.h lj_char.h
+lj_crecord.o: lj_crecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h \
+ lj_gc.h lj_cdata.h lj_cparse.h lj_cconv.h lj_clib.h lj_ccall.h lj_ir.h \
+ lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h lj_dispatch.h lj_traceerr.h \
+ lj_record.h lj_ffrecord.h lj_snap.h lj_crecord.h
+lj_ctype.o: lj_ctype.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_ccallback.h
+lj_debug.o: lj_debug.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_state.h lj_frame.h \
+ lj_bc.h lj_jit.h lj_ir.h
+lj_dispatch.o: lj_dispatch.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_func.h lj_str.h lj_tab.h lj_meta.h lj_debug.h \
+ lj_state.h lj_frame.h lj_bc.h lj_ff.h lj_ffdef.h lj_jit.h lj_ir.h \
+ lj_ccallback.h lj_ctype.h lj_gc.h lj_trace.h lj_dispatch.h lj_traceerr.h \
+ lj_vm.h luajit.h
+lj_err.o: lj_err.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_err.h \
+ lj_errmsg.h lj_debug.h lj_str.h lj_func.h lj_state.h lj_frame.h lj_bc.h \
+ lj_ff.h lj_ffdef.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \
+ lj_traceerr.h lj_vm.h
+lj_ffrecord.o: lj_ffrecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ff.h \
+ lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_crecord.h \
+ lj_vm.h lj_recdef.h
+lj_func.o: lj_func.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_func.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
+ lj_traceerr.h lj_vm.h
+lj_gc.o: lj_gc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_udata.h lj_meta.h \
+ lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h lj_jit.h \
+ lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h
+lj_gdbjit.o: lj_gdbjit.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_frame.h lj_bc.h lj_jit.h \
+ lj_ir.h lj_dispatch.h
+lj_ir.o: lj_ir.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h lj_cdata.h lj_carith.h \
+ lj_vm.h lj_lib.h
+lj_lex.o: lj_lex.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cdata.h lualib.h \
+ lj_state.h lj_lex.h lj_parse.h lj_char.h
+lj_lib.o: lj_lib.c lauxlib.h lua.h luaconf.h lj_obj.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_bc.h \
+ lj_dispatch.h lj_jit.h lj_ir.h lj_vm.h lj_lib.h
+lj_mcode.o: lj_mcode.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_jit.h lj_ir.h lj_mcode.h lj_trace.h lj_dispatch.h lj_bc.h \
+ lj_traceerr.h lj_vm.h
+lj_meta.o: lj_meta.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \
+ lj_vm.h
+lj_obj.o: lj_obj.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h
+lj_opt_dce.o: lj_opt_dce.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_ir.h lj_jit.h lj_iropt.h
+lj_opt_fold.o: lj_opt_fold.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \
+ lj_bc.h lj_traceerr.h lj_carith.h lj_vm.h lj_folddef.h
+lj_opt_loop.o: lj_opt_loop.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h lj_vm.h
+lj_opt_mem.o: lj_opt_mem.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_tab.h lj_ir.h lj_jit.h lj_iropt.h
+lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_str.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_vm.h
+lj_opt_split.o: lj_opt_split.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_str.h lj_ir.h lj_jit.h lj_ircall.h \
+ lj_iropt.h lj_vm.h
+lj_parse.o: lj_parse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h \
+ lj_state.h lj_bc.h lj_ctype.h lj_lex.h lj_parse.h lj_vm.h lj_vmevent.h
+lj_record.o: lj_record.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \
+ lj_ff.h lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_snap.h lj_vm.h
+lj_snap.o: lj_snap.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_state.h lj_frame.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_snap.h lj_target.h lj_target_*.h
+lj_state.o: lj_state.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_meta.h \
+ lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_trace.h lj_jit.h lj_ir.h \
+ lj_dispatch.h lj_traceerr.h lj_vm.h lj_lex.h lj_alloc.h
+lj_str.o: lj_str.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_state.h lj_char.h
+lj_tab.o: lj_tab.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_tab.h
+lj_trace.o: lj_trace.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_frame.h lj_bc.h \
+ lj_state.h lj_ir.h lj_jit.h lj_iropt.h lj_mcode.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_snap.h lj_gdbjit.h lj_record.h lj_asm.h \
+ lj_vm.h lj_vmevent.h lj_target.h lj_target_*.h
+lj_udata.o: lj_udata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_udata.h
+lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_str.h lj_tab.h lj_state.h lj_dispatch.h lj_bc.h lj_jit.h lj_ir.h \
+ lj_vm.h lj_vmevent.h
+lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_ir.h lj_vm.h
+ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_gc.c lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h \
+ lj_udata.h lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h \
+ lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h lj_err.c \
+ lj_debug.h lj_ff.h lj_ffdef.h lj_char.c lj_char.h lj_bc.c lj_bcdef.h \
+ lj_obj.c lj_str.c lj_tab.c lj_func.c lj_udata.c lj_meta.c lj_debug.c \
+ lj_state.c lj_lex.h lj_alloc.h lj_dispatch.c lj_ccallback.h luajit.h \
+ lj_vmevent.c lj_vmevent.h lj_vmmath.c lj_api.c lj_bcdump.h lj_parse.h \
+ lj_lex.c lualib.h lj_parse.c lj_bcread.c lj_bcwrite.c lj_ctype.c \
+ lj_cdata.c lj_cconv.h lj_cconv.c lj_ccall.c lj_ccall.h lj_ccallback.c \
+ lj_target.h lj_target_*.h lj_mcode.h lj_carith.c lj_carith.h lj_clib.c \
+ lj_clib.h lj_cparse.c lj_cparse.h lj_lib.c lj_lib.h lj_ir.c lj_ircall.h \
+ lj_iropt.h lj_opt_mem.c lj_opt_fold.c lj_folddef.h lj_opt_narrow.c \
+ lj_opt_dce.c lj_opt_loop.c lj_snap.h lj_opt_split.c lj_mcode.c lj_snap.c \
+ lj_record.c lj_record.h lj_ffrecord.h lj_crecord.c lj_crecord.h \
+ lj_ffrecord.c lj_recdef.h lj_asm.c lj_asm.h lj_emit_*.h lj_asm_*.h \
+ lj_trace.c lj_gdbjit.h lj_gdbjit.c lj_alloc.c lib_aux.c lib_base.c \
+ lj_libdef.h lib_math.c lib_string.c lib_table.c lib_io.c lib_os.c \
+ lib_package.c lib_debug.c lib_bit.c lib_jit.c lib_ffi.c lib_init.c
+luajit.o: luajit.c lua.h luaconf.h lauxlib.h lualib.h luajit.h lj_arch.h
diff --git a/src/LuaJIT/src/buildvm.c b/src/LuaJIT/src/buildvm.c
new file mode 100644
index 000000000..29cf73785
--- /dev/null
+++ b/src/LuaJIT/src/buildvm.c
@@ -0,0 +1,513 @@
+/*
+** LuaJIT VM builder.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** This is a tool to build the hand-tuned assembler code required for
+** LuaJIT's bytecode interpreter. It supports a variety of output formats
+** to feed different toolchains (see usage() below).
+**
+** This tool is not particularly optimized because it's only used while
+** _building_ LuaJIT. There's no point in distributing or installing it.
+** Only the object code generated by this tool is linked into LuaJIT.
+**
+** Caveat: some memory is not free'd, error handling is lazy.
+** It's a one-shot tool -- any effort fixing this would be wasted.
+*/
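+
+/* A sketch of typical invocations; the authoritative commands and input
+** file lists live in the LuaJIT Makefile, so treat these as illustrative:
+**
+**   buildvm -m elfasm -o lj_vm.s              assembler source for the VM
+**   buildvm -m bcdef -o lj_bcdef.h lib_*.c    generated bytecode tables
+**   buildvm -m vmdef -o vmdef.lua lib_*.c     Lua definitions for jit.* modules
+*/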
+
+#include "buildvm.h"
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_ircall.h"
+#include "lj_frame.h"
+#include "lj_dispatch.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_ccall.h"
+#endif
+#include "luajit.h"
+
+#if defined(_WIN32)
+#include <fcntl.h>
+#include <io.h>
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+/* DynASM glue definitions. */
+#define Dst ctx
+#define Dst_DECL BuildCtx *ctx
+#define Dst_REF (ctx->D)
+#define DASM_CHECKS 1
+
+#include "../dynasm/dasm_proto.h"
+
+/* Glue macros for DynASM. */
+static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type);
+
+#define DASM_EXTERN(ctx, addr, idx, type) \
+ collect_reloc(ctx, addr, idx, type)
+
+/* ------------------------------------------------------------------------ */
+
+/* Avoid trouble if cross-compiling for an x86 target. Speed doesn't matter. */
+#define DASM_ALIGNED_WRITES 1
+
+/* Embed architecture-specific DynASM encoder and backend. */
+#if LJ_TARGET_X86
+#include "../dynasm/dasm_x86.h"
+#include "buildvm_x86.h"
+#elif LJ_TARGET_X64
+#include "../dynasm/dasm_x86.h"
+#if LJ_ABI_WIN
+#include "buildvm_x64win.h"
+#else
+#include "buildvm_x64.h"
+#endif
+#elif LJ_TARGET_ARM
+#include "../dynasm/dasm_arm.h"
+#include "buildvm_arm.h"
+#elif LJ_TARGET_PPC
+#include "../dynasm/dasm_ppc.h"
+#include "buildvm_ppc.h"
+#elif LJ_TARGET_PPCSPE
+#include "../dynasm/dasm_ppc.h"
+#include "buildvm_ppcspe.h"
+#elif LJ_TARGET_MIPS
+#include "../dynasm/dasm_mips.h"
+#include "buildvm_mips.h"
+#else
+#error "No support for this architecture (yet)"
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+void owrite(BuildCtx *ctx, const void *ptr, size_t sz)
+{
+ if (fwrite(ptr, 1, sz, ctx->fp) != sz) {
+ fprintf(stderr, "Error: cannot write to output file: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Emit code as raw bytes. Only used for DynASM debugging. */
+static void emit_raw(BuildCtx *ctx)
+{
+ owrite(ctx, ctx->code, ctx->codesz);
+}
+
+/* -- Build machine code -------------------------------------------------- */
+
+static const char *sym_decorate(BuildCtx *ctx,
+ const char *prefix, const char *suffix)
+{
+ char name[256];
+ char *p;
+#if LJ_64
+ const char *symprefix = ctx->mode == BUILD_machasm ? "_" : "";
+#else
+ const char *symprefix = ctx->mode != BUILD_elfasm ? "_" : "";
+#endif
+ sprintf(name, "%s%s%s", symprefix, prefix, suffix);
+ p = strchr(name, '@');
+ if (p) {
+ if (!LJ_64 && (ctx->mode == BUILD_coffasm || ctx->mode == BUILD_peobj))
+ name[0] = '@';
+ else
+ *p = '\0';
+ }
+ p = (char *)malloc(strlen(name)+1); /* MSVC doesn't like strdup. */
+ strcpy(p, name);
+ return p;
+}
+
+#define NRELOCSYM (sizeof(extnames)/sizeof(extnames[0])-1)
+
+static int relocmap[NRELOCSYM];
+
+/* Collect external relocations. */
+static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type)
+{
+ if (ctx->nreloc >= BUILD_MAX_RELOC) {
+ fprintf(stderr, "Error: too many relocations, increase BUILD_MAX_RELOC.\n");
+ exit(1);
+ }
+ if (relocmap[idx] < 0) {
+ relocmap[idx] = ctx->nrelocsym;
+ ctx->relocsym[ctx->nrelocsym] = sym_decorate(ctx, "", extnames[idx]);
+ ctx->nrelocsym++;
+ }
+ ctx->reloc[ctx->nreloc].ofs = (int32_t)(addr - ctx->code);
+ ctx->reloc[ctx->nreloc].sym = relocmap[idx];
+ ctx->reloc[ctx->nreloc].type = type;
+ ctx->nreloc++;
+ return 0; /* Encode symbol offset of 0. */
+}
+
+/* Naive insertion sort. Performance doesn't matter here. */
+static void sym_insert(BuildCtx *ctx, int32_t ofs,
+ const char *prefix, const char *suffix)
+{
+ ptrdiff_t i = ctx->nsym++;
+ while (i > 0) {
+ if (ctx->sym[i-1].ofs <= ofs)
+ break;
+ ctx->sym[i] = ctx->sym[i-1];
+ i--;
+ }
+ ctx->sym[i].ofs = ofs;
+ ctx->sym[i].name = sym_decorate(ctx, prefix, suffix);
+}
+
+/* Build the machine code. */
+static int build_code(BuildCtx *ctx)
+{
+ int status;
+ int i;
+
+ /* Initialize DynASM structures. */
+ ctx->nglob = GLOB__MAX;
+ ctx->glob = (void **)malloc(ctx->nglob*sizeof(void *));
+ memset(ctx->glob, 0, ctx->nglob*sizeof(void *));
+ ctx->nreloc = 0;
+
+ ctx->globnames = globnames;
+ ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *));
+ ctx->nrelocsym = 0;
+ for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1;
+
+ ctx->dasm_ident = DASM_IDENT;
+ ctx->dasm_arch = DASM_ARCH;
+
+ dasm_init(Dst, DASM_MAXSECTION);
+ dasm_setupglobal(Dst, ctx->glob, ctx->nglob);
+ dasm_setup(Dst, build_actionlist);
+
+ /* Call arch-specific backend to emit the code. */
+ ctx->npc = build_backend(ctx);
+
+ /* Finalize the code. */
+ (void)dasm_checkstep(Dst, -1);
+ if ((status = dasm_link(Dst, &ctx->codesz))) return status;
+ ctx->code = (uint8_t *)malloc(ctx->codesz);
+ if ((status = dasm_encode(Dst, (void *)ctx->code))) return status;
+
+ /* Allocate symbol table and bytecode offsets. */
+ ctx->beginsym = sym_decorate(ctx, "", LABEL_PREFIX "vm_asm_begin");
+ ctx->sym = (BuildSym *)malloc((ctx->npc+ctx->nglob+1)*sizeof(BuildSym));
+ ctx->nsym = 0;
+ ctx->bc_ofs = (int32_t *)malloc(ctx->npc*sizeof(int32_t));
+
+ /* Collect the opcodes (PC labels). */
+ for (i = 0; i < ctx->npc; i++) {
+ int32_t ofs = dasm_getpclabel(Dst, i);
+ if (ofs < 0) return 0x22000000|i;
+ ctx->bc_ofs[i] = ofs;
+ if ((LJ_HASJIT ||
+ !(i == BC_JFORI || i == BC_JFORL || i == BC_JITERL || i == BC_JLOOP ||
+ i == BC_IFORL || i == BC_IITERL || i == BC_ILOOP)) &&
+ (LJ_HASFFI || i != BC_KCDATA))
+ sym_insert(ctx, ofs, LABEL_PREFIX_BC, bc_names[i]);
+ }
+
+ /* Collect the globals (named labels). */
+ for (i = 0; i < ctx->nglob; i++) {
+ const char *gl = globnames[i];
+ int len = (int)strlen(gl);
+ if (!ctx->glob[i]) {
+ fprintf(stderr, "Error: undefined global %s\n", gl);
+ exit(2);
+ }
+ /* Skip the _Z symbols. */
+ if (!(len >= 2 && gl[len-2] == '_' && gl[len-1] == 'Z'))
+ sym_insert(ctx, (int32_t)((uint8_t *)(ctx->glob[i]) - ctx->code),
+ LABEL_PREFIX, globnames[i]);
+ }
+
+ /* Close the address range. */
+ sym_insert(ctx, (int32_t)ctx->codesz, "", "");
+ ctx->nsym--;
+
+ dasm_free(Dst);
+
+ return 0;
+}
+
+/* -- Generate VM enums --------------------------------------------------- */
+
+const char *const bc_names[] = {
+#define BCNAME(name, ma, mb, mc, mt) #name,
+BCDEF(BCNAME)
+#undef BCNAME
+ NULL
+};
+
+const char *const ir_names[] = {
+#define IRNAME(name, m, m1, m2) #name,
+IRDEF(IRNAME)
+#undef IRNAME
+ NULL
+};
+
+const char *const irt_names[] = {
+#define IRTNAME(name) #name,
+IRTDEF(IRTNAME)
+#undef IRTNAME
+ NULL
+};
+
+const char *const irfpm_names[] = {
+#define FPMNAME(name) #name,
+IRFPMDEF(FPMNAME)
+#undef FPMNAME
+ NULL
+};
+
+const char *const irfield_names[] = {
+#define FLNAME(name, ofs) #name,
+IRFLDEF(FLNAME)
+#undef FLNAME
+ NULL
+};
+
+const char *const ircall_names[] = {
+#define IRCALLNAME(cond, name, nargs, kind, type, flags) #name,
+IRCALLDEF(IRCALLNAME)
+#undef IRCALLNAME
+ NULL
+};
+
+static const char *const trace_errors[] = {
+#define TREDEF(name, msg) msg,
+#include "lj_traceerr.h"
+ NULL
+};
+
+static const char *lower(char *buf, const char *s)
+{
+ char *p = buf;
+ while (*s) {
+ *p++ = (*s >= 'A' && *s <= 'Z') ? *s+0x20 : *s;
+ s++;
+ }
+ *p = '\0';
+ return buf;
+}
+
+/* Emit C source code for bytecode-related definitions. */
+static void emit_bcdef(BuildCtx *ctx)
+{
+ int i;
+ fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
+ fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_ofs[] = {\n");
+ for (i = 0; i < ctx->npc; i++) {
+ if (i != 0)
+ fprintf(ctx->fp, ",\n");
+ fprintf(ctx->fp, "%d", ctx->bc_ofs[i]);
+ }
+}
+
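+/* The vmdef output produced below is the Lua module that the jit.dump and
+** jit.v modules load via require("jit.vmdef"). Roughly (an illustrative
+** sketch, not verbatim output -- the exact strings depend on the build):
+**
+**   module(...)
+**   bcnames = "ISLT  ISGE  ..."   -- one 6-char-padded name per bytecode
+**   irnames = "..."               -- likewise for IR opcodes
+**   irfpm = { [0]="floor", ... }
+**   traceerr = { [0]="...", ... }
+*/
+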
+/* Emit VM definitions as Lua code for debug modules. */
+static void emit_vmdef(BuildCtx *ctx)
+{
+ char buf[80];
+ int i;
+ fprintf(ctx->fp, "-- This is a generated file. DO NOT EDIT!\n\n");
+ fprintf(ctx->fp, "module(...)\n\n");
+
+ fprintf(ctx->fp, "bcnames = \"");
+ for (i = 0; bc_names[i]; i++) fprintf(ctx->fp, "%-6s", bc_names[i]);
+ fprintf(ctx->fp, "\"\n\n");
+
+ fprintf(ctx->fp, "irnames = \"");
+ for (i = 0; ir_names[i]; i++) fprintf(ctx->fp, "%-6s", ir_names[i]);
+ fprintf(ctx->fp, "\"\n\n");
+
+ fprintf(ctx->fp, "irfpm = { [0]=");
+ for (i = 0; irfpm_names[i]; i++)
+ fprintf(ctx->fp, "\"%s\", ", lower(buf, irfpm_names[i]));
+ fprintf(ctx->fp, "}\n\n");
+
+ fprintf(ctx->fp, "irfield = { [0]=");
+ for (i = 0; irfield_names[i]; i++) {
+ char *p;
+ lower(buf, irfield_names[i]);
+ p = strchr(buf, '_');
+ if (p) *p = '.';
+ fprintf(ctx->fp, "\"%s\", ", buf);
+ }
+ fprintf(ctx->fp, "}\n\n");
+
+ fprintf(ctx->fp, "ircall = {\n[0]=");
+ for (i = 0; ircall_names[i]; i++)
+ fprintf(ctx->fp, "\"%s\",\n", ircall_names[i]);
+ fprintf(ctx->fp, "}\n\n");
+
+ fprintf(ctx->fp, "traceerr = {\n[0]=");
+ for (i = 0; trace_errors[i]; i++)
+ fprintf(ctx->fp, "\"%s\",\n", trace_errors[i]);
+ fprintf(ctx->fp, "}\n\n");
+}
+
+/* -- Argument parsing ---------------------------------------------------- */
+
+/* Build mode names. */
+static const char *const modenames[] = {
+#define BUILDNAME(name) #name,
+BUILDDEF(BUILDNAME)
+#undef BUILDNAME
+ NULL
+};
+
+/* Print usage information and exit. */
+static void usage(void)
+{
+ int i;
+ fprintf(stderr, LUAJIT_VERSION " VM builder.\n");
+ fprintf(stderr, LUAJIT_COPYRIGHT ", " LUAJIT_URL "\n");
+ fprintf(stderr, "Target architecture: " LJ_ARCH_NAME "\n\n");
+ fprintf(stderr, "Usage: buildvm -m mode [-o outfile] [infiles...]\n\n");
+ fprintf(stderr, "Available modes:\n");
+ for (i = 0; i < BUILD__MAX; i++)
+ fprintf(stderr, " %s\n", modenames[i]);
+ exit(1);
+}
+
+/* Parse the output mode name. */
+static BuildMode parsemode(const char *mode)
+{
+ int i;
+ for (i = 0; modenames[i]; i++)
+ if (!strcmp(mode, modenames[i]))
+ return (BuildMode)i;
+ usage();
+ return (BuildMode)-1;
+}
+
+/* Parse arguments. */
+static void parseargs(BuildCtx *ctx, char **argv)
+{
+ const char *a;
+ int i;
+ ctx->mode = (BuildMode)-1;
+ ctx->outname = "-";
+ for (i = 1; (a = argv[i]) != NULL; i++) {
+ if (a[0] != '-')
+ break;
+ switch (a[1]) {
+ case '-':
+ if (a[2]) goto err;
+ i++;
+ goto ok;
+ case '\0':
+ goto ok;
+ case 'm':
+ i++;
+ if (a[2] || argv[i] == NULL) goto err;
+ ctx->mode = parsemode(argv[i]);
+ break;
+ case 'o':
+ i++;
+ if (a[2] || argv[i] == NULL) goto err;
+ ctx->outname = argv[i];
+ break;
+ default: err:
+ usage();
+ break;
+ }
+ }
+ok:
+ ctx->args = argv+i;
+ if (ctx->mode == (BuildMode)-1) goto err;
+}
+
+int main(int argc, char **argv)
+{
+ BuildCtx ctx_;
+ BuildCtx *ctx = &ctx_;
+ int status, binmode;
+
+ if (sizeof(void *) != 4*LJ_32+8*LJ_64) {
+ fprintf(stderr,"Error: pointer size mismatch in cross-build.\n");
+ fprintf(stderr,"Try: make HOST_CC=\"gcc -m32\" CROSS=... TARGET=...\n\n");
+ return 1;
+ }
+
+ UNUSED(argc);
+ parseargs(ctx, argv);
+
+ if ((status = build_code(ctx))) {
+ fprintf(stderr,"Error: DASM error %08x\n", status);
+ return 1;
+ }
+
+ switch (ctx->mode) {
+ case BUILD_peobj:
+ case BUILD_raw:
+ binmode = 1;
+ break;
+ default:
+ binmode = 0;
+ break;
+ }
+
+ if (ctx->outname[0] == '-' && ctx->outname[1] == '\0') {
+ ctx->fp = stdout;
+#if defined(_WIN32)
+ if (binmode)
+ _setmode(_fileno(stdout), _O_BINARY); /* Yuck. */
+#endif
+ } else if (!(ctx->fp = fopen(ctx->outname, binmode ? "wb" : "w"))) {
+ fprintf(stderr, "Error: cannot open output file '%s': %s\n",
+ ctx->outname, strerror(errno));
+ exit(1);
+ }
+
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ case BUILD_coffasm:
+ case BUILD_machasm:
+ emit_asm(ctx);
+ emit_asm_debug(ctx);
+ break;
+ case BUILD_peobj:
+ emit_peobj(ctx);
+ break;
+ case BUILD_raw:
+ emit_raw(ctx);
+ break;
+ case BUILD_bcdef:
+ emit_bcdef(ctx);
+ emit_lib(ctx);
+ break;
+ case BUILD_vmdef:
+ emit_vmdef(ctx);
+ emit_lib(ctx);
+ break;
+ case BUILD_ffdef:
+ case BUILD_libdef:
+ case BUILD_recdef:
+ emit_lib(ctx);
+ break;
+ case BUILD_folddef:
+ emit_fold(ctx);
+ break;
+ default:
+ break;
+ }
+
+ fflush(ctx->fp);
+ if (ferror(ctx->fp)) {
+ fprintf(stderr, "Error: cannot write to output file: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+ fclose(ctx->fp);
+
+ return 0;
+}
+
diff --git a/src/LuaJIT/src/buildvm.h b/src/LuaJIT/src/buildvm.h
new file mode 100644
index 000000000..2b7168ed3
--- /dev/null
+++ b/src/LuaJIT/src/buildvm.h
@@ -0,0 +1,104 @@
+/*
+** LuaJIT VM builder.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _BUILDVM_H
+#define _BUILDVM_H
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* Hardcoded limits. Increase as needed. */
+#define BUILD_MAX_RELOC 200 /* Max. number of relocations. */
+#define BUILD_MAX_FOLD 4096 /* Max. number of fold rules. */
+
+/* Prefix for scanned library definitions. */
+#define LIBDEF_PREFIX "LJLIB_"
+
+/* Prefix for scanned fold definitions. */
+#define FOLDDEF_PREFIX "LJFOLD"
+
+/* Prefixes for generated labels. */
+#define LABEL_PREFIX "lj_"
+#define LABEL_PREFIX_BC LABEL_PREFIX "BC_"
+#define LABEL_PREFIX_FF LABEL_PREFIX "ff_"
+#define LABEL_PREFIX_CF LABEL_PREFIX "cf_"
+#define LABEL_PREFIX_FFH LABEL_PREFIX "ffh_"
+#define LABEL_PREFIX_LIBCF LABEL_PREFIX "lib_cf_"
+#define LABEL_PREFIX_LIBINIT LABEL_PREFIX "lib_init_"
+
+/* Forward declaration. */
+struct dasm_State;
+
+/* Build modes. */
+#define BUILDDEF(_) \
+ _(elfasm) _(coffasm) _(machasm) _(peobj) _(raw) \
+ _(bcdef) _(ffdef) _(libdef) _(recdef) _(vmdef) \
+ _(folddef)
+
+typedef enum {
+#define BUILDENUM(name) BUILD_##name,
+BUILDDEF(BUILDENUM)
+#undef BUILDENUM
+ BUILD__MAX
+} BuildMode;
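+
+/* Note: BUILDDEF is an X-macro list. Expanded with BUILDENUM here it yields
+** the BUILD_* enumerators, and buildvm.c expands the same list with BUILDNAME
+** to produce the parallel modenames[] table, keeping the two in sync. */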
+
+/* Code relocation. */
+typedef struct BuildReloc {
+ int32_t ofs;
+ int sym;
+ int type;
+} BuildReloc;
+
+typedef struct BuildSym {
+ const char *name;
+ int32_t ofs;
+} BuildSym;
+
+/* Build context structure. */
+typedef struct BuildCtx {
+ /* DynASM state pointer. Should be first member. */
+ struct dasm_State *D;
+ /* Parsed command line. */
+ BuildMode mode;
+ FILE *fp;
+ const char *outname;
+ char **args;
+ /* Code and symbols generated by DynASM. */
+ uint8_t *code;
+ size_t codesz;
+ int npc, nglob, nsym, nreloc, nrelocsym;
+ void **glob;
+ BuildSym *sym;
+ const char **relocsym;
+ int32_t *bc_ofs;
+ const char *beginsym;
+ /* Strings generated by DynASM. */
+ const char *const *globnames;
+ const char *dasm_ident;
+ const char *dasm_arch;
+ /* Relocations. */
+ BuildReloc reloc[BUILD_MAX_RELOC];
+} BuildCtx;
+
+extern void owrite(BuildCtx *ctx, const void *ptr, size_t sz);
+extern void emit_asm(BuildCtx *ctx);
+extern void emit_peobj(BuildCtx *ctx);
+extern void emit_lib(BuildCtx *ctx);
+extern void emit_fold(BuildCtx *ctx);
+
+extern const char *const bc_names[];
+extern const char *const ir_names[];
+extern const char *const irt_names[];
+extern const char *const irfpm_names[];
+extern const char *const irfield_names[];
+extern const char *const ircall_names[];
+
+#endif
diff --git a/src/LuaJIT/src/buildvm_arm.dasc b/src/LuaJIT/src/buildvm_arm.dasc
new file mode 100644
index 000000000..dd4b7c893
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_arm.dasc
@@ -0,0 +1,4115 @@
+|// Low-level VM code for ARM CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch arm
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|
+|// The following must be C callee-save.
+|.define MASKR8, r4 // 255*8 constant for fast bytecode decoding.
+|.define KBASE, r5 // Constants of current Lua function.
+|.define PC, r6 // Next PC.
+|.define DISPATCH, r7 // Opcode dispatch table.
+|.define LREG, r8 // Register holding lua_State (also in SAVE_L).
+|
+|// C callee-save in EABI, but often refetched. Temporary in iOS 3.0+.
+|.define BASE, r9 // Base of current Lua stack frame.
+|
+|// The following temporaries are not saved across C calls, except for RA/RC.
+|.define RA, r10 // Callee-save.
+|.define RC, r11 // Callee-save.
+|.define RB, r12
+|.define OP, r12 // Overlaps RB, must not be lr.
+|.define INS, lr
+|
+|// Calling conventions. Also used as temporaries.
+|.define CARG1, r0
+|.define CARG2, r1
+|.define CARG3, r2
+|.define CARG4, r3
+|.define CARG12, r0 // For 1st soft-fp double.
+|.define CARG34, r2 // For 2nd soft-fp double.
+|
+|.define CRET1, r0
+|.define CRET2, r1
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.define CFRAME_SPACE, #28
+|.define SAVE_ERRF, [sp, #24]
+|.define SAVE_NRES, [sp, #20]
+|.define SAVE_CFRAME, [sp, #16]
+|.define SAVE_L, [sp, #12]
+|.define SAVE_PC, [sp, #8]
+|.define SAVE_MULTRES, [sp, #4]
+|.define ARG5, [sp]
+|
+|.define TMPDhi, [sp, #4]
+|.define TMPDlo, [sp]
+|.define TMPD, [sp]
+|.define TMPDp, sp
+|
+|.macro saveregs
+| push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+| sub sp, sp, CFRAME_SPACE
+|.endmacro
+|.macro restoreregs_ret
+| add sp, sp, CFRAME_SPACE
+| pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; ud; .endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_FUNC, #-8
+|.define FRAME_PC, #-4
+|
+|.macro decode_RA8, dst, ins; and dst, MASKR8, ins, lsr #5; .endmacro
+|.macro decode_RB8, dst, ins; and dst, MASKR8, ins, lsr #21; .endmacro
+|.macro decode_RC8, dst, ins; and dst, MASKR8, ins, lsr #13; .endmacro
+|.macro decode_RD, dst, ins; lsr dst, ins, #16; .endmacro
+|.macro decode_OP, dst, ins; and dst, ins, #255; .endmacro
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| ldrb OP, [PC]
+|.endmacro
+|.macro ins_NEXT2
+| ldr INS, [PC], #4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT3
+| ldr OP, [DISPATCH, OP, lsl #2]
+| decode_RA8 RA, INS
+| decode_RD RC, INS
+| bx OP
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+| ins_NEXT3
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+| .define ins_next3, ins_NEXT3
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| .endmacro
+| .macro ins_next3
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Avoid register name substitution for field name.
+#define field_pc pc
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| ldr PC, LFUNC:CARG3->field_pc
+| ldrb OP, [PC] // STALL: load PC. early PC.
+| ldr INS, [PC], #4
+| ldr OP, [DISPATCH, OP, lsl #2] // STALL: load OP. early OP.
+| decode_RA8 RA, INS
+| add RA, RA, BASE
+| bx OP
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| str PC, [BASE, FRAME_PC]
+| ins_callt // STALL: locked PC.
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checktp, reg, tp; cmn reg, #-tp; .endmacro
+|.macro checktpeq, reg, tp; cmneq reg, #-tp; .endmacro
+|.macro checktpne, reg, tp; cmnne reg, #-tp; .endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR; bne target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB; bne target; .endmacro
+|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC; bne target; .endmacro
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|.macro hotcheck, delta
+| lsr CARG1, PC, #1
+| and CARG1, CARG1, #126
+| sub CARG1, CARG1, #-GG_DISP2HOT
+| ldrh CARG2, [DISPATCH, CARG1]
+| subs CARG2, CARG2, #delta
+| strh CARG2, [DISPATCH, CARG1]
+|.endmacro
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP
+| blo ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL
+| blo ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state.
+|.macro mv_vmstate, reg, st; mvn reg, #LJ_VMST_..st; .endmacro
+|.macro st_vmstate, reg; str reg, [DISPATCH, #DISPATCH_GL(vmstate)]; .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp
+| ldr tmp, [DISPATCH, #DISPATCH_GL(gc.grayagain)]
+| bic mark, mark, #LJ_GC_BLACK // black2gray(tab)
+| str tab, [DISPATCH, #DISPATCH_GL(gc.grayagain)]
+| strb mark, tab->marked
+| str tmp, tab->gclist
+|.endmacro
+|
+|.macro IOS, a, b
+||if (LJ_TARGET_OSX) {
+| a, b
+||}
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+#if !LJ_DUALNUM
+#error "Only dual-number mode supported for ARM target"
+#endif
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: RB = previous base.
+ | tst PC, #FRAME_P
+ | beq ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame.
+ | mvn CARG2, #~LJ_TTRUE
+ | mov BASE, RB
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | str CARG2, [RA, FRAME_PC] // Prepend true to results.
+ | sub RA, RA, #8
+ |
+ |->vm_returnc:
+ | add RC, RC, #8 // RC = (nresults+1)*8.
+ | ands CARG1, PC, #FRAME_TYPE
+ | str RC, SAVE_MULTRES
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return
+ | // CARG1 = PC & FRAME_TYPE
+ | bic RB, PC, #FRAME_TYPEP
+ | cmp CARG1, #FRAME_C
+ | sub RB, BASE, RB // RB = previous base.
+ | bne ->vm_returnp
+ |
+ | str RB, L->base
+ | ldr KBASE, SAVE_NRES
+ | mv_vmstate CARG4, C
+ | sub BASE, BASE, #8
+ | subs CARG3, RC, #8
+ | lsl KBASE, KBASE, #3 // KBASE = (nresults_wanted+1)*8
+ | st_vmstate CARG4
+ | beq >2
+ |1:
+ | subs CARG3, CARG3, #8
+ | ldrd CARG12, [RA], #8
+ | strd CARG12, [BASE], #8
+ | bne <1
+ |2:
+ | cmp KBASE, RC // More/less results wanted?
+ | bne >6
+ |3:
+ | str BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | ldr RC, SAVE_CFRAME // Restore previous C frame.
+ | mov CRET1, #0 // Ok return status for vm_pcall.
+ | str RC, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs_ret
+ |
+ |6:
+ | blt >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | ldr CARG3, L->maxstack
+ | mvn CARG2, #~LJ_TNIL
+ | cmp BASE, CARG3
+ | bhs >8
+ | str CARG2, [BASE, #4]
+ | add RC, RC, #8
+ | add BASE, BASE, #8
+ | b <2
+ |
+ |7: // Less results wanted.
+ | sub CARG1, RC, KBASE
+ | cmp KBASE, #0 // LUA_MULTRET+1 case?
+ | subne BASE, BASE, CARG1 // Either keep top or shrink it.
+ | b <3
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | str BASE, L->top // Save current top held in BASE (yes).
+ | mov CARG2, KBASE
+ | mov CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->top // Need the (realloced) L->top in BASE.
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mov sp, CARG1
+ | mov CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | ldr L, SAVE_L
+ | mv_vmstate CARG4, C
+ | ldr GL:CARG3, L->glref
+ | str CARG4, GL:CARG3->vmstate
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | bic CARG1, CARG1, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated.
+ | mov sp, CARG1
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | ldr L, SAVE_L
+ | mov MASKR8, #255
+ | mov RC, #16 // 2 results: false + error message.
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | ldr BASE, L->base
+ | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mvn CARG1, #~LJ_TFALSE
+ | sub RA, BASE, #8 // Results start at BASE-8.
+ | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame.
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | mv_vmstate CARG2, INTERP
+ | str CARG1, [BASE, #-4] // Prepend false to error message.
+ | st_vmstate CARG2
+ | b ->vm_returnc
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | // CARG1 = L
+ | mov CARG2, #LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | mov CARG1, L
+ | str BASE, L->base
+ | add PC, PC, #4 // Must point after first instruction.
+ | str RC, L->top
+ | lsr CARG3, RA, #3
+ |2:
+ | // L->base = new base, L->top = top
+ | str PC, SAVE_PC
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | ldr RC, L->top
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mov L, CARG1
+ | ldr DISPATCH, L:CARG1->glref // Setup pointer to dispatch table.
+ | mov BASE, CARG2
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | str L, SAVE_L
+ | mov PC, #FRAME_CP
+ | str CARG3, SAVE_NRES
+ | add CARG2, sp, #CFRAME_RESUME
+ | ldrb CARG1, L->status
+ | str CARG3, SAVE_ERRF
+ | str CARG2, L->cframe
+ | str CARG3, SAVE_CFRAME
+ | cmp CARG1, #0
+ | str L, SAVE_PC // Any value outside of bytecode is ok.
+ | beq >3
+ |
+ | // Resume after yield (like a return).
+ | mov RA, BASE
+ | ldr BASE, L->base
+ | ldr CARG1, L->top
+ | mov MASKR8, #255
+ | strb CARG3, L->status
+ | sub RC, CARG1, BASE
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | mv_vmstate CARG2, INTERP
+ | add RC, RC, #8
+ | ands CARG1, PC, #FRAME_TYPE
+ | st_vmstate CARG2
+ | str RC, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | mov PC, #FRAME_CP
+ | str CARG4, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | mov PC, #FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | ldr RC, L:CARG1->cframe
+ | str CARG3, SAVE_NRES
+ | mov L, CARG1
+ | str CARG1, SAVE_L
+ | mov BASE, CARG2
+ | str sp, L->cframe // Add our C frame to cframe chain.
+ | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | str RC, SAVE_CFRAME
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | ldr RB, L->base // RB = old base (for vmeta_call).
+ | ldr CARG1, L->top
+ | mov MASKR8, #255
+ | add PC, PC, BASE
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | sub PC, PC, RB // PC = frame delta + frame type
+ | mv_vmstate CARG2, INTERP
+ | sub NARGS8:RC, CARG1, BASE
+ | st_vmstate CARG2
+ |
+ |->vm_call_dispatch:
+ | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | ldrd CARG34, [BASE, FRAME_FUNC]
+ | checkfunc CARG4, ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mov L, CARG1
+ | ldr RA, L:CARG1->stack
+ | str CARG1, SAVE_L
+ | ldr RB, L->top
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | ldr RC, L->cframe
+ | sub RA, RA, RB // Compute -savestack(L, L->top).
+ | str sp, L->cframe // Add our C frame to cframe chain.
+ | mov RB, #0
+ | str RA, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | str RB, SAVE_ERRF // No error function.
+ | str RC, SAVE_CFRAME
+ | blx CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
+ | movs BASE, CRET1
+ | mov PC, #FRAME_CP
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | bne <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8
+ | ldr LFUNC:CARG3, [RB, FRAME_FUNC]
+ | ldr CARG1, [BASE, #-16] // Get continuation.
+ | mov CARG4, BASE
+ | mov BASE, RB // Restore caller BASE.
+#if LJ_HASFFI
+ | cmp CARG1, #1
+#endif
+ | ldr PC, [CARG4, #-12] // Restore PC from [cont|PC].
+ | ldr CARG3, LFUNC:CARG3->field_pc
+ | mvn INS, #~LJ_TNIL
+ | add CARG2, RA, RC
+ | str INS, [CARG2, #-4] // Ensure one valid arg.
+#if LJ_HASFFI
+ | bls >1
+#endif
+ | ldr KBASE, [CARG3, #PC2PROTO(k)]
+ | // BASE = base, RA = resultptr, CARG4 = meta base
+ | bx CARG1
+ |
+#if LJ_HASFFI
+ |1:
+ | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | sub CARG4, CARG4, #16
+ | sub RC, CARG4, BASE
+ | b ->vm_call_tail
+#endif
+ |
+ |->cont_cat: // RA = resultptr, CARG4 = meta base
+ | ldr INS, [PC, #-4]
+ | sub CARG2, CARG4, #16
+ | ldrd CARG34, [RA]
+ | str BASE, L->base
+ | decode_RB8 RC, INS
+ | decode_RA8 RA, INS
+ | add CARG1, BASE, RC
+ | subs CARG1, CARG2, CARG1
+ | strdne CARG34, [CARG2]
+ | movne CARG3, CARG1
+ | bne ->BC_CAT_Z
+ | strd CARG34, [BASE, RA]
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | add CARG2, BASE, RB
+ | b >2
+ |
+ |->vmeta_tgets:
+ | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv)
+ | mvn CARG4, #~LJ_TTAB
+ | str TAB:RB, [CARG2]
+ | str CARG4, [CARG2, #4]
+ |2:
+ | mvn CARG4, #~LJ_TSTR
+ | str STR:RC, TMPDlo
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tgetb: // RC = index
+ | decode_RB8 RB, INS
+ | str RC, TMPDlo
+ | mvn CARG4, #~LJ_TISNUM
+ | add CARG2, BASE, RB
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tgetv:
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | beq >3
+ | ldrd CARG34, [CRET1]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | rsb CARG1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #16 // 2 args for func(t, k).
+ | str PC, [BASE, #-12] // [cont|PC]
+ | add PC, CARG1, BASE
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | b ->vm_call_dispatch_f
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | add CARG2, BASE, RB
+ | b >2
+ |
+ |->vmeta_tsets:
+ | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv)
+ | mvn CARG4, #~LJ_TTAB
+ | str TAB:RB, [CARG2]
+ | str CARG4, [CARG2, #4]
+ |2:
+ | mvn CARG4, #~LJ_TSTR
+ | str STR:RC, TMPDlo
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tsetb: // RC = index
+ | decode_RB8 RB, INS
+ | str RC, TMPDlo
+ | mvn CARG4, #~LJ_TISNUM
+ | add CARG2, BASE, RB
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | ldrd CARG34, [BASE, RA]
+ | beq >3
+ | ins_next1
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | strd CARG34, [CRET1]
+ | ins_next2
+ | ins_next3
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | rsb CARG1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #24 // 3 args for func(t, k, v).
+ | strd CARG34, [BASE, #16] // Copy value to third argument.
+ | str PC, [BASE, #-12] // [cont|PC]
+ | add PC, CARG1, BASE
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | b ->vm_call_dispatch_f
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | mov CARG1, L
+ | sub PC, PC, #4
+ | mov CARG2, RA
+ | str BASE, L->base
+ | mov CARG3, RC
+ | str PC, SAVE_PC
+ | decode_OP CARG4, INS
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | IOS ldr BASE, L->base
+ | cmp CRET1, #1
+ | bhi ->vmeta_binop
+ |4:
+ | ldrh RB, [PC, #2]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | subhs PC, RB, #0x20000
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | ldr INS, [PC, #-4]
+ | ldrd CARG12, [RA]
+ | decode_RA8 CARG3, INS
+ | strd CARG12, [BASE, CARG3]
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | ldr CARG2, [RA, #4]
+ | mvn CARG1, #~LJ_TTRUE
+ | cmp CARG1, CARG2 // Branch if result is true.
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | ldr CARG2, [RA, #4]
+ | checktp CARG2, LJ_TFALSE // Branch if result is false.
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |->vmeta_equal_cd:
+#if LJ_HASFFI
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | mov CARG2, INS
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+#endif
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vn:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG3, BASE, RB
+ | add CARG4, KBASE, RC
+ | b >1
+ |
+ |->vmeta_arith_nv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG4, BASE, RB
+ | add CARG3, KBASE, RC
+ | b >1
+ |
+ |->vmeta_unm:
+ | ldr INS, [PC, #-8]
+ | sub PC, PC, #4
+ | add CARG3, BASE, RC
+ | add CARG4, BASE, RC
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG3, BASE, RB
+ | add CARG4, BASE, RC
+ |1:
+ | decode_OP OP, INS
+ | add CARG2, BASE, RA
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | str OP, ARG5
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | beq ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub CARG2, CRET1, BASE
+ | str PC, [CRET1, #-12] // [cont|PC]
+ | add PC, CARG2, #FRAME_CONT
+ | mov BASE, CRET1
+ | mov NARGS8:RC, #16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+ | add CARG2, BASE, RC
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+ | IOS ldr BASE, L->base
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | cmp CRET1, #0
+ | bne ->vmeta_binop // Binop call for compatibility.
+ | ldr TAB:CARG1, [BASE, RC]
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // RB = old base, BASE = new base, RC = nargs*8
+ | mov CARG1, L
+ | str RB, L->base // This is the callers base!
+ | sub CARG2, BASE, #8
+ | str PC, SAVE_PC
+ | add CARG3, BASE, NARGS8:RC
+ | IOS mov RA, BASE
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | IOS mov BASE, RA
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub CARG2, RA, #8
+ | str PC, SAVE_PC
+ | add CARG3, RA, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | IOS ldr BASE, L->base
+ | ldr LFUNC:CARG3, [RA, FRAME_FUNC] // Guaranteed to be a function here.
+ | ldr PC, [BASE, FRAME_PC]
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | b ->BC_CALLT2_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, RA
+ | str PC, SAVE_PC
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+ | IOS ldr BASE, L->base
+#if LJ_HASJIT
+ | ldrb OP, [PC, #-4]
+#endif
+ | ldr INS, [PC, #-4]
+#if LJ_HASJIT
+ | cmp OP, #BC_JFORI
+#endif
+ | decode_RA8 RA, INS
+ | decode_RD RC, INS
+#if LJ_HASJIT
+ | beq =>BC_JFORI
+#endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | ldrd CARG12, [BASE]
+ | ldrd CARG34, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc_1 name
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc_2 name
+ | checktp CARG2, LJ_TISNUM
+ | cmnlo CARG4, #-LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2.
+ |.macro ffgccheck
+ | ldr CARG1, [DISPATCH, #DISPATCH_GL(gc.total)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(gc.threshold)]
+ | cmp CARG1, CARG2
+ | blge ->fff_gcstep
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | checktp CARG2, LJ_TTRUE
+ | bhi ->fff_fallback
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG12, [BASE, #-8]
+ | mov RB, BASE
+ | subs RA, NARGS8:RC, #8
+ | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8.
+ | beq ->fff_res // Done if exactly 1 argument.
+ |1:
+ | ldrd CARG12, [RB, #8]
+ | subs RA, RA, #8
+ | strd CARG12, [RB], #8
+ | bne <1
+ | b ->fff_res
+ |
+ |.ffunc type
+ | ldr CARG2, [BASE, #4]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | mvnlo CARG2, #~LJ_TISNUM
+ | rsb CARG4, CARG2, #(int)(offsetof(GCfuncC, upvalue)>>3)-1
+ | lsl CARG4, CARG4, #3
+ | ldrd CARG12, [CFUNC:CARG3, CARG4]
+ | b ->fff_restv
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | checktp CARG2, LJ_TTAB
+ | cmnne CARG2, #-LJ_TUDATA
+ | bne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | ldr TAB:RB, TAB:CARG1->metatable
+ |2:
+ | mvn CARG2, #~LJ_TNIL
+ | ldr STR:RC, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])]
+ | cmp TAB:RB, #0
+ | beq ->fff_restv
+ | ldr CARG3, TAB:RB->hmask
+ | ldr CARG4, STR:RC->hash
+ | ldr NODE:INS, TAB:RB->node
+ | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask
+ | add CARG3, CARG3, CARG3, lsl #1
+ | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | ldrd CARG34, NODE:INS->key // STALL: early NODE:INS.
+ | ldrd CARG12, NODE:INS->val
+ | ldr NODE:INS, NODE:INS->next
+ | checktp CARG4, LJ_TSTR
+ | cmpeq CARG3, STR:RC
+ | beq >5
+ | cmp NODE:INS, #0
+ | bne <3
+ |4:
+ | mov CARG1, RB // Use metatable as default result.
+ | mvn CARG2, #~LJ_TTAB
+ | b ->fff_restv
+ |5:
+ | checktp CARG2, LJ_TNIL
+ | bne ->fff_restv
+ | b <4
+ |
+ |6:
+ | checktp CARG2, LJ_TISNUM
+ | mvnhs CARG2, CARG2
+ | movlo CARG2, #~LJ_TISNUM
+ | add CARG4, DISPATCH, CARG2, lsl #2
+ | ldr TAB:RB, [CARG4, #DISPATCH_GL(gcroot[GCROOT_BASEMT])]
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktp CARG2, LJ_TTAB
+ | ldreq TAB:RB, TAB:CARG1->metatable
+ | checktpeq CARG4, LJ_TTAB
+ | ldrbeq CARG4, TAB:CARG1->marked
+ | cmpeq TAB:RB, #0
+ | bne ->fff_fallback
+ | tst CARG4, #LJ_GC_BLACK // isblack(table)
+ | str TAB:CARG3, TAB:CARG1->metatable
+ | beq ->fff_restv
+ | barrierback TAB:CARG1, CARG4, CARG3
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | ldrd CARG34, [BASE]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | mov CARG2, CARG3
+ | checktab CARG4, ->fff_fallback
+ | mov CARG1, L
+ | add CARG3, BASE, #8
+ | IOS mov RA, BASE
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ | IOS mov BASE, RA
+ | ldrd CARG12, [CRET1]
+ | b ->fff_restv
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc_1 tonumber
+ | // Only handles the number case inline (without a base argument).
+ | checktp CARG2, LJ_TISNUM
+ | bls ->fff_restv
+ | b ->fff_fallback
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | checktp CARG2, LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beq ->fff_restv
+ | // Handle numbers inline, unless a number base metatable is present.
+ | ldr CARG4, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])]
+ | str BASE, L->base
+ | checktp CARG2, LJ_TISNUM
+ | cmpls CARG4, #0
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | bhi ->fff_fallback
+ | ffgccheck
+ | mov CARG1, L
+ | mov CARG2, BASE
+ | bl extern lj_str_fromnumber // (lua_State *L, cTValue *o)
+ | // Returns GCstr *.
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TSTR
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | mvn CARG4, #~LJ_TNIL
+ | checktab CARG2, ->fff_fallback
+ | strd CARG34, [BASE, NARGS8:RC] // Set missing 2nd arg to nil.
+ | ldr PC, [BASE, FRAME_PC]
+ | mov CARG2, CARG1
+ | str BASE, L->base // Add frame since C call can throw.
+ | mov CARG1, L
+ | str BASE, L->top // Dummy frame length is ok.
+ | add CARG3, BASE, #8
+ | str PC, SAVE_PC
+ | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
+ | // Returns 0 at end of traversal.
+ | IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | mvneq CRET2, #~LJ_TNIL
+ | beq ->fff_restv // End of traversal: return nil.
+ | ldrd CARG12, [BASE, #8] // Copy key and value to results.
+ | ldrd CARG34, [BASE, #16]
+ | mov RC, #(2+1)*8
+ | strd CARG12, [BASE, #-8]
+ | strd CARG34, [BASE]
+ | b ->fff_res
+ |
+ |.ffunc_1 pairs
+ | checktab CARG2, ->fff_fallback
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | ldr TAB:RB, TAB:CARG1->metatable
+#endif
+ | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | cmp TAB:RB, #0
+ | bne ->fff_fallback
+#endif
+ | mvn CARG2, #~LJ_TNIL
+ | mov RC, #(3+1)*8
+ | strd CFUNC:CARG34, [BASE, #-8]
+ | str CARG2, [BASE, #12]
+ | b ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | checktp CARG2, LJ_TTAB
+ | checktpeq CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | ldr RB, TAB:CARG1->asize
+ | ldr RC, TAB:CARG1->array
+ | add CARG3, CARG3, #1
+ | ldr PC, [BASE, FRAME_PC]
+ | cmp CARG3, RB
+ | add RC, RC, CARG3, lsl #3
+ | strd CARG34, [BASE, #-8]
+ | ldrdlo CARG12, [RC]
+ | mov RC, #(0+1)*8
+ | bhs >2 // Not in array part?
+ |1:
+ | checktp CARG2, LJ_TNIL
+ | movne RC, #(2+1)*8
+ | strdne CARG12, [BASE]
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | ldr RB, TAB:CARG1->hmask
+ | mov CARG2, CARG3
+ | cmp RB, #0
+ | beq ->fff_res
+ | IOS mov RA, BASE
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | IOS mov BASE, RA
+ | cmp CRET1, #0
+ | beq ->fff_res
+ | ldrd CARG12, [CRET1]
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktab CARG2, ->fff_fallback
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | ldr TAB:RB, TAB:CARG1->metatable
+#endif
+ | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | cmp TAB:RB, #0
+ | bne ->fff_fallback
+#endif
+ | mov CARG1, #0
+ | mvn CARG2, #~LJ_TISNUM
+ | mov RC, #(3+1)*8
+ | strd CFUNC:CARG34, [BASE, #-8]
+ | strd CARG12, [BASE, #8]
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | tst RA, #HOOK_ACTIVE // Remember active hook before pcall.
+ | mov RB, BASE
+ | add BASE, BASE, #8
+ | moveq PC, #8+FRAME_PCALL
+ | movne PC, #8+FRAME_PCALLH
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | b ->vm_call_dispatch
+ |
+ |.ffunc_2 xpcall
+ | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | checkfunc CARG4, ->fff_fallback // Traceback must be a function.
+ | mov RB, BASE
+ | strd CARG12, [BASE, #8] // Swap function and traceback.
+ | strd CARG34, [BASE]
+ | tst RA, #HOOK_ACTIVE // Remember active hook before pcall.
+ | add BASE, BASE, #16
+ | moveq PC, #16+FRAME_PCALL
+ | movne PC, #16+FRAME_PCALLH
+ | sub NARGS8:RC, NARGS8:RC, #16
+ | b ->vm_call_dispatch
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | checktp CARG2, LJ_TTHREAD
+ | bne ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr
+ |.endif
+ | ldr PC, [BASE, FRAME_PC]
+ | str BASE, L->base
+ | ldr CARG2, L:CARG1->top
+ | ldrb RA, L:CARG1->status
+ | ldr RB, L:CARG1->base
+ | add CARG3, CARG2, NARGS8:RC
+ | add CARG4, CARG2, RA
+ | str PC, SAVE_PC
+ | cmp CARG4, RB
+ | beq ->fff_fallback
+ | ldr CARG4, L:CARG1->maxstack
+ | ldr RB, L:CARG1->cframe
+ | cmp RA, #LUA_YIELD
+ | cmpls CARG3, CARG4
+ | cmpls RB, #0
+ | bhi ->fff_fallback
+ |1:
+ |.if resume
+ | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC.
+ | add BASE, BASE, #8
+ | sub NARGS8:RC, NARGS8:RC, #8
+ |.endif
+ | str CARG3, L:CARG1->top
+ | str BASE, L->top
+ |2: // Move args to coroutine.
+ | ldrd CARG34, [BASE, RB]
+ | cmp RB, NARGS8:RC
+ | strdne CARG34, [CARG2, RB]
+ | add RB, RB, #8
+ | bne <2
+ |
+ | mov CARG3, #0
+ | mov L:RA, L:CARG1
+ | mov CARG4, #0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | ldr CARG3, L:RA->base
+ | mv_vmstate CARG2, INTERP
+ | ldr CARG4, L:RA->top
+ | st_vmstate CARG2
+ | cmp CRET1, #LUA_YIELD
+ | ldr BASE, L->base
+ | bhi >8
+ | subs RC, CARG4, CARG3
+ | ldr CARG1, L->maxstack
+ | add CARG2, BASE, RC
+ | beq >6 // No results?
+ | cmp CARG2, CARG1
+ | mov RB, #0
+ | bhi >9 // Need to grow stack?
+ |
+ | sub CARG4, RC, #8
+ | str CARG3, L:RA->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | ldrd CARG12, [CARG3, RB]
+ | cmp RB, CARG4
+ | strd CARG12, [BASE, RB]
+ | add RB, RB, #8
+ | bne <5
+ |6:
+ |.if resume
+ | mvn CARG3, #~LJ_TTRUE
+ | add RC, RC, #16
+ |7:
+ | str CARG3, [BASE, #-4] // Prepend true/false to results.
+ | sub RA, BASE, #8
+ |.else
+ | mov RA, BASE
+ | add RC, RC, #8
+ |.endif
+ | ands CARG1, PC, #FRAME_TYPE
+ | str PC, SAVE_PC
+ | str RC, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | ldrd CARG12, [CARG4, #-8]!
+ | mvn CARG3, #~LJ_TFALSE
+ | mov RC, #(2+1)*8
+ | str CARG4, L:RA->top // Remove error from coroutine stack.
+ | strd CARG12, [BASE] // Copy error message.
+ | b <7
+ |.else
+ | mov CARG1, L
+ | mov CARG2, L:RA
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ | // Never returns.
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mov CARG1, L
+ | lsr CARG2, RC, #3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | mov CRET1, #0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | ldr CARG1, L->cframe
+ | add CARG2, BASE, NARGS8:RC
+ | str BASE, L->base
+ | tst CARG1, #CFRAME_RESUME
+ | str CARG2, L->top
+ | mov CRET1, #LUA_YIELD
+ | mov CARG3, #0
+ | beq ->fff_fallback
+ | str CARG3, L->cframe
+ | strb CRET1, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.macro math_round, func
+ | .ffunc_1 math_ .. func
+ | checktp CARG2, LJ_TISNUM
+ | beq ->fff_restv
+ | bhi ->fff_fallback
+ | // Round FP value and normalize result.
+ | lsl CARG3, CARG2, #1
+ | adds RB, CARG3, #0x00200000
+ | bpl >2 // |x| < 1?
+ | mvn CARG4, #0x3e0
+ | subs RB, CARG4, RB, asr #21
+ | lsl CARG4, CARG2, #11
+ | lsl CARG3, CARG1, #11
+ | orr CARG4, CARG4, #0x80000000
+ | rsb INS, RB, #32
+ | orr CARG4, CARG4, CARG1, lsr #21
+ | bls >3 // |x| >= 2^31?
+ | orr CARG3, CARG3, CARG4, lsl INS
+ | lsr CARG1, CARG4, RB
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31
+ | addne CARG1, CARG1, #1
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31
+ | addsne CARG1, CARG1, #1
+ | ldrdvs CARG12, >9
+ | bvs ->fff_restv
+ |.endif
+ | cmp CARG2, #0
+ | rsblt CARG1, CARG1, #0
+ |1:
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |2: // |x| < 1
+ | orr CARG3, CARG3, CARG1 // ztest = abs(hi) | lo
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31 // return (ztest & sign) == 0 ? 0 : -1
+ | moveq CARG1, #0
+ | mvnne CARG1, #0
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31 // return (ztest & ~sign) == 0 ? 0 : 1
+ | moveq CARG1, #0
+ | movne CARG1, #1
+ |.endif
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |3: // |x| >= 2^31. Check for x == -(2^31).
+ | cmpeq CARG4, #0x80000000
+ |.if "func" == "floor"
+ | cmpeq CARG3, #0
+ |.endif
+ | bne >4
+ | cmp CARG2, #0
+ | movmi CARG1, #0x80000000
+ | bmi <1
+ |4:
+ | bl ->vm_..func
+ | b ->fff_restv
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.align 8
+ |9:
+ | .long 0x00000000, 0x41e00000 // 2^31.
+ |
+ |.ffunc_1 math_abs
+ | checktp CARG2, LJ_TISNUM
+ | bhi ->fff_fallback
+ | bicne CARG2, CARG2, #0x80000000
+ | bne ->fff_restv
+ | cmp CARG1, #0
+ | rsbslt CARG1, CARG1, #0
+ | ldrdvs CARG12, <9
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CARG12 = TValue result.
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG12, [BASE, #-8]
+ |->fff_res1:
+ | // PC = return.
+ | mov RC, #(1+1)*8
+ |->fff_res:
+ | // RC = (nresults+1)*8, PC = return.
+ | ands CARG1, PC, #FRAME_TYPE
+ | ldreq INS, [PC, #-4]
+ | str RC, SAVE_MULTRES
+ | sub RA, BASE, #8
+ | bne ->vm_return
+ | decode_RB8 RB, INS
+ |5:
+ | cmp RB, RC // More results expected?
+ | bhi >6
+ | decode_RA8 CARG1, INS
+ | ins_next1
+ | ins_next2
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, CARG1
+ | ins_next3
+ |
+ |6: // Fill up results with nil.
+ | add CARG2, RA, RC
+ | mvn CARG1, #~LJ_TNIL
+ | add RC, RC, #8
+ | str CARG1, [CARG2, #-4]
+ | b <5
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | IOS mov RA, BASE
+ | bl extern func
+ | IOS mov BASE, RA
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ | IOS mov RA, BASE
+ | bl extern func
+ | IOS mov BASE, RA
+ | b ->fff_restv
+ |.endmacro
+ |
+ | math_extern sqrt
+ | math_extern log
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |->ff_math_deg:
+ |.ffunc_n math_rad
+ | ldrd CARG34, CFUNC:CARG3->upvalue[0]
+ | bl extern __aeabi_dmul
+ | b ->fff_restv
+ |
+ |.ffunc_2 math_ldexp
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ | checktp CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | IOS mov RA, BASE
+ | bl extern ldexp // (double x, int exp)
+ | IOS mov BASE, RA
+ | b ->fff_restv
+ |
+ |.ffunc_n math_frexp
+ | mov CARG3, sp
+ | IOS mov RA, BASE
+ | bl extern frexp
+ | IOS mov BASE, RA
+ | ldr CARG3, [sp]
+ | mvn CARG4, #~LJ_TISNUM
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG12, [BASE, #-8]
+ | mov RC, #(2+1)*8
+ | strd CARG34, [BASE]
+ | b ->fff_res
+ |
+ |.ffunc_n math_modf
+ | sub CARG3, BASE, #8
+ | ldr PC, [BASE, FRAME_PC]
+ | IOS mov RA, BASE
+ | bl extern modf
+ | IOS mov BASE, RA
+ | mov RC, #(2+1)*8
+ | strd CARG12, [BASE]
+ | b ->fff_res
+ |
+ |.macro math_minmax, name, cond, fcond
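+ | // Note: cond/fcond give the condition under which the next operand
+ | // replaces the running result (integer compare resp. FP compare).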
+ | .ffunc_1 name
+ | checktp CARG2, LJ_TISNUM
+ | mov RA, #8
+ | bne >4
+ |1: // Handle integers.
+ | ldrd CARG34, [BASE, RA]
+ | cmp RA, RC
+ | bhs ->fff_restv
+ | checktp CARG4, LJ_TISNUM
+ | bne >3
+ | cmp CARG1, CARG3
+ | add RA, RA, #8
+ | mov..cond CARG1, CARG3
+ | b <1
+ |3:
+ | bhi ->fff_fallback
+ | // Convert intermediate result to number and continue below.
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, [BASE, RA]
+ | b >6
+ |
+ |4:
+ | bhi ->fff_fallback
+ |5: // Handle numbers.
+ | ldrd CARG34, [BASE, RA]
+ | cmp RA, RC
+ | bhs ->fff_restv
+ | checktp CARG4, LJ_TISNUM
+ | bhs >7
+ |6:
+ | bl extern __aeabi_cdcmple
+ | add RA, RA, #8
+ | mov..fcond CARG1, CARG3
+ | mov..fcond CARG2, CARG4
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | bhi ->fff_fallback
+ | strd CARG12, TMPD
+ | mov CARG1, CARG3
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, TMPD
+ | b <6
+ |.endmacro
+ |
+ | math_minmax math_min, gt, hi
+ | math_minmax math_max, lt, lo
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc_1 string_len
+ | checkstr CARG2, ->fff_fallback
+ | ldr CARG1, STR:CARG1->len
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | ldrd CARG12, [BASE]
+ | ldr PC, [BASE, FRAME_PC]
+ | cmp NARGS8:RC, #8
+ | checktpeq CARG2, LJ_TSTR // Need exactly 1 argument.
+ | bne ->fff_fallback
+ | ldr CARG3, STR:CARG1->len
+ | ldrb CARG1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | mvn CARG2, #~LJ_TISNUM
+ | cmp CARG3, #0
+ | moveq RC, #(0+1)*8
+ | movne RC, #(1+1)*8
+ | strd CARG12, [BASE, #-8]
+ | b ->fff_res
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | ldr PC, [BASE, FRAME_PC]
+ | cmp NARGS8:RC, #8 // Need exactly 1 argument.
+ | checktpeq CARG2, LJ_TISNUM
+ | bicseq CARG4, CARG1, #255
+ | mov CARG3, #1
+ | bne ->fff_fallback
+ | str CARG1, TMPD
+ | mov CARG2, TMPDp // Points to stack. Little-endian.
+ |->fff_newstr:
+ | // CARG2 = str, CARG3 = len.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ | // Returns GCstr *.
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TSTR
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | ldrd CARG34, [BASE, #16]
+ | cmp NARGS8:RC, #16
+ | mvn RB, #0
+ | beq >1
+ | blo ->fff_fallback
+ | checktp CARG4, LJ_TISNUM
+ | mov RB, CARG3
+ | bne ->fff_fallback
+ |1:
+ | ldrd CARG34, [BASE, #8]
+ | checktp CARG2, LJ_TSTR
+ | ldreq CARG2, STR:CARG1->len
+ | checktpeq CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | // CARG1 = str, CARG2 = str->len, CARG3 = start, RB = end
+ | add CARG4, CARG2, #1
+ | cmp CARG3, #0 // if (start < 0) start += len+1
+ | addlt CARG3, CARG3, CARG4
+ | cmp CARG3, #1 // if (start < 1) start = 1
+ | movlt CARG3, #1
+ | cmp RB, #0 // if (end < 0) end += len+1
+ | addlt RB, RB, CARG4
+ | bic RB, RB, RB, asr #31 // if (end < 0) end = 0
+ | cmp RB, CARG2 // if (end > len) end = len
+ | add CARG1, STR:CARG1, #sizeof(GCstr)-1
+ | movgt RB, CARG2
+ | add CARG2, CARG1, CARG3
+ | subs CARG3, RB, CARG3 // len = end - start
+ | add CARG3, CARG3, #1 // len += 1
+ | bge ->fff_newstr
+ |->fff_emptystr:
+ | sub STR:CARG1, DISPATCH, #-DISPATCH_GL(strempty)
+ | mvn CARG2, #~LJ_TSTR
+ | b ->fff_restv
+ |
+ |.ffunc string_rep // Only handle the 1-char case inline.
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | ldrd CARG34, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TSTR
+ | checktpeq CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | subs CARG4, CARG3, #1
+ | ldr CARG2, STR:CARG1->len
+ | blt ->fff_emptystr // Count <= 0?
+ | cmp CARG2, #1
+ | blo ->fff_emptystr // Zero-length string?
+ | bne ->fff_fallback // Fallback for > 1-char strings.
+ | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
+ | ldr CARG1, STR:CARG1[1]
+ | cmp RB, CARG3
+ | blo ->fff_fallback
+ |1: // Fill buffer with char.
+ | strb CARG1, [CARG2, CARG4]
+ | subs CARG4, CARG4, #1
+ | bge <1
+ | b ->fff_newstr
+ |
+ |.ffunc string_reverse
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | checkstr CARG2, ->fff_fallback
+ | ldr CARG3, STR:CARG1->len
+ | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
+ | mov CARG4, CARG3
+ | add CARG1, STR:CARG1, #sizeof(GCstr)
+ | cmp RB, CARG3
+ | blo ->fff_fallback
+ |1: // Reverse string copy.
+ | ldrb RB, [CARG1], #1
+ | subs CARG4, CARG4, #1
+ | blt ->fff_newstr
+ | strb RB, [CARG2, CARG4]
+ | b <1
+ |
+ |.macro ffstring_case, name, lo
+ | .ffunc name
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | checkstr CARG2, ->fff_fallback
+ | ldr CARG3, STR:CARG1->len
+ | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
+ | mov CARG4, #0
+ | add CARG1, STR:CARG1, #sizeof(GCstr)
+ | cmp RB, CARG3
+ | blo ->fff_fallback
+ |1: // ASCII case conversion.
+ | ldrb RB, [CARG1, CARG4]
+ | cmp CARG4, CARG3
+ | bhs ->fff_newstr
+ | sub RC, RB, #lo
+ | cmp RC, #26
+ | eorlo RB, RB, #0x20
+ | strb RB, [CARG2, CARG4]
+ | add CARG4, CARG4, #1
+ | b <1
+ |.endmacro
+ |
+ |ffstring_case string_lower, 65
+ |ffstring_case string_upper, 97
+ |
+ |//-- Table library ------------------------------------------------------
+ |
+ |.ffunc_1 table_getn
+ | checktab CARG2, ->fff_fallback
+ | IOS mov RA, BASE
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | IOS mov BASE, RA
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |// FP number to bit conversion for soft-float. Clobbers r0-r3.
+ |->vm_tobit_fb:
+ | bhi ->fff_fallback
+ |->vm_tobit:
+ | lsl RB, CARG2, #1
+ | adds RB, RB, #0x00200000
+ | movpl CARG1, #0 // |x| < 1?
+ | bxpl lr
+ | mvn CARG4, #0x3e0
+ | subs RB, CARG4, RB, asr #21
+ | bmi >1 // |x| >= 2^32?
+ | lsl CARG4, CARG2, #11
+ | orr CARG4, CARG4, #0x80000000
+ | orr CARG4, CARG4, CARG1, lsr #21
+ | cmp CARG2, #0
+ | lsr CARG1, CARG4, RB
+ | rsblt CARG1, CARG1, #0
+ | bx lr
+ |1:
+ | add RB, RB, #21
+ | lsr CARG4, CARG1, RB
+ | rsb RB, RB, #20
+ | lsl CARG1, CARG2, #12
+ | cmp CARG2, #0
+ | orr CARG1, CARG4, CARG1, lsl RB
+ | rsblt CARG1, CARG1, #0
+ | bx lr
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_1 bit_..name
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ |.endmacro
+ |
+ |.ffunc_bit tobit
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | mov CARG3, CARG1
+ | mov RA, #8
+ |1:
+ | ldrd CARG12, [BASE, RA]
+ | cmp RA, NARGS8:RC
+ | add RA, RA, #8
+ | bge >2
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ | ins CARG3, CARG3, CARG1
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, orr
+ |.ffunc_bit_op bxor, eor
+ |
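+ | // Label 2 is the result epilogue shared by the band/bor/bxor folds
+ | // above (reached via the bge >2 inside .ffunc_bit_op).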
+ |2:
+ | mvn CARG4, #~LJ_TISNUM
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG34, [BASE, #-8]
+ | b ->fff_res1
+ |
+ |.ffunc_bit bswap
+ | eor CARG3, CARG1, CARG1, ror #16
+ | bic CARG3, CARG3, #0x00ff0000
+ | ror CARG1, CARG1, #8
+ | mvn CARG2, #~LJ_TISNUM
+ | eor CARG1, CARG1, CARG3, lsr #8
+ | b ->fff_restv
+ |
+ |.ffunc_bit bnot
+ | mvn CARG1, CARG1
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc bit_..name
+ | ldrd CARG12, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ |.if shmod == 0
+ | and RA, CARG1, #31
+ |.else
+ | rsb RA, CARG1, #0
+ |.endif
+ | ldrd CARG12, [BASE]
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ | ins CARG1, CARG1, RA
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, lsl, 0
+ |.ffunc_bit_sh rshift, lsr, 0
+ |.ffunc_bit_sh arshift, asr, 0
+ |.ffunc_bit_sh rol, ror, 1
+ |.ffunc_bit_sh ror, ror, 0
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RC = nargs*8
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | ldr CARG2, L->maxstack
+ | add CARG1, BASE, NARGS8:RC
+ | ldr PC, [BASE, FRAME_PC] // Fallback may overwrite PC.
+ | str CARG1, L->top
+ | ldr CARG3, CFUNC:CARG3->f
+ | str BASE, L->base
+ | add CARG1, CARG1, #8*LUA_MINSTACK
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | cmp CARG1, CARG2
+ | mov CARG1, L
+ | bhi >5 // Need to grow stack.
+ | blx CARG3 // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | ldr BASE, L->base
+ | cmp CRET1, #0
+ | lsl RC, CRET1, #3
+ | sub RA, BASE, #8
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | ldr CARG1, L->top
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, CARG1, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | ands CARG1, PC, #FRAME_TYPE
+ | bic CARG2, PC, #FRAME_TYPEP
+ | ldreq INS, [PC, #-4]
+ | andeq CARG2, MASKR8, INS, lsr #5 // Conditional decode_RA8.
+ | sub RB, BASE, CARG2
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | mov CARG2, #LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | cmp CARG1, CARG1 // Set zero-flag to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | mov RA, lr
+ | str BASE, L->base
+ | add CARG2, BASE, NARGS8:RC
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | str CARG2, L->top
+ | mov CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | ldr BASE, L->base
+ | mov lr, RA // Help return address predictor.
+ | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
+ | bx lr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+#if LJ_HASJIT
+ | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent.
+ | bne >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | tst CARG1, #HOOK_ACTIVE
+ | bne >1
+ | sub CARG2, CARG2, #1
+ | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
+ | strne CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | b >1
+#endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | tst CARG1, #HOOK_ACTIVE // Hook already active?
+ | beq >1
+ |5: // Re-dispatch to static ins.
+ | decode_OP OP, INS
+ | add OP, DISPATCH, OP, lsl #2
+ | ldr pc, [OP, #GG_DISP2STATIC]
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | tst CARG1, #HOOK_ACTIVE // Hook already active?
+ | bne <5
+ | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
+ | beq <5
+ | subs CARG2, CARG2, #1
+ | str CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | beq >1
+ | tst CARG1, #LUA_MASKLINE
+ | beq <5
+ |1:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, PC
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | ldr BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | ldrb OP, [PC, #-4]
+ | ldr INS, [PC, #-4]
+ | add OP, DISPATCH, OP, lsl #2
+ | ldr OP, [OP, #GG_DISP2STATIC]
+ | decode_RA8 RA, INS
+ | decode_RD RC, INS
+ | bx OP
+ |
+ |->cont_hook: // Continue from hook yield.
+ | ldr CARG1, [CARG4, #-24]
+ | add PC, PC, #4
+ | str CARG1, SAVE_MULTRES // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+#if LJ_HASJIT
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L).
+ | sub CARG1, DISPATCH, #-GG_DISP2J
+ | str PC, SAVE_PC
+ | ldr CARG3, LFUNC:CARG3->field_pc
+ | mov CARG2, PC
+ | str L, [DISPATCH, #DISPATCH_J(L)]
+ | ldrb CARG3, [CARG3, #PC2PROTO(framesize)]
+ | str BASE, L->base
+ | add CARG3, BASE, CARG3, lsl #3
+ | str CARG3, L->top
+ | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | b <3
+#endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mov CARG2, PC
+#if LJ_HASJIT
+ | b >1
+#endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+#if LJ_HASJIT
+ | orr CARG2, PC, #1
+ |1:
+#endif
+ | add CARG4, BASE, RC
+ | str PC, SAVE_PC
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub RA, RA, BASE
+ | str CARG4, L->top
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | ldr BASE, L->base
+ | ldr CARG4, L->top
+ | mov CARG2, #0
+ | add RA, BASE, RA
+ | sub NARGS8:RC, CARG4, BASE
+ | str CARG2, SAVE_PC // Invalidate for subsequent line hook.
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | ldr INS, [PC, #-4]
+ | bx CRET1
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_exit_handler:
+#if LJ_HASJIT
+ | sub sp, sp, #12
+ | push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12}
+ | ldr CARG1, [sp, #64] // Load original value of lr.
+ | ldr DISPATCH, [lr] // Load DISPATCH.
+ | add CARG3, sp, #64 // Recompute original value of sp.
+ | mv_vmstate CARG4, EXIT
+ | str CARG3, [sp, #52] // Store sp in RID_SP
+ | st_vmstate CARG4
+ | ldr CARG2, [CARG1, #-4]! // Get exit instruction.
+ | str CARG1, [sp, #56] // Store exit pc in RID_LR and RID_PC.
+ | str CARG1, [sp, #60]
+ | lsl CARG2, CARG2, #8
+ | add CARG1, CARG1, CARG2, asr #6
+ | ldr CARG2, [lr, #4] // Load exit stub group offset.
+ | sub CARG1, CARG1, lr
+ | ldr L, [DISPATCH, #DISPATCH_GL(jit_L)]
+ | add CARG1, CARG2, CARG1, lsr #2 // Compute exit number.
+ | ldr BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
+ | str CARG1, [DISPATCH, #DISPATCH_J(exitno)]
+ | mov CARG4, #0
+ | str L, [DISPATCH, #DISPATCH_J(L)]
+ | str BASE, L->base
+ | str CARG4, [DISPATCH, #DISPATCH_GL(jit_L)]
+ | sub CARG1, DISPATCH, #-GG_DISP2J
+ | mov CARG2, sp
+ | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // Returns MULTRES (unscaled) or negated error code.
+ | ldr CARG2, L->cframe
+ | ldr BASE, L->base
+ | bic CARG2, CARG2, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated.
+ | mov sp, CARG2
+ | ldr PC, SAVE_PC // Get SAVE_PC.
+ | str L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ | b >1
+#endif
+ |->vm_exit_interp:
+ | // CARG1 = MULTRES or negated error code, BASE, PC and DISPATCH set.
+#if LJ_HASJIT
+ | ldr L, SAVE_L
+ |1:
+ | cmp CARG1, #0
+ | blt >3 // Check for error from exit.
+ | lsl RC, CARG1, #3
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | str RC, SAVE_MULTRES
+ | mov CARG3, #0
+ | ldr CARG2, LFUNC:CARG2->field_pc
+ | str CARG3, [DISPATCH, #DISPATCH_GL(jit_L)]
+ | mv_vmstate CARG4, INTERP
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | ldrb OP, [PC]
+ | mov MASKR8, #255
+ | ldr INS, [PC], #4
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | st_vmstate CARG4
+ | cmp OP, #BC_FUNCF // Function header?
+ | ldr OP, [DISPATCH, OP, lsl #2]
+ | decode_RA8 RA, INS
+ | lsrlo RC, INS, #16 // No: Decode operands A*8 and D.
+ | subhs RC, RC, #8
+ | addhs RA, RA, BASE // Yes: RA = BASE+framesize*8, RC = nargs*8
+ | bx OP
+ |
+ |3: // Rethrow error from the right C frame.
+ | rsb CARG2, CARG1, #0
+ | mov CARG1, L
+ | bl extern lj_err_throw // (lua_State *L, int errcode)
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// FP value rounding. Called from JIT code.
+ |//
+ |// double lj_vm_floor/ceil/trunc(double x);
+ |.macro vm_round, func
+ |->vm_ .. func:
+ | lsl CARG3, CARG2, #1
+ | adds RB, CARG3, #0x00200000
+ | bpl >2 // |x| < 1?
+ | mvn CARG4, #0x3cc
+ | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0.
+ | bxlo lr // |x| >= 2^52: done.
+ | mvn CARG4, #1
+ | bic CARG3, CARG1, CARG4, lsl RB // ztest = lo & ~lomask
+ | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask
+ | subs RB, RB, #32
+ | bicpl CARG4, CARG2, CARG4, lsl RB // |x| <= 2^20: ztest |= hi & ~himask
+ | orrpl CARG3, CARG3, CARG4
+ | mvnpl CARG4, #1
+ | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0)
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ | bxeq lr // iszero: done.
+ | mvn CARG4, #1
+ | cmp RB, #0
+ | lslpl CARG3, CARG4, RB
+ | mvnmi CARG3, #0
+ | add RB, RB, #32
+ | subs CARG1, CARG1, CARG4, lsl RB // lo = lo-lomask
+ | sbc CARG2, CARG2, CARG3 // hi = hi-himask+carry
+ | bx lr
+ |
+ |2: // |x| < 1:
+ | orr CARG3, CARG3, CARG1 // ztest = (2*hi) | lo
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0)
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ | mov CARG1, #0 // lo = 0
+ | and CARG2, CARG2, #0x80000000
+ | ldrne CARG4, <9 // hi = sign(x) | (iszero ? 0.0 : 1.0)
+ | orrne CARG2, CARG2, CARG4
+ | bx lr
+ |.endmacro
+ |
+ |9:
+ | .long 0x3ff00000 // hiword(1.0)
+ | vm_round floor
+ | vm_round ceil
+ |
+ |->vm_trunc:
+#if LJ_HASJIT
+ | lsl CARG3, CARG2, #1
+ | adds RB, CARG3, #0x00200000
+ | andpl CARG2, CARG2, #0x80000000 // |x| < 1? hi = sign(x), lo = 0.
+ | movpl CARG1, #0
+ | bxpl lr
+ | mvn CARG4, #0x3cc
+ | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0.
+ | bxlo lr // |x| >= 2^52: already done.
+ | mvn CARG4, #1
+ | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask
+ | subs RB, RB, #32
+ | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask
+ | bx lr
+#endif
+ |
+ | // double lj_vm_mod(double dividend, double divisor);
+ |->vm_mod:
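+ | // Computes dividend - floor(dividend/divisor)*divisor (Lua % semantics).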
+ | push {r0, r1, r2, r3, r4, lr}
+ | bl extern __aeabi_ddiv
+ | bl ->vm_floor
+ | ldrd CARG34, [sp, #8]
+ | bl extern __aeabi_dmul
+ | ldrd CARG34, [sp]
+ | eor CARG2, CARG2, #0x80000000
+ | bl extern __aeabi_dadd
+ | add sp, sp, #20
+ | pop {pc}
+ |
+ | // int lj_vm_modi(int dividend, int divisor);
+ |->vm_modi:
+ | ands RB, CARG1, #0x80000000
+ | rsbmi CARG1, CARG1, #0 // a = |dividend|
+ | eor RB, RB, CARG2, asr #1 // Keep signdiff and sign(divisor).
+ | cmp CARG2, #0
+ | rsbmi CARG2, CARG2, #0 // b = |divisor|
+ | subs CARG4, CARG2, #1
+ | cmpne CARG1, CARG2
+ | moveq CARG1, #0 // if (b == 1 || a == b) a = 0
+ | tsthi CARG2, CARG4
+ | andeq CARG1, CARG1, CARG4 // else if ((b & (b-1)) == 0) a &= b-1
+ | bls >1
+ | // Use repeated subtraction to get the remainder.
+ | clz CARG3, CARG1
+ | clz CARG4, CARG2
+ | sub CARG4, CARG4, CARG3
+ | rsbs CARG3, CARG4, #31 // entry = (31-(clz(b)-clz(a)))*8
+ | addne pc, pc, CARG3, lsl #3 // Duff's device.
+ | nop
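+ /* Each unrolled step below is a cmp plus a subhs (8 bytes), which matches
+ ** the *8 scale of the entry offset computed above. */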
+ {
+ int i;
+ for (i = 31; i >= 0; i--) {
+ | cmp CARG1, CARG2, lsl #i
+ | subhs CARG1, CARG1, CARG2, lsl #i
+ }
+ }
+ |1:
+ | cmp CARG1, #0
+ | cmpne RB, #0
+ | submi CARG1, CARG1, CARG2 // if (y != 0 && signdiff) y = y - b
+ | eors CARG2, CARG1, RB, lsl #1
+ | rsbmi CARG1, CARG1, #0 // if (sign(divisor) != sign(y)) y = -y
+ | bx lr
+ |
+ |// Callable from C: double lj_vm_foldarith(double x, double y, int op)
+ |// Compute x op y for basic arithmetic operators (+ - * / % ^ and unary -)
+ |// and basic math functions. ORDER ARITH
+ |->vm_foldarith:
+ | ldr OP, [sp]
+ | cmp OP, #1
+ | blo extern __aeabi_dadd
+ | beq extern __aeabi_dsub
+ | cmp OP, #3
+ | blo extern __aeabi_dmul
+ | beq extern __aeabi_ddiv
+ | cmp OP, #5
+ | blo ->vm_mod
+ | beq extern pow
+ | cmp OP, #7
+ | eorlo CARG2, CARG2, #0x80000000
+ | biceq CARG2, CARG2, #0x80000000
+ | bxls lr
+#if LJ_HASJIT
+ | cmp OP, #9
+ | blo extern atan2
+ | beq >9 // No support needed for IR_LDEXP.
+ | cmp OP, #11
+ | bhi >9
+ | push {r4, lr}
+ | beq >1
+ | // IR_MIN
+ | bl extern __aeabi_cdcmple
+ | movhi CARG1, CARG3
+ | movhi CARG2, CARG4
+ | pop {r4, pc}
+ |9:
+ | NYI // Bad op.
+ |
+ |1: // IR_MAX
+ | bl extern __aeabi_cdcmple
+ | movlo CARG1, CARG3
+ | movlo CARG2, CARG4
+ | pop {r4, pc}
+#else
+ | NYI // Other operations only needed by JIT compiler.
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions.
+ |// Saveregs already performed. Callback slot number in [sp], g in r12.
+ |->vm_ffi_callback:
+#if LJ_HASFFI
+ |.type CTSTATE, CTState, PC
+ | ldr CTSTATE, GL:r12->ctype_state
+ | add DISPATCH, r12, #GG_G2DISP
+ | strd CARG12, CTSTATE->cb.gpr[0]
+ | strd CARG34, CTSTATE->cb.gpr[2]
+ | ldr CARG4, [sp]
+ | add CARG3, sp, #CFRAME_SIZE
+ | mov CARG1, CTSTATE
+ | lsr CARG4, CARG4, #3
+ | str CARG3, CTSTATE->cb.stack
+ | mov CARG2, sp
+ | str CARG4, CTSTATE->cb.slot
+ | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
+ | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // Returns lua_State *.
+ | ldr BASE, L:CRET1->base
+ | mv_vmstate CARG2, INTERP
+ | ldr RC, L:CRET1->top
+ | mov MASKR8, #255
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | mov L, CRET1
+ | sub RC, RC, BASE
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | st_vmstate CARG2
+ | ins_callt
+#endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+#if LJ_HASFFI
+ | ldr CTSTATE, [DISPATCH, #DISPATCH_GL(ctype_state)]
+ | str BASE, L->base
+ | str CARG4, L->top
+ | str L, CTSTATE->L
+ | mov CARG1, CTSTATE
+ | mov CARG2, RA
+ | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | ldrd CARG12, CTSTATE->cb.gpr[0]
+ | b ->vm_leave_unw
+#endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+#if LJ_HASFFI
+ | .type CCSTATE, CCallState, r4
+ | push {CCSTATE, r5, r11, lr}
+ | mov CCSTATE, CARG1
+ | ldr CARG1, CCSTATE:CARG1->spadj
+ | ldrb CARG2, CCSTATE->nsp
+ | add CARG3, CCSTATE, #offsetof(CCallState, stack)
+ | mov r11, sp
+ | sub sp, sp, CARG1 // Readjust stack.
+ | subs CARG2, CARG2, #1
+ | ldr RB, CCSTATE->func
+ | bmi >2
+ |1: // Copy stack slots.
+ | ldr CARG4, [CARG3, CARG2, lsl #2]
+ | str CARG4, [sp, CARG2, lsl #2]
+ | subs CARG2, CARG2, #1
+ | bpl <1
+ |2:
+ | ldr CARG1, CCSTATE->gpr[0]
+ | ldr CARG2, CCSTATE->gpr[1]
+ | ldr CARG3, CCSTATE->gpr[2]
+ | ldr CARG4, CCSTATE->gpr[3]
+ | blx RB
+ | mov sp, r11
+ | str CRET1, CCSTATE->gpr[0]
+ | str CRET2, CCSTATE->gpr[1]
+ | pop {CCSTATE, r5, r11, pc}
+#endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RC = src2, JMP with RC = target
+ | lsl RC, RC, #3
+ | ldrd CARG12, [RA, BASE]!
+ | ldrh RB, [PC, #2]
+ | ldrd CARG34, [RC, BASE]!
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
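+ | // RB = biased jump target; the conditional sub below both takes the
+ | // branch and removes the BCBIAS_J bias (0x8000*4 = 0x20000).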
+ | checktp CARG2, LJ_TISNUM
+ | bne >3
+ | checktp CARG4, LJ_TISNUM
+ | bne >4
+ | cmp CARG1, CARG3
+ if (op == BC_ISLT) {
+ | sublt PC, RB, #0x20000
+ } else if (op == BC_ISGE) {
+ | subge PC, RB, #0x20000
+ } else if (op == BC_ISLE) {
+ | suble PC, RB, #0x20000
+ } else {
+ | subgt PC, RB, #0x20000
+ }
+ |1:
+ | ins_next
+ |
+ |3: // CARG12 is not an integer.
+ | bhi ->vmeta_comp
+ | // CARG12 is a number.
+ | checktp CARG4, LJ_TISNUM
+ | movlo RA, RB // Save RB.
+ | blo >5
+ | // CARG12 is a number, CARG3 is an integer.
+ | mov CARG1, CARG3
+ | mov RC, RA
+ | mov RA, RB // Save RB.
+ | bl extern __aeabi_i2d
+ | mov CARG3, CARG1
+ | mov CARG4, CARG2
+ | ldrd CARG12, [RC] // Restore first operand.
+ | b >5
+ |4: // CARG1 is an integer, CARG34 is not an integer.
+ | bhi ->vmeta_comp
+ | // CARG1 is an integer, CARG34 is a number
+ | mov RA, RB // Save RB.
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, [RC] // Restore second operand.
+ |5: // CARG12 and CARG34 are numbers.
+ | bl extern __aeabi_cdcmple
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
+ if (op == BC_ISLT) {
+ | sublo PC, RA, #0x20000
+ } else if (op == BC_ISGE) {
+ | subhs PC, RA, #0x20000
+ } else if (op == BC_ISLE) {
+ | subls PC, RA, #0x20000
+ } else {
+ | subhi PC, RA, #0x20000
+ }
+ | b <1
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RC = src2, JMP with RC = target
+ | lsl RC, RC, #3
+ | ldrd CARG12, [RA, BASE]!
+ | ldrh RB, [PC, #2]
+ | ldrd CARG34, [RC, BASE]!
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TISNUM
+ | cmnls CARG4, #-LJ_TISNUM
+ if (vk) {
+ | bls ->BC_ISEQN_Z
+ } else {
+ | bls ->BC_ISNEN_Z
+ }
+ | // Either or both types are not numbers.
+ if (LJ_HASFFI) {
+ | checktp CARG2, LJ_TCDATA
+ | checktpne CARG4, LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ }
+ | cmp CARG2, CARG4 // Compare types.
+ | bne >2 // Not the same type?
+ | checktp CARG2, LJ_TISPRI
+ | bhs >1 // Same type and primitive type?
+ |
+ | // Same types and not a primitive type. Compare GCobj or pvalue.
+ | cmp CARG1, CARG3
+ if (vk) {
+ | bne >3 // Different GCobjs or pvalues?
+ |1: // Branch if same.
+ | sub PC, RB, #0x20000
+ |2: // Different.
+ | ins_next
+ |3:
+ | checktp CARG2, LJ_TISTABUD
+ | bhi <2 // Different objects and not table/ud?
+ } else {
+ | beq >1 // Same GCobjs or pvalues?
+ | checktp CARG2, LJ_TISTABUD
+ | bhi >2 // Different objects and not table/ud?
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | ldr TAB:RA, TAB:CARG1->metatable
+ | cmp TAB:RA, #0
+ if (vk) {
+ | beq <2 // No metatable?
+ } else {
+ | beq >2 // No metatable?
+ }
+ | ldrb RA, TAB:RA->nomm
+ | mov CARG4, #1-vk // ne = 0 or 1.
+ | mov CARG2, CARG1
+ | tst RA, #1<<MM_eq
+ | beq ->vmeta_equal // 'no __eq' flag not set?
+ if (vk) {
+ | b <2
+ } else {
+ |2: // Branch if different.
+ | sub PC, RB, #0x20000
+ |1: // Same.
+ | ins_next
+ }
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RC = str_const (~), JMP with RC = target
+ | mvn RC, RC
+ | ldrd CARG12, [BASE, RA]
+ | ldrh RB, [PC, #2]
+ | ldr STR:CARG3, [KBASE, RC, lsl #2]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TSTR
+ if (LJ_HASFFI) {
+ | bne >7
+ | cmp CARG1, CARG3
+ } else {
+ | cmpeq CARG1, CARG3
+ }
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ |1:
+ } else {
+ |1:
+ | subne PC, RB, #0x20000
+ }
+ | ins_next
+ |
+ if (LJ_HASFFI) {
+ |7:
+ | checktp CARG2, LJ_TCDATA
+ | bne <1
+ | b ->vmeta_equal_cd
+ }
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RC = num_const (~), JMP with RC = target
+ | lsl RC, RC, #3
+ | ldrd CARG12, [RA, BASE]!
+ | ldrh RB, [PC, #2]
+ | ldrd CARG34, [RC, KBASE]!
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | checktp CARG2, LJ_TISNUM
+ | bne >3
+ | checktp CARG4, LJ_TISNUM
+ | bne >4
+ | cmp CARG1, CARG3
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ |1:
+ } else {
+ |1:
+ | subne PC, RB, #0x20000
+ }
+ |2:
+ | ins_next
+ |
+ |3: // CARG12 is not an integer.
+ if (LJ_HASFFI) {
+ | bhi >7
+ } else {
+ if (!vk) {
+ | subhi PC, RB, #0x20000
+ }
+ | bhi <2
+ }
+ | // CARG12 is a number.
+ | checktp CARG4, LJ_TISNUM
+ | movlo RA, RB // Save RB.
+ | blo >5
+ | // CARG12 is a number, CARG3 is an integer.
+ | mov CARG1, CARG3
+ | mov RC, RA
+ |4: // CARG1 is an integer, CARG34 is a number.
+ | mov RA, RB // Save RB.
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, [RC] // Restore other operand.
+ |5: // CARG12 and CARG34 are numbers.
+ | bl extern __aeabi_cdcmpeq
+ if (vk) {
+ | subeq PC, RA, #0x20000
+ } else {
+ | subne PC, RA, #0x20000
+ }
+ | b <2
+ |
+ if (LJ_HASFFI) {
+ |7:
+ | checktp CARG2, LJ_TCDATA
+ | bne <1
+ | b ->vmeta_equal_cd
+ }
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RC = primitive_type (~), JMP with RC = target
+ | ldrd CARG12, [BASE, RA]
+ | ldrh RB, [PC, #2]
+ | add PC, PC, #4
+ | mvn RC, RC
+ | add RB, PC, RB, lsl #2
+ if (LJ_HASFFI) {
+ | checktp CARG2, LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ }
+ | cmp CARG2, RC
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ } else {
+ | subne PC, RB, #0x20000
+ }
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RC = src, JMP with RC = target
+ | add RC, BASE, RC, lsl #3
+ | ldrh RB, [PC, #2]
+ | ldrd CARG12, [RC]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TTRUE
+ if (op == BC_ISTC || op == BC_IST) {
+ | subls PC, RB, #0x20000
+ if (op == BC_ISTC) {
+ | strdls CARG12, [BASE, RA]
+ }
+ } else {
+ | subhi PC, RB, #0x20000
+ if (op == BC_ISFC) {
+ | strdhi CARG12, [BASE, RA]
+ }
+ }
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RC = src
+ | lsl RC, RC, #3
+ | ins_next1
+ | ldrd CARG12, [BASE, RC]
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RC = src
+ | add RC, BASE, RC, lsl #3
+ | ins_next1
+ | ldr CARG1, [RC, #4]
+ | add RA, BASE, RA
+ | ins_next2
+ | checktp CARG1, LJ_TTRUE
+ | mvnls CARG2, #~LJ_TFALSE
+ | mvnhi CARG2, #~LJ_TTRUE
+ | str CARG2, [RA, #4]
+ | ins_next3
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RC = src
+ | lsl RC, RC, #3
+ | ldrd CARG12, [BASE, RC]
+ | ins_next1
+ | ins_next2
+ | checktp CARG2, LJ_TISNUM
+ | bhi ->vmeta_unm
+ | eorne CARG2, CARG2, #0x80000000
+ | bne >5
+ | rsbseq CARG1, CARG1, #0
+ | ldrdvs CARG12, >9
+ |5:
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |
+ |.align 8
+ |9:
+ | .long 0x00000000, 0x41e00000 // 2^31.
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RC = src
+ | lsl RC, RC, #3
+ | ldrd CARG12, [BASE, RC]
+ | checkstr CARG2, >2
+ | ldr CARG1, STR:CARG1->len
+ |1:
+ | mvn CARG2, #~LJ_TISNUM
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |2:
+ | checktab CARG2, ->vmeta_len
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | ldr TAB:CARG3, TAB:CARG1->metatable
+ | cmp TAB:CARG3, #0
+ | bne >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | IOS mov RC, BASE
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | IOS mov BASE, RC
+ | b <1
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ |9:
+ | ldrb CARG4, TAB:CARG3->nomm
+ | tst CARG4, #1<<MM_len
+ | bne <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithcheck, cond, ncond, target
+ ||if (vk == 1) {
+ | cmn CARG4, #-LJ_TISNUM
+ | cmn..cond CARG2, #-LJ_TISNUM
+ ||} else {
+ | cmn CARG2, #-LJ_TISNUM
+ | cmn..cond CARG4, #-LJ_TISNUM
+ ||}
+ | b..ncond target
+ |.endmacro
+ |.macro ins_arithcheck_int, target
+ | ins_arithcheck eq, ne, target
+ |.endmacro
+ |.macro ins_arithcheck_num, target
+ | ins_arithcheck lo, hs, target
+ |.endmacro
+ |
+ |.macro ins_arithpre
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | ldrd CARG12, [BASE, RB]
+ | ldrd CARG34, [KBASE, RC]
+ || break;
+ ||case 1:
+ | ldrd CARG34, [BASE, RB]
+ | ldrd CARG12, [KBASE, RC]
+ || break;
+ ||default:
+ | ldrd CARG12, [BASE, RB]
+ | ldrd CARG34, [BASE, RC]
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithfallback, ins
+ ||switch (vk) {
+ ||case 0:
+ | ins ->vmeta_arith_vn
+ || break;
+ ||case 1:
+ | ins ->vmeta_arith_nv
+ || break;
+ ||default:
+ | ins ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins, fpcall
+ | ins_arithpre
+ |.if "intins" ~= "vm_modi"
+ | ins_next1
+ |.endif
+ | ins_arithcheck_int >5
+ |.if "intins" == "smull"
+ | smull CARG1, RC, CARG3, CARG1
+ | cmp RC, CARG1, asr #31
+ | ins_arithfallback bne
+ |.elif "intins" == "vm_modi"
+ | movs CARG2, CARG3
+ | ins_arithfallback beq
+ | bl ->vm_modi
+ | mvn CARG2, #~LJ_TISNUM
+ |.else
+ | intins CARG1, CARG1, CARG3
+ | ins_arithfallback bvs
+ |.endif
+ |4:
+ |.if "intins" == "vm_modi"
+ | ins_next1
+ |.endif
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |5: // FP variant.
+ | ins_arithfallback ins_arithcheck_num
+ |.if "intins" == "vm_modi"
+ | bl fpcall
+ |.else
+ | bl fpcall
+ | ins_next1
+ |.endif
+ | b <4
+ |.endmacro
+ |
+ |.macro ins_arithfp, fpcall
+ | ins_arithpre
+ | ins_arithfallback ins_arithcheck_num
+ |.if "fpcall" == "extern pow"
+ | IOS mov RC, BASE
+ | bl fpcall
+ | IOS mov BASE, RC
+ |.else
+ | bl fpcall
+ |.endif
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arithdn adds, extern __aeabi_dadd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arithdn subs, extern __aeabi_dsub
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arithdn smull, extern __aeabi_dmul
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arithfp extern __aeabi_ddiv
+ break;
+ case BC_MODVN: case BC_MODNV: case BC_MODVV:
+ | ins_arithdn vm_modi, ->vm_mod
+ break;
+ case BC_POW:
+ | // NYI: (partial) integer arithmetic.
+ | ins_arithfp extern pow
+ break;
+
+ case BC_CAT:
+ | decode_RB8 RC, INS
+ | decode_RC8 RB, INS
+ | // RA = dst*8, RC = src_start*8, RB = src_end*8 (note: RB/RC swapped!)
+ | sub CARG3, RB, RC
+ | str BASE, L->base
+ | add CARG2, BASE, RB
+ |->BC_CAT_Z:
+ | // RA = dst*8, RC = src_start*8, CARG2 = top-1
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | lsr CARG3, CARG3, #3
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | ldr BASE, L->base
+ | cmp CRET1, #0
+ | bne ->vmeta_binop
+ | ldrd CARG34, [BASE, RC]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA] // Copy result to RA.
+ | ins_next3
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RC = str_const (~)
+ | mvn RC, RC
+ | ins_next1
+ | ldr CARG1, [KBASE, RC, lsl #2]
+ | mvn CARG2, #~LJ_TSTR
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ | // RA = dst*8, RC = cdata_const (~)
+ | mvn RC, RC
+ | ins_next1
+ | ldr CARG1, [KBASE, RC, lsl #2]
+ | mvn CARG2, #~LJ_TCDATA
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+#endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, (RC = int16_literal)
+ | mov CARG1, INS, asr #16 // Refetch sign-extended reg.
+ | mvn CARG2, #~LJ_TISNUM
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RC = num_const
+ | lsl RC, RC, #3
+ | ins_next1
+ | ldrd CARG12, [KBASE, RC]
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RC = primitive_type (~)
+ | add RA, BASE, RA
+ | mvn RC, RC
+ | ins_next1
+ | ins_next2
+ | str RC, [RA, #4]
+ | ins_next3
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RC = end
+ | add RA, BASE, RA
+ | add RC, BASE, RC, lsl #3
+ | mvn CARG1, #~LJ_TNIL
+ | str CARG1, [RA, #4]
+ | add RA, RA, #8
+ |1:
+ | str CARG1, [RA, #4]
+ | cmp RA, RC
+ | add RA, RA, #8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RC = uvnum
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsl RC, RC, #2
+ | add RC, RC, #offsetof(GCfuncL, uvptr)
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RC]
+ | ldr CARG2, UPVAL:CARG2->v
+ | ldrd CARG34, [CARG2]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RC = src
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | lsl RC, RC, #3
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | ldrd CARG34, [BASE, RC]
+ | ldrb RB, UPVAL:CARG2->marked
+ | ldrb RC, UPVAL:CARG2->closed
+ | ldr CARG2, UPVAL:CARG2->v
+ | tst RB, #LJ_GC_BLACK // isblack(uv)
+ | add RB, CARG4, #-LJ_TISGCV
+ | cmpne RC, #0
+ | strd CARG34, [CARG2]
+ | bne >2 // Upvalue is closed and black?
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | cmn RB, #-(LJ_TISNUM - LJ_TISGCV)
+ | ldrbhi RC, GCOBJ:CARG3->gch.marked
+ | bls <1 // tvisgcv(v)
+ | sub CARG1, DISPATCH, #-GG_DISP2G
+ | tst RC, #LJ_GC_WHITES
+ | // Crossed a write barrier. Move the barrier forward.
+ if (LJ_TARGET_OSX) {
+ | beq <1
+ | mov RC, BASE
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RC
+ } else {
+ | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ }
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RC = str_const (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | mvn RC, RC
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | ldr STR:CARG3, [KBASE, RC, lsl #2]
+ | mvn CARG4, #~LJ_TSTR
+ | ldrb RB, UPVAL:CARG2->marked
+ | ldr CARG2, UPVAL:CARG2->v
+ | ldrb RC, UPVAL:CARG2->closed
+ | tst RB, #LJ_GC_BLACK // isblack(uv)
+ | ldrb RB, STR:CARG3->marked
+ | strd CARG34, [CARG2]
+ | bne >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | tst RB, #LJ_GC_WHITES // iswhite(str)
+ | cmpne RC, #0
+ | sub CARG1, DISPATCH, #-GG_DISP2G
+ | // Crossed a write barrier. Move the barrier forward.
+ if (LJ_TARGET_OSX) {
+ | beq <1
+ | mov RC, BASE
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RC
+ } else {
+ | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ }
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RC = num_const
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | lsl RC, RC, #3
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | ldrd CARG34, [KBASE, RC]
+ | ldr CARG2, UPVAL:CARG2->v
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [CARG2]
+ | ins_next3
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RC = primitive_type (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | mvn RC, RC
+ | ldr CARG2, UPVAL:CARG2->v
+ | ins_next1
+ | ins_next2
+ | str RC, [CARG2, #4]
+ | ins_next3
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RC = target
+ | ldr CARG3, L->openupval
+ | add RC, PC, RC, lsl #2
+ | str BASE, L->base
+ | cmp CARG3, #0
+ | sub PC, RC, #0x20000
+ | beq >1
+ | mov CARG1, L
+ | add CARG2, BASE, RA
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | ldr BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RC = proto_const (~) (holding function prototype)
+ | mvn RC, RC
+ | str BASE, L->base
+ | ldr CARG2, [KBASE, RC, lsl #2]
+ | str PC, SAVE_PC
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | mov CARG1, L
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TFUNC
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RC = (hbits|asize) | tab_const (~)
+ if (op == BC_TDUP) {
+ | mvn RC, RC
+ }
+ | ldr CARG3, [DISPATCH, #DISPATCH_GL(gc.total)]
+ | ldr CARG4, [DISPATCH, #DISPATCH_GL(gc.threshold)]
+ | str BASE, L->base
+ | str PC, SAVE_PC
+ | cmp CARG3, CARG4
+ | mov CARG1, L
+ | bhs >5
+ |1:
+ if (op == BC_TNEW) {
+ | lsl CARG2, RC, #21
+ | lsr CARG3, RC, #11
+ | asr RC, CARG2, #21
+ | lsr CARG2, CARG2, #21
+ | cmn RC, #1
+ | addeq CARG2, CARG2, #2
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns GCtab *.
+ } else {
+ | ldr CARG2, [KBASE, RC, lsl #2]
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns GCtab *.
+ }
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TTAB
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |5:
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mov CARG1, L
+ | b <1
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RC = str_const (~)
+ case BC_GSET:
+ | // RA = dst*8, RC = str_const (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | mvn RC, RC
+ | ldr TAB:CARG1, LFUNC:CARG2->env
+ | ldr STR:RC, [KBASE, RC, lsl #2]
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | ldrd TAB:CARG12, [BASE, RB]
+ | ldrd CARG34, [BASE, RC]
+ | checktab CARG2, ->vmeta_tgetv // STALL: load CARG12.
+ | checktp CARG4, LJ_TISNUM // Integer key?
+ | ldreq CARG4, TAB:CARG1->array
+ | ldreq CARG2, TAB:CARG1->asize
+ | bne >9
+ |
+ | add CARG4, CARG4, CARG3, lsl #3
+ | cmp CARG3, CARG2 // In array part?
+ | ldrdlo CARG34, [CARG4]
+ | bhs ->vmeta_tgetv
+ | ins_next1 // Overwrites RB!
+ | checktp CARG4, LJ_TNIL
+ | beq >5
+ |1:
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG2, TAB:CARG1->metatable
+ | cmp TAB:CARG2, #0
+ | beq <1 // No metatable: done.
+ | ldrb CARG2, TAB:CARG2->nomm
+ | tst CARG2, #1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetv
+ |
+ |9:
+ | checktp CARG4, LJ_TSTR // String key?
+ | moveq STR:RC, CARG3
+ | beq ->BC_TGETS_Z
+ | b ->vmeta_tgetv
+ break;
+ case BC_TGETS:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = dst*8, RB = table*8, RC = str_const (~)
+ | ldrd CARG12, [BASE, RB]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC.
+ | checktab CARG2, ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | ldr CARG3, TAB:CARG1->hmask
+ | ldr CARG4, STR:RC->hash
+ | ldr NODE:INS, TAB:CARG1->node
+ | mov TAB:RB, TAB:CARG1
+ | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask
+ | add CARG3, CARG3, CARG3, lsl #1
+ | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
+ |1:
+ | ldrd CARG12, NODE:INS->key // STALL: early NODE:INS.
+ | ldrd CARG34, NODE:INS->val
+ | ldr NODE:INS, NODE:INS->next
+ | checktp CARG2, LJ_TSTR
+ | cmpeq CARG1, STR:RC
+ | bne >4
+ | checktp CARG4, LJ_TNIL
+ | beq >5
+ |3:
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |4: // Follow hash chain.
+ | cmp NODE:INS, #0
+ | bne <1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG1, TAB:RB->metatable
+ | mov CARG3, #0 // Optional clear of undef. value (during load stall).
+ | mvn CARG4, #~LJ_TNIL
+ | cmp TAB:CARG1, #0
+ | beq <3 // No metatable: done.
+ | ldrb CARG2, TAB:CARG1->nomm
+ | tst CARG2, #1<<MM_index
+ | bne <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
+ case BC_TGETB:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = dst*8, RB = table*8, RC = index
+ | ldrd CARG12, [BASE, RB]
+ | checktab CARG2, ->vmeta_tgetb // STALL: load CARG12.
+ | ldr CARG3, TAB:CARG1->asize
+ | ldr CARG4, TAB:CARG1->array
+ | lsl CARG2, RC, #3
+ | cmp RC, CARG3
+ | ldrdlo CARG34, [CARG4, CARG2]
+ | bhs ->vmeta_tgetb
+ | ins_next1 // Overwrites RB!
+ | checktp CARG4, LJ_TNIL
+ | beq >5
+ |1:
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG2, TAB:CARG1->metatable
+ | cmp TAB:CARG2, #0
+ | beq <1 // No metatable: done.
+ | ldrb CARG2, TAB:CARG2->nomm
+ | tst CARG2, #1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb
+ break;
+
+ case BC_TSETV:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = src*8, RB = table*8, RC = key*8
+ | ldrd TAB:CARG12, [BASE, RB]
+ | ldrd CARG34, [BASE, RC]
+ | checktab CARG2, ->vmeta_tsetv // STALL: load CARG12.
+ | checktp CARG4, LJ_TISNUM // Integer key?
+ | ldreq CARG2, TAB:CARG1->array
+ | ldreq CARG4, TAB:CARG1->asize
+ | bne >9
+ |
+ | add CARG2, CARG2, CARG3, lsl #3
+ | cmp CARG3, CARG4 // In array part?
+ | ldrlo INS, [CARG2, #4]
+ | bhs ->vmeta_tsetv
+ | ins_next1 // Overwrites RB!
+ | checktp INS, LJ_TNIL
+ | ldrb INS, TAB:CARG1->marked
+ | ldrd CARG34, [BASE, RA]
+ | beq >5
+ |1:
+ | tst INS, #LJ_GC_BLACK // isblack(table)
+ | strd CARG34, [CARG2]
+ | bne >7
+ |2:
+ | ins_next2
+ | ins_next3
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:RA, TAB:CARG1->metatable
+ | cmp TAB:RA, #0
+ | beq <1 // No metatable: done.
+ | ldrb RA, TAB:RA->nomm
+ | tst RA, #1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetv
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG1, INS, CARG3
+ | b <2
+ |
+ |9:
+ | checktp CARG4, LJ_TSTR // String key?
+ | moveq STR:RC, CARG3
+ | beq ->BC_TSETS_Z
+ | b ->vmeta_tsetv
+ break;
+ case BC_TSETS:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = src*8, RB = table*8, RC = str_const (~)
+ | ldrd CARG12, [BASE, RB]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC.
+ | checktab CARG2, ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | ldr CARG3, TAB:CARG1->hmask
+ | ldr CARG4, STR:RC->hash
+ | ldr NODE:INS, TAB:CARG1->node
+ | mov TAB:RB, TAB:CARG1
+ | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask
+ | add CARG3, CARG3, CARG3, lsl #1
+ | mov CARG4, #0
+ | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
+ | strb CARG4, TAB:RB->nomm // Clear metamethod cache.
+ |1:
+ | ldrd CARG12, NODE:INS->key
+ | ldr CARG4, NODE:INS->val.it
+ | ldr NODE:CARG3, NODE:INS->next
+ | checktp CARG2, LJ_TSTR
+ | cmpeq CARG1, STR:RC
+ | bne >5
+ | ldrb CARG2, TAB:RB->marked
+ | checktp CARG4, LJ_TNIL // Key found, but nil value?
+ | ldrd CARG34, [BASE, RA]
+ | beq >4
+ |2:
+ | tst CARG2, #LJ_GC_BLACK // isblack(table)
+ | strd CARG34, NODE:INS->val
+ | bne >7
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | ldr TAB:CARG1, TAB:RB->metatable
+ | cmp TAB:CARG1, #0
+ | beq <2 // No metatable: done.
+ | ldrb CARG1, TAB:CARG1->nomm
+ | tst CARG1, #1<<MM_newindex
+ | bne <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | movs NODE:INS, NODE:CARG3
+ | bne <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | ldr TAB:CARG1, TAB:RB->metatable
+ | mov CARG3, TMPDp
+ | str PC, SAVE_PC
+ | cmp TAB:CARG1, #0 // No metatable: continue.
+ | str BASE, L->base
+ | ldrbne CARG2, TAB:CARG1->nomm
+ | mov CARG1, L
+ | beq >6
+ | tst CARG2, #1<<MM_newindex
+ | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mvn CARG4, #~LJ_TSTR
+ | str STR:RC, TMPDlo
+ | mov CARG2, TAB:RB
+ | str CARG4, TMPDhi
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | ldr BASE, L->base
+ | ldrd CARG34, [BASE, RA]
+ | strd CARG34, [CRET1]
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, CARG2, CARG3
+ | b <3
+ break;
+ case BC_TSETB:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = src*8, RB = table*8, RC = index
+ | ldrd CARG12, [BASE, RB]
+ | checktab CARG2, ->vmeta_tsetb // STALL: load CARG12.
+ | ldr CARG3, TAB:CARG1->asize
+ | ldr RB, TAB:CARG1->array
+ | lsl CARG2, RC, #3
+ | cmp RC, CARG3
+ | ldrdlo CARG34, [CARG2, RB]!
+ | bhs ->vmeta_tsetb
+ | ins_next1 // Overwrites RB!
+ | checktp CARG4, LJ_TNIL
+ | ldrb INS, TAB:CARG1->marked
+ | ldrd CARG34, [BASE, RA]
+ | beq >5
+ |1:
+ | tst INS, #LJ_GC_BLACK // isblack(table)
+ | strd CARG34, [CARG2]
+ | bne >7
+ |2:
+ | ins_next2
+ | ins_next3
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:RA, TAB:CARG1->metatable
+ | cmp TAB:RA, #0
+ | beq <1 // No metatable: done.
+ | ldrb RA, TAB:RA->nomm
+ | tst RA, #1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetb
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG1, INS, CARG3
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RC = num_const (start index)
+ | add RA, BASE, RA
+ |1:
+ | ldr RB, SAVE_MULTRES
+ | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table.
+ | ldr CARG1, [KBASE, RC, lsl #3] // Integer constant is in lo-word.
+ | subs RB, RB, #8
+ | ldr CARG4, TAB:CARG2->asize
+ | beq >4 // Nothing to copy?
+ | add CARG3, CARG1, RB, lsr #3
+ | cmp CARG3, CARG4
+ | ldr CARG4, TAB:CARG2->array
+ | add RB, RA, RB
+ | bhi >5
+ | add INS, CARG4, CARG1, lsl #3
+ | ldrb CARG1, TAB:CARG2->marked
+ |3: // Copy result slots to table.
+ | ldrd CARG34, [RA], #8
+ | strd CARG34, [INS], #8
+ | cmp RA, RB
+ | blo <3
+ | tst CARG1, #LJ_GC_BLACK // isblack(table)
+ | bne >7
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | IOS ldr BASE, L->base
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, CARG1, CARG3
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = nresults+1,) RC = extra_nargs
+ | ldr CARG1, SAVE_MULTRES
+ | decode_RC8 NARGS8:RC, INS
+ | add NARGS8:RC, NARGS8:RC, CARG1
+ | b ->BC_CALL_Z
+ break;
+ case BC_CALL:
+ | decode_RC8 NARGS8:RC, INS
+ | // RA = base*8, (RB = nresults+1,) RC = (nargs+1)*8
+ |->BC_CALL_Z:
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | ldrd CARG34, [BASE, RA]!
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | add BASE, BASE, #8
+ | checkfunc CARG4, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs
+ | ldr CARG1, SAVE_MULTRES
+ | add NARGS8:RC, CARG1, RC, lsl #3
+ | b ->BC_CALLT1_Z
+ break;
+ case BC_CALLT:
+ | lsl NARGS8:RC, RC, #3
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ |->BC_CALLT1_Z:
+ | ldrd LFUNC:CARG34, [RA, BASE]!
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | add RA, RA, #8
+ | checkfunc CARG4, ->vmeta_callt
+ | ldr PC, [BASE, FRAME_PC]
+ |->BC_CALLT2_Z:
+ | mov RB, #0
+ | ldrb CARG4, LFUNC:CARG3->ffid
+ | tst PC, #FRAME_TYPE
+ | bne >7
+ |1:
+ | str LFUNC:CARG3, [BASE, FRAME_FUNC] // Copy function down, but keep PC.
+ | cmp NARGS8:RC, #0
+ | beq >3
+ |2:
+ | ldrd CARG12, [RA, RB]
+ | add INS, RB, #8
+ | cmp INS, NARGS8:RC
+ | strd CARG12, [BASE, RB]
+ | mov RB, INS
+ | bne <2
+ |3:
+ | cmp CARG4, #1 // (> FF_C) Calling a fast function?
+ | bhi >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | ldr INS, [PC, #-4]
+ | decode_RA8 RA, INS
+ | sub CARG1, BASE, RA
+ | ldr LFUNC:CARG1, [CARG1, #-16]
+ | ldr CARG1, LFUNC:CARG1->field_pc
+ | ldr KBASE, [CARG1, #PC2PROTO(k)]
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | eor PC, PC, #FRAME_VARG
+ | tst PC, #FRAME_TYPEP // Vararg frame below?
+ | movne CARG4, #0 // Clear ffid if no Lua function below.
+ | bne <1
+ | sub BASE, BASE, PC
+ | ldr PC, [BASE, FRAME_PC]
+ | tst PC, #FRAME_TYPE
+ | movne CARG4, #0 // Clear ffid if no Lua function below.
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1))
+ | add RA, BASE, RA
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | ldrd CARG34, [RA, #-16]
+ | ldrd CARG12, [RA, #-8]
+ | add BASE, RA, #8
+ | strd CARG34, [RA, #8] // Copy state.
+ | strd CARG12, [RA, #16] // Copy control var.
+ | // STALL: locked CARG34.
+ | ldrd LFUNC:CARG34, [RA, #-24]
+ | mov NARGS8:RC, #16 // Iterators get 2 arguments.
+ | // STALL: load CARG34.
+ | strd LFUNC:CARG34, [RA] // Copy callable.
+ | checkfunc CARG4, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1))
+#if LJ_HASJIT
+ | // NYI: add hotloop, record BC_ITERN.
+#endif
+ | add RA, BASE, RA
+ | ldr TAB:RB, [RA, #-16]
+ | ldr CARG1, [RA, #-8] // Get index from control var.
+ | ldr INS, TAB:RB->asize
+ | ldr CARG2, TAB:RB->array
+ | add PC, PC, #4
+ |1: // Traverse array part.
+ | subs RC, CARG1, INS
+ | add CARG3, CARG2, CARG1, lsl #3
+ | bhs >5 // Index points after array part?
+ | ldrd CARG34, [CARG3]
+ | checktp CARG4, LJ_TNIL
+ | addeq CARG1, CARG1, #1 // Skip holes in array part.
+ | beq <1
+ | ldrh RC, [PC, #-2]
+ | mvn CARG2, #~LJ_TISNUM
+ | strd CARG34, [RA, #8]
+ | add RC, PC, RC, lsl #2
+ | add RB, CARG1, #1
+ | strd CARG12, [RA]
+ | sub PC, RC, #0x20000
+ | str RB, [RA, #-8] // Update control var.
+ |3:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | ldr CARG4, TAB:RB->hmask
+ | ldr NODE:RB, TAB:RB->node
+ |6:
+ | add CARG1, RC, RC, lsl #1
+ | cmp RC, CARG4 // End of iteration? Branch to ITERL+1.
+ | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8
+ | bhi <3
+ | ldrd CARG12, NODE:CARG3->val
+ | checktp CARG2, LJ_TNIL
+ | add RC, RC, #1
+ | beq <6 // Skip holes in hash part.
+ | ldrh RB, [PC, #-2]
+ | add RC, RC, INS
+ | ldrd CARG34, NODE:CARG3->key
+ | str RC, [RA, #-8] // Update control var.
+ | strd CARG12, [RA, #8]
+ | add RC, PC, RB, lsl #2
+ | sub PC, RC, #0x20000
+ | strd CARG34, [RA]
+ | b <3
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RC = target (points to ITERN)
+ | add RA, BASE, RA
+ | add RC, PC, RC, lsl #2
+ | ldrd CFUNC:CARG12, [RA, #-24]
+ | ldr CARG3, [RA, #-12]
+ | ldr CARG4, [RA, #-4]
+ | checktp CARG2, LJ_TFUNC
+ | ldrbeq CARG1, CFUNC:CARG1->ffid
+ | checktpeq CARG3, LJ_TTAB
+ | checktpeq CARG4, LJ_TNIL
+ | cmpeq CARG1, #FF_next_N
+ | subeq PC, RC, #0x20000
+ | bne >5
+ | ins_next1
+ | ins_next2
+ | mov CARG1, #0
+ | str CARG1, [RA, #-8] // Initialize control var.
+ |1:
+ | ins_next3
+ |5: // Despecialize bytecode if any of the checks fail.
+ | mov CARG1, #BC_JMP
+ | mov OP, #BC_ITERC
+ | strb CARG1, [PC, #-4]
+ | sub PC, RC, #0x20000
+ | strb OP, [PC] // Subsumes ins_next1.
+ | ins_next2
+ | b <1
+ break;
+
+ case BC_VARG:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | ldr CARG1, [BASE, FRAME_PC]
+ | add RC, BASE, RC
+ | add RA, BASE, RA
+ | add RC, RC, #FRAME_VARG
+ | add CARG4, RA, RB
+ | sub CARG3, BASE, #8 // CARG3 = vtop
+ | sub RC, RC, CARG1 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | cmp RB, #0
+ | sub CARG1, CARG3, RC
+ | beq >5 // Copy all varargs?
+ | sub CARG4, CARG4, #16
+ |1: // Copy vararg slots to destination slots.
+ | cmp RC, CARG3
+ | ldrdlo CARG12, [RC], #8
+ | mvnhs CARG2, #~LJ_TNIL
+ | cmp RA, CARG4
+ | strd CARG12, [RA], #8
+ | blo <1
+ |2:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | ldr CARG4, L->maxstack
+ | cmp CARG1, #0
+ | movle RB, #8 // MULTRES = (0+1)*8
+ | addgt RB, CARG1, #8
+ | add CARG2, RA, CARG1
+ | str RB, SAVE_MULTRES
+ | ble <2
+ | cmp CARG2, CARG4
+ | bhi >7
+ |6:
+ | ldrd CARG12, [RC], #8
+ | strd CARG12, [RA], #8
+ | cmp RC, CARG3
+ | blo <6
+ | b <2
+ |
+ |7: // Grow stack for varargs.
+ | lsr CARG2, CARG1, #3
+ | str RA, L->top
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub RC, RC, BASE // Need delta, because BASE may change.
+ | str PC, SAVE_PC
+ | sub RA, RA, BASE
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | add RA, BASE, RA
+ | add RC, BASE, RC
+ | sub CARG3, BASE, #8
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RC = extra results
+ | ldr CARG1, SAVE_MULTRES
+ | ldr PC, [BASE, FRAME_PC]
+ | add RA, BASE, RA
+ | add RC, CARG1, RC, lsl #3
+ | b ->BC_RETM_Z
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | add RA, BASE, RA
+ |->BC_RETM_Z:
+ | str RC, SAVE_MULTRES
+ |1:
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | bne ->BC_RETV2_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return
+ | ldr INS, [PC, #-4]
+ | subs CARG4, RC, #8
+ | sub CARG3, BASE, #8
+ | beq >3
+ |2:
+ | ldrd CARG12, [RA], #8
+ | add BASE, BASE, #8
+ | subs CARG4, CARG4, #8
+ | strd CARG12, [BASE, #-16]
+ | bne <2
+ |3:
+ | decode_RA8 RA, INS
+ | sub CARG4, CARG3, RA
+ | decode_RB8 RB, INS
+ | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC]
+ |5:
+ | cmp RB, RC // More results expected?
+ | bhi >6
+ | mov BASE, CARG4
+ | ldr CARG2, LFUNC:CARG1->field_pc
+ | ins_next1
+ | ins_next2
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next3
+ |
+ |6: // Fill up results with nil.
+ | mvn CARG2, #~LJ_TNIL
+ | add BASE, BASE, #8
+ | add RC, RC, #8
+ | str CARG2, [BASE, #-12]
+ | b <5
+ |
+ |->BC_RETV1_Z: // Non-standard return case.
+ | add RA, BASE, RA
+ |->BC_RETV2_Z:
+ | tst CARG2, #FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, CARG2
+ | ldr PC, [BASE, FRAME_PC]
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | str RC, SAVE_MULTRES
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | ldreq INS, [PC, #-4]
+ | bne ->BC_RETV1_Z
+ if (op == BC_RET1) {
+ | ldrd CARG12, [BASE, RA]
+ }
+ | sub CARG4, BASE, #8
+ | decode_RA8 RA, INS
+ if (op == BC_RET1) {
+ | strd CARG12, [CARG4]
+ }
+ | sub BASE, CARG4, RA
+ | decode_RB8 RB, INS
+ | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
+ |5:
+ | cmp RB, RC
+ | bhi >6
+ | ldr CARG2, LFUNC:CARG1->field_pc
+ | ins_next1
+ | ins_next2
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next3
+ |
+ |6: // Fill up results with nil.
+ | sub CARG2, CARG4, #4
+ | mvn CARG3, #~LJ_TNIL
+ | str CARG3, [CARG2, RC]
+ | add RC, RC, #8
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4]
+ |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12]
+ |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20]
+ |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28]
+
+ case BC_FORL:
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RC = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | ldrd CARG12, [RA, BASE]!
+ if (op != BC_JFORL) {
+ | add RC, PC, RC, lsl #2
+ }
+ if (!vk) {
+ | ldrd CARG34, FOR_STOP
+ | checktp CARG2, LJ_TISNUM
+ | ldr RB, FOR_TSTEP
+ | bne >5
+ | checktp CARG4, LJ_TISNUM
+ | ldr CARG4, FOR_STEP
+ | checktpeq RB, LJ_TISNUM
+ | bne ->vmeta_for
+ | cmp CARG4, #0
+ | blt >4
+ | cmp CARG1, CARG3
+ } else {
+ | ldrd CARG34, FOR_STEP
+ | checktp CARG2, LJ_TISNUM
+ | bne >5
+ | adds CARG1, CARG1, CARG3
+ | ldr CARG4, FOR_STOP
+ if (op == BC_IFORL) {
+ | addvs RC, PC, #0x20000 // Overflow: prevent branch.
+ } else {
+ | bvs >2 // Overflow: do not enter mcode.
+ }
+ | cmp CARG3, #0
+ | blt >4
+ | cmp CARG1, CARG4
+ }
+ |1:
+ if (op == BC_FORI) {
+ | subgt PC, RC, #0x20000
+ } else if (op == BC_JFORI) {
+ | sub PC, RC, #0x20000
+ | ldrhle RC, [PC, #-2]
+ } else if (op == BC_IFORL) {
+ | suble PC, RC, #0x20000
+ }
+ if (vk) {
+ | strd CARG12, FOR_IDX
+ }
+ |2:
+ | ins_next1
+ | ins_next2
+ | strd CARG12, FOR_EXT
+ if (op == BC_JFORI || op == BC_JFORL) {
+ | ble =>BC_JLOOP
+ }
+ |3:
+ | ins_next3
+ |
+ |4: // Invert check for negative step.
+ if (!vk) {
+ | cmp CARG3, CARG1
+ } else {
+ | cmp CARG4, CARG1
+ }
+ | b <1
+ |
+ |5: // FP loop.
+ if (!vk) {
+ | cmnlo CARG4, #-LJ_TISNUM
+ | cmnlo RB, #-LJ_TISNUM
+ | bhs ->vmeta_for
+ | cmp RB, #0
+ | strd CARG12, FOR_IDX
+ | strd CARG12, FOR_EXT
+ | blt >8
+ } else {
+ | cmp CARG4, #0
+ | blt >8
+ | bl extern __aeabi_dadd
+ | strd CARG12, FOR_IDX
+ | ldrd CARG34, FOR_STOP
+ | strd CARG12, FOR_EXT
+ }
+ |6:
+ | bl extern __aeabi_cdcmple
+ if (op == BC_FORI) {
+ | subhi PC, RC, #0x20000
+ } else if (op == BC_JFORI) {
+ | sub PC, RC, #0x20000
+ | ldrhls RC, [PC, #-2]
+ | bls =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ | subls PC, RC, #0x20000
+ } else {
+ | bls =>BC_JLOOP
+ }
+ | ins_next1
+ | ins_next2
+ | b <3
+ |
+ |8: // Invert check for negative step.
+ if (vk) {
+ | bl extern __aeabi_dadd
+ | strd CARG12, FOR_IDX
+ | strd CARG12, FOR_EXT
+ }
+ | mov CARG3, CARG1
+ | mov CARG4, CARG2
+ | ldrd CARG12, FOR_STOP
+ | b <6
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RC = target
+ | ldrd CARG12, [RA, BASE]!
+ if (op == BC_JITERL) {
+ | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil.
+ | strdne CARG12, [RA, #-8]
+ | bne =>BC_JLOOP
+ } else {
+ | add RC, PC, RC, lsl #2
+ | // STALL: load CARG12.
+ | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil.
+ | subne PC, RC, #0x20000 // Otherwise save control var + branch.
+ | strdne CARG12, [RA, #-8]
+ }
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RC = target (loop extent)
+ | // Note: RA/RC is only used by trace recorder to determine scope/extent
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RC = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ | // RA = base (ignored), RC = traceno
+ | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)]
+ | mov CARG2, #0 // Traces on ARM don't store the trace number, so use 0.
+ | ldr TRACE:RC, [CARG1, RC, lsl #2]
+ | st_vmstate CARG2
+ | ldr RA, TRACE:RC->mcode
+ | str BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
+ | str L, [DISPATCH, #DISPATCH_GL(jit_L)]
+ | bx RA
+#endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RC = target
+ | add RC, PC, RC, lsl #2
+ | sub PC, RC, #0x20000
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ | hotcall
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | ldrb CARG2, [PC, #-4+PC2PROTO(numparams)]
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | bhi ->vm_growstack_l
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ | ins_next2
+ }
+ |2:
+ | cmp NARGS8:RC, CARG2, lsl #3 // Check for missing parameters.
+ | mvn CARG4, #~LJ_TNIL
+ | blo >3
+ if (op == BC_JFUNCF) {
+ | decode_RD RC, INS
+ | b =>BC_JLOOP
+ } else {
+ | ins_next3
+ }
+ |
+ |3: // Clear missing parameters.
+ | strd CARG34, [BASE, NARGS8:RC]
+ | add NARGS8:RC, NARGS8:RC, #8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | add CARG4, BASE, RC
+ | add RA, RA, RC
+ | str LFUNC:CARG3, [CARG4] // Store copy of LFUNC.
+ | add CARG2, RC, #8+FRAME_VARG
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | str CARG2, [CARG4, #4] // Store delta + FRAME_VARG.
+ | bhs ->vm_growstack_l
+ | ldrb RB, [PC, #-4+PC2PROTO(numparams)]
+ | mov RA, BASE
+ | mov RC, CARG4
+ | cmp RB, #0
+ | add BASE, CARG4, #8
+ | beq >3
+ | mvn CARG3, #~LJ_TNIL
+ |1:
+ | cmp RA, RC // Less args than parameters?
+ | ldrdlo CARG12, [RA], #8
+ | movhs CARG2, CARG3
+ | strlo CARG3, [RA, #-4] // Clear old fixarg slot (help the GC).
+ |2:
+ | subs RB, RB, #1
+ | strd CARG12, [CARG4, #8]!
+ | bne <1
+ |3:
+ | ins_next
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | ldr CARG4, CFUNC:CARG3->f
+ } else {
+ | ldr CARG4, [DISPATCH, #DISPATCH_GL(wrapf)]
+ }
+ | add CARG2, RA, NARGS8:RC
+ | ldr CARG1, L->maxstack
+ | add RC, BASE, NARGS8:RC
+ | str BASE, L->base
+ | cmp CARG2, CARG1
+ | str RC, L->top
+ if (op == BC_FUNCCW) {
+ | ldr CARG2, CFUNC:CARG3->f
+ }
+ | mv_vmstate CARG3, C
+ | mov CARG1, L
+ | bhi ->vm_growstack_c // Need to grow stack.
+ | st_vmstate CARG3
+ | blx CARG4 // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | ldr BASE, L->base
+ | mv_vmstate CARG3, INTERP
+ | ldr CRET2, L->top
+ | lsl RC, CRET1, #3
+ | st_vmstate CARG3
+ | ldr PC, [BASE, FRAME_PC]
+ | sub RA, CRET2, RC // RA = L->top - nresults*8
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 0xe\n" /* Return address is in lr. */
+ "\t.byte 0xc\n\t.uleb128 0xd\n\t.uleb128 0\n" /* def_cfa sp */
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x8e\n\t.uleb128 1\n", /* offset lr */
+ fcofs, CFRAME_SIZE);
+ for (i = 11; i >= 4; i--) /* offset r4-r11 */
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2+(11-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x8e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x8b\n\t.uleb128 2\n" /* offset r11 */
+ "\t.byte 0x85\n\t.uleb128 3\n" /* offset r5 */
+ "\t.byte 0x84\n\t.uleb128 4\n" /* offset r4 */
+ "\t.byte 0xd\n\t.uleb128 0xb\n" /* def_cfa_register r11 */
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/src/LuaJIT/src/buildvm_arm.h b/src/LuaJIT/src/buildvm_arm.h
new file mode 100644
index 000000000..812291810
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_arm.h
@@ -0,0 +1,7487 @@
+/*
+** This file has been pre-processed with DynASM.
+** http://luajit.org/dynasm.html
+** DynASM version 1.3.0, DynASM arm version 1.3.0
+** DO NOT EDIT! The original file is in "buildvm_arm.dasc".
+*/
+
+#if DASM_VERSION != 10300
+#error "Version mismatch between DynASM and included encoding engine"
+#endif
+
+#define DASM_SECTION_CODE_OP 0
+#define DASM_SECTION_CODE_SUB 1
+#define DASM_MAXSECTION 2
+static const unsigned int build_actionlist[5777] = {
+0x00010001,
+0x00060014,
+0xe3160000,
+0x000a0000,
+0x0a000000,
+0x00050015,
+0xe51c6004,
+0xe3e01000,
+0x000a0000,
+0xe1a0900c,
+0xe50a1004,
+0xe24aa008,
+0x00060016,
+0xe28bb008,
+0xe2160000,
+0x000a0000,
+0xe58db004,
+0x0a000000,
+0x00050017,
+0x00060018,
+0xe3c6c000,
+0x000a0000,
+0xe3500000,
+0x000a0000,
+0xe049c00c,
+0x1a000000,
+0x00050014,
+0xe508c000,
+0x000d8180,
+0xe59d5014,
+0xe3e03000,
+0x000a0000,
+0xe2499008,
+0xe25b2008,
+0xe1a05185,
+0xe5073000,
+0x000d8180,
+0x0a000000,
+0x00050002,
+0x0006000b,
+0xe2522008,
+0xe0ca00d8,
+0xe0c900f8,
+0x1a000000,
+0x0005000b,
+0x0006000c,
+0xe155000b,
+0x1a000000,
+0x00050006,
+0x0006000d,
+0xe5089000,
+0x000d8180,
+0x00060019,
+0x00000000,
+0xe59db010,
+0xe3a00000,
+0xe508b000,
+0x000d8180,
+0x0006001a,
+0xe28dd01c,
+0xe8bd8ff0,
+0x00060010,
+0xba000000,
+0x00050007,
+0xe5182000,
+0x000d8180,
+0xe3e01000,
+0x000a0000,
+0xe1590002,
+0x2a000000,
+0x00050008,
+0xe5891004,
+0xe28bb008,
+0xe2899008,
+0xea000000,
+0x0005000c,
+0x00060011,
+0xe04b0005,
+0xe3550000,
+0x10499000,
+0xea000000,
+0x0005000d,
+0x00060012,
+0xe5089000,
+0x000d8180,
+0xe1a01005,
+0xe1a00008,
+0xeb000000,
+0x00030000,
+0xe5189000,
+0x000d8180,
+0xea000000,
+0x0005000c,
+0x0006001b,
+0xe1a0d000,
+0xe1a00001,
+0x0006001c,
+0xe59d800c,
+0xe3e03000,
+0x000a0000,
+0xe5182000,
+0x000d8180,
+0xe5023000,
+0x000d8180,
+0xea000000,
+0x0005001a,
+0x0006001d,
+0x00000000,
+0xe3c00000,
+0x000a0000,
+0xe1a0d000,
+0x0006001e,
+0xe59d800c,
+0xe3a040ff,
+0xe3a0b010,
+0xe1a04184,
+0xe5189000,
+0x000d8180,
+0xe5187000,
+0x000d8180,
+0xe3e00000,
+0x000a0000,
+0xe249a008,
+0xe5196004,
+0xe2877000,
+0x000a0000,
+0xe3e01000,
+0x000a0000,
+0xe5090004,
+0xe5071000,
+0x000d8180,
+0xea000000,
+0x00050016,
+0x0006001f,
+0xe3a01000,
+0x000a0000,
+0xea000000,
+0x00050002,
+0x00060020,
+0xe089b00b,
+0xe04aa009,
+0xe1a00008,
+0xe5089000,
+0x000d8180,
+0xe2866004,
+0xe508b000,
+0x000d8180,
+0xe1a021aa,
+0x0006000c,
+0xe58d6008,
+0xeb000000,
+0x00030000,
+0xe5189000,
+0x000d8180,
+0xe518b000,
+0x000d8180,
+0xe5192008,
+0xe04bb009,
+0xe5126000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe08aa009,
+0xe12fff1c,
+0x00060021,
+0xe92d4ff0,
+0xe24dd01c,
+0xe1a08000,
+0xe5107000,
+0x000d8180,
+0x00000000,
+0xe1a09001,
+0xe2877000,
+0x000a0000,
+0xe58d800c,
+0xe3a06000,
+0x000a0000,
+0xe58d2014,
+0xe28d1000,
+0x000a0000,
+0xe5580000,
+0x000d8180,
+0xe58d2018,
+0xe5081000,
+0x000d8180,
+0xe58d2010,
+0xe3500000,
+0xe58d8008,
+0x0a000000,
+0x00050003,
+0xe1a0a009,
+0xe5189000,
+0x000d8180,
+0xe5180000,
+0x000d8180,
+0xe3a040ff,
+0xe5482000,
+0x000d8180,
+0xe040b009,
+0xe5196004,
+0xe1a04184,
+0xe3e01000,
+0x000a0000,
+0xe28bb008,
+0xe2160000,
+0x000a0000,
+0xe5071000,
+0x000d8180,
+0xe58db004,
+0x0a000000,
+0x00050017,
+0xea000000,
+0x00050018,
+0x00060022,
+0xe92d4ff0,
+0xe24dd01c,
+0xe3a06000,
+0x000a0000,
+0xe58d3018,
+0xea000000,
+0x00050001,
+0x00060023,
+0xe92d4ff0,
+0xe24dd01c,
+0xe3a06000,
+0x000a0000,
+0x0006000b,
+0xe510b000,
+0x000d8180,
+0xe58d2014,
+0xe1a08000,
+0xe58d000c,
+0xe1a09001,
+0xe508d000,
+0x000d8180,
+0x00000000,
+0xe5187000,
+0x000d8180,
+0xe58d0008,
+0xe58db010,
+0xe2877000,
+0x000a0000,
+0x0006000d,
+0xe518c000,
+0x000d8180,
+0xe5180000,
+0x000d8180,
+0xe3a040ff,
+0xe0866009,
+0xe1a04184,
+0xe046600c,
+0xe3e01000,
+0x000a0000,
+0xe040b009,
+0xe5071000,
+0x000d8180,
+0x00060024,
+0xe14920d8,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050025,
+0x00060026,
+0xe5096004,
+0xe5126000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe08aa009,
+0xe12fff1c,
+0x00060027,
+0xe92d4ff0,
+0xe24dd01c,
+0xe1a08000,
+0xe510a000,
+0x000d8180,
+0xe58d000c,
+0xe518c000,
+0x000d8180,
+0xe58d0008,
+0xe518b000,
+0x000d8180,
+0xe04aa00c,
+0xe508d000,
+0x000d8180,
+0xe3a0c000,
+0xe58da014,
+0xe58dc018,
+0xe58db010,
+0xe12fff33,
+0xe5187000,
+0x000d8180,
+0xe1b09000,
+0xe3a06000,
+0x000a0000,
+0xe2877000,
+0x000a0000,
+0x1a000000,
+0x0005000d,
+0xea000000,
+0x00050019,
+0x00060015,
+0x00000000,
+0xe51c2008,
+0xe5190010,
+0xe1a03009,
+0xe1a0900c,
+0x00000000,
+0xe3500001,
+0x00000000,
+0xe513600c,
+0xe5122000,
+0x000d8180,
+0xe3e0e000,
+0x000a0000,
+0xe08a100b,
+0xe501e004,
+0x00000000,
+0x9a000000,
+0x00050001,
+0x00000000,
+0xe5125000,
+0x000d8180,
+0xe12fff10,
+0x00000000,
+0x0006000b,
+0x0a000000,
+0x00050028,
+0xe5192008,
+0xe2433010,
+0xe043b009,
+0xea000000,
+0x00050029,
+0x00000000,
+0x0006002a,
+0xe516e004,
+0xe2431010,
+0xe1ca20d0,
+0xe5089000,
+0x000d8180,
+0xe004baae,
+0xe004a2ae,
+0xe089000b,
+0xe0510000,
+0x11c120f0,
+0x11a02000,
+0x1a000000,
+0x0005002b,
+0xe18920fa,
+0xea000000,
+0x0005002c,
+0x0006002d,
+0xe089100c,
+0xea000000,
+0x00050002,
+0x0006002e,
+0xe2471000,
+0x000a0000,
+0xe3e03000,
+0x000a0000,
+0xe581c000,
+0xe5813004,
+0x0006000c,
+0xe3e03000,
+0x000a0000,
+0xe58db000,
+0xe58d3004,
+0xe1a0200d,
+0xea000000,
+0x00050001,
+0x0006002f,
+0xe004caae,
+0xe58db000,
+0xe3e03000,
+0x000a0000,
+0xe089100c,
+0xe58d3004,
+0xe1a0200d,
+0xea000000,
+0x00050001,
+0x00060030,
+0xe089100c,
+0xe089200b,
+0x0006000b,
+0xe5089000,
+0x000d8180,
+0xe1a00008,
+0xe58d6008,
+0xeb000000,
+0x00030001,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x00000000,
+0xe3500000,
+0x0a000000,
+0x00050003,
+0xe1c020d0,
+0xe5d6c000,
+0xe496e004,
+0xe18920fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000d,
+0xe2690000,
+0x000a0000,
+0xe5189000,
+0x000d8180,
+0xe3a0b010,
+0xe509600c,
+0xe0806009,
+0xe5192008,
+0xea000000,
+0x00050026,
+0x00060031,
+0xe089100c,
+0xea000000,
+0x00050002,
+0x00060032,
+0xe2471000,
+0x000a0000,
+0xe3e03000,
+0x000a0000,
+0xe581c000,
+0xe5813004,
+0x0006000c,
+0xe3e03000,
+0x000a0000,
+0xe58db000,
+0xe58d3004,
+0xe1a0200d,
+0xea000000,
+0x00050001,
+0x00060033,
+0xe004caae,
+0xe58db000,
+0xe3e03000,
+0x000a0000,
+0xe089100c,
+0xe58d3004,
+0xe1a0200d,
+0xea000000,
+0x00050001,
+0x00060034,
+0xe089100c,
+0xe089200b,
+0x0006000b,
+0xe5089000,
+0x000d8180,
+0xe1a00008,
+0xe58d6008,
+0xeb000000,
+0x00030002,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x00000000,
+0xe3500000,
+0xe18920da,
+0x0a000000,
+0x00050003,
+0xe5d6c000,
+0xe1c020f0,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000d,
+0xe2690000,
+0x000a0000,
+0xe5189000,
+0x000d8180,
+0xe3a0b018,
+0xe1c921f0,
+0xe509600c,
+0xe0806009,
+0xe5192008,
+0xea000000,
+0x00050026,
+0x00060035,
+0xe1a00008,
+0xe2466004,
+0xe1a0100a,
+0xe5089000,
+0x000d8180,
+0xe1a0200b,
+0xe58d6008,
+0xe20e30ff,
+0xeb000000,
+0x00030003,
+0x0006000d,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x00000000,
+0xe3500001,
+0x8a000000,
+0x00050036,
+0x0006000e,
+0xe1d6c0b2,
+0xe2866004,
+0xe086c10c,
+0x224c6b80,
+0x0006002c,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00060037,
+0xe516e004,
+0xe1ca00d0,
+0xe00422ae,
+0xe18900f2,
+0xea000000,
+0x0005002c,
+0x00060038,
+0xe59a1004,
+0xe3e00000,
+0x000a0000,
+0xe1500001,
+0xea000000,
+0x0005000e,
+0x00060039,
+0xe59a1004,
+0xe3710000,
+0x000a0000,
+0xea000000,
+0x0005000e,
+0x0006003a,
+0xe2466004,
+0xe5089000,
+0x000d8180,
+0xe1a00008,
+0xe58d6008,
+0xeb000000,
+0x00030004,
+0xea000000,
+0x0005000d,
+0x0006003b,
+0x00000000,
+0xe2466004,
+0xe5089000,
+0x000d8180,
+0xe1a00008,
+0xe1a0100e,
+0xe58d6008,
+0xeb000000,
+0x00030005,
+0xea000000,
+0x0005000d,
+0x00000000,
+0x0006003c,
+0xe004caae,
+0xe004b6ae,
+0xe089200c,
+0xe085300b,
+0xea000000,
+0x00050001,
+0x0006003d,
+0xe004caae,
+0xe004b6ae,
+0xe089300c,
+0xe085200b,
+0xea000000,
+0x00050001,
+0x0006003e,
+0xe516e008,
+0xe2466004,
+0xe089200b,
+0xe089300b,
+0xea000000,
+0x00050001,
+0x0006003f,
+0xe004caae,
+0xe004b6ae,
+0xe089200c,
+0xe089300b,
+0x0006000b,
+0xe20ec0ff,
+0xe089100a,
+0xe5089000,
+0x000d8180,
+0xe1a00008,
+0xe58d6008,
+0xe58dc000,
+0xeb000000,
+0x00030006,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x00000000,
+0xe3500000,
+0x0a000000,
+0x0005002c,
+0x00060036,
+0xe0401009,
+0xe500600c,
+0xe2816000,
+0x000a0000,
+0xe1a09000,
+0xe3a0b010,
+0xea000000,
+0x00050024,
+0x00060040,
+0xe089100b,
+0xe5089000,
+0x000d8180,
+0xe1a00008,
+0xe58d6008,
+0xeb000000,
+0x00030007,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x00000000,
+0xe3500000,
+0x1a000000,
+0x00050036,
+0xe799000b,
+0xea000000,
+0x00050041,
+0x00000000,
+0xea000000,
+0x00050036,
+0x00000000,
+0x00060025,
+0xe1a00008,
+0xe508c000,
+0x000d8180,
+0xe2491008,
+0xe58d6008,
+0xe089200b,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030008,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xe5192008,
+0xe28bb008,
+0xe5096004,
+0xe5126000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe08aa009,
+0xe12fff1c,
+0x00060042,
+0xe1a00008,
+0xe5089000,
+0x000d8180,
+0xe24a1008,
+0xe58d6008,
+0xe08a200b,
+0xeb000000,
+0x00030008,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x00000000,
+0xe51a2008,
+0xe5196004,
+0xe28bb008,
+0xea000000,
+0x00050043,
+0x00060044,
+0xe1a00008,
+0xe5089000,
+0x000d8180,
+0xe1a0100a,
+0xe58d6008,
+0xeb000000,
+0x00030009,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x00000000,
+0xe556c004,
+0x00000000,
+0xe516e004,
+0x00000000,
+0xe35c0000,
+0x000a0000,
+0x00000000,
+0xe004a2ae,
+0xe1a0b82e,
+0x00000000,
+0x0a000000,
+0x00070000,
+0x00000000,
+0xea000000,
+0x00070000,
+0x00060045,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x8a000000,
+0x00050046,
+0xe5196004,
+0xe14900f8,
+0xe1a0c009,
+0xe25ba008,
+0xe28bb008,
+0x0a000000,
+0x00050047,
+0x0006000b,
+0xe1cc00d8,
+0xe25aa008,
+0xe0cc00f8,
+0x1a000000,
+0x0005000b,
+0xea000000,
+0x00050047,
+0x00060048,
+0xe5991004,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x33e01000,
+0x000a0000,
+0xe2613000,
+0x000a0000,
+0xe1a03183,
+0xe18200d3,
+0xea000000,
+0x00050049,
+0x0006004a,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x13710000,
+0x000a0000,
+0x1a000000,
+0x00050006,
+0x0006000b,
+0xe510c000,
+0x000d8180,
+0x0006000c,
+0x00000000,
+0xe3e01000,
+0x000a0000,
+0xe517b000,
+0x000d8180,
+0xe35c0000,
+0x0a000000,
+0x00050049,
+0xe51c2000,
+0x000d8180,
+0xe51b3000,
+0x000d8180,
+0xe51ce000,
+0x000d8180,
+0xe0022003,
+0xe0822082,
+0xe08ee182,
+0x0006000d,
+0xe14e20d0,
+0x000c8100,
+0xe14e00d0,
+0x000c8100,
+0xe51ee000,
+0x000d8180,
+0xe3730000,
+0x000a0000,
+0x0152000b,
+0x0a000000,
+0x00050005,
+0xe35e0000,
+0x1a000000,
+0x0005000d,
+0x0006000e,
+0xe1a0000c,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006000f,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050049,
+0xea000000,
+0x0005000e,
+0x00060010,
+0xe3710000,
+0x000a0000,
+0x00000000,
+0x21e01001,
+0x33a01000,
+0x000a0000,
+0xe0873101,
+0xe513c000,
+0x000d8180,
+0xea000000,
+0x0005000c,
+0x0006004b,
+0xe1c900d0,
+0xe1c920d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x0510c000,
+0x000d8180,
+0x03730000,
+0x000a0000,
+0x05503000,
+0x000d8180,
+0x035c0000,
+0x1a000000,
+0x00050046,
+0xe3130000,
+0x000a0000,
+0xe5002000,
+0x000d8180,
+0x0a000000,
+0x00050049,
+0xe5172000,
+0x000d8180,
+0xe3c33000,
+0x000a0000,
+0xe5070000,
+0x000d8180,
+0xe5403000,
+0x000d8180,
+0xe5002000,
+0x000d8180,
+0xea000000,
+0x00050049,
+0x0006004c,
+0xe1c920d0,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe1a01002,
+0xe3730000,
+0x000a0000,
+0x00000000,
+0x1a000000,
+0x00050046,
+0xe1a00008,
+0xe2892008,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x0003000a,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xe1c000d0,
+0xea000000,
+0x00050049,
+0x0006004d,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x9a000000,
+0x00050049,
+0xea000000,
+0x00050046,
+0x0006004e,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x0a000000,
+0x00050049,
+0xe5173000,
+0x000d8180,
+0xe5089000,
+0x000d8180,
+0xe3710000,
+0x000a0000,
+0x93530000,
+0xe58d6008,
+0x8a000000,
+0x00050046,
+0xe5170000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe1500001,
+0xab000000,
+0x0005004f,
+0xe1a00008,
+0xe1a01009,
+0xeb000000,
+0x0003000b,
+0xe5189000,
+0x000d8180,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x00060050,
+0x00000000,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3e03000,
+0x000a0000,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe18920fb,
+0xe5196004,
+0xe1a01000,
+0xe5089000,
+0x000d8180,
+0xe1a00008,
+0xe5089000,
+0x000d8180,
+0xe2892008,
+0xe58d6008,
+0xeb000000,
+0x0003000c,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x00000000,
+0xe3500000,
+0x03e01000,
+0x000a0000,
+0x0a000000,
+0x00050049,
+0xe1c900d8,
+0xe1c921d0,
+0xe3a0b000,
+0x000a0000,
+0xe14900f8,
+0xe1c920f0,
+0xea000000,
+0x00050047,
+0x00060051,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0x00000000,
+0xe510c000,
+0x000d8180,
+0x00000000,
+0xe14220d0,
+0x000c8100,
+0xe5196004,
+0x00000000,
+0xe35c0000,
+0x1a000000,
+0x00050046,
+0x00000000,
+0xe3e01000,
+0x000a0000,
+0xe3a0b000,
+0x000a0000,
+0xe14920f8,
+0xe589100c,
+0xea000000,
+0x00050047,
+0x00060052,
+0xe1c900d0,
+0xe1c920d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x03730000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe510c000,
+0x000d8180,
+0xe510b000,
+0x000d8180,
+0xe2822001,
+0xe5196004,
+0xe152000c,
+0xe08bb182,
+0xe14920f8,
+0x31cb00d0,
+0xe3a0b000,
+0x000a0000,
+0x2a000000,
+0x00050002,
+0x0006000b,
+0xe3710000,
+0x000a0000,
+0x13a0b000,
+0x000a0000,
+0x11c900f0,
+0xea000000,
+0x00050047,
+0x0006000c,
+0xe510c000,
+0x000d8180,
+0xe1a01002,
+0xe35c0000,
+0x0a000000,
+0x00050047,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x0003000d,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xe3500000,
+0x0a000000,
+0x00050047,
+0xe1c000d0,
+0xea000000,
+0x0005000b,
+0x00060053,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0x00000000,
+0xe510c000,
+0x000d8180,
+0x00000000,
+0xe14220d0,
+0x000c8100,
+0xe5196004,
+0x00000000,
+0xe35c0000,
+0x1a000000,
+0x00050046,
+0x00000000,
+0xe3a00000,
+0xe3e01000,
+0x000a0000,
+0xe3a0b000,
+0x000a0000,
+0xe14920f8,
+0xe1c900f8,
+0xea000000,
+0x00050047,
+0x00060054,
+0xe557a000,
+0x000d8180,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe31a0000,
+0x000a0000,
+0xe1a0c009,
+0xe2899008,
+0x03a06000,
+0x000a0000,
+0x13a06000,
+0x000a0000,
+0xe24bb008,
+0xea000000,
+0x00050024,
+0x00060055,
+0xe1c900d0,
+0xe1c920d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe557a000,
+0x000d8180,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe1a0c009,
+0xe1c900f8,
+0xe1c920f0,
+0xe31a0000,
+0x000a0000,
+0xe2899010,
+0x03a06000,
+0x000a0000,
+0x13a06000,
+0x000a0000,
+0xe24bb010,
+0xea000000,
+0x00050024,
+0x00060056,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x00000000,
+0x1a000000,
+0x00050046,
+0xe5196004,
+0xe5089000,
+0x000d8180,
+0xe5101000,
+0x000d8180,
+0xe550a000,
+0x000d8180,
+0xe510c000,
+0x000d8180,
+0xe081200b,
+0xe081300a,
+0xe58d6008,
+0xe153000c,
+0x0a000000,
+0x00050046,
+0xe5103000,
+0x000d8180,
+0xe510c000,
+0x000d8180,
+0xe35a0000,
+0x000a0000,
+0x91520003,
+0x935c0000,
+0x8a000000,
+0x00050046,
+0x0006000b,
+0xe2422008,
+0xe2899008,
+0xe24bb008,
+0xe5002000,
+0x000d8180,
+0xe5089000,
+0x000d8180,
+0x0006000c,
+0xe18920dc,
+0xe15c000b,
+0x118120fc,
+0xe28cc008,
+0x1a000000,
+0x0005000c,
+0xe3a02000,
+0xe1a0a000,
+0xe3a03000,
+0xeb000000,
+0x00050021,
+0x0006000e,
+0xe51a2000,
+0x000d8180,
+0xe3e01000,
+0x000a0000,
+0xe51a3000,
+0x000d8180,
+0xe5071000,
+0x000d8180,
+0xe3500000,
+0x000a0000,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x8a000000,
+0x00050008,
+0xe053b002,
+0xe5180000,
+0x000d8180,
+0xe089100b,
+0x0a000000,
+0x00050006,
+0xe1510000,
+0xe3a0c000,
+0x8a000000,
+0x00050009,
+0xe24b3008,
+0xe50a2000,
+0x000d8180,
+0x0006000f,
+0xe18200dc,
+0xe15c0003,
+0xe18900fc,
+0xe28cc008,
+0x1a000000,
+0x0005000f,
+0x00060010,
+0xe3e02000,
+0x000a0000,
+0xe28bb010,
+0x00060011,
+0xe5092004,
+0xe249a008,
+0xe2160000,
+0x000a0000,
+0xe58d6008,
+0xe58db004,
+0x0a000000,
+0x00050017,
+0xea000000,
+0x00050018,
+0x00060012,
+0xe16300d8,
+0xe3e02000,
+0x000a0000,
+0xe3a0b000,
+0x000a0000,
+0xe50a3000,
+0x000d8180,
+0xe1c900f0,
+0xea000000,
+0x00050011,
+0x00060013,
+0xe1a00008,
+0xe1a011ab,
+0xeb000000,
+0x00030000,
+0xe3a00000,
+0xea000000,
+0x0005000e,
+0x00060057,
+0x00000000,
+0xe5120000,
+0x000d8180,
+0xe5196004,
+0xe5089000,
+0x000d8180,
+0xe5101000,
+0x000d8180,
+0xe550a000,
+0x000d8180,
+0xe510c000,
+0x000d8180,
+0xe081200b,
+0xe081300a,
+0xe58d6008,
+0xe153000c,
+0x0a000000,
+0x00050046,
+0xe5103000,
+0x000d8180,
+0xe510c000,
+0x000d8180,
+0xe35a0000,
+0x000a0000,
+0x91520003,
+0x935c0000,
+0x8a000000,
+0x00050046,
+0x0006000b,
+0xe5002000,
+0x000d8180,
+0xe5089000,
+0x000d8180,
+0x0006000c,
+0xe18920dc,
+0xe15c000b,
+0x118120fc,
+0xe28cc008,
+0x1a000000,
+0x0005000c,
+0xe3a02000,
+0xe1a0a000,
+0xe3a03000,
+0xeb000000,
+0x00050021,
+0x0006000e,
+0xe51a2000,
+0x000d8180,
+0xe3e01000,
+0x000a0000,
+0xe51a3000,
+0x000d8180,
+0xe5071000,
+0x000d8180,
+0xe3500000,
+0x000a0000,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x8a000000,
+0x00050008,
+0xe053b002,
+0xe5180000,
+0x000d8180,
+0xe089100b,
+0x0a000000,
+0x00050006,
+0xe1510000,
+0xe3a0c000,
+0x8a000000,
+0x00050009,
+0xe24b3008,
+0xe50a2000,
+0x000d8180,
+0x0006000f,
+0xe18200dc,
+0xe15c0003,
+0xe18900fc,
+0xe28cc008,
+0x1a000000,
+0x0005000f,
+0x00060010,
+0xe1a0a009,
+0xe28bb008,
+0xe2160000,
+0x000a0000,
+0xe58d6008,
+0xe58db004,
+0x0a000000,
+0x00050017,
+0xea000000,
+0x00050018,
+0x00060012,
+0xe1a00008,
+0xe1a0100a,
+0xeb000000,
+0x0003000e,
+0x00060013,
+0xe1a00008,
+0xe1a011ab,
+0xeb000000,
+0x00030000,
+0xe3a00000,
+0xea000000,
+0x0005000e,
+0x00060058,
+0xe5180000,
+0x000d8180,
+0xe089100b,
+0xe5089000,
+0x000d8180,
+0xe3100000,
+0x000a0000,
+0xe5081000,
+0x000d8180,
+0x00000000,
+0xe3a00000,
+0x000a0000,
+0xe3a02000,
+0x0a000000,
+0x00050046,
+0xe5082000,
+0x000d8180,
+0xe5480000,
+0x000d8180,
+0xea000000,
+0x0005001a,
+0x00060059,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x0a000000,
+0x00050049,
+0x8a000000,
+0x00050046,
+0xe1a02081,
+0xe292c980,
+0x5a000000,
+0x00050002,
+0xe3e03ff8,
+0xe053cacc,
+0xe1a03581,
+0xe1a02580,
+0xe3833480,
+0xe26ce020,
+0xe1833aa0,
+0x9a000000,
+0x00050003,
+0xe1822e13,
+0xe1a00c33,
+0xe1120fc1,
+0x12800001,
+0xe3510000,
+0xb2600000,
+0x0006000b,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006000c,
+0xe1822000,
+0xe1120fc1,
+0x03a00000,
+0x13e00000,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006000d,
+0x03530480,
+0x03520000,
+0x1a000000,
+0x00050004,
+0xe3510000,
+0x43a00480,
+0x4a000000,
+0x0005000b,
+0x0006000e,
+0x00000000,
+0xeb000000,
+0x0005005a,
+0xea000000,
+0x00050049,
+0x0006005b,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x0a000000,
+0x00050049,
+0x8a000000,
+0x00050046,
+0xe1a02081,
+0xe292c980,
+0x5a000000,
+0x00050002,
+0xe3e03ff8,
+0xe053cacc,
+0xe1a03581,
+0xe1a02580,
+0xe3833480,
+0xe26ce020,
+0xe1833aa0,
+0x9a000000,
+0x00050003,
+0xe1822e13,
+0xe1a00c33,
+0xe1d22fc1,
+0x12900001,
+0x614f00d0,
+0x00051809,
+0x6a000000,
+0x00050049,
+0xe3510000,
+0xb2600000,
+0x0006000b,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006000c,
+0xe1822000,
+0xe1d22fc1,
+0x03a00000,
+0x13a00001,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006000d,
+0x03530480,
+0x1a000000,
+0x00050004,
+0xe3510000,
+0x43a00480,
+0x4a000000,
+0x0005000b,
+0x0006000e,
+0xeb000000,
+0x0005005c,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00040007,
+0x00060013,
+0x00020000,
+0x00000000,
+0x41e00000,
+0x0006005d,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x8a000000,
+0x00050046,
+0x13c11480,
+0x1a000000,
+0x00050049,
+0xe3500000,
+0xb2700000,
+0x614f00d0,
+0x00051813,
+0x00060049,
+0xe5196004,
+0xe14900f8,
+0x0006005e,
+0xe3a0b000,
+0x000a0000,
+0x00060047,
+0xe2160000,
+0x000a0000,
+0x0516e004,
+0xe58db004,
+0xe249a008,
+0x1a000000,
+0x00050018,
+0xe004caae,
+0x0006000f,
+0xe15c000b,
+0x8a000000,
+0x00050006,
+0xe00402ae,
+0xe5d6c000,
+0xe496e004,
+0xe04a9000,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00060010,
+0xe08a100b,
+0xe3e00000,
+0x000a0000,
+0xe28bb008,
+0xe5010004,
+0xea000000,
+0x0005000f,
+0x0006005f,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x0003000f,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060060,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030010,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060061,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030011,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060062,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030012,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060063,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030013,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060064,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030014,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060065,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030015,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060066,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030016,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060067,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030017,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060068,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030018,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060069,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030019,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x0006006a,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x0003001a,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x0006006b,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x0003001b,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x0006006c,
+0xe1c900d0,
+0xe1c920d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x0003001c,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x0006006d,
+0xe1c900d0,
+0xe1c920d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x0003001d,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x0006006e,
+0xe1c900d0,
+0xe1c920d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x0003001e,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x0006006f,
+0x00060070,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0xe14220d0,
+0x000c8100,
+0xeb000000,
+0x0003001f,
+0xea000000,
+0x00050049,
+0x00060071,
+0xe1c900d0,
+0xe1c920d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030020,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xea000000,
+0x00050049,
+0x00060072,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0xe1a0200d,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030021,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xe59d2000,
+0xe3e03000,
+0x000a0000,
+0xe5196004,
+0xe14900f8,
+0xe3a0b000,
+0x000a0000,
+0xe1c920f0,
+0xea000000,
+0x00050047,
+0x00060073,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050046,
+0xe2492008,
+0xe5196004,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030022,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xe3a0b000,
+0x000a0000,
+0xe1c900f0,
+0xea000000,
+0x00050047,
+0x00060074,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0xe3a0a008,
+0x1a000000,
+0x00050004,
+0x0006000b,
+0xe18920da,
+0xe15a000b,
+0x2a000000,
+0x00050049,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050003,
+0xe1500002,
+0xe28aa008,
+0xc1a00002,
+0xea000000,
+0x0005000b,
+0x0006000d,
+0x8a000000,
+0x00050046,
+0xeb000000,
+0x00030023,
+0xe18920da,
+0xea000000,
+0x00050006,
+0x0006000e,
+0x8a000000,
+0x00050046,
+0x0006000f,
+0xe18920da,
+0xe15a000b,
+0x2a000000,
+0x00050049,
+0xe3730000,
+0x000a0000,
+0x2a000000,
+0x00050007,
+0x00060010,
+0x00000000,
+0xeb000000,
+0x00030024,
+0xe28aa008,
+0x81a00002,
+0x81a01003,
+0xea000000,
+0x0005000f,
+0x00060011,
+0x8a000000,
+0x00050046,
+0xe1cd00f0,
+0xe1a00002,
+0xeb000000,
+0x00030023,
+0xe1cd20d0,
+0xea000000,
+0x00050010,
+0x00060075,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0xe3a0a008,
+0x1a000000,
+0x00050004,
+0x0006000b,
+0xe18920da,
+0xe15a000b,
+0x2a000000,
+0x00050049,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050003,
+0xe1500002,
+0xe28aa008,
+0xb1a00002,
+0xea000000,
+0x0005000b,
+0x0006000d,
+0x8a000000,
+0x00050046,
+0xeb000000,
+0x00030023,
+0xe18920da,
+0xea000000,
+0x00050006,
+0x0006000e,
+0x8a000000,
+0x00050046,
+0x0006000f,
+0x00000000,
+0xe18920da,
+0xe15a000b,
+0x2a000000,
+0x00050049,
+0xe3730000,
+0x000a0000,
+0x2a000000,
+0x00050007,
+0x00060010,
+0xeb000000,
+0x00030024,
+0xe28aa008,
+0x31a00002,
+0x31a01003,
+0xea000000,
+0x0005000f,
+0x00060011,
+0x8a000000,
+0x00050046,
+0xe1cd00f0,
+0xe1a00002,
+0xeb000000,
+0x00030023,
+0xe1cd20d0,
+0xea000000,
+0x00050010,
+0x00060076,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe5100000,
+0x000d8180,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x00060077,
+0xe1c900d0,
+0xe5196004,
+0xe35b0008,
+0x03710000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe5102000,
+0x000d8180,
+0xe5500000,
+0x000d8180,
+0x00000000,
+0xe3e01000,
+0x000a0000,
+0xe3520000,
+0x03a0b000,
+0x000a0000,
+0x13a0b000,
+0x000a0000,
+0xe14900f8,
+0xea000000,
+0x00050047,
+0x00060078,
+0xe5170000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe1500001,
+0xab000000,
+0x0005004f,
+0xe1c900d0,
+0xe5196004,
+0xe35b0008,
+0x03710000,
+0x000a0000,
+0x03d030ff,
+0xe3a02001,
+0x1a000000,
+0x00050046,
+0xe58d0000,
+0xe1a0100d,
+0x00060079,
+0xe5089000,
+0x000d8180,
+0xe1a00008,
+0xe58d6008,
+0xeb000000,
+0x00030025,
+0xe5189000,
+0x000d8180,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006007a,
+0xe5170000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe1500001,
+0xab000000,
+0x0005004f,
+0xe1c900d0,
+0xe1c921d0,
+0xe35b0010,
+0xe3e0c000,
+0x0a000000,
+0x00050001,
+0x3a000000,
+0x00050046,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0xe1a0c002,
+0x1a000000,
+0x00050046,
+0x0006000b,
+0xe1c920d8,
+0xe3710000,
+0x000a0000,
+0x05101000,
+0x000d8180,
+0x03730000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe2813001,
+0xe3520000,
+0xb0822003,
+0xe3520001,
+0xb3a02001,
+0xe35c0000,
+0xb08cc003,
+0xe1cccfcc,
+0xe15c0001,
+0xe2800000,
+0x000a0000,
+0xc1a0c001,
+0xe0801002,
+0xe05c2002,
+0xe2822001,
+0xaa000000,
+0x00050079,
+0x0006007b,
+0xe2470000,
+0x000a0000,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006007c,
+0xe5170000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe1500001,
+0xab000000,
+0x0005004f,
+0xe1c900d0,
+0xe1c920d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x03730000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe2523001,
+0xe5101000,
+0x000d8180,
+0x00000000,
+0xba000000,
+0x0005007b,
+0xe3510001,
+0x3a000000,
+0x0005007b,
+0x1a000000,
+0x00050046,
+0xe517c000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe5100000,
+0x000d8180,
+0xe15c0002,
+0x3a000000,
+0x00050046,
+0x0006000b,
+0xe7c10003,
+0xe2533001,
+0xaa000000,
+0x0005000b,
+0xea000000,
+0x00050079,
+0x0006007d,
+0xe5170000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe1500001,
+0xab000000,
+0x0005004f,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe5102000,
+0x000d8180,
+0xe517c000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe1a03002,
+0xe2800000,
+0x000a0000,
+0xe15c0002,
+0x3a000000,
+0x00050046,
+0x0006000b,
+0x00000000,
+0xe4d0c001,
+0xe2533001,
+0xba000000,
+0x00050079,
+0xe7c1c003,
+0xea000000,
+0x0005000b,
+0x0006007e,
+0xe5170000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe1500001,
+0xab000000,
+0x0005004f,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe5102000,
+0x000d8180,
+0xe517c000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe3a03000,
+0xe2800000,
+0x000a0000,
+0xe15c0002,
+0x3a000000,
+0x00050046,
+0x0006000b,
+0xe7d0c003,
+0xe1530002,
+0x2a000000,
+0x00050079,
+0xe24cb041,
+0xe35b001a,
+0x322cc020,
+0xe7c1c003,
+0xe2833001,
+0xea000000,
+0x0005000b,
+0x0006007f,
+0xe5170000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe1500001,
+0xab000000,
+0x0005004f,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0xe5102000,
+0x000d8180,
+0xe517c000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe3a03000,
+0xe2800000,
+0x000a0000,
+0xe15c0002,
+0x3a000000,
+0x00050046,
+0x0006000b,
+0xe7d0c003,
+0xe1530002,
+0x2a000000,
+0x00050079,
+0xe24cb061,
+0xe35b001a,
+0x322cc020,
+0xe7c1c003,
+0xe2833001,
+0xea000000,
+0x0005000b,
+0x00060080,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050046,
+0x00000000,
+0xe1a0a009,
+0x00000000,
+0xeb000000,
+0x00030026,
+0x00000000,
+0xe1a0900a,
+0x00000000,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x00060081,
+0x8a000000,
+0x00050046,
+0x00060082,
+0xe1a0c081,
+0xe29cc980,
+0x53a00000,
+0x512fff1e,
+0xe3e03ff8,
+0xe053cacc,
+0x4a000000,
+0x00050001,
+0xe1a03581,
+0xe3833480,
+0xe1833aa0,
+0xe3510000,
+0xe1a00c33,
+0xb2600000,
+0xe12fff1e,
+0x0006000b,
+0xe28cc015,
+0xe1a03c30,
+0xe26cc014,
+0xe1a00601,
+0xe3510000,
+0xe1830c10,
+0xb2600000,
+0xe12fff1e,
+0x00060083,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x00060084,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1a02000,
+0xe3a0a008,
+0x0006000b,
+0xe18900da,
+0xe15a000b,
+0xe28aa008,
+0xaa000000,
+0x00050002,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe0022000,
+0xea000000,
+0x0005000b,
+0x00060085,
+0x00000000,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1a02000,
+0xe3a0a008,
+0x0006000b,
+0xe18900da,
+0xe15a000b,
+0xe28aa008,
+0xaa000000,
+0x00050002,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1822000,
+0xea000000,
+0x0005000b,
+0x00060086,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1a02000,
+0xe3a0a008,
+0x0006000b,
+0xe18900da,
+0xe15a000b,
+0xe28aa008,
+0xaa000000,
+0x00050002,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe0222000,
+0xea000000,
+0x0005000b,
+0x0006000c,
+0xe3e03000,
+0x000a0000,
+0xe5196004,
+0xe14920f8,
+0xea000000,
+0x0005005e,
+0x00060087,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe0202860,
+0xe3c228ff,
+0xe1a00460,
+0xe3e01000,
+0x000a0000,
+0xe0200422,
+0xea000000,
+0x00050049,
+0x00060088,
+0xe1c900d0,
+0xe35b0008,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1e00000,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x00060089,
+0xe1c900d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe200a01f,
+0xe1c900d0,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1a00a10,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006008a,
+0xe1c900d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0x00000000,
+0xe200a01f,
+0xe1c900d0,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1a00a30,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006008b,
+0xe1c900d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe200a01f,
+0xe1c900d0,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1a00a50,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006008c,
+0xe1c900d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe260a000,
+0xe1c900d0,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1a00a70,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x0006008d,
+0xe1c900d8,
+0xe35b0010,
+0x3a000000,
+0x00050046,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe200a01f,
+0xe1c900d0,
+0xe3710000,
+0x000a0000,
+0x1b000000,
+0x00050081,
+0xe1a00a70,
+0xe3e01000,
+0x000a0000,
+0xea000000,
+0x00050049,
+0x00060046,
+0xe5192008,
+0xe5181000,
+0x000d8180,
+0xe089000b,
+0xe5196004,
+0xe5080000,
+0x000d8180,
+0xe5122000,
+0x000d8180,
+0xe5089000,
+0x000d8180,
+0xe2800000,
+0x000a0000,
+0xe58d6008,
+0xe1500001,
+0xe1a00008,
+0x8a000000,
+0x00050005,
+0xe12fff32,
+0xe5189000,
+0x000d8180,
+0xe3500000,
+0xe1a0b180,
+0xe249a008,
+0xca000000,
+0x00050047,
+0x0006000b,
+0xe5180000,
+0x000d8180,
+0xe5192008,
+0xe040b009,
+0x1a000000,
+0x00050029,
+0xe5126000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe08aa009,
+0xe12fff1c,
+0x00060029,
+0xe2160000,
+0x000a0000,
+0xe3c61000,
+0x000a0000,
+0x00000000,
+0x0516e004,
+0x00020000,
+0x000412ae,
+0xe049c001,
+0xea000000,
+0x00050024,
+0x0006000f,
+0xe3a01000,
+0x000a0000,
+0xeb000000,
+0x00030000,
+0xe5189000,
+0x000d8180,
+0xe1500000,
+0xea000000,
+0x0005000b,
+0x0006004f,
+0xe1a0a00e,
+0xe5089000,
+0x000d8180,
+0xe089100b,
+0xe58d6008,
+0xe5081000,
+0x000d8180,
+0xe1a00008,
+0xeb000000,
+0x00030027,
+0xe5189000,
+0x000d8180,
+0xe1a0e00a,
+0xe5192008,
+0xe12fff1e,
+0x0006008e,
+0x00000000,
+0xe5570000,
+0x000d8180,
+0xe3100000,
+0x000a0000,
+0x1a000000,
+0x00050005,
+0xe5171000,
+0x000d8180,
+0xe3100000,
+0x000a0000,
+0x1a000000,
+0x00050001,
+0xe2411001,
+0xe3100000,
+0x000a0000,
+0x15071000,
+0x000d8180,
+0xea000000,
+0x00050001,
+0x00000000,
+0x0006008f,
+0xe5570000,
+0x000d8180,
+0xe3100000,
+0x000a0000,
+0x0a000000,
+0x00050001,
+0x0006000f,
+0xe20ec0ff,
+0xe087c10c,
+0xe51cf000,
+0x000d8180,
+0x00060090,
+0xe5570000,
+0x000d8180,
+0xe5171000,
+0x000d8180,
+0xe3100000,
+0x000a0000,
+0x1a000000,
+0x0005000f,
+0xe3100000,
+0x000a0000,
+0x0a000000,
+0x0005000f,
+0xe2511001,
+0xe5071000,
+0x000d8180,
+0x0a000000,
+0x00050001,
+0xe3100000,
+0x000a0000,
+0x0a000000,
+0x0005000f,
+0x0006000b,
+0xe1a00008,
+0xe5089000,
+0x000d8180,
+0xe1a01006,
+0xeb000000,
+0x00030028,
+0x0006000d,
+0xe5189000,
+0x000d8180,
+0x0006000e,
+0x00000000,
+0xe556c004,
+0xe516e004,
+0xe087c10c,
+0xe51cc000,
+0x000d8180,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00060091,
+0xe5130018,
+0xe2866004,
+0xe58d0004,
+0xea000000,
+0x0005000e,
+0x00060092,
+0x00000000,
+0xe5192008,
+0xe2470000,
+0x000a0000,
+0xe58d6008,
+0xe5122000,
+0x000d8180,
+0xe1a01006,
+0xe5078000,
+0x000d8180,
+0xe5522000,
+0x000d8180,
+0xe5089000,
+0x000d8180,
+0xe0892182,
+0xe5082000,
+0x000d8180,
+0xeb000000,
+0x00030029,
+0xea000000,
+0x0005000d,
+0x00000000,
+0x00060093,
+0xe1a01006,
+0x00000000,
+0xea000000,
+0x00050001,
+0x00000000,
+0x00060094,
+0x00000000,
+0xe3861001,
+0x0006000b,
+0x00000000,
+0xe089300b,
+0xe58d6008,
+0xe1a00008,
+0xe5089000,
+0x000d8180,
+0xe04aa009,
+0xe5083000,
+0x000d8180,
+0xeb000000,
+0x0003002a,
+0xe5189000,
+0x000d8180,
+0xe5183000,
+0x000d8180,
+0xe3a01000,
+0xe089a00a,
+0xe043b009,
+0xe58d1008,
+0xe5192008,
+0xe516e004,
+0xe12fff10,
+0x00060095,
+0x00000000,
+0xe24dd00c,
+0xe92d1fff,
+0xe59d0040,
+0xe59e7000,
+0xe28d2040,
+0xe3e03000,
+0x000a0000,
+0xe58d2034,
+0xe5073000,
+0x000d8180,
+0xe5301004,
+0xe58d0038,
+0xe58d003c,
+0xe1a01401,
+0xe0800341,
+0xe59e1004,
+0xe040000e,
+0xe5178000,
+0x000d8180,
+0xe0810120,
+0xe5179000,
+0x000d8180,
+0xe5070000,
+0x000d8180,
+0xe3a03000,
+0xe5078000,
+0x000d8180,
+0xe5089000,
+0x000d8180,
+0xe5073000,
+0x000d8180,
+0xe2470000,
+0x000a0000,
+0xe1a0100d,
+0xeb000000,
+0x0003002b,
+0xe5181000,
+0x000d8180,
+0xe5189000,
+0x000d8180,
+0xe3c11000,
+0x000a0000,
+0xe1a0d001,
+0xe59d6008,
+0xe58d800c,
+0xea000000,
+0x00050001,
+0x00000000,
+0x00060096,
+0x00000000,
+0xe59d800c,
+0x0006000b,
+0xe3500000,
+0xba000000,
+0x00050003,
+0xe1a0b180,
+0xe5191008,
+0xe58db004,
+0xe3a02000,
+0xe5111000,
+0x000d8180,
+0xe5072000,
+0x000d8180,
+0xe3e03000,
+0x000a0000,
+0xe5115000,
+0x000d8180,
+0xe5d6c000,
+0xe3a040ff,
+0xe496e004,
+0xe1a04184,
+0xe5073000,
+0x000d8180,
+0xe35c0000,
+0x000a0000,
+0xe797c10c,
+0xe004a2ae,
+0x31a0b82e,
+0x224bb008,
+0x208aa009,
+0xe12fff1c,
+0x0006000d,
+0xe2601000,
+0xe1a00008,
+0xeb000000,
+0x0003002c,
+0x00000000,
+0x00060013,
+0x3ff00000,
+0x0006005a,
+0xe1a02081,
+0xe292c980,
+0x5a000000,
+0x00050002,
+0xe3e03ff3,
+0xe053cacc,
+0x312fff1e,
+0xe3e03001,
+0xe1c02c13,
+0xe0000c13,
+0xe25cc020,
+0x51c13c13,
+0x51822003,
+0x53e03001,
+0x50011c13,
+0xe1120fc1,
+0x012fff1e,
+0xe3e03001,
+0xe35c0000,
+0x51a02c13,
+0x43e02000,
+0xe28cc020,
+0xe0500c13,
+0xe0c11002,
+0xe12fff1e,
+0x0006000c,
+0xe1822000,
+0xe1120fc1,
+0xe3a00000,
+0xe2011480,
+0x151f3000,
+0x00050813,
+0x11811003,
+0xe12fff1e,
+0x0006005c,
+0xe1a02081,
+0xe292c980,
+0x5a000000,
+0x00050002,
+0xe3e03ff3,
+0xe053cacc,
+0x312fff1e,
+0xe3e03001,
+0xe1c02c13,
+0xe0000c13,
+0xe25cc020,
+0x51c13c13,
+0x51822003,
+0x53e03001,
+0x50011c13,
+0xe1d22fc1,
+0x012fff1e,
+0xe3e03001,
+0xe35c0000,
+0x51a02c13,
+0x43e02000,
+0xe28cc020,
+0xe0500c13,
+0xe0c11002,
+0xe12fff1e,
+0x0006000c,
+0xe1822000,
+0xe1d22fc1,
+0xe3a00000,
+0xe2011480,
+0x151f3000,
+0x00050813,
+0x11811003,
+0xe12fff1e,
+0x00060097,
+0x00000000,
+0xe1a02081,
+0xe292c980,
+0x52011480,
+0x53a00000,
+0x512fff1e,
+0xe3e03ff3,
+0xe053cacc,
+0x312fff1e,
+0xe3e03001,
+0xe0000c13,
+0xe25cc020,
+0x50011c13,
+0xe12fff1e,
+0x00000000,
+0x00060098,
+0xe92d401f,
+0xeb000000,
+0x0003002d,
+0xeb000000,
+0x0005005a,
+0xe1cd20d8,
+0xeb000000,
+0x0003001f,
+0xe1cd20d0,
+0xe2211480,
+0xeb000000,
+0x0003002e,
+0xe28dd014,
+0xe8bd8000,
+0x00060099,
+0xe210c480,
+0x42600000,
+0xe02cc0c1,
+0xe3510000,
+0x42611000,
+0xe2513001,
+0x11500001,
+0x03a00000,
+0x81110003,
+0x00020000,
+0x00000003,
+0x9a000000,
+0x00050001,
+0xe16f2f10,
+0xe16f3f11,
+0xe0433002,
+0xe273201f,
+0x108ff182,
+0xe1a00000,
+0x00000000,
+0xe1500001,
+0x000900a7,
+0x20400001,
+0x000900a7,
+0x00000000,
+0x0006000b,
+0xe3500000,
+0x135c0000,
+0x40400001,
+0xe030108c,
+0x42600000,
+0xe12fff1e,
+0x0006009a,
+0xe59dc000,
+0xe35c0001,
+0x3a000000,
+0x0003002e,
+0x0a000000,
+0x0003002f,
+0xe35c0003,
+0x3a000000,
+0x0003001f,
+0x0a000000,
+0x0003002d,
+0xe35c0005,
+0x3a000000,
+0x00050098,
+0x0a000000,
+0x0003001c,
+0xe35c0007,
+0x32211480,
+0x03c11480,
+0x912fff1e,
+0x00000000,
+0xe35c0009,
+0x3a000000,
+0x0003001d,
+0x0a000000,
+0x00050009,
+0xe35c000b,
+0x8a000000,
+0x00050009,
+0xe92d4010,
+0x0a000000,
+0x00050001,
+0xeb000000,
+0x00030024,
+0x81a00002,
+0x81a01003,
+0xe8bd8010,
+0x00060013,
+0xe7f001f0,
+0x0006000b,
+0xeb000000,
+0x00030024,
+0x31a00002,
+0x31a01003,
+0xe8bd8010,
+0x00000000,
+0xe7f001f0,
+0x00000000,
+0x0006009b,
+0x00000000,
+0xe51c6000,
+0x000d8180,
+0xe28c7000,
+0x000a0000,
+0xe14600f0,
+0x000c8100,
+0xe14620f0,
+0x000c8100,
+0xe59d3000,
+0xe28d2000,
+0x000a0000,
+0xe1a00006,
+0xe1a031a3,
+0xe5062000,
+0x000d8180,
+0xe1a0100d,
+0xe5063000,
+0x000d8180,
+0xe58d6008,
+0xeb000000,
+0x00030030,
+0xe5109000,
+0x000d8180,
+0xe3e01000,
+0x000a0000,
+0xe510b000,
+0x000d8180,
+0xe3a040ff,
+0xe5192008,
+0xe1a08000,
+0xe04bb009,
+0xe1a04184,
+0xe5071000,
+0x000d8180,
+0xe5126000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe08aa009,
+0xe12fff1c,
+0x00000000,
+0x00060028,
+0x00000000,
+0xe5176000,
+0x000d8180,
+0xe5089000,
+0x000d8180,
+0xe5083000,
+0x000d8180,
+0xe5068000,
+0x000d8180,
+0xe1a00006,
+0xe1a0100a,
+0xeb000000,
+0x00030031,
+0xe14600d0,
+0x000c8100,
+0xea000000,
+0x0005001a,
+0x00000000,
+0x0006009c,
+0x00000000,
+0xe92d4830,
+0xe1a04000,
+0xe5100000,
+0x000d8180,
+0xe5541000,
+0x000d8180,
+0xe2842000,
+0x000a0000,
+0xe1a0b00d,
+0xe04dd000,
+0xe2511001,
+0xe514c000,
+0x000d8180,
+0x4a000000,
+0x00050002,
+0x0006000b,
+0xe7923101,
+0xe78d3101,
+0xe2511001,
+0x5a000000,
+0x0005000b,
+0x0006000c,
+0xe5140000,
+0x000d8180,
+0xe5141000,
+0x000d8180,
+0xe5142000,
+0x000d8180,
+0xe5143000,
+0x000d8180,
+0xe12fff3c,
+0xe1a0d00b,
+0xe5040000,
+0x000d8180,
+0xe5041000,
+0x000d8180,
+0xe8bd8830,
+0x00000000,
+0x00080000,
+0x00000000,
+0xe1a0b18b,
+0xe1aa00d9,
+0xe1d6c0b2,
+0xe1ab20d9,
+0xe2866004,
+0xe086c10c,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050003,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050004,
+0xe1500002,
+0x00000000,
+0xb24c6b80,
+0x00000000,
+0xa24c6b80,
+0x00000000,
+0xd24c6b80,
+0x00000000,
+0xc24c6b80,
+0x00000000,
+0x0006000b,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000d,
+0x8a000000,
+0x00050035,
+0xe3730000,
+0x000a0000,
+0x31a0a00c,
+0x3a000000,
+0x00050005,
+0xe1a00002,
+0xe1a0b00a,
+0xe1a0a00c,
+0xeb000000,
+0x00030023,
+0xe1a02000,
+0xe1a03001,
+0xe1cb00d0,
+0xea000000,
+0x00050005,
+0x0006000e,
+0x8a000000,
+0x00050035,
+0xe1a0a00c,
+0xeb000000,
+0x00030023,
+0xe1cb20d0,
+0x0006000f,
+0xeb000000,
+0x00030024,
+0x00000000,
+0x324a6b80,
+0x00000000,
+0x224a6b80,
+0x00000000,
+0x924a6b80,
+0x00000000,
+0x824a6b80,
+0x00000000,
+0xea000000,
+0x0005000b,
+0x00000000,
+0xe1a0b18b,
+0xe1aa00d9,
+0xe1d6c0b2,
+0xe1ab20d9,
+0xe2866004,
+0xe086c10c,
+0xe3710000,
+0x000a0000,
+0x93730000,
+0x000a0000,
+0x00000000,
+0x9a000000,
+0x0005009d,
+0x00000000,
+0x9a000000,
+0x0005009e,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x13730000,
+0x000a0000,
+0x0a000000,
+0x0005003b,
+0x00000000,
+0xe1510003,
+0x1a000000,
+0x00050002,
+0xe3710000,
+0x000a0000,
+0x2a000000,
+0x00050001,
+0xe1500002,
+0x00000000,
+0x1a000000,
+0x00050003,
+0x0006000b,
+0xe24c6b80,
+0x0006000c,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000d,
+0xe3710000,
+0x000a0000,
+0x8a000000,
+0x0005000c,
+0x00000000,
+0x0a000000,
+0x00050001,
+0xe3710000,
+0x000a0000,
+0x8a000000,
+0x00050002,
+0x00000000,
+0xe510a000,
+0x000d8180,
+0xe35a0000,
+0x00000000,
+0x0a000000,
+0x0005000c,
+0x00000000,
+0x0a000000,
+0x00050002,
+0x00000000,
+0xe55aa000,
+0x000d8180,
+0xe3a03000,
+0x000a0000,
+0xe1a01000,
+0xe31a0000,
+0x000a0000,
+0x0a000000,
+0x0005003a,
+0x00000000,
+0xea000000,
+0x0005000c,
+0x00000000,
+0x0006000c,
+0xe24c6b80,
+0x0006000b,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1e0b00b,
+0xe18900da,
+0xe1d6c0b2,
+0xe795210b,
+0xe2866004,
+0xe086c10c,
+0xe3710000,
+0x000a0000,
+0x00000000,
+0x1a000000,
+0x00050007,
+0xe1500002,
+0x00000000,
+0x01500002,
+0x00000000,
+0x024c6b80,
+0x0006000b,
+0x00000000,
+0x0006000b,
+0x124c6b80,
+0x00000000,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0x00060011,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x0005000b,
+0xea000000,
+0x0005003b,
+0x00000000,
+0xe1a0b18b,
+0xe1aa00d9,
+0xe1d6c0b2,
+0xe1ab20d5,
+0xe2866004,
+0xe086c10c,
+0x00000000,
+0x0006009d,
+0x00000000,
+0x0006009e,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050003,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050004,
+0xe1500002,
+0x00000000,
+0x024c6b80,
+0x0006000b,
+0x00000000,
+0x0006000b,
+0x124c6b80,
+0x00000000,
+0x0006000c,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000d,
+0x00000000,
+0x8a000000,
+0x00050007,
+0x00000000,
+0x824c6b80,
+0x00000000,
+0x8a000000,
+0x0005000c,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x31a0a00c,
+0x3a000000,
+0x00050005,
+0xe1a00002,
+0xe1a0b00a,
+0x0006000e,
+0xe1a0a00c,
+0xeb000000,
+0x00030023,
+0xe1cb20d0,
+0x0006000f,
+0xeb000000,
+0x00030032,
+0x00000000,
+0x024a6b80,
+0x00000000,
+0x124a6b80,
+0x00000000,
+0xea000000,
+0x0005000c,
+0x00000000,
+0x00060011,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x0005000b,
+0xea000000,
+0x0005003b,
+0x00000000,
+0xe18900da,
+0xe1d6c0b2,
+0xe2866004,
+0xe1e0b00b,
+0xe086c10c,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x0a000000,
+0x0005003b,
+0x00000000,
+0xe151000b,
+0x00000000,
+0x024c6b80,
+0x00000000,
+0x124c6b80,
+0x00000000,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe089b18b,
+0xe1d6c0b2,
+0xe1cb00d0,
+0xe2866004,
+0xe086c10c,
+0xe3710000,
+0x000a0000,
+0x00000000,
+0x924c6b80,
+0x00000000,
+0x918900fa,
+0x00000000,
+0x824c6b80,
+0x00000000,
+0x818900fa,
+0x00000000,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1a0b18b,
+0xe5d6c000,
+0xe18900db,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe089b18b,
+0xe5d6c000,
+0xe59b0004,
+0xe089a00a,
+0xe496e004,
+0xe3700000,
+0x000a0000,
+0x93e01000,
+0x000a0000,
+0x83e01000,
+0x000a0000,
+0xe58a1004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1a0b18b,
+0xe18900db,
+0xe5d6c000,
+0xe496e004,
+0xe3710000,
+0x000a0000,
+0x8a000000,
+0x0005003e,
+0x12211480,
+0x1a000000,
+0x00050005,
+0x02700000,
+0x614f00d0,
+0x00051809,
+0x0006000f,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00040007,
+0x00060013,
+0x00020000,
+0x00000000,
+0x41e00000,
+0x00000000,
+0xe1a0b18b,
+0xe18900db,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050002,
+0xe5100000,
+0x000d8180,
+0x0006000b,
+0xe3e01000,
+0x000a0000,
+0xe5d6c000,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000c,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050040,
+0x00000000,
+0xe5102000,
+0x000d8180,
+0xe3520000,
+0x1a000000,
+0x00050009,
+0x0006000d,
+0x00000000,
+0x00060041,
+0x00000000,
+0xe1a0b009,
+0x00000000,
+0xeb000000,
+0x00030026,
+0x00000000,
+0xe1a0900b,
+0x00000000,
+0xea000000,
+0x0005000b,
+0x00000000,
+0x00060013,
+0xe5523000,
+0x000d8180,
+0xe3130000,
+0x000a0000,
+0x1a000000,
+0x0005000d,
+0xea000000,
+0x00050040,
+0x00000000,
+0xe004caae,
+0xe004b6ae,
+0x00000000,
+0xe18900dc,
+0xe18520db,
+0x00000000,
+0xe18920dc,
+0xe18500db,
+0x00000000,
+0xe18900dc,
+0xe18920db,
+0x00000000,
+0xe5d6c000,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x03710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x03730000,
+0x000a0000,
+0x00000000,
+0x1a000000,
+0x00050005,
+0xe0900002,
+0x00000000,
+0x6a000000,
+0x0005003c,
+0x00000000,
+0x6a000000,
+0x0005003d,
+0x00000000,
+0x6a000000,
+0x0005003f,
+0x00000000,
+0x0006000e,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003c,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003d,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003f,
+0x00000000,
+0xeb000000,
+0x0003002e,
+0xe5d6c000,
+0xea000000,
+0x0005000e,
+0x00000000,
+0xe004caae,
+0xe004b6ae,
+0x00000000,
+0xe18900dc,
+0xe18520db,
+0x00000000,
+0xe18920dc,
+0xe18500db,
+0x00000000,
+0xe18900dc,
+0xe18920db,
+0x00000000,
+0xe5d6c000,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x03710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x03730000,
+0x000a0000,
+0x00000000,
+0x1a000000,
+0x00050005,
+0xe0500002,
+0x00000000,
+0x6a000000,
+0x0005003c,
+0x00000000,
+0x6a000000,
+0x0005003d,
+0x00000000,
+0x6a000000,
+0x0005003f,
+0x00000000,
+0x0006000e,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003c,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003d,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003f,
+0x00000000,
+0xeb000000,
+0x0003002f,
+0xe5d6c000,
+0xea000000,
+0x0005000e,
+0x00000000,
+0xe004caae,
+0xe004b6ae,
+0x00000000,
+0xe18900dc,
+0xe18520db,
+0x00000000,
+0xe18920dc,
+0xe18500db,
+0x00000000,
+0xe18900dc,
+0xe18920db,
+0x00000000,
+0xe5d6c000,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x03710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x03730000,
+0x000a0000,
+0x00000000,
+0x1a000000,
+0x00050005,
+0xe0cb0092,
+0xe15b0fc0,
+0x00000000,
+0x1a000000,
+0x0005003c,
+0x00000000,
+0x1a000000,
+0x0005003d,
+0x00000000,
+0x1a000000,
+0x0005003f,
+0x00000000,
+0x0006000e,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003c,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003d,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003f,
+0x00000000,
+0xeb000000,
+0x0003001f,
+0xe5d6c000,
+0xea000000,
+0x0005000e,
+0x00000000,
+0xe004caae,
+0xe004b6ae,
+0x00000000,
+0xe18900dc,
+0xe18520db,
+0x00000000,
+0xe18920dc,
+0xe18500db,
+0x00000000,
+0xe18900dc,
+0xe18920db,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003c,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003d,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003f,
+0x00000000,
+0xeb000000,
+0x0003002d,
+0xe5d6c000,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe004caae,
+0xe004b6ae,
+0x00000000,
+0xe18900dc,
+0xe18520db,
+0x00000000,
+0xe18920dc,
+0xe18500db,
+0x00000000,
+0xe18900dc,
+0xe18920db,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x03710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x03730000,
+0x000a0000,
+0x00000000,
+0x1a000000,
+0x00050005,
+0xe1b01002,
+0x00000000,
+0x0a000000,
+0x0005003c,
+0x00000000,
+0x0a000000,
+0x0005003d,
+0x00000000,
+0x0a000000,
+0x0005003f,
+0x00000000,
+0xeb000000,
+0x00050099,
+0xe3e01000,
+0x000a0000,
+0x0006000e,
+0xe5d6c000,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003c,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003d,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003f,
+0x00000000,
+0xeb000000,
+0x00050098,
+0xea000000,
+0x0005000e,
+0x00000000,
+0xe004caae,
+0xe004b6ae,
+0x00000000,
+0xe18900dc,
+0xe18520db,
+0x00000000,
+0xe18920dc,
+0xe18500db,
+0x00000000,
+0xe18900dc,
+0xe18920db,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003c,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003d,
+0x00000000,
+0xe3730000,
+0x000a0000,
+0x33710000,
+0x000a0000,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x33730000,
+0x000a0000,
+0x00000000,
+0x2a000000,
+0x0005003f,
+0x00000000,
+0xe1a0b009,
+0x00000000,
+0xeb000000,
+0x0003001c,
+0x00000000,
+0xe1a0900b,
+0x00000000,
+0xe5d6c000,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe004baae,
+0xe004c6ae,
+0xe04c200b,
+0xe5089000,
+0x000d8180,
+0xe089100c,
+0x0006002b,
+0xe1a00008,
+0xe58d6008,
+0xe1a021a2,
+0xeb000000,
+0x00030033,
+0xe5189000,
+0x000d8180,
+0xe3500000,
+0x1a000000,
+0x00050036,
+0xe18920db,
+0xe5d6c000,
+0xe496e004,
+0xe18920fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1e0b00b,
+0xe5d6c000,
+0xe795010b,
+0xe3e01000,
+0x000a0000,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1e0b00b,
+0xe5d6c000,
+0xe795010b,
+0xe3e01000,
+0x000a0000,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1a0084e,
+0xe3e01000,
+0x000a0000,
+0xe5d6c000,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1a0b18b,
+0xe5d6c000,
+0xe18500db,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe089a00a,
+0xe1e0b00b,
+0xe5d6c000,
+0xe496e004,
+0xe58ab004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe089a00a,
+0xe089b18b,
+0xe3e00000,
+0x000a0000,
+0xe58a0004,
+0xe28aa008,
+0x0006000b,
+0xe58a0004,
+0xe15a000b,
+0xe28aa008,
+0xba000000,
+0x0005000b,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe5191008,
+0xe1a0b10b,
+0xe28bb000,
+0x000a0000,
+0xe791100b,
+0xe5111000,
+0x000d8180,
+0xe1c120d0,
+0xe5d6c000,
+0xe496e004,
+0xe18920fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe5191008,
+0xe1a0a0aa,
+0xe28aa000,
+0x000a0000,
+0xe1a0b18b,
+0xe791100a,
+0xe18920db,
+0xe551c000,
+0x000d8180,
+0xe551b000,
+0x000d8180,
+0xe5111000,
+0x000d8180,
+0xe31c0000,
+0x000a0000,
+0xe283c000,
+0x000a0000,
+0x135b0000,
+0xe1c120f0,
+0x1a000000,
+0x00050002,
+0x0006000b,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000c,
+0xe37c0000,
+0x000a0000,
+0x8552b000,
+0x000d8180,
+0x9a000000,
+0x0005000b,
+0xe2470000,
+0x000a0000,
+0xe31b0000,
+0x000a0000,
+0x00000000,
+0x0a000000,
+0x0005000b,
+0xe1a0b009,
+0xeb000000,
+0x00030034,
+0xe1a0900b,
+0x00000000,
+0x1b000000,
+0x00030034,
+0x00000000,
+0xea000000,
+0x0005000b,
+0x00000000,
+0xe5191008,
+0xe1a0a0aa,
+0xe28aa000,
+0x000a0000,
+0xe1e0b00b,
+0xe791100a,
+0xe795210b,
+0xe3e03000,
+0x000a0000,
+0xe551c000,
+0x000d8180,
+0xe5111000,
+0x000d8180,
+0xe551b000,
+0x000d8180,
+0xe31c0000,
+0x000a0000,
+0xe552c000,
+0x000d8180,
+0xe1c120f0,
+0x1a000000,
+0x00050002,
+0x0006000b,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000c,
+0xe31c0000,
+0x000a0000,
+0x135b0000,
+0xe2470000,
+0x000a0000,
+0x00000000,
+0x0a000000,
+0x0005000b,
+0xe1a0b009,
+0xeb000000,
+0x00030034,
+0xe1a0900b,
+0x00000000,
+0x1b000000,
+0x00030034,
+0x00000000,
+0xea000000,
+0x0005000b,
+0x00000000,
+0xe5191008,
+0xe1a0a0aa,
+0xe28aa000,
+0x000a0000,
+0xe1a0b18b,
+0xe791100a,
+0xe18520db,
+0xe5111000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe1c120f0,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe5191008,
+0xe1a0a0aa,
+0xe28aa000,
+0x000a0000,
+0xe791100a,
+0xe1e0b00b,
+0xe5111000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe581b004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe5182000,
+0x000d8180,
+0xe086b10b,
+0xe5089000,
+0x000d8180,
+0xe3520000,
+0xe24b6b80,
+0x0a000000,
+0x00050001,
+0xe1a00008,
+0xe089100a,
+0xeb000000,
+0x00030035,
+0xe5189000,
+0x000d8180,
+0x0006000b,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1e0b00b,
+0xe5089000,
+0x000d8180,
+0xe795110b,
+0xe58d6008,
+0xe5192008,
+0xe1a00008,
+0xeb000000,
+0x00030036,
+0xe5189000,
+0x000d8180,
+0xe3e01000,
+0x000a0000,
+0xe5d6c000,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1e0b00b,
+0x00000000,
+0xe5172000,
+0x000d8180,
+0xe5173000,
+0x000d8180,
+0xe5089000,
+0x000d8180,
+0xe58d6008,
+0xe1520003,
+0xe1a00008,
+0x2a000000,
+0x00050005,
+0x0006000b,
+0x00000000,
+0xe1a01a8b,
+0xe1a025ab,
+0xe1a0bac1,
+0xe1a01aa1,
+0xe37b0001,
+0x02811002,
+0xeb000000,
+0x00030037,
+0x00000000,
+0xe795110b,
+0xeb000000,
+0x00030038,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0xe3e01000,
+0x000a0000,
+0xe5d6c000,
+0xe496e004,
+0xe18900fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0xeb000000,
+0x00030039,
+0xe1a00008,
+0xea000000,
+0x0005000b,
+0x00000000,
+0xe5191008,
+0xe1e0b00b,
+0xe5110000,
+0x000d8180,
+0xe795b10b,
+0x00000000,
+0xea000000,
+0x0005009f,
+0x00000000,
+0xea000000,
+0x000500a0,
+0x00000000,
+0xe004caae,
+0xe004b6ae,
+0xe18900dc,
+0xe18920db,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050030,
+0xe3730000,
+0x000a0000,
+0x05103000,
+0x000d8180,
+0x05101000,
+0x000d8180,
+0x1a000000,
+0x00050009,
+0xe0833182,
+0xe1520001,
+0x31c320d0,
+0x2a000000,
+0x00050030,
+0xe5d6c000,
+0xe3730000,
+0x000a0000,
+0x0a000000,
+0x00050005,
+0x0006000b,
+0xe496e004,
+0xe18920fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0xe5101000,
+0x000d8180,
+0xe3510000,
+0x0a000000,
+0x0005000b,
+0xe5511000,
+0x000d8180,
+0xe3110000,
+0x000a0000,
+0x1a000000,
+0x0005000b,
+0xe004caae,
+0xea000000,
+0x00050030,
+0x00060013,
+0xe3730000,
+0x000a0000,
+0x01a0b002,
+0x0a000000,
+0x0005009f,
+0xea000000,
+0x00050030,
+0x00000000,
+0xe004caae,
+0xe20bb0ff,
+0xe18900dc,
+0xe1e0b00b,
+0xe795b10b,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x0005002d,
+0x0006009f,
+0xe5102000,
+0x000d8180,
+0xe51b3000,
+0x000d8180,
+0xe510e000,
+0x000d8180,
+0xe1a0c000,
+0xe0022003,
+0xe0822082,
+0xe08ee182,
+0x0006000b,
+0xe14e00d0,
+0x000c8100,
+0xe14e20d0,
+0x000c8100,
+0xe51ee000,
+0x000d8180,
+0xe3710000,
+0x000a0000,
+0x0150000b,
+0x1a000000,
+0x00050004,
+0xe3730000,
+0x000a0000,
+0x0a000000,
+0x00050005,
+0x0006000d,
+0xe5d6c000,
+0xe496e004,
+0xe18920fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000e,
+0xe35e0000,
+0x1a000000,
+0x0005000b,
+0x0006000f,
+0xe51c0000,
+0x000d8180,
+0xe3a02000,
+0xe3e03000,
+0x000a0000,
+0xe3500000,
+0x0a000000,
+0x0005000d,
+0xe5501000,
+0x000d8180,
+0x00000000,
+0xe3110000,
+0x000a0000,
+0x1a000000,
+0x0005000d,
+0xea000000,
+0x0005002e,
+0x00000000,
+0xe004caae,
+0xe20bb0ff,
+0xe18900dc,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x0005002f,
+0xe5102000,
+0x000d8180,
+0xe5103000,
+0x000d8180,
+0xe1a0118b,
+0xe15b0002,
+0x318320d1,
+0x2a000000,
+0x0005002f,
+0xe5d6c000,
+0xe3730000,
+0x000a0000,
+0x0a000000,
+0x00050005,
+0x0006000b,
+0xe496e004,
+0xe18920fa,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0xe5101000,
+0x000d8180,
+0xe3510000,
+0x0a000000,
+0x0005000b,
+0xe5511000,
+0x000d8180,
+0xe3110000,
+0x000a0000,
+0x1a000000,
+0x0005000b,
+0xea000000,
+0x0005002f,
+0x00000000,
+0xe004caae,
+0xe004b6ae,
+0xe18900dc,
+0xe18920db,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050034,
+0xe3730000,
+0x000a0000,
+0x05101000,
+0x000d8180,
+0x05103000,
+0x000d8180,
+0x1a000000,
+0x00050009,
+0xe0811182,
+0xe1520003,
+0x3591e004,
+0x2a000000,
+0x00050034,
+0xe5d6c000,
+0xe37e0000,
+0x000a0000,
+0xe550e000,
+0x000d8180,
+0xe18920da,
+0x0a000000,
+0x00050005,
+0x0006000b,
+0xe31e0000,
+0x000a0000,
+0xe1c120f0,
+0x1a000000,
+0x00050007,
+0x0006000c,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0xe510a000,
+0x000d8180,
+0xe35a0000,
+0x0a000000,
+0x0005000b,
+0xe55aa000,
+0x000d8180,
+0xe31a0000,
+0x000a0000,
+0x1a000000,
+0x0005000b,
+0xe516e004,
+0xe004caae,
+0xe004a2ae,
+0xea000000,
+0x00050034,
+0x00060011,
+0x00000000,
+0xe5172000,
+0x000d8180,
+0xe3cee000,
+0x000a0000,
+0xe5070000,
+0x000d8180,
+0xe540e000,
+0x000d8180,
+0xe5002000,
+0x000d8180,
+0xea000000,
+0x0005000c,
+0x00060013,
+0xe3730000,
+0x000a0000,
+0x01a0b002,
+0x0a000000,
+0x000500a0,
+0xea000000,
+0x00050034,
+0x00000000,
+0xe004caae,
+0xe20bb0ff,
+0xe18900dc,
+0xe1e0b00b,
+0xe795b10b,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050031,
+0x000600a0,
+0xe5102000,
+0x000d8180,
+0xe51b3000,
+0x000d8180,
+0xe510e000,
+0x000d8180,
+0xe1a0c000,
+0xe0022003,
+0xe0822082,
+0xe3a03000,
+0xe08ee182,
+0xe54c3000,
+0x000d8180,
+0x0006000b,
+0xe14e00d0,
+0x000c8100,
+0xe51e3000,
+0x000d8180,
+0xe51e2000,
+0x000d8180,
+0xe3710000,
+0x000a0000,
+0x0150000b,
+0x1a000000,
+0x00050005,
+0xe55c1000,
+0x000d8180,
+0xe3730000,
+0x000a0000,
+0xe18920da,
+0x0a000000,
+0x00050004,
+0x0006000c,
+0xe3110000,
+0x000a0000,
+0xe14e20f0,
+0x000c8100,
+0x1a000000,
+0x00050007,
+0x0006000d,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000e,
+0x00000000,
+0xe51c0000,
+0x000d8180,
+0xe3500000,
+0x0a000000,
+0x0005000c,
+0xe5500000,
+0x000d8180,
+0xe3100000,
+0x000a0000,
+0x1a000000,
+0x0005000c,
+0xea000000,
+0x00050032,
+0x0006000f,
+0xe1b0e002,
+0x1a000000,
+0x0005000b,
+0xe51c0000,
+0x000d8180,
+0xe1a0200d,
+0xe58d6008,
+0xe3500000,
+0xe5089000,
+0x000d8180,
+0x15501000,
+0x000d8180,
+0xe1a00008,
+0x0a000000,
+0x00050006,
+0xe3110000,
+0x000a0000,
+0x0a000000,
+0x00050032,
+0x00060010,
+0xe3e03000,
+0x000a0000,
+0xe58db000,
+0xe1a0100c,
+0xe58d3004,
+0xeb000000,
+0x0003003a,
+0xe5189000,
+0x000d8180,
+0xe18920da,
+0xe1c020f0,
+0xea000000,
+0x0005000d,
+0x00060011,
+0xe5172000,
+0x000d8180,
+0xe3c11000,
+0x000a0000,
+0x00000000,
+0xe507c000,
+0x000d8180,
+0xe54c1000,
+0x000d8180,
+0xe50c2000,
+0x000d8180,
+0xea000000,
+0x0005000d,
+0x00000000,
+0xe004caae,
+0xe20bb0ff,
+0xe18900dc,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050033,
+0xe5102000,
+0x000d8180,
+0xe510c000,
+0x000d8180,
+0xe1a0118b,
+0xe15b0002,
+0x31a120dc,
+0x2a000000,
+0x00050033,
+0xe5d6c000,
+0xe3730000,
+0x000a0000,
+0xe550e000,
+0x000d8180,
+0xe18920da,
+0x0a000000,
+0x00050005,
+0x0006000b,
+0xe31e0000,
+0x000a0000,
+0xe1c120f0,
+0x1a000000,
+0x00050007,
+0x0006000c,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0xe510a000,
+0x000d8180,
+0xe35a0000,
+0x0a000000,
+0x0005000b,
+0xe55aa000,
+0x000d8180,
+0xe31a0000,
+0x000a0000,
+0x1a000000,
+0x0005000b,
+0xe516e004,
+0xe004a2ae,
+0xea000000,
+0x00050033,
+0x00060011,
+0xe5172000,
+0x000d8180,
+0xe3cee000,
+0x000a0000,
+0x00000000,
+0xe5070000,
+0x000d8180,
+0xe540e000,
+0x000d8180,
+0xe5002000,
+0x000d8180,
+0xea000000,
+0x0005000c,
+0x00000000,
+0xe089a00a,
+0x0006000b,
+0xe59dc004,
+0xe51a1008,
+0xe795018b,
+0xe25cc008,
+0xe5113000,
+0x000d8180,
+0x0a000000,
+0x00050004,
+0xe08021ac,
+0xe1520003,
+0xe5113000,
+0x000d8180,
+0xe08ac00c,
+0x8a000000,
+0x00050005,
+0xe083e180,
+0xe5510000,
+0x000d8180,
+0x0006000d,
+0xe0ca20d8,
+0xe0ce20f8,
+0xe15a000c,
+0x3a000000,
+0x0005000d,
+0xe3100000,
+0x000a0000,
+0x1a000000,
+0x00050007,
+0x0006000e,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0xe5089000,
+0x000d8180,
+0xe1a00008,
+0xe58d6008,
+0xeb000000,
+0x0003003b,
+0x00000000,
+0xe5189000,
+0x000d8180,
+0x00000000,
+0xea000000,
+0x0005000b,
+0x00060011,
+0xe5172000,
+0x000d8180,
+0xe3c00000,
+0x000a0000,
+0xe5071000,
+0x000d8180,
+0xe5410000,
+0x000d8180,
+0xe5012000,
+0x000d8180,
+0xea000000,
+0x0005000e,
+0x00000000,
+0xe59d0004,
+0xe004b6ae,
+0xe08bb000,
+0xea000000,
+0x000500a1,
+0x00000000,
+0xe004b6ae,
+0x000600a1,
+0xe1a0c009,
+0xe1a920da,
+0xe24bb008,
+0xe2899008,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050025,
+0xe5096004,
+0xe5126000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe08aa009,
+0xe12fff1c,
+0x00000000,
+0xe59d0004,
+0xe080b18b,
+0xea000000,
+0x000500a2,
+0x00000000,
+0xe1a0b18b,
+0x000600a2,
+0xe1aa20d9,
+0xe24bb008,
+0xe28aa008,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050042,
+0xe5196004,
+0x00060043,
+0xe3a0c000,
+0xe5523000,
+0x000d8180,
+0xe3160000,
+0x000a0000,
+0x1a000000,
+0x00050007,
+0x0006000b,
+0xe5092008,
+0xe35b0000,
+0x0a000000,
+0x00050003,
+0x0006000c,
+0xe18a00dc,
+0xe28ce008,
+0xe15e000b,
+0xe18900fc,
+0xe1a0c00e,
+0x1a000000,
+0x0005000c,
+0x0006000d,
+0xe3530001,
+0x8a000000,
+0x00050005,
+0x0006000e,
+0xe5126000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe08aa009,
+0xe12fff1c,
+0x0006000f,
+0xe516e004,
+0xe004a2ae,
+0xe049000a,
+0xe5100010,
+0xe5100000,
+0x000d8180,
+0xe5105000,
+0x000d8180,
+0xea000000,
+0x0005000e,
+0x00060011,
+0xe2266000,
+0x000a0000,
+0xe3160000,
+0x000a0000,
+0x00000000,
+0x13a03000,
+0x1a000000,
+0x0005000b,
+0xe0499006,
+0xe5196004,
+0xe3160000,
+0x000a0000,
+0x13a03000,
+0xea000000,
+0x0005000b,
+0x00000000,
+0xe089a00a,
+0xe1a0c009,
+0xe14a21d0,
+0xe14a00d8,
+0xe28a9008,
+0xe1ca20f8,
+0xe1ca01f0,
+0xe14a21d8,
+0xe3a0b010,
+0xe1ca20f0,
+0xe3730000,
+0x000a0000,
+0x1a000000,
+0x00050025,
+0xe5096004,
+0xe5126000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe08aa009,
+0xe12fff1c,
+0x00000000,
+0xe089a00a,
+0xe51ac010,
+0xe51a0008,
+0xe51ce000,
+0x000d8180,
+0xe51c1000,
+0x000d8180,
+0xe2866004,
+0x0006000b,
+0xe050b00e,
+0xe0812180,
+0x2a000000,
+0x00050005,
+0xe1c220d0,
+0xe3730000,
+0x000a0000,
+0x02800001,
+0x0a000000,
+0x0005000b,
+0xe156b0b2,
+0xe3e01000,
+0x000a0000,
+0xe1ca20f8,
+0xe086b10b,
+0xe280c001,
+0xe1ca00f0,
+0xe24b6b80,
+0xe50ac008,
+0x0006000d,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0xe51c3000,
+0x000d8180,
+0xe51cc000,
+0x000d8180,
+0x00060010,
+0xe08b008b,
+0xe15b0003,
+0xe08c2180,
+0x8a000000,
+0x0005000d,
+0xe14200d0,
+0x000c8100,
+0xe3710000,
+0x000a0000,
+0xe28bb001,
+0x0a000000,
+0x00050010,
+0xe156c0b2,
+0xe08bb00e,
+0xe14220d0,
+0x000c8100,
+0xe50ab008,
+0xe1ca00f8,
+0xe086b10c,
+0xe24b6b80,
+0xe1ca20f0,
+0xea000000,
+0x0005000d,
+0x00000000,
+0xe089a00a,
+0xe086b10b,
+0xe14a01d8,
+0xe51a200c,
+0xe51a3004,
+0xe3710000,
+0x000a0000,
+0x05500000,
+0x000d8180,
+0x03720000,
+0x000a0000,
+0x03730000,
+0x000a0000,
+0x03500000,
+0x000a0000,
+0x024b6b80,
+0x1a000000,
+0x00050005,
+0xe5d6c000,
+0xe496e004,
+0xe3a00000,
+0xe50a0008,
+0x0006000b,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0xe3a00000,
+0x000a0000,
+0xe3a0c000,
+0x000a0000,
+0xe5460004,
+0xe24b6b80,
+0xe5c6c000,
+0xe496e004,
+0xea000000,
+0x0005000b,
+0x00000000,
+0xe004caae,
+0xe004b6ae,
+0xe5190004,
+0xe089b00b,
+0xe089a00a,
+0xe28bb000,
+0x000a0000,
+0xe08a300c,
+0xe2492008,
+0xe04bb000,
+0xe35c0000,
+0xe042000b,
+0x0a000000,
+0x00050005,
+0xe2433010,
+0x0006000b,
+0xe15b0002,
+0x30cb00d8,
+0x23e01000,
+0x000a0000,
+0xe15a0003,
+0xe0ca00f8,
+0x3a000000,
+0x0005000b,
+0x0006000c,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000f,
+0xe5183000,
+0x000d8180,
+0xe3500000,
+0xd3a0c008,
+0xc280c008,
+0xe08a1000,
+0xe58dc004,
+0xda000000,
+0x0005000c,
+0xe1510003,
+0x8a000000,
+0x00050007,
+0x00060010,
+0xe0cb00d8,
+0xe0ca00f8,
+0xe15b0002,
+0x3a000000,
+0x00050010,
+0xea000000,
+0x0005000c,
+0x00060011,
+0xe1a011a0,
+0xe508a000,
+0x000d8180,
+0xe1a00008,
+0xe5089000,
+0x000d8180,
+0xe04bb009,
+0xe58d6008,
+0xe04aa009,
+0xeb000000,
+0x00030000,
+0xe5189000,
+0x000d8180,
+0xe089a00a,
+0xe089b00b,
+0xe2492008,
+0xea000000,
+0x00050010,
+0x00000000,
+0xe59d0004,
+0xe5196004,
+0xe089a00a,
+0xe080b18b,
+0xea000000,
+0x000500a3,
+0x00000000,
+0xe5196004,
+0xe1a0b18b,
+0xe089a00a,
+0x000600a3,
+0xe58db004,
+0x0006000b,
+0xe2160000,
+0x000a0000,
+0xe2261000,
+0x000a0000,
+0x1a000000,
+0x000500a4,
+0x00060017,
+0xe516e004,
+0xe25b3008,
+0xe2492008,
+0x0a000000,
+0x00050003,
+0x0006000c,
+0xe0ca00d8,
+0xe2899008,
+0xe2533008,
+0xe14901f0,
+0x1a000000,
+0x0005000c,
+0x0006000d,
+0xe004a2ae,
+0xe042300a,
+0xe004caae,
+0xe5130008,
+0x0006000f,
+0xe15c000b,
+0x8a000000,
+0x00050006,
+0xe1a09003,
+0xe5101000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe5115000,
+0x000d8180,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00060010,
+0xe3e01000,
+0x000a0000,
+0xe2899008,
+0xe28bb008,
+0xe509100c,
+0xea000000,
+0x0005000f,
+0x000600a5,
+0xe089a00a,
+0x000600a4,
+0xe3110000,
+0x000a0000,
+0x1a000000,
+0x00050018,
+0xe0499001,
+0xe5196004,
+0xea000000,
+0x0005000b,
+0x00000000,
+0xe5196004,
+0xe1a0b18b,
+0xe58db004,
+0xe2160000,
+0x000a0000,
+0xe2261000,
+0x000a0000,
+0x0516e004,
+0x1a000000,
+0x000500a5,
+0x00000000,
+0xe18900da,
+0x00000000,
+0xe2493008,
+0xe004a2ae,
+0x00000000,
+0xe1c300f0,
+0x00000000,
+0xe043900a,
+0xe004caae,
+0xe5190008,
+0x0006000f,
+0xe15c000b,
+0x8a000000,
+0x00050006,
+0xe5101000,
+0x000d8180,
+0xe5d6c000,
+0xe496e004,
+0xe5115000,
+0x000d8180,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00060010,
+0xe2431004,
+0xe3e02000,
+0x000a0000,
+0xe781200b,
+0xe28bb008,
+0xea000000,
+0x0005000f,
+0x00000000,
+0xe1a000a6,
+0xe200007e,
+0xe2400000,
+0x000a0000,
+0xe19710b0,
+0xe2511000,
+0x000a0000,
+0xe18710b0,
+0x3a000000,
+0x00050092,
+0x00000000,
+0xe1aa00d9,
+0x00000000,
+0xe086b10b,
+0x00000000,
+0xe1ca20d8,
+0xe3710000,
+0x000a0000,
+0xe59ac014,
+0x1a000000,
+0x00050005,
+0xe3730000,
+0x000a0000,
+0xe59a3010,
+0x037c0000,
+0x000a0000,
+0x1a000000,
+0x00050044,
+0xe3530000,
+0xba000000,
+0x00050004,
+0xe1500002,
+0x00000000,
+0xe1ca21d0,
+0xe3710000,
+0x000a0000,
+0x1a000000,
+0x00050005,
+0xe0900002,
+0xe59a3008,
+0x00000000,
+0x6286bb80,
+0x00000000,
+0x6a000000,
+0x00050002,
+0x00000000,
+0xe3520000,
+0xba000000,
+0x00050004,
+0xe1500003,
+0x00000000,
+0x0006000b,
+0x00000000,
+0xc24b6b80,
+0x00000000,
+0xe24b6b80,
+0xd156b0b2,
+0x00000000,
+0xd24b6b80,
+0x00000000,
+0xe1ca00f0,
+0x00000000,
+0x0006000c,
+0xe5d6c000,
+0xe496e004,
+0xe1ca01f8,
+0x00000000,
+0xda000000,
+0x00070000,
+0x00000000,
+0x0006000d,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x0006000e,
+0x00000000,
+0xe1520000,
+0x00000000,
+0xe1530000,
+0x00000000,
+0xea000000,
+0x0005000b,
+0x0006000f,
+0x00000000,
+0x33730000,
+0x000a0000,
+0x337c0000,
+0x000a0000,
+0x2a000000,
+0x00050044,
+0xe35c0000,
+0xe1ca00f0,
+0xe1ca01f8,
+0xba000000,
+0x00050008,
+0x00000000,
+0xe3530000,
+0xba000000,
+0x00050008,
+0xeb000000,
+0x0003002e,
+0xe1ca00f0,
+0xe1ca20d8,
+0xe1ca01f8,
+0x00000000,
+0x00060010,
+0xeb000000,
+0x00030024,
+0x00000000,
+0x824b6b80,
+0x00000000,
+0xe24b6b80,
+0x9156b0b2,
+0x9a000000,
+0x00070000,
+0x00000000,
+0x924b6b80,
+0x00000000,
+0x9a000000,
+0x00070000,
+0x00000000,
+0xe5d6c000,
+0xe496e004,
+0xea000000,
+0x0005000d,
+0x00060012,
+0x00000000,
+0xeb000000,
+0x0003002e,
+0xe1ca00f0,
+0xe1ca01f8,
+0x00000000,
+0xe1a02000,
+0xe1a03001,
+0xe1ca00d8,
+0xea000000,
+0x00050010,
+0x00000000,
+0xe1a000a6,
+0xe200007e,
+0xe2400000,
+0x000a0000,
+0xe19710b0,
+0xe2511000,
+0x000a0000,
+0xe18710b0,
+0x3a000000,
+0x00050092,
+0x00000000,
+0xe1aa00d9,
+0x00000000,
+0xe3710000,
+0x000a0000,
+0x114a00f8,
+0x1a000000,
+0x00070000,
+0x00000000,
+0xe086b10b,
+0xe3710000,
+0x000a0000,
+0x124b6b80,
+0x114a00f8,
+0x00000000,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1a000a6,
+0xe200007e,
+0xe2400000,
+0x000a0000,
+0xe19710b0,
+0xe2511000,
+0x000a0000,
+0xe18710b0,
+0x3a000000,
+0x00050092,
+0x00000000,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe5170000,
+0x000d8180,
+0xe3a01000,
+0xe790b10b,
+0xe5071000,
+0x000d8180,
+0xe51ba000,
+0x000d8180,
+0xe5079000,
+0x000d8180,
+0xe5078000,
+0x000d8180,
+0xe12fff1a,
+0x00000000,
+0xe086b10b,
+0xe24b6b80,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe1a000a6,
+0xe200007e,
+0xe2400000,
+0x000a0000,
+0xe19710b0,
+0xe2511000,
+0x000a0000,
+0xe18710b0,
+0x3a000000,
+0x00050094,
+0x00000000,
+0xe5180000,
+0x000d8180,
+0xe5561000,
+0x000d8180,
+0xe5165000,
+0x000d8180,
+0xe15a0000,
+0x8a000000,
+0x00050020,
+0x00000000,
+0xe5d6c000,
+0xe496e004,
+0x00000000,
+0x0006000c,
+0xe15b0181,
+0xe3e03000,
+0x000a0000,
+0x3a000000,
+0x00050003,
+0x00000000,
+0xe1a0b82e,
+0xea000000,
+0x00070000,
+0x00000000,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0x0006000d,
+0xe18920fb,
+0xe28bb008,
+0xea000000,
+0x0005000c,
+0x00000000,
+0xe7f001f0,
+0x00000000,
+0xe5180000,
+0x000d8180,
+0xe089300b,
+0xe08aa00b,
+0xe5832000,
+0xe28b1000,
+0x000a0000,
+0xe5165000,
+0x000d8180,
+0xe15a0000,
+0xe5831004,
+0x2a000000,
+0x00050020,
+0xe556c000,
+0x000d8180,
+0xe1a0a009,
+0xe1a0b003,
+0xe35c0000,
+0xe2839008,
+0x0a000000,
+0x00050003,
+0xe3e02000,
+0x000a0000,
+0x0006000b,
+0xe15a000b,
+0x30ca00d8,
+0x21a01002,
+0x350a2004,
+0x0006000c,
+0xe25cc001,
+0xe1e300f8,
+0x1a000000,
+0x0005000b,
+0x0006000d,
+0xe5d6c000,
+0xe496e004,
+0xe797c10c,
+0xe004a2ae,
+0xe1a0b82e,
+0xe12fff1c,
+0x00000000,
+0xe5123000,
+0x000d8180,
+0x00000000,
+0xe5173000,
+0x000d8180,
+0x00000000,
+0xe08a100b,
+0xe5180000,
+0x000d8180,
+0xe089b00b,
+0xe5089000,
+0x000d8180,
+0xe1510000,
+0xe508b000,
+0x000d8180,
+0x00000000,
+0xe5121000,
+0x000d8180,
+0x00000000,
+0xe3e02000,
+0x000a0000,
+0xe1a00008,
+0x8a000000,
+0x0005001f,
+0xe5072000,
+0x000d8180,
+0xe12fff33,
+0xe5189000,
+0x000d8180,
+0xe3e02000,
+0x000a0000,
+0xe5181000,
+0x000d8180,
+0xe1a0b180,
+0xe5072000,
+0x000d8180,
+0xe5196004,
+0xe041a00b,
+0xea000000,
+0x00050016,
+0x00000000,
+0x00010000
+};
+
+enum {
+ GLOB_vm_returnp,
+ GLOB_cont_dispatch,
+ GLOB_vm_returnc,
+ GLOB_BC_RET_Z,
+ GLOB_vm_return,
+ GLOB_vm_leave_cp,
+ GLOB_vm_leave_unw,
+ GLOB_vm_unwind_c,
+ GLOB_vm_unwind_c_eh,
+ GLOB_vm_unwind_ff,
+ GLOB_vm_unwind_ff_eh,
+ GLOB_vm_growstack_c,
+ GLOB_vm_growstack_l,
+ GLOB_vm_resume,
+ GLOB_vm_pcall,
+ GLOB_vm_call,
+ GLOB_vm_call_dispatch,
+ GLOB_vmeta_call,
+ GLOB_vm_call_dispatch_f,
+ GLOB_vm_cpcall,
+ GLOB_cont_ffi_callback,
+ GLOB_vm_call_tail,
+ GLOB_cont_cat,
+ GLOB_BC_CAT_Z,
+ GLOB_cont_nop,
+ GLOB_vmeta_tgets1,
+ GLOB_vmeta_tgets,
+ GLOB_vmeta_tgetb,
+ GLOB_vmeta_tgetv,
+ GLOB_vmeta_tsets1,
+ GLOB_vmeta_tsets,
+ GLOB_vmeta_tsetb,
+ GLOB_vmeta_tsetv,
+ GLOB_vmeta_comp,
+ GLOB_vmeta_binop,
+ GLOB_cont_ra,
+ GLOB_cont_condt,
+ GLOB_cont_condf,
+ GLOB_vmeta_equal,
+ GLOB_vmeta_equal_cd,
+ GLOB_vmeta_arith_vn,
+ GLOB_vmeta_arith_nv,
+ GLOB_vmeta_unm,
+ GLOB_vmeta_arith_vv,
+ GLOB_vmeta_len,
+ GLOB_BC_LEN_Z,
+ GLOB_vmeta_callt,
+ GLOB_BC_CALLT2_Z,
+ GLOB_vmeta_for,
+ GLOB_ff_assert,
+ GLOB_fff_fallback,
+ GLOB_fff_res,
+ GLOB_ff_type,
+ GLOB_fff_restv,
+ GLOB_ff_getmetatable,
+ GLOB_ff_setmetatable,
+ GLOB_ff_rawget,
+ GLOB_ff_tonumber,
+ GLOB_ff_tostring,
+ GLOB_fff_gcstep,
+ GLOB_ff_next,
+ GLOB_ff_pairs,
+ GLOB_ff_ipairs_aux,
+ GLOB_ff_ipairs,
+ GLOB_ff_pcall,
+ GLOB_ff_xpcall,
+ GLOB_ff_coroutine_resume,
+ GLOB_ff_coroutine_wrap_aux,
+ GLOB_ff_coroutine_yield,
+ GLOB_ff_math_floor,
+ GLOB_vm_floor,
+ GLOB_ff_math_ceil,
+ GLOB_vm_ceil,
+ GLOB_ff_math_abs,
+ GLOB_fff_res1,
+ GLOB_ff_math_sqrt,
+ GLOB_ff_math_log,
+ GLOB_ff_math_log10,
+ GLOB_ff_math_exp,
+ GLOB_ff_math_sin,
+ GLOB_ff_math_cos,
+ GLOB_ff_math_tan,
+ GLOB_ff_math_asin,
+ GLOB_ff_math_acos,
+ GLOB_ff_math_atan,
+ GLOB_ff_math_sinh,
+ GLOB_ff_math_cosh,
+ GLOB_ff_math_tanh,
+ GLOB_ff_math_pow,
+ GLOB_ff_math_atan2,
+ GLOB_ff_math_fmod,
+ GLOB_ff_math_deg,
+ GLOB_ff_math_rad,
+ GLOB_ff_math_ldexp,
+ GLOB_ff_math_frexp,
+ GLOB_ff_math_modf,
+ GLOB_ff_math_min,
+ GLOB_ff_math_max,
+ GLOB_ff_string_len,
+ GLOB_ff_string_byte,
+ GLOB_ff_string_char,
+ GLOB_fff_newstr,
+ GLOB_ff_string_sub,
+ GLOB_fff_emptystr,
+ GLOB_ff_string_rep,
+ GLOB_ff_string_reverse,
+ GLOB_ff_string_lower,
+ GLOB_ff_string_upper,
+ GLOB_ff_table_getn,
+ GLOB_vm_tobit_fb,
+ GLOB_vm_tobit,
+ GLOB_ff_bit_tobit,
+ GLOB_ff_bit_band,
+ GLOB_ff_bit_bor,
+ GLOB_ff_bit_bxor,
+ GLOB_ff_bit_bswap,
+ GLOB_ff_bit_bnot,
+ GLOB_ff_bit_lshift,
+ GLOB_ff_bit_rshift,
+ GLOB_ff_bit_arshift,
+ GLOB_ff_bit_rol,
+ GLOB_ff_bit_ror,
+ GLOB_vm_record,
+ GLOB_vm_rethook,
+ GLOB_vm_inshook,
+ GLOB_cont_hook,
+ GLOB_vm_hotloop,
+ GLOB_vm_callhook,
+ GLOB_vm_hotcall,
+ GLOB_vm_exit_handler,
+ GLOB_vm_exit_interp,
+ GLOB_vm_trunc,
+ GLOB_vm_mod,
+ GLOB_vm_modi,
+ GLOB_vm_foldarith,
+ GLOB_vm_ffi_callback,
+ GLOB_vm_ffi_call,
+ GLOB_BC_ISEQN_Z,
+ GLOB_BC_ISNEN_Z,
+ GLOB_BC_TGETS_Z,
+ GLOB_BC_TSETS_Z,
+ GLOB_BC_CALL_Z,
+ GLOB_BC_CALLT1_Z,
+ GLOB_BC_RETM_Z,
+ GLOB_BC_RETV2_Z,
+ GLOB_BC_RETV1_Z,
+ GLOB__MAX
+};
+static const char *const globnames[] = {
+ "vm_returnp",
+ "cont_dispatch",
+ "vm_returnc",
+ "BC_RET_Z",
+ "vm_return",
+ "vm_leave_cp",
+ "vm_leave_unw",
+ "vm_unwind_c",
+ "vm_unwind_c_eh",
+ "vm_unwind_ff",
+ "vm_unwind_ff_eh",
+ "vm_growstack_c",
+ "vm_growstack_l",
+ "vm_resume",
+ "vm_pcall",
+ "vm_call",
+ "vm_call_dispatch",
+ "vmeta_call",
+ "vm_call_dispatch_f",
+ "vm_cpcall",
+ "cont_ffi_callback",
+ "vm_call_tail",
+ "cont_cat",
+ "BC_CAT_Z",
+ "cont_nop",
+ "vmeta_tgets1",
+ "vmeta_tgets",
+ "vmeta_tgetb",
+ "vmeta_tgetv",
+ "vmeta_tsets1",
+ "vmeta_tsets",
+ "vmeta_tsetb",
+ "vmeta_tsetv",
+ "vmeta_comp",
+ "vmeta_binop",
+ "cont_ra",
+ "cont_condt",
+ "cont_condf",
+ "vmeta_equal",
+ "vmeta_equal_cd",
+ "vmeta_arith_vn",
+ "vmeta_arith_nv",
+ "vmeta_unm",
+ "vmeta_arith_vv",
+ "vmeta_len",
+ "BC_LEN_Z",
+ "vmeta_callt",
+ "BC_CALLT2_Z",
+ "vmeta_for",
+ "ff_assert",
+ "fff_fallback",
+ "fff_res",
+ "ff_type",
+ "fff_restv",
+ "ff_getmetatable",
+ "ff_setmetatable",
+ "ff_rawget",
+ "ff_tonumber",
+ "ff_tostring",
+ "fff_gcstep",
+ "ff_next",
+ "ff_pairs",
+ "ff_ipairs_aux",
+ "ff_ipairs",
+ "ff_pcall",
+ "ff_xpcall",
+ "ff_coroutine_resume",
+ "ff_coroutine_wrap_aux",
+ "ff_coroutine_yield",
+ "ff_math_floor",
+ "vm_floor",
+ "ff_math_ceil",
+ "vm_ceil",
+ "ff_math_abs",
+ "fff_res1",
+ "ff_math_sqrt",
+ "ff_math_log",
+ "ff_math_log10",
+ "ff_math_exp",
+ "ff_math_sin",
+ "ff_math_cos",
+ "ff_math_tan",
+ "ff_math_asin",
+ "ff_math_acos",
+ "ff_math_atan",
+ "ff_math_sinh",
+ "ff_math_cosh",
+ "ff_math_tanh",
+ "ff_math_pow",
+ "ff_math_atan2",
+ "ff_math_fmod",
+ "ff_math_deg",
+ "ff_math_rad",
+ "ff_math_ldexp",
+ "ff_math_frexp",
+ "ff_math_modf",
+ "ff_math_min",
+ "ff_math_max",
+ "ff_string_len",
+ "ff_string_byte",
+ "ff_string_char",
+ "fff_newstr",
+ "ff_string_sub",
+ "fff_emptystr",
+ "ff_string_rep",
+ "ff_string_reverse",
+ "ff_string_lower",
+ "ff_string_upper",
+ "ff_table_getn",
+ "vm_tobit_fb",
+ "vm_tobit",
+ "ff_bit_tobit",
+ "ff_bit_band",
+ "ff_bit_bor",
+ "ff_bit_bxor",
+ "ff_bit_bswap",
+ "ff_bit_bnot",
+ "ff_bit_lshift",
+ "ff_bit_rshift",
+ "ff_bit_arshift",
+ "ff_bit_rol",
+ "ff_bit_ror",
+ "vm_record",
+ "vm_rethook",
+ "vm_inshook",
+ "cont_hook",
+ "vm_hotloop",
+ "vm_callhook",
+ "vm_hotcall",
+ "vm_exit_handler",
+ "vm_exit_interp",
+ "vm_trunc",
+ "vm_mod",
+ "vm_modi",
+ "vm_foldarith",
+ "vm_ffi_callback",
+ "vm_ffi_call",
+ "BC_ISEQN_Z",
+ "BC_ISNEN_Z",
+ "BC_TGETS_Z",
+ "BC_TSETS_Z",
+ "BC_CALL_Z",
+ "BC_CALLT1_Z",
+ "BC_RETM_Z",
+ "BC_RETV2_Z",
+ "BC_RETV1_Z",
+ (const char *)0
+};
+static const char *const extnames[] = {
+ "lj_state_growstack",
+ "lj_meta_tget",
+ "lj_meta_tset",
+ "lj_meta_comp",
+ "lj_meta_equal",
+ "lj_meta_equal_cd",
+ "lj_meta_arith",
+ "lj_meta_len",
+ "lj_meta_call",
+ "lj_meta_for",
+ "lj_tab_get",
+ "lj_str_fromnumber",
+ "lj_tab_next",
+ "lj_tab_getinth",
+ "lj_ffh_coroutine_wrap_err",
+ "sqrt",
+ "log",
+ "log10",
+ "exp",
+ "sin",
+ "cos",
+ "tan",
+ "asin",
+ "acos",
+ "atan",
+ "sinh",
+ "cosh",
+ "tanh",
+ "pow",
+ "atan2",
+ "fmod",
+ "__aeabi_dmul",
+ "ldexp",
+ "frexp",
+ "modf",
+ "__aeabi_i2d",
+ "__aeabi_cdcmple",
+ "lj_str_new",
+ "lj_tab_len",
+ "lj_gc_step",
+ "lj_dispatch_ins",
+ "lj_trace_hot",
+ "lj_dispatch_call",
+ "lj_trace_exit",
+ "lj_err_throw",
+ "__aeabi_ddiv",
+ "__aeabi_dadd",
+ "__aeabi_dsub",
+ "lj_ccallback_enter",
+ "lj_ccallback_leave",
+ "__aeabi_cdcmpeq",
+ "lj_meta_cat",
+ "lj_gc_barrieruv",
+ "lj_func_closeuv",
+ "lj_func_newL_gc",
+ "lj_tab_new",
+ "lj_tab_dup",
+ "lj_gc_step_fixtop",
+ "lj_tab_newkey",
+ "lj_tab_reasize",
+ (const char *)0
+};
+#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V)
+#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V)
+#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V)
+#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V)
+#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V)
+#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V)
+#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V)
+#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V)
+#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V)
+#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V)
+#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V)
+#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V)
+#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V)
+#define field_pc pc
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+
+#if !LJ_DUALNUM
+#error "Only dual-number mode supported for ARM target"
+#endif
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ dasm_put(Dst, 0);
+ dasm_put(Dst, 1, FRAME_P, ~LJ_TTRUE, FRAME_TYPE, FRAME_TYPEP, FRAME_C, Dt1(->base), LJ_VMST_C, DISPATCH_GL(vmstate), Dt1(->top));
+ dasm_put(Dst, 54, Dt1(->cframe), Dt1(->maxstack), ~LJ_TNIL, Dt1(->top), Dt1(->top), LJ_VMST_C, Dt1(->glref), Dt2(->vmstate));
+ dasm_put(Dst, 108, ~CFRAME_RAWMASK, Dt1(->base), Dt1(->glref), ~LJ_TFALSE, GG_G2DISP, LJ_VMST_INTERP, DISPATCH_GL(vmstate), LUA_MINSTACK, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top), Dt7(->field_pc), Dt1(->glref));
+ dasm_put(Dst, 173, GG_G2DISP, FRAME_CP, CFRAME_RESUME, Dt1(->status), Dt1(->cframe), Dt1(->base), Dt1(->top), Dt1(->status), LJ_VMST_INTERP, FRAME_TYPE, DISPATCH_GL(vmstate), FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe));
+ dasm_put(Dst, 238, Dt1(->glref), GG_G2DISP, Dt1(->base), Dt1(->top), LJ_VMST_INTERP, DISPATCH_GL(vmstate), -LJ_TFUNC, Dt7(->field_pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), Dt1(->glref), FRAME_CP, GG_G2DISP);
+ dasm_put(Dst, 307);
+#if LJ_HASFFI
+ dasm_put(Dst, 312);
+#endif
+ dasm_put(Dst, 314, Dt7(->field_pc), ~LJ_TNIL);
+#if LJ_HASFFI
+ dasm_put(Dst, 322);
+#endif
+ dasm_put(Dst, 325, PC2PROTO(k));
+#if LJ_HASFFI
+ dasm_put(Dst, 329);
+#endif
+ dasm_put(Dst, 338, Dt1(->base), -DISPATCH_GL(tmptv), ~LJ_TTAB, ~LJ_TSTR, ~LJ_TISNUM, Dt1(->base));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 395, Dt1(->base));
+ }
+ dasm_put(Dst, 398, FRAME_CONT, Dt1(->top), -DISPATCH_GL(tmptv), ~LJ_TTAB, ~LJ_TSTR, ~LJ_TISNUM, Dt1(->base));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 460, Dt1(->base));
+ }
+ dasm_put(Dst, 463, FRAME_CONT, Dt1(->top), Dt1(->base));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 499, Dt1(->base));
+ }
+ dasm_put(Dst, 502, ~LJ_TTRUE, -LJ_TFALSE, Dt1(->base));
+#if LJ_HASFFI
+ dasm_put(Dst, 549, Dt1(->base));
+#endif
+ dasm_put(Dst, 560, Dt1(->base));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 597, Dt1(->base));
+ }
+ dasm_put(Dst, 600, FRAME_CONT, Dt1(->base));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 621, Dt1(->base));
+ }
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 624);
+#else
+ dasm_put(Dst, 631);
+#endif
+ dasm_put(Dst, 634, Dt1(->base));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 642);
+ }
+ dasm_put(Dst, 644);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 647);
+ }
+ dasm_put(Dst, 649, Dt7(->field_pc), Dt1(->base));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 670, Dt1(->base));
+ }
+ dasm_put(Dst, 673, Dt1(->base));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 687, Dt1(->base));
+ }
+#if LJ_HASJIT
+ dasm_put(Dst, 690);
+#endif
+ dasm_put(Dst, 692);
+#if LJ_HASJIT
+ dasm_put(Dst, 694, BC_JFORI);
+#endif
+ dasm_put(Dst, 697);
+#if LJ_HASJIT
+ dasm_put(Dst, 700, BC_JFORI);
+#endif
+ dasm_put(Dst, 703, BC_FORI, -LJ_TTRUE, -LJ_TISNUM, ~LJ_TISNUM, (int)(offsetof(GCfuncC, upvalue)>>3)-1, -LJ_TTAB, -LJ_TUDATA, Dt6(->metatable));
+ dasm_put(Dst, 760, ~LJ_TNIL, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable]), Dt6(->hmask), Dt5(->hash), Dt6(->node), DtB(->key), DtB(->val), DtB(->next), -LJ_TSTR, ~LJ_TTAB, -LJ_TNIL, -LJ_TISNUM);
+ dasm_put(Dst, 808, ~LJ_TISNUM, DISPATCH_GL(gcroot[GCROOT_BASEMT]), -LJ_TTAB, Dt6(->metatable), -LJ_TTAB, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), DISPATCH_GL(gc.grayagain), LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist), -LJ_TTAB);
+ dasm_put(Dst, 860);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 865);
+ }
+ dasm_put(Dst, 867);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 870);
+ }
+ dasm_put(Dst, 872, -LJ_TISNUM, -LJ_TSTR, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), Dt1(->base), -LJ_TISNUM, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), ~LJ_TSTR);
+ dasm_put(Dst, 924, ~LJ_TNIL, -LJ_TTAB, Dt1(->base), Dt1(->top));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 947, Dt1(->base));
+ }
+ dasm_put(Dst, 950, ~LJ_TNIL, (2+1)*8, -LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 973, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 976, Dt8(->upvalue[0]));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 980);
+#endif
+ dasm_put(Dst, 984, ~LJ_TNIL, (3+1)*8, -LJ_TTAB, -LJ_TISNUM, Dt6(->asize), Dt6(->array), (0+1)*8, -LJ_TNIL, (2+1)*8, Dt6(->hmask));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1034);
+ }
+ dasm_put(Dst, 1036);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1039);
+ }
+ dasm_put(Dst, 1041, -LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1057, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 1060, Dt8(->upvalue[0]));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1064);
+#endif
+ dasm_put(Dst, 1068, ~LJ_TISNUM, (3+1)*8, DISPATCH_GL(hookmask), HOOK_ACTIVE, 8+FRAME_PCALL, 8+FRAME_PCALLH, DISPATCH_GL(hookmask), -LJ_TFUNC, HOOK_ACTIVE, 16+FRAME_PCALL, 16+FRAME_PCALLH, -LJ_TTHREAD);
+ dasm_put(Dst, 1127, Dt1(->base), Dt1(->top), Dt1(->status), Dt1(->base), Dt1(->maxstack), Dt1(->cframe), LUA_YIELD, Dt1(->top), Dt1(->top), Dt1(->base), LJ_VMST_INTERP, Dt1(->top), DISPATCH_GL(vmstate), LUA_YIELD);
+ dasm_put(Dst, 1186, Dt1(->base), Dt1(->maxstack), Dt1(->top), ~LJ_TTRUE, FRAME_TYPE, ~LJ_TFALSE, (2+1)*8, Dt1(->top));
+ dasm_put(Dst, 1246, Dt8(->upvalue[0].gcr), Dt1(->base), Dt1(->top), Dt1(->status), Dt1(->base), Dt1(->maxstack), Dt1(->cframe), LUA_YIELD, Dt1(->top), Dt1(->top), Dt1(->base), LJ_VMST_INTERP, Dt1(->top), DISPATCH_GL(vmstate), LUA_YIELD);
+ dasm_put(Dst, 1302, Dt1(->base), Dt1(->maxstack), Dt1(->top), FRAME_TYPE, Dt1(->cframe), Dt1(->base), CFRAME_RESUME, Dt1(->top));
+ dasm_put(Dst, 1361, LUA_YIELD, Dt1(->cframe), Dt1(->status), -LJ_TISNUM, ~LJ_TISNUM, ~LJ_TISNUM);
+ dasm_put(Dst, 1427, -LJ_TISNUM, ~LJ_TISNUM, ~LJ_TISNUM);
+ dasm_put(Dst, 1491, -LJ_TISNUM, (1+1)*8, FRAME_TYPE, ~LJ_TNIL);
+ dasm_put(Dst, 1555, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1560);
+ }
+ dasm_put(Dst, 1562);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1565);
+ }
+ dasm_put(Dst, 1567, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1579);
+ }
+ dasm_put(Dst, 1581);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1584);
+ }
+ dasm_put(Dst, 1586, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1598);
+ }
+ dasm_put(Dst, 1600);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1603);
+ }
+ dasm_put(Dst, 1605, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1617);
+ }
+ dasm_put(Dst, 1619);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1622);
+ }
+ dasm_put(Dst, 1624, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1636);
+ }
+ dasm_put(Dst, 1638);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1641);
+ }
+ dasm_put(Dst, 1643, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1655);
+ }
+ dasm_put(Dst, 1657);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1660);
+ }
+ dasm_put(Dst, 1662, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1674);
+ }
+ dasm_put(Dst, 1676);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1679);
+ }
+ dasm_put(Dst, 1681, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1693);
+ }
+ dasm_put(Dst, 1695);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1698);
+ }
+ dasm_put(Dst, 1700, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1712);
+ }
+ dasm_put(Dst, 1714);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1717);
+ }
+ dasm_put(Dst, 1719, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1731);
+ }
+ dasm_put(Dst, 1733);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1736);
+ }
+ dasm_put(Dst, 1738, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1750);
+ }
+ dasm_put(Dst, 1752);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1755);
+ }
+ dasm_put(Dst, 1757, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1769);
+ }
+ dasm_put(Dst, 1771);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1774);
+ }
+ dasm_put(Dst, 1776, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1788);
+ }
+ dasm_put(Dst, 1790);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1793);
+ }
+ dasm_put(Dst, 1795, -LJ_TISNUM, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1810);
+ }
+ dasm_put(Dst, 1812);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1815);
+ }
+ dasm_put(Dst, 1817, -LJ_TISNUM, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1832);
+ }
+ dasm_put(Dst, 1834);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1837);
+ }
+ dasm_put(Dst, 1839, -LJ_TISNUM, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1854);
+ }
+ dasm_put(Dst, 1856);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1859);
+ }
+ dasm_put(Dst, 1861, -LJ_TISNUM, Dt8(->upvalue[0]), -LJ_TISNUM, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1894);
+ }
+ dasm_put(Dst, 1896);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1899);
+ }
+ dasm_put(Dst, 1901, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1914);
+ }
+ dasm_put(Dst, 1916);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1919);
+ }
+ dasm_put(Dst, 1921, ~LJ_TISNUM, (2+1)*8, -LJ_TISNUM);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1943);
+ }
+ dasm_put(Dst, 1945);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 1948);
+ }
+ dasm_put(Dst, 1950, (2+1)*8, -LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM);
+ dasm_put(Dst, 2001, -LJ_TISNUM, -LJ_TISNUM);
+ dasm_put(Dst, 2055, -LJ_TISNUM, -LJ_TSTR, Dt5(->len), ~LJ_TISNUM, -LJ_TSTR, Dt5(->len), Dt5([1]));
+ dasm_put(Dst, 2109, ~LJ_TISNUM, (0+1)*8, (1+1)*8, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), -LJ_TISNUM, Dt1(->base), Dt1(->base), ~LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 2168, -LJ_TISNUM, -LJ_TSTR, Dt5(->len), -LJ_TISNUM, sizeof(GCstr)-1, -DISPATCH_GL(strempty), ~LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), -LJ_TSTR, -LJ_TISNUM, Dt5(->len));
+ dasm_put(Dst, 2230, DISPATCH_GL(tmpbuf.sz), DISPATCH_GL(tmpbuf.buf), Dt5([1]), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), -LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), DISPATCH_GL(tmpbuf.buf), sizeof(GCstr));
+ dasm_put(Dst, 2283, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), -LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), DISPATCH_GL(tmpbuf.buf), sizeof(GCstr), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 2343, -LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), DISPATCH_GL(tmpbuf.buf), sizeof(GCstr), -LJ_TTAB);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 2381);
+ }
+ dasm_put(Dst, 2383);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 2386);
+ }
+ dasm_put(Dst, 2388, ~LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM);
+ dasm_put(Dst, 2459, -LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM);
+ dasm_put(Dst, 2519, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM);
+ dasm_put(Dst, 2575, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM);
+ dasm_put(Dst, 2632, -LJ_TISNUM, -LJ_TISNUM, ~LJ_TISNUM, Dt1(->maxstack), Dt1(->top), Dt8(->f), Dt1(->base), 8*LUA_MINSTACK, Dt1(->base), Dt1(->top), Dt7(->field_pc), FRAME_TYPE, FRAME_TYPEP);
+ dasm_put(Dst, 2695, LUA_MINSTACK, Dt1(->base), Dt1(->base), Dt1(->top), Dt1(->base));
+#if LJ_HASJIT
+ dasm_put(Dst, 2729, DISPATCH_GL(hookmask), HOOK_VMEVENT, DISPATCH_GL(hookcount), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount));
+#endif
+ dasm_put(Dst, 2749, DISPATCH_GL(hookmask), HOOK_ACTIVE, GG_DISP2STATIC, DISPATCH_GL(hookmask), DISPATCH_GL(hookcount), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount), LUA_MASKLINE, Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 2795, GG_DISP2STATIC);
+#if LJ_HASJIT
+ dasm_put(Dst, 2811, -GG_DISP2J, Dt7(->field_pc), DISPATCH_J(L), PC2PROTO(framesize), Dt1(->base), Dt1(->top));
+#endif
+ dasm_put(Dst, 2832);
+#if LJ_HASJIT
+ dasm_put(Dst, 2835);
+#endif
+ dasm_put(Dst, 2838);
+#if LJ_HASJIT
+ dasm_put(Dst, 2840);
+#endif
+ dasm_put(Dst, 2843, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 2866, LJ_VMST_EXIT, DISPATCH_GL(vmstate), DISPATCH_GL(jit_L), DISPATCH_GL(jit_base), DISPATCH_J(exitno), DISPATCH_J(L), Dt1(->base), DISPATCH_GL(jit_L), -GG_DISP2J, Dt1(->cframe), Dt1(->base), ~CFRAME_RAWMASK);
+#endif
+ dasm_put(Dst, 2914);
+#if LJ_HASJIT
+ dasm_put(Dst, 2916, Dt7(->field_pc), DISPATCH_GL(jit_L), LJ_VMST_INTERP, PC2PROTO(k), DISPATCH_GL(vmstate), BC_FUNCF);
+#endif
+ dasm_put(Dst, 2953);
+#if LJ_HASJIT
+ dasm_put(Dst, 3027);
+#endif
+ dasm_put(Dst, 3041);
+ {
+ int i;
+ for (i = 31; i >= 0; i--) {
+ dasm_put(Dst, 3077, i, i);
+ }
+ }
+ dasm_put(Dst, 3082);
+#if LJ_HASJIT
+ dasm_put(Dst, 3111);
+#else
+ dasm_put(Dst, 3136);
+#endif
+ dasm_put(Dst, 3138);
+#if LJ_HASFFI
+#define DtE(_V) (int)(ptrdiff_t)&(((CTState *)0)_V)
+ dasm_put(Dst, 3140, Dt2(->ctype_state), GG_G2DISP, DtE(->cb.gpr[0]), DtE(->cb.gpr[2]), CFRAME_SIZE, DtE(->cb.stack), DtE(->cb.slot), Dt1(->base), LJ_VMST_INTERP, Dt1(->top), DISPATCH_GL(vmstate), Dt7(->field_pc));
+#endif
+ dasm_put(Dst, 3183);
+#if LJ_HASFFI
+ dasm_put(Dst, 3185, DISPATCH_GL(ctype_state), Dt1(->base), Dt1(->top), DtE(->L), DtE(->cb.gpr[0]));
+#endif
+ dasm_put(Dst, 3202);
+#if LJ_HASFFI
+#define DtF(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V)
+ dasm_put(Dst, 3204, DtF(->spadj), DtF(->nsp), offsetof(CCallState, stack), DtF(->func), DtF(->gpr[0]), DtF(->gpr[1]), DtF(->gpr[2]), DtF(->gpr[3]), DtF(->gpr[0]), DtF(->gpr[1]));
+#endif
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ dasm_put(Dst, 3242, defop);
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ dasm_put(Dst, 3244, -LJ_TISNUM, -LJ_TISNUM);
+ if (op == BC_ISLT) {
+ dasm_put(Dst, 3260);
+ } else if (op == BC_ISGE) {
+ dasm_put(Dst, 3262);
+ } else if (op == BC_ISLE) {
+ dasm_put(Dst, 3264);
+ } else {
+ dasm_put(Dst, 3266);
+ }
+ dasm_put(Dst, 3268, -LJ_TISNUM);
+ if (op == BC_ISLT) {
+ dasm_put(Dst, 3304);
+ } else if (op == BC_ISGE) {
+ dasm_put(Dst, 3306);
+ } else if (op == BC_ISLE) {
+ dasm_put(Dst, 3308);
+ } else {
+ dasm_put(Dst, 3310);
+ }
+ dasm_put(Dst, 3312);
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ dasm_put(Dst, 3315, -LJ_TISNUM, -LJ_TISNUM);
+ if (vk) {
+ dasm_put(Dst, 3326);
+ } else {
+ dasm_put(Dst, 3329);
+ }
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 3332, -LJ_TCDATA, -LJ_TCDATA);
+ }
+ dasm_put(Dst, 3339, -LJ_TISPRI);
+ if (vk) {
+ dasm_put(Dst, 3348, -LJ_TISTABUD);
+ } else {
+ dasm_put(Dst, 3365, -LJ_TISTABUD);
+ }
+ dasm_put(Dst, 3372, Dt6(->metatable));
+ if (vk) {
+ dasm_put(Dst, 3376);
+ } else {
+ dasm_put(Dst, 3379);
+ }
+ dasm_put(Dst, 3382, Dt6(->nomm), 1-vk, 1<len), ~LJ_TISNUM, -LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 3640, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 3647);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 3649);
+ }
+ dasm_put(Dst, 3651);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 3654);
+ }
+ dasm_put(Dst, 3656);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 3659, Dt6(->nomm), 1<base), Dt1(->base));
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ dasm_put(Dst, 4189, ~LJ_TSTR);
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ dasm_put(Dst, 4201, ~LJ_TCDATA);
+#endif
+ break;
+ case BC_KSHORT:
+ dasm_put(Dst, 4213, ~LJ_TISNUM);
+ break;
+ case BC_KNUM:
+ dasm_put(Dst, 4224);
+ break;
+ case BC_KPRI:
+ dasm_put(Dst, 4234);
+ break;
+ case BC_KNIL:
+ dasm_put(Dst, 4244, ~LJ_TNIL);
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ dasm_put(Dst, 4263, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_USETV:
+ dasm_put(Dst, 4279, offsetof(GCfuncL, uvptr), DtA(->marked), DtA(->closed), DtA(->v), LJ_GC_BLACK, -LJ_TISGCV, -(LJ_TISNUM - LJ_TISGCV), Dt4(->gch.marked), -GG_DISP2G, LJ_GC_WHITES);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 4319);
+ } else {
+ dasm_put(Dst, 4326);
+ }
+ dasm_put(Dst, 4329);
+ break;
+ case BC_USETS:
+ dasm_put(Dst, 4332, offsetof(GCfuncL, uvptr), ~LJ_TSTR, DtA(->marked), DtA(->v), DtA(->closed), LJ_GC_BLACK, Dt5(->marked), LJ_GC_WHITES, -GG_DISP2G);
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 4368);
+ } else {
+ dasm_put(Dst, 4375);
+ }
+ dasm_put(Dst, 4378);
+ break;
+ case BC_USETN:
+ dasm_put(Dst, 4381, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_USETP:
+ dasm_put(Dst, 4398, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+
+ case BC_UCLO:
+ dasm_put(Dst, 4414, Dt1(->openupval), Dt1(->base), Dt1(->base));
+ break;
+
+ case BC_FNEW:
+ dasm_put(Dst, 4437, Dt1(->base), Dt1(->base), ~LJ_TFUNC);
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ if (op == BC_TDUP) {
+ dasm_put(Dst, 4458);
+ }
+ dasm_put(Dst, 4460, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base));
+ if (op == BC_TNEW) {
+ dasm_put(Dst, 4473);
+ } else {
+ dasm_put(Dst, 4482);
+ }
+ dasm_put(Dst, 4486, Dt1(->base), ~LJ_TTAB);
+ break;
+
+ case BC_GGET:
+ case BC_GSET:
+ dasm_put(Dst, 4504, Dt7(->env));
+ if (op == BC_GGET) {
+ dasm_put(Dst, 4510);
+ } else {
+ dasm_put(Dst, 4513);
+ }
+ break;
+
+ case BC_TGETV:
+ dasm_put(Dst, 4516, -LJ_TTAB, -LJ_TISNUM, Dt6(->array), Dt6(->asize), -LJ_TNIL, Dt6(->metatable), Dt6(->nomm), 1<hmask), Dt5(->hash), Dt6(->node), DtB(->key), DtB(->val), DtB(->next), -LJ_TSTR, -LJ_TNIL, Dt6(->metatable), ~LJ_TNIL, Dt6(->nomm));
+ dasm_put(Dst, 4633, 1<asize), Dt6(->array), -LJ_TNIL, Dt6(->metatable), Dt6(->nomm), 1<array), Dt6(->asize), -LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<marked), Dt6(->gclist), -LJ_TSTR);
+ break;
+ case BC_TSETS:
+ dasm_put(Dst, 4764, -LJ_TTAB, Dt6(->hmask), Dt5(->hash), Dt6(->node), Dt6(->nomm), DtB(->key), DtB(->val.it), DtB(->next), -LJ_TSTR, Dt6(->marked), -LJ_TNIL, LJ_GC_BLACK, DtB(->val));
+ dasm_put(Dst, 4822, Dt6(->metatable), Dt6(->nomm), 1<metatable), Dt1(->base), Dt6(->nomm), 1<base), DISPATCH_GL(gc.grayagain), LJ_GC_BLACK);
+ dasm_put(Dst, 4875, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist));
+ break;
+ case BC_TSETB:
+ dasm_put(Dst, 4884, -LJ_TTAB, Dt6(->asize), Dt6(->array), -LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<marked), Dt6(->gclist));
+ break;
+
+ case BC_TSETM:
+ dasm_put(Dst, 4951, Dt6(->asize), Dt6(->array), Dt6(->marked), LJ_GC_BLACK, Dt1(->base));
+ if (LJ_TARGET_OSX) {
+ dasm_put(Dst, 4996, Dt1(->base));
+ }
+ dasm_put(Dst, 4999, DISPATCH_GL(gc.grayagain), LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist));
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ dasm_put(Dst, 5015);
+ break;
+ case BC_CALL:
+ dasm_put(Dst, 5021, -LJ_TFUNC, Dt7(->field_pc));
+ break;
+
+ case BC_CALLMT:
+ dasm_put(Dst, 5041);
+ break;
+ case BC_CALLT:
+ dasm_put(Dst, 5046, -LJ_TFUNC, Dt7(->ffid), FRAME_TYPE, Dt7(->field_pc), Dt7(->field_pc), PC2PROTO(k), FRAME_VARG, FRAME_TYPEP);
+ dasm_put(Dst, 5107, FRAME_TYPE);
+ break;
+
+ case BC_ITERC:
+ dasm_put(Dst, 5118, -LJ_TFUNC, Dt7(->field_pc));
+ break;
+
+ case BC_ITERN:
+#if LJ_HASJIT
+#endif
+ dasm_put(Dst, 5142, Dt6(->asize), Dt6(->array), -LJ_TNIL, ~LJ_TISNUM, Dt6(->hmask), Dt6(->node), DtB(->val), -LJ_TNIL, DtB(->key));
+ break;
+
+ case BC_ISNEXT:
+ dasm_put(Dst, 5207, -LJ_TFUNC, Dt8(->ffid), -LJ_TTAB, -LJ_TNIL, FF_next_N, BC_JMP, BC_ITERC);
+ break;
+
+ case BC_VARG:
+ dasm_put(Dst, 5246, FRAME_VARG, ~LJ_TNIL, Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->base));
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ dasm_put(Dst, 5318);
+ break;
+
+ case BC_RET:
+ dasm_put(Dst, 5325, FRAME_TYPE, FRAME_VARG, Dt7(->field_pc), PC2PROTO(k), ~LJ_TNIL, FRAME_TYPEP);
+ break;
+
+ case BC_RET0: case BC_RET1:
+ dasm_put(Dst, 5390, FRAME_TYPE, FRAME_VARG);
+ if (op == BC_RET1) {
+ dasm_put(Dst, 5401);
+ }
+ dasm_put(Dst, 5403);
+ if (op == BC_RET1) {
+ dasm_put(Dst, 5406);
+ }
+ dasm_put(Dst, 5408, Dt7(->field_pc), PC2PROTO(k), ~LJ_TNIL);
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+
+ case BC_FORL:
+#if LJ_HASJIT
+ dasm_put(Dst, 5434, -GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ dasm_put(Dst, 5445);
+ if (op != BC_JFORL) {
+ dasm_put(Dst, 5447);
+ }
+ if (!vk) {
+ dasm_put(Dst, 5449, -LJ_TISNUM, -LJ_TISNUM, -LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 5467, -LJ_TISNUM);
+ if (op == BC_IFORL) {
+ dasm_put(Dst, 5475);
+ } else {
+ dasm_put(Dst, 5477);
+ }
+ dasm_put(Dst, 5480);
+ }
+ dasm_put(Dst, 5485);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 5487);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 5489);
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 5492);
+ }
+ if (vk) {
+ dasm_put(Dst, 5494);
+ }
+ dasm_put(Dst, 5496);
+ if (op == BC_JFORI || op == BC_JFORL) {
+ dasm_put(Dst, 5501, BC_JLOOP);
+ }
+ dasm_put(Dst, 5504);
+ if (!vk) {
+ dasm_put(Dst, 5511);
+ } else {
+ dasm_put(Dst, 5513);
+ }
+ dasm_put(Dst, 5515);
+ if (!vk) {
+ dasm_put(Dst, 5519, -LJ_TISNUM, -LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 5531);
+ }
+ dasm_put(Dst, 5540);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 5544);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 5546, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 5551);
+ } else {
+ dasm_put(Dst, 5553, BC_JLOOP);
+ }
+ dasm_put(Dst, 5556);
+ if (vk) {
+ dasm_put(Dst, 5562);
+ }
+ dasm_put(Dst, 5567);
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ dasm_put(Dst, 5573, -GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ dasm_put(Dst, 5584);
+ if (op == BC_JITERL) {
+ dasm_put(Dst, 5586, -LJ_TNIL, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 5592, -LJ_TNIL);
+ }
+ dasm_put(Dst, 5598);
+ break;
+
+ case BC_LOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 5605, -GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_ILOOP:
+ dasm_put(Dst, 5616);
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 5623, DISPATCH_J(trace), DISPATCH_GL(vmstate), DtD(->mcode), DISPATCH_GL(jit_base), DISPATCH_GL(jit_L));
+#endif
+ break;
+
+ case BC_JMP:
+ dasm_put(Dst, 5637);
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ dasm_put(Dst, 5646, -GG_DISP2HOT, HOTCOUNT_CALL);
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ dasm_put(Dst, 5657, Dt1(->maxstack), -4+PC2PROTO(numparams), -4+PC2PROTO(k));
+ if (op != BC_JFUNCF) {
+ dasm_put(Dst, 5667);
+ }
+ dasm_put(Dst, 5670, ~LJ_TNIL);
+ if (op == BC_JFUNCF) {
+ dasm_put(Dst, 5677, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 5681);
+ }
+ dasm_put(Dst, 5686);
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ dasm_put(Dst, 5692);
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ dasm_put(Dst, 5694, Dt1(->maxstack), 8+FRAME_VARG, -4+PC2PROTO(k), -4+PC2PROTO(numparams), ~LJ_TNIL);
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 5735, Dt8(->f));
+ } else {
+ dasm_put(Dst, 5738, DISPATCH_GL(wrapf));
+ }
+ dasm_put(Dst, 5741, Dt1(->maxstack), Dt1(->base), Dt1(->top));
+ if (op == BC_FUNCCW) {
+ dasm_put(Dst, 5751, Dt8(->f));
+ }
+ dasm_put(Dst, 5754, LJ_VMST_C, DISPATCH_GL(vmstate), Dt1(->base), LJ_VMST_INTERP, Dt1(->top), DISPATCH_GL(vmstate));
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ dasm_put(Dst, 5776);
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 0xe\n" /* Return address is in lr. */
+ "\t.byte 0xc\n\t.uleb128 0xd\n\t.uleb128 0\n" /* def_cfa sp */
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x8e\n\t.uleb128 1\n", /* offset lr */
+ fcofs, CFRAME_SIZE);
+ for (i = 11; i >= 4; i--) /* offset r4-r11 */
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2+(11-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x8e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x8b\n\t.uleb128 2\n" /* offset r11 */
+ "\t.byte 0x85\n\t.uleb128 3\n" /* offset r5 */
+ "\t.byte 0x84\n\t.uleb128 4\n" /* offset r4 */
+ "\t.byte 0xd\n\t.uleb128 0xb\n" /* def_cfa_register r11 */
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
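[Editor's note on the generated file above, not part of the diff.] The generated build_subroutines()/build_ins() functions only stream DynASM action-list offsets through dasm_put(); a separate driver still has to link and encode that stream into machine code. The sketch below shows that driver sequence using the public dasm_proto.h API. It is a simplified illustration: the function name assemble_vm is made up, error handling is minimal, and it assumes the Dst macro resolves to this BuildCtx's dasm_State and that the action list is named build_actionlist, as declared in the .dasc sources further down.

    #include <stdlib.h>
    #include "buildvm.h"       /* BuildCtx, Dst wiring (assumed) */
    #include "dasm_proto.h"    /* dasm_init/setup/link/encode */

    /* Sketch only: turn the dasm_put() action stream emitted by
    ** build_backend() into raw machine code inside ctx->code. */
    static int assemble_vm(BuildCtx *ctx)
    {
      size_t sz;
      dasm_init(Dst, DASM_MAXSECTION);              /* one dasm_State per run */
      dasm_setupglobal(Dst, ctx->glob, GLOB__MAX);  /* where labels get stored */
      dasm_setup(Dst, build_actionlist);            /* bind the action list */
      build_backend(ctx);                           /* emits all dasm_put()s */
      if (dasm_link(Dst, &sz)) return -1;           /* resolve labels, size it */
      ctx->code = malloc(sz);
      ctx->codesz = sz;
      return dasm_encode(Dst, ctx->code);           /* write the final bytes */
    }

The resulting ctx->code / ctx->codesz pair and the ctx->glob label table are exactly what emit_asm_debug() above and the emitters in the following files consume.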
diff --git a/src/LuaJIT/src/buildvm_asm.c b/src/LuaJIT/src/buildvm_asm.c
new file mode 100644
index 000000000..f975eadc7
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_asm.c
@@ -0,0 +1,283 @@
+/*
+** LuaJIT VM builder: Assembler source code emitter.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "buildvm.h"
+#include "lj_bc.h"
+
+/* ------------------------------------------------------------------------ */
+
+#if LJ_TARGET_X86ORX64
+/* Emit bytes piecewise as assembler text. */
+static void emit_asm_bytes(BuildCtx *ctx, uint8_t *p, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ if ((i & 15) == 0)
+ fprintf(ctx->fp, "\t.byte %d", p[i]);
+ else
+ fprintf(ctx->fp, ",%d", p[i]);
+ if ((i & 15) == 15) putc('\n', ctx->fp);
+ }
+ if ((n & 15) != 0) putc('\n', ctx->fp);
+}
+
+/* Emit relocation. */
+static void emit_asm_reloc(BuildCtx *ctx, int type, const char *sym)
+{
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ if (type)
+ fprintf(ctx->fp, "\t.long %s-.-4\n", sym);
+ else
+ fprintf(ctx->fp, "\t.long %s\n", sym);
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", sym);
+ if (type)
+ fprintf(ctx->fp, "\t.long %s-.-4\n", sym);
+ else
+ fprintf(ctx->fp, "\t.long %s\n", sym);
+ break;
+ default: /* BUILD_machasm for relative relocations handled below. */
+ fprintf(ctx->fp, "\t.long %s\n", sym);
+ break;
+ }
+}
+
+static const char *const jccnames[] = {
+ "jo", "jno", "jb", "jnb", "jz", "jnz", "jbe", "ja",
+ "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg"
+};
+
+/* Emit relocation for the incredibly stupid OSX assembler. */
+static void emit_asm_reloc_mach(BuildCtx *ctx, uint8_t *cp, int n,
+ const char *sym)
+{
+ const char *opname = NULL;
+ if (--n < 0) goto err;
+ if (cp[n] == 0xe8) {
+ opname = "call";
+ } else if (cp[n] == 0xe9) {
+ opname = "jmp";
+ } else if (cp[n] >= 0x80 && cp[n] <= 0x8f && n > 0 && cp[n-1] == 0x0f) {
+ opname = jccnames[cp[n]-0x80];
+ n--;
+ } else {
+err:
+ fprintf(stderr, "Error: unsupported opcode for %s symbol relocation.\n",
+ sym);
+ exit(1);
+ }
+ emit_asm_bytes(ctx, cp, n);
+ fprintf(ctx->fp, "\t%s %s\n", opname, sym);
+}
+#else
+/* Emit words piecewise as assembler text. */
+static void emit_asm_words(BuildCtx *ctx, uint8_t *p, int n)
+{
+ int i;
+ for (i = 0; i < n; i += 4) {
+ if ((i & 15) == 0)
+ fprintf(ctx->fp, "\t.long 0x%08x", *(uint32_t *)(p+i));
+ else
+ fprintf(ctx->fp, ",0x%08x", *(uint32_t *)(p+i));
+ if ((i & 15) == 12) putc('\n', ctx->fp);
+ }
+ if ((n & 15) != 0) putc('\n', ctx->fp);
+}
+
+/* Emit relocation as part of an instruction. */
+static void emit_asm_wordreloc(BuildCtx *ctx, uint8_t *p, int n,
+ const char *sym)
+{
+ uint32_t ins;
+ emit_asm_words(ctx, p, n-4);
+ ins = *(uint32_t *)(p+n-4);
+#if LJ_TARGET_ARM
+ if ((ins & 0xff000000u) == 0xfa000000u) {
+ fprintf(ctx->fp, "\tblx %s\n", sym);
+ } else if ((ins & 0x0e000000u) == 0x0a000000u) {
+ fprintf(ctx->fp, "\t%s%.2s %s\n", (ins & 0x01000000u) ? "bl" : "b",
+ "eqnecsccmiplvsvchilsgeltgtle" + 2*(ins >> 28), sym);
+ } else {
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+ }
+#elif LJ_TARGET_PPC || LJ_TARGET_PPCSPE
+ if ((ins >> 26) == 16) {
+ fprintf(ctx->fp, "\t%s %d, %d, %s\n",
+ (ins & 1) ? "bcl" : "bc", (ins >> 21) & 31, (ins >> 16) & 31, sym);
+ } else if ((ins >> 26) == 18) {
+ fprintf(ctx->fp, "\t%s %s\n", (ins & 1) ? "bl" : "b", sym);
+ } else {
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+ }
+#elif LJ_TARGET_MIPS
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+#else
+#error "missing relocation support for this architecture"
+#endif
+}
+#endif
+
+#if LJ_TARGET_ARM
+#define ELFASM_PX "%%"
+#else
+#define ELFASM_PX "@"
+#endif
+
+/* Emit an assembler label. */
+static void emit_asm_label(BuildCtx *ctx, const char *name, int size, int isfunc)
+{
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp,
+ "\n\t.globl %s\n"
+ "\t.hidden %s\n"
+ "\t.type %s, " ELFASM_PX "%s\n"
+ "\t.size %s, %d\n"
+ "%s:\n",
+ name, name, name, isfunc ? "function" : "object", name, size, name);
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\n\t.globl %s\n", name);
+ if (isfunc)
+ fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", name);
+ fprintf(ctx->fp, "%s:\n", name);
+ break;
+ case BUILD_machasm:
+ fprintf(ctx->fp,
+ "\n\t.private_extern %s\n"
+ "%s:\n", name, name);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Emit alignment. */
+static void emit_asm_align(BuildCtx *ctx, int bits)
+{
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.p2align %d\n", bits);
+ break;
+ case BUILD_machasm:
+ fprintf(ctx->fp, "\t.align %d\n", bits);
+ break;
+ default:
+ break;
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Emit assembler source code. */
+void emit_asm(BuildCtx *ctx)
+{
+ int i, rel;
+
+ fprintf(ctx->fp, "\t.file \"buildvm_%s.dasc\"\n", ctx->dasm_arch);
+ fprintf(ctx->fp, "\t.text\n");
+ emit_asm_align(ctx, 4);
+
+ emit_asm_label(ctx, ctx->beginsym, 0, 0);
+ if (ctx->mode != BUILD_machasm)
+ fprintf(ctx->fp, ".Lbegin:\n");
+
+#if LJ_TARGET_ARM && defined(__GNUC__) && !defined(LUAJIT_NO_UNWIND)
+ /* This should really be moved into buildvm_arm.dasc. */
+ fprintf(ctx->fp,
+ ".fnstart\n"
+ ".save {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n"
+ ".pad #28\n");
+#endif
+#if LJ_TARGET_MIPS
+ fprintf(ctx->fp, ".set nomips16\n.abicalls\n.set noreorder\n.set nomacro\n");
+#endif
+
+ for (i = rel = 0; i < ctx->nsym; i++) {
+ int32_t ofs = ctx->sym[i].ofs;
+ int32_t next = ctx->sym[i+1].ofs;
+#if LJ_TARGET_ARM && defined(__GNUC__) && !defined(LUAJIT_NO_UNWIND) && \
+ LJ_HASFFI
+ if (!strcmp(ctx->sym[i].name, "lj_vm_ffi_call"))
+ fprintf(ctx->fp,
+ ".globl lj_err_unwind_arm\n"
+ ".personality lj_err_unwind_arm\n"
+ ".fnend\n"
+ ".fnstart\n"
+ ".save {r4, r5, r11, lr}\n"
+ ".setfp r11, sp\n");
+#endif
+ emit_asm_label(ctx, ctx->sym[i].name, next - ofs, 1);
+ while (rel < ctx->nreloc && ctx->reloc[rel].ofs <= next) {
+ BuildReloc *r = &ctx->reloc[rel];
+ int n = r->ofs - ofs;
+#if LJ_TARGET_X86ORX64
+ if (ctx->mode == BUILD_machasm && r->type != 0) {
+ emit_asm_reloc_mach(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
+ } else {
+ emit_asm_bytes(ctx, ctx->code+ofs, n);
+ emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]);
+ }
+ ofs += n+4;
+#else
+ emit_asm_wordreloc(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
+ ofs += n;
+#endif
+ rel++;
+ }
+#if LJ_TARGET_X86ORX64
+ emit_asm_bytes(ctx, ctx->code+ofs, next-ofs);
+#else
+ emit_asm_words(ctx, ctx->code+ofs, next-ofs);
+#endif
+ }
+
+#if LJ_TARGET_ARM && defined(__GNUC__) && !defined(LUAJIT_NO_UNWIND)
+ fprintf(ctx->fp,
+#if !LJ_HASFFI
+ ".globl lj_err_unwind_arm\n"
+ ".personality lj_err_unwind_arm\n"
+#endif
+ ".fnend\n");
+#endif
+
+ fprintf(ctx->fp, "\n");
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .note.GNU-stack,\"\"," ELFASM_PX "progbits\n");
+#if LJ_TARGET_PPCSPE
+ /* Soft-float ABI + SPE. */
+ fprintf(ctx->fp, "\t.gnu_attribute 4, 2\n\t.gnu_attribute 8, 3\n");
+#elif LJ_TARGET_PPC
+ /* Hard-float ABI. */
+ fprintf(ctx->fp, "\t.gnu_attribute 4, 1\n");
+#endif
+ /* fallthrough */
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.ident \"%s\"\n", ctx->dasm_ident);
+ break;
+ case BUILD_machasm:
+ fprintf(ctx->fp,
+ "\t.cstring\n"
+ "\t.ascii \"%s\\0\"\n", ctx->dasm_ident);
+ break;
+ default:
+ break;
+ }
+ fprintf(ctx->fp, "\n");
+}
+
diff --git a/src/LuaJIT/src/buildvm_fold.c b/src/LuaJIT/src/buildvm_fold.c
new file mode 100644
index 000000000..73f4f80a7
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_fold.c
@@ -0,0 +1,229 @@
+/*
+** LuaJIT VM builder: IR folding hash table generator.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "buildvm.h"
+#include "lj_obj.h"
+#include "lj_ir.h"
+
+/* Context for the folding hash table generator. */
+static int lineno;
+static int funcidx;
+static uint32_t foldkeys[BUILD_MAX_FOLD];
+static uint32_t nkeys;
+
+/* Try to fill the hash table with keys using the hash parameters. */
+static int tryhash(uint32_t *htab, uint32_t sz, uint32_t r, int dorol)
+{
+ uint32_t i;
+ if (dorol && ((r & 31) == 0 || (r>>5) == 0))
+ return 0; /* Avoid zero rotates. */
+ memset(htab, 0xff, (sz+1)*sizeof(uint32_t));
+ for (i = 0; i < nkeys; i++) {
+ uint32_t key = foldkeys[i];
+ uint32_t k = key & 0xffffff;
+ uint32_t h = (dorol ? lj_rol(lj_rol(k, r>>5) - k, r&31) :
+ (((k << (r>>5)) - k) << (r&31))) % sz;
+ if (htab[h] != 0xffffffff) { /* Collision on primary slot. */
+ if (htab[h+1] != 0xffffffff) { /* Collision on secondary slot. */
+ /* Try to move the colliding key, if possible. */
+ if (h < sz-1 && htab[h+2] == 0xffffffff) {
+ uint32_t k2 = htab[h+1] & 0xffffff;
+ uint32_t h2 = (dorol ? lj_rol(lj_rol(k2, r>>5) - k2, r&31) :
+ (((k2 << (r>>5)) - k2) << (r&31))) % sz;
+ if (h2 != h+1) return 0; /* Cannot resolve collision. */
+ htab[h+2] = htab[h+1]; /* Move colliding key to secondary slot. */
+ } else {
+ return 0; /* Collision. */
+ }
+ }
+ htab[h+1] = key;
+ } else {
+ htab[h] = key;
+ }
+ }
+ return 1; /* Success, all keys could be stored. */
+}
+
+/* Print the generated hash table. */
+static void printhash(BuildCtx *ctx, uint32_t *htab, uint32_t sz)
+{
+ uint32_t i;
+ fprintf(ctx->fp, "static const uint32_t fold_hash[%d] = {\n0x%08x",
+ sz+1, htab[0]);
+ for (i = 1; i < sz+1; i++)
+ fprintf(ctx->fp, ",\n0x%08x", htab[i]);
+ fprintf(ctx->fp, "\n};\n\n");
+}
+
+/* Exhaustive search for the shortest semi-perfect hash table. */
+static void makehash(BuildCtx *ctx)
+{
+ uint32_t htab[BUILD_MAX_FOLD*2+1];
+ uint32_t sz, r;
+ /* Search for the smallest hash table with an odd size. */
+ for (sz = (nkeys|1); sz < BUILD_MAX_FOLD*2; sz += 2) {
+ /* First try all shift hash combinations. */
+ for (r = 0; r < 32*32; r++) {
+ if (tryhash(htab, sz, r, 0)) {
+ printhash(ctx, htab, sz);
+ fprintf(ctx->fp,
+ "#define fold_hashkey(k)\t(((((k)<<%u)-(k))<<%u)%%%u)\n\n",
+ r>>5, r&31, sz);
+ return;
+ }
+ }
+ /* Then try all rotate hash combinations. */
+ for (r = 0; r < 32*32; r++) {
+ if (tryhash(htab, sz, r, 1)) {
+ printhash(ctx, htab, sz);
+ fprintf(ctx->fp,
+ "#define fold_hashkey(k)\t(lj_rol(lj_rol((k),%u)-(k),%u)%%%u)\n\n",
+ r>>5, r&31, sz);
+ return;
+ }
+ }
+ }
+ fprintf(stderr, "Error: search for perfect hash failed\n");
+ exit(1);
+}
+
+/* Parse one token of a fold rule. */
+static uint32_t nexttoken(char **pp, int allowlit, int allowany)
+{
+ char *p = *pp;
+ if (p) {
+ uint32_t i;
+ char *q = strchr(p, ' ');
+ if (q) *q++ = '\0';
+ *pp = q;
+ if (allowlit && !strncmp(p, "IRFPM_", 6)) {
+ for (i = 0; irfpm_names[i]; i++)
+ if (!strcmp(irfpm_names[i], p+6))
+ return i;
+ } else if (allowlit && !strncmp(p, "IRFL_", 5)) {
+ for (i = 0; irfield_names[i]; i++)
+ if (!strcmp(irfield_names[i], p+5))
+ return i;
+ } else if (allowlit && !strncmp(p, "IRCALL_", 7)) {
+ for (i = 0; ircall_names[i]; i++)
+ if (!strcmp(ircall_names[i], p+7))
+ return i;
+ } else if (allowlit && !strncmp(p, "IRCONV_", 7)) {
+ for (i = 0; irt_names[i]; i++) {
+ const char *r = strchr(p+7, '_');
+ if (r && !strncmp(irt_names[i], p+7, r-(p+7))) {
+ uint32_t j;
+ for (j = 0; irt_names[j]; j++)
+ if (!strcmp(irt_names[j], r+1))
+ return (i << 5) + j;
+ }
+ }
+ } else if (allowlit && *p >= '0' && *p <= '9') {
+ for (i = 0; *p >= '0' && *p <= '9'; p++)
+ i = i*10 + (*p - '0');
+ if (*p == '\0')
+ return i;
+ } else if (allowany && !strcmp("any", p)) {
+ return allowany;
+ } else {
+ for (i = 0; ir_names[i]; i++)
+ if (!strcmp(ir_names[i], p))
+ return i;
+ }
+ fprintf(stderr, "Error: bad fold definition token \"%s\" at line %d\n", p, lineno);
+ exit(1);
+ }
+ return 0;
+}
+
+/* Parse a fold rule. */
+static void foldrule(char *p)
+{
+ uint32_t op = nexttoken(&p, 0, 0);
+ uint32_t left = nexttoken(&p, 0, 0x7f);
+ uint32_t right = nexttoken(&p, 1, 0x3ff);
+ uint32_t key = (funcidx << 24) | (op << 17) | (left << 10) | right;
+ uint32_t i;
+ if (nkeys >= BUILD_MAX_FOLD) {
+ fprintf(stderr, "Error: too many fold rules, increase BUILD_MAX_FOLD.\n");
+ exit(1);
+ }
+ /* Simple insertion sort to detect duplicates. */
+ for (i = nkeys; i > 0; i--) {
+ if ((foldkeys[i-1]&0xffffff) < (key & 0xffffff))
+ break;
+ if ((foldkeys[i-1]&0xffffff) == (key & 0xffffff)) {
+ fprintf(stderr, "Error: duplicate fold definition at line %d\n", lineno);
+ exit(1);
+ }
+ foldkeys[i] = foldkeys[i-1];
+ }
+ foldkeys[i] = key;
+ nkeys++;
+}
+
+/* Emit C source code for IR folding hash table. */
+void emit_fold(BuildCtx *ctx)
+{
+ char buf[256]; /* We don't care about analyzing lines longer than that. */
+ const char *fname = ctx->args[0];
+ FILE *fp;
+
+ if (fname == NULL) {
+ fprintf(stderr, "Error: missing input filename\n");
+ exit(1);
+ }
+
+ if (fname[0] == '-' && fname[1] == '\0') {
+ fp = stdin;
+ } else {
+ fp = fopen(fname, "r");
+ if (!fp) {
+ fprintf(stderr, "Error: cannot open input file '%s': %s\n",
+ fname, strerror(errno));
+ exit(1);
+ }
+ }
+
+ fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
+ fprintf(ctx->fp, "static const FoldFunc fold_func[] = {\n");
+
+ lineno = 0;
+ funcidx = 0;
+ nkeys = 0;
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ lineno++;
+ /* The prefix must be at the start of a line, otherwise it's ignored. */
+ if (!strncmp(buf, FOLDDEF_PREFIX, sizeof(FOLDDEF_PREFIX)-1)) {
+ char *p = buf+sizeof(FOLDDEF_PREFIX)-1;
+ char *q = strchr(p, ')');
+ if (p[0] == '(' && q) {
+ p++;
+ *q = '\0';
+ foldrule(p);
+ } else if ((p[0] == 'F' || p[0] == 'X') && p[1] == '(' && q) {
+ p += 2;
+ *q = '\0';
+ if (funcidx)
+ fprintf(ctx->fp, ",\n");
+ if (p[-2] == 'X')
+ fprintf(ctx->fp, " %s", p);
+ else
+ fprintf(ctx->fp, " fold_%s", p);
+ funcidx++;
+ } else {
+ buf[strlen(buf)-1] = '\0';
+ fprintf(stderr, "Error: unknown fold definition tag %s%s at line %d\n",
+ FOLDDEF_PREFIX, p, lineno);
+ exit(1);
+ }
+ }
+ }
+ fclose(fp);
+ fprintf(ctx->fp, "\n};\n\n");
+
+ makehash(ctx);
+}
+
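[Editor's note, not part of the diff.] makehash() above emits a fold_hash[] array plus a fold_hashkey() macro; at run time the optimizer probes the primary slot and then the secondary slot, mirroring the collision handling in tryhash(). The sketch below is a hedged illustration of that lookup with the key packing from foldrule(); it is not the literal lj_opt_fold.c code, and fold_lookup is a made-up name.

    /* Sketch: consult the generated semi-perfect hash.  'key' is the packed
    ** (op << 17) | (left << 10) | right value, i.e. the low 24 bits of a
    ** foldkeys[] entry; the top byte of a stored entry is the fold_func[]
    ** index assigned by foldrule(). */
    static uint32_t fold_lookup(uint32_t key)
    {
      uint32_t h = fold_hashkey(key);        /* macro emitted by makehash() */
      uint32_t fh = fold_hash[h];            /* primary slot */
      if ((fh & 0xffffffu) != key) {
        fh = fold_hash[h+1];                 /* secondary slot, cf. tryhash() */
        if ((fh & 0xffffffu) != key)
          return 0xffffffffu;                /* no rule for this key */
      }
      return fh >> 24;                       /* index into fold_func[] */
    }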
diff --git a/src/LuaJIT/src/buildvm_lib.c b/src/LuaJIT/src/buildvm_lib.c
new file mode 100644
index 000000000..3231d3ad1
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_lib.c
@@ -0,0 +1,377 @@
+/*
+** LuaJIT VM builder: library definition compiler.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "buildvm.h"
+#include "lj_obj.h"
+#include "lj_lib.h"
+
+/* Context for library definitions. */
+static uint8_t obuf[8192];
+static uint8_t *optr;
+static char modname[80];
+static size_t modnamelen;
+static char funcname[80];
+static int modstate, regfunc;
+static int ffid, recffid, ffasmfunc;
+
+enum {
+ REGFUNC_OK,
+ REGFUNC_NOREG,
+ REGFUNC_NOREGUV
+};
+
+static void libdef_name(const char *p, int kind)
+{
+ size_t n = strlen(p);
+ if (kind != LIBINIT_STRING) {
+ if (n > modnamelen && p[modnamelen] == '_' &&
+ !strncmp(p, modname, modnamelen)) {
+ p += modnamelen+1;
+ n -= modnamelen+1;
+ }
+ }
+ if (n > LIBINIT_MAXSTR) {
+ fprintf(stderr, "Error: string too long: '%s'\n", p);
+ exit(1);
+ }
+ if (optr+1+n+2 > obuf+sizeof(obuf)) { /* +2 for caller. */
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = (uint8_t)(n | kind);
+ memcpy(optr, p, n);
+ optr += n;
+}
+
+static void libdef_endmodule(BuildCtx *ctx)
+{
+ if (modstate != 0) {
+ char line[80];
+ const uint8_t *p;
+ int n;
+ if (modstate == 1)
+ fprintf(ctx->fp, " (lua_CFunction)0");
+ fprintf(ctx->fp, "\n};\n");
+ fprintf(ctx->fp, "static const uint8_t %s%s[] = {\n",
+ LABEL_PREFIX_LIBINIT, modname);
+ line[0] = '\0';
+ for (n = 0, p = obuf; p < optr; p++) {
+ n += sprintf(line+n, "%d,", *p);
+ if (n >= 75) {
+ fprintf(ctx->fp, "%s\n", line);
+ n = 0;
+ line[0] = '\0';
+ }
+ }
+ fprintf(ctx->fp, "%s%d\n};\n#endif\n\n", line, LIBINIT_END);
+ }
+}
+
+static void libdef_module(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ libdef_endmodule(ctx);
+ optr = obuf;
+ *optr++ = (uint8_t)ffid;
+ *optr++ = (uint8_t)ffasmfunc;
+ *optr++ = 0; /* Hash table size. */
+ modstate = 1;
+ fprintf(ctx->fp, "#ifdef %sMODULE_%s\n", LIBDEF_PREFIX, p);
+ fprintf(ctx->fp, "#undef %sMODULE_%s\n", LIBDEF_PREFIX, p);
+ fprintf(ctx->fp, "static const lua_CFunction %s%s[] = {\n",
+ LABEL_PREFIX_LIBCF, p);
+ }
+ modnamelen = strlen(p);
+ if (modnamelen > sizeof(modname)-1) {
+ fprintf(stderr, "Error: module name too long: '%s'\n", p);
+ exit(1);
+ }
+ strcpy(modname, p);
+}
+
+static int find_ffofs(BuildCtx *ctx, const char *name)
+{
+ int i;
+ for (i = 0; i < ctx->nglob; i++) {
+ const char *gl = ctx->globnames[i];
+ if (gl[0] == 'f' && gl[1] == 'f' && gl[2] == '_' && !strcmp(gl+3, name)) {
+ return (int)((uint8_t *)ctx->glob[i] - ctx->code);
+ }
+ }
+ fprintf(stderr, "Error: undefined fast function %s%s\n",
+ LABEL_PREFIX_FF, name);
+ exit(1);
+}
+
+static void libdef_func(BuildCtx *ctx, char *p, int arg)
+{
+ if (arg != LIBINIT_CF)
+ ffasmfunc++;
+ if (ctx->mode == BUILD_libdef) {
+ if (modstate == 0) {
+ fprintf(stderr, "Error: no module for function definition %s\n", p);
+ exit(1);
+ }
+ if (regfunc == REGFUNC_NOREG) {
+ if (optr+1 > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_FFID;
+ } else {
+ if (arg != LIBINIT_ASM_) {
+ if (modstate != 1) fprintf(ctx->fp, ",\n");
+ modstate = 2;
+ fprintf(ctx->fp, " %s%s", arg ? LABEL_PREFIX_FFH : LABEL_PREFIX_CF, p);
+ }
+ if (regfunc != REGFUNC_NOREGUV) obuf[2]++; /* Bump hash table size. */
+ libdef_name(regfunc == REGFUNC_NOREGUV ? "" : p, arg);
+ }
+ } else if (ctx->mode == BUILD_ffdef) {
+ fprintf(ctx->fp, "FFDEF(%s)\n", p);
+ } else if (ctx->mode == BUILD_recdef) {
+ if (strlen(p) > sizeof(funcname)-1) {
+ fprintf(stderr, "Error: function name too long: '%s'\n", p);
+ exit(1);
+ }
+ strcpy(funcname, p);
+ } else if (ctx->mode == BUILD_vmdef) {
+ int i;
+ for (i = 1; p[i] && modname[i-1]; i++)
+ if (p[i] == '_') p[i] = '.';
+ fprintf(ctx->fp, "\"%s\",\n", p);
+ } else if (ctx->mode == BUILD_bcdef) {
+ if (arg != LIBINIT_CF)
+ fprintf(ctx->fp, ",\n%d", find_ffofs(ctx, p));
+ }
+ ffid++;
+ regfunc = REGFUNC_OK;
+}
+
+static uint32_t find_rec(char *name)
+{
+ char *p = (char *)obuf;
+ uint32_t n;
+ for (n = 2; *p; n++) {
+ if (strcmp(p, name) == 0)
+ return n;
+ p += strlen(p)+1;
+ }
+ if (p+strlen(name)+1 >= (char *)obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ strcpy(p, name);
+ return n;
+}
+
+static void libdef_rec(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_recdef) {
+ char *q;
+ uint32_t n;
+ for (; recffid+1 < ffid; recffid++)
+ fprintf(ctx->fp, ",\n0");
+ recffid = ffid;
+ if (*p == '.') p = funcname;
+ q = strchr(p, ' ');
+ if (q) *q++ = '\0';
+ n = find_rec(p);
+ if (q)
+ fprintf(ctx->fp, ",\n0x%02x00+(%s)", n, q);
+ else
+ fprintf(ctx->fp, ",\n0x%02x00", n);
+ }
+}
+
+static void memcpy_endian(void *dst, void *src, size_t n)
+{
+ union { uint8_t b; uint32_t u; } host_endian;
+ host_endian.u = 1;
+ if (host_endian.b == LJ_ENDIAN_SELECT(1, 0)) {
+ memcpy(dst, src, n);
+ } else {
+ size_t i;
+ for (i = 0; i < n; i++)
+ ((uint8_t *)dst)[i] = ((uint8_t *)src)[n-i-1];
+ }
+}
+
+static void libdef_push(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ int len = (int)strlen(p);
+ if (*p == '"') {
+ if (len > 1 && p[len-1] == '"') {
+ p[len-1] = '\0';
+ libdef_name(p+1, LIBINIT_STRING);
+ return;
+ }
+ } else if (*p >= '0' && *p <= '9') {
+ char *ep;
+ double d = strtod(p, &ep);
+ if (*ep == '\0') {
+ if (optr+1+sizeof(double) > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_NUMBER;
+ memcpy_endian(optr, &d, sizeof(double));
+ optr += sizeof(double);
+ return;
+ }
+ } else if (!strcmp(p, "lastcl")) {
+ if (optr+1 > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_LASTCL;
+ return;
+ } else if (len > 4 && !strncmp(p, "top-", 4)) {
+ if (optr+2 > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_COPY;
+ *optr++ = (uint8_t)atoi(p+4);
+ return;
+ }
+ fprintf(stderr, "Error: bad value for %sPUSH(%s)\n", LIBDEF_PREFIX, p);
+ exit(1);
+ }
+}
+
+static void libdef_set(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ if (p[0] == '!' && p[1] == '\0') p[0] = '\0'; /* Set env. */
+ libdef_name(p, LIBINIT_STRING);
+ *optr++ = LIBINIT_SET;
+ obuf[2]++; /* Bump hash table size. */
+ }
+}
+
+static void libdef_regfunc(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(ctx); UNUSED(p);
+ regfunc = arg;
+}
+
+typedef void (*LibDefFunc)(BuildCtx *ctx, char *p, int arg);
+
+typedef struct LibDefHandler {
+ const char *suffix;
+ const char *stop;
+ const LibDefFunc func;
+ const int arg;
+} LibDefHandler;
+
+static const LibDefHandler libdef_handlers[] = {
+ { "MODULE_", " \t\r\n", libdef_module, 0 },
+ { "CF(", ")", libdef_func, LIBINIT_CF },
+ { "ASM(", ")", libdef_func, LIBINIT_ASM },
+ { "ASM_(", ")", libdef_func, LIBINIT_ASM_ },
+ { "REC(", ")", libdef_rec, 0 },
+ { "PUSH(", ")", libdef_push, 0 },
+ { "SET(", ")", libdef_set, 0 },
+ { "NOREGUV", NULL, libdef_regfunc, REGFUNC_NOREGUV },
+ { "NOREG", NULL, libdef_regfunc, REGFUNC_NOREG },
+ { NULL, NULL, (LibDefFunc)0, 0 }
+};
+
+/* Emit C source code for library function definitions. */
+void emit_lib(BuildCtx *ctx)
+{
+ const char *fname;
+
+ if (ctx->mode == BUILD_ffdef || ctx->mode == BUILD_libdef ||
+ ctx->mode == BUILD_recdef)
+ fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
+ else if (ctx->mode == BUILD_vmdef)
+ fprintf(ctx->fp, "ffnames = {\n[0]=\"Lua\",\n\"C\",\n");
+ if (ctx->mode == BUILD_recdef)
+ fprintf(ctx->fp, "static const uint16_t recff_idmap[] = {\n0,\n0x0100");
+ recffid = ffid = FF_C+1;
+ ffasmfunc = 0;
+
+ while ((fname = *ctx->args++)) {
+ char buf[256]; /* We don't care about analyzing lines longer than that. */
+ FILE *fp;
+ if (fname[0] == '-' && fname[1] == '\0') {
+ fp = stdin;
+ } else {
+ fp = fopen(fname, "r");
+ if (!fp) {
+ fprintf(stderr, "Error: cannot open input file '%s': %s\n",
+ fname, strerror(errno));
+ exit(1);
+ }
+ }
+ modstate = 0;
+ regfunc = REGFUNC_OK;
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ char *p;
+ for (p = buf; (p = strstr(p, LIBDEF_PREFIX)) != NULL; ) {
+ const LibDefHandler *ldh;
+ p += sizeof(LIBDEF_PREFIX)-1;
+ for (ldh = libdef_handlers; ldh->suffix != NULL; ldh++) {
+ size_t n, len = strlen(ldh->suffix);
+ if (!strncmp(p, ldh->suffix, len)) {
+ p += len;
+ n = ldh->stop ? strcspn(p, ldh->stop) : 0;
+ if (!p[n]) break;
+ p[n] = '\0';
+ ldh->func(ctx, p, ldh->arg);
+ p += n+1;
+ break;
+ }
+ }
+ if (ldh->suffix == NULL) {
+ buf[strlen(buf)-1] = '\0';
+ fprintf(stderr, "Error: unknown library definition tag %s%s\n",
+ LIBDEF_PREFIX, p);
+ exit(1);
+ }
+ }
+ }
+ fclose(fp);
+ if (ctx->mode == BUILD_libdef) {
+ libdef_endmodule(ctx);
+ }
+ }
+
+ if (ctx->mode == BUILD_ffdef) {
+ fprintf(ctx->fp, "\n#undef FFDEF\n\n");
+ fprintf(ctx->fp,
+ "#ifndef FF_NUM_ASMFUNC\n#define FF_NUM_ASMFUNC %d\n#endif\n\n",
+ ffasmfunc);
+ } else if (ctx->mode == BUILD_vmdef) {
+ fprintf(ctx->fp, "}\n\n");
+ } else if (ctx->mode == BUILD_bcdef) {
+ int i;
+ fprintf(ctx->fp, "\n};\n\n");
+ fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_mode[] = {\n");
+ fprintf(ctx->fp, "BCDEF(BCMODE)\n");
+ for (i = ffasmfunc-1; i > 0; i--)
+ fprintf(ctx->fp, "BCMODE_FF,\n");
+ fprintf(ctx->fp, "BCMODE_FF\n};\n\n");
+ } else if (ctx->mode == BUILD_recdef) {
+ char *p = (char *)obuf;
+ fprintf(ctx->fp, "\n};\n\n");
+ fprintf(ctx->fp, "static const RecordFunc recff_func[] = {\n"
+ "recff_nyi,\n"
+ "recff_c");
+ while (*p) {
+ fprintf(ctx->fp, ",\nrecff_%s", p);
+ p += strlen(p)+1;
+ }
+ fprintf(ctx->fp, "\n};\n\n");
+ }
+}
+
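[Editor's note, not part of the diff.] emit_lib() drives everything from textual tags found in the library sources: LIBDEF_PREFIX followed by one of the suffixes in libdef_handlers[] (MODULE_, CF(, ASM(, REC(, PUSH(, SET(, ...). The snippet below is a hedged illustration of the kind of source it scans; math_foo is a made-up name and the body is a placeholder, not the real lib_math.c, though the PUSH/SET pair mirrors how math.pi is defined upstream.

    #define LJLIB_MODULE_math            /* MODULE_ tag: starts module "math" */

    LJLIB_CF(math_foo)  LJLIB_REC(.)     /* CF( tag: plain C function; REC( tag
                                         ** names the recorder (here: same name) */
    {
      /* placeholder body, registered as math.foo */
      lua_pushnumber(L, luaL_checknumber(L, 1) * 2.0);
      return 1;
    }

    LJLIB_PUSH(3.14159265358979323846)   /* PUSH( tag: push a number constant */
    LJLIB_SET(pi)                        /* SET( tag: store it as math.pi */

Each tag is routed through libdef_handlers[] above, so the same source feeds the libdef, ffdef, recdef, vmdef and bcdef outputs depending on ctx->mode.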
diff --git a/src/LuaJIT/src/buildvm_mips.dasc b/src/LuaJIT/src/buildvm_mips.dasc
new file mode 100644
index 000000000..9e984189a
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_mips.dasc
@@ -0,0 +1,4223 @@
+|// Low-level VM code for MIPS CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch mips
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r16 // Base of current Lua stack frame.
+|.define KBASE, r17 // Constants of current Lua function.
+|.define PC, r18 // Next PC.
+|.define DISPATCH, r19 // Opcode dispatch table.
+|.define LREG, r20 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8.
+|// NYI: r22 currently unused.
+|
+|.define JGL, r30 // On-trace: global_State + 32768.
+|
+|// Constants for type-comparisons, stores and conversions. C callee-save.
+|.define TISNIL, r30
+|.define TOBIT, f30 // 2^52 + 2^51.
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r23 // Callee-save.
+|.define RB, r8
+|.define RC, r9
+|.define RD, r10
+|.define INS, r11
+|
+|.define AT, r1 // Assembler temporary.
+|.define TMP0, r12
+|.define TMP1, r13
+|.define TMP2, r14
+|.define TMP3, r15
+|
+|// Calling conventions.
+|.define CFUNCADDR, r25
+|.define CARG1, r4
+|.define CARG2, r5
+|.define CARG3, r6
+|.define CARG4, r7
+|
+|.define CRET1, r2
+|.define CRET2, r3
+|
+|.define FARG1, f12
+|.define FARG2, f14
+|
+|.define FRET1, f0
+|.define FRET2, f2
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.define CFRAME_SPACE, 112 // Delta for sp.
+|
+|.define SAVE_ERRF, 124(sp) // 32 bit C frame info.
+|.define SAVE_NRES, 120(sp)
+|.define SAVE_CFRAME, 116(sp)
+|.define SAVE_L, 112(sp)
+|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter.
+|.define SAVE_GPR_, 72 // .. 72+10*4: 32 bit GPR saves.
+|.define SAVE_FPR_, 24 // .. 24+6*8: 64 bit FPR saves.
+|.define SAVE_PC, 20(sp)
+|.define ARG5, 16(sp)
+|.define CSAVE_4, 12(sp)
+|.define CSAVE_3, 8(sp)
+|.define CSAVE_2, 4(sp)
+|.define CSAVE_1, 0(sp)
+|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by callee.
+|
+|.define ARG5_OFS, 16
+|.define SAVE_MULTRES, ARG5
+|
+|.macro saveregs
+| addiu sp, sp, -CFRAME_SPACE
+| sw ra, SAVE_GPR_+9*4(sp)
+| sw r30, SAVE_GPR_+8*4(sp)
+| sdc1 f30, SAVE_FPR_+5*8(sp)
+| sw r23, SAVE_GPR_+7*4(sp)
+| sw r22, SAVE_GPR_+6*4(sp)
+| sdc1 f28, SAVE_FPR_+4*8(sp)
+| sw r21, SAVE_GPR_+5*4(sp)
+| sw r20, SAVE_GPR_+4*4(sp)
+| sdc1 f26, SAVE_FPR_+3*8(sp)
+| sw r19, SAVE_GPR_+3*4(sp)
+| sw r18, SAVE_GPR_+2*4(sp)
+| sdc1 f24, SAVE_FPR_+2*8(sp)
+| sw r17, SAVE_GPR_+1*4(sp)
+| sw r16, SAVE_GPR_+0*4(sp)
+| sdc1 f22, SAVE_FPR_+1*8(sp)
+| sdc1 f20, SAVE_FPR_+0*8(sp)
+|.endmacro
+|
+|.macro restoreregs_ret
+| lw ra, SAVE_GPR_+9*4(sp)
+| lw r30, SAVE_GPR_+8*4(sp)
+| ldc1 f30, SAVE_FPR_+5*8(sp)
+| lw r23, SAVE_GPR_+7*4(sp)
+| lw r22, SAVE_GPR_+6*4(sp)
+| ldc1 f28, SAVE_FPR_+4*8(sp)
+| lw r21, SAVE_GPR_+5*4(sp)
+| lw r20, SAVE_GPR_+4*4(sp)
+| ldc1 f26, SAVE_FPR_+3*8(sp)
+| lw r19, SAVE_GPR_+3*4(sp)
+| lw r18, SAVE_GPR_+2*4(sp)
+| ldc1 f24, SAVE_FPR_+2*8(sp)
+| lw r17, SAVE_GPR_+1*4(sp)
+| lw r16, SAVE_GPR_+0*4(sp)
+| ldc1 f22, SAVE_FPR_+1*8(sp)
+| ldc1 f20, SAVE_FPR_+0*8(sp)
+| jr ra
+| addiu sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; .long 0xf0f0f0f0; .endmacro
+|
+|// Macros to mark delay slots.
+|.macro ., a; a; .endmacro
+|.macro ., a,b; a,b; .endmacro
+|.macro ., a,b,c; a,b,c; .endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Endian-specific defines.
+|.define FRAME_PC, LJ_ENDIAN_SELECT(-4,-8)
+|.define FRAME_FUNC, LJ_ENDIAN_SELECT(-8,-4)
+|.define HI, LJ_ENDIAN_SELECT(4,0)
+|.define LO, LJ_ENDIAN_SELECT(0,4)
+|.define OFS_RD, LJ_ENDIAN_SELECT(2,0)
+|.define OFS_RA, LJ_ENDIAN_SELECT(1,2)
+|.define OFS_OP, LJ_ENDIAN_SELECT(0,3)
+|
+|// Instruction decode.
+|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP4a, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP4b, dst; sll dst, dst, 2; .endmacro
+|.macro decode_RC4a, dst, ins; srl dst, ins, 14; .endmacro
+|.macro decode_RC4b, dst; andi dst, dst, 0x3fc; .endmacro
+|.macro decode_RD4b, dst; sll dst, dst, 2; .endmacro
+|.macro decode_RA8a, dst, ins; srl dst, ins, 5; .endmacro
+|.macro decode_RA8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RB8a, dst, ins; srl dst, ins, 21; .endmacro
+|.macro decode_RB8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RD8a, dst, ins; srl dst, ins, 16; .endmacro
+|.macro decode_RD8b, dst; sll dst, dst, 3; .endmacro
+|.macro decode_RDtoRC8, dst, src; andi dst, src, 0x7f8; .endmacro
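+|// Each decode is split into an a/b pair so the steps can be interleaved with
+|// loads; *4 results index the 4-byte dispatch table, *8 results index 8-byte
+|// stack slots.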
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lw INS, 0(PC)
+| addiu PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT2
+| decode_OP4a TMP1, INS
+| decode_OP4b TMP1
+| addu TMP0, DISPATCH, TMP1
+| decode_RD8a RD, INS
+| lw AT, 0(TMP0)
+| decode_RA8a RA, INS
+| decode_RD8b RD
+| jr AT
+| decode_RA8b RA
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
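+|// Every handler ends with ins_next: the address of the next opcode's handler
+|// is loaded from DISPATCH and jumped to, with the last operand decode placed
+|// in the jr delay slot.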
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| lw PC, LFUNC:RB->pc
+| lw INS, 0(PC)
+| addiu PC, PC, 4
+| decode_OP4a TMP1, INS
+| decode_RA8a RA, INS
+| decode_OP4b TMP1
+| decode_RA8b RA
+| addu TMP0, DISPATCH, TMP1
+| lw TMP0, 0(TMP0)
+| jr TMP0
+| addu RA, RA, BASE
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| sw PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|.macro branch_RD
+| srl TMP0, RD, 1
+| lui AT, (-(BCBIAS_J*4 >> 16) & 65535)
+| addu TMP0, TMP0, AT
+| addu PC, PC, TMP0
+|.endmacro
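+|// RD arrives as the jump operand *8, biased by BCBIAS_J; branch_RD adds
+|// (jump - BCBIAS_J)*4 to PC.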
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch))
+#define DISPATCH_GOT(name) (GG_DISP2GOT + 4*LJ_GOT_##name)
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|.macro load_got, func
+| lw CFUNCADDR, DISPATCH_GOT(func)(DISPATCH)
+|.endmacro
+|// Much faster. Sadly, there's no easy way to force the required code layout.
+|// .macro call_intern, func; bal extern func; .endmacro
+|.macro call_intern, func; jalr CFUNCADDR; .endmacro
+|.macro call_extern; jalr CFUNCADDR; .endmacro
+|.macro jmp_extern; jr CFUNCADDR; .endmacro
+|
+|.macro hotcheck, delta, target
+| srl TMP1, PC, 1
+| andi TMP1, TMP1, 126
+| addu TMP1, TMP1, DISPATCH
+| lhu TMP2, GG_DISP2HOT(TMP1)
+| addiu TMP2, TMP2, -delta
+| bltz TMP2, target
+|. sh TMP2, GG_DISP2HOT(TMP1)
+|.endmacro
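+|// hotcheck hashes the PC into one of 64 halfword counters at GG_DISP2HOT and
+|// branches to the target once a counter underflows.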
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL, ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; sw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp, target
+| lw tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| andi mark, mark, ~LJ_GC_BLACK & 255 // black2gray(tab)
+| sw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| sb mark, tab->marked
+| b target
+|. sw tmp, tab->gclist
+|.endmacro
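+|// The table is turned gray again and linked onto gc.grayagain, so the GC
+|// rescans it after the pending store.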
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andi AT, PC, FRAME_P
+ | beqz AT, ->cont_dispatch
+ |. li TMP1, LJ_TTRUE
+ |
+ | // Return from pcall or xpcall fast func.
+ | lw PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | move BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | sw TMP1, FRAME_PC(RA) // Prepend true to results.
+ | addiu RA, RA, -8
+ |
+ |->vm_returnc:
+ | andi TMP0, PC, FRAME_TYPE
+ | addiu RD, RD, 8 // RD = (nresults+1)*8.
+ | beqz TMP0, ->BC_RET_Z // Handle regular return to Lua.
+ |. move MULTRES, RD
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | li TMP2, -8
+ | xori AT, TMP0, FRAME_C
+ | and TMP2, PC, TMP2
+ | bnez AT, ->vm_returnp
+ | subu TMP2, BASE, TMP2 // TMP2 = previous base.
+ |
+ | addiu TMP1, RD, -8
+ | sw TMP2, L->base
+ | li_vmstate C
+ | lw TMP2, SAVE_NRES
+ | addiu BASE, BASE, -8
+ | st_vmstate
+ | beqz TMP1, >2
+ |. sll TMP2, TMP2, 3
+ |1:
+ | addiu TMP1, TMP1, -8
+ | ldc1 f0, 0(RA)
+ | addiu RA, RA, 8
+ | sdc1 f0, 0(BASE)
+ | bnez TMP1, <1
+ |. addiu BASE, BASE, 8
+ |
+ |2:
+ | bne TMP2, RD, >6
+ |3:
+ |. sw BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | lw TMP0, SAVE_CFRAME // Restore previous C frame.
+ | move CRET1, r0 // Ok return status for vm_pcall.
+ | sw TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs_ret
+ |
+ |6:
+ | lw TMP1, L->maxstack
+ | slt AT, TMP2, RD
+ | bnez AT, >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ |. slt AT, BASE, TMP1
+ | beqz AT, >8
+ |. nop
+ | sw TISNIL, HI(BASE)
+ | addiu RD, RD, 8
+ | b <2
+ |. addiu BASE, BASE, 8
+ |
+ |7: // Less results wanted.
+ | subu TMP0, RD, TMP2
+ | subu TMP0, BASE, TMP0 // Either keep top or shrink it.
+ | b <3
+ |. movn BASE, TMP0, TMP2 // LUA_MULTRET+1 case?
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | load_got lj_state_growstack
+ | move MULTRES, RD
+ | move CARG2, TMP2
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw TMP2, SAVE_NRES
+ | lw BASE, L->top // Need the (realloced) L->top in BASE.
+ | move RD, MULTRES
+ | b <2
+ |. sll TMP2, TMP2, 3
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | move sp, CARG1
+ | move CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | lw L, SAVE_L
+ | li TMP0, ~LJ_VMST_C
+ | lw GL:TMP1, L->glref
+ | b ->vm_leave_unw
+ |. sw TMP0, GL:TMP1->vmstate
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | li AT, -4
+ | and sp, CARG1, AT
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | lw L, SAVE_L
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | li TISNIL, LJ_TNIL
+ | lw BASE, L->base
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mtc1 TMP3, TOBIT
+ | li TMP1, LJ_TFALSE
+ | li_vmstate INTERP
+ | lw PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | cvt.d.s TOBIT, TOBIT
+ | addiu RA, BASE, -8 // Results start at BASE-8.
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw TMP1, HI(RA) // Prepend false to error message.
+ | st_vmstate
+ | b ->vm_returnc
+ |. li RD, 16 // 2 results: false + error message.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | b >2
+ |. li CARG2, LUA_MINSTACK
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | addu RC, BASE, RC
+ | subu RA, RA, BASE
+ | sw BASE, L->base
+ | addiu PC, PC, 4 // Must point after first instruction.
+ | sw RC, L->top
+ | srl CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | load_got lj_state_growstack
+ | sw PC, SAVE_PC
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw BASE, L->base
+ | lw RC, L->top
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | subu RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | move L, CARG1
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | move BASE, CARG2
+ | lbu TMP1, L->status
+ | sw L, SAVE_L
+ | li PC, FRAME_CP
+ | addiu TMP0, sp, CFRAME_RESUME
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw r0, SAVE_NRES
+ | sw r0, SAVE_ERRF
+ | sw TMP0, L->cframe
+ | sw r0, SAVE_CFRAME
+ | beqz TMP1, >3
+ |. sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ |
+ | // Resume after yield (like a return).
+ | move RA, BASE
+ | lw BASE, L->base
+ | lw TMP1, L->top
+ | lw PC, FRAME_PC(BASE)
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | subu RD, TMP1, BASE
+ | mtc1 TMP3, TOBIT
+ | sb r0, L->status
+ | cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | addiu RD, RD, 8
+ | st_vmstate
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | beqz TMP0, ->BC_RET_Z
+ |. li TISNIL, LJ_TNIL
+ | b ->vm_return
+ |. nop
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | sw CARG4, SAVE_ERRF
+ | b >1
+ |. li PC, FRAME_CP
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | lw TMP1, L:CARG1->cframe
+ | sw CARG3, SAVE_NRES
+ | move L, CARG1
+ | sw CARG1, SAVE_L
+ | move BASE, CARG2
+ | sw sp, L->cframe // Add our C frame to cframe chain.
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sw TMP1, SAVE_CFRAME
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | lw TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | lw TMP1, L->top
+ | mtc1 TMP3, TOBIT
+ | addu PC, PC, BASE
+ | subu NARGS8:RC, TMP1, BASE
+ | subu PC, PC, TMP2 // PC = frame delta + frame type
+ | cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | li TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | lw TMP0, FRAME_PC(BASE)
+ | li AT, LJ_TFUNC
+ | bne TMP0, AT, ->vmeta_call
+ |. lw LFUNC:RB, FRAME_FUNC(BASE)
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | move L, CARG1
+ | lw TMP0, L:CARG1->stack
+ | sw CARG1, SAVE_L
+ | lw TMP1, L->top
+ | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | subu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | lw TMP1, L->cframe
+ | sw sp, L->cframe // Add our C frame to cframe chain.
+ | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | sw r0, SAVE_ERRF // No error function.
+ | move CFUNCADDR, CARG4
+ | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ |. sw TMP1, SAVE_CFRAME
+ | move BASE, CRET1
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | li PC, FRAME_CP
+ | bnez CRET1, <3 // Else continue with the call.
+ |. addiu DISPATCH, DISPATCH, GG_G2DISP
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |. nop
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | lw TMP0, -16+LO(BASE) // Continuation.
+ | move RB, BASE
+ | move BASE, TMP2 // Restore caller BASE.
+ | lw LFUNC:TMP1, FRAME_FUNC(TMP2)
+#if LJ_HASFFI
+ | sltiu AT, TMP0, 2
+#endif
+ | lw PC, -16+HI(RB) // Restore PC from [cont|PC].
+ | addu TMP2, RA, RD
+ | lw TMP1, LFUNC:TMP1->pc
+#if LJ_HASFFI
+ | bnez AT, >1
+#endif
+ |. sw TISNIL, -8+HI(TMP2) // Ensure one valid arg.
+ | // BASE = base, RA = resultptr, RB = meta base
+ | jr TMP0 // Jump to continuation.
+ |. lw KBASE, PC2PROTO(k)(TMP1)
+ |
+#if LJ_HASFFI
+ |1:
+ | bnez TMP0, ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ |. addiu TMP1, RB, -16
+ | b ->vm_call_tail
+ |. subu RC, TMP1, BASE
+#endif
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lw INS, -4(PC)
+ | addiu CARG2, RB, -16
+ | ldc1 f0, 0(RA)
+ | decode_RB8a MULTRES, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b MULTRES
+ | decode_RA8b RA
+ | addu TMP1, BASE, MULTRES
+ | sw BASE, L->base
+ | subu CARG3, CARG2, TMP1
+ | bne TMP1, CARG2, ->BC_CAT_Z
+ |. sdc1 f0, 0(CARG2)
+ | addu RA, BASE, RA
+ | b ->cont_nop
+ |. sdc1 f0, 0(RA)
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP0, HI(CARG3)
+ |
+ |->vmeta_tgets:
+ | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | sw TAB:RB, LO(CARG2)
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sw TMP0, HI(CARG2)
+ | li TMP1, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP1, HI(CARG3)
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ | mtc1 TMP0, f0
+ | cvt.d.w f0, f0
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | sdc1 f0, 0(CARG3)
+ |
+ |->vmeta_tgetv:
+ |1:
+ | load_got lj_meta_tget
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | beqz CRET1, >3
+ |. addiu TMP1, BASE, -FRAME_CONT
+ | ldc1 f0, 0(CRET1)
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | lw BASE, L->top
+ | sw PC, -16+HI(BASE) // [cont|PC]
+ | subu PC, BASE, TMP1
+ | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 16 // 2 args for func(t, k).
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP0, HI(CARG3)
+ |
+ |->vmeta_tsets:
+ | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | sw TAB:RB, LO(CARG2)
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sw TMP0, HI(CARG2)
+ | li TMP1, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP1, HI(CARG3)
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ | mtc1 TMP0, f0
+ | cvt.d.w f0, f0
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | sdc1 f0, 0(CARG3)
+ |
+ |->vmeta_tsetv:
+ |1:
+ | load_got lj_meta_tset
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | beqz CRET1, >3
+ |. ldc1 f0, 0(RA)
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | ins_next1
+ | sdc1 f0, 0(CRET1)
+ | ins_next2
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | addiu TMP1, BASE, -FRAME_CONT
+ | lw BASE, L->top
+ | sw PC, -16+HI(BASE) // [cont|PC]
+ | subu PC, BASE, TMP1
+ | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | sdc1 f0, 16(BASE) // Copy value to third argument.
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | // CARG2, CARG3 are already set by BC_ISLT/BC_ISGE/BC_ISLE/BC_ISGT.
+ | load_got lj_meta_comp
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | decode_OP1 CARG4, INS
+ | call_intern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | sltiu AT, CRET1, 2
+ | beqz AT, ->vmeta_binop
+ | negu TMP2, CRET1
+ |4:
+ | lhu RD, OFS_RD(PC)
+ | addiu PC, PC, 4
+ | lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sll RD, RD, 2
+ | addu RD, RD, TMP1
+ | and RD, RD, TMP2
+ | addu PC, PC, RD
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lbu TMP1, -4+OFS_RA(PC)
+ | ldc1 f0, 0(RA)
+ | sll TMP1, TMP1, 3
+ | addu TMP1, BASE, TMP1
+ | b ->cont_nop
+ |. sdc1 f0, 0(TMP1)
+ |
+ |->cont_condt: // RA = resultptr
+ | lw TMP0, HI(RA)
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. negu TMP2, AT // Branch if result is true.
+ |
+ |->cont_condf: // RA = resultptr
+ | lw TMP0, HI(RA)
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. addiu TMP2, AT, -1 // Branch if result is false.
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | load_got lj_meta_equal
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+ |
+ |->vmeta_equal_cd:
+#if LJ_HASFFI
+ | load_got lj_meta_equal_cd
+ | move CARG2, INS
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+#endif
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_unm:
+ | move CARG4, CARG3
+ |
+ |->vmeta_arith:
+ | load_got lj_meta_arith
+ | decode_OP1 TMP0, INS
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | move CARG2, RA
+ | sw TMP0, ARG5
+ | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | beqz CRET1, ->cont_nop
+ |. nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | subu TMP1, CRET1, BASE
+ | sw PC, -16+HI(CRET1) // [cont|PC]
+ | move TMP2, BASE
+ | addiu PC, TMP1, FRAME_CONT
+ | move BASE, CRET1
+ | b ->vm_call_dispatch
+ |. li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ |
+ |->vmeta_len:
+ | // CARG2 already set by BC_LEN.
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | move MULTRES, CARG1
+#endif
+ | load_got lj_meta_len
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_len // (lua_State *L, TValue *o)
+ |. move CARG1, L
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | bnez CRET1, ->vmeta_binop // Binop call for compatibility.
+ |. nop
+ | b ->BC_LEN_Z
+ |. move CARG1, MULTRES
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+ |. nop
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sw TMP2, L->base // This is the caller's base!
+ | addiu CARG2, BASE, -8
+ | sw PC, SAVE_PC
+ | addu CARG3, BASE, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | addiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sw BASE, L->base
+ | addiu CARG2, RA, -8
+ | sw PC, SAVE_PC
+ | addu CARG3, RA, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | lw TMP1, FRAME_PC(BASE)
+ | lw LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | b ->BC_CALLT_Z
+ |. addiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | load_got lj_meta_for
+ | sw BASE, L->base
+ | move CARG2, RA
+ | sw PC, SAVE_PC
+ | move MULTRES, INS
+ | call_intern lj_meta_for // (lua_State *L, TValue *base)
+ |. move CARG1, L
+#if LJ_HASJIT
+ | decode_OP1 TMP0, MULTRES
+ | li AT, BC_JFORI
+#endif
+ | decode_RA8a RA, MULTRES
+ | decode_RD8a RD, MULTRES
+ | decode_RA8b RA
+#if LJ_HASJIT
+ | beq TMP0, AT, =>BC_JFORI
+ |. decode_RD8b RD
+ | b =>BC_FORI
+ |. nop
+#else
+ | b =>BC_FORI
+ |. decode_RD8b RD
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | beqz NARGS8:RC, ->fff_fallback
+ |. lw CARG3, HI(BASE)
+ | lw CARG1, LO(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG3, HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. lw CARG4, 8+HI(BASE)
+ | lw CARG1, LO(BASE)
+ | lw CARG2, 8+LO(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_n, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | lw CARG3, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. ldc1 FARG1, 0(BASE)
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG3, HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. lw CARG4, 8+HI(BASE)
+ | ldc1 FARG1, 0(BASE)
+ | ldc1 FARG2, 8(BASE)
+ | sltiu TMP0, CARG3, LJ_TISNUM
+ | sltiu TMP1, CARG4, LJ_TISNUM
+ | and TMP0, TMP0, TMP1
+ | beqz TMP0, ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1 and has delay slot!
+ |.macro ffgccheck
+ | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | subu AT, TMP0, TMP1
+ | bgezal AT, ->fff_gcstep
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | sltiu AT, CARG3, LJ_TISTRUECOND
+ | beqz AT, ->fff_fallback
+ |. addiu RA, BASE, -8
+ | lw PC, FRAME_PC(BASE)
+ | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | addu TMP2, RA, NARGS8:RC
+ | sw CARG3, HI(RA)
+ | addiu TMP1, BASE, 8
+ | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument.
+ |. sw CARG1, LO(RA)
+ |1:
+ | ldc1 f0, 0(TMP1)
+ | sdc1 f0, -8(TMP1)
+ | bne TMP1, TMP2, <1
+ |. addiu TMP1, TMP1, 8
+ | b ->fff_res
+ |. nop
+ |
+ |.ffunc type
+ | lw CARG3, HI(BASE)
+ | li TMP1, LJ_TISNUM
+ | beqz NARGS8:RC, ->fff_fallback
+ |. sltiu TMP0, CARG3, LJ_TISNUM
+ | movz TMP1, CARG3, TMP0
+ | not TMP1, TMP1
+ | sll TMP1, TMP1, 3
+ | addu TMP1, CFUNC:RB, TMP1
+ | b ->fff_resn
+ |. ldc1 FRET1, CFUNC:TMP1->upvalue
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, >6
+ |. li AT, LJ_TUDATA
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | lw TAB:CARG1, TAB:CARG1->metatable
+ |2:
+ | lw STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beqz TAB:CARG1, ->fff_restv
+ |. li CARG3, LJ_TNIL
+ | lw TMP0, TAB:CARG1->hmask
+ | li CARG3, LJ_TTAB // Use metatable as default result.
+ | lw TMP1, STR:RC->hash
+ | lw NODE:TMP2, TAB:CARG1->node
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | li AT, LJ_TSTR
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | lw CARG4, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | lw NODE:TMP3, NODE:TMP2->next
+ | bne CARG4, AT, >4
+ |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
+ | beq TMP0, STR:RC, >5
+ |. lw TMP1, offsetof(Node, val)+LO(NODE:TMP2)
+ |4:
+ | beqz NODE:TMP3, ->fff_restv // Not found, keep default result.
+ |. move NODE:TMP2, NODE:TMP3
+ | b <3
+ |. nop
+ |5:
+ | beq CARG2, TISNIL, ->fff_restv // Ditto for nil value.
+ |. nop
+ | move CARG3, CARG2 // Return value of mt.__metatable.
+ | b ->fff_restv
+ |. move CARG1, TMP1
+ |
+ |6:
+ | beq CARG3, AT, <1
+ |. sltiu TMP0, CARG3, LJ_TISNUM
+ | li TMP1, LJ_TISNUM
+ | movz TMP1, CARG3, TMP0
+ | not TMP1, TMP1
+ | sll TMP1, TMP1, 2
+ | addu TMP1, DISPATCH, TMP1
+ | b <2
+ |. lw TAB:CARG1, DISPATCH_GL(gcroot[GCROOT_BASEMT])(TMP1)
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, ->fff_fallback
+ |. addiu CARG4, CARG4, -LJ_TTAB
+ | lw TAB:TMP1, TAB:CARG1->metatable
+ | lbu TMP3, TAB:CARG1->marked
+ | or AT, CARG4, TAB:TMP1
+ | bnez AT, ->fff_fallback
+ |. andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | beqz AT, ->fff_restv
+ |. sw TAB:CARG2, TAB:CARG1->metatable
+ | barrierback TAB:CARG1, TMP3, TMP0, ->fff_restv
+ |
+ |.ffunc rawget
+ | lw CARG4, HI(BASE)
+ | sltiu AT, NARGS8:RC, 16
+ | lw TAB:CARG2, LO(BASE)
+ | load_got lj_tab_get
+ | addiu CARG4, CARG4, -LJ_TTAB
+ | or AT, AT, CARG4
+ | bnez AT, ->fff_fallback
+ | addiu CARG3, BASE, 8
+ | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ |. move CARG1, L
+ | // Returns cTValue *.
+ | b ->fff_resn
+ |. ldc1 FRET1, 0(CRET1)
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | lw CARG1, HI(BASE)
+ | xori AT, NARGS8:RC, 8
+ | sltiu CARG1, CARG1, LJ_TISNUM
+ | movn CARG1, r0, AT
+ | beqz CARG1, ->fff_fallback // Exactly one number argument.
+ |. ldc1 FRET1, 0(BASE)
+ | b ->fff_resn
+ |. nop
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | li AT, LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beq CARG3, AT, ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ |. lw TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | sltiu TMP0, CARG3, LJ_TISNUM
+ | sltiu TMP1, TMP1, 1
+ | and TMP0, TMP0, TMP1
+ | beqz TMP0, ->fff_fallback
+ |. sw BASE, L->base // Add frame since C call can throw.
+ | ffgccheck
+ |. sw PC, SAVE_PC // Redundant (but a defined value).
+ | load_got lj_str_fromnum
+ | move CARG1, L
+ | call_intern lj_str_fromnum // (lua_State *L, lua_Number *np)
+ |. move CARG2, BASE
+ | // Returns GCstr *.
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |. move CARG1, CRET1
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc next
+ | lw CARG1, HI(BASE)
+ | lw TAB:CARG2, LO(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. addu TMP2, BASE, NARGS8:RC
+ | li AT, LJ_TTAB
+ | sw TISNIL, HI(TMP2) // Set missing 2nd arg to nil.
+ | bne CARG1, AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+ | load_got lj_tab_next
+ | sw BASE, L->base // Add frame since C call can throw.
+ | sw BASE, L->top // Dummy frame length is ok.
+ | addiu CARG3, BASE, 8
+ | sw PC, SAVE_PC
+ | call_intern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
+ |. move CARG1, L
+ | // Returns 0 at end of traversal.
+ | beqz CRET1, ->fff_restv // End of traversal: return nil.
+ |. li CARG3, LJ_TNIL
+ | ldc1 f0, 8(BASE) // Copy key and value to results.
+ | addiu RA, BASE, -8
+ | ldc1 f2, 16(BASE)
+ | li RD, (2+1)*8
+ | sdc1 f0, 0(RA)
+ | b ->fff_res
+ |. sdc1 f2, 8(RA)
+ |
+ |.ffunc_1 pairs
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | lw TAB:TMP2, TAB:CARG1->metatable
+ | ldc1 f0, CFUNC:RB->upvalue[0]
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | ldc1 f0, CFUNC:RB->upvalue[0]
+#endif
+ |. addiu RA, BASE, -8
+ | sw TISNIL, 8+HI(BASE)
+ | li RD, (3+1)*8
+ | b ->fff_res
+ |. sdc1 f0, 0(RA)
+ |
+ |.ffunc ipairs_aux
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG3, HI(BASE)
+ | lw TAB:CARG1, LO(BASE)
+ | lw CARG4, 8+HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. ldc1 FARG2, 8(BASE)
+ | addiu CARG3, CARG3, -LJ_TTAB
+ | sltiu AT, CARG4, LJ_TISNUM
+ | li TMP0, 1
+ | movn AT, r0, CARG3
+ | mtc1 TMP0, FARG1
+ | beqz AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+ | cvt.w.d FRET1, FARG2
+ | cvt.d.w FARG1, FARG1
+ | lw TMP0, TAB:CARG1->asize
+ | lw TMP1, TAB:CARG1->array
+ | mfc1 TMP2, FRET1
+ | addiu RA, BASE, -8
+ | add.d FARG2, FARG2, FARG1
+ | addiu TMP2, TMP2, 1
+ | sltu AT, TMP2, TMP0
+ | sll TMP3, TMP2, 3
+ | addu TMP3, TMP1, TMP3
+ | beqz AT, >2 // Not in array part?
+ |. sdc1 FARG2, 0(RA)
+ | lw TMP2, HI(TMP3)
+ | ldc1 f0, 0(TMP3)
+ |1:
+ | beq TMP2, TISNIL, ->fff_res // End of iteration, return 0 results.
+ |. li RD, (0+1)*8
+ | li RD, (2+1)*8
+ | b ->fff_res
+ |. sdc1 f0, 8(RA)
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lw TMP0, TAB:CARG1->hmask
+ | load_got lj_tab_getinth
+ | beqz TMP0, ->fff_res
+ |. li RD, (0+1)*8
+ | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
+ |. move CARG2, TMP2
+ | // Returns cTValue * or NULL.
+ | beqz CRET1, ->fff_res
+ |. li RD, (0+1)*8
+ | lw TMP2, HI(CRET1)
+ | b <1
+ |. ldc1 f0, 0(CRET1)
+ |
+ |.ffunc_1 ipairs
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | lw TAB:TMP2, TAB:CARG1->metatable
+ | ldc1 f0, CFUNC:RB->upvalue[0]
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | ldc1 f0, CFUNC:RB->upvalue[0]
+#endif
+ |. addiu RA, BASE, -8
+ | sw r0, 8+HI(BASE)
+ | sw r0, 8+LO(BASE)
+ | li RD, (3+1)*8
+ | b ->fff_res
+ |. sdc1 f0, 0(RA)
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | beqz NARGS8:RC, ->fff_fallback
+ | move TMP2, BASE
+ | addiu BASE, BASE, 8
+ | // Remember active hook before pcall.
+ | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT
+ | andi TMP3, TMP3, 1
+ | addiu PC, TMP3, 8+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |. addiu NARGS8:RC, NARGS8:RC, -8
+ |
+ |.ffunc xpcall
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG4, 8+HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. ldc1 FARG2, 8(BASE)
+ | ldc1 FARG1, 0(BASE)
+ | lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH)
+ | li AT, LJ_TFUNC
+ | move TMP2, BASE
+ | bne CARG4, AT, ->fff_fallback // Traceback must be a function.
+ | addiu BASE, BASE, 16
+ | // Remember active hook before pcall.
+ | srl TMP3, TMP1, HOOK_ACTIVE_SHIFT
+ | sdc1 FARG2, 0(TMP2) // Swap function and traceback.
+ | andi TMP3, TMP3, 1
+ | sdc1 FARG1, 8(TMP2)
+ | addiu PC, TMP3, 16+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |. addiu NARGS8:RC, NARGS8:RC, -16
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | li AT, LJ_TTHREAD
+ | bne CARG3, AT, ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | lw L:CARG1, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ | lbu TMP0, L:CARG1->status
+ | lw TMP1, L:CARG1->cframe
+ | lw CARG2, L:CARG1->top
+ | lw TMP2, L:CARG1->base
+ | addiu TMP3, TMP0, -LUA_YIELD
+ | bgtz TMP3, ->fff_fallback // st > LUA_YIELD?
+ |. xor TMP2, TMP2, CARG2
+ | bnez TMP1, ->fff_fallback // cframe != 0?
+ |. or AT, TMP2, TMP0
+ | lw TMP0, L:CARG1->maxstack
+ | beqz AT, ->fff_fallback // base == top && st == 0?
+ |. lw PC, FRAME_PC(BASE)
+ | addu TMP2, CARG2, NARGS8:RC
+ | sltu AT, TMP0, TMP2
+ | bnez AT, ->fff_fallback // Stack overflow?
+ |. sw PC, SAVE_PC
+ | sw BASE, L->base
+ |1:
+ |.if resume
+ | addiu BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | addiu NARGS8:RC, NARGS8:RC, -8
+ | addiu TMP2, TMP2, -8
+ |.endif
+ | sw TMP2, L:CARG1->top
+ | addu TMP1, BASE, NARGS8:RC
+ | move CARG3, CARG2
+ | sw BASE, L->top
+ |2: // Move args to coroutine.
+ | ldc1 f0, 0(BASE)
+ | sltu AT, BASE, TMP1
+ | beqz AT, >3
+ |. addiu BASE, BASE, 8
+ | sdc1 f0, 0(CARG3)
+ | b <2
+ |. addiu CARG3, CARG3, 8
+ |3:
+ | bal ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ |. move L:RA, L:CARG1
+ | // Returns thread status.
+ |4:
+ | lw TMP2, L:RA->base
+ | sltiu AT, CRET1, LUA_YIELD+1
+ | lw TMP3, L:RA->top
+ | li_vmstate INTERP
+ | lw BASE, L->base
+ | st_vmstate
+ | beqz AT, >8
+ |. subu RD, TMP3, TMP2
+ | lw TMP0, L->maxstack
+ | beqz RD, >6 // No results?
+ |. addu TMP1, BASE, RD
+ | sltu AT, TMP0, TMP1
+ | bnez AT, >9 // Need to grow stack?
+ |. addu TMP3, TMP2, RD
+ | sw TMP2, L:RA->top // Clear coroutine stack.
+ | move TMP1, BASE
+ |5: // Move results from coroutine.
+ | ldc1 f0, 0(TMP2)
+ | addiu TMP2, TMP2, 8
+ | sltu AT, TMP2, TMP3
+ | sdc1 f0, 0(TMP1)
+ | bnez AT, <5
+ |. addiu TMP1, TMP1, 8
+ |6:
+ | andi TMP0, PC, FRAME_TYPE
+ |.if resume
+ | li TMP1, LJ_TTRUE
+ | addiu RA, BASE, -8
+ | sw TMP1, -8+HI(BASE) // Prepend true to results.
+ | addiu RD, RD, 16
+ |.else
+ | move RA, BASE
+ | addiu RD, RD, 8
+ |.endif
+ |7:
+ | sw PC, SAVE_PC
+ | beqz TMP0, ->BC_RET_Z
+ |. move MULTRES, RD
+ | b ->vm_return
+ |. nop
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | addiu TMP3, TMP3, -8
+ | li TMP1, LJ_TFALSE
+ | ldc1 f0, 0(TMP3)
+ | sw TMP3, L:RA->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | sw TMP1, -8+HI(BASE) // Prepend false to results.
+ | addiu RA, BASE, -8
+ | sdc1 f0, 0(BASE) // Copy error message.
+ | b <7
+ |. andi TMP0, PC, FRAME_TYPE
+ |.else
+ | load_got lj_ffh_coroutine_wrap_err
+ | move CARG2, L:RA
+ | call_intern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |. move CARG1, L
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | load_got lj_state_growstack
+ | srl CARG2, RD, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | b <4
+ |. li CRET1, 0
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | lw TMP0, L->cframe
+ | addu TMP1, BASE, NARGS8:RC
+ | sw BASE, L->base
+ | andi TMP0, TMP0, CFRAME_RESUME
+ | sw TMP1, L->top
+ | beqz TMP0, ->fff_fallback
+ |. li CRET1, LUA_YIELD
+ | sw r0, L->cframe
+ | b ->vm_leave_unw
+ |. sb CRET1, L->status
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_n math_abs
+ |. abs.d FRET1, FARG1
+ |->fff_resn:
+ | lw PC, FRAME_PC(BASE)
+ | addiu RA, BASE, -8
+ | b ->fff_res1
+ |. sdc1 FRET1, -8(BASE)
+ |
+ |->fff_restv:
+ | // CARG3/CARG1 = TValue result.
+ | lw PC, FRAME_PC(BASE)
+ | sw CARG3, -8+HI(BASE)
+ | addiu RA, BASE, -8
+ | sw CARG1, -8+LO(BASE)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->vm_return
+ |. move MULTRES, RD
+ | lw INS, -4(PC)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6 // More results expected?
+ |. decode_RA8a TMP0, INS
+ | decode_RA8b TMP0
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | subu BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | addu TMP1, RA, RD
+ | addiu RD, RD, 8
+ | b <5
+ |. sw TISNIL, -8+HI(TMP1)
+ |
+ |.macro math_extern, func
+ |->ff_math_ .. func:
+ | lw CARG3, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. load_got func
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. nop
+ | call_extern
+ |. ldc1 FARG1, 0(BASE)
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ |. load_got func
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |.macro math_round, func
+ | .ffunc_n math_ .. func
+ |. nop
+ | bal ->vm_ .. func
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ | math_extern log
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.ffunc_n math_sqrt
+ |. sqrt.d FRET1, FARG1
+ | b ->fff_resn
+ |. nop
+ |
+ |->ff_math_deg:
+ |.ffunc_n math_rad
+ |. ldc1 FARG2, CFUNC:RB->upvalue[0]
+ | b ->fff_resn
+ |. mul.d FRET1, FARG1, FARG2
+ |
+ |.ffunc_nn math_ldexp
+ | cvt.w.d FARG2, FARG2
+ | load_got ldexp
+ | mfc1 CARG3, FARG2
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |
+ |.ffunc_n math_frexp
+ | load_got frexp
+ | lw PC, FRAME_PC(BASE)
+ | call_extern
+ |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | addiu RA, BASE, -8
+ | mtc1 TMP1, FARG2
+ | sdc1 FRET1, 0(RA)
+ | cvt.d.w FARG2, FARG2
+ | sdc1 FARG2, 8(RA)
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.ffunc_n math_modf
+ | load_got modf
+ | lw PC, FRAME_PC(BASE)
+ | call_extern
+ |. addiu CARG3, BASE, -8
+ | addiu RA, BASE, -8
+ | sdc1 FRET1, 0(BASE)
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.macro math_minmax, name, ismax
+ |->ff_ .. name:
+ | lw CARG3, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. ldc1 FRET1, 0(BASE)
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. addu TMP2, BASE, NARGS8:RC
+ | addiu TMP1, BASE, 8
+ | beq TMP1, TMP2, ->fff_resn
+ |1:
+ |. lw CARG3, HI(TMP1)
+ | ldc1 FARG1, 0(TMP1)
+ | addiu TMP1, TMP1, 8
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |.if ismax
+ |. c.olt.d FARG1, FRET1
+ |.else
+ |. c.olt.d FRET1, FARG1
+ |.endif
+ | bne TMP1, TMP2, <1
+ |. movf.d FRET1, FARG1
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ | math_minmax math_min, 0
+ | math_minmax math_max, 1
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc_1 string_len
+ | li AT, LJ_TSTR
+ | bne CARG3, AT, ->fff_fallback
+ |. nop
+ | b ->fff_resi
+ |. lw CRET1, STR:CARG1->len
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | lw CARG3, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | xori AT, NARGS8:RC, 8
+ | addiu CARG3, CARG3, -LJ_TSTR
+ | or AT, AT, CARG3
+ | bnez AT, ->fff_fallback // Need exactly 1 string argument.
+ |. nop
+ | lw TMP0, STR:CARG1->len
+ | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | addiu RA, BASE, -8
+ | sltu RD, r0, TMP0
+ | mtc1 TMP1, f0
+ | addiu RD, RD, 1
+ | cvt.d.w f0, f0
+ | lw PC, FRAME_PC(BASE)
+ | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8
+ | b ->fff_res
+ |. sdc1 f0, 0(RA)
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | lw CARG3, HI(BASE)
+ | ldc1 FARG1, 0(BASE)
+ | li AT, 8
+ | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
+ |. sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. li CARG3, 1
+ | cvt.w.d FARG1, FARG1
+ | addiu CARG2, sp, ARG5_OFS
+ | mfc1 TMP0, FARG1
+ | sltiu AT, TMP0, 256
+ | beqz AT, ->fff_fallback
+ |. sw TMP0, ARG5
+ |->fff_newstr:
+ | load_got lj_str_new
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_str_new // (lua_State *L, char *str, size_t l)
+ |. move CARG1, L
+ | // Returns GCstr *.
+ | lw BASE, L->base
+ | move CARG1, CRET1
+ | b ->fff_restv
+ |. li CARG3, LJ_TSTR
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | addiu AT, NARGS8:RC, -16
+ | lw CARG3, 16+HI(BASE)
+ | ldc1 f0, 16(BASE)
+ | lw TMP0, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | bltz AT, ->fff_fallback
+ | lw CARG2, 8+HI(BASE)
+ | ldc1 f2, 8(BASE)
+ | beqz AT, >1
+ |. li CARG4, -1
+ | cvt.w.d f0, f0
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. mfc1 CARG4, f0
+ |1:
+ | sltiu AT, CARG2, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | cvt.w.d f2, f2
+ | bne TMP0, AT, ->fff_fallback
+ |. lw CARG2, STR:CARG1->len
+ | mfc1 CARG3, f2
+ | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end
+ | slt AT, CARG4, r0
+ | addiu TMP0, CARG2, 1
+ | addu TMP1, CARG4, TMP0
+ | slt TMP3, CARG3, r0
+ | movn CARG4, TMP1, AT // if (end < 0) end += len+1
+ | addu TMP1, CARG3, TMP0
+ | movn CARG3, TMP1, TMP3 // if (start < 0) start += len+1
+ | li TMP2, 1
+ | slt AT, CARG4, r0
+ | slt TMP3, r0, CARG3
+ | movn CARG4, r0, AT // if (end < 0) end = 0
+ | movz CARG3, TMP2, TMP3 // if (start < 1) start = 1
+ | slt AT, CARG2, CARG4
+ | movn CARG4, CARG2, AT // if (end > len) end = len
+ | addu CARG2, STR:CARG1, CARG3
+ | subu CARG3, CARG4, CARG3 // len = end - start
+ | addiu CARG2, CARG2, sizeof(GCstr)-1
+ | bgez CARG3, ->fff_newstr
+ |. addiu CARG3, CARG3, 1 // len++
+ |->fff_emptystr: // Return empty string.
+ | addiu STR:CARG1, DISPATCH, DISPATCH_GL(strempty)
+ | b ->fff_restv
+ |. li CARG3, LJ_TSTR
+ |
+ |.ffunc string_rep // Only handle the 1-char case inline.
+ | ffgccheck
+ | lw TMP0, HI(BASE)
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG4, 8+HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | addiu TMP0, TMP0, -LJ_TSTR
+ | ldc1 f0, 8(BASE)
+ | or AT, AT, TMP0
+ | bnez AT, ->fff_fallback
+ |. sltiu AT, CARG4, LJ_TISNUM
+ | cvt.w.d f0, f0
+ | beqz AT, ->fff_fallback
+ |. lw TMP0, STR:CARG1->len
+ | mfc1 CARG3, f0
+ | lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | li AT, 1
+ | blez CARG3, ->fff_emptystr // Count <= 0?
+ |. sltu AT, AT, TMP0
+ | beqz TMP0, ->fff_emptystr // Zero length string?
+ |. sltu TMP0, TMP1, CARG3
+ | or AT, AT, TMP0
+ | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | bnez AT, ->fff_fallback // Fallback for > 1-char strings.
+ |. lbu TMP0, STR:CARG1[1]
+ | addu TMP2, CARG2, CARG3
+ |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
+ | addiu TMP2, TMP2, -1
+ | sltu AT, CARG2, TMP2
+ | bnez AT, <1
+ |. sb TMP0, 0(TMP2)
+ | b ->fff_newstr
+ |. nop
+ |
+ |.ffunc string_reverse
+ | ffgccheck
+ | lw CARG3, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | bne CARG3, AT, ->fff_fallback
+ |. lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | lw CARG3, STR:CARG1->len
+ | addiu CARG1, STR:CARG1, #STR
+ | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | sltu AT, TMP1, CARG3
+ | bnez AT, ->fff_fallback
+ |. addu TMP3, CARG1, CARG3
+ | addu CARG4, CARG2, CARG3
+ |1: // Reverse string copy.
+ | lbu TMP1, 0(CARG1)
+ | sltu AT, CARG1, TMP3
+ | beqz AT, ->fff_newstr
+ |. addiu CARG1, CARG1, 1
+ | addiu CARG4, CARG4, -1
+ | b <1
+ | sb TMP1, 0(CARG4)
+ |
+ |.macro ffstring_case, name, lo
+ | .ffunc name
+ | ffgccheck
+ | lw CARG3, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | bne CARG3, AT, ->fff_fallback
+ |. lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | lw CARG3, STR:CARG1->len
+ | addiu CARG1, STR:CARG1, #STR
+ | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | sltu AT, TMP1, CARG3
+ | bnez AT, ->fff_fallback
+ |. addu TMP3, CARG1, CARG3
+ | move CARG4, CARG2
+ |1: // ASCII case conversion.
+ | lbu TMP1, 0(CARG1)
+ | sltu AT, CARG1, TMP3
+ | beqz AT, ->fff_newstr
+ |. addiu TMP0, TMP1, -lo
+ | xori TMP2, TMP1, 0x20
+ | sltiu AT, TMP0, 26
+ | movn TMP1, TMP2, AT
+ | addiu CARG1, CARG1, 1
+ | sb TMP1, 0(CARG4)
+ | b <1
+ |. addiu CARG4, CARG4, 1
+ |.endmacro
+ |
+ |ffstring_case string_lower, 65
+ |ffstring_case string_upper, 97
+ |
+ |//-- Table library ------------------------------------------------------
+ |
+ |.ffunc_1 table_getn
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, ->fff_fallback
+ |. load_got lj_tab_len
+ | call_intern lj_tab_len // (GCtab *t)
+ |. nop
+ | // Returns uint32_t (but less than 2^31).
+ | b ->fff_resi
+ |. nop
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_n bit_..name
+ |. add.d FARG1, FARG1, TOBIT
+ | mfc1 CRET1, FARG1
+ |.endmacro
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | addiu TMP1, BASE, 8
+ | addu TMP2, BASE, NARGS8:RC
+ |1:
+ | lw CARG4, HI(TMP1)
+ | beq TMP1, TMP2, ->fff_resi
+ |. ldc1 FARG1, 0(TMP1)
+ | sltiu AT, CARG4, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ | add.d FARG1, FARG1, TOBIT
+ | mfc1 CARG2, FARG1
+ | ins CRET1, CRET1, CARG2
+ | b <1
+ |. addiu TMP1, TMP1, 8
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | srl TMP0, CRET1, 24
+ | srl TMP2, CRET1, 8
+ | sll TMP1, CRET1, 24
+ | andi TMP2, TMP2, 0xff00
+ | or TMP0, TMP0, TMP1
+ | andi CRET1, CRET1, 0xff00
+ | or TMP0, TMP0, TMP2
+ | sll CRET1, CRET1, 8
+ | b ->fff_resi
+ |. or CRET1, TMP0, CRET1
+ |
+ |.ffunc_bit bnot
+ | b ->fff_resi
+ |. not CRET1, CRET1
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc_nn bit_..name
+ |. add.d FARG1, FARG1, TOBIT
+ | add.d FARG2, FARG2, TOBIT
+ | mfc1 CARG1, FARG1
+ | mfc1 CARG2, FARG2
+ |.if shmod == 1
+ | li AT, 32
+ | subu TMP0, AT, CARG2
+ | sllv CARG2, CARG1, CARG2
+ | srlv CARG1, CARG1, TMP0
+ |.elif shmod == 2
+ | li AT, 32
+ | subu TMP0, AT, CARG2
+ | srlv CARG2, CARG1, CARG2
+ | sllv CARG1, CARG1, TMP0
+ |.endif
+ | b ->fff_resi
+ |. ins CRET1, CARG1, CARG2
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, sllv, 0
+ |.ffunc_bit_sh rshift, srlv, 0
+ |.ffunc_bit_sh arshift, srav, 0
+ |// Can't use rotrv, since it's only in MIPS32R2.
+ |.ffunc_bit_sh rol, or, 1
+ |.ffunc_bit_sh ror, or, 2
+ |
+ |.ffunc_bit tobit
+ |->fff_resi:
+ | mtc1 CRET1, FRET1
+ | b ->fff_resn
+ |. cvt.d.w FRET1, FRET1
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | lw TMP3, CFUNC:RB->f
+ | addu TMP1, BASE, NARGS8:RC
+ | lw PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | addiu TMP0, TMP1, 8*LUA_MINSTACK
+ | lw TMP2, L->maxstack
+ | sw PC, SAVE_PC // Redundant (but a defined value).
+ | sltu AT, TMP2, TMP0
+ | sw BASE, L->base
+ | sw TMP1, L->top
+ | bnez AT, >5 // Need to grow stack.
+ |. move CFUNCADDR, TMP3
+ | jalr TMP3 // (lua_State *L)
+ |. move CARG1, L
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | lw BASE, L->base
+ | sll RD, CRET1, 3
+ | bgtz CRET1, ->fff_res // Returned nresults+1?
+ |. addiu RA, BASE, -8
+ |1: // Returned 0 or -1: retry fast path.
+ | lw TMP0, L->top
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | bnez CRET1, ->vm_call_tail // Returned -1?
+ |. subu NARGS8:RC, TMP0, BASE
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andi TMP0, PC, FRAME_TYPE
+ | li AT, -4
+ | bnez TMP0, >3
+ |. and TMP1, PC, AT
+ | lbu TMP1, OFS_RA(PC)
+ | sll TMP1, TMP1, 3
+ |3:
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |. subu TMP2, BASE, TMP1
+ |
+ |5: // Grow stack for fallback handler.
+ | load_got lj_state_growstack
+ | li CARG2, LUA_MINSTACK
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw BASE, L->base
+ | b <1
+ |. li CRET1, 0 // Force retry.
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | move MULTRES, ra
+ | load_got lj_gc_step
+ | sw BASE, L->base
+ | addu TMP0, BASE, NARGS8:RC
+ | sw PC, SAVE_PC // Redundant (but a defined value).
+ | sw TMP0, L->top
+ | call_intern lj_gc_step // (lua_State *L)
+ |. move CARG1, L
+ | lw BASE, L->base
+ | move ra, MULTRES
+ | lw TMP0, L->top
+ | lw CFUNC:RB, FRAME_FUNC(BASE)
+ | jr ra
+ |. subu NARGS8:RC, TMP0, BASE
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+#if LJ_HASJIT
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_VMEVENT // No recording while in vmevent.
+ | bnez AT, >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ |. lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE
+ | bnez AT, >1
+ |. addiu TMP2, TMP2, -1
+ | andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, >1
+ |. nop
+ | b >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+#endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | beqz AT, >1
+ |5: // Re-dispatch to static ins.
+ |. lw AT, GG_DISP2STATIC(TMP0) // Assumes TMP0 holds DISPATCH+OP*4.
+ | jr AT
+ |. nop
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | bnez AT, <5
+ |. andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, <5
+ |. addiu TMP2, TMP2, -1
+ | beqz TMP2, >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, LUA_MASKLINE
+ | beqz AT, <5
+ |1:
+ |. load_got lj_dispatch_ins
+ | sw MULTRES, SAVE_MULTRES
+ | move CARG2, PC
+ | sw BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | call_intern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ |3:
+ | lw BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lw INS, -4(PC)
+ | decode_OP4a TMP1, INS
+ | decode_OP4b TMP1
+ | addu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | lw AT, GG_DISP2STATIC(TMP0)
+ | decode_RA8a RA, INS
+ | decode_RD8b RD
+ | jr AT
+ | decode_RA8b RA
+ |
+ |->cont_hook: // Continue from hook yield.
+ | addiu PC, PC, 4
+ | b <4
+ |. lw MULTRES, -24+LO(RB) // Restore MULTRES for *M ins.
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+#if LJ_HASJIT
+ | lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | addiu CARG1, DISPATCH, GG_DISP2J
+ | sw PC, SAVE_PC
+ | lw TMP1, LFUNC:TMP1->pc
+ | move CARG2, PC
+ | sw L, DISPATCH_J(L)(DISPATCH)
+ | lbu TMP1, PC2PROTO(framesize)(TMP1)
+ | load_got lj_trace_hot
+ | sw BASE, L->base
+ | sll TMP1, TMP1, 3
+ | addu TMP1, BASE, TMP1
+ | call_intern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ |. sw TMP1, L->top
+ | b <3
+ |. nop
+#endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+#if LJ_HASJIT
+ | b >1
+#endif
+ |. move CARG2, PC
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+#if LJ_HASJIT
+ | ori CARG2, PC, 1
+ |1:
+#endif
+ | load_got lj_dispatch_call
+ | addu TMP0, BASE, RC
+ | sw PC, SAVE_PC
+ | sw BASE, L->base
+ | subu RA, RA, BASE
+ | sw TMP0, L->top
+ | call_intern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ | // Returns ASMFunction.
+ | lw BASE, L->base
+ | lw TMP0, L->top
+ | sw r0, SAVE_PC // Invalidate for subsequent line hook.
+ | subu NARGS8:RC, TMP0, BASE
+ | addu RA, BASE, RA
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | jr CRET1
+ |. lw INS, -4(PC)
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro savex_, a, b
+ | sdc1 f..a, 16+a*8(sp)
+ | sw r..a, 16+32*8+a*4(sp)
+ | sw r..b, 16+32*8+b*4(sp)
+ |.endmacro
+ |
+ |->vm_exit_handler:
+#if LJ_HASJIT
+ | addiu sp, sp, -(16+32*8+32*4)
+ | savex_ 0, 1
+ | savex_ 2, 3
+ | savex_ 4, 5
+ | savex_ 6, 7
+ | savex_ 8, 9
+ | savex_ 10, 11
+ | savex_ 12, 13
+ | savex_ 14, 15
+ | savex_ 16, 17
+ | savex_ 18, 19
+ | savex_ 20, 21
+ | savex_ 22, 23
+ | savex_ 24, 25
+ | savex_ 26, 27
+ | sdc1 f28, 16+28*8(sp)
+ | sw r28, 16+32*8+28*4(sp)
+ | sdc1 f30, 16+30*8(sp)
+ | sw r30, 16+32*8+30*4(sp)
+ | sw r0, 16+32*8+31*4(sp) // Clear RID_TMP.
+ | li_vmstate EXIT
+ | addiu TMP2, sp, 16+32*8+32*4 // Recompute original value of sp.
+ | addiu DISPATCH, JGL, -GG_DISP2G-32768
+ | lw TMP1, 0(TMP2) // Load exit number.
+ | st_vmstate
+ | sw TMP2, 16+32*8+29*4(sp) // Store sp in RID_SP.
+ | lw L, DISPATCH_GL(jit_L)(DISPATCH)
+ | lw BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | load_got lj_trace_exit
+ | sw L, DISPATCH_J(L)(DISPATCH)
+ | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number.
+ | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number.
+ | addiu CARG1, DISPATCH, GG_DISP2J
+ | sw BASE, L->base
+ | call_intern lj_trace_exit // (jit_State *J, ExitState *ex)
+ |. addiu CARG2, sp, 16
+ | // Returns MULTRES (unscaled) or negated error code.
+ | lw TMP1, L->cframe
+ | li AT, -4
+ | lw BASE, L->base
+ | and sp, TMP1, AT
+ | lw PC, SAVE_PC // Get SAVE_PC.
+ | b >1
+ |. sw L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+#endif
+ |->vm_exit_interp:
+#if LJ_HASJIT
+ | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set.
+ | lw L, SAVE_L
+ | addiu DISPATCH, JGL, -GG_DISP2G-32768
+ |1:
+ | bltz CRET1, >3 // Check for error from exit.
+ |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | sll MULTRES, CRET1, 3
+ | li TISNIL, LJ_TNIL
+ | sw MULTRES, SAVE_MULTRES
+ | mtc1 TMP3, TOBIT
+ | lw TMP1, LFUNC:TMP1->pc
+ | sw r0, DISPATCH_GL(jit_L)(DISPATCH)
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ | cvt.d.s TOBIT, TOBIT
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | lw INS, 0(PC)
+ | addiu PC, PC, 4
+ | // Assumes TISNIL == ~LJ_VMST_INTERP == -1
+ | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
+ | decode_OP4a TMP1, INS
+ | decode_OP4b TMP1
+ | sltiu TMP2, TMP1, BC_FUNCF*4 // Function header?
+ | addu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | lw AT, 0(TMP0)
+ | decode_RA8a RA, INS
+ | beqz TMP2, >2
+ |. decode_RA8b RA
+ | jr AT
+ |. decode_RD8b RD
+ |2:
+ | addiu RC, MULTRES, -8
+ | jr AT
+ |. add RA, RA, BASE
+ |
+ |3: // Rethrow error from the right C frame.
+ | load_got lj_err_throw
+ | negu CARG2, CRET1
+ | call_intern lj_err_throw // (lua_State *L, int errcode)
+ |. move CARG1, L
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1, FARG2.
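+ |// Rounding uses the 2^52 trick: (|x| + 2^52) - 2^52 drops the fraction bits,
+ |// then the sign and a possible +-1 correction are merged back in.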
+ |.macro vm_round, func
+ | lui TMP0, 0x4330 // Hiword of 2^52 (double).
+ | mtc1 r0, f4
+ | mtc1 TMP0, f5
+ | abs.d FRET2, FARG1 // |x|
+ | mfc1 AT, f13
+ | c.olt.d 0, FRET2, f4
+ | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
+ | bc1f 0, >1 // Truncate only if |x| < 2^52.
+ |. sub.d FRET1, FRET1, f4
+ | slt AT, AT, r0
+ |.if "func" == "ceil"
+ | lui TMP0, 0xbff0 // Hiword of -1 (double). Preserves -0.
+ |.else
+ | lui TMP0, 0x3ff0 // Hiword of +1 (double).
+ |.endif
+ |.if "func" == "trunc"
+ | mtc1 TMP0, f5
+ | c.olt.d 0, FRET2, FRET1 // |x| < result?
+ | sub.d FRET2, FRET1, f4
+ | movt.d FRET1, FRET2, 0 // If yes, subtract +1.
+ | neg.d FRET2, FRET1
+ | jr ra
+ |. movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.else
+ | neg.d FRET2, FRET1
+ | mtc1 TMP0, f5
+ | movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.if "func" == "ceil"
+ | c.olt.d 0, FRET1, FARG1 // x > result?
+ |.else
+ | c.olt.d 0, FARG1, FRET1 // x < result?
+ |.endif
+ | sub.d FRET2, FRET1, f4 // If yes, subtract +-1.
+ | jr ra
+ |. movt.d FRET1, FRET2, 0
+ |.endif
+ |1:
+ | jr ra
+ |. mov.d FRET1, FARG1
+ |.endmacro
+ |
+ |->vm_floor:
+ | vm_round floor
+ |->vm_ceil:
+ | vm_round ceil
+ |->vm_trunc:
+#if LJ_HASJIT
+ | vm_round trunc
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in r1, g in r2.
+ |->vm_ffi_callback:
+#if LJ_HASFFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | lw CTSTATE, GL:r2->ctype_state
+ | addiu DISPATCH, r2, GG_G2DISP
+ | load_got lj_ccallback_enter
+ | sw r1, CTSTATE->cb.slot
+ | sw CARG1, CTSTATE->cb.gpr[0]
+ | sw CARG2, CTSTATE->cb.gpr[1]
+ | sdc1 FARG1, CTSTATE->cb.fpr[0]
+ | sw CARG3, CTSTATE->cb.gpr[2]
+ | sw CARG4, CTSTATE->cb.gpr[3]
+ | sdc1 FARG2, CTSTATE->cb.fpr[1]
+ | addiu TMP0, sp, CFRAME_SPACE+16
+ | sw TMP0, CTSTATE->cb.stack
+ | sw r0, SAVE_PC // Any value outside of bytecode is ok.
+ | move CARG2, sp
+ | call_intern lj_ccallback_enter // (CTState *cts, void *cf)
+ |. move CARG1, CTSTATE
+ | // Returns lua_State *.
+ | lw BASE, L:CRET1->base
+ | lw RC, L:CRET1->top
+ | move L, CRET1
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | mtc1 TMP3, TOBIT
+ | li_vmstate INTERP
+ | li TISNIL, LJ_TNIL
+ | subu RC, RC, BASE
+ | st_vmstate
+ | cvt.d.s TOBIT, TOBIT
+ | ins_callt
+#endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+#if LJ_HASFFI
+ | load_got lj_ccallback_leave
+ | lw CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
+ | sw BASE, L->base
+ | sw RB, L->top
+ | sw L, CTSTATE->L
+ | move CARG2, RA
+ | call_intern lj_ccallback_leave // (CTState *cts, TValue *o)
+ |. move CARG1, CTSTATE
+ | lw CRET1, CTSTATE->cb.gpr[0]
+ | ldc1 FRET1, CTSTATE->cb.fpr[0]
+ | lw CRET2, CTSTATE->cb.gpr[1]
+ | b ->vm_leave_unw
+ |. ldc1 FRET2, CTSTATE->cb.fpr[1]
+#endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+#if LJ_HASFFI
+ | .type CCSTATE, CCallState, CARG1
+ | lw TMP1, CCSTATE->spadj
+ | lbu CARG2, CCSTATE->nsp
+ | move TMP2, sp
+ | subu sp, sp, TMP1
+ | sw ra, -4(TMP2)
+ | sll CARG2, CARG2, 2
+ | sw r16, -8(TMP2)
+ | sw CCSTATE, -12(TMP2)
+ | move r16, TMP2
+ | addiu TMP1, CCSTATE, offsetof(CCallState, stack)
+ | addiu TMP2, sp, 16
+ | beqz CARG2, >2
+ |. addu TMP3, TMP1, CARG2
+ |1:
+ | lw TMP0, 0(TMP1)
+ | addiu TMP1, TMP1, 4
+ | sltu AT, TMP1, TMP3
+ | sw TMP0, 0(TMP2)
+ | bnez AT, <1
+ |. addiu TMP2, TMP2, 4
+ |2:
+ | lw CFUNCADDR, CCSTATE->func
+ | lw CARG2, CCSTATE->gpr[1]
+ | lw CARG3, CCSTATE->gpr[2]
+ | lw CARG4, CCSTATE->gpr[3]
+ | ldc1 FARG1, CCSTATE->fpr[0]
+ | ldc1 FARG2, CCSTATE->fpr[1]
+ | jalr CFUNCADDR
+ |. lw CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
+ | lw CCSTATE:TMP1, -12(r16)
+ | lw TMP2, -8(r16)
+ | lw ra, -4(r16)
+ | sw CRET1, CCSTATE:TMP1->gpr[0]
+ | sw CRET2, CCSTATE:TMP1->gpr[1]
+ | sdc1 FRET1, CCSTATE:TMP1->fpr[0]
+ | sdc1 FRET2, CCSTATE:TMP1->fpr[1]
+ | move sp, r16
+ | jr ra
+ |. move r16, TMP2
+#endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | addu CARG2, BASE, RA
+ | addu CARG3, BASE, RD
+ | lw TMP0, HI(CARG2)
+ | lw TMP1, HI(CARG3)
+ | ldc1 f0, 0(CARG2)
+ | ldc1 f2, 0(CARG3)
+ | sltiu TMP0, TMP0, LJ_TISNUM
+ | sltiu TMP1, TMP1, LJ_TISNUM
+ | lhu TMP2, OFS_RD(PC)
+ | and TMP0, TMP0, TMP1
+ | addiu PC, PC, 4
+ | beqz TMP0, ->vmeta_comp
+ |. lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
+ | decode_RD4b TMP2
+ | addu TMP2, TMP2, TMP1
+ if (op == BC_ISLT || op == BC_ISGE) {
+ | c.olt.d f0, f2
+ } else {
+ | c.ole.d f0, f2
+ }
+ if (op == BC_ISLT || op == BC_ISLE) {
+ | movf TMP2, r0
+ } else {
+ | movt TMP2, r0
+ }
+ | addu PC, PC, TMP2
+ |1:
+ | ins_next
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | addu RA, BASE, RA
+ | addiu PC, PC, 4
+ | lw TMP0, HI(RA)
+ | ldc1 f0, 0(RA)
+ | addu RD, BASE, RD
+ | lhu TMP2, -4+OFS_RD(PC)
+ | lw TMP1, HI(RD)
+ | ldc1 f2, 0(RD)
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sltiu AT, TMP0, LJ_TISNUM
+ | sltiu CARG1, TMP1, LJ_TISNUM
+ | decode_RD4b TMP2
+ | and AT, AT, CARG1
+ | beqz AT, >5
+ |. addu TMP2, TMP2, TMP3
+ | c.eq.d f0, f2
+ if (vk) {
+ | movf TMP2, r0
+ } else {
+ | movt TMP2, r0
+ }
+ |1:
+ | addu PC, PC, TMP2
+ | ins_next
+ |5: // Either or both types are not numbers.
+ | lw CARG2, LO(RA)
+ | lw CARG3, LO(RD)
+ if (LJ_HASFFI) {
+ | li TMP3, LJ_TCDATA
+ | beq TMP0, TMP3, ->vmeta_equal_cd
+ }
+ |. sltiu AT, TMP0, LJ_TISPRI // Not a primitive?
+ if (LJ_HASFFI) {
+ | beq TMP1, TMP3, ->vmeta_equal_cd
+ }
+ |. xor TMP3, CARG2, CARG3 // Same tv?
+ | xor TMP1, TMP1, TMP0 // Same type?
+ | sltiu CARG1, TMP0, LJ_TISTABUD+1 // Table or userdata?
+ | movz TMP3, r0, AT // Ignore tv if primitive.
+ | movn CARG1, r0, TMP1 // Tab/ud and same type?
+ | or AT, TMP1, TMP3 // Same type && (pri||same tv).
+ | movz CARG1, r0, AT
+ | beqz CARG1, <1 // Done if not tab/ud or not same type or same tv.
+ if (vk) {
+ |. movn TMP2, r0, AT
+ } else {
+ |. movz TMP2, r0, AT
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | lw TAB:TMP1, TAB:CARG2->metatable
+ | beqz TAB:TMP1, <1 // No metatable?
+ |. nop
+ | lbu TMP1, TAB:TMP1->nomm
+ | andi TMP1, TMP1, 1<<MM_eq
+ | bnez TMP1, <1 // Or 'no __eq' flag set?
+ |. nop
+ | b ->vmeta_equal // Handle __eq metamethod.
+ |. li CARG4, 1-vk // ne = 0 or 1.
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
+ | addu RA, BASE, RA
+ | addiu PC, PC, 4
+ | lw TMP0, HI(RA)
+ | srl RD, RD, 1
+ | lw STR:TMP3, LO(RA)
+ | subu RD, KBASE, RD
+ | lhu TMP2, -4+OFS_RD(PC)
+ if (LJ_HASFFI) {
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ }
+ |. lw STR:TMP1, -4(RD) // KBASE-4-str_const*4
+ | addiu TMP0, TMP0, -LJ_TSTR
+ | decode_RD4b TMP2
+ | xor TMP1, STR:TMP1, STR:TMP3
+ | or TMP0, TMP0, TMP1
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ if (vk) {
+ | movn TMP2, r0, TMP0
+ } else {
+ | movz TMP2, r0, TMP0
+ }
+ | addu PC, PC, TMP2
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ | addu RA, BASE, RA
+ | addiu PC, PC, 4
+ | lw TMP0, HI(RA)
+ | ldc1 f0, 0(RA)
+ | addu RD, KBASE, RD
+ | lhu TMP2, -4+OFS_RD(PC)
+ | ldc1 f2, 0(RD)
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sltiu AT, TMP0, LJ_TISNUM
+ | decode_RD4b TMP2
+ if (LJ_HASFFI) {
+ | beqz AT, >5
+ } else {
+ | beqz AT, >1
+ }
+ |. addu TMP2, TMP2, TMP3
+ | c.eq.d f0, f2
+ if (vk) {
+ | movf TMP2, r0
+ | addu PC, PC, TMP2
+ |1:
+ } else {
+ | movt TMP2, r0
+ |1:
+ | addu PC, PC, TMP2
+ }
+ | ins_next
+ if (LJ_HASFFI) {
+ |5:
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |. nop
+ | b <1
+ |. nop
+ }
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | addu RA, BASE, RA
+ | srl TMP1, RD, 3
+ | lw TMP0, HI(RA)
+ | lhu TMP2, OFS_RD(PC)
+ | not TMP1, TMP1
+ | addiu PC, PC, 4
+ if (LJ_HASFFI) {
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ }
+ |. xor TMP0, TMP0, TMP1
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ if (vk) {
+ | movn TMP2, r0, TMP0
+ } else {
+ | movz TMP2, r0, TMP0
+ }
+ | addu PC, PC, TMP2
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | addu RD, BASE, RD
+ | lhu TMP2, OFS_RD(PC)
+ | lw TMP0, HI(RD)
+ | addiu PC, PC, 4
+ if (op == BC_IST || op == BC_ISF) {
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ if (op == BC_IST) {
+ | movz TMP2, r0, TMP0
+ } else {
+ | movn TMP2, r0, TMP0
+ }
+ | addu PC, PC, TMP2
+ } else {
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ | ldc1 f0, 0(RD)
+ if (op == BC_ISTC) {
+ | beqz TMP0, >1
+ } else {
+ | bnez TMP0, >1
+ }
+ |. addu RA, BASE, RA
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ | sdc1 f0, 0(RA)
+ | addu PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | addu RD, BASE, RD
+ | addu RA, BASE, RA
+ | ldc1 f0, 0(RD)
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | addu RD, BASE, RD
+ | addu RA, BASE, RA
+ | lw TMP0, HI(RD)
+ | li TMP1, LJ_TFALSE
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ | addiu TMP1, TMP0, LJ_TTRUE
+ | ins_next1
+ | sw TMP1, HI(RA)
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | addu CARG3, BASE, RD
+ | addu RA, BASE, RA
+ | lw TMP0, HI(CARG3)
+ | ldc1 f0, 0(CARG3)
+ | sltiu AT, TMP0, LJ_TISNUM
+ | beqz AT, ->vmeta_unm
+ |. neg.d f0, f0
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | addu CARG2, BASE, RD
+ | addu RA, BASE, RA
+ | lw TMP0, HI(CARG2)
+ | lw CARG1, LO(CARG2)
+ | li AT, LJ_TSTR
+ | bne TMP0, AT, >2
+ |. li AT, LJ_TTAB
+ | lw CRET1, STR:CARG1->len
+ |1:
+ | mtc1 CRET1, f0
+ | cvt.d.w f0, f0
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |2:
+ | bne TMP0, AT, ->vmeta_len
+ |. nop
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | lw TAB:TMP2, TAB:CARG1->metatable
+ | bnez TAB:TMP2, >9
+ |. nop
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | load_got lj_tab_len
+ | call_intern lj_tab_len // (GCtab *t)
+ |. nop
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+ |. nop
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ |9:
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_len
+ | bnez TMP0, <3 // 'no __len' flag set: done.
+ |. nop
+ | b ->vmeta_len
+ |. nop
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithpre
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||switch (vk) {
+ ||case 0:
+ | addu CARG3, BASE, RB
+ | addu CARG4, KBASE, RC
+ | lw TMP1, HI(CARG3)
+ | ldc1 f20, 0(CARG3)
+ | ldc1 f22, 0(CARG4)
+ | sltiu AT, TMP1, LJ_TISNUM
+ || break;
+ ||case 1:
+ | addu CARG4, BASE, RB
+ | addu CARG3, KBASE, RC
+ | lw TMP1, HI(CARG4)
+ | ldc1 f22, 0(CARG4)
+ | ldc1 f20, 0(CARG3)
+ | sltiu AT, TMP1, LJ_TISNUM
+ || break;
+ ||default:
+ | addu CARG3, BASE, RB
+ | addu CARG4, BASE, RC
+ | lw TMP1, HI(CARG3)
+ | lw TMP2, HI(CARG4)
+ | ldc1 f20, 0(CARG3)
+ | ldc1 f22, 0(CARG4)
+ | sltiu AT, TMP1, LJ_TISNUM
+ | sltiu TMP0, TMP2, LJ_TISNUM
+ | and AT, AT, TMP0
+ || break;
+ ||}
+ | beqz AT, ->vmeta_arith
+ |. addu RA, BASE, RA
+ |.endmacro
+ |
+ |.macro fpmod, a, b, c
+ |->BC_MODVN_Z:
+ | bal ->vm_floor // floor(b/c)
+ |. div.d FARG1, b, c
+ | mul.d a, FRET1, c
+ | sub.d a, b, a // b - floor(b/c)*c
+ |.endmacro
+ |
+ |.macro ins_arith, ins
+ | ins_arithpre
+ |.if "ins" == "fpmod_"
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ |. nop
+ |.else
+ | ins f0, f20, f22
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |.endif
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith add.d
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith sub.d
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith mul.d
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arith div.d
+ break;
+ case BC_MODVN:
+ | ins_arith fpmod
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arith fpmod_
+ break;
+ case BC_POW:
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG3, BASE, RB
+ | addu CARG4, BASE, RC
+ | lw TMP1, HI(CARG3)
+ | lw TMP2, HI(CARG4)
+ | ldc1 FARG1, 0(CARG3)
+ | ldc1 FARG2, 0(CARG4)
+ | sltiu AT, TMP1, LJ_TISNUM
+ | sltiu TMP0, TMP2, LJ_TISNUM
+ | and AT, AT, TMP0
+ | load_got pow
+ | beqz AT, ->vmeta_arith
+ |. addu RA, BASE, RA
+ | call_extern
+ |. nop
+ | ins_next1
+ | sdc1 FRET1, 0(RA)
+ | ins_next2
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | subu CARG3, RC, RB
+ | sw BASE, L->base
+ | addu CARG2, BASE, RC
+ | move MULTRES, RB
+ |->BC_CAT_Z:
+ | load_got lj_meta_cat
+ | srl CARG3, CARG3, 3
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | bnez CRET1, ->vmeta_binop
+ |. lw BASE, L->base
+ | addu RB, BASE, MULTRES
+ | ldc1 f0, 0(RB)
+ | addu RA, BASE, RA
+ | ins_next1
+ | sdc1 f0, 0(RA) // Copy result from RB to RA.
+ | ins_next2
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | ins_next1
+ | lw TMP0, -4(TMP1) // KBASE-4-str_const*4
+ | addu RA, BASE, RA
+ | li TMP2, LJ_TSTR
+ | sw TMP0, LO(RA)
+ | sw TMP2, HI(RA)
+ | ins_next2
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | ins_next1
+ | lw TMP0, -4(TMP1) // KBASE-4-cdata_const*4
+ | addu RA, BASE, RA
+ | li TMP2, LJ_TCDATA
+ | sw TMP0, LO(RA)
+ | sw TMP2, HI(RA)
+ | ins_next2
+#endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ | sra RD, INS, 16
+ | mtc1 RD, f0
+ | addu RA, BASE, RA
+ | cvt.d.w f0, f0
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | addu RD, KBASE, RD
+ | addu RA, BASE, RA
+ | ldc1 f0, 0(RD)
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | srl TMP1, RD, 3
+ | addu RA, BASE, RA
+ | not TMP0, TMP1
+ | ins_next1
+ | sw TMP0, HI(RA)
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
+ | addu RA, BASE, RA
+ | sw TISNIL, HI(RA)
+ | addiu RA, RA, 8
+ | addu RD, BASE, RD
+ |1:
+ | sw TISNIL, HI(RA)
+ | slt AT, RA, RD
+ | bnez AT, <1
+ |. addiu RA, RA, 8
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RD, RD, 1
+ | addu RD, RD, LFUNC:RB
+ | lw UPVAL:RB, LFUNC:RD->uvptr
+ | ins_next1
+ | lw TMP1, UPVAL:RB->v
+ | ldc1 f0, 0(TMP1)
+ | addu RA, BASE, RA
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | addu RD, BASE, RD
+ | addu RA, RA, LFUNC:RB
+ | ldc1 f0, 0(RD)
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | lbu TMP3, UPVAL:RB->marked
+ | lw CARG2, UPVAL:RB->v
+ | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP0, UPVAL:RB->closed
+ | lw TMP2, HI(RD)
+ | sdc1 f0, 0(CARG2)
+ | li AT, LJ_GC_BLACK|1
+ | or TMP3, TMP3, TMP0
+ | beq TMP3, AT, >2 // Upvalue is closed and black?
+ |. addiu TMP2, TMP2, -(LJ_TISNUM+1)
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | sltiu AT, TMP2, LJ_TISGCV - (LJ_TISNUM+1)
+ | beqz AT, <1 // tvisgcv(v)
+ |. lw TMP1, LO(RD)
+ | lbu TMP3, GCOBJ:TMP1->gch.marked
+ | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | beqz TMP3, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. addiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | srl TMP1, RD, 1
+ | addu RA, RA, LFUNC:RB
+ | subu TMP1, KBASE, TMP1
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | lw STR:TMP1, -4(TMP1) // KBASE-4-str_const*4
+ | lbu TMP2, UPVAL:RB->marked
+ | lw CARG2, UPVAL:RB->v
+ | lbu TMP3, STR:TMP1->marked
+ | andi AT, TMP2, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP2, UPVAL:RB->closed
+ | li TMP0, LJ_TSTR
+ | sw STR:TMP1, LO(CARG2)
+ | bnez AT, >2
+ |. sw TMP0, HI(CARG2)
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | beqz TMP2, <1
+ |. andi AT, TMP3, LJ_GC_WHITES // iswhite(str)
+ | beqz AT, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. addiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | addu RD, KBASE, RD
+ | addu RA, RA, LFUNC:RB
+ | ldc1 f0, 0(RD)
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | ins_next1
+ | lw TMP1, UPVAL:RB->v
+ | sdc1 f0, 0(TMP1)
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | srl TMP0, RD, 3
+ | addu RA, RA, LFUNC:RB
+ | not TMP0, TMP0
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | ins_next1
+ | lw TMP1, UPVAL:RB->v
+ | sw TMP0, HI(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | lw TMP2, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | load_got lj_func_closeuv
+ | sw BASE, L->base
+ | beqz TMP2, >1
+ |. move CARG1, L
+ | call_intern lj_func_closeuv // (lua_State *L, TValue *level)
+ |. addu CARG2, BASE, RA
+ | lw BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | srl TMP1, RD, 1
+ | load_got lj_func_newL_gc
+ | subu TMP1, KBASE, TMP1
+ | lw CARG3, FRAME_FUNC(BASE)
+ | lw CARG2, -4(TMP1) // KBASE-4-tab_const*4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | call_intern lj_func_newL_gc
+ |. move CARG1, L
+ | // Returns GCfuncL *.
+ | lw BASE, L->base
+ | li TMP0, LJ_TFUNC
+ | ins_next1
+ | addu RA, BASE, RA
+ | sw TMP0, HI(RA)
+ | sw LFUNC:CRET1, LO(RA)
+ | ins_next2
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
+ | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | sltu AT, TMP0, TMP1
+ | beqz AT, >5
+ |1:
+ if (op == BC_TNEW) {
+ | load_got lj_tab_new
+ | srl CARG2, RD, 3
+ | andi CARG2, CARG2, 0x7ff
+ | li TMP0, 0x801
+ | addiu AT, CARG2, -0x7ff
+ | srl CARG3, RD, 14
+ | movz CARG2, TMP0, AT
+ | // (lua_State *L, int32_t asize, uint32_t hbits)
+ | call_intern lj_tab_new
+ |. move CARG1, L
+ | // Returns Table *.
+ } else {
+ | load_got lj_tab_dup
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | move CARG1, L
+ | call_intern lj_tab_dup // (lua_State *L, Table *kt)
+ |. lw CARG2, -4(TMP1) // KBASE-4-str_const*4
+ | // Returns Table *.
+ }
+ | lw BASE, L->base
+ | ins_next1
+ | addu RA, BASE, RA
+ | li TMP0, LJ_TTAB
+ | sw TAB:CRET1, LO(RA)
+ | sw TMP0, HI(RA)
+ | ins_next2
+ |5:
+ | load_got lj_gc_step_fixtop
+ | move MULTRES, RD
+ | call_intern lj_gc_step_fixtop // (lua_State *L)
+ |. move CARG1, L
+ | b <1
+ |. move RD, MULTRES
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | lw LFUNC:TMP2, FRAME_FUNC(BASE)
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | lw TAB:RB, LFUNC:TMP2->env
+ | lw STR:RC, -4(TMP1) // KBASE-4-str_const*4
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ |. addu RA, BASE, RA
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG2, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TMP1, HI(CARG2)
+ | lw TMP2, HI(CARG3)
+ | lw TAB:RB, LO(CARG2)
+ | li AT, LJ_TTAB
+ | ldc1 f0, 0(CARG3)
+ | bne TMP1, AT, ->vmeta_tgetv
+ |. addu RA, BASE, RA
+ | sltiu AT, TMP2, LJ_TISNUM
+ | beqz AT, >5
+ |. li AT, LJ_TSTR
+ |
+ | // Convert number key to integer, check for integerness and range.
+ | cvt.w.d f2, f0
+ | lw TMP0, TAB:RB->asize
+ | mfc1 TMP2, f2
+ | cvt.d.w f4, f2
+ | lw TMP1, TAB:RB->array
+ | c.eq.d f0, f4
+ | sltu AT, TMP2, TMP0
+ | movf AT, r0
+ | sll TMP2, TMP2, 3
+ | beqz AT, ->vmeta_tgetv // Integer key and in array part?
+ |. addu TMP2, TMP1, TMP2
+ | lw TMP0, HI(TMP2)
+ | beq TMP0, TISNIL, >2
+ |. ldc1 f0, 0(TMP2)
+ |1:
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |
+ |2: // Check for __index if table value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetv
+ |. nop
+ |
+ |5:
+ | bne TMP2, AT, ->vmeta_tgetv
+ |. lw STR:RC, LO(CARG3)
+ | b ->BC_TGETS_Z // String key?
+ |. nop
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*4 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RC4a RC, INS
+ | lw TMP0, HI(CARG2)
+ | decode_RC4b RC
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | subu CARG3, KBASE, RC
+ | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
+ | bne TMP0, AT, ->vmeta_tgets1
+ |. addu RA, BASE, RA
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->hash
+ | lw NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | lw NODE:TMP1, NODE:TMP2->next
+ | lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
+ | addiu CARG1, CARG1, -LJ_TSTR
+ | xor TMP0, TMP0, STR:RC
+ | or AT, CARG1, TMP0
+ | bnez AT, >4
+ |. lw TAB:TMP3, TAB:RB->metatable
+ | beq CARG2, TISNIL, >5 // Key found, but nil value?
+ |. lw CARG1, offsetof(Node, val)+LO(NODE:TMP2)
+ |3:
+ | ins_next1
+ | sw CARG2, HI(RA)
+ | sw CARG1, LO(RA)
+ | ins_next2
+ |
+ |4: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | beqz TAB:TMP3, <3 // No metatable: done.
+ |. li CARG2, LJ_TNIL
+ | lbu TMP0, TAB:TMP3->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <3 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgets
+ |. nop
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | lw CARG1, HI(CARG2)
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | addu RA, BASE, RA
+ | bne CARG1, AT, ->vmeta_tgetb
+ |. srl TMP0, RC, 3
+ | lw TMP1, TAB:RB->asize
+ | lw TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tgetb
+ |. addu RC, TMP2, RC
+ | lw TMP1, HI(RC)
+ | beq TMP1, TISNIL, >5
+ |. ldc1 f0, 0(RC)
+ |1:
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_index
+ | bnez TMP1, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetb // Caveat: preserve TMP0!
+ |. nop
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG2, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TMP1, HI(CARG2)
+ | lw TMP2, HI(CARG3)
+ | lw TAB:RB, LO(CARG2)
+ | li AT, LJ_TTAB
+ | ldc1 f0, 0(CARG3)
+ | bne TMP1, AT, ->vmeta_tsetv
+ |. addu RA, BASE, RA
+ | sltiu AT, TMP2, LJ_TISNUM
+ | beqz AT, >5
+ |. li AT, LJ_TSTR
+ |
+ | // Convert number key to integer, check for integerness and range.
+ | cvt.w.d f2, f0
+ | lw TMP0, TAB:RB->asize
+ | mfc1 TMP2, f2
+ | cvt.d.w f4, f2
+ | lw TMP1, TAB:RB->array
+ | c.eq.d f0, f4
+ | sltu AT, TMP2, TMP0
+ | movf AT, r0
+ | sll TMP2, TMP2, 3
+ | beqz AT, ->vmeta_tsetv // Integer key and in array part?
+ |. addu TMP1, TMP1, TMP2
+ | lbu TMP3, TAB:RB->marked
+ | lw TMP0, HI(TMP1)
+ | beq TMP0, TISNIL, >3
+ |. ldc1 f0, 0(RA)
+ |1:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sdc1 f0, 0(TMP1)
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP2, TAB:TMP2->nomm
+ | andi TMP2, TMP2, 1<<MM_newindex
+ | bnez TMP2, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetv
+ |. nop
+ |
+ |5:
+ | bne TMP2, AT, ->vmeta_tsetv
+ |. lw STR:RC, LO(CARG3)
+ | b ->BC_TSETS_Z // String key?
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RC4a RC, INS
+ | lw TMP0, HI(CARG2)
+ | decode_RC4b RC
+ | li AT, LJ_TTAB
+ | subu CARG3, KBASE, RC
+ | lw TAB:RB, LO(CARG2)
+ | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
+ | bne TMP0, AT, ->vmeta_tsets1
+ |. addu RA, BASE, RA
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->hash
+ | lw NODE:TMP2, TAB:RB->node
+ | sb r0, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | ldc1 f20, 0(RA)
+ |1:
+ | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | li AT, LJ_TSTR
+ | lw NODE:TMP1, NODE:TMP2->next
+ | bne CARG1, AT, >5
+ |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
+ | bne TMP0, STR:RC, >5
+ |. lbu TMP3, TAB:RB->marked
+ | beq CARG2, TISNIL, >4 // Key found, but nil value?
+ |. lw TAB:TMP0, TAB:RB->metatable
+ |2:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sdc1 f20, NODE:TMP2->val
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | beqz TAB:TMP0, <2 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP0->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | bnez TMP0, <2 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsets
+ |. nop
+ |
+ |5: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, add a new one
+ |
+ | // But check for __newindex first.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, >6 // No metatable: continue.
+ |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | beqz TMP0, ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |. li AT, LJ_TSTR
+ |6:
+ | load_got lj_tab_newkey
+ | sw STR:RC, LO(CARG3)
+ | sw AT, HI(CARG3)
+ | sw BASE, L->base
+ | move CARG2, TAB:RB
+ | sw PC, SAVE_PC
+ | call_intern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue *.
+ | lw BASE, L->base
+ | b <3 // No 2nd write barrier needed.
+ |. sdc1 f20, 0(CRET1)
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | lw CARG1, HI(CARG2)
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | addu RA, BASE, RA
+ | bne CARG1, AT, ->vmeta_tsetb
+ |. srl TMP0, RC, 3
+ | lw TMP1, TAB:RB->asize
+ | lw TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tsetb
+ |. addu RC, TMP2, RC
+ | lw TMP1, HI(RC)
+ | lbu TMP3, TAB:RB->marked
+ | beq TMP1, TISNIL, >5
+ |. ldc1 f0, 0(RA)
+ |1:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sdc1 f0, 0(RC)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_newindex
+ | bnez TMP1, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetb // Caveat: preserve TMP0!
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | addu RA, BASE, RA
+ |1:
+ | addu TMP3, KBASE, RD
+ | lw TAB:CARG2, -8+LO(RA) // Guaranteed to be a table.
+ | addiu TMP0, MULTRES, -8
+ | lw TMP3, LO(TMP3) // Integer constant is in lo-word.
+ | beqz TMP0, >4 // Nothing to copy?
+ |. srl CARG3, TMP0, 3
+ | addu CARG3, CARG3, TMP3
+ | lw TMP2, TAB:CARG2->asize
+ | sll TMP1, TMP3, 3
+ | lbu TMP3, TAB:CARG2->marked
+ | lw CARG1, TAB:CARG2->array
+ | sltu AT, TMP2, CARG3
+ | bnez AT, >5
+ |. addu TMP2, RA, TMP0
+ | addu TMP1, TMP1, CARG1
+ | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | ldc1 f0, 0(RA)
+ | addiu RA, RA, 8
+ | sltu AT, RA, TMP2
+ | sdc1 f0, 0(TMP1)
+ | bnez AT, <3
+ |. addiu TMP1, TMP1, 8
+ | bnez TMP0, >7
+ |. nop
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | load_got lj_tab_reasize
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | move BASE, RD
+ | call_intern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ |. move CARG1, L
+ | // Must not reallocate the stack.
+ | move RD, BASE
+ | b <1
+ |. lw BASE, L->base // Reload BASE for lack of a saved register.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0, <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ | b ->BC_CALL_Z
+ |. addu NARGS8:RC, NARGS8:RC, MULTRES
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ |->BC_CALL_Z:
+ | move TMP2, BASE
+ | addu BASE, BASE, RA
+ | li AT, LJ_TFUNC
+ | lw TMP0, HI(BASE)
+ | lw LFUNC:RB, LO(BASE)
+ | addiu BASE, BASE, 8
+ | bne TMP0, AT, ->vmeta_call
+ |. addiu NARGS8:RC, NARGS8:RC, -8
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | addu NARGS8:RD, NARGS8:RD, MULTRES // BC_CALLT gets RC from RD.
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | addu RA, BASE, RA
+ | li AT, LJ_TFUNC
+ | lw TMP0, HI(RA)
+ | lw LFUNC:RB, LO(RA)
+ | move NARGS8:RC, RD
+ | lw TMP1, FRAME_PC(BASE)
+ | addiu RA, RA, 8
+ | bne TMP0, AT, ->vmeta_callt
+ |. addiu NARGS8:RC, NARGS8:RC, -8
+ |->BC_CALLT_Z:
+ | andi TMP0, TMP1, FRAME_TYPE // Caveat: preserve TMP0 until the 'or'.
+ | lbu TMP3, LFUNC:RB->ffid
+ | bnez TMP0, >7
+ |. xori TMP2, TMP1, FRAME_VARG
+ |1:
+ | sw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | sltiu AT, TMP3, 2 // (> FF_C) Calling a fast function?
+ | move TMP2, BASE
+ | beqz NARGS8:RC, >3
+ |. move TMP3, NARGS8:RC
+ |2:
+ | ldc1 f0, 0(RA)
+ | addiu RA, RA, 8
+ | addiu TMP3, TMP3, -8
+ | sdc1 f0, 0(TMP2)
+ | bnez TMP3, <2
+ |. addiu TMP2, TMP2, 8
+ |3:
+ | or TMP0, TMP0, AT
+ | beqz TMP0, >5
+ |. nop
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lw INS, -4(TMP1)
+ | decode_RA8a RA, INS
+ | decode_RA8b RA
+ | subu TMP1, BASE, RA
+ | lw LFUNC:TMP1, -8+FRAME_FUNC(TMP1)
+ | lw TMP1, LFUNC:TMP1->pc
+ | b <4
+ |. lw KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ |
+ |7: // Tailcall from a vararg function.
+ | andi AT, TMP2, FRAME_TYPEP
+ | bnez AT, <1 // Vararg frame below?
+ |. subu TMP2, BASE, TMP2 // Relocate BASE down.
+ | move BASE, TMP2
+ | lw TMP1, FRAME_PC(TMP2)
+ | b <1
+ |. andi TMP0, TMP1, FRAME_TYPE
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | move TMP2, BASE
+ | addu BASE, BASE, RA
+ | li AT, LJ_TFUNC
+ | lw TMP1, -24+HI(BASE)
+ | lw LFUNC:RB, -24+LO(BASE)
+ | ldc1 f2, -8(BASE)
+ | ldc1 f0, -16(BASE)
+ | sw TMP1, HI(BASE) // Copy callable.
+ | sw LFUNC:RB, LO(BASE)
+ | sdc1 f2, 16(BASE) // Copy control var.
+ | sdc1 f0, 8(BASE) // Copy state.
+ | addiu BASE, BASE, 8
+ | bne TMP1, AT, ->vmeta_call
+ |. li NARGS8:RC, 16 // Iterators get 2 arguments.
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
+#if LJ_HASJIT
+ | // NYI: add hotloop, record BC_ITERN.
+#endif
+ | addu RA, BASE, RA
+ | lw TAB:RB, -16+LO(RA)
+ | lw RC, -8+LO(RA) // Get index from control var.
+ | lw TMP0, TAB:RB->asize
+ | lw TMP1, TAB:RB->array
+ | addiu PC, PC, 4
+ |1: // Traverse array part.
+ | sltu AT, RC, TMP0
+ | beqz AT, >5 // Index points after array part?
+ |. sll TMP3, RC, 3
+ | addu TMP3, TMP1, TMP3
+ | lw TMP2, HI(TMP3)
+ | ldc1 f0, 0(TMP3)
+ | mtc1 RC, f2
+ | lhu RD, -4+OFS_RD(PC)
+ | beq TMP2, TISNIL, <1 // Skip holes in array part.
+ |. addiu RC, RC, 1
+ | cvt.d.w f2, f2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sdc1 f0, 8(RA)
+ | decode_RD4b RD
+ | addu RD, RD, TMP3
+ | sw RC, -8+LO(RA) // Update control var.
+ | addu PC, PC, RD
+ | sdc1 f2, 0(RA)
+ |3:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | lw TMP1, TAB:RB->hmask
+ | subu RC, RC, TMP0
+ | lw TMP2, TAB:RB->node
+ |6:
+ | sltu AT, TMP1, RC // End of iteration? Branch to ITERL+1.
+ | bnez AT, <3
+ |. sll TMP3, RC, 5
+ | sll RB, RC, 3
+ | subu TMP3, TMP3, RB
+ | addu NODE:TMP3, TMP3, TMP2
+ | lw RB, HI(NODE:TMP3)
+ | ldc1 f0, 0(NODE:TMP3)
+ | lhu RD, -4+OFS_RD(PC)
+ | beq RB, TISNIL, <6 // Skip holes in hash part.
+ |. addiu RC, RC, 1
+ | ldc1 f2, NODE:TMP3->key
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sdc1 f0, 8(RA)
+ | addu RC, RC, TMP0
+ | decode_RD4b RD
+ | addu RD, RD, TMP3
+ | sdc1 f2, 0(RA)
+ | addu PC, PC, RD
+ | b <3
+ |. sw RC, -8+LO(RA) // Update control var.
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | addu RA, BASE, RA
+ | lw TMP0, -24+HI(RA)
+ | lw CFUNC:TMP1, -24+LO(RA)
+ | lw TMP2, -16+HI(RA)
+ | lw TMP3, -8+HI(RA)
+ | li AT, LJ_TFUNC
+ | bne TMP0, AT, >5
+ |. addiu TMP2, TMP2, -LJ_TTAB
+ | lbu TMP1, CFUNC:TMP1->ffid
+ | addiu TMP3, TMP3, -LJ_TNIL
+ | srl TMP0, RD, 1
+ | or TMP2, TMP2, TMP3
+ | addiu TMP1, TMP1, -FF_next_N
+ | addu TMP0, PC, TMP0
+ | or TMP1, TMP1, TMP2
+ | bnez TMP1, >5
+ |. lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu PC, TMP0, TMP2
+ | sw r0, -8+LO(RA) // Initialize control var.
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP3, BC_JMP
+ | li TMP1, BC_ITERC
+ | sb TMP3, -4+OFS_OP(PC)
+ | addu PC, TMP0, TMP2
+ | b <1
+ |. sb TMP1, OFS_OP(PC)
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | lw TMP0, FRAME_PC(BASE)
+ | decode_RDtoRC8 RC, RD
+ | decode_RB8a RB, INS
+ | addu RC, BASE, RC
+ | decode_RB8b RB
+ | addu RA, BASE, RA
+ | addiu RC, RC, FRAME_VARG
+ | addu TMP2, RA, RB
+ | addiu TMP3, BASE, -8 // TMP3 = vtop
+ | subu RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | beqz RB, >5 // Copy all varargs?
+ |. subu TMP1, TMP3, RC
+ | addiu TMP2, TMP2, -16
+ |1: // Copy vararg slots to destination slots.
+ | lw CARG1, HI(RC)
+ | sltu AT, RC, TMP3
+ | lw CARG2, LO(RC)
+ | addiu RC, RC, 8
+ | movz CARG1, TISNIL, AT
+ | sw CARG1, HI(RA)
+ | sw CARG2, LO(RA)
+ | sltu AT, RA, TMP2
+ | bnez AT, <1
+ |. addiu RA, RA, 8
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | lw TMP0, L->maxstack
+ | blez TMP1, <3 // No vararg slots?
+ |. li MULTRES, 8 // MULTRES = (0+1)*8
+ | addu TMP2, RA, TMP1
+ | sltu AT, TMP0, TMP2
+ | bnez AT, >7
+ |. addiu MULTRES, TMP1, 8
+ |6:
+ | ldc1 f0, 0(RC)
+ | addiu RC, RC, 8
+ | sdc1 f0, 0(RA)
+ | sltu AT, RC, TMP3
+ | bnez AT, <6 // More vararg slots?
+ |. addiu RA, RA, 8
+ | b <3
+ |. nop
+ |
+ |7: // Grow stack for varargs.
+ | load_got lj_state_growstack
+ | sw RA, L->top
+ | subu RA, RA, BASE
+ | sw BASE, L->base
+ | subu BASE, RC, BASE // Need delta, because BASE may change.
+ | sw PC, SAVE_PC
+ | srl CARG2, TMP1, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | move RC, BASE
+ | lw BASE, L->base
+ | addu RA, BASE, RA
+ | addu RC, BASE, RC
+ | b <6
+ |. addiu TMP3, BASE, -8
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | addu RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lw PC, FRAME_PC(BASE)
+ | addu RA, BASE, RA
+ | move MULTRES, RD
+ |1:
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lw INS, -4(PC)
+ | addiu TMP2, BASE, -8
+ | addiu RC, RD, -8
+ | decode_RA8a TMP0, INS
+ | decode_RB8a RB, INS
+ | decode_RA8b TMP0
+ | decode_RB8b RB
+ | addu TMP3, TMP2, RB
+ | beqz RC, >3
+ |. subu BASE, TMP2, TMP0
+ |2:
+ | ldc1 f0, 0(RA)
+ | addiu RA, RA, 8
+ | addiu RC, RC, -8
+ | sdc1 f0, 0(TMP2)
+ | bnez RC, <2
+ |. addiu TMP2, TMP2, 8
+ |3:
+ | addiu TMP3, TMP3, -8
+ |5:
+ | sltu AT, TMP2, TMP3
+ | bnez AT, >6
+ |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lw TMP1, LFUNC:TMP1->pc
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | sw TISNIL, HI(TMP2)
+ | b <5
+ |. addiu TMP2, TMP2, 8
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andi TMP2, TMP1, FRAME_TYPEP
+ | bnez TMP2, ->vm_return
+ |. nop
+ | // Return from vararg function: relocate BASE down.
+ | subu BASE, BASE, TMP1
+ | b <1
+ |. lw PC, FRAME_PC(BASE)
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lw PC, FRAME_PC(BASE)
+ | addu RA, BASE, RA
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ |
+ | lw INS, -4(PC)
+ | addiu TMP2, BASE, -8
+ if (op == BC_RET1) {
+ | ldc1 f0, 0(RA)
+ }
+ | decode_RB8a RB, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b RB
+ | decode_RA8b RA
+ if (op == BC_RET1) {
+ | sdc1 f0, 0(TMP2)
+ }
+ | subu BASE, TMP2, RA
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6
+ |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lw TMP1, LFUNC:TMP1->pc
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | addiu TMP2, TMP2, 8
+ | addiu RD, RD, 8
+ | b <5
+ if (op == BC_RET1) {
+ |. sw TISNIL, HI(TMP2)
+ } else {
+ |. sw TISNIL, -8+HI(TMP2)
+ }
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | addu RA, BASE, RA
+ if (vk) {
+ | ldc1 f0, FORL_IDX*8(RA)
+ | ldc1 f4, FORL_STEP*8(RA)
+ | ldc1 f2, FORL_STOP*8(RA)
+ | lw TMP3, FORL_STEP*8+HI(RA)
+ | add.d f0, f0, f4
+ | sdc1 f0, FORL_IDX*8(RA)
+ } else {
+ | lw TMP1, FORL_IDX*8+HI(RA)
+ | lw TMP3, FORL_STEP*8+HI(RA)
+ | lw TMP2, FORL_STOP*8+HI(RA)
+ | sltiu TMP1, TMP1, LJ_TISNUM
+ | sltiu TMP0, TMP3, LJ_TISNUM
+ | sltiu TMP2, TMP2, LJ_TISNUM
+ | and TMP1, TMP1, TMP0
+ | and TMP1, TMP1, TMP2
+ | ldc1 f0, FORL_IDX*8(RA)
+ | beqz TMP1, ->vmeta_for
+ |. ldc1 f2, FORL_STOP*8(RA)
+ }
+ if (op != BC_JFORL) {
+ | srl RD, RD, 1
+ | lui TMP0, (-(BCBIAS_J*4 >> 16) & 65535)
+ }
+ | c.le.d 0, f0, f2
+ | c.le.d 1, f2, f0
+ | sdc1 f0, FORL_EXT*8(RA)
+ if (op == BC_JFORI) {
+ | li TMP1, 1
+ | li TMP2, 1
+ | addu TMP0, RD, TMP0
+ | slt TMP3, TMP3, r0
+ | movf TMP1, r0, 0
+ | addu PC, PC, TMP0
+ | movf TMP2, r0, 1
+ | lhu RD, -4+OFS_RD(PC)
+ | movn TMP1, TMP2, TMP3
+ | bnez TMP1, =>BC_JLOOP
+ |. decode_RD8b RD
+ } else if (op == BC_JFORL) {
+ | li TMP1, 1
+ | li TMP2, 1
+ | slt TMP3, TMP3, r0
+ | movf TMP1, r0, 0
+ | movf TMP2, r0, 1
+ | movn TMP1, TMP2, TMP3
+ | bnez TMP1, =>BC_JLOOP
+ |. nop
+ } else {
+ | addu TMP1, RD, TMP0
+ | slt TMP3, TMP3, r0
+ | move TMP2, TMP1
+ if (op == BC_FORI) {
+ | movt TMP1, r0, 0
+ | movt TMP2, r0, 1
+ } else {
+ | movf TMP1, r0, 0
+ | movf TMP2, r0, 1
+ }
+ | movn TMP1, TMP2, TMP3
+ | addu PC, PC, TMP1
+ }
+ | ins_next
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | addu RA, BASE, RA
+ | lw TMP1, HI(RA)
+ | beq TMP1, TISNIL, >1 // Stop if iterator returned nil.
+ |. lw TMP2, LO(RA)
+ if (op == BC_JITERL) {
+ | sw TMP1, -8+HI(RA)
+ | b =>BC_JLOOP
+ |. sw TMP2, -8+LO(RA)
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | sw TMP1, -8+HI(RA)
+ | sw TMP2, -8+LO(RA)
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD is only used by trace recorder to determine scope/extent
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ | // RA = base*8 (ignored), RD = traceno*8
+ | lw TMP1, DISPATCH_J(trace)(DISPATCH)
+ | srl RD, RD, 1
+ | li AT, 0
+ | addu TMP1, TMP1, RD
+ | // Traces on MIPS don't store the trace number, so use 0.
+ | sw AT, DISPATCH_GL(vmstate)(DISPATCH)
+ | lw TRACE:TMP2, 0(TMP1)
+ | sw BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | sw L, DISPATCH_GL(jit_L)(DISPATCH)
+ | lw TMP2, TRACE:TMP2->mcode
+ | jr TMP2
+ |. addiu JGL, DISPATCH, GG_DISP2G+32768
+#endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ | hotcall
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lw TMP2, L->maxstack
+ | lbu TMP1, -4+PC2PROTO(numparams)(PC)
+ | lw KBASE, -4+PC2PROTO(k)(PC)
+ | sltu AT, TMP2, RA
+ | bnez AT, ->vm_growstack_l
+ |. sll TMP1, TMP1, 3
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ }
+ |2:
+ | sltu AT, NARGS8:RC, TMP1 // Check for missing parameters.
+ | bnez AT, >3
+ |. addu AT, BASE, NARGS8:RC
+ if (op == BC_JFUNCF) {
+ | decode_RD8a RD, INS
+ | b =>BC_JLOOP
+ |. decode_RD8b RD
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | sw TISNIL, HI(AT)
+ | b <2
+ |. addiu NARGS8:RC, NARGS8:RC, 8
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | addu TMP1, BASE, RC
+ | lw TMP2, L->maxstack
+ | addu TMP0, RA, RC
+ | sw LFUNC:RB, LO(TMP1) // Store copy of LFUNC.
+ | addiu TMP3, RC, 8+FRAME_VARG
+ | sltu AT, TMP0, TMP2
+ | lw KBASE, -4+PC2PROTO(k)(PC)
+ | beqz AT, ->vm_growstack_l
+ |. sw TMP3, HI(TMP1) // Store delta + FRAME_VARG.
+ | lbu TMP2, -4+PC2PROTO(numparams)(PC)
+ | move RA, BASE
+ | move RC, TMP1
+ | ins_next1
+ | beqz TMP2, >3
+ |. addiu BASE, TMP1, 8
+ |1:
+ | lw TMP0, HI(RA)
+ | lw TMP3, LO(RA)
+ | sltu AT, RA, RC // Less args than parameters?
+ | move CARG1, TMP0
+ | movz TMP0, TISNIL, AT // Clear missing parameters.
+ | movn CARG1, TISNIL, AT // Clear old fixarg slot (help the GC).
+ | sw TMP3, 8+LO(TMP1)
+ | addiu TMP2, TMP2, -1
+ | sw TMP0, 8+HI(TMP1)
+ | addiu TMP1, TMP1, 8
+ | sw CARG1, HI(RA)
+ | bnez TMP2, <1
+ |. addiu RA, RA, 8
+ |3:
+ | ins_next2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | lw CFUNCADDR, CFUNC:RB->f
+ } else {
+ | lw CFUNCADDR, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | addu TMP1, RA, NARGS8:RC
+ | lw TMP2, L->maxstack
+ | addu RC, BASE, NARGS8:RC
+ | sw BASE, L->base
+ | sltu AT, TMP2, TMP1
+ | sw RC, L->top
+ | li_vmstate C
+ if (op == BC_FUNCCW) {
+ | lw CARG2, CFUNC:RB->f
+ }
+ | bnez AT, ->vm_growstack_c // Need to grow stack.
+ |. move CARG1, L
+ | jalr CFUNCADDR // (lua_State *L [, lua_CFunction f])
+ |. st_vmstate
+ | // Returns nresults.
+ | lw BASE, L->base
+ | sll RD, CRET1, 3
+ | lw TMP1, L->top
+ | li_vmstate INTERP
+ | lw PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | subu RA, TMP1, RD // RA = L->top - nresults*8
+ | b ->vm_returnc
+ |. st_vmstate
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.4byte .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.4byte 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.4byte .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte .Lbegin\n"
+ "\t.4byte %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 1\n"
+ "\t.byte 0x9e\n\t.sleb128 2\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
+ for (i = 30; i >= 20; i -= 2)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.4byte .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.byte 0x9f\n\t.uleb128 1\n"
+ "\t.byte 0x90\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
+ fprintf(ctx->fp,
+ "\t.globl lj_err_unwind_dwarf\n"
+ ".Lframe1:\n"
+ "\t.4byte .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.4byte 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0\n"
+ "\t.4byte lj_err_unwind_dwarf\n"
+ "\t.byte 0\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.4byte .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.4byte .LASFDE2-.Lframe1\n"
+ "\t.4byte .Lbegin\n"
+ "\t.4byte %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 1\n"
+ "\t.byte 0x9e\n\t.sleb128 2\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
+ for (i = 30; i >= 20; i -= 2)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.4byte .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.4byte 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.4byte .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.4byte .LASFDE3-.Lframe2\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9f\n\t.uleb128 1\n"
+ "\t.byte 0x90\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/src/LuaJIT/src/buildvm_mips.h b/src/LuaJIT/src/buildvm_mips.h
new file mode 100644
index 000000000..7c0f90bf8
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_mips.h
@@ -0,0 +1,7494 @@
+/*
+** This file has been pre-processed with DynASM.
+** http://luajit.org/dynasm.html
+** DynASM version 1.3.0, DynASM mips version 1.3.0
+** DO NOT EDIT! The original file is in "buildvm_mips.dasc".
+*/
+
+#if DASM_VERSION != 10300
+#error "Version mismatch between DynASM and included encoding engine"
+#endif
+
+#define DASM_SECTION_CODE_OP 0
+#define DASM_SECTION_CODE_SUB 1
+#define DASM_MAXSECTION 2
+static const unsigned int build_actionlist[6323] = {
+0xff010001,
+0xff060014,
+0x32410000,
+0xff090200,
+0x10200000,
+0xff050815,
+0x240d0000,
+0xff098200,
+0x8dd20000,
+0xff098200,
+0x01c08021,
+0xaeed0000,
+0xff098200,
+0x26f7fff8,
+0xff060016,
+0x324c0000,
+0xff090200,
+0x254a0008,
+0x11800000,
+0xff050817,
+0x0140a821,
+0xff060018,
+0x240efff8,
+0x39810000,
+0xff090200,
+0x024e7024,
+0x14200000,
+0xff050814,
+0x020e7023,
+0x254dfff8,
+0xae8e0000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0x8fae0078,
+0x2610fff8,
+0xae6c0000,
+0xff098200,
+0x11a00000,
+0xff050802,
+0x000e70c0,
+0xff06000b,
+0x25adfff8,
+0xd6e00000,
+0x26f70008,
+0xf6000000,
+0x15a00000,
+0xff05080b,
+0x26100008,
+0xff06000c,
+0x15ca0000,
+0xff050806,
+0xff06000d,
+0xae900000,
+0xff098200,
+0xff060019,
+0xff000000,
+0x8fac0074,
+0x00001021,
+0xae8c0000,
+0xff098200,
+0xff06001a,
+0x8fbf0000,
+0xff098200,
+0x8fbe0000,
+0xff098200,
+0xd7be0000,
+0xff098200,
+0x8fb70000,
+0xff098200,
+0x8fb60000,
+0xff098200,
+0xd7bc0000,
+0xff098200,
+0x8fb50000,
+0xff098200,
+0x8fb40000,
+0xff098200,
+0xd7ba0000,
+0xff098200,
+0x8fb30000,
+0xff098200,
+0x8fb20000,
+0xff098200,
+0xd7b80000,
+0xff098200,
+0x8fb10000,
+0xff098200,
+0x8fb00000,
+0xff098200,
+0xd7b60000,
+0xff098200,
+0xd7b40000,
+0xff098200,
+0x03e00008,
+0x27bd0070,
+0xff060010,
+0x8e8d0000,
+0xff098200,
+0x01ca082a,
+0x14200000,
+0xff050807,
+0x020d082a,
+0x10200000,
+0xff050808,
+0x00000000,
+0xae1e0000,
+0xff098200,
+0xff000000,
+0x254a0008,
+0x10000000,
+0xff05080c,
+0x26100008,
+0xff060011,
+0x014e6023,
+0x020c6023,
+0x10000000,
+0xff05080d,
+0x018e800b,
+0xff060012,
+0x8e790000,
+0xff098200,
+0x0140a821,
+0x01c02821,
+0x0320f809,
+0x02802021,
+0x8fae0078,
+0x8e900000,
+0xff098200,
+0x02a05021,
+0x10000000,
+0xff05080c,
+0x000e70c0,
+0xff06001b,
+0x0080e821,
+0x00a01021,
+0xff06001c,
+0x8fb40070,
+0x240c0000,
+0xff098200,
+0x8e8d0000,
+0xff098200,
+0x10000000,
+0xff05081a,
+0xadac0000,
+0xff098200,
+0xff06001d,
+0x2401fffc,
+0x0081e824,
+0xff06001e,
+0x8fb40070,
+0x3c0f59c0,
+0x241e0000,
+0xff098200,
+0x8e900000,
+0xff098200,
+0x8e930000,
+0xff098200,
+0x448ff000,
+0x240d0000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0x8e120000,
+0xff098200,
+0x4600f7a1,
+0x2617fff8,
+0x26730000,
+0xff098200,
+0xaeed0000,
+0xff098200,
+0xff000000,
+0xae6c0000,
+0xff098200,
+0x10000000,
+0xff050816,
+0x240a0010,
+0xff06001f,
+0x10000000,
+0xff050802,
+0x24050000,
+0xff098200,
+0xff060020,
+0x02094821,
+0x02f0b823,
+0xae900000,
+0xff098200,
+0x26520004,
+0xae890000,
+0xff098200,
+0x001728c2,
+0xff06000c,
+0x8e790000,
+0xff098200,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x8e900000,
+0xff098200,
+0x8e890000,
+0xff098200,
+0x8e080000,
+0xff098200,
+0x01304823,
+0x8d120000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000bb942,
+0x000d6880,
+0x32f707f8,
+0x026d6021,
+0x8d8c0000,
+0x01800008,
+0x02f0b821,
+0xff060021,
+0x27bdff90,
+0xafbf0000,
+0xff098200,
+0xafbe0000,
+0xff098200,
+0xf7be0000,
+0xff098200,
+0xafb70000,
+0xff098200,
+0xafb60000,
+0xff098200,
+0xf7bc0000,
+0xff098200,
+0xafb50000,
+0xff098200,
+0xafb40000,
+0xff098200,
+0xff000000,
+0xf7ba0000,
+0xff098200,
+0xafb30000,
+0xff098200,
+0xafb20000,
+0xff098200,
+0xf7b80000,
+0xff098200,
+0xafb10000,
+0xff098200,
+0xafb00000,
+0xff098200,
+0xf7b60000,
+0xff098200,
+0xf7b40000,
+0xff098200,
+0x0080a021,
+0x8e930000,
+0xff098200,
+0x00a08021,
+0x928d0000,
+0xff098200,
+0xafb40070,
+0x24120000,
+0xff098200,
+0x27ac0000,
+0xff098200,
+0x26730000,
+0xff098200,
+0xafa00078,
+0xafa0007c,
+0xae8c0000,
+0xff098200,
+0xafa00074,
+0x11a00000,
+0xff050803,
+0xafa40014,
+0x0200b821,
+0x8e900000,
+0xff098200,
+0x8e8d0000,
+0xff098200,
+0x8e120000,
+0xff098200,
+0x3c0f59c0,
+0x01b05023,
+0x448ff000,
+0xa2800000,
+0xff098200,
+0x4600f7a1,
+0x240c0000,
+0xff098200,
+0x254a0008,
+0xae6c0000,
+0xff098200,
+0x0140a821,
+0x324c0000,
+0xff090200,
+0x11800000,
+0xff050817,
+0xff000000,
+0x241e0000,
+0xff098200,
+0x10000000,
+0xff050818,
+0x00000000,
+0xff060022,
+0x27bdff90,
+0xafbf0000,
+0xff098200,
+0xafbe0000,
+0xff098200,
+0xf7be0000,
+0xff098200,
+0xafb70000,
+0xff098200,
+0xafb60000,
+0xff098200,
+0xf7bc0000,
+0xff098200,
+0xafb50000,
+0xff098200,
+0xafb40000,
+0xff098200,
+0xf7ba0000,
+0xff098200,
+0xafb30000,
+0xff098200,
+0xafb20000,
+0xff098200,
+0xf7b80000,
+0xff098200,
+0xafb10000,
+0xff098200,
+0xafb00000,
+0xff098200,
+0xf7b60000,
+0xff098200,
+0xf7b40000,
+0xff098200,
+0xafa7007c,
+0x10000000,
+0xff050801,
+0x24120000,
+0xff098200,
+0xff060023,
+0x27bdff90,
+0xafbf0000,
+0xff098200,
+0xff000000,
+0xafbe0000,
+0xff098200,
+0xf7be0000,
+0xff098200,
+0xafb70000,
+0xff098200,
+0xafb60000,
+0xff098200,
+0xf7bc0000,
+0xff098200,
+0xafb50000,
+0xff098200,
+0xafb40000,
+0xff098200,
+0xf7ba0000,
+0xff098200,
+0xafb30000,
+0xff098200,
+0xafb20000,
+0xff098200,
+0xf7b80000,
+0xff098200,
+0xafb10000,
+0xff098200,
+0xafb00000,
+0xff098200,
+0xf7b60000,
+0xff098200,
+0xf7b40000,
+0xff098200,
+0x24120000,
+0xff098200,
+0xff06000b,
+0x8c8d0000,
+0xff098200,
+0xafa60078,
+0x0080a021,
+0xafa40070,
+0x00a08021,
+0xae9d0000,
+0xff098200,
+0x8e930000,
+0xff098200,
+0xafa40014,
+0xafad0074,
+0x26730000,
+0xff098200,
+0xff06000d,
+0x8e8e0000,
+0xff098200,
+0xff000000,
+0x3c0f59c0,
+0x8e8d0000,
+0xff098200,
+0x448ff000,
+0x02509021,
+0x01b04823,
+0x024e9023,
+0x4600f7a1,
+0x240c0000,
+0xff098200,
+0x241e0000,
+0xff098200,
+0xae6c0000,
+0xff098200,
+0xff060024,
+0x8e0c0000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x15810000,
+0xff050825,
+0x8e080000,
+0xff098200,
+0xff060026,
+0xae120000,
+0xff098200,
+0x8d120000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000bb942,
+0x000d6880,
+0x32f707f8,
+0x026d6021,
+0x8d8c0000,
+0x01800008,
+0x02f0b821,
+0xff060027,
+0x27bdff90,
+0xafbf0000,
+0xff098200,
+0xafbe0000,
+0xff098200,
+0xf7be0000,
+0xff098200,
+0xafb70000,
+0xff098200,
+0xafb60000,
+0xff098200,
+0xf7bc0000,
+0xff098200,
+0xafb50000,
+0xff098200,
+0xafb40000,
+0xff098200,
+0xf7ba0000,
+0xff098200,
+0xafb30000,
+0xff098200,
+0xff000000,
+0xafb20000,
+0xff098200,
+0xf7b80000,
+0xff098200,
+0xafb10000,
+0xff098200,
+0xafb00000,
+0xff098200,
+0xf7b60000,
+0xff098200,
+0xf7b40000,
+0xff098200,
+0x0080a021,
+0x8c8c0000,
+0xff098200,
+0xafa40070,
+0x8e8d0000,
+0xff098200,
+0xafa40014,
+0x018d6023,
+0x8e8d0000,
+0xff098200,
+0xae9d0000,
+0xff098200,
+0xafac0078,
+0xafa0007c,
+0x00e0c821,
+0x00e0f809,
+0xafad0074,
+0x00408021,
+0x8e930000,
+0xff098200,
+0x24120000,
+0xff098200,
+0x14400000,
+0xff05080d,
+0x26730000,
+0xff098200,
+0x10000000,
+0xff050819,
+0x00000000,
+0xff060015,
+0x8e0c0000,
+0xff098200,
+0x02004021,
+0x01c08021,
+0x8dcd0000,
+0xff098200,
+0xff000000,
+0x2d810002,
+0xff000000,
+0x8d120000,
+0xff098200,
+0x02ea7021,
+0x8dad0000,
+0xff098200,
+0xff000000,
+0x14200000,
+0xff050801,
+0xff000000,
+0xadde0000,
+0xff098200,
+0x01800008,
+0x8db10000,
+0xff098200,
+0xff000000,
+0xff06000b,
+0x15800000,
+0xff050828,
+0x250dfff0,
+0x10000000,
+0xff050829,
+0x01b04823,
+0xff000000,
+0xff06002a,
+0x8e4bfffc,
+0x2505fff0,
+0xd6e00000,
+0x000bad42,
+0x000bb942,
+0x32b507f8,
+0x32f707f8,
+0x02156821,
+0xae900000,
+0xff098200,
+0x00ad3023,
+0x15a50000,
+0xff05082b,
+0xf4a00000,
+0x0217b821,
+0x10000000,
+0xff05082c,
+0xf6e00000,
+0xff06002d,
+0x26660000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0xacc90000,
+0xff098200,
+0x10000000,
+0xff050801,
+0xaccc0000,
+0xff098200,
+0xff06002e,
+0x26650000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0xaca80000,
+0xff098200,
+0x26660000,
+0xff098200,
+0xacac0000,
+0xff098200,
+0x240d0000,
+0xff098200,
+0xacc90000,
+0xff098200,
+0x10000000,
+0xff050801,
+0xaccd0000,
+0xff098200,
+0xff06002f,
+0x448c0000,
+0x46800021,
+0x26660000,
+0xff098200,
+0xf4c00000,
+0xff060030,
+0xff06000b,
+0xff000000,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x10400000,
+0xff050803,
+0x260d0000,
+0xff098200,
+0xd4400000,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000d,
+0x8e900000,
+0xff098200,
+0xae120000,
+0xff098200,
+0x020d9023,
+0x8e080000,
+0xff098200,
+0x10000000,
+0xff050826,
+0x24090010,
+0xff060031,
+0x26660000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0xacc90000,
+0xff098200,
+0x10000000,
+0xff050801,
+0xaccc0000,
+0xff098200,
+0xff060032,
+0x26650000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0xaca80000,
+0xff098200,
+0x26660000,
+0xff098200,
+0xacac0000,
+0xff098200,
+0x240d0000,
+0xff098200,
+0xacc90000,
+0xff098200,
+0xff000000,
+0x10000000,
+0xff050801,
+0xaccd0000,
+0xff098200,
+0xff060033,
+0x448c0000,
+0x46800021,
+0x26660000,
+0xff098200,
+0xf4c00000,
+0xff060034,
+0xff06000b,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x10400000,
+0xff050803,
+0xd6e00000,
+0x8e4b0000,
+0x26520004,
+0xf4400000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000d,
+0x260d0000,
+0xff098200,
+0x8e900000,
+0xff098200,
+0xae120000,
+0xff098200,
+0x020d9023,
+0x8e080000,
+0xff098200,
+0xf6000010,
+0x10000000,
+0xff050826,
+0x24090018,
+0xff060035,
+0x8e790000,
+0xff098200,
+0x2652fffc,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x316700ff,
+0x0320f809,
+0x02802021,
+0xff06000d,
+0x2c410002,
+0x10200000,
+0xff050836,
+0x00027023,
+0xff06000e,
+0x964a0000,
+0xff098200,
+0x26520004,
+0x3c0d0000,
+0xff090200,
+0xff000000,
+0x000a5080,
+0x014d5021,
+0x014e5024,
+0x024a9021,
+0xff06002c,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff060037,
+0x924d0000,
+0xff098200,
+0xd6e00000,
+0x000d68c0,
+0x020d6821,
+0x10000000,
+0xff05082c,
+0xf5a00000,
+0xff060038,
+0x8eec0000,
+0xff098200,
+0x2d810000,
+0xff098200,
+0x10000000,
+0xff05080e,
+0x00017023,
+0xff060039,
+0x8eec0000,
+0xff098200,
+0x2d810000,
+0xff098200,
+0x10000000,
+0xff05080e,
+0x242effff,
+0xff06003a,
+0x8e790000,
+0xff098200,
+0x2652fffc,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x10000000,
+0xff05080d,
+0x00000000,
+0xff06003b,
+0xff000000,
+0x8e790000,
+0xff098200,
+0x01602821,
+0x2652fffc,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x10000000,
+0xff05080d,
+0x00000000,
+0xff000000,
+0xff06003c,
+0x00c03821,
+0xff06003d,
+0x8e790000,
+0xff098200,
+0x316c00ff,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x02e02821,
+0xafac0010,
+0x0320f809,
+0x02802021,
+0x10400000,
+0xff05082c,
+0x00000000,
+0xff060036,
+0x00506823,
+0xac520000,
+0xff098200,
+0x02007021,
+0x25b20000,
+0xff098200,
+0x00408021,
+0x10000000,
+0xff050824,
+0x24090010,
+0xff06003e,
+0xff000000,
+0x0080a821,
+0xff000000,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0xff000000,
+0x14400000,
+0xff050836,
+0x00000000,
+0x10000000,
+0xff05083f,
+0x02a02021,
+0xff000000,
+0x10000000,
+0xff050836,
+0x00000000,
+0xff000000,
+0xff060025,
+0x8e790000,
+0xff098200,
+0xae8e0000,
+0xff098200,
+0x2605fff8,
+0xafb20014,
+0x02093021,
+0x0120a821,
+0x0320f809,
+0x02802021,
+0x8e080000,
+0xff098200,
+0x26a90008,
+0xae120000,
+0xff098200,
+0x8d120000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000bb942,
+0x000d6880,
+0x32f707f8,
+0x026d6021,
+0x8d8c0000,
+0x01800008,
+0x02f0b821,
+0xff060040,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0x26e5fff8,
+0xafb20014,
+0x02e93021,
+0x0120a821,
+0x0320f809,
+0x02802021,
+0x8e0d0000,
+0xff098200,
+0x8ee80000,
+0xff098200,
+0x10000000,
+0xff050841,
+0x26a90008,
+0xff060042,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0x02e02821,
+0xafb20014,
+0x0160a821,
+0x0320f809,
+0x02802021,
+0xff000000,
+0x32ac00ff,
+0x24010000,
+0xff098200,
+0xff000000,
+0x0015b942,
+0x00155402,
+0x32f707f8,
+0xff000000,
+0x11810000,
+0xff070800,
+0x000a50c0,
+0x10000000,
+0xff070800,
+0x00000000,
+0xff000000,
+0x10000000,
+0xff070800,
+0x000a50c0,
+0xff000000,
+0xff060043,
+0x11200000,
+0xff050844,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x2617fff8,
+0x8e120000,
+0xff098200,
+0x252a0008,
+0x02e97021,
+0xaee60000,
+0xff098200,
+0x260d0008,
+0x120e0000,
+0xff050845,
+0xaee40000,
+0xff098200,
+0xff06000b,
+0xd5a00000,
+0xf5a0fff8,
+0x15ae0000,
+0xff05080b,
+0x25ad0008,
+0x10000000,
+0xff050845,
+0x00000000,
+0xff060046,
+0x8e060000,
+0xff098200,
+0x240d0000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x2ccc0000,
+0xff098200,
+0x00cc680a,
+0x01a06827,
+0x000d68c0,
+0x010d6821,
+0x10000000,
+0xff050847,
+0xd5a00000,
+0xff098200,
+0xff060048,
+0x11200000,
+0xff050844,
+0x8e060000,
+0xff098200,
+0xff000000,
+0x8e040000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050806,
+0x24010000,
+0xff098200,
+0xff06000b,
+0x8c840000,
+0xff098200,
+0xff06000c,
+0x8e690000,
+0xff098200,
+0x10800000,
+0xff050849,
+0x24060000,
+0xff098200,
+0x8c8c0000,
+0xff098200,
+0x24060000,
+0xff098200,
+0x8d2d0000,
+0xff098200,
+0x8c8e0000,
+0xff098200,
+0x01ac6824,
+0x000d6140,
+0x000d68c0,
+0x018d6823,
+0x01cd7021,
+0x24010000,
+0xff098200,
+0xff06000d,
+0x8dc70000,
+0xff098200,
+0x8dcc0000,
+0xff098200,
+0x8dcf0000,
+0xff098200,
+0x14e10000,
+0xff050804,
+0x8dc50000,
+0xff098200,
+0x11890000,
+0xff050805,
+0x8dcd0000,
+0xff098200,
+0xff06000e,
+0xff000000,
+0x11e00000,
+0xff050849,
+0x01e07021,
+0x10000000,
+0xff05080d,
+0x00000000,
+0xff06000f,
+0x10be0000,
+0xff050849,
+0x00000000,
+0x00a03021,
+0x10000000,
+0xff050849,
+0x01a02021,
+0xff060010,
+0x10c10000,
+0xff05080b,
+0x2ccc0000,
+0xff098200,
+0x240d0000,
+0xff098200,
+0x00cc680a,
+0x01a06827,
+0x000d6880,
+0x026d6821,
+0x10000000,
+0xff05080c,
+0x8da40000,
+0xff098200,
+0xff06004a,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x14200000,
+0xff050844,
+0x8e070000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x8e050000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050844,
+0x24e70000,
+0xff098200,
+0x8c8d0000,
+0xff098200,
+0x908f0000,
+0xff098200,
+0x00ed0825,
+0x14200000,
+0xff050844,
+0xff000000,
+0x31e10000,
+0xff090200,
+0x10200000,
+0xff050849,
+0xac850000,
+0xff098200,
+0x8e6c0000,
+0xff098200,
+0x31ef0000,
+0xff090200,
+0xae640000,
+0xff098200,
+0xa08f0000,
+0xff098200,
+0x10000000,
+0xff050849,
+0xac8c0000,
+0xff098200,
+0xff06004b,
+0x8e070000,
+0xff098200,
+0x2d210010,
+0x8e050000,
+0xff098200,
+0x8e790000,
+0xff098200,
+0x24e70000,
+0xff098200,
+0x00270825,
+0x14200000,
+0xff050844,
+0x26060008,
+0x0320f809,
+0x02802021,
+0x10000000,
+0xff050847,
+0xd4400000,
+0xff06004c,
+0x8e040000,
+0xff098200,
+0x39210008,
+0x2c840000,
+0xff098200,
+0x0001200b,
+0x10800000,
+0xff050844,
+0xd6000000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff06004d,
+0x11200000,
+0xff050844,
+0xff000000,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x10c10000,
+0xff050849,
+0x8e6d0000,
+0xff098200,
+0x2ccc0000,
+0xff098200,
+0x2dad0001,
+0x018d6024,
+0x11800000,
+0xff050844,
+0xae900000,
+0xff098200,
+0x8e6c0000,
+0xff098200,
+0x8e6d0000,
+0xff098200,
+0x018d0823,
+0x04310000,
+0xff05084e,
+0xafb20014,
+0x8e790000,
+0xff098200,
+0x02802021,
+0x0320f809,
+0x02002821,
+0x24060000,
+0xff098200,
+0x10000000,
+0xff050849,
+0x00402021,
+0xff06004f,
+0x8e040000,
+0xff098200,
+0x8e050000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x02097021,
+0x24010000,
+0xff098200,
+0xadde0000,
+0xff098200,
+0x14810000,
+0xff050844,
+0x8e120000,
+0xff098200,
+0x8e790000,
+0xff098200,
+0xff000000,
+0xae900000,
+0xff098200,
+0xae900000,
+0xff098200,
+0x26060008,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x10400000,
+0xff050849,
+0x24060000,
+0xff098200,
+0xd6000008,
+0x2617fff8,
+0xd6020010,
+0x240a0000,
+0xff098200,
+0xf6e00000,
+0x10000000,
+0xff050845,
+0xf6e20008,
+0xff060050,
+0x11200000,
+0xff050844,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050844,
+0x8e120000,
+0xff098200,
+0xff000000,
+0x8c8e0000,
+0xff098200,
+0xd5000000,
+0xff098200,
+0x15c00000,
+0xff050844,
+0xff000000,
+0xd5000000,
+0xff098200,
+0xff000000,
+0x2617fff8,
+0xae1e0000,
+0xff098200,
+0x240a0000,
+0xff098200,
+0x10000000,
+0xff050845,
+0xf6e00000,
+0xff060051,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x8e070000,
+0xff098200,
+0x14200000,
+0xff050844,
+0xd60e0008,
+0x24c60000,
+0xff098200,
+0x2ce10000,
+0xff098200,
+0x240c0001,
+0x0006080b,
+0x448c6000,
+0x10200000,
+0xff050844,
+0x8e120000,
+0xff098200,
+0x46207024,
+0x46806321,
+0x8c8c0000,
+0xff098200,
+0x8c8d0000,
+0xff098200,
+0x440e0000,
+0x2617fff8,
+0x462c7380,
+0x25ce0001,
+0x01cc082b,
+0x000e78c0,
+0x01af7821,
+0x10200000,
+0xff050802,
+0xf6ee0000,
+0x8dee0000,
+0xff098200,
+0xd5e00000,
+0xff06000b,
+0x11de0000,
+0xff050845,
+0x240a0000,
+0xff098200,
+0x240a0000,
+0xff098200,
+0x10000000,
+0xff050845,
+0xf6e00008,
+0xff06000c,
+0x8c8c0000,
+0xff098200,
+0xff000000,
+0x8e790000,
+0xff098200,
+0x11800000,
+0xff050845,
+0x240a0000,
+0xff098200,
+0x0320f809,
+0x01c02821,
+0x10400000,
+0xff050845,
+0x240a0000,
+0xff098200,
+0x8c4e0000,
+0xff098200,
+0x10000000,
+0xff05080b,
+0xd4400000,
+0xff060052,
+0x11200000,
+0xff050844,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050844,
+0x8e120000,
+0xff098200,
+0xff000000,
+0x8c8e0000,
+0xff098200,
+0xd5000000,
+0xff098200,
+0x15c00000,
+0xff050844,
+0xff000000,
+0xd5000000,
+0xff098200,
+0xff000000,
+0x2617fff8,
+0xae000000,
+0xff098200,
+0xae000000,
+0xff098200,
+0x240a0000,
+0xff098200,
+0x10000000,
+0xff050845,
+0xf6e00000,
+0xff060053,
+0x926f0000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x02007021,
+0x26100008,
+0x000f7802,
+0xff0900a6,
+0x31ef0001,
+0x25f20000,
+0xff098200,
+0x10000000,
+0xff050824,
+0x2529fff8,
+0xff060054,
+0x2d210010,
+0x8e070000,
+0xff098200,
+0x14200000,
+0xff050844,
+0xd60e0008,
+0xd60c0000,
+0x926d0000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x02007021,
+0x14e10000,
+0xff050844,
+0x26100010,
+0x000f7802,
+0xff0900a6,
+0xf5ce0000,
+0x31ef0001,
+0xf5cc0008,
+0x25f20000,
+0xff098200,
+0x10000000,
+0xff050824,
+0x2529fff0,
+0xff060055,
+0x11200000,
+0xff050844,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0xff000000,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050844,
+0x908c0000,
+0xff098200,
+0x8c8d0000,
+0xff098200,
+0x8c850000,
+0xff098200,
+0x8c8e0000,
+0xff098200,
+0x258f0000,
+0xff098200,
+0x1de00000,
+0xff050844,
+0x01c57026,
+0x15a00000,
+0xff050844,
+0x01cc0825,
+0x8c8c0000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x8e120000,
+0xff098200,
+0x00a97021,
+0x018e082b,
+0x14200000,
+0xff050844,
+0xafb20014,
+0xae900000,
+0xff098200,
+0xff06000b,
+0x26100008,
+0x2529fff8,
+0x25cefff8,
+0xac8e0000,
+0xff098200,
+0x02096821,
+0x00a03021,
+0xae900000,
+0xff098200,
+0xff06000c,
+0xd6000000,
+0x020d082b,
+0x10200000,
+0xff050803,
+0x26100008,
+0xf4c00000,
+0x10000000,
+0xff05080c,
+0x24c60008,
+0xff06000d,
+0x04110000,
+0xff050821,
+0x0080b821,
+0xff06000e,
+0xff000000,
+0x8eee0000,
+0xff098200,
+0x2c410000,
+0xff098200,
+0x8eef0000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0x8e900000,
+0xff098200,
+0xae6c0000,
+0xff098200,
+0x10200000,
+0xff050808,
+0x01ee5023,
+0x8e8c0000,
+0xff098200,
+0x11400000,
+0xff050806,
+0x020a6821,
+0x018d082b,
+0x14200000,
+0xff050809,
+0x01ca7821,
+0xaeee0000,
+0xff098200,
+0x02006821,
+0xff06000f,
+0xd5c00000,
+0x25ce0008,
+0x01cf082b,
+0xf5a00000,
+0x14200000,
+0xff05080f,
+0x25ad0008,
+0xff060010,
+0x324c0000,
+0xff090200,
+0x240d0000,
+0xff098200,
+0x2617fff8,
+0xae0d0000,
+0xff098200,
+0x254a0010,
+0xff060011,
+0xafb20014,
+0x11800000,
+0xff050817,
+0x0140a821,
+0x10000000,
+0xff050818,
+0x00000000,
+0xff060012,
+0x25effff8,
+0x240d0000,
+0xff098200,
+0xd5e00000,
+0xaeef0000,
+0xff098200,
+0xff000000,
+0x240a0000,
+0xff098200,
+0xae0d0000,
+0xff098200,
+0x2617fff8,
+0xf6000000,
+0x10000000,
+0xff050811,
+0x324c0000,
+0xff090200,
+0xff060013,
+0x8e790000,
+0xff098200,
+0x000a28c2,
+0x0320f809,
+0x02802021,
+0x10000000,
+0xff05080e,
+0x24020000,
+0xff060056,
+0x8d040000,
+0xff098200,
+0x908c0000,
+0xff098200,
+0x8c8d0000,
+0xff098200,
+0x8c850000,
+0xff098200,
+0x8c8e0000,
+0xff098200,
+0x258f0000,
+0xff098200,
+0x1de00000,
+0xff050844,
+0x01c57026,
+0x15a00000,
+0xff050844,
+0x01cc0825,
+0x8c8c0000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x8e120000,
+0xff098200,
+0x00a97021,
+0x018e082b,
+0x14200000,
+0xff050844,
+0xafb20014,
+0xae900000,
+0xff098200,
+0xff06000b,
+0xac8e0000,
+0xff098200,
+0xff000000,
+0x02096821,
+0x00a03021,
+0xae900000,
+0xff098200,
+0xff06000c,
+0xd6000000,
+0x020d082b,
+0x10200000,
+0xff050803,
+0x26100008,
+0xf4c00000,
+0x10000000,
+0xff05080c,
+0x24c60008,
+0xff06000d,
+0x04110000,
+0xff050821,
+0x0080b821,
+0xff06000e,
+0x8eee0000,
+0xff098200,
+0x2c410000,
+0xff098200,
+0x8eef0000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0x8e900000,
+0xff098200,
+0xae6c0000,
+0xff098200,
+0x10200000,
+0xff050808,
+0x01ee5023,
+0x8e8c0000,
+0xff098200,
+0x11400000,
+0xff050806,
+0x020a6821,
+0x018d082b,
+0x14200000,
+0xff050809,
+0x01ca7821,
+0xaeee0000,
+0xff098200,
+0x02006821,
+0xff06000f,
+0xd5c00000,
+0x25ce0008,
+0x01cf082b,
+0xf5a00000,
+0x14200000,
+0xff05080f,
+0x25ad0008,
+0xff060010,
+0x324c0000,
+0xff090200,
+0x0200b821,
+0x254a0008,
+0xff060011,
+0xff000000,
+0xafb20014,
+0x11800000,
+0xff050817,
+0x0140a821,
+0x10000000,
+0xff050818,
+0x00000000,
+0xff060012,
+0x8e790000,
+0xff098200,
+0x02e02821,
+0x0320f809,
+0x02802021,
+0xff060013,
+0x8e790000,
+0xff098200,
+0x000a28c2,
+0x0320f809,
+0x02802021,
+0x10000000,
+0xff05080e,
+0x24020000,
+0xff060057,
+0x8e8c0000,
+0xff098200,
+0x02096821,
+0xae900000,
+0xff098200,
+0x318c0000,
+0xff090200,
+0xae8d0000,
+0xff098200,
+0x11800000,
+0xff050844,
+0x24020000,
+0xff098200,
+0xae800000,
+0xff098200,
+0x10000000,
+0xff05081a,
+0xa2820000,
+0xff098200,
+0xff060058,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x46206005,
+0xff060047,
+0xff000000,
+0x8e120000,
+0xff098200,
+0x2617fff8,
+0x10000000,
+0xff050859,
+0xf600fff8,
+0xff060049,
+0x8e120000,
+0xff098200,
+0xae060000,
+0xff098200,
+0x2617fff8,
+0xae040000,
+0xff098200,
+0xff060059,
+0x240a0000,
+0xff098200,
+0xff060045,
+0x324c0000,
+0xff090200,
+0x15800000,
+0xff050818,
+0x0140a821,
+0x8e4bfffc,
+0x000b4542,
+0x310807f8,
+0xff06000f,
+0x0148082b,
+0x14200000,
+0xff050806,
+0x000b6142,
+0x318c07f8,
+0x8e4b0000,
+0x26520004,
+0x02ec8023,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff060010,
+0x02ea6821,
+0x254a0008,
+0x10000000,
+0xff05080f,
+0xadbe0000,
+0xff098200,
+0xff06005a,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x04110000,
+0xff05085b,
+0x00000000,
+0x10000000,
+0xff050847,
+0xff000000,
+0x00000000,
+0xff06005c,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x04110000,
+0xff05085d,
+0x00000000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff06005e,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff06005f,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060060,
+0x8e060000,
+0xff098200,
+0xff000000,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060061,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060062,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060063,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0xff000000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060064,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060065,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060066,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0xff000000,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060067,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060068,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060069,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x00000000,
+0x0320f809,
+0xd60c0000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff06006a,
+0xff000000,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x14200000,
+0xff050844,
+0x8e070000,
+0xff098200,
+0xd60c0000,
+0xd60e0008,
+0x2ccc0000,
+0xff098200,
+0x2ced0000,
+0xff098200,
+0x018d6024,
+0x11800000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x0320f809,
+0x00000000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff06006b,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x14200000,
+0xff050844,
+0x8e070000,
+0xff098200,
+0xd60c0000,
+0xd60e0008,
+0x2ccc0000,
+0xff098200,
+0x2ced0000,
+0xff098200,
+0x018d6024,
+0x11800000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x0320f809,
+0x00000000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff06006c,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x14200000,
+0xff050844,
+0x8e070000,
+0xff098200,
+0xd60c0000,
+0xd60e0008,
+0x2ccc0000,
+0xff098200,
+0x2ced0000,
+0xff098200,
+0xff000000,
+0x018d6024,
+0x11800000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x0320f809,
+0x00000000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff06006d,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x46206004,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff06006e,
+0xff06006f,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0xd50e0000,
+0xff098200,
+0x10000000,
+0xff050847,
+0x462e6002,
+0xff060070,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x14200000,
+0xff050844,
+0x8e070000,
+0xff098200,
+0xd60c0000,
+0xd60e0008,
+0x2ccc0000,
+0xff098200,
+0x2ced0000,
+0xff098200,
+0xff000000,
+0x018d6024,
+0x11800000,
+0xff050844,
+0x462073a4,
+0x8e790000,
+0xff098200,
+0x44067000,
+0x0320f809,
+0x00000000,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060071,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x8e120000,
+0xff098200,
+0x0320f809,
+0x26660000,
+0xff098200,
+0x8e6d0000,
+0xff098200,
+0x2617fff8,
+0x448d7000,
+0xf6e00000,
+0x468073a1,
+0xf6ee0008,
+0x10000000,
+0xff050845,
+0x240a0000,
+0xff098200,
+0xff060072,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x8e120000,
+0xff098200,
+0x0320f809,
+0x2606fff8,
+0x2617fff8,
+0xf6000000,
+0x10000000,
+0xff050845,
+0x240a0000,
+0xff098200,
+0xff060073,
+0xff000000,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd6000000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x02097021,
+0x260d0008,
+0x11ae0000,
+0xff050847,
+0xff06000b,
+0x8da60000,
+0xff098200,
+0xd5ac0000,
+0x25ad0008,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x462c0034,
+0x15ae0000,
+0xff05080b,
+0x46206011,
+0x10000000,
+0xff050847,
+0x00000000,
+0xff060074,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd6000000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x02097021,
+0x260d0008,
+0x11ae0000,
+0xff050847,
+0xff06000b,
+0x8da60000,
+0xff098200,
+0xd5ac0000,
+0x25ad0008,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x46206034,
+0x15ae0000,
+0xff05080b,
+0x46206011,
+0x10000000,
+0xff050847,
+0xff000000,
+0x00000000,
+0xff060075,
+0x11200000,
+0xff050844,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050844,
+0x00000000,
+0x10000000,
+0xff050876,
+0x8c820000,
+0xff098200,
+0xff060077,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x39210008,
+0x24c60000,
+0xff098200,
+0x00260825,
+0x14200000,
+0xff050844,
+0x00000000,
+0x8c8c0000,
+0xff098200,
+0x908d0000,
+0xff098200,
+0x2617fff8,
+0x000c502b,
+0x448d0000,
+0x254a0001,
+0x46800021,
+0x8e120000,
+0xff098200,
+0x000a50c0,
+0x10000000,
+0xff050845,
+0xf6e00000,
+0xff060078,
+0x8e6c0000,
+0xff098200,
+0x8e6d0000,
+0xff098200,
+0x018d0823,
+0x04310000,
+0xff05084e,
+0x8e060000,
+0xff098200,
+0xd60c0000,
+0x24010008,
+0x15210000,
+0xff050844,
+0xff000000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x24060001,
+0x46206324,
+0x27a50010,
+0x2d810100,
+0x440c6000,
+0x10200000,
+0xff050844,
+0xafac0010,
+0xff060079,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x8e900000,
+0xff098200,
+0x00402021,
+0x10000000,
+0xff050849,
+0x24060000,
+0xff098200,
+0xff06007a,
+0x8e6c0000,
+0xff098200,
+0x8e6d0000,
+0xff098200,
+0x018d0823,
+0x04310000,
+0xff05084e,
+0x2521fff0,
+0x8e060000,
+0xff098200,
+0xd6000010,
+0x8e0c0000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x04200000,
+0xff050844,
+0x8e050000,
+0xff098200,
+0xd6020008,
+0x10200000,
+0xff050801,
+0x2407ffff,
+0x46200024,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x44070000,
+0xff06000b,
+0x2ca10000,
+0xff098200,
+0xff000000,
+0x10200000,
+0xff050844,
+0x24010000,
+0xff098200,
+0x462010a4,
+0x15810000,
+0xff050844,
+0x8c850000,
+0xff098200,
+0x44061000,
+0x00e0082a,
+0x24ac0001,
+0x00ec6821,
+0x00c0782a,
+0x01a1380b,
+0x00cc6821,
+0x01af300b,
+0x240e0001,
+0x00e0082a,
+0x0006782a,
+0x0001380b,
+0x01cf300a,
+0x00a7082a,
+0x00a1380b,
+0x00862821,
+0x00e63023,
+0x24a50000,
+0xff098200,
+0x04c10000,
+0xff050879,
+0x24c60001,
+0xff06007b,
+0x26640000,
+0xff098200,
+0x10000000,
+0xff050849,
+0x24060000,
+0xff098200,
+0xff06007c,
+0x8e6c0000,
+0xff098200,
+0x8e6d0000,
+0xff098200,
+0x018d0823,
+0x04310000,
+0xff05084e,
+0x8e0c0000,
+0xff098200,
+0x2d210010,
+0x8e070000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x258c0000,
+0xff098200,
+0xd6000008,
+0x002c0825,
+0x14200000,
+0xff050844,
+0x2ce10000,
+0xff098200,
+0x46200024,
+0x10200000,
+0xff050844,
+0x8c8c0000,
+0xff098200,
+0x44060000,
+0x8e6d0000,
+0xff098200,
+0xff000000,
+0x24010001,
+0x18c00000,
+0xff05087b,
+0x002c082b,
+0x11800000,
+0xff05087b,
+0x01a6602b,
+0x002c0825,
+0x8e650000,
+0xff098200,
+0x14200000,
+0xff050844,
+0x908c0000,
+0xff098200,
+0x00a67021,
+0xff06000b,
+0x25ceffff,
+0x00ae082b,
+0x14200000,
+0xff05080b,
+0xa1cc0000,
+0x10000000,
+0xff050879,
+0x00000000,
+0xff06007d,
+0x8e6c0000,
+0xff098200,
+0x8e6d0000,
+0xff098200,
+0x018d0823,
+0x04310000,
+0xff05084e,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050844,
+0x8e6d0000,
+0xff098200,
+0x8c860000,
+0xff098200,
+0x24840000,
+0xff098200,
+0x8e650000,
+0xff098200,
+0x01a6082b,
+0x14200000,
+0xff050844,
+0x00867821,
+0x00a63821,
+0xff06000b,
+0xff000000,
+0x908d0000,
+0x008f082b,
+0x10200000,
+0xff050879,
+0x24840001,
+0x24e7ffff,
+0x10000000,
+0xff05080b,
+0xa0ed0000,
+0xff06007e,
+0x8e6c0000,
+0xff098200,
+0x8e6d0000,
+0xff098200,
+0x018d0823,
+0x04310000,
+0xff05084e,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050844,
+0x8e6d0000,
+0xff098200,
+0x8c860000,
+0xff098200,
+0x24840000,
+0xff098200,
+0x8e650000,
+0xff098200,
+0x01a6082b,
+0x14200000,
+0xff050844,
+0x00867821,
+0x00a03821,
+0xff06000b,
+0x908d0000,
+0x008f082b,
+0x10200000,
+0xff050879,
+0x25acffbf,
+0x39ae0020,
+0x2d81001a,
+0x01c1680b,
+0x24840001,
+0xa0ed0000,
+0x10000000,
+0xff05080b,
+0x24e70001,
+0xff06007f,
+0x8e6c0000,
+0xff098200,
+0x8e6d0000,
+0xff098200,
+0x018d0823,
+0x04310000,
+0xff05084e,
+0xff000000,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x11200000,
+0xff050844,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050844,
+0x8e6d0000,
+0xff098200,
+0x8c860000,
+0xff098200,
+0x24840000,
+0xff098200,
+0x8e650000,
+0xff098200,
+0x01a6082b,
+0x14200000,
+0xff050844,
+0x00867821,
+0x00a03821,
+0xff06000b,
+0x908d0000,
+0x008f082b,
+0x10200000,
+0xff050879,
+0x25acff9f,
+0x39ae0020,
+0x2d81001a,
+0x01c1680b,
+0x24840001,
+0xa0ed0000,
+0x10000000,
+0xff05080b,
+0x24e70001,
+0xff060080,
+0x11200000,
+0xff050844,
+0x8e060000,
+0xff098200,
+0x8e040000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x14c10000,
+0xff050844,
+0x8e790000,
+0xff098200,
+0x0320f809,
+0x00000000,
+0x10000000,
+0xff050876,
+0x00000000,
+0xff060081,
+0x8e060000,
+0xff098200,
+0xff000000,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x463e6300,
+0x44026000,
+0x260d0008,
+0x02097021,
+0xff06000b,
+0x8da70000,
+0xff098200,
+0x11ae0000,
+0xff050876,
+0xd5ac0000,
+0x2ce10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x463e6300,
+0x44056000,
+0x00451024,
+0x10000000,
+0xff05080b,
+0x25ad0008,
+0xff060082,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x463e6300,
+0x44026000,
+0x260d0008,
+0x02097021,
+0xff06000b,
+0x8da70000,
+0xff098200,
+0x11ae0000,
+0xff050876,
+0xd5ac0000,
+0x2ce10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x463e6300,
+0x44056000,
+0x00451025,
+0x10000000,
+0xff05080b,
+0x25ad0008,
+0xff060083,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xff000000,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x463e6300,
+0x44026000,
+0x260d0008,
+0x02097021,
+0xff06000b,
+0x8da70000,
+0xff098200,
+0x11ae0000,
+0xff050876,
+0xd5ac0000,
+0x2ce10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x463e6300,
+0x44056000,
+0x00451026,
+0x10000000,
+0xff05080b,
+0x25ad0008,
+0xff060084,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x463e6300,
+0x44026000,
+0x00026602,
+0x00027202,
+0x00026e00,
+0x31ceff00,
+0x018d6025,
+0x3042ff00,
+0x018e6025,
+0x00021200,
+0x10000000,
+0xff050876,
+0x01821025,
+0xff060085,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x463e6300,
+0x44026000,
+0x10000000,
+0xff050876,
+0x00401027,
+0xff060086,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x14200000,
+0xff050844,
+0xff000000,
+0x8e070000,
+0xff098200,
+0xd60c0000,
+0xd60e0008,
+0x2ccc0000,
+0xff098200,
+0x2ced0000,
+0xff098200,
+0x018d6024,
+0x11800000,
+0xff050844,
+0x463e6300,
+0x463e7380,
+0x44046000,
+0x44057000,
+0x10000000,
+0xff050876,
+0x00a41004,
+0xff060087,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x14200000,
+0xff050844,
+0x8e070000,
+0xff098200,
+0xd60c0000,
+0xd60e0008,
+0x2ccc0000,
+0xff098200,
+0x2ced0000,
+0xff098200,
+0x018d6024,
+0x11800000,
+0xff050844,
+0x463e6300,
+0x463e7380,
+0x44046000,
+0x44057000,
+0x10000000,
+0xff050876,
+0x00a41006,
+0xff060088,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x14200000,
+0xff050844,
+0x8e070000,
+0xff098200,
+0xd60c0000,
+0xd60e0008,
+0x2ccc0000,
+0xff098200,
+0x2ced0000,
+0xff098200,
+0x018d6024,
+0x11800000,
+0xff050844,
+0x463e6300,
+0x463e7380,
+0x44046000,
+0x44057000,
+0x10000000,
+0xff050876,
+0x00a41007,
+0xff060089,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0xff000000,
+0x14200000,
+0xff050844,
+0x8e070000,
+0xff098200,
+0xd60c0000,
+0xd60e0008,
+0x2ccc0000,
+0xff098200,
+0x2ced0000,
+0xff098200,
+0x018d6024,
+0x11800000,
+0xff050844,
+0x463e6300,
+0x463e7380,
+0x44046000,
+0x44057000,
+0x24010020,
+0x00256023,
+0x00a42804,
+0x01842006,
+0x10000000,
+0xff050876,
+0x00851025,
+0xff06008a,
+0x2d210010,
+0x8e060000,
+0xff098200,
+0x14200000,
+0xff050844,
+0x8e070000,
+0xff098200,
+0xd60c0000,
+0xd60e0008,
+0x2ccc0000,
+0xff098200,
+0x2ced0000,
+0xff098200,
+0x018d6024,
+0x11800000,
+0xff050844,
+0x463e6300,
+0x463e7380,
+0x44046000,
+0x44057000,
+0x24010020,
+0x00256023,
+0x00a42806,
+0x01842004,
+0x10000000,
+0xff050876,
+0x00851025,
+0xff06008b,
+0x8e060000,
+0xff098200,
+0x11200000,
+0xff050844,
+0xd60c0000,
+0x2cc10000,
+0xff098200,
+0x10200000,
+0xff050844,
+0x463e6300,
+0x44026000,
+0xff060076,
+0x44820000,
+0x10000000,
+0xff050847,
+0x46800021,
+0xff060044,
+0x8d0f0000,
+0xff098200,
+0xff000000,
+0x02096821,
+0x8e120000,
+0xff098200,
+0x25ac0000,
+0xff098200,
+0x8e8e0000,
+0xff098200,
+0xafb20014,
+0x01cc082b,
+0xae900000,
+0xff098200,
+0xae8d0000,
+0xff098200,
+0x14200000,
+0xff050805,
+0x01e0c821,
+0x01e0f809,
+0x02802021,
+0x8e900000,
+0xff098200,
+0x000250c0,
+0x1c400000,
+0xff050845,
+0x2617fff8,
+0xff06000b,
+0x8e8c0000,
+0xff098200,
+0x8e080000,
+0xff098200,
+0x14400000,
+0xff050829,
+0x01904823,
+0x8d120000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000bb942,
+0x000d6880,
+0x32f707f8,
+0x026d6021,
+0x8d8c0000,
+0x01800008,
+0x02f0b821,
+0xff060029,
+0x324c0000,
+0xff090200,
+0x2401fffc,
+0x15800000,
+0xff050803,
+0x02416824,
+0x924d0000,
+0xff098200,
+0x000d68c0,
+0xff06000d,
+0x10000000,
+0xff050824,
+0x020d7023,
+0xff06000f,
+0x8e790000,
+0xff098200,
+0x24050000,
+0xff098200,
+0x0320f809,
+0x02802021,
+0x8e900000,
+0xff098200,
+0xff000000,
+0x10000000,
+0xff05080b,
+0x24020000,
+0xff06004e,
+0x03e0a821,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0x02096021,
+0xafb20014,
+0xae8c0000,
+0xff098200,
+0x0320f809,
+0x02802021,
+0x8e900000,
+0xff098200,
+0x02a0f821,
+0x8e8c0000,
+0xff098200,
+0x8e080000,
+0xff098200,
+0x03e00008,
+0x01904823,
+0xff06008c,
+0xff000000,
+0x926f0000,
+0xff098200,
+0x31e10000,
+0xff090200,
+0x14200000,
+0xff050805,
+0x8e6e0000,
+0xff098200,
+0x31e10000,
+0xff090200,
+0x14200000,
+0xff050801,
+0x25ceffff,
+0x31e10000,
+0xff090200,
+0x10200000,
+0xff050801,
+0x00000000,
+0x10000000,
+0xff050801,
+0xae6e0000,
+0xff098200,
+0xff000000,
+0xff06008d,
+0x926f0000,
+0xff098200,
+0x31e10000,
+0xff090200,
+0x10200000,
+0xff050801,
+0xff06000f,
+0x8d810000,
+0xff098200,
+0x00200008,
+0x00000000,
+0xff06008e,
+0x926f0000,
+0xff098200,
+0x8e6e0000,
+0xff098200,
+0x31e10000,
+0xff090200,
+0x14200000,
+0xff05080f,
+0x31e10000,
+0xff090200,
+0x10200000,
+0xff05080f,
+0x25ceffff,
+0x11c00000,
+0xff050801,
+0xae6e0000,
+0xff098200,
+0x31e10000,
+0xff090200,
+0x10200000,
+0xff05080f,
+0xff06000b,
+0x8e790000,
+0xff098200,
+0xafb50010,
+0x02402821,
+0xae900000,
+0xff098200,
+0x0320f809,
+0x02802021,
+0xff06000d,
+0x8e900000,
+0xff098200,
+0xff06000e,
+0xff000000,
+0x8e4bfffc,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0xff098200,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06008f,
+0x26520004,
+0x10000000,
+0xff05080e,
+0x8d150000,
+0xff098200,
+0xff060090,
+0xff000000,
+0x8e0d0000,
+0xff098200,
+0x26640000,
+0xff098200,
+0xafb20014,
+0x8dad0000,
+0xff098200,
+0x02402821,
+0xae740000,
+0xff098200,
+0x91ad0000,
+0xff098200,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0x000d68c0,
+0x020d6821,
+0x0320f809,
+0xae8d0000,
+0xff098200,
+0x10000000,
+0xff05080d,
+0x00000000,
+0xff000000,
+0xff060091,
+0xff000000,
+0x10000000,
+0xff050801,
+0xff000000,
+0x02402821,
+0xff060092,
+0xff000000,
+0x36450001,
+0xff06000b,
+0xff000000,
+0x8e790000,
+0xff098200,
+0x02096021,
+0xafb20014,
+0xae900000,
+0xff098200,
+0x02f0b823,
+0xae8c0000,
+0xff098200,
+0x0320f809,
+0x02802021,
+0x8e900000,
+0xff098200,
+0x8e8c0000,
+0xff098200,
+0xafa00014,
+0x01904823,
+0x0217b821,
+0x8e080000,
+0xff098200,
+0x00400008,
+0x8e4bfffc,
+0xff060093,
+0xff000000,
+0x27bd0000,
+0xff098200,
+0xf7a00000,
+0xff098200,
+0xafa00000,
+0xff098200,
+0xafa10000,
+0xff098200,
+0xf7a20000,
+0xff098200,
+0xafa20000,
+0xff098200,
+0xafa30000,
+0xff098200,
+0xf7a40000,
+0xff098200,
+0xafa40000,
+0xff098200,
+0xafa50000,
+0xff098200,
+0xf7a60000,
+0xff098200,
+0xafa60000,
+0xff098200,
+0xafa70000,
+0xff098200,
+0xf7a80000,
+0xff098200,
+0xafa80000,
+0xff098200,
+0xafa90000,
+0xff098200,
+0xf7aa0000,
+0xff098200,
+0xafaa0000,
+0xff098200,
+0xafab0000,
+0xff098200,
+0xf7ac0000,
+0xff098200,
+0xafac0000,
+0xff098200,
+0xafad0000,
+0xff098200,
+0xf7ae0000,
+0xff098200,
+0xff000000,
+0xafae0000,
+0xff098200,
+0xafaf0000,
+0xff098200,
+0xf7b00000,
+0xff098200,
+0xafb00000,
+0xff098200,
+0xafb10000,
+0xff098200,
+0xf7b20000,
+0xff098200,
+0xafb20000,
+0xff098200,
+0xafb30000,
+0xff098200,
+0xf7b40000,
+0xff098200,
+0xafb40000,
+0xff098200,
+0xafb50000,
+0xff098200,
+0xf7b60000,
+0xff098200,
+0xafb60000,
+0xff098200,
+0xafb70000,
+0xff098200,
+0xf7b80000,
+0xff098200,
+0xafb80000,
+0xff098200,
+0xafb90000,
+0xff098200,
+0xf7ba0000,
+0xff098200,
+0xafba0000,
+0xff098200,
+0xafbb0000,
+0xff098200,
+0xf7bc0000,
+0xff098200,
+0xafbc0000,
+0xff098200,
+0xf7be0000,
+0xff098200,
+0xff000000,
+0xafbe0000,
+0xff098200,
+0xafa00000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0x27ae0000,
+0xff098200,
+0x27d30000,
+0xff098200,
+0x8dcd0000,
+0xae6c0000,
+0xff098200,
+0xafae0000,
+0xff098200,
+0x8e740000,
+0xff098200,
+0x8e700000,
+0xff098200,
+0x8e790000,
+0xff098200,
+0xae740000,
+0xff098200,
+0xae7f0000,
+0xff098200,
+0xae6d0000,
+0xff098200,
+0x26640000,
+0xff098200,
+0xae900000,
+0xff098200,
+0x0320f809,
+0x27a50010,
+0x8e8d0000,
+0xff098200,
+0x2401fffc,
+0x8e900000,
+0xff098200,
+0x01a1e824,
+0x8fb20014,
+0x10000000,
+0xff050801,
+0xafb40070,
+0xff000000,
+0xff060094,
+0xff000000,
+0x8fb40070,
+0x27d30000,
+0xff098200,
+0xff06000b,
+0x04400000,
+0xff050803,
+0x8e0d0000,
+0xff098200,
+0x3c0f59c0,
+0x0002a8c0,
+0x241e0000,
+0xff098200,
+0xafb50010,
+0x448ff000,
+0x8dad0000,
+0xff098200,
+0xae600000,
+0xff098200,
+0x8db10000,
+0xff098200,
+0x4600f7a1,
+0x8e4b0000,
+0x26520004,
+0xae7e0000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x2dae0000,
+0xff098200,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x11c00000,
+0xff050802,
+0x32f707f8,
+0x00200008,
+0x000a50c0,
+0xff06000c,
+0x26a9fff8,
+0x00200008,
+0x02f0b820,
+0xff06000d,
+0x8e790000,
+0xff098200,
+0x00022823,
+0x0320f809,
+0x02802021,
+0xff000000,
+0xff06005b,
+0x3c0c4330,
+0x44802000,
+0x448c2800,
+0x46206085,
+0x44016800,
+0x46241034,
+0x46241000,
+0x45000000,
+0xff050801,
+0x46240001,
+0x0020082a,
+0x3c0c3ff0,
+0x46200087,
+0x448c2800,
+0x46211013,
+0x46206034,
+0x46240081,
+0x03e00008,
+0x46211011,
+0xff06000b,
+0x03e00008,
+0x46206006,
+0xff06005d,
+0x3c0c4330,
+0x44802000,
+0x448c2800,
+0x46206085,
+0x44016800,
+0x46241034,
+0x46241000,
+0x45000000,
+0xff050801,
+0x46240001,
+0x0020082a,
+0x3c0cbff0,
+0x46200087,
+0x448c2800,
+0x46211013,
+0x462c0034,
+0x46240081,
+0x03e00008,
+0x46211011,
+0xff06000b,
+0x03e00008,
+0x46206006,
+0xff060095,
+0xff000000,
+0x3c0c4330,
+0x44802000,
+0x448c2800,
+0x46206085,
+0x44016800,
+0x46241034,
+0x46241000,
+0x45000000,
+0xff050801,
+0x46240001,
+0x0020082a,
+0x3c0c3ff0,
+0x448c2800,
+0x46201034,
+0x46240081,
+0x46211011,
+0x46200087,
+0x03e00008,
+0x46211013,
+0xff06000b,
+0x03e00008,
+0x46206006,
+0xff000000,
+0xff060096,
+0xff000000,
+0x27bdff90,
+0xafbf0000,
+0xff098200,
+0xafbe0000,
+0xff098200,
+0xf7be0000,
+0xff098200,
+0xafb70000,
+0xff098200,
+0xafb60000,
+0xff098200,
+0xf7bc0000,
+0xff098200,
+0xafb50000,
+0xff098200,
+0xafb40000,
+0xff098200,
+0xf7ba0000,
+0xff098200,
+0xafb30000,
+0xff098200,
+0xafb20000,
+0xff098200,
+0xf7b80000,
+0xff098200,
+0xafb10000,
+0xff098200,
+0xafb00000,
+0xff098200,
+0xf7b60000,
+0xff098200,
+0xf7b40000,
+0xff098200,
+0x8c520000,
+0xff098200,
+0x24530000,
+0xff098200,
+0x8e790000,
+0xff098200,
+0xae410000,
+0xff098200,
+0xae440000,
+0xff098200,
+0xae450000,
+0xff098200,
+0xf64c0000,
+0xff098200,
+0xff000000,
+0xae460000,
+0xff098200,
+0xae470000,
+0xff098200,
+0xf64e0000,
+0xff098200,
+0x27ac0000,
+0xff098200,
+0xae4c0000,
+0xff098200,
+0xafa00014,
+0x03a02821,
+0x0320f809,
+0x02402021,
+0x8c500000,
+0xff098200,
+0x8c490000,
+0xff098200,
+0x0040a021,
+0x3c0f59c0,
+0x8e080000,
+0xff098200,
+0x448ff000,
+0x240c0000,
+0xff098200,
+0x241e0000,
+0xff098200,
+0x01304823,
+0xae6c0000,
+0xff098200,
+0x4600f7a1,
+0x8d120000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000bb942,
+0x000d6880,
+0x32f707f8,
+0x026d6021,
+0x8d8c0000,
+0x01800008,
+0x02f0b821,
+0xff000000,
+0xff060028,
+0xff000000,
+0x8e790000,
+0xff098200,
+0x8e720000,
+0xff098200,
+0xae900000,
+0xff098200,
+0xae880000,
+0xff098200,
+0xae540000,
+0xff098200,
+0x02e02821,
+0x0320f809,
+0x02402021,
+0x8e420000,
+0xff098200,
+0xd6400000,
+0xff098200,
+0x8e430000,
+0xff098200,
+0x10000000,
+0xff05081a,
+0xd6420000,
+0xff098200,
+0xff000000,
+0xff060097,
+0xff000000,
+0x8c8d0000,
+0xff098200,
+0x90850000,
+0xff098200,
+0x03a07021,
+0x03ade823,
+0xaddffffc,
+0x00052880,
+0xadd0fff8,
+0xadc4fff4,
+0x01c08021,
+0x248d0000,
+0xff098200,
+0x27ae0010,
+0x10a00000,
+0xff050802,
+0x01a57821,
+0xff06000b,
+0x8dac0000,
+0x25ad0004,
+0x01af082b,
+0xadcc0000,
+0x14200000,
+0xff05080b,
+0x25ce0004,
+0xff06000c,
+0x8c990000,
+0xff098200,
+0x8c850000,
+0xff098200,
+0x8c860000,
+0xff098200,
+0x8c870000,
+0xff098200,
+0xd48c0000,
+0xff098200,
+0xd48e0000,
+0xff098200,
+0x0320f809,
+0x8c840000,
+0xff098200,
+0x8e0dfff4,
+0x8e0efff8,
+0x8e1ffffc,
+0xada20000,
+0xff098200,
+0xada30000,
+0xff098200,
+0xf5a00000,
+0xff098200,
+0xf5a20000,
+0xff098200,
+0x0200e821,
+0x03e00008,
+0x01c08021,
+0xff000000,
+0xff080000,
+0xff000000,
+0x02172821,
+0x020a3021,
+0x8cac0000,
+0xff098200,
+0x8ccd0000,
+0xff098200,
+0xd4a00000,
+0xd4c20000,
+0x2d8c0000,
+0xff098200,
+0x2dad0000,
+0xff098200,
+0x964e0000,
+0xff098200,
+0x018d6024,
+0x26520004,
+0x11800000,
+0xff050835,
+0x3c0d0000,
+0xff090200,
+0x000e7080,
+0x01cd7021,
+0xff000000,
+0x46220034,
+0xff000000,
+0x46220036,
+0xff000000,
+0x00007001,
+0xff000000,
+0x00017001,
+0xff000000,
+0x024e9021,
+0xff06000b,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x0217b821,
+0x26520004,
+0x8eec0000,
+0xff098200,
+0xd6e00000,
+0x020a5021,
+0x964e0000,
+0xff098200,
+0x8d4d0000,
+0xff098200,
+0xd5420000,
+0x3c0f0000,
+0xff090200,
+0x2d810000,
+0xff098200,
+0x2da40000,
+0xff098200,
+0x000e7080,
+0x00240824,
+0x10200000,
+0xff050805,
+0x01cf7021,
+0x46220032,
+0xff000000,
+0x00007001,
+0xff000000,
+0x00017001,
+0xff000000,
+0xff06000b,
+0x024e9021,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000f,
+0x8ee50000,
+0xff098200,
+0x8d460000,
+0xff098200,
+0xff000000,
+0x240f0000,
+0xff098200,
+0x118f0000,
+0xff05083b,
+0xff000000,
+0x2d810000,
+0xff098200,
+0xff000000,
+0x11af0000,
+0xff05083b,
+0xff000000,
+0x00a67826,
+0x01ac6826,
+0x2d840000,
+0xff098200,
+0x0001780a,
+0x000d200b,
+0x01af0825,
+0x0001200a,
+0x10800000,
+0xff05080b,
+0xff000000,
+0x0001700b,
+0xff000000,
+0x0001700a,
+0xff000000,
+0x8cad0000,
+0xff098200,
+0x11a00000,
+0xff05080b,
+0x00000000,
+0x91ad0000,
+0xff098200,
+0x31ad0000,
+0xff090200,
+0x15a00000,
+0xff05080b,
+0x00000000,
+0x10000000,
+0xff05083a,
+0x24070000,
+0xff098200,
+0xff000000,
+0x0217b821,
+0x26520004,
+0x8eec0000,
+0xff098200,
+0x000a5042,
+0x8eef0000,
+0xff098200,
+0x022a5023,
+0x964e0000,
+0xff098200,
+0xff000000,
+0x24010000,
+0xff098200,
+0x11810000,
+0xff05083b,
+0xff000000,
+0x8d4dfffc,
+0x258c0000,
+0xff098200,
+0x000e7080,
+0x01af6826,
+0x018d6025,
+0x3c0f0000,
+0xff090200,
+0x01cf7021,
+0xff000000,
+0x000c700b,
+0xff000000,
+0x000c700a,
+0xff000000,
+0x024e9021,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x0217b821,
+0x26520004,
+0x8eec0000,
+0xff098200,
+0xd6e00000,
+0x022a5021,
+0x964e0000,
+0xff098200,
+0xd5420000,
+0x3c0f0000,
+0xff090200,
+0x2d810000,
+0xff098200,
+0x000e7080,
+0xff000000,
+0x10200000,
+0xff050805,
+0xff000000,
+0x10200000,
+0xff050801,
+0xff000000,
+0x01cf7021,
+0x46220032,
+0xff000000,
+0x00007001,
+0x024e9021,
+0xff06000b,
+0xff000000,
+0x00017001,
+0xff06000b,
+0x024e9021,
+0xff000000,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0xff06000f,
+0x24010000,
+0xff098200,
+0x11810000,
+0xff05083b,
+0x00000000,
+0x10000000,
+0xff05080b,
+0x00000000,
+0xff000000,
+0x0217b821,
+0x000a68c2,
+0x8eec0000,
+0xff098200,
+0x964e0000,
+0xff098200,
+0x01a06827,
+0x26520004,
+0xff000000,
+0x24010000,
+0xff098200,
+0x11810000,
+0xff05083b,
+0xff000000,
+0x018d6026,
+0x000e7080,
+0x3c0f0000,
+0xff090200,
+0x01cf7021,
+0xff000000,
+0x000c700b,
+0xff000000,
+0x000c700a,
+0xff000000,
+0x024e9021,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x020a5021,
+0x964e0000,
+0xff098200,
+0x8d4c0000,
+0xff098200,
+0x26520004,
+0xff000000,
+0x2d8c0000,
+0xff098200,
+0x000e7080,
+0x3c0f0000,
+0xff090200,
+0x01cf7021,
+0xff000000,
+0x000c700a,
+0xff000000,
+0x000c700b,
+0xff000000,
+0x024e9021,
+0xff000000,
+0x2d8c0000,
+0xff098200,
+0xd5400000,
+0xff000000,
+0x11800000,
+0xff050801,
+0xff000000,
+0x15800000,
+0xff050801,
+0xff000000,
+0x0217b821,
+0x000e7080,
+0x3c0f0000,
+0xff090200,
+0x01cf7021,
+0xf6e00000,
+0x024e9021,
+0xff06000b,
+0xff000000,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x020a5021,
+0x0217b821,
+0xd5400000,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x020a5021,
+0x0217b821,
+0x8d4c0000,
+0xff098200,
+0x240d0000,
+0xff098200,
+0x2d8c0000,
+0xff098200,
+0x258d0000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0xaeed0000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x020a3021,
+0x0217b821,
+0x8ccc0000,
+0xff098200,
+0xd4c00000,
+0x2d810000,
+0xff098200,
+0x10200000,
+0xff05083c,
+0x46200007,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x020a2821,
+0x0217b821,
+0x8cac0000,
+0xff098200,
+0x8ca40000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x15810000,
+0xff050802,
+0x24010000,
+0xff098200,
+0x8c820000,
+0xff098200,
+0xff06000b,
+0x44820000,
+0x46800021,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000c,
+0x15810000,
+0xff05083e,
+0x00000000,
+0xff000000,
+0x8c8e0000,
+0xff098200,
+0x15c00000,
+0xff050809,
+0x00000000,
+0xff06000d,
+0xff000000,
+0xff06003f,
+0x8e790000,
+0xff098200,
+0x0320f809,
+0x00000000,
+0x10000000,
+0xff05080b,
+0x00000000,
+0xff000000,
+0xff060013,
+0x91cc0000,
+0xff098200,
+0x318c0000,
+0xff090200,
+0x15800000,
+0xff05080d,
+0x00000000,
+0x10000000,
+0xff05083e,
+0x00000000,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0xff000000,
+0x02083021,
+0x02293821,
+0x8ccd0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083821,
+0x02293021,
+0x8ced0000,
+0xff098200,
+0xd4f60000,
+0xd4d40000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083021,
+0x02093821,
+0x8ccd0000,
+0xff098200,
+0x8cee0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0x2dcc0000,
+0xff098200,
+0x002c0824,
+0xff000000,
+0x10200000,
+0xff05083d,
+0x0217b821,
+0x4636a000,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0xff000000,
+0x02083021,
+0x02293821,
+0x8ccd0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083821,
+0x02293021,
+0x8ced0000,
+0xff098200,
+0xd4f60000,
+0xd4d40000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083021,
+0x02093821,
+0x8ccd0000,
+0xff098200,
+0x8cee0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0x2dcc0000,
+0xff098200,
+0x002c0824,
+0xff000000,
+0x10200000,
+0xff05083d,
+0x0217b821,
+0x4636a001,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0xff000000,
+0x02083021,
+0x02293821,
+0x8ccd0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083821,
+0x02293021,
+0x8ced0000,
+0xff098200,
+0xd4f60000,
+0xd4d40000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083021,
+0x02093821,
+0x8ccd0000,
+0xff098200,
+0x8cee0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0x2dcc0000,
+0xff098200,
+0x002c0824,
+0xff000000,
+0x10200000,
+0xff05083d,
+0x0217b821,
+0x4636a002,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0xff000000,
+0x02083021,
+0x02293821,
+0x8ccd0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083821,
+0x02293021,
+0x8ced0000,
+0xff098200,
+0xd4f60000,
+0xd4d40000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083021,
+0x02093821,
+0x8ccd0000,
+0xff098200,
+0x8cee0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0x2dcc0000,
+0xff098200,
+0x002c0824,
+0xff000000,
+0x10200000,
+0xff05083d,
+0x0217b821,
+0x4636a003,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0xff000000,
+0x02083021,
+0x02293821,
+0x8ccd0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083821,
+0x02293021,
+0x8ced0000,
+0xff098200,
+0xd4f60000,
+0xd4d40000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083021,
+0x02093821,
+0x8ccd0000,
+0xff098200,
+0x8cee0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0x2dcc0000,
+0xff098200,
+0x002c0824,
+0xff000000,
+0x10200000,
+0xff05083d,
+0x0217b821,
+0xff060098,
+0x04110000,
+0xff05085b,
+0x4636a303,
+0x46360002,
+0x4620a001,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0xff000000,
+0x02083021,
+0x02293821,
+0x8ccd0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083821,
+0x02293021,
+0x8ced0000,
+0xff098200,
+0xd4f60000,
+0xd4d40000,
+0x2da10000,
+0xff098200,
+0xff000000,
+0x02083021,
+0x02093821,
+0x8ccd0000,
+0xff098200,
+0x8cee0000,
+0xff098200,
+0xd4d40000,
+0xd4f60000,
+0x2da10000,
+0xff098200,
+0x2dcc0000,
+0xff098200,
+0x002c0824,
+0xff000000,
+0x10200000,
+0xff05083d,
+0x0217b821,
+0x10000000,
+0xff050898,
+0x00000000,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0x02083021,
+0x02093821,
+0x8ccd0000,
+0xff098200,
+0x8cee0000,
+0xff098200,
+0xd4cc0000,
+0xd4ee0000,
+0x2da10000,
+0xff098200,
+0x2dcc0000,
+0xff098200,
+0x002c0824,
+0x8e790000,
+0xff098200,
+0x10200000,
+0xff05083d,
+0x0217b821,
+0x0320f809,
+0x00000000,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0x01283023,
+0xae900000,
+0xff098200,
+0x02092821,
+0x0100a821,
+0xff06002b,
+0x8e790000,
+0xff098200,
+0x000630c2,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x14400000,
+0xff050836,
+0x8e900000,
+0xff098200,
+0x02154021,
+0xd5000000,
+0x0217b821,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000a6842,
+0x022d6823,
+0x8e4b0000,
+0x26520004,
+0x8dacfffc,
+0x0217b821,
+0x240e0000,
+0xff098200,
+0xaeec0000,
+0xff098200,
+0xaeee0000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000a6842,
+0x022d6823,
+0x8e4b0000,
+0x26520004,
+0x8dacfffc,
+0x0217b821,
+0x240e0000,
+0xff098200,
+0xaeec0000,
+0xff098200,
+0xaeee0000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000b5403,
+0x448a0000,
+0x0217b821,
+0x46800021,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x022a5021,
+0x0217b821,
+0xd5400000,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000a68c2,
+0x0217b821,
+0x01a06027,
+0x8e4b0000,
+0x26520004,
+0xaeec0000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x0217b821,
+0xaefe0000,
+0xff098200,
+0x26f70008,
+0x020a5021,
+0xff06000b,
+0xaefe0000,
+0xff098200,
+0x02ea082a,
+0x14200000,
+0xff05080b,
+0x26f70008,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x8e080000,
+0xff098200,
+0x000a5042,
+0x01485021,
+0x8d480000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x8d0d0000,
+0xff098200,
+0xd5a00000,
+0x0217b821,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x8e080000,
+0xff098200,
+0x0017b842,
+0x020a5021,
+0x02e8b821,
+0xd5400000,
+0x8ee80000,
+0xff098200,
+0x910f0000,
+0xff098200,
+0x8d050000,
+0xff098200,
+0x31ef0000,
+0xff090200,
+0x910c0000,
+0xff098200,
+0x8d4e0000,
+0xff098200,
+0xf4a00000,
+0x24010000,
+0xff098200,
+0x01ec7825,
+0x11e10000,
+0xff050802,
+0x25ce0000,
+0xff098200,
+0xff06000b,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000c,
+0x2dc10000,
+0xff098200,
+0x10200000,
+0xff05080b,
+0x8d4d0000,
+0xff098200,
+0x91af0000,
+0xff098200,
+0x31ef0000,
+0xff090200,
+0x11e00000,
+0xff05080b,
+0x8e790000,
+0xff098200,
+0x0320f809,
+0x26640000,
+0xff098200,
+0x10000000,
+0xff05080b,
+0x00000000,
+0xff000000,
+0x8e080000,
+0xff098200,
+0x0017b842,
+0x000a6842,
+0x02e8b821,
+0x022d6823,
+0x8ee80000,
+0xff098200,
+0x8dadfffc,
+0x910e0000,
+0xff098200,
+0x8d050000,
+0xff098200,
+0x91af0000,
+0xff098200,
+0x31c10000,
+0xff090200,
+0x910e0000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0xacad0000,
+0xff098200,
+0x14200000,
+0xff050802,
+0xacac0000,
+0xff098200,
+0xff06000b,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000c,
+0x11c00000,
+0xff05080b,
+0x31e10000,
+0xff090200,
+0x10200000,
+0xff05080b,
+0x8e790000,
+0xff098200,
+0x0320f809,
+0x26640000,
+0xff098200,
+0x10000000,
+0xff05080b,
+0x00000000,
+0xff000000,
+0x8e080000,
+0xff098200,
+0x0017b842,
+0x022a5021,
+0x02e8b821,
+0xd5400000,
+0x8ee80000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x8d0d0000,
+0xff098200,
+0xf5a00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x8e080000,
+0xff098200,
+0x0017b842,
+0x000a60c2,
+0x02e8b821,
+0x01806027,
+0x8ee80000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x8d0d0000,
+0xff098200,
+0xadac0000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x8e8e0000,
+0xff098200,
+0x000a6042,
+0x3c010000,
+0xff090200,
+0x01816021,
+0x024c9021,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0x11c00000,
+0xff050801,
+0x02802021,
+0x0320f809,
+0x02172821,
+0x8e900000,
+0xff098200,
+0xff06000b,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x000a6842,
+0x8e790000,
+0xff098200,
+0x022d6823,
+0x8e060000,
+0xff098200,
+0x8da5fffc,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x8e900000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x0217b821,
+0xaeec0000,
+0xff098200,
+0xaee20000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x8e6c0000,
+0xff098200,
+0x8e6d0000,
+0xff098200,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x018d082b,
+0x10200000,
+0xff050805,
+0xff06000b,
+0xff000000,
+0x8e790000,
+0xff098200,
+0x000a28c2,
+0x30a507ff,
+0x240c0801,
+0x24a1f801,
+0x000a3382,
+0x0181280a,
+0x0320f809,
+0x02802021,
+0xff000000,
+0x8e790000,
+0xff098200,
+0x000a6842,
+0x022d6823,
+0x02802021,
+0x0320f809,
+0x8da5fffc,
+0xff000000,
+0x8e900000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x0217b821,
+0x240c0000,
+0xff098200,
+0xaee20000,
+0xff098200,
+0xaeec0000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000f,
+0x8e790000,
+0xff098200,
+0x0140a821,
+0x0320f809,
+0x02802021,
+0x10000000,
+0xff05080b,
+0x02a05021,
+0xff000000,
+0x8e0e0000,
+0xff098200,
+0x000a6842,
+0x022d6823,
+0x8dc80000,
+0xff098200,
+0x8da9fffc,
+0xff000000,
+0x10000000,
+0xff050899,
+0xff000000,
+0x10000000,
+0xff05089a,
+0xff000000,
+0x0217b821,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0x02082821,
+0x02093021,
+0x8cad0000,
+0xff098200,
+0x8cce0000,
+0xff098200,
+0x8ca80000,
+0xff098200,
+0x24010000,
+0xff098200,
+0xd4c00000,
+0x15a10000,
+0xff050830,
+0x0217b821,
+0x2dc10000,
+0xff098200,
+0x10200000,
+0xff050805,
+0x24010000,
+0xff098200,
+0x462000a4,
+0x8d0c0000,
+0xff098200,
+0x440e1000,
+0x46801121,
+0x8d0d0000,
+0xff098200,
+0x46240032,
+0x01cc082b,
+0x00000801,
+0x000e70c0,
+0x10200000,
+0xff050830,
+0x01ae7021,
+0x8dcc0000,
+0xff098200,
+0x119e0000,
+0xff050802,
+0xd5c00000,
+0xff06000b,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000c,
+0x8d0e0000,
+0xff098200,
+0x11c00000,
+0xff05080b,
+0x00000000,
+0x91cc0000,
+0xff098200,
+0x318c0000,
+0xff090200,
+0x15800000,
+0xff05080b,
+0x00000000,
+0x10000000,
+0xff050830,
+0x00000000,
+0xff06000f,
+0x15c10000,
+0xff050830,
+0xff000000,
+0x8cc90000,
+0xff098200,
+0x10000000,
+0xff050899,
+0x00000000,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x02082821,
+0x000b4b82,
+0x8cac0000,
+0xff098200,
+0x312903fc,
+0x24010000,
+0xff098200,
+0x8ca80000,
+0xff098200,
+0x02293023,
+0x8cc9fffc,
+0x15810000,
+0xff05082d,
+0x0217b821,
+0xff060099,
+0x8d0c0000,
+0xff098200,
+0x8d2d0000,
+0xff098200,
+0x8d0e0000,
+0xff098200,
+0x01ac6824,
+0x000d6140,
+0x000d68c0,
+0x018d6823,
+0x01cd7021,
+0xff06000b,
+0x8dc40000,
+0xff098200,
+0x8dcc0000,
+0xff098200,
+0x8dcd0000,
+0xff098200,
+0x8dc50000,
+0xff098200,
+0x24840000,
+0xff098200,
+0x01896026,
+0x008c0825,
+0x14200000,
+0xff050804,
+0x8d0f0000,
+0xff098200,
+0x10be0000,
+0xff050805,
+0x8dc40000,
+0xff098200,
+0xff06000d,
+0x8e4b0000,
+0x26520004,
+0xaee50000,
+0xff098200,
+0xaee40000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000e,
+0x15a00000,
+0xff05080b,
+0xff000000,
+0x01a07021,
+0xff06000f,
+0x11e00000,
+0xff05080d,
+0x24050000,
+0xff098200,
+0x91ec0000,
+0xff098200,
+0x318c0000,
+0xff090200,
+0x15800000,
+0xff05080d,
+0x00000000,
+0x10000000,
+0xff05082e,
+0x00000000,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x02082821,
+0x314907f8,
+0x8ca40000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x8ca80000,
+0xff098200,
+0x0217b821,
+0x14810000,
+0xff05082f,
+0x000960c2,
+0x8d0d0000,
+0xff098200,
+0x8d0e0000,
+0xff098200,
+0x018d082b,
+0x10200000,
+0xff05082f,
+0x01c94821,
+0x8d2d0000,
+0xff098200,
+0x11be0000,
+0xff050805,
+0xd5200000,
+0xff06000b,
+0x8e4b0000,
+0x26520004,
+0xf6e00000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000f,
+0x8d0e0000,
+0xff098200,
+0x11c00000,
+0xff05080b,
+0x00000000,
+0x91cd0000,
+0xff098200,
+0x31ad0000,
+0xff090200,
+0x15a00000,
+0xff05080b,
+0x00000000,
+0x10000000,
+0xff05082f,
+0x00000000,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x314907f8,
+0x02082821,
+0x02093021,
+0x8cad0000,
+0xff098200,
+0x8cce0000,
+0xff098200,
+0x8ca80000,
+0xff098200,
+0x24010000,
+0xff098200,
+0xd4c00000,
+0x15a10000,
+0xff050834,
+0x0217b821,
+0x2dc10000,
+0xff098200,
+0x10200000,
+0xff050805,
+0x24010000,
+0xff098200,
+0x462000a4,
+0x8d0c0000,
+0xff098200,
+0x440e1000,
+0x46801121,
+0x8d0d0000,
+0xff098200,
+0x46240032,
+0x01cc082b,
+0x00000801,
+0x000e70c0,
+0x10200000,
+0xff050834,
+0x01ae6821,
+0x910f0000,
+0xff098200,
+0x8dac0000,
+0xff098200,
+0x119e0000,
+0xff050803,
+0xd6e00000,
+0xff06000b,
+0x31e10000,
+0xff090200,
+0x14200000,
+0xff050807,
+0xf5a00000,
+0xff06000c,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000d,
+0x8d0e0000,
+0xff098200,
+0x11c00000,
+0xff05080b,
+0x00000000,
+0x91ce0000,
+0xff098200,
+0x31ce0000,
+0xff090200,
+0xff000000,
+0x15c00000,
+0xff05080b,
+0x00000000,
+0x10000000,
+0xff050834,
+0x00000000,
+0xff06000f,
+0x15c10000,
+0xff050834,
+0x8cc90000,
+0xff098200,
+0x10000000,
+0xff05089a,
+0x00000000,
+0xff060011,
+0x8e6c0000,
+0xff098200,
+0x31ef0000,
+0xff090200,
+0xae680000,
+0xff098200,
+0xa10f0000,
+0xff098200,
+0x10000000,
+0xff05080c,
+0xad0c0000,
+0xff098200,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x02082821,
+0x000b4b82,
+0x8cac0000,
+0xff098200,
+0x312903fc,
+0x24010000,
+0xff098200,
+0x02293023,
+0x8ca80000,
+0xff098200,
+0x8cc9fffc,
+0x15810000,
+0xff050831,
+0x0217b821,
+0xff06009a,
+0x8d0c0000,
+0xff098200,
+0x8d2d0000,
+0xff098200,
+0x8d0e0000,
+0xff098200,
+0xa1000000,
+0xff098200,
+0x01ac6824,
+0x000d6140,
+0x000d68c0,
+0x018d6823,
+0x01cd7021,
+0xd6f40000,
+0xff06000b,
+0x8dc40000,
+0xff098200,
+0x8dcc0000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x8dcd0000,
+0xff098200,
+0x14810000,
+0xff050805,
+0x8dc50000,
+0xff098200,
+0x15890000,
+0xff050805,
+0x910f0000,
+0xff098200,
+0x10be0000,
+0xff050804,
+0x8d0c0000,
+0xff098200,
+0xff06000c,
+0x31e10000,
+0xff090200,
+0x14200000,
+0xff050807,
+0xff000000,
+0xf5d40000,
+0xff098200,
+0xff06000d,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000e,
+0x11800000,
+0xff05080c,
+0x00000000,
+0x918c0000,
+0xff098200,
+0x318c0000,
+0xff090200,
+0x15800000,
+0xff05080c,
+0x00000000,
+0x10000000,
+0xff050832,
+0x00000000,
+0xff06000f,
+0x15a00000,
+0xff05080b,
+0x01a07021,
+0x8d0e0000,
+0xff098200,
+0x11c00000,
+0xff050806,
+0x26660000,
+0xff098200,
+0x91cc0000,
+0xff098200,
+0x318c0000,
+0xff090200,
+0x11800000,
+0xff050832,
+0x24010000,
+0xff098200,
+0xff060010,
+0x8e790000,
+0xff098200,
+0xacc90000,
+0xff098200,
+0xacc10000,
+0xff098200,
+0xae900000,
+0xff098200,
+0x01002821,
+0xafb20014,
+0x0320f809,
+0x02802021,
+0x8e900000,
+0xff098200,
+0xff000000,
+0x10000000,
+0xff05080d,
+0xf4540000,
+0xff060011,
+0x8e6c0000,
+0xff098200,
+0x31ef0000,
+0xff090200,
+0xae680000,
+0xff098200,
+0xa10f0000,
+0xff098200,
+0x10000000,
+0xff05080d,
+0xad0c0000,
+0xff098200,
+0xff000000,
+0x000b4542,
+0x310807f8,
+0x02082821,
+0x314907f8,
+0x8ca40000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x8ca80000,
+0xff098200,
+0x0217b821,
+0x14810000,
+0xff050833,
+0x000960c2,
+0x8d0d0000,
+0xff098200,
+0x8d0e0000,
+0xff098200,
+0x018d082b,
+0x10200000,
+0xff050833,
+0x01c94821,
+0x8d2d0000,
+0xff098200,
+0x910f0000,
+0xff098200,
+0x11be0000,
+0xff050805,
+0xd6e00000,
+0xff06000b,
+0x31e10000,
+0xff090200,
+0x14200000,
+0xff050807,
+0xf5200000,
+0xff06000c,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000f,
+0x8d0e0000,
+0xff098200,
+0x11c00000,
+0xff05080b,
+0x00000000,
+0x91cd0000,
+0xff098200,
+0x31ad0000,
+0xff090200,
+0x15a00000,
+0xff05080b,
+0x00000000,
+0x10000000,
+0xff050833,
+0x00000000,
+0xff060011,
+0x8e6c0000,
+0xff098200,
+0xff000000,
+0x31ef0000,
+0xff090200,
+0xae680000,
+0xff098200,
+0xa10f0000,
+0xff098200,
+0x10000000,
+0xff05080c,
+0xad0c0000,
+0xff098200,
+0xff000000,
+0x0217b821,
+0xff06000b,
+0x022a7821,
+0x8ee50000,
+0xff098200,
+0x26acfff8,
+0x8def0000,
+0xff098200,
+0x11800000,
+0xff050804,
+0x000c30c2,
+0x00cf3021,
+0x8cae0000,
+0xff098200,
+0x000f68c0,
+0x90af0000,
+0xff098200,
+0x8ca40000,
+0xff098200,
+0x01c6082b,
+0x14200000,
+0xff050805,
+0x02ec7021,
+0x01a46821,
+0x31ec0000,
+0xff090200,
+0xff06000d,
+0xd6e00000,
+0x26f70008,
+0x02ee082b,
+0xf5a00000,
+0x14200000,
+0xff05080d,
+0x25ad0008,
+0x15800000,
+0xff050807,
+0x00000000,
+0xff06000e,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000f,
+0x8e790000,
+0xff098200,
+0xae900000,
+0xff098200,
+0xafb20014,
+0x01408021,
+0x0320f809,
+0x02802021,
+0x02005021,
+0x10000000,
+0xff05080b,
+0x8e900000,
+0xff098200,
+0xff060011,
+0x8e6c0000,
+0xff098200,
+0x31ef0000,
+0xff090200,
+0xae650000,
+0xff098200,
+0xa0af0000,
+0xff098200,
+0xff000000,
+0x10000000,
+0xff05080e,
+0xacac0000,
+0xff098200,
+0xff000000,
+0x314907f8,
+0x10000000,
+0xff05089b,
+0x01354821,
+0xff000000,
+0x314907f8,
+0xff06009b,
+0x02007021,
+0x02178021,
+0x24010000,
+0xff098200,
+0x8e0c0000,
+0xff098200,
+0x8e080000,
+0xff098200,
+0x26100008,
+0x15810000,
+0xff050825,
+0x2529fff8,
+0xae120000,
+0xff098200,
+0x8d120000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000bb942,
+0x000d6880,
+0x32f707f8,
+0x026d6021,
+0x8d8c0000,
+0x01800008,
+0x02f0b821,
+0xff000000,
+0x01555021,
+0xff000000,
+0x0217b821,
+0x24010000,
+0xff098200,
+0x8eec0000,
+0xff098200,
+0x8ee80000,
+0xff098200,
+0x01404821,
+0x8e0d0000,
+0xff098200,
+0x26f70008,
+0x15810000,
+0xff050840,
+0x2529fff8,
+0xff060041,
+0x31ac0000,
+0xff090200,
+0x910f0000,
+0xff098200,
+0x15800000,
+0xff050807,
+0x39ae0000,
+0xff090200,
+0xff06000b,
+0xae080000,
+0xff098200,
+0x2de10002,
+0x02007021,
+0x11200000,
+0xff050803,
+0x01207821,
+0xff06000c,
+0xd6e00000,
+0x26f70008,
+0x25effff8,
+0xf5c00000,
+0x15e00000,
+0xff05080c,
+0x25ce0008,
+0xff06000d,
+0x01816025,
+0x11800000,
+0xff050805,
+0x00000000,
+0xff06000e,
+0x8d120000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000bb942,
+0x000d6880,
+0x32f707f8,
+0x026d6021,
+0x8d8c0000,
+0x01800008,
+0x02f0b821,
+0xff06000f,
+0x8dabfffc,
+0x000bb942,
+0x32f707f8,
+0x02176823,
+0x8dad0000,
+0xff098200,
+0x8dad0000,
+0xff098200,
+0x10000000,
+0xff05080e,
+0xff000000,
+0x8db10000,
+0xff098200,
+0xff060011,
+0x31c10000,
+0xff090200,
+0x14200000,
+0xff05080b,
+0x020e7023,
+0x01c08021,
+0x8dcd0000,
+0xff098200,
+0x10000000,
+0xff05080b,
+0x31ac0000,
+0xff090200,
+0xff000000,
+0x02007021,
+0x02178021,
+0x24010000,
+0xff098200,
+0x8e0d0000,
+0xff098200,
+0x8e080000,
+0xff098200,
+0xd602fff8,
+0xd600fff0,
+0xae0d0000,
+0xff098200,
+0xae080000,
+0xff098200,
+0xf6020010,
+0xf6000008,
+0x26100008,
+0x15a10000,
+0xff050825,
+0x24090010,
+0xae120000,
+0xff098200,
+0x8d120000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000bb942,
+0x000d6880,
+0x32f707f8,
+0x026d6021,
+0x8d8c0000,
+0x01800008,
+0x02f0b821,
+0xff000000,
+0x0217b821,
+0x8ee80000,
+0xff098200,
+0x8ee90000,
+0xff098200,
+0x8d0c0000,
+0xff098200,
+0x8d0d0000,
+0xff098200,
+0x26520004,
+0xff06000b,
+0x012c082b,
+0x10200000,
+0xff050805,
+0x000978c0,
+0x01af7821,
+0x8dee0000,
+0xff098200,
+0xd5e00000,
+0x44891000,
+0x964a0000,
+0xff098200,
+0x11de0000,
+0xff05080b,
+0x25290001,
+0x468010a1,
+0x3c0f0000,
+0xff090200,
+0xf6e00008,
+0x000a5080,
+0x014f5021,
+0xaee90000,
+0xff098200,
+0x024a9021,
+0xf6e20000,
+0xff06000d,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000f,
+0x8d0d0000,
+0xff098200,
+0x012c4823,
+0x8d0e0000,
+0xff098200,
+0xff060010,
+0x01a9082b,
+0x14200000,
+0xff05080d,
+0x00097940,
+0x000940c0,
+0x01e87823,
+0x01ee7821,
+0x8de80000,
+0xff098200,
+0xd5e00000,
+0x964a0000,
+0xff098200,
+0x111e0000,
+0xff050810,
+0x25290001,
+0xd5e20000,
+0xff098200,
+0x3c0f0000,
+0xff090200,
+0xf6e00008,
+0x012c4821,
+0x000a5080,
+0x014f5021,
+0xf6e20000,
+0x024a9021,
+0x10000000,
+0xff05080d,
+0xff000000,
+0xaee90000,
+0xff098200,
+0xff000000,
+0x0217b821,
+0x8eec0000,
+0xff098200,
+0x8eed0000,
+0xff098200,
+0x8eee0000,
+0xff098200,
+0x8eef0000,
+0xff098200,
+0x24010000,
+0xff098200,
+0x15810000,
+0xff050805,
+0x25ce0000,
+0xff098200,
+0x91ad0000,
+0xff098200,
+0x25ef0000,
+0xff098200,
+0x000a6042,
+0x01cf7025,
+0x25ad0000,
+0xff098200,
+0x024c6021,
+0x01ae6825,
+0x15a00000,
+0xff050805,
+0x3c0e0000,
+0xff090200,
+0x018e9021,
+0xaee00000,
+0xff098200,
+0xff06000b,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000f,
+0x240f0000,
+0xff098200,
+0x240d0000,
+0xff098200,
+0xa24f0000,
+0xff098200,
+0x018e9021,
+0x10000000,
+0xff05080b,
+0xa24d0000,
+0xff098200,
+0xff000000,
+0x8e0c0000,
+0xff098200,
+0x314907f8,
+0x000b4542,
+0x02094821,
+0x310807f8,
+0x0217b821,
+0x25290000,
+0xff098200,
+0x02e87021,
+0x260ffff8,
+0x012c4823,
+0x11000000,
+0xff050805,
+0x01e96823,
+0x25cefff0,
+0xff06000b,
+0x8d240000,
+0xff098200,
+0x012f082b,
+0x8d250000,
+0xff098200,
+0x25290008,
+0x03c1200a,
+0xaee40000,
+0xff098200,
+0xaee50000,
+0xff098200,
+0x02ee082b,
+0x14200000,
+0xff05080b,
+0x26f70008,
+0xff06000d,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff06000f,
+0x8e8c0000,
+0xff098200,
+0x19a00000,
+0xff05080d,
+0x24150008,
+0x02ed7021,
+0x018e082b,
+0x14200000,
+0xff050807,
+0x25b50008,
+0xff060010,
+0xd5200000,
+0x25290008,
+0xf6e00000,
+0x012f082b,
+0x14200000,
+0xff050810,
+0x26f70008,
+0x10000000,
+0xff05080d,
+0x00000000,
+0xff060011,
+0x8e790000,
+0xff098200,
+0xae970000,
+0xff098200,
+0x02f0b823,
+0xae900000,
+0xff098200,
+0x01308023,
+0xafb20014,
+0x000d28c2,
+0x0320f809,
+0x02802021,
+0x02004821,
+0x8e900000,
+0xff098200,
+0x0217b821,
+0x02094821,
+0x10000000,
+0xff050810,
+0xff000000,
+0x260ffff8,
+0xff000000,
+0x01555021,
+0xff000000,
+0x8e120000,
+0xff098200,
+0x0217b821,
+0x0140a821,
+0xff06000b,
+0x324c0000,
+0xff090200,
+0x15800000,
+0xff05089c,
+0x3a4d0000,
+0xff090200,
+0xff060017,
+0x8e4bfffc,
+0x260efff8,
+0x2549fff8,
+0x000b6142,
+0x000b4542,
+0x318c07f8,
+0x310807f8,
+0x01c87821,
+0x11200000,
+0xff050803,
+0x01cc8023,
+0xff06000c,
+0xd6e00000,
+0x26f70008,
+0x2529fff8,
+0xf5c00000,
+0x15200000,
+0xff05080c,
+0x25ce0008,
+0xff06000d,
+0x25effff8,
+0xff06000f,
+0x01cf082b,
+0x14200000,
+0xff050806,
+0x8e0d0000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x8dad0000,
+0xff098200,
+0x8db10000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff060010,
+0xadde0000,
+0xff098200,
+0x10000000,
+0xff05080f,
+0x25ce0008,
+0xff06009c,
+0x31ae0000,
+0xff090200,
+0x15c00000,
+0xff050818,
+0x00000000,
+0x020d8023,
+0x10000000,
+0xff05080b,
+0x8e120000,
+0xff098200,
+0xff000000,
+0x8e120000,
+0xff098200,
+0x0217b821,
+0x0140a821,
+0x324c0000,
+0xff090200,
+0x15800000,
+0xff05089c,
+0x3a4d0000,
+0xff090200,
+0x8e4bfffc,
+0x260efff8,
+0xff000000,
+0xd6e00000,
+0xff000000,
+0x000b4542,
+0x000bb942,
+0x310807f8,
+0x32f707f8,
+0xff000000,
+0xf5c00000,
+0xff000000,
+0x01d78023,
+0xff06000f,
+0x0148082b,
+0x14200000,
+0xff050806,
+0x8e0d0000,
+0xff098200,
+0x8e4b0000,
+0x26520004,
+0x8dad0000,
+0xff098200,
+0x8db10000,
+0xff098200,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff060010,
+0x25ce0008,
+0x254a0008,
+0x10000000,
+0xff05080f,
+0xff000000,
+0xadde0000,
+0xff098200,
+0xff000000,
+0xadde0000,
+0xff098200,
+0xff000000,
+0x00126842,
+0x31ad007e,
+0x01b36821,
+0x95ae0000,
+0xff098200,
+0x25ce0000,
+0xff098200,
+0x05c00000,
+0xff050890,
+0xa5ae0000,
+0xff098200,
+0xff000000,
+0x0217b821,
+0xff000000,
+0xd6e00000,
+0xff098200,
+0xd6e40000,
+0xff098200,
+0xd6e20000,
+0xff098200,
+0x8eef0000,
+0xff098200,
+0x46240000,
+0xf6e00000,
+0xff098200,
+0xff000000,
+0x8eed0000,
+0xff098200,
+0x8eef0000,
+0xff098200,
+0x8eee0000,
+0xff098200,
+0x2dad0000,
+0xff098200,
+0x2dec0000,
+0xff098200,
+0x2dce0000,
+0xff098200,
+0x01ac6824,
+0x01ae6824,
+0xd6e00000,
+0xff098200,
+0x11a00000,
+0xff050842,
+0xd6e20000,
+0xff098200,
+0xff000000,
+0x000a5042,
+0x3c0c0000,
+0xff090200,
+0xff000000,
+0x4622003e,
+0x4620113e,
+0xf6e00000,
+0xff098200,
+0xff000000,
+0x240d0001,
+0x240e0001,
+0x014c6021,
+0x01e0782a,
+0x00006801,
+0x024c9021,
+0x00047001,
+0x964a0000,
+0xff098200,
+0x01cf680b,
+0x15a00000,
+0xff070800,
+0x000a50c0,
+0xff000000,
+0x240d0001,
+0x240e0001,
+0x01e0782a,
+0x00006801,
+0x00047001,
+0x01cf680b,
+0x15a00000,
+0xff070800,
+0x00000000,
+0xff000000,
+0x014c6821,
+0x01e0782a,
+0x01a07021,
+0xff000000,
+0x00016801,
+0x00057001,
+0xff000000,
+0x00006801,
+0x00047001,
+0xff000000,
+0x01cf680b,
+0x024d9021,
+0xff000000,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x00126842,
+0x31ad007e,
+0x01b36821,
+0x95ae0000,
+0xff098200,
+0x25ce0000,
+0xff098200,
+0x05c00000,
+0xff050890,
+0xa5ae0000,
+0xff098200,
+0xff000000,
+0x0217b821,
+0x8eed0000,
+0xff098200,
+0x11be0000,
+0xff050801,
+0x8eee0000,
+0xff098200,
+0xff000000,
+0xaeed0000,
+0xff098200,
+0x10000000,
+0xff070800,
+0xaeee0000,
+0xff098200,
+0xff000000,
+0x000a6042,
+0x3c010000,
+0xff090200,
+0x01816021,
+0x024c9021,
+0xaeed0000,
+0xff098200,
+0xaeee0000,
+0xff098200,
+0xff000000,
+0xff06000b,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x00126842,
+0x31ad007e,
+0x01b36821,
+0x95ae0000,
+0xff098200,
+0x25ce0000,
+0xff098200,
+0x05c00000,
+0xff050890,
+0xa5ae0000,
+0xff098200,
+0xff000000,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x8e6d0000,
+0xff098200,
+0x000a5042,
+0x24010000,
+0x01aa6821,
+0xae610000,
+0xff098200,
+0x8dae0000,
+0xae700000,
+0xff098200,
+0xae740000,
+0xff098200,
+0x8dce0000,
+0xff098200,
+0x01c00008,
+0x267e0000,
+0xff098200,
+0xff000000,
+0x000a6042,
+0x3c010000,
+0xff090200,
+0x01816021,
+0x024c9021,
+0x8e4b0000,
+0x26520004,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x00126842,
+0x31ad007e,
+0x01b36821,
+0x95ae0000,
+0xff098200,
+0x25ce0000,
+0xff098200,
+0x05c00000,
+0xff050892,
+0xa5ae0000,
+0xff098200,
+0xff000000,
+0x8e8e0000,
+0xff098200,
+0x924d0000,
+0xff098200,
+0x8e510000,
+0xff098200,
+0x01d7082b,
+0x14200000,
+0xff050820,
+0x000d68c0,
+0xff000000,
+0x8e4b0000,
+0x26520004,
+0xff000000,
+0xff06000c,
+0x012d082b,
+0x14200000,
+0xff050803,
+0x02090821,
+0xff000000,
+0x000b5402,
+0x10000000,
+0xff070800,
+0x000a50c0,
+0xff000000,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0xff06000d,
+0xac3e0000,
+0xff098200,
+0x10000000,
+0xff05080c,
+0x25290008,
+0xff000000,
+0xf0f0f0f0,
+0xff000000,
+0x02096821,
+0x8e8e0000,
+0xff098200,
+0x02e96021,
+0xada80000,
+0xff098200,
+0x252f0000,
+0xff098200,
+0x018e082b,
+0x8e510000,
+0xff098200,
+0x10200000,
+0xff050820,
+0xadaf0000,
+0xff098200,
+0x924e0000,
+0xff098200,
+0x0200b821,
+0x01a04821,
+0x8e4b0000,
+0x26520004,
+0x11c00000,
+0xff050803,
+0x25b00008,
+0xff06000b,
+0x8eec0000,
+0xff098200,
+0x8eef0000,
+0xff098200,
+0x02e9082b,
+0x01802021,
+0x03c1600a,
+0x03c1200b,
+0xadaf0000,
+0xff098200,
+0x25ceffff,
+0xadac0000,
+0xff098200,
+0x25ad0008,
+0xaee40000,
+0xff098200,
+0x15c00000,
+0xff05080b,
+0x26f70008,
+0xff06000d,
+0x316d00ff,
+0x000d6880,
+0x026d6021,
+0x000b5402,
+0x8d810000,
+0x000bb942,
+0x000a50c0,
+0x00200008,
+0x32f707f8,
+0xff000000,
+0x8d190000,
+0xff098200,
+0xff000000,
+0x8e790000,
+0xff098200,
+0xff000000,
+0x02e96821,
+0x8e8e0000,
+0xff098200,
+0x02094821,
+0xae900000,
+0xff098200,
+0x01cd082b,
+0xae890000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0xff000000,
+0x8d050000,
+0xff098200,
+0xff000000,
+0x14200000,
+0xff05081f,
+0x02802021,
+0x0320f809,
+0xae6c0000,
+0xff098200,
+0x8e900000,
+0xff098200,
+0x000250c0,
+0x8e8d0000,
+0xff098200,
+0x240c0000,
+0xff098200,
+0x8e120000,
+0xff098200,
+0x01aab823,
+0x10000000,
+0xff050816,
+0xae6c0000,
+0xff098200,
+0xff000000,
+0xff010000
+};
+
+enum {
+ GLOB_vm_returnp,
+ GLOB_cont_dispatch,
+ GLOB_vm_returnc,
+ GLOB_BC_RET_Z,
+ GLOB_vm_return,
+ GLOB_vm_leave_cp,
+ GLOB_vm_leave_unw,
+ GLOB_vm_unwind_c,
+ GLOB_vm_unwind_c_eh,
+ GLOB_vm_unwind_ff,
+ GLOB_vm_unwind_ff_eh,
+ GLOB_vm_growstack_c,
+ GLOB_vm_growstack_l,
+ GLOB_vm_resume,
+ GLOB_vm_pcall,
+ GLOB_vm_call,
+ GLOB_vm_call_dispatch,
+ GLOB_vmeta_call,
+ GLOB_vm_call_dispatch_f,
+ GLOB_vm_cpcall,
+ GLOB_cont_ffi_callback,
+ GLOB_vm_call_tail,
+ GLOB_cont_cat,
+ GLOB_BC_CAT_Z,
+ GLOB_cont_nop,
+ GLOB_vmeta_tgets1,
+ GLOB_vmeta_tgets,
+ GLOB_vmeta_tgetb,
+ GLOB_vmeta_tgetv,
+ GLOB_vmeta_tsets1,
+ GLOB_vmeta_tsets,
+ GLOB_vmeta_tsetb,
+ GLOB_vmeta_tsetv,
+ GLOB_vmeta_comp,
+ GLOB_vmeta_binop,
+ GLOB_cont_ra,
+ GLOB_cont_condt,
+ GLOB_cont_condf,
+ GLOB_vmeta_equal,
+ GLOB_vmeta_equal_cd,
+ GLOB_vmeta_unm,
+ GLOB_vmeta_arith,
+ GLOB_vmeta_len,
+ GLOB_BC_LEN_Z,
+ GLOB_vmeta_callt,
+ GLOB_BC_CALLT_Z,
+ GLOB_vmeta_for,
+ GLOB_ff_assert,
+ GLOB_fff_fallback,
+ GLOB_fff_res,
+ GLOB_ff_type,
+ GLOB_fff_resn,
+ GLOB_ff_getmetatable,
+ GLOB_fff_restv,
+ GLOB_ff_setmetatable,
+ GLOB_ff_rawget,
+ GLOB_ff_tonumber,
+ GLOB_ff_tostring,
+ GLOB_fff_gcstep,
+ GLOB_ff_next,
+ GLOB_ff_pairs,
+ GLOB_ff_ipairs_aux,
+ GLOB_ff_ipairs,
+ GLOB_ff_pcall,
+ GLOB_ff_xpcall,
+ GLOB_ff_coroutine_resume,
+ GLOB_ff_coroutine_wrap_aux,
+ GLOB_ff_coroutine_yield,
+ GLOB_ff_math_abs,
+ GLOB_fff_res1,
+ GLOB_ff_math_floor,
+ GLOB_vm_floor,
+ GLOB_ff_math_ceil,
+ GLOB_vm_ceil,
+ GLOB_ff_math_log,
+ GLOB_ff_math_log10,
+ GLOB_ff_math_exp,
+ GLOB_ff_math_sin,
+ GLOB_ff_math_cos,
+ GLOB_ff_math_tan,
+ GLOB_ff_math_asin,
+ GLOB_ff_math_acos,
+ GLOB_ff_math_atan,
+ GLOB_ff_math_sinh,
+ GLOB_ff_math_cosh,
+ GLOB_ff_math_tanh,
+ GLOB_ff_math_pow,
+ GLOB_ff_math_atan2,
+ GLOB_ff_math_fmod,
+ GLOB_ff_math_sqrt,
+ GLOB_ff_math_deg,
+ GLOB_ff_math_rad,
+ GLOB_ff_math_ldexp,
+ GLOB_ff_math_frexp,
+ GLOB_ff_math_modf,
+ GLOB_ff_math_min,
+ GLOB_ff_math_max,
+ GLOB_ff_string_len,
+ GLOB_fff_resi,
+ GLOB_ff_string_byte,
+ GLOB_ff_string_char,
+ GLOB_fff_newstr,
+ GLOB_ff_string_sub,
+ GLOB_fff_emptystr,
+ GLOB_ff_string_rep,
+ GLOB_ff_string_reverse,
+ GLOB_ff_string_lower,
+ GLOB_ff_string_upper,
+ GLOB_ff_table_getn,
+ GLOB_ff_bit_band,
+ GLOB_ff_bit_bor,
+ GLOB_ff_bit_bxor,
+ GLOB_ff_bit_bswap,
+ GLOB_ff_bit_bnot,
+ GLOB_ff_bit_lshift,
+ GLOB_ff_bit_rshift,
+ GLOB_ff_bit_arshift,
+ GLOB_ff_bit_rol,
+ GLOB_ff_bit_ror,
+ GLOB_ff_bit_tobit,
+ GLOB_vm_record,
+ GLOB_vm_rethook,
+ GLOB_vm_inshook,
+ GLOB_cont_hook,
+ GLOB_vm_hotloop,
+ GLOB_vm_callhook,
+ GLOB_vm_hotcall,
+ GLOB_vm_exit_handler,
+ GLOB_vm_exit_interp,
+ GLOB_vm_trunc,
+ GLOB_vm_ffi_callback,
+ GLOB_vm_ffi_call,
+ GLOB_BC_MODVN_Z,
+ GLOB_BC_TGETS_Z,
+ GLOB_BC_TSETS_Z,
+ GLOB_BC_CALL_Z,
+ GLOB_BC_RETV_Z,
+ GLOB__MAX
+};
+static const char *const globnames[] = {
+ "vm_returnp",
+ "cont_dispatch",
+ "vm_returnc",
+ "BC_RET_Z",
+ "vm_return",
+ "vm_leave_cp",
+ "vm_leave_unw",
+ "vm_unwind_c",
+ "vm_unwind_c_eh",
+ "vm_unwind_ff",
+ "vm_unwind_ff_eh",
+ "vm_growstack_c",
+ "vm_growstack_l",
+ "vm_resume",
+ "vm_pcall",
+ "vm_call",
+ "vm_call_dispatch",
+ "vmeta_call",
+ "vm_call_dispatch_f",
+ "vm_cpcall",
+ "cont_ffi_callback",
+ "vm_call_tail",
+ "cont_cat",
+ "BC_CAT_Z",
+ "cont_nop",
+ "vmeta_tgets1",
+ "vmeta_tgets",
+ "vmeta_tgetb",
+ "vmeta_tgetv",
+ "vmeta_tsets1",
+ "vmeta_tsets",
+ "vmeta_tsetb",
+ "vmeta_tsetv",
+ "vmeta_comp",
+ "vmeta_binop",
+ "cont_ra",
+ "cont_condt",
+ "cont_condf",
+ "vmeta_equal",
+ "vmeta_equal_cd",
+ "vmeta_unm",
+ "vmeta_arith",
+ "vmeta_len",
+ "BC_LEN_Z",
+ "vmeta_callt",
+ "BC_CALLT_Z",
+ "vmeta_for",
+ "ff_assert",
+ "fff_fallback",
+ "fff_res",
+ "ff_type",
+ "fff_resn",
+ "ff_getmetatable",
+ "fff_restv",
+ "ff_setmetatable",
+ "ff_rawget",
+ "ff_tonumber",
+ "ff_tostring",
+ "fff_gcstep",
+ "ff_next",
+ "ff_pairs",
+ "ff_ipairs_aux",
+ "ff_ipairs",
+ "ff_pcall",
+ "ff_xpcall",
+ "ff_coroutine_resume",
+ "ff_coroutine_wrap_aux",
+ "ff_coroutine_yield",
+ "ff_math_abs",
+ "fff_res1",
+ "ff_math_floor",
+ "vm_floor",
+ "ff_math_ceil",
+ "vm_ceil",
+ "ff_math_log",
+ "ff_math_log10",
+ "ff_math_exp",
+ "ff_math_sin",
+ "ff_math_cos",
+ "ff_math_tan",
+ "ff_math_asin",
+ "ff_math_acos",
+ "ff_math_atan",
+ "ff_math_sinh",
+ "ff_math_cosh",
+ "ff_math_tanh",
+ "ff_math_pow",
+ "ff_math_atan2",
+ "ff_math_fmod",
+ "ff_math_sqrt",
+ "ff_math_deg",
+ "ff_math_rad",
+ "ff_math_ldexp",
+ "ff_math_frexp",
+ "ff_math_modf",
+ "ff_math_min",
+ "ff_math_max",
+ "ff_string_len",
+ "fff_resi",
+ "ff_string_byte",
+ "ff_string_char",
+ "fff_newstr",
+ "ff_string_sub",
+ "fff_emptystr",
+ "ff_string_rep",
+ "ff_string_reverse",
+ "ff_string_lower",
+ "ff_string_upper",
+ "ff_table_getn",
+ "ff_bit_band",
+ "ff_bit_bor",
+ "ff_bit_bxor",
+ "ff_bit_bswap",
+ "ff_bit_bnot",
+ "ff_bit_lshift",
+ "ff_bit_rshift",
+ "ff_bit_arshift",
+ "ff_bit_rol",
+ "ff_bit_ror",
+ "ff_bit_tobit",
+ "vm_record",
+ "vm_rethook",
+ "vm_inshook",
+ "cont_hook",
+ "vm_hotloop",
+ "vm_callhook",
+ "vm_hotcall",
+ "vm_exit_handler",
+ "vm_exit_interp",
+ "vm_trunc",
+ "vm_ffi_callback",
+ "vm_ffi_call",
+ "BC_MODVN_Z",
+ "BC_TGETS_Z",
+ "BC_TSETS_Z",
+ "BC_CALL_Z",
+ "BC_RETV_Z",
+ (const char *)0
+};
+static const char *const extnames[] = {
+ (const char *)0
+};
+#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V)
+#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V)
+#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V)
+#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V)
+#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V)
+#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V)
+#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V)
+#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V)
+#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V)
+#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V)
+#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V)
+#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V)
+#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V)
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch))
+#define DISPATCH_GOT(name) (GG_DISP2GOT + 4*LJ_GOT_##name)
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+
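The Dt1()..DtD() and DISPATCH_GL()/DISPATCH_J() macros above compute byte offsets of struct members: the Dt macros apply the member access to a null pointer (the classic offsetof idiom), and the DISPATCH macros add a constant bias so the field can be addressed relative to the dispatch-table base register. A minimal standalone sketch of the same idiom follows; the struct and bias are made-up placeholders for illustration, not LuaJIT's real types.

/* Sketch only: hypothetical struct and bias, same offset-computation idiom. */
#include <stdio.h>
#include <stddef.h>

typedef struct demo_State { int status; void *base; void *top; } demo_State;

/* Null-pointer member trick, as in Dt1()..DtD() above (works in practice,
** though strictly it relies on implementation-defined behavior). */
#define DtDemo(_V)      (int)(ptrdiff_t)&(((demo_State *)0)_V)
#define DEMO_DISP2G     256  /* made-up bias, stands in for GG_DISP2G */
#define DEMO_GL(field)  (DEMO_DISP2G + (int)offsetof(demo_State, field))

int main(void)
{
  printf("offset of ->top : %d\n", DtDemo(->top));  /* plain member offset */
  printf("biased offset   : %d\n", DEMO_GL(top));   /* bias + member offset */
  return 0;
}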
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ dasm_put(Dst, 0);
+ dasm_put(Dst, 1, FRAME_P, LJ_TTRUE, LJ_ENDIAN_SELECT(-4,-8), LJ_ENDIAN_SELECT(-4,-8), FRAME_TYPE, FRAME_C, Dt1(->base), ~LJ_VMST_C, DISPATCH_GL(vmstate), Dt1(->top));
+ dasm_put(Dst, 57, Dt1(->cframe), 72+9*4, 72+8*4, 24+5*8, 72+7*4, 72+6*4, 24+4*8, 72+5*4, 72+4*4, 24+3*8, 72+3*4, 72+2*4, 24+2*8, 72+1*4, 72+0*4, 24+1*8, 24+0*8, Dt1(->maxstack), LJ_ENDIAN_SELECT(4,0));
+ dasm_put(Dst, 109, DISPATCH_GOT(lj_state_growstack), Dt1(->top), ~LJ_VMST_C, Dt1(->glref), Dt2(->vmstate), LJ_TNIL, Dt1(->base), Dt1(->glref), LJ_TFALSE, ~LJ_VMST_INTERP, LJ_ENDIAN_SELECT(-4,-8), GG_G2DISP, LJ_ENDIAN_SELECT(4,0));
+ dasm_put(Dst, 172, DISPATCH_GL(vmstate), LUA_MINSTACK, Dt1(->base), Dt1(->top), DISPATCH_GOT(lj_state_growstack), Dt1(->base), Dt1(->top), LJ_ENDIAN_SELECT(-8,-4), Dt7(->pc), 72+9*4, 72+8*4, 24+5*8, 72+7*4, 72+6*4, 24+4*8, 72+5*4, 72+4*4);
+ dasm_put(Dst, 235, 24+3*8, 72+3*4, 72+2*4, 24+2*8, 72+1*4, 72+0*4, 24+1*8, 24+0*8, Dt1(->glref), Dt1(->status), FRAME_CP, CFRAME_RESUME, GG_G2DISP, Dt1(->cframe), Dt1(->base), Dt1(->top), LJ_ENDIAN_SELECT(-4,-8), Dt1(->status), ~LJ_VMST_INTERP, DISPATCH_GL(vmstate), FRAME_TYPE);
+ dasm_put(Dst, 296, LJ_TNIL, 72+9*4, 72+8*4, 24+5*8, 72+7*4, 72+6*4, 24+4*8, 72+5*4, 72+4*4, 24+3*8, 72+3*4, 72+2*4, 24+2*8, 72+1*4, 72+0*4, 24+1*8, 24+0*8, FRAME_CP, 72+9*4);
+ dasm_put(Dst, 345, 72+8*4, 24+5*8, 72+7*4, 72+6*4, 24+4*8, 72+5*4, 72+4*4, 24+3*8, 72+3*4, 72+2*4, 24+2*8, 72+1*4, 72+0*4, 24+1*8, 24+0*8, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, Dt1(->base));
+ dasm_put(Dst, 396, Dt1(->top), ~LJ_VMST_INTERP, LJ_TNIL, DISPATCH_GL(vmstate), LJ_ENDIAN_SELECT(-4,-8), LJ_TFUNC, LJ_ENDIAN_SELECT(-8,-4), LJ_ENDIAN_SELECT(-4,-8), Dt7(->pc), 72+9*4, 72+8*4, 24+5*8, 72+7*4, 72+6*4, 24+4*8, 72+5*4, 72+4*4, 24+3*8, 72+3*4);
+ dasm_put(Dst, 457, 72+2*4, 24+2*8, 72+1*4, 72+0*4, 24+1*8, 24+0*8, Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), Dt1(->glref), FRAME_CP, GG_G2DISP, -16+LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(-8,-4));
+#if LJ_HASFFI
+ dasm_put(Dst, 506);
+#endif
+ dasm_put(Dst, 508, -16+LJ_ENDIAN_SELECT(4,0), Dt7(->pc));
+#if LJ_HASFFI
+ dasm_put(Dst, 514);
+#endif
+ dasm_put(Dst, 517, -8+LJ_ENDIAN_SELECT(4,0), PC2PROTO(k));
+#if LJ_HASFFI
+ dasm_put(Dst, 523);
+#endif
+ dasm_put(Dst, 531, Dt1(->base), DISPATCH_GL(tmptv), LJ_TSTR, LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0), DISPATCH_GL(tmptv), LJ_TTAB, LJ_ENDIAN_SELECT(0,4), DISPATCH_GL(tmptv2), LJ_ENDIAN_SELECT(4,0), LJ_TSTR, LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0), DISPATCH_GL(tmptv));
+ dasm_put(Dst, 589, DISPATCH_GOT(lj_meta_tget), Dt1(->base), -FRAME_CONT, Dt1(->top), -16+LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(-8,-4), DISPATCH_GL(tmptv), LJ_TSTR, LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0), DISPATCH_GL(tmptv), LJ_TTAB, LJ_ENDIAN_SELECT(0,4), DISPATCH_GL(tmptv2), LJ_ENDIAN_SELECT(4,0), LJ_TSTR, LJ_ENDIAN_SELECT(0,4));
+ dasm_put(Dst, 651, LJ_ENDIAN_SELECT(4,0), DISPATCH_GL(tmptv), DISPATCH_GOT(lj_meta_tset), Dt1(->base), -FRAME_CONT, Dt1(->top), -16+LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(-8,-4), DISPATCH_GOT(lj_meta_comp), Dt1(->base), LJ_ENDIAN_SELECT(2,0), (-(BCBIAS_J*4 >> 16) & 65535));
+ dasm_put(Dst, 721, -4+LJ_ENDIAN_SELECT(1,2), LJ_ENDIAN_SELECT(4,0), LJ_TISTRUECOND, LJ_ENDIAN_SELECT(4,0), LJ_TISTRUECOND, DISPATCH_GOT(lj_meta_equal), Dt1(->base));
+#if LJ_HASFFI
+ dasm_put(Dst, 776, DISPATCH_GOT(lj_meta_equal_cd), Dt1(->base));
+#endif
+ dasm_put(Dst, 789, DISPATCH_GOT(lj_meta_arith), Dt1(->base), -16+LJ_ENDIAN_SELECT(4,0), FRAME_CONT);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 818);
+#endif
+ dasm_put(Dst, 820, DISPATCH_GOT(lj_meta_len), Dt1(->base));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 828);
+#else
+ dasm_put(Dst, 835);
+#endif
+ dasm_put(Dst, 839, DISPATCH_GOT(lj_meta_call), Dt1(->base), LJ_ENDIAN_SELECT(-8,-4), LJ_ENDIAN_SELECT(-4,-8), Dt7(->pc), DISPATCH_GOT(lj_meta_call), Dt1(->base), LJ_ENDIAN_SELECT(-4,-8), LJ_ENDIAN_SELECT(-8,-4), DISPATCH_GOT(lj_meta_for), Dt1(->base));
+#if LJ_HASJIT
+ dasm_put(Dst, 896, BC_JFORI);
+#endif
+ dasm_put(Dst, 900);
+#if LJ_HASJIT
+ dasm_put(Dst, 904, BC_JFORI, BC_FORI);
+#else
+ dasm_put(Dst, 911, BC_FORI);
+#endif
+ dasm_put(Dst, 915, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TISTRUECOND, LJ_ENDIAN_SELECT(-4,-8), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, Dt8(->upvalue), LJ_ENDIAN_SELECT(4,0));
+ dasm_put(Dst, 970, LJ_ENDIAN_SELECT(0,4), LJ_TTAB, LJ_TUDATA, Dt6(->metatable), DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable]), LJ_TNIL, Dt6(->hmask), LJ_TTAB, Dt5(->hash), Dt6(->node), LJ_TSTR, offsetof(Node, key)+LJ_ENDIAN_SELECT(4,0), offsetof(Node, key)+LJ_ENDIAN_SELECT(0,4), DtB(->next), offsetof(Node, val)+LJ_ENDIAN_SELECT(4,0), offsetof(Node, val)+LJ_ENDIAN_SELECT(0,4));
+ dasm_put(Dst, 1020, LJ_TISNUM, LJ_TISNUM, DISPATCH_GL(gcroot[GCROOT_BASEMT]), LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), 8+LJ_ENDIAN_SELECT(0,4), LJ_TTAB, -LJ_TTAB, Dt6(->metatable), Dt6(->marked));
+ dasm_put(Dst, 1075, LJ_GC_BLACK, Dt6(->metatable), DISPATCH_GL(gc.grayagain), ~LJ_GC_BLACK & 255, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), DISPATCH_GOT(lj_tab_get), -LJ_TTAB, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM);
+ dasm_put(Dst, 1129, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TSTR, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), LJ_TISNUM, Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), DISPATCH_GOT(lj_str_fromnum), LJ_TSTR, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TTAB, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(-4,-8), DISPATCH_GOT(lj_tab_next));
+ dasm_put(Dst, 1184, Dt1(->base), Dt1(->top), LJ_TNIL, (2+1)*8, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TTAB, LJ_ENDIAN_SELECT(-4,-8));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1219, Dt6(->metatable), Dt8(->upvalue[0]));
+#else
+ dasm_put(Dst, 1226, Dt8(->upvalue[0]));
+#endif
+ dasm_put(Dst, 1229, 8+LJ_ENDIAN_SELECT(4,0), (3+1)*8, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), 8+LJ_ENDIAN_SELECT(4,0), -LJ_TTAB, LJ_TISNUM, LJ_ENDIAN_SELECT(-4,-8), Dt6(->asize), Dt6(->array), LJ_ENDIAN_SELECT(4,0), (0+1)*8, (2+1)*8, Dt6(->hmask));
+ dasm_put(Dst, 1292, DISPATCH_GOT(lj_tab_getinth), (0+1)*8, (0+1)*8, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TTAB, LJ_ENDIAN_SELECT(-4,-8));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1323, Dt6(->metatable), Dt8(->upvalue[0]));
+#else
+ dasm_put(Dst, 1330, Dt8(->upvalue[0]));
+#endif
+ dasm_put(Dst, 1333, 8+LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(0,4), (3+1)*8, DISPATCH_GL(hookmask), HOOK_ACTIVE_SHIFT, 8+FRAME_PCALL, 8+LJ_ENDIAN_SELECT(4,0), DISPATCH_GL(hookmask), LJ_TFUNC, HOOK_ACTIVE_SHIFT, 16+FRAME_PCALL, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4));
+ dasm_put(Dst, 1392, LJ_TTHREAD, Dt1(->status), Dt1(->cframe), Dt1(->top), Dt1(->base), -LUA_YIELD, Dt1(->maxstack), LJ_ENDIAN_SELECT(-4,-8), Dt1(->base), Dt1(->top), Dt1(->top));
+ dasm_put(Dst, 1451, Dt1(->base), LUA_YIELD+1, Dt1(->top), ~LJ_VMST_INTERP, Dt1(->base), DISPATCH_GL(vmstate), Dt1(->maxstack), Dt1(->top), FRAME_TYPE, LJ_TTRUE, -8+LJ_ENDIAN_SELECT(4,0), LJ_TFALSE, Dt1(->top));
+ dasm_put(Dst, 1511, (2+1)*8, -8+LJ_ENDIAN_SELECT(4,0), FRAME_TYPE, DISPATCH_GOT(lj_state_growstack), Dt8(->upvalue[0].gcr), Dt1(->status), Dt1(->cframe), Dt1(->top), Dt1(->base), -LUA_YIELD, Dt1(->maxstack), LJ_ENDIAN_SELECT(-4,-8), Dt1(->base), Dt1(->top));
+ dasm_put(Dst, 1566, Dt1(->top), Dt1(->base), LUA_YIELD+1, Dt1(->top), ~LJ_VMST_INTERP, Dt1(->base), DISPATCH_GL(vmstate), Dt1(->maxstack), Dt1(->top), FRAME_TYPE);
+ dasm_put(Dst, 1627, DISPATCH_GOT(lj_ffh_coroutine_wrap_err), DISPATCH_GOT(lj_state_growstack), Dt1(->cframe), Dt1(->base), CFRAME_RESUME, Dt1(->top), LUA_YIELD, Dt1(->cframe), Dt1(->status), LJ_ENDIAN_SELECT(4,0), LJ_TISNUM);
+ dasm_put(Dst, 1682, LJ_ENDIAN_SELECT(-4,-8), LJ_ENDIAN_SELECT(-4,-8), -8+LJ_ENDIAN_SELECT(4,0), -8+LJ_ENDIAN_SELECT(0,4), (1+1)*8, FRAME_TYPE, -8+LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(4,0), LJ_TISNUM);
+ dasm_put(Dst, 1750, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(log), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(log10), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0));
+ dasm_put(Dst, 1806, DISPATCH_GOT(exp), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(sin), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(cos), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(tan));
+ dasm_put(Dst, 1862, LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(asin), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(acos), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(atan), LJ_TISNUM);
+ dasm_put(Dst, 1918, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(sinh), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(cosh), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(tanh), LJ_TISNUM);
+ dasm_put(Dst, 1977, LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, DISPATCH_GOT(pow), LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, DISPATCH_GOT(atan2), LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM);
+ dasm_put(Dst, 2039, DISPATCH_GOT(fmod), LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, Dt8(->upvalue[0]), LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM);
+ dasm_put(Dst, 2094, DISPATCH_GOT(ldexp), LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, DISPATCH_GOT(frexp), LJ_ENDIAN_SELECT(-4,-8), DISPATCH_GL(tmptv), DISPATCH_GL(tmptv), (2+1)*8, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, DISPATCH_GOT(modf), LJ_ENDIAN_SELECT(-4,-8), (2+1)*8);
+ dasm_put(Dst, 2158, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM);
+ dasm_put(Dst, 2217, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TSTR, Dt5(->len), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), -LJ_TSTR, Dt5(->len), Dt5([1]), LJ_ENDIAN_SELECT(-4,-8), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_ENDIAN_SELECT(4,0));
+ dasm_put(Dst, 2276, LJ_TISNUM, DISPATCH_GOT(lj_str_new), Dt1(->base), Dt1(->base), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 16+LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM);
+ dasm_put(Dst, 2337, LJ_TSTR, Dt5(->len), sizeof(GCstr)-1, DISPATCH_GL(strempty), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), -LJ_TSTR, LJ_TISNUM, Dt5(->len), DISPATCH_GL(tmpbuf.sz));
+ dasm_put(Dst, 2407, DISPATCH_GL(tmpbuf.buf), Dt5([1]), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TSTR, DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 2464, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TSTR, DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 2527, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TSTR, DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TTAB, DISPATCH_GOT(lj_tab_len), LJ_ENDIAN_SELECT(4,0));
+ dasm_put(Dst, 2586, LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0));
+ dasm_put(Dst, 2649, LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_ENDIAN_SELECT(4,0));
+ dasm_put(Dst, 2719, 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, LJ_ENDIAN_SELECT(4,0));
+ dasm_put(Dst, 2790, 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), 8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, Dt8(->f));
+ dasm_put(Dst, 2863, LJ_ENDIAN_SELECT(-4,-8), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top), LJ_ENDIAN_SELECT(-8,-4), Dt7(->pc), FRAME_TYPE, LJ_ENDIAN_SELECT(1,2), DISPATCH_GOT(lj_state_growstack), LUA_MINSTACK, Dt1(->base));
+ dasm_put(Dst, 2931, DISPATCH_GOT(lj_gc_step), Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top), LJ_ENDIAN_SELECT(-8,-4));
+#if LJ_HASJIT
+ dasm_put(Dst, 2957, DISPATCH_GL(hookmask), HOOK_VMEVENT, DISPATCH_GL(hookcount), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount));
+#endif
+ dasm_put(Dst, 2980, DISPATCH_GL(hookmask), HOOK_ACTIVE, GG_DISP2STATIC, DISPATCH_GL(hookmask), DISPATCH_GL(hookcount), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount), LUA_MASKLINE, DISPATCH_GOT(lj_dispatch_ins), Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 3028, GG_DISP2STATIC, -24+LJ_ENDIAN_SELECT(0,4));
+#if LJ_HASJIT
+ dasm_put(Dst, 3047, LJ_ENDIAN_SELECT(-8,-4), GG_DISP2J, Dt7(->pc), DISPATCH_J(L), PC2PROTO(framesize), DISPATCH_GOT(lj_trace_hot), Dt1(->base), Dt1(->top));
+#endif
+ dasm_put(Dst, 3072);
+#if LJ_HASJIT
+ dasm_put(Dst, 3074);
+#endif
+ dasm_put(Dst, 3077);
+#if LJ_HASJIT
+ dasm_put(Dst, 3080);
+#endif
+ dasm_put(Dst, 3083, DISPATCH_GOT(lj_dispatch_call), Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top), LJ_ENDIAN_SELECT(-8,-4));
+#if LJ_HASJIT
+ dasm_put(Dst, 3107, -(16+32*8+32*4), 16+0*8, 16+32*8+0*4, 16+32*8+1*4, 16+2*8, 16+32*8+2*4, 16+32*8+3*4, 16+4*8, 16+32*8+4*4, 16+32*8+5*4, 16+6*8, 16+32*8+6*4, 16+32*8+7*4, 16+8*8, 16+32*8+8*4, 16+32*8+9*4, 16+10*8, 16+32*8+10*4, 16+32*8+11*4, 16+12*8, 16+32*8+12*4, 16+32*8+13*4, 16+14*8);
+ dasm_put(Dst, 3154, 16+32*8+14*4, 16+32*8+15*4, 16+16*8, 16+32*8+16*4, 16+32*8+17*4, 16+18*8, 16+32*8+18*4, 16+32*8+19*4, 16+20*8, 16+32*8+20*4, 16+32*8+21*4, 16+22*8, 16+32*8+22*4, 16+32*8+23*4, 16+24*8, 16+32*8+24*4, 16+32*8+25*4, 16+26*8, 16+32*8+26*4, 16+32*8+27*4, 16+28*8, 16+32*8+28*4, 16+30*8);
+ dasm_put(Dst, 3201, 16+32*8+30*4, 16+32*8+31*4, ~LJ_VMST_EXIT, 16+32*8+32*4, -GG_DISP2G-32768, DISPATCH_GL(vmstate), 16+32*8+29*4, DISPATCH_GL(jit_L), DISPATCH_GL(jit_base), DISPATCH_GOT(lj_trace_exit), DISPATCH_J(L), DISPATCH_J(parent), DISPATCH_J(exitno), GG_DISP2J, Dt1(->base), Dt1(->cframe), Dt1(->base));
+#endif
+ dasm_put(Dst, 3245);
+#if LJ_HASJIT
+ dasm_put(Dst, 3247, -GG_DISP2G-32768, LJ_ENDIAN_SELECT(-8,-4), LJ_TNIL, Dt7(->pc), DISPATCH_GL(jit_L), PC2PROTO(k), DISPATCH_GL(vmstate), BC_FUNCF*4, DISPATCH_GOT(lj_err_throw));
+#endif
+ dasm_put(Dst, 3296);
+#if LJ_HASJIT
+ dasm_put(Dst, 3344);
+#endif
+ dasm_put(Dst, 3367);
+#if LJ_HASFFI
+#define DtE(_V) (int)(ptrdiff_t)&(((CTState *)0)_V)
+ dasm_put(Dst, 3369, 72+9*4, 72+8*4, 24+5*8, 72+7*4, 72+6*4, 24+4*8, 72+5*4, 72+4*4, 24+3*8, 72+3*4, 72+2*4, 24+2*8, 72+1*4, 72+0*4, 24+1*8, 24+0*8, Dt2(->ctype_state), GG_G2DISP, DISPATCH_GOT(lj_ccallback_enter), DtE(->cb.slot), DtE(->cb.gpr[0]), DtE(->cb.gpr[1]), DtE(->cb.fpr[0]));
+ dasm_put(Dst, 3417, DtE(->cb.gpr[2]), DtE(->cb.gpr[3]), DtE(->cb.fpr[1]), 112+16, DtE(->cb.stack), Dt1(->base), Dt1(->top), LJ_ENDIAN_SELECT(-8,-4), ~LJ_VMST_INTERP, LJ_TNIL, DISPATCH_GL(vmstate), Dt7(->pc));
+#endif
+ dasm_put(Dst, 3461);
+#if LJ_HASFFI
+ dasm_put(Dst, 3463, DISPATCH_GOT(lj_ccallback_leave), DISPATCH_GL(ctype_state), Dt1(->base), Dt1(->top), DtE(->L), DtE(->cb.gpr[0]), DtE(->cb.fpr[0]), DtE(->cb.gpr[1]), DtE(->cb.fpr[1]));
+#endif
+ dasm_put(Dst, 3487);
+#if LJ_HASFFI
+#define DtF(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V)
+ dasm_put(Dst, 3489, DtF(->spadj), DtF(->nsp), offsetof(CCallState, stack), DtF(->func), DtF(->gpr[1]), DtF(->gpr[2]), DtF(->gpr[3]), DtF(->fpr[0]), DtF(->fpr[1]), DtF(->gpr[0]), DtF(->gpr[0]), DtF(->gpr[1]), DtF(->fpr[0]), DtF(->fpr[1]));
+#endif
+}
+
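build_subroutines() above and build_ins() below emit machine code through dasm_put(Dst, start, ...): the first numeric argument indexes into the pregenerated action list (the large word array at the top of this file), and the variadic arguments supply runtime constants such as struct offsets, type tags and branch biases. The following is only a rough, hypothetical sketch of that interleaving idea, not the real DynASM engine.

/* Hypothetical sketch: copy template words, substituting one vararg per
** PATCH marker.  Real DynASM action lists are richer than this. */
#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>

#define PATCH 0xffffffffu            /* marker: take the next vararg */

static const uint32_t action_list[] = {
  0x27bdffe0, PATCH, 0xafbf001c, PATCH, 0x03e00008
};

static void demo_put(uint32_t *out, size_t *n, int start, ...)
{
  va_list ap;
  va_start(ap, start);
  for (size_t i = (size_t)start;
       i < sizeof(action_list)/sizeof(action_list[0]); i++)
    out[(*n)++] = (action_list[i] == PATCH)
                  ? (uint32_t)va_arg(ap, int)   /* runtime constant */
                  : action_list[i];             /* fixed template word */
  va_end(ap);
}

int main(void)
{
  uint32_t code[16]; size_t n = 0;
  demo_put(code, &n, 0, 0x1234, 0x5678);        /* like dasm_put(Dst, 0, ...) */
  for (size_t i = 0; i < n; i++) printf("%08x\n", (unsigned)code[i]);
  return 0;
}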
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ dasm_put(Dst, 3545, defop);
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ dasm_put(Dst, 3547, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, LJ_ENDIAN_SELECT(2,0), (-(BCBIAS_J*4 >> 16) & 65535));
+ if (op == BC_ISLT || op == BC_ISGE) {
+ dasm_put(Dst, 3570);
+ } else {
+ dasm_put(Dst, 3572);
+ }
+ if (op == BC_ISLT || op == BC_ISLE) {
+ dasm_put(Dst, 3574);
+ } else {
+ dasm_put(Dst, 3576);
+ }
+ dasm_put(Dst, 3578);
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ dasm_put(Dst, 3592, LJ_ENDIAN_SELECT(4,0), -4+LJ_ENDIAN_SELECT(2,0), LJ_ENDIAN_SELECT(4,0), (-(BCBIAS_J*4 >> 16) & 65535), LJ_TISNUM, LJ_TISNUM);
+ if (vk) {
+ dasm_put(Dst, 3616);
+ } else {
+ dasm_put(Dst, 3618);
+ }
+ dasm_put(Dst, 3620, LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(0,4));
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 3639, LJ_TCDATA);
+ }
+ dasm_put(Dst, 3644, LJ_TISPRI);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 3647);
+ }
+ dasm_put(Dst, 3650, LJ_TISTABUD+1);
+ if (vk) {
+ dasm_put(Dst, 3661);
+ } else {
+ dasm_put(Dst, 3663);
+ }
+ dasm_put(Dst, 3665, Dt6(->metatable), Dt6(->nomm), 1<> 16) & 65535));
+ if (vk) {
+ dasm_put(Dst, 3708);
+ } else {
+ dasm_put(Dst, 3710);
+ }
+ dasm_put(Dst, 3712);
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ dasm_put(Dst, 3725, LJ_ENDIAN_SELECT(4,0), -4+LJ_ENDIAN_SELECT(2,0), (-(BCBIAS_J*4 >> 16) & 65535), LJ_TISNUM);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 3740);
+ } else {
+ dasm_put(Dst, 3743);
+ }
+ dasm_put(Dst, 3746);
+ if (vk) {
+ dasm_put(Dst, 3749);
+ } else {
+ dasm_put(Dst, 3753);
+ }
+ dasm_put(Dst, 3757);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 3769, LJ_TCDATA);
+ }
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ dasm_put(Dst, 3779, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(2,0));
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 3788, LJ_TCDATA);
+ }
+ dasm_put(Dst, 3793, (-(BCBIAS_J*4 >> 16) & 65535));
+ if (vk) {
+ dasm_put(Dst, 3799);
+ } else {
+ dasm_put(Dst, 3801);
+ }
+ dasm_put(Dst, 3803);
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ dasm_put(Dst, 3816, LJ_ENDIAN_SELECT(2,0), LJ_ENDIAN_SELECT(4,0));
+ if (op == BC_IST || op == BC_ISF) {
+ dasm_put(Dst, 3823, LJ_TISTRUECOND, (-(BCBIAS_J*4 >> 16) & 65535));
+ if (op == BC_IST) {
+ dasm_put(Dst, 3830);
+ } else {
+ dasm_put(Dst, 3832);
+ }
+ dasm_put(Dst, 3834);
+ } else {
+ dasm_put(Dst, 3836, LJ_TISTRUECOND);
+ if (op == BC_ISTC) {
+ dasm_put(Dst, 3840);
+ } else {
+ dasm_put(Dst, 3843);
+ }
+ dasm_put(Dst, 3846, (-(BCBIAS_J*4 >> 16) & 65535));
+ }
+ dasm_put(Dst, 3855);
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ dasm_put(Dst, 3867);
+ break;
+ case BC_NOT:
+ dasm_put(Dst, 3883, LJ_ENDIAN_SELECT(4,0), LJ_TFALSE, LJ_TISTRUECOND, LJ_TTRUE, LJ_ENDIAN_SELECT(4,0));
+ break;
+ case BC_UNM:
+ dasm_put(Dst, 3907, LJ_ENDIAN_SELECT(4,0), LJ_TISNUM);
+ break;
+ case BC_LEN:
+ dasm_put(Dst, 3930, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TSTR, LJ_TTAB, Dt5(->len));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 3964, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 3971, DISPATCH_GOT(lj_tab_len));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 3980, Dt6(->nomm), 1<base), DISPATCH_GOT(lj_meta_cat), Dt1(->base));
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ dasm_put(Dst, 4376, LJ_TSTR, LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0));
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ dasm_put(Dst, 4398, LJ_TCDATA, LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0));
+#endif
+ break;
+ case BC_KSHORT:
+ dasm_put(Dst, 4420);
+ break;
+ case BC_KNUM:
+ dasm_put(Dst, 4437);
+ break;
+ case BC_KPRI:
+ dasm_put(Dst, 4453, LJ_ENDIAN_SELECT(4,0));
+ break;
+ case BC_KNIL:
+ dasm_put(Dst, 4470, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(4,0));
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ dasm_put(Dst, 4494, LJ_ENDIAN_SELECT(-8,-4), Dt7(->uvptr), DtA(->v));
+ break;
+ case BC_USETV:
+ dasm_put(Dst, 4517, LJ_ENDIAN_SELECT(-8,-4), Dt7(->uvptr), DtA(->marked), DtA(->v), LJ_GC_BLACK, DtA(->closed), LJ_ENDIAN_SELECT(4,0), LJ_GC_BLACK|1, -(LJ_TISNUM+1), LJ_TISGCV - (LJ_TISNUM+1), LJ_ENDIAN_SELECT(0,4), Dt4(->gch.marked), LJ_GC_WHITES, DISPATCH_GOT(lj_gc_barrieruv), GG_DISP2G);
+ break;
+ case BC_USETS:
+ dasm_put(Dst, 4577, LJ_ENDIAN_SELECT(-8,-4), Dt7(->uvptr), DtA(->marked), DtA(->v), Dt5(->marked), LJ_GC_BLACK, DtA(->closed), LJ_TSTR, LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0), LJ_GC_WHITES, DISPATCH_GOT(lj_gc_barrieruv), GG_DISP2G);
+ break;
+ case BC_USETN:
+ dasm_put(Dst, 4632, LJ_ENDIAN_SELECT(-8,-4), Dt7(->uvptr), DtA(->v));
+ break;
+ case BC_USETP:
+ dasm_put(Dst, 4655, LJ_ENDIAN_SELECT(-8,-4), Dt7(->uvptr), DtA(->v), LJ_ENDIAN_SELECT(4,0));
+ break;
+
+ case BC_UCLO:
+ dasm_put(Dst, 4679, Dt1(->openupval), (-(BCBIAS_J*4 >> 16) & 65535), DISPATCH_GOT(lj_func_closeuv), Dt1(->base), Dt1(->base));
+ break;
+
+ case BC_FNEW:
+ dasm_put(Dst, 4710, DISPATCH_GOT(lj_func_newL_gc), LJ_ENDIAN_SELECT(-8,-4), Dt1(->base), Dt1(->base), LJ_TFUNC, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4));
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ dasm_put(Dst, 4743, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base));
+ if (op == BC_TNEW) {
+ dasm_put(Dst, 4755, DISPATCH_GOT(lj_tab_new));
+ } else {
+ dasm_put(Dst, 4766, DISPATCH_GOT(lj_tab_dup));
+ }
+ dasm_put(Dst, 4774, Dt1(->base), LJ_TTAB, LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0), DISPATCH_GOT(lj_gc_step_fixtop));
+ break;
+
+ case BC_GGET:
+ case BC_GSET:
+ dasm_put(Dst, 4804, LJ_ENDIAN_SELECT(-8,-4), Dt7(->env));
+ if (op == BC_GGET) {
+ dasm_put(Dst, 4812);
+ } else {
+ dasm_put(Dst, 4815);
+ }
+ dasm_put(Dst, 4818);
+ break;
+
+ case BC_TGETV:
+ dasm_put(Dst, 4820, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_TTAB, LJ_TISNUM, LJ_TSTR, Dt6(->asize), Dt6(->array), LJ_ENDIAN_SELECT(4,0), Dt6(->metatable), Dt6(->nomm), 1<hmask), Dt5(->hash), Dt6(->node), offsetof(Node, key)+LJ_ENDIAN_SELECT(4,0), offsetof(Node, key)+LJ_ENDIAN_SELECT(0,4), DtB(->next), offsetof(Node, val)+LJ_ENDIAN_SELECT(4,0), -LJ_TSTR, Dt6(->metatable), offsetof(Node, val)+LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4));
+ dasm_put(Dst, 4970, LJ_TNIL, Dt6(->nomm), 1<asize), Dt6(->array), LJ_ENDIAN_SELECT(4,0), Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), Dt6(->marked), LJ_ENDIAN_SELECT(4,0), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<marked), Dt6(->gclist));
+ break;
+ case BC_TSETS:
+ dasm_put(Dst, 5145, LJ_ENDIAN_SELECT(4,0), LJ_TTAB, LJ_ENDIAN_SELECT(0,4), Dt6(->hmask), Dt5(->hash), Dt6(->node), Dt6(->nomm), offsetof(Node, key)+LJ_ENDIAN_SELECT(4,0), offsetof(Node, key)+LJ_ENDIAN_SELECT(0,4), LJ_TSTR, DtB(->next), offsetof(Node, val)+LJ_ENDIAN_SELECT(4,0), Dt6(->marked), Dt6(->metatable), LJ_GC_BLACK);
+ dasm_put(Dst, 5203, DtB(->val), Dt6(->nomm), 1<metatable), DISPATCH_GL(tmptv), Dt6(->nomm), 1<base), Dt1(->base));
+ dasm_put(Dst, 5265, DISPATCH_GL(gc.grayagain), ~LJ_GC_BLACK & 255, DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist));
+ break;
+ case BC_TSETB:
+ dasm_put(Dst, 5282, LJ_ENDIAN_SELECT(4,0), LJ_TTAB, LJ_ENDIAN_SELECT(0,4), Dt6(->asize), Dt6(->array), LJ_ENDIAN_SELECT(4,0), Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<marked), Dt6(->gclist));
+ break;
+
+ case BC_TSETM:
+ dasm_put(Dst, 5360, -8+LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(0,4), Dt6(->asize), Dt6(->marked), Dt6(->array), LJ_GC_BLACK, DISPATCH_GOT(lj_tab_reasize), Dt1(->base), Dt1(->base), DISPATCH_GL(gc.grayagain), ~LJ_GC_BLACK & 255, DISPATCH_GL(gc.grayagain), Dt6(->marked));
+ dasm_put(Dst, 5433, Dt6(->gclist));
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ dasm_put(Dst, 5438);
+ break;
+ case BC_CALL:
+ dasm_put(Dst, 5443, LJ_TFUNC, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(-4,-8), Dt7(->pc));
+ break;
+
+ case BC_CALLMT:
+ dasm_put(Dst, 5472);
+ break;
+ case BC_CALLT:
+ dasm_put(Dst, 5474, LJ_TFUNC, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(-4,-8), FRAME_TYPE, Dt7(->ffid), FRAME_VARG, LJ_ENDIAN_SELECT(-8,-4), Dt7(->pc), -8+LJ_ENDIAN_SELECT(-8,-4), Dt7(->pc));
+ dasm_put(Dst, 5543, PC2PROTO(k), FRAME_TYPEP, LJ_ENDIAN_SELECT(-4,-8), FRAME_TYPE);
+ break;
+
+ case BC_ITERC:
+ dasm_put(Dst, 5559, LJ_TFUNC, -24+LJ_ENDIAN_SELECT(4,0), -24+LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(-4,-8), Dt7(->pc));
+ break;
+
+ case BC_ITERN:
+#if LJ_HASJIT
+#endif
+ dasm_put(Dst, 5594, -16+LJ_ENDIAN_SELECT(0,4), -8+LJ_ENDIAN_SELECT(0,4), Dt6(->asize), Dt6(->array), LJ_ENDIAN_SELECT(4,0), -4+LJ_ENDIAN_SELECT(2,0), (-(BCBIAS_J*4 >> 16) & 65535), -8+LJ_ENDIAN_SELECT(0,4), Dt6(->hmask), Dt6(->node), LJ_ENDIAN_SELECT(4,0), -4+LJ_ENDIAN_SELECT(2,0), DtB(->key), (-(BCBIAS_J*4 >> 16) & 65535));
+ dasm_put(Dst, 5676, -8+LJ_ENDIAN_SELECT(0,4));
+ break;
+
+ case BC_ISNEXT:
+ dasm_put(Dst, 5679, -24+LJ_ENDIAN_SELECT(4,0), -24+LJ_ENDIAN_SELECT(0,4), -16+LJ_ENDIAN_SELECT(4,0), -8+LJ_ENDIAN_SELECT(4,0), LJ_TFUNC, -LJ_TTAB, Dt8(->ffid), -LJ_TNIL, -FF_next_N, (-(BCBIAS_J*4 >> 16) & 65535), -8+LJ_ENDIAN_SELECT(0,4), BC_JMP, BC_ITERC, -4+LJ_ENDIAN_SELECT(0,3), LJ_ENDIAN_SELECT(0,3));
+ break;
+
+ case BC_VARG:
+ dasm_put(Dst, 5736, LJ_ENDIAN_SELECT(-4,-8), FRAME_VARG, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), Dt1(->maxstack), DISPATCH_GOT(lj_state_growstack), Dt1(->top), Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 5823);
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ dasm_put(Dst, 5825);
+ break;
+
+ case BC_RET:
+ dasm_put(Dst, 5827, LJ_ENDIAN_SELECT(-4,-8), FRAME_TYPE, FRAME_VARG, LJ_ENDIAN_SELECT(-8,-4), Dt7(->pc), PC2PROTO(k), LJ_ENDIAN_SELECT(4,0), FRAME_TYPEP, LJ_ENDIAN_SELECT(-4,-8));
+ break;
+
+ case BC_RET0: case BC_RET1:
+ dasm_put(Dst, 5899, LJ_ENDIAN_SELECT(-4,-8), FRAME_TYPE, FRAME_VARG);
+ if (op == BC_RET1) {
+ dasm_put(Dst, 5912);
+ }
+ dasm_put(Dst, 5914);
+ if (op == BC_RET1) {
+ dasm_put(Dst, 5919);
+ }
+ dasm_put(Dst, 5921, LJ_ENDIAN_SELECT(-8,-4), Dt7(->pc), PC2PROTO(k));
+ if (op == BC_RET1) {
+ dasm_put(Dst, 5949, LJ_ENDIAN_SELECT(4,0));
+ } else {
+ dasm_put(Dst, 5952, -8+LJ_ENDIAN_SELECT(4,0));
+ }
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+#if LJ_HASJIT
+ dasm_put(Dst, 5955, GG_DISP2HOT, -HOTCOUNT_LOOP, GG_DISP2HOT);
+#endif
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ dasm_put(Dst, 5967);
+ if (vk) {
+ dasm_put(Dst, 5969, FORL_IDX*8, FORL_STEP*8, FORL_STOP*8, FORL_STEP*8+LJ_ENDIAN_SELECT(4,0), FORL_IDX*8);
+ } else {
+ dasm_put(Dst, 5981, FORL_IDX*8+LJ_ENDIAN_SELECT(4,0), FORL_STEP*8+LJ_ENDIAN_SELECT(4,0), FORL_STOP*8+LJ_ENDIAN_SELECT(4,0), LJ_TISNUM, LJ_TISNUM, LJ_TISNUM, FORL_IDX*8, FORL_STOP*8);
+ }
+ if (op != BC_JFORL) {
+ dasm_put(Dst, 6002, (-(BCBIAS_J*4 >> 16) & 65535));
+ }
+ dasm_put(Dst, 6006, FORL_EXT*8);
+ if (op == BC_JFORI) {
+ dasm_put(Dst, 6011, -4+LJ_ENDIAN_SELECT(2,0), BC_JLOOP);
+ } else if (op == BC_JFORL) {
+ dasm_put(Dst, 6025, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 6035);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 6039);
+ } else {
+ dasm_put(Dst, 6042);
+ }
+ dasm_put(Dst, 6045);
+ }
+ dasm_put(Dst, 6048);
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ dasm_put(Dst, 6060, GG_DISP2HOT, -HOTCOUNT_LOOP, GG_DISP2HOT);
+#endif
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ dasm_put(Dst, 6072, LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4));
+ if (op == BC_JITERL) {
+ dasm_put(Dst, 6080, -8+LJ_ENDIAN_SELECT(4,0), BC_JLOOP, -8+LJ_ENDIAN_SELECT(0,4));
+ } else {
+ dasm_put(Dst, 6087, (-(BCBIAS_J*4 >> 16) & 65535), -8+LJ_ENDIAN_SELECT(4,0), -8+LJ_ENDIAN_SELECT(0,4));
+ }
+ dasm_put(Dst, 6097);
+ break;
+
+ case BC_LOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 6110, GG_DISP2HOT, -HOTCOUNT_LOOP, GG_DISP2HOT);
+#endif
+ break;
+
+ case BC_ILOOP:
+ dasm_put(Dst, 6122);
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 6134, DISPATCH_J(trace), DISPATCH_GL(vmstate), DISPATCH_GL(jit_base), DISPATCH_GL(jit_L), DtD(->mcode), GG_DISP2G+32768);
+#endif
+ break;
+
+ case BC_JMP:
+ dasm_put(Dst, 6152, (-(BCBIAS_J*4 >> 16) & 65535));
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ dasm_put(Dst, 6169, GG_DISP2HOT, -HOTCOUNT_CALL, GG_DISP2HOT);
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ dasm_put(Dst, 6181, Dt1(->maxstack), -4+PC2PROTO(numparams), -4+PC2PROTO(k));
+ if (op != BC_JFUNCF) {
+ dasm_put(Dst, 6192);
+ }
+ dasm_put(Dst, 6195);
+ if (op == BC_JFUNCF) {
+ dasm_put(Dst, 6201, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 6206);
+ }
+ dasm_put(Dst, 6216, LJ_ENDIAN_SELECT(4,0));
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ dasm_put(Dst, 6223);
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ dasm_put(Dst, 6225, Dt1(->maxstack), LJ_ENDIAN_SELECT(0,4), 8+FRAME_VARG, -4+PC2PROTO(k), LJ_ENDIAN_SELECT(4,0), -4+PC2PROTO(numparams), LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(0,4), 8+LJ_ENDIAN_SELECT(0,4), 8+LJ_ENDIAN_SELECT(4,0), LJ_ENDIAN_SELECT(4,0));
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 6280, Dt8(->f));
+ } else {
+ dasm_put(Dst, 6283, DISPATCH_GL(wrapf));
+ }
+ dasm_put(Dst, 6286, Dt1(->maxstack), Dt1(->base), Dt1(->top), ~LJ_VMST_C);
+ if (op == BC_FUNCCW) {
+ dasm_put(Dst, 6298, Dt8(->f));
+ }
+ dasm_put(Dst, 6301, DISPATCH_GL(vmstate), Dt1(->base), Dt1(->top), ~LJ_VMST_INTERP, LJ_ENDIAN_SELECT(-4,-8), DISPATCH_GL(vmstate));
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ dasm_put(Dst, 6322);
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.4byte .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.4byte 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.4byte .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte .Lbegin\n"
+ "\t.4byte %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 1\n"
+ "\t.byte 0x9e\n\t.sleb128 2\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
+ for (i = 30; i >= 20; i -= 2)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.4byte .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.byte 0x9f\n\t.uleb128 1\n"
+ "\t.byte 0x90\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
+ fprintf(ctx->fp,
+ "\t.globl lj_err_unwind_dwarf\n"
+ ".Lframe1:\n"
+ "\t.4byte .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.4byte 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0\n"
+ "\t.4byte lj_err_unwind_dwarf\n"
+ "\t.byte 0\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.4byte .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.4byte .LASFDE2-.Lframe1\n"
+ "\t.4byte .Lbegin\n"
+ "\t.4byte %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 1\n"
+ "\t.byte 0x9e\n\t.sleb128 2\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
+ for (i = 30; i >= 20; i -= 2)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.4byte .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.4byte 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.4byte .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.4byte .LASFDE3-.Lframe2\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9f\n\t.uleb128 1\n"
+ "\t.byte 0x90\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
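The emit_asm_debug() routine above hand-assembles DWARF CIE/FDE records: the register-save loops pair a DW_CFA_offset opcode (0x80 | register number) with a frame offset, and the operands are encoded by the assembler's .uleb128/.sleb128 directives. As a minimal standalone sketch of what those directives produce -- the helpers below are hypothetical and not part of this patch or of the LuaJIT sources -- LEB128 encoding in C looks roughly like this:

    /* Illustrative LEB128 encoders (hypothetical helpers, for reference only).
    ** Each byte holds 7 payload bits; the high bit marks "more bytes follow".
    */
    #include <stdint.h>
    #include <stddef.h>

    static size_t uleb128_encode(uint8_t *p, uint64_t v)
    {
      size_t n = 0;
      do {
        uint8_t b = (uint8_t)(v & 0x7f);
        v >>= 7;
        if (v) b |= 0x80;  /* More bytes follow. */
        p[n++] = b;
      } while (v);
      return n;
    }

    static size_t sleb128_encode(uint8_t *p, int64_t v)
    {
      size_t n = 0;
      for (;;) {
        uint8_t b = (uint8_t)(v & 0x7f);
        v >>= 7;  /* Assumes an arithmetic right shift on signed values. */
        /* Stop once the remaining bits are pure sign extension of bit 6. */
        if ((v == 0 && !(b & 0x40)) || (v == -1 && (b & 0x40))) {
          p[n++] = b;
          return n;
        }
        p[n++] = b | 0x80;
      }
    }

For example, the "-4" data alignment factor emitted via .sleb128 above encodes to the single byte 0x7c, and small unsigned offsets fit in one byte each, which keeps the hand-built frame records compact.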
diff --git a/src/LuaJIT/src/buildvm_peobj.c b/src/LuaJIT/src/buildvm_peobj.c
new file mode 100644
index 000000000..17b3293a4
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_peobj.c
@@ -0,0 +1,352 @@
+/*
+** LuaJIT VM builder: PE object emitter.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Only used for building on Windows, since we cannot assume the presence
+** of a suitable assembler. The host and target byte order must match.
+*/
+
+#include "buildvm.h"
+#include "lj_bc.h"
+
+#if LJ_TARGET_X86ORX64
+
+/* Context for PE object emitter. */
+static char *strtab;
+static size_t strtabofs;
+
+/* -- PE object definitions ----------------------------------------------- */
+
+/* PE header. */
+typedef struct PEheader {
+ uint16_t arch;
+ uint16_t nsects;
+ uint32_t time;
+ uint32_t symtabofs;
+ uint32_t nsyms;
+ uint16_t opthdrsz;
+ uint16_t flags;
+} PEheader;
+
+/* PE section. */
+typedef struct PEsection {
+ char name[8];
+ uint32_t vsize;
+ uint32_t vaddr;
+ uint32_t size;
+ uint32_t ofs;
+ uint32_t relocofs;
+ uint32_t lineofs;
+ uint16_t nreloc;
+ uint16_t nline;
+ uint32_t flags;
+} PEsection;
+
+/* PE relocation. */
+typedef struct PEreloc {
+ uint32_t vaddr;
+ uint32_t symidx;
+ uint16_t type;
+} PEreloc;
+
+/* Cannot use sizeof, because it pads up to the max. alignment. */
+#define PEOBJ_RELOC_SIZE (4+4+2)
+
+/* PE symbol table entry. */
+typedef struct PEsym {
+ union {
+ char name[8];
+ uint32_t nameref[2];
+ } n;
+ uint32_t value;
+ int16_t sect;
+ uint16_t type;
+ uint8_t scl;
+ uint8_t naux;
+} PEsym;
+
+/* PE symbol table auxiliary entry for a section. */
+typedef struct PEsymaux {
+ uint32_t size;
+ uint16_t nreloc;
+ uint16_t nline;
+ uint32_t cksum;
+ uint16_t assoc;
+ uint8_t comdatsel;
+ uint8_t unused[3];
+} PEsymaux;
+
+/* Cannot use sizeof, because it pads up to the max. alignment. */
+#define PEOBJ_SYM_SIZE (8+4+2+2+1+1)
+
+/* PE object CPU specific defines. */
+#if LJ_TARGET_X86
+#define PEOBJ_ARCH_TARGET 0x014c
+#define PEOBJ_RELOC_REL32 0x14 /* MS: REL32, GNU: DISP32. */
+#define PEOBJ_RELOC_DIR32 0x06
+#elif LJ_TARGET_X64
+#define PEOBJ_ARCH_TARGET 0x8664
+#define PEOBJ_RELOC_REL32 0x04 /* MS: REL32, GNU: DISP32. */
+#define PEOBJ_RELOC_DIR32 0x02
+#define PEOBJ_RELOC_ADDR32NB 0x03
+#endif
+
+/* Section numbers (0-based). */
+enum {
+ PEOBJ_SECT_ABS = -2,
+ PEOBJ_SECT_UNDEF = -1,
+ PEOBJ_SECT_TEXT,
+#if LJ_TARGET_X64
+ PEOBJ_SECT_PDATA,
+ PEOBJ_SECT_XDATA,
+#endif
+ PEOBJ_SECT_RDATA_Z,
+ PEOBJ_NSECTIONS
+};
+
+/* Symbol types. */
+#define PEOBJ_TYPE_NULL 0
+#define PEOBJ_TYPE_FUNC 0x20
+
+/* Symbol storage class. */
+#define PEOBJ_SCL_EXTERN 2
+#define PEOBJ_SCL_STATIC 3
+
+/* -- PE object emitter --------------------------------------------------- */
+
+/* Emit PE object symbol. */
+static void emit_peobj_sym(BuildCtx *ctx, const char *name, uint32_t value,
+ int sect, int type, int scl)
+{
+ PEsym sym;
+ size_t len = strlen(name);
+ if (!strtab) { /* Pass 1: only calculate string table length. */
+ if (len > 8) strtabofs += len+1;
+ return;
+ }
+ if (len <= 8) {
+ memcpy(sym.n.name, name, len);
+ memset(sym.n.name+len, 0, 8-len);
+ } else {
+ sym.n.nameref[0] = 0;
+ sym.n.nameref[1] = (uint32_t)strtabofs;
+ memcpy(strtab + strtabofs, name, len);
+ strtab[strtabofs+len] = 0;
+ strtabofs += len+1;
+ }
+ sym.value = value;
+ sym.sect = (int16_t)(sect+1); /* 1-based section number. */
+ sym.type = (uint16_t)type;
+ sym.scl = (uint8_t)scl;
+ sym.naux = 0;
+ owrite(ctx, &sym, PEOBJ_SYM_SIZE);
+}
+
+/* Emit PE object section symbol. */
+static void emit_peobj_sym_sect(BuildCtx *ctx, PEsection *pesect, int sect)
+{
+ PEsym sym;
+ PEsymaux aux;
+ if (!strtab) return; /* Pass 1: no output. */
+ memcpy(sym.n.name, pesect[sect].name, 8);
+ sym.value = 0;
+ sym.sect = (int16_t)(sect+1); /* 1-based section number. */
+ sym.type = PEOBJ_TYPE_NULL;
+ sym.scl = PEOBJ_SCL_STATIC;
+ sym.naux = 1;
+ owrite(ctx, &sym, PEOBJ_SYM_SIZE);
+ memset(&aux, 0, sizeof(PEsymaux));
+ aux.size = pesect[sect].size;
+ aux.nreloc = pesect[sect].nreloc;
+ owrite(ctx, &aux, PEOBJ_SYM_SIZE);
+}
+
+/* Emit Windows PE object file. */
+void emit_peobj(BuildCtx *ctx)
+{
+ PEheader pehdr;
+ PEsection pesect[PEOBJ_NSECTIONS];
+ uint32_t sofs;
+ int i, nrsym;
+ union { uint8_t b; uint32_t u; } host_endian;
+
+ host_endian.u = 1;
+ if (host_endian.b != LJ_ENDIAN_SELECT(1, 0)) {
+ fprintf(stderr, "Error: different byte order for host and target\n");
+ exit(1);
+ }
+
+ sofs = sizeof(PEheader) + PEOBJ_NSECTIONS*sizeof(PEsection);
+
+ /* Fill in PE sections. */
+ memset(&pesect, 0, PEOBJ_NSECTIONS*sizeof(PEsection));
+ memcpy(pesect[PEOBJ_SECT_TEXT].name, ".text", sizeof(".text")-1);
+ pesect[PEOBJ_SECT_TEXT].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_TEXT].size = (uint32_t)ctx->codesz);
+ pesect[PEOBJ_SECT_TEXT].relocofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_TEXT].nreloc = (uint16_t)ctx->nreloc) * PEOBJ_RELOC_SIZE;
+ /* Flags: 60 = read+execute, 50 = align16, 20 = code. */
+ pesect[PEOBJ_SECT_TEXT].flags = 0x60500020;
+
+#if LJ_TARGET_X64
+ memcpy(pesect[PEOBJ_SECT_PDATA].name, ".pdata", sizeof(".pdata")-1);
+ pesect[PEOBJ_SECT_PDATA].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_PDATA].size = 6*4);
+ pesect[PEOBJ_SECT_PDATA].relocofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_PDATA].nreloc = 6) * PEOBJ_RELOC_SIZE;
+ /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
+ pesect[PEOBJ_SECT_PDATA].flags = 0x40300040;
+
+ memcpy(pesect[PEOBJ_SECT_XDATA].name, ".xdata", sizeof(".xdata")-1);
+ pesect[PEOBJ_SECT_XDATA].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_XDATA].size = 8*2+4+6*2); /* See below. */
+ pesect[PEOBJ_SECT_XDATA].relocofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_XDATA].nreloc = 1) * PEOBJ_RELOC_SIZE;
+ /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
+ pesect[PEOBJ_SECT_XDATA].flags = 0x40300040;
+#endif
+
+ memcpy(pesect[PEOBJ_SECT_RDATA_Z].name, ".rdata$Z", sizeof(".rdata$Z")-1);
+ pesect[PEOBJ_SECT_RDATA_Z].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_RDATA_Z].size = (uint32_t)strlen(ctx->dasm_ident)+1);
+ /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
+ pesect[PEOBJ_SECT_RDATA_Z].flags = 0x40300040;
+
+ /* Fill in PE header. */
+ pehdr.arch = PEOBJ_ARCH_TARGET;
+ pehdr.nsects = PEOBJ_NSECTIONS;
+ pehdr.time = 0; /* Timestamp is optional. */
+ pehdr.symtabofs = sofs;
+ pehdr.opthdrsz = 0;
+ pehdr.flags = 0;
+
+ /* Compute the size of the symbol table:
+ ** @feat.00 + nsections*2
+ ** + asm_start + nsym
+ ** + nrsym
+ */
+ nrsym = ctx->nrelocsym;
+ pehdr.nsyms = 1+PEOBJ_NSECTIONS*2 + 1+ctx->nsym + nrsym;
+#if LJ_TARGET_X64
+ pehdr.nsyms += 1; /* Symbol for lj_err_unwind_win64. */
+#endif
+
+ /* Write PE object header and all sections. */
+ owrite(ctx, &pehdr, sizeof(PEheader));
+ owrite(ctx, &pesect, sizeof(PEsection)*PEOBJ_NSECTIONS);
+
+ /* Write .text section. */
+ owrite(ctx, ctx->code, ctx->codesz);
+ for (i = 0; i < ctx->nreloc; i++) {
+ PEreloc reloc;
+ reloc.vaddr = (uint32_t)ctx->reloc[i].ofs;
+ reloc.symidx = 1+2+ctx->reloc[i].sym; /* Reloc syms are after .text sym. */
+ reloc.type = ctx->reloc[i].type ? PEOBJ_RELOC_REL32 : PEOBJ_RELOC_DIR32;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ }
+
+#if LJ_TARGET_X64
+ { /* Write .pdata section. */
+ uint32_t fcofs = (uint32_t)ctx->sym[ctx->nsym-1].ofs;
+ uint32_t pdata[3]; /* Start of .text, end of .text and .xdata. */
+ PEreloc reloc;
+ pdata[0] = 0; pdata[1] = fcofs; pdata[2] = 0;
+ owrite(ctx, &pdata, sizeof(pdata));
+ pdata[0] = fcofs; pdata[1] = (uint32_t)ctx->codesz; pdata[2] = 20;
+ owrite(ctx, &pdata, sizeof(pdata));
+ reloc.vaddr = 0; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 4; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 8; reloc.symidx = 1+2+nrsym+2;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 12; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 16; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 20; reloc.symidx = 1+2+nrsym+2;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ }
+ { /* Write .xdata section. */
+ uint16_t xdata[8+2+6];
+ PEreloc reloc;
+ xdata[0] = 0x01|0x08|0x10; /* Ver. 1, uhandler/ehandler, prolog size 0. */
+ xdata[1] = 0x0005; /* Number of unwind codes, no frame pointer. */
+ xdata[2] = 0x4200; /* Stack offset 4*8+8 = aword*5. */
+ xdata[3] = 0x3000; /* Push rbx. */
+ xdata[4] = 0x6000; /* Push rsi. */
+ xdata[5] = 0x7000; /* Push rdi. */
+ xdata[6] = 0x5000; /* Push rbp. */
+ xdata[7] = 0; /* Alignment. */
+ xdata[8] = xdata[9] = 0; /* Relocated address of exception handler. */
+ xdata[10] = 0x01; /* Ver. 1, no handler, prolog size 0. */
+ xdata[11] = 0x1504; /* Number of unwind codes, fp = rbp, fpofs = 16. */
+ xdata[12] = 0x0300; /* set_fpreg. */
+ xdata[13] = 0x0200; /* stack offset 0*8+8 = aword*1. */
+ xdata[14] = 0x3000; /* Push rbx. */
+ xdata[15] = 0x5000; /* Push rbp. */
+ owrite(ctx, &xdata, sizeof(xdata));
+ reloc.vaddr = 2*8; reloc.symidx = 1+2+nrsym+2+2;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ }
+#endif
+
+ /* Write .rdata$Z section. */
+ owrite(ctx, ctx->dasm_ident, strlen(ctx->dasm_ident)+1);
+
+ /* Write symbol table. */
+ strtab = NULL; /* 1st pass: collect string sizes. */
+ for (;;) {
+ strtabofs = 4;
+ /* Mark as SafeSEH compliant. */
+ emit_peobj_sym(ctx, "@feat.00", 1,
+ PEOBJ_SECT_ABS, PEOBJ_TYPE_NULL, PEOBJ_SCL_STATIC);
+
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_TEXT);
+ for (i = 0; i < nrsym; i++)
+ emit_peobj_sym(ctx, ctx->relocsym[i], 0,
+ PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
+
+#if LJ_TARGET_X64
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_PDATA);
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_XDATA);
+ emit_peobj_sym(ctx, "lj_err_unwind_win64", 0,
+ PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
+#endif
+
+ emit_peobj_sym(ctx, ctx->beginsym, 0,
+ PEOBJ_SECT_TEXT, PEOBJ_TYPE_NULL, PEOBJ_SCL_EXTERN);
+ for (i = 0; i < ctx->nsym; i++)
+ emit_peobj_sym(ctx, ctx->sym[i].name, (uint32_t)ctx->sym[i].ofs,
+ PEOBJ_SECT_TEXT, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
+
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_RDATA_Z);
+
+ if (strtab)
+ break;
+ /* 2nd pass: alloc strtab, write syms and copy strings. */
+ strtab = (char *)malloc(strtabofs);
+ *(uint32_t *)strtab = (uint32_t)strtabofs;
+ }
+
+ /* Write string table. */
+ owrite(ctx, strtab, strtabofs);
+}
+
+#else
+
+void emit_peobj(BuildCtx *ctx)
+{
+ UNUSED(ctx);
+ fprintf(stderr, "Error: no PE object support for this target\n");
+ exit(1);
+}
+
+#endif
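emit_peobj() above writes the symbol table in two passes: the first pass runs with strtab == NULL and only accumulates the string-table size needed for names longer than eight bytes; the second pass allocates the table, stores its total length in the first four bytes (which is why strtabofs starts at 4), and emits the symbols for real. The sketch below condenses the COFF name convention that emit_peobj_sym() follows; it uses hypothetical names and is an illustrative reduction, not code taken from the patch.

    /* Illustrative sketch of the COFF symbol-name convention (hypothetical
    ** helper): names of up to 8 bytes live inline in the name field, longer
    ** names are stored in the string table and referenced by offset, with
    ** nameref[0] == 0 marking the string-table form.
    */
    #include <stdint.h>
    #include <string.h>

    typedef struct CoffName {
      union {
        char name[8];
        uint32_t nameref[2];
      } n;
    } CoffName;

    static void coff_set_name(CoffName *cn, const char *name,
                              char *strtab, uint32_t *strtabofs)
    {
      size_t len = strlen(name);
      if (len <= 8) {
        memcpy(cn->n.name, name, len);
        memset(cn->n.name + len, 0, 8 - len);
      } else {
        cn->n.nameref[0] = 0;
        cn->n.nameref[1] = *strtabofs;       /* Offset into the string table. */
        memcpy(strtab + *strtabofs, name, len + 1);
        *strtabofs += (uint32_t)len + 1;
      }
    }

The same dry-run-then-emit pattern also explains the explicit PEOBJ_SYM_SIZE and PEOBJ_RELOC_SIZE constants used for owrite(): the on-disk records are packed, so sizeof() on the padded structs would overstate their size.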
diff --git a/src/LuaJIT/src/buildvm_ppc.dasc b/src/LuaJIT/src/buildvm_ppc.dasc
new file mode 100644
index 000000000..027e75b0b
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_ppc.dasc
@@ -0,0 +1,4887 @@
+|// Low-level VM code for PowerPC CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch ppc
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r1 = sp, r2 and r13 = reserved (TOC, TLS or SDATA)
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r14 // Base of current Lua stack frame.
+|.define KBASE, r15 // Constants of current Lua function.
+|.define PC, r16 // Next PC.
+|.define DISPATCH, r17 // Opcode dispatch table.
+|.define LREG, r18 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8.
+|.define JGL, r31 // On-trace: global_State + 32768.
+|
+|// Constants for type-comparisons, stores and conversions. C callee-save.
+|.define TISNUM, r22
+|.define TISNIL, r23
+|.define ZERO, r24
+|.define TOBIT, f30 // 2^52 + 2^51.
+|.define TONUM, f31 // 2^52 + 2^51 + 2^31.
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r20 // Callee-save.
+|.define RB, r10
+|.define RC, r11
+|.define RD, r12
+|.define INS, r7 // Overlaps CARG5.
+|
+|.define TMP0, r0
+|.define TMP1, r8
+|.define TMP2, r9
+|.define TMP3, r6 // Overlaps CARG4.
+|
+|// Saved temporaries.
+|.define SAVE0, r21
+|
+|// Calling conventions.
+|.define CARG1, r3
+|.define CARG2, r4
+|.define CARG3, r5
+|.define CARG4, r6 // Overlaps TMP3.
+|.define CARG5, r7 // Overlaps INS.
+|
+|.define FARG1, f1
+|.define FARG2, f2
+|
+|.define CRET1, r3
+|.define CRET2, r4
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.define SAVE_LR, 276(sp)
+|.define CFRAME_SPACE, 272 // Delta for sp.
+|// Back chain for sp: 272(sp) <-- sp entering interpreter
+|.define SAVE_FPR_, 128 // .. 128+18*8: 64 bit FPR saves.
+|.define SAVE_GPR_, 56 // .. 56+18*4: 32 bit GPR saves.
+|.define SAVE_CR, 52(sp) // 32 bit CR save.
+|.define SAVE_ERRF, 48(sp) // 32 bit C frame info.
+|.define SAVE_NRES, 44(sp)
+|.define SAVE_CFRAME, 40(sp)
+|.define SAVE_L, 36(sp)
+|.define SAVE_PC, 32(sp)
+|.define SAVE_MULTRES, 28(sp)
+|.define UNUSED1, 24(sp)
+|.define TMPD_LO, 20(sp)
+|.define TMPD_HI, 16(sp)
+|.define TONUM_LO, 12(sp)
+|.define TONUM_HI, 8(sp)
+|// Next frame lr: 4(sp)
+|// Back chain for sp: 0(sp) <-- sp while in interpreter
+|
+|.define TMPD_BLO, 23(sp)
+|.define TMPD, TMPD_HI
+|.define TONUM_D, TONUM_HI
+|
+|.macro save_, reg
+| stw r..reg, SAVE_GPR_+(reg-14)*4(sp)
+| stfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
+|.endmacro
+|.macro rest_, reg
+| lwz r..reg, SAVE_GPR_+(reg-14)*4(sp)
+| lfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
+|.endmacro
+|
+|.macro saveregs
+| stwu sp, -CFRAME_SPACE(sp)
+| save_ 14; save_ 15; save_ 16
+| mflr r0
+| save_ 17; save_ 18; save_ 19; save_ 20; save_ 21; save_ 22
+| stw r0, SAVE_LR
+| save_ 23; save_ 24; save_ 25
+| mfcr r0
+| save_ 26; save_ 27; save_ 28; save_ 29; save_ 30; save_ 31
+| stw r0, SAVE_CR
+|.endmacro
+|
+|.macro restoreregs
+| lwz r0, SAVE_LR; lwz r12, SAVE_CR
+| rest_ 14; rest_ 15; rest_ 16; rest_ 17; rest_ 18; rest_ 19
+| mtlr r0; mtcrf 0x38, r12
+| rest_ 20; rest_ 21; rest_ 22; rest_ 23; rest_ 24; rest_ 25
+| rest_ 26; rest_ 27; rest_ 28; rest_ 29; rest_ 30; rest_ 31
+| addi sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|
+|//-----------------------------------------------------------------------
+|
+|// These basic macros should really be part of DynASM.
+|.macro srwi, rx, ry, n; rlwinm rx, ry, 32-n, n, 31; .endmacro
+|.macro slwi, rx, ry, n; rlwinm rx, ry, n, 0, 31-n; .endmacro
+|.macro rotlwi, rx, ry, n; rlwinm rx, ry, n, 0, 31; .endmacro
+|.macro rotlw, rx, ry, rn; rlwnm rx, ry, rn, 0, 31; .endmacro
+|.macro subi, rx, ry, i; addi rx, ry, -i; .endmacro
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; tw 4, sp, sp; .endmacro
+|
+|// int/FP conversions.
+|.macro tonum_i, freg, reg
+| xoris reg, reg, 0x8000
+| stw reg, TONUM_LO
+| lfd freg, TONUM_D
+| fsub freg, freg, TONUM
+|.endmacro
+|
+|.macro tonum_u, freg, reg
+| stw reg, TONUM_LO
+| lfd freg, TONUM_D
+| fsub freg, freg, TOBIT
+|.endmacro
+|
+|.macro toint, reg, freg, tmpfreg
+| fctiwz tmpfreg, freg
+| stfd tmpfreg, TMPD
+| lwz reg, TMPD_LO
+|.endmacro
+|
+|.macro toint, reg, freg
+| toint reg, freg, freg
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_PC, -8
+|.define FRAME_FUNC, -4
+|
+|// Instruction decode.
+|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro
+|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro
+|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro
+|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro
+|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro
+|
+|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro
+|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch. Note: optimized for e300!
+|.macro ins_NEXT2
+| decode_OP4 TMP1, INS
+| lwzx TMP0, DISPATCH, TMP1
+| mtctr TMP0
+| decode_RB8 RB, INS
+| decode_RD8 RD, INS
+| decode_RA8 RA, INS
+| decode_RC8 RC, INS
+| bctr
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| lwz PC, LFUNC:RB->pc
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+| decode_OP4 TMP1, INS
+| decode_RA8 RA, INS
+| lwzx TMP0, DISPATCH, TMP1
+| add RA, RA, BASE
+| mtctr TMP0
+| bctr
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| stw PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checknum, reg; cmplw reg, TISNUM; .endmacro
+|.macro checknum, cr, reg; cmplw cr, reg, TISNUM; .endmacro
+|.macro checkstr, reg; cmpwi reg, LJ_TSTR; .endmacro
+|.macro checktab, reg; cmpwi reg, LJ_TTAB; .endmacro
+|.macro checkfunc, reg; cmpwi reg, LJ_TFUNC; .endmacro
+|.macro checknil, reg; cmpwi reg, LJ_TNIL; .endmacro
+|
+|.macro branch_RD
+| srwi TMP0, RD, 1
+| addis PC, PC, -(BCBIAS_J*4 >> 16)
+| add PC, PC, TMP0
+|.endmacro
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|.macro hotcheck, delta, target
+| rlwinm TMP1, PC, 31, 25, 30
+| addi TMP1, TMP1, GG_DISP2HOT
+| lhzx TMP2, DISPATCH, TMP1
+| addic. TMP2, TMP2, -delta
+| sthx TMP2, DISPATCH, TMP1
+| blt target
+|.endmacro
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL, ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp
+| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| // Assumes LJ_GC_BLACK is 0x04.
+| rlwinm mark, mark, 0, 30, 28 // black2gray(tab)
+| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| stb mark, tab->marked
+| stw tmp, tab->gclist
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andi. TMP0, PC, FRAME_P
+ | li TMP1, LJ_TTRUE
+ | beq ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | mr BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | stwu TMP1, FRAME_PC(RA) // Prepend true to results.
+ |
+ |->vm_returnc:
+ | andi. TMP0, PC, FRAME_TYPE
+ | addi RD, RD, 8 // RD = (nresults+1)*8.
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | cmpwi TMP0, FRAME_C
+ | rlwinm TMP2, PC, 0, 0, 28
+ | li_vmstate C
+ | sub TMP2, BASE, TMP2 // TMP2 = previous base.
+ | bney ->vm_returnp
+ |
+ | addic. TMP1, RD, -8
+ | stw TMP2, L->base
+ | lwz TMP2, SAVE_NRES
+ | subi BASE, BASE, 8
+ | st_vmstate
+ | slwi TMP2, TMP2, 3
+ | beq >2
+ |1:
+ | addic. TMP1, TMP1, -8
+ | lfd f0, 0(RA)
+ | addi RA, RA, 8
+ | stfd f0, 0(BASE)
+ | addi BASE, BASE, 8
+ | bney <1
+ |
+ |2:
+ | cmpw TMP2, RD // More/less results wanted?
+ | bne >6
+ |3:
+ | stw BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | lwz TMP0, SAVE_CFRAME // Restore previous C frame.
+ | li CRET1, 0 // Ok return status for vm_pcall.
+ | stw TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | blr
+ |
+ |6:
+ | ble >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | lwz TMP1, L->maxstack
+ | cmplw BASE, TMP1
+ | bge >8
+ | stw TISNIL, 0(BASE)
+ | addi RD, RD, 8
+ | addi BASE, BASE, 8
+ | b <2
+ |
+ |7: // Less results wanted.
+ | subfic TMP3, TMP2, 0 // LUA_MULTRET+1 case?
+ | sub TMP0, RD, TMP2
+ | subfe TMP1, TMP1, TMP1 // TMP1 = TMP2 == 0 ? 0 : -1
+ | and TMP0, TMP0, TMP1
+ | sub BASE, BASE, TMP0 // Either keep top or shrink it.
+ | b <3
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | stw BASE, L->top // Save current top held in BASE (yes).
+ | mr SAVE0, RD
+ | mr CARG2, TMP2
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz TMP2, SAVE_NRES
+ | mr RD, SAVE0
+ | slwi TMP2, TMP2, 3
+ | lwz BASE, L->top // Need the (realloced) L->top in BASE.
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mr sp, CARG1
+ | mr CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | li TMP0, ~LJ_VMST_C
+ | lwz GL:TMP1, L->glref
+ | stw TMP0, GL:TMP1->vmstate
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | rlwinm sp, CARG1, 0, 0, 29
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lwz BASE, L->base
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | li ZERO, 0
+ | stw TMP3, TMPD
+ | li TMP1, LJ_TFALSE
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | li TISNIL, LJ_TNIL
+ | li_vmstate INTERP
+ | lfs TOBIT, TMPD
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | la RA, -8(BASE) // Results start at BASE-8.
+ | stw TMP3, TMPD
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw TMP1, 0(RA) // Prepend false to error message.
+ | li RD, 16 // 2 results: false + error message.
+ | st_vmstate
+ | lfs TONUM, TMPD
+ | b ->vm_returnc
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | li CARG2, LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | stw BASE, L->base
+ | addi PC, PC, 4 // Must point after first instruction.
+ | stw RC, L->top
+ | srwi CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz BASE, L->base
+ | lwz RC, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mr L, CARG1
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mr BASE, CARG2
+ | lbz TMP1, L->status
+ | stw L, SAVE_L
+ | li PC, FRAME_CP
+ | addi TMP0, sp, CFRAME_RESUME
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw CARG3, SAVE_NRES
+ | cmplwi TMP1, 0
+ | stw CARG3, SAVE_ERRF
+ | stw TMP0, L->cframe
+ | stw CARG3, SAVE_CFRAME
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | beq >3
+ |
+ | // Resume after yield (like a return).
+ | mr RA, BASE
+ | lwz BASE, L->base
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lwz TMP1, L->top
+ | lwz PC, FRAME_PC(BASE)
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | stb CARG3, L->status
+ | stw TMP3, TMPD
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | lfs TOBIT, TMPD
+ | sub RD, TMP1, BASE
+ | stw TMP3, TMPD
+ | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | addi RD, RD, 8
+ | stw TMP0, TONUM_HI
+ | li_vmstate INTERP
+ | li ZERO, 0
+ | st_vmstate
+ | andi. TMP0, PC, FRAME_TYPE
+ | mr MULTRES, RD
+ | lfs TONUM, TMPD
+ | li TISNIL, LJ_TNIL
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | li PC, FRAME_CP
+ | stw CARG4, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | lwz TMP1, L:CARG1->cframe
+ | stw CARG3, SAVE_NRES
+ | mr L, CARG1
+ | stw CARG1, SAVE_L
+ | mr BASE, CARG2
+ | stw sp, L->cframe // Add our C frame to cframe chain.
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | stw TMP1, SAVE_CFRAME
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | lwz TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lwz TMP1, L->top
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | add PC, PC, BASE
+ | stw TMP3, TMPD
+ | li ZERO, 0
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | lfs TOBIT, TMPD
+ | sub PC, PC, TMP2 // PC = frame delta + frame type
+ | stw TMP3, TMPD
+ | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | sub NARGS8:RC, TMP1, BASE
+ | stw TMP0, TONUM_HI
+ | li_vmstate INTERP
+ | lfs TONUM, TMPD
+ | li TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | lwz TMP0, FRAME_PC(BASE)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | checkfunc TMP0; bne ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mr L, CARG1
+ | lwz TMP0, L:CARG1->stack
+ | stw CARG1, SAVE_L
+ | lwz TMP1, L->top
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | lwz TMP1, L->cframe
+ | stw sp, L->cframe // Add our C frame to cframe chain.
+ | li TMP2, 0
+ | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | stw TMP2, SAVE_ERRF // No error function.
+ | stw TMP1, SAVE_CFRAME
+ | mtctr CARG4
+ | bctrl // (lua_State *L, lua_CFunction func, void *ud)
+ | mr. BASE, CRET1
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | li PC, FRAME_CP
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | bne <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | lwz TMP0, -12(BASE) // Continuation.
+ | mr RB, BASE
+ | mr BASE, TMP2 // Restore caller BASE.
+ | lwz LFUNC:TMP1, FRAME_FUNC(TMP2)
+#if LJ_HASFFI
+ | cmplwi TMP0, 1
+#endif
+ | lwz PC, -16(RB) // Restore PC from [cont|PC].
+ | subi TMP2, RD, 8
+ | lwz TMP1, LFUNC:TMP1->pc
+ | stwx TISNIL, RA, TMP2 // Ensure one valid arg.
+#if LJ_HASFFI
+ | ble >1
+#endif
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | // BASE = base, RA = resultptr, RB = meta base
+ | mtctr TMP0
+ | bctr // Jump to continuation.
+ |
+#if LJ_HASFFI
+ |1:
+ | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ | subi TMP1, RB, 16
+ | sub RC, TMP1, BASE
+ | b ->vm_call_tail
+#endif
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lwz INS, -4(PC)
+ | subi CARG2, RB, 16
+ | decode_RB8 SAVE0, INS
+ | lfd f0, 0(RA)
+ | add TMP1, BASE, SAVE0
+ | stw BASE, L->base
+ | cmplw TMP1, CARG2
+ | sub CARG3, CARG2, TMP1
+ | decode_RA8 RA, INS
+ | stfd f0, 0(CARG2)
+ | bney ->BC_CAT_Z
+ | stfdx f0, BASE, RA
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TSTR
+ | decode_RB8 RB, INS
+ | stw STR:RC, 4(CARG3)
+ | add CARG2, BASE, RB
+ | stw TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgets:
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TTAB
+ | stw TAB:RB, 4(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | stw TMP0, 0(CARG2)
+ | li TMP1, LJ_TSTR
+ | stw STR:RC, 4(CARG3)
+ | stw TMP1, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ if (!LJ_DUALNUM) {
+ | tonum_u f0, TMP0
+ }
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ if (LJ_DUALNUM) {
+ | stw TISNUM, 0(CARG3)
+ | stw TMP0, 4(CARG3)
+ } else {
+ | stfd f0, 0(CARG3)
+ }
+ | b >1
+ |
+ |->vmeta_tgetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ | beq >3
+ | lfd f0, 0(CRET1)
+ | ins_next1
+ | stfdx f0, BASE, RA
+ | ins_next2
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | subfic TMP1, BASE, FRAME_CONT
+ | lwz BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 16 // 2 args for func(t, k).
+ | b ->vm_call_dispatch_f
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TSTR
+ | decode_RB8 RB, INS
+ | stw STR:RC, 4(CARG3)
+ | add CARG2, BASE, RB
+ | stw TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsets:
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TTAB
+ | stw TAB:RB, 4(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | stw TMP0, 0(CARG2)
+ | li TMP1, LJ_TSTR
+ | stw STR:RC, 4(CARG3)
+ | stw TMP1, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ if (!LJ_DUALNUM) {
+ | tonum_u f0, TMP0
+ }
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ if (LJ_DUALNUM) {
+ | stw TISNUM, 0(CARG3)
+ | stw TMP0, 4(CARG3)
+ } else {
+ | stfd f0, 0(CARG3)
+ }
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ | lfdx f0, BASE, RA
+ | beq >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | ins_next1
+ | stfd f0, 0(CRET1)
+ | ins_next2
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | subfic TMP1, BASE, FRAME_CONT
+ | lwz BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ | stfd f0, 16(BASE) // Copy value to third argument.
+ | b ->vm_call_dispatch_f
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | mr CARG1, L
+ | subi PC, PC, 4
+ if (LJ_DUALNUM) {
+ | mr CARG2, RA
+ } else {
+ | add CARG2, BASE, RA
+ }
+ | stw PC, SAVE_PC
+ if (LJ_DUALNUM) {
+ | mr CARG3, RD
+ } else {
+ | add CARG3, BASE, RD
+ }
+ | stw BASE, L->base
+ | decode_OP1 CARG4, INS
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | cmplwi CRET1, 1
+ | bgt ->vmeta_binop
+ | subfic CRET1, CRET1, 0
+ |4:
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | decode_RD4 TMP2, INS
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | and TMP2, TMP2, CRET1
+ | add PC, PC, TMP2
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lwz INS, -4(PC)
+ | lfd f0, 0(RA)
+ | decode_RA8 TMP1, INS
+ | stfdx f0, BASE, TMP1
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is true.
+ | subfe CRET1, CRET1, CRET1
+ | not CRET1, CRET1
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is false.
+ | subfe CRET1, CRET1, CRET1
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | subi PC, PC, 4
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |->vmeta_equal_cd:
+#if LJ_HASFFI
+ | mr CARG2, INS
+ | subi PC, PC, 4
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+#endif
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_nv:
+ | add CARG3, KBASE, RC
+ | add CARG4, BASE, RB
+ | b >1
+ |->vmeta_arith_nv2:
+ if (LJ_DUALNUM) {
+ | mr CARG3, RC
+ | mr CARG4, RB
+ | b >1
+ }
+ |
+ |->vmeta_unm:
+ | mr CARG3, RD
+ | mr CARG4, RD
+ | b >1
+ |
+ |->vmeta_arith_vn:
+ | add CARG3, BASE, RB
+ | add CARG4, KBASE, RC
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | add CARG3, BASE, RB
+ | add CARG4, BASE, RC
+ if (LJ_DUALNUM) {
+ | b >1
+ }
+ |->vmeta_arith_vn2:
+ |->vmeta_arith_vv2:
+ if (LJ_DUALNUM) {
+ | mr CARG3, RB
+ | mr CARG4, RC
+ }
+ |1:
+ | add CARG2, BASE, RA
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS.
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | beq ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub TMP1, CRET1, BASE
+ | stw PC, -16(CRET1) // [cont|PC]
+ | mr TMP2, BASE
+ | addi PC, TMP1, FRAME_CONT
+ | mr BASE, CRET1
+ | li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | mr SAVE0, CARG1
+#endif
+ | mr CARG2, RD
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | cmplwi CRET1, 0
+ | bne ->vmeta_binop // Binop call for compatibility.
+ | mr CARG1, SAVE0
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | mr CARG1, L
+ | stw TMP2, L->base // This is the callers base!
+ | subi CARG2, BASE, 8
+ | stw PC, SAVE_PC
+ | add CARG3, BASE, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mr CARG1, L
+ | stw BASE, L->base
+ | subi CARG2, RA, 8
+ | stw PC, SAVE_PC
+ | add CARG3, RA, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz TMP1, FRAME_PC(BASE)
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | b ->BC_CALLT_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mr CARG1, L
+ | stw BASE, L->base
+ | mr CARG2, RA
+ | stw PC, SAVE_PC
+ | mr SAVE0, INS
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+#if LJ_HASJIT
+ | decode_OP1 TMP0, SAVE0
+#endif
+ | decode_RA8 RA, SAVE0
+#if LJ_HASJIT
+ | cmpwi TMP0, BC_JFORI
+#endif
+ | decode_RD8 RD, SAVE0
+#if LJ_HASJIT
+ | beqy =>BC_JFORI
+#endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz CARG1, 4(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lwz CARG4, 8(BASE)
+ | lwz CARG1, 4(BASE)
+ | lwz CARG2, 12(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lfd FARG1, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG3; bge ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lfd FARG1, 0(BASE)
+ | lwz CARG4, 8(BASE)
+ | lfd FARG2, 8(BASE)
+ | blt ->fff_fallback
+ | checknum CARG3; bge ->fff_fallback
+ | checknum CARG4; bge ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1.
+ |.macro ffgccheck
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | cmplw TMP0, TMP1
+ | bgel ->fff_gcstep
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | li TMP1, LJ_TFALSE
+ | la RA, -8(BASE)
+ | cmplw cr1, CARG3, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | bge cr1, ->fff_fallback
+ | stw CARG3, 0(RA)
+ | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | stw CARG1, 4(RA)
+ | beq ->fff_res // Done if exactly 1 argument.
+ | li TMP1, 8
+ | subi RC, RC, 8
+ |1:
+ | cmplw TMP1, RC
+ | lfdx f0, BASE, TMP1
+ | stfdx f0, RA, TMP1
+ | addi TMP1, TMP1, 8
+ | bney <1
+ | b ->fff_res
+ |
+ |.ffunc type
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | subfc TMP0, TISNUM, CARG1
+ | subfe TMP2, CARG1, CARG1
+ | orc TMP1, TMP2, TMP0
+ | addi TMP1, TMP1, ~LJ_TISNUM+1
+ | slwi TMP1, TMP1, 3
+ | la TMP2, CFUNC:RB->upvalue
+ | lfdx FARG1, TMP2, TMP1
+ | b ->fff_resn
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | checktab CARG3; bne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | lwz TAB:CARG1, TAB:CARG1->metatable
+ |2:
+ | li CARG3, LJ_TNIL
+ | cmplwi TAB:CARG1, 0
+ | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beq ->fff_restv
+ | lwz TMP0, TAB:CARG1->hmask
+ | li CARG3, LJ_TTAB // Use metatable as default result.
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:CARG1->node
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | lwz CARG4, NODE:TMP2->key
+ | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
+ | lwz CARG2, NODE:TMP2->val
+ | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2)
+ | checkstr CARG4; bne >4
+ | cmpw TMP0, STR:RC; beq >5
+ |4:
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | beq ->fff_restv // Not found, keep default result.
+ | b <3
+ |5:
+ | checknil CARG2
+ | beq ->fff_restv // Ditto for nil value.
+ | mr CARG3, CARG2 // Return value of mt.__metatable.
+ | mr CARG1, TMP1
+ | b ->fff_restv
+ |
+ |6:
+ | cmpwi CARG3, LJ_TUDATA; beq <1
+ | subfc TMP0, TISNUM, CARG3
+ | subfe TMP2, CARG3, CARG3
+ | orc TMP1, TMP2, TMP0
+ | addi TMP1, TMP1, ~LJ_TISNUM+1
+ | slwi TMP1, TMP1, 2
+ | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH)
+ | lwzx TAB:CARG1, TMP2, TMP1
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktab CARG3; bne ->fff_fallback
+ | lwz TAB:TMP1, TAB:CARG1->metatable
+ | checktab CARG4; bne ->fff_fallback
+ | cmplwi TAB:TMP1, 0
+ | lbz TMP3, TAB:CARG1->marked
+ | bne ->fff_fallback
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | stw TAB:CARG2, TAB:CARG1->metatable
+ | beq ->fff_restv
+ | barrierback TAB:CARG1, TMP3, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG4, 0(BASE)
+ | lwz TAB:CARG2, 4(BASE)
+ | blt ->fff_fallback
+ | checktab CARG4; bne ->fff_fallback
+ | la CARG3, 8(BASE)
+ | mr CARG1, L
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ | lfd FARG1, 0(CRET1)
+ | b ->fff_resn
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | lfd FARG1, 0(BASE)
+ | bne ->fff_fallback // Exactly one argument.
+ | checknum CARG1; bgt ->fff_fallback
+ | b ->fff_resn
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | checkstr CARG3
+ | // A __tostring method in the string base metatable is ignored.
+ | beq ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | checknum CARG3
+ | cmplwi cr1, TMP0, 0
+ | stw BASE, L->base // Add frame since C call can throw.
+ | crorc 4*cr0+eq, 4*cr0+gt, 4*cr1+eq
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | beq ->fff_fallback
+ | ffgccheck
+ | mr CARG1, L
+ | mr CARG2, BASE
+ if (LJ_DUALNUM) {
+ | bl extern lj_str_fromnumber // (lua_State *L, cTValue *o)
+ } else {
+ | bl extern lj_str_fromnum // (lua_State *L, lua_Number *np)
+ }
+ | // Returns GCstr *.
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc next
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | lwz TAB:CARG2, 4(BASE)
+ | blt ->fff_fallback
+ | stwx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil.
+ | checktab CARG1
+ | lwz PC, FRAME_PC(BASE)
+ | bne ->fff_fallback
+ | stw BASE, L->base // Add frame since C call can throw.
+ | mr CARG1, L
+ | stw BASE, L->top // Dummy frame length is ok.
+ | la CARG3, 8(BASE)
+ | stw PC, SAVE_PC
+ | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
+ | // Returns 0 at end of traversal.
+ | cmplwi CRET1, 0
+ | li CARG3, LJ_TNIL
+ | beq ->fff_restv // End of traversal: return nil.
+ | lfd f0, 8(BASE) // Copy key and value to results.
+ | la RA, -8(BASE)
+ | lfd f1, 16(BASE)
+ | stfd f0, 0(RA)
+ | li RD, (2+1)*8
+ | stfd f1, 8(RA)
+ | b ->fff_res
+ |
+ |.ffunc_1 pairs
+ | checktab CARG3
+ | lwz PC, FRAME_PC(BASE)
+ | bne ->fff_fallback
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | lfd f0, CFUNC:RB->upvalue[0]
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ | lfd f0, CFUNC:RB->upvalue[0]
+ | la RA, -8(BASE)
+#endif
+ | stw TISNIL, 8(BASE)
+ | li RD, (3+1)*8
+ | stfd f0, 0(RA)
+ | b ->fff_res
+ |
+ |.ffunc ipairs_aux
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lwz TAB:CARG1, 4(BASE)
+ | lwz CARG4, 8(BASE)
+ if (LJ_DUALNUM) {
+ | lwz TMP2, 12(BASE)
+ } else {
+ | lfd FARG2, 8(BASE)
+ }
+ | blt ->fff_fallback
+ | checktab CARG3
+ | checknum cr1, CARG4
+ | lwz PC, FRAME_PC(BASE)
+ if (LJ_DUALNUM) {
+ | bne ->fff_fallback
+ | bne cr1, ->fff_fallback
+ } else {
+ | lus TMP0, 0x3ff0
+ | stw ZERO, TMPD_LO
+ | bne ->fff_fallback
+ | stw TMP0, TMPD_HI
+ | bge cr1, ->fff_fallback
+ | lfd FARG1, TMPD
+ | toint TMP2, FARG2, f0
+ }
+ | lwz TMP0, TAB:CARG1->asize
+ | lwz TMP1, TAB:CARG1->array
+ if (!LJ_DUALNUM) {
+ | fadd FARG2, FARG2, FARG1
+ }
+ | addi TMP2, TMP2, 1
+ | la RA, -8(BASE)
+ | cmplw TMP0, TMP2
+ if (LJ_DUALNUM) {
+ | stw TISNUM, 0(RA)
+ | slwi TMP3, TMP2, 3
+ | stw TMP2, 4(RA)
+ } else {
+ | slwi TMP3, TMP2, 3
+ | stfd FARG2, 0(RA)
+ }
+ | ble >2 // Not in array part?
+ | lwzx TMP2, TMP1, TMP3
+ | lfdx f0, TMP1, TMP3
+ |1:
+ | checknil TMP2
+ | li RD, (0+1)*8
+ | beq ->fff_res // End of iteration, return 0 results.
+ | li RD, (2+1)*8
+ | stfd f0, 8(RA)
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lwz TMP0, TAB:CARG1->hmask
+ | cmplwi TMP0, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | mr CARG2, TMP2
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | cmplwi CRET1, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | lwz TMP2, 0(CRET1)
+ | lfd f0, 0(CRET1)
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktab CARG3
+ | lwz PC, FRAME_PC(BASE)
+ | bne ->fff_fallback
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | lfd f0, CFUNC:RB->upvalue[0]
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ | lfd f0, CFUNC:RB->upvalue[0]
+ | la RA, -8(BASE)
+#endif
+ if (LJ_DUALNUM) {
+ | stw TISNUM, 8(BASE)
+ } else {
+ | stw ZERO, 8(BASE)
+ }
+ | stw ZERO, 12(BASE)
+ | li RD, (3+1)*8
+ | stfd f0, 0(RA)
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | cmplwi NARGS8:RC, 8
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | blt ->fff_fallback
+ | mr TMP2, BASE
+ | la BASE, 8(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | addi PC, TMP3, 8+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |.ffunc xpcall
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG4, 8(BASE)
+ | lfd FARG2, 8(BASE)
+ | lfd FARG1, 0(BASE)
+ | blt ->fff_fallback
+ | lbz TMP1, DISPATCH_GL(hookmask)(DISPATCH)
+ | mr TMP2, BASE
+ | checkfunc CARG4; bne ->fff_fallback // Traceback must be a function.
+ | la BASE, 16(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP1, TMP1, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ | stfd FARG2, 0(TMP2) // Swap function and traceback.
+ | subi NARGS8:RC, NARGS8:RC, 16
+ | stfd FARG1, 8(TMP2)
+ | addi PC, TMP1, 16+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | cmpwi CARG3, LJ_TTHREAD; bne ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ | lbz TMP0, L:CARG1->status
+ | lwz TMP1, L:CARG1->cframe
+ | lwz CARG2, L:CARG1->top
+ | cmplwi cr0, TMP0, LUA_YIELD
+ | lwz TMP2, L:CARG1->base
+ | cmplwi cr1, TMP1, 0
+ | lwz TMP0, L:CARG1->maxstack
+ | cmplw cr7, CARG2, TMP2
+ | lwz PC, FRAME_PC(BASE)
+ | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0
+ | add TMP2, CARG2, NARGS8:RC
+ | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD
+ | cmplw cr1, TMP2, TMP0
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt
+ | stw PC, SAVE_PC
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov
+ | stw BASE, L->base
+ | blt cr6, ->fff_fallback
+ |1:
+ |.if resume
+ | addi BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | subi TMP2, TMP2, 8
+ |.endif
+ | stw TMP2, L:CARG1->top
+ | li TMP1, 0
+ | stw BASE, L->top
+ |2: // Move args to coroutine.
+ | cmpw TMP1, NARGS8:RC
+ | lfdx f0, BASE, TMP1
+ | beq >3
+ | stfdx f0, CARG2, TMP1
+ | addi TMP1, TMP1, 8
+ | b <2
+ |3:
+ | li CARG3, 0
+ | mr L:SAVE0, L:CARG1
+ | li CARG4, 0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | lwz TMP2, L:SAVE0->base
+ | cmplwi CRET1, LUA_YIELD
+ | lwz TMP3, L:SAVE0->top
+ | li_vmstate INTERP
+ | lwz BASE, L->base
+ | st_vmstate
+ | bgt >8
+ | sub RD, TMP3, TMP2
+ | lwz TMP0, L->maxstack
+ | cmplwi RD, 0
+ | add TMP1, BASE, RD
+ | beq >6 // No results?
+ | cmplw TMP1, TMP0
+ | li TMP1, 0
+ | bgt >9 // Need to grow stack?
+ |
+ | subi TMP3, RD, 8
+ | stw TMP2, L:SAVE0->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | cmplw TMP1, TMP3
+ | lfdx f0, TMP2, TMP1
+ | stfdx f0, BASE, TMP1
+ | addi TMP1, TMP1, 8
+ | bne <5
+ |6:
+ | andi. TMP0, PC, FRAME_TYPE
+ |.if resume
+ | li TMP1, LJ_TTRUE
+ | la RA, -8(BASE)
+ | stw TMP1, -8(BASE) // Prepend true to results.
+ | addi RD, RD, 16
+ |.else
+ | mr RA, BASE
+ | addi RD, RD, 8
+ |.endif
+ |7:
+ | stw PC, SAVE_PC
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | andi. TMP0, PC, FRAME_TYPE
+ | la TMP3, -8(TMP3)
+ | li TMP1, LJ_TFALSE
+ | lfd f0, 0(TMP3)
+ | stw TMP3, L:SAVE0->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | stw TMP1, -8(BASE) // Prepend false to results.
+ | la RA, -8(BASE)
+ | stfd f0, 0(BASE) // Copy error message.
+ | b <7
+ |.else
+ | mr CARG1, L
+ | mr CARG2, L:SAVE0
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mr CARG1, L
+ | srwi CARG2, RD, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | li CRET1, 0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | lwz TMP0, L->cframe
+ | add TMP1, BASE, NARGS8:RC
+ | stw BASE, L->base
+ | andi. TMP0, TMP0, CFRAME_RESUME
+ | stw TMP1, L->top
+ | li CRET1, LUA_YIELD
+ | beq ->fff_fallback
+ | stw ZERO, L->cframe
+ | stb CRET1, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_1 math_abs
+ | checknum CARG3
+ if (LJ_DUALNUM) {
+ | bne >2
+ | srawi TMP1, CARG1, 31
+ | xor TMP2, TMP1, CARG1
+ | sub. CARG1, TMP2, TMP1
+ | blt >1
+ |->fff_resi:
+ | lwz PC, FRAME_PC(BASE)
+ | la RA, -8(BASE)
+ | stw TISNUM, -8(BASE)
+ | stw CRET1, -4(BASE)
+ | b ->fff_res1
+ |1:
+ | lus CARG3, 0x41e0 // 2^31.
+ | li CARG1, 0
+ | b ->fff_restv
+ |2:
+ }
+ | bge ->fff_fallback
+ | rlwinm CARG3, CARG3, 0, 1, 31
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CARG3/CARG1 = TValue result.
+ | lwz PC, FRAME_PC(BASE)
+ | stw CARG3, -8(BASE)
+ | la RA, -8(BASE)
+ | stw CARG1, -4(BASE)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andi. TMP0, PC, FRAME_TYPE
+ | mr MULTRES, RD
+ | bney ->vm_return
+ | lwz INS, -4(PC)
+ | decode_RB8 RB, INS
+ |5:
+ | cmplw RB, RD // More results expected?
+ | decode_RA8 TMP0, INS
+ | bgt >6
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | stwx TISNIL, RA, TMP1
+ | b <5
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | bl extern func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ | bl extern func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.macro math_round, func
+ | .ffunc_1 math_ .. func
+ | checknum CARG3; beqy ->fff_restv
+ | rlwinm TMP2, CARG3, 12, 21, 31
+ | bge ->fff_fallback
+ | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023
+ | cmplwi cr1, TMP2, 31 // 0 <= exp < 31?
+ | subfic TMP0, TMP2, 31
+ | blt >3
+ | slwi TMP1, CARG3, 11
+ | srwi TMP3, CARG1, 21
+ | oris TMP1, TMP1, 0x8000
+ | addi TMP2, TMP2, 1
+ | or TMP1, TMP1, TMP3
+ | slwi CARG2, CARG1, 11
+ | bge cr1, >4
+ | slw TMP3, TMP1, TMP2
+ | srw CARG1, TMP1, TMP0
+ | or TMP3, TMP3, CARG2
+ | srawi TMP2, CARG3, 31
+ |.if "func" == "floor"
+ | and TMP1, TMP3, TMP2
+ | addic TMP0, TMP1, -1
+ | subfe TMP1, TMP0, TMP1
+ | add CARG1, CARG1, TMP1
+ | xor CARG1, CARG1, TMP2
+ | sub CARG1, CARG1, TMP2
+ | b ->fff_resi
+ |.else
+ | andc TMP1, TMP3, TMP2
+ | addic TMP0, TMP1, -1
+ | subfe TMP1, TMP0, TMP1
+ | addo. CARG1, CARG1, TMP1
+ | xor CARG1, CARG1, TMP2
+ | sub CARG1, CARG1, TMP2
+ | bns ->fff_resi
+ | // Potential overflow.
+ | mcrxr cr0; bley ->fff_resi // Ignore unrelated overflow.
+ | lus CARG3, 0x41e0 // 2^31.
+ | li CARG1, 0
+ | b ->fff_restv
+ |.endif
+ |3: // |x| < 1
+ | add TMP2, CARG3, CARG3
+ | srawi TMP1, CARG3, 31
+ | or TMP2, CARG1, TMP2 // ztest = (hi+hi) | lo
+ |.if "func" == "floor"
+ | and TMP1, TMP2, TMP1 // (ztest & sign) == 0 ? 0 : -1
+ | subfic TMP2, TMP1, 0
+ | subfe CARG1, CARG1, CARG1
+ |.else
+ | andc TMP1, TMP2, TMP1 // (ztest & ~sign) == 0 ? 0 : 1
+ | addic TMP2, TMP1, -1
+ | subfe CARG1, TMP2, TMP1
+ |.endif
+ | b ->fff_resi
+ |4: // exp >= 31. Check for -(2^31).
+ | xoris TMP1, TMP1, 0x8000
+ | srawi TMP2, CARG3, 31
+ |.if "func" == "floor"
+ | or TMP1, TMP1, CARG2
+ |.endif
+ | orc. TMP1, TMP1, TMP2
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | lus CARG1, 0x8000 // -(2^31).
+ | beqy ->fff_resi
+ |5:
+ | lfd FARG1, 0(BASE)
+ | bl extern func
+ | b ->fff_resn
+ |.endmacro
+ |
+ if (LJ_DUALNUM) {
+ | math_round floor
+ | math_round ceil
+ } else {
+ | // NYI: use internal implementation.
+ | math_extern floor
+ | math_extern ceil
+ }
+ |
+ | math_extern sqrt
+ | math_extern log
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |->ff_math_deg:
+ |.ffunc_n math_rad
+ | lfd FARG2, CFUNC:RB->upvalue[0]
+ | fmul FARG1, FARG1, FARG2
+ | b ->fff_resn
+ |
+ if (LJ_DUALNUM) {
+ |.ffunc math_ldexp
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lfd FARG1, 0(BASE)
+ | lwz CARG4, 8(BASE)
+ | lwz CARG1, 12(BASE)
+ | blt ->fff_fallback
+ | checknum CARG3; bge ->fff_fallback
+ | checknum CARG4; bne ->fff_fallback
+ } else {
+ |.ffunc_nn math_ldexp
+ | toint CARG1, FARG2
+ }
+ | bl extern ldexp
+ | b ->fff_resn
+ |
+ |.ffunc_n math_frexp
+ | la CARG1, DISPATCH_GL(tmptv)(DISPATCH)
+ | lwz PC, FRAME_PC(BASE)
+ | bl extern frexp
+ | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | la RA, -8(BASE)
+ if (!LJ_DUALNUM) {
+ | tonum_i FARG2, TMP1
+ }
+ | stfd FARG1, 0(RA)
+ | li RD, (2+1)*8
+ if (LJ_DUALNUM) {
+ | stw TISNUM, 8(RA)
+ | stw TMP1, 12(RA)
+ } else {
+ | stfd FARG2, 8(RA)
+ }
+ | b ->fff_res
+ |
+ |.ffunc_n math_modf
+ | la CARG1, -8(BASE)
+ | lwz PC, FRAME_PC(BASE)
+ | bl extern modf
+ | la RA, -8(BASE)
+ | stfd FARG1, 0(BASE)
+ | li RD, (2+1)*8
+ | b ->fff_res
+ |
+ |.macro math_minmax, name, ismax
+ ||if (LJ_DUALNUM) {
+ | .ffunc_1 name
+ | checknum CARG3
+ | addi TMP1, BASE, 8
+ | add TMP2, BASE, NARGS8:RC
+ | bne >4
+ |1: // Handle integers.
+ | lwz CARG4, 0(TMP1)
+ | cmplw cr1, TMP1, TMP2
+ | lwz CARG2, 4(TMP1)
+ | bge cr1, ->fff_resi
+ | checknum CARG4
+ | xoris TMP0, CARG1, 0x8000
+ | xoris TMP3, CARG2, 0x8000
+ | bne >3
+ | subfc TMP3, TMP3, TMP0
+ | subfe TMP0, TMP0, TMP0
+ |.if ismax
+ | andc TMP3, TMP3, TMP0
+ |.else
+ | and TMP3, TMP3, TMP0
+ |.endif
+ | add CARG1, TMP3, CARG2
+ | addi TMP1, TMP1, 8
+ | b <1
+ |3:
+ | bge ->fff_fallback
+ | // Convert intermediate result to number and continue below.
+ | tonum_i FARG1, CARG1
+ | lfd FARG2, 0(TMP1)
+ | b >6
+ |4:
+ | lfd FARG1, 0(BASE)
+ | bge ->fff_fallback
+ |5: // Handle numbers.
+ | lwz CARG4, 0(TMP1)
+ | cmplw cr1, TMP1, TMP2
+ | lfd FARG2, 0(TMP1)
+ | bge cr1, ->fff_resn
+ | checknum CARG4; bge >7
+ |6:
+ | fsub f0, FARG1, FARG2
+ | addi TMP1, TMP1, 8
+ |.if ismax
+ | fsel FARG1, f0, FARG1, FARG2
+ |.else
+ | fsel FARG1, f0, FARG2, FARG1
+ |.endif
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | lwz CARG2, 4(TMP1)
+ | bne ->fff_fallback
+ | tonum_i FARG2, CARG2
+ | b <6
+ ||} else {
+ | .ffunc_n name
+ | li TMP1, 8
+ |1:
+ | lwzx CARG2, BASE, TMP1
+ | lfdx FARG2, BASE, TMP1
+ | cmplw cr1, TMP1, NARGS8:RC
+ | checknum CARG2
+ | bge cr1, ->fff_resn
+ | bge ->fff_fallback
+ | fsub f0, FARG1, FARG2
+ | addi TMP1, TMP1, 8
+ |.if ismax
+ | fsel FARG1, f0, FARG1, FARG2
+ |.else
+ | fsel FARG1, f0, FARG2, FARG1
+ |.endif
+ | b <1
+ ||}
+ |.endmacro
+ |
+ | math_minmax math_min, 0
+ | math_minmax math_max, 1
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc_1 string_len
+ | checkstr CARG3; bne ->fff_fallback
+ | lwz CRET1, STR:CARG1->len
+ | b ->fff_resi
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checkstr CARG3
+ | bne ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ if (LJ_DUALNUM) {
+ | lbz CARG1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | li RD, (0+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | cmplwi TMP0, 0
+ | la RA, -8(BASE)
+ | beqy ->fff_res
+ | b ->fff_resi
+ } else {
+ | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | addic TMP3, TMP0, -1 // RD = ((str->len != 0)+1)*8
+ | subfe RD, TMP3, TMP0
+ | stw TMP1, TONUM_LO // Inlined tonum_u f0, TMP1.
+ | addi RD, RD, 1
+ | lfd f0, TONUM_D
+ | la RA, -8(BASE)
+ | lwz PC, FRAME_PC(BASE)
+ | fsub f0, f0, TOBIT
+ | slwi RD, RD, 3
+ | stfd f0, 0(RA)
+ | b ->fff_res
+ }
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ if (LJ_DUALNUM) {
+ | lwz TMP0, 4(BASE)
+ | bne ->fff_fallback // Exactly 1 argument.
+ | checknum CARG3; bne ->fff_fallback
+ | la CARG2, 7(BASE)
+ } else {
+ | lfd FARG1, 0(BASE)
+ | bne ->fff_fallback // Exactly 1 argument.
+ | checknum CARG3; bge ->fff_fallback
+ | toint TMP0, FARG1
+ | la CARG2, TMPD_BLO
+ }
+ | li CARG3, 1
+ | cmplwi TMP0, 255; bgt ->fff_fallback
+ |->fff_newstr:
+ | mr CARG1, L
+ | stw BASE, L->base
+ | stw PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ | // Returns GCstr *.
+ | lwz BASE, L->base
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 16(BASE)
+ if (!LJ_DUALNUM) {
+ | lfd f0, 16(BASE)
+ }
+ | lwz TMP0, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | blt ->fff_fallback
+ | lwz CARG2, 8(BASE)
+ if (LJ_DUALNUM) {
+ | lwz TMP1, 12(BASE)
+ } else {
+ | lfd f1, 8(BASE)
+ }
+ | li TMP2, -1
+ | beq >1
+ if (LJ_DUALNUM) {
+ | checknum CARG3
+ | lwz TMP2, 20(BASE)
+ | bne ->fff_fallback
+ |1:
+ | checknum CARG2; bne ->fff_fallback
+ } else {
+ | checknum CARG3; bge ->fff_fallback
+ | toint TMP2, f0
+ |1:
+ | checknum CARG2; bge ->fff_fallback
+ }
+ | checkstr TMP0; bne ->fff_fallback
+ if (!LJ_DUALNUM) {
+ | toint TMP1, f1
+ }
+ | lwz TMP0, STR:CARG1->len
+ | cmplw TMP0, TMP2 // len < end? (unsigned compare)
+ | addi TMP3, TMP2, 1
+ | blt >5
+ |2:
+ | cmpwi TMP1, 0 // start <= 0?
+ | add TMP3, TMP1, TMP0
+ | ble >7
+ |3:
+ | sub CARG3, TMP2, TMP1
+ | addi CARG2, STR:CARG1, #STR-1
+ | srawi TMP0, CARG3, 31
+ | addi CARG3, CARG3, 1
+ | add CARG2, CARG2, TMP1
+ | andc CARG3, CARG3, TMP0
+ | b ->fff_newstr
+ |
+ |5: // Negative end or overflow.
+ | sub CARG2, TMP0, TMP2
+ | srawi CARG2, CARG2, 31
+ | andc TMP3, TMP3, CARG2 // end = end > len ? len : end+len+1
+ | add TMP2, TMP0, TMP3
+ | b <2
+ |
+ |7: // Negative start or underflow.
+ | addic CARG3, TMP1, -1
+ | subfe CARG3, CARG3, CARG3
+ | srawi CARG2, TMP3, 31 // Note: modifies carry.
+ | andc TMP3, TMP3, CARG3
+ | andc TMP1, TMP3, CARG2
+ | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0)
+ | b <3
+ |
+ |.ffunc string_rep // Only handle the 1-char case inline.
+ | ffgccheck
+ | cmplwi NARGS8:RC, 16
+ | lwz TMP0, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | lwz CARG4, 8(BASE)
+ if (LJ_DUALNUM) {
+ | lwz CARG3, 12(BASE)
+ } else {
+ | lfd FARG2, 8(BASE)
+ }
+ | blt ->fff_fallback
+ | checkstr TMP0; bne ->fff_fallback
+ if (LJ_DUALNUM) {
+ | checknum CARG4; bne ->fff_fallback
+ } else {
+ | checknum CARG4; bge ->fff_fallback
+ | toint CARG3, FARG2
+ }
+ | lwz TMP0, STR:CARG1->len
+ | cmpwi CARG3, 0
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | ble >2 // Count <= 0? (or non-int)
+ | cmplwi TMP0, 1
+ | subi TMP2, CARG3, 1
+ | blt >2 // Zero length string?
+ | cmplw cr1, TMP1, CARG3
+ | bne ->fff_fallback // Fallback for > 1-char strings.
+ | lbz TMP0, STR:CARG1[1]
+ | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | blt cr1, ->fff_fallback
+ |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
+ | cmplwi TMP2, 0
+ | stbx TMP0, CARG2, TMP2
+ | subi TMP2, TMP2, 1
+ | bne <1
+ | b ->fff_newstr
+ |2: // Return empty string.
+ | la STR:CARG1, DISPATCH_GL(strempty)(DISPATCH)
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |
+ |.ffunc string_reverse
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | blt ->fff_fallback
+ | checkstr CARG3
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | bne ->fff_fallback
+ | lwz CARG3, STR:CARG1->len
+ | la CARG1, #STR(STR:CARG1)
+ | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | li TMP2, 0
+ | cmplw TMP1, CARG3
+ | subi TMP3, CARG3, 1
+ | blt ->fff_fallback
+ |1: // Reverse string copy.
+ | cmpwi TMP3, 0
+ | lbzx TMP1, CARG1, TMP2
+ | blty ->fff_newstr
+ | stbx TMP1, CARG2, TMP3
+ | subi TMP3, TMP3, 1
+ | addi TMP2, TMP2, 1
+ | b <1
+ |
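+ |// Branchless ASCII case conversion: the 0x20 case bit is flipped only for bytes
+ |// in the range [lo, lo+25].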
+ |.macro ffstring_case, name, lo
+ | .ffunc name
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | blt ->fff_fallback
+ | checkstr CARG3
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | bne ->fff_fallback
+ | lwz CARG3, STR:CARG1->len
+ | la CARG1, #STR(STR:CARG1)
+ | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | cmplw TMP1, CARG3
+ | li TMP2, 0
+ | blt ->fff_fallback
+ |1: // ASCII case conversion.
+ | cmplw TMP2, CARG3
+ | lbzx TMP1, CARG1, TMP2
+ | bgey ->fff_newstr
+ | subi TMP0, TMP1, lo
+ | xori TMP3, TMP1, 0x20
+ | addic TMP0, TMP0, -26
+ | subfe TMP3, TMP3, TMP3
+ | andi. TMP3, TMP3, 0x20
+ | xor TMP1, TMP1, TMP3
+ | stbx TMP1, CARG2, TMP2
+ | addi TMP2, TMP2, 1
+ | b <1
+ |.endmacro
+ |
+ |ffstring_case string_lower, 65
+ |ffstring_case string_upper, 97
+ |
+ |//-- Table library ------------------------------------------------------
+ |
+ |.ffunc_1 table_getn
+ | checktab CARG3; bne ->fff_fallback
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | b ->fff_resi
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |.macro .ffunc_bit, name
+ ||if (LJ_DUALNUM) {
+ | .ffunc_1 bit_..name
+ | checknum CARG3; bnel ->fff_tobit_fb
+ ||} else {
+ | .ffunc_n bit_..name
+ | fadd FARG1, FARG1, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG1, TMPD_LO
+ ||}
+ |.endmacro
+ |
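+ |// Fold all arguments left to right into CARG1 with the given instruction.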
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | addi TMP1, BASE, 8
+ | add TMP2, BASE, NARGS8:RC
+ |1:
+ | lwz CARG4, 0(TMP1)
+ | cmplw cr1, TMP1, TMP2
+ ||if (LJ_DUALNUM) {
+ | lwz CARG2, 4(TMP1)
+ ||} else {
+ | lfd FARG1, 0(TMP1)
+ ||}
+ | bgey cr1, ->fff_resi
+ | checknum CARG4
+ ||if (LJ_DUALNUM) {
+ | bnel ->fff_bitop_fb
+ ||} else {
+ | fadd FARG1, FARG1, TOBIT
+ | bge ->fff_fallback
+ | stfd FARG1, TMPD
+ | lwz CARG2, TMPD_LO
+ ||}
+ | ins CARG1, CARG1, CARG2
+ | addi TMP1, TMP1, 8
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | rotlwi TMP0, CARG1, 8
+ | rlwimi TMP0, CARG1, 24, 0, 7
+ | rlwimi TMP0, CARG1, 24, 16, 23
+ | mr CRET1, TMP0
+ | b ->fff_resi
+ |
+ |.ffunc_bit bnot
+ | not CRET1, CARG1
+ | b ->fff_resi
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ ||if (LJ_DUALNUM) {
+ | .ffunc_2 bit_..name
+ | checknum CARG3; bnel ->fff_tobit_fb
+ | // Note: no inline conversion from number for 2nd argument!
+ | checknum CARG4; bne ->fff_fallback
+ ||} else {
+ | .ffunc_nn bit_..name
+ | fadd FARG1, FARG1, TOBIT
+ | fadd FARG2, FARG2, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG1, TMPD_LO
+ | stfd FARG2, TMPD
+ | lwz CARG2, TMPD_LO
+ ||}
+ |.if shmod == 1
+ | rlwinm CARG2, CARG2, 0, 27, 31
+ |.elif shmod == 2
+ | neg CARG2, CARG2
+ |.endif
+ | ins CRET1, CARG1, CARG2
+ | b ->fff_resi
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, slw, 1
+ |.ffunc_bit_sh rshift, srw, 1
+ |.ffunc_bit_sh arshift, sraw, 1
+ |.ffunc_bit_sh rol, rotlw, 0
+ |.ffunc_bit_sh ror, rotlw, 2
+ |
+ |.ffunc_bit tobit
+ if (LJ_DUALNUM) {
+ | b ->fff_resi
+ } else {
+ |->fff_resi:
+ | tonum_i FARG1, CRET1
+ }
+ |->fff_resn:
+ | lwz PC, FRAME_PC(BASE)
+ | la RA, -8(BASE)
+ | stfd FARG1, -8(BASE)
+ | b ->fff_res1
+ |
+ |// Fallback FP number to bit conversion.
+ |->fff_tobit_fb:
+ if (LJ_DUALNUM) {
+ | lfd FARG1, 0(BASE)
+ | bgt ->fff_fallback
+ | fadd FARG1, FARG1, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG1, TMPD_LO
+ | blr
+ }
+ |->fff_bitop_fb:
+ if (LJ_DUALNUM) {
+ | lfd FARG1, 0(TMP1)
+ | bgt ->fff_fallback
+ | fadd FARG1, FARG1, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG2, TMPD_LO
+ | blr
+ }
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | lwz TMP3, CFUNC:RB->f
+ | add TMP1, BASE, NARGS8:RC
+ | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | addi TMP0, TMP1, 8*LUA_MINSTACK
+ | lwz TMP2, L->maxstack
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | cmplw TMP0, TMP2
+ | stw BASE, L->base
+ | stw TMP1, L->top
+ | mr CARG1, L
+ | bgt >5 // Need to grow stack.
+ | mtctr TMP3
+ | bctrl // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | lwz BASE, L->base
+ | cmpwi CRET1, 0
+ | slwi RD, CRET1, 3
+ | la RA, -8(BASE)
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | lwz TMP0, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub NARGS8:RC, TMP0, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andi. TMP0, PC, FRAME_TYPE
+ | rlwinm TMP1, PC, 0, 0, 28
+ | bne >3
+ | lwz INS, -4(PC)
+ | decode_RA8 TMP1, INS
+ |3:
+ | sub TMP2, BASE, TMP1
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | li CARG2, LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz BASE, L->base
+ | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | mflr SAVE0
+ | stw BASE, L->base
+ | add TMP0, BASE, NARGS8:RC
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | stw TMP0, L->top
+ | mr CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | lwz BASE, L->base
+ | mtlr SAVE0
+ | lwz TMP0, L->top
+ | sub NARGS8:RC, TMP0, BASE
+ | lwz CFUNC:RB, FRAME_FUNC(BASE)
+ | blr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+#if LJ_HASJIT
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi. TMP0, TMP3, HOOK_VMEVENT // No recording while in vmevent.
+ | bne >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi. TMP0, TMP3, HOOK_ACTIVE
+ | bne >1
+ | subi TMP2, TMP2, 1
+ | andi. TMP0, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqy >1
+ | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | b >1
+#endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | beq >1
+ |5: // Re-dispatch to static ins.
+ | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OP4 TMP1, INS.
+ | lwzx TMP0, DISPATCH, TMP1
+ | mtctr TMP0
+ | bctr
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0
+ | bne <5
+ |
+ | cmpwi cr1, TMP0, 0
+ | addic. TMP2, TMP2, -1
+ | beq cr1, <5
+ | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | beq >1
+ | bge cr1, <5
+ |1:
+ | mr CARG1, L
+ | stw MULTRES, SAVE_MULTRES
+ | mr CARG2, PC
+ | stw BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | lwz BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lwz INS, -4(PC)
+ | decode_OP4 TMP1, INS
+ | decode_RB8 RB, INS
+ | addi TMP1, TMP1, GG_DISP2STATIC
+ | decode_RD8 RD, INS
+ | lwzx TMP0, DISPATCH, TMP1
+ | decode_RA8 RA, INS
+ | decode_RC8 RC, INS
+ | mtctr TMP0
+ | bctr
+ |
+ |->cont_hook: // Continue from hook yield.
+ | addi PC, PC, 4
+ | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+#if LJ_HASJIT
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | addi CARG1, DISPATCH, GG_DISP2J
+ | stw PC, SAVE_PC
+ | lwz TMP1, LFUNC:TMP1->pc
+ | mr CARG2, PC
+ | stw L, DISPATCH_J(L)(DISPATCH)
+ | lbz TMP1, PC2PROTO(framesize)(TMP1)
+ | stw BASE, L->base
+ | slwi TMP1, TMP1, 3
+ | add TMP1, BASE, TMP1
+ | stw TMP1, L->top
+ | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | b <3
+#endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mr CARG2, PC
+#if LJ_HASJIT
+ | b >1
+#endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+#if LJ_HASJIT
+ | ori CARG2, PC, 1
+ |1:
+#endif
+ | add TMP0, BASE, RC
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | stw BASE, L->base
+ | sub RA, RA, BASE
+ | stw TMP0, L->top
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | lwz BASE, L->base
+ | lwz TMP0, L->top
+ | stw ZERO, SAVE_PC // Invalidate for subsequent line hook.
+ | sub NARGS8:RC, TMP0, BASE
+ | add RA, BASE, RA
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | lwz INS, -4(PC)
+ | mtctr CRET1
+ | bctr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro savex_, a, b, c, d
+ | stfd f..a, 16+a*8(sp)
+ | stfd f..b, 16+b*8(sp)
+ | stfd f..c, 16+c*8(sp)
+ | stfd f..d, 16+d*8(sp)
+ |.endmacro
+ |
+ |->vm_exit_handler:
+#if LJ_HASJIT
+ | addi sp, sp, -(16+32*8+32*4)
+ | stmw r2, 16+32*8+2*4(sp)
+ | addi DISPATCH, JGL, -GG_DISP2G-32768
+ | li CARG2, ~LJ_VMST_EXIT
+ | lwz CARG1, 16+32*8+32*4(sp) // Get stack chain.
+ | stw CARG2, DISPATCH_GL(vmstate)(DISPATCH)
+ | savex_ 0,1,2,3
+ | stw CARG1, 0(sp) // Store extended stack chain.
+ | mcrxr cr0 // Clear SO flag.
+ | savex_ 4,5,6,7
+ | addi CARG2, sp, 16+32*8+32*4 // Recompute original value of sp.
+ | savex_ 8,9,10,11
+ | stw CARG2, 16+32*8+1*4(sp) // Store sp in RID_SP.
+ | savex_ 12,13,14,15
+ | mflr CARG3
+ | li TMP1, 0
+ | savex_ 16,17,18,19
+ | stw TMP1, 16+32*8+0*4(sp) // Clear RID_TMP.
+ | savex_ 20,21,22,23
+ | lhz CARG4, 2(CARG3) // Load trace number.
+ | savex_ 24,25,26,27
+ | lwz L, DISPATCH_GL(jit_L)(DISPATCH)
+ | savex_ 28,29,30,31
+ | sub CARG3, TMP0, CARG3 // Compute exit number.
+ | lwz BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | srwi CARG3, CARG3, 2
+ | stw L, DISPATCH_J(L)(DISPATCH)
+ | subi CARG3, CARG3, 2
+ | stw TMP1, DISPATCH_GL(jit_L)(DISPATCH)
+ | stw CARG4, DISPATCH_J(parent)(DISPATCH)
+ | stw BASE, L->base
+ | addi CARG1, DISPATCH, GG_DISP2J
+ | stw CARG3, DISPATCH_J(exitno)(DISPATCH)
+ | addi CARG2, sp, 16
+ | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // Returns MULTRES (unscaled) or negated error code.
+ | lwz TMP1, L->cframe
+ | lwz TMP2, 0(sp)
+ | lwz BASE, L->base
+ | rlwinm sp, TMP1, 0, 0, 29
+ | lwz PC, SAVE_PC // Get SAVE_PC.
+ | stw TMP2, 0(sp)
+ | stw L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ | b >1
+#endif
+ |->vm_exit_interp:
+#if LJ_HASJIT
+ | // CARG1 = MULTRES or negated error code, BASE, PC and JGL set.
+ | lwz L, SAVE_L
+ | addi DISPATCH, JGL, -GG_DISP2G-32768
+ |1:
+ | cmpwi CARG1, 0
+ | blt >3 // Check for error from exit.
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | slwi MULTRES, CARG1, 3
+ | li TMP2, 0
+ | stw MULTRES, SAVE_MULTRES
+ | lwz TMP1, LFUNC:TMP1->pc
+ | stw TMP2, DISPATCH_GL(jit_L)(DISPATCH)
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | // Setup type comparison constants.
+ | li TISNUM, LJ_TISNUM
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | stw TMP3, TMPD
+ | li ZERO, 0
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | lfs TOBIT, TMPD
+ | stw TMP3, TMPD
+ | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | li TISNIL, LJ_TNIL
+ | stw TMP0, TONUM_HI
+ | lfs TONUM, TMPD
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | // Assumes TISNIL == ~LJ_VMST_INTERP == -1.
+ | stw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
+ | decode_OP4 TMP1, INS
+ | decode_RA8 RA, INS
+ | lwzx TMP0, DISPATCH, TMP1
+ | mtctr TMP0
+ | cmplwi TMP1, BC_FUNCF*4 // Function header?
+ | bge >2
+ | decode_RB8 RB, INS
+ | decode_RD8 RD, INS
+ | decode_RC8 RC, INS
+ | bctr
+ |2:
+ | subi RC, MULTRES, 8
+ | add RA, RA, BASE
+ | bctr
+ |
+ |3: // Rethrow error from the right C frame.
+ | neg CARG2, CARG1
+ | mr CARG1, L
+ | bl extern lj_err_throw // (lua_State *L, int errcode)
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ | // NYI: Use internal implementation.
+ |->vm_floor:
+ | b extern floor
+ |->vm_ceil:
+ | b extern ceil
+ |->vm_trunc:
+#if LJ_HASJIT
+ | b extern trunc
+#endif
+ |
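+ |// Integer modulo helper: the result takes the sign of the divisor (Lua's floored modulo).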
+ |->vm_modi:
+ | divwo. TMP0, CARG1, CARG2
+ | bso >1
+ | xor. CARG3, CARG1, CARG2
+ | mullw TMP0, TMP0, CARG2
+ | sub CARG1, CARG1, TMP0
+ | bgelr
+ | cmpwi CARG1, 0; beqlr
+ | add CARG1, CARG1, CARG2
+ | blr
+ |1:
+ | cmpwi CARG2, 0
+ | li CARG1, 0
+ | beqlr
+ | mcrxr cr0 // Clear SO for -2147483648 % -1 and return 0.
+ | blr
+ |
+ |// Callable from C: double lj_vm_foldarith(double x, double y, int op)
+ |// Compute x op y for basic arithmetic operators (+ - * / % ^ and unary -)
+ |// and basic math functions. ORDER ARITH
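+ |// Dispatch below: op 0..7 = add, sub, mul, div, mod, pow, unary minus, abs;
+ |// higher op values are only used by the JIT compiler.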
+ |->vm_foldarith:
+ | cmplwi CARG1, 1
+ | beq >1; bgt >2
+ | fadd FARG1, FARG1, FARG2; blr
+ |1:
+ | fsub FARG1, FARG1, FARG2; blr
+ |2:
+ | cmplwi CARG1, 3; beq >1; bgt >2
+ | fmul FARG1, FARG1, FARG2; blr
+ |1:
+ | fdiv FARG1, FARG1, FARG2; blr
+ |2:
+ | cmplwi CARG1, 5; beq >1; bgt >2
+ | // NYI: Use internal implementation of floor and avoid spills.
+ | stwu sp, -32(sp); stfd f14, 16(sp); stfd f15, 24(sp)
+ | mflr r0
+ | fmr f14, FARG1
+ | fdiv FARG1, FARG1, FARG2
+ | stw r0, 36(sp)
+ | fmr f15, FARG2
+ | bl extern floor
+ | lwz r0, 36(sp)
+ | fmul FARG1, FARG1, f15
+ | mtlr r0
+ | fsub FARG1, f14, FARG1
+ | lfd f14, 16(sp); lfd f15, 24(sp); addi sp, sp, 32; blr
+ |1:
+ | b extern pow
+ |2:
+ | cmplwi CARG1, 7; beq >1; bgt >2
+ | fneg FARG1, FARG1; blr
+ |1:
+ | fabs FARG1, FARG1; blr
+ |2:
+#if LJ_HASJIT
+ | cmplwi CARG1, 9; beq >9; bgt >2
+ | b extern atan2
+ | // No support needed for IR_LDEXP.
+ |2:
+ | cmplwi CARG1, 11; bgt >9
+ | fsub f0, FARG1, FARG2
+ | beq >1
+ | fsel FARG1, f0, FARG2, FARG1 // IR_MAX
+ | blr
+ |1:
+ | fsel FARG1, f0, FARG1, FARG2 // IR_MIN
+ | blr
+ |9:
+ | NYI // Bad op.
+#else
+ | NYI // Other operations only needed by JIT compiler.
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// void lj_vm_cachesync(void *start, void *end)
+ |// Flush D-Cache and invalidate I-Cache. Assumes 32 byte cache line size.
+ |// This is a good lower bound, except for very ancient PPC models.
+ |->vm_cachesync:
+ | // Compute start of first cache line and number of cache lines.
+ | rlwinm CARG1, CARG1, 0, 0, 26
+ | sub CARG2, CARG2, CARG1
+ | addi CARG2, CARG2, 31
+ | rlwinm. CARG2, CARG2, 27, 5, 31
+ | beqlr
+ | mtctr CARG2
+ | mr CARG3, CARG1
+ |1: // Flush D-Cache.
+ | dcbst r0, CARG1
+ | addi CARG1, CARG1, 32
+ | bdnz <1
+ | sync
+ | mtctr CARG2
+ |1: // Invalidate I-Cache.
+ | icbi r0, CARG3
+ | addi CARG3, CARG3, 32
+ | bdnz <1
+ | isync
+ | blr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in r11, g in r12.
+ |->vm_ffi_callback:
+#if LJ_HASFFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | lwz CTSTATE, GL:r12->ctype_state
+ | addi DISPATCH, r12, GG_G2DISP
+ | stw r11, CTSTATE->cb.slot
+ | stw r3, CTSTATE->cb.gpr[0]
+ | stfd f1, CTSTATE->cb.fpr[0]
+ | stw r4, CTSTATE->cb.gpr[1]
+ | stfd f2, CTSTATE->cb.fpr[1]
+ | stw r5, CTSTATE->cb.gpr[2]
+ | stfd f3, CTSTATE->cb.fpr[2]
+ | stw r6, CTSTATE->cb.gpr[3]
+ | stfd f4, CTSTATE->cb.fpr[3]
+ | stw r7, CTSTATE->cb.gpr[4]
+ | stfd f5, CTSTATE->cb.fpr[4]
+ | stw r8, CTSTATE->cb.gpr[5]
+ | stfd f6, CTSTATE->cb.fpr[5]
+ | stw r9, CTSTATE->cb.gpr[6]
+ | stfd f7, CTSTATE->cb.fpr[6]
+ | stw r10, CTSTATE->cb.gpr[7]
+ | stfd f8, CTSTATE->cb.fpr[7]
+ | addi TMP0, sp, CFRAME_SPACE+8
+ | stw TMP0, CTSTATE->cb.stack
+ | mr CARG1, CTSTATE
+ | stw CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
+ | mr CARG2, sp
+ | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // Returns lua_State *.
+ | lwz BASE, L:CRET1->base
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lwz RC, L:CRET1->top
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | li ZERO, 0
+ | mr L, CRET1
+ | stw TMP3, TMPD
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | li TISNIL, LJ_TNIL
+ | li_vmstate INTERP
+ | lfs TOBIT, TMPD
+ | stw TMP3, TMPD
+ | sub RC, RC, BASE
+ | st_vmstate
+ | lfs TONUM, TMPD
+ | ins_callt
+#endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+#if LJ_HASFFI
+ | lwz CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
+ | stw BASE, L->base
+ | stw RB, L->top
+ | stw L, CTSTATE->L
+ | mr CARG1, CTSTATE
+ | mr CARG2, RA
+ | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | lwz CRET1, CTSTATE->cb.gpr[0]
+ | lfd FARG1, CTSTATE->cb.fpr[0]
+ | lwz CRET2, CTSTATE->cb.gpr[1]
+ | b ->vm_leave_unw
+#endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+#if LJ_HASFFI
+ | .type CCSTATE, CCallState, CARG1
+ | lwz TMP1, CCSTATE->spadj
+ | mflr TMP0
+ | lbz CARG2, CCSTATE->nsp
+ | lbz CARG3, CCSTATE->nfpr
+ | neg TMP1, TMP1
+ | stw TMP0, 4(sp)
+ | cmpwi cr1, CARG3, 0
+ | mr TMP2, sp
+ | addic. CARG2, CARG2, -1
+ | stwux sp, sp, TMP1
+ | crnot 4*cr1+eq, 4*cr1+eq // For vararg calls.
+ | stw r14, -4(TMP2)
+ | stw CCSTATE, -8(TMP2)
+ | mr r14, TMP2
+ | la TMP1, CCSTATE->stack
+ | slwi CARG2, CARG2, 2
+ | blty >2
+ | la TMP2, 8(sp)
+ |1:
+ | lwzx TMP0, TMP1, CARG2
+ | stwx TMP0, TMP2, CARG2
+ | addic. CARG2, CARG2, -4
+ | bge <1
+ |2:
+ | bney cr1, >3
+ | lfd f1, CCSTATE->fpr[0]
+ | lfd f2, CCSTATE->fpr[1]
+ | lfd f3, CCSTATE->fpr[2]
+ | lfd f4, CCSTATE->fpr[3]
+ | lfd f5, CCSTATE->fpr[4]
+ | lfd f6, CCSTATE->fpr[5]
+ | lfd f7, CCSTATE->fpr[6]
+ | lfd f8, CCSTATE->fpr[7]
+ |3:
+ | lwz TMP0, CCSTATE->func
+ | lwz CARG2, CCSTATE->gpr[1]
+ | lwz CARG3, CCSTATE->gpr[2]
+ | lwz CARG4, CCSTATE->gpr[3]
+ | lwz CARG5, CCSTATE->gpr[4]
+ | mtctr TMP0
+ | lwz r8, CCSTATE->gpr[5]
+ | lwz r9, CCSTATE->gpr[6]
+ | lwz r10, CCSTATE->gpr[7]
+ | lwz CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
+ | bctrl
+ | lwz CCSTATE:TMP1, -8(r14)
+ | lwz TMP2, -4(r14)
+ | lwz TMP0, 4(r14)
+ | stw CARG1, CCSTATE:TMP1->gpr[0]
+ | stfd FARG1, CCSTATE:TMP1->fpr[0]
+ | stw CARG2, CCSTATE:TMP1->gpr[1]
+ | mtlr TMP0
+ | stw CARG3, CCSTATE:TMP1->gpr[2]
+ | mr sp, r14
+ | stw CARG4, CCSTATE:TMP1->gpr[3]
+ | mr r14, TMP2
+ | blr
+#endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ if (LJ_DUALNUM) {
+ | lwzux TMP0, RA, BASE
+ | addi PC, PC, 4
+ | lwz CARG2, 4(RA)
+ | lwzux TMP1, RD, BASE
+ | lwz TMP2, -4(PC)
+ | checknum cr0, TMP0
+ | lwz CARG3, 4(RD)
+ | decode_RD4 TMP2, TMP2
+ | checknum cr1, TMP1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | bne cr0, >7
+ | bne cr1, >8
+ | cmpw CARG2, CARG3
+ if (op == BC_ISLT) {
+ | bge >2
+ } else if (op == BC_ISGE) {
+ | blt >2
+ } else if (op == BC_ISLE) {
+ | bgt >2
+ } else {
+ | ble >2
+ }
+ |1:
+ | add PC, PC, TMP2
+ |2:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | bgt cr0, ->vmeta_comp
+ | // RA is a number.
+ | lfd f0, 0(RA)
+ | bgt cr1, ->vmeta_comp
+ | blt cr1, >4
+ | // RA is a number, RD is an integer.
+ | tonum_i f1, CARG3
+ | b >5
+ |
+ |8: // RA is an integer, RD is not an integer.
+ | bgt cr1, ->vmeta_comp
+ | // RA is an integer, RD is a number.
+ | tonum_i f0, CARG2
+ |4:
+ | lfd f1, 0(RD)
+ |5:
+ | fcmpu cr0, f0, f1
+ if (op == BC_ISLT) {
+ | bge <2
+ } else if (op == BC_ISGE) {
+ | blt <2
+ } else if (op == BC_ISLE) {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | bge <2
+ } else {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | blt <2
+ }
+ | b <1
+ } else {
+ | lwzx TMP0, BASE, RA
+ | addi PC, PC, 4
+ | lfdx f0, BASE, RA
+ | lwzx TMP1, BASE, RD
+ | checknum cr0, TMP0
+ | lwz TMP2, -4(PC)
+ | lfdx f1, BASE, RD
+ | checknum cr1, TMP1
+ | decode_RD4 TMP2, TMP2
+ | bge cr0, ->vmeta_comp
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | bge cr1, ->vmeta_comp
+ | fcmpu cr0, f0, f1
+ if (op == BC_ISLT) {
+ | bge >1
+ } else if (op == BC_ISGE) {
+ | blt >1
+ } else if (op == BC_ISLE) {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | bge >1
+ } else {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | blt >1
+ }
+ | add PC, PC, TMP2
+ |1:
+ | ins_next
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ if (LJ_DUALNUM) {
+ | lwzux TMP0, RA, BASE
+ | addi PC, PC, 4
+ | lwz CARG2, 4(RA)
+ | lwzux TMP1, RD, BASE
+ | checknum cr0, TMP0
+ | lwz TMP2, -4(PC)
+ | checknum cr1, TMP1
+ | decode_RD4 TMP2, TMP2
+ | lwz CARG3, 4(RD)
+ | cror 4*cr7+gt, 4*cr0+gt, 4*cr1+gt
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ if (vk) {
+ | ble cr7, ->BC_ISEQN_Z
+ } else {
+ | ble cr7, ->BC_ISNEN_Z
+ }
+ } else {
+ | lwzux TMP0, RA, BASE
+ | lwz TMP2, 0(PC)
+ | lfd f0, 0(RA)
+ | addi PC, PC, 4
+ | lwzux TMP1, RD, BASE
+ | checknum cr0, TMP0
+ | decode_RD4 TMP2, TMP2
+ | lfd f1, 0(RD)
+ | checknum cr1, TMP1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | bge cr0, >5
+ | bge cr1, >5
+ | fcmpu cr0, f0, f1
+ if (vk) {
+ | bne >1
+ | add PC, PC, TMP2
+ } else {
+ | beq >1
+ | add PC, PC, TMP2
+ }
+ |1:
+ | ins_next
+ }
+ |5: // Either or both types are not numbers.
+ if (!LJ_DUALNUM) {
+ | lwz CARG2, 4(RA)
+ | lwz CARG3, 4(RD)
+ }
+ if (LJ_HASFFI) {
+ | cmpwi cr7, TMP0, LJ_TCDATA
+ | cmpwi cr5, TMP1, LJ_TCDATA
+ }
+ | not TMP3, TMP0
+ | cmplw TMP0, TMP1
+ | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive?
+ if (LJ_HASFFI) {
+ | cror 4*cr7+eq, 4*cr7+eq, 4*cr5+eq
+ }
+ | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata?
+ if (LJ_HASFFI) {
+ | beq cr7, ->vmeta_equal_cd
+ }
+ | cmplw cr5, CARG2, CARG3
+ | crandc 4*cr0+gt, 4*cr0+eq, 4*cr1+gt // 2: Same type and primitive.
+ | crorc 4*cr0+lt, 4*cr5+eq, 4*cr0+eq // 1: Same tv or different type.
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr5+eq // 0: Same type and same tv.
+ | mr SAVE0, PC
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr0+gt // 0 or 2.
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+gt // 1 or 2.
+ if (vk) {
+ | bne cr0, >6
+ | add PC, PC, TMP2
+ |6:
+ } else {
+ | beq cr0, >6
+ | add PC, PC, TMP2
+ |6:
+ }
+ if (LJ_DUALNUM) {
+ | bge cr0, >2 // Done if 1 or 2.
+ |1:
+ | ins_next
+ |2:
+ } else {
+ | blt cr0, <1 // Done if 1 or 2.
+ }
+ | blt cr6, <1 // Done if not tab/ud.
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | lwz TAB:TMP2, TAB:CARG2->metatable
+ | li CARG4, 1-vk // ne = 0 or 1.
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable?
+ | lbz TMP2, TAB:TMP2->nomm
+ | andi. TMP2, TMP2, 1<vmeta_equal // Handle __eq metamethod.
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
+ | lwzux TMP0, RA, BASE
+ | srwi RD, RD, 1
+ | lwz STR:TMP3, 4(RA)
+ | lwz TMP2, 0(PC)
+ | subfic RD, RD, -4
+ | addi PC, PC, 4
+ if (LJ_HASFFI) {
+ | cmpwi TMP0, LJ_TCDATA
+ }
+ | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4
+ | subfic TMP0, TMP0, LJ_TSTR
+ if (LJ_HASFFI) {
+ | beq ->vmeta_equal_cd
+ }
+ | sub TMP1, STR:TMP1, STR:TMP3
+ | or TMP0, TMP0, TMP1
+ | decode_RD4 TMP2, TMP2
+ | subfic TMP0, TMP0, 0
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | subfe TMP1, TMP1, TMP1
+ if (vk) {
+ | andc TMP2, TMP2, TMP1
+ } else {
+ | and TMP2, TMP2, TMP1
+ }
+ | add PC, PC, TMP2
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ if (LJ_DUALNUM) {
+ | lwzux TMP0, RA, BASE
+ | addi PC, PC, 4
+ | lwz CARG2, 4(RA)
+ | lwzux TMP1, RD, KBASE
+ | checknum cr0, TMP0
+ | lwz TMP2, -4(PC)
+ | checknum cr1, TMP1
+ | decode_RD4 TMP2, TMP2
+ | lwz CARG3, 4(RD)
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | bne cr0, >7
+ | bne cr1, >8
+ | cmpw CARG2, CARG3
+ |4:
+ } else {
+ if (vk) {
+ |->BC_ISEQN_Z: // Dummy label.
+ } else {
+ |->BC_ISNEN_Z: // Dummy label.
+ }
+ | lwzx TMP0, BASE, RA
+ | addi PC, PC, 4
+ | lfdx f0, BASE, RA
+ | lwz TMP2, -4(PC)
+ | lfdx f1, KBASE, RD
+ | decode_RD4 TMP2, TMP2
+ | checknum TMP0
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | bge >3
+ | fcmpu cr0, f0, f1
+ }
+ if (vk) {
+ | bne >1
+ | add PC, PC, TMP2
+ |1:
+ if (!LJ_HASFFI) {
+ |3:
+ }
+ } else {
+ | beq >2
+ |1:
+ if (!LJ_HASFFI) {
+ |3:
+ }
+ | add PC, PC, TMP2
+ |2:
+ }
+ | ins_next
+ if (LJ_HASFFI) {
+ |3:
+ | cmpwi TMP0, LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ | b <1
+ }
+ if (LJ_DUALNUM) {
+ |7: // RA is not an integer.
+ | bge cr0, <3
+ | // RA is a number.
+ | lfd f0, 0(RA)
+ | blt cr1, >1
+ | // RA is a number, RD is an integer.
+ | tonum_i f1, CARG3
+ | b >2
+ |
+ |8: // RA is an integer, RD is a number.
+ | tonum_i f0, CARG2
+ |1:
+ | lfd f1, 0(RD)
+ |2:
+ | fcmpu cr0, f0, f1
+ | b <4
+ }
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | lwzx TMP0, BASE, RA
+ | srwi TMP1, RD, 3
+ | lwz TMP2, 0(PC)
+ | not TMP1, TMP1
+ | addi PC, PC, 4
+ if (LJ_HASFFI) {
+ | cmpwi TMP0, LJ_TCDATA
+ }
+ | sub TMP0, TMP0, TMP1
+ if (LJ_HASFFI) {
+ | beq ->vmeta_equal_cd
+ }
+ | decode_RD4 TMP2, TMP2
+ | addic TMP0, TMP0, -1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | subfe TMP1, TMP1, TMP1
+ if (vk) {
+ | and TMP2, TMP2, TMP1
+ } else {
+ | andc TMP2, TMP2, TMP1
+ }
+ | add PC, PC, TMP2
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | lwzx TMP0, BASE, RD
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ if (op == BC_IST || op == BC_ISF) {
+ | subfic TMP0, TMP0, LJ_TTRUE
+ | decode_RD4 TMP2, INS
+ | subfe TMP1, TMP1, TMP1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ if (op == BC_IST) {
+ | andc TMP2, TMP2, TMP1
+ } else {
+ | and TMP2, TMP2, TMP1
+ }
+ | add PC, PC, TMP2
+ } else {
+ | li TMP1, LJ_TFALSE
+ | lfdx f0, BASE, RD
+ | cmplw TMP0, TMP1
+ if (op == BC_ISTC) {
+ | bge >1
+ } else {
+ | blt >1
+ }
+ | addis PC, PC, -(BCBIAS_J*4 >> 16)
+ | decode_RD4 TMP2, INS
+ | stfdx f0, BASE, RA
+ | add PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ | lfdx f0, BASE, RD
+ | stfdx f0, BASE, RA
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ | lwzx TMP0, BASE, RD
+ | subfic TMP1, TMP0, LJ_TTRUE
+ | adde TMP0, TMP0, TMP1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | lwzux TMP1, RD, BASE
+ | lwz TMP0, 4(RD)
+ | checknum TMP1
+ if (LJ_DUALNUM) {
+ | bne >5
+ | nego. TMP0, TMP0
+ | bso >4
+ |1:
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw TMP0, 4(RA)
+ |3:
+ | ins_next2
+ |4: // Potential overflow.
+ | mcrxr cr0; bley <1 // Ignore unrelated overflow.
+ | lus TMP1, 0x41e0 // 2^31.
+ | li TMP0, 0
+ | b >7
+ }
+ |5:
+ | bge ->vmeta_unm
+ | xoris TMP1, TMP1, 0x8000
+ |7:
+ | ins_next1
+ | stwux TMP1, RA, BASE
+ | stw TMP0, 4(RA)
+ if (LJ_DUALNUM) {
+ | b <3
+ } else {
+ | ins_next2
+ }
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | lwzux TMP0, RD, BASE
+ | lwz CARG1, 4(RD)
+ | checkstr TMP0; bne >2
+ | lwz CRET1, STR:CARG1->len
+ |1:
+ if (LJ_DUALNUM) {
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw CRET1, 4(RA)
+ } else {
+ | tonum_u f0, CRET1 // Result is a non-negative integer.
+ | ins_next1
+ | stfdx f0, BASE, RA
+ }
+ | ins_next2
+ |2:
+ | checktab TMP0; bne ->vmeta_len
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | cmplwi TAB:TMP2, 0
+ | bne >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ |9:
+ | lbz TMP0, TAB:TMP2->nomm
+ | andi. TMP0, TMP0, 1<<MM_len
+ | bne <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithpre
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | lwzx TMP1, BASE, RB
+ ||if (LJ_DUALNUM) {
+ | lwzx TMP2, KBASE, RC
+ ||}
+ | lfdx f14, BASE, RB
+ | lfdx f15, KBASE, RC
+ ||if (LJ_DUALNUM) {
+ | checknum cr0, TMP1
+ | checknum cr1, TMP2
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_vn
+ ||} else {
+ | checknum TMP1; bge ->vmeta_arith_vn
+ ||}
+ || break;
+ ||case 1:
+ | lwzx TMP1, BASE, RB
+ ||if (LJ_DUALNUM) {
+ | lwzx TMP2, KBASE, RC
+ ||}
+ | lfdx f15, BASE, RB
+ | lfdx f14, KBASE, RC
+ ||if (LJ_DUALNUM) {
+ | checknum cr0, TMP1
+ | checknum cr1, TMP2
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_nv
+ ||} else {
+ | checknum TMP1; bge ->vmeta_arith_nv
+ ||}
+ || break;
+ ||default:
+ | lwzx TMP1, BASE, RB
+ | lwzx TMP2, BASE, RC
+ | lfdx f14, BASE, RB
+ | lfdx f15, BASE, RC
+ | checknum cr0, TMP1
+ | checknum cr1, TMP2
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithfallback, ins
+ ||switch (vk) {
+ ||case 0:
+ | ins ->vmeta_arith_vn2
+ || break;
+ ||case 1:
+ | ins ->vmeta_arith_nv2
+ || break;
+ ||default:
+ | ins ->vmeta_arith_vv2
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro intmod, a, b, c
+ | bl ->vm_modi
+ |.endmacro
+ |
+ |.macro fpmod, a, b, c
+ |->BC_MODVN_Z:
+ | fdiv FARG1, b, c
+ | // NYI: Use internal implementation of floor.
+ | bl extern floor // floor(b/c)
+ | fmul a, FARG1, c
+ | fsub a, b, a // b - floor(b/c)*c
+ |.endmacro
+ |
+ |.macro ins_arithfp, fpins
+ | ins_arithpre
+ |.if "fpins" == "fpmod_"
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ |.else
+ | fpins f0, f14, f15
+ | ins_next1
+ | stfdx f0, BASE, RA
+ | ins_next2
+ |.endif
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins, fpins
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | lwzux TMP1, RB, BASE
+ | lwzux TMP2, RC, KBASE
+ | lwz CARG1, 4(RB)
+ | checknum cr0, TMP1
+ | lwz CARG2, 4(RC)
+ || break;
+ ||case 1:
+ | lwzux TMP1, RB, BASE
+ | lwzux TMP2, RC, KBASE
+ | lwz CARG2, 4(RB)
+ | checknum cr0, TMP1
+ | lwz CARG1, 4(RC)
+ || break;
+ ||default:
+ | lwzux TMP1, RB, BASE
+ | lwzux TMP2, RC, BASE
+ | lwz CARG1, 4(RB)
+ | checknum cr0, TMP1
+ | lwz CARG2, 4(RC)
+ || break;
+ ||}
+ | checknum cr1, TMP2
+ | bne >5
+ | bne cr1, >5
+ | intins CARG1, CARG1, CARG2
+ | bso >4
+ |1:
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw CARG1, 4(RA)
+ |2:
+ | ins_next2
+ |4: // Overflow.
+ | mcrxr cr0; bley <1 // Ignore unrelated overflow.
+ | ins_arithfallback b
+ |5: // FP variant.
+ ||if (vk == 1) {
+ | lfd f15, 0(RB)
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | lfd f14, 0(RC)
+ ||} else {
+ | lfd f14, 0(RB)
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | lfd f15, 0(RC)
+ ||}
+ | ins_arithfallback bge
+ |.if "fpins" == "fpmod_"
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ |.else
+ | fpins f0, f14, f15
+ | ins_next1
+ | stfdx f0, BASE, RA
+ | b <2
+ |.endif
+ |.endmacro
+ |
+ |.macro ins_arith, intins, fpins
+ ||if (LJ_DUALNUM) {
+ | ins_arithdn intins, fpins
+ ||} else {
+ | ins_arithfp fpins
+ ||}
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith addo., fadd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith subo., fsub
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith mullwo., fmul
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arithfp fdiv
+ break;
+ case BC_MODVN:
+ | ins_arith intmod, fpmod
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arith intmod, fpmod_
+ break;
+ case BC_POW:
+ | // NYI: (partial) integer arithmetic.
+ | lwzx TMP1, BASE, RB
+ | lfdx FARG1, BASE, RB
+ | lwzx TMP2, BASE, RC
+ | lfdx FARG2, BASE, RC
+ | checknum cr0, TMP1
+ | checknum cr1, TMP2
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_vv
+ | bl extern pow
+ | ins_next1
+ | stfdx FARG1, BASE, RA
+ | ins_next2
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | sub CARG3, RC, RB
+ | stw BASE, L->base
+ | add CARG2, BASE, RC
+ | mr SAVE0, RB
+ |->BC_CAT_Z:
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | srwi CARG3, CARG3, 3
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | lwz BASE, L->base
+ | bne ->vmeta_binop
+ | ins_next1
+ | lfdx f0, BASE, SAVE0 // Copy result from RB to RA.
+ | stfdx f0, BASE, RA
+ | ins_next2
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | ins_next1
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4
+ | li TMP2, LJ_TSTR
+ | stwux TMP2, RA, BASE
+ | stw TMP0, 4(RA)
+ | ins_next2
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | ins_next1
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4
+ | li TMP2, LJ_TCDATA
+ | stwux TMP2, RA, BASE
+ | stw TMP0, 4(RA)
+ | ins_next2
+#endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ if (LJ_DUALNUM) {
+ | slwi RD, RD, 13
+ | srawi RD, RD, 16
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw RD, 4(RA)
+ | ins_next2
+ } else {
+ | // The soft-float approach is faster.
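+ | // Build the IEEE-754 double for the int16 literal with integer ops only;
+ | // the low mantissa word is always zero.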
+ | slwi RD, RD, 13
+ | srawi TMP1, RD, 31
+ | xor TMP2, TMP1, RD
+ | sub TMP2, TMP2, TMP1 // TMP2 = abs(x)
+ | cntlzw TMP3, TMP2
+ | subfic TMP1, TMP3, 0x40d // TMP1 = exponent-1
+ | slw TMP2, TMP2, TMP3 // TMP2 = left aligned mantissa
+ | subfic TMP3, RD, 0
+ | slwi TMP1, TMP1, 20
+ | rlwimi RD, TMP2, 21, 1, 31 // hi = sign(x) | (mantissa>>11)
+ | subfe TMP0, TMP0, TMP0
+ | add RD, RD, TMP1 // hi = hi + exponent-1
+ | and RD, RD, TMP0 // hi = x == 0 ? 0 : hi
+ | ins_next1
+ | stwux RD, RA, BASE
+ | stw ZERO, 4(RA)
+ | ins_next2
+ }
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | ins_next1
+ | lfdx f0, KBASE, RD
+ | stfdx f0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | srwi TMP1, RD, 3
+ | not TMP0, TMP1
+ | ins_next1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
+ | stwx TISNIL, BASE, RA
+ | addi RA, RA, 8
+ |1:
+ | stwx TISNIL, BASE, RA
+ | cmpw RA, RD
+ | addi RA, RA, 8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RD, RD, 1
+ | addi RD, RD, offsetof(GCfuncL, uvptr)
+ | lwzx UPVAL:RB, LFUNC:RB, RD
+ | ins_next1
+ | lwz TMP1, UPVAL:RB->v
+ | lfd f0, 0(TMP1)
+ | stfdx f0, BASE, RA
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | lfdux f0, RD, BASE
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP0, UPVAL:RB->closed
+ | lwz TMP2, 0(RD)
+ | stfd f0, 0(CARG2)
+ | cmplwi cr1, TMP0, 0
+ | lwz TMP1, 4(RD)
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | subi TMP2, TMP2, (LJ_TISNUM+1)
+ | bne >2 // Upvalue is closed and black?
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | cmplwi TMP2, LJ_TISGCV - (LJ_TISNUM+1)
+ | bge <1 // tvisgcv(v)
+ | lbz TMP3, GCOBJ:TMP1->gch.marked
+ | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | srwi RA, RA, 1
+ | subfic TMP1, TMP1, -4
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP3, STR:TMP1->marked
+ | lbz TMP2, UPVAL:RB->closed
+ | li TMP0, LJ_TSTR
+ | stw STR:TMP1, 4(CARG2)
+ | stw TMP0, 0(CARG2)
+ | bne >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(str)
+ | cmplwi cr1, TMP2, 0
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | lfdx f0, KBASE, RD
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | ins_next1
+ | lwz TMP1, UPVAL:RB->v
+ | stfd f0, 0(TMP1)
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | srwi TMP0, RD, 3
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | not TMP0, TMP0
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | ins_next1
+ | lwz TMP1, UPVAL:RB->v
+ | stw TMP0, 0(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | lwz TMP1, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | stw BASE, L->base
+ | cmplwi TMP1, 0
+ | mr CARG1, L
+ | beq >1
+ | add CARG2, BASE, RA
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | lwz BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | srwi TMP1, RD, 1
+ | stw BASE, L->base
+ | subfic TMP1, TMP1, -4
+ | stw PC, SAVE_PC
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | mr CARG1, L
+ | lwz CARG3, FRAME_FUNC(BASE)
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | lwz BASE, L->base
+ | li TMP0, LJ_TFUNC
+ | stwux TMP0, RA, BASE
+ | stw LFUNC:CRET1, 4(RA)
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | mr CARG1, L
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | stw BASE, L->base
+ | cmplw TMP0, TMP1
+ | stw PC, SAVE_PC
+ | bge >5
+ |1:
+ if (op == BC_TNEW) {
+ | rlwinm CARG2, RD, 29, 21, 31
+ | rlwinm CARG3, RD, 18, 27, 31
+ | cmpwi CARG2, 0x7ff; beq >3
+ |2:
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns Table *.
+ } else {
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns Table *.
+ }
+ | lwz BASE, L->base
+ | li TMP0, LJ_TTAB
+ | stwux TMP0, RA, BASE
+ | stw TAB:CRET1, 4(RA)
+ | ins_next
+ if (op == BC_TNEW) {
+ |3:
+ | li CARG2, 0x801
+ | b <2
+ }
+ |5:
+ | mr SAVE0, RD
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mr RD, SAVE0
+ | mr CARG1, L
+ | b <1
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | lwz LFUNC:TMP2, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | lwz TAB:RB, LFUNC:TMP2->env
+ | subfic TMP1, TMP1, -4
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | lwzux CARG1, RB, BASE
+ | lwzux CARG2, RC, BASE
+ | lwz TAB:RB, 4(RB)
+ if (LJ_DUALNUM) {
+ | lwz RC, 4(RC)
+ } else {
+ | lfd f0, 0(RC)
+ }
+ | checktab CARG1
+ | checknum cr1, CARG2
+ | bne ->vmeta_tgetv
+ if (LJ_DUALNUM) {
+ | lwz TMP0, TAB:RB->asize
+ | bne cr1, >5
+ | lwz TMP1, TAB:RB->array
+ | cmplw TMP0, RC
+ | slwi TMP2, RC, 3
+ } else {
+ | bge cr1, >5
+ | // Convert number key to integer, check for integerness and range.
+ | fctiwz f1, f0
+ | fadd f2, f0, TOBIT
+ | stfd f1, TMPD
+ | lwz TMP0, TAB:RB->asize
+ | fsub f2, f2, TOBIT
+ | lwz TMP2, TMPD_LO
+ | lwz TMP1, TAB:RB->array
+ | fcmpu cr1, f0, f2
+ | cmplw cr0, TMP0, TMP2
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
+ | slwi TMP2, TMP2, 3
+ }
+ | ble ->vmeta_tgetv // Integer key and in array part?
+ | lwzx TMP0, TMP1, TMP2
+ | lfdx f14, TMP1, TMP2
+ | checknil TMP0; beq >2
+ |1:
+ | ins_next1
+ | stfdx f14, BASE, RA
+ | ins_next2
+ |
+ |2: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andi. TMP0, TMP0, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetv
+ |
+ |5:
+ | checkstr CARG2; bne ->vmeta_tgetv
+ if (!LJ_DUALNUM) {
+ | lwz STR:RC, 4(RC)
+ }
+ | b ->BC_TGETS_Z // String key?
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
+ | lwzux CARG1, RB, BASE
+ | srwi TMP1, RC, 1
+ | lwz TAB:RB, 4(RB)
+ | subfic TMP1, TMP1, -4
+ | checktab CARG1
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | bne ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | lwz CARG1, NODE:TMP2->key
+ | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
+ | lwz CARG2, NODE:TMP2->val
+ | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2)
+ | checkstr CARG1; bne >4
+ | cmpw TMP0, STR:RC; bne >4
+ | checknil CARG2; beq >5 // Key found, but nil value?
+ |3:
+ | stwux CARG2, RA, BASE
+ | stw TMP1, 4(RA)
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | bne <1
+ | // End of hash chain: key not found, nil result.
+ | li CARG2, LJ_TNIL
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <3 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andi. TMP0, TMP0, 1<<MM_index
+ | bne <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | lwzux CARG1, RB, BASE
+ | srwi TMP0, RC, 3
+ | lwz TAB:RB, 4(RB)
+ | checktab CARG1; bne ->vmeta_tgetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | cmplw TMP0, TMP1; bge ->vmeta_tgetb
+ | lwzx TMP1, TMP2, RC
+ | lfdx f0, TMP2, RC
+ | checknil TMP1; beq >5
+ |1:
+ | ins_next1
+ | stfdx f0, BASE, RA
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andi. TMP2, TMP2, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb // Caveat: preserve TMP0!
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | lwzux CARG1, RB, BASE
+ | lwzux CARG2, RC, BASE
+ | lwz TAB:RB, 4(RB)
+ if (LJ_DUALNUM) {
+ | lwz RC, 4(RC)
+ } else {
+ | lfd f0, 0(RC)
+ }
+ | checktab CARG1
+ | checknum cr1, CARG2
+ | bne ->vmeta_tsetv
+ if (LJ_DUALNUM) {
+ | lwz TMP0, TAB:RB->asize
+ | bne cr1, >5
+ | lwz TMP1, TAB:RB->array
+ | cmplw TMP0, RC
+ | slwi TMP0, RC, 3
+ } else {
+ | bge cr1, >5
+ | // Convert number key to integer, check for integerness and range.
+ | fctiwz f1, f0
+ | fadd f2, f0, TOBIT
+ | stfd f1, TMPD
+ | lwz TMP0, TAB:RB->asize
+ | fsub f2, f2, TOBIT
+ | lwz TMP2, TMPD_LO
+ | lwz TMP1, TAB:RB->array
+ | fcmpu cr1, f0, f2
+ | cmplw cr0, TMP0, TMP2
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
+ | slwi TMP0, TMP2, 3
+ }
+ | ble ->vmeta_tsetv // Integer key and in array part?
+ | lwzx TMP2, TMP1, TMP0
+ | lbz TMP3, TAB:RB->marked
+ | lfdx f14, BASE, RA
+ | checknil TMP2; beq >3
+ |1:
+ | andi. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
+ | stfdx f14, TMP1, TMP0
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andi. TMP2, TMP2, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetv
+ |
+ |5:
+ | checkstr CARG2; bne ->vmeta_tsetv
+ if (!LJ_DUALNUM) {
+ | lwz STR:RC, 4(RC)
+ }
+ | b ->BC_TSETS_Z // String key?
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | lwzux CARG1, RB, BASE
+ | srwi TMP1, RC, 1
+ | lwz TAB:RB, 4(RB)
+ | subfic TMP1, TMP1, -4
+ | checktab CARG1
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | bne ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | lfdx f14, BASE, RA
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | lbz TMP3, TAB:RB->marked
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | lwz CARG1, NODE:TMP2->key
+ | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
+ | lwz CARG2, NODE:TMP2->val
+ | lwz NODE:TMP1, NODE:TMP2->next
+ | checkstr CARG1; bne >5
+ | cmpw TMP0, STR:RC; bne >5
+ | checknil CARG2; beq >4 // Key found, but nil value?
+ |2:
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | stfd f14, NODE:TMP2->val
+ | bne >7
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <2 // No metatable: done.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andi. TMP0, TMP0, 1<<MM_newindex
+ | bne <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | cmplwi NODE:TMP1, 0
+ | mr NODE:TMP2, NODE:TMP1
+ | bne <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | cmplwi TAB:TMP1, 0
+ | stw BASE, L->base
+ | beq >6 // No metatable: continue.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andi. TMP0, TMP0, 1<<MM_newindex
+ | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | li TMP0, LJ_TSTR
+ | stw STR:RC, 4(CARG3)
+ | mr CARG2, TAB:RB
+ | stw TMP0, 0(CARG3)
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | lwz BASE, L->base
+ | stfd f14, 0(CRET1)
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | lwzux CARG1, RB, BASE
+ | srwi TMP0, RC, 3
+ | lwz TAB:RB, 4(RB)
+ | checktab CARG1; bne ->vmeta_tsetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | lbz TMP3, TAB:RB->marked
+ | cmplw TMP0, TMP1
+ | lfdx f14, BASE, RA
+ | bge ->vmeta_tsetb
+ | lwzx TMP1, TMP2, RC
+ | checknil TMP1; beq >5
+ |1:
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | stfdx f14, TMP2, RC
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP1, TAB:TMP1->nomm
+ | andi. TMP1, TMP1, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetb // Caveat: preserve TMP0!
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | add RA, BASE, RA
+ |1:
+ | add TMP3, KBASE, RD
+ | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table.
+ | addic. TMP0, MULTRES, -8
+ | lwz TMP3, 4(TMP3) // Integer constant is in lo-word.
+ | srwi CARG3, TMP0, 3
+ | beq >4 // Nothing to copy?
+ | add CARG3, CARG3, TMP3
+ | lwz TMP2, TAB:CARG2->asize
+ | slwi TMP1, TMP3, 3
+ | lbz TMP3, TAB:CARG2->marked
+ | cmplw CARG3, TMP2
+ | add TMP2, RA, TMP0
+ | lwz TMP0, TAB:CARG2->array
+ | bgt >5
+ | add TMP1, TMP1, TMP0
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | lfd f0, 0(RA)
+ | addi RA, RA, 8
+ | cmpw cr1, RA, TMP2
+ | stfd f0, 0(TMP1)
+ | addi TMP1, TMP1, 8
+ | blt cr1, <3
+ | bne >7
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | mr SAVE0, RD
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | mr RD, SAVE0
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALL follows.
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | mr TMP2, BASE
+ | lwzux TMP0, BASE, RA
+ | lwz LFUNC:RB, 4(BASE)
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | addi BASE, BASE, 8
+ | checkfunc TMP0; bne ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | lwzux TMP0, RA, BASE
+ | lwz LFUNC:RB, 4(RA)
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | lwz TMP1, FRAME_PC(BASE)
+ | checkfunc TMP0
+ | addi RA, RA, 8
+ | bne ->vmeta_callt
+ |->BC_CALLT_Z:
+ | andi. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand.
+ | lbz TMP3, LFUNC:RB->ffid
+ | xori TMP2, TMP1, FRAME_VARG
+ | cmplwi cr1, NARGS8:RC, 0
+ | bne >7
+ |1:
+ | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | li TMP2, 0
+ | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function?
+ | beq cr1, >3
+ |2:
+ | addi TMP3, TMP2, 8
+ | lfdx f0, RA, TMP2
+ | cmplw cr1, TMP3, NARGS8:RC
+ | stfdx f0, BASE, TMP2
+ | mr TMP2, TMP3
+ | bne cr1, <2
+ |3:
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt
+ | beq >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lwz INS, -4(TMP1)
+ | decode_RA8 RA, INS
+ | sub TMP1, BASE, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1)
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | andi. TMP0, TMP2, FRAME_TYPEP
+ | bne <1 // Vararg frame below?
+ | sub BASE, BASE, TMP2 // Relocate BASE down.
+ | lwz TMP1, FRAME_PC(BASE)
+ | andi. TMP0, TMP1, FRAME_TYPE
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | mr TMP2, BASE
+ | add BASE, BASE, RA
+ | lwz TMP1, -24(BASE)
+ | lwz LFUNC:RB, -20(BASE)
+ | lfd f1, -8(BASE)
+ | lfd f0, -16(BASE)
+ | stw TMP1, 0(BASE) // Copy callable.
+ | stw LFUNC:RB, 4(BASE)
+ | checkfunc TMP1
+ | stfd f1, 16(BASE) // Copy control var.
+ | li NARGS8:RC, 16 // Iterators get 2 arguments.
+ | stfdu f0, 8(BASE) // Copy state.
+ | bne ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
+#if LJ_HASJIT
+ | // NYI: add hotloop, record BC_ITERN.
+#endif
+ | add RA, BASE, RA
+ | lwz TAB:RB, -12(RA)
+ | lwz RC, -4(RA) // Get index from control var.
+ | lwz TMP0, TAB:RB->asize
+ | lwz TMP1, TAB:RB->array
+ | addi PC, PC, 4
+ |1: // Traverse array part.
+ | cmplw RC, TMP0
+ | slwi TMP3, RC, 3
+ | bge >5 // Index points after array part?
+ | lwzx TMP2, TMP1, TMP3
+ | lfdx f0, TMP1, TMP3
+ | checknil TMP2
+ | lwz INS, -4(PC)
+ | beq >4
+ if (LJ_DUALNUM) {
+ | stw RC, 4(RA)
+ | stw TISNUM, 0(RA)
+ } else {
+ | tonum_u f1, RC
+ }
+ | addi RC, RC, 1
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | stfd f0, 8(RA)
+ | decode_RD4 TMP1, INS
+ | stw RC, -4(RA) // Update control var.
+ | add PC, TMP1, TMP3
+ if (!LJ_DUALNUM) {
+ | stfd f1, 0(RA)
+ }
+ |3:
+ | ins_next
+ |
+ |4: // Skip holes in array part.
+ | addi RC, RC, 1
+ | b <1
+ |
+ |5: // Traverse hash part.
+ | lwz TMP1, TAB:RB->hmask
+ | sub RC, RC, TMP0
+ | lwz TMP2, TAB:RB->node
+ |6:
+ | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1.
+ | slwi TMP3, RC, 5
+ | bgty <3
+ | slwi RB, RC, 3
+ | sub TMP3, TMP3, RB
+ | lwzx RB, TMP2, TMP3
+ | lfdx f0, TMP2, TMP3
+ | add NODE:TMP3, TMP2, TMP3
+ | checknil RB
+ | lwz INS, -4(PC)
+ | beq >7
+ | lfd f1, NODE:TMP3->key
+ | addis TMP2, PC, -(BCBIAS_J*4 >> 16)
+ | stfd f0, 8(RA)
+ | add RC, RC, TMP0
+ | decode_RD4 TMP1, INS
+ | stfd f1, 0(RA)
+ | addi RC, RC, 1
+ | add PC, TMP1, TMP2
+ | stw RC, -4(RA) // Update control var.
+ | b <3
+ |
+ |7: // Skip holes in hash part.
+ | addi RC, RC, 1
+ | b <6
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | add RA, BASE, RA
+ | lwz TMP0, -24(RA)
+ | lwz CFUNC:TMP1, -20(RA)
+ | lwz TMP2, -16(RA)
+ | lwz TMP3, -8(RA)
+ | cmpwi cr0, TMP2, LJ_TTAB
+ | cmpwi cr1, TMP0, LJ_TFUNC
+ | cmpwi cr6, TMP3, LJ_TNIL
+ | bne cr1, >5
+ | lbz TMP1, CFUNC:TMP1->ffid
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq
+ | cmpwi cr7, TMP1, FF_next_N
+ | srwi TMP0, RD, 1
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
+ | add TMP3, PC, TMP0
+ | bne cr0, >5
+ | stw ZERO, -4(RA) // Initialize control var.
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP0, BC_JMP
+ | li TMP1, BC_ITERC
+ | stb TMP0, -1(PC)
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ | stb TMP1, 3(PC)
+ | b <1
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | lwz TMP0, FRAME_PC(BASE)
+ | add RC, BASE, RC
+ | add RA, BASE, RA
+ | addi RC, RC, FRAME_VARG
+ | add TMP2, RA, RB
+ | subi TMP3, BASE, 8 // TMP3 = vtop
+ | sub RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | cmplwi cr1, RB, 0
+ | sub. TMP1, TMP3, RC
+ | beq cr1, >5 // Copy all varargs?
+ | subi TMP2, TMP2, 16
+ | ble >2 // No vararg slots?
+ |1: // Copy vararg slots to destination slots.
+ | lfd f0, 0(RC)
+ | addi RC, RC, 8
+ | stfd f0, 0(RA)
+ | cmplw RA, TMP2
+ | cmplw cr1, RC, TMP3
+ | bge >3 // All destination slots filled?
+ | addi RA, RA, 8
+ | blt cr1, <1 // More vararg slots?
+ |2: // Fill up remainder with nil.
+ | stw TISNIL, 0(RA)
+ | cmplw RA, TMP2
+ | addi RA, RA, 8
+ | blt <2
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | lwz TMP0, L->maxstack
+ | li MULTRES, 8 // MULTRES = (0+1)*8
+ | bley <3 // No vararg slots?
+ | add TMP2, RA, TMP1
+ | cmplw TMP2, TMP0
+ | addi MULTRES, TMP1, 8
+ | bgt >7
+ |6:
+ | lfd f0, 0(RC)
+ | addi RC, RC, 8
+ | stfd f0, 0(RA)
+ | cmplw RC, TMP3
+ | addi RA, RA, 8
+ | blt <6 // More vararg slots?
+ | b <3
+ |
+ |7: // Grow stack for varargs.
+ | mr CARG1, L
+ | stw RA, L->top
+ | sub SAVE0, RC, BASE // Need delta, because BASE may change.
+ | stw BASE, L->base
+ | sub RA, RA, BASE
+ | stw PC, SAVE_PC
+ | srwi CARG2, TMP1, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz BASE, L->base
+ | add RA, BASE, RA
+ | add RC, BASE, SAVE0
+ | subi TMP3, BASE, 8
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ |1:
+ | andi. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bne ->BC_RETV_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lwz INS, -4(PC)
+ | cmpwi RD, 8
+ | subi TMP2, BASE, 8
+ | subi RC, RD, 8
+ | decode_RB8 RB, INS
+ | beq >3
+ | li TMP1, 0
+ |2:
+ | addi TMP3, TMP1, 8
+ | lfdx f0, RA, TMP1
+ | cmpw TMP3, RC
+ | stfdx f0, TMP2, TMP1
+ | beq >3
+ | addi TMP1, TMP3, 8
+ | lfdx f1, RA, TMP3
+ | cmpw TMP1, RC
+ | stfdx f1, TMP2, TMP3
+ | bne <2
+ |3:
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | stwx TISNIL, TMP2, TMP1
+ | b <5
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andi. TMP2, TMP1, FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ | andi. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bney ->BC_RETV_Z
+ |
+ | lwz INS, -4(PC)
+ | subi TMP2, BASE, 8
+ | decode_RB8 RB, INS
+ if (op == BC_RET1) {
+ | lfd f0, 0(RA)
+ | stfd f0, 0(TMP2)
+ }
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | stwx TISNIL, TMP2, TMP1
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ if (LJ_DUALNUM) {
+ | // Integer loop.
+ | lwzux TMP1, RA, BASE
+ | lwz CARG1, FORL_IDX*8+4(RA)
+ | cmplw cr0, TMP1, TISNUM
+ if (vk) {
+ | lwz CARG3, FORL_STEP*8+4(RA)
+ | bne >9
+ | addo. CARG1, CARG1, CARG3
+ | cmpwi cr6, CARG3, 0
+ | lwz CARG2, FORL_STOP*8+4(RA)
+ | bso >6
+ |4:
+ | stw CARG1, FORL_IDX*8+4(RA)
+ } else {
+ | lwz TMP3, FORL_STEP*8(RA)
+ | lwz CARG3, FORL_STEP*8+4(RA)
+ | lwz TMP2, FORL_STOP*8(RA)
+ | lwz CARG2, FORL_STOP*8+4(RA)
+ | cmplw cr7, TMP3, TISNUM
+ | cmplw cr1, TMP2, TISNUM
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | cmpwi cr6, CARG3, 0
+ | bne >9
+ }
+ | blt cr6, >5
+ | cmpw CARG1, CARG2
+ |1:
+ | stw TISNUM, FORL_EXT*8(RA)
+ if (op != BC_JFORL) {
+ | srwi RD, RD, 1
+ }
+ | stw CARG1, FORL_EXT*8+4(RA)
+ if (op != BC_JFORL) {
+ | add RD, PC, RD
+ }
+ if (op == BC_FORI) {
+ | bgt >3 // See FP loop below.
+ } else if (op == BC_JFORI) {
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ | bley >7
+ } else if (op == BC_IFORL) {
+ | bgt >2
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else {
+ | bley =>BC_JLOOP
+ }
+ |2:
+ | ins_next
+ |5: // Invert check for negative step.
+ | cmpw CARG2, CARG1
+ | b <1
+ if (vk) {
+ |6: // Potential overflow.
+ | mcrxr cr0; bley <4 // Ignore unrelated overflow.
+ | b <2
+ }
+ }
+ if (vk) {
+ if (LJ_DUALNUM) {
+ |9: // FP loop.
+ | lfd f1, FORL_IDX*8(RA)
+ } else {
+ | lfdux f1, RA, BASE
+ }
+ | lfd f3, FORL_STEP*8(RA)
+ | lfd f2, FORL_STOP*8(RA)
+ | lwz TMP3, FORL_STEP*8(RA)
+ | fadd f1, f1, f3
+ | stfd f1, FORL_IDX*8(RA)
+ } else {
+ if (LJ_DUALNUM) {
+ |9: // FP loop.
+ } else {
+ | lwzux TMP1, RA, BASE
+ | lwz TMP3, FORL_STEP*8(RA)
+ | lwz TMP2, FORL_STOP*8(RA)
+ | cmplw cr0, TMP1, TISNUM
+ | cmplw cr7, TMP3, TISNUM
+ | cmplw cr1, TMP2, TISNUM
+ }
+ | lfd f1, FORL_IDX*8(RA)
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr7+lt
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | lfd f2, FORL_STOP*8(RA)
+ | bge ->vmeta_for
+ }
+ | cmpwi cr6, TMP3, 0
+ if (op != BC_JFORL) {
+ | srwi RD, RD, 1
+ }
+ | stfd f1, FORL_EXT*8(RA)
+ if (op != BC_JFORL) {
+ | add RD, PC, RD
+ }
+ | fcmpu cr0, f1, f2
+ if (op == BC_JFORI) {
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ }
+ | blt cr6, >5
+ if (op == BC_FORI) {
+ | bgt >3
+ } else if (op == BC_IFORL) {
+ if (LJ_DUALNUM) {
+ | bgty <2
+ } else {
+ | bgt >2
+ }
+ |1:
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else if (op == BC_JFORI) {
+ | bley >7
+ } else {
+ | bley =>BC_JLOOP
+ }
+ if (LJ_DUALNUM) {
+ | b <2
+ } else {
+ |2:
+ | ins_next
+ }
+ |5: // Negative step.
+ if (op == BC_FORI) {
+ | bge <2
+ |3: // Used by integer loop, too.
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else if (op == BC_IFORL) {
+ | bgey <1
+ } else if (op == BC_JFORI) {
+ | bgey >7
+ } else {
+ | bgey =>BC_JLOOP
+ }
+ | b <2
+ if (op == BC_JFORI) {
+ |7:
+ | lwz INS, -4(PC)
+ | decode_RD8 RD, INS
+ | b =>BC_JLOOP
+ }
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | lwzux TMP1, RA, BASE
+ | lwz TMP2, 4(RA)
+ | checknil TMP1; beq >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | stw TMP1, -8(RA)
+ | stw TMP2, -4(RA)
+ | b =>BC_JLOOP
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | stw TMP1, -8(RA)
+ | stw TMP2, -4(RA)
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD is only used by trace recorder to determine scope/extent
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ | // RA = base*8 (ignored), RD = traceno*8
+ | lwz TMP1, DISPATCH_J(trace)(DISPATCH)
+ | srwi RD, RD, 1
+ | // Traces on PPC don't store the trace number, so use 0.
+ | stw ZERO, DISPATCH_GL(vmstate)(DISPATCH)
+ | lwzx TRACE:TMP2, TMP1, RD
+ | mcrxr cr0 // Clear SO flag.
+ | lwz TMP2, TRACE:TMP2->mcode
+ | stw BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | mtctr TMP2
+ | stw L, DISPATCH_GL(jit_L)(DISPATCH)
+ | addi JGL, DISPATCH, GG_DISP2G+32768
+ | bctr
+#endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ | hotcall
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | lbz TMP1, -4+PC2PROTO(numparams)(PC)
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw RA, TMP2
+ | slwi TMP1, TMP1, 3
+ | bgt ->vm_growstack_l
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ }
+ |2:
+ | cmplw NARGS8:RC, TMP1 // Check for missing parameters.
+ | blt >3
+ if (op == BC_JFUNCF) {
+ | decode_RD8 RD, INS
+ | b =>BC_JLOOP
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | stwx TISNIL, BASE, NARGS8:RC
+ | addi NARGS8:RC, NARGS8:RC, 8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | add TMP1, BASE, RC
+ | add TMP0, RA, RC
+ | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC.
+ | addi TMP3, RC, 8+FRAME_VARG
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw TMP0, TMP2
+ | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG.
+ | bge ->vm_growstack_l
+ | lbz TMP2, -4+PC2PROTO(numparams)(PC)
+ | mr RA, BASE
+ | mr RC, TMP1
+ | ins_next1
+ | cmpwi TMP2, 0
+ | addi BASE, TMP1, 8
+ | beq >3
+ |1:
+ | cmplw RA, RC // Less args than parameters?
+ | lwz TMP0, 0(RA)
+ | lwz TMP3, 4(RA)
+ | bge >4
+ | stw TISNIL, 0(RA) // Clear old fixarg slot (help the GC).
+ | addi RA, RA, 8
+ |2:
+ | addic. TMP2, TMP2, -1
+ | stw TMP0, 8(TMP1)
+ | stw TMP3, 12(TMP1)
+ | addi TMP1, TMP1, 8
+ | bne <1
+ |3:
+ | ins_next2
+ |
+ |4: // Clear missing parameters.
+ | li TMP0, LJ_TNIL
+ | b <2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | lwz TMP3, CFUNC:RB->f
+ } else {
+ | lwz TMP3, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | add TMP1, RA, NARGS8:RC
+ | lwz TMP2, L->maxstack
+ | add RC, BASE, NARGS8:RC
+ | stw BASE, L->base
+ | cmplw TMP1, TMP2
+ | stw RC, L->top
+ | li_vmstate C
+ | mtctr TMP3
+ if (op == BC_FUNCCW) {
+ | lwz CARG2, CFUNC:RB->f
+ }
+ | mr CARG1, L
+ | bgt ->vm_growstack_c // Need to grow stack.
+ | st_vmstate
+ | bctrl // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | lwz BASE, L->base
+ | slwi RD, CRET1, 3
+ | lwz TMP1, L->top
+ | li_vmstate INTERP
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | sub RA, TMP1, RD // RA = L->top - nresults*8
+ | st_vmstate
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte %d\n\t.uleb128 %d\n",
+ 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x8e\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0xe\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte %d\n\t.uleb128 %d\n",
+ 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x8e\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0xe\n"
+ "\t.align 2\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/src/LuaJIT/src/buildvm_ppc.h b/src/LuaJIT/src/buildvm_ppc.h
new file mode 100644
index 000000000..81285d63d
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_ppc.h
@@ -0,0 +1,9796 @@
+/*
+** This file has been pre-processed with DynASM.
+** http://luajit.org/dynasm.html
+** DynASM version 1.3.0, DynASM ppc version 1.3.0
+** DO NOT EDIT! The original file is in "buildvm_ppc.dasc".
+*/
+
+#if DASM_VERSION != 10300
+#error "Version mismatch between DynASM and included encoding engine"
+#endif
+
+#define DASM_SECTION_CODE_OP 0
+#define DASM_SECTION_CODE_SUB 1
+#define DASM_MAXSECTION 2
+static const unsigned int build_actionlist[7762] = {
+0x00010001,
+0x00060014,
+0x72000000,
+0x00090200,
+0x39000000,
+0x00098200,
+0x41820000,
+0x00050815,
+0x8209fff8,
+0x7d2e4b78,
+0x9514fff8,
+0x00060016,
+0x72000000,
+0x00090200,
+0x398c0008,
+0x7d936378,
+0x41820000,
+0x00050817,
+0x00060018,
+0x2c000000,
+0x00098200,
+0x56090038,
+0x38000000,
+0x00098200,
+0x7d297050,
+0x40a20000,
+0x00050814,
+0x350cfff8,
+0x91320000,
+0x00098200,
+0x8121002c,
+0x39cefff8,
+0x90110000,
+0x00098200,
+0x55291800,
+0x000900a1,
+0x41820000,
+0x00050802,
+0x0006000b,
+0x3508fff8,
+0xc8140000,
+0x3a940008,
+0xd80e0000,
+0x39ce0008,
+0x40a20000,
+0x0005080b,
+0x0006000c,
+0x7c096000,
+0x40820000,
+0x00050806,
+0x0006000d,
+0x91d20000,
+0x00098200,
+0x00060019,
+0x00000000,
+0x80010028,
+0x38600000,
+0x90120000,
+0x00098200,
+0x0006001a,
+0x80010114,
+0x81810034,
+0x81c10000,
+0x00098200,
+0xc9c10000,
+0x00098200,
+0x81e10000,
+0x00098200,
+0xc9e10000,
+0x00098200,
+0x82010000,
+0x00098200,
+0xca010000,
+0x00098200,
+0x82210000,
+0x00098200,
+0xca210000,
+0x00098200,
+0x82410000,
+0x00098200,
+0xca410000,
+0x00098200,
+0x82610000,
+0x00098200,
+0xca610000,
+0x00098200,
+0x7c0803a6,
+0x7d838120,
+0x82810000,
+0x00098200,
+0xca810000,
+0x00098200,
+0x82a10000,
+0x00098200,
+0xcaa10000,
+0x00098200,
+0x82c10000,
+0x00098200,
+0xcac10000,
+0x00098200,
+0x82e10000,
+0x00098200,
+0xcae10000,
+0x00098200,
+0x00000000,
+0x83010000,
+0x00098200,
+0xcb010000,
+0x00098200,
+0x83210000,
+0x00098200,
+0xcb210000,
+0x00098200,
+0x83410000,
+0x00098200,
+0xcb410000,
+0x00098200,
+0x83610000,
+0x00098200,
+0xcb610000,
+0x00098200,
+0x83810000,
+0x00098200,
+0xcb810000,
+0x00098200,
+0x83a10000,
+0x00098200,
+0xcba10000,
+0x00098200,
+0x83c10000,
+0x00098200,
+0xcbc10000,
+0x00098200,
+0x83e10000,
+0x00098200,
+0xcbe10000,
+0x00098200,
+0x38210110,
+0x4e800020,
+0x00060010,
+0x40810000,
+0x00050807,
+0x81120000,
+0x00098200,
+0x7c0e4040,
+0x40800000,
+0x00050808,
+0x92ee0000,
+0x398c0008,
+0x39ce0008,
+0x48000000,
+0x0005000c,
+0x00060011,
+0x00000000,
+0x20c90000,
+0x7c096050,
+0x7d084110,
+0x7c004038,
+0x7dc07050,
+0x48000000,
+0x0005000d,
+0x00060012,
+0x91d20000,
+0x00098200,
+0x7d956378,
+0x7d244b78,
+0x7e439378,
+0x48000001,
+0x00030000,
+0x8121002c,
+0x7eacab78,
+0x55291800,
+0x000900a1,
+0x81d20000,
+0x00098200,
+0x48000000,
+0x0005000c,
+0x0006001b,
+0x7c611b78,
+0x7c832378,
+0x0006001c,
+0x82410024,
+0x38000000,
+0x00098200,
+0x81120000,
+0x00098200,
+0x90080000,
+0x00098200,
+0x48000000,
+0x0005001a,
+0x0006001d,
+0x5461003a,
+0x0006001e,
+0x82410024,
+0x3ac00000,
+0x00098200,
+0x81d20000,
+0x00098200,
+0x3cc059c0,
+0x82320000,
+0x00098200,
+0x3b000000,
+0x90c10010,
+0x39000000,
+0x00098200,
+0x60c60004,
+0x3ae00000,
+0x00098200,
+0x38000000,
+0x00098200,
+0xc3c10010,
+0x820efff8,
+0x3a8efff8,
+0x90c10010,
+0x3a310000,
+0x00098200,
+0x00000000,
+0x91140000,
+0x39800010,
+0x90110000,
+0x00098200,
+0xc3e10010,
+0x48000000,
+0x00050016,
+0x0006001f,
+0x38800000,
+0x00098200,
+0x48000000,
+0x00050002,
+0x00060020,
+0x7d6e5a14,
+0x7e8ea050,
+0x91d20000,
+0x00098200,
+0x3a100004,
+0x91720000,
+0x00098200,
+0x568400fe,
+0x000900ab,
+0x0006000c,
+0x92010020,
+0x7e439378,
+0x48000001,
+0x00030000,
+0x81d20000,
+0x00098200,
+0x81720000,
+0x00098200,
+0x814efffc,
+0x7d6e5850,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00060021,
+0x9421fef0,
+0x91c10000,
+0x00098200,
+0xd9c10000,
+0x00098200,
+0x91e10000,
+0x00098200,
+0xd9e10000,
+0x00098200,
+0x92010000,
+0x00098200,
+0xda010000,
+0x00098200,
+0x7c0802a6,
+0x92210000,
+0x00098200,
+0x00000000,
+0xda210000,
+0x00098200,
+0x92410000,
+0x00098200,
+0xda410000,
+0x00098200,
+0x92610000,
+0x00098200,
+0xda610000,
+0x00098200,
+0x92810000,
+0x00098200,
+0xda810000,
+0x00098200,
+0x92a10000,
+0x00098200,
+0xdaa10000,
+0x00098200,
+0x92c10000,
+0x00098200,
+0xdac10000,
+0x00098200,
+0x90010114,
+0x92e10000,
+0x00098200,
+0xdae10000,
+0x00098200,
+0x93010000,
+0x00098200,
+0xdb010000,
+0x00098200,
+0x93210000,
+0x00098200,
+0xdb210000,
+0x00098200,
+0x7c000026,
+0x93410000,
+0x00098200,
+0xdb410000,
+0x00098200,
+0x93610000,
+0x00098200,
+0xdb610000,
+0x00098200,
+0x93810000,
+0x00098200,
+0x00000000,
+0xdb810000,
+0x00098200,
+0x93a10000,
+0x00098200,
+0xdba10000,
+0x00098200,
+0x93c10000,
+0x00098200,
+0xdbc10000,
+0x00098200,
+0x93e10000,
+0x00098200,
+0xdbe10000,
+0x00098200,
+0x90010034,
+0x7c721b78,
+0x82320000,
+0x00098200,
+0x7c8e2378,
+0x89120000,
+0x00098200,
+0x92410024,
+0x3a000000,
+0x00098200,
+0x38010000,
+0x00098200,
+0x3a310000,
+0x00098200,
+0x90a1002c,
+0x28080000,
+0x90a10030,
+0x90120000,
+0x00098200,
+0x90a10028,
+0x90610020,
+0x41820000,
+0x00050803,
+0x7dd47378,
+0x81d20000,
+0x00098200,
+0x3ac00000,
+0x00098200,
+0x81120000,
+0x00098200,
+0x820efff8,
+0x3cc059c0,
+0x98b20000,
+0x00098200,
+0x90c10010,
+0x60c60004,
+0xc3c10010,
+0x7d8e4050,
+0x90c10010,
+0x3c004338,
+0x398c0008,
+0x90010008,
+0x38000000,
+0x00098200,
+0x3b000000,
+0x90110000,
+0x00098200,
+0x72000000,
+0x00090200,
+0x7d936378,
+0xc3e10010,
+0x3ae00000,
+0x00098200,
+0x00000000,
+0x41820000,
+0x00050817,
+0x48000000,
+0x00050018,
+0x00060022,
+0x9421fef0,
+0x91c10000,
+0x00098200,
+0xd9c10000,
+0x00098200,
+0x91e10000,
+0x00098200,
+0xd9e10000,
+0x00098200,
+0x92010000,
+0x00098200,
+0xda010000,
+0x00098200,
+0x7c0802a6,
+0x92210000,
+0x00098200,
+0xda210000,
+0x00098200,
+0x92410000,
+0x00098200,
+0xda410000,
+0x00098200,
+0x92610000,
+0x00098200,
+0xda610000,
+0x00098200,
+0x92810000,
+0x00098200,
+0xda810000,
+0x00098200,
+0x92a10000,
+0x00098200,
+0xdaa10000,
+0x00098200,
+0x92c10000,
+0x00098200,
+0xdac10000,
+0x00098200,
+0x90010114,
+0x92e10000,
+0x00098200,
+0x00000000,
+0xdae10000,
+0x00098200,
+0x93010000,
+0x00098200,
+0xdb010000,
+0x00098200,
+0x93210000,
+0x00098200,
+0xdb210000,
+0x00098200,
+0x7c000026,
+0x93410000,
+0x00098200,
+0xdb410000,
+0x00098200,
+0x93610000,
+0x00098200,
+0xdb610000,
+0x00098200,
+0x93810000,
+0x00098200,
+0xdb810000,
+0x00098200,
+0x93a10000,
+0x00098200,
+0xdba10000,
+0x00098200,
+0x93c10000,
+0x00098200,
+0xdbc10000,
+0x00098200,
+0x93e10000,
+0x00098200,
+0xdbe10000,
+0x00098200,
+0x90010034,
+0x3a000000,
+0x00098200,
+0x90c10030,
+0x48000000,
+0x00050001,
+0x00060023,
+0x9421fef0,
+0x91c10000,
+0x00098200,
+0xd9c10000,
+0x00098200,
+0x00000000,
+0x91e10000,
+0x00098200,
+0xd9e10000,
+0x00098200,
+0x92010000,
+0x00098200,
+0xda010000,
+0x00098200,
+0x7c0802a6,
+0x92210000,
+0x00098200,
+0xda210000,
+0x00098200,
+0x92410000,
+0x00098200,
+0xda410000,
+0x00098200,
+0x92610000,
+0x00098200,
+0xda610000,
+0x00098200,
+0x92810000,
+0x00098200,
+0xda810000,
+0x00098200,
+0x92a10000,
+0x00098200,
+0xdaa10000,
+0x00098200,
+0x92c10000,
+0x00098200,
+0xdac10000,
+0x00098200,
+0x90010114,
+0x92e10000,
+0x00098200,
+0xdae10000,
+0x00098200,
+0x93010000,
+0x00098200,
+0xdb010000,
+0x00098200,
+0x93210000,
+0x00098200,
+0xdb210000,
+0x00098200,
+0x00000000,
+0x7c000026,
+0x93410000,
+0x00098200,
+0xdb410000,
+0x00098200,
+0x93610000,
+0x00098200,
+0xdb610000,
+0x00098200,
+0x93810000,
+0x00098200,
+0xdb810000,
+0x00098200,
+0x93a10000,
+0x00098200,
+0xdba10000,
+0x00098200,
+0x93c10000,
+0x00098200,
+0xdbc10000,
+0x00098200,
+0x93e10000,
+0x00098200,
+0xdbe10000,
+0x00098200,
+0x90010034,
+0x3a000000,
+0x00098200,
+0x0006000b,
+0x81030000,
+0x00098200,
+0x90a1002c,
+0x7c721b78,
+0x90610024,
+0x7c8e2378,
+0x90320000,
+0x00098200,
+0x82320000,
+0x00098200,
+0x90610020,
+0x91010028,
+0x3a310000,
+0x00098200,
+0x0006000d,
+0x81320000,
+0x00098200,
+0x3ac00000,
+0x00098200,
+0x81120000,
+0x00098200,
+0x00000000,
+0x3cc059c0,
+0x7e107214,
+0x90c10010,
+0x3b000000,
+0x60c60004,
+0xc3c10010,
+0x7e098050,
+0x90c10010,
+0x3c004338,
+0x7d6e4050,
+0x90010008,
+0x38000000,
+0x00098200,
+0xc3e10010,
+0x3ae00000,
+0x00098200,
+0x90110000,
+0x00098200,
+0x00060024,
+0x800efff8,
+0x814efffc,
+0x2c000000,
+0x00098200,
+0x40820000,
+0x00050825,
+0x00060026,
+0x920efff8,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00060027,
+0x9421fef0,
+0x91c10000,
+0x00098200,
+0xd9c10000,
+0x00098200,
+0x91e10000,
+0x00098200,
+0xd9e10000,
+0x00098200,
+0x92010000,
+0x00098200,
+0xda010000,
+0x00098200,
+0x7c0802a6,
+0x92210000,
+0x00098200,
+0xda210000,
+0x00098200,
+0x92410000,
+0x00098200,
+0xda410000,
+0x00098200,
+0x92610000,
+0x00098200,
+0xda610000,
+0x00098200,
+0x92810000,
+0x00098200,
+0x00000000,
+0xda810000,
+0x00098200,
+0x92a10000,
+0x00098200,
+0xdaa10000,
+0x00098200,
+0x92c10000,
+0x00098200,
+0xdac10000,
+0x00098200,
+0x90010114,
+0x92e10000,
+0x00098200,
+0xdae10000,
+0x00098200,
+0x93010000,
+0x00098200,
+0xdb010000,
+0x00098200,
+0x93210000,
+0x00098200,
+0xdb210000,
+0x00098200,
+0x7c000026,
+0x93410000,
+0x00098200,
+0xdb410000,
+0x00098200,
+0x93610000,
+0x00098200,
+0xdb610000,
+0x00098200,
+0x93810000,
+0x00098200,
+0xdb810000,
+0x00098200,
+0x93a10000,
+0x00098200,
+0xdba10000,
+0x00098200,
+0x93c10000,
+0x00098200,
+0xdbc10000,
+0x00098200,
+0x93e10000,
+0x00098200,
+0x00000000,
+0xdbe10000,
+0x00098200,
+0x90010034,
+0x7c721b78,
+0x80030000,
+0x00098200,
+0x90610024,
+0x81120000,
+0x00098200,
+0x90610020,
+0x7c080050,
+0x81120000,
+0x00098200,
+0x90320000,
+0x00098200,
+0x39200000,
+0x9001002c,
+0x91210030,
+0x91010028,
+0x7cc903a6,
+0x4e800421,
+0x7c6e1b79,
+0x82320000,
+0x00098200,
+0x3a000000,
+0x00098200,
+0x3a310000,
+0x00098200,
+0x40820000,
+0x0005080d,
+0x48000000,
+0x00050019,
+0x00060015,
+0x800efff4,
+0x7dca7378,
+0x7d2e4b78,
+0x8109fffc,
+0x00000000,
+0x28000001,
+0x00000000,
+0x820afff0,
+0x392cfff8,
+0x81080000,
+0x00098200,
+0x7ef4492e,
+0x00000000,
+0x40810000,
+0x00050801,
+0x00000000,
+0x81e80000,
+0x00098200,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x0006000b,
+0x41820000,
+0x00050828,
+0x390afff0,
+0x7d6e4050,
+0x48000000,
+0x00050029,
+0x00000000,
+0x0006002a,
+0x80f0fffc,
+0x388afff0,
+0x54f55d78,
+0xc8140000,
+0x7d0eaa14,
+0x91d20000,
+0x00098200,
+0x7c082040,
+0x7ca82050,
+0x54f4dd78,
+0xd8040000,
+0x40a20000,
+0x0005082b,
+0x7c0ea5ae,
+0x48000000,
+0x0005002c,
+0x0006002d,
+0x38b10000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x54ea5d78,
+0x91650004,
+0x7c8e5214,
+0x90050000,
+0x48000000,
+0x00050001,
+0x0006002e,
+0x38910000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x91440004,
+0x38b10000,
+0x00098200,
+0x90040000,
+0x39000000,
+0x00098200,
+0x91650004,
+0x91050000,
+0x48000000,
+0x00050001,
+0x0006002f,
+0x00000000,
+0x9001000c,
+0xc8010008,
+0xfc00f028,
+0x00000000,
+0x54ea5d78,
+0x38b10000,
+0x00098200,
+0x7c8e5214,
+0x00000000,
+0x92c50000,
+0x90050004,
+0x00000000,
+0xd8050000,
+0x00000000,
+0x48000000,
+0x00050001,
+0x00060030,
+0x54ea5d78,
+0x54eb9d78,
+0x7c8e5214,
+0x7cae5a14,
+0x0006000b,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x92010020,
+0x48000001,
+0x00030001,
+0x28030000,
+0x41820000,
+0x00050803,
+0xc8030000,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000d,
+0x210e0000,
+0x00098200,
+0x81d20000,
+0x00098200,
+0x920efff0,
+0x7e087214,
+0x814efffc,
+0x39600010,
+0x48000000,
+0x00050026,
+0x00060031,
+0x38b10000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x54ea5d78,
+0x91650004,
+0x7c8e5214,
+0x90050000,
+0x48000000,
+0x00050001,
+0x00060032,
+0x38910000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x91440004,
+0x38b10000,
+0x00098200,
+0x90040000,
+0x39000000,
+0x00098200,
+0x91650004,
+0x91050000,
+0x48000000,
+0x00050001,
+0x00060033,
+0x00000000,
+0x9001000c,
+0xc8010008,
+0xfc00f028,
+0x00000000,
+0x54ea5d78,
+0x38b10000,
+0x00098200,
+0x7c8e5214,
+0x00000000,
+0x92c50000,
+0x90050004,
+0x00000000,
+0xd8050000,
+0x00000000,
+0x48000000,
+0x00050001,
+0x00060034,
+0x54ea5d78,
+0x54eb9d78,
+0x7c8e5214,
+0x7cae5a14,
+0x0006000b,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x92010020,
+0x48000001,
+0x00030002,
+0x28030000,
+0x7c0ea4ae,
+0x41820000,
+0x00050803,
+0x80f00000,
+0x3a100004,
+0xd8030000,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000d,
+0x210e0000,
+0x00098200,
+0x81d20000,
+0x00098200,
+0x920efff0,
+0x7e087214,
+0x814efffc,
+0x39600018,
+0xd80e0010,
+0x48000000,
+0x00050026,
+0x00060035,
+0x7e439378,
+0x3a10fffc,
+0x00000000,
+0x7e84a378,
+0x00000000,
+0x7c8ea214,
+0x00000000,
+0x92010020,
+0x00000000,
+0x7d856378,
+0x00000000,
+0x7cae6214,
+0x00000000,
+0x91d20000,
+0x00098200,
+0x54e6063e,
+0x48000001,
+0x00030003,
+0x0006000d,
+0x28030001,
+0x41810000,
+0x00050836,
+0x20630000,
+0x0006000e,
+0x80f00000,
+0x3a100004,
+0x54e993ba,
+0x3d290000,
+0x00098200,
+0x7d291838,
+0x7e104a14,
+0x0006002c,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00060037,
+0x80f0fffc,
+0xc8140000,
+0x54e8dd78,
+0x7c0e45ae,
+0x48000000,
+0x0005002c,
+0x00060038,
+0x80140000,
+0x20000000,
+0x00098200,
+0x7c631910,
+0x7c6318f8,
+0x48000000,
+0x0005000e,
+0x00060039,
+0x80140000,
+0x20000000,
+0x00098200,
+0x7c631910,
+0x48000000,
+0x0005000e,
+0x0006003a,
+0x3a10fffc,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x92010020,
+0x48000001,
+0x00030004,
+0x48000000,
+0x0005000d,
+0x0006003b,
+0x00000000,
+0x7ce43b78,
+0x3a10fffc,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x92010020,
+0x48000001,
+0x00030005,
+0x48000000,
+0x0005000d,
+0x00000000,
+0x0006003c,
+0x7caf5a14,
+0x7cce5214,
+0x48000000,
+0x00050001,
+0x0006003d,
+0x00000000,
+0x7d655b78,
+0x7d465378,
+0x48000000,
+0x00050001,
+0x00000000,
+0x0006003e,
+0x7d856378,
+0x7d866378,
+0x48000000,
+0x00050001,
+0x0006003f,
+0x7cae5214,
+0x7ccf5a14,
+0x48000000,
+0x00050001,
+0x00060040,
+0x7cae5214,
+0x7cce5a14,
+0x00000000,
+0x48000000,
+0x00050001,
+0x00000000,
+0x00060041,
+0x00060042,
+0x00000000,
+0x7d455378,
+0x7d665b78,
+0x00000000,
+0x0006000b,
+0x7c8ea214,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x92010020,
+0x54e7063e,
+0x48000001,
+0x00030006,
+0x28030000,
+0x41820000,
+0x0005082c,
+0x00060036,
+0x7d0e1850,
+0x9203fff0,
+0x7dc97378,
+0x3a080000,
+0x00098200,
+0x7c6e1b78,
+0x39600010,
+0x48000000,
+0x00050024,
+0x00060043,
+0x00000000,
+0x7c751b78,
+0x00000000,
+0x7d846378,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x92010020,
+0x48000001,
+0x00030007,
+0x00000000,
+0x28030000,
+0x40820000,
+0x00050836,
+0x7ea3ab78,
+0x48000000,
+0x00050044,
+0x00000000,
+0x48000000,
+0x00050036,
+0x00000000,
+0x00060025,
+0x7e439378,
+0x91320000,
+0x00098200,
+0x388efff8,
+0x92010020,
+0x7cae5a14,
+0x7d755b78,
+0x48000001,
+0x00030008,
+0x814efffc,
+0x39750008,
+0x920efff8,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00060045,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x3894fff8,
+0x92010020,
+0x7cb45a14,
+0x7d755b78,
+0x48000001,
+0x00030008,
+0x810efff8,
+0x39750008,
+0x8154fffc,
+0x48000000,
+0x00050046,
+0x00060047,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x7e84a378,
+0x92010020,
+0x7cf53b78,
+0x48000001,
+0x00030009,
+0x00000000,
+0x56a0063e,
+0x00000000,
+0x56b4dd78,
+0x00000000,
+0x2c000000,
+0x00098200,
+0x00000000,
+0x56ac9b78,
+0x00000000,
+0x41a20000,
+0x00070800,
+0x00000000,
+0x48000000,
+0x00070000,
+0x00060048,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x39000000,
+0x00098200,
+0x3a8efff8,
+0x7c854040,
+0x820efff8,
+0x40840000,
+0x00050849,
+0x90b40000,
+0x398b0008,
+0x90740004,
+0x41820000,
+0x0005084a,
+0x39000008,
+0x396bfff8,
+0x0006000b,
+0x7c085840,
+0x7c0e44ae,
+0x7c1445ae,
+0x39080008,
+0x40a20000,
+0x0005080b,
+0x48000000,
+0x0005004a,
+0x0006004b,
+0x280b0008,
+0x806e0000,
+0x41800000,
+0x00050849,
+0x7c161810,
+0x7d231910,
+0x7d280338,
+0x39080000,
+0x00098200,
+0x55081800,
+0x000900a1,
+0x392a0000,
+0x00098200,
+0x7c2944ae,
+0x48000000,
+0x0005004c,
+0x0006004d,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x40820000,
+0x00050806,
+0x0006000b,
+0x80630000,
+0x00098200,
+0x0006000c,
+0x00000000,
+0x38a00000,
+0x00098200,
+0x28030000,
+0x81710000,
+0x00098200,
+0x41820000,
+0x0005084e,
+0x80030000,
+0x00098200,
+0x38a00000,
+0x00098200,
+0x810b0000,
+0x00098200,
+0x81230000,
+0x00098200,
+0x7d080038,
+0x55002800,
+0x000900a1,
+0x55081800,
+0x000900a1,
+0x7d080050,
+0x7d294214,
+0x0006000d,
+0x80c90000,
+0x00098200,
+0x80090000,
+0x00098200,
+0x80890000,
+0x00098200,
+0x81090000,
+0x00098200,
+0x2c060000,
+0x00098200,
+0x40820000,
+0x00050804,
+0x7c005800,
+0x41820000,
+0x00050805,
+0x0006000e,
+0x81290000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005084e,
+0x48000000,
+0x0005000d,
+0x0006000f,
+0x00000000,
+0x2c040000,
+0x00098200,
+0x41820000,
+0x0005084e,
+0x7c852378,
+0x7d034378,
+0x48000000,
+0x0005004e,
+0x00060010,
+0x2c050000,
+0x00098200,
+0x41820000,
+0x0005080b,
+0x7c162810,
+0x7d252910,
+0x7d280338,
+0x39080000,
+0x00098200,
+0x55081000,
+0x000900a1,
+0x39310000,
+0x00098200,
+0x7c69402e,
+0x48000000,
+0x0005000c,
+0x0006004f,
+0x280b0010,
+0x80ae0000,
+0x80ce0008,
+0x806e0004,
+0x808e000c,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x81030000,
+0x00098200,
+0x2c060000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x28080000,
+0x88c30000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x70c00000,
+0x00090200,
+0x90830000,
+0x00098200,
+0x41820000,
+0x0005084e,
+0x00000000,
+0x80110000,
+0x00098200,
+0x54c607b8,
+0x90710000,
+0x00098200,
+0x98c30000,
+0x00098200,
+0x90030000,
+0x00098200,
+0x48000000,
+0x0005004e,
+0x00060050,
+0x280b0010,
+0x80ce0000,
+0x808e0004,
+0x41800000,
+0x00050849,
+0x2c060000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x38ae0008,
+0x7e439378,
+0x48000001,
+0x0003000a,
+0xc8230000,
+0x48000000,
+0x0005004c,
+0x00060051,
+0x280b0008,
+0x806e0000,
+0xc82e0000,
+0x40820000,
+0x00050849,
+0x7c03b040,
+0x41810000,
+0x00050849,
+0x48000000,
+0x0005004c,
+0x00060052,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x41820000,
+0x0005084e,
+0x80110000,
+0x00098200,
+0x7c05b040,
+0x28800000,
+0x91d20000,
+0x00098200,
+0x4c413342,
+0x92010020,
+0x41820000,
+0x00050849,
+0x00000000,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x00050853,
+0x7e439378,
+0x7dc47378,
+0x00000000,
+0x48000001,
+0x0003000b,
+0x00000000,
+0x48000001,
+0x0003000c,
+0x00000000,
+0x38a00000,
+0x00098200,
+0x48000000,
+0x0005004e,
+0x00060054,
+0x280b0008,
+0x806e0000,
+0x808e0004,
+0x41800000,
+0x00050849,
+0x7eee592e,
+0x2c030000,
+0x00098200,
+0x820efff8,
+0x40820000,
+0x00050849,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x38ae0008,
+0x92010020,
+0x48000001,
+0x0003000d,
+0x28030000,
+0x38a00000,
+0x00098200,
+0x41820000,
+0x0005084e,
+0xc80e0008,
+0x3a8efff8,
+0xc82e0010,
+0xd8140000,
+0x39800000,
+0x00098200,
+0xd8340008,
+0x48000000,
+0x0005004a,
+0x00060055,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x820efff8,
+0x40820000,
+0x00050849,
+0x00000000,
+0x81230000,
+0x00098200,
+0xc80a0000,
+0x00098200,
+0x28090000,
+0x3a8efff8,
+0x40820000,
+0x00050849,
+0x00000000,
+0xc80a0000,
+0x00098200,
+0x3a8efff8,
+0x00000000,
+0x92ee0008,
+0x39800000,
+0x00098200,
+0xd8140000,
+0x48000000,
+0x0005004a,
+0x00060056,
+0x280b0010,
+0x80ae0000,
+0x806e0004,
+0x80ce0008,
+0x00000000,
+0x812e000c,
+0x00000000,
+0xc84e0008,
+0x00000000,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x7c86b040,
+0x820efff8,
+0x00000000,
+0x40820000,
+0x00050849,
+0x40860000,
+0x00050849,
+0x00000000,
+0x3c003ff0,
+0x93010014,
+0x40820000,
+0x00050849,
+0x90010010,
+0x40840000,
+0x00050849,
+0xc8210010,
+0xfc00101e,
+0xd8010010,
+0x81210014,
+0x00000000,
+0x80030000,
+0x00098200,
+0x81030000,
+0x00098200,
+0x00000000,
+0xfc42082a,
+0x00000000,
+0x39290001,
+0x3a8efff8,
+0x7c004840,
+0x00000000,
+0x92d40000,
+0x55261800,
+0x000900a1,
+0x91340004,
+0x00000000,
+0x55261800,
+0x000900a1,
+0xd8540000,
+0x00000000,
+0x40810000,
+0x00050802,
+0x7d28302e,
+0x7c0834ae,
+0x0006000b,
+0x2c090000,
+0x00098200,
+0x39800000,
+0x00098200,
+0x41820000,
+0x0005084a,
+0x39800000,
+0x00098200,
+0xd8140008,
+0x48000000,
+0x0005004a,
+0x0006000c,
+0x80030000,
+0x00098200,
+0x28000000,
+0x39800000,
+0x00098200,
+0x41820000,
+0x0005084a,
+0x7d244b78,
+0x48000001,
+0x0003000e,
+0x28030000,
+0x39800000,
+0x00098200,
+0x41820000,
+0x0005084a,
+0x81230000,
+0xc8030000,
+0x48000000,
+0x0005000b,
+0x00060057,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x820efff8,
+0x40820000,
+0x00050849,
+0x00000000,
+0x81230000,
+0x00098200,
+0xc80a0000,
+0x00098200,
+0x28090000,
+0x3a8efff8,
+0x40820000,
+0x00050849,
+0x00000000,
+0xc80a0000,
+0x00098200,
+0x3a8efff8,
+0x00000000,
+0x92ce0008,
+0x00000000,
+0x930e0008,
+0x00000000,
+0x930e000c,
+0x39800000,
+0x00098200,
+0xd8140000,
+0x48000000,
+0x0005004a,
+0x00060058,
+0x280b0008,
+0x88d10000,
+0x00098200,
+0x41800000,
+0x00050849,
+0x7dc97378,
+0x39ce0008,
+0x54c607fe,
+0x000900ab,
+0x396bfff8,
+0x3a060000,
+0x00098200,
+0x48000000,
+0x00050024,
+0x00060059,
+0x280b0010,
+0x80ce0008,
+0xc84e0008,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x89110000,
+0x00098200,
+0x7dc97378,
+0x2c060000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x39ce0010,
+0x550807fe,
+0x000900ab,
+0xd8490000,
+0x396bfff0,
+0xd8290008,
+0x3a080000,
+0x00098200,
+0x48000000,
+0x00050024,
+0x0006005a,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x88030000,
+0x00098200,
+0x81030000,
+0x00098200,
+0x00000000,
+0x80830000,
+0x00098200,
+0x28000000,
+0x00090200,
+0x81230000,
+0x00098200,
+0x28880000,
+0x80030000,
+0x00098200,
+0x7f844840,
+0x820efff8,
+0x4f013342,
+0x7d245a14,
+0x4f3e1102,
+0x7c890040,
+0x4f18cb82,
+0x92010020,
+0x4f182b82,
+0x91d20000,
+0x00098200,
+0x41980000,
+0x00050849,
+0x0006000b,
+0x39ce0008,
+0x396bfff8,
+0x3929fff8,
+0x91230000,
+0x00098200,
+0x39000000,
+0x91d20000,
+0x00098200,
+0x0006000c,
+0x7c085800,
+0x7c0e44ae,
+0x41820000,
+0x00050803,
+0x7c0445ae,
+0x39080008,
+0x48000000,
+0x0005000c,
+0x0006000d,
+0x38a00000,
+0x7c751b78,
+0x38c00000,
+0x48000001,
+0x00050021,
+0x0006000e,
+0x81350000,
+0x00098200,
+0x28030000,
+0x00090200,
+0x80d50000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x81d20000,
+0x00098200,
+0x90110000,
+0x00098200,
+0x41810000,
+0x00050808,
+0x00000000,
+0x7d893050,
+0x80120000,
+0x00098200,
+0x280c0000,
+0x7d0e6214,
+0x41820000,
+0x00050806,
+0x7c080040,
+0x39000000,
+0x41810000,
+0x00050809,
+0x38ccfff8,
+0x91350000,
+0x00098200,
+0x0006000f,
+0x7c083040,
+0x7c0944ae,
+0x7c0e45ae,
+0x39080008,
+0x40820000,
+0x0005080f,
+0x00060010,
+0x72000000,
+0x00090200,
+0x39000000,
+0x00098200,
+0x3a8efff8,
+0x910efff8,
+0x398c0010,
+0x00060011,
+0x92010020,
+0x7d936378,
+0x41820000,
+0x00050817,
+0x48000000,
+0x00050018,
+0x00060012,
+0x72000000,
+0x00090200,
+0x38c6fff8,
+0x39000000,
+0x00098200,
+0xc8060000,
+0x90d50000,
+0x00098200,
+0x39800000,
+0x00098200,
+0x910efff8,
+0x3a8efff8,
+0xd80e0000,
+0x48000000,
+0x00050011,
+0x00060013,
+0x7e439378,
+0x558400fe,
+0x000900ab,
+0x48000001,
+0x00030000,
+0x38600000,
+0x48000000,
+0x0005000e,
+0x0006005b,
+0x00000000,
+0x806a0000,
+0x00098200,
+0x88030000,
+0x00098200,
+0x81030000,
+0x00098200,
+0x80830000,
+0x00098200,
+0x28000000,
+0x00090200,
+0x81230000,
+0x00098200,
+0x28880000,
+0x80030000,
+0x00098200,
+0x7f844840,
+0x820efff8,
+0x4f013342,
+0x7d245a14,
+0x4f3e1102,
+0x7c890040,
+0x4f18cb82,
+0x92010020,
+0x4f182b82,
+0x91d20000,
+0x00098200,
+0x41980000,
+0x00050849,
+0x0006000b,
+0x91230000,
+0x00098200,
+0x39000000,
+0x91d20000,
+0x00098200,
+0x0006000c,
+0x7c085800,
+0x7c0e44ae,
+0x41820000,
+0x00050803,
+0x7c0445ae,
+0x39080008,
+0x48000000,
+0x0005000c,
+0x0006000d,
+0x38a00000,
+0x7c751b78,
+0x38c00000,
+0x48000001,
+0x00050021,
+0x0006000e,
+0x81350000,
+0x00098200,
+0x28030000,
+0x00090200,
+0x80d50000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x00000000,
+0x81d20000,
+0x00098200,
+0x90110000,
+0x00098200,
+0x41810000,
+0x00050808,
+0x7d893050,
+0x80120000,
+0x00098200,
+0x280c0000,
+0x7d0e6214,
+0x41820000,
+0x00050806,
+0x7c080040,
+0x39000000,
+0x41810000,
+0x00050809,
+0x38ccfff8,
+0x91350000,
+0x00098200,
+0x0006000f,
+0x7c083040,
+0x7c0944ae,
+0x7c0e45ae,
+0x39080008,
+0x40820000,
+0x0005080f,
+0x00060010,
+0x72000000,
+0x00090200,
+0x7dd47378,
+0x398c0008,
+0x00060011,
+0x92010020,
+0x7d936378,
+0x41820000,
+0x00050817,
+0x48000000,
+0x00050018,
+0x00060012,
+0x7e439378,
+0x7ea4ab78,
+0x48000001,
+0x0003000f,
+0x00060013,
+0x7e439378,
+0x558400fe,
+0x000900ab,
+0x48000001,
+0x00030000,
+0x38600000,
+0x48000000,
+0x0005000e,
+0x0006005c,
+0x80120000,
+0x00098200,
+0x00000000,
+0x7d0e5a14,
+0x91d20000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x91120000,
+0x00098200,
+0x38600000,
+0x00098200,
+0x41820000,
+0x00050849,
+0x93120000,
+0x00098200,
+0x98720000,
+0x00098200,
+0x48000000,
+0x0005001a,
+0x0006005d,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x00000000,
+0x40820000,
+0x00050802,
+0x7c68fe70,
+0x7d091a78,
+0x7c684851,
+0x41800000,
+0x00050801,
+0x0006005e,
+0x820efff8,
+0x3a8efff8,
+0x92cefff8,
+0x906efffc,
+0x48000000,
+0x0005005f,
+0x0006000b,
+0x3ca041e0,
+0x38600000,
+0x48000000,
+0x0005004e,
+0x0006000c,
+0x00000000,
+0x40800000,
+0x00050849,
+0x54a5007e,
+0x0006004e,
+0x820efff8,
+0x90aefff8,
+0x3a8efff8,
+0x906efffc,
+0x0006005f,
+0x39800000,
+0x00098200,
+0x0006004a,
+0x72000000,
+0x00090200,
+0x7d936378,
+0x40a20000,
+0x00050818,
+0x80f0fffc,
+0x54ea5d78,
+0x0006000f,
+0x7c0a6040,
+0x54e0dd78,
+0x41810000,
+0x00050806,
+0x80f00000,
+0x3a100004,
+0x7dc0a050,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00060010,
+0x390cfff8,
+0x398c0008,
+0x7ef4412e,
+0x48000000,
+0x0005000f,
+0x00000000,
+0x00060060,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x41a20000,
+0x0005084e,
+0x54a9657e,
+0x40800000,
+0x00050849,
+0x3529fc01,
+0x2889001f,
+0x2009001f,
+0x41800000,
+0x00050803,
+0x54a85800,
+0x000900a1,
+0x5466057e,
+0x000900ab,
+0x65088000,
+0x39290001,
+0x7d083378,
+0x54645800,
+0x000900a1,
+0x40840000,
+0x00050804,
+0x7d064830,
+0x7d030430,
+0x7cc62378,
+0x7ca9fe70,
+0x7cc84838,
+0x3008ffff,
+0x7d004110,
+0x7c634214,
+0x7c634a78,
+0x7c691850,
+0x48000000,
+0x0005005e,
+0x0006000d,
+0x7d252a14,
+0x7ca8fe70,
+0x7c694b78,
+0x7d284038,
+0x21280000,
+0x7c631910,
+0x48000000,
+0x0005005e,
+0x0006000e,
+0x6d088000,
+0x7ca9fe70,
+0x7d082378,
+0x7d084b39,
+0x4c423202,
+0x3c608000,
+0x41a20000,
+0x0005085e,
+0x0006000f,
+0xc82e0000,
+0x48000001,
+0x00030010,
+0x48000000,
+0x0005004c,
+0x00060061,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x41a20000,
+0x0005084e,
+0x54a9657e,
+0x40800000,
+0x00050849,
+0x3529fc01,
+0x2889001f,
+0x2009001f,
+0x41800000,
+0x00050803,
+0x00000000,
+0x54a85800,
+0x000900a1,
+0x5466057e,
+0x000900ab,
+0x65088000,
+0x39290001,
+0x7d083378,
+0x54645800,
+0x000900a1,
+0x40840000,
+0x00050804,
+0x7d064830,
+0x7d030430,
+0x7cc62378,
+0x7ca9fe70,
+0x7cc84878,
+0x3008ffff,
+0x7d004110,
+0x7c634615,
+0x7c634a78,
+0x7c691850,
+0x40830000,
+0x0005085e,
+0x7c000400,
+0x40a10000,
+0x0005085e,
+0x3ca041e0,
+0x38600000,
+0x48000000,
+0x0005004e,
+0x0006000d,
+0x7d252a14,
+0x7ca8fe70,
+0x7c694b78,
+0x7d284078,
+0x3128ffff,
+0x7c694110,
+0x48000000,
+0x0005005e,
+0x0006000e,
+0x6d088000,
+0x7ca9fe70,
+0x7d084b39,
+0x4c423202,
+0x3c608000,
+0x41a20000,
+0x0005085e,
+0x0006000f,
+0xc82e0000,
+0x48000001,
+0x00030011,
+0x48000000,
+0x0005004c,
+0x00000000,
+0x00060060,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030010,
+0x48000000,
+0x0005004c,
+0x00060061,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030011,
+0x48000000,
+0x0005004c,
+0x00000000,
+0x00060062,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030012,
+0x48000000,
+0x0005004c,
+0x00060063,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030013,
+0x48000000,
+0x0005004c,
+0x00060064,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030014,
+0x48000000,
+0x0005004c,
+0x00060065,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030015,
+0x48000000,
+0x0005004c,
+0x00060066,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x00000000,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030016,
+0x48000000,
+0x0005004c,
+0x00060067,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030017,
+0x48000000,
+0x0005004c,
+0x00060068,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030018,
+0x48000000,
+0x0005004c,
+0x00060069,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030019,
+0x48000000,
+0x0005004c,
+0x0006006a,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x0003001a,
+0x00000000,
+0x48000000,
+0x0005004c,
+0x0006006b,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x0003001b,
+0x48000000,
+0x0005004c,
+0x0006006c,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x0003001c,
+0x48000000,
+0x0005004c,
+0x0006006d,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x0003001d,
+0x48000000,
+0x0005004c,
+0x0006006e,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x0003001e,
+0x48000000,
+0x0005004c,
+0x0006006f,
+0x00000000,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0xc84e0008,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x0003001f,
+0x48000000,
+0x0005004c,
+0x00060070,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0xc84e0008,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030020,
+0x48000000,
+0x0005004c,
+0x00060071,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0xc84e0008,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0x48000001,
+0x00030021,
+0x48000000,
+0x0005004c,
+0x00060072,
+0x00060073,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0xc84a0000,
+0x00098200,
+0x00000000,
+0xfc2100b2,
+0x48000000,
+0x0005004c,
+0x00000000,
+0x00060074,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0x806e000c,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40820000,
+0x00050849,
+0x00000000,
+0x00060074,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0xc84e0008,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0xfc40101e,
+0xd8410010,
+0x80610014,
+0x00000000,
+0x48000001,
+0x00030022,
+0x48000000,
+0x0005004c,
+0x00060075,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x38710000,
+0x00098200,
+0x820efff8,
+0x48000001,
+0x00030023,
+0x81110000,
+0x00098200,
+0x3a8efff8,
+0x00000000,
+0x6d088000,
+0x9101000c,
+0xc8410008,
+0xfc42f828,
+0x00000000,
+0xd8340000,
+0x39800000,
+0x00098200,
+0x00000000,
+0x92d40008,
+0x9114000c,
+0x00000000,
+0xd8540008,
+0x00000000,
+0x48000000,
+0x0005004a,
+0x00060076,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x386efff8,
+0x820efff8,
+0x48000001,
+0x00030024,
+0x3a8efff8,
+0xd82e0000,
+0x39800000,
+0x00098200,
+0x48000000,
+0x0005004a,
+0x00000000,
+0x00060077,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x390e0008,
+0x7d2e5a14,
+0x40820000,
+0x00050804,
+0x0006000b,
+0x80c80000,
+0x7c884840,
+0x80880004,
+0x40840000,
+0x0005085e,
+0x7c06b040,
+0x6c608000,
+0x6c868000,
+0x40820000,
+0x00050803,
+0x7cc60010,
+0x7c000110,
+0x7cc60038,
+0x7c662214,
+0x39080008,
+0x48000000,
+0x0005000b,
+0x0006000d,
+0x40800000,
+0x00050849,
+0x6c638000,
+0x9061000c,
+0xc8210008,
+0xfc21f828,
+0xc8480000,
+0x48000000,
+0x00050006,
+0x0006000e,
+0xc82e0000,
+0x40800000,
+0x00050849,
+0x0006000f,
+0x80c80000,
+0x7c884840,
+0xc8480000,
+0x40840000,
+0x0005084c,
+0x7c06b040,
+0x40800000,
+0x00050807,
+0x00060010,
+0xfc011028,
+0x39080008,
+0xfc2008ae,
+0x48000000,
+0x0005000f,
+0x00060011,
+0x80880004,
+0x40820000,
+0x00050849,
+0x6c848000,
+0x9081000c,
+0xc8410008,
+0xfc42f828,
+0x48000000,
+0x00050010,
+0x00000000,
+0x00060077,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x39000008,
+0x0006000b,
+0x7c8e402e,
+0x7c4e44ae,
+0x7c885840,
+0x7c04b040,
+0x40840000,
+0x0005084c,
+0x40800000,
+0x00050849,
+0xfc011028,
+0x39080008,
+0xfc2008ae,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x00060078,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x390e0008,
+0x7d2e5a14,
+0x40820000,
+0x00050804,
+0x0006000b,
+0x80c80000,
+0x7c884840,
+0x80880004,
+0x40840000,
+0x0005085e,
+0x7c06b040,
+0x6c608000,
+0x6c868000,
+0x40820000,
+0x00050803,
+0x7cc60010,
+0x7c000110,
+0x7cc60078,
+0x7c662214,
+0x39080008,
+0x48000000,
+0x0005000b,
+0x0006000d,
+0x40800000,
+0x00050849,
+0x6c638000,
+0x9061000c,
+0xc8210008,
+0xfc21f828,
+0xc8480000,
+0x48000000,
+0x00050006,
+0x0006000e,
+0xc82e0000,
+0x40800000,
+0x00050849,
+0x0006000f,
+0x80c80000,
+0x7c884840,
+0xc8480000,
+0x40840000,
+0x0005084c,
+0x7c06b040,
+0x40800000,
+0x00050807,
+0x00060010,
+0xfc011028,
+0x39080008,
+0xfc20106e,
+0x48000000,
+0x0005000f,
+0x00060011,
+0x80880004,
+0x40820000,
+0x00050849,
+0x6c848000,
+0x9081000c,
+0xc8410008,
+0xfc42f828,
+0x48000000,
+0x00050010,
+0x00000000,
+0x00060078,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x39000008,
+0x0006000b,
+0x7c8e402e,
+0x7c4e44ae,
+0x7c885840,
+0x7c04b040,
+0x40840000,
+0x0005084c,
+0x40800000,
+0x00050849,
+0xfc011028,
+0x39080008,
+0xfc20106e,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x00060079,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x80630000,
+0x00098200,
+0x48000000,
+0x0005005e,
+0x0006007a,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x40820000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x80030000,
+0x00098200,
+0x00000000,
+0x88630000,
+0x00098200,
+0x39800000,
+0x00098200,
+0x820efff8,
+0x28000000,
+0x3a8efff8,
+0x41a20000,
+0x0005084a,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x89030000,
+0x00098200,
+0x30c0ffff,
+0x7d860110,
+0x9101000c,
+0x398c0001,
+0xc8010008,
+0x3a8efff8,
+0x820efff8,
+0xfc00f028,
+0x558c1800,
+0x000900a1,
+0xd8140000,
+0x48000000,
+0x0005004a,
+0x00000000,
+0x0006007b,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x00050853,
+0x280b0008,
+0x80ae0000,
+0x00000000,
+0x800e0004,
+0x40820000,
+0x00050849,
+0x7c05b040,
+0x40820000,
+0x00050849,
+0x388e0007,
+0x00000000,
+0xc82e0000,
+0x40820000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0xfc20081e,
+0xd8210010,
+0x80010014,
+0x38810017,
+0x00000000,
+0x38a00001,
+0x280000ff,
+0x41810000,
+0x00050849,
+0x0006007c,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x92010020,
+0x48000001,
+0x00030025,
+0x81d20000,
+0x00098200,
+0x38a00000,
+0x00098200,
+0x48000000,
+0x0005004e,
+0x0006007d,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x00050853,
+0x280b0010,
+0x80ae0010,
+0x00000000,
+0xc80e0010,
+0x00000000,
+0x800e0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x808e0008,
+0x00000000,
+0x810e000c,
+0x00000000,
+0xc82e0008,
+0x00000000,
+0x3920ffff,
+0x41820000,
+0x00050801,
+0x00000000,
+0x7c05b040,
+0x812e0014,
+0x40820000,
+0x00050849,
+0x0006000b,
+0x7c04b040,
+0x40820000,
+0x00050849,
+0x00000000,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0xfc00001e,
+0xd8010010,
+0x81210014,
+0x0006000b,
+0x7c04b040,
+0x40800000,
+0x00050849,
+0x00000000,
+0x2c000000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x00000000,
+0xfc20081e,
+0xd8210010,
+0x81010014,
+0x00000000,
+0x80030000,
+0x00098200,
+0x7c004840,
+0x38c90001,
+0x41800000,
+0x00050805,
+0x0006000c,
+0x2c080000,
+0x7cc80214,
+0x40810000,
+0x00050807,
+0x0006000d,
+0x7ca84850,
+0x38830000,
+0x00098200,
+0x7ca0fe70,
+0x38a50001,
+0x7c844214,
+0x7ca50078,
+0x48000000,
+0x0005007c,
+0x0006000f,
+0x7c890050,
+0x7c84fe70,
+0x7cc62078,
+0x7d203214,
+0x48000000,
+0x0005000c,
+0x00060011,
+0x30a8ffff,
+0x7ca52910,
+0x7cc4fe70,
+0x7cc62878,
+0x7cc82078,
+0x39080001,
+0x48000000,
+0x0005000d,
+0x0006007e,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x00050853,
+0x280b0010,
+0x800e0000,
+0x806e0004,
+0x80ce0008,
+0x00000000,
+0x80ae000c,
+0x00000000,
+0xc84e0008,
+0x00000000,
+0x41800000,
+0x00050849,
+0x2c000000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x00000000,
+0x7c06b040,
+0x40820000,
+0x00050849,
+0x00000000,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0xfc40101e,
+0xd8410010,
+0x80a10014,
+0x00000000,
+0x80030000,
+0x00098200,
+0x2c050000,
+0x81110000,
+0x00098200,
+0x40810000,
+0x00050802,
+0x28000001,
+0x3925ffff,
+0x41800000,
+0x00050802,
+0x7c882840,
+0x40820000,
+0x00050849,
+0x88030000,
+0x00098200,
+0x80910000,
+0x00098200,
+0x41840000,
+0x00050849,
+0x0006000b,
+0x28090000,
+0x7c0449ae,
+0x3929ffff,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x0005007c,
+0x0006000c,
+0x38710000,
+0x00098200,
+0x38a00000,
+0x00098200,
+0x48000000,
+0x0005004e,
+0x0006007f,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x00050853,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x00000000,
+0x40820000,
+0x00050849,
+0x80a30000,
+0x00098200,
+0x38630000,
+0x00098200,
+0x80910000,
+0x00098200,
+0x39200000,
+0x7c082840,
+0x38c5ffff,
+0x41800000,
+0x00050849,
+0x0006000b,
+0x2c060000,
+0x7d0348ae,
+0x41a00000,
+0x0005087c,
+0x7d0431ae,
+0x38c6ffff,
+0x39290001,
+0x48000000,
+0x0005000b,
+0x00060080,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x00050853,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x80a30000,
+0x00098200,
+0x38630000,
+0x00098200,
+0x80910000,
+0x00098200,
+0x7c082840,
+0x39200000,
+0x41800000,
+0x00050849,
+0x0006000b,
+0x7c092840,
+0x7d0348ae,
+0x40a00000,
+0x0005087c,
+0x00000000,
+0x3808ffbf,
+0x69060020,
+0x3000ffe6,
+0x7cc63110,
+0x70c60020,
+0x7d083278,
+0x7d0449ae,
+0x39290001,
+0x48000000,
+0x0005000b,
+0x00060081,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x00050853,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x80a30000,
+0x00098200,
+0x38630000,
+0x00098200,
+0x80910000,
+0x00098200,
+0x7c082840,
+0x39200000,
+0x41800000,
+0x00050849,
+0x0006000b,
+0x7c092840,
+0x7d0348ae,
+0x40a00000,
+0x0005087c,
+0x3808ff9f,
+0x69060020,
+0x3000ffe6,
+0x7cc63110,
+0x70c60020,
+0x7d083278,
+0x7d0449ae,
+0x39290001,
+0x48000000,
+0x0005000b,
+0x00060082,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x2c050000,
+0x00098200,
+0x40820000,
+0x00050849,
+0x48000001,
+0x00030026,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x00060083,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x00000000,
+0x00060083,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xd8210010,
+0x80610014,
+0x00000000,
+0x390e0008,
+0x7d2e5a14,
+0x0006000b,
+0x80c80000,
+0x7c884840,
+0x00000000,
+0x80880004,
+0x00000000,
+0xc8280000,
+0x00000000,
+0x40a40000,
+0x0005085e,
+0x7c06b040,
+0x00000000,
+0x40820001,
+0x00050885,
+0x00000000,
+0xfc21f02a,
+0x40800000,
+0x00050849,
+0xd8210010,
+0x80810014,
+0x00000000,
+0x7c632038,
+0x39080008,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x00060086,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x00000000,
+0x00060086,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xd8210010,
+0x80610014,
+0x00000000,
+0x390e0008,
+0x7d2e5a14,
+0x0006000b,
+0x80c80000,
+0x7c884840,
+0x00000000,
+0x80880004,
+0x00000000,
+0xc8280000,
+0x00000000,
+0x40a40000,
+0x0005085e,
+0x7c06b040,
+0x00000000,
+0x40820001,
+0x00050885,
+0x00000000,
+0xfc21f02a,
+0x40800000,
+0x00050849,
+0xd8210010,
+0x80810014,
+0x00000000,
+0x7c632378,
+0x39080008,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x00060087,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x00000000,
+0x00060087,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xd8210010,
+0x80610014,
+0x00000000,
+0x390e0008,
+0x7d2e5a14,
+0x0006000b,
+0x80c80000,
+0x7c884840,
+0x00000000,
+0x80880004,
+0x00000000,
+0xc8280000,
+0x00000000,
+0x40a40000,
+0x0005085e,
+0x7c06b040,
+0x00000000,
+0x40820001,
+0x00050885,
+0x00000000,
+0xfc21f02a,
+0x40800000,
+0x00050849,
+0xd8210010,
+0x80810014,
+0x00000000,
+0x7c632278,
+0x39080008,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x00060088,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x00000000,
+0x00060088,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xd8210010,
+0x80610014,
+0x00000000,
+0x5460403e,
+0x5060c00e,
+0x5060c42e,
+0x7c030378,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x00060089,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x00000000,
+0x00060089,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xd8210010,
+0x80610014,
+0x00000000,
+0x7c6318f8,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x0006008a,
+0x280b0010,
+0x80ae0000,
+0x80ce0008,
+0x806e0004,
+0x808e000c,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x7c06b040,
+0x40820000,
+0x00050849,
+0x00000000,
+0x0006008a,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0xc84e0008,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xfc42f02a,
+0xd8210010,
+0x80610014,
+0xd8410010,
+0x80810014,
+0x00000000,
+0x548406fe,
+0x7c632030,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x0006008b,
+0x280b0010,
+0x80ae0000,
+0x80ce0008,
+0x806e0004,
+0x808e000c,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x7c06b040,
+0x40820000,
+0x00050849,
+0x00000000,
+0x0006008b,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0xc84e0008,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xfc42f02a,
+0xd8210010,
+0x80610014,
+0xd8410010,
+0x80810014,
+0x00000000,
+0x548406fe,
+0x7c632430,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x0006008c,
+0x280b0010,
+0x80ae0000,
+0x80ce0008,
+0x806e0004,
+0x808e000c,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x7c06b040,
+0x40820000,
+0x00050849,
+0x00000000,
+0x0006008c,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0xc84e0008,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xfc42f02a,
+0xd8210010,
+0x80610014,
+0xd8410010,
+0x80810014,
+0x00000000,
+0x548406fe,
+0x7c632630,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x0006008d,
+0x280b0010,
+0x80ae0000,
+0x80ce0008,
+0x806e0004,
+0x808e000c,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x7c06b040,
+0x40820000,
+0x00050849,
+0x00000000,
+0x0006008d,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0xc84e0008,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xfc42f02a,
+0xd8210010,
+0x80610014,
+0xd8410010,
+0x80810014,
+0x00000000,
+0x5c63203e,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x0006008e,
+0x280b0010,
+0x80ae0000,
+0x80ce0008,
+0x806e0004,
+0x808e000c,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x7c06b040,
+0x40820000,
+0x00050849,
+0x00000000,
+0x0006008e,
+0x280b0010,
+0x80ae0000,
+0xc82e0000,
+0x80ce0008,
+0xc84e0008,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0x7c06b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xfc42f02a,
+0xd8210010,
+0x80610014,
+0xd8410010,
+0x80810014,
+0x00000000,
+0x7c8400d0,
+0x5c63203e,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x0006008f,
+0x280b0008,
+0x80ae0000,
+0x806e0004,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40820001,
+0x00050884,
+0x00000000,
+0x0006008f,
+0x280b0008,
+0x80ae0000,
+0xc82e0000,
+0x41800000,
+0x00050849,
+0x7c05b040,
+0x40800000,
+0x00050849,
+0xfc21f02a,
+0xd8210010,
+0x80610014,
+0x00000000,
+0x48000000,
+0x0005005e,
+0x00000000,
+0x0006005e,
+0x6c638000,
+0x9061000c,
+0xc8210008,
+0xfc21f828,
+0x00000000,
+0x0006004c,
+0x820efff8,
+0x3a8efff8,
+0xd82efff8,
+0x48000000,
+0x0005005f,
+0x00060084,
+0x00000000,
+0xc82e0000,
+0x41810000,
+0x00050849,
+0xfc21f02a,
+0xd8210010,
+0x80610014,
+0x4e800020,
+0x00000000,
+0x00060085,
+0x00000000,
+0xc8280000,
+0x41810000,
+0x00050849,
+0xfc21f02a,
+0xd8210010,
+0x80810014,
+0x4e800020,
+0x00000000,
+0x00060049,
+0x80ca0000,
+0x00098200,
+0x7d0e5a14,
+0x820efff8,
+0x38080000,
+0x00098200,
+0x81320000,
+0x00098200,
+0x92010020,
+0x7c004840,
+0x91d20000,
+0x00098200,
+0x91120000,
+0x00098200,
+0x7e439378,
+0x41810000,
+0x00050805,
+0x7cc903a6,
+0x4e800421,
+0x81d20000,
+0x00098200,
+0x2c030000,
+0x546c1800,
+0x000900a1,
+0x3a8efff8,
+0x41810000,
+0x0005084a,
+0x0006000b,
+0x80120000,
+0x00098200,
+0x814efffc,
+0x7d6e0050,
+0x40820000,
+0x00050829,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00060029,
+0x72000000,
+0x00090200,
+0x56080038,
+0x40820000,
+0x00050803,
+0x80f0fffc,
+0x54e8dd78,
+0x0006000d,
+0x7d287050,
+0x48000000,
+0x00050024,
+0x0006000f,
+0x38800000,
+0x00098200,
+0x48000001,
+0x00030000,
+0x00000000,
+0x81d20000,
+0x00098200,
+0x7c000000,
+0x48000000,
+0x0005000b,
+0x00060053,
+0x7ea802a6,
+0x91d20000,
+0x00098200,
+0x7c0e5a14,
+0x92010020,
+0x90120000,
+0x00098200,
+0x7e439378,
+0x48000001,
+0x00030027,
+0x81d20000,
+0x00098200,
+0x7ea803a6,
+0x80120000,
+0x00098200,
+0x7d6e0050,
+0x814efffc,
+0x4e800020,
+0x00060090,
+0x00000000,
+0x88d10000,
+0x00098200,
+0x70c00000,
+0x00090200,
+0x40820000,
+0x00050805,
+0x81310000,
+0x00098200,
+0x70c00000,
+0x00090200,
+0x40820000,
+0x00050801,
+0x3929ffff,
+0x70c00000,
+0x00090200,
+0x41a20000,
+0x00050801,
+0x91310000,
+0x00098200,
+0x48000000,
+0x00050001,
+0x00000000,
+0x00060091,
+0x88d10000,
+0x00098200,
+0x70c00000,
+0x00090200,
+0x41820000,
+0x00050801,
+0x0006000f,
+0x39080000,
+0x00098200,
+0x7c11402e,
+0x7c0903a6,
+0x4e800420,
+0x00060092,
+0x88d10000,
+0x00098200,
+0x81310000,
+0x00098200,
+0x70c00000,
+0x00090200,
+0x54c007c0,
+0x000900ab,
+0x40820000,
+0x0005080f,
+0x2c800000,
+0x3529ffff,
+0x41860000,
+0x0005080f,
+0x91310000,
+0x00098200,
+0x41820000,
+0x00050801,
+0x40840000,
+0x0005080f,
+0x0006000b,
+0x7e439378,
+0x9261001c,
+0x7e048378,
+0x91d20000,
+0x00098200,
+0x48000001,
+0x00030028,
+0x0006000d,
+0x81d20000,
+0x00098200,
+0x0006000e,
+0x00000000,
+0x80f0fffc,
+0x54e815ba,
+0x54ea5d78,
+0x39080000,
+0x00098200,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00060093,
+0x3a100004,
+0x826affec,
+0x48000000,
+0x0005000e,
+0x00060094,
+0x00000000,
+0x810efffc,
+0x38710000,
+0x00098200,
+0x92010020,
+0x81080000,
+0x00098200,
+0x7e048378,
+0x92510000,
+0x00098200,
+0x89080000,
+0x00098200,
+0x91d20000,
+0x00098200,
+0x55081800,
+0x000900a1,
+0x7d0e4214,
+0x91120000,
+0x00098200,
+0x48000001,
+0x00030029,
+0x48000000,
+0x0005000d,
+0x00000000,
+0x00060095,
+0x7e048378,
+0x00000000,
+0x48000000,
+0x00050001,
+0x00000000,
+0x00060096,
+0x00000000,
+0x62040001,
+0x0006000b,
+0x00000000,
+0x7c0e5a14,
+0x92010020,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x7e8ea050,
+0x90120000,
+0x00098200,
+0x48000001,
+0x0003002a,
+0x81d20000,
+0x00098200,
+0x80120000,
+0x00098200,
+0x93010020,
+0x7d6e0050,
+0x7e8ea214,
+0x814efffc,
+0x80f0fffc,
+0x7c6903a6,
+0x4e800420,
+0x00060097,
+0x00000000,
+0x38210000,
+0x00098200,
+0xbc410000,
+0x00098200,
+0x3a3f0000,
+0x00098200,
+0x38800000,
+0x00098200,
+0x80610000,
+0x00098200,
+0x90910000,
+0x00098200,
+0xd8010000,
+0x00098200,
+0xd8210000,
+0x00098200,
+0xd8410000,
+0x00098200,
+0xd8610000,
+0x00098200,
+0x90610000,
+0x7c000400,
+0xd8810000,
+0x00098200,
+0xd8a10000,
+0x00098200,
+0xd8c10000,
+0x00098200,
+0xd8e10000,
+0x00098200,
+0x38810000,
+0x00098200,
+0xd9010000,
+0x00098200,
+0xd9210000,
+0x00098200,
+0xd9410000,
+0x00098200,
+0xd9610000,
+0x00098200,
+0x90810000,
+0x00098200,
+0xd9810000,
+0x00098200,
+0xd9a10000,
+0x00098200,
+0x00000000,
+0xd9c10000,
+0x00098200,
+0xd9e10000,
+0x00098200,
+0x7ca802a6,
+0x39000000,
+0xda010000,
+0x00098200,
+0xda210000,
+0x00098200,
+0xda410000,
+0x00098200,
+0xda610000,
+0x00098200,
+0x91010000,
+0x00098200,
+0xda810000,
+0x00098200,
+0xdaa10000,
+0x00098200,
+0xdac10000,
+0x00098200,
+0xdae10000,
+0x00098200,
+0xa0c50002,
+0xdb010000,
+0x00098200,
+0xdb210000,
+0x00098200,
+0xdb410000,
+0x00098200,
+0xdb610000,
+0x00098200,
+0x82510000,
+0x00098200,
+0xdb810000,
+0x00098200,
+0xdba10000,
+0x00098200,
+0xdbc10000,
+0x00098200,
+0xdbe10000,
+0x00098200,
+0x7ca50050,
+0x81d10000,
+0x00098200,
+0x54a500be,
+0x000900ab,
+0x00000000,
+0x92510000,
+0x00098200,
+0x38a5fffe,
+0x91110000,
+0x00098200,
+0x90d10000,
+0x00098200,
+0x91d20000,
+0x00098200,
+0x38710000,
+0x00098200,
+0x90b10000,
+0x00098200,
+0x38810010,
+0x48000001,
+0x0003002b,
+0x81120000,
+0x00098200,
+0x81210000,
+0x81d20000,
+0x00098200,
+0x5501003a,
+0x82010020,
+0x91210000,
+0x92410024,
+0x48000000,
+0x00050001,
+0x00000000,
+0x00060098,
+0x00000000,
+0x82410024,
+0x3a3f0000,
+0x00098200,
+0x0006000b,
+0x2c030000,
+0x41800000,
+0x00050803,
+0x810efffc,
+0x54731800,
+0x000900a1,
+0x39200000,
+0x9261001c,
+0x81080000,
+0x00098200,
+0x91310000,
+0x00098200,
+0x81e80000,
+0x00098200,
+0x3ac00000,
+0x00098200,
+0x3cc059c0,
+0x90c10010,
+0x3b000000,
+0x60c60004,
+0xc3c10010,
+0x90c10010,
+0x3c004338,
+0x3ae00000,
+0x00098200,
+0x90010008,
+0xc3e10010,
+0x80f00000,
+0x3a100004,
+0x92f10000,
+0x00098200,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7c0903a6,
+0x28080000,
+0x00090200,
+0x40800000,
+0x00050802,
+0x54ea5d78,
+0x54ec9b78,
+0x54eb9d78,
+0x4e800420,
+0x0006000c,
+0x3973fff8,
+0x7e947214,
+0x4e800420,
+0x0006000d,
+0x7c8300d0,
+0x7e439378,
+0x48000001,
+0x0003002c,
+0x00000000,
+0x00060099,
+0x48000000,
+0x00030010,
+0x0006009a,
+0x48000000,
+0x00030011,
+0x0006009b,
+0x00000000,
+0x48000000,
+0x0003002d,
+0x00000000,
+0x0006009c,
+0x7c0327d7,
+0x41830000,
+0x00050801,
+0x7c652279,
+0x7c0021d6,
+0x7c601850,
+0x4c800020,
+0x2c030000,
+0x4d820020,
+0x7c632214,
+0x4e800020,
+0x0006000b,
+0x2c040000,
+0x38600000,
+0x4d820020,
+0x7c000400,
+0x4e800020,
+0x0006009d,
+0x28030001,
+0x41820000,
+0x00050801,
+0x41810000,
+0x00050802,
+0xfc21102a,
+0x4e800020,
+0x0006000b,
+0xfc211028,
+0x4e800020,
+0x0006000c,
+0x28030003,
+0x41820000,
+0x00050801,
+0x41810000,
+0x00050802,
+0xfc2100b2,
+0x4e800020,
+0x0006000b,
+0xfc211024,
+0x4e800020,
+0x0006000c,
+0x28030005,
+0x41820000,
+0x00050801,
+0x41810000,
+0x00050802,
+0x9421ffe0,
+0xd9c10010,
+0xd9e10018,
+0x7c0802a6,
+0xfdc00890,
+0xfc211024,
+0x90010024,
+0xfde01090,
+0x48000001,
+0x00030010,
+0x80010024,
+0xfc2103f2,
+0x7c0803a6,
+0xfc2e0828,
+0xc9c10010,
+0xc9e10018,
+0x38210020,
+0x4e800020,
+0x0006000b,
+0x48000000,
+0x0003001f,
+0x0006000c,
+0x28030007,
+0x41820000,
+0x00050801,
+0x41810000,
+0x00050802,
+0xfc200850,
+0x4e800020,
+0x0006000b,
+0xfc200a10,
+0x4e800020,
+0x0006000c,
+0x00000000,
+0x28030009,
+0x41820000,
+0x00050809,
+0x41810000,
+0x00050802,
+0x48000000,
+0x00030020,
+0x0006000c,
+0x2803000b,
+0x41810000,
+0x00050809,
+0xfc011028,
+0x41820000,
+0x00050801,
+0xfc2008ae,
+0x4e800020,
+0x0006000b,
+0xfc20106e,
+0x4e800020,
+0x00060013,
+0x7c810808,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x0006009e,
+0x54630034,
+0x7c832050,
+0x3884001f,
+0x5484d97f,
+0x4d820020,
+0x7c8903a6,
+0x7c651b78,
+0x0006000b,
+0x7c00186c,
+0x38630020,
+0x42000000,
+0x0005080b,
+0x7c0004ac,
+0x7c8903a6,
+0x0006000b,
+0x7c002fac,
+0x38a50020,
+0x42000000,
+0x0005080b,
+0x4c00012c,
+0x4e800020,
+0x0006009f,
+0x00000000,
+0x9421fef0,
+0x91c10000,
+0x00098200,
+0xd9c10000,
+0x00098200,
+0x91e10000,
+0x00098200,
+0xd9e10000,
+0x00098200,
+0x92010000,
+0x00098200,
+0xda010000,
+0x00098200,
+0x7c0802a6,
+0x92210000,
+0x00098200,
+0xda210000,
+0x00098200,
+0x92410000,
+0x00098200,
+0xda410000,
+0x00098200,
+0x92610000,
+0x00098200,
+0xda610000,
+0x00098200,
+0x92810000,
+0x00098200,
+0xda810000,
+0x00098200,
+0x92a10000,
+0x00098200,
+0xdaa10000,
+0x00098200,
+0x92c10000,
+0x00098200,
+0xdac10000,
+0x00098200,
+0x90010114,
+0x92e10000,
+0x00098200,
+0xdae10000,
+0x00098200,
+0x93010000,
+0x00098200,
+0xdb010000,
+0x00098200,
+0x00000000,
+0x93210000,
+0x00098200,
+0xdb210000,
+0x00098200,
+0x7c000026,
+0x93410000,
+0x00098200,
+0xdb410000,
+0x00098200,
+0x93610000,
+0x00098200,
+0xdb610000,
+0x00098200,
+0x93810000,
+0x00098200,
+0xdb810000,
+0x00098200,
+0x93a10000,
+0x00098200,
+0xdba10000,
+0x00098200,
+0x93c10000,
+0x00098200,
+0xdbc10000,
+0x00098200,
+0x93e10000,
+0x00098200,
+0xdbe10000,
+0x00098200,
+0x90010034,
+0x820c0000,
+0x00098200,
+0x3a2c0000,
+0x00098200,
+0x91700000,
+0x00098200,
+0x90700000,
+0x00098200,
+0xd8300000,
+0x00098200,
+0x90900000,
+0x00098200,
+0xd8500000,
+0x00098200,
+0x90b00000,
+0x00098200,
+0x00000000,
+0xd8700000,
+0x00098200,
+0x90d00000,
+0x00098200,
+0xd8900000,
+0x00098200,
+0x90f00000,
+0x00098200,
+0xd8b00000,
+0x00098200,
+0x91100000,
+0x00098200,
+0xd8d00000,
+0x00098200,
+0x91300000,
+0x00098200,
+0xd8f00000,
+0x00098200,
+0x91500000,
+0x00098200,
+0xd9100000,
+0x00098200,
+0x38010000,
+0x00098200,
+0x90100000,
+0x00098200,
+0x7e038378,
+0x92010020,
+0x7c240b78,
+0x48000001,
+0x0003002e,
+0x81c30000,
+0x00098200,
+0x3ac00000,
+0x00098200,
+0x81630000,
+0x00098200,
+0x3cc059c0,
+0x3b000000,
+0x7c721b78,
+0x90c10010,
+0x814efffc,
+0x60c60004,
+0x3ae00000,
+0x00098200,
+0x38000000,
+0x00098200,
+0xc3c10010,
+0x90c10010,
+0x7d6e5850,
+0x90110000,
+0x00098200,
+0xc3e10010,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x00060028,
+0x00000000,
+0x82110000,
+0x00098200,
+0x91d20000,
+0x00098200,
+0x91520000,
+0x00098200,
+0x92500000,
+0x00098200,
+0x7e038378,
+0x7e84a378,
+0x48000001,
+0x0003002f,
+0x80700000,
+0x00098200,
+0xc8300000,
+0x00098200,
+0x80900000,
+0x00098200,
+0x48000000,
+0x0005001a,
+0x00000000,
+0x000600a0,
+0x00000000,
+0x81030000,
+0x00098200,
+0x7c0802a6,
+0x88830000,
+0x00098200,
+0x88a30000,
+0x00098200,
+0x7d0800d0,
+0x90010004,
+0x2c850000,
+0x7c290b78,
+0x3484ffff,
+0x7c21416e,
+0x4cc63042,
+0x91c9fffc,
+0x9069fff8,
+0x7d2e4b78,
+0x39030000,
+0x00098200,
+0x54841000,
+0x000900a1,
+0x41a00000,
+0x00050802,
+0x39210008,
+0x0006000b,
+0x7c08202e,
+0x7c09212e,
+0x3484fffc,
+0x40800000,
+0x0005080b,
+0x0006000c,
+0x40a60000,
+0x00050803,
+0xc8230000,
+0x00098200,
+0xc8430000,
+0x00098200,
+0xc8630000,
+0x00098200,
+0xc8830000,
+0x00098200,
+0xc8a30000,
+0x00098200,
+0xc8c30000,
+0x00098200,
+0xc8e30000,
+0x00098200,
+0xc9030000,
+0x00098200,
+0x0006000d,
+0x80030000,
+0x00098200,
+0x80830000,
+0x00098200,
+0x80a30000,
+0x00098200,
+0x00000000,
+0x80c30000,
+0x00098200,
+0x80e30000,
+0x00098200,
+0x7c0903a6,
+0x81030000,
+0x00098200,
+0x81230000,
+0x00098200,
+0x81430000,
+0x00098200,
+0x80630000,
+0x00098200,
+0x4e800421,
+0x810efff8,
+0x812efffc,
+0x800e0004,
+0x90680000,
+0x00098200,
+0xd8280000,
+0x00098200,
+0x90880000,
+0x00098200,
+0x7c0803a6,
+0x90a80000,
+0x00098200,
+0x7dc17378,
+0x90c80000,
+0x00098200,
+0x7d2e4b78,
+0x4e800020,
+0x00000000,
+0x00080000,
+0x00000000,
+0x7c14706e,
+0x3a100004,
+0x80940004,
+0x7d0c706e,
+0x8130fffc,
+0x7c00b040,
+0x80ac0004,
+0x552993ba,
+0x7c88b040,
+0x3d290000,
+0x00098200,
+0x40820000,
+0x00050807,
+0x40860000,
+0x00050808,
+0x7c042800,
+0x00000000,
+0x40800000,
+0x00050802,
+0x00000000,
+0x41800000,
+0x00050802,
+0x00000000,
+0x41810000,
+0x00050802,
+0x00000000,
+0x40810000,
+0x00050802,
+0x00000000,
+0x0006000b,
+0x7e104a14,
+0x0006000c,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00060011,
+0x41810000,
+0x00050835,
+0xc8140000,
+0x41850000,
+0x00050835,
+0x41840000,
+0x00050804,
+0x6ca58000,
+0x90a1000c,
+0xc8210008,
+0xfc21f828,
+0x48000000,
+0x00050005,
+0x00060012,
+0x41850000,
+0x00050835,
+0x6c848000,
+0x9081000c,
+0xc8010008,
+0xfc00f828,
+0x0006000e,
+0xc82c0000,
+0x0006000f,
+0xfc000800,
+0x00000000,
+0x40800000,
+0x0005080c,
+0x00000000,
+0x41800000,
+0x0005080c,
+0x00000000,
+0x4c001382,
+0x40800000,
+0x0005080c,
+0x00000000,
+0x4c001382,
+0x41800000,
+0x0005080c,
+0x00000000,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x7c0ea02e,
+0x3a100004,
+0x7c0ea4ae,
+0x7d0e602e,
+0x7c00b040,
+0x8130fffc,
+0x7c2e64ae,
+0x7c88b040,
+0x552993ba,
+0x40800000,
+0x00050835,
+0x3d290000,
+0x00098200,
+0x40840000,
+0x00050835,
+0xfc000800,
+0x00000000,
+0x40800000,
+0x00050801,
+0x00000000,
+0x41800000,
+0x00050801,
+0x00000000,
+0x4c001382,
+0x40800000,
+0x00050801,
+0x00000000,
+0x4c001382,
+0x41800000,
+0x00050801,
+0x00000000,
+0x7e104a14,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7c14706e,
+0x3a100004,
+0x80940004,
+0x7d0c706e,
+0x7c00b040,
+0x8130fffc,
+0x7c88b040,
+0x552993ba,
+0x80ac0004,
+0x4fa12b82,
+0x3d290000,
+0x00098200,
+0x00000000,
+0x409d0000,
+0x000508a1,
+0x00000000,
+0x409d0000,
+0x000508a2,
+0x00000000,
+0x7c14706e,
+0x81300000,
+0xc8140000,
+0x3a100004,
+0x7d0c706e,
+0x7c00b040,
+0x552993ba,
+0xc82c0000,
+0x7c88b040,
+0x3d290000,
+0x00098200,
+0x40800000,
+0x00050805,
+0x40840000,
+0x00050805,
+0xfc000800,
+0x00000000,
+0x40820000,
+0x00050801,
+0x7e104a14,
+0x00000000,
+0x41820000,
+0x00050801,
+0x7e104a14,
+0x00000000,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x0006000f,
+0x00000000,
+0x80940004,
+0x80ac0004,
+0x00000000,
+0x2f800000,
+0x00098200,
+0x2e880000,
+0x00098200,
+0x00000000,
+0x7c0600f8,
+0x7c004040,
+0x28860000,
+0x00090200,
+0x00000000,
+0x4fdeb382,
+0x00000000,
+0x2b060000,
+0x00090200,
+0x00000000,
+0x419e0000,
+0x0005083b,
+0x00000000,
+0x7e842840,
+0x4c222902,
+0x4c161342,
+0x4c42b202,
+0x7e158378,
+0x4c420b82,
+0x4c000b82,
+0x00000000,
+0x40820000,
+0x00050806,
+0x7e104a14,
+0x00060010,
+0x00000000,
+0x41820000,
+0x00050806,
+0x7e104a14,
+0x00060010,
+0x00000000,
+0x40800000,
+0x00050802,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000c,
+0x00000000,
+0x41800000,
+0x0005080b,
+0x00000000,
+0x41980000,
+0x0005080b,
+0x81240000,
+0x00098200,
+0x38c00000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080b,
+0x89290000,
+0x00098200,
+0x71290000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x7eb0ab78,
+0x48000000,
+0x0005003a,
+0x00000000,
+0x7c14706e,
+0x558c007e,
+0x000900ab,
+0x80d40004,
+0x81300000,
+0x218cfffc,
+0x3a100004,
+0x00000000,
+0x2c000000,
+0x00098200,
+0x00000000,
+0x7d0f602e,
+0x20000000,
+0x00098200,
+0x00000000,
+0x41820000,
+0x0005083b,
+0x00000000,
+0x7d064050,
+0x7c004378,
+0x552993ba,
+0x20000000,
+0x3d290000,
+0x00098200,
+0x7d084110,
+0x00000000,
+0x7d294078,
+0x00000000,
+0x7d294038,
+0x00000000,
+0x7e104a14,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7c14706e,
+0x3a100004,
+0x80940004,
+0x7d0c786e,
+0x7c00b040,
+0x8130fffc,
+0x7c88b040,
+0x552993ba,
+0x80ac0004,
+0x3d290000,
+0x00098200,
+0x00000000,
+0x000600a1,
+0x00000000,
+0x000600a2,
+0x00000000,
+0x40820000,
+0x00050807,
+0x40860000,
+0x00050808,
+0x7c042800,
+0x0006000e,
+0x00000000,
+0x000600a1,
+0x00000000,
+0x000600a2,
+0x00000000,
+0x7c0ea02e,
+0x3a100004,
+0x7c0ea4ae,
+0x8130fffc,
+0x7c2f64ae,
+0x552993ba,
+0x7c00b040,
+0x3d290000,
+0x00098200,
+0x40800000,
+0x00050803,
+0xfc000800,
+0x00000000,
+0x40820000,
+0x00050801,
+0x7e104a14,
+0x0006000b,
+0x00000000,
+0x0006000d,
+0x00000000,
+0x41820000,
+0x00050802,
+0x0006000b,
+0x00000000,
+0x0006000d,
+0x00000000,
+0x7e104a14,
+0x0006000c,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x0006000d,
+0x2c000000,
+0x00098200,
+0x41820000,
+0x0005083b,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x00060011,
+0x40800000,
+0x0005080d,
+0xc8140000,
+0x41840000,
+0x00050801,
+0x6ca58000,
+0x90a1000c,
+0xc8210008,
+0xfc21f828,
+0x48000000,
+0x00050002,
+0x00060012,
+0x6c848000,
+0x9081000c,
+0xc8010008,
+0xfc00f828,
+0x0006000b,
+0xc82c0000,
+0x0006000c,
+0xfc000800,
+0x48000000,
+0x0005000e,
+0x00000000,
+0x7c0ea02e,
+0x558800fe,
+0x000900ab,
+0x81300000,
+0x7d0840f8,
+0x3a100004,
+0x00000000,
+0x2c000000,
+0x00098200,
+0x00000000,
+0x7c080050,
+0x00000000,
+0x41820000,
+0x0005083b,
+0x00000000,
+0x552993ba,
+0x3000ffff,
+0x3d290000,
+0x00098200,
+0x7d084110,
+0x00000000,
+0x7d294038,
+0x00000000,
+0x7d294078,
+0x00000000,
+0x7e104a14,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7c0e602e,
+0x80f00000,
+0x3a100004,
+0x00000000,
+0x20000000,
+0x00098200,
+0x54e993ba,
+0x7d084110,
+0x3d290000,
+0x00098200,
+0x00000000,
+0x7d294078,
+0x00000000,
+0x7d294038,
+0x00000000,
+0x7e104a14,
+0x00000000,
+0x39000000,
+0x00098200,
+0x7c0e64ae,
+0x7c004040,
+0x00000000,
+0x40800000,
+0x00050801,
+0x00000000,
+0x41800000,
+0x00050801,
+0x00000000,
+0x3e100000,
+0x00098200,
+0x54e993ba,
+0x7c0ea5ae,
+0x7e104a14,
+0x0006000b,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x7c0e64ae,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x7c0e602e,
+0x21000000,
+0x00098200,
+0x7c004114,
+0x7c0ea12e,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7d0c706e,
+0x800c0004,
+0x7c08b040,
+0x00000000,
+0x40820000,
+0x00050805,
+0x7c0004d1,
+0x41830000,
+0x00050804,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x7ed4716e,
+0x90140004,
+0x0006000d,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x7c000400,
+0x40a10000,
+0x0005080b,
+0x3d0041e0,
+0x38000000,
+0x48000000,
+0x00050007,
+0x00000000,
+0x0006000f,
+0x40800000,
+0x0005083e,
+0x6d088000,
+0x00060011,
+0x80f00000,
+0x3a100004,
+0x7d14716e,
+0x90140004,
+0x00000000,
+0x48000000,
+0x0005000d,
+0x00000000,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7c0c706e,
+0x806c0004,
+0x2c000000,
+0x00098200,
+0x40820000,
+0x00050802,
+0x80630000,
+0x00098200,
+0x0006000b,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x7ed4716e,
+0x90740004,
+0x00000000,
+0x9061000c,
+0xc8010008,
+0xfc00f028,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x00000000,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000c,
+0x2c000000,
+0x00098200,
+0x40820000,
+0x00050843,
+0x00000000,
+0x81230000,
+0x00098200,
+0x28090000,
+0x40820000,
+0x00050809,
+0x0006000d,
+0x00000000,
+0x00060044,
+0x48000001,
+0x00030026,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x00060013,
+0x88090000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x40820000,
+0x0005080d,
+0x48000000,
+0x00050043,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x808a0004,
+0x7c08b040,
+0x806b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b706e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7c89b040,
+0x40820000,
+0x00050805,
+0x40860000,
+0x00050805,
+0x7c632615,
+0x41830000,
+0x00050804,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x7ed4716e,
+0x90740004,
+0x0006000c,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x7c000400,
+0x40a10000,
+0x0005080b,
+0x00000000,
+0x48000000,
+0x00050041,
+0x00000000,
+0x48000000,
+0x0005003d,
+0x00000000,
+0x48000000,
+0x00050042,
+0x00000000,
+0x0006000f,
+0x00000000,
+0xc9ea0000,
+0x4c002202,
+0xc9cb0000,
+0x00000000,
+0xc9ca0000,
+0x4c002202,
+0xc9eb0000,
+0x00000000,
+0x40800000,
+0x00050841,
+0x00000000,
+0x40800000,
+0x0005083d,
+0x00000000,
+0x40800000,
+0x00050842,
+0x00000000,
+0xfc0e782a,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dce54ae,
+0x7def5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dee54ae,
+0x7dcf5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7d0e502e,
+0x7d2e582e,
+0x7dce54ae,
+0x7dee5cae,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x00050840,
+0x00000000,
+0xfc0e782a,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x808a0004,
+0x7c08b040,
+0x806b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b706e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7c89b040,
+0x40820000,
+0x00050805,
+0x40860000,
+0x00050805,
+0x7c641c51,
+0x41830000,
+0x00050804,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x7ed4716e,
+0x90740004,
+0x0006000c,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x7c000400,
+0x40a10000,
+0x0005080b,
+0x00000000,
+0x48000000,
+0x00050041,
+0x00000000,
+0x48000000,
+0x0005003d,
+0x00000000,
+0x48000000,
+0x00050042,
+0x00000000,
+0x0006000f,
+0x00000000,
+0xc9ea0000,
+0x4c002202,
+0xc9cb0000,
+0x00000000,
+0xc9ca0000,
+0x4c002202,
+0xc9eb0000,
+0x00000000,
+0x40800000,
+0x00050841,
+0x00000000,
+0x40800000,
+0x0005083d,
+0x00000000,
+0x40800000,
+0x00050842,
+0x00000000,
+0xfc0e7828,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dce54ae,
+0x7def5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dee54ae,
+0x7dcf5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7d0e502e,
+0x7d2e582e,
+0x7dce54ae,
+0x7dee5cae,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x00050840,
+0x00000000,
+0xfc0e7828,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x808a0004,
+0x7c08b040,
+0x806b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b706e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7c89b040,
+0x40820000,
+0x00050805,
+0x40860000,
+0x00050805,
+0x7c6325d7,
+0x41830000,
+0x00050804,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x7ed4716e,
+0x90740004,
+0x0006000c,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x7c000400,
+0x40a10000,
+0x0005080b,
+0x00000000,
+0x48000000,
+0x00050041,
+0x00000000,
+0x48000000,
+0x0005003d,
+0x00000000,
+0x48000000,
+0x00050042,
+0x00000000,
+0x0006000f,
+0x00000000,
+0xc9ea0000,
+0x4c002202,
+0xc9cb0000,
+0x00000000,
+0xc9ca0000,
+0x4c002202,
+0xc9eb0000,
+0x00000000,
+0x40800000,
+0x00050841,
+0x00000000,
+0x40800000,
+0x0005083d,
+0x00000000,
+0x40800000,
+0x00050842,
+0x00000000,
+0xfc0e03f2,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dce54ae,
+0x7def5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dee54ae,
+0x7dcf5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7d0e502e,
+0x7d2e582e,
+0x7dce54ae,
+0x7dee5cae,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x00050840,
+0x00000000,
+0xfc0e03f2,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dce54ae,
+0x7def5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dee54ae,
+0x7dcf5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7d0e502e,
+0x7d2e582e,
+0x7dce54ae,
+0x7dee5cae,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x00050840,
+0x00000000,
+0xfc0e7824,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x808a0004,
+0x7c08b040,
+0x806b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b706e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7c89b040,
+0x40820000,
+0x00050805,
+0x40860000,
+0x00050805,
+0x48000001,
+0x0005009c,
+0x41830000,
+0x00050804,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x7ed4716e,
+0x90740004,
+0x0006000c,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x7c000400,
+0x40a10000,
+0x0005080b,
+0x00000000,
+0x48000000,
+0x00050041,
+0x00000000,
+0x48000000,
+0x0005003d,
+0x00000000,
+0x48000000,
+0x00050042,
+0x00000000,
+0x0006000f,
+0x00000000,
+0xc9ea0000,
+0x4c002202,
+0xc9cb0000,
+0x00000000,
+0xc9ca0000,
+0x4c002202,
+0xc9eb0000,
+0x00000000,
+0x40800000,
+0x00050841,
+0x00000000,
+0x40800000,
+0x0005083d,
+0x00000000,
+0x40800000,
+0x00050842,
+0x00000000,
+0x000600a3,
+0xfc2e7824,
+0x48000001,
+0x00030010,
+0xfc0103f2,
+0xfc0e0028,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dce54ae,
+0x7def5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dee54ae,
+0x7dcf5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7d0e502e,
+0x7d2e582e,
+0x7dce54ae,
+0x7dee5cae,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x00050840,
+0x00000000,
+0x000600a3,
+0xfc2e7824,
+0x48000001,
+0x00030010,
+0xfc0103f2,
+0xfc0e0028,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b786e,
+0x808a0004,
+0x7c08b040,
+0x806b0004,
+0x00000000,
+0x7d0a706e,
+0x7d2b706e,
+0x806a0004,
+0x7c08b040,
+0x808b0004,
+0x00000000,
+0x7c89b040,
+0x40820000,
+0x00050805,
+0x40860000,
+0x00050805,
+0x48000001,
+0x0005009c,
+0x41830000,
+0x00050804,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x7ed4716e,
+0x90740004,
+0x0006000c,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x7c000400,
+0x40a10000,
+0x0005080b,
+0x00000000,
+0x48000000,
+0x00050041,
+0x00000000,
+0x48000000,
+0x0005003d,
+0x00000000,
+0x48000000,
+0x00050042,
+0x00000000,
+0x0006000f,
+0x00000000,
+0xc9ea0000,
+0x4c002202,
+0xc9cb0000,
+0x00000000,
+0xc9ca0000,
+0x4c002202,
+0xc9eb0000,
+0x00000000,
+0x40800000,
+0x00050841,
+0x00000000,
+0x40800000,
+0x0005083d,
+0x00000000,
+0x40800000,
+0x00050842,
+0x00000000,
+0x48000000,
+0x000500a3,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dce54ae,
+0x7def5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083f,
+0x00000000,
+0x7d0e502e,
+0x00000000,
+0x7d2f582e,
+0x00000000,
+0x7dee54ae,
+0x7dcf5cae,
+0x00000000,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7c08b040,
+0x40800000,
+0x0005083c,
+0x00000000,
+0x7d0e502e,
+0x7d2e582e,
+0x7dce54ae,
+0x7dee5cae,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x00050840,
+0x00000000,
+0x48000000,
+0x000500a3,
+0x00000000,
+0x7d0e502e,
+0x7c2e54ae,
+0x7d2e582e,
+0x7c4e5cae,
+0x7c08b040,
+0x7c89b040,
+0x4c002202,
+0x40800000,
+0x00050840,
+0x48000001,
+0x0003001f,
+0x80f00000,
+0x3a100004,
+0x7c2ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7caa5850,
+0x91d20000,
+0x00098200,
+0x7c8e5a14,
+0x7d555378,
+0x0006002b,
+0x92010020,
+0x7e439378,
+0x54a500fe,
+0x000900ab,
+0x48000001,
+0x00030030,
+0x28030000,
+0x81d20000,
+0x00098200,
+0x40820000,
+0x00050836,
+0x80f00000,
+0x3a100004,
+0x7c0eacae,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x5588007e,
+0x000900ab,
+0x2108fffc,
+0x80f00000,
+0x3a100004,
+0x7c0f402e,
+0x39200000,
+0x00098200,
+0x7d34716e,
+0x90140004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x5588007e,
+0x000900ab,
+0x2108fffc,
+0x80f00000,
+0x3a100004,
+0x7c0f402e,
+0x39200000,
+0x00098200,
+0x7d34716e,
+0x90140004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x558c6800,
+0x000900a1,
+0x7d8c8670,
+0x80f00000,
+0x3a100004,
+0x7ed4716e,
+0x91940004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x558c6800,
+0x000900a1,
+0x7d88fe70,
+0x7d096278,
+0x7d284850,
+0x7d260034,
+0x2106040d,
+0x7d293030,
+0x20cc0000,
+0x5508a000,
+0x000900a1,
+0x512ca87e,
+0x7c000110,
+0x7d8c4214,
+0x7d8c0038,
+0x80f00000,
+0x3a100004,
+0x7d94716e,
+0x93140004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x7c0f64ae,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x558800fe,
+0x000900ab,
+0x7d0040f8,
+0x80f00000,
+0x3a100004,
+0x7c0ea12e,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x7eeea12e,
+0x3a940008,
+0x0006000b,
+0x7eeea12e,
+0x7c146000,
+0x3a940008,
+0x41800000,
+0x0005080b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x814efffc,
+0x558c007e,
+0x000900ab,
+0x398c0000,
+0x00098200,
+0x7d4a602e,
+0x80f00000,
+0x3a100004,
+0x810a0000,
+0x00098200,
+0xc8080000,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x814efffc,
+0x5694007e,
+0x000900ab,
+0x3a940000,
+0x00098200,
+0x7c0c74ee,
+0x7d4aa02e,
+0x88ca0000,
+0x00098200,
+0x808a0000,
+0x00098200,
+0x70c60000,
+0x00090200,
+0x880a0000,
+0x00098200,
+0x812c0000,
+0xd8040000,
+0x28800000,
+0x810c0004,
+0x4c423382,
+0x39290000,
+0x00098200,
+0x40820000,
+0x00050802,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000c,
+0x28090000,
+0x00090200,
+0x40800000,
+0x0005080b,
+0x88c80000,
+0x00098200,
+0x70c60000,
+0x00090200,
+0x38710000,
+0x00098200,
+0x41820000,
+0x0005080b,
+0x48000001,
+0x00030031,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x814efffc,
+0x5588007e,
+0x000900ab,
+0x5694007e,
+0x000900ab,
+0x2108fffc,
+0x3a940000,
+0x00098200,
+0x7d0f402e,
+0x7d4aa02e,
+0x88ca0000,
+0x00098200,
+0x808a0000,
+0x00098200,
+0x70c60000,
+0x00090200,
+0x88c80000,
+0x00098200,
+0x892a0000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x91040004,
+0x90040000,
+0x40820000,
+0x00050802,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000c,
+0x70c60000,
+0x00090200,
+0x28890000,
+0x4c423382,
+0x38710000,
+0x00098200,
+0x41820000,
+0x0005080b,
+0x48000001,
+0x00030031,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x814efffc,
+0x5694007e,
+0x000900ab,
+0x3a940000,
+0x00098200,
+0x7c0f64ae,
+0x7d4aa02e,
+0x80f00000,
+0x3a100004,
+0x810a0000,
+0x00098200,
+0xd8080000,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x814efffc,
+0x5694007e,
+0x000900ab,
+0x558000fe,
+0x000900ab,
+0x3a940000,
+0x00098200,
+0x7c0000f8,
+0x7d4aa02e,
+0x80f00000,
+0x3a100004,
+0x810a0000,
+0x00098200,
+0x90080000,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x81120000,
+0x00098200,
+0x5580007e,
+0x000900ab,
+0x3e100000,
+0x00098200,
+0x7e100214,
+0x91d20000,
+0x00098200,
+0x28080000,
+0x7e439378,
+0x41820000,
+0x00050801,
+0x7c8ea214,
+0x48000001,
+0x00030032,
+0x81d20000,
+0x00098200,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x5588007e,
+0x000900ab,
+0x91d20000,
+0x00098200,
+0x2108fffc,
+0x92010020,
+0x7c8f402e,
+0x7e439378,
+0x80aefffc,
+0x48000001,
+0x00030033,
+0x81d20000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x7c14716e,
+0x90740004,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x80110000,
+0x00098200,
+0x7e439378,
+0x81110000,
+0x00098200,
+0x91d20000,
+0x00098200,
+0x7c004040,
+0x92010020,
+0x40800000,
+0x00050805,
+0x0006000b,
+0x00000000,
+0x5584ed7e,
+0x558596fe,
+0x2c0407ff,
+0x41820000,
+0x00050803,
+0x0006000c,
+0x48000001,
+0x00030034,
+0x00000000,
+0x5588007e,
+0x000900ab,
+0x2108fffc,
+0x7c8f402e,
+0x48000001,
+0x00030035,
+0x00000000,
+0x81d20000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x7c14716e,
+0x90740004,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x0006000d,
+0x38800801,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x0006000f,
+0x7d956378,
+0x48000001,
+0x00030036,
+0x7eacab78,
+0x7e439378,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x812efffc,
+0x5588007e,
+0x000900ab,
+0x81490000,
+0x00098200,
+0x2108fffc,
+0x7d6f402e,
+0x00000000,
+0x48000000,
+0x000500a4,
+0x00000000,
+0x48000000,
+0x000500a5,
+0x00000000,
+0x7c6a706e,
+0x7c8b706e,
+0x814a0004,
+0x00000000,
+0x816b0004,
+0x00000000,
+0xc80b0000,
+0x00000000,
+0x2c030000,
+0x00098200,
+0x7c84b040,
+0x40820000,
+0x00050830,
+0x00000000,
+0x800a0000,
+0x00098200,
+0x40860000,
+0x00050805,
+0x810a0000,
+0x00098200,
+0x7c005840,
+0x55691800,
+0x000900a1,
+0x00000000,
+0x40840000,
+0x00050805,
+0xfc20001e,
+0xfc40f02a,
+0xd8210010,
+0x800a0000,
+0x00098200,
+0xfc42f028,
+0x81210014,
+0x810a0000,
+0x00098200,
+0xfc801000,
+0x7c004840,
+0x4c213202,
+0x55291800,
+0x000900a1,
+0x00000000,
+0x40810000,
+0x00050830,
+0x7c08482e,
+0x7dc84cae,
+0x2c000000,
+0x00098200,
+0x41820000,
+0x00050802,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x7dcea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000c,
+0x812a0000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080b,
+0x88090000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x00050030,
+0x0006000f,
+0x2c040000,
+0x00098200,
+0x40820000,
+0x00050830,
+0x00000000,
+0x816b0004,
+0x00000000,
+0x48000000,
+0x000500a4,
+0x00000000,
+0x7c6a706e,
+0x5568007e,
+0x000900ab,
+0x814a0004,
+0x2108fffc,
+0x2c030000,
+0x00098200,
+0x7d6f402e,
+0x40820000,
+0x0005082d,
+0x000600a4,
+0x800a0000,
+0x00098200,
+0x810b0000,
+0x00098200,
+0x812a0000,
+0x00098200,
+0x7d080038,
+0x55002800,
+0x000900a1,
+0x55081800,
+0x000900a1,
+0x7d080050,
+0x7d294214,
+0x0006000b,
+0x80690000,
+0x00098200,
+0x80090000,
+0x00098200,
+0x80890000,
+0x00098200,
+0x81090000,
+0x00098200,
+0x2c030000,
+0x00098200,
+0x40820000,
+0x00050804,
+0x7c005800,
+0x40820000,
+0x00050804,
+0x2c040000,
+0x00098200,
+0x41820000,
+0x00050805,
+0x0006000d,
+0x7c94716e,
+0x91140004,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x81290000,
+0x00098200,
+0x00000000,
+0x28090000,
+0x40820000,
+0x0005080b,
+0x38800000,
+0x00098200,
+0x0006000f,
+0x812a0000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080d,
+0x88090000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x40820000,
+0x0005080d,
+0x48000000,
+0x0005002e,
+0x00000000,
+0x7c6a706e,
+0x556000fe,
+0x000900ab,
+0x814a0004,
+0x2c030000,
+0x00098200,
+0x40820000,
+0x0005082f,
+0x810a0000,
+0x00098200,
+0x812a0000,
+0x00098200,
+0x7c004040,
+0x40800000,
+0x0005082f,
+0x7d09582e,
+0x7c095cae,
+0x2c080000,
+0x00098200,
+0x41820000,
+0x00050805,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x7c0ea5ae,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000f,
+0x812a0000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080b,
+0x89290000,
+0x00098200,
+0x71290000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x0005002f,
+0x00000000,
+0x7c6a706e,
+0x7c8b706e,
+0x814a0004,
+0x00000000,
+0x816b0004,
+0x00000000,
+0xc80b0000,
+0x00000000,
+0x2c030000,
+0x00098200,
+0x7c84b040,
+0x40820000,
+0x00050834,
+0x00000000,
+0x800a0000,
+0x00098200,
+0x40860000,
+0x00050805,
+0x810a0000,
+0x00098200,
+0x7c005840,
+0x55601800,
+0x000900a1,
+0x00000000,
+0x40840000,
+0x00050805,
+0xfc20001e,
+0xfc40f02a,
+0xd8210010,
+0x800a0000,
+0x00098200,
+0xfc42f028,
+0x81210014,
+0x810a0000,
+0x00098200,
+0xfc801000,
+0x7c004840,
+0x4c213202,
+0x55201800,
+0x000900a1,
+0x00000000,
+0x40810000,
+0x00050834,
+0x7d28002e,
+0x88ca0000,
+0x00098200,
+0x7dcea4ae,
+0x2c090000,
+0x00098200,
+0x41820000,
+0x00050803,
+0x0006000b,
+0x70c90000,
+0x00090200,
+0x7dc805ae,
+0x40820000,
+0x00050807,
+0x0006000c,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000d,
+0x812a0000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080b,
+0x89290000,
+0x00098200,
+0x71290000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x00050034,
+0x0006000f,
+0x2c040000,
+0x00098200,
+0x40820000,
+0x00050834,
+0x00000000,
+0x816b0004,
+0x00000000,
+0x48000000,
+0x000500a5,
+0x00060011,
+0x80110000,
+0x00098200,
+0x54c607b8,
+0x91510000,
+0x00098200,
+0x98ca0000,
+0x00098200,
+0x900a0000,
+0x00098200,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x7c6a706e,
+0x5568007e,
+0x000900ab,
+0x814a0004,
+0x2108fffc,
+0x2c030000,
+0x00098200,
+0x7d6f402e,
+0x40820000,
+0x00050831,
+0x000600a5,
+0x800a0000,
+0x00098200,
+0x810b0000,
+0x00098200,
+0x812a0000,
+0x00098200,
+0x9b0a0000,
+0x00098200,
+0x7d080038,
+0x7dcea4ae,
+0x55002800,
+0x000900a1,
+0x55081800,
+0x000900a1,
+0x7d080050,
+0x88ca0000,
+0x00098200,
+0x7d294214,
+0x0006000b,
+0x80690000,
+0x00098200,
+0x80090000,
+0x00098200,
+0x80890000,
+0x00098200,
+0x81090000,
+0x00098200,
+0x2c030000,
+0x00098200,
+0x40820000,
+0x00050805,
+0x7c005800,
+0x40820000,
+0x00050805,
+0x2c040000,
+0x00098200,
+0x41820000,
+0x00050804,
+0x0006000c,
+0x00000000,
+0x70c00000,
+0x00090200,
+0xd9c90000,
+0x00098200,
+0x40820000,
+0x00050807,
+0x0006000d,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x810a0000,
+0x00098200,
+0x28080000,
+0x41820000,
+0x0005080c,
+0x88080000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x40820000,
+0x0005080c,
+0x48000000,
+0x00050032,
+0x0006000f,
+0x28080000,
+0x7d094378,
+0x40820000,
+0x0005080b,
+0x810a0000,
+0x00098200,
+0x38b10000,
+0x00098200,
+0x92010020,
+0x7e439378,
+0x28080000,
+0x91d20000,
+0x00098200,
+0x41820000,
+0x00050806,
+0x88080000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x41820000,
+0x00050832,
+0x00060010,
+0x38000000,
+0x00098200,
+0x00000000,
+0x91650004,
+0x7d445378,
+0x90050000,
+0x48000001,
+0x00030037,
+0x81d20000,
+0x00098200,
+0xd9c30000,
+0x48000000,
+0x0005000d,
+0x00060011,
+0x80110000,
+0x00098200,
+0x54c607b8,
+0x91510000,
+0x00098200,
+0x98ca0000,
+0x00098200,
+0x900a0000,
+0x00098200,
+0x48000000,
+0x0005000d,
+0x00000000,
+0x7c6a706e,
+0x556000fe,
+0x000900ab,
+0x814a0004,
+0x2c030000,
+0x00098200,
+0x40820000,
+0x00050833,
+0x810a0000,
+0x00098200,
+0x812a0000,
+0x00098200,
+0x88ca0000,
+0x00098200,
+0x7c004040,
+0x7dcea4ae,
+0x40800000,
+0x00050833,
+0x7d09582e,
+0x2c080000,
+0x00098200,
+0x41820000,
+0x00050805,
+0x0006000b,
+0x70c00000,
+0x00090200,
+0x7dc95dae,
+0x40820000,
+0x00050807,
+0x0006000c,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000f,
+0x810a0000,
+0x00098200,
+0x28080000,
+0x41820000,
+0x0005080b,
+0x89080000,
+0x00098200,
+0x71080000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x00050033,
+0x00060011,
+0x80110000,
+0x00098200,
+0x00000000,
+0x54c607b8,
+0x91510000,
+0x00098200,
+0x98ca0000,
+0x00098200,
+0x900a0000,
+0x00098200,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x7e8ea214,
+0x0006000b,
+0x7ccf6214,
+0x8094fffc,
+0x3413fff8,
+0x80c60004,
+0x540500fe,
+0x000900ab,
+0x41820000,
+0x00050804,
+0x7ca53214,
+0x81240000,
+0x00098200,
+0x54c81800,
+0x000900a1,
+0x88c40000,
+0x00098200,
+0x7c054840,
+0x7d340214,
+0x80040000,
+0x00098200,
+0x41810000,
+0x00050805,
+0x7d080214,
+0x70c00000,
+0x00090200,
+0x0006000d,
+0xc8140000,
+0x3a940008,
+0x7c944800,
+0xd8080000,
+0x39080008,
+0x41840000,
+0x0005080d,
+0x40820000,
+0x00050807,
+0x0006000e,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000f,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x92010020,
+0x7d956378,
+0x48000001,
+0x00030038,
+0x7eacab78,
+0x48000000,
+0x0005000b,
+0x00060011,
+0x80110000,
+0x00098200,
+0x54c607b8,
+0x90910000,
+0x00098200,
+0x98c40000,
+0x00098200,
+0x90040000,
+0x00098200,
+0x00000000,
+0x48000000,
+0x0005000e,
+0x00000000,
+0x7d6b9a14,
+0x00000000,
+0x7dc97378,
+0x7c0ea06e,
+0x814e0004,
+0x396bfff8,
+0x39ce0008,
+0x2c000000,
+0x00098200,
+0x40820000,
+0x00050825,
+0x920efff8,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x7d6b9a14,
+0x00000000,
+0x7c14706e,
+0x81540004,
+0x396bfff8,
+0x810efff8,
+0x2c000000,
+0x00098200,
+0x3a940008,
+0x40820000,
+0x00050845,
+0x00060046,
+0x71000000,
+0x00090200,
+0x88ca0000,
+0x00098200,
+0x69090000,
+0x00090200,
+0x288b0000,
+0x40820000,
+0x00050807,
+0x0006000b,
+0x914efffc,
+0x39200000,
+0x2b860001,
+0x41860000,
+0x00050803,
+0x0006000c,
+0x38c90008,
+0x7c144cae,
+0x7c865840,
+0x7c0e4dae,
+0x7cc93378,
+0x40860000,
+0x0005080c,
+0x0006000d,
+0x4c42ea02,
+0x41820000,
+0x00050805,
+0x0006000e,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x0006000f,
+0x80e8fffc,
+0x54f4dd78,
+0x7d147050,
+0x81080000,
+0x00098200,
+0x81080000,
+0x00098200,
+0x81e80000,
+0x00098200,
+0x48000000,
+0x0005000e,
+0x00060011,
+0x71200000,
+0x00090200,
+0x00000000,
+0x40820000,
+0x0005080b,
+0x7dc97050,
+0x810efff8,
+0x71000000,
+0x00090200,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x7dc97378,
+0x7dcea214,
+0x810effe8,
+0x814effec,
+0xc82efff8,
+0xc80efff0,
+0x910e0000,
+0x914e0004,
+0x2c080000,
+0x00098200,
+0xd82e0010,
+0x39600010,
+0xdc0e0008,
+0x40820000,
+0x00050825,
+0x920efff8,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x7e8ea214,
+0x8154fff4,
+0x8174fffc,
+0x800a0000,
+0x00098200,
+0x810a0000,
+0x00098200,
+0x3a100004,
+0x0006000b,
+0x7c0b0040,
+0x55661800,
+0x000900a1,
+0x40800000,
+0x00050805,
+0x7d28302e,
+0x7c0834ae,
+0x2c090000,
+0x00098200,
+0x80f0fffc,
+0x41820000,
+0x00050804,
+0x00000000,
+0x91740004,
+0x92d40000,
+0x00000000,
+0x9161000c,
+0xc8210008,
+0xfc21f028,
+0x00000000,
+0x396b0001,
+0x3cd00000,
+0x00098200,
+0xd8140008,
+0x54e893ba,
+0x9174fffc,
+0x7e083214,
+0x00000000,
+0xd8340000,
+0x00000000,
+0x0006000d,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x396b0001,
+0x48000000,
+0x0005000b,
+0x0006000f,
+0x810a0000,
+0x00098200,
+0x7d605850,
+0x812a0000,
+0x00098200,
+0x00060010,
+0x7c0b4040,
+0x55662800,
+0x000900a1,
+0x41a10000,
+0x0005080d,
+0x556a1800,
+0x000900a1,
+0x7cca3050,
+0x7d49302e,
+0x7c0934ae,
+0x7cc93214,
+0x2c0a0000,
+0x00098200,
+0x80f0fffc,
+0x41820000,
+0x00050807,
+0xc8260000,
+0x00098200,
+0x3d300000,
+0x00098200,
+0xd8140008,
+0x7d6b0214,
+0x54e893ba,
+0xd8340000,
+0x396b0001,
+0x7e084a14,
+0x9174fffc,
+0x48000000,
+0x0005000d,
+0x00060011,
+0x396b0001,
+0x48000000,
+0x00050010,
+0x00000000,
+0x7e8ea214,
+0x8014ffe8,
+0x8114ffec,
+0x8134fff0,
+0x80d4fff8,
+0x2c090000,
+0x00098200,
+0x2c800000,
+0x00098200,
+0x2f060000,
+0x00098200,
+0x40860000,
+0x00050805,
+0x89080000,
+0x00098200,
+0x4c42d202,
+0x2f880000,
+0x00098200,
+0x5580007e,
+0x000900ab,
+0x4c42f202,
+0x7cd00214,
+0x40820000,
+0x00050805,
+0x9314fffc,
+0x3e060000,
+0x00098200,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000f,
+0x38000000,
+0x00098200,
+0x39000000,
+0x00098200,
+0x9810ffff,
+0x3e060000,
+0x00098200,
+0x99100003,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x800efff8,
+0x7d6e5a14,
+0x7e8ea214,
+0x396b0000,
+0x00098200,
+0x7d345214,
+0x38cefff8,
+0x7d605850,
+0x288a0000,
+0x7d0b3051,
+0x41860000,
+0x00050805,
+0x3929fff0,
+0x40810000,
+0x00050802,
+0x0006000b,
+0xc80b0000,
+0x396b0008,
+0xd8140000,
+0x7c144840,
+0x7c8b3040,
+0x40800000,
+0x00050803,
+0x3a940008,
+0x41840000,
+0x0005080b,
+0x0006000c,
+0x92f40000,
+0x7c144840,
+0x3a940008,
+0x41800000,
+0x0005080c,
+0x0006000d,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000f,
+0x80120000,
+0x00098200,
+0x3a600008,
+0x40a10000,
+0x0005080d,
+0x7d344214,
+0x7c090040,
+0x3a680008,
+0x41810000,
+0x00050807,
+0x00060010,
+0xc80b0000,
+0x396b0008,
+0xd8140000,
+0x7c0b3040,
+0x3a940008,
+0x41800000,
+0x00050810,
+0x48000000,
+0x0005000d,
+0x00060011,
+0x7e439378,
+0x92920000,
+0x00098200,
+0x7eae5850,
+0x91d20000,
+0x00098200,
+0x7e8ea050,
+0x92010020,
+0x550400fe,
+0x000900ab,
+0x48000001,
+0x00030000,
+0x81d20000,
+0x00098200,
+0x00000000,
+0x7e8ea214,
+0x7d6eaa14,
+0x38cefff8,
+0x48000000,
+0x00050010,
+0x00000000,
+0x7d8c9a14,
+0x00000000,
+0x820efff8,
+0x7e8ea214,
+0x7d936378,
+0x0006000b,
+0x72000000,
+0x00090200,
+0x6a080000,
+0x00090200,
+0x40820000,
+0x000508a6,
+0x00060017,
+0x80f0fffc,
+0x2c0c0008,
+0x392efff8,
+0x396cfff8,
+0x54ea5d78,
+0x41820000,
+0x00050803,
+0x39000000,
+0x0006000c,
+0x38c80008,
+0x7c1444ae,
+0x7c065800,
+0x7c0945ae,
+0x41820000,
+0x00050803,
+0x39060008,
+0x7c3434ae,
+0x7c085800,
+0x7c2935ae,
+0x40820000,
+0x0005080c,
+0x0006000d,
+0x0006000f,
+0x7c0a6040,
+0x54f4dd78,
+0x41810000,
+0x00050806,
+0x7dd44850,
+0x810efffc,
+0x80f00000,
+0x3a100004,
+0x81080000,
+0x00098200,
+0x81e80000,
+0x00098200,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00060010,
+0x390cfff8,
+0x398c0008,
+0x7ee9412e,
+0x48000000,
+0x0005000f,
+0x000600a6,
+0x71090000,
+0x00090200,
+0x40820000,
+0x00050818,
+0x7dc87050,
+0x820efff8,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x820efff8,
+0x7e8ea214,
+0x7d936378,
+0x72000000,
+0x00090200,
+0x6a080000,
+0x00090200,
+0x40a20000,
+0x000508a6,
+0x80f0fffc,
+0x392efff8,
+0x54ea5d78,
+0x00000000,
+0xc8140000,
+0xd8090000,
+0x00000000,
+0x0006000f,
+0x7c0a6040,
+0x54f4dd78,
+0x41810000,
+0x00050806,
+0x7dd44850,
+0x810efffc,
+0x80f00000,
+0x3a100004,
+0x81080000,
+0x00098200,
+0x81e80000,
+0x00098200,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00060010,
+0x390cfff8,
+0x398c0008,
+0x7ee9412e,
+0x48000000,
+0x0005000f,
+0x00000000,
+0x5608fe7c,
+0x39080000,
+0x00098200,
+0x7d31422e,
+0x35290000,
+0x00098200,
+0x7d31432e,
+0x41800000,
+0x00050894,
+0x00000000,
+0x7d14706e,
+0x80740000,
+0x00098200,
+0x7c08b040,
+0x00000000,
+0x80b40000,
+0x00098200,
+0x40820000,
+0x00050809,
+0x7c632e15,
+0x2f050000,
+0x80940000,
+0x00098200,
+0x41830000,
+0x00050806,
+0x0006000e,
+0x90740000,
+0x00098200,
+0x00000000,
+0x80d40000,
+0x00098200,
+0x80b40000,
+0x00098200,
+0x81340000,
+0x00098200,
+0x80940000,
+0x00098200,
+0x7f86b040,
+0x7c89b040,
+0x4c42f202,
+0x4c423202,
+0x2f050000,
+0x40820000,
+0x00050809,
+0x00000000,
+0x41980000,
+0x00050805,
+0x7c032000,
+0x0006000b,
+0x92d40000,
+0x00098200,
+0x00000000,
+0x558c007e,
+0x000900ab,
+0x00000000,
+0x90740000,
+0x00098200,
+0x00000000,
+0x7d906214,
+0x00000000,
+0x41810000,
+0x00050803,
+0x00000000,
+0x3e0c0000,
+0x00098200,
+0x40a10000,
+0x00050807,
+0x00000000,
+0x41810000,
+0x00050802,
+0x3e0c0000,
+0x00098200,
+0x00000000,
+0x40a10000,
+0x00070800,
+0x00000000,
+0x0006000c,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000f,
+0x7c041800,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x00060010,
+0x7c000400,
+0x40a10000,
+0x0005080e,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x00060013,
+0xc8340000,
+0x00098200,
+0x00000000,
+0x7c3474ee,
+0x00000000,
+0xc8740000,
+0x00098200,
+0xc8540000,
+0x00098200,
+0x80d40000,
+0x00098200,
+0xfc21182a,
+0xd8340000,
+0x00098200,
+0x00000000,
+0x00060013,
+0x00000000,
+0x7d14706e,
+0x80d40000,
+0x00098200,
+0x81340000,
+0x00098200,
+0x7c08b040,
+0x7f86b040,
+0x7c89b040,
+0x00000000,
+0xc8340000,
+0x00098200,
+0x4c00e202,
+0x4c002202,
+0xc8540000,
+0x00098200,
+0x40800000,
+0x00050847,
+0x00000000,
+0x2f060000,
+0x00000000,
+0x558c007e,
+0x000900ab,
+0x00000000,
+0xd8340000,
+0x00098200,
+0x00000000,
+0x7d906214,
+0x00000000,
+0xfc011000,
+0x00000000,
+0x3e0c0000,
+0x00098200,
+0x00000000,
+0x41980000,
+0x00050805,
+0x00000000,
+0x41810000,
+0x00050803,
+0x00000000,
+0x41a10000,
+0x0005080c,
+0x00000000,
+0x41810000,
+0x00050802,
+0x00000000,
+0x0006000b,
+0x3e0c0000,
+0x00098200,
+0x00000000,
+0x40a10000,
+0x00050807,
+0x00000000,
+0x40a10000,
+0x00070800,
+0x00000000,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x0006000c,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x0006000f,
+0x00000000,
+0x40800000,
+0x0005080c,
+0x0006000d,
+0x3e0c0000,
+0x00098200,
+0x00000000,
+0x40a00000,
+0x0005080b,
+0x00000000,
+0x40a00000,
+0x00050807,
+0x00000000,
+0x40a00000,
+0x00070800,
+0x00000000,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x00060011,
+0x80f0fffc,
+0x54ec9b78,
+0x48000000,
+0x00070000,
+0x00000000,
+0x5608fe7c,
+0x39080000,
+0x00098200,
+0x7d31422e,
+0x35290000,
+0x00098200,
+0x7d31432e,
+0x41800000,
+0x00050894,
+0x00000000,
+0x7d14706e,
+0x81340004,
+0x2c080000,
+0x00098200,
+0x41820000,
+0x00050801,
+0x00000000,
+0x9114fff8,
+0x9134fffc,
+0x48000000,
+0x00070000,
+0x00000000,
+0x5580007e,
+0x000900ab,
+0x3e100000,
+0x00098200,
+0x7e100214,
+0x9114fff8,
+0x9134fffc,
+0x00000000,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x5608fe7c,
+0x39080000,
+0x00098200,
+0x7d31422e,
+0x35290000,
+0x00098200,
+0x7d31432e,
+0x41800000,
+0x00050894,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x81110000,
+0x00098200,
+0x558c007e,
+0x000900ab,
+0x93110000,
+0x00098200,
+0x7d28602e,
+0x7c000400,
+0x81290000,
+0x00098200,
+0x91d10000,
+0x00098200,
+0x7d2903a6,
+0x92510000,
+0x00098200,
+0x3bf10000,
+0x00098200,
+0x4e800420,
+0x00000000,
+0x5580007e,
+0x000900ab,
+0x3e100000,
+0x00098200,
+0x7e100214,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x5608fe7c,
+0x39080000,
+0x00098200,
+0x7d31422e,
+0x35290000,
+0x00098200,
+0x7d31432e,
+0x41800000,
+0x00050896,
+0x00000000,
+0x81320000,
+0x00098200,
+0x89100000,
+0x00098200,
+0x81f00000,
+0x00098200,
+0x7c144840,
+0x55081800,
+0x000900a1,
+0x41810000,
+0x00050820,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x00000000,
+0x0006000c,
+0x7c0b4040,
+0x41800000,
+0x00050803,
+0x00000000,
+0x54ec9b78,
+0x48000000,
+0x00070000,
+0x00000000,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x00000000,
+0x0006000d,
+0x7eee592e,
+0x396b0008,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x81320000,
+0x00098200,
+0x7d0e5a14,
+0x7c145a14,
+0x91480004,
+0x38cb0000,
+0x00098200,
+0x81f00000,
+0x00098200,
+0x7c004840,
+0x90c80000,
+0x40800000,
+0x00050820,
+0x89300000,
+0x00098200,
+0x7dd47378,
+0x7d0b4378,
+0x80f00000,
+0x3a100004,
+0x2c090000,
+0x39c80008,
+0x41820000,
+0x00050803,
+0x0006000b,
+0x7c145840,
+0x80140000,
+0x80d40004,
+0x40800000,
+0x00050804,
+0x92f40000,
+0x3a940008,
+0x0006000c,
+0x3529ffff,
+0x90080008,
+0x90c8000c,
+0x39080008,
+0x40820000,
+0x0005080b,
+0x0006000d,
+0x54e815ba,
+0x7c11402e,
+0x7c0903a6,
+0x54ea5d78,
+0x54ec9b78,
+0x54f4dd78,
+0x54eb9d78,
+0x4e800420,
+0x0006000e,
+0x38000000,
+0x00098200,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x80ca0000,
+0x00098200,
+0x00000000,
+0x80d10000,
+0x00098200,
+0x00000000,
+0x7d145a14,
+0x81320000,
+0x00098200,
+0x7d6e5a14,
+0x91d20000,
+0x00098200,
+0x7c084840,
+0x91720000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x7cc903a6,
+0x00000000,
+0x808a0000,
+0x00098200,
+0x00000000,
+0x7e439378,
+0x41810000,
+0x0005081f,
+0x90110000,
+0x00098200,
+0x4e800421,
+0x81d20000,
+0x00098200,
+0x546c1800,
+0x000900a1,
+0x81120000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x820efff8,
+0x7e8c4050,
+0x90110000,
+0x00098200,
+0x48000000,
+0x00050016,
+0x00000000,
+0x00010000
+};
+
+enum {
+ GLOB_vm_returnp,
+ GLOB_cont_dispatch,
+ GLOB_vm_returnc,
+ GLOB_BC_RET_Z,
+ GLOB_vm_return,
+ GLOB_vm_leave_cp,
+ GLOB_vm_leave_unw,
+ GLOB_vm_unwind_c,
+ GLOB_vm_unwind_c_eh,
+ GLOB_vm_unwind_ff,
+ GLOB_vm_unwind_ff_eh,
+ GLOB_vm_growstack_c,
+ GLOB_vm_growstack_l,
+ GLOB_vm_resume,
+ GLOB_vm_pcall,
+ GLOB_vm_call,
+ GLOB_vm_call_dispatch,
+ GLOB_vmeta_call,
+ GLOB_vm_call_dispatch_f,
+ GLOB_vm_cpcall,
+ GLOB_cont_ffi_callback,
+ GLOB_vm_call_tail,
+ GLOB_cont_cat,
+ GLOB_BC_CAT_Z,
+ GLOB_cont_nop,
+ GLOB_vmeta_tgets1,
+ GLOB_vmeta_tgets,
+ GLOB_vmeta_tgetb,
+ GLOB_vmeta_tgetv,
+ GLOB_vmeta_tsets1,
+ GLOB_vmeta_tsets,
+ GLOB_vmeta_tsetb,
+ GLOB_vmeta_tsetv,
+ GLOB_vmeta_comp,
+ GLOB_vmeta_binop,
+ GLOB_cont_ra,
+ GLOB_cont_condt,
+ GLOB_cont_condf,
+ GLOB_vmeta_equal,
+ GLOB_vmeta_equal_cd,
+ GLOB_vmeta_arith_nv,
+ GLOB_vmeta_arith_nv2,
+ GLOB_vmeta_unm,
+ GLOB_vmeta_arith_vn,
+ GLOB_vmeta_arith_vv,
+ GLOB_vmeta_arith_vn2,
+ GLOB_vmeta_arith_vv2,
+ GLOB_vmeta_len,
+ GLOB_BC_LEN_Z,
+ GLOB_vmeta_callt,
+ GLOB_BC_CALLT_Z,
+ GLOB_vmeta_for,
+ GLOB_ff_assert,
+ GLOB_fff_fallback,
+ GLOB_fff_res,
+ GLOB_ff_type,
+ GLOB_fff_resn,
+ GLOB_ff_getmetatable,
+ GLOB_fff_restv,
+ GLOB_ff_setmetatable,
+ GLOB_ff_rawget,
+ GLOB_ff_tonumber,
+ GLOB_ff_tostring,
+ GLOB_fff_gcstep,
+ GLOB_ff_next,
+ GLOB_ff_pairs,
+ GLOB_ff_ipairs_aux,
+ GLOB_ff_ipairs,
+ GLOB_ff_pcall,
+ GLOB_ff_xpcall,
+ GLOB_ff_coroutine_resume,
+ GLOB_ff_coroutine_wrap_aux,
+ GLOB_ff_coroutine_yield,
+ GLOB_ff_math_abs,
+ GLOB_fff_resi,
+ GLOB_fff_res1,
+ GLOB_ff_math_floor,
+ GLOB_ff_math_ceil,
+ GLOB_ff_math_sqrt,
+ GLOB_ff_math_log,
+ GLOB_ff_math_log10,
+ GLOB_ff_math_exp,
+ GLOB_ff_math_sin,
+ GLOB_ff_math_cos,
+ GLOB_ff_math_tan,
+ GLOB_ff_math_asin,
+ GLOB_ff_math_acos,
+ GLOB_ff_math_atan,
+ GLOB_ff_math_sinh,
+ GLOB_ff_math_cosh,
+ GLOB_ff_math_tanh,
+ GLOB_ff_math_pow,
+ GLOB_ff_math_atan2,
+ GLOB_ff_math_fmod,
+ GLOB_ff_math_deg,
+ GLOB_ff_math_rad,
+ GLOB_ff_math_ldexp,
+ GLOB_ff_math_frexp,
+ GLOB_ff_math_modf,
+ GLOB_ff_math_min,
+ GLOB_ff_math_max,
+ GLOB_ff_string_len,
+ GLOB_ff_string_byte,
+ GLOB_ff_string_char,
+ GLOB_fff_newstr,
+ GLOB_ff_string_sub,
+ GLOB_ff_string_rep,
+ GLOB_ff_string_reverse,
+ GLOB_ff_string_lower,
+ GLOB_ff_string_upper,
+ GLOB_ff_table_getn,
+ GLOB_ff_bit_band,
+ GLOB_fff_tobit_fb,
+ GLOB_fff_bitop_fb,
+ GLOB_ff_bit_bor,
+ GLOB_ff_bit_bxor,
+ GLOB_ff_bit_bswap,
+ GLOB_ff_bit_bnot,
+ GLOB_ff_bit_lshift,
+ GLOB_ff_bit_rshift,
+ GLOB_ff_bit_arshift,
+ GLOB_ff_bit_rol,
+ GLOB_ff_bit_ror,
+ GLOB_ff_bit_tobit,
+ GLOB_vm_record,
+ GLOB_vm_rethook,
+ GLOB_vm_inshook,
+ GLOB_cont_hook,
+ GLOB_vm_hotloop,
+ GLOB_vm_callhook,
+ GLOB_vm_hotcall,
+ GLOB_vm_exit_handler,
+ GLOB_vm_exit_interp,
+ GLOB_vm_floor,
+ GLOB_vm_ceil,
+ GLOB_vm_trunc,
+ GLOB_vm_modi,
+ GLOB_vm_foldarith,
+ GLOB_vm_cachesync,
+ GLOB_vm_ffi_callback,
+ GLOB_vm_ffi_call,
+ GLOB_BC_ISEQN_Z,
+ GLOB_BC_ISNEN_Z,
+ GLOB_BC_MODVN_Z,
+ GLOB_BC_TGETS_Z,
+ GLOB_BC_TSETS_Z,
+ GLOB_BC_RETV_Z,
+ GLOB__MAX
+};
+static const char *const globnames[] = {
+ "vm_returnp",
+ "cont_dispatch",
+ "vm_returnc",
+ "BC_RET_Z",
+ "vm_return",
+ "vm_leave_cp",
+ "vm_leave_unw",
+ "vm_unwind_c",
+ "vm_unwind_c_eh",
+ "vm_unwind_ff",
+ "vm_unwind_ff_eh",
+ "vm_growstack_c",
+ "vm_growstack_l",
+ "vm_resume",
+ "vm_pcall",
+ "vm_call",
+ "vm_call_dispatch",
+ "vmeta_call",
+ "vm_call_dispatch_f",
+ "vm_cpcall",
+ "cont_ffi_callback",
+ "vm_call_tail",
+ "cont_cat",
+ "BC_CAT_Z",
+ "cont_nop",
+ "vmeta_tgets1",
+ "vmeta_tgets",
+ "vmeta_tgetb",
+ "vmeta_tgetv",
+ "vmeta_tsets1",
+ "vmeta_tsets",
+ "vmeta_tsetb",
+ "vmeta_tsetv",
+ "vmeta_comp",
+ "vmeta_binop",
+ "cont_ra",
+ "cont_condt",
+ "cont_condf",
+ "vmeta_equal",
+ "vmeta_equal_cd",
+ "vmeta_arith_nv",
+ "vmeta_arith_nv2",
+ "vmeta_unm",
+ "vmeta_arith_vn",
+ "vmeta_arith_vv",
+ "vmeta_arith_vn2",
+ "vmeta_arith_vv2",
+ "vmeta_len",
+ "BC_LEN_Z",
+ "vmeta_callt",
+ "BC_CALLT_Z",
+ "vmeta_for",
+ "ff_assert",
+ "fff_fallback",
+ "fff_res",
+ "ff_type",
+ "fff_resn",
+ "ff_getmetatable",
+ "fff_restv",
+ "ff_setmetatable",
+ "ff_rawget",
+ "ff_tonumber",
+ "ff_tostring",
+ "fff_gcstep",
+ "ff_next",
+ "ff_pairs",
+ "ff_ipairs_aux",
+ "ff_ipairs",
+ "ff_pcall",
+ "ff_xpcall",
+ "ff_coroutine_resume",
+ "ff_coroutine_wrap_aux",
+ "ff_coroutine_yield",
+ "ff_math_abs",
+ "fff_resi",
+ "fff_res1",
+ "ff_math_floor",
+ "ff_math_ceil",
+ "ff_math_sqrt",
+ "ff_math_log",
+ "ff_math_log10",
+ "ff_math_exp",
+ "ff_math_sin",
+ "ff_math_cos",
+ "ff_math_tan",
+ "ff_math_asin",
+ "ff_math_acos",
+ "ff_math_atan",
+ "ff_math_sinh",
+ "ff_math_cosh",
+ "ff_math_tanh",
+ "ff_math_pow",
+ "ff_math_atan2",
+ "ff_math_fmod",
+ "ff_math_deg",
+ "ff_math_rad",
+ "ff_math_ldexp",
+ "ff_math_frexp",
+ "ff_math_modf",
+ "ff_math_min",
+ "ff_math_max",
+ "ff_string_len",
+ "ff_string_byte",
+ "ff_string_char",
+ "fff_newstr",
+ "ff_string_sub",
+ "ff_string_rep",
+ "ff_string_reverse",
+ "ff_string_lower",
+ "ff_string_upper",
+ "ff_table_getn",
+ "ff_bit_band",
+ "fff_tobit_fb",
+ "fff_bitop_fb",
+ "ff_bit_bor",
+ "ff_bit_bxor",
+ "ff_bit_bswap",
+ "ff_bit_bnot",
+ "ff_bit_lshift",
+ "ff_bit_rshift",
+ "ff_bit_arshift",
+ "ff_bit_rol",
+ "ff_bit_ror",
+ "ff_bit_tobit",
+ "vm_record",
+ "vm_rethook",
+ "vm_inshook",
+ "cont_hook",
+ "vm_hotloop",
+ "vm_callhook",
+ "vm_hotcall",
+ "vm_exit_handler",
+ "vm_exit_interp",
+ "vm_floor",
+ "vm_ceil",
+ "vm_trunc",
+ "vm_modi",
+ "vm_foldarith",
+ "vm_cachesync",
+ "vm_ffi_callback",
+ "vm_ffi_call",
+ "BC_ISEQN_Z",
+ "BC_ISNEN_Z",
+ "BC_MODVN_Z",
+ "BC_TGETS_Z",
+ "BC_TSETS_Z",
+ "BC_RETV_Z",
+ (const char *)0
+};
+static const char *const extnames[] = {
+ "lj_state_growstack",
+ "lj_meta_tget",
+ "lj_meta_tset",
+ "lj_meta_comp",
+ "lj_meta_equal",
+ "lj_meta_equal_cd",
+ "lj_meta_arith",
+ "lj_meta_len",
+ "lj_meta_call",
+ "lj_meta_for",
+ "lj_tab_get",
+ "lj_str_fromnumber",
+ "lj_str_fromnum",
+ "lj_tab_next",
+ "lj_tab_getinth",
+ "lj_ffh_coroutine_wrap_err",
+ "floor",
+ "ceil",
+ "sqrt",
+ "log",
+ "log10",
+ "exp",
+ "sin",
+ "cos",
+ "tan",
+ "asin",
+ "acos",
+ "atan",
+ "sinh",
+ "cosh",
+ "tanh",
+ "pow",
+ "atan2",
+ "fmod",
+ "ldexp",
+ "frexp",
+ "modf",
+ "lj_str_new",
+ "lj_tab_len",
+ "lj_gc_step",
+ "lj_dispatch_ins",
+ "lj_trace_hot",
+ "lj_dispatch_call",
+ "lj_trace_exit",
+ "lj_err_throw",
+ "trunc",
+ "lj_ccallback_enter",
+ "lj_ccallback_leave",
+ "lj_meta_cat",
+ "lj_gc_barrieruv",
+ "lj_func_closeuv",
+ "lj_func_newL_gc",
+ "lj_tab_new",
+ "lj_tab_dup",
+ "lj_gc_step_fixtop",
+ "lj_tab_newkey",
+ "lj_tab_reasize",
+ (const char *)0
+};
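+/* globnames[] parallels the GLOB_* enum above and extnames[] lists the
+   external C/libm symbols referenced by the action list; buildvm uses both
+   tables to resolve global labels and external relocations when it emits
+   the final VM code. */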
+#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V)
+#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V)
+#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V)
+#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V)
+#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V)
+#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V)
+#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V)
+#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V)
+#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V)
+#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V)
+#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V)
+#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V)
+#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V)
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
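+/* The DtN() macros compute struct field offsets by addressing a field off a
+   NULL pointer, e.g. Dt1(->base) == offsetof(lua_State, base).  DISPATCH_GL()
+   and DISPATCH_J() rebase global_State/jit_State fields onto the dispatch
+   table pointer, and PC2PROTO() addresses GCproto fields relative to the
+   start of a prototype's bytecode. */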
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
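+/* Each dasm_put(Dst, ofs, ...) copies the action-list fragment that starts at
+   index ofs in build_actionlist[] and substitutes the remaining arguments
+   (type tags, struct offsets, shift amounts) for the runtime-value
+   placeholders embedded in that fragment. */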
+static void build_subroutines(BuildCtx *ctx)
+{
+ dasm_put(Dst, 0);
+ dasm_put(Dst, 1, FRAME_P, LJ_TTRUE, FRAME_TYPE, FRAME_C, ~LJ_VMST_C, Dt1(->base), DISPATCH_GL(vmstate), 31-3, Dt1(->top));
+ dasm_put(Dst, 55, Dt1(->cframe), 56+(14-14)*4, 128+(14-14)*8, 56+(15-14)*4, 128+(15-14)*8, 56+(16-14)*4, 128+(16-14)*8, 56+(17-14)*4, 128+(17-14)*8, 56+(18-14)*4, 128+(18-14)*8, 56+(19-14)*4, 128+(19-14)*8, 56+(20-14)*4, 128+(20-14)*8, 56+(21-14)*4, 128+(21-14)*8, 56+(22-14)*4, 128+(22-14)*8, 56+(23-14)*4, 128+(23-14)*8);
+ dasm_put(Dst, 105, 56+(24-14)*4, 128+(24-14)*8, 56+(25-14)*4, 128+(25-14)*8, 56+(26-14)*4, 128+(26-14)*8, 56+(27-14)*4, 128+(27-14)*8, 56+(28-14)*4, 128+(28-14)*8, 56+(29-14)*4, 128+(29-14)*8, 56+(30-14)*4, 128+(30-14)*8, 56+(31-14)*4, 128+(31-14)*8, Dt1(->maxstack));
+ dasm_put(Dst, 154, Dt1(->top), 31-3, Dt1(->top), ~LJ_VMST_C, Dt1(->glref), Dt2(->vmstate), LJ_TISNUM, Dt1(->base), Dt1(->glref), LJ_TFALSE, LJ_TNIL, ~LJ_VMST_INTERP, GG_G2DISP);
+ dasm_put(Dst, 217, DISPATCH_GL(vmstate), LUA_MINSTACK, Dt1(->base), Dt1(->top), 32-3, Dt1(->base), Dt1(->top), Dt7(->pc), 56+(14-14)*4, 128+(14-14)*8, 56+(15-14)*4, 128+(15-14)*8, 56+(16-14)*4, 128+(16-14)*8, 56+(17-14)*4);
+ dasm_put(Dst, 278, 128+(17-14)*8, 56+(18-14)*4, 128+(18-14)*8, 56+(19-14)*4, 128+(19-14)*8, 56+(20-14)*4, 128+(20-14)*8, 56+(21-14)*4, 128+(21-14)*8, 56+(22-14)*4, 128+(22-14)*8, 56+(23-14)*4, 128+(23-14)*8, 56+(24-14)*4, 128+(24-14)*8, 56+(25-14)*4, 128+(25-14)*8, 56+(26-14)*4, 128+(26-14)*8, 56+(27-14)*4, 128+(27-14)*8, 56+(28-14)*4);
+ dasm_put(Dst, 325, 128+(28-14)*8, 56+(29-14)*4, 128+(29-14)*8, 56+(30-14)*4, 128+(30-14)*8, 56+(31-14)*4, 128+(31-14)*8, Dt1(->glref), Dt1(->status), FRAME_CP, CFRAME_RESUME, GG_G2DISP, Dt1(->cframe), Dt1(->base), LJ_TISNUM, Dt1(->top), Dt1(->status), ~LJ_VMST_INTERP, DISPATCH_GL(vmstate), FRAME_TYPE, LJ_TNIL);
+ dasm_put(Dst, 393, 56+(14-14)*4, 128+(14-14)*8, 56+(15-14)*4, 128+(15-14)*8, 56+(16-14)*4, 128+(16-14)*8, 56+(17-14)*4, 128+(17-14)*8, 56+(18-14)*4, 128+(18-14)*8, 56+(19-14)*4, 128+(19-14)*8, 56+(20-14)*4, 128+(20-14)*8, 56+(21-14)*4, 128+(21-14)*8, 56+(22-14)*4, 128+(22-14)*8, 56+(23-14)*4);
+ dasm_put(Dst, 440, 128+(23-14)*8, 56+(24-14)*4, 128+(24-14)*8, 56+(25-14)*4, 128+(25-14)*8, 56+(26-14)*4, 128+(26-14)*8, 56+(27-14)*4, 128+(27-14)*8, 56+(28-14)*4, 128+(28-14)*8, 56+(29-14)*4, 128+(29-14)*8, 56+(30-14)*4, 128+(30-14)*8, 56+(31-14)*4, 128+(31-14)*8, FRAME_CP, 56+(14-14)*4, 128+(14-14)*8);
+ dasm_put(Dst, 488, 56+(15-14)*4, 128+(15-14)*8, 56+(16-14)*4, 128+(16-14)*8, 56+(17-14)*4, 128+(17-14)*8, 56+(18-14)*4, 128+(18-14)*8, 56+(19-14)*4, 128+(19-14)*8, 56+(20-14)*4, 128+(20-14)*8, 56+(21-14)*4, 128+(21-14)*8, 56+(22-14)*4, 128+(22-14)*8, 56+(23-14)*4, 128+(23-14)*8, 56+(24-14)*4, 128+(24-14)*8, 56+(25-14)*4, 128+(25-14)*8);
+ dasm_put(Dst, 535, 56+(26-14)*4, 128+(26-14)*8, 56+(27-14)*4, 128+(27-14)*8, 56+(28-14)*4, 128+(28-14)*8, 56+(29-14)*4, 128+(29-14)*8, 56+(30-14)*4, 128+(30-14)*8, 56+(31-14)*4, 128+(31-14)*8, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, Dt1(->base), LJ_TISNUM, Dt1(->top));
+ dasm_put(Dst, 586, ~LJ_VMST_INTERP, LJ_TNIL, DISPATCH_GL(vmstate), LJ_TFUNC, Dt7(->pc), 56+(14-14)*4, 128+(14-14)*8, 56+(15-14)*4, 128+(15-14)*8, 56+(16-14)*4, 128+(16-14)*8, 56+(17-14)*4, 128+(17-14)*8, 56+(18-14)*4, 128+(18-14)*8, 56+(19-14)*4, 128+(19-14)*8, 56+(20-14)*4);
+ dasm_put(Dst, 653, 128+(20-14)*8, 56+(21-14)*4, 128+(21-14)*8, 56+(22-14)*4, 128+(22-14)*8, 56+(23-14)*4, 128+(23-14)*8, 56+(24-14)*4, 128+(24-14)*8, 56+(25-14)*4, 128+(25-14)*8, 56+(26-14)*4, 128+(26-14)*8, 56+(27-14)*4, 128+(27-14)*8, 56+(28-14)*4, 128+(28-14)*8, 56+(29-14)*4, 128+(29-14)*8, 56+(30-14)*4, 128+(30-14)*8, 56+(31-14)*4);
+ dasm_put(Dst, 700, 128+(31-14)*8, Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), Dt1(->glref), FRAME_CP, GG_G2DISP);
+#if LJ_HASFFI
+ dasm_put(Dst, 738);
+#endif
+ dasm_put(Dst, 740, Dt7(->pc));
+#if LJ_HASFFI
+ dasm_put(Dst, 746);
+#endif
+ dasm_put(Dst, 749, PC2PROTO(k));
+#if LJ_HASFFI
+ dasm_put(Dst, 754);
+#endif
+ dasm_put(Dst, 762, Dt1(->base), DISPATCH_GL(tmptv), LJ_TSTR, DISPATCH_GL(tmptv), LJ_TTAB, DISPATCH_GL(tmptv2), LJ_TSTR);
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 807);
+ }
+ dasm_put(Dst, 811, DISPATCH_GL(tmptv));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 816);
+ } else {
+ dasm_put(Dst, 819);
+ }
+ dasm_put(Dst, 821, Dt1(->base), FRAME_CONT, Dt1(->top), DISPATCH_GL(tmptv), LJ_TSTR, DISPATCH_GL(tmptv), LJ_TTAB, DISPATCH_GL(tmptv2), LJ_TSTR);
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 889);
+ }
+ dasm_put(Dst, 893, DISPATCH_GL(tmptv));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 898);
+ } else {
+ dasm_put(Dst, 901);
+ }
+ dasm_put(Dst, 903, Dt1(->base), FRAME_CONT, Dt1(->top));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 948);
+ } else {
+ dasm_put(Dst, 950);
+ }
+ dasm_put(Dst, 952);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 954);
+ } else {
+ dasm_put(Dst, 956);
+ }
+ dasm_put(Dst, 958, Dt1(->base), -(BCBIAS_J*4 >> 16), LJ_TTRUE, LJ_TTRUE, Dt1(->base));
+#if LJ_HASFFI
+ dasm_put(Dst, 1021, Dt1(->base));
+#endif
+ dasm_put(Dst, 1032);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1039);
+ }
+ dasm_put(Dst, 1044);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1058);
+ }
+ dasm_put(Dst, 1061);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1064);
+ }
+ dasm_put(Dst, 1067, Dt1(->base), FRAME_CONT);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1091);
+#endif
+ dasm_put(Dst, 1093, Dt1(->base));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1101);
+#else
+ dasm_put(Dst, 1108);
+#endif
+ dasm_put(Dst, 1111, Dt1(->base), Dt7(->pc), Dt1(->base), Dt1(->base));
+#if LJ_HASJIT
+ dasm_put(Dst, 1159);
+#endif
+ dasm_put(Dst, 1161);
+#if LJ_HASJIT
+ dasm_put(Dst, 1163, BC_JFORI);
+#endif
+ dasm_put(Dst, 1166);
+#if LJ_HASJIT
+ dasm_put(Dst, 1168, BC_JFORI);
+#endif
+ dasm_put(Dst, 1171, BC_FORI, LJ_TFALSE, ~LJ_TISNUM+1, 31-3, Dt8(->upvalue), LJ_TTAB, Dt6(->metatable));
+ dasm_put(Dst, 1234, LJ_TNIL, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable]), Dt6(->hmask), LJ_TTAB, Dt5(->hash), Dt6(->node), 31-5, 31-3, DtB(->key), 4+offsetof(Node, key), DtB(->val), 4+offsetof(Node, val), LJ_TSTR, DtB(->next));
+ dasm_put(Dst, 1282, LJ_TNIL, LJ_TUDATA, ~LJ_TISNUM+1, 31-2, DISPATCH_GL(gcroot[GCROOT_BASEMT]), LJ_TTAB, Dt6(->metatable), LJ_TTAB, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable));
+ dasm_put(Dst, 1337, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist), LJ_TTAB, LJ_TSTR, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), Dt1(->base));
+ dasm_put(Dst, 1397, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1407);
+ } else {
+ dasm_put(Dst, 1410);
+ }
+ dasm_put(Dst, 1413, LJ_TSTR, LJ_TTAB, Dt1(->base), Dt1(->top), LJ_TNIL, (2+1)*8, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1464, Dt6(->metatable), Dt8(->upvalue[0]));
+#else
+ dasm_put(Dst, 1473, Dt8(->upvalue[0]));
+#endif
+ dasm_put(Dst, 1477, (3+1)*8);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1489);
+ } else {
+ dasm_put(Dst, 1491);
+ }
+ dasm_put(Dst, 1493, LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1500);
+ } else {
+ dasm_put(Dst, 1505);
+ }
+ dasm_put(Dst, 1517, Dt6(->asize), Dt6(->array));
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 1522);
+ }
+ dasm_put(Dst, 1524);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1528, 31-3);
+ } else {
+ dasm_put(Dst, 1533, 31-3);
+ }
+ dasm_put(Dst, 1537, LJ_TNIL, (0+1)*8, (2+1)*8, Dt6(->hmask), (0+1)*8, (0+1)*8, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1585, Dt6(->metatable), Dt8(->upvalue[0]));
+#else
+ dasm_put(Dst, 1594, Dt8(->upvalue[0]));
+#endif
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1598);
+ } else {
+ dasm_put(Dst, 1600);
+ }
+ dasm_put(Dst, 1602, (3+1)*8, DISPATCH_GL(hookmask), 32-HOOK_ACTIVE_SHIFT, 8+FRAME_PCALL, DISPATCH_GL(hookmask), LJ_TFUNC, 32-HOOK_ACTIVE_SHIFT, 16+FRAME_PCALL, LJ_TTHREAD, Dt1(->status), Dt1(->cframe));
+ dasm_put(Dst, 1662, Dt1(->top), LUA_YIELD, Dt1(->base), Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->base), LUA_YIELD, Dt1(->top), ~LJ_VMST_INTERP, Dt1(->base), DISPATCH_GL(vmstate));
+ dasm_put(Dst, 1724, Dt1(->maxstack), Dt1(->top), FRAME_TYPE, LJ_TTRUE, FRAME_TYPE, LJ_TFALSE, Dt1(->top), (2+1)*8, 32-3);
+ dasm_put(Dst, 1787, Dt8(->upvalue[0].gcr), Dt1(->status), Dt1(->cframe), Dt1(->top), LUA_YIELD, Dt1(->base), Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->base), LUA_YIELD, Dt1(->top), ~LJ_VMST_INTERP);
+ dasm_put(Dst, 1846, Dt1(->base), DISPATCH_GL(vmstate), Dt1(->maxstack), Dt1(->top), FRAME_TYPE, 32-3, Dt1(->cframe));
+ dasm_put(Dst, 1903, Dt1(->base), CFRAME_RESUME, Dt1(->top), LUA_YIELD, Dt1(->cframe), Dt1(->status));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1928);
+ }
+ dasm_put(Dst, 1949, (1+1)*8, FRAME_TYPE);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 1991, 31-11, 32-21, 31-11);
+ dasm_put(Dst, 2073, 31-11, 32-21, 31-11);
+ } else {
+ dasm_put(Dst, 2127);
+ }
+ dasm_put(Dst, 2154);
+ dasm_put(Dst, 2213);
+ dasm_put(Dst, 2271);
+ dasm_put(Dst, 2327, Dt8(->upvalue[0]));
+ dasm_put(Dst, 2393);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2397);
+ } else {
+ dasm_put(Dst, 2412);
+ }
+ dasm_put(Dst, 2430, DISPATCH_GL(tmptv), DISPATCH_GL(tmptv));
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 2452);
+ }
+ dasm_put(Dst, 2457, (2+1)*8);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2461);
+ } else {
+ dasm_put(Dst, 2464);
+ }
+ dasm_put(Dst, 2466, (2+1)*8);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2488);
+ } else {
+ dasm_put(Dst, 2557);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2582);
+ } else {
+ dasm_put(Dst, 2651);
+ }
+ dasm_put(Dst, 2676, LJ_TSTR, Dt5(->len), LJ_TSTR, Dt5(->len));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2703, Dt5([1]), (0+1)*8);
+ } else {
+ dasm_put(Dst, 2715, Dt5([1]), 31-3);
+ }
+ dasm_put(Dst, 2731, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2742);
+ } else {
+ dasm_put(Dst, 2750);
+ }
+ dasm_put(Dst, 2761, Dt1(->base), Dt1(->base), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 2789);
+ }
+ dasm_put(Dst, 2791);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2797);
+ } else {
+ dasm_put(Dst, 2799);
+ }
+ dasm_put(Dst, 2801);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2805);
+ } else {
+ dasm_put(Dst, 2814);
+ }
+ dasm_put(Dst, 2825, LJ_TSTR);
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 2830);
+ }
+ dasm_put(Dst, 2834, Dt5(->len), sizeof(GCstr)-1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2884);
+ } else {
+ dasm_put(Dst, 2886);
+ }
+ dasm_put(Dst, 2888, LJ_TSTR);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2895);
+ } else {
+ dasm_put(Dst, 2899);
+ }
+ dasm_put(Dst, 2906, Dt5(->len), DISPATCH_GL(tmpbuf.sz), Dt5([1]), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(strempty), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, DISPATCH_GL(tmpbuf.sz));
+ dasm_put(Dst, 2959, Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 3017, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3086);
+ } else {
+ dasm_put(Dst, 3096);
+ }
+ dasm_put(Dst, 3109);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3115);
+ } else {
+ dasm_put(Dst, 3117);
+ }
+ dasm_put(Dst, 3119);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3123);
+ } else {
+ dasm_put(Dst, 3126);
+ }
+ dasm_put(Dst, 3132);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3137);
+ } else {
+ dasm_put(Dst, 3147);
+ }
+ dasm_put(Dst, 3160);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3166);
+ } else {
+ dasm_put(Dst, 3168);
+ }
+ dasm_put(Dst, 3170);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3174);
+ } else {
+ dasm_put(Dst, 3177);
+ }
+ dasm_put(Dst, 3183);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3188);
+ } else {
+ dasm_put(Dst, 3198);
+ }
+ dasm_put(Dst, 3211);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3217);
+ } else {
+ dasm_put(Dst, 3219);
+ }
+ dasm_put(Dst, 3221);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3225);
+ } else {
+ dasm_put(Dst, 3228);
+ }
+ dasm_put(Dst, 3234);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3239);
+ } else {
+ dasm_put(Dst, 3249);
+ }
+ dasm_put(Dst, 3262);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3269);
+ } else {
+ dasm_put(Dst, 3279);
+ }
+ dasm_put(Dst, 3292);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3296);
+ } else {
+ dasm_put(Dst, 3311);
+ }
+ dasm_put(Dst, 3332);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3337);
+ } else {
+ dasm_put(Dst, 3352);
+ }
+ dasm_put(Dst, 3373);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3378);
+ } else {
+ dasm_put(Dst, 3393);
+ }
+ dasm_put(Dst, 3414);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3419);
+ } else {
+ dasm_put(Dst, 3434);
+ }
+ dasm_put(Dst, 3455);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3459);
+ } else {
+ dasm_put(Dst, 3474);
+ }
+ dasm_put(Dst, 3495);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3500);
+ } else {
+ dasm_put(Dst, 3510);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3523);
+ } else {
+ dasm_put(Dst, 3526);
+ }
+ dasm_put(Dst, 3532);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3540);
+ }
+ dasm_put(Dst, 3548);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3550);
+ }
+ dasm_put(Dst, 3558, Dt8(->f), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->base), 31-3, Dt1(->top), Dt7(->pc), FRAME_TYPE, LUA_MINSTACK);
+ dasm_put(Dst, 3621, Dt1(->base), Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 3647, DISPATCH_GL(hookmask), HOOK_VMEVENT, DISPATCH_GL(hookcount), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount));
+#endif
+ dasm_put(Dst, 3669, DISPATCH_GL(hookmask), HOOK_ACTIVE, GG_DISP2STATIC, DISPATCH_GL(hookmask), DISPATCH_GL(hookcount), HOOK_ACTIVE, 31-LUA_HOOKLINE, DISPATCH_GL(hookcount), Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 3716, GG_DISP2STATIC);
+#if LJ_HASJIT
+ dasm_put(Dst, 3734, GG_DISP2J, Dt7(->pc), DISPATCH_J(L), PC2PROTO(framesize), Dt1(->base), 31-3, Dt1(->top));
+#endif
+ dasm_put(Dst, 3757);
+#if LJ_HASJIT
+ dasm_put(Dst, 3760);
+#endif
+ dasm_put(Dst, 3763);
+#if LJ_HASJIT
+ dasm_put(Dst, 3765);
+#endif
+ dasm_put(Dst, 3768, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 3791, -(16+32*8+32*4), 16+32*8+2*4, -GG_DISP2G-32768, ~LJ_VMST_EXIT, 16+32*8+32*4, DISPATCH_GL(vmstate), 16+0*8, 16+1*8, 16+2*8, 16+3*8, 16+4*8, 16+5*8, 16+6*8, 16+7*8, 16+32*8+32*4, 16+8*8, 16+9*8, 16+10*8, 16+11*8, 16+32*8+1*4, 16+12*8, 16+13*8);
+ dasm_put(Dst, 3838, 16+14*8, 16+15*8, 16+16*8, 16+17*8, 16+18*8, 16+19*8, 16+32*8+0*4, 16+20*8, 16+21*8, 16+22*8, 16+23*8, 16+24*8, 16+25*8, 16+26*8, 16+27*8, DISPATCH_GL(jit_L), 16+28*8, 16+29*8, 16+30*8, 16+31*8, DISPATCH_GL(jit_base), 32-2);
+ dasm_put(Dst, 3887, DISPATCH_J(L), DISPATCH_GL(jit_L), DISPATCH_J(parent), Dt1(->base), GG_DISP2J, DISPATCH_J(exitno), Dt1(->cframe), Dt1(->base));
+#endif
+ dasm_put(Dst, 3915);
+#if LJ_HASJIT
+ dasm_put(Dst, 3917, -GG_DISP2G-32768, 31-3, Dt7(->pc), DISPATCH_GL(jit_L), PC2PROTO(k), LJ_TISNUM, LJ_TNIL, DISPATCH_GL(vmstate), BC_FUNCF*4);
+#endif
+ dasm_put(Dst, 3974);
+#if LJ_HASJIT
+ dasm_put(Dst, 3982);
+#endif
+ dasm_put(Dst, 3985);
+#if LJ_HASJIT
+ dasm_put(Dst, 4065);
+#else
+ dasm_put(Dst, 4087);
+#endif
+ dasm_put(Dst, 4089);
+#if LJ_HASFFI
+#define DtE(_V) (int)(ptrdiff_t)&(((CTState *)0)_V)
+ dasm_put(Dst, 4113, 56+(14-14)*4, 128+(14-14)*8, 56+(15-14)*4, 128+(15-14)*8, 56+(16-14)*4, 128+(16-14)*8, 56+(17-14)*4, 128+(17-14)*8, 56+(18-14)*4, 128+(18-14)*8, 56+(19-14)*4, 128+(19-14)*8, 56+(20-14)*4, 128+(20-14)*8, 56+(21-14)*4, 128+(21-14)*8, 56+(22-14)*4, 128+(22-14)*8, 56+(23-14)*4, 128+(23-14)*8, 56+(24-14)*4, 128+(24-14)*8);
+ dasm_put(Dst, 4161, 56+(25-14)*4, 128+(25-14)*8, 56+(26-14)*4, 128+(26-14)*8, 56+(27-14)*4, 128+(27-14)*8, 56+(28-14)*4, 128+(28-14)*8, 56+(29-14)*4, 128+(29-14)*8, 56+(30-14)*4, 128+(30-14)*8, 56+(31-14)*4, 128+(31-14)*8, Dt2(->ctype_state), GG_G2DISP, DtE(->cb.slot), DtE(->cb.gpr[0]), DtE(->cb.fpr[0]), DtE(->cb.gpr[1]), DtE(->cb.fpr[1]), DtE(->cb.gpr[2]));
+ dasm_put(Dst, 4208, DtE(->cb.fpr[2]), DtE(->cb.gpr[3]), DtE(->cb.fpr[3]), DtE(->cb.gpr[4]), DtE(->cb.fpr[4]), DtE(->cb.gpr[5]), DtE(->cb.fpr[5]), DtE(->cb.gpr[6]), DtE(->cb.fpr[6]), DtE(->cb.gpr[7]), DtE(->cb.fpr[7]), 272+8, DtE(->cb.stack), Dt1(->base), LJ_TISNUM, Dt1(->top), LJ_TNIL, ~LJ_VMST_INTERP, DISPATCH_GL(vmstate), Dt7(->pc));
+#endif
+ dasm_put(Dst, 4272);
+#if LJ_HASFFI
+ dasm_put(Dst, 4274, DISPATCH_GL(ctype_state), Dt1(->base), Dt1(->top), DtE(->L), DtE(->cb.gpr[0]), DtE(->cb.fpr[0]), DtE(->cb.gpr[1]));
+#endif
+ dasm_put(Dst, 4295);
+#if LJ_HASFFI
+#define DtF(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V)
+ dasm_put(Dst, 4297, DtF(->spadj), DtF(->nsp), DtF(->nfpr), DtF(->stack), 31-2, DtF(->fpr[0]), DtF(->fpr[1]), DtF(->fpr[2]), DtF(->fpr[3]), DtF(->fpr[4]), DtF(->fpr[5]), DtF(->fpr[6]), DtF(->fpr[7]), DtF(->func), DtF(->gpr[1]), DtF(->gpr[2]));
+ dasm_put(Dst, 4354, DtF(->gpr[3]), DtF(->gpr[4]), DtF(->gpr[5]), DtF(->gpr[6]), DtF(->gpr[7]), DtF(->gpr[0]), DtF(->gpr[0]), DtF(->fpr[0]), DtF(->gpr[1]), DtF(->gpr[2]), DtF(->gpr[3]));
+#endif
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ dasm_put(Dst, 4386, defop);
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
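+  /* In LuaJIT bytecode a comparison is always followed by a BC_JMP; the
+     handlers below fetch that jump's target and apply it only when the
+     comparison holds, otherwise they step past it. */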
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 4388, -(BCBIAS_J*4 >> 16));
+ if (op == BC_ISLT) {
+ dasm_put(Dst, 4405);
+ } else if (op == BC_ISGE) {
+ dasm_put(Dst, 4408);
+ } else if (op == BC_ISLE) {
+ dasm_put(Dst, 4411);
+ } else {
+ dasm_put(Dst, 4414);
+ }
+ dasm_put(Dst, 4417);
+ if (op == BC_ISLT) {
+ dasm_put(Dst, 4456);
+ } else if (op == BC_ISGE) {
+ dasm_put(Dst, 4459);
+ } else if (op == BC_ISLE) {
+ dasm_put(Dst, 4462);
+ } else {
+ dasm_put(Dst, 4466);
+ }
+ dasm_put(Dst, 4470);
+ } else {
+ dasm_put(Dst, 4473, -(BCBIAS_J*4 >> 16));
+ if (op == BC_ISLT) {
+ dasm_put(Dst, 4490);
+ } else if (op == BC_ISGE) {
+ dasm_put(Dst, 4493);
+ } else if (op == BC_ISLE) {
+ dasm_put(Dst, 4496);
+ } else {
+ dasm_put(Dst, 4500);
+ }
+ dasm_put(Dst, 4504);
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 4517, -(BCBIAS_J*4 >> 16));
+ if (vk) {
+ dasm_put(Dst, 4530);
+ } else {
+ dasm_put(Dst, 4533);
+ }
+ } else {
+ dasm_put(Dst, 4536, -(BCBIAS_J*4 >> 16));
+ if (vk) {
+ dasm_put(Dst, 4553);
+ } else {
+ dasm_put(Dst, 4557);
+ }
+ dasm_put(Dst, 4561);
+ }
+ dasm_put(Dst, 4573);
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 4575);
+ }
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 4578, LJ_TCDATA, LJ_TCDATA);
+ }
+ dasm_put(Dst, 4583, ~LJ_TISPRI);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 4588);
+ }
+ dasm_put(Dst, 4590, ~LJ_TISTABUD);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 4593);
+ }
+ dasm_put(Dst, 4596);
+ if (vk) {
+ dasm_put(Dst, 4604);
+ } else {
+ dasm_put(Dst, 4609);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 4614);
+ } else {
+ dasm_put(Dst, 4629);
+ }
+ dasm_put(Dst, 4632, Dt6(->metatable), 1-vk, Dt6(->nomm), 1<> 16));
+ if (vk) {
+ dasm_put(Dst, 4677);
+ } else {
+ dasm_put(Dst, 4679);
+ }
+ dasm_put(Dst, 4681);
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 4693, -(BCBIAS_J*4 >> 16));
+ if (vk) {
+ dasm_put(Dst, 4705);
+ } else {
+ dasm_put(Dst, 4707);
+ }
+ dasm_put(Dst, 4709);
+ } else {
+ if (vk) {
+ dasm_put(Dst, 4716);
+ } else {
+ dasm_put(Dst, 4718);
+ }
+ dasm_put(Dst, 4720, -(BCBIAS_J*4 >> 16));
+ }
+ if (vk) {
+ dasm_put(Dst, 4733);
+ if (!LJ_HASFFI) {
+ dasm_put(Dst, 4738);
+ }
+ } else {
+ dasm_put(Dst, 4740);
+ if (!LJ_HASFFI) {
+ dasm_put(Dst, 4744);
+ }
+ dasm_put(Dst, 4746);
+ }
+ dasm_put(Dst, 4749);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 4760, LJ_TCDATA);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 4768);
+ }
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ dasm_put(Dst, 4792, 32-3);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 4799, LJ_TCDATA);
+ }
+ dasm_put(Dst, 4802);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 4804);
+ }
+ dasm_put(Dst, 4807, -(BCBIAS_J*4 >> 16));
+ if (vk) {
+ dasm_put(Dst, 4813);
+ } else {
+ dasm_put(Dst, 4815);
+ }
+ dasm_put(Dst, 4817);
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ dasm_put(Dst, 4829);
+ if (op == BC_IST || op == BC_ISF) {
+ dasm_put(Dst, 4833, LJ_TTRUE, -(BCBIAS_J*4 >> 16));
+ if (op == BC_IST) {
+ dasm_put(Dst, 4840);
+ } else {
+ dasm_put(Dst, 4842);
+ }
+ dasm_put(Dst, 4844);
+ } else {
+ dasm_put(Dst, 4846, LJ_TFALSE);
+ if (op == BC_ISTC) {
+ dasm_put(Dst, 4851);
+ } else {
+ dasm_put(Dst, 4854);
+ }
+ dasm_put(Dst, 4857, -(BCBIAS_J*4 >> 16));
+ }
+ dasm_put(Dst, 4864);
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ dasm_put(Dst, 4875);
+ break;
+ case BC_NOT:
+ dasm_put(Dst, 4888, LJ_TTRUE);
+ break;
+ case BC_UNM:
+ dasm_put(Dst, 4904);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 4908);
+ }
+ dasm_put(Dst, 4936);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 4946);
+ } else {
+ dasm_put(Dst, 4949);
+ }
+ break;
+ case BC_LEN:
+ dasm_put(Dst, 4958, LJ_TSTR, Dt5(->len));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 4968);
+ } else {
+ dasm_put(Dst, 4973);
+ }
+ dasm_put(Dst, 4980, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 4994, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 5001);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 5007, Dt6(->nomm), 1<base), 32-3, Dt1(->base));
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ dasm_put(Dst, 5810, 32-1, LJ_TSTR);
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ dasm_put(Dst, 5829, 32-1, LJ_TCDATA);
+#endif
+ break;
+ case BC_KSHORT:
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5848, 31-13);
+ } else {
+ dasm_put(Dst, 5864, 31-13, 31-20);
+ }
+ break;
+ case BC_KNUM:
+ dasm_put(Dst, 5892);
+ break;
+ case BC_KPRI:
+ dasm_put(Dst, 5905, 32-3);
+ break;
+ case BC_KNIL:
+ dasm_put(Dst, 5920);
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ dasm_put(Dst, 5939, 32-1, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_USETV:
+ dasm_put(Dst, 5960, 32-1, offsetof(GCfuncL, uvptr), DtA(->marked), DtA(->v), LJ_GC_BLACK, DtA(->closed), -(LJ_TISNUM+1), LJ_TISGCV - (LJ_TISNUM+1), Dt4(->gch.marked), LJ_GC_WHITES, GG_DISP2G);
+ break;
+ case BC_USETS:
+ dasm_put(Dst, 6013, 32-1, 32-1, offsetof(GCfuncL, uvptr), DtA(->marked), DtA(->v), LJ_GC_BLACK, Dt5(->marked), DtA(->closed), LJ_TSTR, LJ_GC_WHITES, GG_DISP2G);
+ break;
+ case BC_USETN:
+ dasm_put(Dst, 6064, 32-1, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_USETP:
+ dasm_put(Dst, 6085, 32-1, 32-3, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+
+ case BC_UCLO:
+ dasm_put(Dst, 6108, Dt1(->openupval), 32-1, -(BCBIAS_J*4 >> 16), Dt1(->base), Dt1(->base));
+ break;
+
+ case BC_FNEW:
+ dasm_put(Dst, 6138, 32-1, Dt1(->base), Dt1(->base), LJ_TFUNC);
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ dasm_put(Dst, 6166, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base));
+ if (op == BC_TNEW) {
+ dasm_put(Dst, 6179);
+ } else {
+ dasm_put(Dst, 6188, 32-1);
+ }
+ dasm_put(Dst, 6195, Dt1(->base), LJ_TTAB);
+ if (op == BC_TNEW) {
+ dasm_put(Dst, 6212);
+ }
+ dasm_put(Dst, 6217);
+ break;
+
+ case BC_GGET:
+ case BC_GSET:
+ dasm_put(Dst, 6226, 32-1, Dt7(->env));
+ if (op == BC_GGET) {
+ dasm_put(Dst, 6234);
+ } else {
+ dasm_put(Dst, 6237);
+ }
+ break;
+
+ case BC_TGETV:
+ dasm_put(Dst, 6240);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6244);
+ } else {
+ dasm_put(Dst, 6246);
+ }
+ dasm_put(Dst, 6248, LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6254, Dt6(->asize), Dt6(->array), 31-3);
+ } else {
+ dasm_put(Dst, 6264, Dt6(->asize), Dt6(->array), 31-3);
+ }
+ dasm_put(Dst, 6281, LJ_TNIL, Dt6(->metatable), Dt6(->nomm), 1<hmask), Dt5(->hash), Dt6(->node), 31-5, 31-3, DtB(->key), 4+offsetof(Node, key), DtB(->val), 4+offsetof(Node, val), LJ_TSTR, LJ_TNIL, DtB(->next));
+ dasm_put(Dst, 6387, LJ_TNIL, Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), 31-3);
+ } else {
+ dasm_put(Dst, 6479, Dt6(->asize), Dt6(->array), 31-3);
+ }
+ dasm_put(Dst, 6496, Dt6(->marked), LJ_TNIL, LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<marked), Dt6(->gclist));
+ break;
+ case BC_TSETS:
+ dasm_put(Dst, 6560, 32-1, LJ_TTAB, Dt6(->hmask), Dt5(->hash), Dt6(->node), Dt6(->nomm), 31-5, 31-3, Dt6(->marked), DtB(->key), 4+offsetof(Node, key), DtB(->val), DtB(->next), LJ_TSTR, LJ_TNIL);
+ dasm_put(Dst, 6611, LJ_GC_BLACK, DtB(->val), Dt6(->metatable), Dt6(->nomm), 1<metatable), DISPATCH_GL(tmptv), Dt1(->base), Dt6(->nomm), 1<base), DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist));
+ break;
+ case BC_TSETB:
+ dasm_put(Dst, 6691, 32-3, LJ_TTAB, Dt6(->asize), Dt6(->array), Dt6(->marked), LJ_TNIL, LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<marked), Dt6(->gclist));
+ break;
+
+ case BC_TSETM:
+ dasm_put(Dst, 6759, 32-3, Dt6(->asize), 31-3, Dt6(->marked), Dt6(->array), LJ_GC_BLACK, Dt1(->base), DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist));
+ dasm_put(Dst, 6828);
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ dasm_put(Dst, 6831);
+ break;
+ case BC_CALL:
+ dasm_put(Dst, 6833, LJ_TFUNC, Dt7(->pc));
+ break;
+
+ case BC_CALLMT:
+ dasm_put(Dst, 6854);
+ break;
+ case BC_CALLT:
+ dasm_put(Dst, 6856, LJ_TFUNC, FRAME_TYPE, Dt7(->ffid), FRAME_VARG, Dt7(->pc), -4-8, Dt7(->pc), PC2PROTO(k), FRAME_TYPEP);
+ dasm_put(Dst, 6920, FRAME_TYPE);
+ break;
+
+ case BC_ITERC:
+ dasm_put(Dst, 6929, LJ_TFUNC, Dt7(->pc));
+ break;
+
+ case BC_ITERN:
+#if LJ_HASJIT
+#endif
+ dasm_put(Dst, 6956, Dt6(->asize), Dt6(->array), 31-3, LJ_TNIL);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6978);
+ } else {
+ dasm_put(Dst, 6981);
+ }
+ dasm_put(Dst, 6985, -(BCBIAS_J*4 >> 16));
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 6993);
+ }
+ dasm_put(Dst, 6995, Dt6(->hmask), Dt6(->node), 31-5, 31-3, LJ_TNIL, DtB(->key), -(BCBIAS_J*4 >> 16));
+ break;
+
+ case BC_ISNEXT:
+ dasm_put(Dst, 7051, LJ_TTAB, LJ_TFUNC, LJ_TNIL, Dt8(->ffid), FF_next_N, 32-1, -(BCBIAS_J*4 >> 16), BC_JMP, BC_ITERC, -(BCBIAS_J*4 >> 16));
+ break;
+
+ case BC_VARG:
+ dasm_put(Dst, 7101, FRAME_VARG, Dt1(->maxstack), Dt1(->top), Dt1(->base), 32-3, Dt1(->base));
+ dasm_put(Dst, 7181);
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ dasm_put(Dst, 7187);
+ break;
+
+ case BC_RET:
+ dasm_put(Dst, 7189, FRAME_TYPE, FRAME_VARG, Dt7(->pc), PC2PROTO(k), FRAME_TYPEP);
+ break;
+
+ case BC_RET0: case BC_RET1:
+ dasm_put(Dst, 7259, FRAME_TYPE, FRAME_VARG);
+ if (op == BC_RET1) {
+ dasm_put(Dst, 7272);
+ }
+ dasm_put(Dst, 7275, Dt7(->pc), PC2PROTO(k));
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+#if LJ_HASJIT
+ dasm_put(Dst, 7303, GG_DISP2HOT, -HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7313, FORL_IDX*8+4);
+ if (vk) {
+ dasm_put(Dst, 7318, FORL_STEP*8+4, FORL_STOP*8+4, FORL_IDX*8+4);
+ } else {
+ dasm_put(Dst, 7332, FORL_STEP*8, FORL_STEP*8+4, FORL_STOP*8, FORL_STOP*8+4);
+ }
+ dasm_put(Dst, 7348, FORL_EXT*8);
+ if (op != BC_JFORL) {
+ dasm_put(Dst, 7355, 32-1);
+ }
+ dasm_put(Dst, 7358, FORL_EXT*8+4);
+ if (op != BC_JFORL) {
+ dasm_put(Dst, 7361);
+ }
+ if (op == BC_FORI) {
+ dasm_put(Dst, 7363);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 7366, -(BCBIAS_J*4 >> 16));
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 7371, -(BCBIAS_J*4 >> 16));
+ } else {
+ dasm_put(Dst, 7376, BC_JLOOP);
+ }
+ dasm_put(Dst, 7379);
+ if (vk) {
+ dasm_put(Dst, 7395);
+ }
+ }
+ if (vk) {
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7402, FORL_IDX*8);
+ } else {
+ dasm_put(Dst, 7406);
+ }
+ dasm_put(Dst, 7408, FORL_STEP*8, FORL_STOP*8, FORL_STEP*8, FORL_IDX*8);
+ } else {
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7418);
+ } else {
+ dasm_put(Dst, 7420, FORL_STEP*8, FORL_STOP*8);
+ }
+ dasm_put(Dst, 7429, FORL_IDX*8, FORL_STOP*8);
+ }
+ dasm_put(Dst, 7438);
+ if (op != BC_JFORL) {
+ dasm_put(Dst, 7440, 32-1);
+ }
+ dasm_put(Dst, 7443, FORL_EXT*8);
+ if (op != BC_JFORL) {
+ dasm_put(Dst, 7446);
+ }
+ dasm_put(Dst, 7448);
+ if (op == BC_JFORI) {
+ dasm_put(Dst, 7450, -(BCBIAS_J*4 >> 16));
+ }
+ dasm_put(Dst, 7453);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 7456);
+ } else if (op == BC_IFORL) {
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7459);
+ } else {
+ dasm_put(Dst, 7462);
+ }
+ dasm_put(Dst, 7465, -(BCBIAS_J*4 >> 16));
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 7469);
+ } else {
+ dasm_put(Dst, 7472, BC_JLOOP);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7475);
+ } else {
+ dasm_put(Dst, 7478);
+ }
+ dasm_put(Dst, 7490);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 7492, -(BCBIAS_J*4 >> 16));
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 7498);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 7501);
+ } else {
+ dasm_put(Dst, 7504, BC_JLOOP);
+ }
+ dasm_put(Dst, 7507);
+ if (op == BC_JFORI) {
+ dasm_put(Dst, 7510, BC_JLOOP);
+ }
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ dasm_put(Dst, 7516, GG_DISP2HOT, -HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ dasm_put(Dst, 7526, LJ_TNIL);
+ if (op == BC_JITERL) {
+ dasm_put(Dst, 7533, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 7538, 32-1, -(BCBIAS_J*4 >> 16));
+ }
+ dasm_put(Dst, 7546);
+ break;
+
+ case BC_LOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 7558, GG_DISP2HOT, -HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_ILOOP:
+ dasm_put(Dst, 7568);
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 7579, DISPATCH_J(trace), 32-1, DISPATCH_GL(vmstate), DtD(->mcode), DISPATCH_GL(jit_base), DISPATCH_GL(jit_L), GG_DISP2G+32768);
+#endif
+ break;
+
+ case BC_JMP:
+ dasm_put(Dst, 7598, 32-1, -(BCBIAS_J*4 >> 16));
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ dasm_put(Dst, 7614, GG_DISP2HOT, -HOTCOUNT_CALL);
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ dasm_put(Dst, 7624, Dt1(->maxstack), -4+PC2PROTO(numparams), -4+PC2PROTO(k), 31-3);
+ if (op != BC_JFUNCF) {
+ dasm_put(Dst, 7636);
+ }
+ dasm_put(Dst, 7639);
+ if (op == BC_JFUNCF) {
+ dasm_put(Dst, 7644, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 7648);
+ }
+ dasm_put(Dst, 7657);
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ dasm_put(Dst, 7663);
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ dasm_put(Dst, 7665, Dt1(->maxstack), 8+FRAME_VARG, -4+PC2PROTO(k), -4+PC2PROTO(numparams), LJ_TNIL);
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 7718, Dt8(->f));
+ } else {
+ dasm_put(Dst, 7721, DISPATCH_GL(wrapf));
+ }
+ dasm_put(Dst, 7724, Dt1(->maxstack), Dt1(->base), Dt1(->top), ~LJ_VMST_C);
+ if (op == BC_FUNCCW) {
+ dasm_put(Dst, 7737, Dt8(->f));
+ }
+ dasm_put(Dst, 7740, DISPATCH_GL(vmstate), Dt1(->base), 31-3, Dt1(->top), ~LJ_VMST_INTERP, DISPATCH_GL(vmstate));
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ dasm_put(Dst, 7761);
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
+ fcofs, CFRAME_SIZE);
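+  /* 0x80+reg is DW_CFA_offset: it records the save slot of GPR i and of
+     FPR i (DWARF register 32+i), with the uleb128 operand factored by the
+     CIE data alignment of -4. */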
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte %d\n\t.uleb128 %d\n",
+ 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x8e\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0xe\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte %d\n\t.uleb128 %d\n",
+ 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x8e\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0xe\n"
+ "\t.align 2\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/src/LuaJIT/src/buildvm_ppcspe.dasc b/src/LuaJIT/src/buildvm_ppcspe.dasc
new file mode 100644
index 000000000..b9ee5b01e
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_ppcspe.dasc
@@ -0,0 +1,3704 @@
+|// Low-level VM code for PowerPC/e500 CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch ppc
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r1 = sp, r2 and r13 = reserved and/or small data area ptr
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r14 // Base of current Lua stack frame.
+|.define KBASE, r15 // Constants of current Lua function.
+|.define PC, r16 // Next PC.
+|.define DISPATCH, r17 // Opcode dispatch table.
+|.define LREG, r18 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8.
+|
+|// Constants for vectorized type-comparisons (hi+low GPR). C callee-save.
+|.define TISNUM, r22
+|.define TISSTR, r23
+|.define TISTAB, r24
+|.define TISFUNC, r25
+|.define TISNIL, r26
+|.define TOBIT, r27
+|.define ZERO, TOBIT // Zero in lo word.
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r20 // Callee-save.
+|.define RB, r10
+|.define RC, r11
+|.define RD, r12
+|.define INS, r7 // Overlaps CARG5.
+|
+|.define TMP0, r0
+|.define TMP1, r8
+|.define TMP2, r9
+|.define TMP3, r6 // Overlaps CARG4.
+|
+|// Saved temporaries.
+|.define SAVE0, r21
+|
+|// Calling conventions.
+|.define CARG1, r3
+|.define CARG2, r4
+|.define CARG3, r5
+|.define CARG4, r6 // Overlaps TMP3.
+|.define CARG5, r7 // Overlaps INS.
+|
+|.define CRET1, r3
+|.define CRET2, r4
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.define SAVE_LR, 188(sp)
+|.define CFRAME_SPACE, 184 // Delta for sp.
+|// Back chain for sp: 184(sp) <-- sp entering interpreter
+|.define SAVE_r31, 176(sp) // 64 bit register saves.
+|.define SAVE_r30, 168(sp)
+|.define SAVE_r29, 160(sp)
+|.define SAVE_r28, 152(sp)
+|.define SAVE_r27, 144(sp)
+|.define SAVE_r26, 136(sp)
+|.define SAVE_r25, 128(sp)
+|.define SAVE_r24, 120(sp)
+|.define SAVE_r23, 112(sp)
+|.define SAVE_r22, 104(sp)
+|.define SAVE_r21, 96(sp)
+|.define SAVE_r20, 88(sp)
+|.define SAVE_r19, 80(sp)
+|.define SAVE_r18, 72(sp)
+|.define SAVE_r17, 64(sp)
+|.define SAVE_r16, 56(sp)
+|.define SAVE_r15, 48(sp)
+|.define SAVE_r14, 40(sp)
+|.define SAVE_CR, 36(sp)
+|.define UNUSED1, 32(sp)
+|.define SAVE_ERRF, 28(sp) // 32 bit C frame info.
+|.define SAVE_NRES, 24(sp)
+|.define SAVE_CFRAME, 20(sp)
+|.define SAVE_L, 16(sp)
+|.define SAVE_PC, 12(sp)
+|.define SAVE_MULTRES, 8(sp)
+|// Next frame lr: 4(sp)
+|// Back chain for sp: 0(sp) <-- sp while in interpreter
+|
+|.macro save_, reg; evstdd reg, SAVE_..reg; .endmacro
+|.macro rest_, reg; evldd reg, SAVE_..reg; .endmacro
+|
+|.macro saveregs
+| stwu sp, -CFRAME_SPACE(sp)
+| save_ r14; save_ r15; save_ r16; save_ r17; save_ r18; save_ r19
+| mflr r0; mfcr r12
+| save_ r20; save_ r21; save_ r22; save_ r23; save_ r24; save_ r25
+| stw r0, SAVE_LR; stw r12, SAVE_CR
+| save_ r26; save_ r27; save_ r28; save_ r29; save_ r30; save_ r31
+|.endmacro
+|
+|.macro restoreregs
+| lwz r0, SAVE_LR; lwz r12, SAVE_CR
+| rest_ r14; rest_ r15; rest_ r16; rest_ r17; rest_ r18; rest_ r19
+| mtlr r0; mtcrf 0x38, r12
+| rest_ r20; rest_ r21; rest_ r22; rest_ r23; rest_ r24; rest_ r25
+| rest_ r26; rest_ r27; rest_ r28; rest_ r29; rest_ r30; rest_ r31
+| addi sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|
+|//-----------------------------------------------------------------------
+|
+|// These basic macros should really be part of DynASM.
+|.macro srwi, rx, ry, n; rlwinm rx, ry, 32-n, n, 31; .endmacro
+|.macro slwi, rx, ry, n; rlwinm rx, ry, n, 0, 31-n; .endmacro
+|.macro rotlwi, rx, ry, n; rlwinm rx, ry, n, 0, 31; .endmacro
+|.macro rotlw, rx, ry, rn; rlwnm rx, ry, rn, 0, 31; .endmacro
+|.macro subi, rx, ry, i; addi rx, ry, -i; .endmacro
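+|// Example: "srwi rx, ry, 3" expands to "rlwinm rx, ry, 29, 3, 31", i.e. a
+|// rotate left by 32-3 with a mask keeping bits 3..31, which is a logical
+|// shift right by 3.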
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; tw 4, sp, sp; .endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_PC, -8
+|.define FRAME_FUNC, -4
+|
+|// Instruction decode.
+|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro
+|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro
+|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro
+|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro
+|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro
+|
+|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro
+|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro
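+|// Note on the decode macros above: a bytecode instruction packs OP into the
+|// lowest byte, A into the next byte, and either C and B or a 16-bit D field
+|// into the upper half. The rotate-and-mask forms leave operands pre-scaled:
+|// decode_OP4 yields OP*4 (dispatch table slots are 4-byte pointers), the *8
+|// variants yield the operand times 8 (stack slots are 8-byte TValues), and
+|// decode_RD4 yields D*4 for branch targets over 4-byte instructions.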
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT2
+| decode_OP4 TMP1, INS
+| decode_RB8 RB, INS
+| decode_RD8 RD, INS
+| lwzx TMP0, DISPATCH, TMP1
+| decode_RA8 RA, INS
+| decode_RC8 RC, INS
+| mtctr TMP0
+| bctr
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| lwz PC, LFUNC:RB->pc
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+| decode_OP4 TMP1, INS
+| decode_RA8 RA, INS
+| lwzx TMP0, DISPATCH, TMP1
+| add RA, RA, BASE
+| mtctr TMP0
+| bctr
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| stw PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checknum, reg; evcmpltu reg, TISNUM; .endmacro
+|.macro checkstr, reg; evcmpeq reg, TISSTR; .endmacro
+|.macro checktab, reg; evcmpeq reg, TISTAB; .endmacro
+|.macro checkfunc, reg; evcmpeq reg, TISFUNC; .endmacro
+|.macro checknil, reg; evcmpeq reg, TISNIL; .endmacro
+|.macro checkok, label; blt label; .endmacro
+|.macro checkfail, label; bge label; .endmacro
+|.macro checkanyfail, label; bns label; .endmacro
+|.macro checkallok, label; bso label; .endmacro
+|
+|.macro branch_RD
+| srwi TMP0, RD, 1
+| add PC, PC, TMP0
+| addis PC, PC, -(BCBIAS_J*4 >> 16)
+|.endmacro
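+|// branch_RD: RD holds the jump operand times 8, so the srwi by 1 turns it
+|// into a byte offset over 4-byte instructions, and the addis subtracts
+|// BCBIAS_J*4 to undo the 0x8000 bias carried by jump operands.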
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
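+|// PC2PROTO yields the (negative) offset of a GCproto field relative to the
+|// first bytecode instruction, which directly follows the GCproto header, so
+|// proto fields can be reached from the pc pointer loaded via LFUNC->pc.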
+|
+|.macro hotloop
+| NYI
+|.endmacro
+|
+|.macro hotcall
+| NYI
+|.endmacro
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp
+| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| // Assumes LJ_GC_BLACK is 0x04.
+| rlwinm mark, mark, 0, 30, 28 // black2gray(tab)
+| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| stb mark, tab->marked
+| stw tmp, tab->gclist
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andi. TMP0, PC, FRAME_P
+ | evsplati TMP1, LJ_TTRUE
+ | beq ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | mr BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | stwu TMP1, FRAME_PC(RA) // Prepend true to results.
+ |
+ |->vm_returnc:
+ | andi. TMP0, PC, FRAME_TYPE
+ | addi RD, RD, 8 // RD = (nresults+1)*8.
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | cmpwi TMP0, FRAME_C
+ | rlwinm TMP2, PC, 0, 0, 28
+ | li_vmstate C
+ | sub TMP2, BASE, TMP2 // TMP2 = previous base.
+ | bne ->vm_returnp
+ |
+ | addic. TMP1, RD, -8
+ | stw TMP2, L->base
+ | lwz TMP2, SAVE_NRES
+ | subi BASE, BASE, 8
+ | st_vmstate
+ | slwi TMP2, TMP2, 3
+ | beq >2
+ |1:
+ | addic. TMP1, TMP1, -8
+ | evldd TMP0, 0(RA)
+ | addi RA, RA, 8
+ | evstdd TMP0, 0(BASE)
+ | addi BASE, BASE, 8
+ | bne <1
+ |
+ |2:
+ | cmpw TMP2, RD // More/less results wanted?
+ | bne >6
+ |3:
+ | stw BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | lwz TMP0, SAVE_CFRAME // Restore previous C frame.
+ | li CRET1, 0 // Ok return status for vm_pcall.
+ | stw TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | blr
+ |
+ |6:
+ | ble >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | lwz TMP1, L->maxstack
+ | cmplw BASE, TMP1
+ | bge >8
+ | evstdd TISNIL, 0(BASE)
+ | addi RD, RD, 8
+ | addi BASE, BASE, 8
+ | b <2
+ |
+ |7: // Less results wanted.
+ | sub TMP0, RD, TMP2
+ | cmpwi TMP2, 0 // LUA_MULTRET+1 case?
+ | sub TMP0, BASE, TMP0 // Subtract the difference.
+ | iseleq BASE, BASE, TMP0 // Either keep top or shrink it.
+ | b <3
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | stw BASE, L->top // Save current top held in BASE (yes).
+ | mr SAVE0, RD
+ | mr CARG2, TMP2
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz TMP2, SAVE_NRES
+ | mr RD, SAVE0
+ | slwi TMP2, TMP2, 3
+ | lwz BASE, L->top // Need the (realloced) L->top in BASE.
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mr sp, CARG1
+ | mr CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | li TMP0, ~LJ_VMST_C
+ | lwz GL:TMP1, L->glref
+ | stw TMP0, GL:TMP1->vmstate
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | rlwinm sp, CARG1, 0, 0, 29
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
+ | evsplati TISFUNC, LJ_TFUNC
+ | lus TOBIT, 0x4338
+ | evsplati TISTAB, LJ_TTAB
+ | li TMP0, 0
+ | lwz BASE, L->base
+ | evmergelo TOBIT, TOBIT, TMP0
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | evsplati TISSTR, LJ_TSTR
+ | li TMP1, LJ_TFALSE
+ | evsplati TISNIL, LJ_TNIL
+ | li_vmstate INTERP
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | la RA, -8(BASE) // Results start at BASE-8.
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw TMP1, 0(RA) // Prepend false to error message.
+ | li RD, 16 // 2 results: false + error message.
+ | st_vmstate
+ | b ->vm_returnc
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | li CARG2, LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | stw BASE, L->base
+ | addi PC, PC, 4 // Must point after first instruction.
+ | stw RC, L->top
+ | srwi CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz BASE, L->base
+ | lwz RC, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mr L, CARG1
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mr BASE, CARG2
+ | lbz TMP1, L->status
+ | stw L, SAVE_L
+ | li PC, FRAME_CP
+ | addi TMP0, sp, CFRAME_RESUME
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw CARG3, SAVE_NRES
+ | cmplwi TMP1, 0
+ | stw CARG3, SAVE_ERRF
+ | stw TMP0, L->cframe
+ | stw CARG3, SAVE_CFRAME
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | beq >3
+ |
+ | // Resume after yield (like a return).
+ | mr RA, BASE
+ | lwz BASE, L->base
+ | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
+ | lwz TMP1, L->top
+ | evsplati TISFUNC, LJ_TFUNC
+ | lus TOBIT, 0x4338
+ | evsplati TISTAB, LJ_TTAB
+ | lwz PC, FRAME_PC(BASE)
+ | li TMP2, 0
+ | evsplati TISSTR, LJ_TSTR
+ | sub RD, TMP1, BASE
+ | evmergelo TOBIT, TOBIT, TMP2
+ | stb CARG3, L->status
+ | andi. TMP0, PC, FRAME_TYPE
+ | li_vmstate INTERP
+ | addi RD, RD, 8
+ | evsplati TISNIL, LJ_TNIL
+ | mr MULTRES, RD
+ | st_vmstate
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | li PC, FRAME_CP
+ | stw CARG4, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | lwz TMP1, L:CARG1->cframe
+ | stw CARG3, SAVE_NRES
+ | mr L, CARG1
+ | stw CARG1, SAVE_L
+ | mr BASE, CARG2
+ | stw sp, L->cframe // Add our C frame to cframe chain.
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | stw TMP1, SAVE_CFRAME
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | lwz TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
+ | lwz TMP1, L->top
+ | evsplati TISFUNC, LJ_TFUNC
+ | add PC, PC, BASE
+ | evsplati TISTAB, LJ_TTAB
+ | lus TOBIT, 0x4338
+ | li TMP0, 0
+ | sub PC, PC, TMP2 // PC = frame delta + frame type
+ | evsplati TISSTR, LJ_TSTR
+ | sub NARGS8:RC, TMP1, BASE
+ | evmergelo TOBIT, TOBIT, TMP0
+ | li_vmstate INTERP
+ | evsplati TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | li TMP0, -8
+ | evlddx LFUNC:RB, BASE, TMP0
+ | checkfunc LFUNC:RB
+ | checkfail ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mr L, CARG1
+ | lwz TMP0, L:CARG1->stack
+ | stw CARG1, SAVE_L
+ | lwz TMP1, L->top
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | lwz TMP1, L->cframe
+ | stw sp, L->cframe // Add our C frame to cframe chain.
+ | li TMP2, 0
+ | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | stw TMP2, SAVE_ERRF // No error function.
+ | stw TMP1, SAVE_CFRAME
+ | mtctr CARG4
+ | bctrl // (lua_State *L, lua_CFunction func, void *ud)
+ | mr. BASE, CRET1
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | li PC, FRAME_CP
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | bne <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | lwz TMP0, -12(BASE) // Continuation.
+ | mr RB, BASE
+ | mr BASE, TMP2 // Restore caller BASE.
+ | lwz LFUNC:TMP1, FRAME_FUNC(TMP2)
+ | cmplwi TMP0, 0
+ | lwz PC, -16(RB) // Restore PC from [cont|PC].
+ | beq >1
+ | subi TMP2, RD, 8
+ | lwz TMP1, LFUNC:TMP1->pc
+ | evstddx TISNIL, RA, TMP2 // Ensure one valid arg.
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | // BASE = base, RA = resultptr, RB = meta base
+ | mtctr TMP0
+ | bctr // Jump to continuation.
+ |
+ |1: // Tail call from C function.
+ | subi TMP1, RB, 16
+ | sub RC, TMP1, BASE
+ | b ->vm_call_tail
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lwz INS, -4(PC)
+ | subi CARG2, RB, 16
+ | decode_RB8 SAVE0, INS
+ | evldd TMP0, 0(RA)
+ | add TMP1, BASE, SAVE0
+ | stw BASE, L->base
+ | cmplw TMP1, CARG2
+ | sub CARG3, CARG2, TMP1
+ | decode_RA8 RA, INS
+ | evstdd TMP0, 0(CARG2)
+ | bne ->BC_CAT_Z
+ | evstddx TMP0, BASE, RA
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | decode_RB8 RB, INS
+ | evstdd STR:RC, 0(CARG3)
+ | add CARG2, BASE, RB
+ | b >1
+ |
+ |->vmeta_tgets:
+ | evmergelo TAB:RB, TISTAB, TAB:RB
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | evstdd TAB:RB, 0(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | evstdd STR:RC, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ | efdcfsi TMP0, TMP0
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ | evstdd TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ | beq >3
+ | evldd TMP0, 0(CRET1)
+ | evstddx TMP0, BASE, RA
+ | ins_next
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | subfic TMP1, BASE, FRAME_CONT
+ | lwz BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 16 // 2 args for func(t, k).
+ | b ->vm_call_dispatch_f
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | decode_RB8 RB, INS
+ | evstdd STR:RC, 0(CARG3)
+ | add CARG2, BASE, RB
+ | b >1
+ |
+ |->vmeta_tsets:
+ | evmergelo TAB:RB, TISTAB, TAB:RB
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | evstdd TAB:RB, 0(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | evstdd STR:RC, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ | efdcfsi TMP0, TMP0
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ | evstdd TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ | evlddx TMP0, BASE, RA
+ | beq >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | evstdd TMP0, 0(CRET1)
+ | ins_next
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | subfic TMP1, BASE, FRAME_CONT
+ | lwz BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ | evstdd TMP0, 16(BASE) // Copy value to third argument.
+ | b ->vm_call_dispatch_f
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | mr CARG1, L
+ | subi PC, PC, 4
+ | add CARG2, BASE, RA
+ | stw PC, SAVE_PC
+ | add CARG3, BASE, RD
+ | stw BASE, L->base
+ | decode_OP1 CARG4, INS
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | cmplwi CRET1, 1
+ | bgt ->vmeta_binop
+ |4:
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | decode_RD4 TMP2, INS
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | add TMP2, TMP2, TMP3
+ | isellt PC, PC, TMP2
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lwz INS, -4(PC)
+ | evldd TMP0, 0(RA)
+ | decode_RA8 TMP1, INS
+ | evstddx TMP0, BASE, TMP1
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | li TMP1, LJ_TTRUE
+ | cmplw TMP1, TMP0 // Branch if result is true.
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | li TMP1, LJ_TFALSE
+ | cmplw TMP0, TMP1 // Branch if result is false.
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | subi PC, PC, 4
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vn:
+ | add CARG3, BASE, RB
+ | add CARG4, KBASE, RC
+ | b >1
+ |
+ |->vmeta_arith_nv:
+ | add CARG3, KBASE, RC
+ | add CARG4, BASE, RB
+ | b >1
+ |
+ |->vmeta_unm:
+ | add CARG3, BASE, RD
+ | mr CARG4, CARG3
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | add CARG3, BASE, RB
+ | add CARG4, BASE, RC
+ |1:
+ | add CARG2, BASE, RA
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS.
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | beq ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub TMP1, CRET1, BASE
+ | stw PC, -16(CRET1) // [cont|PC]
+ | mr TMP2, BASE
+ | addi PC, TMP1, FRAME_CONT
+ | mr BASE, CRET1
+ | li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | mr SAVE0, CARG1
+#endif
+ | add CARG2, BASE, RD
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | cmplwi CRET1, 0
+ | bne ->vmeta_binop // Binop call for compatibility.
+ | mr CARG1, SAVE0
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | mr CARG1, L
+ | stw TMP2, L->base // This is the callers base!
+ | subi CARG2, BASE, 8
+ | stw PC, SAVE_PC
+ | add CARG3, BASE, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mr CARG1, L
+ | stw BASE, L->base
+ | subi CARG2, RA, 8
+ | stw PC, SAVE_PC
+ | add CARG3, RA, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz TMP1, FRAME_PC(BASE)
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | b ->BC_CALLT_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mr CARG1, L
+ | stw BASE, L->base
+ | mr CARG2, RA
+ | stw PC, SAVE_PC
+ | mr SAVE0, INS
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+#if LJ_HASJIT
+ | decode_OP1 TMP0, SAVE0
+#endif
+ | decode_RA8 RA, SAVE0
+#if LJ_HASJIT
+ | cmpwi TMP0, BC_JFORI
+#endif
+ | decode_RD8 RD, SAVE0
+#if LJ_HASJIT
+ | beq =>BC_JFORI
+#endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG1, 0(BASE)
+ | evldd CARG2, 8(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc_1 name
+ | checknum CARG1
+ | checkfail ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc_2 name
+ | evmergehi TMP0, CARG1, CARG2
+ | checknum TMP0
+ | checkanyfail ->fff_fallback
+ |.endmacro
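+ |// NARGS8:RC carries nargs*8, so the cmplwi against 8 or 16 in the .ffunc_*
+ |// headers enforces the minimum argument count before the fast path runs;
+ |// a failed count or type check branches to ->fff_fallback.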
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1.
+ |.macro ffgccheck
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | cmplw TMP0, TMP1
+ | bgel ->fff_gcstep
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc assert
+ | cmplwi NARGS8:RC, 8
+ | evldd TMP0, 0(BASE)
+ | blt ->fff_fallback
+ | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE.
+ | la RA, -8(BASE)
+ | evcmpltu cr1, TMP0, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | bge cr1, ->fff_fallback
+ | evstdd TMP0, 0(RA)
+ | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | beq ->fff_res // Done if exactly 1 argument.
+ | li TMP1, 8
+ | subi RC, RC, 8
+ |1:
+ | cmplw TMP1, RC
+ | evlddx TMP0, BASE, TMP1
+ | evstddx TMP0, RA, TMP1
+ | addi TMP1, TMP1, 8
+ | bne <1
+ | b ->fff_res
+ |
+ |.ffunc type
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | li TMP2, ~LJ_TNUMX
+ | cmplw CARG1, TISNUM
+ | not TMP1, CARG1
+ | isellt TMP1, TMP2, TMP1
+ | slwi TMP1, TMP1, 3
+ | la TMP2, CFUNC:RB->upvalue
+ | evlddx STR:CRET1, TMP2, TMP1
+ | b ->fff_restv
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | checktab CARG1
+ | evmergehi TMP1, CARG1, CARG1
+ | checkfail >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | lwz TAB:RB, TAB:CARG1->metatable
+ |2:
+ | evmr CRET1, TISNIL
+ | cmplwi TAB:RB, 0
+ | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beq ->fff_restv
+ | lwz TMP0, TAB:RB->hmask
+ | evmergelo CRET1, TISTAB, TAB:RB // Use metatable as default result.
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | evldd TMP0, NODE:TMP2->key
+ | evldd TMP1, NODE:TMP2->val
+ | evcmpeq TMP0, STR:RC
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | checkallok >5
+ | cmplwi NODE:TMP2, 0
+ | beq ->fff_restv // Not found, keep default result.
+ | b <3
+ |5:
+ | checknil TMP1
+ | checkok ->fff_restv // Ditto for nil value.
+ | evmr CRET1, TMP1 // Return value of mt.__metatable.
+ | b ->fff_restv
+ |
+ |6:
+ | cmpwi TMP1, LJ_TUDATA
+ | not TMP1, TMP1
+ | beq <1
+ | checknum CARG1
+ | slwi TMP1, TMP1, 2
+ | li TMP2, 4*~LJ_TNUMX
+ | isellt TMP1, TMP2, TMP1
+ | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH)
+ | lwzx TAB:RB, TMP2, TMP1
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | evmergehi TMP0, TAB:CARG1, TAB:CARG2
+ | checktab TMP0
+ | checkanyfail ->fff_fallback
+ | lwz TAB:TMP1, TAB:CARG1->metatable
+ | cmplwi TAB:TMP1, 0
+ | lbz TMP3, TAB:CARG1->marked
+ | bne ->fff_fallback
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | stw TAB:CARG2, TAB:CARG1->metatable
+ | beq ->fff_restv
+ | barrierback TAB:CARG1, TMP3, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checktab CARG2
+ | la CARG3, 8(BASE)
+ | checkfail ->fff_fallback
+ | mr CARG1, L
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ | evldd CRET1, 0(CRET1)
+ | b ->fff_restv
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | bne ->fff_fallback // Exactly one argument.
+ | checknum CARG1
+ | checkok ->fff_restv
+ | b ->fff_fallback
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | checkstr CARG1
+ | // A __tostring method in the string base metatable is ignored.
+ | checkok ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | checknum CARG1
+ | cmplwi cr1, TMP0, 0
+ | stw BASE, L->base // Add frame since C call can throw.
+ | crand 4*cr0+eq, 4*cr0+lt, 4*cr1+eq
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | bne ->fff_fallback
+ | ffgccheck
+ | mr CARG1, L
+ | mr CARG2, BASE
+ | bl extern lj_str_fromnum // (lua_State *L, lua_Number *np)
+ | // Returns GCstr *.
+ | evmergelo STR:CRET1, TISSTR, STR:CRET1
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc next
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | evstddx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil.
+ | checktab TAB:CARG2
+ | lwz PC, FRAME_PC(BASE)
+ | checkfail ->fff_fallback
+ | stw BASE, L->base // Add frame since C call can throw.
+ | mr CARG1, L
+ | stw BASE, L->top // Dummy frame length is ok.
+ | la CARG3, 8(BASE)
+ | stw PC, SAVE_PC
+ | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
+ | // Returns 0 at end of traversal.
+ | cmplwi CRET1, 0
+ | evmr CRET1, TISNIL
+ | beq ->fff_restv // End of traversal: return nil.
+ | evldd TMP0, 8(BASE) // Copy key and value to results.
+ | la RA, -8(BASE)
+ | evldd TMP1, 16(BASE)
+ | evstdd TMP0, 0(RA)
+ | li RD, (2+1)*8
+ | evstdd TMP1, 8(RA)
+ | b ->fff_res
+ |
+ |.ffunc_1 pairs
+ | checktab TAB:CARG1
+ | lwz PC, FRAME_PC(BASE)
+ | checkfail ->fff_fallback
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | la RA, -8(BASE)
+#endif
+ | evstdd TISNIL, 8(BASE)
+ | li RD, (3+1)*8
+ | evstdd CFUNC:TMP0, 0(RA)
+ | b ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | checktab TAB:CARG1
+ | lwz PC, FRAME_PC(BASE)
+ | checkfail ->fff_fallback
+ | checknum CARG2
+ | lus TMP3, 0x3ff0
+ | checkfail ->fff_fallback
+ | efdctsi TMP2, CARG2
+ | lwz TMP0, TAB:CARG1->asize
+ | evmergelo TMP3, TMP3, ZERO
+ | lwz TMP1, TAB:CARG1->array
+ | efdadd CARG2, CARG2, TMP3
+ | addi TMP2, TMP2, 1
+ | la RA, -8(BASE)
+ | cmplw TMP0, TMP2
+ | slwi TMP3, TMP2, 3
+ | evstdd CARG2, 0(RA)
+ | ble >2 // Not in array part?
+ | evlddx TMP1, TMP1, TMP3
+ |1:
+ | checknil TMP1
+ | li RD, (0+1)*8
+ | checkok ->fff_res // End of iteration, return 0 results.
+ | li RD, (2+1)*8
+ | evstdd TMP1, 8(RA)
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lwz TMP0, TAB:CARG1->hmask
+ | cmplwi TMP0, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | mr CARG2, TMP2
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | cmplwi CRET1, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | evldd TMP1, 0(CRET1)
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktab TAB:CARG1
+ | lwz PC, FRAME_PC(BASE)
+ | checkfail ->fff_fallback
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | la RA, -8(BASE)
+#endif
+ | evsplati TMP1, 0
+ | li RD, (3+1)*8
+ | evstdd TMP1, 8(BASE)
+ | evstdd CFUNC:TMP0, 0(RA)
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | cmplwi NARGS8:RC, 8
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | blt ->fff_fallback
+ | mr TMP2, BASE
+ | la BASE, 8(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | addi PC, TMP3, 8+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |.ffunc_2 xpcall
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | mr TMP2, BASE
+ | checkfunc CARG2 // Traceback must be a function.
+ | checkfail ->fff_fallback
+ | la BASE, 16(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ | evstdd CARG2, 0(TMP2) // Swap function and traceback.
+ | subi NARGS8:RC, NARGS8:RC, 16
+ | evstdd CARG1, 8(TMP2)
+ | addi PC, TMP3, 16+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | evmergehi TMP0, L:CARG1, L:CARG1
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ |.if resume
+ | cmpwi TMP0, LJ_TTHREAD
+ | bne ->fff_fallback
+ |.endif
+ | lbz TMP0, L:CARG1->status
+ | lwz TMP1, L:CARG1->cframe
+ | lwz CARG2, L:CARG1->top
+ | cmplwi cr0, TMP0, LUA_YIELD
+ | lwz TMP2, L:CARG1->base
+ | cmplwi cr1, TMP1, 0
+ | lwz TMP0, L:CARG1->maxstack
+ | cmplw cr7, CARG2, TMP2
+ | lwz PC, FRAME_PC(BASE)
+ | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0
+ | add TMP2, CARG2, NARGS8:RC
+ | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD
+ | cmplw cr1, TMP2, TMP0
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt
+ | stw PC, SAVE_PC
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov
+ | stw BASE, L->base
+ | blt cr6, ->fff_fallback
+ |1:
+ |.if resume
+ | addi BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | subi TMP2, TMP2, 8
+ |.endif
+ | stw TMP2, L:CARG1->top
+ | li TMP1, 0
+ | stw BASE, L->top
+ |2: // Move args to coroutine.
+ | cmpw TMP1, NARGS8:RC
+ | evlddx TMP0, BASE, TMP1
+ | beq >3
+ | evstddx TMP0, CARG2, TMP1
+ | addi TMP1, TMP1, 8
+ | b <2
+ |3:
+ | li CARG3, 0
+ | mr L:SAVE0, L:CARG1
+ | li CARG4, 0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | lwz TMP2, L:SAVE0->base
+ | cmplwi CRET1, LUA_YIELD
+ | lwz TMP3, L:SAVE0->top
+ | li_vmstate INTERP
+ | lwz BASE, L->base
+ | st_vmstate
+ | bgt >8
+ | sub RD, TMP3, TMP2
+ | lwz TMP0, L->maxstack
+ | cmplwi RD, 0
+ | add TMP1, BASE, RD
+ | beq >6 // No results?
+ | cmplw TMP1, TMP0
+ | li TMP1, 0
+ | bgt >9 // Need to grow stack?
+ |
+ | subi TMP3, RD, 8
+ | stw TMP2, L:SAVE0->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | cmplw TMP1, TMP3
+ | evlddx TMP0, TMP2, TMP1
+ | evstddx TMP0, BASE, TMP1
+ | addi TMP1, TMP1, 8
+ | bne <5
+ |6:
+ | andi. TMP0, PC, FRAME_TYPE
+ |.if resume
+ | li TMP1, LJ_TTRUE
+ | la RA, -8(BASE)
+ | stw TMP1, -8(BASE) // Prepend true to results.
+ | addi RD, RD, 16
+ |.else
+ | mr RA, BASE
+ | addi RD, RD, 8
+ |.endif
+ |7:
+ | stw PC, SAVE_PC
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | andi. TMP0, PC, FRAME_TYPE
+ | la TMP3, -8(TMP3)
+ | li TMP1, LJ_TFALSE
+ | evldd TMP0, 0(TMP3)
+ | stw TMP3, L:SAVE0->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | stw TMP1, -8(BASE) // Prepend false to results.
+ | la RA, -8(BASE)
+ | evstdd TMP0, 0(BASE) // Copy error message.
+ | b <7
+ |.else
+ | mr CARG1, L
+ | mr CARG2, L:SAVE0
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mr CARG1, L
+ | srwi CARG2, RD, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | li CRET1, 0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | lwz TMP0, L->cframe
+ | add TMP1, BASE, NARGS8:RC
+ | stw BASE, L->base
+ | andi. TMP0, TMP0, CFRAME_RESUME
+ | stw TMP1, L->top
+ | li CRET1, LUA_YIELD
+ | beq ->fff_fallback
+ | stw ZERO, L->cframe
+ | stb CRET1, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_n math_abs
+ | efdabs CRET1, CARG1
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CRET1 = TValue result.
+ | lwz PC, FRAME_PC(BASE)
+ | la RA, -8(BASE)
+ | evstdd CRET1, 0(RA)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andi. TMP0, PC, FRAME_TYPE
+ | mr MULTRES, RD
+ | bne ->vm_return
+ | lwz INS, -4(PC)
+ | decode_RB8 RB, INS
+ |5:
+ | cmplw RB, RD // More results expected?
+ | decode_RA8 TMP0, INS
+ | bgt >6
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | evstddx TISNIL, RA, TMP1
+ | b <5
+ |
+ |.macro math_extern, func
+ | .ffunc math_ .. func
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG2
+ | evmergehi CARG1, CARG2, CARG2
+ | checkfail ->fff_fallback
+ | bl extern func
+ | evmergelo CRET1, CRET1, CRET2
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc math_ .. func
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG2, 0(BASE)
+ | evldd CARG4, 8(BASE)
+ | blt ->fff_fallback
+ | evmergehi CARG1, CARG4, CARG2
+ | checknum CARG1
+ | evmergehi CARG3, CARG4, CARG4
+ | checkanyfail ->fff_fallback
+ | bl extern func
+ | evmergelo CRET1, CRET1, CRET2
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.macro math_round, func
+ | .ffunc math_ .. func
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG2
+ | evmergehi CARG1, CARG2, CARG2
+ | checkfail ->fff_fallback
+ | lwz PC, FRAME_PC(BASE)
+ | bl ->vm_..func.._hilo;
+ | la RA, -8(BASE)
+ | evstdd CRET2, 0(RA)
+ | b ->fff_res1
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ | math_extern sqrt
+ | math_extern log
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |->ff_math_deg:
+ |.ffunc_n math_rad
+ | evldd CARG2, CFUNC:RB->upvalue[0]
+ | efdmul CRET1, CARG1, CARG2
+ | b ->fff_restv
+ |
+ |.ffunc math_ldexp
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG2, 0(BASE)
+ | evldd CARG4, 8(BASE)
+ | blt ->fff_fallback
+ | evmergehi CARG1, CARG4, CARG2
+ | checknum CARG1
+ | checkanyfail ->fff_fallback
+ | efdctsi CARG3, CARG4
+ | bl extern ldexp
+ | evmergelo CRET1, CRET1, CRET2
+ | b ->fff_restv
+ |
+ |.ffunc math_frexp
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG2
+ | evmergehi CARG1, CARG2, CARG2
+ | checkfail ->fff_fallback
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | lwz PC, FRAME_PC(BASE)
+ | bl extern frexp
+ | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | evmergelo CRET1, CRET1, CRET2
+ | efdcfsi CRET2, TMP1
+ | la RA, -8(BASE)
+ | evstdd CRET1, 0(RA)
+ | li RD, (2+1)*8
+ | evstdd CRET2, 8(RA)
+ | b ->fff_res
+ |
+ |.ffunc math_modf
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG2
+ | evmergehi CARG1, CARG2, CARG2
+ | checkfail ->fff_fallback
+ | la CARG3, -8(BASE)
+ | lwz PC, FRAME_PC(BASE)
+ | bl extern modf
+ | evmergelo CRET1, CRET1, CRET2
+ | la RA, -8(BASE)
+ | evstdd CRET1, 0(BASE)
+ | li RD, (2+1)*8
+ | b ->fff_res
+ |
+ |.macro math_minmax, name, cmpop
+ | .ffunc_1 name
+ | checknum CARG1
+ | li TMP1, 8
+ | checkfail ->fff_fallback
+ |1:
+ | evlddx CARG2, BASE, TMP1
+ | cmplw cr1, TMP1, NARGS8:RC
+ | checknum CARG2
+ | bge cr1, ->fff_restv // Ok, since CRET1 = CARG1.
+ | checkfail ->fff_fallback
+ | cmpop CARG2, CARG1
+ | addi TMP1, TMP1, 8
+ | crmove 4*cr0+lt, 4*cr0+gt
+ | evsel CARG1, CARG2, CARG1
+ | b <1
+ |.endmacro
+ |
+ | math_minmax math_min, efdtstlt
+ | math_minmax math_max, efdtstgt
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc_1 string_len
+ | checkstr STR:CARG1
+ | checkfail ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ | efdcfsi CRET1, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | cmplwi NARGS8:RC, 8
+ | evldd STR:CARG1, 0(BASE)
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checkstr STR:CARG1
+ | la RA, -8(BASE)
+ | checkfail ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ | li RD, (0+1)*8
+ | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | li TMP2, (1+1)*8
+ | cmplwi TMP0, 0
+ | lwz PC, FRAME_PC(BASE)
+ | efdcfsi CRET1, TMP1
+ | iseleq RD, RD, TMP2
+ | evstdd CRET1, 0(RA)
+ | b ->fff_res
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | bne ->fff_fallback // Exactly 1 argument.
+ | checknum CARG1
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | checkfail ->fff_fallback
+ | efdctsiz TMP0, CARG1
+ | li CARG3, 1
+ | cmplwi TMP0, 255
+ | stb TMP0, 0(CARG2)
+ | bgt ->fff_fallback
+ |->fff_newstr:
+ | mr CARG1, L
+ | stw BASE, L->base
+ | stw PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ | // Returns GCstr *.
+ | lwz BASE, L->base
+ | evmergelo STR:CRET1, TISSTR, STR:CRET1
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG3, 16(BASE)
+ | evldd STR:CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | evldd CARG2, 8(BASE)
+ | li TMP2, -1
+ | beq >1
+ | checknum CARG3
+ | checkfail ->fff_fallback
+ | efdctsiz TMP2, CARG3
+ |1:
+ | checknum CARG2
+ | checkfail ->fff_fallback
+ | checkstr STR:CARG1
+ | efdctsiz TMP1, CARG2
+ | checkfail ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ | cmplw TMP0, TMP2 // len < end? (unsigned compare)
+ | add TMP3, TMP2, TMP0
+ | blt >5
+ |2:
+ | cmpwi TMP1, 0 // start <= 0?
+ | add TMP3, TMP1, TMP0
+ | ble >7
+ |3:
+ | sub. CARG3, TMP2, TMP1
+ | addi CARG2, STR:CARG1, #STR-1
+ | addi CARG3, CARG3, 1
+ | add CARG2, CARG2, TMP1
+ | isellt CARG3, r0, CARG3
+ | b ->fff_newstr
+ |
+ |5: // Negative end or overflow.
+ | cmpw TMP0, TMP2
+ | addi TMP3, TMP3, 1
+ | iselgt TMP2, TMP3, TMP0 // end = end > len ? len : end+len+1
+ | b <2
+ |
+ |7: // Negative start or underflow.
+ | cmpwi cr1, TMP3, 0
+ | iseleq TMP1, r0, TMP3
+ | isel TMP1, r0, TMP1, 4*cr1+lt
+ | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0)
+ | b <3
+ |
+ |.ffunc string_rep // Only handle the 1-char case inline.
+ | ffgccheck
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG1, 0(BASE)
+ | evldd CARG2, 8(BASE)
+ | blt ->fff_fallback
+ | checknum CARG2
+ | checkfail ->fff_fallback
+ | checkstr STR:CARG1
+ | efdctsiz CARG3, CARG2
+ | checkfail ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ | cmpwi CARG3, 0
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | ble >2 // Count <= 0? (or non-int)
+ | cmplwi TMP0, 1
+ | subi TMP2, CARG3, 1
+ | blt >2 // Zero length string?
+ | cmplw cr1, TMP1, CARG3
+ | bne ->fff_fallback // Fallback for > 1-char strings.
+ | lbz TMP0, STR:CARG1[1]
+ | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | blt cr1, ->fff_fallback
+ |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
+ | cmplwi TMP2, 0
+ | stbx TMP0, CARG2, TMP2
+ | subi TMP2, TMP2, 1
+ | bne <1
+ | b ->fff_newstr
+ |2: // Return empty string.
+ | la STR:CRET1, DISPATCH_GL(strempty)(DISPATCH)
+ | evmergelo CRET1, TISSTR, STR:CRET1
+ | b ->fff_restv
+ |
+ |.ffunc string_reverse
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | checkstr STR:CARG1
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | checkfail ->fff_fallback
+ | lwz CARG3, STR:CARG1->len
+ | la CARG1, #STR(STR:CARG1)
+ | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | li TMP2, 0
+ | cmplw TMP1, CARG3
+ | subi TMP3, CARG3, 1
+ | blt ->fff_fallback
+ |1: // Reverse string copy.
+ | cmpwi TMP3, 0
+ | lbzx TMP1, CARG1, TMP2
+ | blt ->fff_newstr
+ | stbx TMP1, CARG2, TMP3
+ | subi TMP3, TMP3, 1
+ | addi TMP2, TMP2, 1
+ | b <1
+ |
+ |.macro ffstring_case, name, lo
+ | .ffunc name
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | checkstr STR:CARG1
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | checkfail ->fff_fallback
+ | lwz CARG3, STR:CARG1->len
+ | la CARG1, #STR(STR:CARG1)
+ | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | cmplw TMP1, CARG3
+ | li TMP2, 0
+ | blt ->fff_fallback
+ |1: // ASCII case conversion.
+ | cmplw TMP2, CARG3
+ | lbzx TMP1, CARG1, TMP2
+ | bge ->fff_newstr
+ | subi TMP0, TMP1, lo
+ | xori TMP3, TMP1, 0x20
+ | cmplwi TMP0, 26
+ | isellt TMP1, TMP3, TMP1
+ | stbx TMP1, CARG2, TMP2
+ | addi TMP2, TMP2, 1
+ | b <1
+ |.endmacro
+ |
+ |ffstring_case string_lower, 65
+ |ffstring_case string_upper, 97
+ |
+ |//-- Table library ------------------------------------------------------
+ |
+ |.ffunc_1 table_getn
+ | checktab CARG1
+ | checkfail ->fff_fallback
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | efdcfsi CRET1, CRET1
+ | b ->fff_restv
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_n bit_..name
+ | efdadd CARG1, CARG1, TOBIT
+ |.endmacro
+ |
+ |.ffunc_bit tobit
+ |->fff_resbit:
+ | efdcfsi CRET1, CARG1
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | li TMP1, 8
+ |1:
+ | evlddx CARG2, BASE, TMP1
+ | cmplw cr1, TMP1, NARGS8:RC
+ | checknum CARG2
+ | bge cr1, ->fff_resbit
+ | checkfail ->fff_fallback
+ | efdadd CARG2, CARG2, TOBIT
+ | ins CARG1, CARG1, CARG2
+ | addi TMP1, TMP1, 8
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | rotlwi TMP0, CARG1, 8
+ | rlwimi TMP0, CARG1, 24, 0, 7
+ | rlwimi TMP0, CARG1, 24, 16, 23
+ | efdcfsi CRET1, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc_bit bnot
+ | not TMP0, CARG1
+ | efdcfsi CRET1, TMP0
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc_nn bit_..name
+ | efdadd CARG2, CARG2, TOBIT
+ | efdadd CARG1, CARG1, TOBIT
+ |.if shmod == 1
+ | rlwinm CARG2, CARG2, 0, 27, 31
+ |.elif shmod == 2
+ | neg CARG2, CARG2
+ |.endif
+ | ins TMP0, CARG1, CARG2
+ | efdcfsi CRET1, TMP0
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, slw, 1
+ |.ffunc_bit_sh rshift, srw, 1
+ |.ffunc_bit_sh arshift, sraw, 1
+ |.ffunc_bit_sh rol, rotlw, 0
+ |.ffunc_bit_sh ror, rotlw, 2
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | lwz TMP3, CFUNC:RB->f
+ | add TMP1, BASE, NARGS8:RC
+ | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | addi TMP0, TMP1, 8*LUA_MINSTACK
+ | lwz TMP2, L->maxstack
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | cmplw TMP0, TMP2
+ | stw BASE, L->base
+ | stw TMP1, L->top
+ | mr CARG1, L
+ | bgt >5 // Need to grow stack.
+ | mtctr TMP3
+ | bctrl // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | lwz BASE, L->base
+ | cmpwi CRET1, 0
+ | slwi RD, CRET1, 3
+ | la RA, -8(BASE)
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | lwz TMP0, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub NARGS8:RC, TMP0, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andi. TMP0, PC, FRAME_TYPE
+ | rlwinm TMP1, PC, 0, 0, 28
+ | bne >3
+ | lwz INS, -4(PC)
+ | decode_RA8 TMP1, INS
+ |3:
+ | sub TMP2, BASE, TMP1
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | li CARG2, LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz BASE, L->base
+ | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | mflr SAVE0
+ | stw BASE, L->base
+ | add TMP0, BASE, NARGS8:RC
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | stw TMP0, L->top
+ | mr CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | lwz BASE, L->base
+ | mtlr SAVE0
+ | lwz TMP0, L->top
+ | sub NARGS8:RC, TMP0, BASE
+ | lwz CFUNC:RB, FRAME_FUNC(BASE)
+ | blr
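+ |// Note: fff_gcstep is entered via the conditional bgel in ffgccheck, so the
+ |// fast function's return address is preserved in SAVE0 (mflr/mtlr above)
+ |// across the C call before returning with blr.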
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+#if LJ_HASJIT
+ | NYI
+#endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | beq >1
+ |5: // Re-dispatch to static ins.
+ | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OP4 TMP1, INS.
+ | lwzx TMP0, DISPATCH, TMP1
+ | mtctr TMP0
+ | bctr
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0
+ | bne <5
+ |
+ | cmpwi cr1, TMP0, 0
+ | addic. TMP2, TMP2, -1
+ | beq cr1, <5
+ | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | beq >1
+ | bge cr1, <5
+ |1:
+ | mr CARG1, L
+ | stw MULTRES, SAVE_MULTRES
+ | mr CARG2, PC
+ | stw BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | lwz BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lwz INS, -4(PC)
+ | decode_OP4 TMP1, INS
+ | decode_RB8 RB, INS
+ | addi TMP1, TMP1, GG_DISP2STATIC
+ | decode_RD8 RD, INS
+ | lwzx TMP0, DISPATCH, TMP1
+ | decode_RA8 RA, INS
+ | decode_RC8 RC, INS
+ | mtctr TMP0
+ | bctr
+ |
+ |->cont_hook: // Continue from hook yield.
+ | addi PC, PC, 4
+ | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+#if LJ_HASJIT
+ | NYI
+#endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mr CARG2, PC
+#if LJ_HASJIT
+ | b >1
+#endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+#if LJ_HASJIT
+ | ori CARG2, PC, 1
+ |1:
+#endif
+ | add TMP0, BASE, RC
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | stw BASE, L->base
+ | sub RA, RA, BASE
+ | stw TMP0, L->top
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | lwz BASE, L->base
+ | lwz TMP0, L->top
+ | stw ZERO, SAVE_PC // Invalidate for subsequent line hook.
+ | sub NARGS8:RC, TMP0, BASE
+ | add RA, BASE, RA
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | mtctr CRET1
+ | bctr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_exit_handler:
+#if LJ_HASJIT
+ | NYI
+#endif
+ |->vm_exit_interp:
+#if LJ_HASJIT
+ | NYI
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// FP value rounding. Called by math.floor/math.ceil fast functions
+ |// and from JIT code.
+ |//
+ |// This can be inlined if the CPU has the frin/friz/frip/frim instructions.
+ |// The alternative hard-float approaches have a deep dependency chain.
+ |// The resulting latency is at least 3x-7x the double-precision FP latency
+ |// (e500v2: 6cy, e600: 5cy, Cell: 10cy) or around 20-70 cycles.
+ |//
+ |// The soft-float approach is tedious, but much faster (e500v2: ~11cy/~6cy).
+ |// However it relies on a fast way to transfer the FP value to GPRs
+ |// (e500v2: 0cy for lo-word, 1cy for hi-word).
+ |//
+ |.macro vm_round, name, mode
+ | // Used temporaries: TMP0, TMP1, TMP2, TMP3.
+ |->name.._efd: // Input: CARG2, output: CRET2
+ | evmergehi CARG1, CARG2, CARG2
+ |->name.._hilo:
+ | // Input: CARG1 (hi), CARG2 (hi, lo), output: CRET2
+ | rlwinm TMP2, CARG1, 12, 21, 31
+ | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023
+ | li TMP1, -1
+ | cmplwi cr1, TMP2, 51 // 0 <= exp <= 51?
+ | subfic TMP0, TMP2, 52
+ | bgt cr1, >1
+ | lus TMP3, 0xfff0
+ | slw TMP0, TMP1, TMP0 // lomask = -1 << (52-exp)
+ | sraw TMP1, TMP3, TMP2 // himask = (int32_t)0xfff00000 >> exp
+ |.if mode == 2 // trunc(x):
+ | evmergelo TMP0, TMP1, TMP0
+ | evand CRET2, CARG2, TMP0 // hi &= himask, lo &= lomask
+ |.else
+ | andc TMP2, CARG2, TMP0
+ | andc TMP3, CARG1, TMP1
+ | or TMP2, TMP2, TMP3 // ztest = (hi&~himask) | (lo&~lomask)
+ | srawi TMP3, CARG1, 31 // signmask = (int32_t)hi >> 31
+ |.if mode == 0 // floor(x):
+ | and. TMP2, TMP2, TMP3 // iszero = ((ztest & signmask) == 0)
+ |.else // ceil(x):
+ | andc. TMP2, TMP2, TMP3 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ | and CARG2, CARG2, TMP0 // lo &= lomask
+ | and CARG1, CARG1, TMP1 // hi &= himask
+ | subc TMP0, CARG2, TMP0
+ | iseleq TMP0, CARG2, TMP0 // lo = iszero ? lo : lo-lomask
+ | sube TMP1, CARG1, TMP1
+ | iseleq TMP1, CARG1, TMP1 // hi = iszero ? hi : hi-himask+carry
+ | evmergelo CRET2, TMP1, TMP0
+ |.endif
+ | blr
+ |1:
+ | bgtlr // Already done if >=2^52, +-inf or nan.
+ |.if mode == 2 // trunc(x):
+ | rlwinm TMP1, CARG1, 0, 0, 0 // hi = sign(x)
+ | li TMP0, 0
+ | evmergelo CRET2, TMP1, TMP0
+ |.else
+ | rlwinm TMP2, CARG1, 0, 1, 31
+ | srawi TMP0, CARG1, 31 // signmask = (int32_t)hi >> 31
+ | or TMP2, TMP2, CARG2 // ztest = abs(hi) | lo
+ | lus TMP1, 0x3ff0
+ |.if mode == 0 // floor(x):
+ | and. TMP2, TMP2, TMP0 // iszero = ((ztest & signmask) == 0)
+ |.else // ceil(x):
+ | andc. TMP2, TMP2, TMP0 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ | li TMP0, 0
+ | iseleq TMP1, r0, TMP1
+ | rlwimi CARG1, TMP1, 0, 1, 31 // hi = sign(x) | (iszero ? 0.0 : 1.0)
+ | evmergelo CRET2, CARG1, TMP0
+ |.endif
+ | blr
+ |.endmacro
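+ |// vm_round is instantiated below with mode 0 (floor), 1 (ceil) and, when
+ |// the JIT is enabled, 2 (trunc).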
+ |
+ |->vm_floor:
+ | mflr CARG3
+ | bl ->vm_floor_hilo
+ | mtlr CARG3
+ | evmergehi CRET1, CRET2, CRET2
+ | blr
+ |
+ | vm_round vm_floor, 0
+ | vm_round vm_ceil, 1
+#if LJ_HASJIT
+ | vm_round vm_trunc, 2
+#else
+ |->vm_trunc_efd:
+ |->vm_trunc_hilo:
+#endif
+ |
+ |// Callable from C: double lj_vm_foldarith(double x, double y, int op)
+ |// Compute x op y for basic arithmetic operators (+ - * / % ^ and unary -)
+ |// and basic math functions. ORDER ARITH
+ |->vm_foldarith:
+ | evmergelo CARG2, CARG1, CARG2
+ | cmplwi CARG5, 1
+ | evmergelo CARG4, CARG3, CARG4
+ | beq >1; bgt >2
+ | efdadd CRET2, CARG2, CARG4; evmergehi CRET1, CRET2, CRET2; blr
+ |1:
+ | efdsub CRET2, CARG2, CARG4; evmergehi CRET1, CRET2, CRET2; blr
+ |2:
+ | cmplwi CARG5, 3; beq >1; bgt >2
+ | efdmul CRET2, CARG2, CARG4; evmergehi CRET1, CRET2, CRET2; blr
+ |1:
+ | efddiv CRET2, CARG2, CARG4; evmergehi CRET1, CRET2, CRET2; blr
+ |2:
+ | cmplwi CARG5, 5; beq >1; bgt >2
+ | evmr CARG3, CARG2; efddiv CRET2, CARG2, CARG4; evmr RB, CARG4
+ | mflr RC; bl ->vm_floor_efd; mtlr RC
+ | efdmul CRET2, CRET2, RB; efdsub CRET2, CARG3, CRET2
+ | evmergehi CRET1, CRET2, CRET2; blr
+ |1:
+ | b extern pow
+ |2:
+ | cmplwi CARG5, 7; beq >1; bgt >2
+ | xoris CARG1, CARG1, 0x8000; blr
+ |1:
+ | rlwinm CARG1, CARG1, 0, 1, 31; blr
+ |2:
+ | NYI // Other operations only needed by JIT compiler.
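+ |// The CARG5 dispatch above follows ORDER ARITH: 0 add, 1 sub, 2 mul, 3 div,
+ |// 4 mod, 5 pow, 6 unary minus, 7 abs; higher opcodes are JIT-only.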
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_ffi_call:
+#if LJ_HASFFI
+ | NYI
+#endif
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | evlddx TMP0, BASE, RA
+ | addi PC, PC, 4
+ | evlddx TMP1, BASE, RD
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | lwz TMP2, -4(PC)
+ | evmergehi RB, TMP0, TMP1
+ | decode_RD4 TMP2, TMP2
+ | checknum RB
+ | add TMP2, TMP2, TMP3
+ | checkanyfail ->vmeta_comp
+ | efdcmplt TMP0, TMP1
+ if (op == BC_ISLE || op == BC_ISGT) {
+ | efdcmpeq cr1, TMP0, TMP1
+ | cror 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
+ }
+ if (op == BC_ISLT || op == BC_ISLE) {
+ | iselgt PC, TMP2, PC
+ } else {
+ | iselgt PC, PC, TMP2
+ }
+ | ins_next
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | evlddx CARG2, BASE, RA
+ | addi PC, PC, 4
+ | evlddx CARG3, BASE, RD
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | lwz TMP2, -4(PC)
+ | evmergehi RB, CARG2, CARG3
+ | decode_RD4 TMP2, TMP2
+ | checknum RB
+ | add TMP2, TMP2, TMP3
+ | checkanyfail >5
+ | efdcmpeq CARG2, CARG3
+ if (vk) {
+ | iselgt PC, TMP2, PC
+ } else {
+ | iselgt PC, PC, TMP2
+ }
+ |1:
+ | ins_next
+ |
+ |5: // Either or both types are not numbers.
+ | evcmpeq CARG2, CARG3
+ | not TMP3, RB
+ | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive?
+ | crorc 4*cr7+lt, 4*cr0+so, 4*cr0+lt // 1: Same tv or different type.
+ | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata?
+ | crandc 4*cr7+gt, 4*cr0+lt, 4*cr1+gt // 2: Same type and primitive.
+ | mr SAVE0, PC
+ if (vk) {
+ | isel PC, TMP2, PC, 4*cr7+gt
+ } else {
+ | isel TMP2, PC, TMP2, 4*cr7+gt
+ }
+ | cror 4*cr7+lt, 4*cr7+lt, 4*cr7+gt // 1 or 2.
+ if (vk) {
+ | isel PC, TMP2, PC, 4*cr0+so
+ } else {
+ | isel PC, PC, TMP2, 4*cr0+so
+ }
+ | blt cr7, <1 // Done if 1 or 2.
+ | blt cr6, <1 // Done if not tab/ud.
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | lwz TAB:TMP2, TAB:CARG2->metatable
+ | li CARG4, 1-vk // ne = 0 or 1.
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable?
+ | lbz TMP2, TAB:TMP2->nomm
+ | andi. TMP2, TMP2, 1<<MM_eq
+ | bne <1 // Or 'no __eq' flag set?
+ | mr PC, SAVE0 // Restore PC.
+ | b ->vmeta_equal // Handle __eq metamethod.
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
+ | evlddx TMP0, BASE, RA
+ | srwi RD, RD, 1
+ | lwz INS, 0(PC)
+ | subfic RD, RD, -4
+ | addi PC, PC, 4
+ | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | decode_RD4 TMP2, INS
+ | evmergelo STR:TMP1, TISSTR, STR:TMP1
+ | add TMP2, TMP2, TMP3
+ | evcmpeq TMP0, STR:TMP1
+ if (vk) {
+ | isel PC, TMP2, PC, 4*cr0+so
+ } else {
+ | isel PC, PC, TMP2, 4*cr0+so
+ }
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ | evlddx TMP0, BASE, RA
+ | addi PC, PC, 4
+ | evlddx TMP1, KBASE, RD
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | lwz INS, -4(PC)
+ | checknum TMP0
+ | checkfail >5
+ | efdcmpeq TMP0, TMP1
+ |1:
+ | decode_RD4 TMP2, INS
+ | add TMP2, TMP2, TMP3
+ if (vk) {
+ | iselgt PC, TMP2, PC
+ |5:
+ } else {
+ | iselgt PC, PC, TMP2
+ }
+ |3:
+ | ins_next
+ if (!vk) {
+ |5:
+ | decode_RD4 TMP2, INS
+ | add PC, TMP2, TMP3
+ | b <3
+ }
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | lwzx TMP0, BASE, RA
+ | srwi TMP1, RD, 3
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | not TMP1, TMP1
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | cmplw TMP0, TMP1
+ | decode_RD4 TMP2, INS
+ | add TMP2, TMP2, TMP3
+ if (vk) {
+ | iseleq PC, TMP2, PC
+ } else {
+ | iseleq PC, PC, TMP2
+ }
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | evlddx TMP0, BASE, RD
+ | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE.
+ | lwz INS, 0(PC)
+ | evcmpltu TMP0, TMP1
+ | addi PC, PC, 4
+ if (op == BC_IST || op == BC_ISF) {
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | decode_RD4 TMP2, INS
+ | add TMP2, TMP2, TMP3
+ if (op == BC_IST) {
+ | isellt PC, TMP2, PC
+ } else {
+ | isellt PC, PC, TMP2
+ }
+ } else {
+ if (op == BC_ISTC) {
+ | checkfail >1
+ } else {
+ | checkok >1
+ }
+ | addis PC, PC, -(BCBIAS_J*4 >> 16)
+ | decode_RD4 TMP2, INS
+ | evstddx TMP0, BASE, RA
+ | add PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ | evlddx TMP0, BASE, RD
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ | lwzx TMP0, BASE, RD
+ | subfic TMP1, TMP0, LJ_TTRUE
+ | adde TMP0, TMP0, TMP1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | evlddx TMP0, BASE, RD
+ | checknum TMP0
+ | checkfail ->vmeta_unm
+ | efdneg TMP0, TMP0
+ | ins_next1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | evlddx CARG1, BASE, RD
+ | checkstr CARG1
+ | checkfail >2
+ | lwz CRET1, STR:CARG1->len
+ |1:
+ | ins_next1
+ | efdcfsi TMP0, CRET1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ |2:
+ | checktab CARG1
+ | checkfail ->vmeta_len
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | cmplwi TAB:TMP2, 0
+ | bne >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ |9:
+ | lbz TMP0, TAB:TMP2->nomm
+ | andi. TMP0, TMP0, 1<<MM_len
+ | bne <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithpre, t0, t1
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | evlddx t0, BASE, RB
+ | checknum t0
+ | evlddx t1, KBASE, RC
+ | checkfail ->vmeta_arith_vn
+ || break;
+ ||case 1:
+ | evlddx t1, BASE, RB
+ | checknum t1
+ | evlddx t0, KBASE, RC
+ | checkfail ->vmeta_arith_nv
+ || break;
+ ||default:
+ | evlddx t0, BASE, RB
+ | evlddx t1, BASE, RC
+ | evmergehi TMP2, t0, t1
+ | checknum TMP2
+ | checkanyfail ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arith, ins
+ | ins_arithpre TMP0, TMP1
+ | ins_next1
+ | ins TMP0, TMP0, TMP1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith efdadd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith efdsub
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith efdmul
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arith efddiv
+ break;
+ case BC_MODVN:
+ | ins_arithpre RD, SAVE0
+ |->BC_MODVN_Z:
+ | efddiv CARG2, RD, SAVE0
+ | bl ->vm_floor_efd // floor(b/c)
+ | efdmul TMP0, CRET2, SAVE0
+ | ins_next1
+ | efdsub TMP0, RD, TMP0 // b - floor(b/c)*c
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arithpre RD, SAVE0
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ break;
+ case BC_POW:
+ | evlddx CARG2, BASE, RB
+ | evlddx CARG4, BASE, RC
+ | evmergehi CARG1, CARG4, CARG2
+ | checknum CARG1
+ | evmergehi CARG3, CARG4, CARG4
+ | checkanyfail ->vmeta_arith_vv
+ | bl extern pow
+ | evmergelo CRET2, CRET1, CRET2
+ | evstddx CRET2, BASE, RA
+ | ins_next
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | sub CARG3, RC, RB
+ | stw BASE, L->base
+ | add CARG2, BASE, RC
+ | mr SAVE0, RB
+ |->BC_CAT_Z:
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | srwi CARG3, CARG3, 3
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | lwz BASE, L->base
+ | bne ->vmeta_binop
+ | evlddx TMP0, BASE, SAVE0 // Copy result from RB to RA.
+ | evstddx TMP0, BASE, RA
+ | ins_next
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | ins_next1
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4
+ | evmergelo TMP0, TISSTR, TMP0
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | ins_next1
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4
+ | li TMP2, LJ_TCDATA
+ | evmergelo TMP0, TMP2, TMP0
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+#endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ | srwi TMP1, RD, 3
+ | extsh TMP1, TMP1
+ | ins_next1
+ | efdcfsi TMP0, TMP1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | evlddx TMP0, KBASE, RD
+ | ins_next1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | srwi TMP1, RD, 3
+ | not TMP0, TMP1
+ | ins_next1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
+ | evstddx TISNIL, BASE, RA
+ | addi RA, RA, 8
+ |1:
+ | evstddx TISNIL, BASE, RA
+ | cmpw RA, RD
+ | addi RA, RA, 8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | ins_next1
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RD, RD, 1
+ | addi RD, RD, offsetof(GCfuncL, uvptr)
+ | lwzx UPVAL:RB, LFUNC:RB, RD
+ | lwz TMP1, UPVAL:RB->v
+ | evldd TMP0, 0(TMP1)
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | evlddx TMP1, BASE, RD
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP0, UPVAL:RB->closed
+ | evmergehi TMP2, TMP1, TMP1
+ | evstdd TMP1, 0(CARG2)
+ | cmplwi cr1, TMP0, 0
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | subi TMP2, TMP2, (LJ_TISNUM+1)
+ | bne >2 // Upvalue is closed and black?
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | cmplwi TMP2, LJ_TISGCV - (LJ_TISNUM+1)
+ | bge <1 // tvisgcv(v)
+ | lbz TMP3, GCOBJ:TMP1->gch.marked
+ | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | srwi RA, RA, 1
+ | subfic TMP1, TMP1, -4
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | evmergelo STR:TMP1, TISSTR, STR:TMP1
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP3, STR:TMP1->marked
+ | lbz TMP2, UPVAL:RB->closed
+ | evstdd STR:TMP1, 0(CARG2)
+ | bne >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(str)
+ | cmplwi cr1, TMP2, 0
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | ins_next1
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | evlddx TMP0, KBASE, RD
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lwz TMP1, UPVAL:RB->v
+ | evstdd TMP0, 0(TMP1)
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | ins_next1
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | srwi TMP0, RD, 3
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | not TMP0, TMP0
+ | lwz TMP1, UPVAL:RB->v
+ | stw TMP0, 0(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | lwz TMP1, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | stw BASE, L->base
+ | cmplwi TMP1, 0
+ | mr CARG1, L
+ | beq >1
+ | add CARG2, BASE, RA
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | lwz BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | srwi TMP1, RD, 1
+ | stw BASE, L->base
+ | subfic TMP1, TMP1, -4
+ | stw PC, SAVE_PC
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | mr CARG1, L
+ | lwz CARG3, FRAME_FUNC(BASE)
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | lwz BASE, L->base
+ | evmergelo LFUNC:CRET1, TISFUNC, LFUNC:CRET1
+ | evstddx LFUNC:CRET1, BASE, RA
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | mr CARG1, L
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | stw BASE, L->base
+ | cmplw TMP0, TMP1
+ | stw PC, SAVE_PC
+ | bge >5
+ |1:
+ if (op == BC_TNEW) {
+ | rlwinm CARG2, RD, 29, 21, 31
+ | rlwinm CARG3, RD, 18, 27, 31
+ | cmpwi CARG2, 0x7ff
+ | li TMP1, 0x801
+ | iseleq CARG2, TMP1, CARG2
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns Table *.
+ } else {
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns Table *.
+ }
+ | lwz BASE, L->base
+ | evmergelo TAB:CRET1, TISTAB, TAB:CRET1
+ | evstddx TAB:CRET1, BASE, RA
+ | ins_next
+ |5:
+ | mr SAVE0, RD
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mr RD, SAVE0
+ | mr CARG1, L
+ | b <1
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | lwz LFUNC:TMP2, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | lwz TAB:RB, LFUNC:TMP2->env
+ | subfic TMP1, TMP1, -4
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | evlddx TAB:RB, BASE, RB
+ | evlddx RC, BASE, RC
+ | checktab TAB:RB
+ | checkfail ->vmeta_tgetv
+ | checknum RC
+ | checkfail >5
+ | // Convert number key to integer
+ | efdctsi TMP2, RC
+ | lwz TMP0, TAB:RB->asize
+ | efdcfsi TMP1, TMP2
+ | cmplw cr0, TMP0, TMP2
+ | efdcmpeq cr1, RC, TMP1
+ | lwz TMP1, TAB:RB->array
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
+ | slwi TMP2, TMP2, 3
+ | ble ->vmeta_tgetv // Integer key and in array part?
+ | evlddx TMP1, TMP1, TMP2
+ | checknil TMP1
+ | checkok >2
+ |1:
+ | evstddx TMP1, BASE, RA
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andi. TMP0, TMP0, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetv
+ |
+ |5:
+ | checkstr STR:RC // String key?
+ | checkok ->BC_TGETS_Z
+ | b ->vmeta_tgetv
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
+ | evlddx TAB:RB, BASE, RB
+ | srwi TMP1, RC, 1
+ | checktab TAB:RB
+ | subfic TMP1, TMP1, -4
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | checkfail ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | evldd TMP0, NODE:TMP2->key
+ | evldd TMP1, NODE:TMP2->val
+ | evcmpeq TMP0, STR:RC
+ | checkanyfail >4
+ | checknil TMP1
+ | checkok >5 // Key found, but nil value?
+ |3:
+ | evstddx TMP1, BASE, RA
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | bne <1
+ | // End of hash chain: key not found, nil result.
+ | evmr TMP1, TISNIL
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <3 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andi. TMP0, TMP0, 1<<MM_index
+ | bne <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | evlddx TAB:RB, BASE, RB
+ | srwi TMP0, RC, 3
+ | checktab TAB:RB
+ | checkfail ->vmeta_tgetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | cmplw TMP0, TMP1
+ | bge ->vmeta_tgetb
+ | evlddx TMP1, TMP2, RC
+ | checknil TMP1
+ | checkok >5
+ |1:
+ | ins_next1
+ | evstddx TMP1, BASE, RA
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andi. TMP2, TMP2, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb // Caveat: preserve TMP0!
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | evlddx TAB:RB, BASE, RB
+ | evlddx RC, BASE, RC
+ | checktab TAB:RB
+ | checkfail ->vmeta_tsetv
+ | checknum RC
+ | checkfail >5
+ | // Convert number key to integer
+ | efdctsi TMP2, RC
+ | evlddx SAVE0, BASE, RA
+ | lwz TMP0, TAB:RB->asize
+ | efdcfsi TMP1, TMP2
+ | cmplw cr0, TMP0, TMP2
+ | efdcmpeq cr1, RC, TMP1
+ | lwz TMP1, TAB:RB->array
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
+ | slwi TMP0, TMP2, 3
+ | ble ->vmeta_tsetv // Integer key and in array part?
+ | lbz TMP3, TAB:RB->marked
+ | evlddx TMP2, TMP1, TMP0
+ | checknil TMP2
+ | checkok >3
+ |1:
+ | andi. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
+ | evstddx SAVE0, TMP1, TMP0
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andi. TMP2, TMP2, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetv
+ |
+ |5:
+ | checkstr STR:RC // String key?
+ | checkok ->BC_TSETS_Z
+ | b ->vmeta_tsetv
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | evlddx TAB:RB, BASE, RB
+ | srwi TMP1, RC, 1
+ | checktab TAB:RB
+ | subfic TMP1, TMP1, -4
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | checkfail ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | evlddx SAVE0, BASE, RA
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | lbz TMP3, TAB:RB->marked
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | evldd TMP0, NODE:TMP2->key
+ | evldd TMP1, NODE:TMP2->val
+ | evcmpeq TMP0, STR:RC
+ | checkanyfail >5
+ | checknil TMP1
+ | checkok >4 // Key found, but nil value?
+ |2:
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | evstdd SAVE0, NODE:TMP2->val
+ | bne >7
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <2 // No metatable: done.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andi. TMP0, TMP0, 1<<MM_newindex
+ | bne <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | bne <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | cmplwi TAB:TMP1, 0
+ | stw BASE, L->base
+ | beq >6 // No metatable: continue.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andi. TMP0, TMP0, 1<<MM_newindex
+ | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mr CARG2, TAB:RB
+ | evstdd STR:RC, 0(CARG3)
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | lwz BASE, L->base
+ | evstdd SAVE0, 0(CRET1)
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | evlddx TAB:RB, BASE, RB
+ | srwi TMP0, RC, 3
+ | checktab TAB:RB
+ | checkfail ->vmeta_tsetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | lbz TMP3, TAB:RB->marked
+ | cmplw TMP0, TMP1
+ | evlddx SAVE0, BASE, RA
+ | bge ->vmeta_tsetb
+ | evlddx TMP1, TMP2, RC
+ | checknil TMP1
+ | checkok >5
+ |1:
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | evstddx SAVE0, TMP2, RC
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP1, TAB:TMP1->nomm
+ | andi. TMP1, TMP1, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetb // Caveat: preserve TMP0!
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | add RA, BASE, RA
+ |1:
+ | add TMP3, KBASE, RD
+ | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table.
+ | addic. TMP0, MULTRES, -8
+ | lwz TMP3, 4(TMP3) // Integer constant is in lo-word.
+ | srwi CARG3, TMP0, 3
+ | beq >4 // Nothing to copy?
+ | add CARG3, CARG3, TMP3
+ | lwz TMP2, TAB:CARG2->asize
+ | slwi TMP1, TMP3, 3
+ | lbz TMP3, TAB:CARG2->marked
+ | cmplw CARG3, TMP2
+ | add TMP2, RA, TMP0
+ | lwz TMP0, TAB:CARG2->array
+ | bgt >5
+ | add TMP1, TMP1, TMP0
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | evldd TMP0, 0(RA)
+ | addi RA, RA, 8
+ | cmpw cr1, RA, TMP2
+ | evstdd TMP0, 0(TMP1)
+ | addi TMP1, TMP1, 8
+ | blt cr1, <3
+ | bne >7
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | mr SAVE0, RD
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | mr RD, SAVE0
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALL follows.
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | evlddx LFUNC:RB, BASE, RA
+ | mr TMP2, BASE
+ | add BASE, BASE, RA
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | checkfunc LFUNC:RB
+ | addi BASE, BASE, 8
+ | checkfail ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | evlddx LFUNC:RB, BASE, RA
+ | add RA, BASE, RA
+ | lwz TMP1, FRAME_PC(BASE)
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | checkfunc LFUNC:RB
+ | addi RA, RA, 8
+ | checkfail ->vmeta_callt
+ |->BC_CALLT_Z:
+ | andi. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand.
+ | lbz TMP3, LFUNC:RB->ffid
+ | xori TMP2, TMP1, FRAME_VARG
+ | cmplwi cr1, NARGS8:RC, 0
+ | bne >7
+ |1:
+ | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | li TMP2, 0
+ | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function?
+ | beq cr1, >3
+ |2:
+ | addi TMP3, TMP2, 8
+ | evlddx TMP0, RA, TMP2
+ | cmplw cr1, TMP3, NARGS8:RC
+ | evstddx TMP0, BASE, TMP2
+ | mr TMP2, TMP3
+ | bne cr1, <2
+ |3:
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt
+ | beq >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lwz INS, -4(TMP1)
+ | decode_RA8 RA, INS
+ | sub TMP1, BASE, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1)
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | andi. TMP0, TMP2, FRAME_TYPEP
+ | bne <1 // Vararg frame below?
+ | sub BASE, BASE, TMP2 // Relocate BASE down.
+ | lwz TMP1, FRAME_PC(BASE)
+ | andi. TMP0, TMP1, FRAME_TYPE
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | subi RA, RA, 24 // evldd doesn't support neg. offsets.
+ | mr TMP2, BASE
+ | evlddx LFUNC:RB, BASE, RA
+ | add BASE, BASE, RA
+ | evldd TMP0, 8(BASE)
+ | evldd TMP1, 16(BASE)
+ | evstdd LFUNC:RB, 24(BASE) // Copy callable.
+ | checkfunc LFUNC:RB
+ | evstdd TMP0, 32(BASE) // Copy state.
+ | li NARGS8:RC, 16 // Iterators get 2 arguments.
+ | evstdd TMP1, 40(BASE) // Copy control var.
+ | addi BASE, BASE, 32
+ | checkfail ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
+#if LJ_HASJIT
+ | // NYI: add hotloop, record BC_ITERN.
+#endif
+ | add RA, BASE, RA
+ | lwz TAB:RB, -12(RA)
+ | lwz RC, -4(RA) // Get index from control var.
+ | lwz TMP0, TAB:RB->asize
+ | lwz TMP1, TAB:RB->array
+ | addi PC, PC, 4
+ |1: // Traverse array part.
+ | cmplw RC, TMP0
+ | slwi TMP3, RC, 3
+ | bge >5 // Index points after array part?
+ | evlddx TMP2, TMP1, TMP3
+ | checknil TMP2
+ | lwz INS, -4(PC)
+ | checkok >4
+ | efdcfsi TMP0, RC
+ | addi RC, RC, 1
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | evstdd TMP2, 8(RA)
+ | decode_RD4 TMP1, INS
+ | stw RC, -4(RA) // Update control var.
+ | add PC, TMP1, TMP3
+ | evstdd TMP0, 0(RA)
+ |3:
+ | ins_next
+ |
+ |4: // Skip holes in array part.
+ | addi RC, RC, 1
+ | b <1
+ |
+ |5: // Traverse hash part.
+ | lwz TMP1, TAB:RB->hmask
+ | sub RC, RC, TMP0
+ | lwz TMP2, TAB:RB->node
+ |6:
+ | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1.
+ | slwi TMP3, RC, 5
+ | bgt <3
+ | slwi RB, RC, 3
+ | sub TMP3, TMP3, RB
+ | evlddx RB, TMP2, TMP3
+ | add NODE:TMP3, TMP2, TMP3
+ | checknil RB
+ | lwz INS, -4(PC)
+ | checkok >7
+ | evldd TMP3, NODE:TMP3->key
+ | addis TMP2, PC, -(BCBIAS_J*4 >> 16)
+ | evstdd RB, 8(RA)
+ | add RC, RC, TMP0
+ | decode_RD4 TMP1, INS
+ | evstdd TMP3, 0(RA)
+ | addi RC, RC, 1
+ | add PC, TMP1, TMP2
+ | stw RC, -4(RA) // Update control var.
+ | b <3
+ |
+ |7: // Skip holes in hash part.
+ | addi RC, RC, 1
+ | b <6
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | add RA, BASE, RA
+ | li TMP2, -24
+ | evlddx CFUNC:TMP1, RA, TMP2
+ | lwz TMP2, -16(RA)
+ | lwz TMP3, -8(RA)
+ | evmergehi TMP0, CFUNC:TMP1, CFUNC:TMP1
+ | cmpwi cr0, TMP2, LJ_TTAB
+ | cmpwi cr1, TMP0, LJ_TFUNC
+ | cmpwi cr6, TMP3, LJ_TNIL
+ | bne cr1, >5
+ | lbz TMP1, CFUNC:TMP1->ffid
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq
+ | cmpwi cr7, TMP1, FF_next_N
+ | srwi TMP0, RD, 1
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
+ | add TMP3, PC, TMP0
+ | bne cr0, >5
+ | stw ZERO, -4(RA) // Initialize control var.
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP0, BC_JMP
+ | li TMP1, BC_ITERC
+ | stb TMP0, -1(PC)
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ | stb TMP1, 3(PC)
+ | b <1
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | lwz TMP0, FRAME_PC(BASE)
+ | add RC, BASE, RC
+ | add RA, BASE, RA
+ | addi RC, RC, FRAME_VARG
+ | add TMP2, RA, RB
+ | subi TMP3, BASE, 8 // TMP3 = vtop
+ | sub RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | cmplwi cr1, RB, 0
+ | sub. TMP1, TMP3, RC
+ | beq cr1, >5 // Copy all varargs?
+ | subi TMP2, TMP2, 16
+ | ble >2 // No vararg slots?
+ |1: // Copy vararg slots to destination slots.
+ | evldd TMP0, 0(RC)
+ | addi RC, RC, 8
+ | evstdd TMP0, 0(RA)
+ | cmplw RA, TMP2
+ | cmplw cr1, RC, TMP3
+ | bge >3 // All destination slots filled?
+ | addi RA, RA, 8
+ | blt cr1, <1 // More vararg slots?
+ |2: // Fill up remainder with nil.
+ | evstdd TISNIL, 0(RA)
+ | cmplw RA, TMP2
+ | addi RA, RA, 8
+ | blt <2
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | lwz TMP0, L->maxstack
+ | li MULTRES, 8 // MULTRES = (0+1)*8
+ | ble <3 // No vararg slots?
+ | add TMP2, RA, TMP1
+ | cmplw TMP2, TMP0
+ | addi MULTRES, TMP1, 8
+ | bgt >7
+ |6:
+ | evldd TMP0, 0(RC)
+ | addi RC, RC, 8
+ | evstdd TMP0, 0(RA)
+ | cmplw RC, TMP3
+ | addi RA, RA, 8
+ | blt <6 // More vararg slots?
+ | b <3
+ |
+ |7: // Grow stack for varargs.
+ | mr CARG1, L
+ | stw RA, L->top
+ | sub SAVE0, RC, BASE // Need delta, because BASE may change.
+ | stw BASE, L->base
+ | sub RA, RA, BASE
+ | stw PC, SAVE_PC
+ | srwi CARG2, TMP1, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz BASE, L->base
+ | add RA, BASE, RA
+ | add RC, BASE, SAVE0
+ | subi TMP3, BASE, 8
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ |1:
+ | andi. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bne ->BC_RETV_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lwz INS, -4(PC)
+ | cmpwi RD, 8
+ | subi TMP2, BASE, 8
+ | subi RC, RD, 8
+ | decode_RB8 RB, INS
+ | beq >3
+ | li TMP1, 0
+ |2:
+ | addi TMP3, TMP1, 8
+ | evlddx TMP0, RA, TMP1
+ | cmpw TMP3, RC
+ | evstddx TMP0, TMP2, TMP1
+ | beq >3
+ | addi TMP1, TMP3, 8
+ | evlddx TMP0, RA, TMP3
+ | cmpw TMP1, RC
+ | evstddx TMP0, TMP2, TMP3
+ | bne <2
+ |3:
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | evstddx TISNIL, TMP2, TMP1
+ | b <5
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andi. TMP2, TMP1, FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ | andi. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bne ->BC_RETV_Z
+ |
+ | lwz INS, -4(PC)
+ | subi TMP2, BASE, 8
+ | decode_RB8 RB, INS
+ if (op == BC_RET1) {
+ | evldd TMP0, 0(RA)
+ | evstdd TMP0, 0(TMP2)
+ }
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | evstddx TISNIL, TMP2, TMP1
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | add RA, BASE, RA
+ | evldd TMP1, FORL_IDX*8(RA)
+ | evldd TMP3, FORL_STEP*8(RA)
+ | evldd TMP2, FORL_STOP*8(RA)
+ if (!vk) {
+ | evcmpgtu cr0, TMP1, TISNUM
+ | evcmpgtu cr7, TMP3, TISNUM
+ | evcmpgtu cr1, TMP2, TISNUM
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr7+lt
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | blt ->vmeta_for
+ }
+ if (vk) {
+ | efdadd TMP1, TMP1, TMP3
+ | evstdd TMP1, FORL_IDX*8(RA)
+ }
+ | evcmpgts TMP3, TISNIL
+ | evstdd TMP1, FORL_EXT*8(RA)
+ | bge >2
+ | efdcmpgt TMP1, TMP2
+ |1:
+ if (op != BC_JFORL) {
+ | srwi RD, RD, 1
+ | add RD, PC, RD
+ if (op == BC_JFORI) {
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else {
+ | addis RD, RD, -(BCBIAS_J*4 >> 16)
+ }
+ }
+ if (op == BC_FORI) {
+ | iselgt PC, RD, PC
+ } else if (op == BC_IFORL) {
+ | iselgt PC, PC, RD
+ } else {
+ | ble =>BC_JLOOP
+ }
+ | ins_next
+ |2:
+ | efdcmpgt TMP2, TMP1
+ | b <1
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | evlddx TMP1, BASE, RA
+ | subi RA, RA, 8
+ | checknil TMP1
+ | checkok >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | NYI
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | evstddx TMP1, BASE, RA
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD is only used by trace recorder to determine scope/extent
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+#if LJ_HASJIT
+ | hotloop
+#endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ | NYI
+#endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ | hotcall
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | lbz TMP1, -4+PC2PROTO(numparams)(PC)
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw RA, TMP2
+ | slwi TMP1, TMP1, 3
+ | bgt ->vm_growstack_l
+ | ins_next1
+ |2:
+ | cmplw NARGS8:RC, TMP1 // Check for missing parameters.
+ | ble >3
+ if (op == BC_JFUNCF) {
+ | NYI
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | evstddx TISNIL, BASE, NARGS8:RC
+ | addi NARGS8:RC, NARGS8:RC, 8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | add TMP1, BASE, RC
+ | add TMP0, RA, RC
+ | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC.
+ | addi TMP3, RC, 8+FRAME_VARG
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw TMP0, TMP2
+ | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG.
+ | bge ->vm_growstack_l
+ | lbz TMP2, -4+PC2PROTO(numparams)(PC)
+ | mr RA, BASE
+ | mr RC, TMP1
+ | ins_next1
+ | cmpwi TMP2, 0
+ | addi BASE, TMP1, 8
+ | beq >3
+ |1:
+ | cmplw RA, RC // Less args than parameters?
+ | evldd TMP0, 0(RA)
+ | bge >4
+ | evstdd TISNIL, 0(RA) // Clear old fixarg slot (help the GC).
+ | addi RA, RA, 8
+ |2:
+ | addic. TMP2, TMP2, -1
+ | evstdd TMP0, 8(TMP1)
+ | addi TMP1, TMP1, 8
+ | bne <1
+ |3:
+ | ins_next2
+ |
+ |4: // Clear missing parameters.
+ | evmr TMP0, TISNIL
+ | b <2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | lwz TMP3, CFUNC:RB->f
+ } else {
+ | lwz TMP3, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | add TMP1, RA, NARGS8:RC
+ | lwz TMP2, L->maxstack
+ | add RC, BASE, NARGS8:RC
+ | stw BASE, L->base
+ | cmplw TMP1, TMP2
+ | stw RC, L->top
+ | li_vmstate C
+ | mtctr TMP3
+ if (op == BC_FUNCCW) {
+ | lwz CARG2, CFUNC:RB->f
+ }
+ | mr CARG1, L
+ | bgt ->vm_growstack_c // Need to grow stack.
+ | st_vmstate
+ | bctrl // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | lwz TMP1, L->top
+ | slwi RD, CRET1, 3
+ | lwz BASE, L->base
+ | li_vmstate INTERP
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | sub RA, TMP1, RD // RA = L->top - nresults*8
+ | st_vmstate
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
+ (int)ctx->codesz, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
+ 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .LASFDE1-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
+ (int)ctx->codesz, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
+ 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE1:\n\n");
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/src/LuaJIT/src/buildvm_ppcspe.h b/src/LuaJIT/src/buildvm_ppcspe.h
new file mode 100644
index 000000000..32571ebe9
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_ppcspe.h
@@ -0,0 +1,6093 @@
+/*
+** This file has been pre-processed with DynASM.
+** http://luajit.org/dynasm.html
+** DynASM version 1.3.0, DynASM ppc version 1.3.0
+** DO NOT EDIT! The original file is in "buildvm_ppcspe.dasc".
+*/
+
+#if DASM_VERSION != 10300
+#error "Version mismatch between DynASM and included encoding engine"
+#endif
+
+#define DASM_SECTION_CODE_OP 0
+#define DASM_SECTION_CODE_SUB 1
+#define DASM_MAXSECTION 2
+static const unsigned int build_actionlist[4995] = {
+0x00010001,
+0x00060014,
+0x72000000,
+0x00090200,
+0x11000229,
+0x000980b0,
+0x41820000,
+0x00050815,
+0x8209fff8,
+0x7d2e4b78,
+0x9514fff8,
+0x00060016,
+0x72000000,
+0x00090200,
+0x398c0008,
+0x7d936378,
+0x41820000,
+0x00050817,
+0x00060018,
+0x2c000000,
+0x00098200,
+0x56090038,
+0x38000000,
+0x00098200,
+0x7d297050,
+0x40820000,
+0x00050814,
+0x350cfff8,
+0x91320000,
+0x00098200,
+0x81210018,
+0x39cefff8,
+0x90110000,
+0x00098200,
+0x55291800,
+0x000900a1,
+0x41820000,
+0x00050802,
+0x0006000b,
+0x3508fff8,
+0x10140301,
+0x3a940008,
+0x100e0321,
+0x39ce0008,
+0x40820000,
+0x0005080b,
+0x0006000c,
+0x7c096000,
+0x40820000,
+0x00050806,
+0x0006000d,
+0x91d20000,
+0x00098200,
+0x00060019,
+0x00000000,
+0x80010014,
+0x38600000,
+0x90120000,
+0x00098200,
+0x0006001a,
+0x800100bc,
+0x81810024,
+0x11c12b01,
+0x11e13301,
+0x12013b01,
+0x12214301,
+0x12414b01,
+0x12615301,
+0x7c0803a6,
+0x7d838120,
+0x12815b01,
+0x12a16301,
+0x12c16b01,
+0x12e17301,
+0x13017b01,
+0x13218301,
+0x13418b01,
+0x13619301,
+0x13819b01,
+0x13a1a301,
+0x13c1ab01,
+0x13e1b301,
+0x382100b8,
+0x4e800020,
+0x00060010,
+0x40810000,
+0x00050807,
+0x81120000,
+0x00098200,
+0x7c0e4040,
+0x40800000,
+0x00050808,
+0x134e0321,
+0x398c0008,
+0x39ce0008,
+0x48000000,
+0x0005000c,
+0x00060011,
+0x7c096050,
+0x2c090000,
+0x7c007050,
+0x7dce009e,
+0x48000000,
+0x0005000d,
+0x00060012,
+0x91d20000,
+0x00098200,
+0x7d956378,
+0x7d244b78,
+0x7e439378,
+0x48000001,
+0x00030000,
+0x81210018,
+0x7eacab78,
+0x55291800,
+0x000900a1,
+0x81d20000,
+0x00098200,
+0x48000000,
+0x0005000c,
+0x0006001b,
+0x7c611b78,
+0x7c832378,
+0x0006001c,
+0x82410010,
+0x38000000,
+0x00098200,
+0x81120000,
+0x00098200,
+0x90080000,
+0x00098200,
+0x48000000,
+0x0005001a,
+0x0006001d,
+0x00000000,
+0x5461003a,
+0x0006001e,
+0x82410010,
+0x12c00229,
+0x000980b0,
+0x13200229,
+0x000980b0,
+0x3f604338,
+0x13000229,
+0x000980b0,
+0x38000000,
+0x81d20000,
+0x00098200,
+0x137b022d,
+0x82320000,
+0x00098200,
+0x12e00229,
+0x000980b0,
+0x39000000,
+0x00098200,
+0x13400229,
+0x000980b0,
+0x38000000,
+0x00098200,
+0x820efff8,
+0x3a8efff8,
+0x3a310000,
+0x00098200,
+0x91140000,
+0x39800010,
+0x90110000,
+0x00098200,
+0x48000000,
+0x00050016,
+0x0006001f,
+0x38800000,
+0x00098200,
+0x48000000,
+0x00050002,
+0x00060020,
+0x7d6e5a14,
+0x7e8ea050,
+0x91d20000,
+0x00098200,
+0x3a100004,
+0x91720000,
+0x00098200,
+0x568400fe,
+0x000900ab,
+0x0006000c,
+0x9201000c,
+0x7e439378,
+0x48000001,
+0x00030000,
+0x00000000,
+0x81d20000,
+0x00098200,
+0x81720000,
+0x00098200,
+0x814efffc,
+0x7d6e5850,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00060021,
+0x9421ff48,
+0x11c12b21,
+0x11e13321,
+0x12013b21,
+0x12214321,
+0x12414b21,
+0x12615321,
+0x7c0802a6,
+0x7d800026,
+0x12815b21,
+0x12a16321,
+0x12c16b21,
+0x12e17321,
+0x13017b21,
+0x13218321,
+0x900100bc,
+0x91810024,
+0x13418b21,
+0x13619321,
+0x13819b21,
+0x13a1a321,
+0x13c1ab21,
+0x13e1b321,
+0x7c721b78,
+0x82320000,
+0x00098200,
+0x7c8e2378,
+0x89120000,
+0x00098200,
+0x92410010,
+0x3a000000,
+0x00098200,
+0x38010000,
+0x00098200,
+0x3a310000,
+0x00098200,
+0x90a10018,
+0x28080000,
+0x90a1001c,
+0x90120000,
+0x00098200,
+0x90a10014,
+0x9061000c,
+0x41820000,
+0x00050803,
+0x7dd47378,
+0x81d20000,
+0x00098200,
+0x12c00229,
+0x000980b0,
+0x81120000,
+0x00098200,
+0x13200229,
+0x000980b0,
+0x3f604338,
+0x13000229,
+0x000980b0,
+0x820efff8,
+0x39200000,
+0x12e00229,
+0x000980b0,
+0x7d8e4050,
+0x137b4a2d,
+0x98b20000,
+0x00098200,
+0x72000000,
+0x00090200,
+0x38000000,
+0x00098200,
+0x398c0008,
+0x13400229,
+0x000980b0,
+0x7d936378,
+0x90110000,
+0x00098200,
+0x00000000,
+0x41820000,
+0x00050817,
+0x48000000,
+0x00050018,
+0x00060022,
+0x9421ff48,
+0x11c12b21,
+0x11e13321,
+0x12013b21,
+0x12214321,
+0x12414b21,
+0x12615321,
+0x7c0802a6,
+0x7d800026,
+0x12815b21,
+0x12a16321,
+0x12c16b21,
+0x12e17321,
+0x13017b21,
+0x13218321,
+0x900100bc,
+0x91810024,
+0x13418b21,
+0x13619321,
+0x13819b21,
+0x13a1a321,
+0x13c1ab21,
+0x13e1b321,
+0x3a000000,
+0x00098200,
+0x90c1001c,
+0x48000000,
+0x00050001,
+0x00060023,
+0x9421ff48,
+0x11c12b21,
+0x11e13321,
+0x12013b21,
+0x12214321,
+0x12414b21,
+0x12615321,
+0x7c0802a6,
+0x7d800026,
+0x12815b21,
+0x12a16321,
+0x12c16b21,
+0x12e17321,
+0x13017b21,
+0x13218321,
+0x900100bc,
+0x91810024,
+0x13418b21,
+0x13619321,
+0x13819b21,
+0x13a1a321,
+0x13c1ab21,
+0x13e1b321,
+0x3a000000,
+0x00098200,
+0x0006000b,
+0x81030000,
+0x00098200,
+0x90a10018,
+0x7c721b78,
+0x90610010,
+0x7c8e2378,
+0x90320000,
+0x00098200,
+0x82320000,
+0x00098200,
+0x9061000c,
+0x91010014,
+0x3a310000,
+0x00098200,
+0x0006000d,
+0x81320000,
+0x00098200,
+0x12c00229,
+0x000980b0,
+0x81120000,
+0x00098200,
+0x13200229,
+0x000980b0,
+0x7e107214,
+0x13000229,
+0x000980b0,
+0x3f604338,
+0x38000000,
+0x7e098050,
+0x12e00229,
+0x000980b0,
+0x7d6e4050,
+0x137b022d,
+0x38000000,
+0x00098200,
+0x13400229,
+0x000980b0,
+0x90110000,
+0x00098200,
+0x00060024,
+0x00000000,
+0x3800fff8,
+0x114e0300,
+0x100aca34,
+0x40800000,
+0x00050825,
+0x00060026,
+0x920efff8,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00060027,
+0x9421ff48,
+0x11c12b21,
+0x11e13321,
+0x12013b21,
+0x12214321,
+0x12414b21,
+0x12615321,
+0x7c0802a6,
+0x7d800026,
+0x12815b21,
+0x12a16321,
+0x12c16b21,
+0x12e17321,
+0x13017b21,
+0x13218321,
+0x900100bc,
+0x91810024,
+0x13418b21,
+0x13619321,
+0x13819b21,
+0x13a1a321,
+0x13c1ab21,
+0x13e1b321,
+0x7c721b78,
+0x80030000,
+0x00098200,
+0x90610010,
+0x81120000,
+0x00098200,
+0x9061000c,
+0x7c080050,
+0x81120000,
+0x00098200,
+0x90320000,
+0x00098200,
+0x39200000,
+0x90010018,
+0x9121001c,
+0x91010014,
+0x7cc903a6,
+0x4e800421,
+0x7c6e1b79,
+0x82320000,
+0x00098200,
+0x3a000000,
+0x00098200,
+0x3a310000,
+0x00098200,
+0x40820000,
+0x0005080d,
+0x48000000,
+0x00050019,
+0x00060015,
+0x800efff4,
+0x7dca7378,
+0x7d2e4b78,
+0x8109fffc,
+0x28000000,
+0x820afff0,
+0x41820000,
+0x00050801,
+0x392cfff8,
+0x81080000,
+0x00098200,
+0x13544b20,
+0x81e80000,
+0x00098200,
+0x7c0903a6,
+0x4e800420,
+0x0006000b,
+0x390afff0,
+0x7d6e4050,
+0x48000000,
+0x00050028,
+0x00060029,
+0x80f0fffc,
+0x388afff0,
+0x54f55d78,
+0x10140301,
+0x7d0eaa14,
+0x91d20000,
+0x00098200,
+0x7c082040,
+0x7ca82050,
+0x54f4dd78,
+0x10040321,
+0x40820000,
+0x0005082a,
+0x00000000,
+0x100ea320,
+0x48000000,
+0x0005002b,
+0x0006002c,
+0x11775a2d,
+0x38b10000,
+0x00098200,
+0x54ea5d78,
+0x11650321,
+0x7c8e5214,
+0x48000000,
+0x00050001,
+0x0006002d,
+0x1158522d,
+0x38910000,
+0x00098200,
+0x11775a2d,
+0x11440321,
+0x38b10000,
+0x00098200,
+0x11650321,
+0x48000000,
+0x00050001,
+0x0006002e,
+0x100002f1,
+0x54ea5d78,
+0x38b10000,
+0x00098200,
+0x7c8e5214,
+0x10050321,
+0x48000000,
+0x00050001,
+0x0006002f,
+0x54ea5d78,
+0x54eb9d78,
+0x7c8e5214,
+0x7cae5a14,
+0x0006000b,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x9201000c,
+0x48000001,
+0x00030001,
+0x28030000,
+0x41820000,
+0x00050803,
+0x10030301,
+0x100ea320,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000d,
+0x210e0000,
+0x00098200,
+0x81d20000,
+0x00098200,
+0x920efff0,
+0x7e087214,
+0x814efffc,
+0x39600010,
+0x48000000,
+0x00050026,
+0x00060030,
+0x11775a2d,
+0x38b10000,
+0x00098200,
+0x00000000,
+0x54ea5d78,
+0x11650321,
+0x7c8e5214,
+0x48000000,
+0x00050001,
+0x00060031,
+0x1158522d,
+0x38910000,
+0x00098200,
+0x11775a2d,
+0x11440321,
+0x38b10000,
+0x00098200,
+0x11650321,
+0x48000000,
+0x00050001,
+0x00060032,
+0x100002f1,
+0x54ea5d78,
+0x38b10000,
+0x00098200,
+0x7c8e5214,
+0x10050321,
+0x48000000,
+0x00050001,
+0x00060033,
+0x54ea5d78,
+0x54eb9d78,
+0x7c8e5214,
+0x7cae5a14,
+0x0006000b,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x9201000c,
+0x48000001,
+0x00030002,
+0x28030000,
+0x100ea300,
+0x41820000,
+0x00050803,
+0x10030321,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000d,
+0x210e0000,
+0x00098200,
+0x81d20000,
+0x00098200,
+0x920efff0,
+0x7e087214,
+0x814efffc,
+0x39600018,
+0x100e1321,
+0x48000000,
+0x00050026,
+0x00060034,
+0x7e439378,
+0x3a10fffc,
+0x7c8ea214,
+0x9201000c,
+0x7cae6214,
+0x91d20000,
+0x00098200,
+0x54e6063e,
+0x48000001,
+0x00030003,
+0x0006000d,
+0x28030001,
+0x41810000,
+0x00050835,
+0x0006000e,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e993ba,
+0x3cd00000,
+0x00098200,
+0x7d293214,
+0x7e10481e,
+0x0006002b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00060036,
+0x80f0fffc,
+0x10140301,
+0x54e8dd78,
+0x100e4320,
+0x48000000,
+0x0005002b,
+0x00060037,
+0x80140000,
+0x39000000,
+0x00098200,
+0x7c080040,
+0x48000000,
+0x0005000e,
+0x00060038,
+0x80140000,
+0x39000000,
+0x00098200,
+0x7c004040,
+0x48000000,
+0x0005000e,
+0x00060039,
+0x3a10fffc,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x9201000c,
+0x48000001,
+0x00030004,
+0x48000000,
+0x0005000d,
+0x0006003a,
+0x7cae5214,
+0x7ccf5a14,
+0x48000000,
+0x00050001,
+0x0006003b,
+0x7caf5a14,
+0x7cce5214,
+0x48000000,
+0x00050001,
+0x0006003c,
+0x7cae6214,
+0x7ca62b78,
+0x48000000,
+0x00050001,
+0x0006003d,
+0x7cae5214,
+0x7cce5a14,
+0x0006000b,
+0x00000000,
+0x7c8ea214,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x9201000c,
+0x54e7063e,
+0x48000001,
+0x00030005,
+0x28030000,
+0x41820000,
+0x0005082b,
+0x00060035,
+0x7d0e1850,
+0x9203fff0,
+0x7dc97378,
+0x3a080000,
+0x00098200,
+0x7c6e1b78,
+0x39600010,
+0x48000000,
+0x00050024,
+0x0006003e,
+0x00000000,
+0x7c751b78,
+0x00000000,
+0x7c8e6214,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x9201000c,
+0x48000001,
+0x00030006,
+0x00000000,
+0x28030000,
+0x40820000,
+0x00050835,
+0x7ea3ab78,
+0x48000000,
+0x0005003f,
+0x00000000,
+0x48000000,
+0x00050035,
+0x00000000,
+0x00060025,
+0x7e439378,
+0x91320000,
+0x00098200,
+0x388efff8,
+0x9201000c,
+0x7cae5a14,
+0x7d755b78,
+0x48000001,
+0x00030007,
+0x814efffc,
+0x39750008,
+0x920efff8,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00060040,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x3894fff8,
+0x9201000c,
+0x7cb45a14,
+0x7d755b78,
+0x48000001,
+0x00030007,
+0x810efff8,
+0x39750008,
+0x8154fffc,
+0x48000000,
+0x00050041,
+0x00060042,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x7e84a378,
+0x9201000c,
+0x7cf53b78,
+0x48000001,
+0x00030008,
+0x00000000,
+0x56a0063e,
+0x00000000,
+0x56b4dd78,
+0x00000000,
+0x2c000000,
+0x00098200,
+0x00000000,
+0x56ac9b78,
+0x00000000,
+0x41820000,
+0x00070800,
+0x00000000,
+0x48000000,
+0x00070000,
+0x00060043,
+0x280b0008,
+0x100e0301,
+0x41800000,
+0x00050844,
+0x111ad200,
+0x3a8efff8,
+0x10804232,
+0x820efff8,
+0x40840000,
+0x00050844,
+0x10140321,
+0x398b0008,
+0x41820000,
+0x00050845,
+0x39000008,
+0x396bfff8,
+0x0006000b,
+0x7c085840,
+0x100e4300,
+0x10144320,
+0x39080008,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x00050045,
+0x00060046,
+0x280b0008,
+0x806e0000,
+0x41800000,
+0x00050844,
+0x39200000,
+0x00098200,
+0x7c03b040,
+0x7c6818f8,
+0x7d09401e,
+0x55081800,
+0x000900a1,
+0x392a0000,
+0x00098200,
+0x10694300,
+0x48000000,
+0x00050047,
+0x00060048,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003c234,
+0x11031a2c,
+0x40800000,
+0x00050806,
+0x0006000b,
+0x81430000,
+0x00098200,
+0x0006000c,
+0x107ad217,
+0x280a0000,
+0x81710000,
+0x00098200,
+0x41820000,
+0x00050847,
+0x00000000,
+0x800a0000,
+0x00098200,
+0x1078522d,
+0x810b0000,
+0x00098200,
+0x812a0000,
+0x00098200,
+0x11775a2d,
+0x7d080038,
+0x55002800,
+0x000900a1,
+0x55081800,
+0x000900a1,
+0x7d080050,
+0x7d294214,
+0x0006000d,
+0x10090301,
+0x00090cab,
+0x11090301,
+0x00090cab,
+0x10005a34,
+0x81290000,
+0x00098200,
+0x41830000,
+0x00050805,
+0x28090000,
+0x41820000,
+0x00050847,
+0x48000000,
+0x0005000d,
+0x0006000f,
+0x1008d234,
+0x41800000,
+0x00050847,
+0x10684217,
+0x48000000,
+0x00050047,
+0x00060010,
+0x2c080000,
+0x00098200,
+0x7d0840f8,
+0x41820000,
+0x0005080b,
+0x1003b232,
+0x55081000,
+0x000900a1,
+0x39200000,
+0x00098200,
+0x7d09401e,
+0x39310000,
+0x00098200,
+0x7d49402e,
+0x48000000,
+0x0005000c,
+0x00060049,
+0x00000000,
+0x280b0010,
+0x106e0301,
+0x108e0b01,
+0x41800000,
+0x00050844,
+0x1003222c,
+0x1000c234,
+0x40830000,
+0x00050844,
+0x81030000,
+0x00098200,
+0x28080000,
+0x88c30000,
+0x00098200,
+0x40820000,
+0x00050844,
+0x70c00000,
+0x00090200,
+0x90830000,
+0x00098200,
+0x41820000,
+0x00050847,
+0x80110000,
+0x00098200,
+0x54c607b8,
+0x90710000,
+0x00098200,
+0x98c30000,
+0x00098200,
+0x90030000,
+0x00098200,
+0x48000000,
+0x00050047,
+0x0006004a,
+0x280b0010,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004c234,
+0x38ae0008,
+0x40800000,
+0x00050844,
+0x7e439378,
+0x48000001,
+0x00030009,
+0x10630301,
+0x48000000,
+0x00050047,
+0x0006004b,
+0x280b0008,
+0x106e0301,
+0x40820000,
+0x00050844,
+0x1003b232,
+0x41800000,
+0x00050847,
+0x48000000,
+0x00050044,
+0x0006004c,
+0x00000000,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003ba34,
+0x41800000,
+0x00050847,
+0x80110000,
+0x00098200,
+0x1003b232,
+0x28800000,
+0x91d20000,
+0x00098200,
+0x4c403202,
+0x9201000c,
+0x40820000,
+0x00050844,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x0005084d,
+0x7e439378,
+0x7dc47378,
+0x48000001,
+0x0003000a,
+0x10771a2d,
+0x48000000,
+0x00050047,
+0x0006004e,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x134e5b20,
+0x1004c234,
+0x820efff8,
+0x40800000,
+0x00050844,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x38ae0008,
+0x9201000c,
+0x48000001,
+0x0003000b,
+0x28030000,
+0x107ad217,
+0x41820000,
+0x00050847,
+0x100e0b01,
+0x3a8efff8,
+0x110e1301,
+0x10140321,
+0x39800000,
+0x00098200,
+0x11140b21,
+0x48000000,
+0x00050045,
+0x0006004f,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003c234,
+0x820efff8,
+0x40800000,
+0x00050844,
+0x00000000,
+0x81230000,
+0x00098200,
+0x100a0301,
+0x00090cab,
+0x28090000,
+0x3a8efff8,
+0x40820000,
+0x00050844,
+0x00000000,
+0x100a0301,
+0x00090cab,
+0x3a8efff8,
+0x00000000,
+0x134e0b21,
+0x39800000,
+0x00098200,
+0x10140321,
+0x48000000,
+0x00050045,
+0x00060050,
+0x280b0010,
+0x106e0301,
+0x108e0b01,
+0x41800000,
+0x00050844,
+0x1003c234,
+0x820efff8,
+0x40800000,
+0x00050844,
+0x1004b232,
+0x3cc03ff0,
+0x40800000,
+0x00050844,
+0x112022f5,
+0x80030000,
+0x00098200,
+0x10c6da2d,
+0x81030000,
+0x00098200,
+0x108432e0,
+0x39290001,
+0x3a8efff8,
+0x7c004840,
+0x55261800,
+0x000900a1,
+0x10940321,
+0x40810000,
+0x00050802,
+0x11083300,
+0x0006000b,
+0x1008d234,
+0x39800000,
+0x00098200,
+0x41800000,
+0x00050845,
+0x39800000,
+0x00098200,
+0x11140b21,
+0x48000000,
+0x00050045,
+0x0006000c,
+0x80030000,
+0x00098200,
+0x28000000,
+0x39800000,
+0x00098200,
+0x41820000,
+0x00050845,
+0x7d244b78,
+0x48000001,
+0x0003000c,
+0x28030000,
+0x39800000,
+0x00098200,
+0x41820000,
+0x00050845,
+0x00000000,
+0x11030301,
+0x48000000,
+0x0005000b,
+0x00060051,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003c234,
+0x820efff8,
+0x40800000,
+0x00050844,
+0x00000000,
+0x81230000,
+0x00098200,
+0x100a0301,
+0x00090cab,
+0x28090000,
+0x3a8efff8,
+0x40820000,
+0x00050844,
+0x00000000,
+0x100a0301,
+0x00090cab,
+0x3a8efff8,
+0x00000000,
+0x11000229,
+0x39800000,
+0x00098200,
+0x110e0b21,
+0x10140321,
+0x48000000,
+0x00050045,
+0x00060052,
+0x280b0008,
+0x88d10000,
+0x00098200,
+0x41800000,
+0x00050844,
+0x7dc97378,
+0x39ce0008,
+0x54c607fe,
+0x000900ab,
+0x396bfff8,
+0x3a060000,
+0x00098200,
+0x48000000,
+0x00050024,
+0x00060053,
+0x280b0010,
+0x106e0301,
+0x108e0b01,
+0x41800000,
+0x00050844,
+0x88d10000,
+0x00098200,
+0x7dc97378,
+0x1004ca34,
+0x40800000,
+0x00050844,
+0x39ce0010,
+0x54c607fe,
+0x000900ab,
+0x10890321,
+0x396bfff0,
+0x10690b21,
+0x3a060000,
+0x00098200,
+0x48000000,
+0x00050024,
+0x00060054,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x10031a2c,
+0x2c000000,
+0x00098200,
+0x40820000,
+0x00050844,
+0x88030000,
+0x00098200,
+0x81030000,
+0x00098200,
+0x80830000,
+0x00098200,
+0x00000000,
+0x28000000,
+0x00090200,
+0x81230000,
+0x00098200,
+0x28880000,
+0x80030000,
+0x00098200,
+0x7f844840,
+0x820efff8,
+0x4f013342,
+0x7d245a14,
+0x4f3e1102,
+0x7c890040,
+0x4f18cb82,
+0x9201000c,
+0x4f182b82,
+0x91d20000,
+0x00098200,
+0x41980000,
+0x00050844,
+0x0006000b,
+0x39ce0008,
+0x396bfff8,
+0x3929fff8,
+0x91230000,
+0x00098200,
+0x39000000,
+0x91d20000,
+0x00098200,
+0x0006000c,
+0x7c085800,
+0x100e4300,
+0x41820000,
+0x00050803,
+0x10044320,
+0x39080008,
+0x48000000,
+0x0005000c,
+0x0006000d,
+0x38a00000,
+0x7c751b78,
+0x38c00000,
+0x48000001,
+0x00050021,
+0x0006000e,
+0x81350000,
+0x00098200,
+0x28030000,
+0x00090200,
+0x80d50000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x81d20000,
+0x00098200,
+0x90110000,
+0x00098200,
+0x41810000,
+0x00050808,
+0x7d893050,
+0x80120000,
+0x00098200,
+0x00000000,
+0x280c0000,
+0x7d0e6214,
+0x41820000,
+0x00050806,
+0x7c080040,
+0x39000000,
+0x41810000,
+0x00050809,
+0x38ccfff8,
+0x91350000,
+0x00098200,
+0x0006000f,
+0x7c083040,
+0x10094300,
+0x100e4320,
+0x39080008,
+0x40820000,
+0x0005080f,
+0x00060010,
+0x72000000,
+0x00090200,
+0x39000000,
+0x00098200,
+0x3a8efff8,
+0x910efff8,
+0x398c0010,
+0x00060011,
+0x9201000c,
+0x7d936378,
+0x41820000,
+0x00050817,
+0x48000000,
+0x00050018,
+0x00060012,
+0x72000000,
+0x00090200,
+0x38c6fff8,
+0x39000000,
+0x00098200,
+0x10060301,
+0x90d50000,
+0x00098200,
+0x39800000,
+0x00098200,
+0x910efff8,
+0x3a8efff8,
+0x100e0321,
+0x48000000,
+0x00050011,
+0x00060013,
+0x7e439378,
+0x558400fe,
+0x000900ab,
+0x48000001,
+0x00030000,
+0x38600000,
+0x48000000,
+0x0005000e,
+0x00060055,
+0x00000000,
+0x806a0000,
+0x00098200,
+0x88030000,
+0x00098200,
+0x81030000,
+0x00098200,
+0x80830000,
+0x00098200,
+0x28000000,
+0x00090200,
+0x81230000,
+0x00098200,
+0x28880000,
+0x80030000,
+0x00098200,
+0x7f844840,
+0x820efff8,
+0x4f013342,
+0x7d245a14,
+0x4f3e1102,
+0x7c890040,
+0x4f18cb82,
+0x9201000c,
+0x4f182b82,
+0x91d20000,
+0x00098200,
+0x41980000,
+0x00050844,
+0x0006000b,
+0x91230000,
+0x00098200,
+0x39000000,
+0x91d20000,
+0x00098200,
+0x0006000c,
+0x7c085800,
+0x100e4300,
+0x41820000,
+0x00050803,
+0x10044320,
+0x39080008,
+0x48000000,
+0x0005000c,
+0x0006000d,
+0x38a00000,
+0x7c751b78,
+0x38c00000,
+0x48000001,
+0x00050021,
+0x0006000e,
+0x81350000,
+0x00098200,
+0x28030000,
+0x00090200,
+0x80d50000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x00000000,
+0x81d20000,
+0x00098200,
+0x90110000,
+0x00098200,
+0x41810000,
+0x00050808,
+0x7d893050,
+0x80120000,
+0x00098200,
+0x280c0000,
+0x7d0e6214,
+0x41820000,
+0x00050806,
+0x7c080040,
+0x39000000,
+0x41810000,
+0x00050809,
+0x38ccfff8,
+0x91350000,
+0x00098200,
+0x0006000f,
+0x7c083040,
+0x10094300,
+0x100e4320,
+0x39080008,
+0x40820000,
+0x0005080f,
+0x00060010,
+0x72000000,
+0x00090200,
+0x7dd47378,
+0x398c0008,
+0x00060011,
+0x9201000c,
+0x7d936378,
+0x41820000,
+0x00050817,
+0x48000000,
+0x00050018,
+0x00060012,
+0x7e439378,
+0x7ea4ab78,
+0x48000001,
+0x0003000d,
+0x00060013,
+0x7e439378,
+0x558400fe,
+0x000900ab,
+0x48000001,
+0x00030000,
+0x38600000,
+0x48000000,
+0x0005000e,
+0x00060056,
+0x80120000,
+0x00098200,
+0x00000000,
+0x7d0e5a14,
+0x91d20000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x91120000,
+0x00098200,
+0x38600000,
+0x00098200,
+0x41820000,
+0x00050844,
+0x93720000,
+0x00098200,
+0x98720000,
+0x00098200,
+0x48000000,
+0x0005001a,
+0x00060057,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003b232,
+0x40800000,
+0x00050844,
+0x106302e4,
+0x00060047,
+0x820efff8,
+0x3a8efff8,
+0x10740321,
+0x00060058,
+0x39800000,
+0x00098200,
+0x00060045,
+0x72000000,
+0x00090200,
+0x7d936378,
+0x40820000,
+0x00050818,
+0x80f0fffc,
+0x54ea5d78,
+0x0006000f,
+0x7c0a6040,
+0x54e0dd78,
+0x41810000,
+0x00050806,
+0x80f00000,
+0x3a100004,
+0x7dc0a050,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00060010,
+0x390cfff8,
+0x398c0008,
+0x13544320,
+0x48000000,
+0x0005000f,
+0x00060059,
+0x00000000,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x820efff8,
+0x48000001,
+0x0005005a,
+0x3a8efff8,
+0x10940321,
+0x48000000,
+0x00050058,
+0x0006005b,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x820efff8,
+0x48000001,
+0x0005005c,
+0x3a8efff8,
+0x10940321,
+0x48000000,
+0x00050058,
+0x0006005d,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x0003000e,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x0006005e,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x0003000f,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x0006005f,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x00000000,
+0x48000001,
+0x00030010,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060060,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x00030011,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060061,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x00030012,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060062,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x00030013,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060063,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x00030014,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060064,
+0x00000000,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x00030015,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060065,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x00030016,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060066,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x00030017,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060067,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x00030018,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060068,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x00000000,
+0x48000001,
+0x00030019,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060069,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x48000001,
+0x0003001a,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x0006006a,
+0x280b0010,
+0x108e0301,
+0x10ce0b01,
+0x41800000,
+0x00050844,
+0x1066222c,
+0x1003b232,
+0x10a6322c,
+0x40830000,
+0x00050844,
+0x48000001,
+0x0003001b,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x0006006b,
+0x280b0010,
+0x108e0301,
+0x10ce0b01,
+0x41800000,
+0x00050844,
+0x1066222c,
+0x1003b232,
+0x10a6322c,
+0x40830000,
+0x00050844,
+0x48000001,
+0x0003001c,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x0006006c,
+0x280b0010,
+0x108e0301,
+0x10ce0b01,
+0x41800000,
+0x00050844,
+0x1066222c,
+0x1003b232,
+0x10a6322c,
+0x40830000,
+0x00050844,
+0x48000001,
+0x0003001d,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x0006006d,
+0x0006006e,
+0x00000000,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003b232,
+0x40800000,
+0x00050844,
+0x108a0301,
+0x00090cab,
+0x106322e8,
+0x48000000,
+0x00050047,
+0x0006006f,
+0x280b0010,
+0x108e0301,
+0x10ce0b01,
+0x41800000,
+0x00050844,
+0x1066222c,
+0x1003b232,
+0x40830000,
+0x00050844,
+0x10a032f5,
+0x48000001,
+0x0003001e,
+0x1063222d,
+0x48000000,
+0x00050047,
+0x00060070,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x38b10000,
+0x00098200,
+0x820efff8,
+0x48000001,
+0x0003001f,
+0x81110000,
+0x00098200,
+0x1063222d,
+0x108042f1,
+0x3a8efff8,
+0x10740321,
+0x39800000,
+0x00098200,
+0x10940b21,
+0x48000000,
+0x00050045,
+0x00060071,
+0x280b0008,
+0x108e0301,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x1064222c,
+0x40800000,
+0x00050844,
+0x38aefff8,
+0x820efff8,
+0x48000001,
+0x00030020,
+0x1063222d,
+0x3a8efff8,
+0x106e0321,
+0x39800000,
+0x00098200,
+0x00000000,
+0x48000000,
+0x00050045,
+0x00060072,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003b232,
+0x39000008,
+0x40800000,
+0x00050844,
+0x0006000b,
+0x108e4300,
+0x7c885840,
+0x1004b232,
+0x40840000,
+0x00050847,
+0x40800000,
+0x00050844,
+0x10041afd,
+0x39080008,
+0x4c010b82,
+0x10641a78,
+0x48000000,
+0x0005000b,
+0x00060073,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003b232,
+0x39000008,
+0x40800000,
+0x00050844,
+0x0006000b,
+0x108e4300,
+0x7c885840,
+0x1004b232,
+0x40840000,
+0x00050847,
+0x40800000,
+0x00050844,
+0x10041afc,
+0x39080008,
+0x4c010b82,
+0x10641a78,
+0x48000000,
+0x0005000b,
+0x00060074,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003ba34,
+0x40800000,
+0x00050844,
+0x80030000,
+0x00098200,
+0x106002f1,
+0x48000000,
+0x00050047,
+0x00060075,
+0x280b0008,
+0x106e0301,
+0x40820000,
+0x00050844,
+0x00000000,
+0x1003ba34,
+0x3a8efff8,
+0x40800000,
+0x00050844,
+0x80030000,
+0x00098200,
+0x39800000,
+0x00098200,
+0x89030000,
+0x00098200,
+0x39200000,
+0x00098200,
+0x28000000,
+0x820efff8,
+0x106042f1,
+0x7d8c489e,
+0x10740321,
+0x48000000,
+0x00050045,
+0x00060076,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x0005084d,
+0x280b0008,
+0x106e0301,
+0x40820000,
+0x00050844,
+0x1003b232,
+0x38910000,
+0x00098200,
+0x40800000,
+0x00050844,
+0x10001afa,
+0x38a00001,
+0x280000ff,
+0x98040000,
+0x41810000,
+0x00050844,
+0x00060077,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x9201000c,
+0x48000001,
+0x00030021,
+0x81d20000,
+0x00098200,
+0x10771a2d,
+0x48000000,
+0x00050047,
+0x00060078,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x00000000,
+0x7c004040,
+0x40800001,
+0x0005084d,
+0x280b0010,
+0x10ae1301,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x108e0b01,
+0x3920ffff,
+0x41820000,
+0x00050801,
+0x1005b232,
+0x40800000,
+0x00050844,
+0x11202afa,
+0x0006000b,
+0x1004b232,
+0x40800000,
+0x00050844,
+0x1003ba34,
+0x110022fa,
+0x40800000,
+0x00050844,
+0x80030000,
+0x00098200,
+0x7c004840,
+0x7cc90214,
+0x41800000,
+0x00050805,
+0x0006000c,
+0x2c080000,
+0x7cc80214,
+0x40810000,
+0x00050807,
+0x0006000d,
+0x7ca84851,
+0x38830000,
+0x00098200,
+0x38a50001,
+0x7c844214,
+0x7ca0281e,
+0x48000000,
+0x00050077,
+0x0006000f,
+0x7c004800,
+0x38c60001,
+0x7d26005e,
+0x48000000,
+0x0005000c,
+0x00060011,
+0x2c860000,
+0x7d00309e,
+0x7d00411e,
+0x39080001,
+0x48000000,
+0x0005000d,
+0x00060079,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x0005084d,
+0x00000000,
+0x280b0010,
+0x106e0301,
+0x108e0b01,
+0x41800000,
+0x00050844,
+0x1004b232,
+0x40800000,
+0x00050844,
+0x1003ba34,
+0x10a022fa,
+0x40800000,
+0x00050844,
+0x80030000,
+0x00098200,
+0x2c050000,
+0x81110000,
+0x00098200,
+0x40810000,
+0x00050802,
+0x28000001,
+0x3925ffff,
+0x41800000,
+0x00050802,
+0x7c882840,
+0x40820000,
+0x00050844,
+0x88030000,
+0x00098200,
+0x80910000,
+0x00098200,
+0x41840000,
+0x00050844,
+0x0006000b,
+0x28090000,
+0x7c0449ae,
+0x3929ffff,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x00050077,
+0x0006000c,
+0x38710000,
+0x00098200,
+0x10771a2d,
+0x48000000,
+0x00050047,
+0x0006007a,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x0005084d,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x00000000,
+0x1003ba34,
+0x81110000,
+0x00098200,
+0x40800000,
+0x00050844,
+0x80a30000,
+0x00098200,
+0x38630000,
+0x00098200,
+0x80910000,
+0x00098200,
+0x39200000,
+0x7c082840,
+0x38c5ffff,
+0x41800000,
+0x00050844,
+0x0006000b,
+0x2c060000,
+0x7d0348ae,
+0x41800000,
+0x00050877,
+0x7d0431ae,
+0x38c6ffff,
+0x39290001,
+0x48000000,
+0x0005000b,
+0x0006007b,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x0005084d,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003ba34,
+0x81110000,
+0x00098200,
+0x40800000,
+0x00050844,
+0x80a30000,
+0x00098200,
+0x38630000,
+0x00098200,
+0x80910000,
+0x00098200,
+0x7c082840,
+0x39200000,
+0x41800000,
+0x00050844,
+0x0006000b,
+0x7c092840,
+0x7d0348ae,
+0x40800000,
+0x00050877,
+0x00000000,
+0x3808ffbf,
+0x69060020,
+0x2800001a,
+0x7d06401e,
+0x7d0449ae,
+0x39290001,
+0x48000000,
+0x0005000b,
+0x0006007c,
+0x80110000,
+0x00098200,
+0x81110000,
+0x00098200,
+0x7c004040,
+0x40800001,
+0x0005084d,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003ba34,
+0x81110000,
+0x00098200,
+0x40800000,
+0x00050844,
+0x80a30000,
+0x00098200,
+0x38630000,
+0x00098200,
+0x80910000,
+0x00098200,
+0x7c082840,
+0x39200000,
+0x41800000,
+0x00050844,
+0x0006000b,
+0x7c092840,
+0x7d0348ae,
+0x40800000,
+0x00050877,
+0x3808ff9f,
+0x69060020,
+0x2800001a,
+0x7d06401e,
+0x7d0449ae,
+0x39290001,
+0x48000000,
+0x0005000b,
+0x0006007d,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003c234,
+0x40800000,
+0x00050844,
+0x48000001,
+0x00030022,
+0x10601af1,
+0x48000000,
+0x00050047,
+0x0006007e,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x00000000,
+0x1003b232,
+0x40800000,
+0x00050844,
+0x1063dae0,
+0x0006007f,
+0x10601af1,
+0x48000000,
+0x00050047,
+0x00060080,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003b232,
+0x40800000,
+0x00050844,
+0x1063dae0,
+0x39000008,
+0x0006000b,
+0x108e4300,
+0x7c885840,
+0x1004b232,
+0x40840000,
+0x0005087f,
+0x40800000,
+0x00050844,
+0x1084dae0,
+0x7c632038,
+0x39080008,
+0x48000000,
+0x0005000b,
+0x00060081,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003b232,
+0x40800000,
+0x00050844,
+0x1063dae0,
+0x39000008,
+0x0006000b,
+0x108e4300,
+0x7c885840,
+0x1004b232,
+0x40840000,
+0x0005087f,
+0x40800000,
+0x00050844,
+0x1084dae0,
+0x7c632378,
+0x39080008,
+0x48000000,
+0x0005000b,
+0x00060082,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003b232,
+0x40800000,
+0x00050844,
+0x1063dae0,
+0x39000008,
+0x0006000b,
+0x108e4300,
+0x7c885840,
+0x1004b232,
+0x40840000,
+0x0005087f,
+0x00000000,
+0x40800000,
+0x00050844,
+0x1084dae0,
+0x7c632278,
+0x39080008,
+0x48000000,
+0x0005000b,
+0x00060083,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003b232,
+0x40800000,
+0x00050844,
+0x1063dae0,
+0x5460403e,
+0x5060c00e,
+0x5060c42e,
+0x106002f1,
+0x48000000,
+0x00050047,
+0x00060084,
+0x280b0008,
+0x106e0301,
+0x41800000,
+0x00050844,
+0x1003b232,
+0x40800000,
+0x00050844,
+0x1063dae0,
+0x7c6018f8,
+0x106002f1,
+0x48000000,
+0x00050047,
+0x00060085,
+0x280b0010,
+0x106e0301,
+0x108e0b01,
+0x41800000,
+0x00050844,
+0x1003222c,
+0x1000b232,
+0x40830000,
+0x00050844,
+0x1084dae0,
+0x1063dae0,
+0x548406fe,
+0x7c602030,
+0x106002f1,
+0x48000000,
+0x00050047,
+0x00060086,
+0x280b0010,
+0x106e0301,
+0x108e0b01,
+0x41800000,
+0x00050844,
+0x1003222c,
+0x1000b232,
+0x40830000,
+0x00050844,
+0x1084dae0,
+0x1063dae0,
+0x548406fe,
+0x7c602430,
+0x106002f1,
+0x48000000,
+0x00050047,
+0x00060087,
+0x280b0010,
+0x106e0301,
+0x108e0b01,
+0x41800000,
+0x00050844,
+0x1003222c,
+0x1000b232,
+0x40830000,
+0x00050844,
+0x1084dae0,
+0x1063dae0,
+0x548406fe,
+0x7c602630,
+0x106002f1,
+0x48000000,
+0x00050047,
+0x00060088,
+0x00000000,
+0x280b0010,
+0x106e0301,
+0x108e0b01,
+0x41800000,
+0x00050844,
+0x1003222c,
+0x1000b232,
+0x40830000,
+0x00050844,
+0x1084dae0,
+0x1063dae0,
+0x5c60203e,
+0x106002f1,
+0x48000000,
+0x00050047,
+0x00060089,
+0x280b0010,
+0x106e0301,
+0x108e0b01,
+0x41800000,
+0x00050844,
+0x1003222c,
+0x1000b232,
+0x40830000,
+0x00050844,
+0x1084dae0,
+0x1063dae0,
+0x7c8400d0,
+0x5c60203e,
+0x106002f1,
+0x48000000,
+0x00050047,
+0x00060044,
+0x80ca0000,
+0x00098200,
+0x7d0e5a14,
+0x820efff8,
+0x38080000,
+0x00098200,
+0x81320000,
+0x00098200,
+0x9201000c,
+0x7c004840,
+0x91d20000,
+0x00098200,
+0x91120000,
+0x00098200,
+0x7e439378,
+0x41810000,
+0x00050805,
+0x7cc903a6,
+0x4e800421,
+0x81d20000,
+0x00098200,
+0x2c030000,
+0x546c1800,
+0x000900a1,
+0x3a8efff8,
+0x41810000,
+0x00050845,
+0x0006000b,
+0x80120000,
+0x00098200,
+0x814efffc,
+0x7d6e0050,
+0x40820000,
+0x00050828,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00060028,
+0x00000000,
+0x72000000,
+0x00090200,
+0x56080038,
+0x40820000,
+0x00050803,
+0x80f0fffc,
+0x54e8dd78,
+0x0006000d,
+0x7d287050,
+0x48000000,
+0x00050024,
+0x0006000f,
+0x38800000,
+0x00098200,
+0x48000001,
+0x00030000,
+0x81d20000,
+0x00098200,
+0x7c000000,
+0x48000000,
+0x0005000b,
+0x0006004d,
+0x7ea802a6,
+0x91d20000,
+0x00098200,
+0x7c0e5a14,
+0x9201000c,
+0x90120000,
+0x00098200,
+0x7e439378,
+0x48000001,
+0x00030023,
+0x81d20000,
+0x00098200,
+0x7ea803a6,
+0x80120000,
+0x00098200,
+0x7d6e0050,
+0x814efffc,
+0x4e800020,
+0x0006008a,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x0006008b,
+0x88d10000,
+0x00098200,
+0x70c00000,
+0x00090200,
+0x41820000,
+0x00050801,
+0x0006000f,
+0x39080000,
+0x00098200,
+0x7c11402e,
+0x7c0903a6,
+0x4e800420,
+0x0006008c,
+0x88d10000,
+0x00098200,
+0x81310000,
+0x00098200,
+0x70c00000,
+0x00090200,
+0x54c007c0,
+0x000900ab,
+0x40820000,
+0x0005080f,
+0x2c800000,
+0x3529ffff,
+0x41860000,
+0x0005080f,
+0x91310000,
+0x00098200,
+0x41820000,
+0x00050801,
+0x40840000,
+0x0005080f,
+0x0006000b,
+0x7e439378,
+0x92610008,
+0x7e048378,
+0x91d20000,
+0x00098200,
+0x48000001,
+0x00030024,
+0x0006000d,
+0x81d20000,
+0x00098200,
+0x0006000e,
+0x00000000,
+0x80f0fffc,
+0x54e815ba,
+0x54ea5d78,
+0x39080000,
+0x00098200,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006008d,
+0x3a100004,
+0x826affec,
+0x48000000,
+0x0005000e,
+0x0006008e,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x0006008f,
+0x7e048378,
+0x00000000,
+0x48000000,
+0x00050001,
+0x00000000,
+0x00060090,
+0x00000000,
+0x62040001,
+0x0006000b,
+0x00000000,
+0x7c0e5a14,
+0x9201000c,
+0x7e439378,
+0x91d20000,
+0x00098200,
+0x7e8ea050,
+0x90120000,
+0x00098200,
+0x48000001,
+0x00030025,
+0x81d20000,
+0x00098200,
+0x80120000,
+0x00098200,
+0x9361000c,
+0x7d6e0050,
+0x7e8ea214,
+0x814efffc,
+0x7c6903a6,
+0x4e800420,
+0x00060091,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x00060092,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x00060093,
+0x7ca802a6,
+0x48000001,
+0x0005005a,
+0x7ca803a6,
+0x1064222c,
+0x4e800020,
+0x00060094,
+0x1064222c,
+0x0006005a,
+0x5469657e,
+0x3529fc01,
+0x3900ffff,
+0x28890033,
+0x20090034,
+0x41850000,
+0x00050801,
+0x3cc0fff0,
+0x7d000030,
+0x7cc84e30,
+0x7c890078,
+0x7c664078,
+0x7d293378,
+0x7c66fe70,
+0x7d293039,
+0x7c840038,
+0x7c634038,
+0x7c002010,
+0x7c04009e,
+0x7d081910,
+0x7d03409e,
+0x1088022d,
+0x4e800020,
+0x0006000b,
+0x4d810020,
+0x5469007e,
+0x7c60fe70,
+0x7d292378,
+0x3d003ff0,
+0x7d290039,
+0x38000000,
+0x7d00409e,
+0x5103007e,
+0x1083022d,
+0x4e800020,
+0x00060095,
+0x1064222c,
+0x0006005c,
+0x5469657e,
+0x3529fc01,
+0x3900ffff,
+0x28890033,
+0x20090034,
+0x41850000,
+0x00050801,
+0x3cc0fff0,
+0x7d000030,
+0x7cc84e30,
+0x7c890078,
+0x7c664078,
+0x7d293378,
+0x7c66fe70,
+0x7d293079,
+0x7c840038,
+0x7c634038,
+0x7c002010,
+0x7c04009e,
+0x7d081910,
+0x7d03409e,
+0x1088022d,
+0x4e800020,
+0x0006000b,
+0x4d810020,
+0x5469007e,
+0x7c60fe70,
+0x7d292378,
+0x3d003ff0,
+0x7d290079,
+0x38000000,
+0x7d00409e,
+0x5103007e,
+0x1083022d,
+0x4e800020,
+0x00000000,
+0x00060096,
+0x1064222c,
+0x00060097,
+0x5469657e,
+0x3529fc01,
+0x3900ffff,
+0x28890033,
+0x20090034,
+0x41850000,
+0x00050801,
+0x3cc0fff0,
+0x7d000030,
+0x7cc84e30,
+0x1008022d,
+0x10840211,
+0x4e800020,
+0x0006000b,
+0x4d810020,
+0x54680000,
+0x38000000,
+0x1088022d,
+0x4e800020,
+0x00000000,
+0x00060096,
+0x00060097,
+0x00000000,
+0x00060098,
+0x1083222d,
+0x28070001,
+0x10c5322d,
+0x41820000,
+0x00050801,
+0x41810000,
+0x00050802,
+0x108432e0,
+0x1064222c,
+0x4e800020,
+0x0006000b,
+0x108432e1,
+0x1064222c,
+0x4e800020,
+0x0006000c,
+0x28070003,
+0x41820000,
+0x00050801,
+0x41810000,
+0x00050802,
+0x108432e8,
+0x1064222c,
+0x4e800020,
+0x0006000b,
+0x108432e9,
+0x1064222c,
+0x4e800020,
+0x0006000c,
+0x28070005,
+0x41820000,
+0x00050801,
+0x41810000,
+0x00050802,
+0x10a42217,
+0x108432e9,
+0x11463217,
+0x7d6802a6,
+0x48000001,
+0x00050094,
+0x7d6803a6,
+0x108452e8,
+0x108522e1,
+0x1064222c,
+0x4e800020,
+0x0006000b,
+0x48000000,
+0x0003001b,
+0x0006000c,
+0x28070007,
+0x41820000,
+0x00050801,
+0x41810000,
+0x00050802,
+0x6c638000,
+0x4e800020,
+0x0006000b,
+0x5463007e,
+0x4e800020,
+0x0006000c,
+0x7c810808,
+0x00060099,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x00080000,
+0x00000000,
+0x100ea300,
+0x3a100004,
+0x110e6300,
+0x3cd00000,
+0x00098200,
+0x8130fffc,
+0x1140422c,
+0x552993ba,
+0x100ab232,
+0x7d293214,
+0x40830000,
+0x00050834,
+0x100042ed,
+0x00000000,
+0x108042ee,
+0x4c212b82,
+0x00000000,
+0x7e09805e,
+0x00000000,
+0x7e10485e,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x108ea300,
+0x3a100004,
+0x10ae6300,
+0x3cd00000,
+0x00098200,
+0x8130fffc,
+0x11442a2c,
+0x552993ba,
+0x100ab232,
+0x7d293214,
+0x40830000,
+0x00050805,
+0x10042aee,
+0x00000000,
+0x7e09805e,
+0x00000000,
+0x7e10485e,
+0x00000000,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000f,
+0x10042a34,
+0x7d4650f8,
+0x28860000,
+0x00090200,
+0x4f830342,
+0x2b060000,
+0x00090200,
+0x4fa02902,
+0x7e158378,
+0x00000000,
+0x7e09875e,
+0x00000000,
+0x7d304f5e,
+0x00000000,
+0x4f9ceb82,
+0x00000000,
+0x7e0980de,
+0x00000000,
+0x7e1048de,
+0x00000000,
+0x419c0000,
+0x0005080b,
+0x41980000,
+0x0005080b,
+0x81240000,
+0x00098200,
+0x38c00000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080b,
+0x89290000,
+0x00098200,
+0x71290000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x7eb0ab78,
+0x48000000,
+0x00050039,
+0x00000000,
+0x100ea300,
+0x558c007e,
+0x000900ab,
+0x80f00000,
+0x218cfffc,
+0x3a100004,
+0x7d0f602e,
+0x3cd00000,
+0x00098200,
+0x54e993ba,
+0x1117422d,
+0x7d293214,
+0x10004234,
+0x00000000,
+0x7e0980de,
+0x00000000,
+0x7e1048de,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x100ea300,
+0x3a100004,
+0x110f6300,
+0x3cd00000,
+0x00098200,
+0x80f0fffc,
+0x1000b232,
+0x40800000,
+0x00050805,
+0x100042ee,
+0x0006000b,
+0x54e993ba,
+0x7d293214,
+0x00000000,
+0x7e09805e,
+0x0006000f,
+0x00000000,
+0x7e10485e,
+0x00000000,
+0x0006000d,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x0006000f,
+0x54e993ba,
+0x7e093214,
+0x48000000,
+0x0005000d,
+0x00000000,
+0x7c0ea02e,
+0x558800fe,
+0x000900ab,
+0x80f00000,
+0x3a100004,
+0x7d0840f8,
+0x3cd00000,
+0x00098200,
+0x7c004040,
+0x54e993ba,
+0x7d293214,
+0x00000000,
+0x7e09809e,
+0x00000000,
+0x7e10489e,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x100e6300,
+0x111ad200,
+0x80f00000,
+0x10004232,
+0x3a100004,
+0x00000000,
+0x3cd00000,
+0x00098200,
+0x54e993ba,
+0x7d293214,
+0x00000000,
+0x7e09801e,
+0x00000000,
+0x7e10481e,
+0x00000000,
+0x40800000,
+0x00050801,
+0x00000000,
+0x41800000,
+0x00050801,
+0x00000000,
+0x3e100000,
+0x00098200,
+0x54e993ba,
+0x100ea320,
+0x7e104a14,
+0x0006000b,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x100e6300,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x7c0e602e,
+0x21000000,
+0x00098200,
+0x7c004114,
+0x7c0ea12e,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x100e6300,
+0x1000b232,
+0x40800000,
+0x0005083c,
+0x100002e6,
+0x80f00000,
+0x3a100004,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x106e6300,
+0x1003ba34,
+0x40800000,
+0x00050802,
+0x80630000,
+0x00098200,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x10001af1,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000c,
+0x1003c234,
+0x40800000,
+0x0005083e,
+0x00000000,
+0x81230000,
+0x00098200,
+0x28090000,
+0x40820000,
+0x00050809,
+0x0006000d,
+0x00000000,
+0x0006003f,
+0x48000001,
+0x00030022,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x00060013,
+0x88090000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x40820000,
+0x0005080d,
+0x48000000,
+0x0005003e,
+0x00000000,
+0x100e5300,
+0x1000b232,
+0x110f5b00,
+0x40800000,
+0x0005083a,
+0x00000000,
+0x110e5300,
+0x1008b232,
+0x100f5b00,
+0x40800000,
+0x0005083b,
+0x00000000,
+0x100e5300,
+0x110e5b00,
+0x1120422c,
+0x1009b232,
+0x40830000,
+0x0005083d,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x100042e0,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x100e5300,
+0x1000b232,
+0x110f5b00,
+0x40800000,
+0x0005083a,
+0x00000000,
+0x110e5300,
+0x1008b232,
+0x100f5b00,
+0x40800000,
+0x0005083b,
+0x00000000,
+0x100e5300,
+0x110e5b00,
+0x1120422c,
+0x1009b232,
+0x40830000,
+0x0005083d,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x100042e1,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x100e5300,
+0x1000b232,
+0x110f5b00,
+0x40800000,
+0x0005083a,
+0x00000000,
+0x110e5300,
+0x1008b232,
+0x100f5b00,
+0x40800000,
+0x0005083b,
+0x00000000,
+0x100e5300,
+0x110e5b00,
+0x1120422c,
+0x1009b232,
+0x40830000,
+0x0005083d,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x100042e8,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x100e5300,
+0x1000b232,
+0x110f5b00,
+0x40800000,
+0x0005083a,
+0x00000000,
+0x110e5300,
+0x1008b232,
+0x100f5b00,
+0x40800000,
+0x0005083b,
+0x00000000,
+0x100e5300,
+0x110e5b00,
+0x1120422c,
+0x1009b232,
+0x40830000,
+0x0005083d,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x100042e9,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x118e5300,
+0x100cb232,
+0x12af5b00,
+0x40800000,
+0x0005083a,
+0x00000000,
+0x12ae5300,
+0x1015b232,
+0x118f5b00,
+0x40800000,
+0x0005083b,
+0x00000000,
+0x118e5300,
+0x12ae5b00,
+0x112caa2c,
+0x1009b232,
+0x40830000,
+0x0005083d,
+0x00000000,
+0x0006009a,
+0x108caae9,
+0x48000001,
+0x00050094,
+0x1004aae8,
+0x80f00000,
+0x3a100004,
+0x100c02e1,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x118e5300,
+0x100cb232,
+0x12af5b00,
+0x40800000,
+0x0005083a,
+0x00000000,
+0x12ae5300,
+0x1015b232,
+0x118f5b00,
+0x40800000,
+0x0005083b,
+0x00000000,
+0x118e5300,
+0x12ae5b00,
+0x112caa2c,
+0x1009b232,
+0x40830000,
+0x0005083d,
+0x00000000,
+0x48000000,
+0x0005009a,
+0x00000000,
+0x108e5300,
+0x10ce5b00,
+0x1066222c,
+0x1003b232,
+0x10a6322c,
+0x40830000,
+0x0005083d,
+0x48000001,
+0x0003001b,
+0x1083222d,
+0x108ea320,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x7caa5850,
+0x91d20000,
+0x00098200,
+0x7c8e5a14,
+0x7d555378,
+0x0006002a,
+0x9201000c,
+0x7e439378,
+0x54a500fe,
+0x000900ab,
+0x48000001,
+0x00030026,
+0x28030000,
+0x81d20000,
+0x00098200,
+0x40820000,
+0x00050835,
+0x100eab00,
+0x100ea320,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x5588007e,
+0x000900ab,
+0x2108fffc,
+0x7c0f402e,
+0x1017022d,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x5588007e,
+0x000900ab,
+0x2108fffc,
+0x7c0f402e,
+0x39200000,
+0x00098200,
+0x1009022d,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x558800fe,
+0x000900ab,
+0x7d080734,
+0x80f00000,
+0x3a100004,
+0x100042f1,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x100f6300,
+0x80f00000,
+0x3a100004,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x558800fe,
+0x000900ab,
+0x7d0040f8,
+0x80f00000,
+0x3a100004,
+0x7c0ea12e,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x134ea320,
+0x3a940008,
+0x0006000b,
+0x134ea320,
+0x7c146000,
+0x3a940008,
+0x41800000,
+0x0005080b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x814efffc,
+0x558c007e,
+0x000900ab,
+0x398c0000,
+0x00098200,
+0x7d4a602e,
+0x810a0000,
+0x00098200,
+0x10080301,
+0x100ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x814efffc,
+0x5694007e,
+0x000900ab,
+0x3a940000,
+0x00098200,
+0x110e6300,
+0x7d4aa02e,
+0x88ca0000,
+0x00098200,
+0x808a0000,
+0x00098200,
+0x70c60000,
+0x00090200,
+0x880a0000,
+0x00098200,
+0x1128422c,
+0x11040321,
+0x28800000,
+0x4c423382,
+0x39290000,
+0x00098200,
+0x40820000,
+0x00050802,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000c,
+0x28090000,
+0x00090200,
+0x40800000,
+0x0005080b,
+0x88c80000,
+0x00098200,
+0x70c60000,
+0x00090200,
+0x38710000,
+0x00098200,
+0x41820000,
+0x0005080b,
+0x48000001,
+0x00030027,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x814efffc,
+0x5588007e,
+0x000900ab,
+0x5694007e,
+0x000900ab,
+0x2108fffc,
+0x3a940000,
+0x00098200,
+0x7d0f402e,
+0x7d4aa02e,
+0x1117422d,
+0x88ca0000,
+0x00098200,
+0x808a0000,
+0x00098200,
+0x70c60000,
+0x00090200,
+0x88c80000,
+0x00098200,
+0x892a0000,
+0x00098200,
+0x11040321,
+0x40820000,
+0x00050802,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000c,
+0x70c60000,
+0x00090200,
+0x28890000,
+0x4c423382,
+0x38710000,
+0x00098200,
+0x41820000,
+0x0005080b,
+0x48000001,
+0x00030027,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x814efffc,
+0x5694007e,
+0x000900ab,
+0x3a940000,
+0x00098200,
+0x100f6300,
+0x7d4aa02e,
+0x810a0000,
+0x00098200,
+0x10080321,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x814efffc,
+0x5694007e,
+0x000900ab,
+0x3a940000,
+0x00098200,
+0x558000fe,
+0x000900ab,
+0x7d4aa02e,
+0x7c0000f8,
+0x810a0000,
+0x00098200,
+0x90080000,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x81120000,
+0x00098200,
+0x5580007e,
+0x000900ab,
+0x7e100214,
+0x3e100000,
+0x00098200,
+0x91d20000,
+0x00098200,
+0x28080000,
+0x7e439378,
+0x41820000,
+0x00050801,
+0x7c8ea214,
+0x48000001,
+0x00030028,
+0x81d20000,
+0x00098200,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x5588007e,
+0x000900ab,
+0x91d20000,
+0x00098200,
+0x2108fffc,
+0x9201000c,
+0x7c8f402e,
+0x7e439378,
+0x80aefffc,
+0x48000001,
+0x00030029,
+0x81d20000,
+0x00098200,
+0x10791a2d,
+0x106ea320,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x80110000,
+0x00098200,
+0x7e439378,
+0x81110000,
+0x00098200,
+0x91d20000,
+0x00098200,
+0x7c004040,
+0x9201000c,
+0x40800000,
+0x00050805,
+0x0006000b,
+0x00000000,
+0x5584ed7e,
+0x558596fe,
+0x2c0407ff,
+0x39000801,
+0x7c88209e,
+0x48000001,
+0x0003002a,
+0x00000000,
+0x5588007e,
+0x000900ab,
+0x2108fffc,
+0x7c8f402e,
+0x48000001,
+0x0003002b,
+0x00000000,
+0x81d20000,
+0x00098200,
+0x10781a2d,
+0x106ea320,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000f,
+0x7d956378,
+0x48000001,
+0x0003002c,
+0x7eacab78,
+0x7e439378,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x812efffc,
+0x5588007e,
+0x000900ab,
+0x81490000,
+0x00098200,
+0x2108fffc,
+0x7d6f402e,
+0x00000000,
+0x48000000,
+0x0005009b,
+0x00000000,
+0x48000000,
+0x0005009c,
+0x00000000,
+0x114e5300,
+0x116e5b00,
+0x100ac234,
+0x40800000,
+0x0005082f,
+0x100bb232,
+0x40800000,
+0x00050805,
+0x11205af5,
+0x800a0000,
+0x00098200,
+0x11004af1,
+0x7c004840,
+0x108b42ee,
+0x810a0000,
+0x00098200,
+0x4c212a02,
+0x55291800,
+0x000900a1,
+0x40810000,
+0x0005082f,
+0x11084b00,
+0x1008d234,
+0x41800000,
+0x00050802,
+0x0006000b,
+0x110ea320,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000c,
+0x812a0000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080b,
+0x88090000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x0005002f,
+0x0006000f,
+0x100bba34,
+0x41800000,
+0x0005089b,
+0x48000000,
+0x0005002f,
+0x00000000,
+0x114e5300,
+0x5568007e,
+0x000900ab,
+0x100ac234,
+0x2108fffc,
+0x7d6f402e,
+0x40800000,
+0x0005082c,
+0x0006009b,
+0x800a0000,
+0x00098200,
+0x810b0000,
+0x00098200,
+0x812a0000,
+0x00098200,
+0x11775a2d,
+0x7d080038,
+0x55002800,
+0x000900a1,
+0x55081800,
+0x000900a1,
+0x7d080050,
+0x7d294214,
+0x0006000b,
+0x10090301,
+0x00090cab,
+0x11090301,
+0x00090cab,
+0x10005a34,
+0x40830000,
+0x00050804,
+0x1008d234,
+0x41800000,
+0x00050805,
+0x0006000d,
+0x110ea320,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000e,
+0x81290000,
+0x00098200,
+0x28090000,
+0x40820000,
+0x0005080b,
+0x111ad217,
+0x0006000f,
+0x812a0000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080d,
+0x88090000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x00000000,
+0x40820000,
+0x0005080d,
+0x48000000,
+0x0005002d,
+0x00000000,
+0x114e5300,
+0x556000fe,
+0x000900ab,
+0x100ac234,
+0x40800000,
+0x0005082e,
+0x810a0000,
+0x00098200,
+0x812a0000,
+0x00098200,
+0x7c004040,
+0x40800000,
+0x0005082e,
+0x11095b00,
+0x1008d234,
+0x41800000,
+0x00050805,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x110ea320,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000f,
+0x812a0000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080b,
+0x89290000,
+0x00098200,
+0x71290000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x0005002e,
+0x00000000,
+0x114e5300,
+0x116e5b00,
+0x100ac234,
+0x40800000,
+0x00050833,
+0x100bb232,
+0x40800000,
+0x00050805,
+0x11205af5,
+0x12aea300,
+0x800a0000,
+0x00098200,
+0x11004af1,
+0x7c004840,
+0x108b42ee,
+0x810a0000,
+0x00098200,
+0x4c212a02,
+0x55201800,
+0x000900a1,
+0x40810000,
+0x00050833,
+0x88ca0000,
+0x00098200,
+0x11280300,
+0x1009d234,
+0x41800000,
+0x00050803,
+0x0006000b,
+0x70c90000,
+0x00090200,
+0x12a80320,
+0x40820000,
+0x00050807,
+0x0006000c,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000d,
+0x812a0000,
+0x00098200,
+0x28090000,
+0x41820000,
+0x0005080b,
+0x89290000,
+0x00098200,
+0x71290000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x00050033,
+0x0006000f,
+0x100bba34,
+0x41800000,
+0x0005089c,
+0x48000000,
+0x00050033,
+0x00060011,
+0x00000000,
+0x80110000,
+0x00098200,
+0x54c607b8,
+0x91510000,
+0x00098200,
+0x98ca0000,
+0x00098200,
+0x900a0000,
+0x00098200,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x114e5300,
+0x5568007e,
+0x000900ab,
+0x100ac234,
+0x2108fffc,
+0x7d6f402e,
+0x40800000,
+0x00050830,
+0x0006009c,
+0x800a0000,
+0x00098200,
+0x810b0000,
+0x00098200,
+0x812a0000,
+0x00098200,
+0x11775a2d,
+0x9b6a0000,
+0x00098200,
+0x7d080038,
+0x12aea300,
+0x55002800,
+0x000900a1,
+0x55081800,
+0x000900a1,
+0x7d080050,
+0x88ca0000,
+0x00098200,
+0x7d294214,
+0x0006000b,
+0x10090301,
+0x00090cab,
+0x11090301,
+0x00090cab,
+0x10005a34,
+0x40830000,
+0x00050805,
+0x1008d234,
+0x41800000,
+0x00050804,
+0x0006000c,
+0x70c00000,
+0x00090200,
+0x12a90321,
+0x00090cab,
+0x40820000,
+0x00050807,
+0x0006000d,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000e,
+0x810a0000,
+0x00098200,
+0x00000000,
+0x28080000,
+0x41820000,
+0x0005080c,
+0x88080000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x40820000,
+0x0005080c,
+0x48000000,
+0x00050031,
+0x0006000f,
+0x81290000,
+0x00098200,
+0x28090000,
+0x40820000,
+0x0005080b,
+0x810a0000,
+0x00098200,
+0x38b10000,
+0x00098200,
+0x9201000c,
+0x7e439378,
+0x28080000,
+0x91d20000,
+0x00098200,
+0x41820000,
+0x00050806,
+0x88080000,
+0x00098200,
+0x70000000,
+0x00090200,
+0x41820000,
+0x00050831,
+0x00060010,
+0x7d445378,
+0x11650321,
+0x48000001,
+0x0003002d,
+0x81d20000,
+0x00098200,
+0x12a30321,
+0x48000000,
+0x0005000d,
+0x00060011,
+0x80110000,
+0x00098200,
+0x54c607b8,
+0x91510000,
+0x00098200,
+0x00000000,
+0x98ca0000,
+0x00098200,
+0x900a0000,
+0x00098200,
+0x48000000,
+0x0005000d,
+0x00000000,
+0x114e5300,
+0x556000fe,
+0x000900ab,
+0x100ac234,
+0x40800000,
+0x00050832,
+0x810a0000,
+0x00098200,
+0x812a0000,
+0x00098200,
+0x88ca0000,
+0x00098200,
+0x7c004040,
+0x12aea300,
+0x40800000,
+0x00050832,
+0x11095b00,
+0x1008d234,
+0x41800000,
+0x00050805,
+0x0006000b,
+0x70c00000,
+0x00090200,
+0x12a95b20,
+0x40820000,
+0x00050807,
+0x0006000c,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000f,
+0x810a0000,
+0x00098200,
+0x28080000,
+0x41820000,
+0x0005080b,
+0x89080000,
+0x00098200,
+0x71080000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x48000000,
+0x00050032,
+0x00060011,
+0x80110000,
+0x00098200,
+0x54c607b8,
+0x91510000,
+0x00098200,
+0x98ca0000,
+0x00098200,
+0x00000000,
+0x900a0000,
+0x00098200,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x7e8ea214,
+0x0006000b,
+0x7ccf6214,
+0x8094fffc,
+0x3413fff8,
+0x80c60004,
+0x540500fe,
+0x000900ab,
+0x41820000,
+0x00050804,
+0x7ca53214,
+0x81240000,
+0x00098200,
+0x54c81800,
+0x000900a1,
+0x88c40000,
+0x00098200,
+0x7c054840,
+0x7d340214,
+0x80040000,
+0x00098200,
+0x41810000,
+0x00050805,
+0x7d080214,
+0x70c00000,
+0x00090200,
+0x0006000d,
+0x10140301,
+0x3a940008,
+0x7c944800,
+0x10080321,
+0x39080008,
+0x41840000,
+0x0005080d,
+0x40820000,
+0x00050807,
+0x0006000e,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000f,
+0x91d20000,
+0x00098200,
+0x7e439378,
+0x9201000c,
+0x7d956378,
+0x48000001,
+0x0003002e,
+0x7eacab78,
+0x48000000,
+0x0005000b,
+0x00060011,
+0x80110000,
+0x00098200,
+0x54c607b8,
+0x90910000,
+0x00098200,
+0x98c40000,
+0x00098200,
+0x90040000,
+0x00098200,
+0x00000000,
+0x48000000,
+0x0005000e,
+0x00000000,
+0x7d6b9a14,
+0x00000000,
+0x114ea300,
+0x7dc97378,
+0x7dcea214,
+0x396bfff8,
+0x100aca34,
+0x39ce0008,
+0x40800000,
+0x00050825,
+0x920efff8,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x7d6b9a14,
+0x00000000,
+0x114ea300,
+0x7e8ea214,
+0x810efff8,
+0x396bfff8,
+0x100aca34,
+0x3a940008,
+0x40800000,
+0x00050840,
+0x00060041,
+0x71000000,
+0x00090200,
+0x88ca0000,
+0x00098200,
+0x69090000,
+0x00090200,
+0x288b0000,
+0x40820000,
+0x00050807,
+0x0006000b,
+0x914efffc,
+0x39200000,
+0x2b860001,
+0x41860000,
+0x00050803,
+0x0006000c,
+0x38c90008,
+0x10144b00,
+0x7c865840,
+0x100e4b20,
+0x7cc93378,
+0x40860000,
+0x0005080c,
+0x0006000d,
+0x4c42ea02,
+0x41820000,
+0x00050805,
+0x0006000e,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x0006000f,
+0x80e8fffc,
+0x54f4dd78,
+0x7d147050,
+0x81080000,
+0x00098200,
+0x81080000,
+0x00098200,
+0x81e80000,
+0x00098200,
+0x48000000,
+0x0005000e,
+0x00060011,
+0x71200000,
+0x00090200,
+0x40820000,
+0x0005080b,
+0x00000000,
+0x7dc97050,
+0x810efff8,
+0x71000000,
+0x00090200,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x3a94ffe8,
+0x7dc97378,
+0x114ea300,
+0x7dcea214,
+0x100e0b01,
+0x110e1301,
+0x114e1b21,
+0x100aca34,
+0x100e2321,
+0x39600010,
+0x110e2b21,
+0x39ce0020,
+0x40800000,
+0x00050825,
+0x920efff8,
+0x820a0000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54f4dd78,
+0x7c11402e,
+0x7e947214,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x7e8ea214,
+0x8154fff4,
+0x8174fffc,
+0x800a0000,
+0x00098200,
+0x810a0000,
+0x00098200,
+0x3a100004,
+0x0006000b,
+0x7c0b0040,
+0x55661800,
+0x000900a1,
+0x40800000,
+0x00050805,
+0x11283300,
+0x1009d234,
+0x80f0fffc,
+0x41800000,
+0x00050804,
+0x10005af1,
+0x396b0001,
+0x3cd00000,
+0x00098200,
+0x11340b21,
+0x54e893ba,
+0x9174fffc,
+0x7e083214,
+0x10140321,
+0x0006000d,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000e,
+0x396b0001,
+0x48000000,
+0x0005000b,
+0x0006000f,
+0x810a0000,
+0x00098200,
+0x7d605850,
+0x812a0000,
+0x00098200,
+0x00060010,
+0x7c0b4040,
+0x55662800,
+0x000900a1,
+0x41810000,
+0x0005080d,
+0x556a1800,
+0x000900a1,
+0x7cca3050,
+0x11493300,
+0x7cc93214,
+0x100ad234,
+0x80f0fffc,
+0x41800000,
+0x00050807,
+0x10c60301,
+0x00090cab,
+0x3d300000,
+0x00098200,
+0x11540b21,
+0x7d6b0214,
+0x54e893ba,
+0x10d40321,
+0x396b0001,
+0x7e084a14,
+0x9174fffc,
+0x48000000,
+0x0005000d,
+0x00060011,
+0x00000000,
+0x396b0001,
+0x48000000,
+0x00050010,
+0x00000000,
+0x7e8ea214,
+0x3920ffe8,
+0x11144b00,
+0x8134fff0,
+0x80d4fff8,
+0x1008422c,
+0x2c090000,
+0x00098200,
+0x2c800000,
+0x00098200,
+0x2f060000,
+0x00098200,
+0x40860000,
+0x00050805,
+0x89080000,
+0x00098200,
+0x4c42d202,
+0x2f880000,
+0x00098200,
+0x5580007e,
+0x000900ab,
+0x4c42f202,
+0x7cd00214,
+0x40820000,
+0x00050805,
+0x9374fffc,
+0x3e060000,
+0x00098200,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000f,
+0x38000000,
+0x00098200,
+0x39000000,
+0x00098200,
+0x9810ffff,
+0x3e060000,
+0x00098200,
+0x99100003,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x800efff8,
+0x7d6e5a14,
+0x7e8ea214,
+0x396b0000,
+0x00098200,
+0x7d345214,
+0x38cefff8,
+0x7d605850,
+0x288a0000,
+0x7d0b3051,
+0x41860000,
+0x00050805,
+0x3929fff0,
+0x40810000,
+0x00050802,
+0x0006000b,
+0x100b0301,
+0x396b0008,
+0x10140321,
+0x7c144840,
+0x7c8b3040,
+0x40800000,
+0x00050803,
+0x3a940008,
+0x41840000,
+0x0005080b,
+0x0006000c,
+0x13540321,
+0x7c144840,
+0x3a940008,
+0x41800000,
+0x0005080c,
+0x0006000d,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000f,
+0x80120000,
+0x00098200,
+0x3a600008,
+0x40810000,
+0x0005080d,
+0x7d344214,
+0x7c090040,
+0x3a680008,
+0x41810000,
+0x00050807,
+0x00060010,
+0x100b0301,
+0x396b0008,
+0x10140321,
+0x7c0b3040,
+0x3a940008,
+0x41800000,
+0x00050810,
+0x48000000,
+0x0005000d,
+0x00060011,
+0x7e439378,
+0x92920000,
+0x00098200,
+0x7eae5850,
+0x91d20000,
+0x00098200,
+0x7e8ea050,
+0x9201000c,
+0x550400fe,
+0x000900ab,
+0x48000001,
+0x00030000,
+0x81d20000,
+0x00098200,
+0x00000000,
+0x7e8ea214,
+0x7d6eaa14,
+0x38cefff8,
+0x48000000,
+0x00050010,
+0x00000000,
+0x7d8c9a14,
+0x00000000,
+0x820efff8,
+0x7e8ea214,
+0x7d936378,
+0x0006000b,
+0x72000000,
+0x00090200,
+0x6a080000,
+0x00090200,
+0x40820000,
+0x0005089d,
+0x00060017,
+0x80f0fffc,
+0x2c0c0008,
+0x392efff8,
+0x396cfff8,
+0x54ea5d78,
+0x41820000,
+0x00050803,
+0x39000000,
+0x0006000c,
+0x38c80008,
+0x10144300,
+0x7c065800,
+0x10094320,
+0x41820000,
+0x00050803,
+0x39060008,
+0x10143300,
+0x7c085800,
+0x10093320,
+0x40820000,
+0x0005080c,
+0x0006000d,
+0x0006000f,
+0x7c0a6040,
+0x54f4dd78,
+0x41810000,
+0x00050806,
+0x7dd44850,
+0x810efffc,
+0x80f00000,
+0x3a100004,
+0x81080000,
+0x00098200,
+0x81e80000,
+0x00098200,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00060010,
+0x390cfff8,
+0x398c0008,
+0x13494320,
+0x48000000,
+0x0005000f,
+0x0006009d,
+0x71090000,
+0x00090200,
+0x40820000,
+0x00050818,
+0x7dc87050,
+0x820efff8,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x820efff8,
+0x7e8ea214,
+0x7d936378,
+0x72000000,
+0x00090200,
+0x6a080000,
+0x00090200,
+0x40820000,
+0x0005089d,
+0x80f0fffc,
+0x392efff8,
+0x54ea5d78,
+0x00000000,
+0x10140301,
+0x10090321,
+0x00000000,
+0x0006000f,
+0x7c0a6040,
+0x54f4dd78,
+0x41810000,
+0x00050806,
+0x7dd44850,
+0x810efffc,
+0x80f00000,
+0x3a100004,
+0x81080000,
+0x00098200,
+0x81e80000,
+0x00098200,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00060010,
+0x390cfff8,
+0x398c0008,
+0x13494320,
+0x48000000,
+0x0005000f,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x7e8ea214,
+0x11140301,
+0x00090cab,
+0x10d40301,
+0x00090cab,
+0x11340301,
+0x00090cab,
+0x00000000,
+0x1008b230,
+0x1386b230,
+0x1089b230,
+0x4c00e382,
+0x4c002382,
+0x41800000,
+0x00050842,
+0x00000000,
+0x110832e0,
+0x11140321,
+0x00090cab,
+0x00000000,
+0x1006d231,
+0x11140321,
+0x00090cab,
+0x40800000,
+0x00050802,
+0x10084aec,
+0x0006000b,
+0x00000000,
+0x558c007e,
+0x000900ab,
+0x7d906214,
+0x00000000,
+0x3e0c0000,
+0x00098200,
+0x00000000,
+0x3d8c0000,
+0x00098200,
+0x00000000,
+0x7e0c805e,
+0x00000000,
+0x7e10605e,
+0x00000000,
+0x40810000,
+0x00070800,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000c,
+0x100942ec,
+0x48000000,
+0x0005000b,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x110ea300,
+0x3a94fff8,
+0x1008d234,
+0x41800000,
+0x00050801,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x5580007e,
+0x000900ab,
+0x7e100214,
+0x3e100000,
+0x00098200,
+0x110ea320,
+0x00000000,
+0x0006000b,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x5580007e,
+0x000900ab,
+0x7e100214,
+0x3e100000,
+0x00098200,
+0x80f00000,
+0x3a100004,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x81320000,
+0x00098200,
+0x89100000,
+0x00098200,
+0x81f00000,
+0x00098200,
+0x7c144840,
+0x55081800,
+0x000900a1,
+0x41810000,
+0x00050820,
+0x80f00000,
+0x3a100004,
+0x0006000c,
+0x7c0b4040,
+0x40810000,
+0x00050803,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x00000000,
+0x0006000d,
+0x134e5b20,
+0x396b0008,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x7c810808,
+0x00000000,
+0x81320000,
+0x00098200,
+0x7d0e5a14,
+0x7c145a14,
+0x91480004,
+0x38cb0000,
+0x00098200,
+0x81f00000,
+0x00098200,
+0x7c004840,
+0x90c80000,
+0x40800000,
+0x00050820,
+0x89300000,
+0x00098200,
+0x7dd47378,
+0x7d0b4378,
+0x80f00000,
+0x3a100004,
+0x2c090000,
+0x39c80008,
+0x41820000,
+0x00050803,
+0x0006000b,
+0x7c145840,
+0x10140301,
+0x40800000,
+0x00050804,
+0x13540321,
+0x3a940008,
+0x0006000c,
+0x3529ffff,
+0x10080b21,
+0x39080008,
+0x40820000,
+0x0005080b,
+0x0006000d,
+0x54e815ba,
+0x54ea5d78,
+0x54ec9b78,
+0x7c11402e,
+0x54f4dd78,
+0x54eb9d78,
+0x7c0903a6,
+0x4e800420,
+0x0006000e,
+0x101ad217,
+0x48000000,
+0x0005000c,
+0x00000000,
+0x80ca0000,
+0x00098200,
+0x00000000,
+0x80d10000,
+0x00098200,
+0x00000000,
+0x7d145a14,
+0x81320000,
+0x00098200,
+0x7d6e5a14,
+0x91d20000,
+0x00098200,
+0x7c084840,
+0x91720000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x7cc903a6,
+0x00000000,
+0x808a0000,
+0x00098200,
+0x00000000,
+0x7e439378,
+0x41810000,
+0x0005081f,
+0x90110000,
+0x00098200,
+0x4e800421,
+0x81120000,
+0x00098200,
+0x546c1800,
+0x000900a1,
+0x81d20000,
+0x00098200,
+0x38000000,
+0x00098200,
+0x820efff8,
+0x7e8c4050,
+0x90110000,
+0x00098200,
+0x48000000,
+0x00050016,
+0x00000000,
+0x00010000
+};
+
+enum {
+ GLOB_vm_returnp,
+ GLOB_cont_dispatch,
+ GLOB_vm_returnc,
+ GLOB_BC_RET_Z,
+ GLOB_vm_return,
+ GLOB_vm_leave_cp,
+ GLOB_vm_leave_unw,
+ GLOB_vm_unwind_c,
+ GLOB_vm_unwind_c_eh,
+ GLOB_vm_unwind_ff,
+ GLOB_vm_unwind_ff_eh,
+ GLOB_vm_growstack_c,
+ GLOB_vm_growstack_l,
+ GLOB_vm_resume,
+ GLOB_vm_pcall,
+ GLOB_vm_call,
+ GLOB_vm_call_dispatch,
+ GLOB_vmeta_call,
+ GLOB_vm_call_dispatch_f,
+ GLOB_vm_cpcall,
+ GLOB_vm_call_tail,
+ GLOB_cont_cat,
+ GLOB_BC_CAT_Z,
+ GLOB_cont_nop,
+ GLOB_vmeta_tgets1,
+ GLOB_vmeta_tgets,
+ GLOB_vmeta_tgetb,
+ GLOB_vmeta_tgetv,
+ GLOB_vmeta_tsets1,
+ GLOB_vmeta_tsets,
+ GLOB_vmeta_tsetb,
+ GLOB_vmeta_tsetv,
+ GLOB_vmeta_comp,
+ GLOB_vmeta_binop,
+ GLOB_cont_ra,
+ GLOB_cont_condt,
+ GLOB_cont_condf,
+ GLOB_vmeta_equal,
+ GLOB_vmeta_arith_vn,
+ GLOB_vmeta_arith_nv,
+ GLOB_vmeta_unm,
+ GLOB_vmeta_arith_vv,
+ GLOB_vmeta_len,
+ GLOB_BC_LEN_Z,
+ GLOB_vmeta_callt,
+ GLOB_BC_CALLT_Z,
+ GLOB_vmeta_for,
+ GLOB_ff_assert,
+ GLOB_fff_fallback,
+ GLOB_fff_res,
+ GLOB_ff_type,
+ GLOB_fff_restv,
+ GLOB_ff_getmetatable,
+ GLOB_ff_setmetatable,
+ GLOB_ff_rawget,
+ GLOB_ff_tonumber,
+ GLOB_ff_tostring,
+ GLOB_fff_gcstep,
+ GLOB_ff_next,
+ GLOB_ff_pairs,
+ GLOB_ff_ipairs_aux,
+ GLOB_ff_ipairs,
+ GLOB_ff_pcall,
+ GLOB_ff_xpcall,
+ GLOB_ff_coroutine_resume,
+ GLOB_ff_coroutine_wrap_aux,
+ GLOB_ff_coroutine_yield,
+ GLOB_ff_math_abs,
+ GLOB_fff_res1,
+ GLOB_ff_math_floor,
+ GLOB_vm_floor_hilo,
+ GLOB_ff_math_ceil,
+ GLOB_vm_ceil_hilo,
+ GLOB_ff_math_sqrt,
+ GLOB_ff_math_log,
+ GLOB_ff_math_log10,
+ GLOB_ff_math_exp,
+ GLOB_ff_math_sin,
+ GLOB_ff_math_cos,
+ GLOB_ff_math_tan,
+ GLOB_ff_math_asin,
+ GLOB_ff_math_acos,
+ GLOB_ff_math_atan,
+ GLOB_ff_math_sinh,
+ GLOB_ff_math_cosh,
+ GLOB_ff_math_tanh,
+ GLOB_ff_math_pow,
+ GLOB_ff_math_atan2,
+ GLOB_ff_math_fmod,
+ GLOB_ff_math_deg,
+ GLOB_ff_math_rad,
+ GLOB_ff_math_ldexp,
+ GLOB_ff_math_frexp,
+ GLOB_ff_math_modf,
+ GLOB_ff_math_min,
+ GLOB_ff_math_max,
+ GLOB_ff_string_len,
+ GLOB_ff_string_byte,
+ GLOB_ff_string_char,
+ GLOB_fff_newstr,
+ GLOB_ff_string_sub,
+ GLOB_ff_string_rep,
+ GLOB_ff_string_reverse,
+ GLOB_ff_string_lower,
+ GLOB_ff_string_upper,
+ GLOB_ff_table_getn,
+ GLOB_ff_bit_tobit,
+ GLOB_fff_resbit,
+ GLOB_ff_bit_band,
+ GLOB_ff_bit_bor,
+ GLOB_ff_bit_bxor,
+ GLOB_ff_bit_bswap,
+ GLOB_ff_bit_bnot,
+ GLOB_ff_bit_lshift,
+ GLOB_ff_bit_rshift,
+ GLOB_ff_bit_arshift,
+ GLOB_ff_bit_rol,
+ GLOB_ff_bit_ror,
+ GLOB_vm_record,
+ GLOB_vm_rethook,
+ GLOB_vm_inshook,
+ GLOB_cont_hook,
+ GLOB_vm_hotloop,
+ GLOB_vm_callhook,
+ GLOB_vm_hotcall,
+ GLOB_vm_exit_handler,
+ GLOB_vm_exit_interp,
+ GLOB_vm_floor,
+ GLOB_vm_floor_efd,
+ GLOB_vm_ceil_efd,
+ GLOB_vm_trunc_efd,
+ GLOB_vm_trunc_hilo,
+ GLOB_vm_foldarith,
+ GLOB_vm_ffi_call,
+ GLOB_BC_MODVN_Z,
+ GLOB_BC_TGETS_Z,
+ GLOB_BC_TSETS_Z,
+ GLOB_BC_RETV_Z,
+ GLOB__MAX
+};
+static const char *const globnames[] = {
+ "vm_returnp",
+ "cont_dispatch",
+ "vm_returnc",
+ "BC_RET_Z",
+ "vm_return",
+ "vm_leave_cp",
+ "vm_leave_unw",
+ "vm_unwind_c",
+ "vm_unwind_c_eh",
+ "vm_unwind_ff",
+ "vm_unwind_ff_eh",
+ "vm_growstack_c",
+ "vm_growstack_l",
+ "vm_resume",
+ "vm_pcall",
+ "vm_call",
+ "vm_call_dispatch",
+ "vmeta_call",
+ "vm_call_dispatch_f",
+ "vm_cpcall",
+ "vm_call_tail",
+ "cont_cat",
+ "BC_CAT_Z",
+ "cont_nop",
+ "vmeta_tgets1",
+ "vmeta_tgets",
+ "vmeta_tgetb",
+ "vmeta_tgetv",
+ "vmeta_tsets1",
+ "vmeta_tsets",
+ "vmeta_tsetb",
+ "vmeta_tsetv",
+ "vmeta_comp",
+ "vmeta_binop",
+ "cont_ra",
+ "cont_condt",
+ "cont_condf",
+ "vmeta_equal",
+ "vmeta_arith_vn",
+ "vmeta_arith_nv",
+ "vmeta_unm",
+ "vmeta_arith_vv",
+ "vmeta_len",
+ "BC_LEN_Z",
+ "vmeta_callt",
+ "BC_CALLT_Z",
+ "vmeta_for",
+ "ff_assert",
+ "fff_fallback",
+ "fff_res",
+ "ff_type",
+ "fff_restv",
+ "ff_getmetatable",
+ "ff_setmetatable",
+ "ff_rawget",
+ "ff_tonumber",
+ "ff_tostring",
+ "fff_gcstep",
+ "ff_next",
+ "ff_pairs",
+ "ff_ipairs_aux",
+ "ff_ipairs",
+ "ff_pcall",
+ "ff_xpcall",
+ "ff_coroutine_resume",
+ "ff_coroutine_wrap_aux",
+ "ff_coroutine_yield",
+ "ff_math_abs",
+ "fff_res1",
+ "ff_math_floor",
+ "vm_floor_hilo",
+ "ff_math_ceil",
+ "vm_ceil_hilo",
+ "ff_math_sqrt",
+ "ff_math_log",
+ "ff_math_log10",
+ "ff_math_exp",
+ "ff_math_sin",
+ "ff_math_cos",
+ "ff_math_tan",
+ "ff_math_asin",
+ "ff_math_acos",
+ "ff_math_atan",
+ "ff_math_sinh",
+ "ff_math_cosh",
+ "ff_math_tanh",
+ "ff_math_pow",
+ "ff_math_atan2",
+ "ff_math_fmod",
+ "ff_math_deg",
+ "ff_math_rad",
+ "ff_math_ldexp",
+ "ff_math_frexp",
+ "ff_math_modf",
+ "ff_math_min",
+ "ff_math_max",
+ "ff_string_len",
+ "ff_string_byte",
+ "ff_string_char",
+ "fff_newstr",
+ "ff_string_sub",
+ "ff_string_rep",
+ "ff_string_reverse",
+ "ff_string_lower",
+ "ff_string_upper",
+ "ff_table_getn",
+ "ff_bit_tobit",
+ "fff_resbit",
+ "ff_bit_band",
+ "ff_bit_bor",
+ "ff_bit_bxor",
+ "ff_bit_bswap",
+ "ff_bit_bnot",
+ "ff_bit_lshift",
+ "ff_bit_rshift",
+ "ff_bit_arshift",
+ "ff_bit_rol",
+ "ff_bit_ror",
+ "vm_record",
+ "vm_rethook",
+ "vm_inshook",
+ "cont_hook",
+ "vm_hotloop",
+ "vm_callhook",
+ "vm_hotcall",
+ "vm_exit_handler",
+ "vm_exit_interp",
+ "vm_floor",
+ "vm_floor_efd",
+ "vm_ceil_efd",
+ "vm_trunc_efd",
+ "vm_trunc_hilo",
+ "vm_foldarith",
+ "vm_ffi_call",
+ "BC_MODVN_Z",
+ "BC_TGETS_Z",
+ "BC_TSETS_Z",
+ "BC_RETV_Z",
+ (const char *)0
+};
+static const char *const extnames[] = {
+ "lj_state_growstack",
+ "lj_meta_tget",
+ "lj_meta_tset",
+ "lj_meta_comp",
+ "lj_meta_equal",
+ "lj_meta_arith",
+ "lj_meta_len",
+ "lj_meta_call",
+ "lj_meta_for",
+ "lj_tab_get",
+ "lj_str_fromnum",
+ "lj_tab_next",
+ "lj_tab_getinth",
+ "lj_ffh_coroutine_wrap_err",
+ "sqrt",
+ "log",
+ "log10",
+ "exp",
+ "sin",
+ "cos",
+ "tan",
+ "asin",
+ "acos",
+ "atan",
+ "sinh",
+ "cosh",
+ "tanh",
+ "pow",
+ "atan2",
+ "fmod",
+ "ldexp",
+ "frexp",
+ "modf",
+ "lj_str_new",
+ "lj_tab_len",
+ "lj_gc_step",
+ "lj_dispatch_ins",
+ "lj_dispatch_call",
+ "lj_meta_cat",
+ "lj_gc_barrieruv",
+ "lj_func_closeuv",
+ "lj_func_newL_gc",
+ "lj_tab_new",
+ "lj_tab_dup",
+ "lj_gc_step_fixtop",
+ "lj_tab_newkey",
+ "lj_tab_reasize",
+ (const char *)0
+};
+#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V)
+#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V)
+#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V)
+#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V)
+#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V)
+#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V)
+#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V)
+#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V)
+#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V)
+#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V)
+#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V)
+#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V)
+#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V)
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ dasm_put(Dst, 0);
+ dasm_put(Dst, 1, FRAME_P, LJ_TTRUE, FRAME_TYPE, FRAME_C, ~LJ_VMST_C, Dt1(->base), DISPATCH_GL(vmstate), 31-3, Dt1(->top));
+ dasm_put(Dst, 55, Dt1(->cframe), Dt1(->maxstack), Dt1(->top), 31-3, Dt1(->top), ~LJ_VMST_C, Dt1(->glref), Dt2(->vmstate));
+ dasm_put(Dst, 135, LJ_TISNUM+1, LJ_TFUNC, LJ_TTAB, Dt1(->base), Dt1(->glref), LJ_TSTR, LJ_TFALSE, LJ_TNIL, ~LJ_VMST_INTERP, GG_G2DISP, DISPATCH_GL(vmstate), LUA_MINSTACK, Dt1(->base), Dt1(->top), 32-3);
+ dasm_put(Dst, 190, Dt1(->base), Dt1(->top), Dt7(->pc), Dt1(->glref), Dt1(->status), FRAME_CP, CFRAME_RESUME, GG_G2DISP, Dt1(->cframe), Dt1(->base), LJ_TISNUM+1, Dt1(->top), LJ_TFUNC, LJ_TTAB, LJ_TSTR, Dt1(->status), FRAME_TYPE, ~LJ_VMST_INTERP, LJ_TNIL, DISPATCH_GL(vmstate));
+ dasm_put(Dst, 283, FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, Dt1(->base), LJ_TISNUM+1, Dt1(->top), LJ_TFUNC, LJ_TTAB, LJ_TSTR, ~LJ_VMST_INTERP, LJ_TNIL, DISPATCH_GL(vmstate));
+ dasm_put(Dst, 384, Dt7(->pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), Dt1(->glref), FRAME_CP, GG_G2DISP, Dt7(->pc), PC2PROTO(k), Dt1(->base));
+ dasm_put(Dst, 491, DISPATCH_GL(tmptv), DISPATCH_GL(tmptv), DISPATCH_GL(tmptv2), DISPATCH_GL(tmptv), Dt1(->base), FRAME_CONT, Dt1(->top), DISPATCH_GL(tmptv));
+ dasm_put(Dst, 566, DISPATCH_GL(tmptv), DISPATCH_GL(tmptv2), DISPATCH_GL(tmptv), Dt1(->base), FRAME_CONT, Dt1(->top), Dt1(->base));
+ dasm_put(Dst, 647, -(BCBIAS_J*4 >> 16), LJ_TTRUE, LJ_TFALSE, Dt1(->base));
+ dasm_put(Dst, 716, Dt1(->base), FRAME_CONT);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 739);
+#endif
+ dasm_put(Dst, 741, Dt1(->base));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 749);
+#else
+ dasm_put(Dst, 756);
+#endif
+ dasm_put(Dst, 759, Dt1(->base), Dt7(->pc), Dt1(->base), Dt1(->base));
+#if LJ_HASJIT
+ dasm_put(Dst, 807);
+#endif
+ dasm_put(Dst, 809);
+#if LJ_HASJIT
+ dasm_put(Dst, 811, BC_JFORI);
+#endif
+ dasm_put(Dst, 814);
+#if LJ_HASJIT
+ dasm_put(Dst, 816, BC_JFORI);
+#endif
+ dasm_put(Dst, 819, BC_FORI, ~LJ_TNUMX, 31-3, Dt8(->upvalue), Dt6(->metatable), DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable]));
+ dasm_put(Dst, 884, Dt6(->hmask), Dt5(->hash), Dt6(->node), 31-5, 31-3, DtB(->key), DtB(->val), DtB(->next), LJ_TUDATA, 31-2, 4*~LJ_TNUMX, DISPATCH_GL(gcroot[GCROOT_BASEMT]));
+ dasm_put(Dst, 940, Dt6(->metatable), Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist));
+ dasm_put(Dst, 1000, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), Dt1(->top), (2+1)*8);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1073, Dt6(->metatable), Dt8(->upvalue[0]));
+#else
+ dasm_put(Dst, 1082, Dt8(->upvalue[0]));
+#endif
+ dasm_put(Dst, 1086, (3+1)*8, Dt6(->asize), Dt6(->array), 31-3, (0+1)*8, (2+1)*8, Dt6(->hmask), (0+1)*8, (0+1)*8);
+ dasm_put(Dst, 1150);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1163, Dt6(->metatable), Dt8(->upvalue[0]));
+#else
+ dasm_put(Dst, 1172, Dt8(->upvalue[0]));
+#endif
+ dasm_put(Dst, 1176, (3+1)*8, DISPATCH_GL(hookmask), 32-HOOK_ACTIVE_SHIFT, 8+FRAME_PCALL, DISPATCH_GL(hookmask), 32-HOOK_ACTIVE_SHIFT, 16+FRAME_PCALL, LJ_TTHREAD, Dt1(->status), Dt1(->cframe), Dt1(->top));
+ dasm_put(Dst, 1237, LUA_YIELD, Dt1(->base), Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->base), LUA_YIELD, Dt1(->top), ~LJ_VMST_INTERP, Dt1(->base), DISPATCH_GL(vmstate), Dt1(->maxstack));
+ dasm_put(Dst, 1300, Dt1(->top), FRAME_TYPE, LJ_TTRUE, FRAME_TYPE, LJ_TFALSE, Dt1(->top), (2+1)*8, 32-3);
+ dasm_put(Dst, 1360, Dt8(->upvalue[0].gcr), Dt1(->status), Dt1(->cframe), Dt1(->top), LUA_YIELD, Dt1(->base), Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->base), LUA_YIELD, Dt1(->top), ~LJ_VMST_INTERP);
+ dasm_put(Dst, 1419, Dt1(->base), DISPATCH_GL(vmstate), Dt1(->maxstack), Dt1(->top), FRAME_TYPE, 32-3, Dt1(->cframe));
+ dasm_put(Dst, 1476, Dt1(->base), CFRAME_RESUME, Dt1(->top), LUA_YIELD, Dt1(->cframe), Dt1(->status), (1+1)*8, FRAME_TYPE);
+ dasm_put(Dst, 1541);
+ dasm_put(Dst, 1610);
+ dasm_put(Dst, 1673);
+ dasm_put(Dst, 1738);
+ dasm_put(Dst, 1808, Dt8(->upvalue[0]), DISPATCH_GL(tmptv), DISPATCH_GL(tmptv), (2+1)*8, (2+1)*8);
+ dasm_put(Dst, 1880, Dt5(->len));
+ dasm_put(Dst, 1947, Dt5(->len), (0+1)*8, Dt5([1]), (1+1)*8, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), DISPATCH_GL(tmptv), Dt1(->base), Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 2007, Dt5(->len), sizeof(GCstr)-1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 2073, Dt5(->len), DISPATCH_GL(tmpbuf.sz), Dt5([1]), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(strempty), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 2132, DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 2191, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), DISPATCH_GL(tmpbuf.sz), Dt5(->len), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 2258);
+ dasm_put(Dst, 2329);
+ dasm_put(Dst, 2417, Dt8(->f), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->base), Dt1(->top), Dt1(->base), 31-3, Dt1(->top), Dt7(->pc));
+ dasm_put(Dst, 2496, FRAME_TYPE, LUA_MINSTACK, Dt1(->base), Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 2538);
+#endif
+ dasm_put(Dst, 2540, DISPATCH_GL(hookmask), HOOK_ACTIVE, GG_DISP2STATIC, DISPATCH_GL(hookmask), DISPATCH_GL(hookcount), HOOK_ACTIVE, 31-LUA_HOOKLINE, DISPATCH_GL(hookcount), Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 2587, GG_DISP2STATIC);
+#if LJ_HASJIT
+ dasm_put(Dst, 2605);
+#endif
+ dasm_put(Dst, 2607);
+#if LJ_HASJIT
+ dasm_put(Dst, 2610);
+#endif
+ dasm_put(Dst, 2613);
+#if LJ_HASJIT
+ dasm_put(Dst, 2615);
+#endif
+ dasm_put(Dst, 2618, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 2640);
+#endif
+ dasm_put(Dst, 2642);
+#if LJ_HASJIT
+ dasm_put(Dst, 2644);
+#endif
+ dasm_put(Dst, 2646);
+#if LJ_HASJIT
+ dasm_put(Dst, 2730);
+#else
+ dasm_put(Dst, 2753);
+#endif
+ dasm_put(Dst, 2756);
+#if LJ_HASFFI
+ dasm_put(Dst, 2819);
+#endif
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ dasm_put(Dst, 2821, defop);
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ dasm_put(Dst, 2823, -(BCBIAS_J*4 >> 16));
+ if (op == BC_ISLE || op == BC_ISGT) {
+ dasm_put(Dst, 2837);
+ }
+ if (op == BC_ISLT || op == BC_ISLE) {
+ dasm_put(Dst, 2840);
+ } else {
+ dasm_put(Dst, 2842);
+ }
+ dasm_put(Dst, 2844);
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ dasm_put(Dst, 2855, -(BCBIAS_J*4 >> 16));
+ if (vk) {
+ dasm_put(Dst, 2869);
+ } else {
+ dasm_put(Dst, 2871);
+ }
+ dasm_put(Dst, 2873, ~LJ_TISPRI, ~LJ_TISTABUD);
+ if (vk) {
+ dasm_put(Dst, 2895);
+ } else {
+ dasm_put(Dst, 2897);
+ }
+ dasm_put(Dst, 2899);
+ if (vk) {
+ dasm_put(Dst, 2901);
+ } else {
+ dasm_put(Dst, 2903);
+ }
+ dasm_put(Dst, 2905, Dt6(->metatable), 1-vk, Dt6(->nomm), 1<> 16));
+ if (vk) {
+ dasm_put(Dst, 2940);
+ } else {
+ dasm_put(Dst, 2942);
+ }
+ dasm_put(Dst, 2944);
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ dasm_put(Dst, 2955, -(BCBIAS_J*4 >> 16));
+ if (vk) {
+ dasm_put(Dst, 2969);
+ } else {
+ dasm_put(Dst, 2972);
+ }
+ dasm_put(Dst, 2974);
+ if (!vk) {
+ dasm_put(Dst, 2986);
+ }
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ dasm_put(Dst, 2992, 32-3, -(BCBIAS_J*4 >> 16));
+ if (vk) {
+ dasm_put(Dst, 3004);
+ } else {
+ dasm_put(Dst, 3006);
+ }
+ dasm_put(Dst, 3008);
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ dasm_put(Dst, 3019);
+ if (op == BC_IST || op == BC_ISF) {
+ dasm_put(Dst, 3025, -(BCBIAS_J*4 >> 16));
+ if (op == BC_IST) {
+ dasm_put(Dst, 3030);
+ } else {
+ dasm_put(Dst, 3032);
+ }
+ } else {
+ if (op == BC_ISTC) {
+ dasm_put(Dst, 3034);
+ } else {
+ dasm_put(Dst, 3037);
+ }
+ dasm_put(Dst, 3040, -(BCBIAS_J*4 >> 16));
+ }
+ dasm_put(Dst, 3047);
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ dasm_put(Dst, 3058);
+ break;
+ case BC_NOT:
+ dasm_put(Dst, 3071, LJ_TTRUE);
+ break;
+ case BC_UNM:
+ dasm_put(Dst, 3087);
+ break;
+ case BC_LEN:
+ dasm_put(Dst, 3104, Dt5(->len));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 3128, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 3135);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 3141, Dt6(->nomm), 1<base), 32-3, Dt1(->base));
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ dasm_put(Dst, 3390, 32-1);
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ dasm_put(Dst, 3407, 32-1, LJ_TCDATA);
+#endif
+ break;
+ case BC_KSHORT:
+ dasm_put(Dst, 3426, 32-3);
+ break;
+ case BC_KNUM:
+ dasm_put(Dst, 3442);
+ break;
+ case BC_KPRI:
+ dasm_put(Dst, 3455, 32-3);
+ break;
+ case BC_KNIL:
+ dasm_put(Dst, 3470);
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ dasm_put(Dst, 3489, 32-1, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_USETV:
+ dasm_put(Dst, 3510, 32-1, offsetof(GCfuncL, uvptr), DtA(->marked), DtA(->v), LJ_GC_BLACK, DtA(->closed), -(LJ_TISNUM+1), LJ_TISGCV - (LJ_TISNUM+1), Dt4(->gch.marked), LJ_GC_WHITES, GG_DISP2G);
+ break;
+ case BC_USETS:
+ dasm_put(Dst, 3562, 32-1, 32-1, offsetof(GCfuncL, uvptr), DtA(->marked), DtA(->v), LJ_GC_BLACK, Dt5(->marked), DtA(->closed), LJ_GC_WHITES, GG_DISP2G);
+ break;
+ case BC_USETN:
+ dasm_put(Dst, 3611, 32-1, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_USETP:
+ dasm_put(Dst, 3632, 32-1, offsetof(GCfuncL, uvptr), 32-3, DtA(->v));
+ break;
+
+ case BC_UCLO:
+ dasm_put(Dst, 3655, Dt1(->openupval), 32-1, -(BCBIAS_J*4 >> 16), Dt1(->base), Dt1(->base));
+ break;
+
+ case BC_FNEW:
+ dasm_put(Dst, 3685, 32-1, Dt1(->base), Dt1(->base));
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ dasm_put(Dst, 3711, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base));
+ if (op == BC_TNEW) {
+ dasm_put(Dst, 3724);
+ } else {
+ dasm_put(Dst, 3732, 32-1);
+ }
+ dasm_put(Dst, 3739, Dt1(->base));
+ break;
+
+ case BC_GGET:
+ case BC_GSET:
+ dasm_put(Dst, 3762, 32-1, Dt7(->env));
+ if (op == BC_GGET) {
+ dasm_put(Dst, 3770);
+ } else {
+ dasm_put(Dst, 3773);
+ }
+ break;
+
+ case BC_TGETV:
+ dasm_put(Dst, 3776, Dt6(->asize), Dt6(->array), 31-3, Dt6(->metatable), Dt6(->nomm), 1<hmask), Dt5(->hash), Dt6(->node), 31-5, 31-3, DtB(->key), DtB(->val), DtB(->next), Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), 31-3, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<marked), Dt6(->gclist));
+ break;
+ case BC_TSETS:
+ dasm_put(Dst, 4026, 32-1, Dt6(->hmask), Dt5(->hash), Dt6(->node), Dt6(->nomm), 31-5, 31-3, Dt6(->marked), DtB(->key), DtB(->val), LJ_GC_BLACK, DtB(->val), Dt6(->metatable));
+ dasm_put(Dst, 4087, Dt6(->nomm), 1<next), Dt6(->metatable), DISPATCH_GL(tmptv), Dt1(->base), Dt6(->nomm), 1<base), DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain));
+ dasm_put(Dst, 4138, Dt6(->marked), Dt6(->gclist));
+ break;
+ case BC_TSETB:
+ dasm_put(Dst, 4145, 32-3, Dt6(->asize), Dt6(->array), Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->nomm), 1<marked));
+ dasm_put(Dst, 4205, Dt6(->gclist));
+ break;
+
+ case BC_TSETM:
+ dasm_put(Dst, 4210, 32-3, Dt6(->asize), 31-3, Dt6(->marked), Dt6(->array), LJ_GC_BLACK, Dt1(->base), DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->marked), Dt6(->gclist));
+ dasm_put(Dst, 4279);
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ dasm_put(Dst, 4282);
+ break;
+ case BC_CALL:
+ dasm_put(Dst, 4284, Dt7(->pc));
+ break;
+
+ case BC_CALLMT:
+ dasm_put(Dst, 4304);
+ break;
+ case BC_CALLT:
+ dasm_put(Dst, 4306, FRAME_TYPE, Dt7(->ffid), FRAME_VARG, Dt7(->pc), -4-8, Dt7(->pc), PC2PROTO(k), FRAME_TYPEP);
+ dasm_put(Dst, 4371, FRAME_TYPE);
+ break;
+
+ case BC_ITERC:
+ dasm_put(Dst, 4378, Dt7(->pc));
+ break;
+
+ case BC_ITERN:
+#if LJ_HASJIT
+#endif
+ dasm_put(Dst, 4404, Dt6(->asize), Dt6(->array), 31-3, -(BCBIAS_J*4 >> 16), Dt6(->hmask), Dt6(->node), 31-5, 31-3, DtB(->key), -(BCBIAS_J*4 >> 16));
+ dasm_put(Dst, 4483);
+ break;
+
+ case BC_ISNEXT:
+ dasm_put(Dst, 4487, LJ_TTAB, LJ_TFUNC, LJ_TNIL, Dt8(->ffid), FF_next_N, 32-1, -(BCBIAS_J*4 >> 16), BC_JMP, BC_ITERC, -(BCBIAS_J*4 >> 16));
+ break;
+
+ case BC_VARG:
+ dasm_put(Dst, 4538, FRAME_VARG, Dt1(->maxstack), Dt1(->top), Dt1(->base), 32-3, Dt1(->base));
+ dasm_put(Dst, 4618);
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ dasm_put(Dst, 4624);
+ break;
+
+ case BC_RET:
+ dasm_put(Dst, 4626, FRAME_TYPE, FRAME_VARG, Dt7(->pc), PC2PROTO(k), FRAME_TYPEP);
+ break;
+
+ case BC_RET0: case BC_RET1:
+ dasm_put(Dst, 4696, FRAME_TYPE, FRAME_VARG);
+ if (op == BC_RET1) {
+ dasm_put(Dst, 4709);
+ }
+ dasm_put(Dst, 4712, Dt7(->pc), PC2PROTO(k));
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+#if LJ_HASJIT
+ dasm_put(Dst, 4740);
+#endif
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ dasm_put(Dst, 4742, FORL_IDX*8, FORL_STEP*8, FORL_STOP*8);
+ if (!vk) {
+ dasm_put(Dst, 4750);
+ }
+ if (vk) {
+ dasm_put(Dst, 4758, FORL_IDX*8);
+ }
+ dasm_put(Dst, 4762, FORL_EXT*8);
+ if (op != BC_JFORL) {
+ dasm_put(Dst, 4770, 32-1);
+ if (op == BC_JFORI) {
+ dasm_put(Dst, 4774, -(BCBIAS_J*4 >> 16));
+ } else {
+ dasm_put(Dst, 4777, -(BCBIAS_J*4 >> 16));
+ }
+ }
+ if (op == BC_FORI) {
+ dasm_put(Dst, 4780);
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 4782);
+ } else {
+ dasm_put(Dst, 4784, BC_JLOOP);
+ }
+ dasm_put(Dst, 4787);
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ dasm_put(Dst, 4802);
+#endif
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ dasm_put(Dst, 4804);
+ if (op == BC_JITERL) {
+ dasm_put(Dst, 4810);
+ } else {
+ dasm_put(Dst, 4812, 32-1, -(BCBIAS_J*4 >> 16));
+ }
+ dasm_put(Dst, 4819);
+ break;
+
+ case BC_LOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 4831);
+#endif
+ break;
+
+ case BC_ILOOP:
+ dasm_put(Dst, 4833);
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 4844);
+#endif
+ break;
+
+ case BC_JMP:
+ dasm_put(Dst, 4846, 32-1, -(BCBIAS_J*4 >> 16));
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ dasm_put(Dst, 4862);
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ dasm_put(Dst, 4864, Dt1(->maxstack), -4+PC2PROTO(numparams), -4+PC2PROTO(k), 31-3);
+ if (op == BC_JFUNCF) {
+ dasm_put(Dst, 4882);
+ } else {
+ dasm_put(Dst, 4884);
+ }
+ dasm_put(Dst, 4893);
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ dasm_put(Dst, 4899);
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ dasm_put(Dst, 4901, Dt1(->maxstack), 8+FRAME_VARG, -4+PC2PROTO(k), -4+PC2PROTO(numparams));
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 4951, Dt8(->f));
+ } else {
+ dasm_put(Dst, 4954, DISPATCH_GL(wrapf));
+ }
+ dasm_put(Dst, 4957, Dt1(->maxstack), Dt1(->base), Dt1(->top), ~LJ_VMST_C);
+ if (op == BC_FUNCCW) {
+ dasm_put(Dst, 4970, Dt8(->f));
+ }
+ dasm_put(Dst, 4973, DISPATCH_GL(vmstate), Dt1(->top), 31-3, Dt1(->base), ~LJ_VMST_INTERP, DISPATCH_GL(vmstate));
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
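+/* Generate the VM: fixed subroutines first, then the code for every bytecode instruction. */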
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ dasm_put(Dst, 4994);
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
+ (int)ctx->codesz, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
+ 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .LASFDE1-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
+ (int)ctx->codesz, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
+ 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE1:\n\n");
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/src/LuaJIT/src/buildvm_x64.h b/src/LuaJIT/src/buildvm_x64.h
new file mode 100644
index 000000000..55b22b2e8
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_x64.h
@@ -0,0 +1,3406 @@
+/*
+** This file has been pre-processed with DynASM.
+** http://luajit.org/dynasm.html
+** DynASM version 1.3.0, DynASM x64 version 1.3.0
+** DO NOT EDIT! The original file is in "buildvm_x86.dasc".
+*/
+
+#if DASM_VERSION != 10300
+#error "Version mismatch between DynASM and included encoding engine"
+#endif
+
+#define DASM_SECTION_CODE_OP 0
+#define DASM_SECTION_CODE_SUB 1
+#define DASM_MAXSECTION 2
+static const unsigned char build_actionlist[16378] = {
+ 254,1,248,10,252,247,195,237,15,132,244,11,131,227,252,248,41,218,72,141,
+ 76,25,252,248,139,90,252,252,199,68,10,4,237,248,12,131,192,1,137,68,36,4,
+ 252,247,195,237,15,132,244,13,248,14,129,252,243,239,252,247,195,237,15,133,
+ 244,10,65,199,134,233,237,131,227,252,248,41,211,252,247,219,131,232,1,15,
+ 132,244,248,248,1,72,139,44,10,72,137,106,252,248,131,194,8,131,232,1,15,
+ 133,244,1,248,2,255,139,108,36,24,137,157,233,248,3,139,68,36,4,139,76,36,
+ 16,248,4,57,193,15,133,244,252,248,5,131,252,234,8,137,149,233,248,15,72,
+ 139,76,36,32,72,137,141,233,49,192,248,16,72,131,196,40,65,94,65,95,91,93,
+ 195,248,6,15,130,244,253,59,149,233,15,135,244,254,199,66,252,252,237,131,
+ 194,8,131,192,1,252,233,244,4,248,7,255,133,201,15,132,244,5,41,193,141,20,
+ 202,252,233,244,5,248,8,137,149,233,137,68,36,4,137,206,137,252,239,232,251,
+ 1,0,139,149,233,252,233,244,3,248,17,137,252,240,72,137,252,252,248,18,139,
+ 108,36,24,139,173,233,199,133,233,237,252,233,244,16,248,19,139,124,36,24,
+ 137,198,72,131,196,40,65,94,65,95,91,93,252,233,251,1,1,248,20,72,129,231,
+ 239,72,137,252,252,248,21,255,139,108,36,24,72,199,193,252,248,252,255,252,
+ 255,252,255,184,237,139,149,233,68,139,181,233,65,129,198,239,139,90,252,
+ 252,199,66,252,252,237,65,199,134,233,237,252,233,244,12,248,22,190,237,252,
+ 233,244,248,248,23,131,232,8,252,233,244,247,248,24,141,68,194,252,248,248,
+ 1,15,182,139,233,131,195,4,137,149,233,255,137,133,233,137,92,36,28,137,206,
+ 248,2,137,252,239,232,251,1,0,139,149,233,139,133,233,139,106,252,248,41,
+ 208,193,232,3,131,192,1,139,157,233,139,11,15,182,252,233,15,182,205,131,
+ 195,4,65,252,255,36,252,238,248,25,85,83,65,87,65,86,72,131,252,236,40,137,
+ 252,253,137,124,36,24,137,252,241,187,237,49,192,76,141,188,253,36,233,68,
+ 139,181,233,65,129,198,239,76,137,189,233,137,68,36,28,72,137,68,36,32,137,
+ 68,36,16,137,68,36,20,56,133,233,15,132,244,249,65,199,134,233,237,136,133,
+ 233,139,149,233,139,133,233,41,200,193,232,3,131,192,1,41,209,139,90,252,
+ 252,137,68,36,4,252,247,195,237,255,15,132,244,13,252,233,244,14,248,26,85,
+ 83,65,87,65,86,72,131,252,236,40,187,237,137,76,36,20,252,233,244,247,248,
+ 27,85,83,65,87,65,86,72,131,252,236,40,187,237,248,1,137,84,36,16,137,252,
+ 253,137,124,36,24,137,252,241,76,139,189,233,76,137,124,36,32,137,108,36,
+ 28,72,137,165,233,248,2,68,139,181,233,65,129,198,239,248,3,65,199,134,233,
+ 237,139,149,233,255,1,203,41,211,139,133,233,41,200,193,232,3,131,192,1,248,
+ 28,139,105,252,248,129,121,253,252,252,239,15,133,244,29,248,30,137,202,137,
+ 90,252,252,139,157,233,139,11,15,182,252,233,15,182,205,131,195,4,65,252,
+ 255,36,252,238,248,31,85,83,65,87,65,86,72,131,252,236,40,137,252,253,137,
+ 124,36,24,137,108,36,28,68,139,189,233,68,43,189,233,199,68,36,20,0,0,0,0,
+ 68,137,124,36,16,76,139,189,233,76,137,124,36,32,72,137,165,233,252,255,209,
+ 133,192,15,132,244,15,137,193,187,237,252,233,244,2,248,11,1,209,131,227,
+ 252,248,137,213,41,218,199,68,193,252,252,237,137,200,139,93,252,244,72,99,
+ 77,252,240,255,131,252,249,1,15,134,244,247,255,76,141,61,245,76,1,252,249,
+ 255,68,139,122,252,248,69,139,191,233,69,139,191,233,252,255,225,255,248,
+ 1,15,132,244,32,41,213,193,252,237,3,141,69,252,255,252,233,244,33,255,248,
+ 34,15,182,75,252,255,131,252,237,16,141,12,202,41,252,233,15,132,244,35,252,
+ 247,217,193,252,233,3,139,124,36,24,137,151,233,137,202,72,139,8,72,137,77,
+ 0,137,252,238,252,233,244,36,248,37,137,4,36,199,68,36,4,237,72,141,4,36,
+ 128,123,252,252,235,15,133,244,247,65,141,142,233,137,41,199,65,4,237,137,
+ 205,252,233,244,248,248,38,15,182,67,252,254,255,199,68,36,4,237,137,4,36,
+ 255,252,242,15,42,192,252,242,15,17,4,36,255,72,141,4,36,252,233,244,247,
+ 248,39,15,182,67,252,254,141,4,194,248,1,15,182,107,252,255,141,44,252,234,
+ 248,2,139,124,36,24,137,151,233,137,252,238,72,137,194,137,252,253,137,92,
+ 36,28,232,251,1,2,139,149,233,133,192,15,132,244,249,248,35,15,182,75,252,
+ 253,72,139,40,72,137,44,202,139,3,15,182,204,15,182,232,131,195,4,193,232,
+ 16,65,252,255,36,252,238,248,3,139,141,233,137,89,252,244,141,153,233,41,
+ 211,139,105,252,248,184,237,252,233,244,30,248,40,137,4,36,199,68,36,4,237,
+ 72,141,4,36,128,123,252,252,235,15,133,244,247,255,65,141,142,233,137,41,
+ 199,65,4,237,137,205,252,233,244,248,248,41,15,182,67,252,254,255,72,141,
+ 4,36,252,233,244,247,248,42,15,182,67,252,254,141,4,194,248,1,15,182,107,
+ 252,255,141,44,252,234,248,2,139,124,36,24,137,151,233,137,252,238,72,137,
+ 194,137,252,253,137,92,36,28,232,251,1,3,139,149,233,133,192,15,132,244,249,
+ 15,182,75,252,253,72,139,44,202,72,137,40,248,43,139,3,15,182,204,15,182,
+ 232,131,195,4,193,232,16,65,252,255,36,252,238,248,3,139,141,233,137,89,252,
+ 244,15,182,67,252,253,72,139,44,194,72,137,105,16,141,153,233,41,211,139,
+ 105,252,248,184,237,252,233,244,30,248,44,139,108,36,24,137,149,233,141,52,
+ 202,141,20,194,137,252,239,15,182,75,252,252,137,92,36,28,232,251,1,4,248,
+ 3,139,149,233,255,131,252,248,1,15,135,244,45,248,4,141,91,4,15,130,244,252,
+ 248,5,15,183,67,252,254,141,156,253,131,233,248,6,139,3,15,182,204,15,182,
+ 232,131,195,4,193,232,16,65,252,255,36,252,238,248,46,131,195,4,129,120,253,
+ 4,239,15,130,244,5,252,233,244,6,248,47,129,120,253,4,239,252,233,244,4,248,
+ 48,131,252,235,4,137,206,137,252,233,139,108,36,24,137,149,233,255,137,194,
+ 137,252,239,137,92,36,28,232,251,1,5,252,233,244,3,248,49,255,131,252,235,
+ 4,139,108,36,24,137,149,233,137,252,239,139,115,252,252,137,92,36,28,232,
+ 251,1,6,252,233,244,3,255,248,50,255,15,182,107,252,255,255,248,51,65,141,
+ 4,199,252,233,244,247,248,52,255,248,53,65,141,4,199,141,44,252,234,149,252,
+ 233,244,248,248,54,141,4,194,137,197,252,233,244,248,248,55,255,248,56,141,
+ 4,194,248,1,141,44,252,234,248,2,141,12,202,68,15,182,67,252,252,137,206,
+ 137,193,139,124,36,24,137,151,233,137,252,234,137,252,253,137,92,36,28,232,
+ 251,1,7,139,149,233,133,192,15,132,244,43,248,45,137,193,41,208,137,89,252,
+ 244,141,152,233,184,237,252,233,244,28,248,57,139,108,36,24,137,149,233,141,
+ 52,194,137,252,239,137,92,36,28,232,251,1,8,139,149,233,255,133,192,15,133,
+ 244,45,15,183,67,252,254,139,60,194,252,233,244,58,255,252,233,244,45,255,
+ 248,59,141,76,202,8,248,29,137,76,36,4,137,4,36,131,252,233,8,139,108,36,
+ 24,137,149,233,137,206,141,20,193,137,252,239,137,92,36,28,232,251,1,9,139,
+ 149,233,139,76,36,4,139,4,36,139,105,252,248,131,192,1,65,57,215,15,132,244,
+ 60,137,202,137,90,252,252,139,157,233,139,11,15,182,252,233,15,182,205,131,
+ 195,4,65,252,255,36,252,238,248,61,139,108,36,24,137,149,233,137,206,137,
+ 252,239,137,92,36,28,232,251,1,10,139,149,233,139,67,252,252,15,182,204,15,
+ 182,232,193,232,16,65,252,255,164,253,252,238,233,248,62,129,252,248,239,
+ 15,130,244,63,139,106,4,129,252,253,239,15,131,244,63,139,90,252,252,137,
+ 68,36,4,137,106,252,252,139,42,137,106,252,248,131,232,2,15,132,244,248,255,
+ 137,209,248,1,131,193,8,72,139,41,72,137,105,252,248,131,232,1,15,133,244,
+ 1,248,2,139,68,36,4,252,233,244,64,248,65,129,252,248,239,15,130,244,63,139,
+ 106,4,137,252,233,193,252,249,15,131,252,249,252,254,15,132,244,249,184,237,
+ 252,247,213,57,232,255,15,71,197,255,15,134,244,247,137,232,248,1,255,248,
+ 2,139,106,252,248,139,132,253,197,233,139,90,252,252,199,66,252,252,237,137,
+ 66,252,248,252,233,244,66,248,3,184,237,252,233,244,2,248,67,129,252,248,
+ 239,15,130,244,63,139,106,4,139,90,252,252,129,252,253,239,15,133,244,252,
+ 248,1,139,42,139,173,233,248,2,133,252,237,199,66,252,252,237,255,15,132,
+ 244,66,65,139,134,233,199,66,252,252,237,137,106,252,248,139,141,233,35,136,
+ 233,105,201,239,3,141,233,248,3,129,185,233,239,15,133,244,250,57,129,233,
+ 15,132,244,251,248,4,139,137,233,133,201,15,133,244,3,255,252,233,244,66,
+ 248,5,139,105,4,129,252,253,239,15,132,244,66,139,1,137,106,252,252,137,66,
+ 252,248,252,233,244,66,248,6,129,252,253,239,15,132,244,1,129,252,253,239,
+ 15,135,244,254,129,252,253,239,15,134,244,253,189,237,252,233,244,254,248,
+ 7,255,189,237,248,8,252,247,213,65,139,172,253,174,233,252,233,244,2,248,
+ 68,129,252,248,239,15,130,244,63,129,122,253,4,239,15,133,244,63,139,42,131,
+ 189,233,0,15,133,244,63,129,122,253,12,239,15,133,244,63,139,66,8,137,133,
+ 233,139,90,252,252,199,66,252,252,237,255,137,106,252,248,252,246,133,233,
+ 235,15,132,244,247,128,165,233,235,65,139,134,233,65,137,174,233,137,133,
+ 233,248,1,252,233,244,66,248,69,129,252,248,239,15,130,244,63,129,122,253,
+ 4,239,15,133,244,63,137,213,139,50,141,82,8,139,124,36,24,232,251,1,11,137,
+ 252,234,72,139,40,139,90,252,252,72,137,106,252,248,252,233,244,66,248,70,
+ 255,129,252,248,239,15,133,244,63,129,122,253,4,239,255,15,133,244,247,139,
+ 42,252,233,244,71,248,1,15,135,244,63,255,15,131,244,63,255,252,242,15,16,
+ 2,252,233,244,72,255,221,2,252,233,244,73,255,248,74,129,252,248,239,15,130,
+ 244,63,139,90,252,252,129,122,253,4,239,15,133,244,249,139,2,248,2,199,66,
+ 252,252,237,137,66,252,248,252,233,244,66,248,3,129,122,253,4,239,15,135,
+ 244,63,65,131,190,233,0,15,133,244,63,65,139,174,233,65,59,174,233,255,15,
+ 130,244,247,232,244,75,248,1,139,108,36,24,137,149,233,137,92,36,28,137,214,
+ 137,252,239,255,232,251,1,12,255,232,251,1,13,255,139,149,233,252,233,244,
+ 2,248,76,129,252,248,239,15,130,244,63,15,132,244,248,248,1,129,122,253,4,
+ 239,15,133,244,63,139,108,36,24,137,149,233,137,149,233,139,90,252,252,139,
+ 50,141,82,8,137,252,239,137,92,36,28,232,251,1,14,139,149,233,133,192,15,
+ 132,244,249,72,139,106,8,72,139,66,16,72,137,106,252,248,72,137,2,248,77,
+ 184,237,255,252,233,244,78,248,2,199,66,12,237,252,233,244,1,248,3,199,66,
+ 252,252,237,252,233,244,66,248,79,129,252,248,239,15,130,244,63,139,42,129,
+ 122,253,4,239,15,133,244,63,255,131,189,233,0,15,133,244,63,255,139,106,252,
+ 248,139,133,233,139,90,252,252,199,66,252,252,237,137,66,252,248,199,66,12,
+ 237,184,237,252,233,244,78,248,80,129,252,248,239,15,130,244,63,129,122,253,
+ 4,239,15,133,244,63,129,122,253,12,239,255,139,90,252,252,255,139,66,8,131,
+ 192,1,199,66,252,252,237,137,66,252,248,255,252,242,15,16,66,8,72,189,237,
+ 237,102,72,15,110,205,252,242,15,88,193,252,242,15,45,192,252,242,15,17,66,
+ 252,248,255,139,42,59,133,233,15,131,244,248,193,224,3,3,133,233,248,1,129,
+ 120,253,4,239,15,132,244,81,72,139,40,72,137,42,252,233,244,77,248,2,131,
+ 189,233,0,15,132,244,81,137,252,239,137,213,137,198,232,251,1,15,137,252,
+ 234,133,192,15,133,244,1,248,81,184,237,252,233,244,78,248,82,255,139,106,
+ 252,248,139,133,233,139,90,252,252,199,66,252,252,237,137,66,252,248,255,
+ 199,66,12,237,199,66,8,0,0,0,0,255,15,87,192,252,242,15,17,66,8,255,217,252,
+ 238,221,90,8,255,184,237,252,233,244,78,248,83,129,252,248,239,15,130,244,
+ 63,141,74,8,131,232,1,187,237,248,1,65,15,182,174,233,193,252,237,235,131,
+ 229,1,1,252,235,252,233,244,28,248,84,129,252,248,239,15,130,244,63,129,122,
+ 253,12,239,15,133,244,63,255,139,106,4,137,106,12,199,66,4,237,139,42,139,
+ 90,8,137,106,8,137,26,141,74,16,131,232,2,187,237,252,233,244,1,248,85,129,
+ 252,248,239,15,130,244,63,139,42,139,90,252,252,137,92,36,28,137,44,36,129,
+ 122,253,4,239,15,133,244,63,72,131,189,233,0,15,133,244,63,128,189,233,235,
+ 15,135,244,63,139,141,233,15,132,244,247,255,59,141,233,15,132,244,63,248,
+ 1,141,92,193,252,240,59,157,233,15,135,244,63,137,157,233,139,108,36,24,137,
+ 149,233,131,194,8,137,149,233,141,108,194,232,72,41,221,57,203,15,132,244,
+ 249,248,2,72,139,4,43,72,137,67,252,248,131,252,235,8,57,203,15,133,244,2,
+ 248,3,137,206,139,60,36,232,244,25,65,199,134,233,237,255,139,108,36,24,139,
+ 28,36,139,149,233,129,252,248,239,15,135,244,254,248,4,139,139,233,68,139,
+ 187,233,137,139,233,68,137,252,251,41,203,15,132,244,252,141,4,26,193,252,
+ 235,3,59,133,233,15,135,244,255,137,213,72,41,205,248,5,72,139,1,72,137,4,
+ 41,131,193,8,68,57,252,249,15,133,244,5,248,6,141,67,2,199,66,252,252,237,
+ 248,7,139,92,36,28,137,68,36,4,72,199,193,252,248,252,255,252,255,252,255,
+ 252,247,195,237,255,15,132,244,13,252,233,244,14,248,8,199,66,252,252,237,
+ 139,139,233,131,252,233,8,137,139,233,72,139,1,72,137,2,184,237,252,233,244,
+ 7,248,9,139,12,36,68,137,185,233,137,222,137,252,239,232,251,1,0,139,28,36,
+ 139,149,233,252,233,244,4,248,86,139,106,252,248,139,173,233,139,90,252,252,
+ 137,92,36,28,137,44,36,72,131,189,233,0,15,133,244,63,255,128,189,233,235,
+ 15,135,244,63,139,141,233,15,132,244,247,59,141,233,15,132,244,63,248,1,141,
+ 92,193,252,248,59,157,233,15,135,244,63,137,157,233,139,108,36,24,137,149,
+ 233,137,149,233,141,108,194,252,240,72,41,221,57,203,15,132,244,249,248,2,
+ 255,72,139,4,43,72,137,67,252,248,131,252,235,8,57,203,15,133,244,2,248,3,
+ 137,206,139,60,36,232,244,25,65,199,134,233,237,139,108,36,24,139,28,36,139,
+ 149,233,129,252,248,239,15,135,244,254,248,4,139,139,233,68,139,187,233,137,
+ 139,233,68,137,252,251,41,203,15,132,244,252,141,4,26,193,252,235,3,59,133,
+ 233,15,135,244,255,255,137,213,72,41,205,248,5,72,139,1,72,137,4,41,131,193,
+ 8,68,57,252,249,15,133,244,5,248,6,141,67,1,248,7,139,92,36,28,137,68,36,
+ 4,49,201,252,247,195,237,15,132,244,13,252,233,244,14,248,8,137,222,137,252,
+ 239,232,251,1,16,248,9,139,12,36,68,137,185,233,137,222,137,252,239,232,251,
+ 1,0,139,28,36,139,149,233,252,233,244,4,248,87,139,108,36,24,72,252,247,133,
+ 233,237,15,132,244,63,255,137,149,233,141,68,194,252,248,137,133,233,49,192,
+ 72,137,133,233,176,235,136,133,233,252,233,244,16,255,248,71,255,248,73,139,
+ 90,252,252,221,90,252,248,252,233,244,66,255,248,88,129,252,248,239,15,130,
+ 244,63,255,129,122,253,4,239,15,133,244,248,139,42,131,252,253,0,15,137,244,
+ 71,252,247,221,15,136,244,247,248,89,248,71,139,90,252,252,199,66,252,252,
+ 237,137,106,252,248,252,233,244,66,248,1,139,90,252,252,199,66,252,252,0,
+ 0,224,65,199,66,252,248,0,0,0,0,252,233,244,66,248,2,15,135,244,63,255,129,
+ 122,253,4,239,15,131,244,63,255,252,242,15,16,2,72,184,237,237,102,72,15,
+ 110,200,15,84,193,248,72,139,90,252,252,252,242,15,17,66,252,248,255,221,
+ 2,217,225,248,72,248,73,139,90,252,252,221,90,252,248,255,248,66,184,237,
+ 248,78,137,68,36,4,248,64,252,247,195,237,15,133,244,253,248,5,56,67,252,
+ 255,15,135,244,252,15,182,75,252,253,72,252,247,209,141,20,202,139,3,15,182,
+ 204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,6,199,68,194,
+ 252,244,237,131,192,1,252,233,244,5,248,7,72,199,193,252,248,252,255,252,
+ 255,252,255,252,233,244,14,248,90,255,129,122,253,4,239,15,133,244,247,139,
+ 42,252,233,244,71,248,1,15,135,244,63,255,252,242,15,16,2,232,244,91,255,
+ 252,242,15,45,232,129,252,253,0,0,0,128,15,133,244,71,252,242,15,42,205,102,
+ 15,46,193,15,138,244,72,15,132,244,71,255,221,2,232,244,91,255,248,92,255,
+ 252,242,15,16,2,232,244,93,255,221,2,232,244,93,255,248,94,129,252,248,239,
+ 15,130,244,63,129,122,253,4,239,15,131,244,63,252,242,15,81,2,252,233,244,
+ 72,255,248,94,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,
+ 63,221,2,217,252,250,252,233,244,73,255,248,95,129,252,248,239,15,130,244,
+ 63,129,122,253,4,239,15,131,244,63,217,252,237,221,2,217,252,241,252,233,
+ 244,73,248,96,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,
+ 63,217,252,236,221,2,217,252,241,252,233,244,73,248,97,129,252,248,239,255,
+ 15,130,244,63,129,122,253,4,239,15,131,244,63,221,2,232,244,98,252,233,244,
+ 73,248,99,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,221,
+ 2,217,252,254,252,233,244,73,248,100,129,252,248,239,255,15,130,244,63,129,
+ 122,253,4,239,15,131,244,63,221,2,217,252,255,252,233,244,73,248,101,129,
+ 252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,221,2,217,252,242,
+ 221,216,252,233,244,73,248,102,129,252,248,239,15,130,244,63,255,129,122,
+ 253,4,239,15,131,244,63,221,2,217,192,216,200,217,232,222,225,217,252,250,
+ 217,252,243,252,233,244,73,248,103,129,252,248,239,15,130,244,63,129,122,
+ 253,4,239,15,131,244,63,221,2,217,192,216,200,217,232,222,225,217,252,250,
+ 217,201,217,252,243,252,233,244,73,248,104,129,252,248,239,15,130,244,63,
+ 129,122,253,4,239,15,131,244,63,255,221,2,217,232,217,252,243,252,233,244,
+ 73,255,248,105,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,
+ 63,252,242,15,16,2,255,137,213,232,251,1,17,137,252,234,252,233,244,72,255,
+ 248,106,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,252,
+ 242,15,16,2,255,137,213,232,251,1,18,137,252,234,252,233,244,72,255,248,107,
+ 129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,252,242,15,
+ 16,2,255,137,213,232,251,1,19,137,252,234,252,233,244,72,248,108,255,248,
+ 109,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,252,242,
+ 15,16,2,139,106,252,248,252,242,15,89,133,233,252,233,244,72,255,248,109,
+ 129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,221,2,139,106,
+ 252,248,220,141,233,252,233,244,73,255,248,110,129,252,248,239,15,130,244,
+ 63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,63,221,2,
+ 221,66,8,217,252,243,252,233,244,73,248,111,129,252,248,239,15,130,244,63,
+ 129,122,253,4,239,15,131,244,63,129,122,253,12,239,255,15,131,244,63,221,
+ 66,8,221,2,217,252,253,221,217,252,233,244,73,248,112,129,252,248,239,15,
+ 130,244,63,139,106,4,129,252,253,239,15,131,244,63,139,90,252,252,139,2,137,
+ 106,252,252,137,66,252,248,209,229,129,252,253,0,0,224,252,255,15,131,244,
+ 249,9,232,15,132,244,249,184,252,254,3,0,0,129,252,253,0,0,32,0,15,130,244,
+ 250,248,1,193,252,237,21,41,197,255,252,242,15,42,197,255,137,44,36,219,4,
+ 36,255,139,106,252,252,129,229,252,255,252,255,15,128,129,205,0,0,224,63,
+ 137,106,252,252,248,2,255,252,242,15,17,2,255,221,26,255,184,237,252,233,
+ 244,78,248,3,255,15,87,192,252,233,244,2,255,217,252,238,252,233,244,2,255,
+ 248,4,255,252,242,15,16,2,72,189,237,237,102,72,15,110,205,252,242,15,89,
+ 193,252,242,15,17,66,252,248,255,221,2,199,4,36,0,0,128,90,216,12,36,221,
+ 90,252,248,255,139,106,252,252,184,52,4,0,0,209,229,252,233,244,1,255,248,
+ 113,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,252,242,
+ 15,16,2,255,248,113,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,
+ 244,63,221,2,255,139,106,4,139,90,252,252,209,229,129,252,253,0,0,224,252,
+ 255,15,132,244,250,255,15,40,224,232,244,114,252,242,15,92,224,248,1,252,
+ 242,15,17,66,252,248,252,242,15,17,34,255,217,192,232,244,114,220,252,233,
+ 248,1,221,90,252,248,221,26,255,139,66,252,252,139,106,4,49,232,15,136,244,
+ 249,248,2,184,237,252,233,244,78,248,3,129,252,245,0,0,0,128,137,106,4,252,
+ 233,244,2,248,4,255,15,87,228,252,233,244,1,255,217,252,238,217,201,252,233,
+ 244,1,255,248,115,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,
+ 244,63,129,122,253,12,239,15,131,244,63,221,66,8,221,2,248,1,217,252,248,
+ 223,224,158,15,138,244,1,221,217,252,233,244,73,255,248,116,129,252,248,239,
+ 15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,
+ 63,252,242,15,16,2,252,242,15,16,74,8,232,244,117,252,233,244,72,255,248,
+ 116,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,
+ 253,12,239,15,131,244,63,221,2,221,66,8,232,244,117,252,233,244,73,255,248,
+ 118,185,2,0,0,0,129,122,253,4,239,255,15,133,244,250,139,42,248,1,57,193,
+ 15,131,244,71,129,124,253,202,252,252,239,15,133,244,249,59,108,202,252,248,
+ 15,79,108,202,252,248,131,193,1,252,233,244,1,248,3,15,135,244,63,255,252,
+ 233,244,252,248,4,15,135,244,63,255,252,242,15,16,2,248,5,57,193,15,131,244,
+ 72,129,124,253,202,252,252,239,255,15,130,244,252,15,135,244,63,252,242,15,
+ 42,76,202,252,248,252,233,244,253,255,248,6,252,242,15,16,76,202,252,248,
+ 248,7,252,242,15,93,193,131,193,1,252,233,244,5,255,248,119,185,2,0,0,0,129,
+ 122,253,4,239,255,15,133,244,250,139,42,248,1,57,193,15,131,244,71,129,124,
+ 253,202,252,252,239,15,133,244,249,59,108,202,252,248,15,76,108,202,252,248,
+ 131,193,1,252,233,244,1,248,3,15,135,244,63,255,248,6,252,242,15,16,76,202,
+ 252,248,248,7,252,242,15,95,193,131,193,1,252,233,244,5,255,248,9,221,216,
+ 252,233,244,63,255,248,120,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,133,244,63,139,42,255,139,173,233,252,233,244,71,255,252,242,15,42,133,
+ 233,252,233,244,72,255,219,133,233,252,233,244,73,255,248,121,129,252,248,
+ 239,15,133,244,63,129,122,253,4,239,15,133,244,63,139,42,139,90,252,252,131,
+ 189,233,1,15,130,244,81,15,182,173,233,255,252,242,15,42,197,252,233,244,
+ 72,255,137,44,36,219,4,36,252,233,244,73,255,248,122,65,139,174,233,65,59,
+ 174,233,15,130,244,247,232,244,75,248,1,129,252,248,239,15,133,244,63,129,
+ 122,253,4,239,255,15,133,244,63,139,42,129,252,253,252,255,0,0,0,15,135,244,
+ 63,137,108,36,4,255,15,131,244,63,252,242,15,44,42,129,252,253,252,255,0,
+ 0,0,15,135,244,63,137,108,36,4,255,15,131,244,63,221,2,219,92,36,4,129,124,
+ 36,4,252,255,0,0,0,15,135,244,63,255,199,68,36,8,1,0,0,0,72,141,68,36,4,248,
+ 123,139,108,36,24,137,149,233,139,84,36,8,72,137,198,137,252,239,137,92,36,
+ 28,232,251,1,20,139,149,233,139,90,252,252,199,66,252,252,237,137,66,252,
+ 248,252,233,244,66,248,124,65,139,174,233,65,59,174,233,15,130,244,247,232,
+ 244,75,248,1,199,68,36,4,252,255,252,255,252,255,252,255,129,252,248,239,
+ 15,130,244,63,15,134,244,247,129,122,253,20,239,255,15,133,244,63,139,106,
+ 16,137,108,36,4,255,15,131,244,63,252,242,15,44,106,16,137,108,36,4,255,15,
+ 131,244,63,221,66,16,219,92,36,4,255,248,1,129,122,253,4,239,15,133,244,63,
+ 129,122,253,12,239,255,139,42,137,108,36,8,139,173,233,255,139,74,8,255,252,
+ 242,15,44,74,8,255,139,68,36,4,57,197,15,130,244,251,248,2,133,201,15,142,
+ 244,253,248,3,139,108,36,8,41,200,15,140,244,125,141,172,253,13,233,131,192,
+ 1,248,4,137,68,36,8,137,232,252,233,244,123,248,5,15,140,244,252,141,68,40,
+ 1,252,233,244,2,248,6,137,232,252,233,244,2,248,7,255,15,132,244,254,1,252,
+ 233,131,193,1,15,143,244,3,248,8,185,1,0,0,0,252,233,244,3,248,125,49,192,
+ 252,233,244,4,248,126,129,252,248,239,15,130,244,63,65,139,174,233,65,59,
+ 174,233,15,130,244,247,232,244,75,248,1,255,129,122,253,4,239,15,133,244,
+ 63,129,122,253,12,239,139,42,255,15,133,244,63,139,66,8,255,15,131,244,63,
+ 252,242,15,44,66,8,255,15,131,244,63,221,66,8,219,92,36,4,139,68,36,4,255,
+ 133,192,15,142,244,125,131,189,233,1,15,130,244,125,15,133,244,127,65,57,
+ 134,233,15,130,244,127,15,182,141,233,65,139,174,233,137,68,36,8,248,1,136,
+ 77,0,131,197,1,131,232,1,15,133,244,1,65,139,134,233,252,233,244,123,248,
+ 128,129,252,248,239,255,15,130,244,63,65,139,174,233,65,59,174,233,15,130,
+ 244,247,232,244,75,248,1,129,122,253,4,239,15,133,244,63,139,42,139,133,233,
+ 133,192,15,132,244,125,65,57,134,233,15,130,244,129,129,197,239,137,92,36,
+ 4,137,68,36,8,65,139,158,233,248,1,255,15,182,77,0,131,197,1,131,232,1,136,
+ 12,3,15,133,244,1,137,216,139,92,36,4,252,233,244,123,248,130,129,252,248,
+ 239,15,130,244,63,65,139,174,233,65,59,174,233,15,130,244,247,232,244,75,
+ 248,1,129,122,253,4,239,15,133,244,63,139,42,139,133,233,65,57,134,233,255,
+ 15,130,244,129,129,197,239,137,92,36,4,137,68,36,8,65,139,158,233,252,233,
+ 244,249,248,1,15,182,76,5,0,131,252,249,65,15,130,244,248,131,252,249,90,
+ 15,135,244,248,131,252,241,32,248,2,136,12,3,248,3,131,232,1,15,137,244,1,
+ 137,216,139,92,36,4,252,233,244,123,248,131,129,252,248,239,15,130,244,63,
+ 255,65,139,174,233,65,59,174,233,15,130,244,247,232,244,75,248,1,129,122,
+ 253,4,239,15,133,244,63,139,42,139,133,233,65,57,134,233,15,130,244,129,129,
+ 197,239,137,92,36,4,137,68,36,8,65,139,158,233,252,233,244,249,248,1,15,182,
+ 76,5,0,131,252,249,97,15,130,244,248,255,131,252,249,122,15,135,244,248,131,
+ 252,241,32,248,2,136,12,3,248,3,131,232,1,15,137,244,1,137,216,139,92,36,
+ 4,252,233,244,123,248,132,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,133,244,63,137,213,139,58,232,251,1,21,137,252,234,255,137,197,252,233,
+ 244,71,255,252,242,15,42,192,252,233,244,72,255,248,133,129,252,248,239,15,
+ 130,244,63,129,122,253,4,239,255,15,133,244,247,139,42,252,233,244,89,248,
+ 1,15,135,244,63,255,252,242,15,16,2,72,189,237,237,102,72,15,110,205,252,
+ 242,15,88,193,102,15,126,197,255,252,233,244,89,255,248,134,129,252,248,239,
+ 15,130,244,63,255,72,189,237,237,102,72,15,110,205,255,199,4,36,0,0,192,89,
+ 255,15,133,244,247,139,42,252,233,244,248,248,1,15,135,244,63,255,252,242,
+ 15,16,2,252,242,15,88,193,102,15,126,197,255,248,2,137,68,36,4,141,68,194,
+ 252,240,248,1,57,208,15,134,244,89,129,120,253,4,239,255,15,133,244,248,35,
+ 40,131,232,8,252,233,244,1,248,2,15,135,244,135,255,15,131,244,135,255,252,
+ 242,15,16,0,252,242,15,88,193,102,15,126,193,33,205,255,131,232,8,252,233,
+ 244,1,248,136,129,252,248,239,15,130,244,63,255,15,133,244,248,11,40,131,
+ 232,8,252,233,244,1,248,2,15,135,244,135,255,252,242,15,16,0,252,242,15,88,
+ 193,102,15,126,193,9,205,255,131,232,8,252,233,244,1,248,137,129,252,248,
+ 239,15,130,244,63,255,15,133,244,248,51,40,131,232,8,252,233,244,1,248,2,
+ 15,135,244,135,255,252,242,15,16,0,252,242,15,88,193,102,15,126,193,49,205,
+ 255,131,232,8,252,233,244,1,248,138,129,252,248,239,15,130,244,63,129,122,
+ 253,4,239,255,248,2,15,205,252,233,244,89,248,139,129,252,248,239,15,130,
+ 244,63,129,122,253,4,239,255,248,2,252,247,213,255,248,89,252,242,15,42,197,
+ 252,233,244,72,255,248,135,139,68,36,4,252,233,244,63,255,248,140,129,252,
+ 248,239,15,130,244,63,129,122,253,4,239,255,248,2,129,122,253,12,239,15,133,
+ 244,63,139,74,8,255,248,140,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,131,244,63,129,122,253,12,239,15,131,244,63,252,242,15,16,2,252,242,15,
+ 16,74,8,72,189,237,237,102,72,15,110,213,252,242,15,88,194,252,242,15,88,
+ 202,102,15,126,197,102,15,126,201,255,211,229,252,233,244,89,255,248,141,
+ 129,252,248,239,15,130,244,63,129,122,253,4,239,255,248,141,129,252,248,239,
+ 15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,
+ 63,252,242,15,16,2,252,242,15,16,74,8,72,189,237,237,102,72,15,110,213,252,
+ 242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201,255,211,252,
+ 237,252,233,244,89,255,248,142,129,252,248,239,15,130,244,63,129,122,253,
+ 4,239,255,248,142,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,
+ 244,63,129,122,253,12,239,15,131,244,63,252,242,15,16,2,252,242,15,16,74,
+ 8,72,189,237,237,102,72,15,110,213,252,242,15,88,194,252,242,15,88,202,102,
+ 15,126,197,102,15,126,201,255,211,252,253,252,233,244,89,255,248,143,129,
+ 252,248,239,15,130,244,63,129,122,253,4,239,255,248,143,129,252,248,239,15,
+ 130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,
+ 63,252,242,15,16,2,252,242,15,16,74,8,72,189,237,237,102,72,15,110,213,252,
+ 242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201,255,211,197,
+ 252,233,244,89,255,248,144,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 255,248,144,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,
+ 129,122,253,12,239,15,131,244,63,252,242,15,16,2,252,242,15,16,74,8,72,189,
+ 237,237,102,72,15,110,213,252,242,15,88,194,252,242,15,88,202,102,15,126,
+ 197,102,15,126,201,255,211,205,252,233,244,89,248,127,184,237,252,233,244,
+ 63,248,129,184,237,248,63,139,108,36,24,139,90,252,252,137,92,36,28,137,149,
+ 233,141,68,194,252,248,141,136,233,137,133,233,139,66,252,248,59,141,233,
+ 15,135,244,251,137,252,239,252,255,144,233,139,149,233,133,192,15,143,244,
+ 78,248,1,255,139,141,233,41,209,193,252,233,3,133,192,141,65,1,139,106,252,
+ 248,15,133,244,33,139,157,233,139,11,15,182,252,233,15,182,205,131,195,4,
+ 65,252,255,36,252,238,248,33,137,209,252,247,195,237,15,133,244,249,15,182,
+ 107,252,253,72,252,247,213,141,20,252,234,252,233,244,28,248,3,137,221,131,
+ 229,252,248,41,252,234,252,233,244,28,248,5,190,237,137,252,239,232,251,1,
+ 0,139,149,233,49,192,252,233,244,1,248,75,93,72,137,108,36,8,139,108,36,24,
+ 137,92,36,28,137,149,233,255,141,68,194,252,248,137,252,239,137,133,233,232,
+ 251,1,22,139,149,233,139,133,233,41,208,193,232,3,131,192,1,72,139,108,36,
+ 8,85,195,248,145,255,65,15,182,134,233,168,235,15,133,244,251,168,235,15,
+ 133,244,247,168,235,15,132,244,247,65,252,255,142,233,252,233,244,247,255,
+ 248,146,65,15,182,134,233,168,235,15,133,244,251,252,233,244,247,248,147,
+ 65,15,182,134,233,168,235,15,133,244,251,168,235,15,132,244,251,65,252,255,
+ 142,233,15,132,244,247,168,235,15,132,244,251,248,1,255,139,108,36,24,137,
+ 149,233,137,222,137,252,239,232,251,1,23,248,3,139,149,233,248,4,15,182,75,
+ 252,253,248,5,15,182,107,252,252,15,183,67,252,254,65,252,255,164,253,252,
+ 238,233,248,148,131,195,4,139,77,232,137,76,36,4,252,233,244,4,248,149,255,
+ 139,106,252,248,139,173,233,15,182,133,233,141,4,194,139,108,36,24,137,149,
+ 233,137,133,233,137,222,65,141,190,233,73,137,174,233,137,92,36,28,232,251,
+ 1,24,252,233,244,3,255,248,150,137,92,36,28,255,248,151,255,137,92,36,28,
+ 131,203,1,248,1,255,141,68,194,252,248,139,108,36,24,137,149,233,137,133,
+ 233,137,222,137,252,239,232,251,1,25,199,68,36,28,0,0,0,0,255,131,227,252,
+ 254,255,139,149,233,72,137,193,139,133,233,41,208,72,137,205,15,182,75,252,
+ 253,193,232,3,131,192,1,252,255,229,248,152,255,65,85,65,84,65,83,65,82,65,
+ 81,65,80,87,86,85,72,141,108,36,88,85,83,82,81,80,15,182,69,252,248,138,101,
+ 252,240,76,137,125,252,248,76,137,117,252,240,68,139,117,0,65,139,142,233,
+ 65,199,134,233,237,65,137,134,233,65,137,142,233,72,129,252,236,239,72,131,
+ 197,128,252,242,68,15,17,125,252,248,252,242,68,15,17,117,252,240,252,242,
+ 68,15,17,109,232,252,242,68,15,17,101,224,252,242,68,15,17,93,216,252,242,
+ 68,15,17,85,208,252,242,68,15,17,77,200,252,242,68,15,17,69,192,252,242,15,
+ 17,125,184,252,242,15,17,117,176,252,242,15,17,109,168,252,242,15,17,101,
+ 160,252,242,15,17,93,152,252,242,15,17,85,144,252,242,15,17,77,136,252,242,
+ 15,17,69,128,65,139,174,233,65,139,150,233,73,137,174,233,65,199,134,233,
+ 0,0,0,0,137,149,233,72,137,230,65,141,190,233,232,251,1,26,72,139,141,233,
+ 72,129,225,239,72,137,204,137,169,233,139,149,233,139,153,233,252,233,244,
+ 247,255,248,153,255,72,131,196,16,248,1,76,139,108,36,8,76,139,36,36,133,
+ 192,15,136,244,249,137,68,36,4,68,139,122,252,248,69,139,191,233,69,139,191,
+ 233,65,199,134,233,0,0,0,0,65,199,134,233,237,139,3,15,182,204,15,182,232,
+ 131,195,4,193,232,16,129,252,253,239,15,130,244,248,139,68,36,4,248,2,65,
+ 252,255,36,252,238,248,3,252,247,216,137,252,239,137,198,232,251,1,1,255,
+ 248,91,255,217,124,36,4,137,68,36,8,102,184,0,4,102,11,68,36,4,102,37,252,
+ 255,252,247,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,139,68,
+ 36,8,195,255,248,154,72,184,237,237,102,72,15,110,208,72,184,237,237,102,
+ 72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102,15,
+ 85,208,252,242,15,88,203,252,242,15,92,203,102,15,86,202,72,184,237,237,102,
+ 72,15,110,208,252,242,15,194,193,1,102,15,84,194,252,242,15,92,200,15,40,
+ 193,248,1,195,248,93,255,217,124,36,4,137,68,36,8,102,184,0,8,102,11,68,36,
+ 4,102,37,252,255,252,251,102,137,68,36,6,217,108,36,6,217,252,252,217,108,
+ 36,4,139,68,36,8,195,255,248,155,72,184,237,237,102,72,15,110,208,72,184,
+ 237,237,102,72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15,134,244,
+ 247,102,15,85,208,252,242,15,88,203,252,242,15,92,203,102,15,86,202,72,184,
+ 237,237,102,72,15,110,208,252,242,15,194,193,6,102,15,84,194,252,242,15,92,
+ 200,15,40,193,248,1,195,248,114,255,217,124,36,4,137,68,36,8,102,184,0,12,
+ 102,11,68,36,4,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,139,
+ 68,36,8,195,255,248,156,72,184,237,237,102,72,15,110,208,72,184,237,237,102,
+ 72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102,15,
+ 85,208,15,40,193,252,242,15,88,203,252,242,15,92,203,72,184,237,237,102,72,
+ 15,110,216,252,242,15,194,193,1,102,15,84,195,252,242,15,92,200,102,15,86,
+ 202,15,40,193,248,1,195,248,157,255,15,40,232,252,242,15,94,193,72,184,237,
+ 237,102,72,15,110,208,72,184,237,237,102,72,15,110,216,15,40,224,102,15,84,
+ 226,102,15,46,220,15,134,244,247,102,15,85,208,252,242,15,88,227,252,242,
+ 15,92,227,102,15,86,226,72,184,237,237,102,72,15,110,208,252,242,15,194,196,
+ 1,102,15,84,194,252,242,15,92,224,15,40,197,252,242,15,89,204,252,242,15,
+ 92,193,195,248,1,252,242,15,89,200,15,40,197,252,242,15,92,193,195,255,217,
+ 193,216,252,241,217,124,36,4,102,184,0,4,102,11,68,36,4,102,37,252,255,252,
+ 247,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,222,201,222,252,
+ 233,195,255,248,98,217,252,234,222,201,248,158,217,84,36,252,248,129,124,
+ 36,252,248,0,0,128,127,15,132,244,247,129,124,36,252,248,0,0,128,252,255,
+ 15,132,244,248,248,159,217,192,217,252,252,220,252,233,217,201,217,252,240,
+ 217,232,222,193,217,252,253,221,217,248,1,195,248,2,221,216,217,252,238,195,
+ 255,248,117,255,248,160,252,242,15,45,193,252,242,15,42,208,102,15,46,202,
+ 15,133,244,254,15,138,244,255,248,161,131,252,248,1,15,142,244,252,248,1,
+ 169,1,0,0,0,15,133,244,248,252,242,15,89,192,209,232,252,233,244,1,248,2,
+ 209,232,15,132,244,251,15,40,200,248,3,252,242,15,89,192,209,232,15,132,244,
+ 250,15,131,244,3,255,252,242,15,89,200,252,233,244,3,248,4,252,242,15,89,
+ 193,248,5,195,248,6,15,132,244,5,15,130,244,253,252,247,216,232,244,1,72,
+ 184,237,237,102,72,15,110,200,252,242,15,94,200,15,40,193,195,248,7,72,184,
+ 237,237,102,72,15,110,192,195,248,8,102,72,15,126,200,72,209,224,72,193,192,
+ 12,72,61,252,254,15,0,0,15,132,244,248,102,72,15,126,192,72,209,224,15,132,
+ 244,250,255,72,193,192,12,72,61,252,254,15,0,0,15,132,244,251,252,242,15,
+ 17,76,36,252,240,252,242,15,17,68,36,252,248,221,68,36,252,240,221,68,36,
+ 252,248,217,252,241,217,192,217,252,252,220,252,233,217,201,217,252,240,217,
+ 232,222,193,217,252,253,221,217,221,92,36,252,248,252,242,15,16,68,36,252,
+ 248,195,248,9,72,184,237,237,102,72,15,110,208,102,15,46,194,15,132,244,247,
+ 15,40,193,248,1,195,248,2,72,184,237,237,102,72,15,110,208,102,15,84,194,
+ 72,184,237,237,102,72,15,110,208,102,15,46,194,15,132,244,1,102,15,80,193,
+ 15,87,192,136,196,15,146,208,48,224,15,133,244,1,248,3,72,184,237,237,255,
+ 102,72,15,110,192,195,248,4,102,15,80,193,133,192,15,133,244,3,15,87,192,
+ 195,248,5,102,15,80,193,133,192,15,132,244,3,15,87,192,195,248,162,255,131,
+ 252,255,1,15,130,244,91,15,132,244,93,131,252,255,3,15,130,244,114,15,135,
+ 244,248,252,242,15,81,192,195,248,2,252,242,15,17,68,36,252,248,221,68,36,
+ 252,248,131,252,255,5,15,135,244,248,15,132,244,247,232,244,98,252,233,244,
+ 253,248,1,232,244,158,255,252,233,244,253,248,2,131,252,255,7,15,132,244,
+ 247,15,135,244,248,217,252,237,217,201,217,252,241,252,233,244,253,248,1,
+ 217,232,217,201,217,252,241,252,233,244,253,248,2,131,252,255,9,15,132,244,
+ 247,15,135,244,248,217,252,236,217,201,217,252,241,252,233,244,253,248,1,
+ 255,217,252,254,252,233,244,253,248,2,131,252,255,11,15,132,244,247,15,135,
+ 244,255,217,252,255,252,233,244,253,248,1,217,252,242,221,216,248,7,221,92,
+ 36,252,248,252,242,15,16,68,36,252,248,195,255,139,124,36,12,221,68,36,4,
+ 131,252,255,1,15,130,244,91,15,132,244,93,131,252,255,3,15,130,244,114,15,
+ 135,244,248,217,252,250,195,248,2,131,252,255,5,15,130,244,98,15,132,244,
+ 158,131,252,255,7,15,132,244,247,15,135,244,248,217,252,237,217,201,217,252,
+ 241,195,248,1,217,232,217,201,217,252,241,195,248,2,131,252,255,9,15,132,
+ 244,247,255,15,135,244,248,217,252,236,217,201,217,252,241,195,248,1,217,
+ 252,254,195,248,2,131,252,255,11,15,132,244,247,15,135,244,255,217,252,255,
+ 195,248,1,217,252,242,221,216,195,255,248,9,204,255,248,163,255,131,252,255,
+ 1,15,132,244,247,15,135,244,248,252,242,15,88,193,195,248,1,252,242,15,92,
+ 193,195,248,2,131,252,255,3,15,132,244,247,15,135,244,248,252,242,15,89,193,
+ 195,248,1,252,242,15,94,193,195,248,2,131,252,255,5,15,130,244,157,15,132,
+ 244,117,131,252,255,7,15,132,244,247,15,135,244,248,72,184,237,237,255,102,
+ 72,15,110,200,15,87,193,195,248,1,72,184,237,237,102,72,15,110,200,15,84,
+ 193,195,248,2,131,252,255,9,15,135,244,248,252,242,15,17,68,36,252,248,252,
+ 242,15,17,76,36,252,240,221,68,36,252,248,221,68,36,252,240,15,132,244,247,
+ 217,252,243,248,7,221,92,36,252,248,252,242,15,16,68,36,252,248,195,248,1,
+ 217,201,217,252,253,221,217,252,233,244,7,248,2,131,252,255,11,15,132,244,
+ 247,15,135,244,255,252,242,15,93,193,195,248,1,252,242,15,95,193,195,248,
+ 9,204,255,139,68,36,20,221,68,36,4,221,68,36,12,131,252,248,1,15,132,244,
+ 247,15,135,244,248,222,193,195,248,1,222,252,233,195,248,2,131,252,248,3,
+ 15,132,244,247,15,135,244,248,222,201,195,248,1,222,252,249,195,248,2,131,
+ 252,248,5,15,130,244,157,15,132,244,117,131,252,248,7,15,132,244,247,15,135,
+ 244,248,255,221,216,217,224,195,248,1,221,216,217,225,195,248,2,131,252,248,
+ 9,15,132,244,247,15,135,244,248,217,252,243,195,248,1,217,201,217,252,253,
+ 221,217,195,248,2,131,252,248,11,15,132,244,247,15,135,244,255,255,219,252,
+ 233,219,209,221,217,195,248,1,219,252,233,218,209,221,217,195,255,221,225,
+ 223,224,252,246,196,1,15,132,244,248,217,201,248,2,221,216,195,248,1,221,
+ 225,223,224,252,246,196,1,15,133,244,248,217,201,248,2,221,216,195,255,248,
+ 164,137,252,248,83,15,162,137,6,137,94,4,137,78,8,137,86,12,91,195,248,165,
+ 255,204,248,166,255,83,65,87,65,86,72,131,252,236,40,68,141,181,233,139,157,
+ 233,15,183,192,137,131,233,72,137,187,233,72,137,179,233,72,137,147,233,72,
+ 137,139,233,252,242,15,17,131,233,252,242,15,17,139,233,252,242,15,17,147,
+ 233,252,242,15,17,155,233,72,141,132,253,36,233,76,137,131,233,76,137,139,
+ 233,252,242,15,17,163,233,252,242,15,17,171,233,252,242,15,17,179,233,252,
+ 242,15,17,187,233,72,137,131,233,72,137,230,137,92,36,28,137,223,232,251,
+ 1,27,65,199,134,233,237,255,139,144,233,139,128,233,41,208,139,106,252,248,
+ 193,232,3,131,192,1,139,157,233,139,11,15,182,252,233,15,182,205,131,195,
+ 4,65,252,255,36,252,238,255,248,32,255,139,76,36,24,65,139,158,233,72,137,
+ 139,233,137,145,233,137,169,233,137,223,137,198,232,251,1,28,72,139,131,233,
+ 252,242,15,16,131,233,252,233,244,16,255,248,167,255,85,72,137,229,83,72,
+ 137,252,251,139,131,233,72,41,196,255,15,182,139,233,131,252,233,1,15,136,
+ 244,248,248,1,72,139,132,253,203,233,72,137,132,253,204,233,131,252,233,1,
+ 15,137,244,1,248,2,15,182,131,233,72,139,187,233,72,139,179,233,72,139,147,
+ 233,72,139,139,233,76,139,131,233,76,139,139,233,133,192,15,132,244,251,15,
+ 40,131,233,15,40,139,233,255,15,40,147,233,15,40,155,233,131,252,248,4,15,
+ 134,244,251,15,40,163,233,15,40,171,233,15,40,179,233,15,40,187,233,248,5,
+ 252,255,147,233,72,137,131,233,15,41,131,233,72,137,147,233,15,41,139,233,
+ 255,72,139,93,252,248,201,195,255,129,124,253,202,4,239,15,133,244,253,129,
+ 124,253,194,4,239,15,133,244,254,139,44,202,131,195,4,59,44,194,255,15,141,
+ 244,255,255,15,140,244,255,255,15,143,244,255,255,15,142,244,255,255,248,
+ 6,15,183,67,252,254,141,156,253,131,233,248,9,139,3,15,182,204,15,182,232,
+ 131,195,4,193,232,16,65,252,255,36,252,238,248,7,15,135,244,44,129,124,253,
+ 194,4,239,15,130,244,247,15,133,244,44,255,252,242,15,42,4,194,252,233,244,
+ 248,255,221,4,202,219,4,194,252,233,244,249,255,248,8,15,135,244,44,255,252,
+ 242,15,42,12,202,252,242,15,16,4,194,131,195,4,102,15,46,193,255,15,134,244,
+ 9,255,15,135,244,9,255,15,130,244,9,255,15,131,244,9,255,252,233,244,6,255,
+ 219,4,202,252,233,244,248,255,129,124,253,202,4,239,15,131,244,44,129,124,
+ 253,194,4,239,15,131,244,44,255,248,1,252,242,15,16,4,194,248,2,131,195,4,
+ 102,15,46,4,202,248,3,255,248,1,221,4,202,248,2,221,4,194,248,3,131,195,4,
+ 255,223,252,233,221,216,255,218,252,233,223,224,158,255,15,135,244,247,255,
+ 15,130,244,247,255,15,131,244,247,255,15,183,67,252,254,141,156,253,131,233,
+ 248,1,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,
+ 238,255,139,108,194,4,131,195,4,255,129,252,253,239,15,133,244,253,129,124,
+ 253,202,4,239,15,133,244,254,139,44,194,59,44,202,255,15,133,244,255,255,
+ 15,132,244,255,255,15,183,67,252,254,141,156,253,131,233,248,9,139,3,15,182,
+ 204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,7,15,135,244,
+ 251,129,124,253,202,4,239,15,130,244,247,15,133,244,251,255,252,242,15,42,
+ 4,202,255,219,4,202,255,252,233,244,248,248,8,15,135,244,251,255,252,242,
+ 15,42,4,194,102,15,46,4,202,255,219,4,194,221,4,202,255,252,233,244,250,255,
+ 129,252,253,239,15,131,244,251,129,124,253,202,4,239,15,131,244,251,255,248,
+ 1,252,242,15,16,4,202,248,2,102,15,46,4,194,248,4,255,248,1,221,4,202,248,
+ 2,221,4,194,248,4,255,15,138,244,248,15,133,244,248,255,15,138,244,248,15,
+ 132,244,247,255,248,1,15,183,67,252,254,141,156,253,131,233,248,2,255,248,
+ 2,15,183,67,252,254,141,156,253,131,233,248,1,255,252,233,244,9,255,248,5,
+ 255,129,252,253,239,15,132,244,49,129,124,253,202,4,239,15,132,244,49,255,
+ 57,108,202,4,15,133,244,2,129,252,253,239,15,131,244,1,139,12,202,139,4,194,
+ 57,193,15,132,244,1,129,252,253,239,15,135,244,2,129,252,253,239,15,130,244,
+ 2,139,169,233,133,252,237,15,132,244,2,252,246,133,233,235,15,133,244,2,255,
+ 49,252,237,255,189,1,0,0,0,255,252,233,244,48,255,248,3,129,252,253,239,255,
+ 15,133,244,9,255,252,233,244,49,255,72,252,247,208,139,108,202,4,131,195,
+ 4,129,252,253,239,15,133,244,249,139,12,202,65,59,12,135,255,139,108,202,
+ 4,131,195,4,255,129,252,253,239,15,133,244,253,65,129,124,253,199,4,239,15,
+ 133,244,254,65,139,44,199,59,44,202,255,15,183,67,252,254,141,156,253,131,
+ 233,248,9,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,
+ 252,238,248,7,15,135,244,249,65,129,124,253,199,4,239,15,130,244,247,255,
+ 252,242,65,15,42,4,199,255,65,219,4,199,255,252,233,244,248,248,8,255,252,
+ 242,15,42,4,202,102,65,15,46,4,199,255,219,4,202,65,221,4,199,255,129,252,
+ 253,239,15,131,244,249,255,248,1,252,242,65,15,16,4,199,248,2,102,15,46,4,
+ 202,248,4,255,248,1,65,221,4,199,248,2,221,4,202,248,4,255,72,252,247,208,
+ 139,108,202,4,131,195,4,57,197,255,15,133,244,249,15,183,67,252,254,141,156,
+ 253,131,233,248,2,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,
+ 255,36,252,238,248,3,129,252,253,239,15,133,244,2,252,233,244,49,255,15,132,
+ 244,248,129,252,253,239,15,132,244,49,15,183,67,252,254,141,156,253,131,233,
+ 248,2,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,
+ 238,255,139,108,194,4,131,195,4,129,252,253,239,255,137,108,202,4,139,44,
+ 194,137,44,202,255,72,139,44,194,72,137,44,202,139,3,15,182,204,15,182,232,
+ 131,195,4,193,232,16,65,252,255,36,252,238,255,49,252,237,129,124,253,194,
+ 4,239,129,213,239,137,108,202,4,139,3,15,182,204,15,182,232,131,195,4,193,
+ 232,16,65,252,255,36,252,238,255,129,124,253,194,4,239,15,133,244,251,139,
+ 44,194,252,247,221,15,128,244,250,199,68,202,4,237,137,44,202,248,9,139,3,
+ 15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,4,199,
+ 68,202,4,0,0,224,65,199,4,202,0,0,0,0,252,233,244,9,248,5,15,135,244,54,255,
+ 129,124,253,194,4,239,15,131,244,54,255,252,242,15,16,4,194,72,184,237,237,
+ 102,72,15,110,200,15,87,193,252,242,15,17,4,202,255,221,4,194,217,224,221,
+ 28,202,255,129,124,253,194,4,239,15,133,244,248,139,4,194,255,139,128,233,
+ 248,1,199,68,202,4,237,137,4,202,255,15,87,192,252,242,15,42,128,233,248,
+ 1,252,242,15,17,4,202,255,219,128,233,248,1,221,28,202,255,139,3,15,182,204,
+ 15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,2,129,124,253,194,
+ 4,239,15,133,244,57,139,60,194,255,139,175,233,131,252,253,0,15,133,244,255,
+ 248,3,255,248,58,137,213,232,251,1,21,255,252,242,15,42,192,255,137,252,234,
+ 15,182,75,252,253,252,233,244,1,255,248,9,252,246,133,233,235,15,133,244,
+ 3,252,233,244,57,255,15,182,252,236,15,182,192,255,129,124,253,252,234,4,
+ 239,15,133,244,51,65,129,124,253,199,4,239,15,133,244,51,139,44,252,234,65,
+ 3,44,199,15,128,244,50,255,129,124,253,252,234,4,239,15,133,244,53,65,129,
+ 124,253,199,4,239,15,133,244,53,65,139,4,199,3,4,252,234,15,128,244,52,255,
+ 129,124,253,252,234,4,239,15,133,244,56,129,124,253,194,4,239,15,133,244,
+ 56,139,44,252,234,3,44,194,15,128,244,55,255,199,68,202,4,237,255,129,124,
+ 253,252,234,4,239,15,131,244,51,255,65,129,124,253,199,4,239,15,131,244,51,
+ 255,252,242,15,16,4,252,234,252,242,65,15,88,4,199,255,221,4,252,234,65,220,
+ 4,199,255,129,124,253,252,234,4,239,15,131,244,53,255,65,129,124,253,199,
+ 4,239,15,131,244,53,255,252,242,65,15,16,4,199,252,242,15,88,4,252,234,255,
+ 65,221,4,199,220,4,252,234,255,129,124,253,252,234,4,239,15,131,244,56,129,
+ 124,253,194,4,239,15,131,244,56,255,252,242,15,16,4,252,234,252,242,15,88,
+ 4,194,255,221,4,252,234,220,4,194,255,129,124,253,252,234,4,239,15,133,244,
+ 51,65,129,124,253,199,4,239,15,133,244,51,139,44,252,234,65,43,44,199,15,
+ 128,244,50,255,129,124,253,252,234,4,239,15,133,244,53,65,129,124,253,199,
+ 4,239,15,133,244,53,65,139,4,199,43,4,252,234,15,128,244,52,255,129,124,253,
+ 252,234,4,239,15,133,244,56,129,124,253,194,4,239,15,133,244,56,139,44,252,
+ 234,43,44,194,15,128,244,55,255,252,242,15,16,4,252,234,252,242,65,15,92,
+ 4,199,255,221,4,252,234,65,220,36,199,255,252,242,65,15,16,4,199,252,242,
+ 15,92,4,252,234,255,65,221,4,199,220,36,252,234,255,252,242,15,16,4,252,234,
+ 252,242,15,92,4,194,255,221,4,252,234,220,36,194,255,129,124,253,252,234,
+ 4,239,15,133,244,51,65,129,124,253,199,4,239,15,133,244,51,139,44,252,234,
+ 65,15,175,44,199,15,128,244,50,255,129,124,253,252,234,4,239,15,133,244,53,
+ 65,129,124,253,199,4,239,15,133,244,53,65,139,4,199,15,175,4,252,234,15,128,
+ 244,52,255,129,124,253,252,234,4,239,15,133,244,56,129,124,253,194,4,239,
+ 15,133,244,56,139,44,252,234,15,175,44,194,15,128,244,55,255,252,242,15,16,
+ 4,252,234,252,242,65,15,89,4,199,255,221,4,252,234,65,220,12,199,255,252,
+ 242,65,15,16,4,199,252,242,15,89,4,252,234,255,65,221,4,199,220,12,252,234,
+ 255,252,242,15,16,4,252,234,252,242,15,89,4,194,255,221,4,252,234,220,12,
+ 194,255,252,242,15,16,4,252,234,252,242,65,15,94,4,199,255,221,4,252,234,
+ 65,220,52,199,255,252,242,65,15,16,4,199,252,242,15,94,4,252,234,255,65,221,
+ 4,199,220,52,252,234,255,252,242,15,16,4,252,234,252,242,15,94,4,194,255,
+ 221,4,252,234,220,52,194,255,252,242,15,16,4,252,234,252,242,65,15,16,12,
+ 199,255,221,4,252,234,65,221,4,199,255,252,242,65,15,16,4,199,252,242,15,
+ 16,12,252,234,255,65,221,4,199,221,4,252,234,255,252,242,15,16,4,252,234,
+ 252,242,15,16,12,194,255,221,4,252,234,221,4,194,255,248,168,232,244,157,
+ 255,252,233,244,168,255,232,244,117,255,15,182,252,236,15,182,192,139,124,
+ 36,24,137,151,233,141,52,194,137,194,41,252,234,248,36,137,252,253,137,92,
+ 36,28,232,251,1,29,139,149,233,133,192,15,133,244,45,15,182,107,252,255,15,
+ 182,75,252,253,72,139,4,252,234,72,137,4,202,139,3,15,182,204,15,182,232,
+ 131,195,4,193,232,16,65,252,255,36,252,238,255,72,252,247,208,65,139,4,135,
+ 199,68,202,4,237,137,4,202,139,3,15,182,204,15,182,232,131,195,4,193,232,
+ 16,65,252,255,36,252,238,255,15,191,192,199,68,202,4,237,137,4,202,255,15,
+ 191,192,252,242,15,42,192,252,242,15,17,4,202,255,223,67,252,254,221,28,202,
+ 255,252,242,65,15,16,4,199,252,242,15,17,4,202,255,65,221,4,199,221,28,202,
+ 255,72,252,247,208,137,68,202,4,139,3,15,182,204,15,182,232,131,195,4,193,
+ 232,16,65,252,255,36,252,238,255,141,76,202,12,141,68,194,4,189,237,137,105,
+ 252,248,248,1,137,41,131,193,8,57,193,15,134,244,1,139,3,15,182,204,15,182,
+ 232,131,195,4,193,232,16,65,252,255,36,252,238,255,139,106,252,248,139,172,
+ 253,133,233,139,173,233,72,139,69,0,72,137,4,202,139,3,15,182,204,15,182,
+ 232,131,195,4,193,232,16,65,252,255,36,252,238,255,139,106,252,248,139,172,
+ 253,141,233,128,189,233,0,139,173,233,139,12,194,139,68,194,4,137,77,0,137,
+ 69,4,15,132,244,247,252,246,133,233,235,15,133,244,248,248,1,139,3,15,182,
+ 204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,2,129,232,239,
+ 129,252,248,239,15,134,244,1,252,246,129,233,235,15,132,244,1,137,252,238,
+ 137,213,65,141,190,233,255,232,251,1,30,137,252,234,252,233,244,1,255,72,
+ 252,247,208,139,106,252,248,139,172,253,141,233,65,139,12,135,139,133,233,
+ 137,8,199,64,4,237,252,246,133,233,235,15,133,244,248,248,1,139,3,15,182,
+ 204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,2,252,246,129,
+ 233,235,15,132,244,1,128,189,233,0,15,132,244,1,137,213,137,198,65,141,190,
+ 233,232,251,1,30,137,252,234,252,233,244,1,255,139,106,252,248,255,252,242,
+ 65,15,16,4,199,255,139,172,253,141,233,139,141,233,255,252,242,15,17,1,255,
+ 221,25,255,72,252,247,208,139,106,252,248,139,172,253,141,233,139,141,233,
+ 137,65,4,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,
+ 238,255,141,156,253,131,233,139,108,36,24,131,189,233,0,15,132,244,247,137,
+ 149,233,141,52,202,137,252,239,232,251,1,31,139,149,233,248,1,139,3,15,182,
+ 204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,255,72,252,247,
+ 208,139,108,36,24,137,149,233,139,82,252,248,65,139,52,135,137,252,239,137,
+ 92,36,28,232,251,1,32,139,149,233,15,182,75,252,253,137,4,202,199,68,202,
+ 4,237,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,
+ 238,255,139,108,36,24,137,149,233,65,139,142,233,65,59,142,233,137,92,36,
+ 28,15,131,244,251,248,1,137,194,37,252,255,7,0,0,193,252,234,11,61,252,255,
+ 7,0,0,15,132,244,249,248,2,137,252,239,137,198,232,251,1,33,139,149,233,15,
+ 182,75,252,253,137,4,202,199,68,202,4,237,139,3,15,182,204,15,182,232,131,
+ 195,4,193,232,16,65,252,255,36,252,238,248,3,184,1,8,0,0,252,233,244,2,248,
+ 5,137,252,239,232,251,1,34,15,183,67,252,254,252,233,244,1,255,72,252,247,
+ 208,139,108,36,24,65,139,142,233,137,92,36,28,65,59,142,233,137,149,233,15,
+ 131,244,249,248,2,65,139,52,135,137,252,239,232,251,1,35,139,149,233,15,182,
+ 75,252,253,137,4,202,199,68,202,4,237,139,3,15,182,204,15,182,232,131,195,
+ 4,193,232,16,65,252,255,36,252,238,248,3,137,252,239,232,251,1,34,15,183,
+ 67,252,254,72,252,247,208,252,233,244,2,255,72,252,247,208,139,106,252,248,
+ 139,173,233,65,139,4,135,252,233,244,169,255,72,252,247,208,139,106,252,248,
+ 139,173,233,65,139,4,135,252,233,244,170,255,15,182,252,236,15,182,192,129,
+ 124,253,252,234,4,239,15,133,244,39,139,44,252,234,255,129,124,253,194,4,
+ 239,15,133,244,251,139,4,194,255,129,124,253,194,4,239,15,131,244,251,255,
+ 252,242,15,16,4,194,252,242,15,45,192,252,242,15,42,200,102,15,46,193,255,
+ 15,133,244,39,255,59,133,233,15,131,244,39,193,224,3,3,133,233,129,120,253,
+ 4,239,15,132,244,248,72,139,40,72,137,44,202,248,1,139,3,15,182,204,15,182,
+ 232,131,195,4,193,232,16,65,252,255,36,252,238,248,2,131,189,233,0,15,132,
+ 244,249,139,141,233,252,246,129,233,235,15,132,244,39,15,182,75,252,253,248,
+ 3,199,68,202,4,237,252,233,244,1,248,5,255,129,124,253,194,4,239,15,133,244,
+ 39,139,4,194,252,233,244,169,255,15,182,252,236,15,182,192,72,252,247,208,
+ 65,139,4,135,129,124,253,252,234,4,239,15,133,244,37,139,44,252,234,248,169,
+ 139,141,233,35,136,233,105,201,239,3,141,233,248,1,129,185,233,239,15,133,
+ 244,250,57,129,233,15,133,244,250,129,121,253,4,239,15,132,244,251,15,182,
+ 67,252,253,72,139,41,72,137,44,194,248,2,255,139,3,15,182,204,15,182,232,
+ 131,195,4,193,232,16,65,252,255,36,252,238,248,3,15,182,67,252,253,199,68,
+ 194,4,237,252,233,244,2,248,4,139,137,233,133,201,15,133,244,1,248,5,139,
+ 141,233,133,201,15,132,244,3,252,246,129,233,235,15,133,244,3,252,233,244,
+ 37,255,15,182,252,236,15,182,192,129,124,253,252,234,4,239,15,133,244,38,
+ 139,44,252,234,59,133,233,15,131,244,38,193,224,3,3,133,233,129,120,253,4,
+ 239,15,132,244,248,72,139,40,72,137,44,202,248,1,139,3,15,182,204,15,182,
+ 232,131,195,4,193,232,16,65,252,255,36,252,238,248,2,131,189,233,0,15,132,
+ 244,249,139,141,233,252,246,129,233,235,15,132,244,38,255,15,182,75,252,253,
+ 248,3,199,68,202,4,237,252,233,244,1,255,15,182,252,236,15,182,192,129,124,
+ 253,252,234,4,239,15,133,244,42,139,44,252,234,255,15,133,244,42,255,59,133,
+ 233,15,131,244,42,193,224,3,3,133,233,129,120,253,4,239,15,132,244,249,248,
+ 1,252,246,133,233,235,15,133,244,253,248,2,72,139,44,202,72,137,40,139,3,
+ 15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,3,131,
+ 189,233,0,15,132,244,1,139,141,233,252,246,129,233,235,255,15,132,244,42,
+ 15,182,75,252,253,252,233,244,1,248,5,129,124,253,194,4,239,15,133,244,42,
+ 139,4,194,252,233,244,170,248,7,128,165,233,235,65,139,142,233,65,137,174,
+ 233,137,141,233,15,182,75,252,253,252,233,244,2,255,15,182,252,236,15,182,
+ 192,72,252,247,208,65,139,4,135,129,124,253,252,234,4,239,15,133,244,40,139,
+ 44,252,234,248,170,139,141,233,35,136,233,105,201,239,198,133,233,0,3,141,
+ 233,248,1,129,185,233,239,15,133,244,251,57,129,233,15,133,244,251,129,121,
+ 253,4,239,15,132,244,250,248,2,255,252,246,133,233,235,15,133,244,253,248,
+ 3,15,182,67,252,253,72,139,44,194,72,137,41,139,3,15,182,204,15,182,232,131,
+ 195,4,193,232,16,65,252,255,36,252,238,248,4,131,189,233,0,15,132,244,2,137,
+ 12,36,139,141,233,252,246,129,233,235,15,132,244,40,139,12,36,252,233,244,
+ 2,248,5,139,137,233,133,201,15,133,244,1,255,139,141,233,133,201,15,132,244,
+ 252,252,246,129,233,235,15,132,244,40,248,6,137,4,36,199,68,36,4,237,137,
+ 108,36,8,139,124,36,24,137,151,233,72,141,20,36,137,252,238,137,252,253,137,
+ 92,36,28,232,251,1,36,139,149,233,139,108,36,8,137,193,252,233,244,2,248,
+ 7,128,165,233,235,65,139,134,233,65,137,174,233,137,133,233,252,233,244,3,
+ 255,15,182,252,236,15,182,192,129,124,253,252,234,4,239,15,133,244,41,139,
+ 44,252,234,59,133,233,15,131,244,41,193,224,3,3,133,233,129,120,253,4,239,
+ 15,132,244,249,248,1,252,246,133,233,235,15,133,244,253,248,2,72,139,12,202,
+ 72,137,8,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,
+ 238,248,3,131,189,233,0,15,132,244,1,255,139,141,233,252,246,129,233,235,
+ 15,132,244,41,15,182,75,252,253,252,233,244,1,248,7,128,165,233,235,65,139,
+ 142,233,65,137,174,233,137,141,233,15,182,75,252,253,252,233,244,2,255,68,
+ 137,60,36,69,139,60,199,248,1,141,12,202,139,105,252,248,252,246,133,233,
+ 235,15,133,244,253,248,2,139,68,36,4,131,232,1,15,132,244,250,68,1,252,248,
+ 59,133,233,15,135,244,251,68,41,252,248,65,193,231,3,68,3,189,233,248,3,72,
+ 139,41,131,193,8,73,137,47,65,131,199,8,131,232,1,15,133,244,3,248,4,68,139,
+ 60,36,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,
+ 238,248,5,139,124,36,24,137,151,233,137,252,238,137,194,137,252,253,137,92,
+ 36,28,232,251,1,37,139,149,233,15,182,75,252,253,252,233,244,1,248,7,255,
+ 128,165,233,235,65,139,134,233,65,137,174,233,137,133,233,252,233,244,2,255,
+ 3,68,36,4,255,129,124,253,202,4,239,139,44,202,15,133,244,59,141,84,202,8,
+ 137,90,252,252,139,157,233,139,11,15,182,252,233,15,182,205,131,195,4,65,
+ 252,255,36,252,238,255,141,76,202,8,65,137,215,139,105,252,248,129,121,253,
+ 252,252,239,15,133,244,29,248,60,139,90,252,252,252,247,195,237,15,133,244,
+ 253,248,1,137,106,252,248,137,68,36,4,131,232,1,15,132,244,249,248,2,72,139,
+ 41,131,193,8,73,137,47,65,131,199,8,131,232,1,15,133,244,2,139,106,252,248,
+ 248,3,139,68,36,4,128,189,233,1,15,135,244,251,248,4,139,157,233,139,11,15,
+ 182,252,233,15,182,205,131,195,4,65,252,255,36,252,238,248,5,255,252,247,
+ 195,237,15,133,244,4,15,182,75,252,253,72,252,247,209,141,12,202,68,139,121,
+ 252,248,69,139,191,233,69,139,191,233,252,233,244,4,248,7,129,252,235,239,
+ 252,247,195,237,15,133,244,254,41,218,65,137,215,139,90,252,252,252,233,244,
+ 1,248,8,129,195,239,252,233,244,1,255,141,76,202,8,72,139,105,232,72,139,
+ 65,252,240,72,137,41,72,137,65,8,139,105,224,139,65,228,137,105,252,248,137,
+ 65,252,252,129,252,248,239,184,237,15,133,244,29,137,202,137,90,252,252,139,
+ 157,233,139,11,15,182,252,233,15,182,205,131,195,4,65,252,255,36,252,238,
+ 255,68,137,60,36,68,137,116,36,4,139,108,202,252,240,139,68,202,252,248,68,
+ 139,181,233,131,195,4,68,139,189,233,248,1,68,57,252,240,15,131,244,251,65,
+ 129,124,253,199,4,239,15,132,244,250,255,219,68,202,252,248,255,73,139,44,
+ 199,72,137,108,202,8,131,192,1,255,137,68,202,252,248,248,2,15,183,67,252,
+ 254,141,156,253,131,233,248,3,68,139,116,36,4,68,139,60,36,139,3,15,182,204,
+ 15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,4,131,192,1,255,
+ 137,68,202,252,248,255,252,233,244,1,248,5,68,41,252,240,248,6,59,133,233,
+ 15,135,244,3,68,105,252,248,239,68,3,189,233,65,129,191,233,239,15,132,244,
+ 253,70,141,116,48,1,73,139,175,233,73,139,135,233,72,137,44,202,72,137,68,
+ 202,8,68,137,116,202,252,248,252,233,244,2,248,7,131,192,1,252,233,244,6,
+ 255,129,124,253,202,252,236,239,15,133,244,251,139,108,202,232,129,124,253,
+ 202,252,244,239,15,133,244,251,129,124,253,202,252,252,239,15,133,244,251,
+ 128,189,233,235,15,133,244,251,141,156,253,131,233,199,68,202,252,248,0,0,
+ 0,0,248,1,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,
+ 252,238,248,5,198,67,252,252,235,141,156,253,131,233,198,3,235,252,233,244,
+ 1,255,15,182,252,236,15,182,192,68,137,60,36,68,141,188,253,194,233,141,12,
+ 202,68,43,122,252,252,133,252,237,15,132,244,251,141,108,252,233,252,248,
+ 65,57,215,15,131,244,248,248,1,73,139,71,252,248,65,131,199,8,72,137,1,131,
+ 193,8,57,252,233,15,131,244,249,65,57,215,15,130,244,1,248,2,199,65,4,237,
+ 131,193,8,57,252,233,15,130,244,2,248,3,68,139,60,36,139,3,15,182,204,15,
+ 182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,5,199,68,36,4,1,0,
+ 0,0,137,208,68,41,252,248,15,134,244,3,137,197,193,252,237,3,131,197,1,137,
+ 108,36,4,139,108,36,24,1,200,59,133,233,15,135,244,253,248,6,255,73,139,71,
+ 252,248,65,131,199,8,72,137,1,131,193,8,65,57,215,15,130,244,6,252,233,244,
+ 3,248,7,137,149,233,137,141,233,137,92,36,28,65,41,215,139,116,36,4,131,252,
+ 238,1,137,252,239,232,251,1,0,139,149,233,139,141,233,65,1,215,252,233,244,
+ 6,255,193,225,3,255,248,1,139,90,252,252,137,68,36,4,252,247,195,237,15,133,
+ 244,253,255,248,13,65,137,215,131,232,1,15,132,244,249,248,2,73,139,44,15,
+ 73,137,111,252,248,65,131,199,8,131,232,1,15,133,244,2,248,3,139,68,36,4,
+ 15,182,107,252,255,248,5,57,197,15,135,244,252,255,72,139,44,10,72,137,106,
+ 252,248,255,248,5,56,67,252,255,15,135,244,252,255,15,182,75,252,253,72,252,
+ 247,209,141,20,202,68,139,122,252,248,69,139,191,233,69,139,191,233,139,3,
+ 15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,252,238,248,6,255,
+ 65,199,71,252,252,237,65,131,199,8,255,199,68,194,252,244,237,255,131,192,
+ 1,252,233,244,5,248,7,141,171,233,252,247,197,237,15,133,244,14,41,252,234,
+ 255,1,252,233,255,137,221,209,252,237,129,229,239,102,65,129,172,253,46,233,
+ 238,15,130,244,149,255,141,12,202,255,129,121,253,4,239,15,133,244,255,255,
+ 129,121,253,12,239,15,133,244,61,129,121,253,20,239,15,133,244,61,139,41,
+ 131,121,16,0,15,140,244,251,255,129,121,253,12,239,15,133,244,165,129,121,
+ 253,20,239,15,133,244,165,255,139,105,16,133,252,237,15,136,244,251,3,41,
+ 15,128,244,247,137,41,255,59,105,8,199,65,28,237,137,105,24,255,15,142,244,
+ 253,248,1,248,6,141,156,253,131,233,255,141,156,253,131,233,15,183,67,252,
+ 254,15,142,245,248,1,248,6,255,15,143,244,253,248,6,141,156,253,131,233,248,
+ 1,255,248,7,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,
+ 252,238,248,5,255,3,41,15,128,244,1,137,41,255,15,141,244,7,255,141,156,253,
+ 131,233,15,183,67,252,254,15,141,245,255,15,140,244,7,255,252,233,244,6,248,
+ 9,255,129,121,253,4,239,255,15,131,244,61,129,121,253,12,239,15,131,244,61,
+ 255,129,121,253,12,239,15,131,244,165,129,121,253,20,239,15,131,244,165,255,
+ 139,105,20,255,129,252,253,239,15,131,244,61,255,252,242,15,16,1,252,242,
+ 15,16,73,8,255,252,242,15,88,65,16,252,242,15,17,1,133,252,237,15,136,244,
+ 249,255,15,140,244,249,255,102,15,46,200,248,1,252,242,15,17,65,24,255,221,
+ 65,8,221,1,255,220,65,16,221,17,221,81,24,133,252,237,15,136,244,247,255,
+ 221,81,24,15,140,244,247,255,217,201,248,1,255,15,183,67,252,254,255,15,131,
+ 244,7,255,15,131,244,248,141,156,253,131,233,255,141,156,253,131,233,15,183,
+ 67,252,254,15,131,245,255,15,130,244,7,255,15,130,244,248,141,156,253,131,
+ 233,255,248,3,102,15,46,193,252,233,244,1,255,141,12,202,139,105,4,129,252,
+ 253,239,15,132,244,247,255,137,105,252,252,139,41,137,105,252,248,252,233,
+ 245,255,141,156,253,131,233,139,1,137,105,252,252,137,65,252,248,255,65,139,
+ 142,233,139,4,129,72,139,128,233,139,108,36,24,65,137,150,233,65,137,174,
+ 233,76,137,36,36,76,137,108,36,8,72,131,252,236,16,252,255,224,255,141,156,
+ 253,131,233,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,36,
+ 252,238,255,137,221,209,252,237,129,229,239,102,65,129,172,253,46,233,238,
+ 15,130,244,151,255,68,139,187,233,139,108,36,24,141,12,202,59,141,233,15,
+ 135,244,24,15,182,139,233,57,200,15,134,244,249,248,2,255,15,183,67,252,254,
+ 252,233,245,255,248,3,199,68,194,252,252,237,131,192,1,57,200,15,134,244,
+ 3,252,233,244,2,255,141,44,197,237,141,4,194,68,139,122,252,248,137,104,252,
+ 252,68,137,120,252,248,139,108,36,24,141,12,200,59,141,233,15,135,244,23,
+ 137,209,137,194,15,182,171,233,133,252,237,15,132,244,248,248,1,131,193,8,
+ 57,209,15,131,244,249,68,139,121,252,248,68,137,56,68,139,121,252,252,68,
+ 137,120,4,131,192,8,199,65,252,252,237,131,252,237,1,15,133,244,1,248,2,255,
+ 68,139,187,233,139,3,15,182,204,15,182,232,131,195,4,193,232,16,65,252,255,
+ 36,252,238,255,248,3,199,64,4,237,131,192,8,131,252,237,1,15,133,244,3,252,
+ 233,244,2,255,139,106,252,248,76,139,189,233,139,108,36,24,141,68,194,252,
+ 248,137,149,233,141,136,233,59,141,233,137,133,233,255,76,137,252,254,137,
+ 252,239,255,15,135,244,22,65,199,134,233,237,255,65,252,255,215,255,65,252,
+ 255,150,233,255,65,199,134,233,237,139,149,233,141,12,194,252,247,217,3,141,
+ 233,139,90,252,252,252,233,244,12,255,254,0
+};
+
+enum {
+ GLOB_vm_returnp,
+ GLOB_cont_dispatch,
+ GLOB_vm_returnc,
+ GLOB_BC_RET_Z,
+ GLOB_vm_return,
+ GLOB_vm_leave_cp,
+ GLOB_vm_leave_unw,
+ GLOB_vm_unwind_c,
+ GLOB_vm_unwind_c_eh,
+ GLOB_vm_unwind_rethrow,
+ GLOB_vm_unwind_ff,
+ GLOB_vm_unwind_ff_eh,
+ GLOB_vm_growstack_c,
+ GLOB_vm_growstack_v,
+ GLOB_vm_growstack_f,
+ GLOB_vm_resume,
+ GLOB_vm_pcall,
+ GLOB_vm_call,
+ GLOB_vm_call_dispatch,
+ GLOB_vmeta_call,
+ GLOB_vm_call_dispatch_f,
+ GLOB_vm_cpcall,
+ GLOB_cont_ffi_callback,
+ GLOB_vm_call_tail,
+ GLOB_cont_cat,
+ GLOB_cont_ra,
+ GLOB_BC_CAT_Z,
+ GLOB_vmeta_tgets,
+ GLOB_vmeta_tgetb,
+ GLOB_vmeta_tgetv,
+ GLOB_vmeta_tsets,
+ GLOB_vmeta_tsetb,
+ GLOB_vmeta_tsetv,
+ GLOB_cont_nop,
+ GLOB_vmeta_comp,
+ GLOB_vmeta_binop,
+ GLOB_cont_condt,
+ GLOB_cont_condf,
+ GLOB_vmeta_equal,
+ GLOB_vmeta_equal_cd,
+ GLOB_vmeta_arith_vno,
+ GLOB_vmeta_arith_vn,
+ GLOB_vmeta_arith_nvo,
+ GLOB_vmeta_arith_nv,
+ GLOB_vmeta_unm,
+ GLOB_vmeta_arith_vvo,
+ GLOB_vmeta_arith_vv,
+ GLOB_vmeta_len,
+ GLOB_BC_LEN_Z,
+ GLOB_vmeta_call_ra,
+ GLOB_BC_CALLT_Z,
+ GLOB_vmeta_for,
+ GLOB_ff_assert,
+ GLOB_fff_fallback,
+ GLOB_fff_res_,
+ GLOB_ff_type,
+ GLOB_fff_res1,
+ GLOB_ff_getmetatable,
+ GLOB_ff_setmetatable,
+ GLOB_ff_rawget,
+ GLOB_ff_tonumber,
+ GLOB_fff_resi,
+ GLOB_fff_resxmm0,
+ GLOB_fff_resn,
+ GLOB_ff_tostring,
+ GLOB_fff_gcstep,
+ GLOB_ff_next,
+ GLOB_fff_res2,
+ GLOB_fff_res,
+ GLOB_ff_pairs,
+ GLOB_ff_ipairs_aux,
+ GLOB_fff_res0,
+ GLOB_ff_ipairs,
+ GLOB_ff_pcall,
+ GLOB_ff_xpcall,
+ GLOB_ff_coroutine_resume,
+ GLOB_ff_coroutine_wrap_aux,
+ GLOB_ff_coroutine_yield,
+ GLOB_ff_math_abs,
+ GLOB_fff_resbit,
+ GLOB_ff_math_floor,
+ GLOB_vm_floor,
+ GLOB_ff_math_ceil,
+ GLOB_vm_ceil,
+ GLOB_ff_math_sqrt,
+ GLOB_ff_math_log,
+ GLOB_ff_math_log10,
+ GLOB_ff_math_exp,
+ GLOB_vm_exp_x87,
+ GLOB_ff_math_sin,
+ GLOB_ff_math_cos,
+ GLOB_ff_math_tan,
+ GLOB_ff_math_asin,
+ GLOB_ff_math_acos,
+ GLOB_ff_math_atan,
+ GLOB_ff_math_sinh,
+ GLOB_ff_math_cosh,
+ GLOB_ff_math_tanh,
+ GLOB_ff_math_deg,
+ GLOB_ff_math_rad,
+ GLOB_ff_math_atan2,
+ GLOB_ff_math_ldexp,
+ GLOB_ff_math_frexp,
+ GLOB_ff_math_modf,
+ GLOB_vm_trunc,
+ GLOB_ff_math_fmod,
+ GLOB_ff_math_pow,
+ GLOB_vm_pow,
+ GLOB_ff_math_min,
+ GLOB_ff_math_max,
+ GLOB_ff_string_len,
+ GLOB_ff_string_byte,
+ GLOB_ff_string_char,
+ GLOB_fff_newstr,
+ GLOB_ff_string_sub,
+ GLOB_fff_emptystr,
+ GLOB_ff_string_rep,
+ GLOB_fff_fallback_2,
+ GLOB_ff_string_reverse,
+ GLOB_fff_fallback_1,
+ GLOB_ff_string_lower,
+ GLOB_ff_string_upper,
+ GLOB_ff_table_getn,
+ GLOB_ff_bit_tobit,
+ GLOB_ff_bit_band,
+ GLOB_fff_fallback_bit_op,
+ GLOB_ff_bit_bor,
+ GLOB_ff_bit_bxor,
+ GLOB_ff_bit_bswap,
+ GLOB_ff_bit_bnot,
+ GLOB_ff_bit_lshift,
+ GLOB_ff_bit_rshift,
+ GLOB_ff_bit_arshift,
+ GLOB_ff_bit_rol,
+ GLOB_ff_bit_ror,
+ GLOB_vm_record,
+ GLOB_vm_rethook,
+ GLOB_vm_inshook,
+ GLOB_cont_hook,
+ GLOB_vm_hotloop,
+ GLOB_vm_callhook,
+ GLOB_vm_hotcall,
+ GLOB_vm_exit_handler,
+ GLOB_vm_exit_interp,
+ GLOB_vm_floor_sse,
+ GLOB_vm_ceil_sse,
+ GLOB_vm_trunc_sse,
+ GLOB_vm_mod,
+ GLOB_vm_exp2_x87,
+ GLOB_vm_exp2raw,
+ GLOB_vm_pow_sse,
+ GLOB_vm_powi_sse,
+ GLOB_vm_foldfpm,
+ GLOB_vm_foldarith,
+ GLOB_vm_cpuid,
+ GLOB_assert_bad_for_arg_type,
+ GLOB_vm_ffi_callback,
+ GLOB_vm_ffi_call,
+ GLOB_BC_MODVN_Z,
+ GLOB_BC_TGETS_Z,
+ GLOB_BC_TSETS_Z,
+ GLOB__MAX
+};
+static const char *const globnames[] = {
+ "vm_returnp",
+ "cont_dispatch",
+ "vm_returnc",
+ "BC_RET_Z",
+ "vm_return",
+ "vm_leave_cp",
+ "vm_leave_unw",
+ "vm_unwind_c@8",
+ "vm_unwind_c_eh",
+ "vm_unwind_rethrow",
+ "vm_unwind_ff@4",
+ "vm_unwind_ff_eh",
+ "vm_growstack_c",
+ "vm_growstack_v",
+ "vm_growstack_f",
+ "vm_resume",
+ "vm_pcall",
+ "vm_call",
+ "vm_call_dispatch",
+ "vmeta_call",
+ "vm_call_dispatch_f",
+ "vm_cpcall",
+ "cont_ffi_callback",
+ "vm_call_tail",
+ "cont_cat",
+ "cont_ra",
+ "BC_CAT_Z",
+ "vmeta_tgets",
+ "vmeta_tgetb",
+ "vmeta_tgetv",
+ "vmeta_tsets",
+ "vmeta_tsetb",
+ "vmeta_tsetv",
+ "cont_nop",
+ "vmeta_comp",
+ "vmeta_binop",
+ "cont_condt",
+ "cont_condf",
+ "vmeta_equal",
+ "vmeta_equal_cd",
+ "vmeta_arith_vno",
+ "vmeta_arith_vn",
+ "vmeta_arith_nvo",
+ "vmeta_arith_nv",
+ "vmeta_unm",
+ "vmeta_arith_vvo",
+ "vmeta_arith_vv",
+ "vmeta_len",
+ "BC_LEN_Z",
+ "vmeta_call_ra",
+ "BC_CALLT_Z",
+ "vmeta_for",
+ "ff_assert",
+ "fff_fallback",
+ "fff_res_",
+ "ff_type",
+ "fff_res1",
+ "ff_getmetatable",
+ "ff_setmetatable",
+ "ff_rawget",
+ "ff_tonumber",
+ "fff_resi",
+ "fff_resxmm0",
+ "fff_resn",
+ "ff_tostring",
+ "fff_gcstep",
+ "ff_next",
+ "fff_res2",
+ "fff_res",
+ "ff_pairs",
+ "ff_ipairs_aux",
+ "fff_res0",
+ "ff_ipairs",
+ "ff_pcall",
+ "ff_xpcall",
+ "ff_coroutine_resume",
+ "ff_coroutine_wrap_aux",
+ "ff_coroutine_yield",
+ "ff_math_abs",
+ "fff_resbit",
+ "ff_math_floor",
+ "vm_floor",
+ "ff_math_ceil",
+ "vm_ceil",
+ "ff_math_sqrt",
+ "ff_math_log",
+ "ff_math_log10",
+ "ff_math_exp",
+ "vm_exp_x87",
+ "ff_math_sin",
+ "ff_math_cos",
+ "ff_math_tan",
+ "ff_math_asin",
+ "ff_math_acos",
+ "ff_math_atan",
+ "ff_math_sinh",
+ "ff_math_cosh",
+ "ff_math_tanh",
+ "ff_math_deg",
+ "ff_math_rad",
+ "ff_math_atan2",
+ "ff_math_ldexp",
+ "ff_math_frexp",
+ "ff_math_modf",
+ "vm_trunc",
+ "ff_math_fmod",
+ "ff_math_pow",
+ "vm_pow",
+ "ff_math_min",
+ "ff_math_max",
+ "ff_string_len",
+ "ff_string_byte",
+ "ff_string_char",
+ "fff_newstr",
+ "ff_string_sub",
+ "fff_emptystr",
+ "ff_string_rep",
+ "fff_fallback_2",
+ "ff_string_reverse",
+ "fff_fallback_1",
+ "ff_string_lower",
+ "ff_string_upper",
+ "ff_table_getn",
+ "ff_bit_tobit",
+ "ff_bit_band",
+ "fff_fallback_bit_op",
+ "ff_bit_bor",
+ "ff_bit_bxor",
+ "ff_bit_bswap",
+ "ff_bit_bnot",
+ "ff_bit_lshift",
+ "ff_bit_rshift",
+ "ff_bit_arshift",
+ "ff_bit_rol",
+ "ff_bit_ror",
+ "vm_record",
+ "vm_rethook",
+ "vm_inshook",
+ "cont_hook",
+ "vm_hotloop",
+ "vm_callhook",
+ "vm_hotcall",
+ "vm_exit_handler",
+ "vm_exit_interp",
+ "vm_floor_sse",
+ "vm_ceil_sse",
+ "vm_trunc_sse",
+ "vm_mod",
+ "vm_exp2_x87",
+ "vm_exp2raw",
+ "vm_pow_sse",
+ "vm_powi_sse",
+ "vm_foldfpm",
+ "vm_foldarith",
+ "vm_cpuid",
+ "assert_bad_for_arg_type",
+ "vm_ffi_callback",
+ "vm_ffi_call@4",
+ "BC_MODVN_Z",
+ "BC_TGETS_Z",
+ "BC_TSETS_Z",
+ (const char *)0
+};
+static const char *const extnames[] = {
+ "lj_state_growstack@8",
+ "lj_err_throw@8",
+ "lj_meta_tget",
+ "lj_meta_tset",
+ "lj_meta_comp",
+ "lj_meta_equal",
+ "lj_meta_equal_cd@8",
+ "lj_meta_arith",
+ "lj_meta_len@8",
+ "lj_meta_call",
+ "lj_meta_for@8",
+ "lj_tab_get",
+ "lj_str_fromnumber@8",
+ "lj_str_fromnum@8",
+ "lj_tab_next",
+ "lj_tab_getinth@8",
+ "lj_ffh_coroutine_wrap_err@8",
+ "lj_vm_sinh",
+ "lj_vm_cosh",
+ "lj_vm_tanh",
+ "lj_str_new",
+ "lj_tab_len@4",
+ "lj_gc_step@4",
+ "lj_dispatch_ins@8",
+ "lj_trace_hot@8",
+ "lj_dispatch_call@8",
+ "lj_trace_exit@8",
+ "lj_ccallback_enter@8",
+ "lj_ccallback_leave@8",
+ "lj_meta_cat",
+ "lj_gc_barrieruv@8",
+ "lj_func_closeuv@8",
+ "lj_func_newL_gc",
+ "lj_tab_new",
+ "lj_gc_step_fixtop@4",
+ "lj_tab_dup@8",
+ "lj_tab_newkey",
+ "lj_tab_reasize",
+ (const char *)0
+};
+#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V)
+#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V)
+#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V)
+#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V)
+#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V)
+#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V)
+#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V)
+#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V)
+#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V)
+#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V)
+#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V)
+#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V)
+#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V)
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx, int cmov, int sse)
+{
+ dasm_put(Dst, 0);
+ dasm_put(Dst, 2, FRAME_P, LJ_TTRUE, FRAME_TYPE, FRAME_C, FRAME_TYPE, DISPATCH_GL(vmstate), ~LJ_VMST_C);
+ dasm_put(Dst, 109, Dt1(->base), Dt1(->top), Dt1(->cframe), Dt1(->maxstack), LJ_TNIL);
+ dasm_put(Dst, 200, Dt1(->top), Dt1(->top), Dt1(->glref), Dt2(->vmstate), ~LJ_VMST_C, CFRAME_RAWMASK);
+ dasm_put(Dst, 302, 1+1, Dt1(->base), Dt1(->glref), GG_G2DISP, LJ_TFALSE, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, LUA_MINSTACK, -4+PC2PROTO(framesize), Dt1(->base));
+ dasm_put(Dst, 385, Dt1(->top), Dt1(->base), Dt1(->top), Dt7(->pc), FRAME_CP, CFRAME_RESUME, Dt1(->glref), GG_G2DISP, Dt1(->cframe), Dt1(->status), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->status), Dt1(->base), Dt1(->top), FRAME_TYPE);
+ dasm_put(Dst, 548, FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base));
+ dasm_put(Dst, 648, Dt1(->top), LJ_TFUNC, Dt7(->pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), FRAME_CP, LJ_TNIL);
+#if LJ_HASFFI
+ dasm_put(Dst, 813);
+#endif
+ dasm_put(Dst, 822, 0);
+#if LJ_HASFFI
+#endif
+ dasm_put(Dst, 831, Dt7(->pc), PC2PROTO(k));
+#if LJ_HASFFI
+ dasm_put(Dst, 848);
+#endif
+ dasm_put(Dst, 869, Dt1(->base), LJ_TSTR, BC_GGET, DISPATCH_GL(tmptv), LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 967, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 976);
+ } else {
+ }
+ dasm_put(Dst, 988, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 2+1, LJ_TSTR, BC_GSET);
+ dasm_put(Dst, 1134, DISPATCH_GL(tmptv), LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 967, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 976);
+ } else {
+ }
+ dasm_put(Dst, 1158, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 3+1, Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 1330, -BCBIAS_J*4, LJ_TISTRUECOND, LJ_TISTRUECOND, Dt1(->base));
+ dasm_put(Dst, 1429);
+#if LJ_HASFFI
+ dasm_put(Dst, 1449, Dt1(->base));
+#endif
+ dasm_put(Dst, 1480);
+#if LJ_DUALNUM
+ dasm_put(Dst, 1483);
+#endif
+ dasm_put(Dst, 1489);
+#if LJ_DUALNUM
+ dasm_put(Dst, 961);
+#endif
+ dasm_put(Dst, 1502);
+#if LJ_DUALNUM
+ dasm_put(Dst, 1483);
+#endif
+ dasm_put(Dst, 1531, Dt1(->base), Dt1(->base), FRAME_CONT, 2+1, Dt1(->base), Dt1(->base));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1633);
+#else
+ dasm_put(Dst, 1652);
+#endif
+ dasm_put(Dst, 1657, Dt1(->base), Dt1(->base), Dt7(->pc), Dt1(->base), Dt1(->base), GG_DISP2STATIC, 1+1, LJ_TISTRUECOND);
+ dasm_put(Dst, 1843, 1+1, ~LJ_TNUMX);
+ if (cmov) {
+ dasm_put(Dst, 1912);
+ } else {
+ dasm_put(Dst, 1916);
+ }
+ dasm_put(Dst, 1925, ((char *)(&((GCfuncC *)0)->upvalue)), LJ_TSTR, ~LJ_TLIGHTUD, 1+1, LJ_TTAB, Dt6(->metatable), LJ_TNIL);
+ dasm_put(Dst, 2004, DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable), LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), DtB(->next));
+ dasm_put(Dst, 2062, LJ_TNIL, LJ_TUDATA, LJ_TNUMX, LJ_TISNUM, LJ_TLIGHTUD);
+ dasm_put(Dst, 2128, LJ_TNUMX, DISPATCH_GL(gcroot[GCROOT_BASEMT]), 2+1, LJ_TTAB, Dt6(->metatable), LJ_TTAB, Dt6(->metatable), LJ_TTAB);
+ dasm_put(Dst, 2199, Dt6(->marked), LJ_GC_BLACK, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist), 2+1, LJ_TTAB);
+ dasm_put(Dst, 2289, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2303);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 2325);
+ } else {
+ dasm_put(Dst, 2335);
+ }
+ dasm_put(Dst, 2342, 1+1, LJ_TSTR, LJ_TSTR, LJ_TISNUM, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 2411, Dt1(->base));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2437);
+ } else {
+ dasm_put(Dst, 2442);
+ }
+ dasm_put(Dst, 2447, Dt1(->base), 1+1, LJ_TTAB, Dt1(->base), Dt1(->top), Dt1(->base), 1+2);
+ dasm_put(Dst, 2539, LJ_TNIL, LJ_TNIL, 1+1, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 2586, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 2595, Dt8(->upvalue[0]), LJ_TFUNC, LJ_TNIL, 1+3, 1+1, LJ_TTAB, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2581);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ dasm_put(Dst, 2650);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2655, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 2671, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 2704, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->hmask), 1+0);
+ dasm_put(Dst, 2566, 1+1, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 2586, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 2781, Dt8(->upvalue[0]), LJ_TFUNC);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2802, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 2814);
+ } else {
+ dasm_put(Dst, 2824);
+ }
+ dasm_put(Dst, 2831, 1+3, 1+1, 8+FRAME_PCALL, DISPATCH_GL(hookmask), HOOK_ACTIVE_SHIFT, 2+1, LJ_TFUNC);
+ dasm_put(Dst, 2896, LJ_TFUNC, 16+FRAME_PCALL, 1+1, LJ_TTHREAD, Dt1(->cframe), Dt1(->status), LUA_YIELD, Dt1(->top));
+ dasm_put(Dst, 2985, Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP);
+ dasm_put(Dst, 3072, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack), LJ_TTRUE, FRAME_TYPE);
+ dasm_put(Dst, 3187, LJ_TFALSE, Dt1(->top), Dt1(->top), 1+2, Dt1(->top), Dt1(->base), Dt8(->upvalue[0].gcr), Dt1(->cframe));
+ dasm_put(Dst, 3282, Dt1(->status), LUA_YIELD, Dt1(->top), Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top));
+ dasm_put(Dst, 3348, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack));
+ dasm_put(Dst, 3437, FRAME_TYPE, Dt1(->top), Dt1(->base), Dt1(->cframe), CFRAME_RESUME);
+ dasm_put(Dst, 3547, Dt1(->base), Dt1(->top), Dt1(->cframe), LUA_YIELD, Dt1(->status));
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 3574);
+ }
+ if (sse) {
+ dasm_put(Dst, 3577);
+ }
+ dasm_put(Dst, 3592, 1+1);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3603, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3683, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 3693, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32));
+ } else {
+ dasm_put(Dst, 3724);
+ }
+ dasm_put(Dst, 3741, 1+1, FRAME_TYPE, LJ_TNIL);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3838, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3683, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 3860);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3869);
+ }
+ dasm_put(Dst, 2330);
+ } else {
+ dasm_put(Dst, 3903);
+ if (LJ_DUALNUM) {
+ } else {
+ dasm_put(Dst, 2337);
+ }
+ }
+ dasm_put(Dst, 3909);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3838, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3683, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 3912);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3869);
+ }
+ dasm_put(Dst, 2330);
+ } else {
+ dasm_put(Dst, 3921);
+ if (LJ_DUALNUM) {
+ } else {
+ dasm_put(Dst, 2337);
+ }
+ }
+ if (sse) {
+ dasm_put(Dst, 3927, 1+1, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3956, 1+1, LJ_TISNUM);
+ }
+ dasm_put(Dst, 3985, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1);
+ dasm_put(Dst, 4054, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1);
+ dasm_put(Dst, 4111, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1);
+ dasm_put(Dst, 4174, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM);
+ dasm_put(Dst, 4264);
+ if (sse) {
+ dasm_put(Dst, 4276, 1+1, LJ_TISNUM);
+ } else {
+ }
+ dasm_put(Dst, 4301);
+ if (sse) {
+ dasm_put(Dst, 4315, 1+1, LJ_TISNUM);
+ } else {
+ }
+ dasm_put(Dst, 4340);
+ if (sse) {
+ dasm_put(Dst, 4354, 1+1, LJ_TISNUM);
+ } else {
+ }
+ dasm_put(Dst, 4379);
+ if (sse) {
+ dasm_put(Dst, 4395, 1+1, LJ_TISNUM, Dt8(->upvalue[0]));
+ } else {
+ dasm_put(Dst, 4434, 1+1, LJ_TISNUM, Dt8(->upvalue[0]));
+ }
+ dasm_put(Dst, 4467, 2+1, LJ_TISNUM, LJ_TISNUM, 2+1, LJ_TISNUM, LJ_TISNUM);
+ dasm_put(Dst, 4532, 1+1, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4631);
+ } else {
+ dasm_put(Dst, 4637);
+ }
+ dasm_put(Dst, 4644);
+ if (sse) {
+ dasm_put(Dst, 4669);
+ } else {
+ dasm_put(Dst, 4675);
+ }
+ dasm_put(Dst, 4678, 1+2);
+ if (sse) {
+ dasm_put(Dst, 4687);
+ } else {
+ dasm_put(Dst, 4695);
+ }
+ dasm_put(Dst, 4703);
+ if (sse) {
+ dasm_put(Dst, 4706, (unsigned int)(U64x(43500000,00000000)), (unsigned int)((U64x(43500000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 4733);
+ }
+ dasm_put(Dst, 4750);
+ if (sse) {
+ dasm_put(Dst, 4766, 1+1, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 4791, 1+1, LJ_TISNUM);
+ }
+ dasm_put(Dst, 4813);
+ if (sse) {
+ dasm_put(Dst, 4835);
+ } else {
+ dasm_put(Dst, 4861);
+ }
+ dasm_put(Dst, 4878, 1+2);
+ if (sse) {
+ dasm_put(Dst, 4918);
+ } else {
+ dasm_put(Dst, 4926);
+ }
+ dasm_put(Dst, 4936, 2+1, LJ_TISNUM, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4988, 2+1, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 5035, 2+1, LJ_TISNUM, LJ_TISNUM);
+ }
+ dasm_put(Dst, 5076, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5089, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4631);
+ } else {
+ }
+ dasm_put(Dst, 5139);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 5150, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5171);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ dasm_put(Dst, 5192);
+ } else {
+ }
+ dasm_put(Dst, 5217, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5230, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4631);
+ } else {
+ }
+ dasm_put(Dst, 5139);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 5150, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5171);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ dasm_put(Dst, 5280);
+ } else {
+ }
+ if (!sse) {
+ dasm_put(Dst, 5305);
+ }
+ dasm_put(Dst, 5314, 1+1, LJ_TSTR);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5336, Dt5(->len));
+ } else if (sse) {
+ dasm_put(Dst, 5344, Dt5(->len));
+ } else {
+ dasm_put(Dst, 5355, Dt5(->len));
+ }
+ dasm_put(Dst, 5363, 1+1, LJ_TSTR, Dt5(->len), Dt5([1]));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5339);
+ } else if (sse) {
+ dasm_put(Dst, 5401);
+ } else {
+ dasm_put(Dst, 5411);
+ }
+ dasm_put(Dst, 5422, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5455);
+ } else if (sse) {
+ dasm_put(Dst, 5478);
+ } else {
+ dasm_put(Dst, 5504);
+ }
+ dasm_put(Dst, 5528, Dt1(->base), Dt1(->base), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+2, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5637);
+ } else if (sse) {
+ dasm_put(Dst, 5649);
+ } else {
+ dasm_put(Dst, 5664);
+ }
+ dasm_put(Dst, 5676, LJ_TSTR, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2581);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ dasm_put(Dst, 5693, Dt5(->len));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5703);
+ } else if (sse) {
+ dasm_put(Dst, 5707);
+ } else {
+ }
+ dasm_put(Dst, 5714, sizeof(GCstr)-1);
+ dasm_put(Dst, 5789, 2+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 5850, LJ_TSTR, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5867);
+ } else if (sse) {
+ dasm_put(Dst, 5875);
+ } else {
+ dasm_put(Dst, 5886);
+ }
+ dasm_put(Dst, 5902, Dt5(->len), DISPATCH_GL(tmpbuf.sz), Dt5([1]), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(tmpbuf.buf), 1+1);
+ dasm_put(Dst, 5970, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 6037, 1+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz));
+ dasm_put(Dst, 6110, sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), 1+1);
+ dasm_put(Dst, 6195, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 6269, 1+1, LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6336);
+ } else if (sse) {
+ dasm_put(Dst, 6343);
+ } else {
+ }
+ dasm_put(Dst, 6353, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6369);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6386, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 106);
+ if (LJ_DUALNUM || sse) {
+ if (!sse) {
+ }
+ dasm_put(Dst, 6410);
+ } else {
+ }
+ dasm_put(Dst, 6415, 1+1);
+ if (sse) {
+ dasm_put(Dst, 6426, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 6436);
+ }
+ dasm_put(Dst, 2297, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6461);
+ } else {
+ }
+ dasm_put(Dst, 6476, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6501);
+ } else {
+ dasm_put(Dst, 6521);
+ }
+ if (sse) {
+ dasm_put(Dst, 6526);
+ } else {
+ }
+ dasm_put(Dst, 6543, 1+1);
+ if (sse) {
+ dasm_put(Dst, 6426, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 6436);
+ }
+ dasm_put(Dst, 2297, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6461);
+ } else {
+ }
+ dasm_put(Dst, 6476, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6561);
+ } else {
+ dasm_put(Dst, 6521);
+ }
+ if (sse) {
+ dasm_put(Dst, 6581);
+ } else {
+ }
+ dasm_put(Dst, 6598, 1+1);
+ if (sse) {
+ dasm_put(Dst, 6426, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 6436);
+ }
+ dasm_put(Dst, 2297, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6461);
+ } else {
+ }
+ dasm_put(Dst, 6476, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6616);
+ } else {
+ dasm_put(Dst, 6521);
+ }
+ if (sse) {
+ dasm_put(Dst, 6636);
+ } else {
+ }
+ dasm_put(Dst, 6653, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6386, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6676, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6386, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6700);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6410);
+ } else if (sse) {
+ dasm_put(Dst, 6706);
+ } else {
+ }
+ dasm_put(Dst, 6718);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6729, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6386, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6745, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 6760, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6827);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6834, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6386, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6745, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 6850, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6917);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6925, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6386, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6745, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 6941, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 7008);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7016, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6386, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6745, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 7032, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 7099);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7106, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6444);
+ } else {
+ dasm_put(Dst, 2320);
+ }
+ if (sse) {
+ dasm_put(Dst, 6386, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6745, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 7122, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 7189, 1+2, 1+1, Dt1(->base), 8*LUA_MINSTACK, Dt1(->top), Dt1(->maxstack), Dt8(->f), Dt1(->base));
+ dasm_put(Dst, 7265, Dt1(->top), Dt7(->pc), FRAME_TYPE, LUA_MINSTACK, Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 7392, Dt1(->top), Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 7431, DISPATCH_GL(hookmask), HOOK_VMEVENT, HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount));
+#endif
+ dasm_put(Dst, 7464, DISPATCH_GL(hookmask), HOOK_ACTIVE, DISPATCH_GL(hookmask), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount), LUA_MASKLINE);
+ dasm_put(Dst, 7518, Dt1(->base), Dt1(->base), GG_DISP2STATIC);
+#if LJ_HASJIT
+ dasm_put(Dst, 7585, Dt7(->pc), PC2PROTO(framesize), Dt1(->base), Dt1(->top), GG_DISP2J, DISPATCH_J(L));
+#endif
+ dasm_put(Dst, 7632);
+#if LJ_HASJIT
+ dasm_put(Dst, 7459);
+#endif
+ dasm_put(Dst, 7639);
+#if LJ_HASJIT
+ dasm_put(Dst, 7642);
+#endif
+ dasm_put(Dst, 7652, Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 7685);
+#endif
+ dasm_put(Dst, 7690, Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 7721, DISPATCH_GL(vmstate), DISPATCH_GL(vmstate), ~LJ_VMST_EXIT, DISPATCH_J(exitno), DISPATCH_J(parent), 16*8, DISPATCH_GL(jit_L), DISPATCH_GL(jit_base), DISPATCH_J(L), DISPATCH_GL(jit_L), Dt1(->base), GG_DISP2J, Dt1(->cframe), CFRAME_RAWMASK, CFRAME_OFS_L, Dt1(->base), CFRAME_OFS_PC);
+#endif
+ dasm_put(Dst, 7960);
+#if LJ_HASJIT
+ dasm_put(Dst, 7963, Dt7(->pc), PC2PROTO(k), DISPATCH_GL(jit_L), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, BC_FUNCF);
+#endif
+ dasm_put(Dst, 8063);
+ if (!sse) {
+ dasm_put(Dst, 8066);
+ }
+ dasm_put(Dst, 8111, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ if (!sse) {
+ dasm_put(Dst, 8197);
+ }
+ dasm_put(Dst, 8242, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(bff00000,00000000)), (unsigned int)((U64x(bff00000,00000000))>>32));
+ if (!sse) {
+ dasm_put(Dst, 8328);
+ }
+ dasm_put(Dst, 8367, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ if (sse) {
+ dasm_put(Dst, 8456, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 8570);
+ }
+ dasm_put(Dst, 8617);
+ if (!sse) {
+ } else {
+ dasm_put(Dst, 8694);
+ }
+ dasm_put(Dst, 8697);
+ dasm_put(Dst, 8782, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ dasm_put(Dst, 8885, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(7ff00000,00000000)), (unsigned int)((U64x(7ff00000,00000000))>>32));
+ dasm_put(Dst, 9047);
+#if LJ_HASJIT
+ if (sse) {
+ dasm_put(Dst, 9088);
+ dasm_put(Dst, 9158);
+ dasm_put(Dst, 9230);
+ } else {
+ dasm_put(Dst, 9282);
+ dasm_put(Dst, 9374);
+ }
+ dasm_put(Dst, 9420);
+#endif
+ dasm_put(Dst, 9424);
+ if (sse) {
+ dasm_put(Dst, 9427, (unsigned int)(U64x(80000000,00000000)), (unsigned int)((U64x(80000000,00000000))>>32));
+ dasm_put(Dst, 9512, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32));
+ } else {
+ dasm_put(Dst, 9640);
+ dasm_put(Dst, 9723);
+ if (cmov) {
+ dasm_put(Dst, 9778);
+ } else {
+ dasm_put(Dst, 9797);
+ }
+ dasm_put(Dst, 9420);
+ }
+ dasm_put(Dst, 9838);
+#ifdef LUA_USE_ASSERT
+ dasm_put(Dst, 9422);
+#endif
+ dasm_put(Dst, 9862);
+#if LJ_HASFFI
+#define DtE(_V) (int)(ptrdiff_t)&(((CTState *)0)_V)
+ dasm_put(Dst, 9866, GG_G2DISP, Dt2(->ctype_state), DtE(->cb.slot), DtE(->cb.gpr[0]), DtE(->cb.gpr[1]), DtE(->cb.gpr[2]), DtE(->cb.gpr[3]), DtE(->cb.fpr[0]), DtE(->cb.fpr[1]), DtE(->cb.fpr[2]), DtE(->cb.fpr[3]), CFRAME_SIZE, DtE(->cb.gpr[4]), DtE(->cb.gpr[5]), DtE(->cb.fpr[4]), DtE(->cb.fpr[5]), DtE(->cb.fpr[6]), DtE(->cb.fpr[7]), DtE(->cb.stack), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP);
+ dasm_put(Dst, 9990, Dt1(->base), Dt1(->top), Dt7(->pc));
+#endif
+ dasm_put(Dst, 10030);
+#if LJ_HASFFI
+ dasm_put(Dst, 10033, DISPATCH_GL(ctype_state), DtE(->L), Dt1(->base), Dt1(->top), DtE(->cb.gpr[0]), DtE(->cb.fpr[0]));
+#endif
+ dasm_put(Dst, 10074);
+#if LJ_HASFFI
+#define DtF(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V)
+ dasm_put(Dst, 10077, DtF(->spadj));
+#if LJ_TARGET_WINDOWS
+#endif
+ dasm_put(Dst, 10093, DtF(->nsp), offsetof(CCallState, stack), CCALL_SPS_EXTRA*8, DtF(->nfpr), DtF(->gpr[0]), DtF(->gpr[1]), DtF(->gpr[2]), DtF(->gpr[3]), DtF(->gpr[4]), DtF(->gpr[5]), DtF(->fpr[0]), DtF(->fpr[1]));
+ dasm_put(Dst, 10172, DtF(->fpr[2]), DtF(->fpr[3]), DtF(->fpr[4]), DtF(->fpr[5]), DtF(->fpr[6]), DtF(->fpr[7]), DtF(->func), DtF(->gpr[0]), DtF(->fpr[0]), DtF(->gpr[1]), DtF(->fpr[1]));
+#if LJ_TARGET_WINDOWS
+#endif
+ dasm_put(Dst, 10227);
+#endif
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop, int cmov, int sse)
+{
+ int vk = 0;
+ dasm_put(Dst, 829, defop);
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 10235, LJ_TISNUM, LJ_TISNUM);
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 10265);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 10270);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 10275);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 10280);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 10285, -BCBIAS_J*4, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 10340);
+ } else {
+ dasm_put(Dst, 10351);
+ }
+ dasm_put(Dst, 10362);
+ if (sse) {
+ dasm_put(Dst, 10369);
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 10389);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 10394);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 10399);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 10404);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 10409);
+ } else {
+ dasm_put(Dst, 10414);
+ }
+ } else {
+ dasm_put(Dst, 10422, LJ_TISNUM, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 10443);
+ } else {
+ dasm_put(Dst, 10464);
+ if (cmov) {
+ dasm_put(Dst, 10480);
+ } else {
+ dasm_put(Dst, 10486);
+ }
+ }
+ if (LJ_DUALNUM) {
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 10389);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 10394);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 10399);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 10404);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 10409);
+ } else {
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 817);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 10493);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 10498);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 10503);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 10508, -BCBIAS_J*4);
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ dasm_put(Dst, 10541);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 10549, LJ_TISNUM, LJ_TISNUM);
+ if (vk) {
+ dasm_put(Dst, 10574);
+ } else {
+ dasm_put(Dst, 10579);
+ }
+ dasm_put(Dst, 10584, -BCBIAS_J*4, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 10637);
+ } else {
+ dasm_put(Dst, 10644);
+ }
+ dasm_put(Dst, 10648);
+ if (sse) {
+ dasm_put(Dst, 10659);
+ } else {
+ dasm_put(Dst, 10671);
+ }
+ dasm_put(Dst, 10678);
+ } else {
+ dasm_put(Dst, 10683, LJ_TISNUM, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 10702);
+ } else {
+ dasm_put(Dst, 10720);
+ if (cmov) {
+ dasm_put(Dst, 10480);
+ } else {
+ dasm_put(Dst, 10486);
+ }
+ }
+ iseqne_fp:
+ if (vk) {
+ dasm_put(Dst, 10733);
+ } else {
+ dasm_put(Dst, 10742);
+ }
+ iseqne_end:
+ if (vk) {
+ dasm_put(Dst, 10751, -BCBIAS_J*4);
+ if (!LJ_HASFFI) {
+ dasm_put(Dst, 4684);
+ }
+ } else {
+ if (!LJ_HASFFI) {
+ dasm_put(Dst, 4684);
+ }
+ dasm_put(Dst, 10766, -BCBIAS_J*4);
+ }
+ if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV ||
+ op == BC_ISEQN || op == BC_ISNEN)) {
+ dasm_put(Dst, 10781);
+ } else {
+ dasm_put(Dst, 10520);
+ }
+ if (op == BC_ISEQV || op == BC_ISNEV) {
+ dasm_put(Dst, 10786);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 10789, LJ_TCDATA, LJ_TCDATA);
+ }
+ dasm_put(Dst, 10808, LJ_TISPRI, LJ_TISTABUD, LJ_TUDATA, Dt6(->metatable), Dt6(->nomm), 1<>32));
+ } else {
+ dasm_put(Dst, 11412);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 10781);
+ } else {
+ dasm_put(Dst, 10520);
+ }
+ break;
+ case BC_LEN:
+ dasm_put(Dst, 11421, LJ_TSTR);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 11435, Dt5(->len), LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 11449, Dt5(->len));
+ } else {
+ dasm_put(Dst, 11467, Dt5(->len));
+ }
+ dasm_put(Dst, 11476, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 11512, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 11526);
+ if (LJ_DUALNUM) {
+ } else if (sse) {
+ dasm_put(Dst, 11535);
+ } else {
+ }
+ dasm_put(Dst, 11541);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 11554, Dt6(->nomm), 1<base), Dt1(->base));
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ dasm_put(Dst, 12415, LJ_TSTR);
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ dasm_put(Dst, 12415, LJ_TCDATA);
+#endif
+ break;
+ case BC_KSHORT:
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 12452, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 12464);
+ } else {
+ dasm_put(Dst, 12479);
+ }
+ dasm_put(Dst, 10520);
+ break;
+ case BC_KNUM:
+ if (sse) {
+ dasm_put(Dst, 12487);
+ } else {
+ dasm_put(Dst, 12501);
+ }
+ dasm_put(Dst, 10520);
+ break;
+ case BC_KPRI:
+ dasm_put(Dst, 12509);
+ break;
+ case BC_KNIL:
+ dasm_put(Dst, 12538, LJ_TNIL);
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ dasm_put(Dst, 12586, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_USETV:
+#define TV2MARKOFS \
+ ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv))
+ dasm_put(Dst, 12627, offsetof(GCfuncL, uvptr), DtA(->closed), DtA(->v), TV2MARKOFS, LJ_GC_BLACK, LJ_TISGCV, LJ_TISNUM - LJ_TISGCV, Dt4(->gch.marked), LJ_GC_WHITES, GG_DISP2G);
+ dasm_put(Dst, 12723);
+ break;
+#undef TV2MARKOFS
+ case BC_USETS:
+ dasm_put(Dst, 12735, offsetof(GCfuncL, uvptr), DtA(->v), LJ_TSTR, DtA(->marked), LJ_GC_BLACK, Dt4(->gch.marked), LJ_GC_WHITES, DtA(->closed), GG_DISP2G);
+ break;
+ case BC_USETN:
+ dasm_put(Dst, 12831);
+ if (sse) {
+ dasm_put(Dst, 12836);
+ } else {
+ dasm_put(Dst, 11052);
+ }
+ dasm_put(Dst, 12844, offsetof(GCfuncL, uvptr), DtA(->v));
+ if (sse) {
+ dasm_put(Dst, 12853);
+ } else {
+ dasm_put(Dst, 12859);
+ }
+ dasm_put(Dst, 10520);
+ break;
+ case BC_USETP:
+ dasm_put(Dst, 12862, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_UCLO:
+ dasm_put(Dst, 12902, -BCBIAS_J*4, Dt1(->openupval), Dt1(->base), Dt1(->base));
+ break;
+
+ case BC_FNEW:
+ dasm_put(Dst, 12958, Dt1(->base), Dt1(->base), LJ_TFUNC);
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ dasm_put(Dst, 13025, Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), LJ_TTAB);
+ break;
+ case BC_TDUP:
+ dasm_put(Dst, 13149, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), Dt1(->base), LJ_TTAB);
+ break;
+
+ case BC_GGET:
+ dasm_put(Dst, 13248, Dt7(->env));
+ break;
+ case BC_GSET:
+ dasm_put(Dst, 13268, Dt7(->env));
+ break;
+
+ case BC_TGETV:
+ dasm_put(Dst, 13288, LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 13311, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 13325, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 13336);
+ } else {
+ }
+ dasm_put(Dst, 13357);
+ }
+ dasm_put(Dst, 13362, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL);
+ dasm_put(Dst, 13557, LJ_TNIL, DtB(->next), Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+ case BC_TSETS:
+ dasm_put(Dst, 13910, LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->nomm), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL);
+ dasm_put(Dst, 13987, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<next));
+ dasm_put(Dst, 14074, Dt6(->metatable), Dt6(->nomm), 1<base), Dt1(->base), Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+ case BC_TSETB:
+ dasm_put(Dst, 14166, LJ_TTAB, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable));
+ dasm_put(Dst, 14261, Dt6(->metatable), Dt6(->nomm), 1<marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+
+ case BC_TSETM:
+ dasm_put(Dst, 14309, Dt6(->marked), LJ_GC_BLACK, Dt6(->asize), Dt6(->array), Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 14459, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALL: case BC_CALLM:
+ dasm_put(Dst, 11574);
+ if (op == BC_CALLM) {
+ dasm_put(Dst, 14479);
+ }
+ dasm_put(Dst, 14484, LJ_TFUNC, Dt7(->pc));
+ break;
+
+ case BC_CALLMT:
+ dasm_put(Dst, 14479);
+ break;
+ case BC_CALLT:
+ dasm_put(Dst, 14527, LJ_TFUNC, FRAME_TYPE, Dt7(->ffid), Dt7(->pc));
+ dasm_put(Dst, 14645, FRAME_TYPE, Dt7(->pc), PC2PROTO(k), FRAME_VARG, FRAME_TYPEP, FRAME_VARG);
+ break;
+
+ case BC_ITERC:
+ dasm_put(Dst, 14719, LJ_TFUNC, 2+1, Dt7(->pc));
+ break;
+
+ case BC_ITERN:
+#if LJ_HASJIT
+#endif
+ dasm_put(Dst, 14791, Dt6(->asize), Dt6(->array), LJ_TNIL);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 11440, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 11535);
+ } else {
+ dasm_put(Dst, 14843);
+ }
+ dasm_put(Dst, 14849);
+ if (LJ_DUALNUM) {
+ } else if (sse) {
+ dasm_put(Dst, 11405);
+ } else {
+ dasm_put(Dst, 11417);
+ }
+ dasm_put(Dst, 14862, -BCBIAS_J*4);
+ if (!LJ_DUALNUM && !sse) {
+ dasm_put(Dst, 14916);
+ }
+ dasm_put(Dst, 14922, Dt6(->hmask), sizeof(Node), Dt6(->node), DtB(->val.it), LJ_TNIL, DtB(->key), DtB(->val));
+ break;
+
+ case BC_ISNEXT:
+ dasm_put(Dst, 15001, LJ_TFUNC, LJ_TTAB, LJ_TNIL, Dt8(->ffid), FF_next_N, -BCBIAS_J*4, BC_JMP, -BCBIAS_J*4, BC_ITERC);
+ break;
+
+ case BC_VARG:
+ dasm_put(Dst, 15102, (8+FRAME_VARG), LJ_TNIL, Dt1(->maxstack));
+ dasm_put(Dst, 15269, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top));
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ dasm_put(Dst, 14479);
+ break;
+
+ case BC_RET: case BC_RET0: case BC_RET1:
+ if (op != BC_RET0) {
+ dasm_put(Dst, 15339);
+ }
+ dasm_put(Dst, 15343, FRAME_TYPE);
+ switch (op) {
+ case BC_RET:
+ dasm_put(Dst, 15362);
+ break;
+ case BC_RET1:
+ dasm_put(Dst, 15416);
+ /* fallthrough */
+ case BC_RET0:
+ dasm_put(Dst, 15426);
+ default:
+ break;
+ }
+ dasm_put(Dst, 15437, Dt7(->pc), PC2PROTO(k));
+ if (op == BC_RET) {
+ dasm_put(Dst, 15485, LJ_TNIL);
+ } else {
+ dasm_put(Dst, 15496, LJ_TNIL);
+ }
+ dasm_put(Dst, 15503, -FRAME_VARG, FRAME_TYPEP);
+ if (op != BC_RET0) {
+ dasm_put(Dst, 15527);
+ }
+ dasm_put(Dst, 4761);
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+
+ case BC_FORL:
+#if LJ_HASJIT
+ dasm_put(Dst, 15531, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ dasm_put(Dst, 15552);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 15556, LJ_TISNUM);
+ if (!vk) {
+ dasm_put(Dst, 15566, LJ_TISNUM, LJ_TISNUM);
+ } else {
+#ifdef LUA_USE_ASSERT
+ dasm_put(Dst, 15595, LJ_TISNUM, LJ_TISNUM);
+#endif
+ dasm_put(Dst, 15614);
+ }
+ dasm_put(Dst, 15633, LJ_TISNUM);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 15644, -BCBIAS_J*4);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 15658, -BCBIAS_J*4, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 15676, -BCBIAS_J*4);
+ } else {
+ dasm_put(Dst, 15668, BC_JLOOP);
+ }
+ dasm_put(Dst, 15690);
+ if (vk) {
+ dasm_put(Dst, 15715);
+ }
+ dasm_put(Dst, 15633, LJ_TISNUM);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 15724);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 15729, -BCBIAS_J*4, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 15743);
+ } else {
+ dasm_put(Dst, 15739, BC_JLOOP);
+ }
+ dasm_put(Dst, 15748);
+ } else if (!vk) {
+ dasm_put(Dst, 15755, LJ_TISNUM);
+ }
+ if (!vk) {
+ dasm_put(Dst, 15761, LJ_TISNUM);
+ } else {
+#ifdef LUA_USE_ASSERT
+ dasm_put(Dst, 15775, LJ_TISNUM, LJ_TISNUM);
+#endif
+ }
+ dasm_put(Dst, 15794);
+ if (!vk) {
+ dasm_put(Dst, 15798, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 15807);
+ if (vk) {
+ dasm_put(Dst, 15819);
+ } else {
+ dasm_put(Dst, 15838);
+ }
+ dasm_put(Dst, 15843);
+ } else {
+ dasm_put(Dst, 15856);
+ if (vk) {
+ dasm_put(Dst, 15862);
+ } else {
+ dasm_put(Dst, 15878);
+ }
+ dasm_put(Dst, 15886);
+ if (cmov) {
+ dasm_put(Dst, 10480);
+ } else {
+ dasm_put(Dst, 10486);
+ }
+ if (!cmov) {
+ dasm_put(Dst, 15891);
+ }
+ }
+ if (op == BC_FORI) {
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 15897);
+ } else {
+ dasm_put(Dst, 15902, -BCBIAS_J*4);
+ }
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 15912, -BCBIAS_J*4, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 15926);
+ } else {
+ dasm_put(Dst, 15931, -BCBIAS_J*4);
+ }
+ } else {
+ dasm_put(Dst, 15922, BC_JLOOP);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 10409);
+ } else {
+ dasm_put(Dst, 11186);
+ }
+ if (sse) {
+ dasm_put(Dst, 15941);
+ }
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ dasm_put(Dst, 15531, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ dasm_put(Dst, 15952, LJ_TNIL);
+ if (op == BC_JITERL) {
+ dasm_put(Dst, 15967, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 15981, -BCBIAS_J*4);
+ }
+ dasm_put(Dst, 10518);
+ break;
+
+ case BC_LOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 15531, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_ILOOP:
+ dasm_put(Dst, 10520);
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 15997, DISPATCH_J(trace), DtD(->mcode), DISPATCH_GL(jit_base), DISPATCH_GL(jit_L));
+#endif
+ break;
+
+ case BC_JMP:
+ dasm_put(Dst, 16038, -BCBIAS_J*4);
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ /*
+ ** Reminder: A function may be called with func/args above L->maxstack,
+ ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot,
+ ** too. This means all FUNC* ops (including fast functions) must check
+ ** for stack overflow _before_ adding more slots!
+ */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ dasm_put(Dst, 16064, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_CALL);
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ dasm_put(Dst, 16085, -4+PC2PROTO(k), Dt1(->maxstack), -4+PC2PROTO(numparams));
+ if (op == BC_JFUNCF) {
+ dasm_put(Dst, 16116, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 10520);
+ }
+ dasm_put(Dst, 16125, LJ_TNIL);
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ dasm_put(Dst, 9422);
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ dasm_put(Dst, 16147, FRAME_VARG, Dt1(->maxstack), -4+PC2PROTO(numparams), LJ_TNIL);
+ if (op == BC_JFUNCV) {
+ dasm_put(Dst, 16116, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 16244, -4+PC2PROTO(k));
+ }
+ dasm_put(Dst, 16269, LJ_TNIL);
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ dasm_put(Dst, 16291, Dt8(->f), Dt1(->base), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->top));
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 2433);
+ } else {
+ dasm_put(Dst, 16321);
+ }
+ dasm_put(Dst, 16329, DISPATCH_GL(vmstate), ~LJ_VMST_C);
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 16339);
+ } else {
+ dasm_put(Dst, 16344, DISPATCH_GL(wrapf));
+ }
+ dasm_put(Dst, 16350, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), Dt1(->top));
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+ int cmov = 1;
+ int sse = 0;
+#ifdef LUAJIT_CPU_NOCMOV
+ cmov = 0;
+#endif
+#if defined(LUAJIT_CPU_SSE2) || defined(LJ_TARGET_X64)
+ sse = 1;
+#endif
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx, cmov, sse);
+
+ dasm_put(Dst, 16376);
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op, cmov, sse);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+#if LJ_64
+#define SZPTR "8"
+#define BSZPTR "3"
+#define REG_SP "0x7"
+#define REG_RA "0x10"
+#else
+#define SZPTR "4"
+#define BSZPTR "2"
+#define REG_SP "0x4"
+#define REG_RA "0x8"
+#endif
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad .Lbegin\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE0:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad lj_vm_ffi_call\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if (defined(__sun__) && defined(__svr4__)) || defined(__solaris__)
+ fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
+#else
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+#endif
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE2:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.section .eh_frame,\"dr\"\n");
+ fprintf(ctx->fp,
+ "\t.def %slj_err_unwind_dwarf; .scl 2; .type 32; .endef\n",
+ LJ_32 ? "_" : "");
+ fprintf(ctx->fp,
+ "Lframe1:\n"
+ "\t.long LECIE1-LSCIE1\n"
+ "LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zP\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 5\n" /* augmentation length */
+ "\t.byte 0x00\n" /* absptr */
+ "\t.long %slj_err_unwind_dwarf\n"
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ "LECIE1:\n\n", LJ_32 ? "_" : "");
+ fprintf(ctx->fp,
+ "LSFDE1:\n"
+ "\t.long LEFDE1-LASFDE1\n"
+ "LASFDE1:\n"
+ "\t.long LASFDE1-Lframe1\n"
+ "\t.long %slj_vm_asm_begin\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ "LEFDE1:\n\n", LJ_32 ? "_" : "", (int)ctx->codesz, CFRAME_SIZE);
+ break;
+ /* Mental note: never let Apple design an assembler.
+ ** Or a linker. Or a plastic case. But I digress.
+ */
+ case BUILD_machasm: {
+#if LJ_HASFFI
+ int fcsize = 0;
+#endif
+ int i;
+ fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
+ fprintf(ctx->fp,
+ "EH_frame1:\n"
+ "\t.set L$set$x,LECIEX-LSCIEX\n"
+ "\t.long L$set$x\n"
+ "LSCIEX:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zPR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 6\n" /* augmentation length */
+ "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
+#if LJ_64
+ "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEX:\n\n");
+ for (i = 0; i < ctx->nsym; i++) {
+ const char *name = ctx->sym[i].name;
+ int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs;
+ if (size == 0) continue;
+#if LJ_HASFFI
+ if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
+#endif
+ fprintf(ctx->fp,
+ "%s.eh:\n"
+ "LSFDE%d:\n"
+ "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
+ "\t.long L$set$%d\n"
+ "LASFDE%d:\n"
+ "\t.long LASFDE%d-EH_frame1\n"
+ "\t.long %s-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/
+ "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDE%d:\n\n",
+ name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i);
+ }
+#if LJ_HASFFI
+ if (fcsize) {
+ fprintf(ctx->fp,
+ "EH_frame2:\n"
+ "\t.set L$set$y,LECIEY-LSCIEY\n"
+ "\t.long L$set$y\n"
+ "LSCIEY:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 1\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEY:\n\n");
+ fprintf(ctx->fp,
+ "_lj_vm_ffi_call.eh:\n"
+ "LSFDEY:\n"
+ "\t.set L$set$yy,LEFDEY-LASFDEY\n"
+ "\t.long L$set$yy\n"
+ "LASFDEY:\n"
+ "\t.long LASFDEY-EH_frame2\n"
+ "\t.long _lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.byte 8\n" /* def_cfa_offset */
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/
+ "\t.byte 0xd\n\t.uleb128 0x4\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDEY:\n\n", fcsize);
+ }
+#endif
+#if LJ_64
+ fprintf(ctx->fp, "\t.subsections_via_symbols\n");
+#else
+ fprintf(ctx->fp,
+ "\t.non_lazy_symbol_pointer\n"
+ "L_lj_err_unwind_dwarf$non_lazy_ptr:\n"
+ ".indirect_symbol _lj_err_unwind_dwarf\n"
+ ".long 0\n");
+#endif
+ }
+ break;
+ default: /* Difficult for other modes. */
+ break;
+ }
+}
+
diff --git a/src/LuaJIT/src/buildvm_x64win.h b/src/LuaJIT/src/buildvm_x64win.h
new file mode 100644
index 000000000..533d5b006
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_x64win.h
@@ -0,0 +1,3401 @@
+/*
+** This file has been pre-processed with DynASM.
+** http://luajit.org/dynasm.html
+** DynASM version 1.3.0, DynASM x64 version 1.3.0
+** DO NOT EDIT! The original file is in "buildvm_x86.dasc".
+*/
+
+#if DASM_VERSION != 10300
+#error "Version mismatch between DynASM and included encoding engine"
+#endif
+
+#define DASM_SECTION_CODE_OP 0
+#define DASM_SECTION_CODE_SUB 1
+#define DASM_MAXSECTION 2
+static const unsigned char build_actionlist[16196] = {
+ 254,1,248,10,252,247,198,237,15,132,244,11,131,230,252,248,41,252,242,72,
+ 141,76,49,252,248,139,114,252,252,199,68,10,4,237,248,12,131,192,1,137,68,
+ 36,84,252,247,198,237,15,132,244,13,248,14,129,252,246,239,252,247,198,237,
+ 15,133,244,10,199,131,233,237,131,230,252,248,41,214,252,247,222,131,232,
+ 1,15,132,244,248,248,1,72,139,44,10,72,137,106,252,248,131,194,8,131,232,
+ 1,15,133,244,1,248,2,255,139,108,36,96,137,181,233,248,3,139,68,36,84,139,
+ 76,36,88,248,4,57,193,15,133,244,252,248,5,131,252,234,8,137,149,233,248,
+ 15,72,139,76,36,104,72,137,141,233,49,192,248,16,72,131,196,40,91,94,95,93,
+ 195,248,6,15,130,244,253,59,149,233,15,135,244,254,199,66,252,252,237,131,
+ 194,8,131,192,1,252,233,244,4,248,7,255,133,201,15,132,244,5,41,193,141,20,
+ 202,252,233,244,5,248,8,137,149,233,137,68,36,84,137,202,137,252,233,232,
+ 251,1,0,139,149,233,252,233,244,3,248,17,137,208,72,137,204,248,18,139,108,
+ 36,96,139,173,233,199,133,233,237,252,233,244,16,248,19,248,20,72,129,225,
+ 239,72,137,204,248,21,255,139,108,36,96,72,199,193,252,248,252,255,252,255,
+ 252,255,184,237,139,149,233,139,157,233,129,195,239,139,114,252,252,199,66,
+ 252,252,237,199,131,233,237,252,233,244,12,248,22,186,237,252,233,244,248,
+ 248,23,131,232,8,252,233,244,247,248,24,141,68,194,252,248,248,1,15,182,142,
+ 233,131,198,4,137,149,233,255,137,133,233,137,116,36,100,137,202,248,2,137,
+ 252,233,232,251,1,0,139,149,233,139,133,233,139,106,252,248,41,208,193,232,
+ 3,131,192,1,139,181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,255,
+ 36,252,235,248,25,85,87,86,83,72,131,252,236,40,137,205,137,76,36,96,137,
+ 209,190,237,49,192,72,141,188,253,36,233,139,157,233,129,195,239,72,137,189,
+ 233,137,68,36,100,72,137,68,36,104,137,68,36,88,137,68,36,92,56,133,233,15,
+ 132,244,249,199,131,233,237,136,133,233,139,149,233,139,133,233,41,200,193,
+ 232,3,131,192,1,41,209,139,114,252,252,137,68,36,84,252,247,198,237,255,15,
+ 132,244,13,252,233,244,14,248,26,85,87,86,83,72,131,252,236,40,190,237,68,
+ 137,76,36,92,252,233,244,247,248,27,85,87,86,83,72,131,252,236,40,190,237,
+ 248,1,68,137,68,36,88,137,205,137,76,36,96,137,209,72,139,189,233,72,137,
+ 124,36,104,137,108,36,100,72,137,165,233,248,2,139,157,233,129,195,239,248,
+ 3,199,131,233,237,139,149,233,255,1,206,41,214,139,133,233,41,200,193,232,
+ 3,131,192,1,248,28,139,105,252,248,129,121,253,252,252,239,15,133,244,29,
+ 248,30,137,202,137,114,252,252,139,181,233,139,14,15,182,252,233,15,182,205,
+ 131,198,4,252,255,36,252,235,248,31,85,87,86,83,72,131,252,236,40,137,205,
+ 137,76,36,96,137,108,36,100,139,189,233,43,189,233,199,68,36,92,0,0,0,0,137,
+ 124,36,88,72,139,189,233,72,137,124,36,104,72,137,165,233,65,252,255,209,
+ 133,192,15,132,244,15,137,193,190,237,252,233,244,2,248,11,1,209,131,230,
+ 252,248,137,213,41,252,242,199,68,193,252,252,237,137,200,139,117,252,244,
+ 72,99,77,252,240,255,131,252,249,1,15,134,244,247,255,72,141,61,245,72,1,
+ 252,249,255,139,122,252,248,139,191,233,139,191,233,252,255,225,255,248,1,
+ 15,132,244,32,41,213,193,252,237,3,141,69,252,255,252,233,244,33,255,248,
+ 34,15,182,78,252,255,131,252,237,16,141,12,202,41,252,233,15,132,244,35,252,
+ 247,217,193,252,233,3,65,137,200,139,76,36,96,137,145,233,72,139,0,72,137,
+ 69,0,137,252,234,252,233,244,36,248,37,137,68,36,80,199,68,36,84,237,72,141,
+ 68,36,80,128,126,252,252,235,15,133,244,247,141,139,233,137,41,199,65,4,237,
+ 137,205,252,233,244,248,248,38,15,182,70,252,254,255,199,68,36,84,237,137,
+ 68,36,80,255,252,242,15,42,192,252,242,15,17,68,36,80,255,72,141,68,36,80,
+ 252,233,244,247,248,39,15,182,70,252,254,141,4,194,248,1,15,182,110,252,255,
+ 141,44,252,234,248,2,139,76,36,96,137,145,233,137,252,234,73,137,192,137,
+ 205,137,116,36,100,232,251,1,1,139,149,233,133,192,15,132,244,249,248,35,
+ 15,182,78,252,253,72,139,40,72,137,44,202,139,6,15,182,204,15,182,232,131,
+ 198,4,193,232,16,252,255,36,252,235,248,3,139,141,233,137,113,252,244,141,
+ 177,233,41,214,139,105,252,248,184,237,252,233,244,30,248,40,137,68,36,80,
+ 199,68,36,84,237,72,141,68,36,80,128,126,252,252,235,15,133,244,247,255,141,
+ 139,233,137,41,199,65,4,237,137,205,252,233,244,248,248,41,15,182,70,252,
+ 254,255,72,141,68,36,80,252,233,244,247,248,42,15,182,70,252,254,141,4,194,
+ 248,1,15,182,110,252,255,141,44,252,234,248,2,139,76,36,96,137,145,233,137,
+ 252,234,73,137,192,137,205,137,116,36,100,232,251,1,2,139,149,233,133,192,
+ 15,132,244,249,15,182,78,252,253,72,139,44,202,72,137,40,248,43,139,6,15,
+ 182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,3,139,141,
+ 233,137,113,252,244,15,182,70,252,253,72,139,44,194,72,137,105,16,141,177,
+ 233,41,214,139,105,252,248,184,237,252,233,244,30,248,44,139,108,36,96,137,
+ 149,233,68,141,4,194,141,20,202,137,252,233,68,15,182,78,252,252,137,116,
+ 36,100,232,251,1,3,248,3,139,149,233,255,131,252,248,1,15,135,244,45,248,
+ 4,141,118,4,15,130,244,252,248,5,15,183,70,252,254,141,180,253,134,233,248,
+ 6,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,
+ 46,131,198,4,129,120,253,4,239,15,130,244,5,252,233,244,6,248,47,129,120,
+ 253,4,239,252,233,244,4,248,48,131,252,238,4,65,137,192,65,137,252,233,139,
+ 108,36,96,137,149,233,255,137,202,137,252,233,137,116,36,100,232,251,1,4,
+ 252,233,244,3,248,49,255,131,252,238,4,139,108,36,96,137,149,233,137,252,
+ 233,139,86,252,252,137,116,36,100,232,251,1,5,252,233,244,3,255,248,50,255,
+ 15,182,110,252,255,255,248,51,141,4,199,252,233,244,247,248,52,255,248,53,
+ 141,4,199,141,44,252,234,149,252,233,244,248,248,54,141,4,194,137,197,252,
+ 233,244,248,248,55,255,248,56,141,4,194,248,1,141,44,252,234,248,2,141,12,
+ 202,65,137,232,65,137,193,15,182,70,252,252,137,68,36,32,139,108,36,96,137,
+ 149,233,137,202,137,252,233,137,116,36,100,232,251,1,6,139,149,233,133,192,
+ 15,132,244,43,248,45,137,193,41,208,137,113,252,244,141,176,233,184,237,252,
+ 233,244,28,248,57,139,108,36,96,137,149,233,141,20,194,137,252,233,137,116,
+ 36,100,232,251,1,7,139,149,233,255,133,192,15,133,244,45,15,183,70,252,254,
+ 139,12,194,252,233,244,58,255,252,233,244,45,255,248,59,141,76,202,8,248,
+ 29,137,76,36,84,137,68,36,80,131,252,233,8,139,108,36,96,137,149,233,137,
+ 202,68,141,4,193,137,252,233,137,116,36,100,232,251,1,8,139,149,233,139,76,
+ 36,84,139,68,36,80,139,105,252,248,131,192,1,57,215,15,132,244,60,137,202,
+ 137,114,252,252,139,181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,
+ 255,36,252,235,248,61,139,108,36,96,137,149,233,137,202,137,252,233,137,116,
+ 36,100,232,251,1,9,139,149,233,139,70,252,252,15,182,204,15,182,232,193,232,
+ 16,252,255,164,253,252,235,233,248,62,129,252,248,239,15,130,244,63,139,106,
+ 4,129,252,253,239,15,131,244,63,139,114,252,252,137,68,36,84,137,106,252,
+ 252,139,42,137,106,252,248,131,232,2,15,132,244,248,255,137,209,248,1,131,
+ 193,8,72,139,41,72,137,105,252,248,131,232,1,15,133,244,1,248,2,139,68,36,
+ 84,252,233,244,64,248,65,129,252,248,239,15,130,244,63,139,106,4,137,252,
+ 233,193,252,249,15,131,252,249,252,254,15,132,244,249,184,237,252,247,213,
+ 57,232,255,15,71,197,255,15,134,244,247,137,232,248,1,255,248,2,139,106,252,
+ 248,139,132,253,197,233,139,114,252,252,199,66,252,252,237,137,66,252,248,
+ 252,233,244,66,248,3,184,237,252,233,244,2,248,67,129,252,248,239,15,130,
+ 244,63,139,106,4,139,114,252,252,129,252,253,239,15,133,244,252,248,1,139,
+ 42,139,173,233,248,2,133,252,237,199,66,252,252,237,255,15,132,244,66,139,
+ 131,233,199,66,252,252,237,137,106,252,248,139,141,233,35,136,233,105,201,
+ 239,3,141,233,248,3,129,185,233,239,15,133,244,250,57,129,233,15,132,244,
+ 251,248,4,139,137,233,133,201,15,133,244,3,255,252,233,244,66,248,5,139,105,
+ 4,129,252,253,239,15,132,244,66,139,1,137,106,252,252,137,66,252,248,252,
+ 233,244,66,248,6,129,252,253,239,15,132,244,1,129,252,253,239,15,135,244,
+ 254,129,252,253,239,15,134,244,253,189,237,252,233,244,254,248,7,255,189,
+ 237,248,8,252,247,213,139,172,253,171,233,252,233,244,2,248,68,129,252,248,
+ 239,15,130,244,63,129,122,253,4,239,15,133,244,63,139,42,131,189,233,0,15,
+ 133,244,63,129,122,253,12,239,15,133,244,63,139,66,8,137,133,233,139,114,
+ 252,252,199,66,252,252,237,255,137,106,252,248,252,246,133,233,235,15,132,
+ 244,247,128,165,233,235,139,131,233,137,171,233,137,133,233,248,1,252,233,
+ 244,66,248,69,129,252,248,239,15,130,244,63,129,122,253,4,239,15,133,244,
+ 63,137,213,68,141,66,8,139,18,139,76,36,96,232,251,1,10,137,252,234,72,139,
+ 40,139,114,252,252,72,137,106,252,248,252,233,244,66,248,70,255,129,252,248,
+ 239,15,133,244,63,129,122,253,4,239,255,15,133,244,247,139,42,252,233,244,
+ 71,248,1,15,135,244,63,255,15,131,244,63,255,252,242,15,16,2,252,233,244,
+ 72,255,221,2,252,233,244,73,255,248,74,129,252,248,239,15,130,244,63,139,
+ 114,252,252,129,122,253,4,239,15,133,244,249,139,2,248,2,199,66,252,252,237,
+ 137,66,252,248,252,233,244,66,248,3,129,122,253,4,239,15,135,244,63,131,187,
+ 233,0,15,133,244,63,139,171,233,59,171,233,255,15,130,244,247,232,244,75,
+ 248,1,139,108,36,96,137,149,233,137,116,36,100,137,252,233,255,232,251,1,
+ 11,255,232,251,1,12,255,139,149,233,252,233,244,2,248,76,129,252,248,239,
+ 15,130,244,63,15,132,244,248,248,1,129,122,253,4,239,15,133,244,63,139,108,
+ 36,96,137,149,233,137,149,233,139,114,252,252,68,141,66,8,139,18,137,252,
+ 233,137,116,36,100,232,251,1,13,139,149,233,133,192,15,132,244,249,72,139,
+ 106,8,72,139,66,16,72,137,106,252,248,72,137,2,248,77,184,237,255,252,233,
+ 244,78,248,2,199,66,12,237,252,233,244,1,248,3,199,66,252,252,237,252,233,
+ 244,66,248,79,129,252,248,239,15,130,244,63,139,42,129,122,253,4,239,15,133,
+ 244,63,255,131,189,233,0,15,133,244,63,255,139,106,252,248,139,133,233,139,
+ 114,252,252,199,66,252,252,237,137,66,252,248,199,66,12,237,184,237,252,233,
+ 244,78,248,80,129,252,248,239,15,130,244,63,129,122,253,4,239,15,133,244,
+ 63,129,122,253,12,239,255,139,114,252,252,255,139,66,8,131,192,1,199,66,252,
+ 252,237,137,66,252,248,255,252,242,15,16,66,8,72,189,237,237,102,72,15,110,
+ 205,252,242,15,88,193,252,242,15,45,192,252,242,15,17,66,252,248,255,139,
+ 42,59,133,233,15,131,244,248,193,224,3,3,133,233,248,1,129,120,253,4,239,
+ 15,132,244,81,72,139,40,72,137,42,252,233,244,77,248,2,131,189,233,0,15,132,
+ 244,81,137,252,233,137,213,137,194,232,251,1,14,137,252,234,133,192,15,133,
+ 244,1,248,81,184,237,252,233,244,78,248,82,255,139,106,252,248,139,133,233,
+ 139,114,252,252,199,66,252,252,237,137,66,252,248,255,199,66,12,237,199,66,
+ 8,0,0,0,0,255,15,87,192,252,242,15,17,66,8,255,217,252,238,221,90,8,255,184,
+ 237,252,233,244,78,248,83,129,252,248,239,15,130,244,63,141,74,8,131,232,
+ 1,190,237,248,1,15,182,171,233,193,252,237,235,131,229,1,1,252,238,252,233,
+ 244,28,248,84,129,252,248,239,15,130,244,63,129,122,253,12,239,15,133,244,
+ 63,255,139,106,4,137,106,12,199,66,4,237,139,42,139,114,8,137,106,8,137,50,
+ 141,74,16,131,232,2,190,237,252,233,244,1,248,85,129,252,248,239,15,130,244,
+ 63,139,42,139,114,252,252,137,116,36,100,137,108,36,80,129,122,253,4,239,
+ 15,133,244,63,72,131,189,233,0,15,133,244,63,128,189,233,235,15,135,244,63,
+ 139,141,233,15,132,244,247,255,59,141,233,15,132,244,63,248,1,141,116,193,
+ 252,240,59,181,233,15,135,244,63,137,181,233,139,108,36,96,137,149,233,131,
+ 194,8,137,149,233,141,108,194,232,72,41,252,245,57,206,15,132,244,249,248,
+ 2,72,139,4,46,72,137,70,252,248,131,252,238,8,57,206,15,133,244,2,248,3,137,
+ 202,139,76,36,80,232,244,25,199,131,233,237,255,139,108,36,96,139,116,36,
+ 80,139,149,233,129,252,248,239,15,135,244,254,248,4,139,142,233,139,190,233,
+ 137,142,233,137,252,254,41,206,15,132,244,252,141,4,50,193,252,238,3,59,133,
+ 233,15,135,244,255,137,213,72,41,205,248,5,72,139,1,72,137,4,41,131,193,8,
+ 57,252,249,15,133,244,5,248,6,141,70,2,199,66,252,252,237,248,7,139,116,36,
+ 100,137,68,36,84,72,199,193,252,248,252,255,252,255,252,255,252,247,198,237,
+ 255,15,132,244,13,252,233,244,14,248,8,199,66,252,252,237,139,142,233,131,
+ 252,233,8,137,142,233,72,139,1,72,137,2,184,237,252,233,244,7,248,9,139,76,
+ 36,80,137,185,233,137,252,242,137,252,233,232,251,1,0,139,116,36,80,139,149,
+ 233,252,233,244,4,248,86,139,106,252,248,139,173,233,139,114,252,252,137,
+ 116,36,100,137,108,36,80,72,131,189,233,0,15,133,244,63,255,128,189,233,235,
+ 15,135,244,63,139,141,233,15,132,244,247,59,141,233,15,132,244,63,248,1,141,
+ 116,193,252,248,59,181,233,15,135,244,63,137,181,233,139,108,36,96,137,149,
+ 233,137,149,233,141,108,194,252,240,72,41,252,245,57,206,15,132,244,249,248,
+ 2,255,72,139,4,46,72,137,70,252,248,131,252,238,8,57,206,15,133,244,2,248,
+ 3,137,202,139,76,36,80,232,244,25,199,131,233,237,139,108,36,96,139,116,36,
+ 80,139,149,233,129,252,248,239,15,135,244,254,248,4,139,142,233,139,190,233,
+ 137,142,233,137,252,254,41,206,15,132,244,252,141,4,50,193,252,238,3,59,133,
+ 233,15,135,244,255,255,137,213,72,41,205,248,5,72,139,1,72,137,4,41,131,193,
+ 8,57,252,249,15,133,244,5,248,6,141,70,1,248,7,139,116,36,100,137,68,36,84,
+ 49,201,252,247,198,237,15,132,244,13,252,233,244,14,248,8,137,252,242,137,
+ 252,233,232,251,1,15,248,9,139,76,36,80,137,185,233,137,252,242,137,252,233,
+ 232,251,1,0,139,116,36,80,139,149,233,252,233,244,4,248,87,139,108,36,96,
+ 72,252,247,133,233,237,15,132,244,63,255,137,149,233,141,68,194,252,248,137,
+ 133,233,49,192,72,137,133,233,176,235,136,133,233,252,233,244,16,255,248,
+ 71,255,248,73,139,114,252,252,221,90,252,248,252,233,244,66,255,248,88,129,
+ 252,248,239,15,130,244,63,255,129,122,253,4,239,15,133,244,248,139,42,131,
+ 252,253,0,15,137,244,71,252,247,221,15,136,244,247,248,89,248,71,139,114,
+ 252,252,199,66,252,252,237,137,106,252,248,252,233,244,66,248,1,139,114,252,
+ 252,199,66,252,252,0,0,224,65,199,66,252,248,0,0,0,0,252,233,244,66,248,2,
+ 15,135,244,63,255,129,122,253,4,239,15,131,244,63,255,252,242,15,16,2,72,
+ 184,237,237,102,72,15,110,200,15,84,193,248,72,139,114,252,252,252,242,15,
+ 17,66,252,248,255,221,2,217,225,248,72,248,73,139,114,252,252,221,90,252,
+ 248,255,248,66,184,237,248,78,137,68,36,84,248,64,252,247,198,237,15,133,
+ 244,253,248,5,56,70,252,255,15,135,244,252,15,182,78,252,253,72,252,247,209,
+ 141,20,202,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,
+ 235,248,6,199,68,194,252,244,237,131,192,1,252,233,244,5,248,7,72,199,193,
+ 252,248,252,255,252,255,252,255,252,233,244,14,248,90,255,129,122,253,4,239,
+ 15,133,244,247,139,42,252,233,244,71,248,1,15,135,244,63,255,252,242,15,16,
+ 2,232,244,91,255,252,242,15,45,232,129,252,253,0,0,0,128,15,133,244,71,252,
+ 242,15,42,205,102,15,46,193,15,138,244,72,15,132,244,71,255,221,2,232,244,
+ 91,255,248,92,255,252,242,15,16,2,232,244,93,255,221,2,232,244,93,255,248,
+ 94,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,252,242,
+ 15,81,2,252,233,244,72,255,248,94,129,252,248,239,15,130,244,63,129,122,253,
+ 4,239,15,131,244,63,221,2,217,252,250,252,233,244,73,255,248,95,129,252,248,
+ 239,15,130,244,63,129,122,253,4,239,15,131,244,63,217,252,237,221,2,217,252,
+ 241,252,233,244,73,248,96,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,131,244,63,217,252,236,221,2,217,252,241,252,233,244,73,248,97,129,252,
+ 248,239,255,15,130,244,63,129,122,253,4,239,15,131,244,63,221,2,232,244,98,
+ 252,233,244,73,248,99,129,252,248,239,15,130,244,63,129,122,253,4,239,15,
+ 131,244,63,221,2,217,252,254,252,233,244,73,248,100,129,252,248,239,255,15,
+ 130,244,63,129,122,253,4,239,15,131,244,63,221,2,217,252,255,252,233,244,
+ 73,248,101,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,
+ 221,2,217,252,242,221,216,252,233,244,73,248,102,129,252,248,239,15,130,244,
+ 63,255,129,122,253,4,239,15,131,244,63,221,2,217,192,216,200,217,232,222,
+ 225,217,252,250,217,252,243,252,233,244,73,248,103,129,252,248,239,15,130,
+ 244,63,129,122,253,4,239,15,131,244,63,221,2,217,192,216,200,217,232,222,
+ 225,217,252,250,217,201,217,252,243,252,233,244,73,248,104,129,252,248,239,
+ 15,130,244,63,129,122,253,4,239,15,131,244,63,255,221,2,217,232,217,252,243,
+ 252,233,244,73,255,248,105,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,131,244,63,252,242,15,16,2,255,137,213,232,251,1,16,137,252,234,252,233,
+ 244,72,255,248,106,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,
+ 244,63,252,242,15,16,2,255,137,213,232,251,1,17,137,252,234,252,233,244,72,
+ 255,248,107,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,
+ 252,242,15,16,2,255,137,213,232,251,1,18,137,252,234,252,233,244,72,248,108,
+ 255,248,109,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,
+ 252,242,15,16,2,139,106,252,248,252,242,15,89,133,233,252,233,244,72,255,
+ 248,109,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,221,
+ 2,139,106,252,248,220,141,233,252,233,244,73,255,248,110,129,252,248,239,
+ 15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,
+ 63,221,2,221,66,8,217,252,243,252,233,244,73,248,111,129,252,248,239,15,130,
+ 244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,255,15,131,244,
+ 63,221,66,8,221,2,217,252,253,221,217,252,233,244,73,248,112,129,252,248,
+ 239,15,130,244,63,139,106,4,129,252,253,239,15,131,244,63,139,114,252,252,
+ 139,2,137,106,252,252,137,66,252,248,209,229,129,252,253,0,0,224,252,255,
+ 15,131,244,249,9,232,15,132,244,249,184,252,254,3,0,0,129,252,253,0,0,32,
+ 0,15,130,244,250,248,1,193,252,237,21,41,197,255,252,242,15,42,197,255,137,
+ 108,36,80,219,68,36,80,255,139,106,252,252,129,229,252,255,252,255,15,128,
+ 129,205,0,0,224,63,137,106,252,252,248,2,255,252,242,15,17,2,255,221,26,255,
+ 184,237,252,233,244,78,248,3,255,15,87,192,252,233,244,2,255,217,252,238,
+ 252,233,244,2,255,248,4,255,252,242,15,16,2,72,189,237,237,102,72,15,110,
+ 205,252,242,15,89,193,252,242,15,17,66,252,248,255,221,2,199,68,36,80,0,0,
+ 128,90,216,76,36,80,221,90,252,248,255,139,106,252,252,184,52,4,0,0,209,229,
+ 252,233,244,1,255,248,113,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,131,244,63,252,242,15,16,2,255,248,113,129,252,248,239,15,130,244,63,129,
+ 122,253,4,239,15,131,244,63,221,2,255,139,106,4,139,114,252,252,209,229,129,
+ 252,253,0,0,224,252,255,15,132,244,250,255,15,40,224,232,244,114,252,242,
+ 15,92,224,248,1,252,242,15,17,66,252,248,252,242,15,17,34,255,217,192,232,
+ 244,114,220,252,233,248,1,221,90,252,248,221,26,255,139,66,252,252,139,106,
+ 4,49,232,15,136,244,249,248,2,184,237,252,233,244,78,248,3,129,252,245,0,
+ 0,0,128,137,106,4,252,233,244,2,248,4,255,15,87,228,252,233,244,1,255,217,
+ 252,238,217,201,252,233,244,1,255,248,115,129,252,248,239,15,130,244,63,129,
+ 122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,63,221,66,8,221,
+ 2,248,1,217,252,248,223,224,158,15,138,244,1,221,217,252,233,244,73,255,248,
+ 116,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,
+ 253,12,239,15,131,244,63,252,242,15,16,2,252,242,15,16,74,8,232,244,117,252,
+ 233,244,72,255,248,116,129,252,248,239,15,130,244,63,129,122,253,4,239,15,
+ 131,244,63,129,122,253,12,239,15,131,244,63,221,2,221,66,8,232,244,117,252,
+ 233,244,73,255,248,118,185,2,0,0,0,129,122,253,4,239,255,15,133,244,250,139,
+ 42,248,1,57,193,15,131,244,71,129,124,253,202,252,252,239,15,133,244,249,
+ 59,108,202,252,248,15,79,108,202,252,248,131,193,1,252,233,244,1,248,3,15,
+ 135,244,63,255,252,233,244,252,248,4,15,135,244,63,255,252,242,15,16,2,248,
+ 5,57,193,15,131,244,72,129,124,253,202,252,252,239,255,15,130,244,252,15,
+ 135,244,63,252,242,15,42,76,202,252,248,252,233,244,253,255,248,6,252,242,
+ 15,16,76,202,252,248,248,7,252,242,15,93,193,131,193,1,252,233,244,5,255,
+ 248,119,185,2,0,0,0,129,122,253,4,239,255,15,133,244,250,139,42,248,1,57,
+ 193,15,131,244,71,129,124,253,202,252,252,239,15,133,244,249,59,108,202,252,
+ 248,15,76,108,202,252,248,131,193,1,252,233,244,1,248,3,15,135,244,63,255,
+ 248,6,252,242,15,16,76,202,252,248,248,7,252,242,15,95,193,131,193,1,252,
+ 233,244,5,255,248,9,221,216,252,233,244,63,255,248,120,129,252,248,239,15,
+ 130,244,63,129,122,253,4,239,15,133,244,63,139,42,255,139,173,233,252,233,
+ 244,71,255,252,242,15,42,133,233,252,233,244,72,255,219,133,233,252,233,244,
+ 73,255,248,121,129,252,248,239,15,133,244,63,129,122,253,4,239,15,133,244,
+ 63,139,42,139,114,252,252,131,189,233,1,15,130,244,81,15,182,173,233,255,
+ 252,242,15,42,197,252,233,244,72,255,137,108,36,80,219,68,36,80,252,233,244,
+ 73,255,248,122,139,171,233,59,171,233,15,130,244,247,232,244,75,248,1,129,
+ 252,248,239,15,133,244,63,129,122,253,4,239,255,15,133,244,63,139,42,129,
+ 252,253,252,255,0,0,0,15,135,244,63,137,108,36,84,255,15,131,244,63,252,242,
+ 15,44,42,129,252,253,252,255,0,0,0,15,135,244,63,137,108,36,84,255,15,131,
+ 244,63,221,2,219,92,36,84,129,124,36,84,252,255,0,0,0,15,135,244,63,255,199,
+ 68,36,32,1,0,0,0,72,141,68,36,84,248,123,139,108,36,96,137,149,233,68,139,
+ 68,36,32,72,137,194,137,252,233,137,116,36,100,232,251,1,19,139,149,233,139,
+ 114,252,252,199,66,252,252,237,137,66,252,248,252,233,244,66,248,124,139,
+ 171,233,59,171,233,15,130,244,247,232,244,75,248,1,199,68,36,84,252,255,252,
+ 255,252,255,252,255,129,252,248,239,15,130,244,63,15,134,244,247,129,122,
+ 253,20,239,255,15,133,244,63,139,106,16,137,108,36,84,255,15,131,244,63,252,
+ 242,15,44,106,16,137,108,36,84,255,15,131,244,63,221,66,16,219,92,36,84,255,
+ 248,1,129,122,253,4,239,15,133,244,63,129,122,253,12,239,255,139,42,137,108,
+ 36,32,139,173,233,255,139,74,8,255,252,242,15,44,74,8,255,139,68,36,84,57,
+ 197,15,130,244,251,248,2,133,201,15,142,244,253,248,3,139,108,36,32,41,200,
+ 15,140,244,125,141,172,253,13,233,131,192,1,248,4,137,68,36,32,137,232,252,
+ 233,244,123,248,5,15,140,244,252,141,68,40,1,252,233,244,2,248,6,137,232,
+ 252,233,244,2,248,7,255,15,132,244,254,1,252,233,131,193,1,15,143,244,3,248,
+ 8,185,1,0,0,0,252,233,244,3,248,125,49,192,252,233,244,4,248,126,129,252,
+ 248,239,15,130,244,63,139,171,233,59,171,233,15,130,244,247,232,244,75,248,
+ 1,255,129,122,253,4,239,15,133,244,63,129,122,253,12,239,139,42,255,15,133,
+ 244,63,139,66,8,255,15,131,244,63,252,242,15,44,66,8,255,15,131,244,63,221,
+ 66,8,219,92,36,84,139,68,36,84,255,133,192,15,142,244,125,131,189,233,1,15,
+ 130,244,125,15,133,244,127,57,131,233,15,130,244,127,15,182,141,233,139,171,
+ 233,137,68,36,32,248,1,136,77,0,131,197,1,131,232,1,15,133,244,1,139,131,
+ 233,252,233,244,123,248,128,129,252,248,239,255,15,130,244,63,139,171,233,
+ 59,171,233,15,130,244,247,232,244,75,248,1,129,122,253,4,239,15,133,244,63,
+ 139,42,139,133,233,133,192,15,132,244,125,57,131,233,15,130,244,129,129,197,
+ 239,137,116,36,84,137,68,36,32,139,179,233,248,1,255,15,182,77,0,131,197,
+ 1,131,232,1,136,12,6,15,133,244,1,137,252,240,139,116,36,84,252,233,244,123,
+ 248,130,129,252,248,239,15,130,244,63,139,171,233,59,171,233,15,130,244,247,
+ 232,244,75,248,1,129,122,253,4,239,15,133,244,63,139,42,139,133,233,57,131,
+ 233,255,15,130,244,129,129,197,239,137,116,36,84,137,68,36,32,139,179,233,
+ 252,233,244,249,248,1,15,182,76,5,0,131,252,249,65,15,130,244,248,131,252,
+ 249,90,15,135,244,248,131,252,241,32,248,2,136,12,6,248,3,131,232,1,15,137,
+ 244,1,137,252,240,139,116,36,84,252,233,244,123,248,131,129,252,248,239,15,
+ 130,244,63,255,139,171,233,59,171,233,15,130,244,247,232,244,75,248,1,129,
+ 122,253,4,239,15,133,244,63,139,42,139,133,233,57,131,233,15,130,244,129,
+ 129,197,239,137,116,36,84,137,68,36,32,139,179,233,252,233,244,249,248,1,
+ 15,182,76,5,0,131,252,249,97,15,130,244,248,255,131,252,249,122,15,135,244,
+ 248,131,252,241,32,248,2,136,12,6,248,3,131,232,1,15,137,244,1,137,252,240,
+ 139,116,36,84,252,233,244,123,248,132,129,252,248,239,15,130,244,63,129,122,
+ 253,4,239,15,133,244,63,137,213,139,10,232,251,1,20,137,252,234,255,137,197,
+ 252,233,244,71,255,252,242,15,42,192,252,233,244,72,255,248,133,129,252,248,
+ 239,15,130,244,63,129,122,253,4,239,255,15,133,244,247,139,42,252,233,244,
+ 89,248,1,15,135,244,63,255,252,242,15,16,2,72,189,237,237,102,72,15,110,205,
+ 252,242,15,88,193,102,15,126,197,255,252,233,244,89,255,248,134,129,252,248,
+ 239,15,130,244,63,255,72,189,237,237,102,72,15,110,205,255,199,68,36,80,0,
+ 0,192,89,255,15,133,244,247,139,42,252,233,244,248,248,1,15,135,244,63,255,
+ 252,242,15,16,2,252,242,15,88,193,102,15,126,197,255,248,2,137,68,36,84,141,
+ 68,194,252,240,248,1,57,208,15,134,244,89,129,120,253,4,239,255,15,133,244,
+ 248,35,40,131,232,8,252,233,244,1,248,2,15,135,244,135,255,15,131,244,135,
+ 255,252,242,15,16,0,252,242,15,88,193,102,15,126,193,33,205,255,131,232,8,
+ 252,233,244,1,248,136,129,252,248,239,15,130,244,63,255,15,133,244,248,11,
+ 40,131,232,8,252,233,244,1,248,2,15,135,244,135,255,252,242,15,16,0,252,242,
+ 15,88,193,102,15,126,193,9,205,255,131,232,8,252,233,244,1,248,137,129,252,
+ 248,239,15,130,244,63,255,15,133,244,248,51,40,131,232,8,252,233,244,1,248,
+ 2,15,135,244,135,255,252,242,15,16,0,252,242,15,88,193,102,15,126,193,49,
+ 205,255,131,232,8,252,233,244,1,248,138,129,252,248,239,15,130,244,63,129,
+ 122,253,4,239,255,248,2,15,205,252,233,244,89,248,139,129,252,248,239,15,
+ 130,244,63,129,122,253,4,239,255,248,2,252,247,213,255,248,89,252,242,15,
+ 42,197,252,233,244,72,255,248,135,139,68,36,84,252,233,244,63,255,248,140,
+ 129,252,248,239,15,130,244,63,129,122,253,4,239,255,248,2,129,122,253,12,
+ 239,15,133,244,63,139,74,8,255,248,140,129,252,248,239,15,130,244,63,129,
+ 122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,63,252,242,15,16,
+ 2,252,242,15,16,74,8,72,189,237,237,102,72,15,110,213,252,242,15,88,194,252,
+ 242,15,88,202,102,15,126,197,102,15,126,201,255,211,229,252,233,244,89,255,
+ 248,141,129,252,248,239,15,130,244,63,129,122,253,4,239,255,248,141,129,252,
+ 248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,
+ 15,131,244,63,252,242,15,16,2,252,242,15,16,74,8,72,189,237,237,102,72,15,
+ 110,213,252,242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201,
+ 255,211,252,237,252,233,244,89,255,248,142,129,252,248,239,15,130,244,63,
+ 129,122,253,4,239,255,248,142,129,252,248,239,15,130,244,63,129,122,253,4,
+ 239,15,131,244,63,129,122,253,12,239,15,131,244,63,252,242,15,16,2,252,242,
+ 15,16,74,8,72,189,237,237,102,72,15,110,213,252,242,15,88,194,252,242,15,
+ 88,202,102,15,126,197,102,15,126,201,255,211,252,253,252,233,244,89,255,248,
+ 143,129,252,248,239,15,130,244,63,129,122,253,4,239,255,248,143,129,252,248,
+ 239,15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,
+ 244,63,252,242,15,16,2,252,242,15,16,74,8,72,189,237,237,102,72,15,110,213,
+ 252,242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,201,255,211,
+ 197,252,233,244,89,255,248,144,129,252,248,239,15,130,244,63,129,122,253,
+ 4,239,255,248,144,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,
+ 244,63,129,122,253,12,239,15,131,244,63,252,242,15,16,2,252,242,15,16,74,
+ 8,72,189,237,237,102,72,15,110,213,252,242,15,88,194,252,242,15,88,202,102,
+ 15,126,197,102,15,126,201,255,211,205,252,233,244,89,248,127,184,237,252,
+ 233,244,63,248,129,184,237,248,63,139,108,36,96,139,114,252,252,137,116,36,
+ 100,137,149,233,141,68,194,252,248,141,136,233,137,133,233,139,66,252,248,
+ 59,141,233,15,135,244,251,137,252,233,252,255,144,233,139,149,233,133,192,
+ 15,143,244,78,248,1,255,139,141,233,41,209,193,252,233,3,133,192,141,65,1,
+ 139,106,252,248,15,133,244,33,139,181,233,139,14,15,182,252,233,15,182,205,
+ 131,198,4,252,255,36,252,235,248,33,137,209,252,247,198,237,15,133,244,249,
+ 15,182,110,252,253,72,252,247,213,141,20,252,234,252,233,244,28,248,3,137,
+ 252,245,131,229,252,248,41,252,234,252,233,244,28,248,5,186,237,137,252,233,
+ 232,251,1,0,139,149,233,49,192,252,233,244,1,248,75,93,72,137,108,36,32,139,
+ 108,36,96,137,116,36,100,137,149,233,255,141,68,194,252,248,137,252,233,137,
+ 133,233,232,251,1,21,139,149,233,139,133,233,41,208,193,232,3,131,192,1,72,
+ 139,108,36,32,85,195,248,145,255,15,182,131,233,168,235,15,133,244,251,168,
+ 235,15,133,244,247,168,235,15,132,244,247,252,255,139,233,252,233,244,247,
+ 255,248,146,15,182,131,233,168,235,15,133,244,251,252,233,244,247,248,147,
+ 15,182,131,233,168,235,15,133,244,251,168,235,15,132,244,251,252,255,139,
+ 233,15,132,244,247,168,235,15,132,244,251,248,1,255,139,108,36,96,137,149,
+ 233,137,252,242,137,252,233,232,251,1,22,248,3,139,149,233,248,4,15,182,78,
+ 252,253,248,5,15,182,110,252,252,15,183,70,252,254,252,255,164,253,252,235,
+ 233,248,148,131,198,4,139,77,232,137,76,36,84,252,233,244,4,248,149,255,139,
+ 106,252,248,139,173,233,15,182,133,233,141,4,194,139,108,36,96,137,149,233,
+ 137,133,233,137,252,242,141,139,233,72,137,171,233,137,116,36,100,232,251,
+ 1,23,252,233,244,3,255,248,150,137,116,36,100,255,248,151,255,137,116,36,
+ 100,131,206,1,248,1,255,141,68,194,252,248,139,108,36,96,137,149,233,137,
+ 133,233,137,252,242,137,252,233,232,251,1,24,199,68,36,100,0,0,0,0,255,131,
+ 230,252,254,255,139,149,233,72,137,193,139,133,233,41,208,72,137,205,15,182,
+ 78,252,253,193,232,3,131,192,1,252,255,229,248,152,255,65,85,65,84,65,83,
+ 65,82,65,81,65,80,87,86,85,72,141,108,36,88,85,83,82,81,80,15,182,69,252,
+ 248,138,101,252,240,76,137,125,252,248,76,137,117,252,240,139,93,0,139,139,
+ 233,199,131,233,237,137,131,233,137,139,233,72,129,252,236,239,72,131,197,
+ 128,252,242,68,15,17,125,252,248,252,242,68,15,17,117,252,240,252,242,68,
+ 15,17,109,232,252,242,68,15,17,101,224,252,242,68,15,17,93,216,252,242,68,
+ 15,17,85,208,252,242,68,15,17,77,200,252,242,68,15,17,69,192,252,242,15,17,
+ 125,184,252,242,15,17,117,176,252,242,15,17,109,168,252,242,15,17,101,160,
+ 252,242,15,17,93,152,252,242,15,17,85,144,252,242,15,17,77,136,252,242,15,
+ 17,69,128,139,171,233,139,147,233,72,137,171,233,199,131,233,0,0,0,0,137,
+ 149,233,72,141,148,253,36,233,141,139,233,232,251,1,25,72,139,141,233,72,
+ 129,225,239,137,169,233,139,149,233,139,177,233,252,233,244,247,255,248,153,
+ 255,72,141,140,253,36,233,248,1,102,68,15,111,185,233,102,68,15,111,177,233,
+ 102,68,15,111,169,233,102,68,15,111,161,233,102,68,15,111,153,233,102,68,
+ 15,111,145,233,102,68,15,111,137,233,102,68,15,111,129,233,102,15,111,185,
+ 233,72,137,204,102,15,111,49,76,139,124,36,16,76,139,116,36,24,76,139,108,
+ 36,32,76,139,100,36,80,133,192,15,136,244,249,137,68,36,84,139,122,252,248,
+ 139,191,233,139,191,233,199,131,233,0,0,0,0,199,131,233,237,139,6,15,182,
+ 204,15,182,232,131,198,4,193,232,16,129,252,253,239,15,130,244,248,255,139,
+ 68,36,84,248,2,252,255,36,252,235,248,3,252,247,216,137,252,233,137,194,232,
+ 251,1,26,255,248,91,255,217,124,36,4,137,68,36,8,102,184,0,4,102,11,68,36,
+ 4,102,37,252,255,252,247,102,137,68,36,6,217,108,36,6,217,252,252,217,108,
+ 36,4,139,68,36,8,195,255,248,154,72,184,237,237,102,72,15,110,208,72,184,
+ 237,237,102,72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15,134,244,
+ 247,102,15,85,208,252,242,15,88,203,252,242,15,92,203,102,15,86,202,72,184,
+ 237,237,102,72,15,110,208,252,242,15,194,193,1,102,15,84,194,252,242,15,92,
+ 200,15,40,193,248,1,195,248,93,255,217,124,36,4,137,68,36,8,102,184,0,8,102,
+ 11,68,36,4,102,37,252,255,252,251,102,137,68,36,6,217,108,36,6,217,252,252,
+ 217,108,36,4,139,68,36,8,195,255,248,155,72,184,237,237,102,72,15,110,208,
+ 72,184,237,237,102,72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15,
+ 134,244,247,102,15,85,208,252,242,15,88,203,252,242,15,92,203,102,15,86,202,
+ 72,184,237,237,102,72,15,110,208,252,242,15,194,193,6,102,15,84,194,252,242,
+ 15,92,200,15,40,193,248,1,195,248,114,255,217,124,36,4,137,68,36,8,102,184,
+ 0,12,102,11,68,36,4,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,
+ 139,68,36,8,195,255,248,156,72,184,237,237,102,72,15,110,208,72,184,237,237,
+ 102,72,15,110,216,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102,
+ 15,85,208,15,40,193,252,242,15,88,203,252,242,15,92,203,72,184,237,237,102,
+ 72,15,110,216,252,242,15,194,193,1,102,15,84,195,252,242,15,92,200,102,15,
+ 86,202,15,40,193,248,1,195,248,157,255,15,40,232,252,242,15,94,193,72,184,
+ 237,237,102,72,15,110,208,72,184,237,237,102,72,15,110,216,15,40,224,102,
+ 15,84,226,102,15,46,220,15,134,244,247,102,15,85,208,252,242,15,88,227,252,
+ 242,15,92,227,102,15,86,226,72,184,237,237,102,72,15,110,208,252,242,15,194,
+ 196,1,102,15,84,194,252,242,15,92,224,15,40,197,252,242,15,89,204,252,242,
+ 15,92,193,195,248,1,252,242,15,89,200,15,40,197,252,242,15,92,193,195,255,
+ 217,193,216,252,241,217,124,36,4,102,184,0,4,102,11,68,36,4,102,37,252,255,
+ 252,247,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,222,201,222,
+ 252,233,195,255,248,98,217,252,234,222,201,248,158,217,84,36,8,129,124,36,
+ 8,0,0,128,127,15,132,244,247,129,124,36,8,0,0,128,252,255,15,132,244,248,
+ 248,159,217,192,217,252,252,220,252,233,217,201,217,252,240,217,232,222,193,
+ 217,252,253,221,217,248,1,195,248,2,221,216,217,252,238,195,255,248,117,255,
+ 248,160,252,242,15,45,193,252,242,15,42,208,102,15,46,202,15,133,244,254,
+ 15,138,244,255,248,161,131,252,248,1,15,142,244,252,248,1,169,1,0,0,0,15,
+ 133,244,248,252,242,15,89,192,209,232,252,233,244,1,248,2,209,232,15,132,
+ 244,251,15,40,200,248,3,252,242,15,89,192,209,232,15,132,244,250,15,131,244,
+ 3,255,252,242,15,89,200,252,233,244,3,248,4,252,242,15,89,193,248,5,195,248,
+ 6,15,132,244,5,15,130,244,253,252,247,216,232,244,1,72,184,237,237,102,72,
+ 15,110,200,252,242,15,94,200,15,40,193,195,248,7,72,184,237,237,102,72,15,
+ 110,192,195,248,8,102,72,15,126,200,72,209,224,72,193,192,12,72,61,252,254,
+ 15,0,0,15,132,244,248,102,72,15,126,192,72,209,224,15,132,244,250,255,72,
+ 193,192,12,72,61,252,254,15,0,0,15,132,244,251,252,242,15,17,76,36,16,252,
+ 242,15,17,68,36,8,221,68,36,16,221,68,36,8,217,252,241,217,192,217,252,252,
+ 220,252,233,217,201,217,252,240,217,232,222,193,217,252,253,221,217,221,92,
+ 36,8,252,242,15,16,68,36,8,195,248,9,72,184,237,237,102,72,15,110,208,102,
+ 15,46,194,15,132,244,247,15,40,193,248,1,195,248,2,72,184,237,237,102,72,
+ 15,110,208,102,15,84,194,72,184,237,237,102,72,15,110,208,102,15,46,194,15,
+ 132,244,1,102,15,80,193,15,87,192,136,196,15,146,208,48,224,15,133,244,1,
+ 248,3,72,184,237,237,255,102,72,15,110,192,195,248,4,102,15,80,193,133,192,
+ 15,133,244,3,15,87,192,195,248,5,102,15,80,193,133,192,15,132,244,3,15,87,
+ 192,195,248,162,255,131,252,250,1,15,130,244,91,15,132,244,93,131,252,250,
+ 3,15,130,244,114,15,135,244,248,252,242,15,81,192,195,248,2,252,242,15,17,
+ 68,36,8,221,68,36,8,131,252,250,5,15,135,244,248,88,15,132,244,247,232,244,
+ 98,80,252,233,244,253,248,1,232,244,158,255,80,252,233,244,253,248,2,131,
+ 252,250,7,15,132,244,247,15,135,244,248,217,252,237,217,201,217,252,241,252,
+ 233,244,253,248,1,217,232,217,201,217,252,241,252,233,244,253,248,2,131,252,
+ 250,9,15,132,244,247,15,135,244,248,217,252,236,217,201,217,252,241,252,233,
+ 244,253,248,1,255,217,252,254,252,233,244,253,248,2,131,252,250,11,15,132,
+ 244,247,15,135,244,255,217,252,255,252,233,244,253,248,1,217,252,242,221,
+ 216,248,7,221,92,36,8,252,242,15,16,68,36,8,195,255,139,84,36,12,221,68,36,
+ 4,131,252,250,1,15,130,244,91,15,132,244,93,131,252,250,3,15,130,244,114,
+ 15,135,244,248,217,252,250,195,248,2,131,252,250,5,15,130,244,98,15,132,244,
+ 158,131,252,250,7,15,132,244,247,15,135,244,248,217,252,237,217,201,217,252,
+ 241,195,248,1,217,232,217,201,217,252,241,195,248,2,131,252,250,9,15,132,
+ 244,247,255,15,135,244,248,217,252,236,217,201,217,252,241,195,248,1,217,
+ 252,254,195,248,2,131,252,250,11,15,132,244,247,15,135,244,255,217,252,255,
+ 195,248,1,217,252,242,221,216,195,255,248,9,204,255,248,163,255,65,131,252,
+ 248,1,15,132,244,247,15,135,244,248,252,242,15,88,193,195,248,1,252,242,15,
+ 92,193,195,248,2,65,131,252,248,3,15,132,244,247,15,135,244,248,252,242,15,
+ 89,193,195,248,1,252,242,15,94,193,195,248,2,65,131,252,248,5,15,130,244,
+ 157,15,132,244,117,65,131,252,248,7,15,132,244,247,15,135,244,248,72,184,
+ 237,237,255,102,72,15,110,200,15,87,193,195,248,1,72,184,237,237,102,72,15,
+ 110,200,15,84,193,195,248,2,65,131,252,248,9,15,135,244,248,252,242,15,17,
+ 68,36,8,252,242,15,17,76,36,16,221,68,36,8,221,68,36,16,15,132,244,247,217,
+ 252,243,248,7,221,92,36,8,252,242,15,16,68,36,8,195,248,1,217,201,217,252,
+ 253,221,217,252,233,244,7,248,2,65,131,252,248,11,15,132,244,247,15,135,244,
+ 255,252,242,15,93,193,195,248,1,252,242,15,95,193,195,248,9,204,255,139,68,
+ 36,20,221,68,36,4,221,68,36,12,131,252,248,1,15,132,244,247,15,135,244,248,
+ 222,193,195,248,1,222,252,233,195,248,2,131,252,248,3,15,132,244,247,15,135,
+ 244,248,222,201,195,248,1,222,252,249,195,248,2,131,252,248,5,15,130,244,
+ 157,15,132,244,117,131,252,248,7,15,132,244,247,15,135,244,248,255,221,216,
+ 217,224,195,248,1,221,216,217,225,195,248,2,131,252,248,9,15,132,244,247,
+ 15,135,244,248,217,252,243,195,248,1,217,201,217,252,253,221,217,195,248,
+ 2,131,252,248,11,15,132,244,247,15,135,244,255,255,219,252,233,219,209,221,
+ 217,195,248,1,219,252,233,218,209,221,217,195,255,221,225,223,224,252,246,
+ 196,1,15,132,244,248,217,201,248,2,221,216,195,248,1,221,225,223,224,252,
+ 246,196,1,15,133,244,248,217,201,248,2,221,216,195,255,248,164,137,200,86,
+ 72,137,214,83,15,162,137,6,137,94,4,137,78,8,137,86,12,91,94,195,248,165,
+ 255,204,248,166,255,87,86,83,72,131,252,236,40,141,157,233,139,181,233,15,
+ 183,192,137,134,233,72,137,142,233,72,137,150,233,76,137,134,233,76,137,142,
+ 233,252,242,15,17,134,233,252,242,15,17,142,233,252,242,15,17,150,233,252,
+ 242,15,17,158,233,72,141,132,253,36,233,72,137,134,233,72,137,226,137,116,
+ 36,100,137,252,241,232,251,1,27,199,131,233,237,139,144,233,139,128,233,41,
+ 208,139,106,252,248,193,232,3,131,192,1,139,181,233,139,14,15,182,252,233,
+ 15,182,205,131,198,4,252,255,36,252,235,255,248,32,255,139,76,36,96,139,179,
+ 233,72,137,142,233,137,145,233,137,169,233,137,252,241,137,194,232,251,1,
+ 28,72,139,134,233,252,242,15,16,134,233,252,233,244,16,255,248,167,255,85,
+ 72,137,229,83,72,137,203,139,131,233,72,41,196,255,15,182,139,233,131,252,
+ 233,1,15,136,244,248,248,1,72,139,132,253,203,233,72,137,132,253,204,233,
+ 131,252,233,1,15,137,244,1,248,2,15,182,131,233,72,139,139,233,72,139,147,
+ 233,76,139,131,233,76,139,139,233,133,192,15,132,244,251,15,40,131,233,15,
+ 40,139,233,15,40,147,233,15,40,155,233,248,5,255,252,255,147,233,72,137,131,
+ 233,15,41,131,233,255,72,139,93,252,248,201,195,255,129,124,253,202,4,239,
+ 15,133,244,253,129,124,253,194,4,239,15,133,244,254,139,44,202,131,198,4,
+ 59,44,194,255,15,141,244,255,255,15,140,244,255,255,15,143,244,255,255,15,
+ 142,244,255,255,248,6,15,183,70,252,254,141,180,253,134,233,248,9,139,6,15,
+ 182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,7,15,135,244,
+ 44,129,124,253,194,4,239,15,130,244,247,15,133,244,44,255,252,242,15,42,4,
+ 194,252,233,244,248,255,221,4,202,219,4,194,252,233,244,249,255,248,8,15,
+ 135,244,44,255,252,242,15,42,12,202,252,242,15,16,4,194,131,198,4,102,15,
+ 46,193,255,15,134,244,9,255,15,135,244,9,255,15,130,244,9,255,15,131,244,
+ 9,255,252,233,244,6,255,219,4,202,252,233,244,248,255,129,124,253,202,4,239,
+ 15,131,244,44,129,124,253,194,4,239,15,131,244,44,255,248,1,252,242,15,16,
+ 4,194,248,2,131,198,4,102,15,46,4,202,248,3,255,248,1,221,4,202,248,2,221,
+ 4,194,248,3,131,198,4,255,223,252,233,221,216,255,218,252,233,223,224,158,
+ 255,15,135,244,247,255,15,130,244,247,255,15,131,244,247,255,15,183,70,252,
+ 254,141,180,253,134,233,248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,
+ 16,252,255,36,252,235,255,139,108,194,4,131,198,4,255,129,252,253,239,15,
+ 133,244,253,129,124,253,202,4,239,15,133,244,254,139,44,194,59,44,202,255,
+ 15,133,244,255,255,15,132,244,255,255,15,183,70,252,254,141,180,253,134,233,
+ 248,9,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,
+ 248,7,15,135,244,251,129,124,253,202,4,239,15,130,244,247,15,133,244,251,
+ 255,252,242,15,42,4,202,255,219,4,202,255,252,233,244,248,248,8,15,135,244,
+ 251,255,252,242,15,42,4,194,102,15,46,4,202,255,219,4,194,221,4,202,255,252,
+ 233,244,250,255,129,252,253,239,15,131,244,251,129,124,253,202,4,239,15,131,
+ 244,251,255,248,1,252,242,15,16,4,202,248,2,102,15,46,4,194,248,4,255,248,
+ 1,221,4,202,248,2,221,4,194,248,4,255,15,138,244,248,15,133,244,248,255,15,
+ 138,244,248,15,132,244,247,255,248,1,15,183,70,252,254,141,180,253,134,233,
+ 248,2,255,248,2,15,183,70,252,254,141,180,253,134,233,248,1,255,252,233,244,
+ 9,255,129,252,253,239,15,132,244,49,129,124,253,202,4,239,15,132,244,49,255,
+ 57,108,202,4,15,133,244,2,129,252,253,239,15,131,244,1,139,12,202,139,4,194,
+ 57,193,15,132,244,1,129,252,253,239,15,135,244,2,129,252,253,239,15,130,244,
+ 2,139,169,233,133,252,237,15,132,244,2,252,246,133,233,235,15,133,244,2,255,
+ 49,252,237,255,189,1,0,0,0,255,252,233,244,48,255,248,3,129,252,253,239,255,
+ 15,133,244,9,255,252,233,244,49,255,72,252,247,208,139,108,202,4,131,198,
+ 4,129,252,253,239,15,133,244,249,139,12,202,59,12,135,255,139,108,202,4,131,
+ 198,4,255,129,252,253,239,15,133,244,253,129,124,253,199,4,239,15,133,244,
+ 254,139,44,199,59,44,202,255,15,183,70,252,254,141,180,253,134,233,248,9,
+ 139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,7,
+ 15,135,244,249,129,124,253,199,4,239,15,130,244,247,255,252,242,15,42,4,199,
+ 255,219,4,199,255,252,233,244,248,248,8,255,252,242,15,42,4,202,102,15,46,
+ 4,199,255,219,4,202,221,4,199,255,129,252,253,239,15,131,244,249,255,248,
+ 1,252,242,15,16,4,199,248,2,102,15,46,4,202,248,4,255,248,1,221,4,199,248,
+ 2,221,4,202,248,4,255,72,252,247,208,139,108,202,4,131,198,4,57,197,255,15,
+ 133,244,249,15,183,70,252,254,141,180,253,134,233,248,2,139,6,15,182,204,
+ 15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,3,129,252,253,239,
+ 15,133,244,2,252,233,244,49,255,15,132,244,248,129,252,253,239,15,132,244,
+ 49,15,183,70,252,254,141,180,253,134,233,248,2,139,6,15,182,204,15,182,232,
+ 131,198,4,193,232,16,252,255,36,252,235,255,139,108,194,4,131,198,4,129,252,
+ 253,239,255,137,108,202,4,139,44,194,137,44,202,255,72,139,44,194,72,137,
+ 44,202,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,
+ 255,49,252,237,129,124,253,194,4,239,129,213,239,137,108,202,4,139,6,15,182,
+ 204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,255,129,124,253,194,
+ 4,239,15,133,244,251,139,44,194,252,247,221,15,128,244,250,199,68,202,4,237,
+ 137,44,202,248,9,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,
+ 36,252,235,248,4,199,68,202,4,0,0,224,65,199,4,202,0,0,0,0,252,233,244,9,
+ 248,5,15,135,244,54,255,129,124,253,194,4,239,15,131,244,54,255,252,242,15,
+ 16,4,194,72,184,237,237,102,72,15,110,200,15,87,193,252,242,15,17,4,202,255,
+ 221,4,194,217,224,221,28,202,255,129,124,253,194,4,239,15,133,244,248,139,
+ 4,194,255,139,128,233,248,1,199,68,202,4,237,137,4,202,255,15,87,192,252,
+ 242,15,42,128,233,248,1,252,242,15,17,4,202,255,219,128,233,248,1,221,28,
+ 202,255,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,
+ 248,2,129,124,253,194,4,239,15,133,244,57,139,12,194,255,139,169,233,131,
+ 252,253,0,15,133,244,255,248,3,255,248,58,137,213,232,251,1,20,255,252,242,
+ 15,42,192,255,137,252,234,15,182,78,252,253,252,233,244,1,255,248,9,252,246,
+ 133,233,235,15,133,244,3,252,233,244,57,255,15,182,252,236,15,182,192,255,
+ 129,124,253,252,234,4,239,15,133,244,51,129,124,253,199,4,239,15,133,244,
+ 51,139,44,252,234,3,44,199,15,128,244,50,255,129,124,253,252,234,4,239,15,
+ 133,244,53,129,124,253,199,4,239,15,133,244,53,139,4,199,3,4,252,234,15,128,
+ 244,52,255,129,124,253,252,234,4,239,15,133,244,56,129,124,253,194,4,239,
+ 15,133,244,56,139,44,252,234,3,44,194,15,128,244,55,255,199,68,202,4,237,
+ 255,129,124,253,252,234,4,239,15,131,244,51,255,129,124,253,199,4,239,15,
+ 131,244,51,255,252,242,15,16,4,252,234,252,242,15,88,4,199,255,221,4,252,
+ 234,220,4,199,255,129,124,253,252,234,4,239,15,131,244,53,255,129,124,253,
+ 199,4,239,15,131,244,53,255,252,242,15,16,4,199,252,242,15,88,4,252,234,255,
+ 221,4,199,220,4,252,234,255,129,124,253,252,234,4,239,15,131,244,56,129,124,
+ 253,194,4,239,15,131,244,56,255,252,242,15,16,4,252,234,252,242,15,88,4,194,
+ 255,221,4,252,234,220,4,194,255,129,124,253,252,234,4,239,15,133,244,51,129,
+ 124,253,199,4,239,15,133,244,51,139,44,252,234,43,44,199,15,128,244,50,255,
+ 129,124,253,252,234,4,239,15,133,244,53,129,124,253,199,4,239,15,133,244,
+ 53,139,4,199,43,4,252,234,15,128,244,52,255,129,124,253,252,234,4,239,15,
+ 133,244,56,129,124,253,194,4,239,15,133,244,56,139,44,252,234,43,44,194,15,
+ 128,244,55,255,252,242,15,16,4,252,234,252,242,15,92,4,199,255,221,4,252,
+ 234,220,36,199,255,252,242,15,16,4,199,252,242,15,92,4,252,234,255,221,4,
+ 199,220,36,252,234,255,252,242,15,16,4,252,234,252,242,15,92,4,194,255,221,
+ 4,252,234,220,36,194,255,129,124,253,252,234,4,239,15,133,244,51,129,124,
+ 253,199,4,239,15,133,244,51,139,44,252,234,15,175,44,199,15,128,244,50,255,
+ 129,124,253,252,234,4,239,15,133,244,53,129,124,253,199,4,239,15,133,244,
+ 53,139,4,199,15,175,4,252,234,15,128,244,52,255,129,124,253,252,234,4,239,
+ 15,133,244,56,129,124,253,194,4,239,15,133,244,56,139,44,252,234,15,175,44,
+ 194,15,128,244,55,255,252,242,15,16,4,252,234,252,242,15,89,4,199,255,221,
+ 4,252,234,220,12,199,255,252,242,15,16,4,199,252,242,15,89,4,252,234,255,
+ 221,4,199,220,12,252,234,255,252,242,15,16,4,252,234,252,242,15,89,4,194,
+ 255,221,4,252,234,220,12,194,255,252,242,15,16,4,252,234,252,242,15,94,4,
+ 199,255,221,4,252,234,220,52,199,255,252,242,15,16,4,199,252,242,15,94,4,
+ 252,234,255,221,4,199,220,52,252,234,255,252,242,15,16,4,252,234,252,242,
+ 15,94,4,194,255,221,4,252,234,220,52,194,255,252,242,15,16,4,252,234,252,
+ 242,15,16,12,199,255,221,4,252,234,221,4,199,255,252,242,15,16,4,199,252,
+ 242,15,16,12,252,234,255,221,4,199,221,4,252,234,255,252,242,15,16,4,252,
+ 234,252,242,15,16,12,194,255,221,4,252,234,221,4,194,255,248,168,232,244,
+ 157,255,252,233,244,168,255,232,244,117,255,15,182,252,236,15,182,192,139,
+ 76,36,96,137,145,233,141,20,194,65,137,192,65,41,232,248,36,137,205,137,116,
+ 36,100,232,251,1,29,139,149,233,133,192,15,133,244,45,15,182,110,252,255,
+ 15,182,78,252,253,72,139,4,252,234,72,137,4,202,139,6,15,182,204,15,182,232,
+ 131,198,4,193,232,16,252,255,36,252,235,255,72,252,247,208,139,4,135,199,
+ 68,202,4,237,137,4,202,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,
+ 255,36,252,235,255,15,191,192,199,68,202,4,237,137,4,202,255,15,191,192,252,
+ 242,15,42,192,252,242,15,17,4,202,255,223,70,252,254,221,28,202,255,252,242,
+ 15,16,4,199,252,242,15,17,4,202,255,221,4,199,221,28,202,255,72,252,247,208,
+ 137,68,202,4,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,
+ 252,235,255,141,76,202,12,141,68,194,4,189,237,137,105,252,248,248,1,137,
+ 41,131,193,8,57,193,15,134,244,1,139,6,15,182,204,15,182,232,131,198,4,193,
+ 232,16,252,255,36,252,235,255,139,106,252,248,139,172,253,133,233,139,173,
+ 233,72,139,69,0,72,137,4,202,139,6,15,182,204,15,182,232,131,198,4,193,232,
+ 16,252,255,36,252,235,255,139,106,252,248,139,172,253,141,233,128,189,233,
+ 0,139,173,233,139,12,194,139,68,194,4,137,77,0,137,69,4,15,132,244,247,252,
+ 246,133,233,235,15,133,244,248,248,1,139,6,15,182,204,15,182,232,131,198,
+ 4,193,232,16,252,255,36,252,235,248,2,129,232,239,129,252,248,239,15,134,
+ 244,1,252,246,129,233,235,15,132,244,1,135,213,141,139,233,255,232,251,1,
+ 30,137,252,234,252,233,244,1,255,72,252,247,208,139,106,252,248,139,172,253,
+ 141,233,139,12,135,139,133,233,137,8,199,64,4,237,252,246,133,233,235,15,
+ 133,244,248,248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,
+ 36,252,235,248,2,252,246,129,233,235,15,132,244,1,128,189,233,0,15,132,244,
+ 1,137,213,137,194,141,139,233,232,251,1,30,137,252,234,252,233,244,1,255,
+ 139,106,252,248,255,252,242,15,16,4,199,255,139,172,253,141,233,139,141,233,
+ 255,252,242,15,17,1,255,221,25,255,72,252,247,208,139,106,252,248,139,172,
+ 253,141,233,139,141,233,137,65,4,139,6,15,182,204,15,182,232,131,198,4,193,
+ 232,16,252,255,36,252,235,255,141,180,253,134,233,139,108,36,96,131,189,233,
+ 0,15,132,244,247,137,149,233,141,20,202,137,252,233,232,251,1,31,139,149,
+ 233,248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,
+ 235,255,72,252,247,208,139,108,36,96,137,149,233,68,139,66,252,248,139,20,
+ 135,137,252,233,137,116,36,100,232,251,1,32,139,149,233,15,182,78,252,253,
+ 137,4,202,199,68,202,4,237,139,6,15,182,204,15,182,232,131,198,4,193,232,
+ 16,252,255,36,252,235,255,139,108,36,96,137,149,233,139,139,233,59,139,233,
+ 137,116,36,100,15,131,244,251,248,1,65,137,192,37,252,255,7,0,0,65,193,232,
+ 11,61,252,255,7,0,0,15,132,244,249,248,2,137,252,233,137,194,232,251,1,33,
+ 139,149,233,15,182,78,252,253,137,4,202,199,68,202,4,237,139,6,15,182,204,
+ 15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,3,184,1,8,0,0,252,
+ 233,244,2,248,5,137,252,233,232,251,1,34,15,183,70,252,254,252,233,244,1,
+ 255,72,252,247,208,139,108,36,96,139,139,233,137,116,36,100,59,139,233,137,
+ 149,233,15,131,244,249,248,2,139,20,135,137,252,233,232,251,1,35,139,149,
+ 233,15,182,78,252,253,137,4,202,199,68,202,4,237,139,6,15,182,204,15,182,
+ 232,131,198,4,193,232,16,252,255,36,252,235,248,3,137,252,233,232,251,1,34,
+ 15,183,70,252,254,72,252,247,208,252,233,244,2,255,72,252,247,208,139,106,
+ 252,248,139,173,233,139,4,135,252,233,244,169,255,72,252,247,208,139,106,
+ 252,248,139,173,233,139,4,135,252,233,244,170,255,15,182,252,236,15,182,192,
+ 129,124,253,252,234,4,239,15,133,244,39,139,44,252,234,255,129,124,253,194,
+ 4,239,15,133,244,251,139,4,194,255,129,124,253,194,4,239,15,131,244,251,255,
+ 252,242,15,16,4,194,252,242,15,45,192,252,242,15,42,200,102,15,46,193,255,
+ 15,133,244,39,255,59,133,233,15,131,244,39,193,224,3,3,133,233,129,120,253,
+ 4,239,15,132,244,248,72,139,40,72,137,44,202,248,1,139,6,15,182,204,15,182,
+ 232,131,198,4,193,232,16,252,255,36,252,235,248,2,131,189,233,0,15,132,244,
+ 249,139,141,233,252,246,129,233,235,15,132,244,39,15,182,78,252,253,248,3,
+ 199,68,202,4,237,252,233,244,1,248,5,255,129,124,253,194,4,239,15,133,244,
+ 39,139,4,194,252,233,244,169,255,15,182,252,236,15,182,192,72,252,247,208,
+ 139,4,135,129,124,253,252,234,4,239,15,133,244,37,139,44,252,234,248,169,
+ 139,141,233,35,136,233,105,201,239,3,141,233,248,1,129,185,233,239,15,133,
+ 244,250,57,129,233,15,133,244,250,129,121,253,4,239,15,132,244,251,15,182,
+ 70,252,253,72,139,41,72,137,44,194,248,2,255,139,6,15,182,204,15,182,232,
+ 131,198,4,193,232,16,252,255,36,252,235,248,3,15,182,70,252,253,199,68,194,
+ 4,237,252,233,244,2,248,4,139,137,233,133,201,15,133,244,1,248,5,139,141,
+ 233,133,201,15,132,244,3,252,246,129,233,235,15,133,244,3,252,233,244,37,
+ 255,15,182,252,236,15,182,192,129,124,253,252,234,4,239,15,133,244,38,139,
+ 44,252,234,59,133,233,15,131,244,38,193,224,3,3,133,233,129,120,253,4,239,
+ 15,132,244,248,72,139,40,72,137,44,202,248,1,139,6,15,182,204,15,182,232,
+ 131,198,4,193,232,16,252,255,36,252,235,248,2,131,189,233,0,15,132,244,249,
+ 139,141,233,252,246,129,233,235,15,132,244,38,255,15,182,78,252,253,248,3,
+ 199,68,202,4,237,252,233,244,1,255,15,182,252,236,15,182,192,129,124,253,
+ 252,234,4,239,15,133,244,42,139,44,252,234,255,15,133,244,42,255,59,133,233,
+ 15,131,244,42,193,224,3,3,133,233,129,120,253,4,239,15,132,244,249,248,1,
+ 252,246,133,233,235,15,133,244,253,248,2,72,139,44,202,72,137,40,139,6,15,
+ 182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,3,131,189,
+ 233,0,15,132,244,1,139,141,233,252,246,129,233,235,255,15,132,244,42,15,182,
+ 78,252,253,252,233,244,1,248,5,129,124,253,194,4,239,15,133,244,42,139,4,
+ 194,252,233,244,170,248,7,128,165,233,235,139,139,233,137,171,233,137,141,
+ 233,15,182,78,252,253,252,233,244,2,255,15,182,252,236,15,182,192,72,252,
+ 247,208,139,4,135,129,124,253,252,234,4,239,15,133,244,40,139,44,252,234,
+ 248,170,139,141,233,35,136,233,105,201,239,198,133,233,0,3,141,233,248,1,
+ 129,185,233,239,15,133,244,251,57,129,233,15,133,244,251,129,121,253,4,239,
+ 15,132,244,250,248,2,255,252,246,133,233,235,15,133,244,253,248,3,15,182,
+ 70,252,253,72,139,44,194,72,137,41,139,6,15,182,204,15,182,232,131,198,4,
+ 193,232,16,252,255,36,252,235,248,4,131,189,233,0,15,132,244,2,137,76,36,
+ 80,139,141,233,252,246,129,233,235,15,132,244,40,139,76,36,80,252,233,244,
+ 2,248,5,139,137,233,133,201,15,133,244,1,255,139,141,233,133,201,15,132,244,
+ 252,252,246,129,233,235,15,132,244,40,248,6,137,68,36,80,199,68,36,84,237,
+ 137,108,36,32,139,76,36,96,137,145,233,76,141,68,36,80,137,252,234,137,205,
+ 137,116,36,100,232,251,1,36,139,149,233,139,108,36,32,137,193,252,233,244,
+ 2,248,7,128,165,233,235,139,131,233,137,171,233,137,133,233,252,233,244,3,
+ 255,15,182,252,236,15,182,192,129,124,253,252,234,4,239,15,133,244,41,139,
+ 44,252,234,59,133,233,15,131,244,41,193,224,3,3,133,233,129,120,253,4,239,
+ 15,132,244,249,248,1,252,246,133,233,235,15,133,244,253,248,2,72,139,12,202,
+ 72,137,8,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,
+ 235,248,3,131,189,233,0,15,132,244,1,255,139,141,233,252,246,129,233,235,
+ 15,132,244,41,15,182,78,252,253,252,233,244,1,248,7,128,165,233,235,139,139,
+ 233,137,171,233,137,141,233,15,182,78,252,253,252,233,244,2,255,137,124,36,
+ 80,139,60,199,248,1,141,12,202,139,105,252,248,252,246,133,233,235,15,133,
+ 244,253,248,2,139,68,36,84,131,232,1,15,132,244,250,1,252,248,59,133,233,
+ 15,135,244,251,41,252,248,193,231,3,3,189,233,248,3,72,139,41,131,193,8,72,
+ 137,47,131,199,8,131,232,1,15,133,244,3,248,4,139,124,36,80,139,6,15,182,
+ 204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,5,139,76,36,96,
+ 137,145,233,137,252,234,65,137,192,137,205,137,116,36,100,232,251,1,37,139,
+ 149,233,15,182,78,252,253,252,233,244,1,248,7,255,128,165,233,235,139,131,
+ 233,137,171,233,137,133,233,252,233,244,2,255,3,68,36,84,255,129,124,253,
+ 202,4,239,139,44,202,15,133,244,59,141,84,202,8,137,114,252,252,139,181,233,
+ 139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,252,235,255,141,76,
+ 202,8,137,215,139,105,252,248,129,121,253,252,252,239,15,133,244,29,248,60,
+ 139,114,252,252,252,247,198,237,15,133,244,253,248,1,137,106,252,248,137,
+ 68,36,84,131,232,1,15,132,244,249,248,2,72,139,41,131,193,8,72,137,47,131,
+ 199,8,131,232,1,15,133,244,2,139,106,252,248,248,3,139,68,36,84,128,189,233,
+ 1,15,135,244,251,248,4,139,181,233,139,14,15,182,252,233,15,182,205,131,198,
+ 4,252,255,36,252,235,248,5,255,252,247,198,237,15,133,244,4,15,182,78,252,
+ 253,72,252,247,209,141,12,202,139,121,252,248,139,191,233,139,191,233,252,
+ 233,244,4,248,7,129,252,238,239,252,247,198,237,15,133,244,254,41,252,242,
+ 137,215,139,114,252,252,252,233,244,1,248,8,129,198,239,252,233,244,1,255,
+ 141,76,202,8,72,139,105,232,72,139,65,252,240,72,137,41,72,137,65,8,139,105,
+ 224,139,65,228,137,105,252,248,137,65,252,252,129,252,248,239,184,237,15,
+ 133,244,29,137,202,137,114,252,252,139,181,233,139,14,15,182,252,233,15,182,
+ 205,131,198,4,252,255,36,252,235,255,137,124,36,80,137,92,36,84,139,108,202,
+ 252,240,139,68,202,252,248,139,157,233,131,198,4,139,189,233,248,1,57,216,
+ 15,131,244,251,129,124,253,199,4,239,15,132,244,250,255,219,68,202,252,248,
+ 255,72,139,44,199,72,137,108,202,8,131,192,1,255,137,68,202,252,248,248,2,
+ 15,183,70,252,254,141,180,253,134,233,248,3,139,92,36,84,139,124,36,80,139,
+ 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,4,131,
+ 192,1,255,137,68,202,252,248,255,252,233,244,1,248,5,41,216,248,6,59,133,
+ 233,15,135,244,3,105,252,248,239,3,189,233,129,191,233,239,15,132,244,253,
+ 141,92,24,1,72,139,175,233,72,139,135,233,72,137,44,202,72,137,68,202,8,137,
+ 92,202,252,248,252,233,244,2,248,7,131,192,1,252,233,244,6,255,129,124,253,
+ 202,252,236,239,15,133,244,251,139,108,202,232,129,124,253,202,252,244,239,
+ 15,133,244,251,129,124,253,202,252,252,239,15,133,244,251,128,189,233,235,
+ 15,133,244,251,141,180,253,134,233,199,68,202,252,248,0,0,0,0,248,1,139,6,
+ 15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,248,5,198,70,
+ 252,252,235,141,180,253,134,233,198,6,235,252,233,244,1,255,15,182,252,236,
+ 15,182,192,137,124,36,80,141,188,253,194,233,141,12,202,43,122,252,252,133,
+ 252,237,15,132,244,251,141,108,252,233,252,248,57,215,15,131,244,248,248,
+ 1,72,139,71,252,248,131,199,8,72,137,1,131,193,8,57,252,233,15,131,244,249,
+ 57,215,15,130,244,1,248,2,199,65,4,237,131,193,8,57,252,233,15,130,244,2,
+ 248,3,139,124,36,80,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,
+ 255,36,252,235,248,5,199,68,36,84,1,0,0,0,137,208,41,252,248,15,134,244,3,
+ 137,197,193,252,237,3,131,197,1,137,108,36,84,139,108,36,96,1,200,59,133,
+ 233,15,135,244,253,248,6,255,72,139,71,252,248,131,199,8,72,137,1,131,193,
+ 8,57,215,15,130,244,6,252,233,244,3,248,7,137,149,233,137,141,233,137,116,
+ 36,100,41,215,139,84,36,84,131,252,234,1,137,252,233,232,251,1,0,139,149,
+ 233,139,141,233,1,215,252,233,244,6,255,193,225,3,255,248,1,139,114,252,252,
+ 137,68,36,84,252,247,198,237,15,133,244,253,255,248,13,137,215,131,232,1,
+ 15,132,244,249,248,2,72,139,44,15,72,137,111,252,248,131,199,8,131,232,1,
+ 15,133,244,2,248,3,139,68,36,84,15,182,110,252,255,248,5,57,197,15,135,244,
+ 252,255,72,139,44,10,72,137,106,252,248,255,248,5,56,70,252,255,15,135,244,
+ 252,255,15,182,78,252,253,72,252,247,209,141,20,202,139,122,252,248,139,191,
+ 233,139,191,233,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,
+ 36,252,235,248,6,255,199,71,252,252,237,131,199,8,255,199,68,194,252,244,
+ 237,255,131,192,1,252,233,244,5,248,7,141,174,233,252,247,197,237,15,133,
+ 244,14,41,252,234,255,1,252,233,255,137,252,245,209,252,237,129,229,239,102,
+ 129,172,253,43,233,238,15,130,244,149,255,141,12,202,255,129,121,253,4,239,
+ 15,133,244,255,255,129,121,253,12,239,15,133,244,61,129,121,253,20,239,15,
+ 133,244,61,139,41,131,121,16,0,15,140,244,251,255,129,121,253,12,239,15,133,
+ 244,165,129,121,253,20,239,15,133,244,165,255,139,105,16,133,252,237,15,136,
+ 244,251,3,41,15,128,244,247,137,41,255,59,105,8,199,65,28,237,137,105,24,
+ 255,15,142,244,253,248,1,248,6,141,180,253,134,233,255,141,180,253,134,233,
+ 15,183,70,252,254,15,142,245,248,1,248,6,255,15,143,244,253,248,6,141,180,
+ 253,134,233,248,1,255,248,7,139,6,15,182,204,15,182,232,131,198,4,193,232,
+ 16,252,255,36,252,235,248,5,255,3,41,15,128,244,1,137,41,255,15,141,244,7,
+ 255,141,180,253,134,233,15,183,70,252,254,15,141,245,255,15,140,244,7,255,
+ 252,233,244,6,248,9,255,129,121,253,4,239,255,15,131,244,61,129,121,253,12,
+ 239,15,131,244,61,255,129,121,253,12,239,15,131,244,165,129,121,253,20,239,
+ 15,131,244,165,255,139,105,20,255,129,252,253,239,15,131,244,61,255,252,242,
+ 15,16,1,252,242,15,16,73,8,255,252,242,15,88,65,16,252,242,15,17,1,133,252,
+ 237,15,136,244,249,255,15,140,244,249,255,102,15,46,200,248,1,252,242,15,
+ 17,65,24,255,221,65,8,221,1,255,220,65,16,221,17,221,81,24,133,252,237,15,
+ 136,244,247,255,221,81,24,15,140,244,247,255,217,201,248,1,255,15,183,70,
+ 252,254,255,15,131,244,7,255,15,131,244,248,141,180,253,134,233,255,141,180,
+ 253,134,233,15,183,70,252,254,15,131,245,255,15,130,244,7,255,15,130,244,
+ 248,141,180,253,134,233,255,248,3,102,15,46,193,252,233,244,1,255,141,12,
+ 202,139,105,4,129,252,253,239,15,132,244,247,255,137,105,252,252,139,41,137,
+ 105,252,248,252,233,245,255,141,180,253,134,233,139,1,137,105,252,252,137,
+ 65,252,248,255,139,139,233,139,4,129,72,139,128,233,139,108,36,96,137,147,
+ 233,137,171,233,76,137,100,36,80,76,137,108,36,32,76,137,116,36,24,76,137,
+ 124,36,16,72,137,225,72,129,252,236,239,102,15,127,49,102,15,127,185,233,
+ 102,68,15,127,129,233,102,68,15,127,137,233,102,68,15,127,145,233,102,68,
+ 15,127,153,233,102,68,15,127,161,233,102,68,15,127,169,233,102,68,15,127,
+ 177,233,102,68,15,127,185,233,252,255,224,255,141,180,253,134,233,139,6,15,
+ 182,204,15,182,232,131,198,4,193,232,16,252,255,36,252,235,255,137,252,245,
+ 209,252,237,129,229,239,102,129,172,253,43,233,238,15,130,244,151,255,139,
+ 190,233,139,108,36,96,141,12,202,59,141,233,15,135,244,24,15,182,142,233,
+ 57,200,15,134,244,249,248,2,255,15,183,70,252,254,252,233,245,255,248,3,199,
+ 68,194,252,252,237,131,192,1,57,200,15,134,244,3,252,233,244,2,255,141,44,
+ 197,237,141,4,194,139,122,252,248,137,104,252,252,137,120,252,248,139,108,
+ 36,96,141,12,200,59,141,233,15,135,244,23,137,209,137,194,15,182,174,233,
+ 133,252,237,15,132,244,248,248,1,131,193,8,57,209,15,131,244,249,139,121,
+ 252,248,137,56,139,121,252,252,137,120,4,131,192,8,199,65,252,252,237,131,
+ 252,237,1,15,133,244,1,248,2,255,139,190,233,139,6,15,182,204,15,182,232,
+ 131,198,4,193,232,16,252,255,36,252,235,255,248,3,199,64,4,237,131,192,8,
+ 131,252,237,1,15,133,244,3,252,233,244,2,255,139,106,252,248,72,139,189,233,
+ 139,108,36,96,141,68,194,252,248,137,149,233,141,136,233,59,141,233,137,133,
+ 233,255,72,137,252,250,137,252,233,255,15,135,244,22,199,131,233,237,255,
+ 252,255,215,255,252,255,147,233,255,199,131,233,237,139,149,233,141,12,194,
+ 252,247,217,3,141,233,139,114,252,252,252,233,244,12,255,254,0
+};
+
+enum {
+ GLOB_vm_returnp,
+ GLOB_cont_dispatch,
+ GLOB_vm_returnc,
+ GLOB_BC_RET_Z,
+ GLOB_vm_return,
+ GLOB_vm_leave_cp,
+ GLOB_vm_leave_unw,
+ GLOB_vm_unwind_c,
+ GLOB_vm_unwind_c_eh,
+ GLOB_vm_unwind_rethrow,
+ GLOB_vm_unwind_ff,
+ GLOB_vm_unwind_ff_eh,
+ GLOB_vm_growstack_c,
+ GLOB_vm_growstack_v,
+ GLOB_vm_growstack_f,
+ GLOB_vm_resume,
+ GLOB_vm_pcall,
+ GLOB_vm_call,
+ GLOB_vm_call_dispatch,
+ GLOB_vmeta_call,
+ GLOB_vm_call_dispatch_f,
+ GLOB_vm_cpcall,
+ GLOB_cont_ffi_callback,
+ GLOB_vm_call_tail,
+ GLOB_cont_cat,
+ GLOB_cont_ra,
+ GLOB_BC_CAT_Z,
+ GLOB_vmeta_tgets,
+ GLOB_vmeta_tgetb,
+ GLOB_vmeta_tgetv,
+ GLOB_vmeta_tsets,
+ GLOB_vmeta_tsetb,
+ GLOB_vmeta_tsetv,
+ GLOB_cont_nop,
+ GLOB_vmeta_comp,
+ GLOB_vmeta_binop,
+ GLOB_cont_condt,
+ GLOB_cont_condf,
+ GLOB_vmeta_equal,
+ GLOB_vmeta_equal_cd,
+ GLOB_vmeta_arith_vno,
+ GLOB_vmeta_arith_vn,
+ GLOB_vmeta_arith_nvo,
+ GLOB_vmeta_arith_nv,
+ GLOB_vmeta_unm,
+ GLOB_vmeta_arith_vvo,
+ GLOB_vmeta_arith_vv,
+ GLOB_vmeta_len,
+ GLOB_BC_LEN_Z,
+ GLOB_vmeta_call_ra,
+ GLOB_BC_CALLT_Z,
+ GLOB_vmeta_for,
+ GLOB_ff_assert,
+ GLOB_fff_fallback,
+ GLOB_fff_res_,
+ GLOB_ff_type,
+ GLOB_fff_res1,
+ GLOB_ff_getmetatable,
+ GLOB_ff_setmetatable,
+ GLOB_ff_rawget,
+ GLOB_ff_tonumber,
+ GLOB_fff_resi,
+ GLOB_fff_resxmm0,
+ GLOB_fff_resn,
+ GLOB_ff_tostring,
+ GLOB_fff_gcstep,
+ GLOB_ff_next,
+ GLOB_fff_res2,
+ GLOB_fff_res,
+ GLOB_ff_pairs,
+ GLOB_ff_ipairs_aux,
+ GLOB_fff_res0,
+ GLOB_ff_ipairs,
+ GLOB_ff_pcall,
+ GLOB_ff_xpcall,
+ GLOB_ff_coroutine_resume,
+ GLOB_ff_coroutine_wrap_aux,
+ GLOB_ff_coroutine_yield,
+ GLOB_ff_math_abs,
+ GLOB_fff_resbit,
+ GLOB_ff_math_floor,
+ GLOB_vm_floor,
+ GLOB_ff_math_ceil,
+ GLOB_vm_ceil,
+ GLOB_ff_math_sqrt,
+ GLOB_ff_math_log,
+ GLOB_ff_math_log10,
+ GLOB_ff_math_exp,
+ GLOB_vm_exp_x87,
+ GLOB_ff_math_sin,
+ GLOB_ff_math_cos,
+ GLOB_ff_math_tan,
+ GLOB_ff_math_asin,
+ GLOB_ff_math_acos,
+ GLOB_ff_math_atan,
+ GLOB_ff_math_sinh,
+ GLOB_ff_math_cosh,
+ GLOB_ff_math_tanh,
+ GLOB_ff_math_deg,
+ GLOB_ff_math_rad,
+ GLOB_ff_math_atan2,
+ GLOB_ff_math_ldexp,
+ GLOB_ff_math_frexp,
+ GLOB_ff_math_modf,
+ GLOB_vm_trunc,
+ GLOB_ff_math_fmod,
+ GLOB_ff_math_pow,
+ GLOB_vm_pow,
+ GLOB_ff_math_min,
+ GLOB_ff_math_max,
+ GLOB_ff_string_len,
+ GLOB_ff_string_byte,
+ GLOB_ff_string_char,
+ GLOB_fff_newstr,
+ GLOB_ff_string_sub,
+ GLOB_fff_emptystr,
+ GLOB_ff_string_rep,
+ GLOB_fff_fallback_2,
+ GLOB_ff_string_reverse,
+ GLOB_fff_fallback_1,
+ GLOB_ff_string_lower,
+ GLOB_ff_string_upper,
+ GLOB_ff_table_getn,
+ GLOB_ff_bit_tobit,
+ GLOB_ff_bit_band,
+ GLOB_fff_fallback_bit_op,
+ GLOB_ff_bit_bor,
+ GLOB_ff_bit_bxor,
+ GLOB_ff_bit_bswap,
+ GLOB_ff_bit_bnot,
+ GLOB_ff_bit_lshift,
+ GLOB_ff_bit_rshift,
+ GLOB_ff_bit_arshift,
+ GLOB_ff_bit_rol,
+ GLOB_ff_bit_ror,
+ GLOB_vm_record,
+ GLOB_vm_rethook,
+ GLOB_vm_inshook,
+ GLOB_cont_hook,
+ GLOB_vm_hotloop,
+ GLOB_vm_callhook,
+ GLOB_vm_hotcall,
+ GLOB_vm_exit_handler,
+ GLOB_vm_exit_interp,
+ GLOB_vm_floor_sse,
+ GLOB_vm_ceil_sse,
+ GLOB_vm_trunc_sse,
+ GLOB_vm_mod,
+ GLOB_vm_exp2_x87,
+ GLOB_vm_exp2raw,
+ GLOB_vm_pow_sse,
+ GLOB_vm_powi_sse,
+ GLOB_vm_foldfpm,
+ GLOB_vm_foldarith,
+ GLOB_vm_cpuid,
+ GLOB_assert_bad_for_arg_type,
+ GLOB_vm_ffi_callback,
+ GLOB_vm_ffi_call,
+ GLOB_BC_MODVN_Z,
+ GLOB_BC_TGETS_Z,
+ GLOB_BC_TSETS_Z,
+ GLOB__MAX
+};
+static const char *const globnames[] = {
+ "vm_returnp",
+ "cont_dispatch",
+ "vm_returnc",
+ "BC_RET_Z",
+ "vm_return",
+ "vm_leave_cp",
+ "vm_leave_unw",
+ "vm_unwind_c@8",
+ "vm_unwind_c_eh",
+ "vm_unwind_rethrow",
+ "vm_unwind_ff@4",
+ "vm_unwind_ff_eh",
+ "vm_growstack_c",
+ "vm_growstack_v",
+ "vm_growstack_f",
+ "vm_resume",
+ "vm_pcall",
+ "vm_call",
+ "vm_call_dispatch",
+ "vmeta_call",
+ "vm_call_dispatch_f",
+ "vm_cpcall",
+ "cont_ffi_callback",
+ "vm_call_tail",
+ "cont_cat",
+ "cont_ra",
+ "BC_CAT_Z",
+ "vmeta_tgets",
+ "vmeta_tgetb",
+ "vmeta_tgetv",
+ "vmeta_tsets",
+ "vmeta_tsetb",
+ "vmeta_tsetv",
+ "cont_nop",
+ "vmeta_comp",
+ "vmeta_binop",
+ "cont_condt",
+ "cont_condf",
+ "vmeta_equal",
+ "vmeta_equal_cd",
+ "vmeta_arith_vno",
+ "vmeta_arith_vn",
+ "vmeta_arith_nvo",
+ "vmeta_arith_nv",
+ "vmeta_unm",
+ "vmeta_arith_vvo",
+ "vmeta_arith_vv",
+ "vmeta_len",
+ "BC_LEN_Z",
+ "vmeta_call_ra",
+ "BC_CALLT_Z",
+ "vmeta_for",
+ "ff_assert",
+ "fff_fallback",
+ "fff_res_",
+ "ff_type",
+ "fff_res1",
+ "ff_getmetatable",
+ "ff_setmetatable",
+ "ff_rawget",
+ "ff_tonumber",
+ "fff_resi",
+ "fff_resxmm0",
+ "fff_resn",
+ "ff_tostring",
+ "fff_gcstep",
+ "ff_next",
+ "fff_res2",
+ "fff_res",
+ "ff_pairs",
+ "ff_ipairs_aux",
+ "fff_res0",
+ "ff_ipairs",
+ "ff_pcall",
+ "ff_xpcall",
+ "ff_coroutine_resume",
+ "ff_coroutine_wrap_aux",
+ "ff_coroutine_yield",
+ "ff_math_abs",
+ "fff_resbit",
+ "ff_math_floor",
+ "vm_floor",
+ "ff_math_ceil",
+ "vm_ceil",
+ "ff_math_sqrt",
+ "ff_math_log",
+ "ff_math_log10",
+ "ff_math_exp",
+ "vm_exp_x87",
+ "ff_math_sin",
+ "ff_math_cos",
+ "ff_math_tan",
+ "ff_math_asin",
+ "ff_math_acos",
+ "ff_math_atan",
+ "ff_math_sinh",
+ "ff_math_cosh",
+ "ff_math_tanh",
+ "ff_math_deg",
+ "ff_math_rad",
+ "ff_math_atan2",
+ "ff_math_ldexp",
+ "ff_math_frexp",
+ "ff_math_modf",
+ "vm_trunc",
+ "ff_math_fmod",
+ "ff_math_pow",
+ "vm_pow",
+ "ff_math_min",
+ "ff_math_max",
+ "ff_string_len",
+ "ff_string_byte",
+ "ff_string_char",
+ "fff_newstr",
+ "ff_string_sub",
+ "fff_emptystr",
+ "ff_string_rep",
+ "fff_fallback_2",
+ "ff_string_reverse",
+ "fff_fallback_1",
+ "ff_string_lower",
+ "ff_string_upper",
+ "ff_table_getn",
+ "ff_bit_tobit",
+ "ff_bit_band",
+ "fff_fallback_bit_op",
+ "ff_bit_bor",
+ "ff_bit_bxor",
+ "ff_bit_bswap",
+ "ff_bit_bnot",
+ "ff_bit_lshift",
+ "ff_bit_rshift",
+ "ff_bit_arshift",
+ "ff_bit_rol",
+ "ff_bit_ror",
+ "vm_record",
+ "vm_rethook",
+ "vm_inshook",
+ "cont_hook",
+ "vm_hotloop",
+ "vm_callhook",
+ "vm_hotcall",
+ "vm_exit_handler",
+ "vm_exit_interp",
+ "vm_floor_sse",
+ "vm_ceil_sse",
+ "vm_trunc_sse",
+ "vm_mod",
+ "vm_exp2_x87",
+ "vm_exp2raw",
+ "vm_pow_sse",
+ "vm_powi_sse",
+ "vm_foldfpm",
+ "vm_foldarith",
+ "vm_cpuid",
+ "assert_bad_for_arg_type",
+ "vm_ffi_callback",
+ "vm_ffi_call@4",
+ "BC_MODVN_Z",
+ "BC_TGETS_Z",
+ "BC_TSETS_Z",
+ (const char *)0
+};
+static const char *const extnames[] = {
+ "lj_state_growstack@8",
+ "lj_meta_tget",
+ "lj_meta_tset",
+ "lj_meta_comp",
+ "lj_meta_equal",
+ "lj_meta_equal_cd@8",
+ "lj_meta_arith",
+ "lj_meta_len@8",
+ "lj_meta_call",
+ "lj_meta_for@8",
+ "lj_tab_get",
+ "lj_str_fromnumber@8",
+ "lj_str_fromnum@8",
+ "lj_tab_next",
+ "lj_tab_getinth@8",
+ "lj_ffh_coroutine_wrap_err@8",
+ "lj_vm_sinh",
+ "lj_vm_cosh",
+ "lj_vm_tanh",
+ "lj_str_new",
+ "lj_tab_len@4",
+ "lj_gc_step@4",
+ "lj_dispatch_ins@8",
+ "lj_trace_hot@8",
+ "lj_dispatch_call@8",
+ "lj_trace_exit@8",
+ "lj_err_throw@8",
+ "lj_ccallback_enter@8",
+ "lj_ccallback_leave@8",
+ "lj_meta_cat",
+ "lj_gc_barrieruv@8",
+ "lj_func_closeuv@8",
+ "lj_func_newL_gc",
+ "lj_tab_new",
+ "lj_gc_step_fixtop@4",
+ "lj_tab_dup@8",
+ "lj_tab_newkey",
+ "lj_tab_reasize",
+ (const char *)0
+};
+#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V)
+#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V)
+#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V)
+#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V)
+#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V)
+#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V)
+#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V)
+#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V)
+#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V)
+#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V)
+#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V)
+#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V)
+#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V)
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx, int cmov, int sse)
+{
+ dasm_put(Dst, 0);
+ dasm_put(Dst, 2, FRAME_P, LJ_TTRUE, FRAME_TYPE, FRAME_C, FRAME_TYPE, DISPATCH_GL(vmstate), ~LJ_VMST_C);
+ dasm_put(Dst, 109, Dt1(->base), Dt1(->top), Dt1(->cframe), Dt1(->maxstack), LJ_TNIL);
+ dasm_put(Dst, 198, Dt1(->top), Dt1(->top), Dt1(->glref), Dt2(->vmstate), ~LJ_VMST_C, CFRAME_RAWMASK);
+ dasm_put(Dst, 276, 1+1, Dt1(->base), Dt1(->glref), GG_G2DISP, LJ_TFALSE, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, LUA_MINSTACK, -4+PC2PROTO(framesize), Dt1(->base));
+ dasm_put(Dst, 356, Dt1(->top), Dt1(->base), Dt1(->top), Dt7(->pc), FRAME_CP, CFRAME_RESUME, Dt1(->glref), GG_G2DISP, Dt1(->cframe), Dt1(->status), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->status), Dt1(->base), Dt1(->top), FRAME_TYPE);
+ dasm_put(Dst, 511, FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base));
+ dasm_put(Dst, 604, Dt1(->top), LJ_TFUNC, Dt7(->pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), FRAME_CP, LJ_TNIL);
+#if LJ_HASFFI
+ dasm_put(Dst, 764);
+#endif
+ dasm_put(Dst, 773, 0);
+#if LJ_HASFFI
+#endif
+ dasm_put(Dst, 782, Dt7(->pc), PC2PROTO(k));
+#if LJ_HASFFI
+ dasm_put(Dst, 796);
+#endif
+ dasm_put(Dst, 817, Dt1(->base), LJ_TSTR, BC_GGET, DISPATCH_GL(tmptv), LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 917, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 927);
+ } else {
+ }
+ dasm_put(Dst, 940, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 2+1, LJ_TSTR, BC_GSET);
+ dasm_put(Dst, 1087, DISPATCH_GL(tmptv), LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 917, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 927);
+ } else {
+ }
+ dasm_put(Dst, 1110, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 3+1, Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 1283, -BCBIAS_J*4, LJ_TISTRUECOND, LJ_TISTRUECOND, Dt1(->base));
+ dasm_put(Dst, 1383);
+#if LJ_HASFFI
+ dasm_put(Dst, 1403, Dt1(->base));
+#endif
+ dasm_put(Dst, 1434);
+#if LJ_DUALNUM
+ dasm_put(Dst, 1437);
+#endif
+ dasm_put(Dst, 1443);
+#if LJ_DUALNUM
+ dasm_put(Dst, 911);
+#endif
+ dasm_put(Dst, 1455);
+#if LJ_DUALNUM
+ dasm_put(Dst, 1437);
+#endif
+ dasm_put(Dst, 1483, Dt1(->base), Dt1(->base), FRAME_CONT, 2+1, Dt1(->base), Dt1(->base));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1589);
+#else
+ dasm_put(Dst, 1608);
+#endif
+ dasm_put(Dst, 1613, Dt1(->base), Dt1(->base), Dt7(->pc), Dt1(->base), Dt1(->base), GG_DISP2STATIC, 1+1, LJ_TISTRUECOND);
+ dasm_put(Dst, 1799, 1+1, ~LJ_TNUMX);
+ if (cmov) {
+ dasm_put(Dst, 1868);
+ } else {
+ dasm_put(Dst, 1872);
+ }
+ dasm_put(Dst, 1881, ((char *)(&((GCfuncC *)0)->upvalue)), LJ_TSTR, ~LJ_TLIGHTUD, 1+1, LJ_TTAB, Dt6(->metatable), LJ_TNIL);
+ dasm_put(Dst, 1960, DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable), LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), DtB(->next));
+ dasm_put(Dst, 2017, LJ_TNIL, LJ_TUDATA, LJ_TNUMX, LJ_TISNUM, LJ_TLIGHTUD);
+ dasm_put(Dst, 2083, LJ_TNUMX, DISPATCH_GL(gcroot[GCROOT_BASEMT]), 2+1, LJ_TTAB, Dt6(->metatable), LJ_TTAB, Dt6(->metatable), LJ_TTAB);
+ dasm_put(Dst, 2153, Dt6(->marked), LJ_GC_BLACK, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist), 2+1, LJ_TTAB);
+ dasm_put(Dst, 2242, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2256);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 2278);
+ } else {
+ dasm_put(Dst, 2288);
+ }
+ dasm_put(Dst, 2295, 1+1, LJ_TSTR, LJ_TSTR, LJ_TISNUM, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 2361, Dt1(->base));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2385);
+ } else {
+ dasm_put(Dst, 2390);
+ }
+ dasm_put(Dst, 2395, Dt1(->base), 1+1, LJ_TTAB, Dt1(->base), Dt1(->top), Dt1(->base), 1+2);
+ dasm_put(Dst, 2488, LJ_TNIL, LJ_TNIL, 1+1, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 2535, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 2544, Dt8(->upvalue[0]), LJ_TFUNC, LJ_TNIL, 1+3, 1+1, LJ_TTAB, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2530);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ dasm_put(Dst, 2599);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2604, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 2620, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 2653, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->hmask), 1+0);
+ dasm_put(Dst, 2515, 1+1, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 2535, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 2730, Dt8(->upvalue[0]), LJ_TFUNC);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2751, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 2763);
+ } else {
+ dasm_put(Dst, 2773);
+ }
+ dasm_put(Dst, 2780, 1+3, 1+1, 8+FRAME_PCALL, DISPATCH_GL(hookmask), HOOK_ACTIVE_SHIFT, 2+1, LJ_TFUNC);
+ dasm_put(Dst, 2844, LJ_TFUNC, 16+FRAME_PCALL, 1+1, LJ_TTHREAD, Dt1(->cframe), Dt1(->status), LUA_YIELD, Dt1(->top));
+ dasm_put(Dst, 2934, Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP);
+ dasm_put(Dst, 3022, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack), LJ_TTRUE, FRAME_TYPE);
+ dasm_put(Dst, 3135, LJ_TFALSE, Dt1(->top), Dt1(->top), 1+2, Dt1(->top), Dt1(->base), Dt8(->upvalue[0].gcr), Dt1(->cframe));
+ dasm_put(Dst, 3233, Dt1(->status), LUA_YIELD, Dt1(->top), Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top));
+ dasm_put(Dst, 3300, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack));
+ dasm_put(Dst, 3388, FRAME_TYPE, Dt1(->top), Dt1(->base), Dt1(->cframe), CFRAME_RESUME);
+ dasm_put(Dst, 3500, Dt1(->base), Dt1(->top), Dt1(->cframe), LUA_YIELD, Dt1(->status));
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 3527);
+ }
+ if (sse) {
+ dasm_put(Dst, 3530);
+ }
+ dasm_put(Dst, 3545, 1+1);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3556, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3636, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 3646, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32));
+ } else {
+ dasm_put(Dst, 3677);
+ }
+ dasm_put(Dst, 3694, 1+1, FRAME_TYPE, LJ_TNIL);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3790, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3636, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 3812);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3821);
+ }
+ dasm_put(Dst, 2283);
+ } else {
+ dasm_put(Dst, 3855);
+ if (LJ_DUALNUM) {
+ } else {
+ dasm_put(Dst, 2290);
+ }
+ }
+ dasm_put(Dst, 3861);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3790, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3636, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 3864);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3821);
+ }
+ dasm_put(Dst, 2283);
+ } else {
+ dasm_put(Dst, 3873);
+ if (LJ_DUALNUM) {
+ } else {
+ dasm_put(Dst, 2290);
+ }
+ }
+ if (sse) {
+ dasm_put(Dst, 3879, 1+1, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3908, 1+1, LJ_TISNUM);
+ }
+ dasm_put(Dst, 3937, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1);
+ dasm_put(Dst, 4006, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1);
+ dasm_put(Dst, 4063, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1);
+ dasm_put(Dst, 4126, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM);
+ dasm_put(Dst, 4216);
+ if (sse) {
+ dasm_put(Dst, 4228, 1+1, LJ_TISNUM);
+ } else {
+ }
+ dasm_put(Dst, 4253);
+ if (sse) {
+ dasm_put(Dst, 4267, 1+1, LJ_TISNUM);
+ } else {
+ }
+ dasm_put(Dst, 4292);
+ if (sse) {
+ dasm_put(Dst, 4306, 1+1, LJ_TISNUM);
+ } else {
+ }
+ dasm_put(Dst, 4331);
+ if (sse) {
+ dasm_put(Dst, 4347, 1+1, LJ_TISNUM, Dt8(->upvalue[0]));
+ } else {
+ dasm_put(Dst, 4386, 1+1, LJ_TISNUM, Dt8(->upvalue[0]));
+ }
+ dasm_put(Dst, 4419, 2+1, LJ_TISNUM, LJ_TISNUM, 2+1, LJ_TISNUM, LJ_TISNUM);
+ dasm_put(Dst, 4484, 1+1, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4583);
+ } else {
+ dasm_put(Dst, 4589);
+ }
+ dasm_put(Dst, 4598);
+ if (sse) {
+ dasm_put(Dst, 4623);
+ } else {
+ dasm_put(Dst, 4629);
+ }
+ dasm_put(Dst, 4632, 1+2);
+ if (sse) {
+ dasm_put(Dst, 4641);
+ } else {
+ dasm_put(Dst, 4649);
+ }
+ dasm_put(Dst, 4657);
+ if (sse) {
+ dasm_put(Dst, 4660, (unsigned int)(U64x(43500000,00000000)), (unsigned int)((U64x(43500000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 4687);
+ }
+ dasm_put(Dst, 4706);
+ if (sse) {
+ dasm_put(Dst, 4722, 1+1, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 4747, 1+1, LJ_TISNUM);
+ }
+ dasm_put(Dst, 4769);
+ if (sse) {
+ dasm_put(Dst, 4791);
+ } else {
+ dasm_put(Dst, 4817);
+ }
+ dasm_put(Dst, 4834, 1+2);
+ if (sse) {
+ dasm_put(Dst, 4874);
+ } else {
+ dasm_put(Dst, 4882);
+ }
+ dasm_put(Dst, 4892, 2+1, LJ_TISNUM, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4944, 2+1, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 4991, 2+1, LJ_TISNUM, LJ_TISNUM);
+ }
+ dasm_put(Dst, 5032, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5045, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4583);
+ } else {
+ }
+ dasm_put(Dst, 5095);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 5106, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5127);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ dasm_put(Dst, 5148);
+ } else {
+ }
+ dasm_put(Dst, 5173, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5186, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4583);
+ } else {
+ }
+ dasm_put(Dst, 5095);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 5106, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5127);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ dasm_put(Dst, 5236);
+ } else {
+ }
+ if (!sse) {
+ dasm_put(Dst, 5261);
+ }
+ dasm_put(Dst, 5270, 1+1, LJ_TSTR);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5292, Dt5(->len));
+ } else if (sse) {
+ dasm_put(Dst, 5300, Dt5(->len));
+ } else {
+ dasm_put(Dst, 5311, Dt5(->len));
+ }
+ dasm_put(Dst, 5319, 1+1, LJ_TSTR, Dt5(->len), Dt5([1]));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5295);
+ } else if (sse) {
+ dasm_put(Dst, 5357);
+ } else {
+ dasm_put(Dst, 5367);
+ }
+ dasm_put(Dst, 5380, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5411);
+ } else if (sse) {
+ dasm_put(Dst, 5434);
+ } else {
+ dasm_put(Dst, 5460);
+ }
+ dasm_put(Dst, 5484, Dt1(->base), Dt1(->base), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+2, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5592);
+ } else if (sse) {
+ dasm_put(Dst, 5604);
+ } else {
+ dasm_put(Dst, 5619);
+ }
+ dasm_put(Dst, 5631, LJ_TSTR, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2530);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ dasm_put(Dst, 5648, Dt5(->len));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5658);
+ } else if (sse) {
+ dasm_put(Dst, 5662);
+ } else {
+ }
+ dasm_put(Dst, 5669, sizeof(GCstr)-1);
+ dasm_put(Dst, 5744, 2+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 5803, LJ_TSTR, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5820);
+ } else if (sse) {
+ dasm_put(Dst, 5828);
+ } else {
+ dasm_put(Dst, 5839);
+ }
+ dasm_put(Dst, 5855, Dt5(->len), DISPATCH_GL(tmpbuf.sz), Dt5([1]), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(tmpbuf.buf), 1+1);
+ dasm_put(Dst, 5920, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 5983, 1+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz));
+ dasm_put(Dst, 6054, sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), 1+1);
+ dasm_put(Dst, 6139, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 6209, 1+1, LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6277);
+ } else if (sse) {
+ dasm_put(Dst, 6284);
+ } else {
+ }
+ dasm_put(Dst, 6294, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6310);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6327, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 106);
+ if (LJ_DUALNUM || sse) {
+ if (!sse) {
+ }
+ dasm_put(Dst, 6351);
+ } else {
+ }
+ dasm_put(Dst, 6356, 1+1);
+ if (sse) {
+ dasm_put(Dst, 6367, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 6377);
+ }
+ dasm_put(Dst, 2250, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6403);
+ } else {
+ }
+ dasm_put(Dst, 6418, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6443);
+ } else {
+ dasm_put(Dst, 6463);
+ }
+ if (sse) {
+ dasm_put(Dst, 6468);
+ } else {
+ }
+ dasm_put(Dst, 6485, 1+1);
+ if (sse) {
+ dasm_put(Dst, 6367, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 6377);
+ }
+ dasm_put(Dst, 2250, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6403);
+ } else {
+ }
+ dasm_put(Dst, 6418, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6503);
+ } else {
+ dasm_put(Dst, 6463);
+ }
+ if (sse) {
+ dasm_put(Dst, 6523);
+ } else {
+ }
+ dasm_put(Dst, 6540, 1+1);
+ if (sse) {
+ dasm_put(Dst, 6367, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 6377);
+ }
+ dasm_put(Dst, 2250, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6403);
+ } else {
+ }
+ dasm_put(Dst, 6418, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6558);
+ } else {
+ dasm_put(Dst, 6463);
+ }
+ if (sse) {
+ dasm_put(Dst, 6578);
+ } else {
+ }
+ dasm_put(Dst, 6595, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6327, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6618, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6327, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6642);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6351);
+ } else if (sse) {
+ dasm_put(Dst, 6648);
+ } else {
+ }
+ dasm_put(Dst, 6660);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6671, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6327, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6687, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 6702, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6769);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6776, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6327, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6687, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 6792, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6859);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6867, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6327, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6687, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 6883, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6950);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6958, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6327, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6687, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 6974, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 7041);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7048, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6386);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6327, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 6687, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 7064, 2+1, LJ_TISNUM, LJ_TISNUM, (unsigned int)(U64x(43380000,00000000)), (unsigned int)((U64x(43380000,00000000))>>32));
+ } else {
+ }
+ dasm_put(Dst, 7131, 1+2, 1+1, Dt1(->base), 8*LUA_MINSTACK, Dt1(->top), Dt1(->maxstack), Dt8(->f), Dt1(->base));
+ dasm_put(Dst, 7207, Dt1(->top), Dt7(->pc), FRAME_TYPE, LUA_MINSTACK, Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 7334, Dt1(->top), Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 7373, DISPATCH_GL(hookmask), HOOK_VMEVENT, HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount));
+#endif
+ dasm_put(Dst, 7404, DISPATCH_GL(hookmask), HOOK_ACTIVE, DISPATCH_GL(hookmask), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount), LUA_MASKLINE);
+ dasm_put(Dst, 7455, Dt1(->base), Dt1(->base), GG_DISP2STATIC);
+#if LJ_HASJIT
+ dasm_put(Dst, 7522, Dt7(->pc), PC2PROTO(framesize), Dt1(->base), Dt1(->top), GG_DISP2J, DISPATCH_J(L));
+#endif
+ dasm_put(Dst, 7569);
+#if LJ_HASJIT
+ dasm_put(Dst, 7399);
+#endif
+ dasm_put(Dst, 7576);
+#if LJ_HASJIT
+ dasm_put(Dst, 7579);
+#endif
+ dasm_put(Dst, 7589, Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 7623);
+#endif
+ dasm_put(Dst, 7628, Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 7659, DISPATCH_GL(vmstate), DISPATCH_GL(vmstate), ~LJ_VMST_EXIT, DISPATCH_J(exitno), DISPATCH_J(parent), 16*8+4*8, DISPATCH_GL(jit_L), DISPATCH_GL(jit_base), DISPATCH_J(L), DISPATCH_GL(jit_L), Dt1(->base), 4*8, GG_DISP2J, Dt1(->cframe), CFRAME_RAWMASK, CFRAME_OFS_L, Dt1(->base), CFRAME_OFS_PC);
+#endif
+ dasm_put(Dst, 7889);
+#if LJ_HASJIT
+ dasm_put(Dst, 7892, 9*16+4*8, -9*16, -8*16, -7*16, -6*16, -5*16, -4*16, -3*16, -2*16, -1*16, Dt7(->pc), PC2PROTO(k), DISPATCH_GL(jit_L), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, BC_FUNCF);
+ dasm_put(Dst, 8034);
+#endif
+ dasm_put(Dst, 8060);
+ if (!sse) {
+ dasm_put(Dst, 8063);
+ }
+ dasm_put(Dst, 8108, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ if (!sse) {
+ dasm_put(Dst, 8194);
+ }
+ dasm_put(Dst, 8239, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(bff00000,00000000)), (unsigned int)((U64x(bff00000,00000000))>>32));
+ if (!sse) {
+ dasm_put(Dst, 8325);
+ }
+ dasm_put(Dst, 8364, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ if (sse) {
+ dasm_put(Dst, 8453, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(43300000,00000000)), (unsigned int)((U64x(43300000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ } else {
+ dasm_put(Dst, 8567);
+ }
+ dasm_put(Dst, 8614);
+ if (!sse) {
+ } else {
+ dasm_put(Dst, 8688);
+ }
+ dasm_put(Dst, 8691);
+ dasm_put(Dst, 8776, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32));
+ dasm_put(Dst, 8879, (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32), (unsigned int)(U64x(3ff00000,00000000)), (unsigned int)((U64x(3ff00000,00000000))>>32), (unsigned int)(U64x(7ff00000,00000000)), (unsigned int)((U64x(7ff00000,00000000))>>32));
+ dasm_put(Dst, 9035);
+#if LJ_HASJIT
+ if (sse) {
+ dasm_put(Dst, 9076);
+ dasm_put(Dst, 9146);
+ dasm_put(Dst, 9219);
+ } else {
+ dasm_put(Dst, 9269);
+ dasm_put(Dst, 9361);
+ }
+ dasm_put(Dst, 9407);
+#endif
+ dasm_put(Dst, 9411);
+ if (sse) {
+ dasm_put(Dst, 9414, (unsigned int)(U64x(80000000,00000000)), (unsigned int)((U64x(80000000,00000000))>>32));
+ dasm_put(Dst, 9503, (unsigned int)(U64x(7fffffff,ffffffff)), (unsigned int)((U64x(7fffffff,ffffffff))>>32));
+ } else {
+ dasm_put(Dst, 9627);
+ dasm_put(Dst, 9710);
+ if (cmov) {
+ dasm_put(Dst, 9765);
+ } else {
+ dasm_put(Dst, 9784);
+ }
+ dasm_put(Dst, 9407);
+ }
+ dasm_put(Dst, 9825);
+#ifdef LUA_USE_ASSERT
+ dasm_put(Dst, 9409);
+#endif
+ dasm_put(Dst, 9853);
+#if LJ_HASFFI
+#define DtE(_V) (int)(ptrdiff_t)&(((CTState *)0)_V)
+ dasm_put(Dst, 9857, GG_G2DISP, Dt2(->ctype_state), DtE(->cb.slot), DtE(->cb.gpr[0]), DtE(->cb.gpr[1]), DtE(->cb.gpr[2]), DtE(->cb.gpr[3]), DtE(->cb.fpr[0]), DtE(->cb.fpr[1]), DtE(->cb.fpr[2]), DtE(->cb.fpr[3]), CFRAME_SIZE+4*8, DtE(->cb.stack), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), Dt1(->top), Dt7(->pc));
+#endif
+ dasm_put(Dst, 9984);
+#if LJ_HASFFI
+ dasm_put(Dst, 9987, DISPATCH_GL(ctype_state), DtE(->L), Dt1(->base), Dt1(->top), DtE(->cb.gpr[0]), DtE(->cb.fpr[0]));
+#endif
+ dasm_put(Dst, 10028);
+#if LJ_HASFFI
+#define DtF(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V)
+ dasm_put(Dst, 10031, DtF(->spadj));
+#if LJ_TARGET_WINDOWS
+#endif
+ dasm_put(Dst, 10046, DtF(->nsp), offsetof(CCallState, stack), CCALL_SPS_EXTRA*8, DtF(->nfpr), DtF(->gpr[0]), DtF(->gpr[1]), DtF(->gpr[2]), DtF(->gpr[3]), DtF(->fpr[0]), DtF(->fpr[1]), DtF(->fpr[2]), DtF(->fpr[3]));
+ dasm_put(Dst, 10127, DtF(->func), DtF(->gpr[0]), DtF(->fpr[0]));
+#if LJ_TARGET_WINDOWS
+#endif
+ dasm_put(Dst, 10140);
+#endif
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop, int cmov, int sse)
+{
+ int vk = 0;
+ dasm_put(Dst, 780, defop);
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 10148, LJ_TISNUM, LJ_TISNUM);
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 10178);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 10183);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 10188);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 10193);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 10198, -BCBIAS_J*4, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 10252);
+ } else {
+ dasm_put(Dst, 10263);
+ }
+ dasm_put(Dst, 10274);
+ if (sse) {
+ dasm_put(Dst, 10281);
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 10301);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 10306);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 10311);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 10316);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 10321);
+ } else {
+ dasm_put(Dst, 10326);
+ }
+ } else {
+ dasm_put(Dst, 10334, LJ_TISNUM, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 10355);
+ } else {
+ dasm_put(Dst, 10376);
+ if (cmov) {
+ dasm_put(Dst, 10392);
+ } else {
+ dasm_put(Dst, 10398);
+ }
+ }
+ if (LJ_DUALNUM) {
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 10301);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 10306);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 10311);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 10316);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 10321);
+ } else {
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 768);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 10405);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 10410);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 10415);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 10420, -BCBIAS_J*4);
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ dasm_put(Dst, 10452);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 10460, LJ_TISNUM, LJ_TISNUM);
+ if (vk) {
+ dasm_put(Dst, 10485);
+ } else {
+ dasm_put(Dst, 10490);
+ }
+ dasm_put(Dst, 10495, -BCBIAS_J*4, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 10547);
+ } else {
+ dasm_put(Dst, 10554);
+ }
+ dasm_put(Dst, 10558);
+ if (sse) {
+ dasm_put(Dst, 10569);
+ } else {
+ dasm_put(Dst, 10581);
+ }
+ dasm_put(Dst, 10588);
+ } else {
+ dasm_put(Dst, 10593, LJ_TISNUM, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 10612);
+ } else {
+ dasm_put(Dst, 10630);
+ if (cmov) {
+ dasm_put(Dst, 10392);
+ } else {
+ dasm_put(Dst, 10398);
+ }
+ }
+ iseqne_fp:
+ if (vk) {
+ dasm_put(Dst, 10643);
+ } else {
+ dasm_put(Dst, 10652);
+ }
+ iseqne_end:
+ if (vk) {
+ dasm_put(Dst, 10661, -BCBIAS_J*4);
+ if (!LJ_HASFFI) {
+ dasm_put(Dst, 4638);
+ }
+ } else {
+ if (!LJ_HASFFI) {
+ dasm_put(Dst, 4638);
+ }
+ dasm_put(Dst, 10676, -BCBIAS_J*4);
+ }
+ if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV ||
+ op == BC_ISEQN || op == BC_ISNEN)) {
+ dasm_put(Dst, 10691);
+ } else {
+ dasm_put(Dst, 10432);
+ }
+ if (op == BC_ISEQV || op == BC_ISNEV) {
+ dasm_put(Dst, 10124);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 10696, LJ_TCDATA, LJ_TCDATA);
+ }
+ dasm_put(Dst, 10715, LJ_TISPRI, LJ_TISTABUD, LJ_TUDATA, Dt6(->metatable), Dt6(->nomm), 1<>32));
+ } else {
+ dasm_put(Dst, 11303);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 10691);
+ } else {
+ dasm_put(Dst, 10432);
+ }
+ break;
+ case BC_LEN:
+ dasm_put(Dst, 11312, LJ_TSTR);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 11326, Dt5(->len), LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 11340, Dt5(->len));
+ } else {
+ dasm_put(Dst, 11358, Dt5(->len));
+ }
+ dasm_put(Dst, 11367, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 11402, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 11416);
+ if (LJ_DUALNUM) {
+ } else if (sse) {
+ dasm_put(Dst, 11425);
+ } else {
+ }
+ dasm_put(Dst, 11431);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 11444, Dt6(->nomm), 1<base), Dt1(->base));
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ dasm_put(Dst, 12270, LJ_TSTR);
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ dasm_put(Dst, 12270, LJ_TCDATA);
+#endif
+ break;
+ case BC_KSHORT:
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 12305, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 12317);
+ } else {
+ dasm_put(Dst, 12332);
+ }
+ dasm_put(Dst, 10432);
+ break;
+ case BC_KNUM:
+ if (sse) {
+ dasm_put(Dst, 12340);
+ } else {
+ dasm_put(Dst, 12353);
+ }
+ dasm_put(Dst, 10432);
+ break;
+ case BC_KPRI:
+ dasm_put(Dst, 12360);
+ break;
+ case BC_KNIL:
+ dasm_put(Dst, 12388, LJ_TNIL);
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ dasm_put(Dst, 12435, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_USETV:
+#define TV2MARKOFS \
+ ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv))
+ dasm_put(Dst, 12475, offsetof(GCfuncL, uvptr), DtA(->closed), DtA(->v), TV2MARKOFS, LJ_GC_BLACK, LJ_TISGCV, LJ_TISNUM - LJ_TISGCV, Dt4(->gch.marked), LJ_GC_WHITES, GG_DISP2G);
+ dasm_put(Dst, 12566);
+ break;
+#undef TV2MARKOFS
+ case BC_USETS:
+ dasm_put(Dst, 12578, offsetof(GCfuncL, uvptr), DtA(->v), LJ_TSTR, DtA(->marked), LJ_GC_BLACK, Dt4(->gch.marked), LJ_GC_WHITES, DtA(->closed), GG_DISP2G);
+ break;
+ case BC_USETN:
+ dasm_put(Dst, 12671);
+ if (sse) {
+ dasm_put(Dst, 12676);
+ } else {
+ dasm_put(Dst, 10951);
+ }
+ dasm_put(Dst, 12683, offsetof(GCfuncL, uvptr), DtA(->v));
+ if (sse) {
+ dasm_put(Dst, 12692);
+ } else {
+ dasm_put(Dst, 12698);
+ }
+ dasm_put(Dst, 10432);
+ break;
+ case BC_USETP:
+ dasm_put(Dst, 12701, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_UCLO:
+ dasm_put(Dst, 12740, -BCBIAS_J*4, Dt1(->openupval), Dt1(->base), Dt1(->base));
+ break;
+
+ case BC_FNEW:
+ dasm_put(Dst, 12795, Dt1(->base), Dt1(->base), LJ_TFUNC);
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ dasm_put(Dst, 12861, Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), LJ_TTAB);
+ break;
+ case BC_TDUP:
+ dasm_put(Dst, 12983, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), Dt1(->base), LJ_TTAB);
+ break;
+
+ case BC_GGET:
+ dasm_put(Dst, 13078, Dt7(->env));
+ break;
+ case BC_GSET:
+ dasm_put(Dst, 13097, Dt7(->env));
+ break;
+
+ case BC_TGETV:
+ dasm_put(Dst, 13116, LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 13139, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 13153, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 13164);
+ } else {
+ }
+ dasm_put(Dst, 13185);
+ }
+ dasm_put(Dst, 13190, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL);
+ dasm_put(Dst, 13383, LJ_TNIL, DtB(->next), Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+ case BC_TSETS:
+ dasm_put(Dst, 13731, LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->nomm), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL);
+ dasm_put(Dst, 13807, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<next));
+ dasm_put(Dst, 13895, Dt6(->metatable), Dt6(->nomm), 1<base), Dt1(->base), Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+ case BC_TSETB:
+ dasm_put(Dst, 13986, LJ_TTAB, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable));
+ dasm_put(Dst, 14080, Dt6(->metatable), Dt6(->nomm), 1<marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+
+ case BC_TSETM:
+ dasm_put(Dst, 14126, Dt6(->marked), LJ_GC_BLACK, Dt6(->asize), Dt6(->array), Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 14269, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALL: case BC_CALLM:
+ dasm_put(Dst, 11464);
+ if (op == BC_CALLM) {
+ dasm_put(Dst, 14287);
+ }
+ dasm_put(Dst, 14292, LJ_TFUNC, Dt7(->pc));
+ break;
+
+ case BC_CALLMT:
+ dasm_put(Dst, 14287);
+ break;
+ case BC_CALLT:
+ dasm_put(Dst, 14334, LJ_TFUNC, FRAME_TYPE, Dt7(->ffid), Dt7(->pc));
+ dasm_put(Dst, 14449, FRAME_TYPE, Dt7(->pc), PC2PROTO(k), FRAME_VARG, FRAME_TYPEP, FRAME_VARG);
+ break;
+
+ case BC_ITERC:
+ dasm_put(Dst, 14520, LJ_TFUNC, 2+1, Dt7(->pc));
+ break;
+
+ case BC_ITERN:
+#if LJ_HASJIT
+#endif
+ dasm_put(Dst, 14591, Dt6(->asize), Dt6(->array), LJ_TNIL);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 11331, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 11425);
+ } else {
+ dasm_put(Dst, 14637);
+ }
+ dasm_put(Dst, 14643);
+ if (LJ_DUALNUM) {
+ } else if (sse) {
+ dasm_put(Dst, 11296);
+ } else {
+ dasm_put(Dst, 11308);
+ }
+ dasm_put(Dst, 14656, -BCBIAS_J*4);
+ if (!LJ_DUALNUM && !sse) {
+ dasm_put(Dst, 14708);
+ }
+ dasm_put(Dst, 14714, Dt6(->hmask), sizeof(Node), Dt6(->node), DtB(->val.it), LJ_TNIL, DtB(->key), DtB(->val));
+ break;
+
+ case BC_ISNEXT:
+ dasm_put(Dst, 14786, LJ_TFUNC, LJ_TTAB, LJ_TNIL, Dt8(->ffid), FF_next_N, -BCBIAS_J*4, BC_JMP, -BCBIAS_J*4, BC_ITERC);
+ break;
+
+ case BC_VARG:
+ dasm_put(Dst, 14886, (8+FRAME_VARG), LJ_TNIL, Dt1(->maxstack));
+ dasm_put(Dst, 15046, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top));
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ dasm_put(Dst, 14287);
+ break;
+
+ case BC_RET: case BC_RET0: case BC_RET1:
+ if (op != BC_RET0) {
+ dasm_put(Dst, 15112);
+ }
+ dasm_put(Dst, 15116, FRAME_TYPE);
+ switch (op) {
+ case BC_RET:
+ dasm_put(Dst, 15135);
+ break;
+ case BC_RET1:
+ dasm_put(Dst, 15187);
+ /* fallthrough */
+ case BC_RET0:
+ dasm_put(Dst, 15197);
+ default:
+ break;
+ }
+ dasm_put(Dst, 15208, Dt7(->pc), PC2PROTO(k));
+ if (op == BC_RET) {
+ dasm_put(Dst, 15252, LJ_TNIL);
+ } else {
+ dasm_put(Dst, 15261, LJ_TNIL);
+ }
+ dasm_put(Dst, 15268, -FRAME_VARG, FRAME_TYPEP);
+ if (op != BC_RET0) {
+ dasm_put(Dst, 15292);
+ }
+ dasm_put(Dst, 4717);
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+
+ case BC_FORL:
+#if LJ_HASJIT
+ dasm_put(Dst, 15296, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ dasm_put(Dst, 15317);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 15321, LJ_TISNUM);
+ if (!vk) {
+ dasm_put(Dst, 15331, LJ_TISNUM, LJ_TISNUM);
+ } else {
+#ifdef LUA_USE_ASSERT
+ dasm_put(Dst, 15360, LJ_TISNUM, LJ_TISNUM);
+#endif
+ dasm_put(Dst, 15379);
+ }
+ dasm_put(Dst, 15398, LJ_TISNUM);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 15409, -BCBIAS_J*4);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 15423, -BCBIAS_J*4, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 15441, -BCBIAS_J*4);
+ } else {
+ dasm_put(Dst, 15433, BC_JLOOP);
+ }
+ dasm_put(Dst, 15455);
+ if (vk) {
+ dasm_put(Dst, 15479);
+ }
+ dasm_put(Dst, 15398, LJ_TISNUM);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 15488);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 15493, -BCBIAS_J*4, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 15507);
+ } else {
+ dasm_put(Dst, 15503, BC_JLOOP);
+ }
+ dasm_put(Dst, 15512);
+ } else if (!vk) {
+ dasm_put(Dst, 15519, LJ_TISNUM);
+ }
+ if (!vk) {
+ dasm_put(Dst, 15525, LJ_TISNUM);
+ } else {
+#ifdef LUA_USE_ASSERT
+ dasm_put(Dst, 15539, LJ_TISNUM, LJ_TISNUM);
+#endif
+ }
+ dasm_put(Dst, 15558);
+ if (!vk) {
+ dasm_put(Dst, 15562, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 15571);
+ if (vk) {
+ dasm_put(Dst, 15583);
+ } else {
+ dasm_put(Dst, 15602);
+ }
+ dasm_put(Dst, 15607);
+ } else {
+ dasm_put(Dst, 15620);
+ if (vk) {
+ dasm_put(Dst, 15626);
+ } else {
+ dasm_put(Dst, 15642);
+ }
+ dasm_put(Dst, 15650);
+ if (cmov) {
+ dasm_put(Dst, 10392);
+ } else {
+ dasm_put(Dst, 10398);
+ }
+ if (!cmov) {
+ dasm_put(Dst, 15655);
+ }
+ }
+ if (op == BC_FORI) {
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 15661);
+ } else {
+ dasm_put(Dst, 15666, -BCBIAS_J*4);
+ }
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 15676, -BCBIAS_J*4, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 15690);
+ } else {
+ dasm_put(Dst, 15695, -BCBIAS_J*4);
+ }
+ } else {
+ dasm_put(Dst, 15686, BC_JLOOP);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 10321);
+ } else {
+ dasm_put(Dst, 11081);
+ }
+ if (sse) {
+ dasm_put(Dst, 15705);
+ }
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ dasm_put(Dst, 15296, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ dasm_put(Dst, 15716, LJ_TNIL);
+ if (op == BC_JITERL) {
+ dasm_put(Dst, 15731, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 15745, -BCBIAS_J*4);
+ }
+ dasm_put(Dst, 10430);
+ break;
+
+ case BC_LOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 15296, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_ILOOP:
+ dasm_put(Dst, 10432);
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 15761, DISPATCH_J(trace), DtD(->mcode), DISPATCH_GL(jit_base), DISPATCH_GL(jit_L), 9*16+4*8, -1*16, -2*16, -3*16, -4*16, -5*16, -6*16, -7*16, -8*16, -9*16);
+#endif
+ break;
+
+ case BC_JMP:
+ dasm_put(Dst, 15870, -BCBIAS_J*4);
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ /*
+ ** Reminder: A function may be called with func/args above L->maxstack,
+ ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot,
+ ** too. This means all FUNC* ops (including fast functions) must check
+ ** for stack overflow _before_ adding more slots!
+ */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ dasm_put(Dst, 15895, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_CALL);
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ dasm_put(Dst, 15916, -4+PC2PROTO(k), Dt1(->maxstack), -4+PC2PROTO(numparams));
+ if (op == BC_JFUNCF) {
+ dasm_put(Dst, 15946, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 10432);
+ }
+ dasm_put(Dst, 15955, LJ_TNIL);
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ dasm_put(Dst, 9409);
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ dasm_put(Dst, 15977, FRAME_VARG, Dt1(->maxstack), -4+PC2PROTO(numparams), LJ_TNIL);
+ if (op == BC_JFUNCV) {
+ dasm_put(Dst, 15946, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 16068, -4+PC2PROTO(k));
+ }
+ dasm_put(Dst, 16091, LJ_TNIL);
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ dasm_put(Dst, 16113, Dt8(->f), Dt1(->base), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->top));
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 2381);
+ } else {
+ dasm_put(Dst, 16143);
+ }
+ dasm_put(Dst, 16151, DISPATCH_GL(vmstate), ~LJ_VMST_C);
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 16160);
+ } else {
+ dasm_put(Dst, 16164, DISPATCH_GL(wrapf));
+ }
+ dasm_put(Dst, 16169, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), Dt1(->top));
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+ int cmov = 1;
+ int sse = 0;
+#ifdef LUAJIT_CPU_NOCMOV
+ cmov = 0;
+#endif
+#if defined(LUAJIT_CPU_SSE2) || defined(LJ_TARGET_X64)
+ sse = 1;
+#endif
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx, cmov, sse);
+
+ dasm_put(Dst, 16194);
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op, cmov, sse);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+#if LJ_64
+#define SZPTR "8"
+#define BSZPTR "3"
+#define REG_SP "0x7"
+#define REG_RA "0x10"
+#else
+#define SZPTR "4"
+#define BSZPTR "2"
+#define REG_SP "0x4"
+#define REG_RA "0x8"
+#endif
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad .Lbegin\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE0:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad lj_vm_ffi_call\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if (defined(__sun__) && defined(__svr4__)) || defined(__solaris__)
+ fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
+#else
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+#endif
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE2:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.section .eh_frame,\"dr\"\n");
+ fprintf(ctx->fp,
+ "\t.def %slj_err_unwind_dwarf; .scl 2; .type 32; .endef\n",
+ LJ_32 ? "_" : "");
+ fprintf(ctx->fp,
+ "Lframe1:\n"
+ "\t.long LECIE1-LSCIE1\n"
+ "LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zP\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 5\n" /* augmentation length */
+ "\t.byte 0x00\n" /* absptr */
+ "\t.long %slj_err_unwind_dwarf\n"
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ "LECIE1:\n\n", LJ_32 ? "_" : "");
+ fprintf(ctx->fp,
+ "LSFDE1:\n"
+ "\t.long LEFDE1-LASFDE1\n"
+ "LASFDE1:\n"
+ "\t.long LASFDE1-Lframe1\n"
+ "\t.long %slj_vm_asm_begin\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ "LEFDE1:\n\n", LJ_32 ? "_" : "", (int)ctx->codesz, CFRAME_SIZE);
+ break;
+ /* Mental note: never let Apple design an assembler.
+ ** Or a linker. Or a plastic case. But I digress.
+ */
+ case BUILD_machasm: {
+#if LJ_HASFFI
+ int fcsize = 0;
+#endif
+ int i;
+ fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
+ fprintf(ctx->fp,
+ "EH_frame1:\n"
+ "\t.set L$set$x,LECIEX-LSCIEX\n"
+ "\t.long L$set$x\n"
+ "LSCIEX:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zPR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 6\n" /* augmentation length */
+ "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
+#if LJ_64
+ "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEX:\n\n");
+ for (i = 0; i < ctx->nsym; i++) {
+ const char *name = ctx->sym[i].name;
+ int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs;
+ if (size == 0) continue;
+#if LJ_HASFFI
+ if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
+#endif
+ fprintf(ctx->fp,
+ "%s.eh:\n"
+ "LSFDE%d:\n"
+ "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
+ "\t.long L$set$%d\n"
+ "LASFDE%d:\n"
+ "\t.long LASFDE%d-EH_frame1\n"
+ "\t.long %s-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/
+ "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDE%d:\n\n",
+ name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i);
+ }
+#if LJ_HASFFI
+ if (fcsize) {
+ fprintf(ctx->fp,
+ "EH_frame2:\n"
+ "\t.set L$set$y,LECIEY-LSCIEY\n"
+ "\t.long L$set$y\n"
+ "LSCIEY:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 1\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEY:\n\n");
+ fprintf(ctx->fp,
+ "_lj_vm_ffi_call.eh:\n"
+ "LSFDEY:\n"
+ "\t.set L$set$yy,LEFDEY-LASFDEY\n"
+ "\t.long L$set$yy\n"
+ "LASFDEY:\n"
+ "\t.long LASFDEY-EH_frame2\n"
+ "\t.long _lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.byte 8\n" /* def_cfa_offset */
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/
+ "\t.byte 0xd\n\t.uleb128 0x4\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDEY:\n\n", fcsize);
+ }
+#endif
+#if LJ_64
+ fprintf(ctx->fp, "\t.subsections_via_symbols\n");
+#else
+ fprintf(ctx->fp,
+ "\t.non_lazy_symbol_pointer\n"
+ "L_lj_err_unwind_dwarf$non_lazy_ptr:\n"
+ ".indirect_symbol _lj_err_unwind_dwarf\n"
+ ".long 0\n");
+#endif
+ }
+ break;
+ default: /* Difficult for other modes. */
+ break;
+ }
+}
+
diff --git a/src/LuaJIT/src/buildvm_x86.dasc b/src/LuaJIT/src/buildvm_x86.dasc
new file mode 100644
index 000000000..30ee5b67a
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_x86.dasc
@@ -0,0 +1,6458 @@
+|// Low-level VM code for x86 CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+|
+|.if X64
+|.arch x64
+|.else
+|.arch x86
+|.endif
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// This is very fragile and has many dependencies. Caveat emptor.
+|.define BASE, edx // Not C callee-save, refetched anyway.
+|.if not X64
+|.define KBASE, edi // Must be C callee-save.
+|.define KBASEa, KBASE
+|.define PC, esi // Must be C callee-save.
+|.define PCa, PC
+|.define DISPATCH, ebx // Must be C callee-save.
+|.elif X64WIN
+|.define KBASE, edi // Must be C callee-save.
+|.define KBASEa, rdi
+|.define PC, esi // Must be C callee-save.
+|.define PCa, rsi
+|.define DISPATCH, ebx // Must be C callee-save.
+|.else
+|.define KBASE, r15d // Must be C callee-save.
+|.define KBASEa, r15
+|.define PC, ebx // Must be C callee-save.
+|.define PCa, rbx
+|.define DISPATCH, r14d // Must be C callee-save.
+|.endif
+|
+|.define RA, ecx
+|.define RAH, ch
+|.define RAL, cl
+|.define RB, ebp // Must be ebp (C callee-save).
+|.define RC, eax // Must be eax (fcomparepp and others).
+|.define RCW, ax
+|.define RCH, ah
+|.define RCL, al
+|.define OP, RB
+|.define RD, RC
+|.define RDW, RCW
+|.define RDL, RCL
+|.if X64
+|.define RAa, rcx
+|.define RBa, rbp
+|.define RCa, rax
+|.define RDa, rax
+|.else
+|.define RAa, RA
+|.define RBa, RB
+|.define RCa, RC
+|.define RDa, RD
+|.endif
+|
+|.if not X64
+|.define FCARG1, ecx // x86 fastcall arguments.
+|.define FCARG2, edx
+|.elif X64WIN
+|.define CARG1, rcx // x64/WIN64 C call arguments.
+|.define CARG2, rdx
+|.define CARG3, r8
+|.define CARG4, r9
+|.define CARG1d, ecx
+|.define CARG2d, edx
+|.define CARG3d, r8d
+|.define CARG4d, r9d
+|.define FCARG1, CARG1d // Upwards compatible to x86 fastcall.
+|.define FCARG2, CARG2d
+|.else
+|.define CARG1, rdi // x64/POSIX C call arguments.
+|.define CARG2, rsi
+|.define CARG3, rdx
+|.define CARG4, rcx
+|.define CARG5, r8
+|.define CARG6, r9
+|.define CARG1d, edi
+|.define CARG2d, esi
+|.define CARG3d, edx
+|.define CARG4d, ecx
+|.define CARG5d, r8d
+|.define CARG6d, r9d
+|.define FCARG1, CARG1d // Simulate x86 fastcall.
+|.define FCARG2, CARG2d
+|.endif
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS, int
+|.type TRACE, GCtrace
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|//-----------------------------------------------------------------------
+|.if not X64 // x86 stack layout.
+|
+|.define CFRAME_SPACE, aword*7 // Delta for esp (see <--).
+|.macro saveregs_
+| push edi; push esi; push ebx
+| sub esp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+| push ebp; saveregs_
+|.endmacro
+|.macro restoreregs
+| add esp, CFRAME_SPACE
+| pop ebx; pop esi; pop edi; pop ebp
+|.endmacro
+|
+|.define SAVE_ERRF, aword [esp+aword*15] // vm_pcall/vm_cpcall only.
+|.define SAVE_NRES, aword [esp+aword*14]
+|.define SAVE_CFRAME, aword [esp+aword*13]
+|.define SAVE_L, aword [esp+aword*12]
+|//----- 16 byte aligned, ^^^ arguments from C caller
+|.define SAVE_RET, aword [esp+aword*11] //<-- esp entering interpreter.
+|.define SAVE_R4, aword [esp+aword*10]
+|.define SAVE_R3, aword [esp+aword*9]
+|.define SAVE_R2, aword [esp+aword*8]
+|//----- 16 byte aligned
+|.define SAVE_R1, aword [esp+aword*7] //<-- esp after register saves.
+|.define SAVE_PC, aword [esp+aword*6]
+|.define TMP2, aword [esp+aword*5]
+|.define TMP1, aword [esp+aword*4]
+|//----- 16 byte aligned
+|.define ARG4, aword [esp+aword*3]
+|.define ARG3, aword [esp+aword*2]
+|.define ARG2, aword [esp+aword*1]
+|.define ARG1, aword [esp] //<-- esp while in interpreter.
+|//----- 16 byte aligned, ^^^ arguments for C callee
+|
+|// FPARGx overlaps ARGx and ARG(x+1) on x86.
+|.define FPARG3, qword [esp+qword*1]
+|.define FPARG1, qword [esp]
+|// TMPQ overlaps TMP1/TMP2. ARG5/MULTRES overlap TMP1/TMP2 (and TMPQ).
+|.define TMPQ, qword [esp+aword*4]
+|.define TMP3, ARG4
+|.define ARG5, TMP1
+|.define TMPa, TMP1
+|.define MULTRES, TMP2
+|
+|// Arguments for vm_call and vm_pcall.
+|.define INARG_BASE, SAVE_CFRAME // Overwritten by SAVE_CFRAME!
+|
+|// Arguments for vm_cpcall.
+|.define INARG_CP_CALL, SAVE_ERRF
+|.define INARG_CP_UD, SAVE_NRES
+|.define INARG_CP_FUNC, SAVE_CFRAME
+|
+|//-----------------------------------------------------------------------
+|.elif X64WIN // x64/Windows stack layout
+|
+|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
+|.macro saveregs_
+| push rdi; push rsi; push rbx
+| sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+| push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+| add rsp, CFRAME_SPACE
+| pop rbx; pop rsi; pop rdi; pop rbp
+|.endmacro
+|
+|.define SAVE_CFRAME, aword [rsp+aword*13]
+|.define SAVE_PC, dword [rsp+dword*25]
+|.define SAVE_L, dword [rsp+dword*24]
+|.define SAVE_ERRF, dword [rsp+dword*23]
+|.define SAVE_NRES, dword [rsp+dword*22]
+|.define TMP2, dword [rsp+dword*21]
+|.define TMP1, dword [rsp+dword*20]
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter
+|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*8]
+|.define SAVE_R3, aword [rsp+aword*7]
+|.define SAVE_R2, aword [rsp+aword*6]
+|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.define ARG5, aword [rsp+aword*4]
+|.define CSAVE_4, aword [rsp+aword*3]
+|.define CSAVE_3, aword [rsp+aword*2]
+|.define CSAVE_2, aword [rsp+aword*1]
+|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter.
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee
+|
+|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ).
+|.define TMPQ, qword [rsp+aword*10]
+|.define MULTRES, TMP2
+|.define TMPa, ARG5
+|.define ARG5d, dword [rsp+aword*4]
+|.define TMP3, ARG5d
+|
+|//-----------------------------------------------------------------------
+|.else // x64/POSIX stack layout
+|
+|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
+|.macro saveregs_
+| push rbx; push r15; push r14
+| sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+| push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+| add rsp, CFRAME_SPACE
+| pop r14; pop r15; pop rbx; pop rbp
+|.endmacro
+|
+|//----- 16 byte aligned,
+|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*8]
+|.define SAVE_R3, aword [rsp+aword*7]
+|.define SAVE_R2, aword [rsp+aword*6]
+|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.define SAVE_CFRAME, aword [rsp+aword*4]
+|.define SAVE_PC, dword [rsp+dword*7]
+|.define SAVE_L, dword [rsp+dword*6]
+|.define SAVE_ERRF, dword [rsp+dword*5]
+|.define SAVE_NRES, dword [rsp+dword*4]
+|.define TMPa, aword [rsp+aword*1]
+|.define TMP2, dword [rsp+dword*1]
+|.define TMP1, dword [rsp] //<-- rsp while in interpreter.
+|//----- 16 byte aligned
+|
+|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ).
+|.define TMPQ, qword [rsp]
+|.define TMP3, dword [rsp+aword*1]
+|.define MULTRES, TMP2
+|
+|.endif
+|
+|//-----------------------------------------------------------------------
+|
+|// Instruction headers.
+|.macro ins_A; .endmacro
+|.macro ins_AD; .endmacro
+|.macro ins_AJ; .endmacro
+|.macro ins_ABC; movzx RB, RCH; movzx RC, RCL; .endmacro
+|.macro ins_AB_; movzx RB, RCH; .endmacro
+|.macro ins_A_C; movzx RC, RCL; .endmacro
+|.macro ins_AND; not RDa; .endmacro
+|
+|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster).
+|.macro ins_NEXT
+| mov RC, [PC]
+| movzx RA, RCH
+| movzx OP, RCL
+| add PC, 4
+| shr RC, 16
+|.if X64
+| jmp aword [DISPATCH+OP*8]
+|.else
+| jmp aword [DISPATCH+OP*4]
+|.endif
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| // Around 10%-30% slower on Core2, a lot slower still on P4.
+| .macro ins_next
+| jmp ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
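
In C terms, the decode/dispatch above boils down to something like the sketch below; the handler type and the `dispatch` table name are illustrative placeholders, not LuaJIT identifiers.

    #include <stdint.h>

    typedef void (*ins_handler)(uint32_t ra, uint32_t rd);  /* hypothetical */
    extern ins_handler dispatch[];  /* hypothetical: one handler per opcode */

    /* Rough C model of ins_NEXT: decode the 32-bit instruction word and
    ** jump through the dispatch table indexed by the opcode byte. */
    static void ins_next(const uint32_t **pcp)
    {
      uint32_t ins = **pcp;              /* mov RC, [PC]   */
      uint32_t ra  = (ins >> 8) & 0xff;  /* movzx RA, RCH  */
      uint32_t op  = ins & 0xff;         /* movzx OP, RCL  */
      uint32_t rd;
      *pcp += 1;                         /* add PC, 4      */
      rd = ins >> 16;                    /* shr RC, 16     */
      dispatch[op](ra, rd);              /* jmp aword [DISPATCH+OP*4 or *8] */
    }
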
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-4] = PC
+| mov PC, LFUNC:RB->pc
+| mov RA, [PC]
+| movzx OP, RAL
+| movzx RA, RAH
+| add PC, 4
+|.if X64
+| jmp aword [DISPATCH+OP*8]
+|.else
+| jmp aword [DISPATCH+OP*4]
+|.endif
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC, RD = nargs+1
+| mov [BASE-4], PC
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checktp, reg, tp; cmp dword [BASE+reg*8+4], tp; .endmacro
+|.macro checknum, reg, target; checktp reg, LJ_TISNUM; jae target; .endmacro
+|.macro checkint, reg, target; checktp reg, LJ_TISNUM; jne target; .endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR; jne target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB; jne target; .endmacro
+|
+|// These operands must be used with movzx.
+|.define PC_OP, byte [PC-4]
+|.define PC_RA, byte [PC-3]
+|.define PC_RB, byte [PC-1]
+|.define PC_RC, byte [PC-2]
+|.define PC_RD, word [PC-2]
+|
+|.macro branchPC, reg
+| lea PC, [PC+reg*4-BCBIAS_J*4]
+|.endmacro
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|// Decrement hashed hotcount and trigger trace recorder if zero.
+|.macro hotloop, reg
+| mov reg, PC
+| shr reg, 1
+| and reg, HOTCOUNT_PCMASK
+| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP
+| jb ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall, reg
+| mov reg, PC
+| shr reg, 1
+| and reg, HOTCOUNT_PCMASK
+| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL
+| jb ->vm_hotcall
+|.endmacro
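
The hot-counting in hotloop/hotcall can be read as the C sketch below: a 16-bit counter, selected by hashing the PC, is decremented, and the JIT is entered when it underflows. The function name and the HOTCOUNT_PCMASK value used here are assumptions for illustration; the real counters sit in the GG state block next to the dispatch table (the GG_DISP2HOT offset above).

    #include <stdint.h>

    #define HOTCOUNT_PCMASK 0x7e  /* assumed value for illustration */

    /* Returns nonzero when the counter underflows, i.e. when the asm above
    ** would take the "jb ->vm_hotloop" / "jb ->vm_hotcall" branch. */
    static int hotcount_tick(uint16_t *hotcount, uintptr_t pc, unsigned cost)
    {
      unsigned byteofs = (unsigned)(pc >> 1) & HOTCOUNT_PCMASK; /* hashed, even  */
      unsigned idx = byteofs / 2;                               /* word index    */
      unsigned old = hotcount[idx];
      hotcount[idx] = (uint16_t)(old - cost);                   /* sub word [..] */
      return old < cost;                                        /* carry set?    */
    }
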
+|
+|// Set current VM state.
+|.macro set_vmstate, st
+| mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st
+|.endmacro
+|
+|// Annoying x87 stuff: support for two compare variants.
+|.macro fcomparepp // Compare and pop st0 >< st1.
+||if (cmov) {
+| fucomip st1
+| fpop
+||} else {
+| fucompp
+| fnstsw ax // eax modified!
+| sahf
+||}
+|.endmacro
+|
+|.macro fdup; fld st0; .endmacro
+|.macro fpop1; fstp st1; .endmacro
+|
+|// Synthesize SSE FP constants.
+|.macro sseconst_abs, reg, tmp // Synthesize abs mask.
+|.if X64
+| mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp
+|.else
+| pxor reg, reg; pcmpeqd reg, reg; psrlq reg, 1
+|.endif
+|.endmacro
+|
+|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const.
+|.if X64
+| mov64 tmp, U64x(val,00000000); movd reg, tmp
+|.else
+| mov tmp, 0x .. val; movd reg, tmp; pshufd reg, reg, 0x51
+|.endif
+|.endmacro
+|
+|.macro sseconst_sign, reg, tmp // Synthesize sign mask.
+| sseconst_hi reg, tmp, 80000000
+|.endmacro
+|.macro sseconst_1, reg, tmp // Synthesize 1.0.
+| sseconst_hi reg, tmp, 3ff00000
+|.endmacro
+|.macro sseconst_m1, reg, tmp // Synthesize -1.0.
+| sseconst_hi reg, tmp, bff00000
+|.endmacro
+|.macro sseconst_2p52, reg, tmp // Synthesize 2^52.
+| sseconst_hi reg, tmp, 43300000
+|.endmacro
+|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51.
+| sseconst_hi reg, tmp, 43380000
+|.endmacro
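
The hi-32-bit constants synthesized above are just IEEE-754 doubles with a zero low word; a quick stand-alone check (not part of the patch) confirms the values named in the macro comments:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static double hi32(uint32_t hi)  /* double whose upper 32 bits are 'hi' */
    {
      uint64_t bits = (uint64_t)hi << 32;
      double d;
      memcpy(&d, &bits, sizeof(d));
      return d;
    }

    int main(void)
    {
      printf("%g %g %g %g\n",
             hi32(0x3ff00000),   /* 1.0          */
             hi32(0xbff00000),   /* -1.0         */
             hi32(0x43300000),   /* 2^52         */
             hi32(0x43380000));  /* 2^52 + 2^51  */
      return 0;
    }
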
+|
+|// Move table write barrier back. Overwrites reg.
+|.macro barrierback, tab, reg
+| and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab)
+| mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)]
+| mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab
+| mov tab->gclist, reg
+|.endmacro
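
barrierback is the assembler form of LuaJIT's backward write barrier for tables; as a C sketch (leaning on the internal headers, so names are only as accurate as those headers) it reads roughly as:

    #include "lj_obj.h"  /* internal LuaJIT header: GCtab, global_State, GCRef */
    #include "lj_gc.h"   /* LJ_GC_BLACK */

    /* Clear the black bit and push the table onto the gray-again list so the
    ** GC re-traverses it; this mirrors what the macro above does in asm. */
    static void barrier_back(global_State *g, GCtab *t)
    {
      t->marked &= (uint8_t)~LJ_GC_BLACK;      /* black2gray(t)             */
      setgcrefr(t->gclist, g->gc.grayagain);   /* t->gclist = old list head */
      setgcref(g->gc.grayagain, obj2gco(t));   /* g->gc.grayagain = t       */
    }
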
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx, int cmov, int sse)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | test PC, FRAME_P
+ | jz ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | and PC, -8
+ | sub BASE, PC // Restore caller base.
+ | lea RAa, [RA+PC-8] // Rebase RA and prepend one result.
+ | mov PC, [BASE-4] // Fetch PC of previous frame.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | mov dword [BASE+RA+4], LJ_TTRUE // Prepend true to results.
+ |
+ |->vm_returnc:
+ | add RD, 1 // RD = nresults+1
+ | mov MULTRES, RD
+ | test PC, FRAME_TYPE
+ | jz ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return
+ | xor PC, FRAME_C
+ | test PC, FRAME_TYPE
+ | jnz ->vm_returnp
+ |
+ | // Return to C.
+ | set_vmstate C
+ | and PC, -8
+ | sub PC, BASE
+ | neg PC // Previous base = BASE - delta.
+ |
+ | sub RD, 1
+ | jz >2
+ |1: // Move results down.
+ |.if X64
+ | mov RBa, [BASE+RA]
+ | mov [BASE-8], RBa
+ |.else
+ | mov RB, [BASE+RA]
+ | mov [BASE-8], RB
+ | mov RB, [BASE+RA+4]
+ | mov [BASE-4], RB
+ |.endif
+ | add BASE, 8
+ | sub RD, 1
+ | jnz <1
+ |2:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, PC
+ |3:
+ | mov RD, MULTRES
+ | mov RA, SAVE_NRES // RA = wanted nresults+1
+ |4:
+ | cmp RA, RD
+ | jne >6 // More/less results wanted?
+ |5:
+ | sub BASE, 8
+ | mov L:RB->top, BASE
+ |
+ |->vm_leave_cp:
+ | mov RAa, SAVE_CFRAME // Restore previous C frame.
+ | mov L:RB->cframe, RAa
+ | xor eax, eax // Ok return status for vm_pcall.
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | ret
+ |
+ |6:
+ | jb >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | cmp BASE, L:RB->maxstack
+ | ja >8
+ | mov dword [BASE-4], LJ_TNIL
+ | add BASE, 8
+ | add RD, 1
+ | jmp <4
+ |
+ |7: // Less results wanted.
+ | test RA, RA
+ | jz <5 // But check for LUA_MULTRET+1.
+ | sub RA, RD // Negative result!
+ | lea BASE, [BASE+RA*8] // Correct top.
+ | jmp <5
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | mov L:RB->top, BASE // Save current top held in BASE (yes).
+ | mov MULTRES, RD // Need to fill only remainder with nil.
+ | mov FCARG2, RA
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ | mov BASE, L:RB->top // Need the (realloced) L->top in BASE.
+ | jmp <3
+ |
+ |->vm_unwind_c@8: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ |.if X64
+ | mov eax, CARG2d // Error return status for vm_pcall.
+ | mov rsp, CARG1
+ |.else
+ | mov eax, FCARG2 // Error return status for vm_pcall.
+ | mov esp, FCARG1
+ |.endif
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | mov L:RB, SAVE_L
+ | mov GL:RB, L:RB->glref
+ | mov dword GL:RB->vmstate, ~LJ_VMST_C
+ | jmp ->vm_leave_unw
+ |
+ |->vm_unwind_rethrow:
+ |.if X64 and not X64WIN
+ | mov FCARG1, SAVE_L
+ | mov FCARG2, eax
+ | restoreregs
+ | jmp extern lj_err_throw@8 // (lua_State *L, int errcode)
+ |.endif
+ |
+ |->vm_unwind_ff@4: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ |.if X64
+ | and CARG1, CFRAME_RAWMASK
+ | mov rsp, CARG1
+ |.else
+ | and FCARG1, CFRAME_RAWMASK
+ | mov esp, FCARG1
+ |.endif
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | mov L:RB, SAVE_L
+ | mov RAa, -8 // Results start at BASE+RA = BASE-8.
+ | mov RD, 1+1 // Really 1+2 results, incr. later.
+ | mov BASE, L:RB->base
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | add DISPATCH, GG_G2DISP
+ | mov PC, [BASE-4] // Fetch PC of previous frame.
+ | mov dword [BASE-4], LJ_TFALSE // Prepend false to error message.
+ | set_vmstate INTERP
+ | jmp ->vm_returnc // Increments RD/MULTRES and returns.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | mov FCARG2, LUA_MINSTACK
+ | jmp >2
+ |
+ |->vm_growstack_v: // Grow stack for vararg Lua function.
+ | sub RD, 8
+ | jmp >1
+ |
+ |->vm_growstack_f: // Grow stack for fixarg Lua function.
+ | // BASE = new base, RD = nargs+1, RB = L, PC = first PC
+ | lea RD, [BASE+NARGS:RD*8-8]
+ |1:
+ | movzx RA, byte [PC-4+PC2PROTO(framesize)]
+ | add PC, 4 // Must point after first instruction.
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov SAVE_PC, PC
+ | mov FCARG2, RA
+ |2:
+ | // RB = L, L->base = new base, L->top = top
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | mov LFUNC:RB, [BASE-8]
+ | sub RD, BASE
+ | shr RD, 3
+ | add NARGS:RD, 1
+ | // BASE = new base, RB = LFUNC, RD = nargs+1
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ |.if X64
+ | mov L:RB, CARG1d // Caveat: CARG1d may be RA.
+ | mov SAVE_L, CARG1d
+ | mov RA, CARG2d
+ |.else
+ | mov L:RB, SAVE_L
+ | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME!
+ |.endif
+ | mov PC, FRAME_CP
+ | xor RD, RD
+ | lea KBASEa, [esp+CFRAME_RESUME]
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | add DISPATCH, GG_G2DISP
+ | mov L:RB->cframe, KBASEa
+ | mov SAVE_PC, RD // Any value outside of bytecode is ok.
+ | mov SAVE_CFRAME, RDa
+ |.if X64
+ | mov SAVE_NRES, RD
+ | mov SAVE_ERRF, RD
+ |.endif
+ | cmp byte L:RB->status, RDL
+ | je >3 // Initial resume (like a call).
+ |
+ | // Resume after yield (like a return).
+ | set_vmstate INTERP
+ | mov byte L:RB->status, RDL
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | sub RD, RA
+ | shr RD, 3
+ | add RD, 1 // RD = nresults+1
+ | sub RA, BASE // RA = resultofs
+ | mov PC, [BASE-4]
+ | mov MULTRES, RD
+ | test PC, FRAME_TYPE
+ | jz ->BC_RET_Z
+ | jmp ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | mov PC, FRAME_CP
+ |.if X64
+ | mov SAVE_ERRF, CARG4d
+ |.endif
+ | jmp >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | mov PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ |.if X64
+ | mov SAVE_NRES, CARG3d
+ | mov L:RB, CARG1d // Caveat: CARG1d may be RA.
+ | mov SAVE_L, CARG1d
+ | mov RA, CARG2d
+ |.else
+ | mov L:RB, SAVE_L
+ | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME!
+ |.endif
+ |
+ | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASEa
+ | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
+ |.if X64
+ | mov L:RB->cframe, rsp
+ |.else
+ | mov L:RB->cframe, esp
+ |.endif
+ |
+ |2: // Entry point for vm_cpcall below (RA = base, RB = L, PC = ftype).
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | add DISPATCH, GG_G2DISP
+ |
+ |3: // Entry point for vm_resume above (RA = base, RB = L, PC = ftype).
+ | set_vmstate INTERP
+ | mov BASE, L:RB->base // BASE = old base (used in vmeta_call).
+ | add PC, RA
+ | sub PC, BASE // PC = frame delta + frame type
+ |
+ | mov RD, L:RB->top
+ | sub RD, RA
+ | shr NARGS:RD, 3
+ | add NARGS:RD, 1 // RD = nargs+1
+ |
+ |->vm_call_dispatch:
+ | mov LFUNC:RB, [RA-8]
+ | cmp dword [RA-4], LJ_TFUNC
+ | jne ->vmeta_call // Ensure KBASE defined and != BASE.
+ |
+ |->vm_call_dispatch_f:
+ | mov BASE, RA
+ | ins_call
+ | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ |.if X64
+ | mov L:RB, CARG1d // Caveat: CARG1d may be RA.
+ | mov SAVE_L, CARG1d
+ |.else
+ | mov L:RB, SAVE_L
+ | // Caveat: INARG_CP_* and SAVE_CFRAME/SAVE_NRES/SAVE_ERRF overlap!
+ | mov RC, INARG_CP_UD // Get args before they are overwritten.
+ | mov RA, INARG_CP_FUNC
+ | mov BASE, INARG_CP_CALL
+ |.endif
+ | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
+ |
+ | mov KBASE, L:RB->stack // Compute -savestack(L, L->top).
+ | sub KBASE, L:RB->top
+ | mov SAVE_ERRF, 0 // No error function.
+ | mov SAVE_NRES, KBASE // Neg. delta means cframe w/o frame.
+ | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe).
+ |
+ |.if X64
+ | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASEa
+ | mov L:RB->cframe, rsp
+ |
+ | call CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ |.else
+ | mov ARG3, RC // Have to copy args downwards.
+ | mov ARG2, RA
+ | mov ARG1, L:RB
+ |
+ | mov KBASE, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASE
+ | mov L:RB->cframe, esp
+ |
+ | call BASE // (lua_State *L, lua_CFunction func, void *ud)
+ |.endif
+ | // TValue * (new base) or NULL returned in eax (RC).
+ | test RC, RC
+ | jz ->vm_leave_cp // No base? Just remove C frame.
+ | mov RA, RC
+ | mov PC, FRAME_CP
+ | jmp <2 // Else continue with the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES)
+ | add RA, BASE
+ | and PC, -8
+ | mov RB, BASE
+ | sub BASE, PC // Restore caller BASE.
+ | mov dword [RA+RD*8-4], LJ_TNIL // Ensure one valid arg.
+ | mov RC, RA // ... in [RC]
+ | mov PC, [RB-12] // Restore PC from [cont|PC].
+ |.if X64
+ | movsxd RAa, dword [RB-16] // May be negative on WIN64 with debug.
+#if LJ_HASFFI
+ | cmp RA, 1
+ | jbe >1
+#endif
+ | lea KBASEa, qword [=>0]
+ | add RAa, KBASEa
+ |.else
+ | mov RA, dword [RB-16]
+#if LJ_HASFFI
+ | cmp RA, 1
+ | jbe >1
+#endif
+ |.endif
+ | mov LFUNC:KBASE, [BASE-8]
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | // BASE = base, RC = result, RB = meta base
+ | jmp RAa // Jump to continuation.
+ |
+#if LJ_HASFFI
+ |1:
+ | je ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: Tail call from C function.
+ | sub RB, BASE
+ | shr RB, 3
+ | lea RD, [RB-1]
+ | jmp ->vm_call_tail
+#endif
+ |
+ |->cont_cat: // BASE = base, RC = result, RB = mbase
+ | movzx RA, PC_RB
+ | sub RB, 16
+ | lea RA, [BASE+RA*8]
+ | sub RA, RB
+ | je ->cont_ra
+ | neg RA
+ | shr RA, 3
+ |.if X64WIN
+ | mov CARG3d, RA
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE
+ | mov RCa, [RC]
+ | mov [RB], RCa
+ | mov CARG2d, RB
+ |.elif X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE
+ | mov CARG3d, RA
+ | mov RAa, [RC]
+ | mov [RB], RAa
+ | mov CARG2d, RB
+ |.else
+ | mov ARG3, RA
+ | mov RA, [RC+4]
+ | mov RC, [RC]
+ | mov [RB+4], RA
+ | mov [RB], RC
+ | mov ARG2, RB
+ |.endif
+ | jmp ->BC_CAT_Z
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets:
+ | mov TMP1, RC // RC = GCstr *
+ | mov TMP2, LJ_TSTR
+ | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2.
+ | cmp PC_OP, BC_GGET
+ | jne >1
+ | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
+ | mov [RA], TAB:RB // RB = GCtab *
+ | mov dword [RA+4], LJ_TTAB
+ | mov RB, RA
+ | jmp >2
+ |
+ |->vmeta_tgetb:
+ | movzx RC, PC_RC
+ if (LJ_DUALNUM) {
+ | mov TMP2, LJ_TISNUM
+ | mov TMP1, RC
+ } else if (sse) {
+ | cvtsi2sd xmm0, RC
+ | movsd TMPQ, xmm0
+ } else {
+ |.if not X64
+ | mov ARG4, RC
+ | fild ARG4
+ | fstp TMPQ
+ |.endif
+ }
+ | lea RCa, TMPQ // Store temp. TValue in TMPQ.
+ | jmp >1
+ |
+ |->vmeta_tgetv:
+ | movzx RC, PC_RC // Reload TValue *k from RC.
+ | lea RC, [BASE+RC*8]
+ |1:
+ | movzx RB, PC_RB // Reload TValue *t from RB.
+ | lea RB, [BASE+RB*8]
+ |2:
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, RB
+ | mov CARG3, RCa // May be 64 bit ptr to stack.
+ | mov L:RB, L:CARG1d
+ |.else
+ | mov ARG2, RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz >3
+ |->cont_ra: // BASE = base, RC = result
+ | movzx RA, PC_RA
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC+4]
+ | mov RC, [RC]
+ | mov [BASE+RA*8+4], RB
+ | mov [BASE+RA*8], RC
+ |.endif
+ | ins_next
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | mov RA, L:RB->top
+ | mov [RA-12], PC // [cont|PC]
+ | lea PC, [RA+FRAME_CONT]
+ | sub PC, BASE
+ | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here.
+ | mov NARGS:RD, 2+1 // 2 args for func(t, k).
+ | jmp ->vm_call_dispatch_f
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets:
+ | mov TMP1, RC // RC = GCstr *
+ | mov TMP2, LJ_TSTR
+ | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2.
+ | cmp PC_OP, BC_GSET
+ | jne >1
+ | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
+ | mov [RA], TAB:RB // RB = GCtab *
+ | mov dword [RA+4], LJ_TTAB
+ | mov RB, RA
+ | jmp >2
+ |
+ |->vmeta_tsetb:
+ | movzx RC, PC_RC
+ if (LJ_DUALNUM) {
+ | mov TMP2, LJ_TISNUM
+ | mov TMP1, RC
+ } else if (sse) {
+ | cvtsi2sd xmm0, RC
+ | movsd TMPQ, xmm0
+ } else {
+ |.if not X64
+ | mov ARG4, RC
+ | fild ARG4
+ | fstp TMPQ
+ |.endif
+ }
+ | lea RCa, TMPQ // Store temp. TValue in TMPQ.
+ | jmp >1
+ |
+ |->vmeta_tsetv:
+ | movzx RC, PC_RC // Reload TValue *k from RC.
+ | lea RC, [BASE+RC*8]
+ |1:
+ | movzx RB, PC_RB // Reload TValue *t from RB.
+ | lea RB, [BASE+RB*8]
+ |2:
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, RB
+ | mov CARG3, RCa // May be 64 bit ptr to stack.
+ | mov L:RB, L:CARG1d
+ |.else
+ | mov ARG2, RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | movzx RA, PC_RA
+ |.if X64
+ | mov RBa, [BASE+RA*8]
+ | mov [RC], RBa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ |->cont_nop: // BASE = base, (RC = result)
+ | ins_next
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | mov RA, L:RB->top
+ | mov [RA-12], PC // [cont|PC]
+ | movzx RC, PC_RA
+ | // Copy value to third argument.
+ |.if X64
+ | mov RBa, [BASE+RC*8]
+ | mov [RA+16], RBa
+ |.else
+ | mov RB, [BASE+RC*8+4]
+ | mov RC, [BASE+RC*8]
+ | mov [RA+20], RB
+ | mov [RA+16], RC
+ |.endif
+ | lea PC, [RA+FRAME_CONT]
+ | sub PC, BASE
+ | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here.
+ | mov NARGS:RD, 3+1 // 3 args for func(t, k, v).
+ | jmp ->vm_call_dispatch_f
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ |.if X64
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d == BASE.
+ |.if X64WIN
+ | lea CARG3d, [BASE+RD*8]
+ | lea CARG2d, [BASE+RA*8]
+ |.else
+ | lea CARG2d, [BASE+RA*8]
+ | lea CARG3d, [BASE+RD*8]
+ |.endif
+ | mov CARG1d, L:RB // Caveat: CARG1d/CARG4d == RA.
+ | movzx CARG4d, PC_OP
+ |.else
+ | movzx RB, PC_OP
+ | lea RD, [BASE+RD*8]
+ | lea RA, [BASE+RA*8]
+ | mov ARG4, RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RD
+ | mov ARG2, RA
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ |3:
+ | mov BASE, L:RB->base
+ | cmp RC, 1
+ | ja ->vmeta_binop
+ |4:
+ | lea PC, [PC+4]
+ | jb >6
+ |5:
+ | movzx RD, PC_RD
+ | branchPC RD
+ |6:
+ | ins_next
+ |
+ |->cont_condt: // BASE = base, RC = result
+ | add PC, 4
+ | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is true.
+ | jb <5
+ | jmp <6
+ |
+ |->cont_condf: // BASE = base, RC = result
+ | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is false.
+ | jmp <4
+ |
+ |->vmeta_equal:
+ | sub PC, 4
+ |.if X64WIN
+ | mov CARG3d, RD
+ | mov CARG4d, RB
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d == BASE.
+ | mov CARG2d, RA
+ | mov CARG1d, L:RB // Caveat: CARG1d == RA.
+ |.elif X64
+ | mov CARG2d, RA
+ | mov CARG4d, RB // Caveat: CARG4d == RA.
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG3d == BASE.
+ | mov CARG3d, RD
+ | mov CARG1d, L:RB
+ |.else
+ | mov ARG4, RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RD
+ | mov ARG2, RA
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ | jmp <3
+ |
+ |->vmeta_equal_cd:
+#if LJ_HASFFI
+ | sub PC, 4
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov FCARG1, L:RB
+ | mov FCARG2, dword [PC-4]
+ | mov SAVE_PC, PC
+ | call extern lj_meta_equal_cd@8 // (lua_State *L, BCIns ins)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ | jmp <3
+#endif
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vno:
+#if LJ_DUALNUM
+ | movzx RB, PC_RB
+#endif
+ |->vmeta_arith_vn:
+ | lea RC, [KBASE+RC*8]
+ | jmp >1
+ |
+ |->vmeta_arith_nvo:
+#if LJ_DUALNUM
+ | movzx RC, PC_RC
+#endif
+ |->vmeta_arith_nv:
+ | lea RC, [KBASE+RC*8]
+ | lea RB, [BASE+RB*8]
+ | xchg RB, RC
+ | jmp >2
+ |
+ |->vmeta_unm:
+ | lea RC, [BASE+RD*8]
+ | mov RB, RC
+ | jmp >2
+ |
+ |->vmeta_arith_vvo:
+#if LJ_DUALNUM
+ | movzx RB, PC_RB
+#endif
+ |->vmeta_arith_vv:
+ | lea RC, [BASE+RC*8]
+ |1:
+ | lea RB, [BASE+RB*8]
+ |2:
+ | lea RA, [BASE+RA*8]
+ |.if X64WIN
+ | mov CARG3d, RB
+ | mov CARG4d, RC
+ | movzx RC, PC_OP
+ | mov ARG5d, RC
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d == BASE.
+ | mov CARG2d, RA
+ | mov CARG1d, L:RB // Caveat: CARG1d == RA.
+ |.elif X64
+ | movzx CARG5d, PC_OP
+ | mov CARG2d, RA
+ | mov CARG4d, RC // Caveat: CARG4d == RA.
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE // Caveat: CARG3d == BASE.
+ | mov CARG3d, RB
+ | mov L:RB, L:CARG1d
+ |.else
+ | mov ARG3, RB
+ | mov L:RB, SAVE_L
+ | mov ARG4, RC
+ | movzx RC, PC_OP
+ | mov ARG2, RA
+ | mov ARG5, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = base, RC = new base, stack = cont/func/o1/o2
+ | mov RA, RC
+ | sub RC, BASE
+ | mov [RA-12], PC // [cont|PC]
+ | lea PC, [RC+FRAME_CONT]
+ | mov NARGS:RD, 2+1 // 2 args for func(o1, o2).
+ | jmp ->vm_call_dispatch
+ |
+ |->vmeta_len:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | lea FCARG2, [BASE+RD*8] // Caveat: FCARG2 == BASE
+ | mov L:FCARG1, L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_meta_len@8 // (lua_State *L, TValue *o)
+ | // NULL (retry) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | test RC, RC
+ | jne ->vmeta_binop // Binop call for compatibility.
+ | movzx RD, PC_RD
+ | mov TAB:FCARG1, [BASE+RD*8]
+ | jmp ->BC_LEN_Z
+#else
+ | jmp ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call_ra:
+ | lea RA, [BASE+RA*8+8]
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // BASE = old base, RA = new base, RC = nargs+1, PC = return
+ | mov TMP2, RA // Save RA, RC for us.
+ | mov TMP1, NARGS:RD
+ | sub RA, 8
+ |.if X64
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, RA
+ | lea CARG3d, [RA+NARGS:RD*8]
+ | mov CARG1d, L:RB // Caveat: CARG1d may be RA.
+ |.else
+ | lea RC, [RA+NARGS:RD*8]
+ | mov L:RB, SAVE_L
+ | mov ARG2, RA
+ | mov ARG3, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE // This is the caller's base!
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | mov BASE, L:RB->base
+ | mov RA, TMP2
+ | mov NARGS:RD, TMP1
+ | mov LFUNC:RB, [RA-8]
+ | add NARGS:RD, 1
+ | // This is fragile. L->base must not move, KBASE must always be defined.
+ | cmp KBASE, BASE // Continue with CALLT if flag set.
+ | je ->BC_CALLT_Z
+ | mov BASE, RA
+ | ins_call // Otherwise call resolved metamethod.
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov FCARG2, RA // Caveat: FCARG2 == BASE
+ | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA
+ | mov SAVE_PC, PC
+ | call extern lj_meta_for@8 // (lua_State *L, TValue *base)
+ | mov BASE, L:RB->base
+ | mov RC, [PC-4]
+ | movzx RA, RCH
+ | movzx OP, RCL
+ | shr RC, 16
+ |.if X64
+ | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI.
+ |.else
+ | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Retry FORI or JFORI.
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
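+ |// The .ffunc_* helper macros below define the fast-function entry
+ |// points: the _1/_2 variants check the argument count (NARGS:RD holds
+ |// nargs+1), the _n/_nn variants also type-check number arguments and
+ |// load them onto the x87 stack, and _nsse/_nnsse load them into
+ |// xmm0/xmm1 instead. Any mismatch branches to ->fff_fallback.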
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | cmp NARGS:RD, 1+1; jb ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | cmp NARGS:RD, 2+1; jb ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc_1 name
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | fld qword [BASE]
+ |.endmacro
+ |
+ |.macro .ffunc_n, name, op
+ | .ffunc_1 name
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | op
+ | fld qword [BASE]
+ |.endmacro
+ |
+ |.macro .ffunc_nsse, name, op
+ | .ffunc_1 name
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | op xmm0, qword [BASE]
+ |.endmacro
+ |
+ |.macro .ffunc_nsse, name
+ | .ffunc_nsse name, movsd
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc_2 name
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback
+ | fld qword [BASE]
+ | fld qword [BASE+8]
+ |.endmacro
+ |
+ |.macro .ffunc_nnsse, name
+ | .ffunc_2 name
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback
+ | movsd xmm0, qword [BASE]
+ | movsd xmm1, qword [BASE+8]
+ |.endmacro
+ |
+ |.macro .ffunc_nnr, name
+ | .ffunc_2 name
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback
+ | fld qword [BASE+8]
+ | fld qword [BASE]
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses label 1.
+ |.macro ffgccheck
+ | mov RB, [DISPATCH+DISPATCH_GL(gc.total)]
+ | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | jb >1
+ | call ->fff_gcstep
+ |1:
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | mov RB, [BASE+4]
+ | cmp RB, LJ_TISTRUECOND; jae ->fff_fallback
+ | mov PC, [BASE-4]
+ | mov MULTRES, RD
+ | mov [BASE-4], RB
+ | mov RB, [BASE]
+ | mov [BASE-8], RB
+ | sub RD, 2
+ | jz >2
+ | mov RA, BASE
+ |1:
+ | add RA, 8
+ |.if X64
+ | mov RBa, [RA]
+ | mov [RA-8], RBa
+ |.else
+ | mov RB, [RA+4]
+ | mov [RA-4], RB
+ | mov RB, [RA]
+ | mov [RA-8], RB
+ |.endif
+ | sub RD, 1
+ | jnz <1
+ |2:
+ | mov RD, MULTRES
+ | jmp ->fff_res_
+ |
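+ |// type(): the inverted type tag ~itype is clamped so that all numbers
+ |// end up in the ~LJ_TNUMX slot (lightuserdata is special-cased on x64),
+ |// and the result indexes the type-name strings kept in the closure's
+ |// upvalues.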
+ |.ffunc_1 type
+ | mov RB, [BASE+4]
+ |.if X64
+ | mov RA, RB
+ | sar RA, 15
+ | cmp RA, -2
+ | je >3
+ |.endif
+ | mov RC, ~LJ_TNUMX
+ | not RB
+ | cmp RC, RB
+ ||if (cmov) {
+ | cmova RC, RB
+ ||} else {
+ | jbe >1; mov RC, RB; 1:
+ ||}
+ |2:
+ | mov CFUNC:RB, [BASE-8]
+ | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))]
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TSTR
+ | mov [BASE-8], STR:RC
+ | jmp ->fff_res1
+ |.if X64
+ |3:
+ | mov RC, ~LJ_TLIGHTUD
+ | jmp <2
+ |.endif
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | mov RB, [BASE+4]
+ | mov PC, [BASE-4]
+ | cmp RB, LJ_TTAB; jne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | mov TAB:RB, [BASE]
+ | mov TAB:RB, TAB:RB->metatable
+ |2:
+ | test TAB:RB, TAB:RB
+ | mov dword [BASE-4], LJ_TNIL
+ | jz ->fff_res1
+ | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable)]
+ | mov dword [BASE-4], LJ_TTAB // Store metatable as default result.
+ | mov [BASE-8], TAB:RB
+ | mov RA, TAB:RB->hmask
+ | and RA, STR:RC->hash
+ | imul RA, #NODE
+ | add NODE:RA, TAB:RB->node
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | cmp dword NODE:RA->key.it, LJ_TSTR
+ | jne >4
+ | cmp dword NODE:RA->key.gcr, STR:RC
+ | je >5
+ |4:
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <3
+ | jmp ->fff_res1 // Not found, keep default result.
+ |5:
+ | mov RB, [RA+4]
+ | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value.
+ | mov RC, [RA]
+ | mov [BASE-4], RB // Return value of mt.__metatable.
+ | mov [BASE-8], RC
+ | jmp ->fff_res1
+ |
+ |6:
+ | cmp RB, LJ_TUDATA; je <1
+ |.if X64
+ | cmp RB, LJ_TNUMX; ja >8
+ | cmp RB, LJ_TISNUM; jbe >7
+ | mov RB, LJ_TLIGHTUD
+ | jmp >8
+ |7:
+ |.else
+ | cmp RB, LJ_TISNUM; ja >8
+ |.endif
+ | mov RB, LJ_TNUMX
+ |8:
+ | not RB
+ | mov TAB:RB, [DISPATCH+RB*4+DISPATCH_GL(gcroot[GCROOT_BASEMT])]
+ | jmp <2
+ |
+ |.ffunc_2 setmetatable
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | mov TAB:RB, [BASE]
+ | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback
+ | cmp dword [BASE+12], LJ_TTAB; jne ->fff_fallback
+ | mov TAB:RC, [BASE+8]
+ | mov TAB:RB->metatable, TAB:RC
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TTAB // Return original table.
+ | mov [BASE-8], TAB:RB
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jz >1
+ | // Possible write barrier. Table is black, but skip iswhite(mt) check.
+ | barrierback TAB:RB, RC
+ |1:
+ | jmp ->fff_res1
+ |
+ |.ffunc_2 rawget
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+ |.if X64WIN
+ | mov RB, BASE // Save BASE.
+ | lea CARG3d, [BASE+8]
+ | mov CARG2d, [BASE] // Caveat: CARG2d == BASE.
+ | mov CARG1d, SAVE_L
+ |.elif X64
+ | mov RB, BASE // Save BASE.
+ | mov CARG2d, [BASE]
+ | lea CARG3d, [BASE+8] // Caveat: CARG3d == BASE.
+ | mov CARG1d, SAVE_L
+ |.else
+ | mov TAB:RD, [BASE]
+ | mov L:RB, SAVE_L
+ | mov ARG2, TAB:RD
+ | mov ARG1, L:RB
+ | mov RB, BASE // Save BASE.
+ | add BASE, 8
+ | mov ARG3, BASE
+ |.endif
+ | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // cTValue * returned in eax (RD).
+ | mov BASE, RB // Restore BASE.
+ | // Copy table slot.
+ |.if X64
+ | mov RBa, [RD]
+ | mov PC, [BASE-4]
+ | mov [BASE-8], RBa
+ |.else
+ | mov RB, [RD]
+ | mov RD, [RD+4]
+ | mov PC, [BASE-4]
+ | mov [BASE-8], RB
+ | mov [BASE-4], RD
+ |.endif
+ | jmp ->fff_res1
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument.
+ | cmp dword [BASE+4], LJ_TISNUM
+ if (LJ_DUALNUM) {
+ | jne >1
+ | mov RB, dword [BASE]; jmp ->fff_resi
+ |1:
+ | ja ->fff_fallback
+ } else {
+ | jae ->fff_fallback
+ }
+ if (sse) {
+ | movsd xmm0, qword [BASE]; jmp ->fff_resxmm0
+ } else {
+ | fld qword [BASE]; jmp ->fff_resn
+ }
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | mov PC, [BASE-4]
+ | cmp dword [BASE+4], LJ_TSTR; jne >3
+ | // A __tostring method in the string base metatable is ignored.
+ | mov STR:RD, [BASE]
+ |2:
+ | mov dword [BASE-4], LJ_TSTR
+ | mov [BASE-8], STR:RD
+ | jmp ->fff_res1
+ |3: // Handle numbers inline, unless a number base metatable is present.
+ | cmp dword [BASE+4], LJ_TISNUM; ja ->fff_fallback
+ | cmp dword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0
+ | jne ->fff_fallback
+ | ffgccheck // Caveat: uses label 1.
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Add frame since C call can throw.
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ |.if X64 and not X64WIN
+ | mov FCARG2, BASE // Otherwise: FCARG2 == BASE
+ |.endif
+ | mov L:FCARG1, L:RB
+ if (LJ_DUALNUM) {
+ | call extern lj_str_fromnumber@8 // (lua_State *L, cTValue *o)
+ } else {
+ | call extern lj_str_fromnum@8 // (lua_State *L, lua_Number *np)
+ }
+ | // GCstr returned in eax (RD).
+ | mov BASE, L:RB->base
+ | jmp <2
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | je >2 // Missing 2nd arg?
+ |1:
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Add frame since C call can throw.
+ | mov L:RB->top, BASE // Dummy frame length is ok.
+ | mov PC, [BASE-4]
+ |.if X64WIN
+ | lea CARG3d, [BASE+8]
+ | mov CARG2d, [BASE] // Caveat: CARG2d == BASE.
+ | mov CARG1d, L:RB
+ |.elif X64
+ | mov CARG2d, [BASE]
+ | lea CARG3d, [BASE+8] // Caveat: CARG3d == BASE.
+ | mov CARG1d, L:RB
+ |.else
+ | mov TAB:RD, [BASE]
+ | mov ARG2, TAB:RD
+ | mov ARG1, L:RB
+ | add BASE, 8
+ | mov ARG3, BASE
+ |.endif
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ | call extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
+ | // Flag returned in eax (RD).
+ | mov BASE, L:RB->base
+ | test RD, RD; jz >3 // End of traversal?
+ | // Copy key and value to results.
+ |.if X64
+ | mov RBa, [BASE+8]
+ | mov RDa, [BASE+16]
+ | mov [BASE-8], RBa
+ | mov [BASE], RDa
+ |.else
+ | mov RB, [BASE+8]
+ | mov RD, [BASE+12]
+ | mov [BASE-8], RB
+ | mov [BASE-4], RD
+ | mov RB, [BASE+16]
+ | mov RD, [BASE+20]
+ | mov [BASE], RB
+ | mov [BASE+4], RD
+ |.endif
+ |->fff_res2:
+ | mov RD, 1+2
+ | jmp ->fff_res
+ |2: // Set missing 2nd arg to nil.
+ | mov dword [BASE+12], LJ_TNIL
+ | jmp <1
+ |3: // End of traversal: return nil.
+ | mov dword [BASE-4], LJ_TNIL
+ | jmp ->fff_res1
+ |
+ |.ffunc_1 pairs
+ | mov TAB:RB, [BASE]
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+ | mov CFUNC:RB, [BASE-8]
+ | mov CFUNC:RD, CFUNC:RB->upvalue[0]
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TFUNC
+ | mov [BASE-8], CFUNC:RD
+ | mov dword [BASE+12], LJ_TNIL
+ | mov RD, 1+3
+ | jmp ->fff_res
+ |
+ |.ffunc_1 ipairs_aux
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM
+ if (LJ_DUALNUM) {
+ | jne ->fff_fallback
+ } else {
+ | jae ->fff_fallback
+ }
+ | mov PC, [BASE-4]
+ if (LJ_DUALNUM) {
+ | mov RD, dword [BASE+8]
+ | add RD, 1
+ | mov dword [BASE-4], LJ_TISNUM
+ | mov dword [BASE-8], RD
+ } else if (sse) {
+ | movsd xmm0, qword [BASE+8]
+ | sseconst_1 xmm1, RBa
+ | addsd xmm0, xmm1
+ | cvtsd2si RD, xmm0
+ | movsd qword [BASE-8], xmm0
+ } else {
+ |.if not X64
+ | fld qword [BASE+8]
+ | fld1
+ | faddp st1
+ | fist ARG1
+ | fstp qword [BASE-8]
+ | mov RD, ARG1
+ |.endif
+ }
+ | mov TAB:RB, [BASE]
+ | cmp RD, TAB:RB->asize; jae >2 // Not in array part?
+ | shl RD, 3
+ | add RD, TAB:RB->array
+ |1:
+ | cmp dword [RD+4], LJ_TNIL; je ->fff_res0
+ | // Copy array slot.
+ |.if X64
+ | mov RBa, [RD]
+ | mov [BASE], RBa
+ |.else
+ | mov RB, [RD]
+ | mov RD, [RD+4]
+ | mov [BASE], RB
+ | mov [BASE+4], RD
+ |.endif
+ | jmp ->fff_res2
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | cmp dword TAB:RB->hmask, 0; je ->fff_res0
+ | mov FCARG1, TAB:RB
+ | mov RB, BASE // Save BASE.
+ | mov FCARG2, RD // Caveat: FCARG2 == BASE
+ | call extern lj_tab_getinth@8 // (GCtab *t, int32_t key)
+ | // cTValue * or NULL returned in eax (RD).
+ | mov BASE, RB
+ | test RD, RD
+ | jnz <1
+ |->fff_res0:
+ | mov RD, 1+0
+ | jmp ->fff_res
+ |
+ |.ffunc_1 ipairs
+ | mov TAB:RB, [BASE]
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+ | mov CFUNC:RB, [BASE-8]
+ | mov CFUNC:RD, CFUNC:RB->upvalue[0]
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TFUNC
+ | mov [BASE-8], CFUNC:RD
+ if (LJ_DUALNUM) {
+ | mov dword [BASE+12], LJ_TISNUM
+ | mov dword [BASE+8], 0
+ } else if (sse) {
+ | xorps xmm0, xmm0
+ | movsd qword [BASE+8], xmm0
+ } else {
+ | fldz
+ | fstp qword [BASE+8]
+ }
+ | mov RD, 1+3
+ | jmp ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc_1 pcall
+ | lea RA, [BASE+8]
+ | sub NARGS:RD, 1
+ | mov PC, 8+FRAME_PCALL
+ |1:
+ | movzx RB, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | shr RB, HOOK_ACTIVE_SHIFT
+ | and RB, 1
+ | add PC, RB // Remember active hook before pcall.
+ | jmp ->vm_call_dispatch
+ |
+ |.ffunc_2 xpcall
+ | cmp dword [BASE+12], LJ_TFUNC; jne ->fff_fallback
+ | mov RB, [BASE+4] // Swap function and traceback.
+ | mov [BASE+12], RB
+ | mov dword [BASE+4], LJ_TFUNC
+ | mov LFUNC:RB, [BASE]
+ | mov PC, [BASE+8]
+ | mov [BASE+8], LFUNC:RB
+ | mov [BASE], PC
+ | lea RA, [BASE+16]
+ | sub NARGS:RD, 2
+ | mov PC, 16+FRAME_PCALL
+ | jmp <1
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | mov L:RB, [BASE]
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | mov CFUNC:RB, [BASE-8]
+ | mov L:RB, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ | mov PC, [BASE-4]
+ | mov SAVE_PC, PC
+ |.if X64
+ | mov TMP1, L:RB
+ |.else
+ | mov ARG1, L:RB
+ |.endif
+ |.if resume
+ | cmp dword [BASE+4], LJ_TTHREAD; jne ->fff_fallback
+ |.endif
+ | cmp aword L:RB->cframe, 0; jne ->fff_fallback
+ | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback
+ | mov RA, L:RB->top
+ | je >1 // Status != LUA_YIELD (i.e. 0)?
+ | cmp RA, L:RB->base // Check for presence of initial func.
+ | je ->fff_fallback
+ |1:
+ |.if resume
+ | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread).
+ |.else
+ | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1).
+ |.endif
+ | cmp PC, L:RB->maxstack; ja ->fff_fallback
+ | mov L:RB->top, PC
+ |
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ |.if resume
+ | add BASE, 8 // Keep resumed thread in stack for GC.
+ |.endif
+ | mov L:RB->top, BASE
+ |.if resume
+ | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move.
+ |.else
+ | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move.
+ |.endif
+ | sub RBa, PCa // Relative to PC.
+ |
+ | cmp PC, RA
+ | je >3
+ |2: // Move args to coroutine.
+ |.if X64
+ | mov RCa, [PC+RB]
+ | mov [PC-8], RCa
+ |.else
+ | mov RC, [PC+RB+4]
+ | mov [PC-4], RC
+ | mov RC, [PC+RB]
+ | mov [PC-8], RC
+ |.endif
+ | sub PC, 8
+ | cmp PC, RA
+ | jne <2
+ |3:
+ |.if X64
+ | mov CARG2d, RA
+ | mov CARG1d, TMP1
+ |.else
+ | mov ARG2, RA
+ | xor RA, RA
+ | mov ARG4, RA
+ | mov ARG3, RA
+ |.endif
+ | call ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | set_vmstate INTERP
+ |
+ | mov L:RB, SAVE_L
+ |.if X64
+ | mov L:PC, TMP1
+ |.else
+ | mov L:PC, ARG1 // The callee doesn't modify SAVE_L.
+ |.endif
+ | mov BASE, L:RB->base
+ | cmp eax, LUA_YIELD
+ | ja >8
+ |4:
+ | mov RA, L:PC->base
+ | mov KBASE, L:PC->top
+ | mov L:PC->top, RA // Clear coroutine stack.
+ | mov PC, KBASE
+ | sub PC, RA
+ | je >6 // No results?
+ | lea RD, [BASE+PC]
+ | shr PC, 3
+ | cmp RD, L:RB->maxstack
+ | ja >9 // Need to grow stack?
+ |
+ | mov RB, BASE
+ | sub RBa, RAa
+ |5: // Move results from coroutine.
+ |.if X64
+ | mov RDa, [RA]
+ | mov [RA+RB], RDa
+ |.else
+ | mov RD, [RA]
+ | mov [RA+RB], RD
+ | mov RD, [RA+4]
+ | mov [RA+RB+4], RD
+ |.endif
+ | add RA, 8
+ | cmp RA, KBASE
+ | jne <5
+ |6:
+ |.if resume
+ | lea RD, [PC+2] // nresults+1 = 1 + true + results.
+ | mov dword [BASE-4], LJ_TTRUE // Prepend true to results.
+ |.else
+ | lea RD, [PC+1] // nresults+1 = 1 + results.
+ |.endif
+ |7:
+ | mov PC, SAVE_PC
+ | mov MULTRES, RD
+ |.if resume
+ | mov RAa, -8
+ |.else
+ | xor RA, RA
+ |.endif
+ | test PC, FRAME_TYPE
+ | jz ->BC_RET_Z
+ | jmp ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | mov dword [BASE-4], LJ_TFALSE // Prepend false to results.
+ | mov RA, L:PC->top
+ | sub RA, 8
+ | mov L:PC->top, RA // Clear error from coroutine stack.
+ | // Copy error message.
+ |.if X64
+ | mov RDa, [RA]
+ | mov [BASE], RDa
+ |.else
+ | mov RD, [RA]
+ | mov [BASE], RD
+ | mov RD, [RA+4]
+ | mov [BASE+4], RD
+ |.endif
+ | mov RD, 1+2 // nresults+1 = 1 + false + error.
+ | jmp <7
+ |.else
+ | mov FCARG2, L:PC
+ | mov FCARG1, L:RB
+ | call extern lj_ffh_coroutine_wrap_err@8 // (lua_State *L, lua_State *co)
+ | // Error function does not return.
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ |.if X64
+ | mov L:RA, TMP1
+ |.else
+ | mov L:RA, ARG1 // The callee doesn't modify SAVE_L.
+ |.endif
+ | mov L:RA->top, KBASE // Undo coroutine stack clearing.
+ | mov FCARG2, PC
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ |.if X64
+ | mov L:PC, TMP1
+ |.else
+ | mov L:PC, ARG1
+ |.endif
+ | mov BASE, L:RB->base
+ | jmp <4 // Retry the stack move.
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
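+ |// Both instantiations share the code above: resume takes the thread as
+ |// its first argument, wrap fetches it from the closure upvalue, and only
+ |// resume prepends a true/false status to the returned values.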
+ |
+ |.ffunc coroutine_yield
+ | mov L:RB, SAVE_L
+ | test aword L:RB->cframe, CFRAME_RESUME
+ | jz ->fff_fallback
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB->top, RD
+ | xor RD, RD
+ | mov aword L:RB->cframe, RDa
+ | mov al, LUA_YIELD
+ | mov byte L:RB->status, al
+ | jmp ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ if (!LJ_DUALNUM) {
+ |->fff_resi: // Dummy.
+ }
+ if (sse) {
+ |->fff_resn:
+ | mov PC, [BASE-4]
+ | fstp qword [BASE-8]
+ | jmp ->fff_res1
+ }
+ | .ffunc_1 math_abs
+ if (LJ_DUALNUM) {
+ | cmp dword [BASE+4], LJ_TISNUM; jne >2
+ | mov RB, dword [BASE]
+ | cmp RB, 0; jns ->fff_resi
+ | neg RB; js >1
+ |->fff_resbit:
+ |->fff_resi:
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TISNUM
+ | mov dword [BASE-8], RB
+ | jmp ->fff_res1
+ |1:
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], 0x41e00000 // 2^31.
+ | mov dword [BASE-8], 0
+ | jmp ->fff_res1
+ |2:
+ | ja ->fff_fallback
+ } else {
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ }
+ if (sse) {
+ | movsd xmm0, qword [BASE]
+ | sseconst_abs xmm1, RDa
+ | andps xmm0, xmm1
+ |->fff_resxmm0:
+ | mov PC, [BASE-4]
+ | movsd qword [BASE-8], xmm0
+ | // fallthrough
+ } else {
+ | fld qword [BASE]
+ | fabs
+ | // fallthrough
+ |->fff_resxmm0: // Dummy.
+ |->fff_resn:
+ | mov PC, [BASE-4]
+ | fstp qword [BASE-8]
+ }
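+ |// Common fast-function return: RD holds nresults+1. fff_res saves it in
+ |// MULTRES, compares it against PC_RB of the calling instruction
+ |// (expected results+1), fills missing results with nil and dispatches
+ |// the next instruction; non-standard frames go through ->vm_return.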
+ |->fff_res1:
+ | mov RD, 1+1
+ |->fff_res:
+ | mov MULTRES, RD
+ |->fff_res_:
+ | test PC, FRAME_TYPE
+ | jnz >7
+ |5:
+ | cmp PC_RB, RDL // More results expected?
+ | ja >6
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | movzx RA, PC_RA
+ | not RAa // Note: ~RA = -(RA+1)
+ | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | mov dword [BASE+RD*8-12], LJ_TNIL
+ | add RD, 1
+ | jmp <5
+ |
+ |7: // Non-standard return case.
+ | mov RAa, -8 // Results start at BASE+RA = BASE-8.
+ | jmp ->vm_return
+ |
+ |.macro math_round, func
+ | .ffunc math_ .. func
+ ||if (LJ_DUALNUM) {
+ | cmp dword [BASE+4], LJ_TISNUM; jne >1
+ | mov RB, dword [BASE]; jmp ->fff_resi
+ |1:
+ | ja ->fff_fallback
+ ||} else {
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ ||}
+ ||if (sse) {
+ | movsd xmm0, qword [BASE]
+ | call ->vm_ .. func
+ || if (LJ_DUALNUM) {
+ | cvtsd2si RB, xmm0
+ | cmp RB, 0x80000000
+ | jne ->fff_resi
+ | cvtsi2sd xmm1, RB
+ | ucomisd xmm0, xmm1
+ | jp ->fff_resxmm0
+ | je ->fff_resi
+ || }
+ | jmp ->fff_resxmm0
+ ||} else {
+ | fld qword [BASE]
+ | call ->vm_ .. func
+ || if (LJ_DUALNUM) {
+ |.if not X64
+ | fist ARG1
+ | mov RB, ARG1
+ | cmp RB, 0x80000000; jne >2
+ | fdup
+ | fild ARG1
+ | fcomparepp
+ | jp ->fff_resn
+ | jne ->fff_resn
+ |2:
+ | fpop
+ | jmp ->fff_resi
+ |.endif
+ || } else {
+ | jmp ->fff_resn
+ || }
+ ||}
+ |.endmacro
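+ |// Note on the DUALNUM path above: cvtsd2si/fist yield 0x80000000 both
+ |// for -2^31 and for out-of-range input, so that case is re-checked with
+ |// a compare; only a genuine -2^31 is returned as an integer, anything
+ |// else falls back to returning the FP result.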
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ if (sse) {
+ |.ffunc_nsse math_sqrt, sqrtsd; jmp ->fff_resxmm0
+ } else {
+ |.ffunc_n math_sqrt; fsqrt; jmp ->fff_resn
+ }
+ |.ffunc_n math_log, fldln2; fyl2x; jmp ->fff_resn
+ |.ffunc_n math_log10, fldlg2; fyl2x; jmp ->fff_resn
+ |.ffunc_n math_exp; call ->vm_exp_x87; jmp ->fff_resn
+ |
+ |.ffunc_n math_sin; fsin; jmp ->fff_resn
+ |.ffunc_n math_cos; fcos; jmp ->fff_resn
+ |.ffunc_n math_tan; fptan; fpop; jmp ->fff_resn
+ |
+ |.ffunc_n math_asin
+ | fdup; fmul st0; fld1; fsubrp st1; fsqrt; fpatan
+ | jmp ->fff_resn
+ |.ffunc_n math_acos
+ | fdup; fmul st0; fld1; fsubrp st1; fsqrt; fxch; fpatan
+ | jmp ->fff_resn
+ |.ffunc_n math_atan; fld1; fpatan; jmp ->fff_resn
+ |
+ |.macro math_extern, func
+ ||if (sse) {
+ | .ffunc_nsse math_ .. func
+ | .if not X64
+ | movsd FPARG1, xmm0
+ | .endif
+ ||} else {
+ | .if not X64
+ | .ffunc_n math_ .. func
+ | fstp FPARG1
+ | .endif
+ ||}
+ | mov RB, BASE
+ | call extern lj_vm_ .. func
+ | mov BASE, RB
+ | .if X64
+ | jmp ->fff_resxmm0
+ | .else
+ | jmp ->fff_resn
+ | .endif
+ |.endmacro
+ |
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ |
+ |->ff_math_deg:
+ if (sse) {
+ |.ffunc_nsse math_rad
+ | mov CFUNC:RB, [BASE-8]
+ | mulsd xmm0, qword CFUNC:RB->upvalue[0]
+ | jmp ->fff_resxmm0
+ } else {
+ |.ffunc_n math_rad
+ | mov CFUNC:RB, [BASE-8]
+ | fmul qword CFUNC:RB->upvalue[0]
+ | jmp ->fff_resn
+ }
+ |
+ |.ffunc_nn math_atan2; fpatan; jmp ->fff_resn
+ |.ffunc_nnr math_ldexp; fscale; fpop1; jmp ->fff_resn
+ |
+ |.ffunc_1 math_frexp
+ | mov RB, [BASE+4]
+ | cmp RB, LJ_TISNUM; jae ->fff_fallback
+ | mov PC, [BASE-4]
+ | mov RC, [BASE]
+ | mov [BASE-4], RB; mov [BASE-8], RC
+ | shl RB, 1; cmp RB, 0xffe00000; jae >3
+ | or RC, RB; jz >3
+ | mov RC, 1022
+ | cmp RB, 0x00200000; jb >4
+ |1:
+ | shr RB, 21; sub RB, RC // Extract and unbias exponent.
+ if (sse) {
+ | cvtsi2sd xmm0, RB
+ } else {
+ | mov TMP1, RB; fild TMP1
+ }
+ | mov RB, [BASE-4]
+ | and RB, 0x800fffff // Mask off exponent.
+ | or RB, 0x3fe00000 // Put mantissa in range [0.5,1) or 0.
+ | mov [BASE-4], RB
+ |2:
+ if (sse) {
+ | movsd qword [BASE], xmm0
+ } else {
+ | fstp qword [BASE]
+ }
+ | mov RD, 1+2
+ | jmp ->fff_res
+ |3: // Return +-0, +-Inf, NaN unmodified and an exponent of 0.
+ if (sse) {
+ | xorps xmm0, xmm0; jmp <2
+ } else {
+ | fldz; jmp <2
+ }
+ |4: // Handle denormals by multiplying with 2^54 and adjusting the bias.
+ if (sse) {
+ | movsd xmm0, qword [BASE]
+ | sseconst_hi xmm1, RBa, 43500000 // 2^54.
+ | mulsd xmm0, xmm1
+ | movsd qword [BASE-8], xmm0
+ } else {
+ | fld qword [BASE]
+ | mov TMP1, 0x5a800000; fmul TMP1 // x = x*2^54
+ | fstp qword [BASE-8]
+ }
+ | mov RB, [BASE-4]; mov RC, 1076; shl RB, 1; jmp <1
+ |
+ if (sse) {
+ |.ffunc_nsse math_modf
+ } else {
+ |.ffunc_n math_modf
+ }
+ | mov RB, [BASE+4]
+ | mov PC, [BASE-4]
+ | shl RB, 1; cmp RB, 0xffe00000; je >4 // +-Inf?
+ if (sse) {
+ | movaps xmm4, xmm0
+ | call ->vm_trunc
+ | subsd xmm4, xmm0
+ |1:
+ | movsd qword [BASE-8], xmm0
+ | movsd qword [BASE], xmm4
+ } else {
+ | fdup
+ | call ->vm_trunc
+ | fsub st1, st0
+ |1:
+ | fstp qword [BASE-8]
+ | fstp qword [BASE]
+ }
+ | mov RC, [BASE-4]; mov RB, [BASE+4]
+ | xor RC, RB; js >3 // Need to adjust sign?
+ |2:
+ | mov RD, 1+2
+ | jmp ->fff_res
+ |3:
+ | xor RB, 0x80000000; mov [BASE+4], RB // Flip sign of fraction.
+ | jmp <2
+ |4:
+ if (sse) {
+ | xorps xmm4, xmm4; jmp <1 // Return +-Inf and +-0.
+ } else {
+ | fldz; fxch; jmp <1 // Return +-Inf and +-0.
+ }
+ |
+ |.ffunc_nnr math_fmod
+ |1: ; fprem; fnstsw ax; sahf; jp <1
+ | fpop1
+ | jmp ->fff_resn
+ |
+ if (sse) {
+ |.ffunc_nnsse math_pow; call ->vm_pow; jmp ->fff_resxmm0
+ } else {
+ |.ffunc_nn math_pow; call ->vm_pow; jmp ->fff_resn
+ }
+ |
+ |.macro math_minmax, name, cmovop, fcmovop, nofcmovop, sseop
+ | .ffunc name
+ | mov RA, 2
+ | cmp dword [BASE+4], LJ_TISNUM
+ ||if (LJ_DUALNUM) {
+ | jne >4
+ | mov RB, dword [BASE]
+ |1: // Handle integers.
+ | cmp RA, RD; jae ->fff_resi
+ | cmp dword [BASE+RA*8-4], LJ_TISNUM; jne >3
+ | cmp RB, dword [BASE+RA*8-8]
+ | cmovop RB, dword [BASE+RA*8-8]
+ | add RA, 1
+ | jmp <1
+ |3:
+ | ja ->fff_fallback
+ | // Convert intermediate result to number and continue below.
+ ||if (sse) {
+ | cvtsi2sd xmm0, RB
+ ||} else {
+ |.if not X64
+ | mov TMP1, RB
+ | fild TMP1
+ |.endif
+ ||}
+ | jmp >6
+ |4:
+ | ja ->fff_fallback
+ ||} else {
+ | jae ->fff_fallback
+ ||}
+ |
+ ||if (sse) {
+ | movsd xmm0, qword [BASE]
+ |5: // Handle numbers or integers.
+ | cmp RA, RD; jae ->fff_resxmm0
+ | cmp dword [BASE+RA*8-4], LJ_TISNUM
+ ||if (LJ_DUALNUM) {
+ | jb >6
+ | ja ->fff_fallback
+ | cvtsi2sd xmm1, dword [BASE+RA*8-8]
+ | jmp >7
+ ||} else {
+ | jae ->fff_fallback
+ ||}
+ |6:
+ | movsd xmm1, qword [BASE+RA*8-8]
+ |7:
+ | sseop xmm0, xmm1
+ | add RA, 1
+ | jmp <5
+ ||} else {
+ |.if not X64
+ | fld qword [BASE]
+ |5: // Handle numbers or integers.
+ | cmp RA, RD; jae ->fff_resn
+ | cmp dword [BASE+RA*8-4], LJ_TISNUM
+ ||if (LJ_DUALNUM) {
+ | jb >6
+ | ja >9
+ | fild dword [BASE+RA*8-8]
+ | jmp >7
+ ||} else {
+ | jae >9
+ ||}
+ |6:
+ | fld qword [BASE+RA*8-8]
+ |7:
+ ||if (cmov) {
+ | fucomi st1; fcmovop st1; fpop1
+ ||} else {
+ | push eax
+ | fucom st1; fnstsw ax; test ah, 1; nofcmovop >2; fxch; 2: ; fpop
+ | pop eax
+ ||}
+ | add RA, 1
+ | jmp <5
+ |.endif
+ ||}
+ |.endmacro
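+ |// math_minmax scans the arguments left to right, keeping the running
+ |// min/max in RB (integer path) or in xmm0/st0 (FP path); any argument
+ |// that is not a number aborts to ->fff_fallback.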
+ |
+ | math_minmax math_min, cmovg, fcmovnbe, jz, minsd
+ | math_minmax math_max, cmovl, fcmovbe, jnz, maxsd
+ if (!sse) {
+ |9:
+ | fpop; jmp ->fff_fallback
+ }
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc_1 string_len
+ | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
+ | mov STR:RB, [BASE]
+ if (LJ_DUALNUM) {
+ | mov RB, dword STR:RB->len; jmp ->fff_resi
+ } else if (sse) {
+ | cvtsi2sd xmm0, dword STR:RB->len; jmp ->fff_resxmm0
+ } else {
+ | fild dword STR:RB->len; jmp ->fff_resn
+ }
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | cmp NARGS:RD, 1+1; jne ->fff_fallback
+ | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
+ | mov STR:RB, [BASE]
+ | mov PC, [BASE-4]
+ | cmp dword STR:RB->len, 1
+ | jb ->fff_res0 // Return no results for empty string.
+ | movzx RB, byte STR:RB[1]
+ if (LJ_DUALNUM) {
+ | jmp ->fff_resi
+ } else if (sse) {
+ | cvtsi2sd xmm0, RB; jmp ->fff_resxmm0
+ } else {
+ | mov TMP1, RB; fild TMP1; jmp ->fff_resn
+ }
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | cmp NARGS:RD, 1+1; jne ->fff_fallback // *Exactly* 1 arg.
+ | cmp dword [BASE+4], LJ_TISNUM
+ if (LJ_DUALNUM) {
+ | jne ->fff_fallback
+ | mov RB, dword [BASE]
+ | cmp RB, 255; ja ->fff_fallback
+ | mov TMP2, RB
+ } else if (sse) {
+ | jae ->fff_fallback
+ | cvttsd2si RB, qword [BASE]
+ | cmp RB, 255; ja ->fff_fallback
+ | mov TMP2, RB
+ } else {
+ | jae ->fff_fallback
+ | fld qword [BASE]
+ | fistp TMP2
+ | cmp TMP2, 255; ja ->fff_fallback
+ }
+ |.if X64
+ | mov TMP3, 1
+ |.else
+ | mov ARG3, 1
+ |.endif
+ | lea RDa, TMP2 // Points to stack. Little-endian.
+ |->fff_newstr:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ |.if X64
+ | mov CARG3d, TMP3 // Zero-extended to size_t.
+ | mov CARG2, RDa // May be 64 bit ptr to stack.
+ | mov CARG1d, L:RB
+ |.else
+ | mov ARG2, RD
+ | mov ARG1, L:RB
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_str_new // (lua_State *L, char *str, size_t l)
+ | // GCstr * returned in eax (RD).
+ | mov BASE, L:RB->base
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TSTR
+ | mov [BASE-8], STR:RD
+ | jmp ->fff_res1
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | mov TMP2, -1
+ | cmp NARGS:RD, 1+2; jb ->fff_fallback
+ | jna >1
+ | cmp dword [BASE+20], LJ_TISNUM
+ if (LJ_DUALNUM) {
+ | jne ->fff_fallback
+ | mov RB, dword [BASE+16]
+ | mov TMP2, RB
+ } else if (sse) {
+ | jae ->fff_fallback
+ | cvttsd2si RB, qword [BASE+16]
+ | mov TMP2, RB
+ } else {
+ | jae ->fff_fallback
+ | fld qword [BASE+16]
+ | fistp TMP2
+ }
+ |1:
+ | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM
+ if (LJ_DUALNUM) {
+ | jne ->fff_fallback
+ } else {
+ | jae ->fff_fallback
+ }
+ | mov STR:RB, [BASE]
+ | mov TMP3, STR:RB
+ | mov RB, STR:RB->len
+ if (LJ_DUALNUM) {
+ | mov RA, dword [BASE+8]
+ } else if (sse) {
+ | cvttsd2si RA, qword [BASE+8]
+ } else {
+ |.if not X64
+ | fld qword [BASE+8]
+ | fistp ARG3
+ | mov RA, ARG3
+ |.endif
+ }
+ | mov RC, TMP2
+ | cmp RB, RC // len < end? (unsigned compare)
+ | jb >5
+ |2:
+ | test RA, RA // start <= 0?
+ | jle >7
+ |3:
+ | mov STR:RB, TMP3
+ | sub RC, RA // start > end?
+ | jl ->fff_emptystr
+ | lea RB, [STR:RB+RA+#STR-1]
+ | add RC, 1
+ |4:
+ |.if X64
+ | mov TMP3, RC
+ |.else
+ | mov ARG3, RC
+ |.endif
+ | mov RD, RB
+ | jmp ->fff_newstr
+ |
+ |5: // Negative end or overflow.
+ | jl >6
+ | lea RC, [RC+RB+1] // end = end+(len+1)
+ | jmp <2
+ |6: // Overflow.
+ | mov RC, RB // end = len
+ | jmp <2
+ |
+ |7: // Negative start or underflow.
+ | je >8
+ | add RA, RB // start = start+(len+1)
+ | add RA, 1
+ | jg <3 // start > 0?
+ |8: // Underflow.
+ | mov RA, 1 // start = 1
+ | jmp <3
+ |
+ |->fff_emptystr: // Range underflow.
+ | xor RC, RC // Zero length. Any ptr in RB is ok.
+ | jmp <4
+ |
+ |.ffunc_2 string_rep // Only handle the 1-char case inline.
+ | ffgccheck
+ | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM
+ | mov STR:RB, [BASE]
+ if (LJ_DUALNUM) {
+ | jne ->fff_fallback
+ | mov RC, dword [BASE+8]
+ } else if (sse) {
+ | jae ->fff_fallback
+ | cvttsd2si RC, qword [BASE+8]
+ } else {
+ | jae ->fff_fallback
+ | fld qword [BASE+8]
+ | fistp TMP2
+ | mov RC, TMP2
+ }
+ | test RC, RC
+ | jle ->fff_emptystr // Count <= 0? (or non-int)
+ | cmp dword STR:RB->len, 1
+ | jb ->fff_emptystr // Zero length string?
+ | jne ->fff_fallback_2 // Fallback for > 1-char strings.
+ | cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC; jb ->fff_fallback_2
+ | movzx RA, byte STR:RB[1]
+ | mov RB, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
+ |.if X64
+ | mov TMP3, RC
+ |.else
+ | mov ARG3, RC
+ |.endif
+ |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
+ | mov [RB], RAL
+ | add RB, 1
+ | sub RC, 1
+ | jnz <1
+ | mov RD, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
+ | jmp ->fff_newstr
+ |
+ |.ffunc_1 string_reverse
+ | ffgccheck
+ | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
+ | mov STR:RB, [BASE]
+ | mov RC, STR:RB->len
+ | test RC, RC
+ | jz ->fff_emptystr // Zero length string?
+ | cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC; jb ->fff_fallback_1
+ | add RB, #STR
+ | mov TMP2, PC // Need another temp register.
+ |.if X64
+ | mov TMP3, RC
+ |.else
+ | mov ARG3, RC
+ |.endif
+ | mov PC, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
+ |1:
+ | movzx RA, byte [RB]
+ | add RB, 1
+ | sub RC, 1
+ | mov [PC+RC], RAL
+ | jnz <1
+ | mov RD, PC
+ | mov PC, TMP2
+ | jmp ->fff_newstr
+ |
+ |.macro ffstring_case, name, lo, hi
+ | .ffunc_1 name
+ | ffgccheck
+ | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
+ | mov STR:RB, [BASE]
+ | mov RC, STR:RB->len
+ | cmp [DISPATCH+DISPATCH_GL(tmpbuf.sz)], RC; jb ->fff_fallback_1
+ | add RB, #STR
+ | mov TMP2, PC // Need another temp register.
+ |.if X64
+ | mov TMP3, RC
+ |.else
+ | mov ARG3, RC
+ |.endif
+ | mov PC, [DISPATCH+DISPATCH_GL(tmpbuf.buf)]
+ | jmp >3
+ |1: // ASCII case conversion. Yes, this is suboptimal code (do you care?).
+ | movzx RA, byte [RB+RC]
+ | cmp RA, lo
+ | jb >2
+ | cmp RA, hi
+ | ja >2
+ | xor RA, 0x20
+ |2:
+ | mov [PC+RC], RAL
+ |3:
+ | sub RC, 1
+ | jns <1
+ | mov RD, PC
+ | mov PC, TMP2
+ | jmp ->fff_newstr
+ |.endmacro
+ |
+ |ffstring_case string_lower, 0x41, 0x5a
+ |ffstring_case string_upper, 0x61, 0x7a
+ |
+ |//-- Table library ------------------------------------------------------
+ |
+ |.ffunc_1 table_getn
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+ | mov RB, BASE // Save BASE.
+ | mov TAB:FCARG1, [BASE]
+ | call extern lj_tab_len@4 // LJ_FASTCALL (GCtab *t)
+ | // Length of table returned in eax (RD).
+ | mov BASE, RB // Restore BASE.
+ if (LJ_DUALNUM) {
+ | mov RB, RD; jmp ->fff_resi
+ } else if (sse) {
+ | cvtsi2sd xmm0, RD; jmp ->fff_resxmm0
+ } else {
+ |.if not X64
+ | mov ARG1, RD; fild ARG1; jmp ->fff_resn
+ |.endif
+ }
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |.define TOBIT_BIAS, 0x59c00000 // 2^52 + 2^51 (float, not double!).
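+ |// Adding this bias to a number shifts its integer value into the low
+ |// 32 bits of the mantissa, so reading those bits back (movd or ARG1)
+ |// yields the argument converted to a 32-bit integer for the bit ops.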
+ |
+ |.macro .ffunc_bit, name, kind
+ | .ffunc_1 name
+ |.if kind == 2
+ ||if (sse) {
+ | sseconst_tobit xmm1, RBa
+ ||} else {
+ | mov TMP1, TOBIT_BIAS
+ ||}
+ |.endif
+ | cmp dword [BASE+4], LJ_TISNUM
+ ||if (LJ_DUALNUM) {
+ | jne >1
+ | mov RB, dword [BASE]
+ |.if kind > 0
+ | jmp >2
+ |.else
+ | jmp ->fff_resbit
+ |.endif
+ |1:
+ | ja ->fff_fallback
+ ||} else {
+ | jae ->fff_fallback
+ ||}
+ ||if (sse) {
+ | movsd xmm0, qword [BASE]
+ |.if kind < 2
+ | sseconst_tobit xmm1, RBa
+ |.endif
+ | addsd xmm0, xmm1
+ | movd RB, xmm0
+ ||} else {
+ |.if not X64
+ | fld qword [BASE]
+ |.if kind < 2
+ | mov TMP1, TOBIT_BIAS
+ |.endif
+ | fadd TMP1
+ | fstp FPARG1
+ |.if kind > 0
+ | mov RB, ARG1
+ |.endif
+ |.endif
+ ||}
+ |2:
+ |.endmacro
+ |
+ |.ffunc_bit bit_tobit, 0
+ if (LJ_DUALNUM || sse) {
+ if (!sse) {
+ |.if not X64
+ | mov RB, ARG1
+ |.endif
+ }
+ | jmp ->fff_resbit
+ } else {
+ |.if not X64
+ | fild ARG1
+ | jmp ->fff_resn
+ |.endif
+ }
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name, 2
+ | mov TMP2, NARGS:RD // Save for fallback.
+ | lea RD, [BASE+NARGS:RD*8-16]
+ |1:
+ | cmp RD, BASE
+ | jbe ->fff_resbit
+ | cmp dword [RD+4], LJ_TISNUM
+ ||if (LJ_DUALNUM) {
+ | jne >2
+ | ins RB, dword [RD]
+ | sub RD, 8
+ | jmp <1
+ |2:
+ | ja ->fff_fallback_bit_op
+ ||} else {
+ | jae ->fff_fallback_bit_op
+ ||}
+ ||if (sse) {
+ | movsd xmm0, qword [RD]
+ | addsd xmm0, xmm1
+ | movd RA, xmm0
+ | ins RB, RA
+ ||} else {
+ |.if not X64
+ | fld qword [RD]
+ | fadd TMP1
+ | fstp FPARG1
+ | ins RB, ARG1
+ |.endif
+ ||}
+ | sub RD, 8
+ | jmp <1
+ |.endmacro
+ |
+ |.ffunc_bit_op bit_band, and
+ |.ffunc_bit_op bit_bor, or
+ |.ffunc_bit_op bit_bxor, xor
+ |
+ |.ffunc_bit bit_bswap, 1
+ | bswap RB
+ | jmp ->fff_resbit
+ |
+ |.ffunc_bit bit_bnot, 1
+ | not RB
+ if (LJ_DUALNUM) {
+ | jmp ->fff_resbit
+ } else if (sse) {
+ |->fff_resbit:
+ | cvtsi2sd xmm0, RB
+ | jmp ->fff_resxmm0
+ } else {
+ |.if not X64
+ |->fff_resbit:
+ | mov ARG1, RB
+ | fild ARG1
+ | jmp ->fff_resn
+ |.endif
+ }
+ |
+ |->fff_fallback_bit_op:
+ | mov NARGS:RD, TMP2 // Restore for fallback
+ | jmp ->fff_fallback
+ |
+ |.macro .ffunc_bit_sh, name, ins
+ ||if (LJ_DUALNUM) {
+ | .ffunc_bit name, 1
+ | // Note: no inline conversion from number for 2nd argument!
+ | cmp dword [BASE+12], LJ_TISNUM; jne ->fff_fallback
+ | mov RA, dword [BASE+8]
+ ||} else if (sse) {
+ | .ffunc_nnsse name
+ | sseconst_tobit xmm2, RBa
+ | addsd xmm0, xmm2
+ | addsd xmm1, xmm2
+ | movd RB, xmm0
+ | movd RA, xmm1
+ ||} else {
+ |.if not X64
+ | .ffunc_nn name
+ | mov TMP1, TOBIT_BIAS
+ | fadd TMP1
+ | fstp FPARG3
+ | fadd TMP1
+ | fstp FPARG1
+ | mov RA, ARG3
+ | mov RB, ARG1
+ |.endif
+ ||}
+ | ins RB, cl // Assumes RA is ecx.
+ | jmp ->fff_resbit
+ |.endmacro
+ |
+ |.ffunc_bit_sh bit_lshift, shl
+ |.ffunc_bit_sh bit_rshift, shr
+ |.ffunc_bit_sh bit_arshift, sar
+ |.ffunc_bit_sh bit_rol, rol
+ |.ffunc_bit_sh bit_ror, ror
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback_2:
+ | mov NARGS:RD, 1+2 // Other args are ignored, anyway.
+ | jmp ->fff_fallback
+ |->fff_fallback_1:
+ | mov NARGS:RD, 1+1 // Other args are ignored, anyway.
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RD = nargs+1
+ | mov L:RB, SAVE_L
+ | mov PC, [BASE-4] // Fallback may overwrite PC.
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler.
+ | mov L:RB->top, RD
+ | mov CFUNC:RD, [BASE-8]
+ | cmp RA, L:RB->maxstack
+ | ja >5 // Need to grow stack.
+ |.if X64
+ | mov CARG1d, L:RB
+ |.else
+ | mov ARG1, L:RB
+ |.endif
+ | call aword CFUNC:RD->f // (lua_State *L)
+ | mov BASE, L:RB->base
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | test RD, RD; jg ->fff_res // Returned nresults+1?
+ |1:
+ | mov RA, L:RB->top
+ | sub RA, BASE
+ | shr RA, 3
+ | test RD, RD
+ | lea NARGS:RD, [RA+1]
+ | mov LFUNC:RB, [BASE-8]
+ | jne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | mov RA, BASE
+ | test PC, FRAME_TYPE
+ | jnz >3
+ | movzx RB, PC_RA
+ | not RBa // Note: ~RB = -(RB+1)
+ | lea BASE, [BASE+RB*8] // base = base - (RB+1)*8
+ | jmp ->vm_call_dispatch // Resolve again for tailcall.
+ |3:
+ | mov RB, PC
+ | and RB, -8
+ | sub BASE, RB
+ | jmp ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | mov FCARG2, LUA_MINSTACK
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | xor RD, RD // Simulate a return 0.
+ | jmp <1 // Dumb retry (goes through ff first).
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RD = nargs+1
+ | pop RBa // Must keep stack at same level.
+ | mov TMPa, RBa // Save return address
+ | mov L:RB, SAVE_L
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov FCARG1, L:RB
+ | mov L:RB->top, RD
+ | call extern lj_gc_step@4 // (lua_State *L)
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | sub RD, BASE
+ | shr RD, 3
+ | add NARGS:RD, 1
+ | mov RBa, TMPa
+ | push RBa // Restore return address.
+ | ret
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+#if LJ_HASJIT
+ | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_VMEVENT // No recording while in vmevent.
+ | jnz >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | test RDL, HOOK_ACTIVE
+ | jnz >1
+ | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+ | jz >1
+ | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+ | jmp >1
+#endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_ACTIVE // Hook already active?
+ | jnz >5
+ | jmp >1
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_ACTIVE // Hook already active?
+ | jnz >5
+ |
+ | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+ | jz >5
+ | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+ | jz >1
+ | test RDL, LUA_MASKLINE
+ | jz >5
+ |1:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov FCARG2, PC // Caveat: FCARG2 == BASE
+ | mov FCARG1, L:RB
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | call extern lj_dispatch_ins@8 // (lua_State *L, BCIns *pc)
+ |3:
+ | mov BASE, L:RB->base
+ |4:
+ | movzx RA, PC_RA
+ |5:
+ | movzx OP, PC_OP
+ | movzx RD, PC_RD
+ |.if X64
+ | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins.
+ |.else
+ | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Re-dispatch to static ins.
+ |.endif
+ |
+ |->cont_hook: // Continue from hook yield.
+ | add PC, 4
+ | mov RA, [RB-24]
+ | mov MULTRES, RA // Restore MULTRES for *M ins.
+ | jmp <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+#if LJ_HASJIT
+ | mov LFUNC:RB, [BASE-8] // Same as curr_topL(L).
+ | mov RB, LFUNC:RB->pc
+ | movzx RD, byte [RB+PC2PROTO(framesize)]
+ | lea RD, [BASE+RD*8]
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov FCARG2, PC
+ | lea FCARG1, [DISPATCH+GG_DISP2J]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa
+ | mov SAVE_PC, PC
+ | call extern lj_trace_hot@8 // (jit_State *J, const BCIns *pc)
+ | jmp <3
+#endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mov SAVE_PC, PC
+#if LJ_HASJIT
+ | jmp >1
+#endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+#if LJ_HASJIT
+ | mov SAVE_PC, PC
+ | or PC, 1 // Marker for hot call.
+ |1:
+#endif
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov FCARG2, PC
+ | mov FCARG1, L:RB
+ | call extern lj_dispatch_call@8 // (lua_State *L, const BCIns *pc)
+ | // ASMFunction returned in eax/rax (RDa).
+ | mov SAVE_PC, 0 // Invalidate for subsequent line hook.
+#if LJ_HASJIT
+ | and PC, -2
+#endif
+ | mov BASE, L:RB->base
+ | mov RAa, RDa
+ | mov RD, L:RB->top
+ | sub RD, BASE
+ | mov RBa, RAa
+ | movzx RA, PC_RA
+ | shr RD, 3
+ | add NARGS:RD, 1
+ | jmp RBa
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Called from an exit stub with the exit number on the stack.
+ |// The 16 bit exit number is stored with two (sign-extended) push imm8.
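+ |// The handler saves all GPRs and SSE registers into an ExitState
+ |// snapshot on the C stack and passes it to lj_trace_exit, which
+ |// restores the interpreter state; execution then continues in
+ |// ->vm_exit_interp.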
+ |->vm_exit_handler:
+#if LJ_HASJIT
+ |.if X64
+ | push r13; push r12
+ | push r11; push r10; push r9; push r8
+ | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp
+ | push rbx; push rdx; push rcx; push rax
+ | movzx RC, byte [rbp-8] // Reconstruct exit number.
+ | mov RCH, byte [rbp-16]
+ | mov [rbp-8], r15; mov [rbp-16], r14
+ |.else
+ | push ebp; lea ebp, [esp+12]; push ebp
+ | push ebx; push edx; push ecx; push eax
+ | movzx RC, byte [ebp-4] // Reconstruct exit number.
+ | mov RCH, byte [ebp-8]
+ | mov [ebp-4], edi; mov [ebp-8], esi
+ |.endif
+ | // Caveat: DISPATCH is ebx.
+ | mov DISPATCH, [ebp]
+ | mov RA, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number.
+ | set_vmstate EXIT
+ | mov [DISPATCH+DISPATCH_J(exitno)], RC
+ | mov [DISPATCH+DISPATCH_J(parent)], RA
+ |.if X64
+ |.if X64WIN
+ | sub rsp, 16*8+4*8 // Room for SSE regs + save area.
+ |.else
+ | sub rsp, 16*8 // Room for SSE regs.
+ |.endif
+ | add rbp, -128
+ | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14
+ | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12
+ | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10
+ | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8
+ | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6
+ | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4
+ | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2
+ | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0
+ |.else
+ | sub esp, 8*8+16 // Room for SSE regs + args.
+ | movsd qword [ebp-40], xmm7; movsd qword [ebp-48], xmm6
+ | movsd qword [ebp-56], xmm5; movsd qword [ebp-64], xmm4
+ | movsd qword [ebp-72], xmm3; movsd qword [ebp-80], xmm2
+ | movsd qword [ebp-88], xmm1; movsd qword [ebp-96], xmm0
+ |.endif
+ | // Caveat: RB is ebp.
+ | mov L:RB, [DISPATCH+DISPATCH_GL(jit_L)]
+ | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa
+ | mov dword [DISPATCH+DISPATCH_GL(jit_L)], 0
+ | mov L:RB->base, BASE
+ |.if X64WIN
+ | lea CARG2, [rsp+4*8]
+ |.elif X64
+ | mov CARG2, rsp
+ |.else
+ | lea FCARG2, [esp+16]
+ |.endif
+ | lea FCARG1, [DISPATCH+GG_DISP2J]
+ | call extern lj_trace_exit@8 // (jit_State *J, ExitState *ex)
+ | // MULTRES or negated error code returned in eax (RD).
+ | mov RAa, L:RB->cframe
+ | and RAa, CFRAME_RAWMASK
+ |.if X64WIN
+ | // Reposition stack later.
+ |.elif X64
+ | mov rsp, RAa // Reposition stack to C frame.
+ |.else
+ | mov esp, RAa // Reposition stack to C frame.
+ |.endif
+ | mov [RAa+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield).
+ | mov BASE, L:RB->base
+ | mov PC, [RAa+CFRAME_OFS_PC] // Get SAVE_PC.
+ |.if X64
+ | jmp >1
+ |.endif
+#endif
+ |->vm_exit_interp:
+ | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set.
+#if LJ_HASJIT
+ |.if X64
+ | // Restore additional callee-save registers only used in compiled code.
+ |.if X64WIN
+ | lea RAa, [rsp+9*16+4*8]
+ |1:
+ | movdqa xmm15, [RAa-9*16]
+ | movdqa xmm14, [RAa-8*16]
+ | movdqa xmm13, [RAa-7*16]
+ | movdqa xmm12, [RAa-6*16]
+ | movdqa xmm11, [RAa-5*16]
+ | movdqa xmm10, [RAa-4*16]
+ | movdqa xmm9, [RAa-3*16]
+ | movdqa xmm8, [RAa-2*16]
+ | movdqa xmm7, [RAa-1*16]
+ | mov rsp, RAa // Reposition stack to C frame.
+ | movdqa xmm6, [RAa]
+ | mov r15, CSAVE_3
+ | mov r14, CSAVE_4
+ |.else
+ | add rsp, 16 // Reposition stack to C frame.
+ |1:
+ |.endif
+ | mov r13, TMPa
+ | mov r12, TMPQ
+ |.endif
+ | test RD, RD; js >3 // Check for error from exit.
+ | mov MULTRES, RD
+ | mov LFUNC:KBASE, [BASE-8]
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | mov dword [DISPATCH+DISPATCH_GL(jit_L)], 0
+ | set_vmstate INTERP
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | mov RC, [PC]
+ | movzx RA, RCH
+ | movzx OP, RCL
+ | add PC, 4
+ | shr RC, 16
+ | cmp OP, BC_FUNCF // Function header?
+ | jb >2
+ | mov RC, MULTRES // RC/RD holds nres+1.
+ |2:
+ |.if X64
+ | jmp aword [DISPATCH+OP*8]
+ |.else
+ | jmp aword [DISPATCH+OP*4]
+ |.endif
+ |
+ |3: // Rethrow error from the right C frame.
+ | neg RD
+ | mov FCARG1, L:RB
+ | mov FCARG2, RD
+ | call extern lj_err_throw@8 // (lua_State *L, int errcode)
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// FP value rounding. Called by math.floor/math.ceil fast functions
+ |// and from JIT code.
+ |
+ |// x87 variant: Arg/ret on x87 stack. No int/xmm registers modified.
+ |.macro vm_round_x87, mode1, mode2
+ | fnstcw word [esp+4] // Caveat: overwrites ARG1 and ARG2.
+ | mov [esp+8], eax
+ | mov ax, mode1
+ | or ax, [esp+4]
+ |.if mode2 ~= 0xffff
+ | and ax, mode2
+ |.endif
+ | mov [esp+6], ax
+ | fldcw word [esp+6]
+ | frndint
+ | fldcw word [esp+4]
+ | mov eax, [esp+8]
+ | ret
+ |.endmacro
+ |
+ |// SSE variant: arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified.
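+ |// The rounding uses the 2^52 trick: for |x| < 2^52, adding and then
+ |// subtracting 2^52 rounds the value to an integer (round-to-nearest),
+ |// and the compare-and-subtract afterwards corrects the result to
+ |// floor/ceil/trunc semantics. Larger values are already integral.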
+ |.macro vm_round_sse, mode
+ | sseconst_abs xmm2, RDa
+ | sseconst_2p52 xmm3, RDa
+ | movaps xmm1, xmm0
+ | andpd xmm1, xmm2 // |x|
+ | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|.
+ | jbe >1
+ | andnpd xmm2, xmm0 // Isolate sign bit.
+ |.if mode == 2 // trunc(x)?
+ | movaps xmm0, xmm1
+ | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
+ | subsd xmm1, xmm3
+ | sseconst_1 xmm3, RDa
+ | cmpsd xmm0, xmm1, 1 // |x| < result?
+ | andpd xmm0, xmm3
+ | subsd xmm1, xmm0 // If yes, subtract -1.
+ | orpd xmm1, xmm2 // Merge sign bit back in.
+ |.else
+ | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
+ | subsd xmm1, xmm3
+ | orpd xmm1, xmm2 // Merge sign bit back in.
+ | .if mode == 1 // ceil(x)?
+ | sseconst_m1 xmm2, RDa // Must subtract -1 to preserve -0.
+ | cmpsd xmm0, xmm1, 6 // x > result?
+ | .else // floor(x)?
+ | sseconst_1 xmm2, RDa
+ | cmpsd xmm0, xmm1, 1 // x < result?
+ | .endif
+ | andpd xmm0, xmm2
+ | subsd xmm1, xmm0 // If yes, subtract +-1.
+ |.endif
+ | movaps xmm0, xmm1
+ |1:
+ | ret
+ |.endmacro
+ |
+ |.macro vm_round, name, ssemode, mode1, mode2
+ |->name:
+ ||if (!sse) {
+ | vm_round_x87 mode1, mode2
+ ||}
+ |->name .. _sse:
+ | vm_round_sse ssemode
+ |.endmacro
+ |
+ | vm_round vm_floor, 0, 0x0400, 0xf7ff
+ | vm_round vm_ceil, 1, 0x0800, 0xfbff
+ | vm_round vm_trunc, 2, 0x0c00, 0xffff
+ |
+ |// FP modulo x%y. Called by BC_MOD* and vm_arith.
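+ |// Implements Lua's modulo, i.e. x - floor(x/y)*y, computing floor(x/y)
+ |// with the same rounding tricks as vm_floor.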
+ |->vm_mod:
+ if (sse) {
+ |// Args in xmm0/xmm1, return value in xmm0.
+ |// Caveat: xmm0-xmm5 and RC (eax) modified!
+ | movaps xmm5, xmm0
+ | divsd xmm0, xmm1
+ | sseconst_abs xmm2, RDa
+ | sseconst_2p52 xmm3, RDa
+ | movaps xmm4, xmm0
+ | andpd xmm4, xmm2 // |x/y|
+ | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|.
+ | jbe >1
+ | andnpd xmm2, xmm0 // Isolate sign bit.
+ | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52
+ | subsd xmm4, xmm3
+ | orpd xmm4, xmm2 // Merge sign bit back in.
+ | sseconst_1 xmm2, RDa
+ | cmpsd xmm0, xmm4, 1 // x/y < result?
+ | andpd xmm0, xmm2
+ | subsd xmm4, xmm0 // If yes, subtract 1.0.
+ | movaps xmm0, xmm5
+ | mulsd xmm1, xmm4
+ | subsd xmm0, xmm1
+ | ret
+ |1:
+ | mulsd xmm1, xmm0
+ | movaps xmm0, xmm5
+ | subsd xmm0, xmm1
+ | ret
+ } else {
+ |// Args/ret on x87 stack (y on top). No xmm registers modified.
+ |// Caveat: needs 3 slots on x87 stack! RC (eax) modified!
+ | fld st1
+ | fdiv st1
+ | fnstcw word [esp+4]
+ | mov ax, 0x0400
+ | or ax, [esp+4]
+ | and ax, 0xf7ff
+ | mov [esp+6], ax
+ | fldcw word [esp+6]
+ | frndint
+ | fldcw word [esp+4]
+ | fmulp st1
+ | fsubp st1
+ | ret
+ }
+ |
+ |// FP exponentiation e^x and 2^x. Called by math.exp fast function and
+ |// from JIT code. Arg/ret on x87 stack. No int/xmm regs modified.
+ |// Caveat: needs 3 slots on x87 stack!
+ |->vm_exp_x87:
+ | fldl2e; fmulp st1 // e^x ==> 2^(x*log2(e))
+ |->vm_exp2_x87:
+ | .if X64WIN
+ | .define expscratch, dword [rsp+8] // Use scratch area.
+ | .elif X64
+ | .define expscratch, dword [rsp-8] // Use red zone.
+ | .else
+ | .define expscratch, dword [esp+4] // Needs 4 byte scratch area.
+ | .endif
+ | fst expscratch // Caveat: overwrites ARG1.
+ | cmp expscratch, 0x7f800000; je >1 // Special case: e^+Inf = +Inf
+ | cmp expscratch, 0xff800000; je >2 // Special case: e^-Inf = 0
+ |->vm_exp2raw: // Entry point for vm_pow. Without +-Inf check.
+ | fdup; frndint; fsub st1, st0; fxch // Split into frac/int part.
+ | f2xm1; fld1; faddp st1; fscale; fpop1 // ==> (2^frac-1 +1) << int
+ |1:
+ | ret
+ |2:
+ | fpop; fldz; ret
+ |
+ |// Generic power function x^y. Called by BC_POW, math.pow fast function,
+ |// and vm_arith.
+ if (!sse) {
+ |.if not X64
+ |// Args/ret on x87 stack (y on top). RC (eax) modified.
+ |// Caveat: needs 3 slots on x87 stack!
+ |->vm_pow:
+ | fist dword [esp+4] // Store/reload int before comparison.
+ | fild dword [esp+4] // Integral exponent used in vm_powi.
+ ||if (cmov) {
+ | fucomip st1
+ ||} else {
+ | fucomp st1; fnstsw ax; sahf
+ ||}
+ | jnz >8 // Branch for FP exponents.
+ | jp >9 // Branch for NaN exponent.
+ | fpop // Pop y and fallthrough to vm_powi.
+ |
+ |// FP/int power function x^i. Arg1/ret on x87 stack.
+ |// Arg2 (int) on C stack. RC (eax) modified.
+ |// Caveat: needs 2 slots on x87 stack!
+ | mov eax, [esp+4]
+ | cmp eax, 1; jle >6 // i<=1?
+ | // Now 1 < (unsigned)i <= 0x80000000.
+ |1: // Handle leading zeros.
+ | test eax, 1; jnz >2
+ | fmul st0
+ | shr eax, 1
+ | jmp <1
+ |2:
+ | shr eax, 1; jz >5
+ | fdup
+ |3: // Handle trailing bits.
+ | fmul st0
+ | shr eax, 1; jz >4
+ | jnc <3
+ | fmul st1, st0
+ | jmp <3
+ |4:
+ | fmulp st1
+ |5:
+ | ret
+ |6:
+ | je <5 // x^1 ==> x
+ | jb >7
+ | fld1; fdivrp st1
+ | neg eax
+ | cmp eax, 1; je <5 // x^-1 ==> 1/x
+ | jmp <1 // x^-i ==> (1/x)^i
+ |7:
+ | fpop; fld1 // x^0 ==> 1
+ | ret
+ |
+ |8: // FP/FP power function x^y.
+ | fst dword [esp+4]
+ | fxch
+ | fst dword [esp+8]
+ | mov eax, [esp+4]; shl eax, 1
+ | cmp eax, 0xff000000; je >2 // x^+-Inf?
+ | mov eax, [esp+8]; shl eax, 1; je >4 // +-0^y?
+ | cmp eax, 0xff000000; je >4 // +-Inf^y?
+ | fyl2x
+ | jmp ->vm_exp2raw
+ |
+ |9: // Handle x^NaN.
+ | fld1
+ ||if (cmov) {
+ | fucomip st2
+ ||} else {
+ | fucomp st2; fnstsw ax; sahf
+ ||}
+ | je >1 // 1^NaN ==> 1
+ | fxch // x^NaN ==> NaN
+ |1:
+ | fpop
+ | ret
+ |
+ |2: // Handle x^+-Inf.
+ | fabs
+ | fld1
+ ||if (cmov) {
+ | fucomip st1
+ ||} else {
+ | fucomp st1; fnstsw ax; sahf
+ ||}
+ | je >3 // +-1^+-Inf ==> 1
+ | fpop; fabs; fldz; mov eax, 0; setc al
+ | ror eax, 1; xor eax, [esp+4]; jns >3 // |x|<>1, x^+-Inf ==> +Inf/0
+ | fxch
+ |3:
+ | fpop1; fabs
+ | ret
+ |
+ |4: // Handle +-0^y or +-Inf^y.
+ | cmp dword [esp+4], 0; jge <3 // y >= 0, x^y ==> |x|
+ | fpop; fpop
+ | test eax, eax; jz >5 // y < 0, +-0^y ==> +Inf
+ | fldz // y < 0, +-Inf^y ==> 0
+ | ret
+ |5:
+ | mov dword [esp+4], 0x7f800000 // Return +Inf.
+ | fld dword [esp+4]
+ | ret
+ |.endif
+ } else {
+ |->vm_pow:
+ }
+ |
+ |// Args in xmm0/xmm1. Ret in xmm0. xmm0-xmm2 and RC (eax) modified.
+ |// Needs 16 byte scratch area for x86. Also called from JIT code.
+ |->vm_pow_sse:
+ | cvtsd2si eax, xmm1
+ | cvtsi2sd xmm2, eax
+ | ucomisd xmm1, xmm2
+ | jnz >8 // Branch for FP exponents.
+ | jp >9 // Branch for NaN exponent.
+ | // Fallthrough to vm_powi_sse.
+ |
+ |// Args in xmm0/eax. Ret in xmm0. xmm0-xmm1 and eax modified.
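+ |// Uses square-and-multiply over the exponent bits; negative exponents
+ |// compute x^|i| and return its reciprocal, and i <= 1 is special-cased
+ |// (x^1 ==> x, x^0 ==> 1).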
+ |->vm_powi_sse:
+ | cmp eax, 1; jle >6 // i<=1?
+ | // Now 1 < (unsigned)i <= 0x80000000.
+ |1: // Handle leading zeros.
+ | test eax, 1; jnz >2
+ | mulsd xmm0, xmm0
+ | shr eax, 1
+ | jmp <1
+ |2:
+ | shr eax, 1; jz >5
+ | movaps xmm1, xmm0
+ |3: // Handle trailing bits.
+ | mulsd xmm0, xmm0
+ | shr eax, 1; jz >4
+ | jnc <3
+ | mulsd xmm1, xmm0
+ | jmp <3
+ |4:
+ | mulsd xmm0, xmm1
+ |5:
+ | ret
+ |6:
+ | je <5 // x^1 ==> x
+ | jb >7 // x^0 ==> 1
+ | neg eax
+ | call <1
+ | sseconst_1 xmm1, RDa
+ | divsd xmm1, xmm0
+ | movaps xmm0, xmm1
+ | ret
+ |7:
+ | sseconst_1 xmm0, RDa
+ | ret
+ |
+ |8: // FP/FP power function x^y.
+ |.if X64
+ | movd rax, xmm1; shl rax, 1
+ | rol rax, 12; cmp rax, 0xffe; je >2 // x^+-Inf?
+ | movd rax, xmm0; shl rax, 1; je >4 // +-0^y?
+ | rol rax, 12; cmp rax, 0xffe; je >5 // +-Inf^y?
+ | .if X64WIN
+ | movsd qword [rsp+16], xmm1 // Use scratch area.
+ | movsd qword [rsp+8], xmm0
+ | fld qword [rsp+16]
+ | fld qword [rsp+8]
+ | .else
+ | movsd qword [rsp-16], xmm1 // Use red zone.
+ | movsd qword [rsp-8], xmm0
+ | fld qword [rsp-16]
+ | fld qword [rsp-8]
+ | .endif
+ |.else
+ | movsd qword [esp+12], xmm1 // Needs 16 byte scratch area.
+ | movsd qword [esp+4], xmm0
+ | cmp dword [esp+12], 0; jne >1
+ | mov eax, [esp+16]; shl eax, 1
+ | cmp eax, 0xffe00000; je >2 // x^+-Inf?
+ |1:
+ | cmp dword [esp+4], 0; jne >1
+ | mov eax, [esp+8]; shl eax, 1; je >4 // +-0^y?
+ | cmp eax, 0xffe00000; je >5 // +-Inf^y?
+ |1:
+ | fld qword [esp+12]
+ | fld qword [esp+4]
+ |.endif
+ | fyl2x // y*log2(x)
+ | fdup; frndint; fsub st1, st0; fxch // Split into frac/int part.
+ | f2xm1; fld1; faddp st1; fscale; fpop1 // ==> (2^frac-1 +1) << int
+ |.if X64WIN
+ | fstp qword [rsp+8] // Use scratch area.
+ | movsd xmm0, qword [rsp+8]
+ |.elif X64
+ | fstp qword [rsp-8] // Use red zone.
+ | movsd xmm0, qword [rsp-8]
+ |.else
+ | fstp qword [esp+4] // Needs 8 byte scratch area.
+ | movsd xmm0, qword [esp+4]
+ |.endif
+ | ret
+ |
+ |9: // Handle x^NaN.
+ | sseconst_1 xmm2, RDa
+ | ucomisd xmm0, xmm2; je >1 // 1^NaN ==> 1
+ | movaps xmm0, xmm1 // x^NaN ==> NaN
+ |1:
+ | ret
+ |
+ |2: // Handle x^+-Inf.
+ | sseconst_abs xmm2, RDa
+ | andpd xmm0, xmm2 // |x|
+ | sseconst_1 xmm2, RDa
+ | ucomisd xmm0, xmm2; je <1 // +-1^+-Inf ==> 1
+ | movmskpd eax, xmm1
+ | xorps xmm0, xmm0
+ | mov ah, al; setc al; xor al, ah; jne <1 // |x|<>1, x^+-Inf ==> +Inf/0
+ |3:
+ | sseconst_hi xmm0, RDa, 7ff00000 // +Inf
+ | ret
+ |
+ |4: // Handle +-0^y.
+ | movmskpd eax, xmm1; test eax, eax; jnz <3 // y < 0, +-0^y ==> +Inf
+ | xorps xmm0, xmm0 // y >= 0, +-0^y ==> 0
+ | ret
+ |
+ |5: // Handle +-Inf^y.
+ | movmskpd eax, xmm1; test eax, eax; jz <3 // y >= 0, +-Inf^y ==> +Inf
+ | xorps xmm0, xmm0 // y < 0, +-Inf^y ==> 0
+ | ret
+ |
+ |// Callable from C: double lj_vm_foldfpm(double x, int fpm)
+ |// Computes fpm(x) for extended math functions. ORDER FPM.
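+ |// fpm indices handled below (ORDER FPM): 0 floor, 1 ceil, 2 trunc,
+ |// 3 sqrt, 4 exp, 5 exp2, 6 log, 7 log2, 8 log10, 9 sin, 10 cos, 11 tan.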
+ |->vm_foldfpm:
+#if LJ_HASJIT
+ if (sse) {
+ |.if X64
+ |
+ | .if X64WIN
+ | .define fpmop, CARG2d
+ | .else
+ | .define fpmop, CARG1d
+ | .endif
+ | cmp fpmop, 1; jb ->vm_floor; je ->vm_ceil
+ | cmp fpmop, 3; jb ->vm_trunc; ja >2
+ | sqrtsd xmm0, xmm0; ret
+ |2:
+ | .if X64WIN
+ | movsd qword [rsp+8], xmm0 // Use scratch area.
+ | fld qword [rsp+8]
+ | .else
+ | movsd qword [rsp-8], xmm0 // Use red zone.
+ | fld qword [rsp-8]
+ | .endif
+ | cmp fpmop, 5; ja >2
+ | .if X64WIN; pop rax; .endif
+ | je >1
+ | call ->vm_exp_x87
+ | .if X64WIN; push rax; .endif
+ | jmp >7
+ |1:
+ | call ->vm_exp2_x87
+ | .if X64WIN; push rax; .endif
+ | jmp >7
+ |2: ; cmp fpmop, 7; je >1; ja >2
+ | fldln2; fxch; fyl2x; jmp >7
+ |1: ; fld1; fxch; fyl2x; jmp >7
+ |2: ; cmp fpmop, 9; je >1; ja >2
+ | fldlg2; fxch; fyl2x; jmp >7
+ |1: ; fsin; jmp >7
+ |2: ; cmp fpmop, 11; je >1; ja >9
+ | fcos; jmp >7
+ |1: ; fptan; fpop
+ |7:
+ | .if X64WIN
+ | fstp qword [rsp+8] // Use scratch area.
+ | movsd xmm0, qword [rsp+8]
+ | .else
+ | fstp qword [rsp-8] // Use red zone.
+ | movsd xmm0, qword [rsp-8]
+ | .endif
+ | ret
+ |
+ |.else // x86 calling convention.
+ |
+ | .define fpmop, eax
+ | mov fpmop, [esp+12]
+ | movsd xmm0, qword [esp+4]
+ | cmp fpmop, 1; je >1; ja >2
+ | call ->vm_floor; jmp >7
+ |1: ; call ->vm_ceil; jmp >7
+ |2: ; cmp fpmop, 3; je >1; ja >2
+ | call ->vm_trunc; jmp >7
+ |1:
+ | sqrtsd xmm0, xmm0
+ |7:
+ | movsd qword [esp+4], xmm0 // Overwrite callee-owned args.
+ | fld qword [esp+4]
+ | ret
+ |2: ; fld qword [esp+4]
+ | cmp fpmop, 5; jb ->vm_exp_x87; je ->vm_exp2_x87
+ |2: ; cmp fpmop, 7; je >1; ja >2
+ | fldln2; fxch; fyl2x; ret
+ |1: ; fld1; fxch; fyl2x; ret
+ |2: ; cmp fpmop, 9; je >1; ja >2
+ | fldlg2; fxch; fyl2x; ret
+ |1: ; fsin; ret
+ |2: ; cmp fpmop, 11; je >1; ja >9
+ | fcos; ret
+ |1: ; fptan; fpop; ret
+ |
+ |.endif
+ } else {
+ | mov fpmop, [esp+12]
+ | fld qword [esp+4]
+ | cmp fpmop, 1; jb ->vm_floor; je ->vm_ceil
+ | cmp fpmop, 3; jb ->vm_trunc; ja >2
+ | fsqrt; ret
+ |2: ; cmp fpmop, 5; jb ->vm_exp_x87; je ->vm_exp2_x87
+ | cmp fpmop, 7; je >1; ja >2
+ | fldln2; fxch; fyl2x; ret
+ |1: ; fld1; fxch; fyl2x; ret
+ |2: ; cmp fpmop, 9; je >1; ja >2
+ | fldlg2; fxch; fyl2x; ret
+ |1: ; fsin; ret
+ |2: ; cmp fpmop, 11; je >1; ja >9
+ | fcos; ret
+ |1: ; fptan; fpop; ret
+ }
+ |9: ; int3 // Bad fpm.
+#endif
+ |
+ |// Callable from C: double lj_vm_foldarith(double x, double y, int op)
+ |// Compute x op y for basic arithmetic operators (+ - * / % ^ and unary -)
+ |// and basic math functions. ORDER ARITH
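+ |// (Numbering as dispatched below: 0 add, 1 sub, 2 mul, 3 div, 4 mod,
+ |//  5 pow, 6 neg, 7 abs, 8 atan2, 9 ldexp, 10 min, 11 max.)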
+ |->vm_foldarith:
+ if (sse) {
+ |.if X64
+ |
+ | .if X64WIN
+ | .define foldop, CARG3d
+ | .else
+ | .define foldop, CARG1d
+ | .endif
+ | cmp foldop, 1; je >1; ja >2
+ | addsd xmm0, xmm1; ret
+ |1: ; subsd xmm0, xmm1; ret
+ |2: ; cmp foldop, 3; je >1; ja >2
+ | mulsd xmm0, xmm1; ret
+ |1: ; divsd xmm0, xmm1; ret
+ |2: ; cmp foldop, 5; jb ->vm_mod; je ->vm_pow
+ | cmp foldop, 7; je >1; ja >2
+ | sseconst_sign xmm1, RDa; xorps xmm0, xmm1; ret
+ |1: ; sseconst_abs xmm1, RDa; andps xmm0, xmm1; ret
+ |2: ; cmp foldop, 9; ja >2
+ |.if X64WIN
+ | movsd qword [rsp+8], xmm0 // Use scratch area.
+ | movsd qword [rsp+16], xmm1
+ | fld qword [rsp+8]
+ | fld qword [rsp+16]
+ |.else
+ | movsd qword [rsp-8], xmm0 // Use red zone.
+ | movsd qword [rsp-16], xmm1
+ | fld qword [rsp-8]
+ | fld qword [rsp-16]
+ |.endif
+ | je >1
+ | fpatan
+ |7:
+ |.if X64WIN
+ | fstp qword [rsp+8] // Use scratch area.
+ | movsd xmm0, qword [rsp+8]
+ |.else
+ | fstp qword [rsp-8] // Use red zone.
+ | movsd xmm0, qword [rsp-8]
+ |.endif
+ | ret
+ |1: ; fxch; fscale; fpop1; jmp <7
+ |2: ; cmp foldop, 11; je >1; ja >9
+ | minsd xmm0, xmm1; ret
+ |1: ; maxsd xmm0, xmm1; ret
+ |9: ; int3 // Bad op.
+ |
+ |.else // x86 calling convention.
+ |
+ | .define foldop, eax
+ | mov foldop, [esp+20]
+ | movsd xmm0, qword [esp+4]
+ | movsd xmm1, qword [esp+12]
+ | cmp foldop, 1; je >1; ja >2
+ | addsd xmm0, xmm1
+ |7:
+ | movsd qword [esp+4], xmm0 // Overwrite callee-owned args.
+ | fld qword [esp+4]
+ | ret
+ |1: ; subsd xmm0, xmm1; jmp <7
+ |2: ; cmp foldop, 3; je >1; ja >2
+ | mulsd xmm0, xmm1; jmp <7
+ |1: ; divsd xmm0, xmm1; jmp <7
+ |2: ; cmp foldop, 5
+ | je >1; ja >2
+ | call ->vm_mod; jmp <7
+ |1: ; pop edx; call ->vm_pow; push edx; jmp <7 // Writes to scratch area.
+ |2: ; cmp foldop, 7; je >1; ja >2
+ | sseconst_sign xmm1, RDa; xorps xmm0, xmm1; jmp <7
+ |1: ; sseconst_abs xmm1, RDa; andps xmm0, xmm1; jmp <7
+ |2: ; cmp foldop, 9; ja >2
+ | fld qword [esp+4] // Reload from stack
+ | fld qword [esp+12]
+ | je >1
+ | fpatan; ret
+ |1: ; fxch; fscale; fpop1; ret
+ |2: ; cmp foldop, 11; je >1; ja >9
+ | minsd xmm0, xmm1; jmp <7
+ |1: ; maxsd xmm0, xmm1; jmp <7
+ |9: ; int3 // Bad op.
+ |
+ |.endif
+ } else {
+ | mov eax, [esp+20]
+ | fld qword [esp+4]
+ | fld qword [esp+12]
+ | cmp eax, 1; je >1; ja >2
+ | faddp st1; ret
+ |1: ; fsubp st1; ret
+ |2: ; cmp eax, 3; je >1; ja >2
+ | fmulp st1; ret
+ |1: ; fdivp st1; ret
+ |2: ; cmp eax, 5; jb ->vm_mod; je ->vm_pow
+ | cmp eax, 7; je >1; ja >2
+ | fpop; fchs; ret
+ |1: ; fpop; fabs; ret
+ |2: ; cmp eax, 9; je >1; ja >2
+ | fpatan; ret
+ |1: ; fxch; fscale; fpop1; ret
+ |2: ; cmp eax, 11; je >1; ja >9
+ ||if (cmov) {
+ | fucomi st1; fcmovnbe st1; fpop1; ret
+ |1: ; fucomi st1; fcmovbe st1; fpop1; ret
+ ||} else {
+ | fucom st1; fnstsw ax; test ah, 1; jz >2; fxch; 2: ; fpop; ret
+ |1: ; fucom st1; fnstsw ax; test ah, 1; jnz >2; fxch; 2: ; fpop; ret
+ ||}
+ |9: ; int3 // Bad op.
+ }
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// int lj_vm_cpuid(uint32_t f, uint32_t res[4])
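+ |// Stores EAX/EBX/ECX/EDX of CPUID leaf f into res[0..3]. On x86 the
+ |// return value is 0 if the CPUID instruction itself is unsupported.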
+ |->vm_cpuid:
+ |.if X64
+ | mov eax, CARG1d
+ | .if X64WIN; push rsi; mov rsi, CARG2; .endif
+ | push rbx
+ | cpuid
+ | mov [rsi], eax
+ | mov [rsi+4], ebx
+ | mov [rsi+8], ecx
+ | mov [rsi+12], edx
+ | pop rbx
+ | .if X64WIN; pop rsi; .endif
+ | ret
+ |.else
+ | pushfd
+ | pop edx
+ | mov ecx, edx
+ | xor edx, 0x00200000 // Toggle ID bit in flags.
+ | push edx
+ | popfd
+ | pushfd
+ | pop edx
+ | xor eax, eax // Zero means no features supported.
+ | cmp ecx, edx
+ | jz >1 // No ID toggle means no CPUID support.
+ | mov eax, [esp+4] // Argument 1 is function number.
+ | push edi
+ | push ebx
+ | cpuid
+ | mov edi, [esp+16] // Argument 2 is result area.
+ | mov [edi], eax
+ | mov [edi+4], ebx
+ | mov [edi+8], ecx
+ | mov [edi+12], edx
+ | pop ebx
+ | pop edi
+ |1:
+ | ret
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Assertions ---------------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->assert_bad_for_arg_type:
+#ifdef LUA_USE_ASSERT
+ | int3
+#endif
+ | int3
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in ah/al.
+ |->vm_ffi_callback:
+#if LJ_HASFFI
+ |.type CTSTATE, CTState, PC
+ |.if not X64
+ | sub esp, 16 // Leave room for SAVE_ERRF etc.
+ |.endif
+ | saveregs_ // ebp/rbp already saved. ebp now holds global_State *.
+ | lea DISPATCH, [ebp+GG_G2DISP]
+ | mov CTSTATE, GL:ebp->ctype_state
+ | movzx eax, ax
+ | mov CTSTATE->cb.slot, eax
+ |.if X64
+ | mov CTSTATE->cb.gpr[0], CARG1
+ | mov CTSTATE->cb.gpr[1], CARG2
+ | mov CTSTATE->cb.gpr[2], CARG3
+ | mov CTSTATE->cb.gpr[3], CARG4
+ | movsd qword CTSTATE->cb.fpr[0], xmm0
+ | movsd qword CTSTATE->cb.fpr[1], xmm1
+ | movsd qword CTSTATE->cb.fpr[2], xmm2
+ | movsd qword CTSTATE->cb.fpr[3], xmm3
+ |.if X64WIN
+ | lea rax, [rsp+CFRAME_SIZE+4*8]
+ |.else
+ | lea rax, [rsp+CFRAME_SIZE]
+ | mov CTSTATE->cb.gpr[4], CARG5
+ | mov CTSTATE->cb.gpr[5], CARG6
+ | movsd qword CTSTATE->cb.fpr[4], xmm4
+ | movsd qword CTSTATE->cb.fpr[5], xmm5
+ | movsd qword CTSTATE->cb.fpr[6], xmm6
+ | movsd qword CTSTATE->cb.fpr[7], xmm7
+ |.endif
+ | mov CTSTATE->cb.stack, rax
+ | mov CARG2, rsp
+ |.else
+ | lea eax, [esp+CFRAME_SIZE+16]
+ | mov CTSTATE->cb.gpr[0], FCARG1
+ | mov CTSTATE->cb.gpr[1], FCARG2
+ | mov CTSTATE->cb.stack, eax
+ | mov FCARG1, [esp+CFRAME_SIZE+12] // Move around misplaced retaddr/ebp.
+ | mov FCARG2, [esp+CFRAME_SIZE+8]
+ | mov SAVE_RET, FCARG1
+ | mov SAVE_R4, FCARG2
+ | mov FCARG2, esp
+ |.endif
+ | mov SAVE_PC, CTSTATE // Any value outside of bytecode is ok.
+ | mov FCARG1, CTSTATE
+ | call extern lj_ccallback_enter@8 // (CTState *cts, void *cf)
+ | // lua_State * returned in eax (RD).
+ | set_vmstate INTERP
+ | mov BASE, L:RD->base
+ | mov RD, L:RD->top
+ | sub RD, BASE
+ | mov LFUNC:RB, [BASE-8]
+ | shr RD, 3
+ | add RD, 1
+ | ins_callt
+#endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+#if LJ_HASFFI
+ | mov L:RA, SAVE_L
+ | mov CTSTATE, [DISPATCH+DISPATCH_GL(ctype_state)]
+ | mov aword CTSTATE->L, L:RAa
+ | mov L:RA->base, BASE
+ | mov L:RA->top, RB
+ | mov FCARG1, CTSTATE
+ | mov FCARG2, RC
+ | call extern lj_ccallback_leave@8 // (CTState *cts, TValue *o)
+ |.if X64
+ | mov rax, CTSTATE->cb.gpr[0]
+ | movsd xmm0, qword CTSTATE->cb.fpr[0]
+ | jmp ->vm_leave_unw
+ |.else
+ | mov L:RB, SAVE_L
+ | mov eax, CTSTATE->cb.gpr[0]
+ | mov edx, CTSTATE->cb.gpr[1]
+ | cmp dword CTSTATE->cb.gpr[2], 1
+ | jb >7
+ | je >6
+ | fld qword CTSTATE->cb.fpr[0].d
+ | jmp >7
+ |6:
+ | fld dword CTSTATE->cb.fpr[0].f
+ |7:
+ | mov ecx, L:RB->top
+ | movzx ecx, word [ecx+6] // Get stack adjustment and copy up.
+ | mov SAVE_L, ecx // Must be one slot above SAVE_RET
+ | restoreregs
+ | pop ecx // Move return addr from SAVE_RET.
+ | add esp, [esp] // Adjust stack.
+ | add esp, 16
+ | push ecx
+ | ret
+ |.endif
+#endif
+ |
+ |->vm_ffi_call@4: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+#if LJ_HASFFI
+ |.if X64
+ | .type CCSTATE, CCallState, rbx
+ | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1
+ |.else
+ | .type CCSTATE, CCallState, ebx
+ | push ebp; mov ebp, esp; push ebx; mov CCSTATE, FCARG1
+ |.endif
+ |
+ | // Readjust stack.
+ |.if X64
+ | mov eax, CCSTATE->spadj
+ | sub rsp, rax
+ |.else
+ | sub esp, CCSTATE->spadj
+#if LJ_TARGET_WINDOWS
+ | mov CCSTATE->spadj, esp
+#endif
+ |.endif
+ |
+ | // Copy stack slots.
+ | movzx ecx, byte CCSTATE->nsp
+ | sub ecx, 1
+ | js >2
+ |1:
+ |.if X64
+ | mov rax, [CCSTATE+rcx*8+offsetof(CCallState, stack)]
+ | mov [rsp+rcx*8+CCALL_SPS_EXTRA*8], rax
+ |.else
+ | mov eax, [CCSTATE+ecx*4+offsetof(CCallState, stack)]
+ | mov [esp+ecx*4], eax
+ |.endif
+ | sub ecx, 1
+ | jns <1
+ |2:
+ |
+ |.if X64
+ | movzx eax, byte CCSTATE->nfpr
+ | mov CARG1, CCSTATE->gpr[0]
+ | mov CARG2, CCSTATE->gpr[1]
+ | mov CARG3, CCSTATE->gpr[2]
+ | mov CARG4, CCSTATE->gpr[3]
+ |.if not X64WIN
+ | mov CARG5, CCSTATE->gpr[4]
+ | mov CARG6, CCSTATE->gpr[5]
+ |.endif
+ | test eax, eax; jz >5
+ | movaps xmm0, CCSTATE->fpr[0]
+ | movaps xmm1, CCSTATE->fpr[1]
+ | movaps xmm2, CCSTATE->fpr[2]
+ | movaps xmm3, CCSTATE->fpr[3]
+ |.if not X64WIN
+ | cmp eax, 4; jbe >5
+ | movaps xmm4, CCSTATE->fpr[4]
+ | movaps xmm5, CCSTATE->fpr[5]
+ | movaps xmm6, CCSTATE->fpr[6]
+ | movaps xmm7, CCSTATE->fpr[7]
+ |.endif
+ |5:
+ |.else
+ | mov FCARG1, CCSTATE->gpr[0]
+ | mov FCARG2, CCSTATE->gpr[1]
+ |.endif
+ |
+ | call aword CCSTATE->func
+ |
+ |.if X64
+ | mov CCSTATE->gpr[0], rax
+ | movaps CCSTATE->fpr[0], xmm0
+ |.if not X64WIN
+ | mov CCSTATE->gpr[1], rdx
+ | movaps CCSTATE->fpr[1], xmm1
+ |.endif
+ |.else
+ | mov CCSTATE->gpr[0], eax
+ | mov CCSTATE->gpr[1], edx
+ | cmp byte CCSTATE->resx87, 1
+ | jb >7
+ | je >6
+ | fstp qword CCSTATE->fpr[0].d[0]
+ | jmp >7
+ |6:
+ | fstp dword CCSTATE->fpr[0].f[0]
+ |7:
+#if LJ_TARGET_WINDOWS
+ | sub CCSTATE->spadj, esp
+#endif
+ |.endif
+ |
+ |.if X64
+ | mov rbx, [rbp-8]; leave; ret
+ |.else
+ | mov ebx, [ebp-4]; leave; ret
+ |.endif
+#endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
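+/* defop is the DynASM dispatch label for this opcode; cmov and sse select */
+/* the code variants for CPUs with or without FCMOV and SSE2 support. */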
+static void build_ins(BuildCtx *ctx, BCOp op, int defop, int cmov, int sse)
+{
+ int vk = 0;
+ |// Note: aligning all instructions does not pay off.
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ |.macro jmp_comp, lt, ge, le, gt, target
+ ||switch (op) {
+ ||case BC_ISLT:
+ | lt target
+ ||break;
+ ||case BC_ISGE:
+ | ge target
+ ||break;
+ ||case BC_ISLE:
+ | le target
+ ||break;
+ ||case BC_ISGT:
+ | gt target
+ ||break;
+ ||default: break; /* Shut up GCC. */
+ ||}
+ |.endmacro
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1, RD = src2, JMP with RD = target
+ | ins_AD
+ if (LJ_DUALNUM) {
+ | checkint RA, >7
+ | checkint RD, >8
+ | mov RB, dword [BASE+RA*8]
+ | add PC, 4
+ | cmp RB, dword [BASE+RD*8]
+ | jmp_comp jge, jl, jg, jle, >9
+ |6:
+ | movzx RD, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | ja ->vmeta_comp
+ | // RA is a number.
+ | cmp dword [BASE+RD*8+4], LJ_TISNUM; jb >1; jne ->vmeta_comp
+ | // RA is a number, RD is an integer.
+ if (sse) {
+ | cvtsi2sd xmm0, dword [BASE+RD*8]
+ | jmp >2
+ } else {
+ | fld qword [BASE+RA*8]
+ | fild dword [BASE+RD*8]
+ | jmp >3
+ }
+ |
+ |8: // RA is an integer, RD is not an integer.
+ | ja ->vmeta_comp
+ | // RA is an integer, RD is a number.
+ if (sse) {
+ | cvtsi2sd xmm1, dword [BASE+RA*8]
+ | movsd xmm0, qword [BASE+RD*8]
+ | add PC, 4
+ | ucomisd xmm0, xmm1
+ | jmp_comp jbe, ja, jb, jae, <9
+ | jmp <6
+ } else {
+ | fild dword [BASE+RA*8]
+ | jmp >2
+ }
+ } else {
+ | checknum RA, ->vmeta_comp
+ | checknum RD, ->vmeta_comp
+ }
+ if (sse) {
+ |1:
+ | movsd xmm0, qword [BASE+RD*8]
+ |2:
+ | add PC, 4
+ | ucomisd xmm0, qword [BASE+RA*8]
+ |3:
+ } else {
+ |1:
+ | fld qword [BASE+RA*8] // Reverse order, i.e like cmp D, A.
+ |2:
+ | fld qword [BASE+RD*8]
+ |3:
+ | add PC, 4
+ | fcomparepp // eax (RD) modified!
+ }
+ | // Unordered: all of ZF CF PF set, ordered: PF clear.
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
+ if (LJ_DUALNUM) {
+ | jmp_comp jbe, ja, jb, jae, <9
+ | jmp <6
+ } else {
+ | jmp_comp jbe, ja, jb, jae, >1
+ | movzx RD, PC_RD
+ | branchPC RD
+ |1:
+ | ins_next
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | ins_AD // RA = src1, RD = src2, JMP with RD = target
+ | mov RB, [BASE+RD*8+4]
+ | add PC, 4
+ if (LJ_DUALNUM) {
+ | cmp RB, LJ_TISNUM; jne >7
+ | checkint RA, >8
+ | mov RB, dword [BASE+RD*8]
+ | cmp RB, dword [BASE+RA*8]
+ if (vk) {
+ | jne >9
+ } else {
+ | je >9
+ }
+ | movzx RD, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RD is not an integer.
+ | ja >5
+ | // RD is a number.
+ | cmp dword [BASE+RA*8+4], LJ_TISNUM; jb >1; jne >5
+ | // RD is a number, RA is an integer.
+ if (sse) {
+ | cvtsi2sd xmm0, dword [BASE+RA*8]
+ } else {
+ | fild dword [BASE+RA*8]
+ }
+ | jmp >2
+ |
+ |8: // RD is an integer, RA is not an integer.
+ | ja >5
+ | // RD is an integer, RA is a number.
+ if (sse) {
+ | cvtsi2sd xmm0, dword [BASE+RD*8]
+ | ucomisd xmm0, qword [BASE+RA*8]
+ } else {
+ | fild dword [BASE+RD*8]
+ | fld qword [BASE+RA*8]
+ }
+ | jmp >4
+ |
+ } else {
+ | cmp RB, LJ_TISNUM; jae >5
+ | checknum RA, >5
+ }
+ if (sse) {
+ |1:
+ | movsd xmm0, qword [BASE+RA*8]
+ |2:
+ | ucomisd xmm0, qword [BASE+RD*8]
+ |4:
+ } else {
+ |1:
+ | fld qword [BASE+RA*8]
+ |2:
+ | fld qword [BASE+RD*8]
+ |4:
+ | fcomparepp // eax (RD) modified!
+ }
+ iseqne_fp:
+ if (vk) {
+ | jp >2 // Unordered means not equal.
+ | jne >2
+ } else {
+ | jp >2 // Unordered means not equal.
+ | je >1
+ }
+ iseqne_end:
+ if (vk) {
+ |1: // EQ: Branch to the target.
+ | movzx RD, PC_RD
+ | branchPC RD
+ |2: // NE: Fallthrough to next instruction.
+ if (!LJ_HASFFI) {
+ |3:
+ }
+ } else {
+ if (!LJ_HASFFI) {
+ |3:
+ }
+ |2: // NE: Branch to the target.
+ | movzx RD, PC_RD
+ | branchPC RD
+ |1: // EQ: Fallthrough to next instruction.
+ }
+ if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV ||
+ op == BC_ISEQN || op == BC_ISNEN)) {
+ | jmp <9
+ } else {
+ | ins_next
+ }
+ |
+ if (op == BC_ISEQV || op == BC_ISNEV) {
+ |5: // Either or both types are not numbers.
+ if (LJ_HASFFI) {
+ | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd
+ | checktp RA, LJ_TCDATA; je ->vmeta_equal_cd
+ }
+ | checktp RA, RB // Compare types.
+ | jne <2 // Not the same type?
+ | cmp RB, LJ_TISPRI
+ | jae <1 // Same type and primitive type?
+ |
+ | // Same types and not a primitive type. Compare GCobj or pvalue.
+ | mov RA, [BASE+RA*8]
+ | mov RD, [BASE+RD*8]
+ | cmp RA, RD
+ | je <1 // Same GCobjs or pvalues?
+ | cmp RB, LJ_TISTABUD
+ | ja <2 // Different objects and not table/ud?
+ |.if X64
+ | cmp RB, LJ_TUDATA // And not 64 bit lightuserdata.
+ | jb <2
+ |.endif
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | mov TAB:RB, TAB:RA->metatable
+ | test TAB:RB, TAB:RB
+ | jz <2 // No metatable?
+ | test byte TAB:RB->nomm, 1<<MM_eq
+ | jnz <2 // Or 'no __eq' flag set?
+ if (vk) {
+ | xor RB, RB // ne = 0
+ } else {
+ | mov RB, 1 // ne = 1
+ }
+ | jmp ->vmeta_equal // Handle __eq metamethod.
+ } else if (LJ_HASFFI) {
+ |3:
+ | cmp RB, LJ_TCDATA
+ if (LJ_DUALNUM && vk) {
+ | jne <9
+ } else {
+ | jne <2
+ }
+ | jmp ->vmeta_equal_cd
+ }
+ break;
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | ins_AND // RA = src, RD = str const, JMP with RD = target
+ | mov RB, [BASE+RA*8+4]
+ | add PC, 4
+ | cmp RB, LJ_TSTR; jne >3
+ | mov RA, [BASE+RA*8]
+ | cmp RA, [KBASE+RD*4]
+ iseqne_test:
+ if (vk) {
+ | jne >2
+ } else {
+ | je >1
+ }
+ goto iseqne_end;
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | ins_AD // RA = src, RD = num const, JMP with RD = target
+ | mov RB, [BASE+RA*8+4]
+ | add PC, 4
+ if (LJ_DUALNUM) {
+ | cmp RB, LJ_TISNUM; jne >7
+ | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jne >8
+ | mov RB, dword [KBASE+RD*8]
+ | cmp RB, dword [BASE+RA*8]
+ if (vk) {
+ | jne >9
+ } else {
+ | je >9
+ }
+ | movzx RD, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | ja >3
+ | // RA is a number.
+ | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jb >1
+ | // RA is a number, RD is an integer.
+ if (sse) {
+ | cvtsi2sd xmm0, dword [KBASE+RD*8]
+ } else {
+ | fild dword [KBASE+RD*8]
+ }
+ | jmp >2
+ |
+ |8: // RA is an integer, RD is a number.
+ if (sse) {
+ | cvtsi2sd xmm0, dword [BASE+RA*8]
+ | ucomisd xmm0, qword [KBASE+RD*8]
+ } else {
+ | fild dword [BASE+RA*8]
+ | fld qword [KBASE+RD*8]
+ }
+ | jmp >4
+ } else {
+ | cmp RB, LJ_TISNUM; jae >3
+ }
+ if (sse) {
+ |1:
+ | movsd xmm0, qword [KBASE+RD*8]
+ |2:
+ | ucomisd xmm0, qword [BASE+RA*8]
+ |4:
+ } else {
+ |1:
+ | fld qword [KBASE+RD*8]
+ |2:
+ | fld qword [BASE+RA*8]
+ |4:
+ | fcomparepp // eax (RD) modified!
+ }
+ goto iseqne_fp;
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target
+ | mov RB, [BASE+RA*8+4]
+ | add PC, 4
+ | cmp RB, RD
+ if (!LJ_HASFFI) goto iseqne_test;
+ if (vk) {
+ | jne >3
+ | movzx RD, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ |3:
+ | cmp RB, LJ_TCDATA; jne <2
+ | jmp ->vmeta_equal_cd
+ } else {
+ | je >2
+ | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd
+ | movzx RD, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ }
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | ins_AD // RA = dst or unused, RD = src, JMP with RD = target
+ | mov RB, [BASE+RD*8+4]
+ | add PC, 4
+ | cmp RB, LJ_TISTRUECOND
+ if (op == BC_IST || op == BC_ISTC) {
+ | jae >1
+ } else {
+ | jb >1
+ }
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | mov [BASE+RA*8+4], RB
+ | mov RB, [BASE+RD*8]
+ | mov [BASE+RA*8], RB
+ }
+ | movzx RD, PC_RD
+ | branchPC RD
+ |1: // Fallthrough to the next instruction.
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | ins_AD // RA = dst, RD = src
+ |.if X64
+ | mov RBa, [BASE+RD*8]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [BASE+RD*8+4]
+ | mov RD, [BASE+RD*8]
+ | mov [BASE+RA*8+4], RB
+ | mov [BASE+RA*8], RD
+ |.endif
+ | ins_next_
+ break;
+ case BC_NOT:
+ | ins_AD // RA = dst, RD = src
+ | xor RB, RB
+ | checktp RD, LJ_TISTRUECOND
+ | adc RB, LJ_TTRUE
+ | mov [BASE+RA*8+4], RB
+ | ins_next
+ break;
+ case BC_UNM:
+ | ins_AD // RA = dst, RD = src
+ if (LJ_DUALNUM) {
+ | checkint RD, >5
+ | mov RB, [BASE+RD*8]
+ | neg RB
+ | jo >4
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ | mov dword [BASE+RA*8], RB
+ |9:
+ | ins_next
+ |4:
+ | mov dword [BASE+RA*8+4], 0x41e00000 // 2^31.
+ | mov dword [BASE+RA*8], 0
+ | jmp <9
+ |5:
+ | ja ->vmeta_unm
+ } else {
+ | checknum RD, ->vmeta_unm
+ }
+ if (sse) {
+ | movsd xmm0, qword [BASE+RD*8]
+ | sseconst_sign xmm1, RDa
+ | xorps xmm0, xmm1
+ | movsd qword [BASE+RA*8], xmm0
+ } else {
+ | fld qword [BASE+RD*8]
+ | fchs
+ | fstp qword [BASE+RA*8]
+ }
+ if (LJ_DUALNUM) {
+ | jmp <9
+ } else {
+ | ins_next
+ }
+ break;
+ case BC_LEN:
+ | ins_AD // RA = dst, RD = src
+ | checkstr RD, >2
+ | mov STR:RD, [BASE+RD*8]
+ if (LJ_DUALNUM) {
+ | mov RD, dword STR:RD->len
+ |1:
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ | mov dword [BASE+RA*8], RD
+ } else if (sse) {
+ | xorps xmm0, xmm0
+ | cvtsi2sd xmm0, dword STR:RD->len
+ |1:
+ | movsd qword [BASE+RA*8], xmm0
+ } else {
+ | fild dword STR:RD->len
+ |1:
+ | fstp qword [BASE+RA*8]
+ }
+ | ins_next
+ |2:
+ | checktab RD, ->vmeta_len
+ | mov TAB:FCARG1, [BASE+RD*8]
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ | mov TAB:RB, TAB:FCARG1->metatable
+ | cmp TAB:RB, 0
+ | jnz >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | mov RB, BASE // Save BASE.
+ | call extern lj_tab_len@4 // (GCtab *t)
+ | // Length of table returned in eax (RD).
+ if (LJ_DUALNUM) {
+ | // Nothing to do.
+ } else if (sse) {
+ | cvtsi2sd xmm0, RD
+ } else {
+ |.if not X64
+ | mov ARG1, RD
+ | fild ARG1
+ |.endif
+ }
+ | mov BASE, RB // Restore BASE.
+ | movzx RA, PC_RA
+ | jmp <1
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ |9: // Check for __len.
+ | test byte TAB:RB->nomm, 1<<MM_len
+ | jnz <3 // 'no __len' flag set: done.
+ | jmp ->vmeta_len // 'no __len' flag NOT set: check.
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithpre, x87ins, sseins, ssereg
+ | ins_ABC
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | checknum RB, ->vmeta_arith_vn
+ ||if (LJ_DUALNUM) {
+ | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_vn
+ ||}
+ ||if (sse) {
+ | movsd xmm0, qword [BASE+RB*8]
+ | sseins ssereg, qword [KBASE+RC*8]
+ ||} else {
+ | fld qword [BASE+RB*8]
+ | x87ins qword [KBASE+RC*8]
+ ||}
+ || break;
+ ||case 1:
+ | checknum RB, ->vmeta_arith_nv
+ ||if (LJ_DUALNUM) {
+ | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_nv
+ ||}
+ ||if (sse) {
+ | movsd xmm0, qword [KBASE+RC*8]
+ | sseins ssereg, qword [BASE+RB*8]
+ ||} else {
+ | fld qword [KBASE+RC*8]
+ | x87ins qword [BASE+RB*8]
+ ||}
+ || break;
+ ||default:
+ | checknum RB, ->vmeta_arith_vv
+ | checknum RC, ->vmeta_arith_vv
+ ||if (sse) {
+ | movsd xmm0, qword [BASE+RB*8]
+ | sseins ssereg, qword [BASE+RC*8]
+ ||} else {
+ | fld qword [BASE+RB*8]
+ | x87ins qword [BASE+RC*8]
+ ||}
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins
+ | ins_ABC
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | checkint RB, ->vmeta_arith_vn
+ | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_vn
+ | mov RB, [BASE+RB*8]
+ | intins RB, [KBASE+RC*8]; jo ->vmeta_arith_vno
+ || break;
+ ||case 1:
+ | checkint RB, ->vmeta_arith_nv
+ | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_nv
+ | mov RC, [KBASE+RC*8]
+ | intins RC, [BASE+RB*8]; jo ->vmeta_arith_nvo
+ || break;
+ ||default:
+ | checkint RB, ->vmeta_arith_vv
+ | checkint RC, ->vmeta_arith_vv
+ | mov RB, [BASE+RB*8]
+ | intins RB, [BASE+RC*8]; jo ->vmeta_arith_vvo
+ || break;
+ ||}
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ ||if (vk == 1) {
+ | mov dword [BASE+RA*8], RC
+ ||} else {
+ | mov dword [BASE+RA*8], RB
+ ||}
+ | ins_next
+ |.endmacro
+ |
+ |.macro ins_arithpost
+ ||if (sse) {
+ | movsd qword [BASE+RA*8], xmm0
+ ||} else {
+ | fstp qword [BASE+RA*8]
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arith, x87ins, sseins
+ | ins_arithpre x87ins, sseins, xmm0
+ | ins_arithpost
+ | ins_next
+ |.endmacro
+ |
+ |.macro ins_arith, intins, x87ins, sseins
+ ||if (LJ_DUALNUM) {
+ | ins_arithdn intins
+ ||} else {
+ | ins_arith, x87ins, sseins
+ ||}
+ |.endmacro
+
+ | // RA = dst, RB = src1 or num const, RC = src2 or num const
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith add, fadd, addsd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith sub, fsub, subsd
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith imul, fmul, mulsd
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arith fdiv, divsd
+ break;
+ case BC_MODVN:
+ | ins_arithpre fld, movsd, xmm1
+ |->BC_MODVN_Z:
+ | call ->vm_mod
+ | ins_arithpost
+ | ins_next
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arithpre fld, movsd, xmm1
+ | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ break;
+ case BC_POW:
+ | ins_arithpre fld, movsd, xmm1
+ | call ->vm_pow
+ | ins_arithpost
+ | ins_next
+ break;
+
+ case BC_CAT:
+ | ins_ABC // RA = dst, RB = src_start, RC = src_end
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE
+ | lea CARG2d, [BASE+RC*8]
+ | mov CARG3d, RC
+ | sub CARG3d, RB
+ |->BC_CAT_Z:
+ | mov L:RB, L:CARG1d
+ |.else
+ | lea RA, [BASE+RC*8]
+ | sub RC, RB
+ | mov ARG2, RA
+ | mov ARG3, RC
+ |->BC_CAT_Z:
+ | mov L:RB, SAVE_L
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jnz ->vmeta_binop
+ | movzx RB, PC_RB // Copy result to Stk[RA] from Stk[RB].
+ | movzx RA, PC_RA
+ |.if X64
+ | mov RCa, [BASE+RB*8]
+ | mov [BASE+RA*8], RCa
+ |.else
+ | mov RC, [BASE+RB*8+4]
+ | mov RB, [BASE+RB*8]
+ | mov [BASE+RA*8+4], RC
+ | mov [BASE+RA*8], RB
+ |.endif
+ | ins_next
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | ins_AND // RA = dst, RD = str const (~)
+ | mov RD, [KBASE+RD*4]
+ | mov dword [BASE+RA*8+4], LJ_TSTR
+ | mov [BASE+RA*8], RD
+ | ins_next
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ | ins_AND // RA = dst, RD = cdata const (~)
+ | mov RD, [KBASE+RD*4]
+ | mov dword [BASE+RA*8+4], LJ_TCDATA
+ | mov [BASE+RA*8], RD
+ | ins_next
+#endif
+ break;
+ case BC_KSHORT:
+ | ins_AD // RA = dst, RD = signed int16 literal
+ if (LJ_DUALNUM) {
+ | movsx RD, RDW
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ | mov dword [BASE+RA*8], RD
+ } else if (sse) {
+ | movsx RD, RDW // Sign-extend literal.
+ | cvtsi2sd xmm0, RD
+ | movsd qword [BASE+RA*8], xmm0
+ } else {
+ | fild PC_RD // Refetch signed RD from instruction.
+ | fstp qword [BASE+RA*8]
+ }
+ | ins_next
+ break;
+ case BC_KNUM:
+ | ins_AD // RA = dst, RD = num const
+ if (sse) {
+ | movsd xmm0, qword [KBASE+RD*8]
+ | movsd qword [BASE+RA*8], xmm0
+ } else {
+ | fld qword [KBASE+RD*8]
+ | fstp qword [BASE+RA*8]
+ }
+ | ins_next
+ break;
+ case BC_KPRI:
+ | ins_AND // RA = dst, RD = primitive type (~)
+ | mov [BASE+RA*8+4], RD
+ | ins_next
+ break;
+ case BC_KNIL:
+ | ins_AD // RA = dst_start, RD = dst_end
+ | lea RA, [BASE+RA*8+12]
+ | lea RD, [BASE+RD*8+4]
+ | mov RB, LJ_TNIL
+ | mov [RA-8], RB // Sets minimum 2 slots.
+ |1:
+ | mov [RA], RB
+ | add RA, 8
+ | cmp RA, RD
+ | jbe <1
+ | ins_next
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | ins_AD // RA = dst, RD = upvalue #
+ | mov LFUNC:RB, [BASE-8]
+ | mov UPVAL:RB, [LFUNC:RB+RD*4+offsetof(GCfuncL, uvptr)]
+ | mov RB, UPVAL:RB->v
+ |.if X64
+ | mov RDa, [RB]
+ | mov [BASE+RA*8], RDa
+ |.else
+ | mov RD, [RB+4]
+ | mov RB, [RB]
+ | mov [BASE+RA*8+4], RD
+ | mov [BASE+RA*8], RB
+ |.endif
+ | ins_next
+ break;
+ case BC_USETV:
+#define TV2MARKOFS \
+ ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv))
+ | ins_AD // RA = upvalue #, RD = src
+ | mov LFUNC:RB, [BASE-8]
+ | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)]
+ | cmp byte UPVAL:RB->closed, 0
+ | mov RB, UPVAL:RB->v
+ | mov RA, [BASE+RD*8]
+ | mov RD, [BASE+RD*8+4]
+ | mov [RB], RA
+ | mov [RB+4], RD
+ | jz >1
+ | // Check barrier for closed upvalue.
+ | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv)
+ | jnz >2
+ |1:
+ | ins_next
+ |
+ |2: // Upvalue is black. Check if new value is collectable and white.
+ | sub RD, LJ_TISGCV
+ | cmp RD, LJ_TISNUM - LJ_TISGCV // tvisgcv(v)
+ | jbe <1
+ | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v)
+ | jz <1
+ | // Crossed a write barrier. Move the barrier forward.
+ |.if X64 and not X64WIN
+ | mov FCARG2, RB
+ | mov RB, BASE // Save BASE.
+ |.else
+ | xchg FCARG2, RB // Save BASE (FCARG2 == BASE).
+ |.endif
+ | lea GL:FCARG1, [DISPATCH+GG_DISP2G]
+ | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv)
+ | mov BASE, RB // Restore BASE.
+ | jmp <1
+ break;
+#undef TV2MARKOFS
+ case BC_USETS:
+ | ins_AND // RA = upvalue #, RD = str const (~)
+ | mov LFUNC:RB, [BASE-8]
+ | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)]
+ | mov GCOBJ:RA, [KBASE+RD*4]
+ | mov RD, UPVAL:RB->v
+ | mov [RD], GCOBJ:RA
+ | mov dword [RD+4], LJ_TSTR
+ | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv)
+ | jnz >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str)
+ | jz <1
+ | cmp byte UPVAL:RB->closed, 0
+ | jz <1
+ | // Crossed a write barrier. Move the barrier forward.
+ | mov RB, BASE // Save BASE (FCARG2 == BASE).
+ | mov FCARG2, RD
+ | lea GL:FCARG1, [DISPATCH+GG_DISP2G]
+ | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv)
+ | mov BASE, RB // Restore BASE.
+ | jmp <1
+ break;
+ case BC_USETN:
+ | ins_AD // RA = upvalue #, RD = num const
+ | mov LFUNC:RB, [BASE-8]
+ if (sse) {
+ | movsd xmm0, qword [KBASE+RD*8]
+ } else {
+ | fld qword [KBASE+RD*8]
+ }
+ | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)]
+ | mov RA, UPVAL:RB->v
+ if (sse) {
+ | movsd qword [RA], xmm0
+ } else {
+ | fstp qword [RA]
+ }
+ | ins_next
+ break;
+ case BC_USETP:
+ | ins_AND // RA = upvalue #, RD = primitive type (~)
+ | mov LFUNC:RB, [BASE-8]
+ | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)]
+ | mov RA, UPVAL:RB->v
+ | mov [RA+4], RD
+ | ins_next
+ break;
+ case BC_UCLO:
+ | ins_AD // RA = level, RD = target
+ | branchPC RD // Do this first to free RD.
+ | mov L:RB, SAVE_L
+ | cmp dword L:RB->openupval, 0
+ | je >1
+ | mov L:RB->base, BASE
+ | lea FCARG2, [BASE+RA*8] // Caveat: FCARG2 == BASE
+ | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA
+ | call extern lj_func_closeuv@8 // (lua_State *L, TValue *level)
+ | mov BASE, L:RB->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | ins_AND // RA = dst, RD = proto const (~) (holding function prototype)
+ |.if X64
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG3d, [BASE-8]
+ | mov CARG2d, [KBASE+RD*4] // Fetch GCproto *.
+ | mov CARG1d, L:RB
+ |.else
+ | mov LFUNC:RA, [BASE-8]
+ | mov PROTO:RD, [KBASE+RD*4] // Fetch GCproto *.
+ | mov L:RB, SAVE_L
+ | mov ARG3, LFUNC:RA
+ | mov ARG2, PROTO:RD
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | call extern lj_func_newL_gc
+ | // GCfuncL * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RA, PC_RA
+ | mov [BASE+RA*8], LFUNC:RC
+ | mov dword [BASE+RA*8+4], LJ_TFUNC
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ | ins_AD // RA = dst, RD = hbits|asize
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
+ | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | mov SAVE_PC, PC
+ | jae >5
+ |1:
+ |.if X64
+ | mov CARG3d, RD
+ | and RD, 0x7ff
+ | shr CARG3d, 11
+ |.else
+ | mov RA, RD
+ | and RD, 0x7ff
+ | shr RA, 11
+ | mov ARG3, RA
+ |.endif
+ | cmp RD, 0x7ff
+ | je >3
+ |2:
+ |.if X64
+ | mov L:CARG1d, L:RB
+ | mov CARG2d, RD
+ |.else
+ | mov ARG1, L:RB
+ | mov ARG2, RD
+ |.endif
+ | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Table * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RA, PC_RA
+ | mov [BASE+RA*8], TAB:RC
+ | mov dword [BASE+RA*8+4], LJ_TTAB
+ | ins_next
+ |3: // Turn 0x7ff into 0x801.
+ | mov RD, 0x801
+ | jmp <2
+ |5:
+ | mov L:FCARG1, L:RB
+ | call extern lj_gc_step_fixtop@4 // (lua_State *L)
+ | movzx RD, PC_RD
+ | jmp <1
+ break;
+ case BC_TDUP:
+ | ins_AND // RA = dst, RD = table const (~) (holding template table)
+ | mov L:RB, SAVE_L
+ | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
+ | mov SAVE_PC, PC
+ | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | mov L:RB->base, BASE
+ | jae >3
+ |2:
+ | mov TAB:FCARG2, [KBASE+RD*4] // Caveat: FCARG2 == BASE
+ | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA
+ | call extern lj_tab_dup@8 // (lua_State *L, Table *kt)
+ | // Table * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RA, PC_RA
+ | mov [BASE+RA*8], TAB:RC
+ | mov dword [BASE+RA*8+4], LJ_TTAB
+ | ins_next
+ |3:
+ | mov L:FCARG1, L:RB
+ | call extern lj_gc_step_fixtop@4 // (lua_State *L)
+ | movzx RD, PC_RD // Need to reload RD.
+ | not RDa
+ | jmp <2
+ break;
+
+ case BC_GGET:
+ | ins_AND // RA = dst, RD = str const (~)
+ | mov LFUNC:RB, [BASE-8]
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*4]
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_GSET:
+ | ins_AND // RA = src, RD = str const (~)
+ | mov LFUNC:RB, [BASE-8]
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*4]
+ | jmp ->BC_TSETS_Z
+ break;
+
+ case BC_TGETV:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | checktab RB, ->vmeta_tgetv
+ | mov TAB:RB, [BASE+RB*8]
+ |
+ | // Integer key?
+ if (LJ_DUALNUM) {
+ | checkint RC, >5
+ | mov RC, dword [BASE+RC*8]
+ } else {
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ if (sse) {
+ | movsd xmm0, qword [BASE+RC*8]
+ | cvtsd2si RC, xmm0
+ | cvtsi2sd xmm1, RC
+ | ucomisd xmm0, xmm1
+ } else {
+ |.if not X64
+ | fld qword [BASE+RC*8]
+ | fist ARG1
+ | fild ARG1
+ | fcomparepp // eax (RC) modified!
+ | mov RC, ARG1
+ |.endif
+ }
+ | jne ->vmeta_tgetv // Generic numeric key? Use fallback.
+ }
+ | cmp RC, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tgetv // Not in array part? Use fallback.
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >2
+ | // Get array slot.
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC]
+ | mov RC, [RC+4]
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ |.endif
+ |1:
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz >3
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_index
+ | jz ->vmeta_tgetv // 'no __index' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ |3:
+ | mov dword [BASE+RA*8+4], LJ_TNIL
+ | jmp <1
+ |
+ |5: // String key?
+ | checkstr RC, ->vmeta_tgetv
+ | mov STR:RC, [BASE+RC*8]
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_TGETS:
+ | ins_ABC // RA = dst, RB = table, RC = str const (~)
+ | not RCa
+ | mov STR:RC, [KBASE+RC*4]
+ | checktab RB, ->vmeta_tgets
+ | mov TAB:RB, [BASE+RB*8]
+ |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA.
+ | mov RA, TAB:RB->hmask
+ | and RA, STR:RC->hash
+ | imul RA, #NODE
+ | add NODE:RA, TAB:RB->node
+ |1:
+ | cmp dword NODE:RA->key.it, LJ_TSTR
+ | jne >4
+ | cmp dword NODE:RA->key.gcr, STR:RC
+ | jne >4
+ | // Ok, key found. Assumes: offsetof(Node, val) == 0
+ | cmp dword [RA+4], LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >5 // Key found, but nil value?
+ | movzx RC, PC_RA
+ | // Get node value.
+ |.if X64
+ | mov RBa, [RA]
+ | mov [BASE+RC*8], RBa
+ |.else
+ | mov RB, [RA]
+ | mov RA, [RA+4]
+ | mov [BASE+RC*8], RB
+ | mov [BASE+RC*8+4], RA
+ |.endif
+ |2:
+ | ins_next
+ |
+ |3:
+ | movzx RC, PC_RA
+ | mov dword [BASE+RC*8+4], LJ_TNIL
+ | jmp <2
+ |
+ |4: // Follow hash chain.
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | mov TAB:RA, TAB:RB->metatable
+ | test TAB:RA, TAB:RA
+ | jz <3 // No metatable: done.
+ | test byte TAB:RA->nomm, 1<<MM_index
+ | jnz <3 // 'no __index' flag set: done.
+ | jmp ->vmeta_tgets // Caveat: preserve STR:RC.
+ break;
+ case BC_TGETB:
+ | ins_ABC // RA = dst, RB = table, RC = byte literal
+ | checktab RB, ->vmeta_tgetb
+ | mov TAB:RB, [BASE+RB*8]
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tgetb
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >2
+ | // Get array slot.
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC]
+ | mov RC, [RC+4]
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ |.endif
+ |1:
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz >3
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_index
+ | jz ->vmeta_tgetb // 'no __index' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ |3:
+ | mov dword [BASE+RA*8+4], LJ_TNIL
+ | jmp <1
+ break;
+
+ case BC_TSETV:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | checktab RB, ->vmeta_tsetv
+ | mov TAB:RB, [BASE+RB*8]
+ |
+ | // Integer key?
+ if (LJ_DUALNUM) {
+ | checkint RC, >5
+ | mov RC, dword [BASE+RC*8]
+ } else {
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ if (sse) {
+ | movsd xmm0, qword [BASE+RC*8]
+ | cvtsd2si RC, xmm0
+ | cvtsi2sd xmm1, RC
+ | ucomisd xmm0, xmm1
+ } else {
+ |.if not X64
+ | fld qword [BASE+RC*8]
+ | fist ARG1
+ | fild ARG1
+ | fcomparepp // eax (RC) modified!
+ | mov RC, ARG1
+ |.endif
+ }
+ | jne ->vmeta_tsetv // Generic numeric key? Use fallback.
+ }
+ | cmp RC, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tsetv
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ |.if X64
+ | mov RBa, [BASE+RA*8]
+ | mov [RC], RBa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz <1
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetv // 'no __newindex' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <1
+ |
+ |5: // String key?
+ | checkstr RC, ->vmeta_tsetv
+ | mov STR:RC, [BASE+RC*8]
+ | jmp ->BC_TSETS_Z
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RA
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <2
+ break;
+ case BC_TSETS:
+ | ins_ABC // RA = src, RB = table, RC = str const (~)
+ | not RCa
+ | mov STR:RC, [KBASE+RC*4]
+ | checktab RB, ->vmeta_tsets
+ | mov TAB:RB, [BASE+RB*8]
+ |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA.
+ | mov RA, TAB:RB->hmask
+ | and RA, STR:RC->hash
+ | imul RA, #NODE
+ | mov byte TAB:RB->nomm, 0 // Clear metamethod cache.
+ | add NODE:RA, TAB:RB->node
+ |1:
+ | cmp dword NODE:RA->key.it, LJ_TSTR
+ | jne >5
+ | cmp dword NODE:RA->key.gcr, STR:RC
+ | jne >5
+ | // Ok, key found. Assumes: offsetof(Node, val) == 0
+ | cmp dword [RA+4], LJ_TNIL
+ | je >4 // Previous value is nil?
+ |2:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |3: // Set node value.
+ | movzx RC, PC_RA
+ |.if X64
+ | mov RBa, [BASE+RC*8]
+ | mov [RA], RBa
+ |.else
+ | mov RB, [BASE+RC*8+4]
+ | mov RC, [BASE+RC*8]
+ | mov [RA+4], RB
+ | mov [RA], RC
+ |.endif
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz <2
+ | mov TMP1, RA // Save RA.
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ | mov RA, TMP1 // Restore RA.
+ | jmp <2
+ |
+ |5: // Follow hash chain.
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | mov TAB:RA, TAB:RB->metatable
+ | test TAB:RA, TAB:RA
+ | jz >6 // No metatable: continue.
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mov TMP1, STR:RC
+ | mov TMP2, LJ_TSTR
+ | mov TMP3, TAB:RB // Save TAB:RB for us.
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE
+ | lea CARG3, TMP1
+ | mov CARG2d, TAB:RB
+ | mov L:RB, L:CARG1d
+ |.else
+ | lea RC, TMP1 // Store temp. TValue in TMP1/TMP2.
+ | mov ARG2, TAB:RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Handles write barrier for the new key. TValue * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | mov TAB:RB, TMP3 // Need TAB:RB for barrier.
+ | mov RA, eax
+ | jmp <2 // Must check write barrier for value.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RC // Destroys STR:RC.
+ | jmp <3
+ break;
+ case BC_TSETB:
+ | ins_ABC // RA = src, RB = table, RC = byte literal
+ | checktab RB, ->vmeta_tsetb
+ | mov TAB:RB, [BASE+RB*8]
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tsetb
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ |.if X64
+ | mov RAa, [BASE+RA*8]
+ | mov [RC], RAa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz <1
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetb // 'no __newindex' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <1
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RA
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <2
+ break;
+
+ case BC_TSETM:
+ | ins_AD // RA = base (table at base-1), RD = num const (start index)
+ | mov TMP1, KBASE // Need one more free register.
+ | mov KBASE, dword [KBASE+RD*8] // Integer constant is in lo-word.
+ |1:
+ | lea RA, [BASE+RA*8]
+ | mov TAB:RB, [RA-8] // Guaranteed to be a table.
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | mov RD, MULTRES
+ | sub RD, 1
+ | jz >4 // Nothing to copy?
+ | add RD, KBASE // Compute needed size.
+ | cmp RD, TAB:RB->asize
+ | ja >5 // Doesn't fit into array part?
+ | sub RD, KBASE
+ | shl KBASE, 3
+ | add KBASE, TAB:RB->array
+ |3: // Copy result slots to table.
+ |.if X64
+ | mov RBa, [RA]
+ | add RA, 8
+ | mov [KBASE], RBa
+ |.else
+ | mov RB, [RA]
+ | mov [KBASE], RB
+ | mov RB, [RA+4]
+ | add RA, 8
+ | mov [KBASE+4], RB
+ |.endif
+ | add KBASE, 8
+ | sub RD, 1
+ | jnz <3
+ |4:
+ | mov KBASE, TMP1
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, TAB:RB
+ | mov CARG3d, RD
+ | mov L:RB, L:CARG1d
+ |.else
+ | mov ARG2, TAB:RB
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov ARG3, RD
+ | mov ARG1, L:RB
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | mov BASE, L:RB->base
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <1 // Retry.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:RB, RD
+ | jmp <2
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALL: case BC_CALLM:
+ | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs
+ if (op == BC_CALLM) {
+ | add NARGS:RD, MULTRES
+ }
+ | cmp dword [BASE+RA*8+4], LJ_TFUNC
+ | mov LFUNC:RB, [BASE+RA*8]
+ | jne ->vmeta_call_ra
+ | lea BASE, [BASE+RA*8+8]
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | ins_AD // RA = base, RD = extra_nargs
+ | add NARGS:RD, MULTRES
+ | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op.
+ break;
+ case BC_CALLT:
+ | ins_AD // RA = base, RD = nargs+1
+ | lea RA, [BASE+RA*8+8]
+ | mov KBASE, BASE // Use KBASE for move + vmeta_call hint.
+ | mov LFUNC:RB, [RA-8]
+ | cmp dword [RA-4], LJ_TFUNC
+ | jne ->vmeta_call
+ |->BC_CALLT_Z:
+ | mov PC, [BASE-4]
+ | test PC, FRAME_TYPE
+ | jnz >7
+ |1:
+ | mov [BASE-8], LFUNC:RB // Copy function down, reloaded below.
+ | mov MULTRES, NARGS:RD
+ | sub NARGS:RD, 1
+ | jz >3
+ |2: // Move args down.
+ |.if X64
+ | mov RBa, [RA]
+ | add RA, 8
+ | mov [KBASE], RBa
+ |.else
+ | mov RB, [RA]
+ | mov [KBASE], RB
+ | mov RB, [RA+4]
+ | add RA, 8
+ | mov [KBASE+4], RB
+ |.endif
+ | add KBASE, 8
+ | sub NARGS:RD, 1
+ | jnz <2
+ |
+ | mov LFUNC:RB, [BASE-8]
+ |3:
+ | mov NARGS:RD, MULTRES
+ | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function?
+ | ja >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function.
+ | test PC, FRAME_TYPE // Lua frame below?
+ | jnz <4
+ | movzx RA, PC_RA
+ | not RAa
+ | lea RA, [BASE+RA*8]
+ | mov LFUNC:KBASE, [RA-8] // Need to prepare KBASE.
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | jmp <4
+ |
+ |7: // Tailcall from a vararg function.
+ | sub PC, FRAME_VARG
+ | test PC, FRAME_TYPEP
+ | jnz >8 // Vararg frame below?
+ | sub BASE, PC // Need to relocate BASE/KBASE down.
+ | mov KBASE, BASE
+ | mov PC, [BASE-4]
+ | jmp <1
+ |8:
+ | add PC, FRAME_VARG
+ | jmp <1
+ break;
+
+ case BC_ITERC:
+ | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1)
+ | lea RA, [BASE+RA*8+8] // fb = base+1
+ |.if X64
+ | mov RBa, [RA-24] // Copy state. fb[0] = fb[-3].
+ | mov RCa, [RA-16] // Copy control var. fb[1] = fb[-2].
+ | mov [RA], RBa
+ | mov [RA+8], RCa
+ |.else
+ | mov RB, [RA-24] // Copy state. fb[0] = fb[-3].
+ | mov RC, [RA-20]
+ | mov [RA], RB
+ | mov [RA+4], RC
+ | mov RB, [RA-16] // Copy control var. fb[1] = fb[-2].
+ | mov RC, [RA-12]
+ | mov [RA+8], RB
+ | mov [RA+12], RC
+ |.endif
+ | mov LFUNC:RB, [RA-32] // Copy callable. fb[-1] = fb[-4]
+ | mov RC, [RA-28]
+ | mov [RA-8], LFUNC:RB
+ | mov [RA-4], RC
+ | cmp RC, LJ_TFUNC // Handle like a regular 2-arg call.
+ | mov NARGS:RD, 2+1
+ | jne ->vmeta_call
+ | mov BASE, RA
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
+#if LJ_HASJIT
+ | // NYI: add hotloop, record BC_ITERN.
+#endif
+ | mov TMP1, KBASE // Need two more free registers.
+ | mov TMP2, DISPATCH
+ | mov TAB:RB, [BASE+RA*8-16]
+ | mov RC, [BASE+RA*8-8] // Get index from control var.
+ | mov DISPATCH, TAB:RB->asize
+ | add PC, 4
+ | mov KBASE, TAB:RB->array
+ |1: // Traverse array part.
+ | cmp RC, DISPATCH; jae >5 // Index points after array part?
+ | cmp dword [KBASE+RC*8+4], LJ_TNIL; je >4
+ if (LJ_DUALNUM) {
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ | mov dword [BASE+RA*8], RC
+ } else if (sse) {
+ | cvtsi2sd xmm0, RC
+ } else {
+ | fild dword [BASE+RA*8-8]
+ }
+ | // Copy array slot to returned value.
+ |.if X64
+ | mov RBa, [KBASE+RC*8]
+ | mov [BASE+RA*8+8], RBa
+ |.else
+ | mov RB, [KBASE+RC*8+4]
+ | mov [BASE+RA*8+12], RB
+ | mov RB, [KBASE+RC*8]
+ | mov [BASE+RA*8+8], RB
+ |.endif
+ | add RC, 1
+ | // Return array index as a numeric key.
+ if (LJ_DUALNUM) {
+ | // See above.
+ } else if (sse) {
+ | movsd qword [BASE+RA*8], xmm0
+ } else {
+ | fstp qword [BASE+RA*8]
+ }
+ | mov [BASE+RA*8-8], RC // Update control var.
+ |2:
+ | movzx RD, PC_RD // Get target from ITERL.
+ | branchPC RD
+ |3:
+ | mov DISPATCH, TMP2
+ | mov KBASE, TMP1
+ | ins_next
+ |
+ |4: // Skip holes in array part.
+ | add RC, 1
+ if (!LJ_DUALNUM && !sse) {
+ | mov [BASE+RA*8-8], RC
+ }
+ | jmp <1
+ |
+ |5: // Traverse hash part.
+ | sub RC, DISPATCH
+ |6:
+ | cmp RC, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1.
+ | imul KBASE, RC, #NODE
+ | add NODE:KBASE, TAB:RB->node
+ | cmp dword NODE:KBASE->val.it, LJ_TNIL; je >7
+ | lea DISPATCH, [RC+DISPATCH+1]
+ | // Copy key and value from hash slot.
+ |.if X64
+ | mov RBa, NODE:KBASE->key
+ | mov RCa, NODE:KBASE->val
+ | mov [BASE+RA*8], RBa
+ | mov [BASE+RA*8+8], RCa
+ |.else
+ | mov RB, NODE:KBASE->key.gcr
+ | mov RC, NODE:KBASE->key.it
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ | mov RB, NODE:KBASE->val.gcr
+ | mov RC, NODE:KBASE->val.it
+ | mov [BASE+RA*8+8], RB
+ | mov [BASE+RA*8+12], RC
+ |.endif
+ | mov [BASE+RA*8-8], DISPATCH
+ | jmp <2
+ |
+ |7: // Skip holes in hash part.
+ | add RC, 1
+ | jmp <6
+ break;
+
+ case BC_ISNEXT:
+ | ins_AD // RA = base, RD = target (points to ITERN)
+ | cmp dword [BASE+RA*8-20], LJ_TFUNC; jne >5
+ | mov CFUNC:RB, [BASE+RA*8-24]
+ | cmp dword [BASE+RA*8-12], LJ_TTAB; jne >5
+ | cmp dword [BASE+RA*8-4], LJ_TNIL; jne >5
+ | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5
+ | branchPC RD
+ | mov dword [BASE+RA*8-8], 0 // Initialize control var.
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | mov PC_OP, BC_JMP
+ | branchPC RD
+ | mov byte [PC], BC_ITERC
+ | jmp <1
+ break;
+
+ case BC_VARG:
+ | ins_ABC // RA = base, RB = nresults+1, RC = numparams
+ | mov TMP1, KBASE // Need one more free register.
+ | lea KBASE, [BASE+RC*8+(8+FRAME_VARG)]
+ | lea RA, [BASE+RA*8]
+ | sub KBASE, [BASE-4]
+ | // Note: KBASE may now be even _above_ BASE if nargs was < numparams.
+ | test RB, RB
+ | jz >5 // Copy all varargs?
+ | lea RB, [RA+RB*8-8]
+ | cmp KBASE, BASE // No vararg slots?
+ | jnb >2
+ |1: // Copy vararg slots to destination slots.
+ |.if X64
+ | mov RCa, [KBASE-8]
+ | add KBASE, 8
+ | mov [RA], RCa
+ |.else
+ | mov RC, [KBASE-8]
+ | mov [RA], RC
+ | mov RC, [KBASE-4]
+ | add KBASE, 8
+ | mov [RA+4], RC
+ |.endif
+ | add RA, 8
+ | cmp RA, RB // All destination slots filled?
+ | jnb >3
+ | cmp KBASE, BASE // No more vararg slots?
+ | jb <1
+ |2: // Fill up remainder with nil.
+ | mov dword [RA+4], LJ_TNIL
+ | add RA, 8
+ | cmp RA, RB
+ | jb <2
+ |3:
+ | mov KBASE, TMP1
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | mov MULTRES, 1 // MULTRES = 0+1
+ | mov RC, BASE
+ | sub RC, KBASE
+ | jbe <3 // No vararg slots?
+ | mov RB, RC
+ | shr RB, 3
+ | add RB, 1
+ | mov MULTRES, RB // MULTRES = #varargs+1
+ | mov L:RB, SAVE_L
+ | add RC, RA
+ | cmp RC, L:RB->maxstack
+ | ja >7 // Need to grow stack?
+ |6: // Copy all vararg slots.
+ |.if X64
+ | mov RCa, [KBASE-8]
+ | add KBASE, 8
+ | mov [RA], RCa
+ |.else
+ | mov RC, [KBASE-8]
+ | mov [RA], RC
+ | mov RC, [KBASE-4]
+ | add KBASE, 8
+ | mov [RA+4], RC
+ |.endif
+ | add RA, 8
+ | cmp KBASE, BASE // No more vararg slots?
+ | jb <6
+ | jmp <3
+ |
+ |7: // Grow stack for varargs.
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RA
+ | mov SAVE_PC, PC
+ | sub KBASE, BASE // Need delta, because BASE may change.
+ | mov FCARG2, MULTRES
+ | sub FCARG2, 1
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | mov RA, L:RB->top
+ | add KBASE, BASE
+ | jmp <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | ins_AD // RA = results, RD = extra_nresults
+ | add RD, MULTRES // MULTRES >=1, so RD >=1.
+ | // Fall through. Assumes BC_RET follows and ins_AD is a no-op.
+ break;
+
+ case BC_RET: case BC_RET0: case BC_RET1:
+ | ins_AD // RA = results, RD = nresults+1
+ if (op != BC_RET0) {
+ | shl RA, 3
+ }
+ |1:
+ | mov PC, [BASE-4]
+ | mov MULTRES, RD // Save nresults+1.
+ | test PC, FRAME_TYPE // Check frame type marker.
+ | jnz >7 // Not returning to a fixarg Lua func?
+ switch (op) {
+ case BC_RET:
+ |->BC_RET_Z:
+ | mov KBASE, BASE // Use KBASE for result move.
+ | sub RD, 1
+ | jz >3
+ |2: // Move results down.
+ |.if X64
+ | mov RBa, [KBASE+RA]
+ | mov [KBASE-8], RBa
+ |.else
+ | mov RB, [KBASE+RA]
+ | mov [KBASE-8], RB
+ | mov RB, [KBASE+RA+4]
+ | mov [KBASE-4], RB
+ |.endif
+ | add KBASE, 8
+ | sub RD, 1
+ | jnz <2
+ |3:
+ | mov RD, MULTRES // Note: MULTRES may be >255.
+ | movzx RB, PC_RB // So cannot compare with RDL!
+ |5:
+ | cmp RB, RD // More results expected?
+ | ja >6
+ break;
+ case BC_RET1:
+ |.if X64
+ | mov RBa, [BASE+RA]
+ | mov [BASE-8], RBa
+ |.else
+ | mov RB, [BASE+RA+4]
+ | mov [BASE-4], RB
+ | mov RB, [BASE+RA]
+ | mov [BASE-8], RB
+ |.endif
+ /* fallthrough */
+ case BC_RET0:
+ |5:
+ | cmp PC_RB, RDL // More results expected?
+ | ja >6
+ default:
+ break;
+ }
+ | movzx RA, PC_RA
+ | not RAa // Note: ~RA = -(RA+1)
+ | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8
+ | mov LFUNC:KBASE, [BASE-8]
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ if (op == BC_RET) {
+ | mov dword [KBASE-4], LJ_TNIL // Note: relies on shifted base.
+ | add KBASE, 8
+ } else {
+ | mov dword [BASE+RD*8-12], LJ_TNIL
+ }
+ | add RD, 1
+ | jmp <5
+ |
+ |7: // Non-standard return case.
+ | lea RB, [PC-FRAME_VARG]
+ | test RB, FRAME_TYPEP
+ | jnz ->vm_return
+ | // Return from vararg function: relocate BASE down and RA up.
+ | sub BASE, RB
+ if (op != BC_RET0) {
+ | add RA, RB
+ }
+ | jmp <1
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ |.define FOR_IDX, [RA]; .define FOR_TIDX, dword [RA+4]
+ |.define FOR_STOP, [RA+8]; .define FOR_TSTOP, dword [RA+12]
+ |.define FOR_STEP, [RA+16]; .define FOR_TSTEP, dword [RA+20]
+ |.define FOR_EXT, [RA+24]; .define FOR_TEXT, dword [RA+28]
+
+ case BC_FORL:
+#if LJ_HASJIT
+ | hotloop RB
+#endif
+ | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | ins_AJ // RA = base, RD = target (after end of loop or start of loop)
+ | lea RA, [BASE+RA*8]
+ if (LJ_DUALNUM) {
+ | cmp FOR_TIDX, LJ_TISNUM; jne >9
+ if (!vk) {
+ | cmp FOR_TSTOP, LJ_TISNUM; jne ->vmeta_for
+ | cmp FOR_TSTEP, LJ_TISNUM; jne ->vmeta_for
+ | mov RB, dword FOR_IDX
+ | cmp dword FOR_STEP, 0; jl >5
+ } else {
+#ifdef LUA_USE_ASSERT
+ | cmp FOR_TSTOP, LJ_TISNUM; jne ->assert_bad_for_arg_type
+ | cmp FOR_TSTEP, LJ_TISNUM; jne ->assert_bad_for_arg_type
+#endif
+ | mov RB, dword FOR_STEP
+ | test RB, RB; js >5
+ | add RB, dword FOR_IDX; jo >1
+ | mov dword FOR_IDX, RB
+ }
+ | cmp RB, dword FOR_STOP
+ | mov FOR_TEXT, LJ_TISNUM
+ | mov dword FOR_EXT, RB
+ if (op == BC_FORI) {
+ | jle >7
+ |1:
+ |6:
+ | branchPC RD
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RD, PC_RD
+ | jle =>BC_JLOOP
+ |1:
+ |6:
+ } else if (op == BC_IFORL) {
+ | jg >7
+ |6:
+ | branchPC RD
+ |1:
+ } else {
+ | jle =>BC_JLOOP
+ |1:
+ |6:
+ }
+ |7:
+ | ins_next
+ |
+ |5: // Invert check for negative step.
+ if (vk) {
+ | add RB, dword FOR_IDX; jo <1
+ | mov dword FOR_IDX, RB
+ }
+ | cmp RB, dword FOR_STOP
+ | mov FOR_TEXT, LJ_TISNUM
+ | mov dword FOR_EXT, RB
+ if (op == BC_FORI) {
+ | jge <7
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RD, PC_RD
+ | jge =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ | jl <7
+ } else {
+ | jge =>BC_JLOOP
+ }
+ | jmp <6
+ |9: // Fallback to FP variant.
+ } else if (!vk) {
+ | cmp FOR_TIDX, LJ_TISNUM
+ }
+ if (!vk) {
+ | jae ->vmeta_for
+ | cmp FOR_TSTOP, LJ_TISNUM; jae ->vmeta_for
+ } else {
+#ifdef LUA_USE_ASSERT
+ | cmp FOR_TSTOP, LJ_TISNUM; jae ->assert_bad_for_arg_type
+ | cmp FOR_TSTEP, LJ_TISNUM; jae ->assert_bad_for_arg_type
+#endif
+ }
+ | mov RB, FOR_TSTEP // Load type/hiword of for step.
+ if (!vk) {
+ | cmp RB, LJ_TISNUM; jae ->vmeta_for
+ }
+ if (sse) {
+ | movsd xmm0, qword FOR_IDX
+ | movsd xmm1, qword FOR_STOP
+ if (vk) {
+ | addsd xmm0, qword FOR_STEP
+ | movsd qword FOR_IDX, xmm0
+ | test RB, RB; js >3
+ } else {
+ | jl >3
+ }
+ | ucomisd xmm1, xmm0
+ |1:
+ | movsd qword FOR_EXT, xmm0
+ } else {
+ | fld qword FOR_STOP
+ | fld qword FOR_IDX
+ if (vk) {
+ | fadd qword FOR_STEP // nidx = idx + step
+ | fst qword FOR_IDX
+ | fst qword FOR_EXT
+ | test RB, RB; js >1
+ } else {
+ | fst qword FOR_EXT
+ | jl >1
+ }
+ | fxch // Swap lim/(n)idx if step non-negative.
+ |1:
+ | fcomparepp // eax (RD) modified if !cmov.
+ if (!cmov) {
+ | movzx RD, PC_RD // Need to reload RD.
+ }
+ }
+ if (op == BC_FORI) {
+ if (LJ_DUALNUM) {
+ | jnb <7
+ } else {
+ | jnb >2
+ | branchPC RD
+ }
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RD, PC_RD
+ | jnb =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ if (LJ_DUALNUM) {
+ | jb <7
+ } else {
+ | jb >2
+ | branchPC RD
+ }
+ } else {
+ | jnb =>BC_JLOOP
+ }
+ if (LJ_DUALNUM) {
+ | jmp <6
+ } else {
+ |2:
+ | ins_next
+ }
+ if (sse) {
+ |3: // Invert comparison if step is negative.
+ | ucomisd xmm0, xmm1
+ | jmp <1
+ }
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ | hotloop RB
+#endif
+ | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | ins_AJ // RA = base, RD = target
+ | lea RA, [BASE+RA*8]
+ | mov RB, [RA+4]
+ | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | mov [RA-4], RB
+ | mov RB, [RA]
+ | mov [RA-8], RB
+ | jmp =>BC_JLOOP
+ } else {
+ | branchPC RD // Otherwise save control var + branch.
+ | mov RD, [RA]
+ | mov [RA-4], RB
+ | mov [RA-8], RD
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | ins_A // RA = base, RD = target (loop extent)
+ | // Note: RA/RD are only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+#if LJ_HASJIT
+ | hotloop RB
+#endif
+ | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op.
+ break;
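+ /* In C terms, the hotloop check above amounts to roughly this (a simplified
+ ** sketch with hypothetical names; the real counter is a 16-bit slot in the
+ ** dispatch table, keyed by the PC):
+ **
+ **   if (--hotcount_for(PC) wraps below zero)   // the 'jb' inside the macro
+ **     goto vm_hotloop;                         // hand the loop to the trace recorder
+ */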
+
+ case BC_ILOOP:
+ | ins_A // RA = base, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ | ins_AD // RA = base (ignored), RD = traceno
+ | mov RA, [DISPATCH+DISPATCH_J(trace)]
+ | mov TRACE:RD, [RA+RD*4]
+ | mov RDa, TRACE:RD->mcode
+ | mov L:RB, SAVE_L
+ | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE
+ | mov [DISPATCH+DISPATCH_GL(jit_L)], L:RB
+ | // Save additional callee-save registers only used in compiled code.
+ |.if X64WIN
+ | mov TMPQ, r12
+ | mov TMPa, r13
+ | mov CSAVE_4, r14
+ | mov CSAVE_3, r15
+ | mov RAa, rsp
+ | sub rsp, 9*16+4*8
+ | movdqa [RAa], xmm6
+ | movdqa [RAa-1*16], xmm7
+ | movdqa [RAa-2*16], xmm8
+ | movdqa [RAa-3*16], xmm9
+ | movdqa [RAa-4*16], xmm10
+ | movdqa [RAa-5*16], xmm11
+ | movdqa [RAa-6*16], xmm12
+ | movdqa [RAa-7*16], xmm13
+ | movdqa [RAa-8*16], xmm14
+ | movdqa [RAa-9*16], xmm15
+ |.elif X64
+ | mov TMPQ, r12
+ | mov TMPa, r13
+ | sub rsp, 16
+ |.endif
+ | jmp RDa
+#endif
+ break;
+
+ case BC_JMP:
+ | ins_AJ // RA = unused, RD = target
+ | branchPC RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ /*
+ ** Reminder: A function may be called with func/args above L->maxstack,
+ ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot,
+ ** too. This means all FUNC* ops (including fast functions) must check
+ ** for stack overflow _before_ adding more slots!
+ */
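+ /* A rough C rendering of that rule, with a hypothetical grow_stack()
+ ** standing in for the ->vm_growstack_* targets; the interpreted and C
+ ** function headers below perform this comparison in assembly (against the
+ ** frame size or LUA_MINSTACK) before they touch any new slot:
+ **
+ **   TValue *top = base + framesize;   // top of the frame about to be set up
+ **   if (top > L->maxstack)            // func/args may already occupy
+ **     grow_stack(L, framesize);       // EXTRA_STACK, so check first!
+ */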
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ | hotcall RB
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
+ | mov KBASE, [PC-4+PC2PROTO(k)]
+ | mov L:RB, SAVE_L
+ | lea RA, [BASE+RA*8] // Top of frame.
+ | cmp RA, L:RB->maxstack
+ | ja ->vm_growstack_f
+ | movzx RA, byte [PC-4+PC2PROTO(numparams)]
+ | cmp NARGS:RD, RA // Check for missing parameters.
+ | jbe >3
+ |2:
+ if (op == BC_JFUNCF) {
+ | movzx RD, PC_RD
+ | jmp =>BC_JLOOP
+ } else {
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | mov dword [BASE+NARGS:RD*8-4], LJ_TNIL
+ | add NARGS:RD, 1
+ | cmp NARGS:RD, RA
+ | jbe <3
+ | jmp <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | int3 // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
+ | lea RB, [NARGS:RD*8+FRAME_VARG]
+ | lea RD, [BASE+NARGS:RD*8]
+ | mov LFUNC:KBASE, [BASE-8]
+ | mov [RD-4], RB // Store delta + FRAME_VARG.
+ | mov [RD-8], LFUNC:KBASE // Store copy of LFUNC.
+ | mov L:RB, SAVE_L
+ | lea RA, [RD+RA*8]
+ | cmp RA, L:RB->maxstack
+ | ja ->vm_growstack_v // Need to grow stack.
+ | mov RA, BASE
+ | mov BASE, RD
+ | movzx RB, byte [PC-4+PC2PROTO(numparams)]
+ | test RB, RB
+ | jz >2
+ |1: // Copy fixarg slots up to new frame.
+ | add RA, 8
+ | cmp RA, BASE
+ | jnb >3 // Less args than parameters?
+ | mov KBASE, [RA-8]
+ | mov [RD], KBASE
+ | mov KBASE, [RA-4]
+ | mov [RD+4], KBASE
+ | add RD, 8
+ | mov dword [RA-4], LJ_TNIL // Clear old fixarg slot (help the GC).
+ | sub RB, 1
+ | jnz <1
+ |2:
+ if (op == BC_JFUNCV) {
+ | movzx RD, PC_RD
+ | jmp =>BC_JLOOP
+ } else {
+ | mov KBASE, [PC-4+PC2PROTO(k)]
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | mov dword [RD+4], LJ_TNIL
+ | add RD, 8
+ | sub RB, 1
+ | jnz <3
+ | jmp <2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1
+ | mov CFUNC:RB, [BASE-8]
+ | mov KBASEa, CFUNC:RB->f
+ | mov L:RB, SAVE_L
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB->base, BASE
+ | lea RA, [RD+8*LUA_MINSTACK]
+ | cmp RA, L:RB->maxstack
+ | mov L:RB->top, RD
+ if (op == BC_FUNCC) {
+ |.if X64
+ | mov CARG1d, L:RB // Caveat: CARG1d may be RA.
+ |.else
+ | mov ARG1, L:RB
+ |.endif
+ } else {
+ |.if X64
+ | mov CARG2, KBASEa
+ | mov CARG1d, L:RB // Caveat: CARG1d may be RA.
+ |.else
+ | mov ARG2, KBASEa
+ | mov ARG1, L:RB
+ |.endif
+ }
+ | ja ->vm_growstack_c // Need to grow stack.
+ | set_vmstate C
+ if (op == BC_FUNCC) {
+ | call KBASEa // (lua_State *L)
+ } else {
+ | // (lua_State *L, lua_CFunction f)
+ | call aword [DISPATCH+DISPATCH_GL(wrapf)]
+ }
+ | set_vmstate INTERP
+ | // nresults returned in eax (RD).
+ | mov BASE, L:RB->base
+ | lea RA, [BASE+RD*8]
+ | neg RA
+ | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8
+ | mov PC, [BASE-4] // Fetch PC of caller.
+ | jmp ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+ int cmov = 1;
+ int sse = 0;
+#ifdef LUAJIT_CPU_NOCMOV
+ cmov = 0;
+#endif
+#if defined(LUAJIT_CPU_SSE2) || defined(LJ_TARGET_X64)
+ sse = 1;
+#endif
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx, cmov, sse);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op, cmov, sse);
+
+ return BC__MAX;
+}
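+
+/* For orientation, a DynASM backend like the one above is typically driven
+** through the generic DynASM C API, roughly as sketched below (names are
+** approximate, not a verbatim copy of buildvm.c):
+**
+**   dasm_init(Dst, DASM_MAXSECTION);              // allocate encoder state
+**   dasm_setupglobal(Dst, ctx->glob, GLOB__MAX);  // table for ->label addresses
+**   dasm_setup(Dst, build_actionlist);            // bind the action list
+**   int npc = build_backend(ctx);                 // emit all bytecode handlers
+**   dasm_link(Dst, &ctx->codesz);                 // resolve labels, get code size
+**   dasm_encode(Dst, ctx->code);                  // write the machine code
+*/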
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+#if LJ_64
+#define SZPTR "8"
+#define BSZPTR "3"
+#define REG_SP "0x7"
+#define REG_RA "0x10"
+#else
+#define SZPTR "4"
+#define BSZPTR "2"
+#define REG_SP "0x4"
+#define REG_RA "0x8"
+#endif
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad .Lbegin\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE0:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad lj_vm_ffi_call\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if (defined(__sun__) && defined(__svr4__)) || defined(__solaris__)
+ fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
+#else
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+#endif
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE2:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.section .eh_frame,\"dr\"\n");
+ fprintf(ctx->fp,
+ "\t.def %slj_err_unwind_dwarf; .scl 2; .type 32; .endef\n",
+ LJ_32 ? "_" : "");
+ fprintf(ctx->fp,
+ "Lframe1:\n"
+ "\t.long LECIE1-LSCIE1\n"
+ "LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zP\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 5\n" /* augmentation length */
+ "\t.byte 0x00\n" /* absptr */
+ "\t.long %slj_err_unwind_dwarf\n"
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ "LECIE1:\n\n", LJ_32 ? "_" : "");
+ fprintf(ctx->fp,
+ "LSFDE1:\n"
+ "\t.long LEFDE1-LASFDE1\n"
+ "LASFDE1:\n"
+ "\t.long LASFDE1-Lframe1\n"
+ "\t.long %slj_vm_asm_begin\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ "LEFDE1:\n\n", LJ_32 ? "_" : "", (int)ctx->codesz, CFRAME_SIZE);
+ break;
+ /* Mental note: never let Apple design an assembler.
+ ** Or a linker. Or a plastic case. But I digress.
+ */
+ case BUILD_machasm: {
+#if LJ_HASFFI
+ int fcsize = 0;
+#endif
+ int i;
+ fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
+ fprintf(ctx->fp,
+ "EH_frame1:\n"
+ "\t.set L$set$x,LECIEX-LSCIEX\n"
+ "\t.long L$set$x\n"
+ "LSCIEX:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zPR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 6\n" /* augmentation length */
+ "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
+#if LJ_64
+ "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEX:\n\n");
+ for (i = 0; i < ctx->nsym; i++) {
+ const char *name = ctx->sym[i].name;
+ int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs;
+ if (size == 0) continue;
+#if LJ_HASFFI
+ if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
+#endif
+ fprintf(ctx->fp,
+ "%s.eh:\n"
+ "LSFDE%d:\n"
+ "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
+ "\t.long L$set$%d\n"
+ "LASFDE%d:\n"
+ "\t.long LASFDE%d-EH_frame1\n"
+ "\t.long %s-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/
+ "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDE%d:\n\n",
+ name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i);
+ }
+#if LJ_HASFFI
+ if (fcsize) {
+ fprintf(ctx->fp,
+ "EH_frame2:\n"
+ "\t.set L$set$y,LECIEY-LSCIEY\n"
+ "\t.long L$set$y\n"
+ "LSCIEY:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 1\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEY:\n\n");
+ fprintf(ctx->fp,
+ "_lj_vm_ffi_call.eh:\n"
+ "LSFDEY:\n"
+ "\t.set L$set$yy,LEFDEY-LASFDEY\n"
+ "\t.long L$set$yy\n"
+ "LASFDEY:\n"
+ "\t.long LASFDEY-EH_frame2\n"
+ "\t.long _lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.byte 8\n" /* def_cfa_offset */
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/
+ "\t.byte 0xd\n\t.uleb128 0x4\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDEY:\n\n", fcsize);
+ }
+#endif
+#if LJ_64
+ fprintf(ctx->fp, "\t.subsections_via_symbols\n");
+#else
+ fprintf(ctx->fp,
+ "\t.non_lazy_symbol_pointer\n"
+ "L_lj_err_unwind_dwarf$non_lazy_ptr:\n"
+ ".indirect_symbol _lj_err_unwind_dwarf\n"
+ ".long 0\n");
+#endif
+ }
+ break;
+ default: /* Difficult for other modes. */
+ break;
+ }
+}
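+
+/* For reference: the DWARF pointer-encoding bytes hardcoded above decompose
+** into the standard DW_EH_PE_* constants as follows:
+**   0x00 = DW_EH_PE_absptr
+**   0x1b = DW_EH_PE_pcrel (0x10) | DW_EH_PE_sdata4 (0x0b)
+**   0x9b = DW_EH_PE_indirect (0x80) | DW_EH_PE_pcrel (0x10) | DW_EH_PE_sdata4 (0x0b)
+*/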
+
diff --git a/src/LuaJIT/src/buildvm_x86.h b/src/LuaJIT/src/buildvm_x86.h
new file mode 100644
index 000000000..cd33cf877
--- /dev/null
+++ b/src/LuaJIT/src/buildvm_x86.h
@@ -0,0 +1,3561 @@
+/*
+** This file has been pre-processed with DynASM.
+** http://luajit.org/dynasm.html
+** DynASM version 1.3.0, DynASM x86 version 1.3.0
+** DO NOT EDIT! The original file is in "buildvm_x86.dasc".
+*/
+
+#if DASM_VERSION != 10300
+#error "Version mismatch between DynASM and included encoding engine"
+#endif
+
+#define DASM_SECTION_CODE_OP 0
+#define DASM_SECTION_CODE_SUB 1
+#define DASM_MAXSECTION 2
+static const unsigned char build_actionlist[17321] = {
+ 254,1,248,10,252,247,198,237,15,132,244,11,131,230,252,248,41,252,242,141,
+ 76,49,252,248,139,114,252,252,199,68,10,4,237,248,12,131,192,1,137,68,36,
+ 20,252,247,198,237,15,132,244,13,248,14,129,252,246,239,252,247,198,237,15,
+ 133,244,10,199,131,233,237,131,230,252,248,41,214,252,247,222,131,232,1,15,
+ 132,244,248,248,1,139,44,10,137,106,252,248,139,108,10,4,137,106,252,252,
+ 131,194,8,131,232,1,15,133,244,1,248,2,255,139,108,36,48,137,181,233,248,
+ 3,139,68,36,20,139,76,36,56,248,4,57,193,15,133,244,252,248,5,131,252,234,
+ 8,137,149,233,248,15,139,76,36,52,137,141,233,49,192,248,16,131,196,28,91,
+ 94,95,93,195,248,6,15,130,244,253,59,149,233,15,135,244,254,199,66,252,252,
+ 237,131,194,8,131,192,1,252,233,244,4,248,7,255,133,201,15,132,244,5,41,193,
+ 141,20,202,252,233,244,5,248,8,137,149,233,137,68,36,20,137,202,137,252,233,
+ 232,251,1,0,139,149,233,252,233,244,3,248,17,137,208,137,204,248,18,139,108,
+ 36,48,139,173,233,199,133,233,237,252,233,244,16,248,19,248,20,129,225,239,
+ 137,204,248,21,255,139,108,36,48,185,252,248,252,255,252,255,252,255,184,
+ 237,139,149,233,139,157,233,129,195,239,139,114,252,252,199,66,252,252,237,
+ 199,131,233,237,252,233,244,12,248,22,186,237,252,233,244,248,248,23,131,
+ 232,8,252,233,244,247,248,24,141,68,194,252,248,248,1,15,182,142,233,131,
+ 198,4,137,149,233,255,137,133,233,137,116,36,24,137,202,248,2,137,252,233,
+ 232,251,1,0,139,149,233,139,133,233,139,106,252,248,41,208,193,232,3,131,
+ 192,1,139,181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,171,
+ 248,25,85,87,86,83,131,252,236,28,139,108,36,48,139,76,36,52,190,237,49,192,
+ 141,188,253,36,233,139,157,233,129,195,239,137,189,233,137,68,36,24,137,68,
+ 36,52,56,133,233,15,132,244,249,199,131,233,237,136,133,233,139,149,233,139,
+ 133,233,41,200,193,232,3,131,192,1,41,209,139,114,252,252,137,68,36,20,252,
+ 247,198,237,255,15,132,244,13,252,233,244,14,248,26,85,87,86,83,131,252,236,
+ 28,190,237,252,233,244,247,248,27,85,87,86,83,131,252,236,28,190,237,248,
+ 1,139,108,36,48,139,76,36,52,139,189,233,137,124,36,52,137,108,36,24,137,
+ 165,233,248,2,139,157,233,129,195,239,248,3,199,131,233,237,139,149,233,255,
+ 1,206,41,214,139,133,233,41,200,193,232,3,131,192,1,248,28,139,105,252,248,
+ 129,121,253,252,252,239,15,133,244,29,248,30,137,202,137,114,252,252,139,
+ 181,233,139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,171,248,31,
+ 85,87,86,83,131,252,236,28,139,108,36,48,139,68,36,56,139,76,36,52,139,84,
+ 36,60,137,108,36,24,139,189,233,43,189,233,199,68,36,60,0,0,0,0,137,124,36,
+ 56,137,68,36,8,137,76,36,4,137,44,36,139,189,233,137,124,36,52,137,165,233,
+ 252,255,210,133,192,15,132,244,15,137,193,190,237,252,233,244,2,248,11,1,
+ 209,131,230,252,248,137,213,41,252,242,199,68,193,252,252,237,137,200,139,
+ 117,252,244,255,139,77,252,240,255,131,252,249,1,15,134,244,247,255,139,122,
+ 252,248,139,191,233,139,191,233,252,255,225,255,248,1,15,132,244,32,41,213,
+ 193,252,237,3,141,69,252,255,252,233,244,33,255,248,34,15,182,78,252,255,
+ 131,252,237,16,141,12,202,41,252,233,15,132,244,35,252,247,217,193,252,233,
+ 3,137,76,36,8,139,72,4,139,0,137,77,4,137,69,0,137,108,36,4,252,233,244,36,
+ 248,37,137,68,36,16,199,68,36,20,237,141,68,36,16,128,126,252,252,235,15,
+ 133,244,247,141,139,233,137,41,199,65,4,237,137,205,252,233,244,248,248,38,
+ 15,182,70,252,254,255,199,68,36,20,237,137,68,36,16,255,252,242,15,42,192,
+ 252,242,15,17,68,36,16,255,137,68,36,12,219,68,36,12,221,92,36,16,255,141,
+ 68,36,16,252,233,244,247,248,39,15,182,70,252,254,141,4,194,248,1,15,182,
+ 110,252,255,141,44,252,234,248,2,137,108,36,4,139,108,36,48,137,68,36,8,137,
+ 44,36,137,149,233,137,116,36,24,232,251,1,1,139,149,233,133,192,15,132,244,
+ 249,248,35,15,182,78,252,253,139,104,4,139,0,137,108,202,4,137,4,202,139,
+ 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,3,139,141,
+ 233,137,113,252,244,141,177,233,41,214,139,105,252,248,184,237,252,233,244,
+ 30,248,40,137,68,36,16,199,68,36,20,237,141,68,36,16,128,126,252,252,235,
+ 15,133,244,247,255,141,139,233,137,41,199,65,4,237,137,205,252,233,244,248,
+ 248,41,15,182,70,252,254,255,141,68,36,16,252,233,244,247,248,42,15,182,70,
+ 252,254,141,4,194,248,1,15,182,110,252,255,141,44,252,234,248,2,137,108,36,
+ 4,139,108,36,48,137,68,36,8,137,44,36,137,149,233,137,116,36,24,232,251,1,
+ 2,139,149,233,133,192,15,132,244,249,15,182,78,252,253,139,108,202,4,139,
+ 12,202,137,104,4,137,8,248,43,139,6,15,182,204,15,182,232,131,198,4,193,232,
+ 16,252,255,36,171,248,3,139,141,233,137,113,252,244,15,182,70,252,253,139,
+ 108,194,4,139,4,194,137,105,20,137,65,16,141,177,233,41,214,139,105,252,248,
+ 184,237,252,233,244,30,248,44,15,182,110,252,252,141,4,194,141,12,202,137,
+ 108,36,12,139,108,36,48,137,68,36,8,137,76,36,4,137,44,36,137,149,233,137,
+ 116,36,24,232,251,1,3,248,3,139,149,233,255,131,252,248,1,15,135,244,45,248,
+ 4,141,118,4,15,130,244,252,248,5,15,183,70,252,254,141,180,253,134,233,248,
+ 6,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,46,
+ 131,198,4,129,120,253,4,239,15,130,244,5,252,233,244,6,248,47,129,120,253,
+ 4,239,252,233,244,4,248,48,131,252,238,4,137,108,36,12,139,108,36,48,137,
+ 68,36,8,137,76,36,4,137,44,36,137,149,233,255,137,116,36,24,232,251,1,4,252,
+ 233,244,3,248,49,255,131,252,238,4,139,108,36,48,137,149,233,137,252,233,
+ 139,86,252,252,137,116,36,24,232,251,1,5,252,233,244,3,255,248,50,255,15,
+ 182,110,252,255,255,248,51,141,4,199,252,233,244,247,248,52,255,248,53,141,
+ 4,199,141,44,252,234,149,252,233,244,248,248,54,141,4,194,137,197,252,233,
+ 244,248,248,55,255,248,56,141,4,194,248,1,141,44,252,234,248,2,141,12,202,
+ 137,108,36,8,139,108,36,48,137,68,36,12,15,182,70,252,252,137,76,36,4,137,
+ 68,36,16,137,44,36,137,149,233,137,116,36,24,232,251,1,6,139,149,233,133,
+ 192,15,132,244,43,248,45,137,193,41,208,137,113,252,244,141,176,233,184,237,
+ 252,233,244,28,248,57,139,108,36,48,137,149,233,141,20,194,137,252,233,137,
+ 116,36,24,232,251,1,7,139,149,233,255,133,192,15,133,244,45,15,183,70,252,
+ 254,139,12,194,252,233,244,58,255,252,233,244,45,255,248,59,141,76,202,8,
+ 248,29,137,76,36,20,137,68,36,16,131,252,233,8,141,4,193,139,108,36,48,137,
+ 76,36,4,137,68,36,8,137,44,36,137,149,233,137,116,36,24,232,251,1,8,139,149,
+ 233,139,76,36,20,139,68,36,16,139,105,252,248,131,192,1,57,215,15,132,244,
+ 60,137,202,137,114,252,252,139,181,233,139,14,15,182,252,233,15,182,205,131,
+ 198,4,252,255,36,171,248,61,139,108,36,48,137,149,233,137,202,137,252,233,
+ 137,116,36,24,232,251,1,9,139,149,233,139,70,252,252,15,182,204,15,182,232,
+ 193,232,16,252,255,164,253,171,233,248,62,129,252,248,239,15,130,244,63,139,
+ 106,4,129,252,253,239,15,131,244,63,139,114,252,252,137,68,36,20,137,106,
+ 252,252,139,42,137,106,252,248,131,232,2,15,132,244,248,255,137,209,248,1,
+ 131,193,8,139,105,4,137,105,252,252,139,41,137,105,252,248,131,232,1,15,133,
+ 244,1,248,2,139,68,36,20,252,233,244,64,248,65,129,252,248,239,15,130,244,
+ 63,139,106,4,184,237,252,247,213,57,232,255,15,71,197,255,15,134,244,247,
+ 137,232,248,1,255,248,2,139,106,252,248,139,132,253,197,233,139,114,252,252,
+ 199,66,252,252,237,137,66,252,248,252,233,244,66,248,67,129,252,248,239,15,
+ 130,244,63,139,106,4,139,114,252,252,129,252,253,239,15,133,244,252,248,1,
+ 139,42,139,173,233,248,2,133,252,237,199,66,252,252,237,15,132,244,66,139,
+ 131,233,199,66,252,252,237,255,137,106,252,248,139,141,233,35,136,233,105,
+ 201,239,3,141,233,248,3,129,185,233,239,15,133,244,250,57,129,233,15,132,
+ 244,251,248,4,139,137,233,133,201,15,133,244,3,252,233,244,66,248,5,139,105,
+ 4,129,252,253,239,255,15,132,244,66,139,1,137,106,252,252,137,66,252,248,
+ 252,233,244,66,248,6,129,252,253,239,15,132,244,1,129,252,253,239,15,135,
+ 244,254,189,237,248,8,252,247,213,139,172,253,171,233,252,233,244,2,248,68,
+ 129,252,248,239,15,130,244,63,255,129,122,253,4,239,15,133,244,63,139,42,
+ 131,189,233,0,15,133,244,63,129,122,253,12,239,15,133,244,63,139,66,8,137,
+ 133,233,139,114,252,252,199,66,252,252,237,137,106,252,248,252,246,133,233,
+ 235,15,132,244,247,128,165,233,235,139,131,233,137,171,233,137,133,233,248,
+ 1,255,252,233,244,66,248,69,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,133,244,63,139,2,139,108,36,48,137,68,36,4,137,44,36,137,213,131,194,8,
+ 137,84,36,8,232,251,1,10,137,252,234,139,40,139,64,4,139,114,252,252,137,
+ 106,252,248,137,66,252,252,252,233,244,66,248,70,129,252,248,239,15,133,244,
+ 63,129,122,253,4,239,255,15,133,244,247,139,42,252,233,244,71,248,1,15,135,
+ 244,63,255,15,131,244,63,255,252,242,15,16,2,252,233,244,72,255,221,2,252,
+ 233,244,73,255,248,74,129,252,248,239,15,130,244,63,139,114,252,252,129,122,
+ 253,4,239,15,133,244,249,139,2,248,2,199,66,252,252,237,137,66,252,248,252,
+ 233,244,66,248,3,129,122,253,4,239,15,135,244,63,131,187,233,0,15,133,244,
+ 63,139,171,233,59,171,233,255,15,130,244,247,232,244,75,248,1,139,108,36,
+ 48,137,149,233,137,116,36,24,137,252,233,255,232,251,1,11,255,232,251,1,12,
+ 255,139,149,233,252,233,244,2,248,76,129,252,248,239,15,130,244,63,15,132,
+ 244,248,248,1,129,122,253,4,239,15,133,244,63,139,108,36,48,137,149,233,137,
+ 149,233,139,114,252,252,139,2,137,68,36,4,137,44,36,131,194,8,137,84,36,8,
+ 137,116,36,24,232,251,1,13,139,149,233,133,192,15,132,244,249,139,106,8,139,
+ 66,12,137,106,252,248,137,66,252,252,139,106,16,139,66,20,137,42,137,66,4,
+ 248,77,184,237,255,252,233,244,78,248,2,199,66,12,237,252,233,244,1,248,3,
+ 199,66,252,252,237,252,233,244,66,248,79,129,252,248,239,15,130,244,63,139,
+ 42,129,122,253,4,239,15,133,244,63,255,131,189,233,0,15,133,244,63,255,139,
+ 106,252,248,139,133,233,139,114,252,252,199,66,252,252,237,137,66,252,248,
+ 199,66,12,237,184,237,252,233,244,78,248,80,129,252,248,239,15,130,244,63,
+ 129,122,253,4,239,15,133,244,63,129,122,253,12,239,255,139,114,252,252,255,
+ 139,66,8,131,192,1,199,66,252,252,237,137,66,252,248,255,252,242,15,16,66,
+ 8,189,0,0,252,240,63,102,15,110,205,102,15,112,201,81,252,242,15,88,193,252,
+ 242,15,45,192,252,242,15,17,66,252,248,255,221,66,8,217,232,222,193,219,20,
+ 36,221,90,252,248,139,4,36,255,139,42,59,133,233,15,131,244,248,193,224,3,
+ 3,133,233,248,1,129,120,253,4,239,15,132,244,81,139,40,139,64,4,137,42,137,
+ 66,4,252,233,244,77,248,2,131,189,233,0,15,132,244,81,137,252,233,137,213,
+ 137,194,232,251,1,14,137,252,234,133,192,15,133,244,1,248,81,184,237,252,
+ 233,244,78,248,82,255,139,106,252,248,139,133,233,139,114,252,252,199,66,
+ 252,252,237,137,66,252,248,255,199,66,12,237,199,66,8,0,0,0,0,255,15,87,192,
+ 252,242,15,17,66,8,255,217,252,238,221,90,8,255,184,237,252,233,244,78,248,
+ 83,129,252,248,239,15,130,244,63,141,74,8,131,232,1,190,237,248,1,15,182,
+ 171,233,193,252,237,235,131,229,1,1,252,238,252,233,244,28,248,84,129,252,
+ 248,239,15,130,244,63,129,122,253,12,239,15,133,244,63,255,139,106,4,137,
+ 106,12,199,66,4,237,139,42,139,114,8,137,106,8,137,50,141,74,16,131,232,2,
+ 190,237,252,233,244,1,248,85,129,252,248,239,15,130,244,63,139,42,139,114,
+ 252,252,137,116,36,24,137,44,36,129,122,253,4,239,15,133,244,63,131,189,233,
+ 0,15,133,244,63,128,189,233,235,15,135,244,63,139,141,233,15,132,244,247,
+ 255,59,141,233,15,132,244,63,248,1,141,116,193,252,240,59,181,233,15,135,
+ 244,63,137,181,233,139,108,36,48,137,149,233,131,194,8,137,149,233,141,108,
+ 194,232,41,252,245,57,206,15,132,244,249,248,2,139,68,46,4,137,70,252,252,
+ 139,4,46,137,70,252,248,131,252,238,8,57,206,15,133,244,2,248,3,137,76,36,
+ 4,49,201,137,76,36,12,137,76,36,8,232,244,25,199,131,233,237,255,139,108,
+ 36,48,139,52,36,139,149,233,129,252,248,239,15,135,244,254,248,4,139,142,
+ 233,139,190,233,137,142,233,137,252,254,41,206,15,132,244,252,141,4,50,193,
+ 252,238,3,59,133,233,15,135,244,255,137,213,41,205,248,5,139,1,137,4,41,139,
+ 65,4,137,68,41,4,131,193,8,57,252,249,15,133,244,5,248,6,141,70,2,199,66,
+ 252,252,237,248,7,139,116,36,24,137,68,36,20,185,252,248,252,255,252,255,
+ 252,255,252,247,198,237,255,15,132,244,13,252,233,244,14,248,8,199,66,252,
+ 252,237,139,142,233,131,252,233,8,137,142,233,139,1,137,2,139,65,4,137,66,
+ 4,184,237,252,233,244,7,248,9,139,12,36,137,185,233,137,252,242,137,252,233,
+ 232,251,1,0,139,52,36,139,149,233,252,233,244,4,248,86,139,106,252,248,139,
+ 173,233,139,114,252,252,137,116,36,24,137,44,36,131,189,233,0,15,133,244,
+ 63,255,128,189,233,235,15,135,244,63,139,141,233,15,132,244,247,59,141,233,
+ 15,132,244,63,248,1,141,116,193,252,248,59,181,233,15,135,244,63,137,181,
+ 233,139,108,36,48,137,149,233,137,149,233,141,108,194,252,240,41,252,245,
+ 57,206,15,132,244,249,248,2,255,139,68,46,4,137,70,252,252,139,4,46,137,70,
+ 252,248,131,252,238,8,57,206,15,133,244,2,248,3,137,76,36,4,49,201,137,76,
+ 36,12,137,76,36,8,232,244,25,199,131,233,237,139,108,36,48,139,52,36,139,
+ 149,233,129,252,248,239,15,135,244,254,248,4,139,142,233,139,190,233,137,
+ 142,233,137,252,254,41,206,15,132,244,252,141,4,50,193,252,238,3,59,133,233,
+ 15,135,244,255,255,137,213,41,205,248,5,139,1,137,4,41,139,65,4,137,68,41,
+ 4,131,193,8,57,252,249,15,133,244,5,248,6,141,70,1,248,7,139,116,36,24,137,
+ 68,36,20,49,201,252,247,198,237,15,132,244,13,252,233,244,14,248,8,137,252,
+ 242,137,252,233,232,251,1,15,248,9,139,12,36,137,185,233,137,252,242,137,
+ 252,233,232,251,1,0,139,52,36,139,149,233,252,233,244,4,248,87,139,108,36,
+ 48,252,247,133,233,237,15,132,244,63,255,137,149,233,141,68,194,252,248,137,
+ 133,233,49,192,137,133,233,176,235,136,133,233,252,233,244,16,255,248,71,
+ 255,248,73,139,114,252,252,221,90,252,248,252,233,244,66,255,248,88,129,252,
+ 248,239,15,130,244,63,255,129,122,253,4,239,15,133,244,248,139,42,131,252,
+ 253,0,15,137,244,71,252,247,221,15,136,244,247,248,89,248,71,139,114,252,
+ 252,199,66,252,252,237,137,106,252,248,252,233,244,66,248,1,139,114,252,252,
+ 199,66,252,252,0,0,224,65,199,66,252,248,0,0,0,0,252,233,244,66,248,2,15,
+ 135,244,63,255,129,122,253,4,239,15,131,244,63,255,252,242,15,16,2,102,15,
+ 252,239,201,102,15,118,201,102,15,115,209,1,15,84,193,248,72,139,114,252,
+ 252,252,242,15,17,66,252,248,255,221,2,217,225,248,72,248,73,139,114,252,
+ 252,221,90,252,248,255,248,66,184,237,248,78,137,68,36,20,248,64,252,247,
+ 198,237,15,133,244,253,248,5,56,70,252,255,15,135,244,252,15,182,78,252,253,
+ 252,247,209,141,20,202,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,
+ 255,36,171,248,6,199,68,194,252,244,237,131,192,1,252,233,244,5,248,7,185,
+ 252,248,252,255,252,255,252,255,252,233,244,14,248,90,255,129,122,253,4,239,
+ 15,133,244,247,139,42,252,233,244,71,248,1,15,135,244,63,255,252,242,15,16,
+ 2,232,244,91,255,252,242,15,45,232,129,252,253,0,0,0,128,15,133,244,71,252,
+ 242,15,42,205,102,15,46,193,15,138,244,72,15,132,244,71,255,221,2,232,244,
+ 91,255,219,20,36,139,44,36,129,252,253,0,0,0,128,15,133,244,248,217,192,219,
+ 4,36,255,223,252,233,221,216,255,218,252,233,223,224,158,255,15,138,244,73,
+ 15,133,244,73,248,2,221,216,252,233,244,71,255,248,92,255,252,242,15,16,2,
+ 232,244,93,255,221,2,232,244,93,255,248,94,129,252,248,239,15,130,244,63,
+ 129,122,253,4,239,15,131,244,63,252,242,15,81,2,252,233,244,72,255,248,94,
+ 129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,221,2,217,252,
+ 250,252,233,244,73,255,248,95,129,252,248,239,15,130,244,63,129,122,253,4,
+ 239,15,131,244,63,217,252,237,221,2,217,252,241,252,233,244,73,248,96,129,
+ 252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,217,252,236,221,
+ 2,217,252,241,252,233,244,73,248,97,129,252,248,239,255,15,130,244,63,129,
+ 122,253,4,239,15,131,244,63,221,2,232,244,98,252,233,244,73,248,99,129,252,
+ 248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,221,2,217,252,254,252,
+ 233,244,73,248,100,129,252,248,239,255,15,130,244,63,129,122,253,4,239,15,
+ 131,244,63,221,2,217,252,255,252,233,244,73,248,101,129,252,248,239,15,130,
+ 244,63,129,122,253,4,239,15,131,244,63,221,2,217,252,242,221,216,252,233,
+ 244,73,248,102,129,252,248,239,15,130,244,63,255,129,122,253,4,239,15,131,
+ 244,63,221,2,217,192,216,200,217,232,222,225,217,252,250,217,252,243,252,
+ 233,244,73,248,103,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,
+ 244,63,221,2,217,192,216,200,217,232,222,225,217,252,250,217,201,217,252,
+ 243,252,233,244,73,248,104,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,131,244,63,255,221,2,217,232,217,252,243,252,233,244,73,255,248,105,129,
+ 252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,252,242,15,16,2,
+ 252,242,15,17,4,36,255,248,105,129,252,248,239,15,130,244,63,129,122,253,
+ 4,239,15,131,244,63,221,2,221,28,36,255,137,213,232,251,1,16,137,252,234,
+ 252,233,244,73,255,248,106,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,131,244,63,252,242,15,16,2,252,242,15,17,4,36,255,248,106,129,252,248,
+ 239,15,130,244,63,129,122,253,4,239,15,131,244,63,221,2,221,28,36,255,137,
+ 213,232,251,1,17,137,252,234,252,233,244,73,255,248,107,129,252,248,239,15,
+ 130,244,63,129,122,253,4,239,15,131,244,63,252,242,15,16,2,252,242,15,17,
+ 4,36,255,248,107,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,
+ 63,221,2,221,28,36,255,137,213,232,251,1,18,137,252,234,252,233,244,73,248,
+ 108,255,248,109,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,
+ 63,252,242,15,16,2,139,106,252,248,252,242,15,89,133,233,252,233,244,72,255,
+ 248,109,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,221,
+ 2,139,106,252,248,220,141,233,252,233,244,73,255,248,110,129,252,248,239,
+ 15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,
+ 63,221,2,221,66,8,217,252,243,252,233,244,73,248,111,129,252,248,239,15,130,
+ 244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,255,15,131,244,
+ 63,221,66,8,221,2,217,252,253,221,217,252,233,244,73,248,112,129,252,248,
+ 239,15,130,244,63,139,106,4,129,252,253,239,15,131,244,63,139,114,252,252,
+ 139,2,137,106,252,252,137,66,252,248,209,229,129,252,253,0,0,224,252,255,
+ 15,131,244,249,9,232,15,132,244,249,184,252,254,3,0,0,129,252,253,0,0,32,
+ 0,15,130,244,250,248,1,193,252,237,21,41,197,255,252,242,15,42,197,255,137,
+ 108,36,16,219,68,36,16,255,139,106,252,252,129,229,252,255,252,255,15,128,
+ 129,205,0,0,224,63,137,106,252,252,248,2,255,252,242,15,17,2,255,221,26,255,
+ 184,237,252,233,244,78,248,3,255,15,87,192,252,233,244,2,255,217,252,238,
+ 252,233,244,2,255,248,4,255,252,242,15,16,2,189,0,0,80,67,102,15,110,205,
+ 102,15,112,201,81,252,242,15,89,193,252,242,15,17,66,252,248,255,221,2,199,
+ 68,36,16,0,0,128,90,216,76,36,16,221,90,252,248,255,139,106,252,252,184,52,
+ 4,0,0,209,229,252,233,244,1,255,248,113,129,252,248,239,15,130,244,63,129,
+ 122,253,4,239,15,131,244,63,252,242,15,16,2,255,248,113,129,252,248,239,15,
+ 130,244,63,129,122,253,4,239,15,131,244,63,221,2,255,139,106,4,139,114,252,
+ 252,209,229,129,252,253,0,0,224,252,255,15,132,244,250,255,15,40,224,232,
+ 244,114,252,242,15,92,224,248,1,252,242,15,17,66,252,248,252,242,15,17,34,
+ 255,217,192,232,244,114,220,252,233,248,1,221,90,252,248,221,26,255,139,66,
+ 252,252,139,106,4,49,232,15,136,244,249,248,2,184,237,252,233,244,78,248,
+ 3,129,252,245,0,0,0,128,137,106,4,252,233,244,2,248,4,255,15,87,228,252,233,
+ 244,1,255,217,252,238,217,201,252,233,244,1,255,248,115,129,252,248,239,15,
+ 130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,
+ 63,221,66,8,221,2,248,1,217,252,248,223,224,158,15,138,244,1,221,217,252,
+ 233,244,73,255,248,116,129,252,248,239,15,130,244,63,129,122,253,4,239,15,
+ 131,244,63,129,122,253,12,239,15,131,244,63,252,242,15,16,2,252,242,15,16,
+ 74,8,232,244,117,252,233,244,72,255,248,116,129,252,248,239,15,130,244,63,
+ 129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,63,221,2,221,
+ 66,8,232,244,117,252,233,244,73,255,248,118,185,2,0,0,0,129,122,253,4,239,
+ 255,15,133,244,250,139,42,248,1,57,193,15,131,244,71,129,124,253,202,252,
+ 252,239,15,133,244,249,59,108,202,252,248,15,79,108,202,252,248,131,193,1,
+ 252,233,244,1,248,3,15,135,244,63,255,252,233,244,252,248,4,15,135,244,63,
+ 255,252,242,15,16,2,248,5,57,193,15,131,244,72,129,124,253,202,252,252,239,
+ 255,15,130,244,252,15,135,244,63,252,242,15,42,76,202,252,248,252,233,244,
+ 253,255,248,6,252,242,15,16,76,202,252,248,248,7,252,242,15,93,193,131,193,
+ 1,252,233,244,5,255,221,2,248,5,57,193,15,131,244,73,129,124,253,202,252,
+ 252,239,255,15,130,244,252,15,135,244,255,219,68,202,252,248,252,233,244,
+ 253,255,15,131,244,255,255,248,6,221,68,202,252,248,248,7,255,219,252,233,
+ 219,209,221,217,255,80,221,225,223,224,252,246,196,1,15,132,244,248,217,201,
+ 248,2,221,216,88,255,248,119,185,2,0,0,0,129,122,253,4,239,255,15,133,244,
+ 250,139,42,248,1,57,193,15,131,244,71,129,124,253,202,252,252,239,15,133,
+ 244,249,59,108,202,252,248,15,76,108,202,252,248,131,193,1,252,233,244,1,
+ 248,3,15,135,244,63,255,248,6,252,242,15,16,76,202,252,248,248,7,252,242,
+ 15,95,193,131,193,1,252,233,244,5,255,219,252,233,218,209,221,217,255,80,
+ 221,225,223,224,252,246,196,1,15,133,244,248,217,201,248,2,221,216,88,255,
+ 248,9,221,216,252,233,244,63,255,248,120,129,252,248,239,15,130,244,63,129,
+ 122,253,4,239,15,133,244,63,139,42,255,139,173,233,252,233,244,71,255,252,
+ 242,15,42,133,233,252,233,244,72,255,219,133,233,252,233,244,73,255,248,121,
+ 129,252,248,239,15,133,244,63,129,122,253,4,239,15,133,244,63,139,42,139,
+ 114,252,252,131,189,233,1,15,130,244,81,15,182,173,233,255,252,242,15,42,
+ 197,252,233,244,72,255,137,108,36,16,219,68,36,16,252,233,244,73,255,248,
+ 122,139,171,233,59,171,233,15,130,244,247,232,244,75,248,1,129,252,248,239,
+ 15,133,244,63,129,122,253,4,239,255,15,133,244,63,139,42,129,252,253,252,
+ 255,0,0,0,15,135,244,63,137,108,36,20,255,15,131,244,63,252,242,15,44,42,
+ 129,252,253,252,255,0,0,0,15,135,244,63,137,108,36,20,255,15,131,244,63,221,
+ 2,219,92,36,20,129,124,36,20,252,255,0,0,0,15,135,244,63,255,199,68,36,8,
+ 1,0,0,0,141,68,36,20,248,123,139,108,36,48,137,149,233,137,68,36,4,137,44,
+ 36,137,116,36,24,232,251,1,19,139,149,233,139,114,252,252,199,66,252,252,
+ 237,137,66,252,248,252,233,244,66,248,124,139,171,233,59,171,233,15,130,244,
+ 247,232,244,75,248,1,199,68,36,20,252,255,252,255,252,255,252,255,129,252,
+ 248,239,15,130,244,63,15,134,244,247,129,122,253,20,239,255,15,133,244,63,
+ 139,106,16,137,108,36,20,255,15,131,244,63,252,242,15,44,106,16,137,108,36,
+ 20,255,15,131,244,63,221,66,16,219,92,36,20,255,248,1,129,122,253,4,239,15,
+ 133,244,63,129,122,253,12,239,255,139,42,137,108,36,12,139,173,233,255,139,
+ 74,8,255,252,242,15,44,74,8,255,221,66,8,219,92,36,8,139,76,36,8,255,139,
+ 68,36,20,57,197,15,130,244,251,248,2,133,201,15,142,244,253,248,3,139,108,
+ 36,12,41,200,15,140,244,125,141,172,253,13,233,131,192,1,248,4,137,68,36,
+ 8,137,232,252,233,244,123,248,5,15,140,244,252,141,68,40,1,252,233,244,2,
+ 248,6,137,232,252,233,244,2,248,7,255,15,132,244,254,1,252,233,131,193,1,
+ 15,143,244,3,248,8,185,1,0,0,0,252,233,244,3,248,125,49,192,252,233,244,4,
+ 248,126,129,252,248,239,15,130,244,63,139,171,233,59,171,233,15,130,244,247,
+ 232,244,75,248,1,255,129,122,253,4,239,15,133,244,63,129,122,253,12,239,139,
+ 42,255,15,133,244,63,139,66,8,255,15,131,244,63,252,242,15,44,66,8,255,15,
+ 131,244,63,221,66,8,219,92,36,20,139,68,36,20,255,133,192,15,142,244,125,
+ 131,189,233,1,15,130,244,125,15,133,244,127,57,131,233,15,130,244,127,15,
+ 182,141,233,139,171,233,137,68,36,8,248,1,136,77,0,131,197,1,131,232,1,15,
+ 133,244,1,139,131,233,252,233,244,123,248,128,129,252,248,239,255,15,130,
+ 244,63,139,171,233,59,171,233,15,130,244,247,232,244,75,248,1,129,122,253,
+ 4,239,15,133,244,63,139,42,139,133,233,133,192,15,132,244,125,57,131,233,
+ 15,130,244,129,129,197,239,137,116,36,20,137,68,36,8,139,179,233,248,1,255,
+ 15,182,77,0,131,197,1,131,232,1,136,12,6,15,133,244,1,137,252,240,139,116,
+ 36,20,252,233,244,123,248,130,129,252,248,239,15,130,244,63,139,171,233,59,
+ 171,233,15,130,244,247,232,244,75,248,1,129,122,253,4,239,15,133,244,63,139,
+ 42,139,133,233,57,131,233,255,15,130,244,129,129,197,239,137,116,36,20,137,
+ 68,36,8,139,179,233,252,233,244,249,248,1,15,182,76,5,0,131,252,249,65,15,
+ 130,244,248,131,252,249,90,15,135,244,248,131,252,241,32,248,2,136,12,6,248,
+ 3,131,232,1,15,137,244,1,137,252,240,139,116,36,20,252,233,244,123,248,131,
+ 129,252,248,239,15,130,244,63,255,139,171,233,59,171,233,15,130,244,247,232,
+ 244,75,248,1,129,122,253,4,239,15,133,244,63,139,42,139,133,233,57,131,233,
+ 15,130,244,129,129,197,239,137,116,36,20,137,68,36,8,139,179,233,252,233,
+ 244,249,248,1,15,182,76,5,0,131,252,249,97,15,130,244,248,255,131,252,249,
+ 122,15,135,244,248,131,252,241,32,248,2,136,12,6,248,3,131,232,1,15,137,244,
+ 1,137,252,240,139,116,36,20,252,233,244,123,248,132,129,252,248,239,15,130,
+ 244,63,129,122,253,4,239,15,133,244,63,137,213,139,10,232,251,1,20,137,252,
+ 234,255,137,197,252,233,244,71,255,252,242,15,42,192,252,233,244,72,255,137,
+ 4,36,219,4,36,252,233,244,73,255,248,133,129,252,248,239,15,130,244,63,129,
+ 122,253,4,239,255,15,133,244,247,139,42,252,233,244,89,248,1,15,135,244,63,
+ 255,252,242,15,16,2,189,0,0,56,67,102,15,110,205,102,15,112,201,81,252,242,
+ 15,88,193,102,15,126,197,255,221,2,199,68,36,16,0,0,192,89,216,68,36,16,221,
+ 28,36,255,139,44,36,255,252,233,244,89,255,248,134,129,252,248,239,15,130,
+ 244,63,255,189,0,0,56,67,102,15,110,205,102,15,112,201,81,255,199,68,36,16,
+ 0,0,192,89,255,15,133,244,247,139,42,252,233,244,248,248,1,15,135,244,63,
+ 255,252,242,15,16,2,252,242,15,88,193,102,15,126,197,255,221,2,216,68,36,
+ 16,221,28,36,139,44,36,255,248,2,137,68,36,20,141,68,194,252,240,248,1,57,
+ 208,15,134,244,89,129,120,253,4,239,255,15,133,244,248,35,40,131,232,8,252,
+ 233,244,1,248,2,15,135,244,135,255,15,131,244,135,255,252,242,15,16,0,252,
+ 242,15,88,193,102,15,126,193,33,205,255,221,0,216,68,36,16,221,28,36,35,44,
+ 36,255,131,232,8,252,233,244,1,248,136,129,252,248,239,15,130,244,63,255,
+ 15,133,244,248,11,40,131,232,8,252,233,244,1,248,2,15,135,244,135,255,252,
+ 242,15,16,0,252,242,15,88,193,102,15,126,193,9,205,255,221,0,216,68,36,16,
+ 221,28,36,11,44,36,255,131,232,8,252,233,244,1,248,137,129,252,248,239,15,
+ 130,244,63,255,15,133,244,248,51,40,131,232,8,252,233,244,1,248,2,15,135,
+ 244,135,255,252,242,15,16,0,252,242,15,88,193,102,15,126,193,49,205,255,221,
+ 0,216,68,36,16,221,28,36,51,44,36,255,131,232,8,252,233,244,1,248,138,129,
+ 252,248,239,15,130,244,63,129,122,253,4,239,255,221,2,199,68,36,16,0,0,192,
+ 89,216,68,36,16,221,28,36,139,44,36,255,248,2,15,205,252,233,244,89,248,139,
+ 129,252,248,239,15,130,244,63,129,122,253,4,239,255,248,2,252,247,213,255,
+ 248,89,252,242,15,42,197,252,233,244,72,255,248,89,137,44,36,219,4,36,252,
+ 233,244,73,255,248,135,139,68,36,20,252,233,244,63,255,248,140,129,252,248,
+ 239,15,130,244,63,129,122,253,4,239,255,248,2,129,122,253,12,239,15,133,244,
+ 63,139,74,8,255,248,140,129,252,248,239,15,130,244,63,129,122,253,4,239,15,
+ 131,244,63,129,122,253,12,239,15,131,244,63,252,242,15,16,2,252,242,15,16,
+ 74,8,189,0,0,56,67,102,15,110,213,102,15,112,210,81,252,242,15,88,194,252,
+ 242,15,88,202,102,15,126,197,102,15,126,201,255,248,140,129,252,248,239,15,
+ 130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,
+ 63,221,2,221,66,8,199,68,36,16,0,0,192,89,216,68,36,16,221,92,36,8,216,68,
+ 36,16,221,28,36,139,76,36,8,139,44,36,255,211,229,252,233,244,89,255,248,
+ 141,129,252,248,239,15,130,244,63,129,122,253,4,239,255,248,141,129,252,248,
+ 239,15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,
+ 244,63,252,242,15,16,2,252,242,15,16,74,8,189,0,0,56,67,102,15,110,213,102,
+ 15,112,210,81,252,242,15,88,194,252,242,15,88,202,102,15,126,197,102,15,126,
+ 201,255,248,141,129,252,248,239,15,130,244,63,129,122,253,4,239,15,131,244,
+ 63,129,122,253,12,239,15,131,244,63,221,2,221,66,8,199,68,36,16,0,0,192,89,
+ 216,68,36,16,221,92,36,8,216,68,36,16,221,28,36,139,76,36,8,139,44,36,255,
+ 211,252,237,252,233,244,89,255,248,142,129,252,248,239,15,130,244,63,129,
+ 122,253,4,239,255,248,142,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,131,244,63,129,122,253,12,239,15,131,244,63,252,242,15,16,2,252,242,15,
+ 16,74,8,189,0,0,56,67,102,15,110,213,102,15,112,210,81,252,242,15,88,194,
+ 252,242,15,88,202,102,15,126,197,102,15,126,201,255,248,142,129,252,248,239,
+ 15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,
+ 63,221,2,221,66,8,199,68,36,16,0,0,192,89,216,68,36,16,221,92,36,8,216,68,
+ 36,16,221,28,36,139,76,36,8,139,44,36,255,211,252,253,252,233,244,89,255,
+ 248,143,129,252,248,239,15,130,244,63,129,122,253,4,239,255,248,143,129,252,
+ 248,239,15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,
+ 15,131,244,63,252,242,15,16,2,252,242,15,16,74,8,189,0,0,56,67,102,15,110,
+ 213,102,15,112,210,81,252,242,15,88,194,252,242,15,88,202,102,15,126,197,
+ 102,15,126,201,255,248,143,129,252,248,239,15,130,244,63,129,122,253,4,239,
+ 15,131,244,63,129,122,253,12,239,15,131,244,63,221,2,221,66,8,199,68,36,16,
+ 0,0,192,89,216,68,36,16,221,92,36,8,216,68,36,16,221,28,36,139,76,36,8,139,
+ 44,36,255,211,197,252,233,244,89,255,248,144,129,252,248,239,15,130,244,63,
+ 129,122,253,4,239,255,248,144,129,252,248,239,15,130,244,63,129,122,253,4,
+ 239,15,131,244,63,129,122,253,12,239,15,131,244,63,252,242,15,16,2,252,242,
+ 15,16,74,8,189,0,0,56,67,102,15,110,213,102,15,112,210,81,252,242,15,88,194,
+ 252,242,15,88,202,102,15,126,197,102,15,126,201,255,248,144,129,252,248,239,
+ 15,130,244,63,129,122,253,4,239,15,131,244,63,129,122,253,12,239,15,131,244,
+ 63,221,2,221,66,8,199,68,36,16,0,0,192,89,216,68,36,16,221,92,36,8,216,68,
+ 36,16,221,28,36,139,76,36,8,139,44,36,255,211,205,252,233,244,89,248,127,
+ 184,237,252,233,244,63,248,129,184,237,248,63,139,108,36,48,139,114,252,252,
+ 137,116,36,24,137,149,233,141,68,194,252,248,141,136,233,137,133,233,139,
+ 66,252,248,59,141,233,15,135,244,251,137,44,36,252,255,144,233,139,149,233,
+ 133,192,15,143,244,78,248,1,255,139,141,233,41,209,193,252,233,3,133,192,
+ 141,65,1,139,106,252,248,15,133,244,33,139,181,233,139,14,15,182,252,233,
+ 15,182,205,131,198,4,252,255,36,171,248,33,137,209,252,247,198,237,15,133,
+ 244,249,15,182,110,252,253,252,247,213,141,20,252,234,252,233,244,28,248,
+ 3,137,252,245,131,229,252,248,41,252,234,252,233,244,28,248,5,186,237,137,
+ 252,233,232,251,1,0,139,149,233,49,192,252,233,244,1,248,75,93,137,108,36,
+ 16,139,108,36,48,137,116,36,24,137,149,233,255,141,68,194,252,248,137,252,
+ 233,137,133,233,232,251,1,21,139,149,233,139,133,233,41,208,193,232,3,131,
+ 192,1,139,108,36,16,85,195,248,145,255,15,182,131,233,168,235,15,133,244,
+ 251,168,235,15,133,244,247,168,235,15,132,244,247,252,255,139,233,252,233,
+ 244,247,255,248,146,15,182,131,233,168,235,15,133,244,251,252,233,244,247,
+ 248,147,15,182,131,233,168,235,15,133,244,251,168,235,15,132,244,251,252,
+ 255,139,233,15,132,244,247,168,235,15,132,244,251,248,1,255,139,108,36,48,
+ 137,149,233,137,252,242,137,252,233,232,251,1,22,248,3,139,149,233,248,4,
+ 15,182,78,252,253,248,5,15,182,110,252,252,15,183,70,252,254,252,255,164,
+ 253,171,233,248,148,131,198,4,139,77,232,137,76,36,20,252,233,244,4,248,149,
+ 255,139,106,252,248,139,173,233,15,182,133,233,141,4,194,139,108,36,48,137,
+ 149,233,137,133,233,137,252,242,141,139,233,137,171,233,137,116,36,24,232,
+ 251,1,23,252,233,244,3,255,248,150,137,116,36,24,255,248,151,255,137,116,
+ 36,24,131,206,1,248,1,255,141,68,194,252,248,139,108,36,48,137,149,233,137,
+ 133,233,137,252,242,137,252,233,232,251,1,24,199,68,36,24,0,0,0,0,255,131,
+ 230,252,254,255,139,149,233,137,193,139,133,233,41,208,137,205,15,182,78,
+ 252,253,193,232,3,131,192,1,252,255,229,248,152,255,85,141,108,36,12,85,83,
+ 82,81,80,15,182,69,252,252,138,101,252,248,137,125,252,252,137,117,252,248,
+ 139,93,0,139,139,233,199,131,233,237,137,131,233,137,139,233,129,252,236,
+ 239,252,242,15,17,125,216,252,242,15,17,117,208,252,242,15,17,109,200,252,
+ 242,15,17,101,192,252,242,15,17,93,184,252,242,15,17,85,176,252,242,15,17,
+ 77,168,252,242,15,17,69,160,139,171,233,139,147,233,137,171,233,199,131,233,
+ 0,0,0,0,137,149,233,141,84,36,16,141,139,233,232,251,1,25,139,141,233,129,
+ 225,239,137,204,137,169,233,139,149,233,139,177,233,255,248,153,255,133,192,
+ 15,136,244,249,137,68,36,20,139,122,252,248,139,191,233,139,191,233,199,131,
+ 233,0,0,0,0,199,131,233,237,139,6,15,182,204,15,182,232,131,198,4,193,232,
+ 16,129,252,253,239,15,130,244,248,139,68,36,20,248,2,252,255,36,171,248,3,
+ 252,247,216,137,252,233,137,194,232,251,1,26,255,248,91,255,217,124,36,4,
+ 137,68,36,8,102,184,0,4,102,11,68,36,4,102,37,252,255,252,247,102,137,68,
+ 36,6,217,108,36,6,217,252,252,217,108,36,4,139,68,36,8,195,255,248,154,102,
+ 15,252,239,210,102,15,118,210,102,15,115,210,1,184,0,0,48,67,102,15,110,216,
+ 102,15,112,219,81,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102,
+ 15,85,208,252,242,15,88,203,252,242,15,92,203,102,15,86,202,184,0,0,252,240,
+ 63,102,15,110,208,102,15,112,210,81,252,242,15,194,193,1,102,15,84,194,252,
+ 242,15,92,200,15,40,193,248,1,195,248,93,255,217,124,36,4,137,68,36,8,102,
+ 184,0,8,102,11,68,36,4,102,37,252,255,252,251,102,137,68,36,6,217,108,36,
+ 6,217,252,252,217,108,36,4,139,68,36,8,195,255,248,155,102,15,252,239,210,
+ 102,15,118,210,102,15,115,210,1,184,0,0,48,67,102,15,110,216,102,15,112,219,
+ 81,15,40,200,102,15,84,202,102,15,46,217,15,134,244,247,102,15,85,208,252,
+ 242,15,88,203,252,242,15,92,203,102,15,86,202,184,0,0,252,240,191,102,15,
+ 110,208,102,15,112,210,81,252,242,15,194,193,6,102,15,84,194,252,242,15,92,
+ 200,15,40,193,248,1,195,248,114,255,217,124,36,4,137,68,36,8,102,184,0,12,
+ 102,11,68,36,4,102,137,68,36,6,217,108,36,6,217,252,252,217,108,36,4,139,
+ 68,36,8,195,255,248,156,102,15,252,239,210,102,15,118,210,102,15,115,210,
+ 1,184,0,0,48,67,102,15,110,216,102,15,112,219,81,15,40,200,102,15,84,202,
+ 102,15,46,217,15,134,244,247,102,15,85,208,15,40,193,252,242,15,88,203,252,
+ 242,15,92,203,184,0,0,252,240,63,102,15,110,216,102,15,112,219,81,252,242,
+ 15,194,193,1,102,15,84,195,252,242,15,92,200,102,15,86,202,15,40,193,248,
+ 1,195,248,157,255,15,40,232,252,242,15,94,193,102,15,252,239,210,102,15,118,
+ 210,102,15,115,210,1,184,0,0,48,67,102,15,110,216,102,15,112,219,81,15,40,
+ 224,102,15,84,226,102,15,46,220,15,134,244,247,102,15,85,208,252,242,15,88,
+ 227,252,242,15,92,227,102,15,86,226,184,0,0,252,240,63,102,15,110,208,102,
+ 15,112,210,81,252,242,15,194,196,1,102,15,84,194,252,242,15,92,224,15,40,
+ 197,252,242,15,89,204,252,242,15,92,193,195,248,1,252,242,15,89,200,15,40,
+ 197,252,242,15,92,193,195,255,217,193,216,252,241,217,124,36,4,102,184,0,
+ 4,102,11,68,36,4,102,37,252,255,252,247,102,137,68,36,6,217,108,36,6,217,
+ 252,252,217,108,36,4,222,201,222,252,233,195,255,248,98,217,252,234,222,201,
+ 248,158,217,84,36,4,129,124,36,4,0,0,128,127,15,132,244,247,129,124,36,4,
+ 0,0,128,252,255,15,132,244,248,248,159,217,192,217,252,252,220,252,233,217,
+ 201,217,252,240,217,232,222,193,217,252,253,221,217,248,1,195,248,2,221,216,
+ 217,252,238,195,255,248,117,219,84,36,4,219,68,36,4,255,223,252,233,255,221,
+ 252,233,223,224,158,255,15,133,244,254,15,138,244,255,221,216,139,68,36,4,
+ 131,252,248,1,15,142,244,252,248,1,169,1,0,0,0,15,133,244,248,216,200,209,
+ 232,252,233,244,1,248,2,209,232,15,132,244,251,217,192,248,3,216,200,209,
+ 232,15,132,244,250,15,131,244,3,220,201,252,233,244,3,248,4,255,222,201,248,
+ 5,195,248,6,15,132,244,5,15,130,244,253,217,232,222,252,241,252,247,216,131,
+ 252,248,1,15,132,244,5,252,233,244,1,248,7,221,216,217,232,195,248,8,217,
+ 84,36,4,217,201,217,84,36,8,139,68,36,4,209,224,61,0,0,0,252,255,15,132,244,
+ 248,139,68,36,8,209,224,15,132,244,250,61,0,0,0,252,255,15,132,244,250,217,
+ 252,241,252,233,244,159,248,9,255,217,232,255,223,252,234,255,221,252,234,
+ 223,224,158,255,15,132,244,247,217,201,248,1,221,216,195,248,2,217,225,217,
+ 232,255,15,132,244,249,221,216,217,225,217,252,238,184,0,0,0,0,15,146,208,
+ 209,200,51,68,36,4,15,137,244,249,217,201,248,3,221,217,217,225,195,248,4,
+ 131,124,36,4,0,15,141,244,3,221,216,221,216,133,192,15,132,244,251,217,252,
+ 238,195,248,5,199,68,36,4,0,0,128,127,217,68,36,4,195,255,248,117,255,248,
+ 160,252,242,15,45,193,252,242,15,42,208,102,15,46,202,15,133,244,254,15,138,
+ 244,255,248,161,131,252,248,1,15,142,244,252,248,1,169,1,0,0,0,15,133,244,
+ 248,252,242,15,89,192,209,232,252,233,244,1,248,2,209,232,15,132,244,251,
+ 15,40,200,248,3,252,242,15,89,192,209,232,15,132,244,250,15,131,244,3,255,
+ 252,242,15,89,200,252,233,244,3,248,4,252,242,15,89,193,248,5,195,248,6,15,
+ 132,244,5,15,130,244,253,252,247,216,232,244,1,184,0,0,252,240,63,102,15,
+ 110,200,102,15,112,201,81,252,242,15,94,200,15,40,193,195,248,7,184,0,0,252,
+ 240,63,102,15,110,192,102,15,112,192,81,195,248,8,252,242,15,17,76,36,12,
+ 252,242,15,17,68,36,4,131,124,36,12,0,15,133,244,247,139,68,36,16,209,224,
+ 61,0,0,224,252,255,15,132,244,248,248,1,131,124,36,4,0,15,133,244,247,255,
+ 139,68,36,8,209,224,15,132,244,250,61,0,0,224,252,255,15,132,244,251,248,
+ 1,221,68,36,12,221,68,36,4,217,252,241,217,192,217,252,252,220,252,233,217,
+ 201,217,252,240,217,232,222,193,217,252,253,221,217,221,92,36,4,252,242,15,
+ 16,68,36,4,195,248,9,184,0,0,252,240,63,102,15,110,208,102,15,112,210,81,
+ 102,15,46,194,15,132,244,247,15,40,193,248,1,195,248,2,102,15,252,239,210,
+ 102,15,118,210,102,15,115,210,1,102,15,84,194,184,0,0,252,240,63,102,15,110,
+ 208,102,15,112,210,81,102,15,46,194,15,132,244,1,102,15,80,193,15,87,192,
+ 136,196,15,146,208,48,224,15,133,244,1,248,3,184,0,0,252,240,127,102,15,110,
+ 192,102,15,112,192,81,195,248,4,102,15,80,193,133,192,15,133,244,3,15,87,
+ 192,195,248,5,102,15,80,193,133,192,15,132,244,3,255,15,87,192,195,248,162,
+ 255,139,68,36,12,252,242,15,16,68,36,4,131,252,248,1,15,132,244,247,15,135,
+ 244,248,232,244,91,252,233,244,253,248,1,232,244,93,252,233,244,253,248,2,
+ 131,252,248,3,15,132,244,247,15,135,244,248,232,244,114,255,252,233,244,253,
+ 248,1,252,242,15,81,192,248,7,252,242,15,17,68,36,4,221,68,36,4,195,248,2,
+ 221,68,36,4,131,252,248,5,15,130,244,98,15,132,244,158,248,2,131,252,248,
+ 7,15,132,244,247,15,135,244,248,217,252,237,217,201,217,252,241,195,248,1,
+ 217,232,217,201,217,252,241,195,248,2,131,252,248,9,15,132,244,247,15,135,
+ 244,248,255,217,252,236,217,201,217,252,241,195,248,1,217,252,254,195,248,
+ 2,131,252,248,11,15,132,244,247,15,135,244,255,217,252,255,195,248,1,217,
+ 252,242,221,216,195,255,139,68,36,12,221,68,36,4,131,252,248,1,15,130,244,
+ 91,15,132,244,93,131,252,248,3,15,130,244,114,15,135,244,248,217,252,250,
+ 195,248,2,131,252,248,5,15,130,244,98,15,132,244,158,131,252,248,7,15,132,
+ 244,247,15,135,244,248,217,252,237,217,201,217,252,241,195,248,1,217,232,
+ 217,201,217,252,241,195,248,2,131,252,248,9,15,132,244,247,255,15,135,244,
+ 248,217,252,236,217,201,217,252,241,195,248,1,217,252,254,195,248,2,131,252,
+ 248,11,15,132,244,247,15,135,244,255,217,252,255,195,248,1,217,252,242,221,
+ 216,195,255,248,9,204,255,248,163,255,139,68,36,20,252,242,15,16,68,36,4,
+ 252,242,15,16,76,36,12,131,252,248,1,15,132,244,247,15,135,244,248,252,242,
+ 15,88,193,248,7,252,242,15,17,68,36,4,221,68,36,4,195,248,1,252,242,15,92,
+ 193,252,233,244,7,248,2,131,252,248,3,15,132,244,247,15,135,244,248,252,242,
+ 15,89,193,252,233,244,7,248,1,252,242,15,94,193,252,233,244,7,248,2,131,252,
+ 248,5,15,132,244,247,255,15,135,244,248,232,244,157,252,233,244,7,248,1,90,
+ 232,244,117,82,252,233,244,7,248,2,131,252,248,7,15,132,244,247,15,135,244,
+ 248,184,0,0,0,128,102,15,110,200,102,15,112,201,81,15,87,193,252,233,244,
+ 7,248,1,102,15,252,239,201,102,15,118,201,102,15,115,209,1,15,84,193,252,
+ 233,244,7,248,2,255,131,252,248,9,15,135,244,248,221,68,36,4,221,68,36,12,
+ 15,132,244,247,217,252,243,195,248,1,217,201,217,252,253,221,217,195,248,
+ 2,131,252,248,11,15,132,244,247,15,135,244,255,252,242,15,93,193,252,233,
+ 244,7,248,1,252,242,15,95,193,252,233,244,7,248,9,204,255,139,68,36,20,221,
+ 68,36,4,221,68,36,12,131,252,248,1,15,132,244,247,15,135,244,248,222,193,
+ 195,248,1,222,252,233,195,248,2,131,252,248,3,15,132,244,247,15,135,244,248,
+ 222,201,195,248,1,222,252,249,195,248,2,131,252,248,5,15,130,244,157,15,132,
+ 244,117,131,252,248,7,15,132,244,247,15,135,244,248,255,221,216,217,224,195,
+ 248,1,221,216,217,225,195,248,2,131,252,248,9,15,132,244,247,15,135,244,248,
+ 217,252,243,195,248,1,217,201,217,252,253,221,217,195,248,2,131,252,248,11,
+ 15,132,244,247,15,135,244,255,255,219,252,233,219,209,221,217,195,248,1,219,
+ 252,233,218,209,221,217,195,255,221,225,223,224,252,246,196,1,15,132,244,
+ 248,217,201,248,2,221,216,195,248,1,221,225,223,224,252,246,196,1,15,133,
+ 244,248,217,201,248,2,221,216,195,255,248,164,156,90,137,209,129,252,242,
+ 0,0,32,0,82,157,156,90,49,192,57,209,15,132,244,247,139,68,36,4,87,83,15,
+ 162,139,124,36,16,137,7,137,95,4,137,79,8,137,87,12,91,95,248,1,195,248,165,
+ 255,204,248,166,255,131,252,236,16,87,86,83,131,252,236,28,141,157,233,139,
+ 181,233,15,183,192,137,134,233,141,132,253,36,233,137,142,233,137,150,233,
+ 137,134,233,139,140,253,36,233,139,148,253,36,233,137,76,36,44,137,84,36,
+ 40,137,226,137,116,36,24,137,252,241,232,251,1,27,199,131,233,237,139,144,
+ 233,139,128,233,41,208,139,106,252,248,193,232,3,131,192,1,139,181,233,139,
+ 14,15,182,252,233,15,182,205,131,198,4,252,255,36,171,255,248,32,255,139,
+ 76,36,48,139,179,233,137,142,233,137,145,233,137,169,233,137,252,241,137,
+ 194,232,251,1,28,139,108,36,48,139,134,233,139,150,233,131,190,233,1,15,130,
+ 244,253,15,132,244,252,221,134,233,252,233,244,253,248,6,217,134,233,248,
+ 7,139,141,233,15,183,73,6,137,76,36,48,131,196,28,91,94,95,93,89,3,36,36,
+ 131,196,16,81,195,255,248,167,255,85,137,229,83,137,203,43,163,233,255,137,
+ 163,233,255,15,182,139,233,131,252,233,1,15,136,244,248,248,1,139,132,253,
+ 139,233,137,4,140,131,252,233,1,15,137,244,1,248,2,139,139,233,139,147,233,
+ 252,255,147,233,137,131,233,137,147,233,128,187,233,1,15,130,244,253,15,132,
+ 244,252,221,155,233,252,233,244,253,248,6,255,217,155,233,248,7,255,41,163,
+ 233,255,139,93,252,252,201,195,255,249,255,129,124,253,202,4,239,15,133,244,
+ 253,129,124,253,194,4,239,15,133,244,254,139,44,202,131,198,4,59,44,194,255,
+ 15,141,244,255,255,15,140,244,255,255,15,143,244,255,255,15,142,244,255,255,
+ 248,6,15,183,70,252,254,141,180,253,134,233,248,9,139,6,15,182,204,15,182,
+ 232,131,198,4,193,232,16,252,255,36,171,248,7,15,135,244,44,129,124,253,194,
+ 4,239,15,130,244,247,15,133,244,44,255,252,242,15,42,4,194,252,233,244,248,
+ 255,221,4,202,219,4,194,252,233,244,249,255,248,8,15,135,244,44,255,252,242,
+ 15,42,12,202,252,242,15,16,4,194,131,198,4,102,15,46,193,255,15,134,244,9,
+ 255,15,135,244,9,255,15,130,244,9,255,15,131,244,9,255,252,233,244,6,255,
+ 219,4,202,252,233,244,248,255,129,124,253,202,4,239,15,131,244,44,129,124,
+ 253,194,4,239,15,131,244,44,255,248,1,252,242,15,16,4,194,248,2,131,198,4,
+ 102,15,46,4,202,248,3,255,248,1,221,4,202,248,2,221,4,194,248,3,131,198,4,
+ 255,15,135,244,247,255,15,130,244,247,255,15,131,244,247,255,15,183,70,252,
+ 254,141,180,253,134,233,248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,
+ 16,252,255,36,171,255,139,108,194,4,131,198,4,255,129,252,253,239,15,133,
+ 244,253,129,124,253,202,4,239,15,133,244,254,139,44,194,59,44,202,255,15,
+ 133,244,255,255,15,132,244,255,255,15,183,70,252,254,141,180,253,134,233,
+ 248,9,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,
+ 7,15,135,244,251,129,124,253,202,4,239,15,130,244,247,15,133,244,251,255,
+ 252,242,15,42,4,202,255,219,4,202,255,252,233,244,248,248,8,15,135,244,251,
+ 255,252,242,15,42,4,194,102,15,46,4,202,255,219,4,194,221,4,202,255,252,233,
+ 244,250,255,129,252,253,239,15,131,244,251,129,124,253,202,4,239,15,131,244,
+ 251,255,248,1,252,242,15,16,4,202,248,2,102,15,46,4,194,248,4,255,248,1,221,
+ 4,202,248,2,221,4,194,248,4,255,15,138,244,248,15,133,244,248,255,15,138,
+ 244,248,15,132,244,247,255,248,1,15,183,70,252,254,141,180,253,134,233,248,
+ 2,255,248,2,15,183,70,252,254,141,180,253,134,233,248,1,255,252,233,244,9,
+ 255,248,5,255,129,252,253,239,15,132,244,49,129,124,253,202,4,239,15,132,
+ 244,49,255,57,108,202,4,15,133,244,2,129,252,253,239,15,131,244,1,139,12,
+ 202,139,4,194,57,193,15,132,244,1,129,252,253,239,15,135,244,2,139,169,233,
+ 133,252,237,15,132,244,2,252,246,133,233,235,15,133,244,2,255,49,252,237,
+ 255,189,1,0,0,0,255,252,233,244,48,255,248,3,129,252,253,239,255,15,133,244,
+ 9,255,252,233,244,49,255,252,247,208,139,108,202,4,131,198,4,129,252,253,
+ 239,15,133,244,249,139,12,202,59,12,135,255,139,108,202,4,131,198,4,255,129,
+ 252,253,239,15,133,244,253,129,124,253,199,4,239,15,133,244,254,139,44,199,
+ 59,44,202,255,15,183,70,252,254,141,180,253,134,233,248,9,139,6,15,182,204,
+ 15,182,232,131,198,4,193,232,16,252,255,36,171,248,7,15,135,244,249,129,124,
+ 253,199,4,239,15,130,244,247,255,252,242,15,42,4,199,255,219,4,199,255,252,
+ 233,244,248,248,8,255,252,242,15,42,4,202,102,15,46,4,199,255,219,4,202,221,
+ 4,199,255,129,252,253,239,15,131,244,249,255,248,1,252,242,15,16,4,199,248,
+ 2,102,15,46,4,202,248,4,255,248,1,221,4,199,248,2,221,4,202,248,4,255,252,
+ 247,208,139,108,202,4,131,198,4,57,197,255,15,133,244,249,15,183,70,252,254,
+ 141,180,253,134,233,248,2,139,6,15,182,204,15,182,232,131,198,4,193,232,16,
+ 252,255,36,171,248,3,129,252,253,239,15,133,244,2,252,233,244,49,255,15,132,
+ 244,248,129,252,253,239,15,132,244,49,15,183,70,252,254,141,180,253,134,233,
+ 248,2,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,255,
+ 139,108,194,4,131,198,4,129,252,253,239,255,137,108,202,4,139,44,194,137,
+ 44,202,255,139,108,194,4,139,4,194,137,108,202,4,137,4,202,139,6,15,182,204,
+ 15,182,232,131,198,4,193,232,16,252,255,36,171,255,49,252,237,129,124,253,
+ 194,4,239,129,213,239,137,108,202,4,139,6,15,182,204,15,182,232,131,198,4,
+ 193,232,16,252,255,36,171,255,129,124,253,194,4,239,15,133,244,251,139,44,
+ 194,252,247,221,15,128,244,250,199,68,202,4,237,137,44,202,248,9,139,6,15,
+ 182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,4,199,68,202,4,
+ 0,0,224,65,199,4,202,0,0,0,0,252,233,244,9,248,5,15,135,244,54,255,129,124,
+ 253,194,4,239,15,131,244,54,255,252,242,15,16,4,194,184,0,0,0,128,102,15,
+ 110,200,102,15,112,201,81,15,87,193,252,242,15,17,4,202,255,221,4,194,217,
+ 224,221,28,202,255,129,124,253,194,4,239,15,133,244,248,139,4,194,255,139,
+ 128,233,248,1,199,68,202,4,237,137,4,202,255,15,87,192,252,242,15,42,128,
+ 233,248,1,252,242,15,17,4,202,255,219,128,233,248,1,221,28,202,255,139,6,
+ 15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,2,129,124,253,
+ 194,4,239,15,133,244,57,139,12,194,255,139,169,233,131,252,253,0,15,133,244,
+ 255,248,3,255,248,58,137,213,232,251,1,20,255,252,242,15,42,192,255,137,4,
+ 36,219,4,36,255,137,252,234,15,182,78,252,253,252,233,244,1,255,248,9,252,
+ 246,133,233,235,15,133,244,3,252,233,244,57,255,15,182,252,236,15,182,192,
+ 255,129,124,253,252,234,4,239,15,133,244,51,129,124,253,199,4,239,15,133,
+ 244,51,139,44,252,234,3,44,199,15,128,244,50,255,129,124,253,252,234,4,239,
+ 15,133,244,53,129,124,253,199,4,239,15,133,244,53,139,4,199,3,4,252,234,15,
+ 128,244,52,255,129,124,253,252,234,4,239,15,133,244,56,129,124,253,194,4,
+ 239,15,133,244,56,139,44,252,234,3,44,194,15,128,244,55,255,199,68,202,4,
+ 237,255,129,124,253,252,234,4,239,15,131,244,51,255,129,124,253,199,4,239,
+ 15,131,244,51,255,252,242,15,16,4,252,234,252,242,15,88,4,199,255,221,4,252,
+ 234,220,4,199,255,129,124,253,252,234,4,239,15,131,244,53,255,129,124,253,
+ 199,4,239,15,131,244,53,255,252,242,15,16,4,199,252,242,15,88,4,252,234,255,
+ 221,4,199,220,4,252,234,255,129,124,253,252,234,4,239,15,131,244,56,129,124,
+ 253,194,4,239,15,131,244,56,255,252,242,15,16,4,252,234,252,242,15,88,4,194,
+ 255,221,4,252,234,220,4,194,255,129,124,253,252,234,4,239,15,133,244,51,129,
+ 124,253,199,4,239,15,133,244,51,139,44,252,234,43,44,199,15,128,244,50,255,
+ 129,124,253,252,234,4,239,15,133,244,53,129,124,253,199,4,239,15,133,244,
+ 53,139,4,199,43,4,252,234,15,128,244,52,255,129,124,253,252,234,4,239,15,
+ 133,244,56,129,124,253,194,4,239,15,133,244,56,139,44,252,234,43,44,194,15,
+ 128,244,55,255,252,242,15,16,4,252,234,252,242,15,92,4,199,255,221,4,252,
+ 234,220,36,199,255,252,242,15,16,4,199,252,242,15,92,4,252,234,255,221,4,
+ 199,220,36,252,234,255,252,242,15,16,4,252,234,252,242,15,92,4,194,255,221,
+ 4,252,234,220,36,194,255,129,124,253,252,234,4,239,15,133,244,51,129,124,
+ 253,199,4,239,15,133,244,51,139,44,252,234,15,175,44,199,15,128,244,50,255,
+ 129,124,253,252,234,4,239,15,133,244,53,129,124,253,199,4,239,15,133,244,
+ 53,139,4,199,15,175,4,252,234,15,128,244,52,255,129,124,253,252,234,4,239,
+ 15,133,244,56,129,124,253,194,4,239,15,133,244,56,139,44,252,234,15,175,44,
+ 194,15,128,244,55,255,252,242,15,16,4,252,234,252,242,15,89,4,199,255,221,
+ 4,252,234,220,12,199,255,252,242,15,16,4,199,252,242,15,89,4,252,234,255,
+ 221,4,199,220,12,252,234,255,252,242,15,16,4,252,234,252,242,15,89,4,194,
+ 255,221,4,252,234,220,12,194,255,252,242,15,16,4,252,234,252,242,15,94,4,
+ 199,255,221,4,252,234,220,52,199,255,252,242,15,16,4,199,252,242,15,94,4,
+ 252,234,255,221,4,199,220,52,252,234,255,252,242,15,16,4,252,234,252,242,
+ 15,94,4,194,255,221,4,252,234,220,52,194,255,252,242,15,16,4,252,234,252,
+ 242,15,16,12,199,255,221,4,252,234,221,4,199,255,252,242,15,16,4,199,252,
+ 242,15,16,12,252,234,255,221,4,199,221,4,252,234,255,252,242,15,16,4,252,
+ 234,252,242,15,16,12,194,255,221,4,252,234,221,4,194,255,248,168,232,244,
+ 157,255,252,233,244,168,255,232,244,117,255,15,182,252,236,15,182,192,141,
+ 12,194,41,232,137,76,36,4,137,68,36,8,248,36,139,108,36,48,137,44,36,137,
+ 149,233,137,116,36,24,232,251,1,29,139,149,233,133,192,15,133,244,45,15,182,
+ 110,252,255,15,182,78,252,253,139,68,252,234,4,139,44,252,234,137,68,202,
+ 4,137,44,202,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,
+ 171,255,252,247,208,139,4,135,199,68,202,4,237,137,4,202,139,6,15,182,204,
+ 15,182,232,131,198,4,193,232,16,252,255,36,171,255,15,191,192,199,68,202,
+ 4,237,137,4,202,255,15,191,192,252,242,15,42,192,252,242,15,17,4,202,255,
+ 223,70,252,254,221,28,202,255,252,242,15,16,4,199,252,242,15,17,4,202,255,
+ 221,4,199,221,28,202,255,252,247,208,137,68,202,4,139,6,15,182,204,15,182,
+ 232,131,198,4,193,232,16,252,255,36,171,255,141,76,202,12,141,68,194,4,189,
+ 237,137,105,252,248,248,1,137,41,131,193,8,57,193,15,134,244,1,139,6,15,182,
+ 204,15,182,232,131,198,4,193,232,16,252,255,36,171,255,139,106,252,248,139,
+ 172,253,133,233,139,173,233,139,69,4,139,109,0,137,68,202,4,137,44,202,139,
+ 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,255,139,106,252,
+ 248,139,172,253,141,233,128,189,233,0,139,173,233,139,12,194,139,68,194,4,
+ 137,77,0,137,69,4,15,132,244,247,252,246,133,233,235,15,133,244,248,248,1,
+ 139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,2,129,
+ 232,239,129,252,248,239,15,134,244,1,252,246,129,233,235,15,132,244,1,135,
+ 213,141,139,233,255,232,251,1,30,137,252,234,252,233,244,1,255,252,247,208,
+ 139,106,252,248,139,172,253,141,233,139,12,135,139,133,233,137,8,199,64,4,
+ 237,252,246,133,233,235,15,133,244,248,248,1,139,6,15,182,204,15,182,232,
+ 131,198,4,193,232,16,252,255,36,171,248,2,252,246,129,233,235,15,132,244,
+ 1,128,189,233,0,15,132,244,1,137,213,137,194,141,139,233,232,251,1,30,137,
+ 252,234,252,233,244,1,255,139,106,252,248,255,252,242,15,16,4,199,255,139,
+ 172,253,141,233,139,141,233,255,252,242,15,17,1,255,221,25,255,252,247,208,
+ 139,106,252,248,139,172,253,141,233,139,141,233,137,65,4,139,6,15,182,204,
+ 15,182,232,131,198,4,193,232,16,252,255,36,171,255,141,180,253,134,233,139,
+ 108,36,48,131,189,233,0,15,132,244,247,137,149,233,141,20,202,137,252,233,
+ 232,251,1,31,139,149,233,248,1,139,6,15,182,204,15,182,232,131,198,4,193,
+ 232,16,252,255,36,171,255,252,247,208,139,74,252,248,139,4,135,139,108,36,
+ 48,137,76,36,8,137,68,36,4,137,44,36,137,149,233,137,116,36,24,232,251,1,
+ 32,139,149,233,15,182,78,252,253,137,4,202,199,68,202,4,237,139,6,15,182,
+ 204,15,182,232,131,198,4,193,232,16,252,255,36,171,255,139,108,36,48,137,
+ 149,233,139,139,233,59,139,233,137,116,36,24,15,131,244,251,248,1,137,193,
+ 37,252,255,7,0,0,193,252,233,11,137,76,36,8,61,252,255,7,0,0,15,132,244,249,
+ 248,2,137,44,36,137,68,36,4,232,251,1,33,139,149,233,15,182,78,252,253,137,
+ 4,202,199,68,202,4,237,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,
+ 255,36,171,248,3,184,1,8,0,0,252,233,244,2,248,5,137,252,233,232,251,1,34,
+ 15,183,70,252,254,252,233,244,1,255,252,247,208,139,108,36,48,139,139,233,
+ 137,116,36,24,59,139,233,137,149,233,15,131,244,249,248,2,139,20,135,137,
+ 252,233,232,251,1,35,139,149,233,15,182,78,252,253,137,4,202,199,68,202,4,
+ 237,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,3,
+ 137,252,233,232,251,1,34,15,183,70,252,254,252,247,208,252,233,244,2,255,
+ 252,247,208,139,106,252,248,139,173,233,139,4,135,252,233,244,169,255,252,
+ 247,208,139,106,252,248,139,173,233,139,4,135,252,233,244,170,255,15,182,
+ 252,236,15,182,192,129,124,253,252,234,4,239,15,133,244,39,139,44,252,234,
+ 255,129,124,253,194,4,239,15,133,244,251,139,4,194,255,129,124,253,194,4,
+ 239,15,131,244,251,255,252,242,15,16,4,194,252,242,15,45,192,252,242,15,42,
+ 200,102,15,46,193,255,221,4,194,219,20,36,219,4,36,255,15,133,244,39,255,
+ 59,133,233,15,131,244,39,193,224,3,3,133,233,129,120,253,4,239,15,132,244,
+ 248,139,40,139,64,4,137,44,202,137,68,202,4,248,1,139,6,15,182,204,15,182,
+ 232,131,198,4,193,232,16,252,255,36,171,248,2,131,189,233,0,15,132,244,249,
+ 139,141,233,252,246,129,233,235,15,132,244,39,15,182,78,252,253,248,3,199,
+ 68,202,4,237,252,233,244,1,248,5,255,129,124,253,194,4,239,15,133,244,39,
+ 139,4,194,252,233,244,169,255,15,182,252,236,15,182,192,252,247,208,139,4,
+ 135,129,124,253,252,234,4,239,15,133,244,37,139,44,252,234,248,169,139,141,
+ 233,35,136,233,105,201,239,3,141,233,248,1,129,185,233,239,15,133,244,250,
+ 57,129,233,15,133,244,250,129,121,253,4,239,15,132,244,251,15,182,70,252,
+ 253,139,41,139,73,4,137,44,194,137,76,194,4,248,2,255,139,6,15,182,204,15,
+ 182,232,131,198,4,193,232,16,252,255,36,171,248,3,15,182,70,252,253,199,68,
+ 194,4,237,252,233,244,2,248,4,139,137,233,133,201,15,133,244,1,248,5,139,
+ 141,233,133,201,15,132,244,3,252,246,129,233,235,15,133,244,3,252,233,244,
+ 37,255,15,182,252,236,15,182,192,129,124,253,252,234,4,239,15,133,244,38,
+ 139,44,252,234,59,133,233,15,131,244,38,193,224,3,3,133,233,129,120,253,4,
+ 239,15,132,244,248,139,40,139,64,4,137,44,202,137,68,202,4,248,1,139,6,15,
+ 182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,2,131,189,233,
+ 0,15,132,244,249,139,141,233,252,246,129,233,235,15,132,244,38,255,15,182,
+ 78,252,253,248,3,199,68,202,4,237,252,233,244,1,255,15,182,252,236,15,182,
+ 192,129,124,253,252,234,4,239,15,133,244,42,139,44,252,234,255,15,133,244,
+ 42,255,59,133,233,15,131,244,42,193,224,3,3,133,233,129,120,253,4,239,15,
+ 132,244,249,248,1,252,246,133,233,235,15,133,244,253,248,2,139,108,202,4,
+ 139,12,202,137,104,4,137,8,139,6,15,182,204,15,182,232,131,198,4,193,232,
+ 16,252,255,36,171,248,3,131,189,233,0,15,132,244,1,139,141,233,252,246,129,
+ 233,235,255,15,132,244,42,15,182,78,252,253,252,233,244,1,248,5,129,124,253,
+ 194,4,239,15,133,244,42,139,4,194,252,233,244,170,248,7,128,165,233,235,139,
+ 139,233,137,171,233,137,141,233,15,182,78,252,253,252,233,244,2,255,15,182,
+ 252,236,15,182,192,252,247,208,139,4,135,129,124,253,252,234,4,239,15,133,
+ 244,40,139,44,252,234,248,170,139,141,233,35,136,233,105,201,239,198,133,
+ 233,0,3,141,233,248,1,129,185,233,239,15,133,244,251,57,129,233,15,133,244,
+ 251,129,121,253,4,239,15,132,244,250,248,2,255,252,246,133,233,235,15,133,
+ 244,253,248,3,15,182,70,252,253,139,108,194,4,139,4,194,137,105,4,137,1,139,
+ 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,4,131,189,
+ 233,0,15,132,244,2,137,76,36,16,139,141,233,252,246,129,233,235,15,132,244,
+ 40,139,76,36,16,252,233,244,2,248,5,139,137,233,133,201,15,133,244,1,255,
+ 139,141,233,133,201,15,132,244,252,252,246,129,233,235,15,132,244,40,248,
+ 6,137,68,36,16,199,68,36,20,237,137,108,36,12,141,68,36,16,137,108,36,4,139,
+ 108,36,48,137,68,36,8,137,44,36,137,149,233,137,116,36,24,232,251,1,36,139,
+ 149,233,139,108,36,12,137,193,252,233,244,2,248,7,128,165,233,235,139,131,
+ 233,137,171,233,137,133,233,252,233,244,3,255,15,182,252,236,15,182,192,129,
+ 124,253,252,234,4,239,15,133,244,41,139,44,252,234,59,133,233,15,131,244,
+ 41,193,224,3,3,133,233,129,120,253,4,239,15,132,244,249,248,1,252,246,133,
+ 233,235,15,133,244,253,248,2,139,108,202,4,139,12,202,137,104,4,137,8,139,
+ 6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,3,131,189,
+ 233,0,15,132,244,1,255,139,141,233,252,246,129,233,235,15,132,244,41,15,182,
+ 78,252,253,252,233,244,1,248,7,128,165,233,235,139,139,233,137,171,233,137,
+ 141,233,15,182,78,252,253,252,233,244,2,255,137,124,36,16,139,60,199,248,
+ 1,141,12,202,139,105,252,248,252,246,133,233,235,15,133,244,253,248,2,139,
+ 68,36,20,131,232,1,15,132,244,250,1,252,248,59,133,233,15,135,244,251,41,
+ 252,248,193,231,3,3,189,233,248,3,139,41,137,47,139,105,4,131,193,8,137,111,
+ 4,131,199,8,131,232,1,15,133,244,3,248,4,139,124,36,16,139,6,15,182,204,15,
+ 182,232,131,198,4,193,232,16,252,255,36,171,248,5,137,108,36,4,139,108,36,
+ 48,137,149,233,137,68,36,8,137,44,36,137,116,36,24,232,251,1,37,139,149,233,
+ 15,182,78,252,253,252,233,244,1,248,7,255,128,165,233,235,139,131,233,137,
+ 171,233,137,133,233,252,233,244,2,255,3,68,36,20,255,129,124,253,202,4,239,
+ 139,44,202,15,133,244,59,141,84,202,8,137,114,252,252,139,181,233,139,14,
+ 15,182,252,233,15,182,205,131,198,4,252,255,36,171,255,141,76,202,8,137,215,
+ 139,105,252,248,129,121,253,252,252,239,15,133,244,29,248,60,139,114,252,
+ 252,252,247,198,237,15,133,244,253,248,1,137,106,252,248,137,68,36,20,131,
+ 232,1,15,132,244,249,248,2,139,41,137,47,139,105,4,131,193,8,137,111,4,131,
+ 199,8,131,232,1,15,133,244,2,139,106,252,248,248,3,139,68,36,20,128,189,233,
+ 1,15,135,244,251,248,4,139,181,233,139,14,15,182,252,233,15,182,205,131,198,
+ 4,252,255,36,171,248,5,255,252,247,198,237,15,133,244,4,15,182,78,252,253,
+ 252,247,209,141,12,202,139,121,252,248,139,191,233,139,191,233,252,233,244,
+ 4,248,7,129,252,238,239,252,247,198,237,15,133,244,254,41,252,242,137,215,
+ 139,114,252,252,252,233,244,1,248,8,129,198,239,252,233,244,1,255,141,76,
+ 202,8,139,105,232,139,65,252,236,137,41,137,65,4,139,105,252,240,139,65,252,
+ 244,137,105,8,137,65,12,139,105,224,139,65,228,137,105,252,248,137,65,252,
+ 252,129,252,248,239,184,237,15,133,244,29,137,202,137,114,252,252,139,181,
+ 233,139,14,15,182,252,233,15,182,205,131,198,4,252,255,36,171,255,137,124,
+ 36,16,137,92,36,20,139,108,202,252,240,139,68,202,252,248,139,157,233,131,
+ 198,4,139,189,233,248,1,57,216,15,131,244,251,129,124,253,199,4,239,15,132,
+ 244,250,255,219,68,202,252,248,255,139,108,199,4,137,108,202,12,139,44,199,
+ 137,108,202,8,131,192,1,255,137,68,202,252,248,248,2,15,183,70,252,254,141,
+ 180,253,134,233,248,3,139,92,36,20,139,124,36,16,139,6,15,182,204,15,182,
+ 232,131,198,4,193,232,16,252,255,36,171,248,4,131,192,1,255,137,68,202,252,
+ 248,255,252,233,244,1,248,5,41,216,248,6,59,133,233,15,135,244,3,105,252,
+ 248,239,3,189,233,129,191,233,239,15,132,244,253,141,92,24,1,139,175,233,
+ 139,135,233,137,44,202,137,68,202,4,139,175,233,139,135,233,137,108,202,8,
+ 137,68,202,12,137,92,202,252,248,252,233,244,2,248,7,255,131,192,1,252,233,
+ 244,6,255,129,124,253,202,252,236,239,15,133,244,251,139,108,202,232,129,
+ 124,253,202,252,244,239,15,133,244,251,129,124,253,202,252,252,239,15,133,
+ 244,251,128,189,233,235,15,133,244,251,141,180,253,134,233,199,68,202,252,
+ 248,0,0,0,0,248,1,139,6,15,182,204,15,182,232,131,198,4,193,232,16,252,255,
+ 36,171,248,5,198,70,252,252,235,141,180,253,134,233,198,6,235,252,233,244,
+ 1,255,15,182,252,236,15,182,192,137,124,36,16,141,188,253,194,233,141,12,
+ 202,43,122,252,252,133,252,237,15,132,244,251,141,108,252,233,252,248,57,
+ 215,15,131,244,248,248,1,139,71,252,248,137,1,139,71,252,252,131,199,8,137,
+ 65,4,131,193,8,57,252,233,15,131,244,249,57,215,15,130,244,1,248,2,199,65,
+ 4,237,131,193,8,57,252,233,15,130,244,2,248,3,139,124,36,16,139,6,15,182,
+ 204,15,182,232,131,198,4,193,232,16,252,255,36,171,248,5,199,68,36,20,1,0,
+ 0,0,137,208,41,252,248,15,134,244,3,137,197,193,252,237,3,131,197,1,137,108,
+ 36,20,139,108,36,48,1,200,59,133,233,15,135,244,253,248,6,255,139,71,252,
+ 248,137,1,139,71,252,252,131,199,8,137,65,4,131,193,8,57,215,15,130,244,6,
+ 252,233,244,3,248,7,137,149,233,137,141,233,137,116,36,24,41,215,139,84,36,
+ 20,131,252,234,1,137,252,233,232,251,1,0,139,149,233,139,141,233,1,215,252,
+ 233,244,6,255,193,225,3,255,248,1,139,114,252,252,137,68,36,20,252,247,198,
+ 237,15,133,244,253,255,248,13,137,215,131,232,1,15,132,244,249,248,2,139,
+ 44,15,137,111,252,248,139,108,15,4,137,111,252,252,131,199,8,131,232,1,15,
+ 133,244,2,248,3,139,68,36,20,15,182,110,252,255,248,5,57,197,15,135,244,252,
+ 255,139,108,10,4,137,106,252,252,139,44,10,137,106,252,248,255,248,5,56,70,
+ 252,255,15,135,244,252,255,15,182,78,252,253,252,247,209,141,20,202,139,122,
+ 252,248,139,191,233,139,191,233,139,6,15,182,204,15,182,232,131,198,4,193,
+ 232,16,252,255,36,171,248,6,255,199,71,252,252,237,131,199,8,255,199,68,194,
+ 252,244,237,255,131,192,1,252,233,244,5,248,7,141,174,233,252,247,197,237,
+ 15,133,244,14,41,252,234,255,1,252,233,255,137,252,245,209,252,237,129,229,
+ 239,102,129,172,253,43,233,238,15,130,244,149,255,141,12,202,255,129,121,
+ 253,4,239,15,133,244,255,255,129,121,253,12,239,15,133,244,61,129,121,253,
+ 20,239,15,133,244,61,139,41,131,121,16,0,15,140,244,251,255,129,121,253,12,
+ 239,15,133,244,165,129,121,253,20,239,15,133,244,165,255,139,105,16,133,252,
+ 237,15,136,244,251,3,41,15,128,244,247,137,41,255,59,105,8,199,65,28,237,
+ 137,105,24,255,15,142,244,253,248,1,248,6,141,180,253,134,233,255,141,180,
+ 253,134,233,15,183,70,252,254,15,142,245,248,1,248,6,255,15,143,244,253,248,
+ 6,141,180,253,134,233,248,1,255,248,7,139,6,15,182,204,15,182,232,131,198,
+ 4,193,232,16,252,255,36,171,248,5,255,3,41,15,128,244,1,137,41,255,15,141,
+ 244,7,255,141,180,253,134,233,15,183,70,252,254,15,141,245,255,15,140,244,
+ 7,255,252,233,244,6,248,9,255,129,121,253,4,239,255,15,131,244,61,129,121,
+ 253,12,239,15,131,244,61,255,129,121,253,12,239,15,131,244,165,129,121,253,
+ 20,239,15,131,244,165,255,139,105,20,255,129,252,253,239,15,131,244,61,255,
+ 252,242,15,16,1,252,242,15,16,73,8,255,252,242,15,88,65,16,252,242,15,17,
+ 1,133,252,237,15,136,244,249,255,15,140,244,249,255,102,15,46,200,248,1,252,
+ 242,15,17,65,24,255,221,65,8,221,1,255,220,65,16,221,17,221,81,24,133,252,
+ 237,15,136,244,247,255,221,81,24,15,140,244,247,255,217,201,248,1,255,15,
+ 183,70,252,254,255,15,131,244,7,255,15,131,244,248,141,180,253,134,233,255,
+ 141,180,253,134,233,15,183,70,252,254,15,131,245,255,15,130,244,7,255,15,
+ 130,244,248,141,180,253,134,233,255,248,3,102,15,46,193,252,233,244,1,255,
+ 141,12,202,139,105,4,129,252,253,239,15,132,244,247,255,137,105,252,252,139,
+ 41,137,105,252,248,252,233,245,255,141,180,253,134,233,139,1,137,105,252,
+ 252,137,65,252,248,255,139,139,233,139,4,129,139,128,233,139,108,36,48,137,
+ 147,233,137,171,233,252,255,224,255,141,180,253,134,233,139,6,15,182,204,
+ 15,182,232,131,198,4,193,232,16,252,255,36,171,255,137,252,245,209,252,237,
+ 129,229,239,102,129,172,253,43,233,238,15,130,244,151,255,139,190,233,139,
+ 108,36,48,141,12,202,59,141,233,15,135,244,24,15,182,142,233,57,200,15,134,
+ 244,249,248,2,255,15,183,70,252,254,252,233,245,255,248,3,199,68,194,252,
+ 252,237,131,192,1,57,200,15,134,244,3,252,233,244,2,255,141,44,197,237,141,
+ 4,194,139,122,252,248,137,104,252,252,137,120,252,248,139,108,36,48,141,12,
+ 200,59,141,233,15,135,244,23,137,209,137,194,15,182,174,233,133,252,237,15,
+ 132,244,248,248,1,131,193,8,57,209,15,131,244,249,139,121,252,248,137,56,
+ 139,121,252,252,137,120,4,131,192,8,199,65,252,252,237,131,252,237,1,15,133,
+ 244,1,248,2,255,139,190,233,139,6,15,182,204,15,182,232,131,198,4,193,232,
+ 16,252,255,36,171,255,248,3,199,64,4,237,131,192,8,131,252,237,1,15,133,244,
+ 3,252,233,244,2,255,139,106,252,248,139,189,233,139,108,36,48,141,68,194,
+ 252,248,137,149,233,141,136,233,59,141,233,137,133,233,255,137,44,36,255,
+ 137,124,36,4,137,44,36,255,15,135,244,22,199,131,233,237,255,252,255,215,
+ 255,252,255,147,233,255,199,131,233,237,139,149,233,141,12,194,252,247,217,
+ 3,141,233,139,114,252,252,252,233,244,12,255,254,0
+};
+
+enum {
+ GLOB_vm_returnp,
+ GLOB_cont_dispatch,
+ GLOB_vm_returnc,
+ GLOB_BC_RET_Z,
+ GLOB_vm_return,
+ GLOB_vm_leave_cp,
+ GLOB_vm_leave_unw,
+ GLOB_vm_unwind_c,
+ GLOB_vm_unwind_c_eh,
+ GLOB_vm_unwind_rethrow,
+ GLOB_vm_unwind_ff,
+ GLOB_vm_unwind_ff_eh,
+ GLOB_vm_growstack_c,
+ GLOB_vm_growstack_v,
+ GLOB_vm_growstack_f,
+ GLOB_vm_resume,
+ GLOB_vm_pcall,
+ GLOB_vm_call,
+ GLOB_vm_call_dispatch,
+ GLOB_vmeta_call,
+ GLOB_vm_call_dispatch_f,
+ GLOB_vm_cpcall,
+ GLOB_cont_ffi_callback,
+ GLOB_vm_call_tail,
+ GLOB_cont_cat,
+ GLOB_cont_ra,
+ GLOB_BC_CAT_Z,
+ GLOB_vmeta_tgets,
+ GLOB_vmeta_tgetb,
+ GLOB_vmeta_tgetv,
+ GLOB_vmeta_tsets,
+ GLOB_vmeta_tsetb,
+ GLOB_vmeta_tsetv,
+ GLOB_cont_nop,
+ GLOB_vmeta_comp,
+ GLOB_vmeta_binop,
+ GLOB_cont_condt,
+ GLOB_cont_condf,
+ GLOB_vmeta_equal,
+ GLOB_vmeta_equal_cd,
+ GLOB_vmeta_arith_vno,
+ GLOB_vmeta_arith_vn,
+ GLOB_vmeta_arith_nvo,
+ GLOB_vmeta_arith_nv,
+ GLOB_vmeta_unm,
+ GLOB_vmeta_arith_vvo,
+ GLOB_vmeta_arith_vv,
+ GLOB_vmeta_len,
+ GLOB_BC_LEN_Z,
+ GLOB_vmeta_call_ra,
+ GLOB_BC_CALLT_Z,
+ GLOB_vmeta_for,
+ GLOB_ff_assert,
+ GLOB_fff_fallback,
+ GLOB_fff_res_,
+ GLOB_ff_type,
+ GLOB_fff_res1,
+ GLOB_ff_getmetatable,
+ GLOB_ff_setmetatable,
+ GLOB_ff_rawget,
+ GLOB_ff_tonumber,
+ GLOB_fff_resi,
+ GLOB_fff_resxmm0,
+ GLOB_fff_resn,
+ GLOB_ff_tostring,
+ GLOB_fff_gcstep,
+ GLOB_ff_next,
+ GLOB_fff_res2,
+ GLOB_fff_res,
+ GLOB_ff_pairs,
+ GLOB_ff_ipairs_aux,
+ GLOB_fff_res0,
+ GLOB_ff_ipairs,
+ GLOB_ff_pcall,
+ GLOB_ff_xpcall,
+ GLOB_ff_coroutine_resume,
+ GLOB_ff_coroutine_wrap_aux,
+ GLOB_ff_coroutine_yield,
+ GLOB_ff_math_abs,
+ GLOB_fff_resbit,
+ GLOB_ff_math_floor,
+ GLOB_vm_floor,
+ GLOB_ff_math_ceil,
+ GLOB_vm_ceil,
+ GLOB_ff_math_sqrt,
+ GLOB_ff_math_log,
+ GLOB_ff_math_log10,
+ GLOB_ff_math_exp,
+ GLOB_vm_exp_x87,
+ GLOB_ff_math_sin,
+ GLOB_ff_math_cos,
+ GLOB_ff_math_tan,
+ GLOB_ff_math_asin,
+ GLOB_ff_math_acos,
+ GLOB_ff_math_atan,
+ GLOB_ff_math_sinh,
+ GLOB_ff_math_cosh,
+ GLOB_ff_math_tanh,
+ GLOB_ff_math_deg,
+ GLOB_ff_math_rad,
+ GLOB_ff_math_atan2,
+ GLOB_ff_math_ldexp,
+ GLOB_ff_math_frexp,
+ GLOB_ff_math_modf,
+ GLOB_vm_trunc,
+ GLOB_ff_math_fmod,
+ GLOB_ff_math_pow,
+ GLOB_vm_pow,
+ GLOB_ff_math_min,
+ GLOB_ff_math_max,
+ GLOB_ff_string_len,
+ GLOB_ff_string_byte,
+ GLOB_ff_string_char,
+ GLOB_fff_newstr,
+ GLOB_ff_string_sub,
+ GLOB_fff_emptystr,
+ GLOB_ff_string_rep,
+ GLOB_fff_fallback_2,
+ GLOB_ff_string_reverse,
+ GLOB_fff_fallback_1,
+ GLOB_ff_string_lower,
+ GLOB_ff_string_upper,
+ GLOB_ff_table_getn,
+ GLOB_ff_bit_tobit,
+ GLOB_ff_bit_band,
+ GLOB_fff_fallback_bit_op,
+ GLOB_ff_bit_bor,
+ GLOB_ff_bit_bxor,
+ GLOB_ff_bit_bswap,
+ GLOB_ff_bit_bnot,
+ GLOB_ff_bit_lshift,
+ GLOB_ff_bit_rshift,
+ GLOB_ff_bit_arshift,
+ GLOB_ff_bit_rol,
+ GLOB_ff_bit_ror,
+ GLOB_vm_record,
+ GLOB_vm_rethook,
+ GLOB_vm_inshook,
+ GLOB_cont_hook,
+ GLOB_vm_hotloop,
+ GLOB_vm_callhook,
+ GLOB_vm_hotcall,
+ GLOB_vm_exit_handler,
+ GLOB_vm_exit_interp,
+ GLOB_vm_floor_sse,
+ GLOB_vm_ceil_sse,
+ GLOB_vm_trunc_sse,
+ GLOB_vm_mod,
+ GLOB_vm_exp2_x87,
+ GLOB_vm_exp2raw,
+ GLOB_vm_pow_sse,
+ GLOB_vm_powi_sse,
+ GLOB_vm_foldfpm,
+ GLOB_vm_foldarith,
+ GLOB_vm_cpuid,
+ GLOB_assert_bad_for_arg_type,
+ GLOB_vm_ffi_callback,
+ GLOB_vm_ffi_call,
+ GLOB_BC_MODVN_Z,
+ GLOB_BC_TGETS_Z,
+ GLOB_BC_TSETS_Z,
+ GLOB__MAX
+};
+static const char *const globnames[] = {
+ "vm_returnp",
+ "cont_dispatch",
+ "vm_returnc",
+ "BC_RET_Z",
+ "vm_return",
+ "vm_leave_cp",
+ "vm_leave_unw",
+ "vm_unwind_c@8",
+ "vm_unwind_c_eh",
+ "vm_unwind_rethrow",
+ "vm_unwind_ff@4",
+ "vm_unwind_ff_eh",
+ "vm_growstack_c",
+ "vm_growstack_v",
+ "vm_growstack_f",
+ "vm_resume",
+ "vm_pcall",
+ "vm_call",
+ "vm_call_dispatch",
+ "vmeta_call",
+ "vm_call_dispatch_f",
+ "vm_cpcall",
+ "cont_ffi_callback",
+ "vm_call_tail",
+ "cont_cat",
+ "cont_ra",
+ "BC_CAT_Z",
+ "vmeta_tgets",
+ "vmeta_tgetb",
+ "vmeta_tgetv",
+ "vmeta_tsets",
+ "vmeta_tsetb",
+ "vmeta_tsetv",
+ "cont_nop",
+ "vmeta_comp",
+ "vmeta_binop",
+ "cont_condt",
+ "cont_condf",
+ "vmeta_equal",
+ "vmeta_equal_cd",
+ "vmeta_arith_vno",
+ "vmeta_arith_vn",
+ "vmeta_arith_nvo",
+ "vmeta_arith_nv",
+ "vmeta_unm",
+ "vmeta_arith_vvo",
+ "vmeta_arith_vv",
+ "vmeta_len",
+ "BC_LEN_Z",
+ "vmeta_call_ra",
+ "BC_CALLT_Z",
+ "vmeta_for",
+ "ff_assert",
+ "fff_fallback",
+ "fff_res_",
+ "ff_type",
+ "fff_res1",
+ "ff_getmetatable",
+ "ff_setmetatable",
+ "ff_rawget",
+ "ff_tonumber",
+ "fff_resi",
+ "fff_resxmm0",
+ "fff_resn",
+ "ff_tostring",
+ "fff_gcstep",
+ "ff_next",
+ "fff_res2",
+ "fff_res",
+ "ff_pairs",
+ "ff_ipairs_aux",
+ "fff_res0",
+ "ff_ipairs",
+ "ff_pcall",
+ "ff_xpcall",
+ "ff_coroutine_resume",
+ "ff_coroutine_wrap_aux",
+ "ff_coroutine_yield",
+ "ff_math_abs",
+ "fff_resbit",
+ "ff_math_floor",
+ "vm_floor",
+ "ff_math_ceil",
+ "vm_ceil",
+ "ff_math_sqrt",
+ "ff_math_log",
+ "ff_math_log10",
+ "ff_math_exp",
+ "vm_exp_x87",
+ "ff_math_sin",
+ "ff_math_cos",
+ "ff_math_tan",
+ "ff_math_asin",
+ "ff_math_acos",
+ "ff_math_atan",
+ "ff_math_sinh",
+ "ff_math_cosh",
+ "ff_math_tanh",
+ "ff_math_deg",
+ "ff_math_rad",
+ "ff_math_atan2",
+ "ff_math_ldexp",
+ "ff_math_frexp",
+ "ff_math_modf",
+ "vm_trunc",
+ "ff_math_fmod",
+ "ff_math_pow",
+ "vm_pow",
+ "ff_math_min",
+ "ff_math_max",
+ "ff_string_len",
+ "ff_string_byte",
+ "ff_string_char",
+ "fff_newstr",
+ "ff_string_sub",
+ "fff_emptystr",
+ "ff_string_rep",
+ "fff_fallback_2",
+ "ff_string_reverse",
+ "fff_fallback_1",
+ "ff_string_lower",
+ "ff_string_upper",
+ "ff_table_getn",
+ "ff_bit_tobit",
+ "ff_bit_band",
+ "fff_fallback_bit_op",
+ "ff_bit_bor",
+ "ff_bit_bxor",
+ "ff_bit_bswap",
+ "ff_bit_bnot",
+ "ff_bit_lshift",
+ "ff_bit_rshift",
+ "ff_bit_arshift",
+ "ff_bit_rol",
+ "ff_bit_ror",
+ "vm_record",
+ "vm_rethook",
+ "vm_inshook",
+ "cont_hook",
+ "vm_hotloop",
+ "vm_callhook",
+ "vm_hotcall",
+ "vm_exit_handler",
+ "vm_exit_interp",
+ "vm_floor_sse",
+ "vm_ceil_sse",
+ "vm_trunc_sse",
+ "vm_mod",
+ "vm_exp2_x87",
+ "vm_exp2raw",
+ "vm_pow_sse",
+ "vm_powi_sse",
+ "vm_foldfpm",
+ "vm_foldarith",
+ "vm_cpuid",
+ "assert_bad_for_arg_type",
+ "vm_ffi_callback",
+ "vm_ffi_call@4",
+ "BC_MODVN_Z",
+ "BC_TGETS_Z",
+ "BC_TSETS_Z",
+ (const char *)0
+};
+static const char *const extnames[] = {
+ "lj_state_growstack@8",
+ "lj_meta_tget",
+ "lj_meta_tset",
+ "lj_meta_comp",
+ "lj_meta_equal",
+ "lj_meta_equal_cd@8",
+ "lj_meta_arith",
+ "lj_meta_len@8",
+ "lj_meta_call",
+ "lj_meta_for@8",
+ "lj_tab_get",
+ "lj_str_fromnumber@8",
+ "lj_str_fromnum@8",
+ "lj_tab_next",
+ "lj_tab_getinth@8",
+ "lj_ffh_coroutine_wrap_err@8",
+ "lj_vm_sinh",
+ "lj_vm_cosh",
+ "lj_vm_tanh",
+ "lj_str_new",
+ "lj_tab_len@4",
+ "lj_gc_step@4",
+ "lj_dispatch_ins@8",
+ "lj_trace_hot@8",
+ "lj_dispatch_call@8",
+ "lj_trace_exit@8",
+ "lj_err_throw@8",
+ "lj_ccallback_enter@8",
+ "lj_ccallback_leave@8",
+ "lj_meta_cat",
+ "lj_gc_barrieruv@8",
+ "lj_func_closeuv@8",
+ "lj_func_newL_gc",
+ "lj_tab_new",
+ "lj_gc_step_fixtop@4",
+ "lj_tab_dup@8",
+ "lj_tab_newkey",
+ "lj_tab_reasize",
+ (const char *)0
+};
+#define Dt1(_V) (int)(ptrdiff_t)&(((lua_State *)0)_V)
+#define Dt2(_V) (int)(ptrdiff_t)&(((global_State *)0)_V)
+#define Dt3(_V) (int)(ptrdiff_t)&(((TValue *)0)_V)
+#define Dt4(_V) (int)(ptrdiff_t)&(((GCobj *)0)_V)
+#define Dt5(_V) (int)(ptrdiff_t)&(((GCstr *)0)_V)
+#define Dt6(_V) (int)(ptrdiff_t)&(((GCtab *)0)_V)
+#define Dt7(_V) (int)(ptrdiff_t)&(((GCfuncL *)0)_V)
+#define Dt8(_V) (int)(ptrdiff_t)&(((GCfuncC *)0)_V)
+#define Dt9(_V) (int)(ptrdiff_t)&(((GCproto *)0)_V)
+#define DtA(_V) (int)(ptrdiff_t)&(((GCupval *)0)_V)
+#define DtB(_V) (int)(ptrdiff_t)&(((Node *)0)_V)
+#define DtC(_V) (int)(ptrdiff_t)&(((int *)0)_V)
+#define DtD(_V) (int)(ptrdiff_t)&(((GCtrace *)0)_V)
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx, int cmov, int sse)
+{
+ dasm_put(Dst, 0);
+ dasm_put(Dst, 2, FRAME_P, LJ_TTRUE, FRAME_TYPE, FRAME_C, FRAME_TYPE, DISPATCH_GL(vmstate), ~LJ_VMST_C);
+ dasm_put(Dst, 114, Dt1(->base), Dt1(->top), Dt1(->cframe), Dt1(->maxstack), LJ_TNIL);
+ dasm_put(Dst, 200, Dt1(->top), Dt1(->top), Dt1(->glref), Dt2(->vmstate), ~LJ_VMST_C, CFRAME_RAWMASK);
+ dasm_put(Dst, 275, 1+1, Dt1(->base), Dt1(->glref), GG_G2DISP, LJ_TFALSE, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, LUA_MINSTACK, -4+PC2PROTO(framesize), Dt1(->base));
+ dasm_put(Dst, 353, Dt1(->top), Dt1(->base), Dt1(->top), Dt7(->pc), FRAME_CP, CFRAME_RESUME, Dt1(->glref), GG_G2DISP, Dt1(->cframe), Dt1(->status), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->status), Dt1(->base), Dt1(->top), FRAME_TYPE);
+ dasm_put(Dst, 495, FRAME_CP, FRAME_C, Dt1(->cframe), Dt1(->cframe), Dt1(->glref), GG_G2DISP, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base));
+ dasm_put(Dst, 573, Dt1(->top), LJ_TFUNC, Dt7(->pc), Dt1(->stack), Dt1(->top), Dt1(->cframe), Dt1(->cframe), FRAME_CP, LJ_TNIL);
+#if LJ_HASFFI
+#endif
+ dasm_put(Dst, 743);
+#if LJ_HASFFI
+ dasm_put(Dst, 748);
+#endif
+ dasm_put(Dst, 757, Dt7(->pc), PC2PROTO(k));
+#if LJ_HASFFI
+ dasm_put(Dst, 771);
+#endif
+ dasm_put(Dst, 792, LJ_TSTR, BC_GGET, DISPATCH_GL(tmptv), LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 890, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 900);
+ } else {
+ dasm_put(Dst, 913);
+ }
+ dasm_put(Dst, 926, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 2+1, LJ_TSTR, BC_GSET);
+ dasm_put(Dst, 1078, DISPATCH_GL(tmptv), LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 890, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 900);
+ } else {
+ dasm_put(Dst, 913);
+ }
+ dasm_put(Dst, 1101, Dt1(->base), Dt1(->base), Dt1(->top), FRAME_CONT, 3+1, Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 1295, -BCBIAS_J*4, LJ_TISTRUECOND, LJ_TISTRUECOND, Dt1(->base));
+ dasm_put(Dst, 1402);
+#if LJ_HASFFI
+ dasm_put(Dst, 1417, Dt1(->base));
+#endif
+ dasm_put(Dst, 1448);
+#if LJ_DUALNUM
+ dasm_put(Dst, 1451);
+#endif
+ dasm_put(Dst, 1457);
+#if LJ_DUALNUM
+ dasm_put(Dst, 884);
+#endif
+ dasm_put(Dst, 1469);
+#if LJ_DUALNUM
+ dasm_put(Dst, 1451);
+#endif
+ dasm_put(Dst, 1497, Dt1(->base), Dt1(->base), FRAME_CONT, 2+1, Dt1(->base), Dt1(->base));
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 1607);
+#else
+ dasm_put(Dst, 1626);
+#endif
+ dasm_put(Dst, 1631, Dt1(->base), Dt1(->base), Dt7(->pc), Dt1(->base), Dt1(->base), GG_DISP2STATIC, 1+1, LJ_TISTRUECOND);
+ dasm_put(Dst, 1820, 1+1, ~LJ_TNUMX);
+ if (cmov) {
+ dasm_put(Dst, 1878);
+ } else {
+ dasm_put(Dst, 1882);
+ }
+ dasm_put(Dst, 1891, ((char *)(&((GCfuncC *)0)->upvalue)), LJ_TSTR, 1+1, LJ_TTAB, Dt6(->metatable), LJ_TNIL, DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable), LJ_TTAB);
+ dasm_put(Dst, 1974, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), DtB(->next), LJ_TNIL);
+ dasm_put(Dst, 2032, LJ_TUDATA, LJ_TISNUM, LJ_TNUMX, DISPATCH_GL(gcroot[GCROOT_BASEMT]), 2+1);
+ dasm_put(Dst, 2095, LJ_TTAB, Dt6(->metatable), LJ_TTAB, Dt6(->metatable), LJ_TTAB, Dt6(->marked), LJ_GC_BLACK, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ dasm_put(Dst, 2167, 2+1, LJ_TTAB, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2256);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 2278);
+ } else {
+ dasm_put(Dst, 2288);
+ }
+ dasm_put(Dst, 2295, 1+1, LJ_TSTR, LJ_TSTR, LJ_TISNUM, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM]), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 2361, Dt1(->base));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2385);
+ } else {
+ dasm_put(Dst, 2390);
+ }
+ dasm_put(Dst, 2395, Dt1(->base), 1+1, LJ_TTAB, Dt1(->base), Dt1(->top), Dt1(->base), 1+2);
+ dasm_put(Dst, 2504, LJ_TNIL, LJ_TNIL, 1+1, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 2551, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 2560, Dt8(->upvalue[0]), LJ_TFUNC, LJ_TNIL, 1+3, 1+1, LJ_TTAB, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2546);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ dasm_put(Dst, 2615);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2620, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 2636);
+ } else {
+ dasm_put(Dst, 2675);
+ }
+ dasm_put(Dst, 2693, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->hmask), 1+0);
+ dasm_put(Dst, 2531, 1+1, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 2551, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 2774, Dt8(->upvalue[0]), LJ_TFUNC);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2795, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 2807);
+ } else {
+ dasm_put(Dst, 2817);
+ }
+ dasm_put(Dst, 2824, 1+3, 1+1, 8+FRAME_PCALL, DISPATCH_GL(hookmask), HOOK_ACTIVE_SHIFT, 2+1, LJ_TFUNC);
+ dasm_put(Dst, 2888, LJ_TFUNC, 16+FRAME_PCALL, 1+1, LJ_TTHREAD, Dt1(->cframe), Dt1(->status), LUA_YIELD, Dt1(->top));
+ dasm_put(Dst, 2976, Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP);
+ dasm_put(Dst, 3077, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack), LJ_TTRUE, FRAME_TYPE);
+ dasm_put(Dst, 3191, LJ_TFALSE, Dt1(->top), Dt1(->top), 1+2, Dt1(->top), Dt1(->base), Dt8(->upvalue[0].gcr), Dt1(->cframe));
+ dasm_put(Dst, 3289, Dt1(->status), LUA_YIELD, Dt1(->top), Dt1(->base), Dt1(->maxstack), Dt1(->top), Dt1(->base), Dt1(->top));
+ dasm_put(Dst, 3355, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), LUA_YIELD, Dt1(->base), Dt1(->top), Dt1(->top), Dt1(->maxstack));
+ dasm_put(Dst, 3456, FRAME_TYPE, Dt1(->top), Dt1(->base), Dt1(->cframe), CFRAME_RESUME);
+ dasm_put(Dst, 3569, Dt1(->base), Dt1(->top), Dt1(->cframe), LUA_YIELD, Dt1(->status));
+ if (!LJ_DUALNUM) {
+ dasm_put(Dst, 3595);
+ }
+ if (sse) {
+ dasm_put(Dst, 3598);
+ }
+ dasm_put(Dst, 3613, 1+1);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3624, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3704, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 3714);
+ } else {
+ dasm_put(Dst, 3750);
+ }
+ dasm_put(Dst, 3767, 1+1, FRAME_TYPE, LJ_TNIL);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3859, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3704, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 3881);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3890);
+ }
+ dasm_put(Dst, 2283);
+ } else {
+ dasm_put(Dst, 3924);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3930);
+ if (cmov) {
+ dasm_put(Dst, 3953);
+ } else {
+ dasm_put(Dst, 3959);
+ }
+ dasm_put(Dst, 3966);
+ } else {
+ dasm_put(Dst, 2290);
+ }
+ }
+ dasm_put(Dst, 3983);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3859, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 3704, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 3986);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3890);
+ }
+ dasm_put(Dst, 2283);
+ } else {
+ dasm_put(Dst, 3995);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3930);
+ if (cmov) {
+ dasm_put(Dst, 3953);
+ } else {
+ dasm_put(Dst, 3959);
+ }
+ dasm_put(Dst, 3966);
+ } else {
+ dasm_put(Dst, 2290);
+ }
+ }
+ if (sse) {
+ dasm_put(Dst, 4001, 1+1, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 4030, 1+1, LJ_TISNUM);
+ }
+ dasm_put(Dst, 4059, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1);
+ dasm_put(Dst, 4128, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1);
+ dasm_put(Dst, 4185, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1);
+ dasm_put(Dst, 4248, LJ_TISNUM, 1+1, LJ_TISNUM, 1+1, LJ_TISNUM);
+ dasm_put(Dst, 4338);
+ if (sse) {
+ dasm_put(Dst, 4350, 1+1, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 4381, 1+1, LJ_TISNUM);
+ }
+ dasm_put(Dst, 4406);
+ if (sse) {
+ dasm_put(Dst, 4420, 1+1, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 4451, 1+1, LJ_TISNUM);
+ }
+ dasm_put(Dst, 4476);
+ if (sse) {
+ dasm_put(Dst, 4490, 1+1, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 4521, 1+1, LJ_TISNUM);
+ }
+ dasm_put(Dst, 4546);
+ if (sse) {
+ dasm_put(Dst, 4562, 1+1, LJ_TISNUM, Dt8(->upvalue[0]));
+ } else {
+ dasm_put(Dst, 4601, 1+1, LJ_TISNUM, Dt8(->upvalue[0]));
+ }
+ dasm_put(Dst, 4634, 2+1, LJ_TISNUM, LJ_TISNUM, 2+1, LJ_TISNUM, LJ_TISNUM);
+ dasm_put(Dst, 4699, 1+1, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4798);
+ } else {
+ dasm_put(Dst, 4804);
+ }
+ dasm_put(Dst, 4813);
+ if (sse) {
+ dasm_put(Dst, 4838);
+ } else {
+ dasm_put(Dst, 4844);
+ }
+ dasm_put(Dst, 4847, 1+2);
+ if (sse) {
+ dasm_put(Dst, 4856);
+ } else {
+ dasm_put(Dst, 4864);
+ }
+ dasm_put(Dst, 4872);
+ if (sse) {
+ dasm_put(Dst, 4875);
+ } else {
+ dasm_put(Dst, 4907);
+ }
+ dasm_put(Dst, 4926);
+ if (sse) {
+ dasm_put(Dst, 4942, 1+1, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 4967, 1+1, LJ_TISNUM);
+ }
+ dasm_put(Dst, 4989);
+ if (sse) {
+ dasm_put(Dst, 5011);
+ } else {
+ dasm_put(Dst, 5037);
+ }
+ dasm_put(Dst, 5054, 1+2);
+ if (sse) {
+ dasm_put(Dst, 5094);
+ } else {
+ dasm_put(Dst, 5102);
+ }
+ dasm_put(Dst, 5112, 2+1, LJ_TISNUM, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 5164, 2+1, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 5211, 2+1, LJ_TISNUM, LJ_TISNUM);
+ }
+ dasm_put(Dst, 5252, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5265, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4798);
+ } else {
+ dasm_put(Dst, 4804);
+ }
+ dasm_put(Dst, 5315);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 5326, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5347);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ dasm_put(Dst, 5368);
+ } else {
+ dasm_put(Dst, 5393, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5411);
+ } else {
+ dasm_put(Dst, 5429);
+ }
+ dasm_put(Dst, 5434);
+ if (cmov) {
+ dasm_put(Dst, 5444);
+ } else {
+ dasm_put(Dst, 5452);
+ }
+ dasm_put(Dst, 5385);
+ }
+ dasm_put(Dst, 5473, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5486, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 4798);
+ } else {
+ dasm_put(Dst, 4804);
+ }
+ dasm_put(Dst, 5315);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 5326, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5347);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ dasm_put(Dst, 5536);
+ } else {
+ dasm_put(Dst, 5393, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5411);
+ } else {
+ dasm_put(Dst, 5429);
+ }
+ dasm_put(Dst, 5434);
+ if (cmov) {
+ dasm_put(Dst, 5561);
+ } else {
+ dasm_put(Dst, 5569);
+ }
+ dasm_put(Dst, 5385);
+ }
+ if (!sse) {
+ dasm_put(Dst, 5590);
+ }
+ dasm_put(Dst, 5599, 1+1, LJ_TSTR);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5621, Dt5(->len));
+ } else if (sse) {
+ dasm_put(Dst, 5629, Dt5(->len));
+ } else {
+ dasm_put(Dst, 5640, Dt5(->len));
+ }
+ dasm_put(Dst, 5648, 1+1, LJ_TSTR, Dt5(->len), Dt5([1]));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 3978);
+ } else if (sse) {
+ dasm_put(Dst, 5686);
+ } else {
+ dasm_put(Dst, 5696);
+ }
+ dasm_put(Dst, 5709, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5740);
+ } else if (sse) {
+ dasm_put(Dst, 5763);
+ } else {
+ dasm_put(Dst, 5789);
+ }
+ dasm_put(Dst, 5813, Dt1(->base), Dt1(->base), LJ_TSTR, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), 1+2, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5916);
+ } else if (sse) {
+ dasm_put(Dst, 5928);
+ } else {
+ dasm_put(Dst, 5943);
+ }
+ dasm_put(Dst, 5955, LJ_TSTR, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 2546);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ dasm_put(Dst, 5972, Dt5(->len));
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 5982);
+ } else if (sse) {
+ dasm_put(Dst, 5986);
+ } else {
+ dasm_put(Dst, 5993);
+ }
+ dasm_put(Dst, 6005, sizeof(GCstr)-1);
+ dasm_put(Dst, 6080, 2+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold));
+ dasm_put(Dst, 6139, LJ_TSTR, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6156);
+ } else if (sse) {
+ dasm_put(Dst, 6164);
+ } else {
+ dasm_put(Dst, 6175);
+ }
+ dasm_put(Dst, 6191, Dt5(->len), DISPATCH_GL(tmpbuf.sz), Dt5([1]), DISPATCH_GL(tmpbuf.buf), DISPATCH_GL(tmpbuf.buf), 1+1);
+ dasm_put(Dst, 6256, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 6319, 1+1, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz));
+ dasm_put(Dst, 6390, sizeof(GCstr), DISPATCH_GL(tmpbuf.buf), 1+1);
+ dasm_put(Dst, 6475, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), LJ_TSTR, Dt5(->len), DISPATCH_GL(tmpbuf.sz), sizeof(GCstr), DISPATCH_GL(tmpbuf.buf));
+ dasm_put(Dst, 6545, 1+1, LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6613);
+ } else if (sse) {
+ dasm_put(Dst, 6620);
+ } else {
+ dasm_put(Dst, 6630);
+ }
+ dasm_put(Dst, 6641, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6657);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6674);
+ } else {
+ dasm_put(Dst, 6703);
+ }
+ dasm_put(Dst, 111);
+ if (LJ_DUALNUM || sse) {
+ if (!sse) {
+ dasm_put(Dst, 6721);
+ }
+ dasm_put(Dst, 6725);
+ } else {
+ dasm_put(Dst, 6633);
+ }
+ dasm_put(Dst, 6730, 1+1);
+ if (sse) {
+ dasm_put(Dst, 6741);
+ } else {
+ dasm_put(Dst, 6756);
+ }
+ dasm_put(Dst, 2250, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6782);
+ } else {
+ dasm_put(Dst, 6797);
+ }
+ dasm_put(Dst, 6810, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6835);
+ } else {
+ dasm_put(Dst, 6855);
+ }
+ if (sse) {
+ dasm_put(Dst, 6860);
+ } else {
+ dasm_put(Dst, 6877);
+ }
+ dasm_put(Dst, 6890, 1+1);
+ if (sse) {
+ dasm_put(Dst, 6741);
+ } else {
+ dasm_put(Dst, 6756);
+ }
+ dasm_put(Dst, 2250, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6782);
+ } else {
+ dasm_put(Dst, 6797);
+ }
+ dasm_put(Dst, 6810, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6908);
+ } else {
+ dasm_put(Dst, 6855);
+ }
+ if (sse) {
+ dasm_put(Dst, 6928);
+ } else {
+ dasm_put(Dst, 6945);
+ }
+ dasm_put(Dst, 6958, 1+1);
+ if (sse) {
+ dasm_put(Dst, 6741);
+ } else {
+ dasm_put(Dst, 6756);
+ }
+ dasm_put(Dst, 2250, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6782);
+ } else {
+ dasm_put(Dst, 6797);
+ }
+ dasm_put(Dst, 6810, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6976);
+ } else {
+ dasm_put(Dst, 6855);
+ }
+ if (sse) {
+ dasm_put(Dst, 6996);
+ } else {
+ dasm_put(Dst, 7013);
+ }
+ dasm_put(Dst, 7026, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6674);
+ } else {
+ dasm_put(Dst, 7049);
+ }
+ dasm_put(Dst, 7070, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6674);
+ } else {
+ dasm_put(Dst, 7049);
+ }
+ dasm_put(Dst, 7094);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6725);
+ } else if (sse) {
+ dasm_put(Dst, 7100);
+ } else {
+ dasm_put(Dst, 7112);
+ }
+ dasm_put(Dst, 7125);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7136, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6674);
+ } else {
+ dasm_put(Dst, 7049);
+ }
+ dasm_put(Dst, 7152, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 7167, 2+1, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 7239, 2+1, LJ_TISNUM, LJ_TISNUM);
+ }
+ dasm_put(Dst, 7303);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7310, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6674);
+ } else {
+ dasm_put(Dst, 7049);
+ }
+ dasm_put(Dst, 7152, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 7326, 2+1, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 7398, 2+1, LJ_TISNUM, LJ_TISNUM);
+ }
+ dasm_put(Dst, 7462);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7470, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6674);
+ } else {
+ dasm_put(Dst, 7049);
+ }
+ dasm_put(Dst, 7152, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 7486, 2+1, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 7558, 2+1, LJ_TISNUM, LJ_TISNUM);
+ }
+ dasm_put(Dst, 7622);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7630, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6674);
+ } else {
+ dasm_put(Dst, 7049);
+ }
+ dasm_put(Dst, 7152, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 7646, 2+1, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 7718, 2+1, LJ_TISNUM, LJ_TISNUM);
+ }
+ dasm_put(Dst, 7782);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 7789, 1+1, LJ_TISNUM);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 6765);
+ } else {
+ dasm_put(Dst, 2273);
+ }
+ if (sse) {
+ dasm_put(Dst, 6674);
+ } else {
+ dasm_put(Dst, 7049);
+ }
+ dasm_put(Dst, 7152, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 7805, 2+1, LJ_TISNUM, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 7877, 2+1, LJ_TISNUM, LJ_TISNUM);
+ }
+ dasm_put(Dst, 7941, 1+2, 1+1, Dt1(->base), 8*LUA_MINSTACK, Dt1(->top), Dt1(->maxstack), Dt8(->f), Dt1(->base));
+ dasm_put(Dst, 8017, Dt1(->top), Dt7(->pc), FRAME_TYPE, LUA_MINSTACK, Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 8141, Dt1(->top), Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 8179, DISPATCH_GL(hookmask), HOOK_VMEVENT, HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount));
+#endif
+ dasm_put(Dst, 8210, DISPATCH_GL(hookmask), HOOK_ACTIVE, DISPATCH_GL(hookmask), HOOK_ACTIVE, LUA_MASKLINE|LUA_MASKCOUNT, DISPATCH_GL(hookcount), LUA_MASKLINE);
+ dasm_put(Dst, 8261, Dt1(->base), Dt1(->base), GG_DISP2STATIC);
+#if LJ_HASJIT
+ dasm_put(Dst, 8327, Dt7(->pc), PC2PROTO(framesize), Dt1(->base), Dt1(->top), GG_DISP2J, DISPATCH_J(L));
+#endif
+ dasm_put(Dst, 8373);
+#if LJ_HASJIT
+ dasm_put(Dst, 8205);
+#endif
+ dasm_put(Dst, 8380);
+#if LJ_HASJIT
+ dasm_put(Dst, 8383);
+#endif
+ dasm_put(Dst, 8393, Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 8427);
+#endif
+ dasm_put(Dst, 8432, Dt1(->base), Dt1(->top));
+#if LJ_HASJIT
+ dasm_put(Dst, 8461, DISPATCH_GL(vmstate), DISPATCH_GL(vmstate), ~LJ_VMST_EXIT, DISPATCH_J(exitno), DISPATCH_J(parent), 8*8+16, DISPATCH_GL(jit_L), DISPATCH_GL(jit_base), DISPATCH_J(L), DISPATCH_GL(jit_L), Dt1(->base), GG_DISP2J, Dt1(->cframe), CFRAME_RAWMASK, CFRAME_OFS_L, Dt1(->base), CFRAME_OFS_PC);
+#endif
+ dasm_put(Dst, 8604);
+#if LJ_HASJIT
+ dasm_put(Dst, 8607, Dt7(->pc), PC2PROTO(k), DISPATCH_GL(jit_L), DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, BC_FUNCF);
+#endif
+ dasm_put(Dst, 8685);
+ if (!sse) {
+ dasm_put(Dst, 8688);
+ }
+ dasm_put(Dst, 8733);
+ if (!sse) {
+ dasm_put(Dst, 8835);
+ }
+ dasm_put(Dst, 8880);
+ if (!sse) {
+ dasm_put(Dst, 8982);
+ }
+ dasm_put(Dst, 9021);
+ if (sse) {
+ dasm_put(Dst, 9126);
+ } else {
+ dasm_put(Dst, 9256);
+ }
+ dasm_put(Dst, 9303);
+ if (!sse) {
+ dasm_put(Dst, 9377);
+ if (cmov) {
+ dasm_put(Dst, 9388);
+ } else {
+ dasm_put(Dst, 9392);
+ }
+ dasm_put(Dst, 9399);
+ dasm_put(Dst, 9473);
+ dasm_put(Dst, 9573);
+ if (cmov) {
+ dasm_put(Dst, 9576);
+ } else {
+ dasm_put(Dst, 9580);
+ }
+ dasm_put(Dst, 9587);
+ if (cmov) {
+ dasm_put(Dst, 9388);
+ } else {
+ dasm_put(Dst, 9392);
+ }
+ dasm_put(Dst, 9605);
+ } else {
+ dasm_put(Dst, 9684);
+ }
+ dasm_put(Dst, 9687);
+ dasm_put(Dst, 9772);
+ dasm_put(Dst, 9902);
+ dasm_put(Dst, 10108);
+#if LJ_HASJIT
+ if (sse) {
+ dasm_put(Dst, 10115);
+ dasm_put(Dst, 10172);
+ dasm_put(Dst, 10263);
+ } else {
+ dasm_put(Dst, 10305);
+ dasm_put(Dst, 10397);
+ }
+ dasm_put(Dst, 10443);
+#endif
+ dasm_put(Dst, 10447);
+ if (sse) {
+ dasm_put(Dst, 10450);
+ dasm_put(Dst, 10555);
+ dasm_put(Dst, 10638);
+ } else {
+ dasm_put(Dst, 10710);
+ dasm_put(Dst, 10793);
+ if (cmov) {
+ dasm_put(Dst, 10848);
+ } else {
+ dasm_put(Dst, 10867);
+ }
+ dasm_put(Dst, 10443);
+ }
+ dasm_put(Dst, 10908);
+#ifdef LUA_USE_ASSERT
+ dasm_put(Dst, 10445);
+#endif
+ dasm_put(Dst, 10964);
+#if LJ_HASFFI
+#define DtE(_V) (int)(ptrdiff_t)&(((CTState *)0)_V)
+ dasm_put(Dst, 10968, GG_G2DISP, Dt2(->ctype_state), DtE(->cb.slot), CFRAME_SIZE+16, DtE(->cb.gpr[0]), DtE(->cb.gpr[1]), DtE(->cb.stack), CFRAME_SIZE+12, CFRAME_SIZE+8, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), Dt1(->top), Dt7(->pc));
+#endif
+ dasm_put(Dst, 11078);
+#if LJ_HASFFI
+ dasm_put(Dst, 11081, DISPATCH_GL(ctype_state), DtE(->L), Dt1(->base), Dt1(->top), DtE(->cb.gpr[0]), DtE(->cb.gpr[1]), DtE(->cb.gpr[2]), DtE(->cb.fpr[0].d), DtE(->cb.fpr[0].f), Dt1(->top));
+#endif
+ dasm_put(Dst, 11170);
+#if LJ_HASFFI
+#define DtF(_V) (int)(ptrdiff_t)&(((CCallState *)0)_V)
+ dasm_put(Dst, 11173, DtF(->spadj));
+#if LJ_TARGET_WINDOWS
+ dasm_put(Dst, 11183, DtF(->spadj));
+#endif
+ dasm_put(Dst, 11187, DtF(->nsp), offsetof(CCallState, stack), DtF(->gpr[0]), DtF(->gpr[1]), DtF(->func), DtF(->gpr[0]), DtF(->gpr[1]), DtF(->resx87), DtF(->fpr[0].d[0]));
+ dasm_put(Dst, 11257, DtF(->fpr[0].f[0]));
+#if LJ_TARGET_WINDOWS
+ dasm_put(Dst, 11263, DtF(->spadj));
+#endif
+ dasm_put(Dst, 11267);
+#endif
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop, int cmov, int sse)
+{
+ int vk = 0;
+ dasm_put(Dst, 11274, defop);
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 11276, LJ_TISNUM, LJ_TISNUM);
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 11306);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 11311);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 11316);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 11321);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 11326, -BCBIAS_J*4, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 11379);
+ } else {
+ dasm_put(Dst, 11390);
+ }
+ dasm_put(Dst, 11401);
+ if (sse) {
+ dasm_put(Dst, 11408);
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 11428);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 11433);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 11438);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 11443);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 11448);
+ } else {
+ dasm_put(Dst, 11453);
+ }
+ } else {
+ dasm_put(Dst, 11461, LJ_TISNUM, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 11482);
+ } else {
+ dasm_put(Dst, 11503);
+ if (cmov) {
+ dasm_put(Dst, 3953);
+ } else {
+ dasm_put(Dst, 3959);
+ }
+ }
+ if (LJ_DUALNUM) {
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 11428);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 11433);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 11438);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 11443);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 11448);
+ } else {
+ switch (op) {
+ case BC_ISLT:
+ dasm_put(Dst, 752);
+ break;
+ case BC_ISGE:
+ dasm_put(Dst, 11519);
+ break;
+ case BC_ISLE:
+ dasm_put(Dst, 11524);
+ break;
+ case BC_ISGT:
+ dasm_put(Dst, 11529);
+ break;
+ default: break; /* Shut up GCC. */
+ }
+ dasm_put(Dst, 11534, -BCBIAS_J*4);
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ dasm_put(Dst, 11565);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 11573, LJ_TISNUM, LJ_TISNUM);
+ if (vk) {
+ dasm_put(Dst, 11598);
+ } else {
+ dasm_put(Dst, 11603);
+ }
+ dasm_put(Dst, 11608, -BCBIAS_J*4, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 11659);
+ } else {
+ dasm_put(Dst, 11666);
+ }
+ dasm_put(Dst, 11670);
+ if (sse) {
+ dasm_put(Dst, 11681);
+ } else {
+ dasm_put(Dst, 11693);
+ }
+ dasm_put(Dst, 11700);
+ } else {
+ dasm_put(Dst, 11705, LJ_TISNUM, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 11724);
+ } else {
+ dasm_put(Dst, 11742);
+ if (cmov) {
+ dasm_put(Dst, 3953);
+ } else {
+ dasm_put(Dst, 3959);
+ }
+ }
+ iseqne_fp:
+ if (vk) {
+ dasm_put(Dst, 11755);
+ } else {
+ dasm_put(Dst, 11764);
+ }
+ iseqne_end:
+ if (vk) {
+ dasm_put(Dst, 11773, -BCBIAS_J*4);
+ if (!LJ_HASFFI) {
+ dasm_put(Dst, 4853);
+ }
+ } else {
+ if (!LJ_HASFFI) {
+ dasm_put(Dst, 4853);
+ }
+ dasm_put(Dst, 11788, -BCBIAS_J*4);
+ }
+ if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV ||
+ op == BC_ISEQN || op == BC_ISNEN)) {
+ dasm_put(Dst, 11803);
+ } else {
+ dasm_put(Dst, 11546);
+ }
+ if (op == BC_ISEQV || op == BC_ISNEV) {
+ dasm_put(Dst, 11808);
+ if (LJ_HASFFI) {
+ dasm_put(Dst, 11811, LJ_TCDATA, LJ_TCDATA);
+ }
+ dasm_put(Dst, 11830, LJ_TISPRI, LJ_TISTABUD, Dt6(->metatable), Dt6(->nomm), 1<len), LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 12450, Dt5(->len));
+ } else {
+ dasm_put(Dst, 12468, Dt5(->len));
+ }
+ dasm_put(Dst, 12477, LJ_TTAB);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 12511, Dt6(->metatable));
+#endif
+ dasm_put(Dst, 12525);
+ if (LJ_DUALNUM) {
+ } else if (sse) {
+ dasm_put(Dst, 12534);
+ } else {
+ dasm_put(Dst, 12540);
+ }
+ dasm_put(Dst, 12547);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ dasm_put(Dst, 12560, Dt6(->nomm), 1<base), Dt1(->base));
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ dasm_put(Dst, 13397, LJ_TSTR);
+ break;
+ case BC_KCDATA:
+#if LJ_HASFFI
+ dasm_put(Dst, 13397, LJ_TCDATA);
+#endif
+ break;
+ case BC_KSHORT:
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 13430, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 13442);
+ } else {
+ dasm_put(Dst, 13457);
+ }
+ dasm_put(Dst, 11546);
+ break;
+ case BC_KNUM:
+ if (sse) {
+ dasm_put(Dst, 13465);
+ } else {
+ dasm_put(Dst, 13478);
+ }
+ dasm_put(Dst, 11546);
+ break;
+ case BC_KPRI:
+ dasm_put(Dst, 13485);
+ break;
+ case BC_KNIL:
+ dasm_put(Dst, 13511, LJ_TNIL);
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ dasm_put(Dst, 13557, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_USETV:
+#define TV2MARKOFS \
+ ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv))
+ dasm_put(Dst, 13601, offsetof(GCfuncL, uvptr), DtA(->closed), DtA(->v), TV2MARKOFS, LJ_GC_BLACK, LJ_TISGCV, LJ_TISNUM - LJ_TISGCV, Dt4(->gch.marked), LJ_GC_WHITES, GG_DISP2G);
+ dasm_put(Dst, 13691);
+ break;
+#undef TV2MARKOFS
+ case BC_USETS:
+ dasm_put(Dst, 13703, offsetof(GCfuncL, uvptr), DtA(->v), LJ_TSTR, DtA(->marked), LJ_GC_BLACK, Dt4(->gch.marked), LJ_GC_WHITES, DtA(->closed), GG_DISP2G);
+ break;
+ case BC_USETN:
+ dasm_put(Dst, 13794);
+ if (sse) {
+ dasm_put(Dst, 13799);
+ } else {
+ dasm_put(Dst, 12056);
+ }
+ dasm_put(Dst, 13806, offsetof(GCfuncL, uvptr), DtA(->v));
+ if (sse) {
+ dasm_put(Dst, 13815);
+ } else {
+ dasm_put(Dst, 13821);
+ }
+ dasm_put(Dst, 11546);
+ break;
+ case BC_USETP:
+ dasm_put(Dst, 13824, offsetof(GCfuncL, uvptr), DtA(->v));
+ break;
+ case BC_UCLO:
+ dasm_put(Dst, 13861, -BCBIAS_J*4, Dt1(->openupval), Dt1(->base), Dt1(->base));
+ break;
+
+ case BC_FNEW:
+ dasm_put(Dst, 13915, Dt1(->base), Dt1(->base), LJ_TFUNC);
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ dasm_put(Dst, 13986, Dt1(->base), DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), LJ_TTAB);
+ break;
+ case BC_TDUP:
+ dasm_put(Dst, 14112, DISPATCH_GL(gc.total), DISPATCH_GL(gc.threshold), Dt1(->base), Dt1(->base), LJ_TTAB);
+ break;
+
+ case BC_GGET:
+ dasm_put(Dst, 14204, Dt7(->env));
+ break;
+ case BC_GSET:
+ dasm_put(Dst, 14222, Dt7(->env));
+ break;
+
+ case BC_TGETV:
+ dasm_put(Dst, 14240, LJ_TTAB);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 14263, LJ_TISNUM);
+ } else {
+ dasm_put(Dst, 14277, LJ_TISNUM);
+ if (sse) {
+ dasm_put(Dst, 14288);
+ } else {
+ dasm_put(Dst, 14309);
+ if (cmov) {
+ dasm_put(Dst, 3953);
+ } else {
+ dasm_put(Dst, 3959);
+ }
+ dasm_put(Dst, 2689);
+ }
+ dasm_put(Dst, 14319);
+ }
+ dasm_put(Dst, 14324, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<hmask), Dt5(->hash), sizeof(Node), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL);
+ dasm_put(Dst, 14525, LJ_TNIL, DtB(->next), Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), LJ_TNIL, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+ case BC_TSETS:
+ dasm_put(Dst, 14880, LJ_TTAB, Dt6(->hmask), Dt5(->hash), sizeof(Node), Dt6(->nomm), Dt6(->node), DtB(->key.it), LJ_TSTR, DtB(->key.gcr), LJ_TNIL);
+ dasm_put(Dst, 14955, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable), Dt6(->metatable), Dt6(->nomm), 1<next));
+ dasm_put(Dst, 15047, Dt6(->metatable), Dt6(->nomm), 1<base), Dt1(->base), Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+ case BC_TSETB:
+ dasm_put(Dst, 15143, LJ_TTAB, Dt6(->asize), Dt6(->array), LJ_TNIL, Dt6(->marked), LJ_GC_BLACK, Dt6(->metatable));
+ dasm_put(Dst, 15241, Dt6(->metatable), Dt6(->nomm), 1<marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+
+ case BC_TSETM:
+ dasm_put(Dst, 15287, Dt6(->marked), LJ_GC_BLACK, Dt6(->asize), Dt6(->array), Dt1(->base), Dt1(->base));
+ dasm_put(Dst, 15436, Dt6(->marked), (uint8_t)~LJ_GC_BLACK, DISPATCH_GL(gc.grayagain), DISPATCH_GL(gc.grayagain), Dt6(->gclist));
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALL: case BC_CALLM:
+ dasm_put(Dst, 12580);
+ if (op == BC_CALLM) {
+ dasm_put(Dst, 15454);
+ }
+ dasm_put(Dst, 15459, LJ_TFUNC, Dt7(->pc));
+ break;
+
+ case BC_CALLMT:
+ dasm_put(Dst, 15454);
+ break;
+ case BC_CALLT:
+ dasm_put(Dst, 15500, LJ_TFUNC, FRAME_TYPE, Dt7(->ffid), Dt7(->pc));
+ dasm_put(Dst, 15618, FRAME_TYPE, Dt7(->pc), PC2PROTO(k), FRAME_VARG, FRAME_TYPEP, FRAME_VARG);
+ break;
+
+ case BC_ITERC:
+ dasm_put(Dst, 15688, LJ_TFUNC, 2+1, Dt7(->pc));
+ break;
+
+ case BC_ITERN:
+#if LJ_HASJIT
+#endif
+ dasm_put(Dst, 15768, Dt6(->asize), Dt6(->array), LJ_TNIL);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 12441, LJ_TISNUM);
+ } else if (sse) {
+ dasm_put(Dst, 12534);
+ } else {
+ dasm_put(Dst, 15814);
+ }
+ dasm_put(Dst, 15820);
+ if (LJ_DUALNUM) {
+ } else if (sse) {
+ dasm_put(Dst, 12406);
+ } else {
+ dasm_put(Dst, 12418);
+ }
+ dasm_put(Dst, 15839, -BCBIAS_J*4);
+ if (!LJ_DUALNUM && !sse) {
+ dasm_put(Dst, 15890);
+ }
+ dasm_put(Dst, 15896, Dt6(->hmask), sizeof(Node), Dt6(->node), DtB(->val.it), LJ_TNIL, DtB(->key.gcr), DtB(->key.it), DtB(->val.gcr), DtB(->val.it));
+ dasm_put(Dst, 15971);
+ break;
+
+ case BC_ISNEXT:
+ dasm_put(Dst, 15979, LJ_TFUNC, LJ_TTAB, LJ_TNIL, Dt8(->ffid), FF_next_N, -BCBIAS_J*4, BC_JMP, -BCBIAS_J*4, BC_ITERC);
+ break;
+
+ case BC_VARG:
+ dasm_put(Dst, 16078, (8+FRAME_VARG), LJ_TNIL, Dt1(->maxstack));
+ dasm_put(Dst, 16242, Dt1(->base), Dt1(->top), Dt1(->base), Dt1(->top));
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ dasm_put(Dst, 15454);
+ break;
+
+ case BC_RET: case BC_RET0: case BC_RET1:
+ if (op != BC_RET0) {
+ dasm_put(Dst, 16313);
+ }
+ dasm_put(Dst, 16317, FRAME_TYPE);
+ switch (op) {
+ case BC_RET:
+ dasm_put(Dst, 16336);
+ break;
+ case BC_RET1:
+ dasm_put(Dst, 16394);
+ /* fallthrough */
+ case BC_RET0:
+ dasm_put(Dst, 16410);
+ default:
+ break;
+ }
+ dasm_put(Dst, 16421, Dt7(->pc), PC2PROTO(k));
+ if (op == BC_RET) {
+ dasm_put(Dst, 16463, LJ_TNIL);
+ } else {
+ dasm_put(Dst, 16472, LJ_TNIL);
+ }
+ dasm_put(Dst, 16479, -FRAME_VARG, FRAME_TYPEP);
+ if (op != BC_RET0) {
+ dasm_put(Dst, 16503);
+ }
+ dasm_put(Dst, 4937);
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+
+ case BC_FORL:
+#if LJ_HASJIT
+ dasm_put(Dst, 16507, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ dasm_put(Dst, 16528);
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 16532, LJ_TISNUM);
+ if (!vk) {
+ dasm_put(Dst, 16542, LJ_TISNUM, LJ_TISNUM);
+ } else {
+#ifdef LUA_USE_ASSERT
+ dasm_put(Dst, 16571, LJ_TISNUM, LJ_TISNUM);
+#endif
+ dasm_put(Dst, 16590);
+ }
+ dasm_put(Dst, 16609, LJ_TISNUM);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 16620, -BCBIAS_J*4);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 16634, -BCBIAS_J*4, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 16652, -BCBIAS_J*4);
+ } else {
+ dasm_put(Dst, 16644, BC_JLOOP);
+ }
+ dasm_put(Dst, 16666);
+ if (vk) {
+ dasm_put(Dst, 16689);
+ }
+ dasm_put(Dst, 16609, LJ_TISNUM);
+ if (op == BC_FORI) {
+ dasm_put(Dst, 16698);
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 16703, -BCBIAS_J*4, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ dasm_put(Dst, 16717);
+ } else {
+ dasm_put(Dst, 16713, BC_JLOOP);
+ }
+ dasm_put(Dst, 16722);
+ } else if (!vk) {
+ dasm_put(Dst, 16729, LJ_TISNUM);
+ }
+ if (!vk) {
+ dasm_put(Dst, 16735, LJ_TISNUM);
+ } else {
+#ifdef LUA_USE_ASSERT
+ dasm_put(Dst, 16749, LJ_TISNUM, LJ_TISNUM);
+#endif
+ }
+ dasm_put(Dst, 16768);
+ if (!vk) {
+ dasm_put(Dst, 16772, LJ_TISNUM);
+ }
+ if (sse) {
+ dasm_put(Dst, 16781);
+ if (vk) {
+ dasm_put(Dst, 16793);
+ } else {
+ dasm_put(Dst, 16812);
+ }
+ dasm_put(Dst, 16817);
+ } else {
+ dasm_put(Dst, 16830);
+ if (vk) {
+ dasm_put(Dst, 16836);
+ } else {
+ dasm_put(Dst, 16852);
+ }
+ dasm_put(Dst, 16860);
+ if (cmov) {
+ dasm_put(Dst, 3953);
+ } else {
+ dasm_put(Dst, 3959);
+ }
+ if (!cmov) {
+ dasm_put(Dst, 16865);
+ }
+ }
+ if (op == BC_FORI) {
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 16871);
+ } else {
+ dasm_put(Dst, 16876, -BCBIAS_J*4);
+ }
+ } else if (op == BC_JFORI) {
+ dasm_put(Dst, 16886, -BCBIAS_J*4, BC_JLOOP);
+ } else if (op == BC_IFORL) {
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 16900);
+ } else {
+ dasm_put(Dst, 16905, -BCBIAS_J*4);
+ }
+ } else {
+ dasm_put(Dst, 16896, BC_JLOOP);
+ }
+ if (LJ_DUALNUM) {
+ dasm_put(Dst, 11448);
+ } else {
+ dasm_put(Dst, 12184);
+ }
+ if (sse) {
+ dasm_put(Dst, 16915);
+ }
+ break;
+
+ case BC_ITERL:
+#if LJ_HASJIT
+ dasm_put(Dst, 16507, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ dasm_put(Dst, 16926, LJ_TNIL);
+ if (op == BC_JITERL) {
+ dasm_put(Dst, 16941, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 16955, -BCBIAS_J*4);
+ }
+ dasm_put(Dst, 11544);
+ break;
+
+ case BC_LOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 16507, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_LOOP);
+#endif
+ break;
+
+ case BC_ILOOP:
+ dasm_put(Dst, 11546);
+ break;
+
+ case BC_JLOOP:
+#if LJ_HASJIT
+ dasm_put(Dst, 16971, DISPATCH_J(trace), DtD(->mcode), DISPATCH_GL(jit_base), DISPATCH_GL(jit_L));
+#endif
+ break;
+
+ case BC_JMP:
+ dasm_put(Dst, 16994, -BCBIAS_J*4);
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ /*
+ ** Reminder: A function may be called with func/args above L->maxstack,
+ ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot,
+ ** too. This means all FUNC* ops (including fast functions) must check
+ ** for stack overflow _before_ adding more slots!
+ */
+
+ case BC_FUNCF:
+#if LJ_HASJIT
+ dasm_put(Dst, 17018, HOTCOUNT_PCMASK, GG_DISP2HOT, HOTCOUNT_CALL);
+#endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ dasm_put(Dst, 17039, -4+PC2PROTO(k), Dt1(->maxstack), -4+PC2PROTO(numparams));
+ if (op == BC_JFUNCF) {
+ dasm_put(Dst, 17069, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 11546);
+ }
+ dasm_put(Dst, 17078, LJ_TNIL);
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ dasm_put(Dst, 10445);
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ dasm_put(Dst, 17100, FRAME_VARG, Dt1(->maxstack), -4+PC2PROTO(numparams), LJ_TNIL);
+ if (op == BC_JFUNCV) {
+ dasm_put(Dst, 17069, BC_JLOOP);
+ } else {
+ dasm_put(Dst, 17191, -4+PC2PROTO(k));
+ }
+ dasm_put(Dst, 17213, LJ_TNIL);
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ dasm_put(Dst, 17235, Dt8(->f), Dt1(->base), 8*LUA_MINSTACK, Dt1(->maxstack), Dt1(->top));
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 17264);
+ } else {
+ dasm_put(Dst, 17268);
+ }
+ dasm_put(Dst, 17276, DISPATCH_GL(vmstate), ~LJ_VMST_C);
+ if (op == BC_FUNCC) {
+ dasm_put(Dst, 17285);
+ } else {
+ dasm_put(Dst, 17289, DISPATCH_GL(wrapf));
+ }
+ dasm_put(Dst, 17294, DISPATCH_GL(vmstate), ~LJ_VMST_INTERP, Dt1(->base), Dt1(->top));
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+ int cmov = 1;
+ int sse = 0;
+#ifdef LUAJIT_CPU_NOCMOV
+ cmov = 0;
+#endif
+#if defined(LUAJIT_CPU_SSE2) || defined(LJ_TARGET_X64)
+ sse = 1;
+#endif
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx, cmov, sse);
+
+ dasm_put(Dst, 17319);
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op, cmov, sse);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+#if LJ_64
+#define SZPTR "8"
+#define BSZPTR "3"
+#define REG_SP "0x7"
+#define REG_RA "0x10"
+#else
+#define SZPTR "4"
+#define BSZPTR "2"
+#define REG_SP "0x4"
+#define REG_RA "0x8"
+#endif
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad .Lbegin\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE0:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad lj_vm_ffi_call\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if (defined(__sun__) && defined(__svr4__)) || defined(__solaris_)
+ fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
+#else
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+#endif
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE2:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.section .eh_frame,\"dr\"\n");
+ fprintf(ctx->fp,
+ "\t.def %slj_err_unwind_dwarf; .scl 2; .type 32; .endef\n",
+ LJ_32 ? "_" : "");
+ fprintf(ctx->fp,
+ "Lframe1:\n"
+ "\t.long LECIE1-LSCIE1\n"
+ "LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zP\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 5\n" /* augmentation length */
+ "\t.byte 0x00\n" /* absptr */
+ "\t.long %slj_err_unwind_dwarf\n"
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ "LECIE1:\n\n", LJ_32 ? "_" : "");
+ fprintf(ctx->fp,
+ "LSFDE1:\n"
+ "\t.long LEFDE1-LASFDE1\n"
+ "LASFDE1:\n"
+ "\t.long LASFDE1-Lframe1\n"
+ "\t.long %slj_vm_asm_begin\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ "LEFDE1:\n\n", LJ_32 ? "_" : "", (int)ctx->codesz, CFRAME_SIZE);
+ break;
+ /* Mental note: never let Apple design an assembler.
+ ** Or a linker. Or a plastic case. But I digress.
+ */
+ case BUILD_machasm: {
+#if LJ_HASFFI
+ int fcsize = 0;
+#endif
+ int i;
+ fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
+ fprintf(ctx->fp,
+ "EH_frame1:\n"
+ "\t.set L$set$x,LECIEX-LSCIEX\n"
+ "\t.long L$set$x\n"
+ "LSCIEX:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zPR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 6\n" /* augmentation length */
+ "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
+#if LJ_64
+ "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEX:\n\n");
+ for (i = 0; i < ctx->nsym; i++) {
+ const char *name = ctx->sym[i].name;
+ int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs;
+ if (size == 0) continue;
+#if LJ_HASFFI
+ if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
+#endif
+ fprintf(ctx->fp,
+ "%s.eh:\n"
+ "LSFDE%d:\n"
+ "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
+ "\t.long L$set$%d\n"
+ "LASFDE%d:\n"
+ "\t.long LASFDE%d-EH_frame1\n"
+ "\t.long %s-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/
+ "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDE%d:\n\n",
+ name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i);
+ }
+#if LJ_HASFFI
+ if (fcsize) {
+ fprintf(ctx->fp,
+ "EH_frame2:\n"
+ "\t.set L$set$y,LECIEY-LSCIEY\n"
+ "\t.long L$set$y\n"
+ "LSCIEY:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 1\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEY:\n\n");
+ fprintf(ctx->fp,
+ "_lj_vm_ffi_call.eh:\n"
+ "LSFDEY:\n"
+ "\t.set L$set$yy,LEFDEY-LASFDEY\n"
+ "\t.long L$set$yy\n"
+ "LASFDEY:\n"
+ "\t.long LASFDEY-EH_frame2\n"
+ "\t.long _lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.byte 8\n" /* def_cfa_offset */
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/
+ "\t.byte 0xd\n\t.uleb128 0x4\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDEY:\n\n", fcsize);
+ }
+#endif
+#if LJ_64
+ fprintf(ctx->fp, "\t.subsections_via_symbols\n");
+#else
+ fprintf(ctx->fp,
+ "\t.non_lazy_symbol_pointer\n"
+ "L_lj_err_unwind_dwarf$non_lazy_ptr:\n"
+ ".indirect_symbol _lj_err_unwind_dwarf\n"
+ ".long 0\n");
+#endif
+ }
+ break;
+ default: /* Difficult for other modes. */
+ break;
+ }
+}
+
diff --git a/src/lua/src/lauxlib.h b/src/LuaJIT/src/lauxlib.h
similarity index 94%
rename from src/lua/src/lauxlib.h
rename to src/LuaJIT/src/lauxlib.h
index 34258235d..505a9f522 100644
--- a/src/lua/src/lauxlib.h
+++ b/src/LuaJIT/src/lauxlib.h
@@ -15,31 +15,18 @@
#include "lua.h"
-#if defined(LUA_COMPAT_GETN)
-LUALIB_API int (luaL_getn) (lua_State *L, int t);
-LUALIB_API void (luaL_setn) (lua_State *L, int t, int n);
-#else
#define luaL_getn(L,i) ((int)lua_objlen(L, i))
#define luaL_setn(L,i,j) ((void)0) /* no op! */
-#endif
-
-#if defined(LUA_COMPAT_OPENLIB)
-#define luaI_openlib luaL_openlib
-#endif
-
/* extra error code for `luaL_load' */
#define LUA_ERRFILE (LUA_ERRERR+1)
-
typedef struct luaL_Reg {
const char *name;
lua_CFunction func;
} luaL_Reg;
-
-
-LUALIB_API void (luaI_openlib) (lua_State *L, const char *libname,
+LUALIB_API void (luaL_openlib) (lua_State *L, const char *libname,
const luaL_Reg *l, int nup);
LUALIB_API void (luaL_register) (lua_State *L, const char *libname,
const luaL_Reg *l);
@@ -170,5 +157,3 @@ LUALIB_API void (luaL_pushresult) (luaL_Buffer *B);
#define luaL_reg luaL_Reg
#endif
-
-
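Illustrative sketch (not part of the patch): the lauxlib.h change above drops the luaI_openlib alias and exposes luaL_openlib directly. A minimal example of registering a C module through this interface, with a made-up module name "demo":

    #include "lua.h"
    #include "lauxlib.h"

    /* demo.add(a, b) -> a + b */
    static int demo_add(lua_State *L)
    {
      lua_pushnumber(L, luaL_checknumber(L, 1) + luaL_checknumber(L, 2));
      return 1;  /* one result */
    }

    static const luaL_Reg demo_funcs[] = {
      { "add", demo_add },
      { NULL, NULL }
    };

    int luaopen_demo(lua_State *L)
    {
      /* Same as luaL_openlib(L, "demo", demo_funcs, 0): creates or reuses
      ** the module table and stores it in _LOADED["demo"] and _G.demo. */
      luaL_register(L, "demo", demo_funcs);
      return 1;  /* return the module table */
    }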
diff --git a/src/LuaJIT/src/lib_aux.c b/src/LuaJIT/src/lib_aux.c
new file mode 100644
index 000000000..104888bd3
--- /dev/null
+++ b/src/LuaJIT/src/lib_aux.c
@@ -0,0 +1,375 @@
+/*
+** Auxiliary library for the Lua/C API.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major parts taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#define lib_aux_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_state.h"
+#include "lj_lib.h"
+
+/* -- Module registration ------------------------------------------------- */
+
+LUALIB_API const char *luaL_findtable(lua_State *L, int idx,
+ const char *fname, int szhint)
+{
+ const char *e;
+ lua_pushvalue(L, idx);
+ do {
+ e = strchr(fname, '.');
+ if (e == NULL) e = fname + strlen(fname);
+ lua_pushlstring(L, fname, (size_t)(e - fname));
+ lua_rawget(L, -2);
+ if (lua_isnil(L, -1)) { /* no such field? */
+ lua_pop(L, 1); /* remove this nil */
+ lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */
+ lua_pushlstring(L, fname, (size_t)(e - fname));
+ lua_pushvalue(L, -2);
+ lua_settable(L, -4); /* set new table into field */
+ } else if (!lua_istable(L, -1)) { /* field has a non-table value? */
+ lua_pop(L, 2); /* remove table and value */
+ return fname; /* return problematic part of the name */
+ }
+ lua_remove(L, -2); /* remove previous table */
+ fname = e + 1;
+ } while (*e == '.');
+ return NULL;
+}
+
+static int libsize(const luaL_Reg *l)
+{
+ int size = 0;
+ for (; l->name; l++) size++;
+ return size;
+}
+
+LUALIB_API void luaL_openlib(lua_State *L, const char *libname,
+ const luaL_Reg *l, int nup)
+{
+ lj_lib_checkfpu(L);
+ if (libname) {
+ int size = libsize(l);
+ /* check whether lib already exists */
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_getfield(L, -1, libname); /* get _LOADED[libname] */
+ if (!lua_istable(L, -1)) { /* not found? */
+ lua_pop(L, 1); /* remove previous result */
+ /* try global variable (and create one if it does not exist) */
+ if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL)
+ lj_err_callerv(L, LJ_ERR_BADMODN, libname);
+ lua_pushvalue(L, -1);
+ lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
+ }
+ lua_remove(L, -2); /* remove _LOADED table */
+ lua_insert(L, -(nup+1)); /* move library table to below upvalues */
+ }
+ for (; l->name; l++) {
+ int i;
+ for (i = 0; i < nup; i++) /* copy upvalues to the top */
+ lua_pushvalue(L, -nup);
+ lua_pushcclosure(L, l->func, nup);
+ lua_setfield(L, -(nup+2), l->name);
+ }
+ lua_pop(L, nup); /* remove upvalues */
+}
+
+LUALIB_API void luaL_register(lua_State *L, const char *libname,
+ const luaL_Reg *l)
+{
+ luaL_openlib(L, libname, l, 0);
+}
+
+LUALIB_API const char *luaL_gsub(lua_State *L, const char *s,
+ const char *p, const char *r)
+{
+ const char *wild;
+ size_t l = strlen(p);
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ while ((wild = strstr(s, p)) != NULL) {
+ luaL_addlstring(&b, s, (size_t)(wild - s)); /* push prefix */
+ luaL_addstring(&b, r); /* push replacement in place of pattern */
+ s = wild + l; /* continue after `p' */
+ }
+ luaL_addstring(&b, s); /* push last suffix */
+ luaL_pushresult(&b);
+ return lua_tostring(L, -1);
+}
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+#define bufflen(B) ((size_t)((B)->p - (B)->buffer))
+#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B)))
+
+static int emptybuffer(luaL_Buffer *B)
+{
+ size_t l = bufflen(B);
+ if (l == 0)
+ return 0; /* put nothing on stack */
+ lua_pushlstring(B->L, B->buffer, l);
+ B->p = B->buffer;
+ B->lvl++;
+ return 1;
+}
+
+static void adjuststack(luaL_Buffer *B)
+{
+ if (B->lvl > 1) {
+ lua_State *L = B->L;
+ int toget = 1; /* number of levels to concat */
+ size_t toplen = lua_strlen(L, -1);
+ do {
+ size_t l = lua_strlen(L, -(toget+1));
+ if (!(B->lvl - toget + 1 >= LUA_MINSTACK/2 || toplen > l))
+ break;
+ toplen += l;
+ toget++;
+ } while (toget < B->lvl);
+ lua_concat(L, toget);
+ B->lvl = B->lvl - toget + 1;
+ }
+}
+
+LUALIB_API char *luaL_prepbuffer(luaL_Buffer *B)
+{
+ if (emptybuffer(B))
+ adjuststack(B);
+ return B->buffer;
+}
+
+LUALIB_API void luaL_addlstring(luaL_Buffer *B, const char *s, size_t l)
+{
+ while (l--)
+ luaL_addchar(B, *s++);
+}
+
+LUALIB_API void luaL_addstring(luaL_Buffer *B, const char *s)
+{
+ luaL_addlstring(B, s, strlen(s));
+}
+
+LUALIB_API void luaL_pushresult(luaL_Buffer *B)
+{
+ emptybuffer(B);
+ lua_concat(B->L, B->lvl);
+ B->lvl = 1;
+}
+
+LUALIB_API void luaL_addvalue(luaL_Buffer *B)
+{
+ lua_State *L = B->L;
+ size_t vl;
+ const char *s = lua_tolstring(L, -1, &vl);
+ if (vl <= bufffree(B)) { /* fit into buffer? */
+ memcpy(B->p, s, vl); /* put it there */
+ B->p += vl;
+ lua_pop(L, 1); /* remove from stack */
+ } else {
+ if (emptybuffer(B))
+ lua_insert(L, -2); /* put buffer before new value */
+ B->lvl++; /* add new value into B stack */
+ adjuststack(B);
+ }
+}
+
+LUALIB_API void luaL_buffinit(lua_State *L, luaL_Buffer *B)
+{
+ B->L = L;
+ B->p = B->buffer;
+ B->lvl = 0;
+}
+
+/* -- Reference management ------------------------------------------------ */
+
+#define FREELIST_REF 0
+
+/* Convert a stack index to an absolute index. */
+#define abs_index(L, i) \
+ ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : lua_gettop(L) + (i) + 1)
+
+LUALIB_API int luaL_ref(lua_State *L, int t)
+{
+ int ref;
+ t = abs_index(L, t);
+ if (lua_isnil(L, -1)) {
+ lua_pop(L, 1); /* remove from stack */
+ return LUA_REFNIL; /* `nil' has a unique fixed reference */
+ }
+ lua_rawgeti(L, t, FREELIST_REF); /* get first free element */
+ ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */
+ lua_pop(L, 1); /* remove it from stack */
+ if (ref != 0) { /* any free element? */
+ lua_rawgeti(L, t, ref); /* remove it from list */
+ lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */
+ } else { /* no free elements */
+ ref = (int)lua_objlen(L, t);
+ ref++; /* create new reference */
+ }
+ lua_rawseti(L, t, ref);
+ return ref;
+}
+
+LUALIB_API void luaL_unref(lua_State *L, int t, int ref)
+{
+ if (ref >= 0) {
+ t = abs_index(L, t);
+ lua_rawgeti(L, t, FREELIST_REF);
+ lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */
+ lua_pushinteger(L, ref);
+ lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */
+ }
+}
+
+/* -- Load Lua code ------------------------------------------------------- */
+
+typedef struct FileReaderCtx {
+ FILE *fp;
+ char buf[LUAL_BUFFERSIZE];
+} FileReaderCtx;
+
+static const char *reader_file(lua_State *L, void *ud, size_t *size)
+{
+ FileReaderCtx *ctx = (FileReaderCtx *)ud;
+ UNUSED(L);
+ if (feof(ctx->fp)) return NULL;
+ *size = fread(ctx->buf, 1, sizeof(ctx->buf), ctx->fp);
+ return *size > 0 ? ctx->buf : NULL;
+}
+
+LUALIB_API int luaL_loadfile(lua_State *L, const char *filename)
+{
+ FileReaderCtx ctx;
+ int status;
+ const char *chunkname;
+ if (filename) {
+ ctx.fp = fopen(filename, "rb");
+ if (ctx.fp == NULL) {
+ lua_pushfstring(L, "cannot open %s: %s", filename, strerror(errno));
+ return LUA_ERRFILE;
+ }
+ chunkname = lua_pushfstring(L, "@%s", filename);
+ } else {
+ ctx.fp = stdin;
+ chunkname = "=stdin";
+ }
+ status = lua_load(L, reader_file, &ctx, chunkname);
+ if (ferror(ctx.fp)) {
+ L->top -= filename ? 2 : 1;
+ lua_pushfstring(L, "cannot read %s: %s", chunkname+1, strerror(errno));
+ if (filename)
+ fclose(ctx.fp);
+ return LUA_ERRFILE;
+ }
+ if (filename) {
+ L->top--;
+ copyTV(L, L->top-1, L->top);
+ fclose(ctx.fp);
+ }
+ return status;
+}
+
+typedef struct StringReaderCtx {
+ const char *str;
+ size_t size;
+} StringReaderCtx;
+
+static const char *reader_string(lua_State *L, void *ud, size_t *size)
+{
+ StringReaderCtx *ctx = (StringReaderCtx *)ud;
+ UNUSED(L);
+ if (ctx->size == 0) return NULL;
+ *size = ctx->size;
+ ctx->size = 0;
+ return ctx->str;
+}
+
+LUALIB_API int luaL_loadbuffer(lua_State *L, const char *buf, size_t size,
+ const char *name)
+{
+ StringReaderCtx ctx;
+ ctx.str = buf;
+ ctx.size = size;
+ return lua_load(L, reader_string, &ctx, name);
+}
+
+LUALIB_API int luaL_loadstring(lua_State *L, const char *s)
+{
+ return luaL_loadbuffer(L, s, strlen(s), s);
+}
+
+/* -- Default allocator and panic function -------------------------------- */
+
+static int panic(lua_State *L)
+{
+ fprintf(stderr, "PANIC: unprotected error in call to Lua API (%s)\n",
+ lua_tostring(L, -1));
+ return 0;
+}
+
+#ifdef LUAJIT_USE_SYSMALLOC
+
+#if LJ_64
+#error "Must use builtin allocator for 64 bit target"
+#endif
+
+static void *mem_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
+{
+ (void)ud;
+ (void)osize;
+ if (nsize == 0) {
+ free(ptr);
+ return NULL;
+ } else {
+ return realloc(ptr, nsize);
+ }
+}
+
+LUALIB_API lua_State *luaL_newstate(void)
+{
+ lua_State *L = lua_newstate(mem_alloc, NULL);
+ if (L) G(L)->panic = panic;
+ return L;
+}
+
+#else
+
+#include "lj_alloc.h"
+
+LUALIB_API lua_State *luaL_newstate(void)
+{
+ lua_State *L;
+ void *ud = lj_alloc_create();
+ if (ud == NULL) return NULL;
+#if LJ_64
+ L = lj_state_newstate(lj_alloc_f, ud);
+#else
+ L = lua_newstate(lj_alloc_f, ud);
+#endif
+ if (L) G(L)->panic = panic;
+ return L;
+}
+
+#if LJ_64
+LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
+{
+ UNUSED(f); UNUSED(ud);
+ fprintf(stderr, "Must use luaL_newstate() for 64 bit target\n");
+ return NULL;
+}
+#endif
+
+#endif
+
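Illustrative sketch (not part of the patch): how the reference management implemented above is typically used from C. luaL_ref pops the value on top of the stack, stores it in the given table (here the registry) and recycles slots through the FREELIST_REF free list; luaL_unref puts a slot back on that list.

    #include "lua.h"
    #include "lauxlib.h"

    /* Keep the value on top of the stack alive across C calls. */
    static int anchor_top(lua_State *L)
    {
      return luaL_ref(L, LUA_REGISTRYINDEX);   /* pops the value, returns a handle */
    }

    static void push_anchor(lua_State *L, int ref)
    {
      lua_rawgeti(L, LUA_REGISTRYINDEX, ref);  /* pushes registry[ref] */
    }

    static void drop_anchor(lua_State *L, int ref)
    {
      luaL_unref(L, LUA_REGISTRYINDEX, ref);   /* slot returns to the free list */
    }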
diff --git a/src/LuaJIT/src/lib_base.c b/src/LuaJIT/src/lib_base.c
new file mode 100644
index 000000000..927c1bca9
--- /dev/null
+++ b/src/LuaJIT/src/lib_base.c
@@ -0,0 +1,652 @@
+/*
+** Base and coroutine library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+
+#define lib_base_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#endif
+#include "lj_bc.h"
+#include "lj_ff.h"
+#include "lj_dispatch.h"
+#include "lj_char.h"
+#include "lj_lib.h"
+
+/* -- Base library: checks ------------------------------------------------ */
+
+#define LJLIB_MODULE_base
+
+LJLIB_ASM(assert) LJLIB_REC(.)
+{
+ GCstr *s;
+ lj_lib_checkany(L, 1);
+ s = lj_lib_optstr(L, 2);
+ if (s)
+ lj_err_callermsg(L, strdata(s));
+ else
+ lj_err_caller(L, LJ_ERR_ASSERT);
+ return FFH_UNREACHABLE;
+}
+
+/* ORDER LJ_T */
+LJLIB_PUSH("nil")
+LJLIB_PUSH("boolean")
+LJLIB_PUSH(top-1) /* boolean */
+LJLIB_PUSH("userdata")
+LJLIB_PUSH("string")
+LJLIB_PUSH("upval")
+LJLIB_PUSH("thread")
+LJLIB_PUSH("proto")
+LJLIB_PUSH("function")
+LJLIB_PUSH("trace")
+LJLIB_PUSH("cdata")
+LJLIB_PUSH("table")
+LJLIB_PUSH(top-9) /* userdata */
+LJLIB_PUSH("number")
+LJLIB_ASM_(type) LJLIB_REC(.)
+/* Recycle the lj_lib_checkany(L, 1) from assert. */
+
+/* -- Base library: getters and setters ----------------------------------- */
+
+LJLIB_ASM_(getmetatable) LJLIB_REC(.)
+/* Recycle the lj_lib_checkany(L, 1) from assert. */
+
+LJLIB_ASM(setmetatable) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCtab *mt = lj_lib_checktabornil(L, 2);
+ if (!tvisnil(lj_meta_lookup(L, L->base, MM_metatable)))
+ lj_err_caller(L, LJ_ERR_PROTMT);
+ setgcref(t->metatable, obj2gco(mt));
+ if (mt) { lj_gc_objbarriert(L, t, mt); }
+ settabV(L, L->base-1, t);
+ return FFH_RES(1);
+}
+
+LJLIB_CF(getfenv)
+{
+ GCfunc *fn;
+ cTValue *o = L->base;
+ if (!(o < L->top && tvisfunc(o))) {
+ int level = lj_lib_optint(L, 1, 1);
+ o = lj_debug_frame(L, level, &level);
+ if (o == NULL)
+ lj_err_arg(L, 1, LJ_ERR_INVLVL);
+ }
+ fn = &gcval(o)->fn;
+ settabV(L, L->top++, isluafunc(fn) ? tabref(fn->l.env) : tabref(L->env));
+ return 1;
+}
+
+LJLIB_CF(setfenv)
+{
+ GCfunc *fn;
+ GCtab *t = lj_lib_checktab(L, 2);
+ cTValue *o = L->base;
+ if (!(o < L->top && tvisfunc(o))) {
+ int level = lj_lib_checkint(L, 1);
+ if (level == 0) {
+ /* NOBARRIER: A thread (i.e. L) is never black. */
+ setgcref(L->env, obj2gco(t));
+ return 0;
+ }
+ o = lj_debug_frame(L, level, &level);
+ if (o == NULL)
+ lj_err_arg(L, 1, LJ_ERR_INVLVL);
+ }
+ fn = &gcval(o)->fn;
+ if (!isluafunc(fn))
+ lj_err_caller(L, LJ_ERR_SETFENV);
+ setgcref(fn->l.env, obj2gco(t));
+ lj_gc_objbarrier(L, obj2gco(fn), t);
+ setfuncV(L, L->top++, fn);
+ return 1;
+}
+
+LJLIB_ASM(rawget) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkany(L, 2);
+ return FFH_UNREACHABLE;
+}
+
+LJLIB_CF(rawset) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkany(L, 2);
+ L->top = 1+lj_lib_checkany(L, 3);
+ lua_rawset(L, 1);
+ return 1;
+}
+
+LJLIB_CF(rawequal) LJLIB_REC(.)
+{
+ cTValue *o1 = lj_lib_checkany(L, 1);
+ cTValue *o2 = lj_lib_checkany(L, 2);
+ setboolV(L->top-1, lj_obj_equal(o1, o2));
+ return 1;
+}
+
+LJLIB_CF(unpack)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t n, i = lj_lib_optint(L, 2, 1);
+ int32_t e = (L->base+3-1 < L->top && !tvisnil(L->base+3-1)) ?
+ lj_lib_checkint(L, 3) : (int32_t)lj_tab_len(t);
+ if (i > e) return 0;
+ n = e - i + 1;
+ if (n <= 0 || !lua_checkstack(L, n))
+ lj_err_caller(L, LJ_ERR_UNPACK);
+ do {
+ cTValue *tv = lj_tab_getint(t, i);
+ if (tv) {
+ copyTV(L, L->top++, tv);
+ } else {
+ setnilV(L->top++);
+ }
+ } while (i++ < e);
+ return n;
+}
+
+LJLIB_CF(select) LJLIB_REC(.)
+{
+ int32_t n = (int32_t)(L->top - L->base);
+ if (n >= 1 && tvisstr(L->base) && *strVdata(L->base) == '#') {
+ setintV(L->top-1, n-1);
+ return 1;
+ } else {
+ int32_t i = lj_lib_checkint(L, 1);
+ if (i < 0) i = n + i; else if (i > n) i = n;
+ if (i < 1)
+ lj_err_arg(L, 1, LJ_ERR_IDXRNG);
+ return n - i;
+ }
+}
+
+/* -- Base library: conversions ------------------------------------------- */
+
+LJLIB_ASM(tonumber) LJLIB_REC(.)
+{
+ int32_t base = lj_lib_optint(L, 2, 10);
+ if (base == 10) {
+ TValue *o = lj_lib_checkany(L, 1);
+ if (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o))) {
+ copyTV(L, L->base-1, o);
+ return FFH_RES(1);
+ }
+#if LJ_HASFFI
+ if (tviscdata(o)) {
+ CTState *cts = ctype_cts(L);
+ CType *ct = lj_ctype_rawref(cts, cdataV(o)->typeid);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) {
+ if (LJ_DUALNUM && ctype_isinteger_or_bool(ct->info) &&
+ ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) {
+ int32_t i;
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0);
+ setintV(L->base-1, i);
+ return FFH_RES(1);
+ }
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE),
+ (uint8_t *)&(L->base-1)->n, o, 0);
+ return FFH_RES(1);
+ }
+ }
+#endif
+ } else {
+ const char *p = strdata(lj_lib_checkstr(L, 1));
+ char *ep;
+ unsigned long ul;
+ if (base < 2 || base > 36)
+ lj_err_arg(L, 2, LJ_ERR_BASERNG);
+ ul = strtoul(p, &ep, base);
+ if (p != ep) {
+ while (lj_char_isspace((unsigned char)(*ep))) ep++;
+ if (*ep == '\0') {
+ if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u))
+ setintV(L->base-1, (int32_t)ul);
+ else
+ setnumV(L->base-1, (lua_Number)ul);
+ return FFH_RES(1);
+ }
+ }
+ }
+ setnilV(L->base-1);
+ return FFH_RES(1);
+}
+
+LJLIB_PUSH("nil")
+LJLIB_PUSH("false")
+LJLIB_PUSH("true")
+LJLIB_ASM(tostring) LJLIB_REC(.)
+{
+ TValue *o = lj_lib_checkany(L, 1);
+ cTValue *mo;
+ L->top = o+1; /* Only keep one argument. */
+ if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
+ copyTV(L, L->base-1, mo); /* Replace callable. */
+ return FFH_TAILCALL;
+ } else {
+ GCstr *s;
+ if (tvisnumber(o)) {
+ s = lj_str_fromnumber(L, o);
+ } else if (tvispri(o)) {
+ s = strV(lj_lib_upvalue(L, -(int32_t)itype(o)));
+ } else {
+ if (tvisfunc(o) && isffunc(funcV(o)))
+ lua_pushfstring(L, "function: fast#%d", funcV(o)->c.ffid);
+ else
+ lua_pushfstring(L, "%s: %p", typename(o), lua_topointer(L, 1));
+ /* Note: lua_pushfstring calls the GC which may invalidate o. */
+ s = strV(L->top-1);
+ }
+ setstrV(L, L->base-1, s);
+ return FFH_RES(1);
+ }
+}
+
+/* -- Base library: iterators --------------------------------------------- */
+
+/* This solves a circular dependency problem -- change FF_next_N as needed. */
+LJ_STATIC_ASSERT((int)FF_next == FF_next_N);
+
+LJLIB_ASM(next)
+{
+ lj_lib_checktab(L, 1);
+ return FFH_UNREACHABLE;
+}
+
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+static int ffh_pairs(lua_State *L, MMS mm)
+{
+ TValue *o = lj_lib_checkany(L, 1);
+ cTValue *mo = lj_meta_lookup(L, o, mm);
+ if (!tvisnil(mo)) {
+ L->top = o+1; /* Only keep one argument. */
+ copyTV(L, L->base-1, mo); /* Replace callable. */
+ return FFH_TAILCALL;
+ } else {
+ if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE);
+ setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1)));
+ if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0);
+ return FFH_RES(3);
+ }
+}
+#else
+#define ffh_pairs(L, mm) (lj_lib_checktab(L, 1), FFH_UNREACHABLE)
+#endif
+
+LJLIB_PUSH(lastcl)
+LJLIB_ASM(pairs)
+{
+ return ffh_pairs(L, MM_pairs);
+}
+
+LJLIB_NOREGUV LJLIB_ASM(ipairs_aux) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkint(L, 2);
+ return FFH_UNREACHABLE;
+}
+
+LJLIB_PUSH(lastcl)
+LJLIB_ASM(ipairs) LJLIB_REC(.)
+{
+ return ffh_pairs(L, MM_ipairs);
+}
+
+/* -- Base library: throw and catch errors -------------------------------- */
+
+LJLIB_CF(error)
+{
+ int32_t level = lj_lib_optint(L, 2, 1);
+ lua_settop(L, 1);
+ if (lua_isstring(L, 1) && level > 0) {
+ luaL_where(L, level);
+ lua_pushvalue(L, 1);
+ lua_concat(L, 2);
+ }
+ return lua_error(L);
+}
+
+LJLIB_ASM(pcall) LJLIB_REC(.)
+{
+ lj_lib_checkany(L, 1);
+ lj_lib_checkfunc(L, 2); /* For xpcall only. */
+ return FFH_UNREACHABLE;
+}
+LJLIB_ASM_(xpcall) LJLIB_REC(.)
+
+/* -- Base library: load Lua code ----------------------------------------- */
+
+static int load_aux(lua_State *L, int status)
+{
+ if (status == 0)
+ return 1;
+ copyTV(L, L->top, L->top-1);
+ setnilV(L->top-1);
+ L->top++;
+ return 2;
+}
+
+LJLIB_CF(loadstring)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ GCstr *name = lj_lib_optstr(L, 2);
+ return load_aux(L,
+ luaL_loadbuffer(L, strdata(s), s->len, strdata(name ? name : s)));
+}
+
+LJLIB_CF(loadfile)
+{
+ GCstr *fname = lj_lib_optstr(L, 1);
+ return load_aux(L, luaL_loadfile(L, fname ? strdata(fname) : NULL));
+}
+
+static const char *reader_func(lua_State *L, void *ud, size_t *size)
+{
+ UNUSED(ud);
+ luaL_checkstack(L, 2, "too many nested functions");
+ copyTV(L, L->top++, L->base);
+ lua_call(L, 0, 1); /* Call user-supplied function. */
+ L->top--;
+ if (tvisnil(L->top)) {
+ *size = 0;
+ return NULL;
+ } else if (tvisstr(L->top) || tvisnumber(L->top)) {
+ copyTV(L, L->base+2, L->top); /* Anchor string in reserved stack slot. */
+ return lua_tolstring(L, 3, size);
+ } else {
+ lj_err_caller(L, LJ_ERR_RDRSTR);
+ return NULL;
+ }
+}
+
+LJLIB_CF(load)
+{
+ GCstr *name;
+ if (L->base < L->top && (tvisstr(L->base) || tvisnumber(L->base)))
+ return lj_cf_loadstring(L);
+ lj_lib_checkfunc(L, 1);
+ name = lj_lib_optstr(L, 2);
+ lua_settop(L, 3); /* Reserve a slot for the string from the reader. */
+ return load_aux(L,
+ lua_load(L, reader_func, NULL, name ? strdata(name) : "=(load)"));
+}
+
+LJLIB_CF(dofile)
+{
+ GCstr *fname = lj_lib_optstr(L, 1);
+ setnilV(L->top);
+ L->top = L->base+1;
+ if (luaL_loadfile(L, fname ? strdata(fname) : NULL) != 0)
+ lua_error(L);
+ lua_call(L, 0, LUA_MULTRET);
+ return (int)(L->top - L->base) - 1;
+}
+
+/* -- Base library: GC control -------------------------------------------- */
+
+LJLIB_CF(gcinfo)
+{
+ setintV(L->top++, (G(L)->gc.total >> 10));
+ return 1;
+}
+
+LJLIB_CF(collectgarbage)
+{
+ int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */
+ "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul");
+ int32_t data = lj_lib_optint(L, 2, 0);
+ if (opt == LUA_GCCOUNT) {
+ setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0);
+ } else {
+ int res = lua_gc(L, opt, data);
+ if (opt == LUA_GCSTEP)
+ setboolV(L->top, res);
+ else
+ setintV(L->top, res);
+ }
+ L->top++;
+ return 1;
+}
+
+/* -- Base library: miscellaneous functions ------------------------------- */
+
+LJLIB_PUSH(top-2) /* Upvalue holds weak table. */
+LJLIB_CF(newproxy)
+{
+ lua_settop(L, 1);
+ lua_newuserdata(L, 0);
+ if (lua_toboolean(L, 1) == 0) { /* newproxy(): without metatable. */
+ return 1;
+ } else if (lua_isboolean(L, 1)) { /* newproxy(true): with metatable. */
+ lua_newtable(L);
+ lua_pushvalue(L, -1);
+ lua_pushboolean(L, 1);
+ lua_rawset(L, lua_upvalueindex(1)); /* Remember mt in weak table. */
+ } else { /* newproxy(proxy): inherit metatable. */
+ int validproxy = 0;
+ if (lua_getmetatable(L, 1)) {
+ lua_rawget(L, lua_upvalueindex(1));
+ validproxy = lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ }
+ if (!validproxy)
+ lj_err_arg(L, 1, LJ_ERR_NOPROXY);
+ lua_getmetatable(L, 1);
+ }
+ lua_setmetatable(L, 2);
+ return 1;
+}
+
+LJLIB_PUSH("tostring")
+LJLIB_CF(print)
+{
+ ptrdiff_t i, nargs = L->top - L->base;
+ cTValue *tv = lj_tab_getstr(tabref(L->env), strV(lj_lib_upvalue(L, 1)));
+ int shortcut;
+ if (tv && !tvisnil(tv)) {
+ copyTV(L, L->top++, tv);
+ } else {
+ setstrV(L, L->top++, strV(lj_lib_upvalue(L, 1)));
+ lua_gettable(L, LUA_GLOBALSINDEX);
+ tv = L->top-1;
+ }
+ shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring);
+ for (i = 0; i < nargs; i++) {
+ const char *str;
+ size_t size;
+ cTValue *o = &L->base[i];
+ if (shortcut && tvisstr(o)) {
+ str = strVdata(o);
+ size = strV(o)->len;
+ } else if (shortcut && tvisint(o)) {
+ char buf[LJ_STR_INTBUF];
+ char *p = lj_str_bufint(buf, intV(o));
+ size = (size_t)(buf+LJ_STR_INTBUF-p);
+ str = p;
+ } else if (shortcut && tvisnum(o)) {
+ char buf[LJ_STR_NUMBUF];
+ size = lj_str_bufnum(buf, o);
+ str = buf;
+ } else {
+ copyTV(L, L->top+1, o);
+ copyTV(L, L->top, L->top-1);
+ L->top += 2;
+ lua_call(L, 1, 1);
+ str = lua_tolstring(L, -1, &size);
+ if (!str)
+ lj_err_caller(L, LJ_ERR_PRTOSTR);
+ L->top--;
+ }
+ if (i)
+ putchar('\t');
+ fwrite(str, 1, size, stdout);
+ }
+ putchar('\n');
+ return 0;
+}
+
+LJLIB_PUSH(top-3)
+LJLIB_SET(_VERSION)
+
+#include "lj_libdef.h"
+
+/* -- Coroutine library --------------------------------------------------- */
+
+#define LJLIB_MODULE_coroutine
+
+LJLIB_CF(coroutine_status)
+{
+ const char *s;
+ lua_State *co;
+ if (!(L->top > L->base && tvisthread(L->base)))
+ lj_err_arg(L, 1, LJ_ERR_NOCORO);
+ co = threadV(L->base);
+ if (co == L) s = "running";
+ else if (co->status == LUA_YIELD) s = "suspended";
+ else if (co->status != 0) s = "dead";
+ else if (co->base > tvref(co->stack)+1) s = "normal";
+ else if (co->top == co->base) s = "dead";
+ else s = "suspended";
+ lua_pushstring(L, s);
+ return 1;
+}
+
+LJLIB_CF(coroutine_running)
+{
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ int ismain = lua_pushthread(L);
+ setboolV(L->top++, ismain);
+ return 2;
+#else
+ if (lua_pushthread(L))
+ setnilV(L->top++);
+ return 1;
+#endif
+}
+
+LJLIB_CF(coroutine_create)
+{
+ lua_State *L1 = lua_newthread(L);
+ if (!(L->base < L->top && tvisfunc(L->base)))
+ lj_err_argt(L, 1, LUA_TFUNCTION);
+ setfuncV(L, L1->top++, funcV(L->base));
+ return 1;
+}
+
+LJLIB_ASM(coroutine_yield)
+{
+ lj_err_caller(L, LJ_ERR_CYIELD);
+ return FFH_UNREACHABLE;
+}
+
+static int ffh_resume(lua_State *L, lua_State *co, int wrap)
+{
+ if (co->cframe != NULL || co->status > LUA_YIELD ||
+ (co->status == 0 && co->top == co->base)) {
+ ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD;
+ if (wrap) lj_err_caller(L, em);
+ setboolV(L->base-1, 0);
+ setstrV(L, L->base, lj_err_str(L, em));
+ return FFH_RES(2);
+ }
+ lj_state_growstack(co, (MSize)(L->top - L->base));
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(coroutine_resume)
+{
+ if (!(L->top > L->base && tvisthread(L->base)))
+ lj_err_arg(L, 1, LJ_ERR_NOCORO);
+ return ffh_resume(L, threadV(L->base), 0);
+}
+
+LJLIB_NOREG LJLIB_ASM(coroutine_wrap_aux)
+{
+ return ffh_resume(L, threadV(lj_lib_upvalue(L, 1)), 1);
+}
+
+/* Inline declarations. */
+LJ_ASMF void lj_ff_coroutine_wrap_aux(void);
+#if !(LJ_TARGET_MIPS && defined(ljamalg_c))
+LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
+ lua_State *co);
+#endif
+
+/* Error handler, called from assembler VM. */
+void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, lua_State *co)
+{
+ co->top--; copyTV(L, L->top, co->top); L->top++;
+ if (tvisstr(L->top-1))
+ lj_err_callermsg(L, strVdata(L->top-1));
+ else
+ lj_err_run(L);
+}
+
+/* Forward declaration. */
+static void setpc_wrap_aux(lua_State *L, GCfunc *fn);
+
+LJLIB_CF(coroutine_wrap)
+{
+ lj_cf_coroutine_create(L);
+ lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1);
+ setpc_wrap_aux(L, funcV(L->top-1));
+ return 1;
+}
+
+#include "lj_libdef.h"
+
+/* Fix the PC of wrap_aux. Really ugly workaround. */
+static void setpc_wrap_aux(lua_State *L, GCfunc *fn)
+{
+ setmref(fn->c.pc, &L2GG(L)->bcff[lj_lib_init_coroutine[1]+2]);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void newproxy_weaktable(lua_State *L)
+{
+ /* NOBARRIER: The table is new (marked white). */
+ GCtab *t = lj_tab_new(L, 0, 1);
+ settabV(L, L->top++, t);
+ setgcref(t->metatable, obj2gco(t));
+ setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
+ lj_str_newlit(L, "kv"));
+  t->nomm = (uint8_t)(~(1u<<MM_mode));
+}
+
+LUALIB_API int luaopen_base(lua_State *L)
+{
+  /* NOBARRIER: Table and value are the same. */
+  GCtab *env = tabref(L->env);
+  settabV(L, lj_tab_setstr(L, env, lj_str_newlit(L, "_G")), env);
+ lua_pushliteral(L, LUA_VERSION); /* top-3. */
+ newproxy_weaktable(L); /* top-2. */
+ LJ_LIB_REG(L, "_G", base);
+ LJ_LIB_REG(L, LUA_COLIBNAME, coroutine);
+ return 2;
+}
+
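Illustrative sketch (not part of the patch): a minimal host program exercising luaL_newstate from lib_aux.c and the base/coroutine libraries registered by luaopen_base above; it relies only on the standard Lua 5.1 C API.

    #include <stdio.h>

    #include "lua.h"
    #include "lauxlib.h"
    #include "lualib.h"

    int main(void)
    {
      lua_State *L = luaL_newstate();   /* allocator and panic handler from lib_aux.c */
      if (L == NULL) return 1;
      luaL_openlibs(L);                 /* registers base, coroutine, bit, debug, ... */
      if (luaL_dostring(L,
          "print(_VERSION); print(coroutine.status(coroutine.create(print)))") != 0) {
        fprintf(stderr, "%s\n", lua_tostring(L, -1));
        lua_close(L);
        return 1;
      }
      lua_close(L);
      return 0;
    }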
diff --git a/src/LuaJIT/src/lib_bit.c b/src/LuaJIT/src/lib_bit.c
new file mode 100644
index 000000000..f44ed37fc
--- /dev/null
+++ b/src/LuaJIT/src/lib_bit.c
@@ -0,0 +1,74 @@
+/*
+** Bit manipulation library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_bit_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_bit
+
+LJLIB_ASM(bit_tobit) LJLIB_REC(bit_unary IR_TOBIT)
+{
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(bit_bnot) LJLIB_REC(bit_unary IR_BNOT)
+LJLIB_ASM_(bit_bswap) LJLIB_REC(bit_unary IR_BSWAP)
+
+LJLIB_ASM(bit_lshift) LJLIB_REC(bit_shift IR_BSHL)
+{
+ lj_lib_checknumber(L, 1);
+ lj_lib_checkbit(L, 2);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(bit_rshift) LJLIB_REC(bit_shift IR_BSHR)
+LJLIB_ASM_(bit_arshift) LJLIB_REC(bit_shift IR_BSAR)
+LJLIB_ASM_(bit_rol) LJLIB_REC(bit_shift IR_BROL)
+LJLIB_ASM_(bit_ror) LJLIB_REC(bit_shift IR_BROR)
+
+LJLIB_ASM(bit_band) LJLIB_REC(bit_nary IR_BAND)
+{
+ int i = 0;
+ do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(bit_bor) LJLIB_REC(bit_nary IR_BOR)
+LJLIB_ASM_(bit_bxor) LJLIB_REC(bit_nary IR_BXOR)
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(bit_tohex)
+{
+ uint32_t b = (uint32_t)lj_lib_checkbit(L, 1);
+ int32_t i, n = L->base+1 >= L->top ? 8 : lj_lib_checkbit(L, 2);
+ const char *hexdigits = "0123456789abcdef";
+ char buf[8];
+ if (n < 0) { n = -n; hexdigits = "0123456789ABCDEF"; }
+ if (n > 8) n = 8;
+ for (i = n; --i >= 0; ) { buf[i] = hexdigits[b & 15]; b >>= 4; }
+ lua_pushlstring(L, buf, (size_t)n);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_bit(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_BITLIBNAME, bit);
+ return 1;
+}
+
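Illustrative sketch (not part of the patch): driving the bit library registered above from a host program. bit.tohex(255, 4) yields "00ff" and bit.band(0xff, 0x0f) yields 15.

    #include <stdio.h>

    #include "lua.h"
    #include "lauxlib.h"
    #include "lualib.h"

    int main(void)
    {
      lua_State *L = luaL_newstate();
      if (L == NULL) return 1;
      luaL_openlibs(L);  /* in LuaJIT this also runs luaopen_bit */
      if (luaL_dostring(L,
          "print(bit.tohex(255, 4)); print(bit.band(0xff, 0x0f))") != 0)
        fprintf(stderr, "%s\n", lua_tostring(L, -1));
      lua_close(L);
      return 0;
    }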
diff --git a/src/LuaJIT/src/lib_debug.c b/src/LuaJIT/src/lib_debug.c
new file mode 100644
index 000000000..f9b7a4782
--- /dev/null
+++ b/src/LuaJIT/src/lib_debug.c
@@ -0,0 +1,366 @@
+/*
+** Debug library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_debug_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_debug
+
+LJLIB_CF(debug_getregistry)
+{
+ copyTV(L, L->top++, registry(L));
+ return 1;
+}
+
+LJLIB_CF(debug_getmetatable)
+{
+ lj_lib_checkany(L, 1);
+ if (!lua_getmetatable(L, 1)) {
+ setnilV(L->top-1);
+ }
+ return 1;
+}
+
+LJLIB_CF(debug_setmetatable)
+{
+ lj_lib_checktabornil(L, 2);
+ L->top = L->base+2;
+ lua_setmetatable(L, 1);
+ setboolV(L->top-1, 1);
+ return 1;
+}
+
+LJLIB_CF(debug_getfenv)
+{
+ lj_lib_checkany(L, 1);
+ lua_getfenv(L, 1);
+ return 1;
+}
+
+LJLIB_CF(debug_setfenv)
+{
+ lj_lib_checktab(L, 2);
+ L->top = L->base+2;
+ if (!lua_setfenv(L, 1))
+ lj_err_caller(L, LJ_ERR_SETFENV);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void settabss(lua_State *L, const char *i, const char *v)
+{
+ lua_pushstring(L, v);
+ lua_setfield(L, -2, i);
+}
+
+static void settabsi(lua_State *L, const char *i, int v)
+{
+ lua_pushinteger(L, v);
+ lua_setfield(L, -2, i);
+}
+
+static lua_State *getthread(lua_State *L, int *arg)
+{
+ if (L->base < L->top && tvisthread(L->base)) {
+ *arg = 1;
+ return threadV(L->base);
+ } else {
+ *arg = 0;
+ return L;
+ }
+}
+
+static void treatstackoption(lua_State *L, lua_State *L1, const char *fname)
+{
+ if (L == L1) {
+ lua_pushvalue(L, -2);
+ lua_remove(L, -3);
+ }
+ else
+ lua_xmove(L1, L, 1);
+ lua_setfield(L, -2, fname);
+}
+
+LJLIB_CF(debug_getinfo)
+{
+ lua_Debug ar;
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ const char *options = luaL_optstring(L, arg+2, "flnSu");
+ if (lua_isnumber(L, arg+1)) {
+ if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), &ar)) {
+ setnilV(L->top-1);
+ return 1;
+ }
+ } else if (L->base+arg < L->top && tvisfunc(L->base+arg)) {
+ options = lua_pushfstring(L, ">%s", options);
+ setfuncV(L1, L1->top++, funcV(L->base+arg));
+ } else {
+ lj_err_arg(L, arg+1, LJ_ERR_NOFUNCL);
+ }
+ if (!lua_getinfo(L1, options, &ar))
+ lj_err_arg(L, arg+2, LJ_ERR_INVOPT);
+ lua_createtable(L, 0, 16);
+ if (strchr(options, 'S')) {
+ settabss(L, "source", ar.source);
+ settabss(L, "short_src", ar.short_src);
+ settabsi(L, "linedefined", ar.linedefined);
+ settabsi(L, "lastlinedefined", ar.lastlinedefined);
+ settabss(L, "what", ar.what);
+ }
+ if (strchr(options, 'l'))
+ settabsi(L, "currentline", ar.currentline);
+ if (strchr(options, 'u'))
+ settabsi(L, "nups", ar.nups);
+ if (strchr(options, 'n')) {
+ settabss(L, "name", ar.name);
+ settabss(L, "namewhat", ar.namewhat);
+ }
+ if (strchr(options, 'L'))
+ treatstackoption(L, L1, "activelines");
+ if (strchr(options, 'f'))
+ treatstackoption(L, L1, "func");
+ return 1; /* return table */
+}
+
+LJLIB_CF(debug_getlocal)
+{
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ lua_Debug ar;
+ const char *name;
+ if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
+ lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
+ name = lua_getlocal(L1, &ar, lj_lib_checkint(L, arg+2));
+ if (name) {
+ lua_xmove(L1, L, 1);
+ lua_pushstring(L, name);
+ lua_pushvalue(L, -2);
+ return 2;
+ } else {
+ setnilV(L->top-1);
+ return 1;
+ }
+}
+
+LJLIB_CF(debug_setlocal)
+{
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ lua_Debug ar;
+ TValue *tv;
+ if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
+ lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
+ tv = lj_lib_checkany(L, arg+3);
+ copyTV(L1, L1->top++, tv);
+ lua_pushstring(L, lua_setlocal(L1, &ar, lj_lib_checkint(L, arg+2)));
+ return 1;
+}
+
+static int debug_getupvalue(lua_State *L, int get)
+{
+ int32_t n = lj_lib_checkint(L, 2);
+ if (isluafunc(lj_lib_checkfunc(L, 1))) {
+ const char *name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n);
+ if (name) {
+ lua_pushstring(L, name);
+ if (!get) return 1;
+ copyTV(L, L->top, L->top-2);
+ L->top++;
+ return 2;
+ }
+ }
+ return 0;
+}
+
+LJLIB_CF(debug_getupvalue)
+{
+ return debug_getupvalue(L, 1);
+}
+
+LJLIB_CF(debug_setupvalue)
+{
+ lj_lib_checkany(L, 3);
+ return debug_getupvalue(L, 0);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static const char KEY_HOOK = 'h';
+
+static void hookf(lua_State *L, lua_Debug *ar)
+{
+ static const char *const hooknames[] =
+ {"call", "return", "line", "count", "tail return"};
+ lua_pushlightuserdata(L, (void *)&KEY_HOOK);
+ lua_rawget(L, LUA_REGISTRYINDEX);
+ if (lua_isfunction(L, -1)) {
+ lua_pushstring(L, hooknames[(int)ar->event]);
+ if (ar->currentline >= 0)
+ lua_pushinteger(L, ar->currentline);
+ else lua_pushnil(L);
+ lua_call(L, 2, 0);
+ }
+}
+
+static int makemask(const char *smask, int count)
+{
+ int mask = 0;
+ if (strchr(smask, 'c')) mask |= LUA_MASKCALL;
+ if (strchr(smask, 'r')) mask |= LUA_MASKRET;
+ if (strchr(smask, 'l')) mask |= LUA_MASKLINE;
+ if (count > 0) mask |= LUA_MASKCOUNT;
+ return mask;
+}
+
+static char *unmakemask(int mask, char *smask)
+{
+ int i = 0;
+ if (mask & LUA_MASKCALL) smask[i++] = 'c';
+ if (mask & LUA_MASKRET) smask[i++] = 'r';
+ if (mask & LUA_MASKLINE) smask[i++] = 'l';
+ smask[i] = '\0';
+ return smask;
+}
+
+LJLIB_CF(debug_sethook)
+{
+ int arg, mask, count;
+ lua_Hook func;
+ (void)getthread(L, &arg);
+ if (lua_isnoneornil(L, arg+1)) {
+ lua_settop(L, arg+1);
+ func = NULL; mask = 0; count = 0; /* turn off hooks */
+ } else {
+ const char *smask = luaL_checkstring(L, arg+2);
+ luaL_checktype(L, arg+1, LUA_TFUNCTION);
+ count = luaL_optint(L, arg+3, 0);
+ func = hookf; mask = makemask(smask, count);
+ }
+ lua_pushlightuserdata(L, (void *)&KEY_HOOK);
+ lua_pushvalue(L, arg+1);
+ lua_rawset(L, LUA_REGISTRYINDEX);
+ lua_sethook(L, func, mask, count);
+ return 0;
+}
+
+LJLIB_CF(debug_gethook)
+{
+ char buff[5];
+ int mask = lua_gethookmask(L);
+ lua_Hook hook = lua_gethook(L);
+ if (hook != NULL && hook != hookf) { /* external hook? */
+ lua_pushliteral(L, "external hook");
+ } else {
+ lua_pushlightuserdata(L, (void *)&KEY_HOOK);
+ lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */
+ }
+ lua_pushstring(L, unmakemask(mask, buff));
+ lua_pushinteger(L, lua_gethookcount(L));
+ return 3;
+}
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(debug_debug)
+{
+ for (;;) {
+ char buffer[250];
+ fputs("lua_debug> ", stderr);
+ if (fgets(buffer, sizeof(buffer), stdin) == 0 ||
+ strcmp(buffer, "cont\n") == 0)
+ return 0;
+ if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") ||
+ lua_pcall(L, 0, 0, 0)) {
+ fputs(lua_tostring(L, -1), stderr);
+ fputs("\n", stderr);
+ }
+ lua_settop(L, 0); /* remove eventual returns */
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+#define LEVELS1 12 /* size of the first part of the stack */
+#define LEVELS2 10 /* size of the second part of the stack */
+
+LJLIB_CF(debug_traceback)
+{
+ int level;
+ int firstpart = 1; /* still before eventual `...' */
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ lua_Debug ar;
+ if (lua_isnumber(L, arg+2)) {
+ level = (int)lua_tointeger(L, arg+2);
+ lua_pop(L, 1);
+ }
+ else
+ level = (L == L1) ? 1 : 0; /* level 0 may be this own function */
+ if (lua_gettop(L) == arg)
+ lua_pushliteral(L, "");
+ else if (!lua_isstring(L, arg+1)) return 1; /* message is not a string */
+ else lua_pushliteral(L, "\n");
+ lua_pushliteral(L, "stack traceback:");
+ while (lua_getstack(L1, level++, &ar)) {
+ if (level > LEVELS1 && firstpart) {
+ /* no more than `LEVELS2' more levels? */
+ if (!lua_getstack(L1, level+LEVELS2, &ar)) {
+ level--; /* keep going */
+ } else {
+ lua_pushliteral(L, "\n\t..."); /* too many levels */
+ /* This only works with LuaJIT 2.x. Avoids O(n^2) behaviour. */
+ lua_getstack(L1, -10, &ar);
+ level = ar.i_ci - LEVELS2;
+ }
+ firstpart = 0;
+ continue;
+ }
+ lua_pushliteral(L, "\n\t");
+ lua_getinfo(L1, "Snl", &ar);
+ lua_pushfstring(L, "%s:", ar.short_src);
+ if (ar.currentline > 0)
+ lua_pushfstring(L, "%d:", ar.currentline);
+ if (*ar.namewhat != '\0') { /* is there a name? */
+ lua_pushfstring(L, " in function " LUA_QS, ar.name);
+ } else {
+ if (*ar.what == 'm') /* main? */
+ lua_pushfstring(L, " in main chunk");
+ else if (*ar.what == 'C' || *ar.what == 't')
+ lua_pushliteral(L, " ?"); /* C function or tail call */
+ else
+ lua_pushfstring(L, " in function <%s:%d>",
+ ar.short_src, ar.linedefined);
+ }
+ lua_concat(L, lua_gettop(L) - arg);
+ }
+ lua_concat(L, lua_gettop(L) - arg);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_debug(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_DBLIBNAME, debug);
+ return 1;
+}
+
diff --git a/src/LuaJIT/src/lib_ffi.c b/src/LuaJIT/src/lib_ffi.c
new file mode 100644
index 000000000..2a674b88f
--- /dev/null
+++ b/src/LuaJIT/src/lib_ffi.c
@@ -0,0 +1,811 @@
+/*
+** FFI library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_ffi_c
+#define LUA_LIB
+
+#include <errno.h>
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_ctype.h"
+#include "lj_cparse.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#include "lj_carith.h"
+#include "lj_ccall.h"
+#include "lj_ccallback.h"
+#include "lj_clib.h"
+#include "lj_ff.h"
+#include "lj_lib.h"
+
+/* -- C type checks ------------------------------------------------------- */
+
+/* Check first argument for a C type and returns its ID. */
+static CTypeID ffi_checkctype(lua_State *L, CTState *cts)
+{
+ TValue *o = L->base;
+ if (!(o < L->top)) {
+ err_argtype:
+ lj_err_argtype(L, 1, "C type");
+ }
+ if (tvisstr(o)) { /* Parse an abstract C type declaration. */
+ GCstr *s = strV(o);
+ CPState cp;
+ int errcode;
+ cp.L = L;
+ cp.cts = cts;
+ cp.srcname = strdata(s);
+ cp.p = strdata(s);
+ cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT;
+ errcode = lj_cparse(&cp);
+ if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */
+ return cp.val.id;
+ } else {
+ GCcdata *cd;
+ if (!tviscdata(o)) goto err_argtype;
+ cd = cdataV(o);
+ return cd->typeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : cd->typeid;
+ }
+}
+
+/* Check argument for C data and return it. */
+static GCcdata *ffi_checkcdata(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tviscdata(o)))
+ lj_err_argt(L, narg, LUA_TCDATA);
+ return cdataV(o);
+}
+
+/* Convert argument to C pointer. */
+static void *ffi_checkptr(lua_State *L, int narg, CTypeID id)
+{
+ CTState *cts = ctype_cts(L);
+ TValue *o = L->base + narg-1;
+ void *p;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ lj_cconv_ct_tv(cts, ctype_get(cts, id), (uint8_t *)&p, o, CCF_ARG(narg));
+ return p;
+}
+
+/* Convert argument to int32_t. */
+static int32_t ffi_checkint(lua_State *L, int narg)
+{
+ CTState *cts = ctype_cts(L);
+ TValue *o = L->base + narg-1;
+ int32_t i;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o,
+ CCF_ARG(narg));
+ return i;
+}
+
+/* -- C type metamethods -------------------------------------------------- */
+
+#define LJLIB_MODULE_ffi_meta
+
+/* Handle ctype __index/__newindex metamethods. */
+static int ffi_index_meta(lua_State *L, CTState *cts, CType *ct, MMS mm)
+{
+ CTypeID id = ctype_typeid(cts, ct);
+ cTValue *tv = lj_ctype_meta(cts, id, mm);
+ TValue *base = L->base;
+ if (!tv) {
+ const char *s;
+ err_index:
+ s = strdata(lj_ctype_repr(L, id, NULL));
+ if (tvisstr(L->base+1))
+ lj_err_callerv(L, LJ_ERR_FFI_BADMEMBER, s, strVdata(L->base+1));
+ else
+ lj_err_callerv(L, LJ_ERR_FFI_BADIDX, s);
+ }
+ if (!tvisfunc(tv)) {
+ if (mm == MM_index) {
+ cTValue *o = lj_meta_tget(L, tv, base+1);
+ if (o) {
+ if (tvisnil(o)) goto err_index;
+ copyTV(L, L->top-1, o);
+ return 1;
+ }
+ } else {
+ TValue *o = lj_meta_tset(L, tv, base+1);
+ if (o) {
+ copyTV(L, o, base+2);
+ return 0;
+ }
+ }
+ tv = L->top-1;
+ }
+ return lj_meta_tailcall(L, tv);
+}
+
+LJLIB_CF(ffi_meta___index) LJLIB_REC(cdata_index 0)
+{
+ CTState *cts = ctype_cts(L);
+ CTInfo qual = 0;
+ CType *ct;
+ uint8_t *p;
+ TValue *o = L->base;
+ if (!(o+1 < L->top && tviscdata(o))) /* Also checks for presence of key. */
+ lj_err_argt(L, 1, LUA_TCDATA);
+ ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual);
+ if ((qual & 1))
+ return ffi_index_meta(L, cts, ct, MM_index);
+ if (lj_cdata_get(cts, ct, L->top-1, p))
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_meta___newindex) LJLIB_REC(cdata_index 1)
+{
+ CTState *cts = ctype_cts(L);
+ CTInfo qual = 0;
+ CType *ct;
+ uint8_t *p;
+ TValue *o = L->base;
+ if (!(o+2 < L->top && tviscdata(o))) /* Also checks for key and value. */
+ lj_err_argt(L, 1, LUA_TCDATA);
+ ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual);
+ if ((qual & 1)) {
+ if ((qual & CTF_CONST))
+ lj_err_caller(L, LJ_ERR_FFI_WRCONST);
+ return ffi_index_meta(L, cts, ct, MM_newindex);
+ }
+ lj_cdata_set(cts, ct, p, o+2, qual);
+ return 0;
+}
+
+/* Common handler for cdata arithmetic. */
+static int ffi_arith(lua_State *L)
+{
+ MMS mm = (MMS)(curr_func(L)->c.ffid - (int)FF_ffi_meta___eq + (int)MM_eq);
+ return lj_carith_op(L, mm);
+}
+
+/* The following functions must be in contiguous ORDER MM. */
+LJLIB_CF(ffi_meta___eq) LJLIB_REC(cdata_arith MM_eq)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___len) LJLIB_REC(cdata_arith MM_len)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___lt) LJLIB_REC(cdata_arith MM_lt)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___le) LJLIB_REC(cdata_arith MM_le)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___concat) LJLIB_REC(cdata_arith MM_concat)
+{
+ return ffi_arith(L);
+}
+
+/* Handle ctype __call metamethod. */
+static int ffi_call_meta(lua_State *L, CTypeID id)
+{
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, id);
+ cTValue *tv;
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, MM_call);
+ if (!tv)
+ lj_err_callerv(L, LJ_ERR_FFI_BADCALL, strdata(lj_ctype_repr(L, id, NULL)));
+ return lj_meta_tailcall(L, tv);
+}
+
+/* Forward declaration. */
+static int lj_cf_ffi_new(lua_State *L);
+
+LJLIB_CF(ffi_meta___call) LJLIB_REC(cdata_call)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ int ret;
+ if (cd->typeid == CTID_CTYPEID)
+ return lj_cf_ffi_new(L);
+ if ((ret = lj_ccall_func(L, cd)) < 0)
+ return ffi_call_meta(L, cd->typeid);
+ return ret;
+}
+
+LJLIB_CF(ffi_meta___add) LJLIB_REC(cdata_arith MM_add)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___sub) LJLIB_REC(cdata_arith MM_sub)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___mul) LJLIB_REC(cdata_arith MM_mul)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___div) LJLIB_REC(cdata_arith MM_div)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___mod) LJLIB_REC(cdata_arith MM_mod)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___pow) LJLIB_REC(cdata_arith MM_pow)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___unm) LJLIB_REC(cdata_arith MM_unm)
+{
+ return ffi_arith(L);
+}
+/* End of contiguous ORDER MM. */
+
+LJLIB_CF(ffi_meta___tostring)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ const char *msg = "cdata<%s>: %p";
+ CTypeID id = cd->typeid;
+ void *p = cdataptr(cd);
+ if (id == CTID_CTYPEID) {
+ msg = "ctype<%s>";
+ id = *(CTypeID *)p;
+ } else {
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
+ if (ctype_iscomplex(ct->info)) {
+ setstrV(L, L->top-1, lj_ctype_repr_complex(L, cdataptr(cd), ct->size));
+ goto checkgc;
+ } else if (ct->size == 8 && ctype_isinteger(ct->info)) {
+ setstrV(L, L->top-1, lj_ctype_repr_int64(L, *(uint64_t *)cdataptr(cd),
+ (ct->info & CTF_UNSIGNED)));
+ goto checkgc;
+ } else if (ctype_isfunc(ct->info)) {
+ p = *(void **)p;
+ } else {
+ if (ctype_isptr(ct->info)) {
+ p = cdata_getptr(p, ct->size);
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_isstruct(ct->info) || ctype_isvector(ct->info)) {
+ /* Handle ctype __tostring metamethod. */
+ cTValue *tv = lj_ctype_meta(cts, ctype_typeid(cts, ct), MM_tostring);
+ if (tv)
+ return lj_meta_tailcall(L, tv);
+ }
+ }
+ }
+ lj_str_pushf(L, msg, strdata(lj_ctype_repr(L, id, NULL)), p);
+checkgc:
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_PUSH("ffi") LJLIB_SET(__metatable)
+
+#include "lj_libdef.h"
+
+/* -- C library metamethods ----------------------------------------------- */
+
+#define LJLIB_MODULE_ffi_clib
+
+/* Index C library by a name. */
+static TValue *ffi_clib_index(lua_State *L)
+{
+ TValue *o = L->base;
+ CLibrary *cl;
+ if (!(o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB))
+ lj_err_argt(L, 1, LUA_TUSERDATA);
+ cl = (CLibrary *)uddata(udataV(o));
+ if (!(o+1 < L->top && tvisstr(o+1)))
+ lj_err_argt(L, 2, LUA_TSTRING);
+ return lj_clib_index(L, cl, strV(o+1));
+}
+
+LJLIB_CF(ffi_clib___index) LJLIB_REC(clib_index 1)
+{
+ TValue *tv = ffi_clib_index(L);
+ if (tviscdata(tv)) {
+ CTState *cts = ctype_cts(L);
+ GCcdata *cd = cdataV(tv);
+ CType *s = ctype_get(cts, cd->typeid);
+ if (ctype_isextern(s->info)) {
+ CTypeID sid = ctype_cid(s->info);
+ void *sp = *(void **)cdataptr(cd);
+ CType *ct = ctype_raw(cts, sid);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (lj_cconv_tv_ct(cts, ct, sid, L->top-1, sp))
+ lj_gc_check(L);
+ return 1;
+ }
+ }
+ copyTV(L, L->top-1, tv);
+ return 1;
+}
+
+LJLIB_CF(ffi_clib___newindex) LJLIB_REC(clib_index 0)
+{
+ TValue *tv = ffi_clib_index(L);
+ TValue *o = L->base+2;
+ if (o < L->top && tviscdata(tv)) {
+ CTState *cts = ctype_cts(L);
+ GCcdata *cd = cdataV(tv);
+ CType *d = ctype_get(cts, cd->typeid);
+ if (ctype_isextern(d->info)) {
+ CTInfo qual = 0;
+ for (;;) { /* Skip attributes and collect qualifiers. */
+ d = ctype_child(cts, d);
+ if (!ctype_isattrib(d->info)) break;
+ if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size;
+ }
+ if (!((d->info|qual) & CTF_CONST)) {
+ lj_cconv_ct_tv(cts, d, *(void **)cdataptr(cd), o, 0);
+ return 0;
+ }
+ }
+ }
+ lj_err_caller(L, LJ_ERR_FFI_WRCONST);
+ return 0; /* unreachable */
+}
+
+LJLIB_CF(ffi_clib___gc)
+{
+ TValue *o = L->base;
+ if (o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB)
+ lj_clib_unload((CLibrary *)uddata(udataV(o)));
+ return 0;
+}
+
+#include "lj_libdef.h"
+
+/* -- Callback function metamethods --------------------------------------- */
+
+#define LJLIB_MODULE_ffi_callback
+
+static int ffi_callback_set(lua_State *L, GCfunc *fn)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, cd->typeid);
+ if (ctype_isptr(ct->info) && (LJ_32 || ct->size == 8)) {
+ MSize slot = lj_ccallback_ptr2slot(cts, *(void **)cdataptr(cd));
+ if (slot < cts->cb.sizeid && cts->cb.cbid[slot] != 0) {
+ GCtab *t = cts->miscmap;
+ TValue *tv = lj_tab_setint(L, t, (int32_t)slot);
+ if (fn) {
+ setfuncV(L, tv, fn);
+ lj_gc_anybarriert(L, t);
+ } else {
+ setnilV(tv);
+ cts->cb.cbid[slot] = 0;
+ cts->cb.topid = slot < cts->cb.topid ? slot : cts->cb.topid;
+ }
+ return 0;
+ }
+ }
+ lj_err_caller(L, LJ_ERR_FFI_BADCBACK);
+ return 0;
+}
+
+LJLIB_CF(ffi_callback_free)
+{
+ return ffi_callback_set(L, NULL);
+}
+
+LJLIB_CF(ffi_callback_set)
+{
+ GCfunc *fn = lj_lib_checkfunc(L, 2);
+ return ffi_callback_set(L, fn);
+}
+
+LJLIB_PUSH(top-1) LJLIB_SET(__index)
+
+#include "lj_libdef.h"
+
+/* -- FFI library functions ----------------------------------------------- */
+
+#define LJLIB_MODULE_ffi
+
+LJLIB_CF(ffi_cdef)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ CPState cp;
+ int errcode;
+ cp.L = L;
+ cp.cts = ctype_cts(L);
+ cp.srcname = strdata(s);
+ cp.p = strdata(s);
+ cp.mode = CPARSE_MODE_MULTI|CPARSE_MODE_DIRECT;
+ errcode = lj_cparse(&cp);
+ if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */
+ lj_gc_check(L);
+ return 0;
+}
+
+LJLIB_CF(ffi_new) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts);
+ CType *ct = ctype_raw(cts, id);
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ TValue *o = L->base+1;
+ GCcdata *cd;
+ if ((info & CTF_VLA)) {
+ o++;
+ sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2));
+ }
+ if (sz == CTSIZE_INVALID)
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVSIZE);
+ if (!(info & CTF_VLA) && ctype_align(info) <= CT_MEMALIGN)
+ cd = lj_cdata_new(cts, id, sz);
+ else
+ cd = lj_cdata_newv(cts, id, sz, ctype_align(info));
+ setcdataV(L, o-1, cd); /* Anchor the uninitialized cdata. */
+ lj_cconv_ct_init(cts, ct, sz, cdataptr(cd),
+ o, (MSize)(L->top - o)); /* Initialize cdata. */
+ if (ctype_isstruct(ct->info)) {
+ /* Handle ctype __gc metamethod. Use the fast lookup here. */
+ cTValue *tv = lj_tab_getinth(cts->miscmap, -(int32_t)id);
+ if (tv && tvistab(tv) && (tv = lj_meta_fast(L, tabV(tv), MM_gc))) {
+ GCtab *t = cts->finalizer;
+ if (gcref(t->metatable)) {
+ /* Add to finalizer table, if still enabled. */
+ copyTV(L, lj_tab_set(L, t, o-1), tv);
+ lj_gc_anybarriert(L, t);
+ cd->marked |= LJ_GC_CDATA_FIN;
+ }
+ }
+ }
+ L->top = o; /* Only return the cdata itself. */
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_cast) LJLIB_REC(ffi_new)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts);
+ CType *d = ctype_raw(cts, id);
+ TValue *o = lj_lib_checkany(L, 2);
+ L->top = o+1; /* Make sure this is the last item on the stack. */
+ if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || ctype_isenum(d->info)))
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
+ if (!(tviscdata(o) && cdataV(o)->typeid == id)) {
+ GCcdata *cd = lj_cdata_new(cts, id, d->size);
+ lj_cconv_ct_tv(cts, d, cdataptr(cd), o, CCF_CAST);
+ setcdataV(L, o, cd);
+ lj_gc_check(L);
+ }
+ return 1;
+}
+
+LJLIB_CF(ffi_typeof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts);
+ GCcdata *cd = lj_cdata_new(cts, CTID_CTYPEID, 4);
+ *(CTypeID *)cdataptr(cd) = id;
+ setcdataV(L, L->top-1, cd);
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_istype) LJLIB_REC(ffi_istype)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id1 = ffi_checkctype(L, cts);
+ TValue *o = lj_lib_checkany(L, 2);
+ int b = 0;
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ CTypeID id2 = cd->typeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) :
+ cd->typeid;
+ CType *ct1 = lj_ctype_rawref(cts, id1);
+ CType *ct2 = lj_ctype_rawref(cts, id2);
+ if (ct1 == ct2) {
+ b = 1;
+ } else if (ctype_type(ct1->info) == ctype_type(ct2->info) &&
+ ct1->size == ct2->size) {
+ if (ctype_ispointer(ct1->info))
+ b = lj_cconv_compatptr(cts, ct1, ct2, CCF_IGNQUAL);
+ else if (ctype_isnum(ct1->info) || ctype_isvoid(ct1->info))
+ b = (((ct1->info ^ ct2->info) & ~CTF_QUAL) == 0);
+ } else if (ctype_isstruct(ct1->info) && ctype_isptr(ct2->info) &&
+ ct1 == ctype_rawchild(cts, ct2)) {
+ b = 1;
+ }
+ }
+ setboolV(L->top-1, b);
+ setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */
+ return 1;
+}
+
+LJLIB_CF(ffi_sizeof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts);
+ CTSize sz;
+ if (LJ_UNLIKELY(tviscdata(L->base) && cdataisv(cdataV(L->base)))) {
+ sz = cdatavlen(cdataV(L->base));
+ } else {
+ CType *ct = lj_ctype_rawref(cts, id);
+ if (ctype_isvltype(ct->info))
+ sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2));
+ else
+ sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID;
+ if (LJ_UNLIKELY(sz == CTSIZE_INVALID)) {
+ setnilV(L->top-1);
+ return 1;
+ }
+ }
+ setintV(L->top-1, (int32_t)sz);
+ return 1;
+}
+
+LJLIB_CF(ffi_alignof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts);
+ CTSize sz = 0;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ setintV(L->top-1, 1 << ctype_align(info));
+ return 1;
+}
+
+LJLIB_CF(ffi_offsetof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts);
+ GCstr *name = lj_lib_checkstr(L, 2);
+ CType *ct = lj_ctype_rawref(cts, id);
+ CTSize ofs;
+ if (ctype_isstruct(ct->info) && ct->size != CTSIZE_INVALID) {
+ CType *fct = lj_ctype_getfield(cts, ct, name, &ofs);
+ if (fct) {
+ setintV(L->top-1, ofs);
+ if (ctype_isfield(fct->info)) {
+ return 1;
+ } else if (ctype_isbitfield(fct->info)) {
+ setintV(L->top++, ctype_bitpos(fct->info));
+ setintV(L->top++, ctype_bitbsz(fct->info));
+ return 3;
+ }
+ }
+ }
+ return 0;
+}
+
+LJLIB_CF(ffi_errno) LJLIB_REC(.)
+{
+ int err = errno;
+ if (L->top > L->base)
+ errno = ffi_checkint(L, 1);
+ setintV(L->top++, err);
+ return 1;
+}
+
+LJLIB_CF(ffi_string) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ TValue *o = lj_lib_checkany(L, 1);
+ const char *p;
+ size_t len;
+ if (o+1 < L->top) {
+ len = (size_t)ffi_checkint(L, 2);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CVOID), (uint8_t *)&p, o,
+ CCF_ARG(1));
+ } else {
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CCHAR), (uint8_t *)&p, o,
+ CCF_ARG(1));
+ len = strlen(p);
+ }
+ L->top = o+1; /* Make sure this is the last item on the stack. */
+ setstrV(L, o, lj_str_new(L, p, len));
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_copy) LJLIB_REC(.)
+{
+ void *dp = ffi_checkptr(L, 1, CTID_P_VOID);
+ void *sp = ffi_checkptr(L, 2, CTID_P_CVOID);
+ TValue *o = L->base+1;
+ CTSize len;
+ if (tvisstr(o) && o+1 >= L->top)
+ len = strV(o)->len+1; /* Copy Lua string including trailing '\0'. */
+ else
+ len = (CTSize)ffi_checkint(L, 3);
+ memcpy(dp, sp, len);
+ return 0;
+}
+
+LJLIB_CF(ffi_fill) LJLIB_REC(.)
+{
+ void *dp = ffi_checkptr(L, 1, CTID_P_VOID);
+ CTSize len = (CTSize)ffi_checkint(L, 2);
+ int32_t fill = 0;
+ if (L->base+2 < L->top && !tvisnil(L->base+2)) fill = ffi_checkint(L, 3);
+ memset(dp, fill, len);
+ return 0;
+}
+
+#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be)
+
+/* Test ABI string. */
+LJLIB_CF(ffi_abi) LJLIB_REC(.)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ int b = 0;
+ switch (s->hash) {
+#if LJ_64
+ case H_(849858eb,ad35fd06): b = 1; break; /* 64bit */
+#else
+ case H_(662d3c79,d0e22477): b = 1; break; /* 32bit */
+#endif
+#if LJ_ARCH_HASFPU
+ case H_(e33ee463,e33ee463): b = 1; break; /* fpu */
+#endif
+#if LJ_ABI_SOFTFP
+ case H_(61211a23,c2e8c81c): b = 1; break; /* softfp */
+#else
+ case H_(539417a8,8ce0812f): b = 1; break; /* hardfp */
+#endif
+#if LJ_ABI_EABI
+ case H_(2182df8f,f2ed1152): b = 1; break; /* eabi */
+#endif
+#if LJ_ABI_WIN
+ case H_(4ab624a8,4ab624a8): b = 1; break; /* win */
+#endif
+ case H_(3af93066,1f001464): b = 1; break; /* le/be */
+ default:
+ break;
+ }
+ setboolV(L->top-1, b);
+ setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */
+ return 1;
+}
+
+#undef H_
+
+LJLIB_PUSH(top-8) LJLIB_SET(!) /* Store reference to miscmap table. */
+
+LJLIB_CF(ffi_metatype)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts);
+ GCtab *mt = lj_lib_checktab(L, 2);
+ GCtab *t = cts->miscmap;
+ CType *ct = ctype_get(cts, id); /* Only allow raw types. */
+ TValue *tv;
+ GCcdata *cd;
+ if (!(ctype_isstruct(ct->info) || ctype_iscomplex(ct->info) ||
+ ctype_isvector(ct->info)))
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
+ tv = lj_tab_setinth(L, t, -(int32_t)id);
+ if (!tvisnil(tv))
+ lj_err_caller(L, LJ_ERR_PROTMT);
+ settabV(L, tv, mt);
+ lj_gc_anybarriert(L, t);
+ cd = lj_cdata_new(cts, CTID_CTYPEID, 4);
+ *(CTypeID *)cdataptr(cd) = id;
+ setcdataV(L, L->top-1, cd);
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_PUSH(top-7) LJLIB_SET(!) /* Store reference to finalizer table. */
+
+LJLIB_CF(ffi_gc)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ TValue *fin = lj_lib_checkany(L, 2);
+ CTState *cts = ctype_cts(L);
+ GCtab *t = cts->finalizer;
+ CType *ct = ctype_raw(cts, cd->typeid);
+ if (!(ctype_isptr(ct->info) || ctype_isstruct(ct->info) ||
+ ctype_isrefarray(ct->info)))
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
+ if (gcref(t->metatable)) { /* Update finalizer table, if still enabled. */
+ copyTV(L, lj_tab_set(L, t, L->base), fin);
+ lj_gc_anybarriert(L, t);
+ if (!tvisnil(fin))
+ cd->marked |= LJ_GC_CDATA_FIN;
+ else
+ cd->marked &= ~LJ_GC_CDATA_FIN;
+ }
+ L->top = L->base+1; /* Pass through the cdata object. */
+ return 1;
+}
+
+LJLIB_PUSH(top-5) LJLIB_SET(!) /* Store clib metatable in func environment. */
+
+LJLIB_CF(ffi_load)
+{
+ GCstr *name = lj_lib_checkstr(L, 1);
+ int global = (L->base+1 < L->top && tvistruecond(L->base+1));
+ lj_clib_load(L, tabref(curr_func(L)->c.env), name, global);
+ return 1;
+}
+
+LJLIB_PUSH(top-4) LJLIB_SET(C)
+LJLIB_PUSH(top-3) LJLIB_SET(os)
+LJLIB_PUSH(top-2) LJLIB_SET(arch)
+
+#include "lj_libdef.h"
+
+/* ------------------------------------------------------------------------ */
+
+/* Create special weak-keyed finalizer table. */
+static GCtab *ffi_finalizer(lua_State *L)
+{
+ /* NOBARRIER: The table is new (marked white). */
+ GCtab *t = lj_tab_new(L, 0, 1);
+ settabV(L, L->top++, t);
+ setgcref(t->metatable, obj2gco(t));
+ setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
+ lj_str_newlit(L, "K"));
+ t->nomm = (uint8_t)(~(1u<<MM_mode));
+ return t;
+}
+
+/* Register FFI module as loaded. */
+static void ffi_register_module(lua_State *L)
+{
+ cTValue *tmp = lj_tab_getstr(tabV(registry(L)), lj_str_newlit(L, "_LOADED"));
+ if (tmp && tvistab(tmp)) {
+ GCtab *t = tabV(tmp);
+ copyTV(L, lj_tab_setstr(L, t, lj_str_newlit(L, LUA_FFILIBNAME)), L->top-1);
+ lj_gc_anybarriert(L, t);
+ }
+}
+
+LUALIB_API int luaopen_ffi(lua_State *L)
+{
+ CTState *cts = lj_ctype_init(L);
+ settabV(L, L->top++, (cts->miscmap = lj_tab_new(L, 0, 1)));
+ cts->finalizer = ffi_finalizer(L);
+ LJ_LIB_REG(L, NULL, ffi_meta);
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_it(G(L), LJ_TCDATA), obj2gco(tabV(L->top-1)));
+ LJ_LIB_REG(L, NULL, ffi_clib);
+ LJ_LIB_REG(L, NULL, ffi_callback);
+ /* NOBARRIER: the key is new and lj_tab_newkey() handles the barrier. */
+ settabV(L, lj_tab_setstr(L, cts->miscmap, &cts->g->strempty), tabV(L->top-1));
+ L->top--;
+ lj_clib_default(L, tabV(L->top-1)); /* Create ffi.C default namespace. */
+ lua_pushliteral(L, LJ_OS_NAME);
+ lua_pushliteral(L, LJ_ARCH_NAME);
+ LJ_LIB_REG(L, NULL, ffi); /* Note: no global "ffi" created! */
+ ffi_register_module(L);
+ return 1;
+}
+
+#endif
diff --git a/src/LuaJIT/src/lib_init.c b/src/LuaJIT/src/lib_init.c
new file mode 100644
index 000000000..8501e21de
--- /dev/null
+++ b/src/LuaJIT/src/lib_init.c
@@ -0,0 +1,53 @@
+/*
+** Library initialization.
+** Major parts taken verbatim from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_init_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_arch.h"
+
+static const luaL_Reg lj_lib_load[] = {
+ { "", luaopen_base },
+ { LUA_LOADLIBNAME, luaopen_package },
+ { LUA_TABLIBNAME, luaopen_table },
+ { LUA_IOLIBNAME, luaopen_io },
+ { LUA_OSLIBNAME, luaopen_os },
+ { LUA_STRLIBNAME, luaopen_string },
+ { LUA_MATHLIBNAME, luaopen_math },
+ { LUA_DBLIBNAME, luaopen_debug },
+ { LUA_BITLIBNAME, luaopen_bit },
+ { LUA_JITLIBNAME, luaopen_jit },
+ { NULL, NULL }
+};
+
+static const luaL_Reg lj_lib_preload[] = {
+#if LJ_HASFFI
+ { LUA_FFILIBNAME, luaopen_ffi },
+#endif
+ { NULL, NULL }
+};
+
+LUALIB_API void luaL_openlibs(lua_State *L)
+{
+ const luaL_Reg *lib;
+ for (lib = lj_lib_load; lib->func; lib++) {
+ lua_pushcfunction(L, lib->func);
+ lua_pushstring(L, lib->name);
+ lua_call(L, 1, 0);
+ }
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD",
+ sizeof(lj_lib_preload)/sizeof(lj_lib_preload[0])-1);
+ for (lib = lj_lib_preload; lib->func; lib++) {
+ lua_pushcfunction(L, lib->func);
+ lua_setfield(L, -2, lib->name);
+ }
+ lua_pop(L, 1);
+}
+
diff --git a/src/LuaJIT/src/lib_io.c b/src/LuaJIT/src/lib_io.c
new file mode 100644
index 000000000..6078e74d3
--- /dev/null
+++ b/src/LuaJIT/src/lib_io.c
@@ -0,0 +1,533 @@
+/*
+** I/O library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <stdio.h>
+
+#define lib_io_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_ff.h"
+#include "lj_trace.h"
+#include "lj_lib.h"
+
+/* Userdata payload for I/O file. */
+typedef struct IOFileUD {
+ FILE *fp; /* File handle. */
+ uint32_t type; /* File type. */
+} IOFileUD;
+
+#define IOFILE_TYPE_FILE 0 /* Regular file. */
+#define IOFILE_TYPE_PIPE 1 /* Pipe. */
+#define IOFILE_TYPE_STDF 2 /* Standard file handle. */
+#define IOFILE_TYPE_MASK 3
+
+#define IOFILE_FLAG_CLOSE 4 /* Close after io.lines() iterator. */
+
+#define IOSTDF_UD(L, id) (&gcref(G(L)->gcroot[(id)])->ud)
+#define IOSTDF_IOF(L, id) ((IOFileUD *)uddata(IOSTDF_UD(L, (id))))
+
+/* -- Error handling ------------------------------------------------------ */
+
+static int io_pushresult(lua_State *L, int ok, const char *fname)
+{
+ if (ok) {
+ setboolV(L->top++, 1);
+ return 1;
+ } else {
+ int en = errno; /* Lua API calls may change this value. */
+ setnilV(L->top++);
+ if (fname)
+ lua_pushfstring(L, "%s: %s", fname, strerror(en));
+ else
+ lua_pushfstring(L, "%s", strerror(en));
+ setintV(L->top++, en);
+ lj_trace_abort(G(L));
+ return 3;
+ }
+}
+
+/* -- Open/close helpers -------------------------------------------------- */
+
+static IOFileUD *io_tofilep(lua_State *L)
+{
+ if (!(L->base < L->top && tvisudata(L->base) &&
+ udataV(L->base)->udtype == UDTYPE_IO_FILE))
+ lj_err_argtype(L, 1, "FILE*");
+ return (IOFileUD *)uddata(udataV(L->base));
+}
+
+static IOFileUD *io_tofile(lua_State *L)
+{
+ IOFileUD *iof = io_tofilep(L);
+ if (iof->fp == NULL)
+ lj_err_caller(L, LJ_ERR_IOCLFL);
+ return iof;
+}
+
+static FILE *io_stdfile(lua_State *L, ptrdiff_t id)
+{
+ IOFileUD *iof = IOSTDF_IOF(L, id);
+ if (iof->fp == NULL)
+ lj_err_caller(L, LJ_ERR_IOSTDCL);
+ return iof->fp;
+}
+
+static IOFileUD *io_file_new(lua_State *L)
+{
+ IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD));
+ GCudata *ud = udataV(L->top-1);
+ ud->udtype = UDTYPE_IO_FILE;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcrefr(ud->metatable, curr_func(L)->c.env);
+ iof->fp = NULL;
+ iof->type = IOFILE_TYPE_FILE;
+ return iof;
+}
+
+static IOFileUD *io_file_open(lua_State *L, const char *mode)
+{
+ const char *fname = strdata(lj_lib_checkstr(L, 1));
+ IOFileUD *iof = io_file_new(L);
+ iof->fp = fopen(fname, mode);
+ if (iof->fp == NULL)
+ luaL_argerror(L, 1, lj_str_pushf(L, "%s: %s", fname, strerror(errno)));
+ return iof;
+}
+
+static int io_file_close(lua_State *L, IOFileUD *iof)
+{
+ int ok;
+ if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_FILE) {
+ ok = (fclose(iof->fp) == 0);
+ } else if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_PIPE) {
+#if LJ_TARGET_POSIX
+ ok = (pclose(iof->fp) != -1);
+#elif LJ_TARGET_WINDOWS
+ ok = (_pclose(iof->fp) != -1);
+#else
+ ok = 0;
+#endif
+ } else {
+ lua_assert((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF);
+ setnilV(L->top++);
+ lua_pushliteral(L, "cannot close standard file");
+ return 2;
+ }
+ iof->fp = NULL;
+ return io_pushresult(L, ok, NULL);
+}
+
+/* -- Read/write helpers -------------------------------------------------- */
+
+static int io_file_readnum(lua_State *L, FILE *fp)
+{
+ lua_Number d;
+ if (fscanf(fp, LUA_NUMBER_SCAN, &d) == 1) {
+ if (LJ_DUALNUM) {
+ int32_t i = lj_num2int(d);
+ if (d == (lua_Number)i && !tvismzero((cTValue *)&d)) {
+ setintV(L->top++, i);
+ return 1;
+ }
+ }
+ setnumV(L->top++, d);
+ return 1;
+ } else {
+ setnilV(L->top++);
+ return 0;
+ }
+}
+
+static int io_file_testeof(lua_State *L, FILE *fp)
+{
+ int c = getc(fp);
+ ungetc(c, fp);
+ lua_pushlstring(L, NULL, 0);
+ return (c != EOF);
+}
+
+static int io_file_readline(lua_State *L, FILE *fp, size_t chop)
+{
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ for (;;) {
+ size_t len;
+ char *p = luaL_prepbuffer(&b);
+ if (fgets(p, LUAL_BUFFERSIZE, fp) == NULL) { /* EOF? */
+ luaL_pushresult(&b);
+ return (strV(L->top-1)->len > 0); /* Anything read? */
+ }
+ len = strlen(p);
+ if (len == 0 || p[len-1] != '\n') { /* Partial line? */
+ luaL_addsize(&b, len);
+ } else {
+ luaL_addsize(&b, len - chop); /* Keep or remove EOL. */
+ luaL_pushresult(&b);
+ return 1; /* Got at least an EOL. */
+ }
+ }
+}
+
+static int io_file_readchars(lua_State *L, FILE *fp, size_t n)
+{
+ size_t rlen; /* how much to read */
+ size_t nr; /* number of chars actually read */
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ rlen = LUAL_BUFFERSIZE; /* try to read that much each time */
+ do {
+ char *p = luaL_prepbuffer(&b);
+ if (rlen > n) rlen = n; /* cannot read more than asked */
+ nr = fread(p, 1, rlen, fp);
+ luaL_addsize(&b, nr);
+ n -= nr; /* still have to read `n' chars */
+ } while (n > 0 && nr == rlen); /* until end of count or eof */
+ luaL_pushresult(&b); /* close buffer */
+ return (n == 0 || strV(L->top-1)->len > 0);
+}
+
+static int io_file_read(lua_State *L, FILE *fp, int start)
+{
+ int ok, n, nargs = (int)(L->top - L->base) - start;
+ clearerr(fp);
+ if (nargs == 0) {
+ ok = io_file_readline(L, fp, 1);
+ n = start+1; /* Return 1 result. */
+ } else {
+ /* The results plus the buffers go on top of the args. */
+ luaL_checkstack(L, nargs+LUA_MINSTACK, "too many arguments");
+ ok = 1;
+ for (n = start; nargs-- && ok; n++) {
+ if (tvisstr(L->base+n)) {
+ const char *p = strVdata(L->base+n);
+ if (p[0] != '*')
+ lj_err_arg(L, n+1, LJ_ERR_INVOPT);
+ if (p[1] == 'n')
+ ok = io_file_readnum(L, fp);
+ else if ((p[1] & ~0x20) == 'L')
+ ok = io_file_readline(L, fp, (p[1] == 'l'));
+ else if (p[1] == 'a')
+ io_file_readchars(L, fp, ~((size_t)0));
+ else
+ lj_err_arg(L, n+1, LJ_ERR_INVFMT);
+ } else if (tvisnumber(L->base+n)) {
+ size_t len = (size_t)lj_lib_checkint(L, n+1);
+ ok = len ? io_file_readchars(L, fp, len) : io_file_testeof(L, fp);
+ } else {
+ lj_err_arg(L, n+1, LJ_ERR_INVOPT);
+ }
+ }
+ }
+ if (ferror(fp))
+ return io_pushresult(L, 0, NULL);
+ if (!ok)
+ setnilV(L->top-1); /* Replace last result with nil. */
+ return n - start;
+}
+
+static int io_file_write(lua_State *L, FILE *fp, int start)
+{
+ cTValue *tv;
+ int status = 1;
+ for (tv = L->base+start; tv < L->top; tv++) {
+ if (tvisstr(tv)) {
+ MSize len = strV(tv)->len;
+ status = status && (fwrite(strVdata(tv), 1, len, fp) == len);
+ } else if (tvisint(tv)) {
+ char buf[LJ_STR_INTBUF];
+ char *p = lj_str_bufint(buf, intV(tv));
+ size_t len = (size_t)(buf+LJ_STR_INTBUF-p);
+ status = status && (fwrite(p, 1, len, fp) == len);
+ } else if (tvisnum(tv)) {
+ status = status && (fprintf(fp, LUA_NUMBER_FMT, numV(tv)) > 0);
+ } else {
+ lj_err_argt(L, (int)(tv - L->base) + 1, LUA_TSTRING);
+ }
+ }
+ return io_pushresult(L, status, NULL);
+}
+
+/* -- I/O file methods ---------------------------------------------------- */
+
+#define LJLIB_MODULE_io_method
+
+LJLIB_CF(io_method_close)
+{
+ IOFileUD *iof = L->base < L->top ? io_tofile(L) :
+ IOSTDF_IOF(L, GCROOT_IO_OUTPUT);
+ return io_file_close(L, iof);
+}
+
+LJLIB_CF(io_method_read)
+{
+ return io_file_read(L, io_tofile(L)->fp, 1);
+}
+
+LJLIB_CF(io_method_write) LJLIB_REC(io_write 0)
+{
+ return io_file_write(L, io_tofile(L)->fp, 1);
+}
+
+LJLIB_CF(io_method_flush) LJLIB_REC(io_flush 0)
+{
+ return io_pushresult(L, fflush(io_tofile(L)->fp) == 0, NULL);
+}
+
+LJLIB_CF(io_method_seek)
+{
+ FILE *fp = io_tofile(L)->fp;
+ int opt = lj_lib_checkopt(L, 2, 1, "\3set\3cur\3end");
+ int64_t ofs = 0;
+ cTValue *o;
+ int res;
+ if (opt == 0) opt = SEEK_SET;
+ else if (opt == 1) opt = SEEK_CUR;
+ else if (opt == 2) opt = SEEK_END;
+ o = L->base+2;
+ if (o < L->top) {
+ if (tvisint(o))
+ ofs = (int64_t)intV(o);
+ else if (tvisnum(o))
+ ofs = (int64_t)numV(o);
+ else if (!tvisnil(o))
+ lj_err_argt(L, 3, LUA_TNUMBER);
+ }
+#if LJ_TARGET_POSIX
+ res = fseeko(fp, ofs, opt);
+#elif _MSC_VER >= 1400
+ res = _fseeki64(fp, ofs, opt);
+#elif defined(__MINGW32__)
+ res = fseeko64(fp, ofs, opt);
+#else
+ res = fseek(fp, (long)ofs, opt);
+#endif
+ if (res)
+ return io_pushresult(L, 0, NULL);
+#if LJ_TARGET_POSIX
+ ofs = ftello(fp);
+#elif _MSC_VER >= 1400
+ ofs = _ftelli64(fp);
+#elif defined(__MINGW32__)
+ ofs = ftello64(fp);
+#else
+ ofs = (int64_t)ftell(fp);
+#endif
+ setint64V(L->top-1, ofs);
+ return 1;
+}
+
+LJLIB_CF(io_method_setvbuf)
+{
+ FILE *fp = io_tofile(L)->fp;
+ int opt = lj_lib_checkopt(L, 2, -1, "\4full\4line\2no");
+ size_t sz = (size_t)lj_lib_optint(L, 3, LUAL_BUFFERSIZE);
+ if (opt == 0) opt = _IOFBF;
+ else if (opt == 1) opt = _IOLBF;
+ else if (opt == 2) opt = _IONBF;
+ return io_pushresult(L, setvbuf(fp, NULL, opt, sz) == 0, NULL);
+}
+
+LJLIB_PUSH(top-2) /* io_lines_iter */
+LJLIB_CF(io_method_lines)
+{
+ io_tofile(L);
+ setfuncV(L, L->top, funcV(lj_lib_upvalue(L, 1)));
+ setudataV(L, L->top+1, udataV(L->base));
+ L->top += 2;
+ return 2;
+}
+
+LJLIB_CF(io_method___gc)
+{
+ IOFileUD *iof = io_tofilep(L);
+ if (iof->fp != NULL && (iof->type & IOFILE_TYPE_MASK) != IOFILE_TYPE_STDF)
+ io_file_close(L, iof);
+ return 0;
+}
+
+LJLIB_CF(io_method___tostring)
+{
+ IOFileUD *iof = io_tofilep(L);
+ if (iof->fp != NULL)
+ lua_pushfstring(L, "file (%p)", iof->fp);
+ else
+ lua_pushliteral(L, "file (closed)");
+ return 1;
+}
+
+LJLIB_PUSH(top-1) LJLIB_SET(__index)
+
+#include "lj_libdef.h"
+
+/* -- I/O library functions ----------------------------------------------- */
+
+#define LJLIB_MODULE_io
+
+LJLIB_PUSH(top-2) LJLIB_SET(!) /* Set environment. */
+
+LJLIB_CF(io_open)
+{
+ const char *fname = strdata(lj_lib_checkstr(L, 1));
+ GCstr *s = lj_lib_optstr(L, 2);
+ const char *mode = s ? strdata(s) : "r";
+ IOFileUD *iof = io_file_new(L);
+ iof->fp = fopen(fname, mode);
+ return iof->fp != NULL ? 1 : io_pushresult(L, 0, fname);
+}
+
+LJLIB_CF(io_popen)
+{
+#if LJ_TARGET_POSIX || LJ_TARGET_WINDOWS
+ const char *fname = strdata(lj_lib_checkstr(L, 1));
+ GCstr *s = lj_lib_optstr(L, 2);
+ const char *mode = s ? strdata(s) : "r";
+ IOFileUD *iof = io_file_new(L);
+ iof->type = IOFILE_TYPE_PIPE;
+#if LJ_TARGET_POSIX
+ fflush(NULL);
+ iof->fp = popen(fname, mode);
+#else
+ iof->fp = _popen(fname, mode);
+#endif
+ return iof->fp != NULL ? 1 : io_pushresult(L, 0, fname);
+#else
+ luaL_error(L, LUA_QL("popen") " not supported");
+#endif
+}
+
+LJLIB_CF(io_tmpfile)
+{
+ IOFileUD *iof = io_file_new(L);
+ iof->fp = tmpfile();
+ return iof->fp != NULL ? 1 : io_pushresult(L, 0, NULL);
+}
+
+LJLIB_CF(io_close)
+{
+ return lj_cf_io_method_close(L);
+}
+
+LJLIB_CF(io_read)
+{
+ return io_file_read(L, io_stdfile(L, GCROOT_IO_INPUT), 0);
+}
+
+LJLIB_CF(io_write) LJLIB_REC(io_write GCROOT_IO_OUTPUT)
+{
+ return io_file_write(L, io_stdfile(L, GCROOT_IO_OUTPUT), 0);
+}
+
+LJLIB_CF(io_flush) LJLIB_REC(io_flush GCROOT_IO_OUTPUT)
+{
+ return io_pushresult(L, fflush(io_stdfile(L, GCROOT_IO_OUTPUT)) == 0, NULL);
+}
+
+static int io_std_getset(lua_State *L, ptrdiff_t id, const char *mode)
+{
+ if (L->base < L->top && !tvisnil(L->base)) {
+ if (tvisudata(L->base)) {
+ io_tofile(L);
+ L->top = L->base+1;
+ } else {
+ io_file_open(L, mode);
+ }
+ /* NOBARRIER: The standard I/O handles are GC roots. */
+ setgcref(G(L)->gcroot[id], gcV(L->top-1));
+ } else {
+ setudataV(L, L->top++, IOSTDF_UD(L, id));
+ }
+ return 1;
+}
+
+LJLIB_CF(io_input)
+{
+ return io_std_getset(L, GCROOT_IO_INPUT, "r");
+}
+
+LJLIB_CF(io_output)
+{
+ return io_std_getset(L, GCROOT_IO_OUTPUT, "w");
+}
+
+LJLIB_NOREG LJLIB_CF(io_lines_iter)
+{
+ IOFileUD *iof = io_tofile(L);
+ int ok = io_file_readline(L, iof->fp, 1);
+ if (ferror(iof->fp))
+ lj_err_callermsg(L, strerror(errno));
+ if (!ok && (iof->type & IOFILE_FLAG_CLOSE))
+ io_file_close(L, iof); /* Return values are ignored (ok is 0). */
+ return ok;
+}
+
+LJLIB_PUSH(top-3) /* io_lines_iter */
+LJLIB_CF(io_lines)
+{
+ if (L->base < L->top && !tvisnil(L->base)) { /* io.lines(fname) */
+ IOFileUD *iof = io_file_open(L, "r");
+ iof->type = IOFILE_TYPE_FILE|IOFILE_FLAG_CLOSE;
+ setfuncV(L, L->top-2, funcV(lj_lib_upvalue(L, 1)));
+ } else { /* io.lines() iterates over stdin. */
+ setfuncV(L, L->top, funcV(lj_lib_upvalue(L, 1)));
+ setudataV(L, L->top+1, IOSTDF_UD(L, GCROOT_IO_INPUT));
+ L->top += 2;
+ }
+ return 2;
+}
+
+LJLIB_CF(io_type)
+{
+ cTValue *o = lj_lib_checkany(L, 1);
+ if (!(tvisudata(o) && udataV(o)->udtype == UDTYPE_IO_FILE))
+ setnilV(L->top++);
+ else if (((IOFileUD *)uddata(udataV(o)))->fp != NULL)
+ lua_pushliteral(L, "file");
+ else
+ lua_pushliteral(L, "closed file");
+ return 1;
+}
+
+#include "lj_libdef.h"
+
+/* ------------------------------------------------------------------------ */
+
+static GCobj *io_std_new(lua_State *L, FILE *fp, const char *name)
+{
+ IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD));
+ GCudata *ud = udataV(L->top-1);
+ ud->udtype = UDTYPE_IO_FILE;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcref(ud->metatable, gcV(L->top-3));
+ iof->fp = fp;
+ iof->type = IOFILE_TYPE_STDF;
+ lua_setfield(L, -2, name);
+ return obj2gco(ud);
+}
+
+LUALIB_API int luaopen_io(lua_State *L)
+{
+ lj_lib_pushcf(L, lj_cf_io_lines_iter, FF_io_lines_iter);
+ LJ_LIB_REG(L, NULL, io_method);
+ copyTV(L, L->top, L->top-1); L->top++;
+ lua_setfield(L, LUA_REGISTRYINDEX, LUA_FILEHANDLE);
+ LJ_LIB_REG(L, LUA_IOLIBNAME, io);
+ setgcref(G(L)->gcroot[GCROOT_IO_INPUT], io_std_new(L, stdin, "stdin"));
+ setgcref(G(L)->gcroot[GCROOT_IO_OUTPUT], io_std_new(L, stdout, "stdout"));
+ io_std_new(L, stderr, "stderr");
+ return 1;
+}
+
diff --git a/src/LuaJIT/src/lib_jit.c b/src/LuaJIT/src/lib_jit.c
new file mode 100644
index 000000000..7d5e0aef8
--- /dev/null
+++ b/src/LuaJIT/src/lib_jit.c
@@ -0,0 +1,673 @@
+/*
+** JIT library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_jit_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_arch.h"
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_bc.h"
+#if LJ_HASJIT
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_target.h"
+#endif
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+#include "lj_lib.h"
+
+#include "luajit.h"
+
+/* -- jit.* functions ----------------------------------------------------- */
+
+#define LJLIB_MODULE_jit
+
+static int setjitmode(lua_State *L, int mode)
+{
+ int idx = 0;
+ if (L->base == L->top || tvisnil(L->base)) { /* jit.on/off/flush([nil]) */
+ mode |= LUAJIT_MODE_ENGINE;
+ } else {
+ /* jit.on/off/flush(func|proto, nil|true|false) */
+ if (tvisfunc(L->base) || tvisproto(L->base))
+ idx = 1;
+ else if (!tvistrue(L->base)) /* jit.on/off/flush(true, nil|true|false) */
+ goto err;
+ if (L->base+1 < L->top && tvisbool(L->base+1))
+ mode |= boolV(L->base+1) ? LUAJIT_MODE_ALLFUNC : LUAJIT_MODE_ALLSUBFUNC;
+ else
+ mode |= LUAJIT_MODE_FUNC;
+ }
+ if (luaJIT_setmode(L, idx, mode) != 1) {
+ if ((mode & LUAJIT_MODE_MASK) == LUAJIT_MODE_ENGINE)
+ lj_err_caller(L, LJ_ERR_NOJIT);
+ err:
+ lj_err_argt(L, 1, LUA_TFUNCTION);
+ }
+ return 0;
+}
+
+LJLIB_CF(jit_on)
+{
+ return setjitmode(L, LUAJIT_MODE_ON);
+}
+
+LJLIB_CF(jit_off)
+{
+ return setjitmode(L, LUAJIT_MODE_OFF);
+}
+
+LJLIB_CF(jit_flush)
+{
+#if LJ_HASJIT
+ if (L->base < L->top && !tvisnil(L->base)) {
+ int traceno = lj_lib_checkint(L, 1);
+ luaJIT_setmode(L, traceno, LUAJIT_MODE_FLUSH|LUAJIT_MODE_TRACE);
+ return 0;
+ }
+#endif
+ return setjitmode(L, LUAJIT_MODE_FLUSH);
+}
+
+#if LJ_HASJIT
+/* Push a string for every flag bit that is set. */
+static void flagbits_to_strings(lua_State *L, uint32_t flags, uint32_t base,
+ const char *str)
+{
+ for (; *str; base <<= 1, str += 1+*str)
+ if (flags & base)
+ setstrV(L, L->top++, lj_str_new(L, str+1, *(uint8_t *)str));
+}
+#endif
+
+LJLIB_CF(jit_status)
+{
+#if LJ_HASJIT
+ jit_State *J = L2J(L);
+ L->top = L->base;
+ setboolV(L->top++, (J->flags & JIT_F_ON) ? 1 : 0);
+ flagbits_to_strings(L, J->flags, JIT_F_CPU_FIRST, JIT_F_CPUSTRING);
+ flagbits_to_strings(L, J->flags, JIT_F_OPT_FIRST, JIT_F_OPTSTRING);
+ return (int)(L->top - L->base);
+#else
+ setboolV(L->top++, 0);
+ return 1;
+#endif
+}
+
+LJLIB_CF(jit_attach)
+{
+#ifdef LUAJIT_DISABLE_VMEVENT
+ luaL_error(L, "vmevent API disabled");
+#else
+ GCfunc *fn = lj_lib_checkfunc(L, 1);
+ GCstr *s = lj_lib_optstr(L, 2);
+ luaL_findtable(L, LUA_REGISTRYINDEX, LJ_VMEVENTS_REGKEY, LJ_VMEVENTS_HSIZE);
+ if (s) { /* Attach to given event. */
+ const uint8_t *p = (const uint8_t *)strdata(s);
+ uint32_t h = s->len;
+ while (*p) h = h ^ (lj_rol(h, 6) + *p++);
+ lua_pushvalue(L, 1);
+ lua_rawseti(L, -2, VMEVENT_HASHIDX(h));
+ G(L)->vmevmask = VMEVENT_NOCACHE; /* Invalidate cache. */
+ } else { /* Detach if no event given. */
+ setnilV(L->top++);
+ while (lua_next(L, -2)) {
+ L->top--;
+ if (tvisfunc(L->top) && funcV(L->top) == fn) {
+ setnilV(lj_tab_set(L, tabV(L->top-2), L->top-1));
+ }
+ }
+ }
+#endif
+ return 0;
+}
+
+LJLIB_PUSH(top-5) LJLIB_SET(os)
+LJLIB_PUSH(top-4) LJLIB_SET(arch)
+LJLIB_PUSH(top-3) LJLIB_SET(version_num)
+LJLIB_PUSH(top-2) LJLIB_SET(version)
+
+#include "lj_libdef.h"
+
+/* -- jit.util.* functions ------------------------------------------------ */
+
+#define LJLIB_MODULE_jit_util
+
+/* -- Reflection API for Lua functions ------------------------------------ */
+
+/* Return prototype of first argument (Lua function or prototype object) */
+static GCproto *check_Lproto(lua_State *L, int nolua)
+{
+ TValue *o = L->base;
+ if (L->top > o) {
+ if (tvisproto(o)) {
+ return protoV(o);
+ } else if (tvisfunc(o)) {
+ if (isluafunc(funcV(o)))
+ return funcproto(funcV(o));
+ else if (nolua)
+ return NULL;
+ }
+ }
+ lj_err_argt(L, 1, LUA_TFUNCTION);
+ return NULL; /* unreachable */
+}
+
+static void setintfield(lua_State *L, GCtab *t, const char *name, int32_t val)
+{
+ setintV(lj_tab_setstr(L, t, lj_str_newz(L, name)), val);
+}
+
+/* local info = jit.util.funcinfo(func [,pc]) */
+LJLIB_CF(jit_util_funcinfo)
+{
+ GCproto *pt = check_Lproto(L, 1);
+ if (pt) {
+ BCPos pc = (BCPos)lj_lib_optint(L, 2, 0);
+ GCtab *t;
+ lua_createtable(L, 0, 16); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ setintfield(L, t, "linedefined", pt->firstline);
+ setintfield(L, t, "lastlinedefined", pt->firstline + pt->numline);
+ setintfield(L, t, "stackslots", pt->framesize);
+ setintfield(L, t, "params", pt->numparams);
+ setintfield(L, t, "bytecodes", (int32_t)pt->sizebc);
+ setintfield(L, t, "gcconsts", (int32_t)pt->sizekgc);
+ setintfield(L, t, "nconsts", (int32_t)pt->sizekn);
+ setintfield(L, t, "upvalues", (int32_t)pt->sizeuv);
+ if (pc < pt->sizebc)
+ setintfield(L, t, "currentline", lj_debug_line(pt, pc));
+ lua_pushboolean(L, (pt->flags & PROTO_VARARG));
+ lua_setfield(L, -2, "isvararg");
+ lua_pushboolean(L, (pt->flags & PROTO_CHILD));
+ lua_setfield(L, -2, "children");
+ setstrV(L, L->top++, proto_chunkname(pt));
+ lua_setfield(L, -2, "source");
+ lj_debug_pushloc(L, pt, pc);
+ lua_setfield(L, -2, "loc");
+ } else {
+ GCfunc *fn = funcV(L->base);
+ GCtab *t;
+ lua_createtable(L, 0, 4); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ if (!iscfunc(fn))
+ setintfield(L, t, "ffid", fn->c.ffid);
+ setintptrV(lj_tab_setstr(L, t, lj_str_newlit(L, "addr")),
+ (intptr_t)(void *)fn->c.f);
+ setintfield(L, t, "upvalues", fn->c.nupvalues);
+ }
+ return 1;
+}
+
+/* local ins, m = jit.util.funcbc(func, pc) */
+LJLIB_CF(jit_util_funcbc)
+{
+ GCproto *pt = check_Lproto(L, 0);
+ BCPos pc = (BCPos)lj_lib_checkint(L, 2);
+ if (pc < pt->sizebc) {
+ BCIns ins = proto_bc(pt)[pc];
+ BCOp op = bc_op(ins);
+ lua_assert(op < BC__MAX);
+ setintV(L->top, ins);
+ setintV(L->top+1, lj_bc_mode[op]);
+ L->top += 2;
+ return 2;
+ }
+ return 0;
+}
+
+/* local k = jit.util.funck(func, idx) */
+LJLIB_CF(jit_util_funck)
+{
+ GCproto *pt = check_Lproto(L, 0);
+ ptrdiff_t idx = (ptrdiff_t)lj_lib_checkint(L, 2);
+ if (idx >= 0) {
+ if (idx < (ptrdiff_t)pt->sizekn) {
+ copyTV(L, L->top-1, proto_knumtv(pt, idx));
+ return 1;
+ }
+ } else {
+ if (~idx < (ptrdiff_t)pt->sizekgc) {
+ GCobj *gc = proto_kgc(pt, idx);
+ setgcV(L, L->top-1, gc, ~gc->gch.gct);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* local name = jit.util.funcuvname(func, idx) */
+LJLIB_CF(jit_util_funcuvname)
+{
+ GCproto *pt = check_Lproto(L, 0);
+ uint32_t idx = (uint32_t)lj_lib_checkint(L, 2);
+ if (idx < pt->sizeuv) {
+ setstrV(L, L->top-1, lj_str_newz(L, lj_debug_uvname(pt, idx)));
+ return 1;
+ }
+ return 0;
+}
+
+/* -- Reflection API for traces ------------------------------------------- */
+
+#if LJ_HASJIT
+
+/* Check trace argument. Must not throw for non-existent trace numbers. */
+static GCtrace *jit_checktrace(lua_State *L)
+{
+ TraceNo tr = (TraceNo)lj_lib_checkint(L, 1);
+ jit_State *J = L2J(L);
+ if (tr > 0 && tr < J->sizetrace)
+ return traceref(J, tr);
+ return NULL;
+}
+
+/* Names of link types. ORDER LJ_TRLINK */
+static const char *const jit_trlinkname[] = {
+ "none", "root", "loop", "tail-recursion", "up-recursion", "down-recursion",
+ "interpreter", "return"
+};
+
+/* local info = jit.util.traceinfo(tr) */
+LJLIB_CF(jit_util_traceinfo)
+{
+ GCtrace *T = jit_checktrace(L);
+ if (T) {
+ GCtab *t;
+ lua_createtable(L, 0, 8); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ setintfield(L, t, "nins", (int32_t)T->nins - REF_BIAS - 1);
+ setintfield(L, t, "nk", REF_BIAS - (int32_t)T->nk);
+ setintfield(L, t, "link", T->link);
+ setintfield(L, t, "nexit", T->nsnap);
+ setstrV(L, L->top++, lj_str_newz(L, jit_trlinkname[T->linktype]));
+ lua_setfield(L, -2, "linktype");
+ /* There are many more fields. Add them only when needed. */
+ return 1;
+ }
+ return 0;
+}
+
+/* local m, ot, op1, op2, prev = jit.util.traceir(tr, idx) */
+LJLIB_CF(jit_util_traceir)
+{
+ GCtrace *T = jit_checktrace(L);
+ IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS;
+ if (T && ref >= REF_BIAS && ref < T->nins) {
+ IRIns *ir = &T->ir[ref];
+ int32_t m = lj_ir_mode[ir->o];
+ setintV(L->top-2, m);
+ setintV(L->top-1, ir->ot);
+ setintV(L->top++, (int32_t)ir->op1 - (irm_op1(m)==IRMref ? REF_BIAS : 0));
+ setintV(L->top++, (int32_t)ir->op2 - (irm_op2(m)==IRMref ? REF_BIAS : 0));
+ setintV(L->top++, ir->prev);
+ return 5;
+ }
+ return 0;
+}
+
+/* local k, t [, slot] = jit.util.tracek(tr, idx) */
+LJLIB_CF(jit_util_tracek)
+{
+ GCtrace *T = jit_checktrace(L);
+ IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS;
+ if (T && ref >= T->nk && ref < REF_BIAS) {
+ IRIns *ir = &T->ir[ref];
+ int32_t slot = -1;
+ if (ir->o == IR_KSLOT) {
+ slot = ir->op2;
+ ir = &T->ir[ir->op1];
+ }
+ lj_ir_kvalue(L, L->top-2, ir);
+ setintV(L->top-1, (int32_t)irt_type(ir->t));
+ if (slot == -1)
+ return 2;
+ setintV(L->top++, slot);
+ return 3;
+ }
+ return 0;
+}
+
+/* local snap = jit.util.tracesnap(tr, sn) */
+LJLIB_CF(jit_util_tracesnap)
+{
+ GCtrace *T = jit_checktrace(L);
+ SnapNo sn = (SnapNo)lj_lib_checkint(L, 2);
+ if (T && sn < T->nsnap) {
+ SnapShot *snap = &T->snap[sn];
+ SnapEntry *map = &T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ GCtab *t;
+ lua_createtable(L, nent+2, 0);
+ t = tabV(L->top-1);
+ setintV(lj_tab_setint(L, t, 0), (int32_t)snap->ref - REF_BIAS);
+ setintV(lj_tab_setint(L, t, 1), (int32_t)snap->nslots);
+ for (n = 0; n < nent; n++)
+ setintV(lj_tab_setint(L, t, (int32_t)(n+2)), (int32_t)map[n]);
+ setintV(lj_tab_setint(L, t, (int32_t)(nent+2)), (int32_t)SNAP(255, 0, 0));
+ return 1;
+ }
+ return 0;
+}
+
+/* local mcode, addr, loop = jit.util.tracemc(tr) */
+LJLIB_CF(jit_util_tracemc)
+{
+ GCtrace *T = jit_checktrace(L);
+ if (T && T->mcode != NULL) {
+ setstrV(L, L->top-1, lj_str_new(L, (const char *)T->mcode, T->szmcode));
+ setintptrV(L->top++, (intptr_t)(void *)T->mcode);
+ setintV(L->top++, T->mcloop);
+ return 3;
+ }
+ return 0;
+}
+
+/* local addr = jit.util.traceexitstub([tr,] exitno) */
+LJLIB_CF(jit_util_traceexitstub)
+{
+#ifdef EXITSTUBS_PER_GROUP
+ ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1);
+ jit_State *J = L2J(L);
+ if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) {
+ setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno));
+ return 1;
+ }
+#else
+ if (L->top > L->base+1) { /* Don't throw for one-argument variant. */
+ GCtrace *T = jit_checktrace(L);
+ ExitNo exitno = (ExitNo)lj_lib_checkint(L, 2);
+ ExitNo maxexit = T->root ? T->nsnap+1 : T->nsnap;
+ if (T && T->mcode != NULL && exitno < maxexit) {
+ setintptrV(L->top-1, (intptr_t)(void *)exitstub_trace_addr(T, exitno));
+ return 1;
+ }
+ }
+#endif
+ return 0;
+}
+
+/* local addr = jit.util.ircalladdr(idx) */
+LJLIB_CF(jit_util_ircalladdr)
+{
+ uint32_t idx = (uint32_t)lj_lib_checkint(L, 1);
+ if (idx < IRCALL__MAX) {
+ setintptrV(L->top-1, (intptr_t)(void *)lj_ir_callinfo[idx].func);
+ return 1;
+ }
+ return 0;
+}
+
+#else
+
+static int trace_nojit(lua_State *L)
+{
+ UNUSED(L);
+ return 0;
+}
+#define lj_cf_jit_util_traceinfo trace_nojit
+#define lj_cf_jit_util_traceir trace_nojit
+#define lj_cf_jit_util_tracek trace_nojit
+#define lj_cf_jit_util_tracesnap trace_nojit
+#define lj_cf_jit_util_tracemc trace_nojit
+#define lj_cf_jit_util_traceexitstub trace_nojit
+#define lj_cf_jit_util_ircalladdr trace_nojit
+
+#endif
+
+#include "lj_libdef.h"
+
+/* -- jit.opt module ------------------------------------------------------ */
+
+#define LJLIB_MODULE_jit_opt
+
+#if LJ_HASJIT
+/* Parse optimization level. */
+static int jitopt_level(jit_State *J, const char *str)
+{
+ if (str[0] >= '0' && str[0] <= '9' && str[1] == '\0') {
+ uint32_t flags;
+ if (str[0] == '0') flags = JIT_F_OPT_0;
+ else if (str[0] == '1') flags = JIT_F_OPT_1;
+ else if (str[0] == '2') flags = JIT_F_OPT_2;
+ else flags = JIT_F_OPT_3;
+ J->flags = (J->flags & ~JIT_F_OPT_MASK) | flags;
+ return 1; /* Ok. */
+ }
+ return 0; /* No match. */
+}
+
+/* Parse optimization flag. */
+static int jitopt_flag(jit_State *J, const char *str)
+{
+ const char *lst = JIT_F_OPTSTRING;
+ uint32_t opt;
+ int set = 1;
+ if (str[0] == '+') {
+ str++;
+ } else if (str[0] == '-') {
+ str++;
+ set = 0;
+ } else if (str[0] == 'n' && str[1] == 'o') {
+ str += str[2] == '-' ? 3 : 2;
+ set = 0;
+ }
+ for (opt = JIT_F_OPT_FIRST; ; opt <<= 1) {
+ size_t len = *(const uint8_t *)lst;
+ if (len == 0)
+ break;
+ if (strncmp(str, lst+1, len) == 0 && str[len] == '\0') {
+ if (set) J->flags |= opt; else J->flags &= ~opt;
+ return 1; /* Ok. */
+ }
+ lst += 1+len;
+ }
+ return 0; /* No match. */
+}
+
+/* Parse optimization parameter. */
+static int jitopt_param(jit_State *J, const char *str)
+{
+ const char *lst = JIT_P_STRING;
+ int i;
+ for (i = 0; i < JIT_P__MAX; i++) {
+ size_t len = *(const uint8_t *)lst;
+ lua_assert(len != 0);
+ if (strncmp(str, lst+1, len) == 0 && str[len] == '=') {
+ int32_t n = 0;
+ const char *p = &str[len+1];
+ while (*p >= '0' && *p <= '9')
+ n = n*10 + (*p++ - '0');
+ if (*p) return 0; /* Malformed number. */
+ J->param[i] = n;
+ if (i == JIT_P_hotloop)
+ lj_dispatch_init_hotcount(J2G(J));
+ return 1; /* Ok. */
+ }
+ lst += 1+len;
+ }
+ return 0; /* No match. */
+}
+#endif
+
+/* jit.opt.start(flags...) */
+LJLIB_CF(jit_opt_start)
+{
+#if LJ_HASJIT
+ jit_State *J = L2J(L);
+ int nargs = (int)(L->top - L->base);
+ if (nargs == 0) {
+ J->flags = (J->flags & ~JIT_F_OPT_MASK) | JIT_F_OPT_DEFAULT;
+ } else {
+ int i;
+ for (i = 1; i <= nargs; i++) {
+ const char *str = strdata(lj_lib_checkstr(L, i));
+ if (!jitopt_level(J, str) &&
+ !jitopt_flag(J, str) &&
+ !jitopt_param(J, str))
+ lj_err_callerv(L, LJ_ERR_JITOPT, str);
+ }
+ }
+#else
+ lj_err_caller(L, LJ_ERR_NOJIT);
+#endif
+ return 0;
+}
+
+#include "lj_libdef.h"
+
+/* -- JIT compiler initialization ----------------------------------------- */
+
+#if LJ_HASJIT
+/* Default values for JIT parameters. */
+static const int32_t jit_param_default[JIT_P__MAX+1] = {
+#define JIT_PARAMINIT(len, name, value) (value),
+JIT_PARAMDEF(JIT_PARAMINIT)
+#undef JIT_PARAMINIT
+ 0
+};
+#endif
+
+#if LJ_TARGET_ARM && LJ_TARGET_LINUX
+#include <sys/utsname.h>
+#endif
+
+/* Arch-dependent CPU detection. */
+static uint32_t jit_cpudetect(lua_State *L)
+{
+ uint32_t flags = 0;
+#if LJ_TARGET_X86ORX64
+ uint32_t vendor[4];
+ uint32_t features[4];
+ if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) {
+#if !LJ_HASJIT
+#define JIT_F_CMOV 1
+#define JIT_F_SSE2 2
+#endif
+ flags |= ((features[3] >> 15)&1) * JIT_F_CMOV;
+ flags |= ((features[3] >> 26)&1) * JIT_F_SSE2;
+#if LJ_HASJIT
+ flags |= ((features[2] >> 0)&1) * JIT_F_SSE3;
+ flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1;
+ if (vendor[2] == 0x6c65746e) { /* Intel. */
+ if ((features[0] & 0x0ff00f00) == 0x00000f00) /* P4. */
+ flags |= JIT_F_P4; /* Currently unused. */
+ else if ((features[0] & 0x0fff0ff0) == 0x000106c0) /* Atom. */
+ flags |= JIT_F_LEA_AGU;
+ } else if (vendor[2] == 0x444d4163) { /* AMD. */
+ uint32_t fam = (features[0] & 0x0ff00f00);
+ if (fam == 0x00000f00) /* K8. */
+ flags |= JIT_F_SPLIT_XMM;
+ if (fam >= 0x00000f00) /* K8, K10. */
+ flags |= JIT_F_PREFER_IMUL;
+ }
+#endif
+ }
+ /* Check for required instruction set support on x86 (unnecessary on x64). */
+#if LJ_TARGET_X86
+#if !defined(LUAJIT_CPU_NOCMOV)
+ if (!(flags & JIT_F_CMOV))
+ luaL_error(L, "Ancient CPU lacks CMOV support (recompile with -DLUAJIT_CPU_NOCMOV)");
+#endif
+#if defined(LUAJIT_CPU_SSE2)
+ if (!(flags & JIT_F_SSE2))
+ luaL_error(L, "CPU does not support SSE2 (recompile without -DLUAJIT_CPU_SSE2)");
+#endif
+#endif
+#elif LJ_TARGET_ARM
+#if LJ_HASJIT
+ /* Compile-time ARM CPU detection. */
+#if __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__
+ flags |= JIT_F_ARMV6|JIT_F_ARMV6T2|JIT_F_ARMV7;
+#elif __ARM_ARCH_6T2__
+ flags |= JIT_F_ARMV6|JIT_F_ARMV6T2;
+#elif __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6Z__ || __ARM_ARCH_6ZK__
+ flags |= JIT_F_ARMV6;
+#endif
+ /* Runtime ARM CPU detection. */
+#if LJ_TARGET_LINUX
+ if (!(flags & JIT_F_ARMV7)) {
+ struct utsname ut;
+ uname(&ut);
+ if (strncmp(ut.machine, "armv", 4) == 0) {
+ if (ut.machine[4] >= '7')
+ flags |= JIT_F_ARMV6|JIT_F_ARMV6T2|JIT_F_ARMV7;
+ else if (ut.machine[4] == '6')
+ flags |= JIT_F_ARMV6;
+ }
+ }
+#endif
+#endif
+#elif LJ_TARGET_PPC || LJ_TARGET_PPCSPE
+ /* Nothing to do. */
+#elif LJ_TARGET_MIPS
+#if LJ_HASJIT
+ /* Compile-time MIPS CPU detection. */
+#if _MIPS_ARCH_MIPS32R2
+ flags |= JIT_F_MIPS32R2;
+#endif
+ /* Runtime MIPS CPU detection. */
+#if defined(__GNUC__)
+ if (!(flags & JIT_F_MIPS32R2)) {
+ int x;
+ /* On MIPS32R1 rotr is treated as srl. rotr r2,r2,1 -> srl r2,r2,1. */
+ __asm__("li $2, 1\n\t.long 0x00221042\n\tmove %0, $2" : "=r"(x) : : "$2");
+ if (x) flags |= JIT_F_MIPS32R2; /* Either 0x80000000 (R2) or 0 (R1). */
+ }
+#endif
+#endif
+#else
+#error "Missing CPU detection for this architecture"
+#endif
+ UNUSED(L);
+ return flags;
+}
+
+/* Initialize JIT compiler. */
+static void jit_init(lua_State *L)
+{
+ uint32_t flags = jit_cpudetect(L);
+#if LJ_HASJIT
+ jit_State *J = L2J(L);
+#if LJ_TARGET_X86
+ /* Silently turn off the JIT compiler on CPUs without SSE2. */
+ if ((flags & JIT_F_SSE2))
+#endif
+ J->flags = flags | JIT_F_ON | JIT_F_OPT_DEFAULT;
+ memcpy(J->param, jit_param_default, sizeof(J->param));
+ lj_dispatch_update(G(L));
+#else
+ UNUSED(flags);
+#endif
+}
+
+LUALIB_API int luaopen_jit(lua_State *L)
+{
+ lua_pushliteral(L, LJ_OS_NAME);
+ lua_pushliteral(L, LJ_ARCH_NAME);
+ lua_pushinteger(L, LUAJIT_VERSION_NUM);
+ lua_pushliteral(L, LUAJIT_VERSION);
+ LJ_LIB_REG(L, LUA_JITLIBNAME, jit);
+#ifndef LUAJIT_DISABLE_JITUTIL
+ LJ_LIB_REG(L, "jit.util", jit_util);
+#endif
+ LJ_LIB_REG(L, "jit.opt", jit_opt);
+ L->top -= 2;
+ jit_init(L);
+ return 1;
+}
+
diff --git a/src/LuaJIT/src/lib_math.c b/src/LuaJIT/src/lib_math.c
new file mode 100644
index 000000000..3a56514b9
--- /dev/null
+++ b/src/LuaJIT/src/lib_math.c
@@ -0,0 +1,218 @@
+/*
+** Math library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <math.h>
+
+#define lib_math_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_lib.h"
+#include "lj_vm.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_math
+
+LJLIB_ASM(math_abs) LJLIB_REC(.)
+{
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_floor) LJLIB_REC(math_round IRFPM_FLOOR)
+LJLIB_ASM_(math_ceil) LJLIB_REC(math_round IRFPM_CEIL)
+
+LJLIB_ASM(math_sqrt) LJLIB_REC(math_unary IRFPM_SQRT)
+{
+ lj_lib_checknum(L, 1);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_log) LJLIB_REC(math_unary IRFPM_LOG)
+LJLIB_ASM_(math_log10) LJLIB_REC(math_unary IRFPM_LOG10)
+LJLIB_ASM_(math_exp) LJLIB_REC(math_unary IRFPM_EXP)
+LJLIB_ASM_(math_sin) LJLIB_REC(math_unary IRFPM_SIN)
+LJLIB_ASM_(math_cos) LJLIB_REC(math_unary IRFPM_COS)
+LJLIB_ASM_(math_tan) LJLIB_REC(math_unary IRFPM_TAN)
+LJLIB_ASM_(math_asin) LJLIB_REC(math_atrig FF_math_asin)
+LJLIB_ASM_(math_acos) LJLIB_REC(math_atrig FF_math_acos)
+LJLIB_ASM_(math_atan) LJLIB_REC(math_atrig FF_math_atan)
+LJLIB_ASM_(math_sinh) LJLIB_REC(math_htrig IRCALL_sinh)
+LJLIB_ASM_(math_cosh) LJLIB_REC(math_htrig IRCALL_cosh)
+LJLIB_ASM_(math_tanh) LJLIB_REC(math_htrig IRCALL_tanh)
+LJLIB_ASM_(math_frexp)
+LJLIB_ASM_(math_modf) LJLIB_REC(.)
+
+LJLIB_PUSH(57.29577951308232)
+LJLIB_ASM_(math_deg) LJLIB_REC(math_degrad)
+
+LJLIB_PUSH(0.017453292519943295)
+LJLIB_ASM_(math_rad) LJLIB_REC(math_degrad)
+
+LJLIB_ASM(math_atan2) LJLIB_REC(.)
+{
+ lj_lib_checknum(L, 1);
+ lj_lib_checknum(L, 2);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_pow) LJLIB_REC(.)
+LJLIB_ASM_(math_fmod)
+
+LJLIB_ASM(math_ldexp) LJLIB_REC(.)
+{
+ lj_lib_checknum(L, 1);
+#if LJ_DUALNUM && !LJ_TARGET_X86ORX64
+ lj_lib_checkint(L, 2);
+#else
+ lj_lib_checknum(L, 2);
+#endif
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(math_min) LJLIB_REC(math_minmax IR_MIN)
+{
+ int i = 0;
+ do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_max) LJLIB_REC(math_minmax IR_MAX)
+
+LJLIB_PUSH(3.14159265358979323846) LJLIB_SET(pi)
+LJLIB_PUSH(1e310) LJLIB_SET(huge)
+
+/* ------------------------------------------------------------------------ */
+
+/* This implements a Tausworthe PRNG with period 2^223. Based on:
+** Tables of maximally-equidistributed combined LFSR generators,
+** Pierre L'Ecuyer, 1991, table 3, 1st entry.
+** Full-period ME-CF generator with L=64, J=4, k=223, N1=49.
+*/
+
+/* PRNG state. */
+struct RandomState {
+ uint64_t gen[4]; /* State of the 4 LFSR generators. */
+ int valid; /* State is valid. */
+};
+
+/* Union needed for bit-pattern conversion between uint64_t and double. */
+typedef union { uint64_t u64; double d; } U64double;
+
+/* Update generator i and compute a running xor of all states. */
+#define TW223_GEN(i, k, q, s) \
+ z = rs->gen[i]; \
+ z = (((z<<q)^z) >> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<<s); \
+ r ^= z; rs->gen[i] = z;
+
+/* PRNG step function. Returns a double in the range 1.0 <= d < 2.0. */
+LJ_NOINLINE uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs)
+{
+ uint64_t z, r = 0;
+ TW223_GEN(0, 63, 31, 18)
+ TW223_GEN(1, 58, 19, 28)
+ TW223_GEN(2, 55, 24, 7)
+ TW223_GEN(3, 47, 21, 8)
+ return (r & U64x(000fffff,ffffffff)) | U64x(3ff00000,00000000);
+}
+
+/* PRNG initialization function. */
+static void random_init(RandomState *rs, double d)
+{
+ uint32_t r = 0x11090601; /* 64-k[i] as four 8 bit constants. */
+ int i;
+ for (i = 0; i < 4; i++) {
+ U64double u;
+ uint32_t m = 1u << (r&255);
+ r >>= 8;
+ u.d = d = d * 3.14159265358979323846 + 2.7182818284590452354;
+ if (u.u64 < m) u.u64 += m; /* Ensure k[i] MSB of gen[i] are non-zero. */
+ rs->gen[i] = u.u64;
+ }
+ rs->valid = 1;
+ for (i = 0; i < 10; i++)
+ lj_math_random_step(rs);
+}
+
+/* PRNG extract function. */
+LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */
+LJLIB_CF(math_random) LJLIB_REC(.)
+{
+ int n = (int)(L->top - L->base);
+ RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
+ U64double u;
+ double d;
+ if (LJ_UNLIKELY(!rs->valid)) random_init(rs, 0.0);
+ u.u64 = lj_math_random_step(rs);
+ d = u.d - 1.0;
+ if (n > 0) {
+#if LJ_DUALNUM
+ int isint = 1;
+ double r1;
+ lj_lib_checknumber(L, 1);
+ if (tvisint(L->base)) {
+ r1 = (lua_Number)intV(L->base);
+ } else {
+ isint = 0;
+ r1 = numV(L->base);
+ }
+#else
+ double r1 = lj_lib_checknum(L, 1);
+#endif
+ if (n == 1) {
+ d = lj_vm_floor(d*r1) + 1.0; /* d is an int in range [1, r1] */
+ } else {
+#if LJ_DUALNUM
+ double r2;
+ lj_lib_checknumber(L, 2);
+ if (tvisint(L->base+1)) {
+ r2 = (lua_Number)intV(L->base+1);
+ } else {
+ isint = 0;
+ r2 = numV(L->base+1);
+ }
+#else
+ double r2 = lj_lib_checknum(L, 2);
+#endif
+ d = lj_vm_floor(d*(r2-r1+1.0)) + r1; /* d is an int in range [r1, r2] */
+ }
+#if LJ_DUALNUM
+ if (isint) {
+ setintV(L->top-1, lj_num2int(d));
+ return 1;
+ }
+#endif
+ } /* else: d is a double in range [0, 1] */
+ setnumV(L->top++, d);
+ return 1;
+}
+
+/* PRNG seed function. */
+LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */
+LJLIB_CF(math_randomseed)
+{
+ RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
+ random_init(rs, lj_lib_checknum(L, 1));
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_math(lua_State *L)
+{
+ RandomState *rs;
+ rs = (RandomState *)lua_newuserdata(L, sizeof(RandomState));
+ rs->valid = 0; /* Use lazy initialization to save some time on startup. */
+ LJ_LIB_REG(L, LUA_MATHLIBNAME, math);
+#if defined(LUA_COMPAT_MOD)
+ lua_getfield(L, -1, "fmod");
+ lua_setfield(L, -2, "mod");
+#endif
+ return 1;
+}
+
diff --git a/src/LuaJIT/src/lib_os.c b/src/LuaJIT/src/lib_os.c
new file mode 100644
index 000000000..a3c951baa
--- /dev/null
+++ b/src/LuaJIT/src/lib_os.c
@@ -0,0 +1,256 @@
+/*
+** OS library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <locale.h>
+#include <time.h>
+
+#define lib_os_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_lib.h"
+
+#if LJ_TARGET_POSIX
+#include <unistd.h>
+#else
+#include <stdio.h>
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_os
+
+static int os_pushresult(lua_State *L, int i, const char *filename)
+{
+ int en = errno; /* calls to Lua API may change this value */
+ if (i) {
+ setboolV(L->top-1, 1);
+ return 1;
+ } else {
+ setnilV(L->top-1);
+ lua_pushfstring(L, "%s: %s", filename, strerror(en));
+ lua_pushinteger(L, en);
+ return 3;
+ }
+}
+
+LJLIB_CF(os_execute)
+{
+ lua_pushinteger(L, system(luaL_optstring(L, 1, NULL)));
+ return 1;
+}
+
+LJLIB_CF(os_remove)
+{
+ const char *filename = luaL_checkstring(L, 1);
+ return os_pushresult(L, remove(filename) == 0, filename);
+}
+
+LJLIB_CF(os_rename)
+{
+ const char *fromname = luaL_checkstring(L, 1);
+ const char *toname = luaL_checkstring(L, 2);
+ return os_pushresult(L, rename(fromname, toname) == 0, fromname);
+}
+
+LJLIB_CF(os_tmpname)
+{
+#if LJ_TARGET_POSIX
+ char buf[15+1];
+ int fp;
+ strcpy(buf, "/tmp/lua_XXXXXX");
+ fp = mkstemp(buf);
+ if (fp != -1)
+ close(fp);
+ else
+ lj_err_caller(L, LJ_ERR_OSUNIQF);
+#else
+ char buf[L_tmpnam];
+ if (tmpnam(buf) == NULL)
+ lj_err_caller(L, LJ_ERR_OSUNIQF);
+#endif
+ lua_pushstring(L, buf);
+ return 1;
+}
+
+LJLIB_CF(os_getenv)
+{
+ lua_pushstring(L, getenv(luaL_checkstring(L, 1))); /* if NULL push nil */
+ return 1;
+}
+
+LJLIB_CF(os_exit)
+{
+ int status;
+ if (L->base < L->top && tvisbool(L->base))
+ status = boolV(L->base) ? EXIT_SUCCESS : EXIT_FAILURE;
+ else
+ status = lj_lib_optint(L, 1, EXIT_SUCCESS);
+ if (L->base+1 < L->top && tvistruecond(L->base+1))
+ lua_close(L);
+ exit(status);
+ return 0; /* Unreachable. */
+}
+
+LJLIB_CF(os_clock)
+{
+ setnumV(L->top++, ((lua_Number)clock())*(1.0/(lua_Number)CLOCKS_PER_SEC));
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void setfield(lua_State *L, const char *key, int value)
+{
+ lua_pushinteger(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setboolfield(lua_State *L, const char *key, int value)
+{
+ if (value < 0) /* undefined? */
+ return; /* does not set field */
+ lua_pushboolean(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static int getboolfield(lua_State *L, const char *key)
+{
+ int res;
+ lua_getfield(L, -1, key);
+ res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ return res;
+}
+
+static int getfield(lua_State *L, const char *key, int d)
+{
+ int res;
+ lua_getfield(L, -1, key);
+ if (lua_isnumber(L, -1)) {
+ res = (int)lua_tointeger(L, -1);
+ } else {
+ if (d < 0)
+ lj_err_callerv(L, LJ_ERR_OSDATEF, key);
+ res = d;
+ }
+ lua_pop(L, 1);
+ return res;
+}
+
+LJLIB_CF(os_date)
+{
+ const char *s = luaL_optstring(L, 1, "%c");
+ time_t t = luaL_opt(L, (time_t)luaL_checknumber, 2, time(NULL));
+ struct tm *stm;
+ if (*s == '!') { /* UTC? */
+ stm = gmtime(&t);
+ s++; /* skip `!' */
+ } else {
+ stm = localtime(&t);
+ }
+ if (stm == NULL) { /* invalid date? */
+ setnilV(L->top-1);
+ } else if (strcmp(s, "*t") == 0) {
+ lua_createtable(L, 0, 9); /* 9 = number of fields */
+ setfield(L, "sec", stm->tm_sec);
+ setfield(L, "min", stm->tm_min);
+ setfield(L, "hour", stm->tm_hour);
+ setfield(L, "day", stm->tm_mday);
+ setfield(L, "month", stm->tm_mon+1);
+ setfield(L, "year", stm->tm_year+1900);
+ setfield(L, "wday", stm->tm_wday+1);
+ setfield(L, "yday", stm->tm_yday+1);
+ setboolfield(L, "isdst", stm->tm_isdst);
+ } else {
+ char cc[3];
+ luaL_Buffer b;
+ cc[0] = '%'; cc[2] = '\0';
+ luaL_buffinit(L, &b);
+ for (; *s; s++) {
+ if (*s != '%' || *(s + 1) == '\0') { /* no conversion specifier? */
+ luaL_addchar(&b, *s);
+ } else {
+ size_t reslen;
+ char buff[200]; /* should be big enough for any conversion result */
+ cc[1] = *(++s);
+ reslen = strftime(buff, sizeof(buff), cc, stm);
+ luaL_addlstring(&b, buff, reslen);
+ }
+ }
+ luaL_pushresult(&b);
+ }
+ return 1;
+}
+
+LJLIB_CF(os_time)
+{
+ time_t t;
+ if (lua_isnoneornil(L, 1)) { /* called without args? */
+ t = time(NULL); /* get current time */
+ } else {
+ struct tm ts;
+ luaL_checktype(L, 1, LUA_TTABLE);
+ lua_settop(L, 1); /* make sure table is at the top */
+ ts.tm_sec = getfield(L, "sec", 0);
+ ts.tm_min = getfield(L, "min", 0);
+ ts.tm_hour = getfield(L, "hour", 12);
+ ts.tm_mday = getfield(L, "day", -1);
+ ts.tm_mon = getfield(L, "month", -1) - 1;
+ ts.tm_year = getfield(L, "year", -1) - 1900;
+ ts.tm_isdst = getboolfield(L, "isdst");
+ t = mktime(&ts);
+ }
+ if (t == (time_t)(-1))
+ lua_pushnil(L);
+ else
+ lua_pushnumber(L, (lua_Number)t);
+ return 1;
+}
+
+LJLIB_CF(os_difftime)
+{
+ lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)),
+ (time_t)(luaL_optnumber(L, 2, (lua_Number)0))));
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(os_setlocale)
+{
+ GCstr *s = lj_lib_optstr(L, 1);
+ const char *str = s ? strdata(s) : NULL;
+ int opt = lj_lib_checkopt(L, 2, 6,
+ "\5ctype\7numeric\4time\7collate\10monetary\1\377\3all");
+ if (opt == 0) opt = LC_CTYPE;
+ else if (opt == 1) opt = LC_NUMERIC;
+ else if (opt == 2) opt = LC_TIME;
+ else if (opt == 3) opt = LC_COLLATE;
+ else if (opt == 4) opt = LC_MONETARY;
+ else if (opt == 6) opt = LC_ALL;
+ lua_pushstring(L, setlocale(opt, str));
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_os(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_OSLIBNAME, os);
+ return 1;
+}
+
diff --git a/src/LuaJIT/src/lib_package.c b/src/LuaJIT/src/lib_package.c
new file mode 100644
index 000000000..e8ea740ff
--- /dev/null
+++ b/src/LuaJIT/src/lib_package.c
@@ -0,0 +1,585 @@
+/*
+** Package library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_package_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+/* Error codes for ll_loadfunc. */
+#define PACKAGE_ERR_LIB 1
+#define PACKAGE_ERR_FUNC 2
+#define PACKAGE_ERR_LOAD 3
+
+/* Redefined in platform specific part. */
+#define PACKAGE_LIB_FAIL "open"
+#define setprogdir(L) ((void)0)
+
+/* Symbol name prefixes. */
+#define SYMPREFIX_CF "luaopen_%s"
+#define SYMPREFIX_BC "luaJIT_BC_%s"
+
+#if LJ_TARGET_DLOPEN
+
+#include <dlfcn.h>
+
+static void ll_unloadlib(void *lib)
+{
+ dlclose(lib);
+}
+
+static void *ll_load(lua_State *L, const char *path)
+{
+ void *lib = dlopen(path, RTLD_NOW);
+ if (lib == NULL) lua_pushstring(L, dlerror());
+ return lib;
+}
+
+static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
+{
+ lua_CFunction f = (lua_CFunction)dlsym(lib, sym);
+ if (f == NULL) lua_pushstring(L, dlerror());
+ return f;
+}
+
+static const char *ll_bcsym(void *lib, const char *sym)
+{
+#if defined(RTLD_DEFAULT)
+ if (lib == NULL) lib = RTLD_DEFAULT;
+#elif LJ_TARGET_OSX || LJ_TARGET_BSD
+ if (lib == NULL) lib = (void *)(intptr_t)-2;
+#endif
+ return (const char *)dlsym(lib, sym);
+}
+
+#elif LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#ifndef WINVER
+#define WINVER 0x0500
+#endif
+#include <windows.h>
+
+#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS
+#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4
+#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2
+BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
+#endif
+
+#undef setprogdir
+
+static void setprogdir(lua_State *L)
+{
+ char buff[MAX_PATH + 1];
+ char *lb;
+ DWORD nsize = sizeof(buff);
+ DWORD n = GetModuleFileNameA(NULL, buff, nsize);
+ if (n == 0 || n == nsize || (lb = strrchr(buff, '\\')) == NULL) {
+ luaL_error(L, "unable to get ModuleFileName");
+ } else {
+ *lb = '\0';
+ luaL_gsub(L, lua_tostring(L, -1), LUA_EXECDIR, buff);
+ lua_remove(L, -2); /* remove original string */
+ }
+}
+
+static void pusherror(lua_State *L)
+{
+ DWORD error = GetLastError();
+ char buffer[128];
+ if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, error, 0, buffer, sizeof(buffer), NULL))
+ lua_pushstring(L, buffer);
+ else
+ lua_pushfstring(L, "system error %d\n", error);
+}
+
+static void ll_unloadlib(void *lib)
+{
+ FreeLibrary((HINSTANCE)lib);
+}
+
+static void *ll_load(lua_State *L, const char *path)
+{
+ HINSTANCE lib = LoadLibraryA(path);
+ if (lib == NULL) pusherror(L);
+ return lib;
+}
+
+static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
+{
+ lua_CFunction f = (lua_CFunction)GetProcAddress((HINSTANCE)lib, sym);
+ if (f == NULL) pusherror(L);
+ return f;
+}
+
+static const char *ll_bcsym(void *lib, const char *sym)
+{
+ if (lib) {
+ return (const char *)GetProcAddress((HINSTANCE)lib, sym);
+ } else {
+ HINSTANCE h = GetModuleHandleA(NULL);
+ const char *p = (const char *)GetProcAddress(h, sym);
+ if (p == NULL && GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const char *)ll_bcsym, &h))
+ p = (const char *)GetProcAddress(h, sym);
+ return p;
+ }
+}
+
+#else
+
+#undef PACKAGE_LIB_FAIL
+#define PACKAGE_LIB_FAIL "absent"
+
+#define DLMSG "dynamic libraries not enabled; no support for target OS"
+
+static void ll_unloadlib(void *lib)
+{
+ (void)lib;
+}
+
+static void *ll_load(lua_State *L, const char *path)
+{
+ (void)path;
+ lua_pushliteral(L, DLMSG);
+ return NULL;
+}
+
+static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
+{
+ (void)lib; (void)sym;
+ lua_pushliteral(L, DLMSG);
+ return NULL;
+}
+
+static const char *ll_bcsym(void *lib, const char *sym)
+{
+ (void)lib; (void)sym;
+ return NULL;
+}
+
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static void **ll_register(lua_State *L, const char *path)
+{
+ void **plib;
+ lua_pushfstring(L, "LOADLIB: %s", path);
+ lua_gettable(L, LUA_REGISTRYINDEX); /* check library in registry? */
+ if (!lua_isnil(L, -1)) { /* is there an entry? */
+ plib = (void **)lua_touserdata(L, -1);
+ } else { /* no entry yet; create one */
+ lua_pop(L, 1);
+ plib = (void **)lua_newuserdata(L, sizeof(void *));
+ *plib = NULL;
+ luaL_getmetatable(L, "_LOADLIB");
+ lua_setmetatable(L, -2);
+ lua_pushfstring(L, "LOADLIB: %s", path);
+ lua_pushvalue(L, -2);
+ lua_settable(L, LUA_REGISTRYINDEX);
+ }
+ return plib;
+}
+
+static const char *mksymname(lua_State *L, const char *modname,
+ const char *prefix)
+{
+ const char *funcname;
+ const char *mark = strchr(modname, *LUA_IGMARK);
+ if (mark) modname = mark + 1;
+ funcname = luaL_gsub(L, modname, ".", "_");
+ funcname = lua_pushfstring(L, prefix, funcname);
+ lua_remove(L, -2); /* remove 'gsub' result */
+ return funcname;
+}
+
+static int ll_loadfunc(lua_State *L, const char *path, const char *name, int r)
+{
+ void **reg = ll_register(L, path);
+ if (*reg == NULL) *reg = ll_load(L, path);
+ if (*reg == NULL) {
+ return PACKAGE_ERR_LIB; /* unable to load library */
+ } else {
+ const char *sym = r ? name : mksymname(L, name, SYMPREFIX_CF);
+ lua_CFunction f = ll_sym(L, *reg, sym);
+ if (f) {
+ lua_pushcfunction(L, f);
+ return 0;
+ }
+ if (!r) {
+ const char *bcdata = ll_bcsym(*reg, mksymname(L, name, SYMPREFIX_BC));
+ lua_pop(L, 1);
+ if (bcdata) {
+ if (luaL_loadbuffer(L, bcdata, ~(size_t)0, name) != 0)
+ return PACKAGE_ERR_LOAD;
+ return 0;
+ }
+ }
+ return PACKAGE_ERR_FUNC; /* unable to find function */
+ }
+}
+
+static int lj_cf_package_loadlib(lua_State *L)
+{
+ const char *path = luaL_checkstring(L, 1);
+ const char *init = luaL_checkstring(L, 2);
+ int st = ll_loadfunc(L, path, init, 1);
+ if (st == 0) { /* no errors? */
+ return 1; /* return the loaded function */
+ } else { /* error; error message is on stack top */
+ lua_pushnil(L);
+ lua_insert(L, -2);
+ lua_pushstring(L, (st == PACKAGE_ERR_LIB) ? PACKAGE_LIB_FAIL : "init");
+ return 3; /* return nil, error message, and where */
+ }
+}
+
+static int lj_cf_package_unloadlib(lua_State *L)
+{
+ void **lib = (void **)luaL_checkudata(L, 1, "_LOADLIB");
+ if (*lib) ll_unloadlib(*lib);
+ *lib = NULL; /* mark library as closed */
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int readable(const char *filename)
+{
+ FILE *f = fopen(filename, "r"); /* try to open file */
+ if (f == NULL) return 0; /* open failed */
+ fclose(f);
+ return 1;
+}
+
+static const char *pushnexttemplate(lua_State *L, const char *path)
+{
+ const char *l;
+ while (*path == *LUA_PATHSEP) path++; /* skip separators */
+ if (*path == '\0') return NULL; /* no more templates */
+ l = strchr(path, *LUA_PATHSEP); /* find next separator */
+ if (l == NULL) l = path + strlen(path);
+ lua_pushlstring(L, path, (size_t)(l - path)); /* template */
+ return l;
+}
+
+static const char *searchpath (lua_State *L, const char *name,
+ const char *path)
+{
+ name = luaL_gsub(L, name, ".", LUA_DIRSEP);
+ lua_pushliteral(L, ""); /* error accumulator */
+ while ((path = pushnexttemplate(L, path)) != NULL) {
+ const char *filename = luaL_gsub(L, lua_tostring(L, -1),
+ LUA_PATH_MARK, name);
+ lua_remove(L, -2); /* remove path template */
+ if (readable(filename)) /* does file exist and is readable? */
+ return filename; /* return that file name */
+ lua_pushfstring(L, "\n\tno file " LUA_QS, filename);
+ lua_remove(L, -2); /* remove file name */
+ lua_concat(L, 2); /* add entry to possible error message */
+ }
+ return NULL; /* not found */
+}
+
+static int lj_cf_package_searchpath(lua_State *L)
+{
+ const char *f = searchpath(L, luaL_checkstring(L, 1), luaL_checkstring(L, 2));
+ if (f != NULL) {
+ return 1;
+ } else { /* error message is on top of the stack */
+ lua_pushnil(L);
+ lua_insert(L, -2);
+ return 2; /* return nil + error message */
+ }
+}
+
+static const char *findfile(lua_State *L, const char *name,
+ const char *pname)
+{
+ const char *path;
+ lua_getfield(L, LUA_ENVIRONINDEX, pname);
+ path = lua_tostring(L, -1);
+ if (path == NULL)
+ luaL_error(L, LUA_QL("package.%s") " must be a string", pname);
+ return searchpath(L, name, path);
+}
+
+static void loaderror(lua_State *L, const char *filename)
+{
+ luaL_error(L, "error loading module " LUA_QS " from file " LUA_QS ":\n\t%s",
+ lua_tostring(L, 1), filename, lua_tostring(L, -1));
+}
+
+static int lj_cf_package_loader_lua(lua_State *L)
+{
+ const char *filename;
+ const char *name = luaL_checkstring(L, 1);
+ filename = findfile(L, name, "path");
+ if (filename == NULL) return 1; /* library not found in this path */
+ if (luaL_loadfile(L, filename) != 0)
+ loaderror(L, filename);
+ return 1; /* library loaded successfully */
+}
+
+static int lj_cf_package_loader_c(lua_State *L)
+{
+ const char *name = luaL_checkstring(L, 1);
+ const char *filename = findfile(L, name, "cpath");
+ if (filename == NULL) return 1; /* library not found in this path */
+ if (ll_loadfunc(L, filename, name, 0) != 0)
+ loaderror(L, filename);
+ return 1; /* library loaded successfully */
+}
+
+static int lj_cf_package_loader_croot(lua_State *L)
+{
+ const char *filename;
+ const char *name = luaL_checkstring(L, 1);
+ const char *p = strchr(name, '.');
+ int st;
+ if (p == NULL) return 0; /* is root */
+ lua_pushlstring(L, name, (size_t)(p - name));
+ filename = findfile(L, lua_tostring(L, -1), "cpath");
+ if (filename == NULL) return 1; /* root not found */
+ if ((st = ll_loadfunc(L, filename, name, 0)) != 0) {
+ if (st != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */
+ lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS,
+ name, filename);
+ return 1; /* function not found */
+ }
+ return 1;
+}
+
+static int lj_cf_package_loader_preload(lua_State *L)
+{
+ const char *name = luaL_checkstring(L, 1);
+ lua_getfield(L, LUA_ENVIRONINDEX, "preload");
+ if (!lua_istable(L, -1))
+ luaL_error(L, LUA_QL("package.preload") " must be a table");
+ lua_getfield(L, -1, name);
+ if (lua_isnil(L, -1)) { /* Not found? */
+ const char *bcname = mksymname(L, name, SYMPREFIX_BC);
+ const char *bcdata = ll_bcsym(NULL, bcname);
+ if (bcdata == NULL || luaL_loadbuffer(L, bcdata, ~(size_t)0, name) != 0)
+ lua_pushfstring(L, "\n\tno field package.preload['%s']", name);
+ }
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static const int sentinel_ = 0;
+#define sentinel ((void *)&sentinel_)
+
+static int lj_cf_package_require(lua_State *L)
+{
+ const char *name = luaL_checkstring(L, 1);
+ int i;
+ lua_settop(L, 1); /* _LOADED table will be at index 2 */
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, 2, name);
+ if (lua_toboolean(L, -1)) { /* is it there? */
+ if (lua_touserdata(L, -1) == sentinel) /* check loops */
+ luaL_error(L, "loop or previous error loading module " LUA_QS, name);
+ return 1; /* package is already loaded */
+ }
+ /* else must load it; iterate over available loaders */
+ lua_getfield(L, LUA_ENVIRONINDEX, "loaders");
+ if (!lua_istable(L, -1))
+ luaL_error(L, LUA_QL("package.loaders") " must be a table");
+ lua_pushliteral(L, ""); /* error message accumulator */
+ for (i = 1; ; i++) {
+ lua_rawgeti(L, -2, i); /* get a loader */
+ if (lua_isnil(L, -1))
+ luaL_error(L, "module " LUA_QS " not found:%s",
+ name, lua_tostring(L, -2));
+ lua_pushstring(L, name);
+ lua_call(L, 1, 1); /* call it */
+ if (lua_isfunction(L, -1)) /* did it find module? */
+ break; /* module loaded successfully */
+ else if (lua_isstring(L, -1)) /* loader returned error message? */
+ lua_concat(L, 2); /* accumulate it */
+ else
+ lua_pop(L, 1);
+ }
+ lua_pushlightuserdata(L, sentinel);
+ lua_setfield(L, 2, name); /* _LOADED[name] = sentinel */
+ lua_pushstring(L, name); /* pass name as argument to module */
+ lua_call(L, 1, 1); /* run loaded module */
+ if (!lua_isnil(L, -1)) /* non-nil return? */
+ lua_setfield(L, 2, name); /* _LOADED[name] = returned value */
+ lua_getfield(L, 2, name);
+ if (lua_touserdata(L, -1) == sentinel) { /* module did not set a value? */
+ lua_pushboolean(L, 1); /* use true as result */
+ lua_pushvalue(L, -1); /* extra copy to be returned */
+ lua_setfield(L, 2, name); /* _LOADED[name] = true */
+ }
+ lj_lib_checkfpu(L);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void setfenv(lua_State *L)
+{
+ lua_Debug ar;
+ if (lua_getstack(L, 1, &ar) == 0 ||
+ lua_getinfo(L, "f", &ar) == 0 || /* get calling function */
+ lua_iscfunction(L, -1))
+ luaL_error(L, LUA_QL("module") " not called from a Lua function");
+ lua_pushvalue(L, -2);
+ lua_setfenv(L, -2);
+ lua_pop(L, 1);
+}
+
+static void dooptions(lua_State *L, int n)
+{
+ int i;
+ for (i = 2; i <= n; i++) {
+ lua_pushvalue(L, i); /* get option (a function) */
+ lua_pushvalue(L, -2); /* module */
+ lua_call(L, 1, 0);
+ }
+}
+
+static void modinit(lua_State *L, const char *modname)
+{
+ const char *dot;
+ lua_pushvalue(L, -1);
+ lua_setfield(L, -2, "_M"); /* module._M = module */
+ lua_pushstring(L, modname);
+ lua_setfield(L, -2, "_NAME");
+ dot = strrchr(modname, '.'); /* look for last dot in module name */
+ if (dot == NULL) dot = modname; else dot++;
+ /* set _PACKAGE as package name (full module name minus last part) */
+ lua_pushlstring(L, modname, (size_t)(dot - modname));
+ lua_setfield(L, -2, "_PACKAGE");
+}
+
+static int lj_cf_package_module(lua_State *L)
+{
+ const char *modname = luaL_checkstring(L, 1);
+ int loaded = lua_gettop(L) + 1; /* index of _LOADED table */
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, loaded, modname); /* get _LOADED[modname] */
+ if (!lua_istable(L, -1)) { /* not found? */
+ lua_pop(L, 1); /* remove previous result */
+ /* try global variable (and create one if it does not exist) */
+ if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, 1) != NULL)
+ lj_err_callerv(L, LJ_ERR_BADMODN, modname);
+ lua_pushvalue(L, -1);
+ lua_setfield(L, loaded, modname); /* _LOADED[modname] = new table */
+ }
+ /* check whether table already has a _NAME field */
+ lua_getfield(L, -1, "_NAME");
+ if (!lua_isnil(L, -1)) { /* is table an initialized module? */
+ lua_pop(L, 1);
+ } else { /* no; initialize it */
+ lua_pop(L, 1);
+ modinit(L, modname);
+ }
+ lua_pushvalue(L, -1);
+ setfenv(L);
+ dooptions(L, loaded - 1);
+ return 0;
+}
+
+static int lj_cf_package_seeall(lua_State *L)
+{
+ luaL_checktype(L, 1, LUA_TTABLE);
+ if (!lua_getmetatable(L, 1)) {
+ lua_createtable(L, 0, 1); /* create new metatable */
+ lua_pushvalue(L, -1);
+ lua_setmetatable(L, 1);
+ }
+ lua_pushvalue(L, LUA_GLOBALSINDEX);
+ lua_setfield(L, -2, "__index"); /* mt.__index = _G */
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#define AUXMARK "\1"
+
+static void setpath(lua_State *L, const char *fieldname, const char *envname,
+ const char *def)
+{
+ const char *path = getenv(envname);
+ if (path == NULL) {
+ lua_pushstring(L, def);
+ } else {
+ path = luaL_gsub(L, path, LUA_PATHSEP LUA_PATHSEP,
+ LUA_PATHSEP AUXMARK LUA_PATHSEP);
+ luaL_gsub(L, path, AUXMARK, def);
+ lua_remove(L, -2);
+ }
+ setprogdir(L);
+ lua_setfield(L, -2, fieldname);
+}
+
+static const luaL_Reg package_lib[] = {
+ { "loadlib", lj_cf_package_loadlib },
+ { "searchpath", lj_cf_package_searchpath },
+ { "seeall", lj_cf_package_seeall },
+ { NULL, NULL }
+};
+
+static const luaL_Reg package_global[] = {
+ { "module", lj_cf_package_module },
+ { "require", lj_cf_package_require },
+ { NULL, NULL }
+};
+
+static const lua_CFunction package_loaders[] =
+{
+ lj_cf_package_loader_preload,
+ lj_cf_package_loader_lua,
+ lj_cf_package_loader_c,
+ lj_cf_package_loader_croot,
+ NULL
+};
+
+LUALIB_API int luaopen_package(lua_State *L)
+{
+ int i;
+ luaL_newmetatable(L, "_LOADLIB");
+ lj_lib_pushcf(L, lj_cf_package_unloadlib, 1);
+ lua_setfield(L, -2, "__gc");
+ luaL_register(L, LUA_LOADLIBNAME, package_lib);
+ lua_pushvalue(L, -1);
+ lua_replace(L, LUA_ENVIRONINDEX);
+ lua_createtable(L, sizeof(package_loaders)/sizeof(package_loaders[0])-1, 0);
+ for (i = 0; package_loaders[i] != NULL; i++) {
+ lj_lib_pushcf(L, package_loaders[i], 1);
+ lua_rawseti(L, -2, i+1);
+ }
+ lua_setfield(L, -2, "loaders");
+ setpath(L, "path", LUA_PATH, LUA_PATH_DEFAULT);
+ setpath(L, "cpath", LUA_CPATH, LUA_CPATH_DEFAULT);
+ lua_pushliteral(L, LUA_PATH_CONFIG);
+ lua_setfield(L, -2, "config");
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_setfield(L, -2, "loaded");
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4);
+ lua_setfield(L, -2, "preload");
+ lua_pushvalue(L, LUA_GLOBALSINDEX);
+ luaL_register(L, NULL, package_global);
+ lua_pop(L, 1);
+ return 1;
+}
+
diff --git a/src/LuaJIT/src/lib_string.c b/src/LuaJIT/src/lib_string.c
new file mode 100644
index 000000000..3dac9d372
--- /dev/null
+++ b/src/LuaJIT/src/lib_string.c
@@ -0,0 +1,855 @@
+/*
+** String library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+
+#define lib_string_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_ff.h"
+#include "lj_bcdump.h"
+#include "lj_char.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_string
+
+LJLIB_ASM(string_len) LJLIB_REC(.)
+{
+ lj_lib_checkstr(L, 1);
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(string_byte) LJLIB_REC(string_range 0)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ int32_t len = (int32_t)s->len;
+ int32_t start = lj_lib_optint(L, 2, 1);
+ int32_t stop = lj_lib_optint(L, 3, start);
+ int32_t n, i;
+ const unsigned char *p;
+ if (stop < 0) stop += len+1;
+ if (start < 0) start += len+1;
+ if (start <= 0) start = 1;
+ if (stop > len) stop = len;
+ if (start > stop) return FFH_RES(0); /* Empty interval: return no results. */
+ start--;
+ n = stop - start;
+ if ((uint32_t)n > LUAI_MAXCSTACK)
+ lj_err_caller(L, LJ_ERR_STRSLC);
+ lj_state_checkstack(L, (MSize)n);
+ p = (const unsigned char *)strdata(s) + start;
+ for (i = 0; i < n; i++)
+ setintV(L->base + i-1, p[i]);
+ return FFH_RES(n);
+}
+
+LJLIB_ASM(string_char)
+{
+ int i, nargs = (int)(L->top - L->base);
+ char *buf = lj_str_needbuf(L, &G(L)->tmpbuf, (size_t)nargs);
+ for (i = 1; i <= nargs; i++) {
+ int32_t k = lj_lib_checkint(L, i);
+ if (!checku8(k))
+ lj_err_arg(L, i, LJ_ERR_BADVAL);
+ buf[i-1] = (char)k;
+ }
+ setstrV(L, L->base-1, lj_str_new(L, buf, (size_t)nargs));
+ return FFH_RES(1);
+}
+
+LJLIB_ASM(string_sub) LJLIB_REC(string_range 1)
+{
+ lj_lib_checkstr(L, 1);
+ lj_lib_checkint(L, 2);
+ setintV(L->base+2, lj_lib_optint(L, 3, -1));
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(string_rep)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ int32_t len = (int32_t)s->len;
+ int32_t k = lj_lib_checkint(L, 2);
+ int64_t tlen = (int64_t)k * len;
+ const char *src;
+ char *buf;
+ if (k <= 0) return FFH_RETRY;
+ if (tlen > LJ_MAX_STR)
+ lj_err_caller(L, LJ_ERR_STROV);
+ buf = lj_str_needbuf(L, &G(L)->tmpbuf, (MSize)tlen);
+ if (len <= 1) return FFH_RETRY; /* ASM code only needed buffer resize. */
+ src = strdata(s);
+ do {
+ int32_t i = 0;
+ do { *buf++ = src[i++]; } while (i < len);
+ } while (--k > 0);
+ setstrV(L, L->base-1, lj_str_new(L, G(L)->tmpbuf.buf, (size_t)tlen));
+ return FFH_RES(1);
+}
+
+LJLIB_ASM(string_reverse)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ lj_str_needbuf(L, &G(L)->tmpbuf, s->len);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(string_lower)
+LJLIB_ASM_(string_upper)
+
+/* ------------------------------------------------------------------------ */
+
+static int writer_buf(lua_State *L, const void *p, size_t size, void *b)
+{
+ luaL_addlstring((luaL_Buffer *)b, (const char *)p, size);
+ UNUSED(L);
+ return 0;
+}
+
+LJLIB_CF(string_dump)
+{
+ GCfunc *fn = lj_lib_checkfunc(L, 1);
+ int strip = L->base+1 < L->top && tvistruecond(L->base+1);
+ luaL_Buffer b;
+ L->top = L->base+1;
+ luaL_buffinit(L, &b);
+ if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, &b, strip))
+ lj_err_caller(L, LJ_ERR_STRDUMP);
+ luaL_pushresult(&b);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* macro to `unsign' a character */
+#define uchar(c) ((unsigned char)(c))
+
+#define CAP_UNFINISHED (-1)
+#define CAP_POSITION (-2)
+
+typedef struct MatchState {
+ const char *src_init; /* init of source string */
+ const char *src_end; /* end (`\0') of source string */
+ lua_State *L;
+ int level; /* total number of captures (finished or unfinished) */
+ struct {
+ const char *init;
+ ptrdiff_t len;
+ } capture[LUA_MAXCAPTURES];
+} MatchState;
+
+#define L_ESC '%'
+#define SPECIALS "^$*+?.([%-"
+
+static int check_capture(MatchState *ms, int l)
+{
+ l -= '1';
+ if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED)
+ lj_err_caller(ms->L, LJ_ERR_STRCAPI);
+ return l;
+}
+
+static int capture_to_close(MatchState *ms)
+{
+ int level = ms->level;
+ for (level--; level>=0; level--)
+ if (ms->capture[level].len == CAP_UNFINISHED) return level;
+ lj_err_caller(ms->L, LJ_ERR_STRPATC);
+ return 0; /* unreachable */
+}
+
+static const char *classend(MatchState *ms, const char *p)
+{
+ switch (*p++) {
+ case L_ESC:
+ if (*p == '\0')
+ lj_err_caller(ms->L, LJ_ERR_STRPATE);
+ return p+1;
+ case '[':
+ if (*p == '^') p++;
+ do { /* look for a `]' */
+ if (*p == '\0')
+ lj_err_caller(ms->L, LJ_ERR_STRPATM);
+ if (*(p++) == L_ESC && *p != '\0')
+ p++; /* skip escapes (e.g. `%]') */
+ } while (*p != ']');
+ return p+1;
+ default:
+ return p;
+ }
+}
+
+static const unsigned char match_class_map[32] = {
+ 0,LJ_CHAR_ALPHA,0,LJ_CHAR_CNTRL,LJ_CHAR_DIGIT,0,0,LJ_CHAR_GRAPH,0,0,0,0,
+ LJ_CHAR_LOWER,0,0,0,LJ_CHAR_PUNCT,0,0,LJ_CHAR_SPACE,0,
+ LJ_CHAR_UPPER,0,LJ_CHAR_ALNUM,LJ_CHAR_XDIGIT,0,0,0,0,0,0,0
+};
+
+static int match_class(int c, int cl)
+{
+ if ((cl & 0xc0) == 0x40) {
+ int t = match_class_map[(cl&0x1f)];
+ if (t) {
+ t = lj_char_isa(c, t);
+ return (cl & 0x20) ? t : !t;
+ }
+ if (cl == 'z') return c == 0;
+ if (cl == 'Z') return c != 0;
+ }
+ return (cl == c);
+}
+
+static int matchbracketclass(int c, const char *p, const char *ec)
+{
+ int sig = 1;
+ if (*(p+1) == '^') {
+ sig = 0;
+ p++; /* skip the `^' */
+ }
+ while (++p < ec) {
+ if (*p == L_ESC) {
+ p++;
+ if (match_class(c, uchar(*p)))
+ return sig;
+ }
+ else if ((*(p+1) == '-') && (p+2 < ec)) {
+ p+=2;
+ if (uchar(*(p-2)) <= c && c <= uchar(*p))
+ return sig;
+ }
+ else if (uchar(*p) == c) return sig;
+ }
+ return !sig;
+}
+
+static int singlematch(int c, const char *p, const char *ep)
+{
+ switch (*p) {
+ case '.': return 1; /* matches any char */
+ case L_ESC: return match_class(c, uchar(*(p+1)));
+ case '[': return matchbracketclass(c, p, ep-1);
+ default: return (uchar(*p) == c);
+ }
+}
+
+static const char *match(MatchState *ms, const char *s, const char *p);
+
+static const char *matchbalance(MatchState *ms, const char *s, const char *p)
+{
+ if (*p == 0 || *(p+1) == 0)
+ lj_err_caller(ms->L, LJ_ERR_STRPATU);
+ if (*s != *p) {
+ return NULL;
+ } else {
+ int b = *p;
+ int e = *(p+1);
+ int cont = 1;
+ while (++s < ms->src_end) {
+ if (*s == e) {
+ if (--cont == 0) return s+1;
+ } else if (*s == b) {
+ cont++;
+ }
+ }
+ }
+ return NULL; /* string ends out of balance */
+}
+
+static const char *max_expand(MatchState *ms, const char *s,
+ const char *p, const char *ep)
+{
+ ptrdiff_t i = 0; /* counts maximum expand for item */
+ while ((s+i)<ms->src_end && singlematch(uchar(*(s+i)), p, ep))
+ i++;
+ /* keeps trying to match with the maximum repetitions */
+ while (i>=0) {
+ const char *res = match(ms, (s+i), ep+1);
+ if (res) return res;
+ i--; /* else didn't match; reduce 1 repetition to try again */
+ }
+ return NULL;
+}
+
+static const char *min_expand(MatchState *ms, const char *s,
+ const char *p, const char *ep)
+{
+ for (;;) {
+ const char *res = match(ms, s, ep+1);
+ if (res != NULL)
+ return res;
+ else if (s<ms->src_end && singlematch(uchar(*s), p, ep))
+ s++; /* try with one more repetition */
+ else
+ return NULL;
+ }
+}
+
+static const char *start_capture(MatchState *ms, const char *s,
+ const char *p, int what)
+{
+ const char *res;
+ int level = ms->level;
+ if (level >= LUA_MAXCAPTURES) lj_err_caller(ms->L, LJ_ERR_STRCAPN);
+ ms->capture[level].init = s;
+ ms->capture[level].len = what;
+ ms->level = level+1;
+ if ((res=match(ms, s, p)) == NULL) /* match failed? */
+ ms->level--; /* undo capture */
+ return res;
+}
+
+static const char *end_capture(MatchState *ms, const char *s,
+ const char *p)
+{
+ int l = capture_to_close(ms);
+ const char *res;
+ ms->capture[l].len = s - ms->capture[l].init; /* close capture */
+ if ((res = match(ms, s, p)) == NULL) /* match failed? */
+ ms->capture[l].len = CAP_UNFINISHED; /* undo capture */
+ return res;
+}
+
+static const char *match_capture(MatchState *ms, const char *s, int l)
+{
+ size_t len;
+ l = check_capture(ms, l);
+ len = (size_t)ms->capture[l].len;
+ if ((size_t)(ms->src_end-s) >= len &&
+ memcmp(ms->capture[l].init, s, len) == 0)
+ return s+len;
+ else
+ return NULL;
+}
+
+static const char *match(MatchState *ms, const char *s, const char *p)
+{
+ init: /* using goto's to optimize tail recursion */
+ switch (*p) {
+ case '(': /* start capture */
+ if (*(p+1) == ')') /* position capture? */
+ return start_capture(ms, s, p+2, CAP_POSITION);
+ else
+ return start_capture(ms, s, p+1, CAP_UNFINISHED);
+ case ')': /* end capture */
+ return end_capture(ms, s, p+1);
+ case L_ESC:
+ switch (*(p+1)) {
+ case 'b': /* balanced string? */
+ s = matchbalance(ms, s, p+2);
+ if (s == NULL) return NULL;
+ p+=4;
+ goto init; /* else return match(ms, s, p+4); */
+ case 'f': { /* frontier? */
+ const char *ep; char previous;
+ p += 2;
+ if (*p != '[')
+ lj_err_caller(ms->L, LJ_ERR_STRPATB);
+ ep = classend(ms, p); /* points to what is next */
+ previous = (s == ms->src_init) ? '\0' : *(s-1);
+ if (matchbracketclass(uchar(previous), p, ep-1) ||
+ !matchbracketclass(uchar(*s), p, ep-1)) return NULL;
+ p=ep;
+ goto init; /* else return match(ms, s, ep); */
+ }
+ default:
+ if (lj_char_isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */
+ s = match_capture(ms, s, uchar(*(p+1)));
+ if (s == NULL) return NULL;
+ p+=2;
+ goto init; /* else return match(ms, s, p+2) */
+ }
+ goto dflt; /* case default */
+ }
+ case '\0': /* end of pattern */
+ return s; /* match succeeded */
+ case '$':
+ if (*(p+1) == '\0') /* is the `$' the last char in pattern? */
+ return (s == ms->src_end) ? s : NULL; /* check end of string */
+ else
+ goto dflt;
+ default: dflt: { /* it is a pattern item */
+ const char *ep = classend(ms, p); /* points to what is next */
+ int m = s<ms->src_end && singlematch(uchar(*s), p, ep);
+ switch (*ep) {
+ case '?': { /* optional */
+ const char *res;
+ if (m && ((res=match(ms, s+1, ep+1)) != NULL))
+ return res;
+ p=ep+1;
+ goto init; /* else return match(ms, s, ep+1); */
+ }
+ case '*': /* 0 or more repetitions */
+ return max_expand(ms, s, p, ep);
+ case '+': /* 1 or more repetitions */
+ return (m ? max_expand(ms, s+1, p, ep) : NULL);
+ case '-': /* 0 or more repetitions (minimum) */
+ return min_expand(ms, s, p, ep);
+ default:
+ if (!m) return NULL;
+ s++; p=ep;
+ goto init; /* else return match(ms, s+1, ep); */
+ }
+ }
+ }
+}
+
+static const char *lmemfind(const char *s1, size_t l1,
+ const char *s2, size_t l2)
+{
+ if (l2 == 0) {
+ return s1; /* empty strings are everywhere */
+ } else if (l2 > l1) {
+ return NULL; /* avoids a negative `l1' */
+ } else {
+ const char *init; /* to search for a `*s2' inside `s1' */
+ l2--; /* 1st char will be checked by `memchr' */
+ l1 = l1-l2; /* `s2' cannot be found after that */
+ while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) {
+ init++; /* 1st char is already checked */
+ if (memcmp(init, s2+1, l2) == 0) {
+ return init-1;
+ } else { /* correct `l1' and `s1' to try again */
+ l1 -= (size_t)(init-s1);
+ s1 = init;
+ }
+ }
+ return NULL; /* not found */
+ }
+}
+
+static void push_onecapture(MatchState *ms, int i, const char *s, const char *e)
+{
+ if (i >= ms->level) {
+ if (i == 0) /* ms->level == 0, too */
+ lua_pushlstring(ms->L, s, (size_t)(e - s)); /* add whole match */
+ else
+ lj_err_caller(ms->L, LJ_ERR_STRCAPI);
+ } else {
+ ptrdiff_t l = ms->capture[i].len;
+ if (l == CAP_UNFINISHED) lj_err_caller(ms->L, LJ_ERR_STRCAPU);
+ if (l == CAP_POSITION)
+ lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1);
+ else
+ lua_pushlstring(ms->L, ms->capture[i].init, (size_t)l);
+ }
+}
+
+static int push_captures(MatchState *ms, const char *s, const char *e)
+{
+ int i;
+ int nlevels = (ms->level == 0 && s) ? 1 : ms->level;
+ luaL_checkstack(ms->L, nlevels, "too many captures");
+ for (i = 0; i < nlevels; i++)
+ push_onecapture(ms, i, s, e);
+ return nlevels; /* number of strings pushed */
+}
+
+static ptrdiff_t posrelat(ptrdiff_t pos, size_t len)
+{
+ /* relative string position: negative means back from end */
+ if (pos < 0) pos += (ptrdiff_t)len + 1;
+ return (pos >= 0) ? pos : 0;
+}
+
+static int str_find_aux(lua_State *L, int find)
+{
+ size_t l1, l2;
+ const char *s = luaL_checklstring(L, 1, &l1);
+ const char *p = luaL_checklstring(L, 2, &l2);
+ ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1;
+ if (init < 0)
+ init = 0;
+ else if ((size_t)(init) > l1)
+ init = (ptrdiff_t)l1;
+ if (find && (lua_toboolean(L, 4) || /* explicit request? */
+ strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */
+ /* do a plain search */
+ const char *s2 = lmemfind(s+init, l1-(size_t)init, p, l2);
+ if (s2) {
+ lua_pushinteger(L, s2-s+1);
+ lua_pushinteger(L, s2-s+(ptrdiff_t)l2);
+ return 2;
+ }
+ } else {
+ MatchState ms;
+ int anchor = (*p == '^') ? (p++, 1) : 0;
+ const char *s1=s+init;
+ ms.L = L;
+ ms.src_init = s;
+ ms.src_end = s+l1;
+ do {
+ const char *res;
+ ms.level = 0;
+ if ((res=match(&ms, s1, p)) != NULL) {
+ if (find) {
+ lua_pushinteger(L, s1-s+1); /* start */
+ lua_pushinteger(L, res-s); /* end */
+ return push_captures(&ms, NULL, 0) + 2;
+ } else {
+ return push_captures(&ms, s1, res);
+ }
+ }
+ } while (s1++ < ms.src_end && !anchor);
+ }
+ lua_pushnil(L); /* not found */
+ return 1;
+}
+
+LJLIB_CF(string_find)
+{
+ return str_find_aux(L, 1);
+}
+
+LJLIB_CF(string_match)
+{
+ return str_find_aux(L, 0);
+}
+
+LJLIB_NOREG LJLIB_CF(string_gmatch_aux)
+{
+ const char *p = strVdata(lj_lib_upvalue(L, 2));
+ GCstr *str = strV(lj_lib_upvalue(L, 1));
+ const char *s = strdata(str);
+ TValue *tvpos = lj_lib_upvalue(L, 3);
+ const char *src = s + tvpos->u32.lo;
+ MatchState ms;
+ ms.L = L;
+ ms.src_init = s;
+ ms.src_end = s + str->len;
+ for (; src <= ms.src_end; src++) {
+ const char *e;
+ ms.level = 0;
+ if ((e = match(&ms, src, p)) != NULL) {
+ int32_t pos = (int32_t)(e - s);
+ if (e == src) pos++; /* Ensure progress for empty match. */
+ tvpos->u32.lo = (uint32_t)pos;
+ return push_captures(&ms, src, e);
+ }
+ }
+ return 0; /* not found */
+}
+
+LJLIB_CF(string_gmatch)
+{
+ lj_lib_checkstr(L, 1);
+ lj_lib_checkstr(L, 2);
+ L->top = L->base+3;
+ (L->top-1)->u64 = 0;
+ lj_lib_pushcc(L, lj_cf_string_gmatch_aux, FF_string_gmatch_aux, 3);
+ return 1;
+}
+
+static void add_s(MatchState *ms, luaL_Buffer *b, const char *s, const char *e)
+{
+ size_t l, i;
+ const char *news = lua_tolstring(ms->L, 3, &l);
+ for (i = 0; i < l; i++) {
+ if (news[i] != L_ESC) {
+ luaL_addchar(b, news[i]);
+ } else {
+ i++; /* skip ESC */
+ if (!lj_char_isdigit(uchar(news[i]))) {
+ luaL_addchar(b, news[i]);
+ } else if (news[i] == '0') {
+ luaL_addlstring(b, s, (size_t)(e - s));
+ } else {
+ push_onecapture(ms, news[i] - '1', s, e);
+ luaL_addvalue(b); /* add capture to accumulated result */
+ }
+ }
+ }
+}
+
+static void add_value(MatchState *ms, luaL_Buffer *b,
+ const char *s, const char *e)
+{
+ lua_State *L = ms->L;
+ switch (lua_type(L, 3)) {
+ case LUA_TNUMBER:
+ case LUA_TSTRING: {
+ add_s(ms, b, s, e);
+ return;
+ }
+ case LUA_TFUNCTION: {
+ int n;
+ lua_pushvalue(L, 3);
+ n = push_captures(ms, s, e);
+ lua_call(L, n, 1);
+ break;
+ }
+ case LUA_TTABLE: {
+ push_onecapture(ms, 0, s, e);
+ lua_gettable(L, 3);
+ break;
+ }
+ }
+ if (!lua_toboolean(L, -1)) { /* nil or false? */
+ lua_pop(L, 1);
+ lua_pushlstring(L, s, (size_t)(e - s)); /* keep original text */
+ } else if (!lua_isstring(L, -1)) {
+ lj_err_callerv(L, LJ_ERR_STRGSRV, luaL_typename(L, -1));
+ }
+ luaL_addvalue(b); /* add result to accumulator */
+}
+
+LJLIB_CF(string_gsub)
+{
+ size_t srcl;
+ const char *src = luaL_checklstring(L, 1, &srcl);
+ const char *p = luaL_checkstring(L, 2);
+ int tr = lua_type(L, 3);
+ int max_s = luaL_optint(L, 4, (int)(srcl+1));
+ int anchor = (*p == '^') ? (p++, 1) : 0;
+ int n = 0;
+ MatchState ms;
+ luaL_Buffer b;
+ if (!(tr == LUA_TNUMBER || tr == LUA_TSTRING ||
+ tr == LUA_TFUNCTION || tr == LUA_TTABLE))
+ lj_err_arg(L, 3, LJ_ERR_NOSFT);
+ luaL_buffinit(L, &b);
+ ms.L = L;
+ ms.src_init = src;
+ ms.src_end = src+srcl;
+ while (n < max_s) {
+ const char *e;
+ ms.level = 0;
+ e = match(&ms, src, p);
+ if (e) {
+ n++;
+ add_value(&ms, &b, src, e);
+ }
+ if (e && e>src) /* non empty match? */
+ src = e; /* skip it */
+ else if (src < ms.src_end)
+ luaL_addchar(&b, *src++);
+ else
+ break;
+ if (anchor)
+ break;
+ }
+ luaL_addlstring(&b, src, (size_t)(ms.src_end-src));
+ luaL_pushresult(&b);
+ lua_pushinteger(L, n); /* number of substitutions */
+ return 2;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */
+#define MAX_FMTITEM 512
+/* valid flags in a format specification */
+#define FMT_FLAGS "-+ #0"
+/*
+** maximum size of each format specification (such as '%-099.99d')
+** (+10 accounts for %99.99x plus margin of error)
+*/
+#define MAX_FMTSPEC (sizeof(FMT_FLAGS) + sizeof(LUA_INTFRMLEN) + 10)
+
+static void addquoted(lua_State *L, luaL_Buffer *b, int arg)
+{
+ GCstr *str = lj_lib_checkstr(L, arg);
+ int32_t len = (int32_t)str->len;
+ const char *s = strdata(str);
+ luaL_addchar(b, '"');
+ while (len--) {
+ if (*s == '"' || *s == '\\' || *s == '\n') {
+ luaL_addchar(b, '\\');
+ luaL_addchar(b, *s);
+ } else if (lj_char_iscntrl(uchar(*s))) {
+ uint32_t c1, c2, c3;
+ luaL_addchar(b, '\\');
+ c1 = uchar(*s); c3 = c1 % 10; c1 /= 10; c2 = c1 % 10; c1 /= 10;
+ if (c1 + lj_char_isdigit(uchar(s[1]))) luaL_addchar(b, '0' + c1);
+ if (c2 + (c1 + lj_char_isdigit(uchar(s[1])))) luaL_addchar(b, '0' + c2);
+ luaL_addchar(b, '0' + c3);
+ } else {
+ luaL_addchar(b, *s);
+ }
+ s++;
+ }
+ luaL_addchar(b, '"');
+}
+
+static const char *scanformat(lua_State *L, const char *strfrmt, char *form)
+{
+ const char *p = strfrmt;
+ while (*p != '\0' && strchr(FMT_FLAGS, *p) != NULL) p++; /* skip flags */
+ if ((size_t)(p - strfrmt) >= sizeof(FMT_FLAGS))
+ lj_err_caller(L, LJ_ERR_STRFMTR);
+ if (lj_char_isdigit(uchar(*p))) p++; /* skip width */
+ if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */
+ if (*p == '.') {
+ p++;
+ if (lj_char_isdigit(uchar(*p))) p++; /* skip precision */
+ if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */
+ }
+ if (lj_char_isdigit(uchar(*p)))
+ lj_err_caller(L, LJ_ERR_STRFMTW);
+ *(form++) = '%';
+ strncpy(form, strfrmt, (size_t)(p - strfrmt + 1));
+ form += p - strfrmt + 1;
+ *form = '\0';
+ return p;
+}
+
+static void addintlen(char *form)
+{
+ size_t l = strlen(form);
+ char spec = form[l - 1];
+ strcpy(form + l - 1, LUA_INTFRMLEN);
+ form[l + sizeof(LUA_INTFRMLEN) - 2] = spec;
+ form[l + sizeof(LUA_INTFRMLEN) - 1] = '\0';
+}
+
+static unsigned LUA_INTFRM_T num2intfrm(lua_State *L, int arg)
+{
+ if (sizeof(LUA_INTFRM_T) == 4) {
+ return (LUA_INTFRM_T)lj_lib_checkbit(L, arg);
+ } else {
+ cTValue *o;
+ lj_lib_checknumber(L, arg);
+ o = L->base+arg-1;
+ if (tvisint(o))
+ return (LUA_INTFRM_T)intV(o);
+ else
+ return (LUA_INTFRM_T)numV(o);
+ }
+}
+
+static unsigned LUA_INTFRM_T num2uintfrm(lua_State *L, int arg)
+{
+ if (sizeof(LUA_INTFRM_T) == 4) {
+ return (unsigned LUA_INTFRM_T)lj_lib_checkbit(L, arg);
+ } else {
+ cTValue *o;
+ lj_lib_checknumber(L, arg);
+ o = L->base+arg-1;
+ if (tvisint(o))
+ return (unsigned LUA_INTFRM_T)intV(o);
+ else if ((int32_t)o->u32.hi < 0)
+ return (unsigned LUA_INTFRM_T)(LUA_INTFRM_T)numV(o);
+ else
+ return (unsigned LUA_INTFRM_T)numV(o);
+ }
+}
+
+LJLIB_CF(string_format)
+{
+ int arg = 1, top = (int)(L->top - L->base);
+ GCstr *fmt = lj_lib_checkstr(L, arg);
+ const char *strfrmt = strdata(fmt);
+ const char *strfrmt_end = strfrmt + fmt->len;
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ while (strfrmt < strfrmt_end) {
+ if (*strfrmt != L_ESC) {
+ luaL_addchar(&b, *strfrmt++);
+ } else if (*++strfrmt == L_ESC) {
+ luaL_addchar(&b, *strfrmt++); /* %% */
+ } else { /* format item */
+ char form[MAX_FMTSPEC]; /* to store the format (`%...') */
+ char buff[MAX_FMTITEM]; /* to store the formatted item */
+ if (++arg > top)
+ luaL_argerror(L, arg, lj_obj_typename[0]);
+ strfrmt = scanformat(L, strfrmt, form);
+ switch (*strfrmt++) {
+ case 'c':
+ sprintf(buff, form, lj_lib_checkint(L, arg));
+ break;
+ case 'd': case 'i':
+ addintlen(form);
+ sprintf(buff, form, num2intfrm(L, arg));
+ break;
+ case 'o': case 'u': case 'x': case 'X':
+ addintlen(form);
+ sprintf(buff, form, num2uintfrm(L, arg));
+ break;
+ case 'e': case 'E': case 'f': case 'g': case 'G': {
+ TValue tv;
+ tv.n = lj_lib_checknum(L, arg);
+ if (LJ_UNLIKELY((tv.u32.hi << 1) >= 0xffe00000)) {
+ /* Canonicalize output of non-finite values. */
+ char *p, nbuf[LJ_STR_NUMBUF];
+ size_t len = lj_str_bufnum(nbuf, &tv);
+ if (strfrmt[-1] == 'E' || strfrmt[-1] == 'G') {
+ nbuf[len-3] = nbuf[len-3] - 0x20;
+ nbuf[len-2] = nbuf[len-2] - 0x20;
+ nbuf[len-1] = nbuf[len-1] - 0x20;
+ }
+ nbuf[len] = '\0';
+ for (p = form; *p < 'A' && *p != '.'; p++) ;
+ *p++ = 's'; *p = '\0';
+ sprintf(buff, form, nbuf);
+ break;
+ }
+ sprintf(buff, form, (double)tv.n);
+ break;
+ }
+ case 'q':
+ addquoted(L, &b, arg);
+ continue;
+ case 'p':
+ lj_str_pushf(L, "%p", lua_topointer(L, arg));
+ luaL_addvalue(&b);
+ continue;
+ case 's': {
+ GCstr *str = lj_lib_checkstr(L, arg);
+ if (!strchr(form, '.') && str->len >= 100) {
+ /* no precision and string is too long to be formatted;
+ keep original string */
+ setstrV(L, L->top++, str);
+ luaL_addvalue(&b);
+ continue;
+ }
+ sprintf(buff, form, strdata(str));
+ break;
+ }
+ default:
+ lj_err_callerv(L, LJ_ERR_STRFMTO, *(strfrmt -1));
+ break;
+ }
+ luaL_addlstring(&b, buff, strlen(buff));
+ }
+ }
+ luaL_pushresult(&b);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_string(lua_State *L)
+{
+ GCtab *mt;
+ global_State *g;
+ LJ_LIB_REG(L, LUA_STRLIBNAME, string);
+#if defined(LUA_COMPAT_GFIND)
+ lua_getfield(L, -1, "gmatch");
+ lua_setfield(L, -2, "gfind");
+#endif
+ mt = lj_tab_new(L, 0, 1);
+ /* NOBARRIER: basemt is a GC root. */
+ g = G(L);
+ setgcref(basemt_it(g, LJ_TSTR), obj2gco(mt));
+ settabV(L, lj_tab_setstr(L, mt, mmname_str(g, MM_index)), tabV(L->top-1));
+ mt->nomm = (uint8_t)(~(1u<<MM_index));
+ return 1;
+}
+
diff --git a/src/LuaJIT/src/lib_table.c b/src/LuaJIT/src/lib_table.c
new file mode 100644
--- /dev/null
+++ b/src/LuaJIT/src/lib_table.c
+/*
+** Table library.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_table_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_table
+
+LJLIB_CF(table_foreachi)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCfunc *func = lj_lib_checkfunc(L, 2);
+ MSize i, n = lj_tab_len(t);
+ for (i = 1; i <= n; i++) {
+ cTValue *val;
+ setfuncV(L, L->top, func);
+ setintV(L->top+1, i);
+ val = lj_tab_getint(t, (int32_t)i);
+ if (val) { copyTV(L, L->top+2, val); } else { setnilV(L->top+2); }
+ L->top += 3;
+ lua_call(L, 2, 1);
+ if (!tvisnil(L->top-1))
+ return 1;
+ L->top--;
+ }
+ return 0;
+}
+
+LJLIB_CF(table_foreach)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCfunc *func = lj_lib_checkfunc(L, 2);
+ L->top = L->base+3;
+ setnilV(L->top-1);
+ while (lj_tab_next(L, t, L->top-1)) {
+ copyTV(L, L->top+2, L->top);
+ copyTV(L, L->top+1, L->top-1);
+ setfuncV(L, L->top, func);
+ L->top += 3;
+ lua_call(L, 2, 1);
+ if (!tvisnil(L->top-1))
+ return 1;
+ L->top--;
+ }
+ return 0;
+}
+
+LJLIB_ASM(table_getn) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ return FFH_UNREACHABLE;
+}
+
+LJLIB_CF(table_maxn)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ TValue *array = tvref(t->array);
+ Node *node;
+ lua_Number m = 0;
+ ptrdiff_t i;
+ for (i = (ptrdiff_t)t->asize - 1; i >= 0; i--)
+ if (!tvisnil(&array[i])) {
+ m = (lua_Number)(int32_t)i;
+ break;
+ }
+ node = noderef(t->node);
+ for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
+ if (!tvisnil(&node[i].val) && tvisnumber(&node[i].key)) {
+ lua_Number n = numberVnum(&node[i].key);
+ if (n > m) m = n;
+ }
+ setnumV(L->top-1, m);
+ return 1;
+}
+
+LJLIB_CF(table_insert) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t n, i = (int32_t)lj_tab_len(t) + 1;
+ int nargs = (int)((char *)L->top - (char *)L->base);
+ if (nargs != 2*sizeof(TValue)) {
+ if (nargs != 3*sizeof(TValue))
+ lj_err_caller(L, LJ_ERR_TABINS);
+ /* NOBARRIER: This just moves existing elements around. */
+ for (n = lj_lib_checkint(L, 2); i > n; i--) {
+ /* The set may invalidate the get pointer, so need to do it first! */
+ TValue *dst = lj_tab_setint(L, t, i);
+ cTValue *src = lj_tab_getint(t, i-1);
+ if (src) {
+ copyTV(L, dst, src);
+ } else {
+ setnilV(dst);
+ }
+ }
+ i = n;
+ }
+ {
+ TValue *dst = lj_tab_setint(L, t, i);
+ copyTV(L, dst, L->top-1); /* Set new value. */
+ lj_gc_barriert(L, t, dst);
+ }
+ return 0;
+}
+
+LJLIB_CF(table_remove) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t e = (int32_t)lj_tab_len(t);
+ int32_t pos = lj_lib_optint(L, 2, e);
+ if (!(1 <= pos && pos <= e)) /* Nothing to remove? */
+ return 0;
+ lua_rawgeti(L, 1, pos); /* Get previous value. */
+ /* NOBARRIER: This just moves existing elements around. */
+ for (; pos < e; pos++) {
+ cTValue *src = lj_tab_getint(t, pos+1);
+ TValue *dst = lj_tab_setint(L, t, pos);
+ if (src) {
+ copyTV(L, dst, src);
+ } else {
+ setnilV(dst);
+ }
+ }
+ setnilV(lj_tab_setint(L, t, e)); /* Remove (last) value. */
+ return 1; /* Return previous value. */
+}
+
+LJLIB_CF(table_concat)
+{
+ luaL_Buffer b;
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCstr *sep = lj_lib_optstr(L, 2);
+ MSize seplen = sep ? sep->len : 0;
+ int32_t i = lj_lib_optint(L, 3, 1);
+ int32_t e = L->base+3 < L->top ? lj_lib_checkint(L, 4) :
+ (int32_t)lj_tab_len(t);
+ luaL_buffinit(L, &b);
+ if (i <= e) {
+ for (;;) {
+ cTValue *o;
+ lua_rawgeti(L, 1, i);
+ o = L->top-1;
+ if (!(tvisstr(o) || tvisnumber(o)))
+ lj_err_callerv(L, LJ_ERR_TABCAT, typename(o), i);
+ luaL_addvalue(&b);
+ if (i++ == e) break;
+ if (seplen)
+ luaL_addlstring(&b, strdata(sep), seplen);
+ }
+ }
+ luaL_pushresult(&b);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void set2(lua_State *L, int i, int j)
+{
+ lua_rawseti(L, 1, i);
+ lua_rawseti(L, 1, j);
+}
+
+static int sort_comp(lua_State *L, int a, int b)
+{
+ if (!lua_isnil(L, 2)) { /* function? */
+ int res;
+ lua_pushvalue(L, 2);
+ lua_pushvalue(L, a-1); /* -1 to compensate function */
+ lua_pushvalue(L, b-2); /* -2 to compensate function and `a' */
+ lua_call(L, 2, 1);
+ res = lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ return res;
+ } else { /* a < b? */
+ return lua_lessthan(L, a, b);
+ }
+}
+
+static void auxsort(lua_State *L, int l, int u)
+{
+ while (l < u) { /* for tail recursion */
+ int i, j;
+ /* sort elements a[l], a[(l+u)/2] and a[u] */
+ lua_rawgeti(L, 1, l);
+ lua_rawgeti(L, 1, u);
+ if (sort_comp(L, -1, -2)) /* a[u] < a[l]? */
+ set2(L, l, u); /* swap a[l] - a[u] */
+ else
+ lua_pop(L, 2);
+ if (u-l == 1) break; /* only 2 elements */
+ i = (l+u)/2;
+ lua_rawgeti(L, 1, i);
+ lua_rawgeti(L, 1, l);
+ if (sort_comp(L, -2, -1)) { /* a[i]<a[l]? */
+ set2(L, i, l);
+ } else {
+ lua_pop(L, 1); /* remove a[i] */
+ lua_rawgeti(L, 1, u);
+ if (sort_comp(L, -1, -2)) /* a[u]<a[i]? */
+ set2(L, i, u);
+ else
+ lua_pop(L, 2);
+ }
+ if (u-l == 2) break; /* only 3 elements */
+ lua_rawgeti(L, 1, i); /* Pivot */
+ lua_pushvalue(L, -1);
+ lua_rawgeti(L, 1, u-1);
+ set2(L, i, u-1);
+ /* a[l] <= P == a[u-1] <= a[u], only need to sort from l+1 to u-2 */
+ i = l; j = u-1;
+ for (;;) { /* invariant: a[l..i] <= P <= a[j..u] */
+ /* repeat ++i until a[i] >= P */
+ while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) {
+ if (i>=u) lj_err_caller(L, LJ_ERR_TABSORT);
+ lua_pop(L, 1); /* remove a[i] */
+ }
+ /* repeat --j until a[j] <= P */
+ while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) {
+ if (j<=l) lj_err_caller(L, LJ_ERR_TABSORT);
+ lua_pop(L, 1); /* remove a[j] */
+ }
+ if (j<i) {
+ lua_pop(L, 3); /* pop pivot, a[i], a[j] */
+ break;
+ }
+ set2(L, i, j);
+ }
+ lua_rawgeti(L, 1, u-1);
+ lua_rawgeti(L, 1, i);
+ set2(L, u-1, i); /* swap pivot (a[u-1]) with a[i] */
+ /* a[l..i-1] <= a[i] == P <= a[i+1..u] */
+ /* adjust so that smaller half is in [j..i] and larger one in [l..u] */
+ if (i-l < u-i) {
+ j=l; i=i-1; l=i+2;
+ } else {
+ j=i+1; i=u; u=j-2;
+ }
+ auxsort(L, j, i); /* call recursively the smaller one */
+ } /* repeat the process for the larger one */
+}
+
+LJLIB_CF(table_sort)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t n = (int32_t)lj_tab_len(t);
+ lua_settop(L, 2);
+ if (!tvisnil(L->base+1))
+ lj_lib_checkfunc(L, 2);
+ auxsort(L, 1, n);
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_table(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_TABLIBNAME, table);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ lua_getglobal(L, "unpack");
+ lua_setfield(L, -2, "unpack");
+#endif
+ return 1;
+}
+
diff --git a/src/LuaJIT/src/lj.supp b/src/LuaJIT/src/lj.supp
new file mode 100644
index 000000000..411f26170
--- /dev/null
+++ b/src/LuaJIT/src/lj.supp
@@ -0,0 +1,26 @@
+# Valgrind suppression file for LuaJIT 2.0.
+{
+ Optimized string compare
+ Memcheck:Addr4
+ fun:lj_str_cmp
+}
+{
+ Optimized string compare
+ Memcheck:Addr1
+ fun:lj_str_cmp
+}
+{
+ Optimized string compare
+ Memcheck:Addr4
+ fun:lj_str_new
+}
+{
+ Optimized string compare
+ Memcheck:Addr1
+ fun:lj_str_new
+}
+{
+ Optimized string compare
+ Memcheck:Cond
+ fun:lj_str_new
+}
diff --git a/src/LuaJIT/src/lj_alloc.c b/src/LuaJIT/src/lj_alloc.c
new file mode 100644
index 000000000..e4ce76347
--- /dev/null
+++ b/src/LuaJIT/src/lj_alloc.c
@@ -0,0 +1,1381 @@
+/*
+** Bundled memory allocator.
+**
+** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
+** The original bears the following remark:
+**
+** This is a version (aka dlmalloc) of malloc/free/realloc written by
+** Doug Lea and released to the public domain, as explained at
+** http://creativecommons.org/licenses/publicdomain.
+**
+** * Version pre-2.8.4 Wed Mar 29 19:46:29 2006 (dl at gee)
+**
+** No additional copyright is claimed over the customizations.
+** Please do NOT bother the original author about this version here!
+**
+** If you want to use dlmalloc in another project, you should get
+** the original from: ftp://gee.cs.oswego.edu/pub/misc/
+** For thread-safe derivatives, take a look at:
+** - ptmalloc: http://www.malloc.de/
+** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
+*/
+
+#define lj_alloc_c
+#define LUA_CORE
+
+/* To get the mremap prototype. Must be defined before any system includes. */
+#if defined(__linux__) && !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+
+#include "lj_def.h"
+#include "lj_arch.h"
+#include "lj_alloc.h"
+
+#ifndef LUAJIT_USE_SYSMALLOC
+
+#define MAX_SIZE_T (~(size_t)0)
+#define MALLOC_ALIGNMENT ((size_t)8U)
+
+#define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U)
+#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#define DEFAULT_MMAP_THRESHOLD ((size_t)128U * (size_t)1024U)
+#define MAX_RELEASE_CHECK_RATE 255
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE (sizeof(size_t))
+#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO ((size_t)0)
+#define SIZE_T_ONE ((size_t)1)
+#define SIZE_T_TWO ((size_t)2)
+#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* the number of bytes to offset an address to align it */
+#define align_offset(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+ ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
+
+/* -------------------------- MMAP support ------------------------------- */
+
+#define MFAIL ((void *)(MAX_SIZE_T))
+#define CMFAIL ((char *)(MFAIL)) /* defined for convenience */
+
+#define IS_DIRECT_BIT (SIZE_T_ONE)
+
+#if LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#if LJ_64
+
+/* Undocumented, but hey, that's what we all love so much about Windows. */
+typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
+ size_t *size, ULONG alloctype, ULONG prot);
+static PNTAVM ntavm;
+
+/* Number of top bits of the lower 32 bits of an address that must be zero.
+** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
+*/
+#define NTAVM_ZEROBITS 1
+
+static void INIT_MMAP(void)
+{
+ ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
+ "NtAllocateVirtualMemory");
+}
+
+/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
+static LJ_AINLINE void *CALL_MMAP(size_t size)
+{
+ DWORD olderr = GetLastError();
+ void *ptr = NULL;
+ long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
+ MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+ SetLastError(olderr);
+ return st == 0 ? ptr : MFAIL;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+static LJ_AINLINE void *DIRECT_MMAP(size_t size)
+{
+ DWORD olderr = GetLastError();
+ void *ptr = NULL;
+ long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
+ MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
+ SetLastError(olderr);
+ return st == 0 ? ptr : MFAIL;
+}
+
+#else
+
+#define INIT_MMAP() ((void)0)
+
+/* Win32 MMAP via VirtualAlloc */
+static LJ_AINLINE void *CALL_MMAP(size_t size)
+{
+ DWORD olderr = GetLastError();
+ void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+ SetLastError(olderr);
+ return ptr ? ptr : MFAIL;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+static LJ_AINLINE void *DIRECT_MMAP(size_t size)
+{
+ DWORD olderr = GetLastError();
+ void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
+ PAGE_READWRITE);
+ SetLastError(olderr);
+ return ptr ? ptr : MFAIL;
+}
+
+#endif
+
+/* This function supports releasing coalesed segments */
+static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
+{
+ DWORD olderr = GetLastError();
+ MEMORY_BASIC_INFORMATION minfo;
+ char *cptr = (char *)ptr;
+ while (size) {
+ if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+ return -1;
+ if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+ minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+ return -1;
+ if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+ return -1;
+ cptr += minfo.RegionSize;
+ size -= minfo.RegionSize;
+ }
+ SetLastError(olderr);
+ return 0;
+}
+
+#else
+
+#include <errno.h>
+#include <sys/mman.h>
+
+#define MMAP_PROT (PROT_READ|PROT_WRITE)
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
+
+#if LJ_64
+/* 64 bit mode needs special support for allocating memory in the lower 2GB. */
+
+#if LJ_TARGET_LINUX
+
+/* Actually this only gives us max. 1GB in current Linux kernels. */
+static LJ_AINLINE void *CALL_MMAP(size_t size)
+{
+ int olderr = errno;
+ void *ptr = mmap(NULL, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
+ errno = olderr;
+ return ptr;
+}
+
+#elif LJ_TARGET_OSX || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__OpenBSD__)
+
+/* OSX and FreeBSD mmap() use a naive first-fit linear search.
+** That's perfect for us. Except that -pagezero_size must be set for OSX,
+** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs
+** to be reduced to 250MB on FreeBSD.
+*/
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__OpenBSD__)
+#include <sys/resource.h>
+#define MMAP_REGION_START ((uintptr_t)0x10000000)
+#else
+#define MMAP_REGION_START ((uintptr_t)0x10000)
+#endif
+#define MMAP_REGION_END ((uintptr_t)0x80000000)
+
+static LJ_AINLINE void *CALL_MMAP(size_t size)
+{
+ int olderr = errno;
+ /* Hint for next allocation. Doesn't need to be thread-safe. */
+ static uintptr_t alloc_hint = MMAP_REGION_START;
+ int retry = 0;
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+ static int rlimit_modified = 0;
+ if (LJ_UNLIKELY(rlimit_modified == 0)) {
+ struct rlimit rlim;
+ rlim.rlim_cur = rlim.rlim_max = MMAP_REGION_START;
+ setrlimit(RLIMIT_DATA, &rlim); /* Ignore result. May fail below. */
+ rlimit_modified = 1;
+ }
+#endif
+ for (;;) {
+ void *p = mmap((void *)alloc_hint, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
+ if ((uintptr_t)p >= MMAP_REGION_START &&
+ (uintptr_t)p + size < MMAP_REGION_END) {
+ alloc_hint = (uintptr_t)p + size;
+ errno = olderr;
+ return p;
+ }
+ if (p != CMFAIL) munmap(p, size);
+ if (retry) break;
+ retry = 1;
+ alloc_hint = MMAP_REGION_START;
+ }
+ errno = olderr;
+ return CMFAIL;
+}
+
+#else
+
+#error "NYI: need an equivalent of MAP_32BIT for this 64 bit OS"
+
+#endif
+
+#else
+
+/* 32 bit mode is easy. */
+static LJ_AINLINE void *CALL_MMAP(size_t size)
+{
+ int olderr = errno;
+ void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
+ errno = olderr;
+ return ptr;
+}
+
+#endif
+
+#define INIT_MMAP() ((void)0)
+#define DIRECT_MMAP(s) CALL_MMAP(s)
+
+static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
+{
+ int olderr = errno;
+ int ret = munmap(ptr, size);
+ errno = olderr;
+ return ret;
+}
+
+#if LJ_TARGET_LINUX
+/* Need to define _GNU_SOURCE to get the mremap prototype. */
+static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz,
+ int flags)
+{
+ int olderr = errno;
+ ptr = mremap(ptr, osz, nsz, flags);
+ errno = olderr;
+ return ptr;
+}
+
+#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
+#define CALL_MREMAP_NOMOVE 0
+#define CALL_MREMAP_MAYMOVE 1
+#if LJ_64
+#define CALL_MREMAP_MV CALL_MREMAP_NOMOVE
+#else
+#define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE
+#endif
+#endif
+
+#endif
+
+#ifndef CALL_MREMAP
+#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
+#endif
+
+/* ----------------------- Chunk representations ------------------------ */
+
+struct malloc_chunk {
+ size_t prev_foot; /* Size of previous chunk (if free). */
+ size_t head; /* Size and inuse bits. */
+ struct malloc_chunk *fd; /* double links -- used only if free. */
+ struct malloc_chunk *bk;
+};
+
+typedef struct malloc_chunk mchunk;
+typedef struct malloc_chunk *mchunkptr;
+typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
+typedef size_t bindex_t; /* Described below */
+typedef unsigned int binmap_t; /* Described below */
+typedef unsigned int flag_t; /* The type of various bit flag sets */
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#define MCHUNK_SIZE (sizeof(mchunk))
+
+#define CHUNK_OVERHEAD (SIZE_T_SIZE)
+
+/* Direct chunks need a second word of overhead ... */
+#define DIRECT_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define DIRECT_FOOT_PAD (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE\
+ ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem) ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST ((~MIN_CHUNK_SIZE+1) << 2)
+#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define pad_request(req) \
+ (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define request2size(req) \
+ (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+#define PINUSE_BIT (SIZE_T_ONE)
+#define CINUSE_BIT (SIZE_T_TWO)
+#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
+
+/* Head value for fenceposts */
+#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
+
+/* extraction of fields from head words */
+#define cinuse(p) ((p)->head & CINUSE_BIT)
+#define pinuse(p) ((p)->head & PINUSE_BIT)
+#define chunksize(p) ((p)->head & ~(INUSE_BITS))
+
+#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
+#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
+
+/* Treat space at ptr +/- offset as a chunk */
+#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s)))
+#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s)))
+
+/* Ptr to next or previous physical malloc_chunk. */
+#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
+#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
+
+/* extract next chunk's pinuse bit */
+#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
+
+/* Get/set size at footer */
+#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot)
+#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
+
+/* Set size, pinuse bit, and foot */
+#define set_size_and_pinuse_of_free_chunk(p, s)\
+ ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
+
+/* Set size, pinuse bit, foot, and clear next pinuse */
+#define set_free_with_pinuse(p, s, n)\
+ (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
+
+#define is_direct(p)\
+ (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
+
+/* Get the internal overhead associated with chunk p */
+#define overhead_for(p)\
+ (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
+
+/* ---------------------- Overlaid data structures ----------------------- */
+
+struct malloc_tree_chunk {
+ /* The first four fields must be compatible with malloc_chunk */
+ size_t prev_foot;
+ size_t head;
+ struct malloc_tree_chunk *fd;
+ struct malloc_tree_chunk *bk;
+
+ struct malloc_tree_chunk *child[2];
+ struct malloc_tree_chunk *parent;
+ bindex_t index;
+};
+
+typedef struct malloc_tree_chunk tchunk;
+typedef struct malloc_tree_chunk *tchunkptr;
+typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
+
+/* A little helper macro for trees */
+#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
+
+/* ----------------------------- Segments -------------------------------- */
+
+struct malloc_segment {
+ char *base; /* base address */
+ size_t size; /* allocated size */
+ struct malloc_segment *next; /* ptr to next segment */
+};
+
+typedef struct malloc_segment msegment;
+typedef struct malloc_segment *msegmentptr;
+
+/* ---------------------------- malloc_state ----------------------------- */
+
+/* Bin types, widths and sizes */
+#define NSMALLBINS (32U)
+#define NTREEBINS (32U)
+#define SMALLBIN_SHIFT (3U)
+#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT (8U)
+#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
+
+struct malloc_state {
+ binmap_t smallmap;
+ binmap_t treemap;
+ size_t dvsize;
+ size_t topsize;
+ mchunkptr dv;
+ mchunkptr top;
+ size_t trim_check;
+ size_t release_checks;
+ mchunkptr smallbins[(NSMALLBINS+1)*2];
+ tbinptr treebins[NTREEBINS];
+ msegment seg;
+};
+
+typedef struct malloc_state *mstate;
+
+#define is_initialized(M) ((M)->top != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* page-align a size */
+#define page_align(S)\
+ (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
+
+/* granularity-align a size */
+#define granularity_align(S)\
+ (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
+ & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
+
+#if LJ_TARGET_WINDOWS
+#define mmap_align(S) granularity_align(S)
+#else
+#define mmap_align(S) page_align(S)
+#endif
+
+/* True if segment S holds address A */
+#define segment_holds(S, A)\
+ ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
+
+/* Return segment holding given address */
+static msegmentptr segment_holding(mstate m, char *addr)
+{
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if (addr >= sp->base && addr < sp->base + sp->size)
+ return sp;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
+
+/* Return true if segment contains a segment link */
+static int has_segment_link(mstate m, msegmentptr ss)
+{
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
+ return 1;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
+
+/*
+ TOP_FOOT_SIZE is padding at the end of a segment, including space
+ that may be needed to place segment records and fenceposts when new
+ noncontiguous segments are added.
+*/
+#define TOP_FOOT_SIZE\
+ (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
+
+/* ---------------------------- Indexing Bins ---------------------------- */
+
+#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
+#define small_index(s) ((s) >> SMALLBIN_SHIFT)
+#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
+#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
+
+/* addressing by index. See above about smallbin repositioning */
+#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
+#define treebin_at(M,i) (&((M)->treebins[i]))
+
+/* assign tree index for size S to variable I */
+#define compute_tree_index(S, I)\
+{\
+ unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
+ if (X == 0) {\
+ I = 0;\
+ } else if (X > 0xFFFF) {\
+ I = NTREEBINS-1;\
+ } else {\
+ unsigned int K = lj_fls(X);\
+ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+ }\
+}
+
+/* Bit representing maximum resolved size in a treebin at i */
+#define bit_for_tree_index(i) \
+ (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define leftshift_for_tree_index(i) \
+ ((i == NTREEBINS-1)? 0 : \
+ ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define minsize_for_tree_index(i) \
+ ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
+ (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
+
+/* ------------------------ Operations on bin maps ----------------------- */
+
+/* bit corresponding to given index */
+#define idx2bit(i) ((binmap_t)(1) << (i))
+
+/* Mark/Clear bits with given index */
+#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
+#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
+#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
+
+#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
+#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
+#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
+
+/* mask with all bits to left of least bit of x on */
+#define left_bits(x) ((x<<1) | (~(x<<1)+1))
+
+/* Set cinuse bit and pinuse bit of next chunk */
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set size, cinuse and pinuse bit of this chunk */
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
+
+/* ----------------------- Operations on smallbins ----------------------- */
+
+/* Link a free chunk into a smallbin */
+#define insert_small_chunk(M, P, S) {\
+ bindex_t I = small_index(S);\
+ mchunkptr B = smallbin_at(M, I);\
+ mchunkptr F = B;\
+ if (!smallmap_is_marked(M, I))\
+ mark_smallmap(M, I);\
+ else\
+ F = B->fd;\
+ B->fd = P;\
+ F->bk = P;\
+ P->fd = F;\
+ P->bk = B;\
+}
+
+/* Unlink a chunk from a smallbin */
+#define unlink_small_chunk(M, P, S) {\
+ mchunkptr F = P->fd;\
+ mchunkptr B = P->bk;\
+ bindex_t I = small_index(S);\
+ if (F == B) {\
+ clear_smallmap(M, I);\
+ } else {\
+ F->bk = B;\
+ B->fd = F;\
+ }\
+}
+
+/* Unlink the first chunk from a smallbin */
+#define unlink_first_small_chunk(M, B, P, I) {\
+ mchunkptr F = P->fd;\
+ if (B == F) {\
+ clear_smallmap(M, I);\
+ } else {\
+ B->fd = F;\
+ F->bk = B;\
+ }\
+}
+
+/* Replace dv node, binning the old one */
+/* Used only when dvsize known to be small */
+#define replace_dv(M, P, S) {\
+ size_t DVS = M->dvsize;\
+ if (DVS != 0) {\
+ mchunkptr DV = M->dv;\
+ insert_small_chunk(M, DV, DVS);\
+ }\
+ M->dvsize = S;\
+ M->dv = P;\
+}
+
+/* ------------------------- Operations on trees ------------------------- */
+
+/* Insert chunk into tree */
+#define insert_large_chunk(M, X, S) {\
+ tbinptr *H;\
+ bindex_t I;\
+ compute_tree_index(S, I);\
+ H = treebin_at(M, I);\
+ X->index = I;\
+ X->child[0] = X->child[1] = 0;\
+ if (!treemap_is_marked(M, I)) {\
+ mark_treemap(M, I);\
+ *H = X;\
+ X->parent = (tchunkptr)H;\
+ X->fd = X->bk = X;\
+ } else {\
+ tchunkptr T = *H;\
+ size_t K = S << leftshift_for_tree_index(I);\
+ for (;;) {\
+ if (chunksize(T) != S) {\
+ tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
+ K <<= 1;\
+ if (*C != 0) {\
+ T = *C;\
+ } else {\
+ *C = X;\
+ X->parent = T;\
+ X->fd = X->bk = X;\
+ break;\
+ }\
+ } else {\
+ tchunkptr F = T->fd;\
+ T->fd = F->bk = X;\
+ X->fd = F;\
+ X->bk = T;\
+ X->parent = 0;\
+ break;\
+ }\
+ }\
+ }\
+}
+
+#define unlink_large_chunk(M, X) {\
+ tchunkptr XP = X->parent;\
+ tchunkptr R;\
+ if (X->bk != X) {\
+ tchunkptr F = X->fd;\
+ R = X->bk;\
+ F->bk = R;\
+ R->fd = F;\
+ } else {\
+ tchunkptr *RP;\
+ if (((R = *(RP = &(X->child[1]))) != 0) ||\
+ ((R = *(RP = &(X->child[0]))) != 0)) {\
+ tchunkptr *CP;\
+ while ((*(CP = &(R->child[1])) != 0) ||\
+ (*(CP = &(R->child[0])) != 0)) {\
+ R = *(RP = CP);\
+ }\
+ *RP = 0;\
+ }\
+ }\
+ if (XP != 0) {\
+ tbinptr *H = treebin_at(M, X->index);\
+ if (X == *H) {\
+ if ((*H = R) == 0) \
+ clear_treemap(M, X->index);\
+ } else {\
+ if (XP->child[0] == X) \
+ XP->child[0] = R;\
+ else \
+ XP->child[1] = R;\
+ }\
+ if (R != 0) {\
+ tchunkptr C0, C1;\
+ R->parent = XP;\
+ if ((C0 = X->child[0]) != 0) {\
+ R->child[0] = C0;\
+ C0->parent = R;\
+ }\
+ if ((C1 = X->child[1]) != 0) {\
+ R->child[1] = C1;\
+ C1->parent = R;\
+ }\
+ }\
+ }\
+}
+
+/* Relays to large vs small bin operations */
+
+#define insert_chunk(M, P, S)\
+ if (is_small(S)) { insert_small_chunk(M, P, S)\
+ } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
+
+#define unlink_chunk(M, P, S)\
+ if (is_small(S)) { unlink_small_chunk(M, P, S)\
+ } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
+
+/* ----------------------- Direct-mmapping chunks ----------------------- */
+
+static void *direct_alloc(size_t nb)
+{
+ size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */
+ char *mm = (char *)(DIRECT_MMAP(mmsize));
+ if (mm != CMFAIL) {
+ size_t offset = align_offset(chunk2mem(mm));
+ size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
+ mchunkptr p = (mchunkptr)(mm + offset);
+ p->prev_foot = offset | IS_DIRECT_BIT;
+ p->head = psize|CINUSE_BIT;
+ chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
+ chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
+ return chunk2mem(p);
+ }
+ }
+ return NULL;
+}
+
+static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
+{
+ size_t oldsize = chunksize(oldp);
+ if (is_small(nb)) /* Can't shrink direct regions below small size */
+ return NULL;
+ /* Keep old chunk if big enough but not too big */
+ if (oldsize >= nb + SIZE_T_SIZE &&
+ (oldsize - nb) <= (DEFAULT_GRANULARITY << 1)) {
+ return oldp;
+ } else {
+ size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
+ size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
+ size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
+ oldmmsize, newmmsize, CALL_MREMAP_MV);
+ if (cp != CMFAIL) {
+ mchunkptr newp = (mchunkptr)(cp + offset);
+ size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
+ newp->head = psize|CINUSE_BIT;
+ chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
+ chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
+ return newp;
+ }
+ }
+ return NULL;
+}
+
+/* -------------------------- mspace management -------------------------- */
+
+/* Initialize top chunk and its size */
+static void init_top(mstate m, mchunkptr p, size_t psize)
+{
+ /* Ensure alignment */
+ size_t offset = align_offset(chunk2mem(p));
+ p = (mchunkptr)((char *)p + offset);
+ psize -= offset;
+
+ m->top = p;
+ m->topsize = psize;
+ p->head = psize | PINUSE_BIT;
+ /* set size of fake trailing chunk holding overhead space only once */
+ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
+ m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
+}
+
+/* Initialize bins for a new mstate that is otherwise zeroed out */
+static void init_bins(mstate m)
+{
+ /* Establish circular links for smallbins */
+ bindex_t i;
+ for (i = 0; i < NSMALLBINS; i++) {
+ sbinptr bin = smallbin_at(m,i);
+ bin->fd = bin->bk = bin;
+ }
+}
+
+/* Allocate chunk and prepend remainder with chunk in successor base. */
+static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
+{
+ mchunkptr p = align_as_chunk(newbase);
+ mchunkptr oldfirst = align_as_chunk(oldbase);
+ size_t psize = (size_t)((char *)oldfirst - (char *)p);
+ mchunkptr q = chunk_plus_offset(p, nb);
+ size_t qsize = psize - nb;
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+
+ /* consolidate remainder with first chunk of old base */
+ if (oldfirst == m->top) {
+ size_t tsize = m->topsize += qsize;
+ m->top = q;
+ q->head = tsize | PINUSE_BIT;
+ } else if (oldfirst == m->dv) {
+ size_t dsize = m->dvsize += qsize;
+ m->dv = q;
+ set_size_and_pinuse_of_free_chunk(q, dsize);
+ } else {
+ if (!cinuse(oldfirst)) {
+ size_t nsize = chunksize(oldfirst);
+ unlink_chunk(m, oldfirst, nsize);
+ oldfirst = chunk_plus_offset(oldfirst, nsize);
+ qsize += nsize;
+ }
+ set_free_with_pinuse(q, qsize, oldfirst);
+ insert_chunk(m, q, qsize);
+ }
+
+ return chunk2mem(p);
+}
+
+/* Add a segment to hold a new noncontiguous region */
+static void add_segment(mstate m, char *tbase, size_t tsize)
+{
+ /* Determine locations and sizes of segment, fenceposts, old top */
+ char *old_top = (char *)m->top;
+ msegmentptr oldsp = segment_holding(m, old_top);
+ char *old_end = oldsp->base + oldsp->size;
+ size_t ssize = pad_request(sizeof(struct malloc_segment));
+ char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ size_t offset = align_offset(chunk2mem(rawsp));
+ char *asp = rawsp + offset;
+ char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
+ mchunkptr sp = (mchunkptr)csp;
+ msegmentptr ss = (msegmentptr)(chunk2mem(sp));
+ mchunkptr tnext = chunk_plus_offset(sp, ssize);
+ mchunkptr p = tnext;
+
+ /* reset top to new space */
+ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+
+ /* Set up segment record */
+ set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
+ *ss = m->seg; /* Push current record */
+ m->seg.base = tbase;
+ m->seg.size = tsize;
+ m->seg.next = ss;
+
+ /* Insert trailing fenceposts */
+ for (;;) {
+ mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
+ p->head = FENCEPOST_HEAD;
+ if ((char *)(&(nextp->head)) < old_end)
+ p = nextp;
+ else
+ break;
+ }
+
+ /* Insert the rest of old top into a bin as an ordinary free chunk */
+ if (csp != old_top) {
+ mchunkptr q = (mchunkptr)old_top;
+ size_t psize = (size_t)(csp - old_top);
+ mchunkptr tn = chunk_plus_offset(q, psize);
+ set_free_with_pinuse(q, psize, tn);
+ insert_chunk(m, q, psize);
+ }
+}
+
+/* -------------------------- System allocation -------------------------- */
+
+static void *alloc_sys(mstate m, size_t nb)
+{
+ char *tbase = CMFAIL;
+ size_t tsize = 0;
+
+ /* Directly map large chunks */
+ if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
+ void *mem = direct_alloc(nb);
+ if (mem != 0)
+ return mem;
+ }
+
+ {
+ size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
+ size_t rsize = granularity_align(req);
+ if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
+ char *mp = (char *)(CALL_MMAP(rsize));
+ if (mp != CMFAIL) {
+ tbase = mp;
+ tsize = rsize;
+ }
+ }
+ }
+
+ if (tbase != CMFAIL) {
+ msegmentptr sp = &m->seg;
+ /* Try to merge with an existing segment */
+ while (sp != 0 && tbase != sp->base + sp->size)
+ sp = sp->next;
+ if (sp != 0 && segment_holds(sp, m->top)) { /* append */
+ sp->size += tsize;
+ init_top(m, m->top, m->topsize + tsize);
+ } else {
+ sp = &m->seg;
+ while (sp != 0 && sp->base != tbase + tsize)
+ sp = sp->next;
+ if (sp != 0) {
+ char *oldbase = sp->base;
+ sp->base = tbase;
+ sp->size += tsize;
+ return prepend_alloc(m, tbase, oldbase, nb);
+ } else {
+ add_segment(m, tbase, tsize);
+ }
+ }
+
+ if (nb < m->topsize) { /* Allocate from new or extended top space */
+ size_t rsize = m->topsize -= nb;
+ mchunkptr p = m->top;
+ mchunkptr r = m->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+ return chunk2mem(p);
+ }
+ }
+
+ return NULL;
+}
+
+/* ----------------------- system deallocation -------------------------- */
+
+/* Unmap and unlink any mmapped segments that don't contain used chunks */
+static size_t release_unused_segments(mstate m)
+{
+ size_t released = 0;
+ size_t nsegs = 0;
+ msegmentptr pred = &m->seg;
+ msegmentptr sp = pred->next;
+ while (sp != 0) {
+ char *base = sp->base;
+ size_t size = sp->size;
+ msegmentptr next = sp->next;
+ nsegs++;
+ {
+ mchunkptr p = align_as_chunk(base);
+ size_t psize = chunksize(p);
+ /* Can unmap if first chunk holds entire segment and not pinned */
+ if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
+ tchunkptr tp = (tchunkptr)p;
+ if (p == m->dv) {
+ m->dv = 0;
+ m->dvsize = 0;
+ } else {
+ unlink_large_chunk(m, tp);
+ }
+ if (CALL_MUNMAP(base, size) == 0) {
+ released += size;
+ /* unlink obsoleted record */
+ sp = pred;
+ sp->next = next;
+ } else { /* back out if cannot unmap */
+ insert_large_chunk(m, tp, psize);
+ }
+ }
+ }
+ pred = sp;
+ sp = next;
+ }
+ /* Reset check counter */
+ m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
+ nsegs : MAX_RELEASE_CHECK_RATE;
+ return released;
+}
+
+static int alloc_trim(mstate m, size_t pad)
+{
+ size_t released = 0;
+ if (pad < MAX_REQUEST && is_initialized(m)) {
+ pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
+
+ if (m->topsize > pad) {
+ /* Shrink top space in granularity-size units, keeping at least one */
+ size_t unit = DEFAULT_GRANULARITY;
+ size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
+ SIZE_T_ONE) * unit;
+ msegmentptr sp = segment_holding(m, (char *)m->top);
+
+ if (sp->size >= extra &&
+ !has_segment_link(m, sp)) { /* can't shrink if pinned */
+ size_t newsize = sp->size - extra;
+ /* Prefer mremap, fall back to munmap */
+ if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
+ (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
+ released = extra;
+ }
+ }
+
+ if (released != 0) {
+ sp->size -= released;
+ init_top(m, m->top, m->topsize - released);
+ }
+ }
+
+ /* Unmap any unused mmapped segments */
+ released += release_unused_segments(m);
+
+ /* On failure, disable autotrim to avoid repeated failed future calls */
+ if (released == 0 && m->topsize > m->trim_check)
+ m->trim_check = MAX_SIZE_T;
+ }
+
+ return (released != 0)? 1 : 0;
+}
+
+/* ---------------------------- malloc support --------------------------- */
+
+/* allocate a large request from the best fitting chunk in a treebin */
+static void *tmalloc_large(mstate m, size_t nb)
+{
+ tchunkptr v = 0;
+ size_t rsize = ~nb+1; /* Unsigned negation */
+ tchunkptr t;
+ bindex_t idx;
+ compute_tree_index(nb, idx);
+
+ if ((t = *treebin_at(m, idx)) != 0) {
+ /* Traverse tree for this bin looking for node with size == nb */
+ size_t sizebits = nb << leftshift_for_tree_index(idx);
+ tchunkptr rst = 0; /* The deepest untaken right subtree */
+ for (;;) {
+ tchunkptr rt;
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ v = t;
+ if ((rsize = trem) == 0)
+ break;
+ }
+ rt = t->child[1];
+ t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ if (rt != 0 && rt != t)
+ rst = rt;
+ if (t == 0) {
+ t = rst; /* set t to least subtree holding sizes > nb */
+ break;
+ }
+ sizebits <<= 1;
+ }
+ }
+
+ if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
+ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
+ if (leftbits != 0)
+ t = *treebin_at(m, lj_ffs(leftbits));
+ }
+
+ while (t != 0) { /* find smallest of tree or subtree */
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ t = leftmost_child(t);
+ }
+
+ /* If dv is a better fit, return NULL so malloc will use it */
+ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
+ mchunkptr r = chunk_plus_offset(v, nb);
+ unlink_large_chunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE) {
+ set_inuse_and_pinuse(m, v, (rsize + nb));
+ } else {
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ insert_chunk(m, r, rsize);
+ }
+ return chunk2mem(v);
+ }
+ return NULL;
+}
+
+/* allocate a small request from the best fitting chunk in a treebin */
+static void *tmalloc_small(mstate m, size_t nb)
+{
+ tchunkptr t, v;
+ mchunkptr r;
+ size_t rsize;
+ bindex_t i = lj_ffs(m->treemap);
+
+ v = t = *treebin_at(m, i);
+ rsize = chunksize(t) - nb;
+
+ while ((t = leftmost_child(t)) != 0) {
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ }
+
+ r = chunk_plus_offset(v, nb);
+ unlink_large_chunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE) {
+ set_inuse_and_pinuse(m, v, (rsize + nb));
+ } else {
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(m, r, rsize);
+ }
+ return chunk2mem(v);
+}
+
+/* ----------------------------------------------------------------------- */
+
+void *lj_alloc_create(void)
+{
+ size_t tsize = DEFAULT_GRANULARITY;
+ char *tbase;
+ INIT_MMAP();
+ tbase = (char *)(CALL_MMAP(tsize));
+ if (tbase != CMFAIL) {
+ size_t msize = pad_request(sizeof(struct malloc_state));
+ mchunkptr mn;
+ mchunkptr msp = align_as_chunk(tbase);
+ mstate m = (mstate)(chunk2mem(msp));
+ memset(m, 0, msize);
+ msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
+ m->seg.base = tbase;
+ m->seg.size = tsize;
+ m->release_checks = MAX_RELEASE_CHECK_RATE;
+ init_bins(m);
+ mn = next_chunk(mem2chunk(m));
+ init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
+ return m;
+ }
+ return NULL;
+}
+
+void lj_alloc_destroy(void *msp)
+{
+ mstate ms = (mstate)msp;
+ msegmentptr sp = &ms->seg;
+ while (sp != 0) {
+ char *base = sp->base;
+ size_t size = sp->size;
+ sp = sp->next;
+ CALL_MUNMAP(base, size);
+ }
+}
+
+static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
+{
+ mstate ms = (mstate)msp;
+ void *mem;
+ size_t nb;
+ if (nsize <= MAX_SMALL_REQUEST) {
+ bindex_t idx;
+ binmap_t smallbits;
+ nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
+ idx = small_index(nb);
+ smallbits = ms->smallmap >> idx;
+
+ if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
+ mchunkptr b, p;
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
+ b = smallbin_at(ms, idx);
+ p = b->fd;
+ unlink_first_small_chunk(ms, b, p, idx);
+ set_inuse_and_pinuse(ms, p, small_index2size(idx));
+ mem = chunk2mem(p);
+ return mem;
+ } else if (nb > ms->dvsize) {
+ if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
+ mchunkptr b, p, r;
+ size_t rsize;
+ binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+ bindex_t i = lj_ffs(leftbits);
+ b = smallbin_at(ms, i);
+ p = b->fd;
+ unlink_first_small_chunk(ms, b, p, i);
+ rsize = small_index2size(i) - nb;
+ /* Fit here cannot be remainderless if 4byte sizes */
+ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
+ set_inuse_and_pinuse(ms, p, small_index2size(i));
+ } else {
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ r = chunk_plus_offset(p, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(ms, r, rsize);
+ }
+ mem = chunk2mem(p);
+ return mem;
+ } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
+ return mem;
+ }
+ }
+ } else if (nsize >= MAX_REQUEST) {
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+ } else {
+ nb = pad_request(nsize);
+ if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
+ return mem;
+ }
+ }
+
+ if (nb <= ms->dvsize) {
+ size_t rsize = ms->dvsize - nb;
+ mchunkptr p = ms->dv;
+ if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
+ mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
+ ms->dvsize = rsize;
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ } else { /* exhaust dv */
+ size_t dvs = ms->dvsize;
+ ms->dvsize = 0;
+ ms->dv = 0;
+ set_inuse_and_pinuse(ms, p, dvs);
+ }
+ mem = chunk2mem(p);
+ return mem;
+ } else if (nb < ms->topsize) { /* Split top */
+ size_t rsize = ms->topsize -= nb;
+ mchunkptr p = ms->top;
+ mchunkptr r = ms->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ mem = chunk2mem(p);
+ return mem;
+ }
+ return alloc_sys(ms, nb);
+}
+
+static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
+{
+ if (ptr != 0) {
+ mchunkptr p = mem2chunk(ptr);
+ mstate fm = (mstate)msp;
+ size_t psize = chunksize(p);
+ mchunkptr next = chunk_plus_offset(p, psize);
+ if (!pinuse(p)) {
+ size_t prevsize = p->prev_foot;
+ if ((prevsize & IS_DIRECT_BIT) != 0) {
+ prevsize &= ~IS_DIRECT_BIT;
+ psize += prevsize + DIRECT_FOOT_PAD;
+ CALL_MUNMAP((char *)p - prevsize, psize);
+ return NULL;
+ } else {
+ mchunkptr prev = chunk_minus_offset(p, prevsize);
+ psize += prevsize;
+ p = prev;
+ /* consolidate backward */
+ if (p != fm->dv) {
+ unlink_chunk(fm, p, prevsize);
+ } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
+ fm->dvsize = psize;
+ set_free_with_pinuse(p, psize, next);
+ return NULL;
+ }
+ }
+ }
+ if (!cinuse(next)) { /* consolidate forward */
+ if (next == fm->top) {
+ size_t tsize = fm->topsize += psize;
+ fm->top = p;
+ p->head = tsize | PINUSE_BIT;
+ if (p == fm->dv) {
+ fm->dv = 0;
+ fm->dvsize = 0;
+ }
+ if (tsize > fm->trim_check)
+ alloc_trim(fm, 0);
+ return NULL;
+ } else if (next == fm->dv) {
+ size_t dsize = fm->dvsize += psize;
+ fm->dv = p;
+ set_size_and_pinuse_of_free_chunk(p, dsize);
+ return NULL;
+ } else {
+ size_t nsize = chunksize(next);
+ psize += nsize;
+ unlink_chunk(fm, next, nsize);
+ set_size_and_pinuse_of_free_chunk(p, psize);
+ if (p == fm->dv) {
+ fm->dvsize = psize;
+ return NULL;
+ }
+ }
+ } else {
+ set_free_with_pinuse(p, psize, next);
+ }
+
+ if (is_small(psize)) {
+ insert_small_chunk(fm, p, psize);
+ } else {
+ tchunkptr tp = (tchunkptr)p;
+ insert_large_chunk(fm, tp, psize);
+ if (--fm->release_checks == 0)
+ release_unused_segments(fm);
+ }
+ }
+ return NULL;
+}
+
+static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
+{
+ if (nsize >= MAX_REQUEST) {
+ return NULL;
+ } else {
+ mstate m = (mstate)msp;
+ mchunkptr oldp = mem2chunk(ptr);
+ size_t oldsize = chunksize(oldp);
+ mchunkptr next = chunk_plus_offset(oldp, oldsize);
+ mchunkptr newp = 0;
+ size_t nb = request2size(nsize);
+
+ /* Try to either shrink or extend into top. Else malloc-copy-free */
+ if (is_direct(oldp)) {
+ newp = direct_resize(oldp, nb); /* this may return NULL. */
+ } else if (oldsize >= nb) { /* already big enough */
+ size_t rsize = oldsize - nb;
+ newp = oldp;
+ if (rsize >= MIN_CHUNK_SIZE) {
+ mchunkptr rem = chunk_plus_offset(newp, nb);
+ set_inuse(m, newp, nb);
+ set_inuse(m, rem, rsize);
+ lj_alloc_free(m, chunk2mem(rem));
+ }
+ } else if (next == m->top && oldsize + m->topsize > nb) {
+ /* Expand into top */
+ size_t newsize = oldsize + m->topsize;
+ size_t newtopsize = newsize - nb;
+ mchunkptr newtop = chunk_plus_offset(oldp, nb);
+ set_inuse(m, oldp, nb);
+ newtop->head = newtopsize |PINUSE_BIT;
+ m->top = newtop;
+ m->topsize = newtopsize;
+ newp = oldp;
+ }
+
+ if (newp != 0) {
+ return chunk2mem(newp);
+ } else {
+ void *newmem = lj_alloc_malloc(m, nsize);
+ if (newmem != 0) {
+ size_t oc = oldsize - overhead_for(oldp);
+ memcpy(newmem, ptr, oc < nsize ? oc : nsize);
+ lj_alloc_free(m, ptr);
+ }
+ return newmem;
+ }
+ }
+}
+
+void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
+{
+ (void)osize;
+ if (nsize == 0) {
+ return lj_alloc_free(msp, ptr);
+ } else if (ptr == NULL) {
+ return lj_alloc_malloc(msp, nsize);
+ } else {
+ return lj_alloc_realloc(msp, ptr, nsize);
+ }
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_alloc.h b/src/LuaJIT/src/lj_alloc.h
new file mode 100644
index 000000000..f87a7cf34
--- /dev/null
+++ b/src/LuaJIT/src/lj_alloc.h
@@ -0,0 +1,17 @@
+/*
+** Bundled memory allocator.
+** Donated to the public domain.
+*/
+
+#ifndef _LJ_ALLOC_H
+#define _LJ_ALLOC_H
+
+#include "lj_def.h"
+
+#ifndef LUAJIT_USE_SYSMALLOC
+LJ_FUNC void *lj_alloc_create(void);
+LJ_FUNC void lj_alloc_destroy(void *msp);
+LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize);
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_api.c b/src/LuaJIT/src/lj_api.c
new file mode 100644
index 000000000..b807900dd
--- /dev/null
+++ b/src/LuaJIT/src/lj_api.c
@@ -0,0 +1,1220 @@
+/*
+** Public Lua/C API.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_api_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_udata.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_bc.h"
+#include "lj_frame.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+#include "lj_lex.h"
+#include "lj_bcdump.h"
+#include "lj_parse.h"
+
+/* -- Common helper functions --------------------------------------------- */
+
+#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base))
+#define api_checkvalidindex(L, i) api_check(L, (i) != niltv(L))
+
+static TValue *index2adr(lua_State *L, int idx)
+{
+ if (idx > 0) {
+ TValue *o = L->base + (idx - 1);
+ return o < L->top ? o : niltv(L);
+ } else if (idx > LUA_REGISTRYINDEX) {
+ api_check(L, idx != 0 && -idx <= L->top - L->base);
+ return L->top + idx;
+ } else if (idx == LUA_GLOBALSINDEX) {
+ TValue *o = &G(L)->tmptv;
+ settabV(L, o, tabref(L->env));
+ return o;
+ } else if (idx == LUA_REGISTRYINDEX) {
+ return registry(L);
+ } else {
+ GCfunc *fn = curr_func(L);
+ api_check(L, fn->c.gct == ~LJ_TFUNC && !isluafunc(fn));
+ if (idx == LUA_ENVIRONINDEX) {
+ TValue *o = &G(L)->tmptv;
+ settabV(L, o, tabref(fn->c.env));
+ return o;
+ } else {
+ idx = LUA_GLOBALSINDEX - idx;
+ return idx <= fn->c.nupvalues ? &fn->c.upvalue[idx-1] : niltv(L);
+ }
+ }
+}
+
+static TValue *stkindex2adr(lua_State *L, int idx)
+{
+ if (idx > 0) {
+ TValue *o = L->base + (idx - 1);
+ return o < L->top ? o : niltv(L);
+ } else {
+ api_check(L, idx != 0 && -idx <= L->top - L->base);
+ return L->top + idx;
+ }
+}
+
+static GCtab *getcurrenv(lua_State *L)
+{
+ GCfunc *fn = curr_func(L);
+ return fn->c.gct == ~LJ_TFUNC ? tabref(fn->c.env) : tabref(L->env);
+}
+
+/* -- Miscellaneous API functions ----------------------------------------- */
+
+LUA_API int lua_status(lua_State *L)
+{
+ return L->status;
+}
+
+LUA_API int lua_checkstack(lua_State *L, int size)
+{
+ if (size > LUAI_MAXCSTACK || (L->top - L->base + size) > LUAI_MAXCSTACK) {
+ return 0; /* Stack overflow. */
+ } else if (size > 0) {
+ lj_state_checkstack(L, (MSize)size);
+ }
+ return 1;
+}
+
+LUALIB_API void luaL_checkstack(lua_State *L, int size, const char *msg)
+{
+ if (!lua_checkstack(L, size))
+ lj_err_callerv(L, LJ_ERR_STKOVM, msg);
+}
+
+LUA_API void lua_xmove(lua_State *from, lua_State *to, int n)
+{
+ TValue *f, *t;
+ if (from == to) return;
+ api_checknelems(from, n);
+ api_check(from, G(from) == G(to));
+ lj_state_checkstack(to, (MSize)n);
+ f = from->top;
+ t = to->top = to->top + n;
+ while (--n >= 0) copyTV(to, --t, --f);
+ from->top = f;
+}
+
+/* -- Stack manipulation -------------------------------------------------- */
+
+LUA_API int lua_gettop(lua_State *L)
+{
+ return (int)(L->top - L->base);
+}
+
+LUA_API void lua_settop(lua_State *L, int idx)
+{
+ if (idx >= 0) {
+ api_check(L, idx <= tvref(L->maxstack) - L->base);
+ if (L->base + idx > L->top) {
+ if (L->base + idx >= tvref(L->maxstack))
+ lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base));
+ do { setnilV(L->top++); } while (L->top < L->base + idx);
+ } else {
+ L->top = L->base + idx;
+ }
+ } else {
+ api_check(L, -(idx+1) <= (L->top - L->base));
+ L->top += idx+1; /* Shrinks top (idx < 0). */
+ }
+}
+
+LUA_API void lua_remove(lua_State *L, int idx)
+{
+ TValue *p = stkindex2adr(L, idx);
+ api_checkvalidindex(L, p);
+ while (++p < L->top) copyTV(L, p-1, p);
+ L->top--;
+}
+
+LUA_API void lua_insert(lua_State *L, int idx)
+{
+ TValue *q, *p = stkindex2adr(L, idx);
+ api_checkvalidindex(L, p);
+ for (q = L->top; q > p; q--) copyTV(L, q, q-1);
+ copyTV(L, p, L->top);
+}
+
+LUA_API void lua_replace(lua_State *L, int idx)
+{
+ api_checknelems(L, 1);
+ if (idx == LUA_GLOBALSINDEX) {
+ api_check(L, tvistab(L->top-1));
+ /* NOBARRIER: A thread (i.e. L) is never black. */
+ setgcref(L->env, obj2gco(tabV(L->top-1)));
+ } else if (idx == LUA_ENVIRONINDEX) {
+ GCfunc *fn = curr_func(L);
+ if (fn->c.gct != ~LJ_TFUNC)
+ lj_err_msg(L, LJ_ERR_NOENV);
+ api_check(L, tvistab(L->top-1));
+ setgcref(fn->c.env, obj2gco(tabV(L->top-1)));
+ lj_gc_barrier(L, fn, L->top-1);
+ } else {
+ TValue *o = index2adr(L, idx);
+ api_checkvalidindex(L, o);
+ copyTV(L, o, L->top-1);
+ if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */
+ lj_gc_barrier(L, curr_func(L), L->top-1);
+ }
+ L->top--;
+}
+
+LUA_API void lua_pushvalue(lua_State *L, int idx)
+{
+ copyTV(L, L->top, index2adr(L, idx));
+ incr_top(L);
+}
+
+/* -- Stack getters ------------------------------------------------------- */
+
+LUA_API int lua_type(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisnumber(o)) {
+ return LUA_TNUMBER;
+#if LJ_64
+ } else if (tvislightud(o)) {
+ return LUA_TLIGHTUSERDATA;
+#endif
+ } else if (o == niltv(L)) {
+ return LUA_TNONE;
+ } else { /* Magic internal/external tag conversion. ORDER LJ_T */
+ uint32_t t = ~itype(o);
+#if LJ_64
+ int tt = (int)((U64x(75a06,98042110) >> 4*t) & 15u);
+#else
+ int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u);
+#endif
+ lua_assert(tt != LUA_TNIL || tvisnil(o));
+ return tt;
+ }
+}
+
+LUALIB_API void luaL_checktype(lua_State *L, int idx, int tt)
+{
+ if (lua_type(L, idx) != tt)
+ lj_err_argt(L, idx, tt);
+}
+
+LUALIB_API void luaL_checkany(lua_State *L, int idx)
+{
+ if (index2adr(L, idx) == niltv(L))
+ lj_err_arg(L, idx, LJ_ERR_NOVAL);
+}
+
+LUA_API const char *lua_typename(lua_State *L, int t)
+{
+ UNUSED(L);
+ return lj_obj_typename[t+1];
+}
+
+LUA_API int lua_iscfunction(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return tvisfunc(o) && !isluafunc(funcV(o));
+}
+
+LUA_API int lua_isnumber(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ return (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), &tmp)));
+}
+
+LUA_API int lua_isstring(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return (tvisstr(o) || tvisnumber(o));
+}
+
+LUA_API int lua_isuserdata(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return (tvisudata(o) || tvislightud(o));
+}
+
+LUA_API int lua_rawequal(lua_State *L, int idx1, int idx2)
+{
+ cTValue *o1 = index2adr(L, idx1);
+ cTValue *o2 = index2adr(L, idx2);
+ return (o1 == niltv(L) || o2 == niltv(L)) ? 0 : lj_obj_equal(o1, o2);
+}
+
+LUA_API int lua_equal(lua_State *L, int idx1, int idx2)
+{
+ cTValue *o1 = index2adr(L, idx1);
+ cTValue *o2 = index2adr(L, idx2);
+ if (tvisint(o1) && tvisint(o2)) {
+ return intV(o1) == intV(o2);
+ } else if (tvisnumber(o1) && tvisnumber(o2)) {
+ return numberVnum(o1) == numberVnum(o2);
+ } else if (itype(o1) != itype(o2)) {
+ return 0;
+ } else if (tvispri(o1)) {
+ return o1 != niltv(L) && o2 != niltv(L);
+#if LJ_64
+ } else if (tvislightud(o1)) {
+ return o1->u64 == o2->u64;
+#endif
+ } else if (gcrefeq(o1->gcr, o2->gcr)) {
+ return 1;
+ } else if (!tvistabud(o1)) {
+ return 0;
+ } else {
+ TValue *base = lj_meta_equal(L, gcV(o1), gcV(o2), 0);
+ if ((uintptr_t)base <= 1) {
+ return (int)(uintptr_t)base;
+ } else {
+ L->top = base+2;
+ lj_vm_call(L, base, 1+1);
+ L->top -= 2;
+ return tvistruecond(L->top+1);
+ }
+ }
+}
+
+LUA_API int lua_lessthan(lua_State *L, int idx1, int idx2)
+{
+ cTValue *o1 = index2adr(L, idx1);
+ cTValue *o2 = index2adr(L, idx2);
+ if (o1 == niltv(L) || o2 == niltv(L)) {
+ return 0;
+ } else if (tvisint(o1) && tvisint(o2)) {
+ return intV(o1) < intV(o2);
+ } else if (tvisnumber(o1) && tvisnumber(o2)) {
+ return numberVnum(o1) < numberVnum(o2);
+ } else {
+ TValue *base = lj_meta_comp(L, o1, o2, 0);
+ if ((uintptr_t)base <= 1) {
+ return (int)(uintptr_t)base;
+ } else {
+ L->top = base+2;
+ lj_vm_call(L, base, 1+1);
+ L->top -= 2;
+ return tvistruecond(L->top+1);
+ }
+ }
+}
+
+LUA_API lua_Number lua_tonumber(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o)))
+ return numberVnum(o);
+ else if (tvisstr(o) && lj_str_tonum(strV(o), &tmp))
+ return numV(&tmp);
+ else
+ return 0;
+}
+
+LUALIB_API lua_Number luaL_checknumber(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o)))
+ return numberVnum(o);
+ else if (!(tvisstr(o) && lj_str_tonum(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ return numV(&tmp);
+}
+
+LUALIB_API lua_Number luaL_optnumber(lua_State *L, int idx, lua_Number def)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o)))
+ return numberVnum(o);
+ else if (tvisnil(o))
+ return def;
+ else if (!(tvisstr(o) && lj_str_tonum(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ return numV(&tmp);
+}
+
+LUA_API lua_Integer lua_tointeger(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else {
+ if (!(tvisstr(o) && lj_str_tonumber(strV(o), &tmp)))
+ return 0;
+ if (tvisint(&tmp))
+ return (lua_Integer)intV(&tmp);
+ n = numV(&tmp);
+ }
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUALIB_API lua_Integer luaL_checkinteger(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else {
+ if (!(tvisstr(o) && lj_str_tonumber(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ if (tvisint(&tmp))
+ return (lua_Integer)intV(&tmp);
+ n = numV(&tmp);
+ }
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUALIB_API lua_Integer luaL_optinteger(lua_State *L, int idx, lua_Integer def)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else if (tvisnil(o)) {
+ return def;
+ } else {
+ if (!(tvisstr(o) && lj_str_tonumber(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ if (tvisint(&tmp))
+ return (lua_Integer)intV(&tmp);
+ n = numV(&tmp);
+ }
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUA_API int lua_toboolean(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return tvistruecond(o);
+}
+
+LUA_API const char *lua_tolstring(lua_State *L, int idx, size_t *len)
+{
+ TValue *o = index2adr(L, idx);
+ GCstr *s;
+ if (LJ_LIKELY(tvisstr(o))) {
+ s = strV(o);
+ } else if (tvisnumber(o)) {
+ lj_gc_check(L);
+ o = index2adr(L, idx); /* GC may move the stack. */
+ s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ } else {
+ if (len != NULL) *len = 0;
+ return NULL;
+ }
+ if (len != NULL) *len = s->len;
+ return strdata(s);
+}
+
+LUALIB_API const char *luaL_checklstring(lua_State *L, int idx, size_t *len)
+{
+ TValue *o = index2adr(L, idx);
+ GCstr *s;
+ if (LJ_LIKELY(tvisstr(o))) {
+ s = strV(o);
+ } else if (tvisnumber(o)) {
+ lj_gc_check(L);
+ o = index2adr(L, idx); /* GC may move the stack. */
+ s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ } else {
+ lj_err_argt(L, idx, LUA_TSTRING);
+ }
+ if (len != NULL) *len = s->len;
+ return strdata(s);
+}
+
+LUALIB_API const char *luaL_optlstring(lua_State *L, int idx,
+ const char *def, size_t *len)
+{
+ TValue *o = index2adr(L, idx);
+ GCstr *s;
+ if (LJ_LIKELY(tvisstr(o))) {
+ s = strV(o);
+ } else if (tvisnil(o)) {
+ if (len != NULL) *len = def ? strlen(def) : 0;
+ return def;
+ } else if (tvisnumber(o)) {
+ lj_gc_check(L);
+ o = index2adr(L, idx); /* GC may move the stack. */
+ s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ } else {
+ lj_err_argt(L, idx, LUA_TSTRING);
+ }
+ if (len != NULL) *len = s->len;
+ return strdata(s);
+}
+
+LUALIB_API int luaL_checkoption(lua_State *L, int idx, const char *def,
+ const char *const lst[])
+{
+ ptrdiff_t i;
+ const char *s = lua_tolstring(L, idx, NULL);
+ if (s == NULL && (s = def) == NULL)
+ lj_err_argt(L, idx, LUA_TSTRING);
+ for (i = 0; lst[i]; i++)
+ if (strcmp(lst[i], s) == 0)
+ return (int)i;
+ lj_err_argv(L, idx, LJ_ERR_INVOPTM, s);
+}
+
+LUA_API size_t lua_objlen(lua_State *L, int idx)
+{
+ TValue *o = index2adr(L, idx);
+ if (tvisstr(o)) {
+ return strV(o)->len;
+ } else if (tvistab(o)) {
+ return (size_t)lj_tab_len(tabV(o));
+ } else if (tvisudata(o)) {
+ return udataV(o)->len;
+ } else if (tvisnumber(o)) {
+ GCstr *s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ return s->len;
+ } else {
+ return 0;
+ }
+}
+
+LUA_API lua_CFunction lua_tocfunction(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisfunc(o)) {
+ BCOp op = bc_op(*mref(funcV(o)->c.pc, BCIns));
+ if (op == BC_FUNCC || op == BC_FUNCCW)
+ return funcV(o)->c.f;
+ }
+ return NULL;
+}
+
+LUA_API void *lua_touserdata(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisudata(o))
+ return uddata(udataV(o));
+ else if (tvislightud(o))
+ return lightudV(o);
+ else
+ return NULL;
+}
+
+LUA_API lua_State *lua_tothread(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return (!tvisthread(o)) ? NULL : threadV(o);
+}
+
+LUA_API const void *lua_topointer(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisudata(o))
+ return uddata(udataV(o));
+ else if (tvislightud(o))
+ return lightudV(o);
+ else if (tviscdata(o))
+ return cdataptr(cdataV(o));
+ else if (tvisgcv(o))
+ return gcV(o);
+ else
+ return NULL;
+}
+
+/* -- Stack setters (object creation) ------------------------------------- */
+
+LUA_API void lua_pushnil(lua_State *L)
+{
+ setnilV(L->top);
+ incr_top(L);
+}
+
+LUA_API void lua_pushnumber(lua_State *L, lua_Number n)
+{
+ setnumV(L->top, n);
+ if (LJ_UNLIKELY(tvisnan(L->top)))
+ setnanV(L->top); /* Canonicalize injected NaNs. */
+ incr_top(L);
+}
+
+LUA_API void lua_pushinteger(lua_State *L, lua_Integer n)
+{
+ setintptrV(L->top, n);
+ incr_top(L);
+}
+
+LUA_API void lua_pushlstring(lua_State *L, const char *str, size_t len)
+{
+ GCstr *s;
+ lj_gc_check(L);
+ s = lj_str_new(L, str, len);
+ setstrV(L, L->top, s);
+ incr_top(L);
+}
+
+LUA_API void lua_pushstring(lua_State *L, const char *str)
+{
+ if (str == NULL) {
+ setnilV(L->top);
+ } else {
+ GCstr *s;
+ lj_gc_check(L);
+ s = lj_str_newz(L, str);
+ setstrV(L, L->top, s);
+ }
+ incr_top(L);
+}
+
+LUA_API const char *lua_pushvfstring(lua_State *L, const char *fmt,
+ va_list argp)
+{
+ lj_gc_check(L);
+ return lj_str_pushvf(L, fmt, argp);
+}
+
+LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...)
+{
+ const char *ret;
+ va_list argp;
+ lj_gc_check(L);
+ va_start(argp, fmt);
+ ret = lj_str_pushvf(L, fmt, argp);
+ va_end(argp);
+ return ret;
+}
+
+LUA_API void lua_pushcclosure(lua_State *L, lua_CFunction f, int n)
+{
+ GCfunc *fn;
+ lj_gc_check(L);
+ api_checknelems(L, n);
+ fn = lj_func_newC(L, (MSize)n, getcurrenv(L));
+ fn->c.f = f;
+ L->top -= n;
+ while (n--)
+ copyTV(L, &fn->c.upvalue[n], L->top+n);
+ setfuncV(L, L->top, fn);
+ lua_assert(iswhite(obj2gco(fn)));
+ incr_top(L);
+}
+
+LUA_API void lua_pushboolean(lua_State *L, int b)
+{
+ setboolV(L->top, (b != 0));
+ incr_top(L);
+}
+
+LUA_API void lua_pushlightuserdata(lua_State *L, void *p)
+{
+ setlightudV(L->top, checklightudptr(L, p));
+ incr_top(L);
+}
+
+LUA_API void lua_createtable(lua_State *L, int narray, int nrec)
+{
+ GCtab *t;
+ lj_gc_check(L);
+ t = lj_tab_new(L, (uint32_t)(narray > 0 ? narray+1 : 0), hsize2hbits(nrec));
+ settabV(L, L->top, t);
+ incr_top(L);
+}
+
+LUALIB_API int luaL_newmetatable(lua_State *L, const char *tname)
+{
+ GCtab *regt = tabV(registry(L));
+ TValue *tv = lj_tab_setstr(L, regt, lj_str_newz(L, tname));
+ if (tvisnil(tv)) {
+ GCtab *mt = lj_tab_new(L, 0, 1);
+ settabV(L, tv, mt);
+ settabV(L, L->top++, mt);
+ lj_gc_anybarriert(L, regt);
+ return 1;
+ } else {
+ copyTV(L, L->top++, tv);
+ return 0;
+ }
+}
+
+LUA_API int lua_pushthread(lua_State *L)
+{
+ setthreadV(L, L->top, L);
+ incr_top(L);
+ return (mainthread(G(L)) == L);
+}
+
+LUA_API lua_State *lua_newthread(lua_State *L)
+{
+ lua_State *L1;
+ lj_gc_check(L);
+ L1 = lj_state_new(L);
+ setthreadV(L, L->top, L1);
+ incr_top(L);
+ return L1;
+}
+
+LUA_API void *lua_newuserdata(lua_State *L, size_t size)
+{
+ GCudata *ud;
+ lj_gc_check(L);
+ if (size > LJ_MAX_UDATA)
+ lj_err_msg(L, LJ_ERR_UDATAOV);
+ ud = lj_udata_new(L, (MSize)size, getcurrenv(L));
+ setudataV(L, L->top, ud);
+ incr_top(L);
+ return uddata(ud);
+}
+
+LUA_API void lua_concat(lua_State *L, int n)
+{
+ api_checknelems(L, n);
+ if (n >= 2) {
+ n--;
+ do {
+ TValue *top = lj_meta_cat(L, L->top-1, -n);
+ if (top == NULL) {
+ L->top -= n;
+ break;
+ }
+ n -= (int)(L->top - top);
+ L->top = top+2;
+ lj_vm_call(L, top, 1+1);
+ L->top--;
+ copyTV(L, L->top-1, L->top);
+ } while (--n > 0);
+ } else if (n == 0) { /* Push empty string. */
+ setstrV(L, L->top, &G(L)->strempty);
+ incr_top(L);
+ }
+ /* else n == 1: nothing to do. */
+}
+
+/* -- Object getters ------------------------------------------------------ */
+
+LUA_API void lua_gettable(lua_State *L, int idx)
+{
+ cTValue *v, *t = index2adr(L, idx);
+ api_checkvalidindex(L, t);
+ v = lj_meta_tget(L, t, L->top-1);
+ if (v == NULL) {
+ L->top += 2;
+ lj_vm_call(L, L->top-2, 1+1);
+ L->top -= 2;
+ v = L->top+1;
+ }
+ copyTV(L, L->top-1, v);
+}
+
+LUA_API void lua_getfield(lua_State *L, int idx, const char *k)
+{
+ cTValue *v, *t = index2adr(L, idx);
+ TValue key;
+ api_checkvalidindex(L, t);
+ setstrV(L, &key, lj_str_newz(L, k));
+ v = lj_meta_tget(L, t, &key);
+ if (v == NULL) {
+ L->top += 2;
+ lj_vm_call(L, L->top-2, 1+1);
+ L->top -= 2;
+ v = L->top+1;
+ }
+ copyTV(L, L->top, v);
+ incr_top(L);
+}
+
+LUA_API void lua_rawget(lua_State *L, int idx)
+{
+ cTValue *t = index2adr(L, idx);
+ api_check(L, tvistab(t));
+ copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1));
+}
+
+LUA_API void lua_rawgeti(lua_State *L, int idx, int n)
+{
+ cTValue *v, *t = index2adr(L, idx);
+ api_check(L, tvistab(t));
+ v = lj_tab_getint(tabV(t), n);
+ if (v) {
+ copyTV(L, L->top, v);
+ } else {
+ setnilV(L->top);
+ }
+ incr_top(L);
+}
+
+LUA_API int lua_getmetatable(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ GCtab *mt = NULL;
+ if (tvistab(o))
+ mt = tabref(tabV(o)->metatable);
+ else if (tvisudata(o))
+ mt = tabref(udataV(o)->metatable);
+ else
+ mt = tabref(basemt_obj(G(L), o));
+ if (mt == NULL)
+ return 0;
+ settabV(L, L->top, mt);
+ incr_top(L);
+ return 1;
+}
+
+LUALIB_API int luaL_getmetafield(lua_State *L, int idx, const char *field)
+{
+ if (lua_getmetatable(L, idx)) {
+ cTValue *tv = lj_tab_getstr(tabV(L->top-1), lj_str_newz(L, field));
+ if (tv && !tvisnil(tv)) {
+ copyTV(L, L->top-1, tv);
+ return 1;
+ }
+ L->top--;
+ }
+ return 0;
+}
+
+LUA_API void lua_getfenv(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ api_checkvalidindex(L, o);
+ if (tvisfunc(o)) {
+ settabV(L, L->top, tabref(funcV(o)->c.env));
+ } else if (tvisudata(o)) {
+ settabV(L, L->top, tabref(udataV(o)->env));
+ } else if (tvisthread(o)) {
+ settabV(L, L->top, tabref(threadV(o)->env));
+ } else {
+ setnilV(L->top);
+ }
+ incr_top(L);
+}
+
+LUA_API int lua_next(lua_State *L, int idx)
+{
+ cTValue *t = index2adr(L, idx);
+ int more;
+ api_check(L, tvistab(t));
+ more = lj_tab_next(L, tabV(t), L->top-1);
+ if (more) {
+ incr_top(L); /* Return new key and value slot. */
+ } else { /* End of traversal. */
+ L->top--; /* Remove key slot. */
+ }
+ return more;
+}
+
+LUA_API const char *lua_getupvalue(lua_State *L, int idx, int n)
+{
+ TValue *val;
+ const char *name = lj_debug_uvnamev(index2adr(L, idx), (uint32_t)(n-1), &val);
+ if (name) {
+ copyTV(L, L->top, val);
+ incr_top(L);
+ }
+ return name;
+}
+
+LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisudata(o)) {
+ GCudata *ud = udataV(o);
+ cTValue *tv = lj_tab_getstr(tabV(registry(L)), lj_str_newz(L, tname));
+ if (tv && tvistab(tv) && tabV(tv) == tabref(ud->metatable))
+ return uddata(ud);
+ }
+ lj_err_argtype(L, idx, tname);
+ return NULL; /* unreachable */
+}
+
+/* -- Object setters ------------------------------------------------------ */
+
+LUA_API void lua_settable(lua_State *L, int idx)
+{
+ TValue *o;
+ cTValue *t = index2adr(L, idx);
+ api_checknelems(L, 2);
+ api_checkvalidindex(L, t);
+ o = lj_meta_tset(L, t, L->top-2);
+ if (o) {
+ /* NOBARRIER: lj_meta_tset ensures the table is not black. */
+ copyTV(L, o, L->top-1);
+ L->top -= 2;
+ } else {
+ L->top += 3;
+ copyTV(L, L->top-1, L->top-6);
+ lj_vm_call(L, L->top-3, 0+1);
+ L->top -= 3;
+ }
+}
+
+LUA_API void lua_setfield(lua_State *L, int idx, const char *k)
+{
+ TValue *o;
+ TValue key;
+ cTValue *t = index2adr(L, idx);
+ api_checknelems(L, 1);
+ api_checkvalidindex(L, t);
+ setstrV(L, &key, lj_str_newz(L, k));
+ o = lj_meta_tset(L, t, &key);
+ if (o) {
+ L->top--;
+ /* NOBARRIER: lj_meta_tset ensures the table is not black. */
+ copyTV(L, o, L->top);
+ } else {
+ L->top += 3;
+ copyTV(L, L->top-1, L->top-6);
+ lj_vm_call(L, L->top-3, 0+1);
+ L->top -= 2;
+ }
+}
+
+LUA_API void lua_rawset(lua_State *L, int idx)
+{
+ GCtab *t = tabV(index2adr(L, idx));
+ TValue *dst, *key;
+ api_checknelems(L, 2);
+ key = L->top-2;
+ dst = lj_tab_set(L, t, key);
+ copyTV(L, dst, key+1);
+ lj_gc_anybarriert(L, t);
+ L->top = key;
+}
+
+LUA_API void lua_rawseti(lua_State *L, int idx, int n)
+{
+ GCtab *t = tabV(index2adr(L, idx));
+ TValue *dst, *src;
+ api_checknelems(L, 1);
+ dst = lj_tab_setint(L, t, n);
+ src = L->top-1;
+ copyTV(L, dst, src);
+ lj_gc_barriert(L, t, dst);
+ L->top = src;
+}
+
+LUA_API int lua_setmetatable(lua_State *L, int idx)
+{
+ global_State *g;
+ GCtab *mt;
+ cTValue *o = index2adr(L, idx);
+ api_checknelems(L, 1);
+ api_checkvalidindex(L, o);
+ if (tvisnil(L->top-1)) {
+ mt = NULL;
+ } else {
+ api_check(L, tvistab(L->top-1));
+ mt = tabV(L->top-1);
+ }
+ g = G(L);
+ if (tvistab(o)) {
+ setgcref(tabV(o)->metatable, obj2gco(mt));
+ if (mt)
+ lj_gc_objbarriert(L, tabV(o), mt);
+ } else if (tvisudata(o)) {
+ setgcref(udataV(o)->metatable, obj2gco(mt));
+ if (mt)
+ lj_gc_objbarrier(L, udataV(o), mt);
+ } else {
+ /* Flush cache, since traces specialize to basemt. But not during __gc. */
+ if (lj_trace_flushall(L))
+ lj_err_caller(L, LJ_ERR_NOGCMM);
+ if (tvisbool(o)) {
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_it(g, LJ_TTRUE), obj2gco(mt));
+ setgcref(basemt_it(g, LJ_TFALSE), obj2gco(mt));
+ } else {
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_obj(g, o), obj2gco(mt));
+ }
+ }
+ L->top--;
+ return 1;
+}
+
+LUA_API int lua_setfenv(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ GCtab *t;
+ api_checknelems(L, 1);
+ api_checkvalidindex(L, o);
+ api_check(L, tvistab(L->top-1));
+ t = tabV(L->top-1);
+ if (tvisfunc(o)) {
+ setgcref(funcV(o)->c.env, obj2gco(t));
+ } else if (tvisudata(o)) {
+ setgcref(udataV(o)->env, obj2gco(t));
+ } else if (tvisthread(o)) {
+ setgcref(threadV(o)->env, obj2gco(t));
+ } else {
+ L->top--;
+ return 0;
+ }
+ lj_gc_objbarrier(L, gcV(o), t);
+ L->top--;
+ return 1;
+}
+
+LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n)
+{
+ cTValue *f = index2adr(L, idx);
+ TValue *val;
+ const char *name;
+ api_checknelems(L, 1);
+ name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val);
+ if (name) {
+ L->top--;
+ copyTV(L, val, L->top);
+ lj_gc_barrier(L, funcV(f), L->top);
+ }
+ return name;
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+LUA_API void lua_call(lua_State *L, int nargs, int nresults)
+{
+ api_check(L, L->status == 0 || L->status == LUA_ERRERR);
+ api_checknelems(L, nargs+1);
+ lj_vm_call(L, L->top - nargs, nresults+1);
+}
+
+LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc)
+{
+ global_State *g = G(L);
+ uint8_t oldh = hook_save(g);
+ ptrdiff_t ef;
+ int status;
+ api_check(L, L->status == 0 || L->status == LUA_ERRERR);
+ api_checknelems(L, nargs+1);
+ if (errfunc == 0) {
+ ef = 0;
+ } else {
+ cTValue *o = stkindex2adr(L, errfunc);
+ api_checkvalidindex(L, o);
+ ef = savestack(L, o);
+ }
+ status = lj_vm_pcall(L, L->top - nargs, nresults+1, ef);
+ if (status) hook_restore(g, oldh);
+ return status;
+}
+
+static TValue *cpcall(lua_State *L, lua_CFunction func, void *ud)
+{
+ GCfunc *fn = lj_func_newC(L, 0, getcurrenv(L));
+ fn->c.f = func;
+ setfuncV(L, L->top, fn);
+ setlightudV(L->top+1, checklightudptr(L, ud));
+ cframe_nres(L->cframe) = 1+0; /* Zero results. */
+ L->top += 2;
+ return L->top-1; /* Now call the newly allocated C function. */
+}
+
+LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud)
+{
+ global_State *g = G(L);
+ uint8_t oldh = hook_save(g);
+ int status;
+ api_check(L, L->status == 0 || L->status == LUA_ERRERR);
+ status = lj_vm_cpcall(L, func, ud, cpcall);
+ if (status) hook_restore(g, oldh);
+ return status;
+}
+
+LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field)
+{
+ if (luaL_getmetafield(L, idx, field)) {
+ TValue *base = L->top--;
+ copyTV(L, base, index2adr(L, idx));
+ L->top = base+1;
+ lj_vm_call(L, base, 1+1);
+ return 1;
+ }
+ return 0;
+}
+
+/* -- Coroutine yield and resume ------------------------------------------ */
+
+LUA_API int lua_yield(lua_State *L, int nresults)
+{
+ void *cf = L->cframe;
+ global_State *g = G(L);
+ if (cframe_canyield(cf)) {
+ cf = cframe_raw(cf);
+ if (!hook_active(g)) { /* Regular yield: move results down if needed. */
+ cTValue *f = L->top - nresults;
+ if (f > L->base) {
+ TValue *t = L->base;
+ while (--nresults >= 0) copyTV(L, t++, f++);
+ L->top = t;
+ }
+ } else { /* Yield from hook: add a pseudo-frame. */
+ TValue *top = L->top;
+ hook_leave(g);
+ top->u64 = cframe_multres(cf);
+ setcont(top+1, lj_cont_hook);
+ setframe_pc(top+1, cframe_pc(cf)-1);
+ setframe_gc(top+2, obj2gco(L));
+ setframe_ftsz(top+2, (int)((char *)(top+3)-(char *)L->base)+FRAME_CONT);
+ L->top = L->base = top+3;
+ }
+#if LJ_TARGET_X64
+ lj_err_throw(L, LUA_YIELD);
+#else
+ L->cframe = NULL;
+ L->status = LUA_YIELD;
+ lj_vm_unwind_c(cf, LUA_YIELD);
+#endif
+ }
+ lj_err_msg(L, LJ_ERR_CYIELD);
+ return 0; /* unreachable */
+}
+
+LUA_API int lua_resume(lua_State *L, int nargs)
+{
+ if (L->cframe == NULL && L->status <= LUA_YIELD)
+ return lj_vm_resume(L, L->top - nargs, 0, 0);
+ L->top = L->base;
+ setstrV(L, L->top, lj_err_str(L, LJ_ERR_COSUSP));
+ incr_top(L);
+ return LUA_ERRRUN;
+}
+
+/* -- Load and dump Lua code ---------------------------------------------- */
+
+static TValue *cpparser(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ LexState *ls = (LexState *)ud;
+ GCproto *pt;
+ GCfunc *fn;
+ UNUSED(dummy);
+ cframe_errfunc(L->cframe) = -1; /* Inherit error function. */
+ pt = lj_lex_setup(L, ls) ? lj_bcread(ls) : lj_parse(ls);
+ fn = lj_func_newL_empty(L, pt, tabref(L->env));
+ /* Don't combine above/below into one statement. */
+ setfuncV(L, L->top++, fn);
+ return NULL;
+}
+
+LUA_API int lua_load(lua_State *L, lua_Reader reader, void *data,
+ const char *chunkname)
+{
+ LexState ls;
+ int status;
+ ls.rfunc = reader;
+ ls.rdata = data;
+ ls.chunkarg = chunkname ? chunkname : "?";
+ lj_str_initbuf(&ls.sb);
+ status = lj_vm_cpcall(L, NULL, &ls, cpparser);
+ lj_lex_cleanup(L, &ls);
+ lj_gc_check(L);
+ return status;
+}
+
+LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data)
+{
+ cTValue *o = L->top-1;
+ api_checknelems(L, 1);
+ if (tvisfunc(o) && isluafunc(funcV(o)))
+ return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0);
+ else
+ return 1;
+}
+
+/* -- GC and memory management -------------------------------------------- */
+
+LUA_API int lua_gc(lua_State *L, int what, int data)
+{
+ global_State *g = G(L);
+ int res = 0;
+ switch (what) {
+ case LUA_GCSTOP:
+ g->gc.threshold = LJ_MAX_MEM;
+ break;
+ case LUA_GCRESTART:
+ g->gc.threshold = data == -1 ? (g->gc.total/100)*g->gc.pause : g->gc.total;
+ break;
+ case LUA_GCCOLLECT:
+ lj_gc_fullgc(L);
+ break;
+ case LUA_GCCOUNT:
+ res = (int)(g->gc.total >> 10);
+ break;
+ case LUA_GCCOUNTB:
+ res = (int)(g->gc.total & 0x3ff);
+ break;
+ case LUA_GCSTEP: {
+ MSize a = (MSize)data << 10;
+ g->gc.threshold = (a <= g->gc.total) ? (g->gc.total - a) : 0;
+ while (g->gc.total >= g->gc.threshold)
+ if (lj_gc_step(L)) {
+ res = 1;
+ break;
+ }
+ break;
+ }
+ case LUA_GCSETPAUSE:
+ res = (int)(g->gc.pause);
+ g->gc.pause = (MSize)data;
+ break;
+ case LUA_GCSETSTEPMUL:
+ res = (int)(g->gc.stepmul);
+ g->gc.stepmul = (MSize)data;
+ break;
+ default:
+ res = -1; /* Invalid option. */
+ }
+ return res;
+}
+
+LUA_API lua_Alloc lua_getallocf(lua_State *L, void **ud)
+{
+ global_State *g = G(L);
+ if (ud) *ud = g->allocd;
+ return g->allocf;
+}
+
+LUA_API void lua_setallocf(lua_State *L, lua_Alloc f, void *ud)
+{
+ global_State *g = G(L);
+ g->allocd = ud;
+ g->allocf = f;
+}
+
diff --git a/src/LuaJIT/src/lj_arch.h b/src/LuaJIT/src/lj_arch.h
new file mode 100644
index 000000000..1f8e10269
--- /dev/null
+++ b/src/LuaJIT/src/lj_arch.h
@@ -0,0 +1,323 @@
+/*
+** Target architecture selection.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_ARCH_H
+#define _LJ_ARCH_H
+
+#include "lua.h"
+
+/* Target endianness. */
+#define LUAJIT_LE 0
+#define LUAJIT_BE 1
+
+/* Target architectures. */
+#define LUAJIT_ARCH_X86 1
+#define LUAJIT_ARCH_x86 1
+#define LUAJIT_ARCH_X64 2
+#define LUAJIT_ARCH_x64 2
+#define LUAJIT_ARCH_ARM 3
+#define LUAJIT_ARCH_arm 3
+#define LUAJIT_ARCH_PPC 4
+#define LUAJIT_ARCH_ppc 4
+#define LUAJIT_ARCH_PPCSPE 5
+#define LUAJIT_ARCH_ppcspe 5
+#define LUAJIT_ARCH_MIPS 6
+#define LUAJIT_ARCH_mips 6
+
+/* Target OS. */
+#define LUAJIT_OS_OTHER 0
+#define LUAJIT_OS_WINDOWS 1
+#define LUAJIT_OS_LINUX 2
+#define LUAJIT_OS_OSX 3
+#define LUAJIT_OS_BSD 4
+#define LUAJIT_OS_POSIX 5
+
+/* Select native target if no target defined. */
+#ifndef LUAJIT_TARGET
+
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
+#define LUAJIT_TARGET LUAJIT_ARCH_X86
+#elif defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
+#define LUAJIT_TARGET LUAJIT_ARCH_X64
+#elif defined(__arm__) || defined(__arm) || defined(__ARM__) || defined(__ARM)
+#define LUAJIT_TARGET LUAJIT_ARCH_ARM
+#elif defined(__ppc__) || defined(__ppc) || defined(__PPC__) || defined(__PPC) || defined(__powerpc__) || defined(__powerpc) || defined(__POWERPC__) || defined(__POWERPC) || defined(_M_PPC)
+#ifdef __NO_FPRS__
+#define LUAJIT_TARGET LUAJIT_ARCH_PPCSPE
+#else
+#define LUAJIT_TARGET LUAJIT_ARCH_PPC
+#endif
+#elif defined(__mips__) || defined(__mips) || defined(__MIPS__) || defined(__MIPS)
+#define LUAJIT_TARGET LUAJIT_ARCH_MIPS
+#else
+#error "No support for this architecture (yet)"
+#endif
+
+#endif
+
+/* Select native OS if no target OS defined. */
+#ifndef LUAJIT_OS
+
+#if defined(_WIN32)
+#define LUAJIT_OS LUAJIT_OS_WINDOWS
+#elif defined(__linux__)
+#define LUAJIT_OS LUAJIT_OS_LINUX
+#elif defined(__MACH__) && defined(__APPLE__)
+#define LUAJIT_OS LUAJIT_OS_OSX
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__)
+#define LUAJIT_OS LUAJIT_OS_BSD
+#elif (defined(__sun__) && defined(__svr4__)) || defined(__solaris__) || \
+ defined(__CYGWIN__)
+#define LUAJIT_OS LUAJIT_OS_POSIX
+#else
+#define LUAJIT_OS LUAJIT_OS_OTHER
+#endif
+
+#endif
+
+/* Set target OS properties. */
+#if LUAJIT_OS == LUAJIT_OS_WINDOWS
+#define LJ_OS_NAME "Windows"
+#elif LUAJIT_OS == LUAJIT_OS_LINUX
+#define LJ_OS_NAME "Linux"
+#elif LUAJIT_OS == LUAJIT_OS_OSX
+#define LJ_OS_NAME "OSX"
+#elif LUAJIT_OS == LUAJIT_OS_BSD
+#define LJ_OS_NAME "BSD"
+#elif LUAJIT_OS == LUAJIT_OS_POSIX
+#define LJ_OS_NAME "POSIX"
+#else
+#define LJ_OS_NAME "Other"
+#endif
+
+#define LJ_TARGET_WINDOWS (LUAJIT_OS == LUAJIT_OS_WINDOWS)
+#define LJ_TARGET_LINUX (LUAJIT_OS == LUAJIT_OS_LINUX)
+#define LJ_TARGET_OSX (LUAJIT_OS == LUAJIT_OS_OSX)
+#define LJ_TARGET_POSIX (LUAJIT_OS > LUAJIT_OS_WINDOWS)
+#define LJ_TARGET_DLOPEN LJ_TARGET_POSIX
+
+#define LJ_NUMMODE_SINGLE 0 /* Single-number mode only. */
+#define LJ_NUMMODE_SINGLE_DUAL 1 /* Default to single-number mode. */
+#define LJ_NUMMODE_DUAL 2 /* Dual-number mode only. */
+#define LJ_NUMMODE_DUAL_SINGLE 3 /* Default to dual-number mode. */
+
+/* Set target architecture properties. */
+#if LUAJIT_TARGET == LUAJIT_ARCH_X86
+
+#define LJ_ARCH_NAME "x86"
+#define LJ_ARCH_BITS 32
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#define LJ_ARCH_HASFPU 1
+#define LJ_ABI_WIN LJ_TARGET_WINDOWS
+#define LJ_TARGET_X86 1
+#define LJ_TARGET_X86ORX64 1
+#define LJ_TARGET_EHRETREG 0
+#define LJ_TARGET_MASKSHIFT 1
+#define LJ_TARGET_MASKROT 1
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_X64
+
+#define LJ_ARCH_NAME "x64"
+#define LJ_ARCH_BITS 64
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#define LJ_ARCH_HASFPU 1
+#define LJ_ABI_WIN LJ_TARGET_WINDOWS
+#define LJ_TARGET_X64 1
+#define LJ_TARGET_X86ORX64 1
+#define LJ_TARGET_EHRETREG 0
+#define LJ_TARGET_JUMPRANGE 31 /* +-2^31 = +-2GB */
+#define LJ_TARGET_MASKSHIFT 1
+#define LJ_TARGET_MASKROT 1
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM
+
+#define LJ_ARCH_NAME "arm"
+#define LJ_ARCH_BITS 32
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#define LJ_ARCH_HASFPU 0
+#define LJ_ABI_SOFTFP 1
+#define LJ_ABI_EABI 1
+#define LJ_TARGET_ARM 1
+#define LJ_TARGET_EHRETREG 0
+#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
+#define LJ_TARGET_MASKSHIFT 0
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
+#if LJ_TARGET_OSX
+/* Runtime code generation is restricted on iOS. Complain to Apple, not me. */
+#define LJ_ARCH_NOJIT 1
+#endif
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_PPC
+
+#define LJ_ARCH_NAME "ppc"
+#define LJ_ARCH_BITS 32
+#define LJ_ARCH_ENDIAN LUAJIT_BE
+#define LJ_ARCH_HASFPU 1
+#define LJ_TARGET_PPC 1
+#define LJ_TARGET_EHRETREG 3
+#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
+#define LJ_TARGET_MASKSHIFT 0
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. */
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL_SINGLE
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_PPCSPE
+
+#define LJ_ARCH_NAME "ppcspe"
+#define LJ_ARCH_BITS 32
+#define LJ_ARCH_ENDIAN LUAJIT_BE
+#define LJ_ARCH_HASFPU 1
+#define LJ_ABI_SOFTFP 1
+#define LJ_ABI_EABI 1
+#define LJ_TARGET_PPCSPE 1
+#define LJ_TARGET_EHRETREG 3
+#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
+#define LJ_TARGET_MASKSHIFT 0
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. */
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE
+#define LJ_ARCH_NOFFI 1 /* NYI: comparisons, calls. */
+#define LJ_ARCH_NOJIT 1
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_MIPS
+
+#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL)
+#define LJ_ARCH_NAME "mipsel"
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#else
+#define LJ_ARCH_NAME "mips"
+#define LJ_ARCH_ENDIAN LUAJIT_BE
+#endif
+#define LJ_ARCH_BITS 32
+#define LJ_ARCH_HASFPU 1
+#define LJ_TARGET_MIPS 1
+#define LJ_TARGET_EHRETREG 4
+#define LJ_TARGET_JUMPRANGE 27 /* 2*2^27 = 256MB-aligned region */
+#define LJ_TARGET_MASKSHIFT 1
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE
+
+#else
+#error "No target architecture defined"
+#endif
+
+#ifndef LJ_PAGESIZE
+#define LJ_PAGESIZE 4096
+#endif
+
+/* Check for minimum required compiler versions. */
+#if defined(__GNUC__)
+#if LJ_TARGET_X64
+#if __GNUC__ < 4
+#error "Need at least GCC 4.0 or newer"
+#endif
+#elif LJ_TARGET_ARM
+#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2)
+#error "Need at least GCC 4.2 or newer"
+#endif
+#elif LJ_TARGET_PPC
+#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 3)
+#error "Need at least GCC 4.3 or newer"
+#endif
+#else
+#if (__GNUC__ < 3) || ((__GNUC__ == 3) && __GNUC_MINOR__ < 4)
+#error "Need at least GCC 3.4 or newer"
+#endif
+#endif
+#endif
+
+/* Check target-specific constraints. */
+#ifndef _BUILDVM_H
+#if LJ_TARGET_ARM
+#if defined(__ARMEB__)
+#error "No support for big-endian ARM"
+#endif
+#if defined(__ARM_PCS_VFP)
+#error "No support for ARM hard-float ABI (yet)"
+#endif
+#if !(__ARM_EABI__ || LJ_TARGET_OSX)
+#error "Only ARM EABI or iOS 3.0+ ABI is supported"
+#endif
+#elif LJ_TARGET_PPC || LJ_TARGET_PPCSPE
+#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE)
+#error "No support for PowerPC CPUs without double-precision FPU"
+#endif
+#if defined(_LITTLE_ENDIAN)
+#error "No support for little-endian PowerPC"
+#endif
+#if defined(_LP64)
+#error "No support for PowerPC 64 bit mode"
+#endif
+#endif
+#endif
+
+/* Enable or disable the dual-number mode for the VM. */
+#if (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE && LUAJIT_NUMMODE == 2) || \
+ (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL && LUAJIT_NUMMODE == 1)
+#error "No support for this number mode on this architecture"
+#endif
+#if LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL || \
+ (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL_SINGLE && LUAJIT_NUMMODE != 1) || \
+ (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE_DUAL && LUAJIT_NUMMODE == 2)
+#define LJ_DUALNUM 1
+#else
+#define LJ_DUALNUM 0
+#endif
+
+/* Disable or enable the JIT compiler. */
+#if defined(LUAJIT_DISABLE_JIT) || defined(LJ_ARCH_NOJIT)
+#define LJ_HASJIT 0
+#else
+#define LJ_HASJIT 1
+#endif
+
+/* Disable or enable the FFI extension. */
+#if defined(LUAJIT_DISABLE_FFI) || defined(LJ_ARCH_NOFFI)
+#define LJ_HASFFI 0
+#else
+#define LJ_HASFFI 1
+#endif
+
+#define LJ_SOFTFP (!LJ_ARCH_HASFPU)
+
+#if LJ_ARCH_ENDIAN == LUAJIT_BE
+#define LJ_LE 0
+#define LJ_BE 1
+#define LJ_ENDIAN_SELECT(le, be) be
+#define LJ_ENDIAN_LOHI(lo, hi) hi lo
+#else
+#define LJ_LE 1
+#define LJ_BE 0
+#define LJ_ENDIAN_SELECT(le, be) le
+#define LJ_ENDIAN_LOHI(lo, hi) lo hi
+#endif
+
+#if LJ_ARCH_BITS == 32
+#define LJ_32 1
+#define LJ_64 0
+#else
+#define LJ_32 0
+#define LJ_64 1
+#endif
+
+/* Various workarounds for embedded operating systems. */
+#if defined(__ANDROID__) || defined(__symbian__)
+#define LUAJIT_NO_LOG2
+#endif
+#if defined(__symbian__)
+#define LUAJIT_NO_EXP2
+#endif
+
+#if defined(__symbian__) || (LJ_TARGET_ARM && LJ_TARGET_OSX)
+#define LUAJIT_NO_UNWIND
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_asm.c b/src/LuaJIT/src/lj_asm.c
new file mode 100644
index 000000000..4da1a0a37
--- /dev/null
+++ b/src/LuaJIT/src/lj_asm.c
@@ -0,0 +1,1818 @@
+/*
+** IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_asm_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_mcode.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_asm.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_target.h"
+
+/* -- Assembler state and common macros ----------------------------------- */
+
+/* Assembler state. */
+typedef struct ASMState {
+ RegCost cost[RID_MAX]; /* Reference and blended allocation cost for regs. */
+
+ MCode *mcp; /* Current MCode pointer (grows down). */
+ MCode *mclim; /* Lower limit for MCode memory + red zone. */
+
+ IRIns *ir; /* Copy of pointer to IR instructions/constants. */
+ jit_State *J; /* JIT compiler state. */
+
+#if LJ_TARGET_X86ORX64
+ x86ModRM mrm; /* Fused x86 address operand. */
+#endif
+
+ RegSet freeset; /* Set of free registers. */
+ RegSet modset; /* Set of registers modified inside the loop. */
+ RegSet weakset; /* Set of weakly referenced registers. */
+ RegSet phiset; /* Set of PHI registers. */
+
+ uint32_t flags; /* Copy of JIT compiler flags. */
+ int loopinv; /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */
+
+ int32_t evenspill; /* Next even spill slot. */
+ int32_t oddspill; /* Next odd spill slot (or 0). */
+
+ IRRef curins; /* Reference of current instruction. */
+ IRRef stopins; /* Stop assembly before hitting this instruction. */
+ IRRef orignins; /* Original T->nins. */
+
+ IRRef snapref; /* Current snapshot is active after this reference. */
+ IRRef snaprename; /* Rename highwater mark for snapshot check. */
+ SnapNo snapno; /* Current snapshot number. */
+ SnapNo loopsnapno; /* Loop snapshot number. */
+
+ IRRef fuseref; /* Fusion limit (loopref, 0 or FUSE_DISABLED). */
+ IRRef sectref; /* Section base reference (loopref or 0). */
+ IRRef loopref; /* Reference of LOOP instruction (or 0). */
+
+ BCReg topslot; /* Number of slots for stack check (unless 0). */
+ MSize gcsteps; /* Accumulated number of GC steps (per section). */
+
+ GCtrace *T; /* Trace to assemble. */
+ GCtrace *parent; /* Parent trace (or NULL). */
+
+ MCode *mcbot; /* Bottom of reserved MCode. */
+ MCode *mctop; /* Top of generated MCode. */
+ MCode *mcloop; /* Pointer to loop MCode (or NULL). */
+ MCode *invmcp; /* Points to invertible loop branch (or NULL). */
+ MCode *flagmcp; /* Pending opportunity to merge flag setting ins. */
+ MCode *realign; /* Realign loop if not NULL. */
+
+#ifdef RID_NUM_KREF
+ int32_t krefk[RID_NUM_KREF];
+#endif
+ IRRef1 phireg[RID_MAX]; /* PHI register references. */
+ uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent slot to RegSP map. */
+#if LJ_SOFTFP
+ uint16_t parentmaphi[LJ_MAX_JSLOTS]; /* Parent slot to hi RegSP map. */
+#endif
+} ASMState;
+
+#define IR(ref) (&as->ir[(ref)])
+
+#define ASMREF_TMP1 REF_TRUE /* Temp. register. */
+#define ASMREF_TMP2 REF_FALSE /* Temp. register. */
+#define ASMREF_L REF_NIL /* Stores register for L. */
+
+/* Check for variant to invariant references. */
+#define iscrossref(as, ref) ((ref) < as->sectref)
+
+/* Inhibit memory op fusion from variant to invariant references. */
+#define FUSE_DISABLED (~(IRRef)0)
+#define mayfuse(as, ref) ((ref) > as->fuseref)
+#define neverfuse(as) (as->fuseref == FUSE_DISABLED)
+#define canfuse(as, ir) (!neverfuse(as) && !irt_isphi((ir)->t))
+#define opisfusableload(o) \
+ ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
+ (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
+
+/* Sparse limit checks using a red zone before the actual limit. */
+#define MCLIM_REDZONE 64
+#define checkmclim(as) \
+ if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as)
+
+static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
+{
+ lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
+}
+
+#ifdef RID_NUM_KREF
+#define ra_iskref(ref) ((ref) < RID_NUM_KREF)
+#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref)))
+#define ra_krefk(as, ref) (as->krefk[(ref)])
+
+static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k)
+{
+ IRRef ref = (IRRef)(r - RID_MIN_KREF);
+ as->krefk[ref] = k;
+ as->cost[r] = REGCOST(ref, ref);
+}
+
+#else
+#define ra_iskref(ref) 0
+#define ra_krefreg(ref) RID_MIN_GPR
+#define ra_krefk(as, ref) 0
+#endif
+
+/* Arch-specific field offsets. */
+static const uint8_t field_ofs[IRFL__MAX+1] = {
+#define FLOFS(name, ofs) (uint8_t)(ofs),
+IRFLDEF(FLOFS)
+#undef FLOFS
+ 0
+};
+
+/* -- Target-specific instruction emitter --------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+#include "lj_emit_x86.h"
+#elif LJ_TARGET_ARM
+#include "lj_emit_arm.h"
+#elif LJ_TARGET_PPC
+#include "lj_emit_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "lj_emit_mips.h"
+#else
+#error "Missing instruction emitter for target CPU"
+#endif
+
+/* -- Register allocator debugging ---------------------------------------- */
+
+/* #define LUAJIT_DEBUG_RA */
+
+#ifdef LUAJIT_DEBUG_RA
+
+#include <stdio.h>
+#include <stdarg.h>
+
+#define RIDNAME(name) #name,
+static const char *const ra_regname[] = {
+ GPRDEF(RIDNAME)
+ FPRDEF(RIDNAME)
+ VRIDDEF(RIDNAME)
+ NULL
+};
+#undef RIDNAME
+
+static char ra_dbg_buf[65536];
+static char *ra_dbg_p;
+static char *ra_dbg_merge;
+static MCode *ra_dbg_mcp;
+
+static void ra_dstart(void)
+{
+ ra_dbg_p = ra_dbg_buf;
+ ra_dbg_merge = NULL;
+ ra_dbg_mcp = NULL;
+}
+
+static void ra_dflush(void)
+{
+ fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
+ ra_dstart();
+}
+
+static void ra_dprintf(ASMState *as, const char *fmt, ...)
+{
+ char *p;
+ va_list argp;
+ va_start(argp, fmt);
+ p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
+ ra_dbg_mcp = NULL;
+ p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
+ for (;;) {
+ const char *e = strchr(fmt, '$');
+ if (e == NULL) break;
+ memcpy(p, fmt, (size_t)(e-fmt));
+ p += e-fmt;
+ if (e[1] == 'r') {
+ Reg r = va_arg(argp, Reg) & RID_MASK;
+ if (r <= RID_MAX) {
+ const char *q;
+ for (q = ra_regname[r]; *q; q++)
+ *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
+ } else {
+ *p++ = '?';
+ lua_assert(0);
+ }
+ } else if (e[1] == 'f' || e[1] == 'i') {
+ IRRef ref;
+ if (e[1] == 'f')
+ ref = va_arg(argp, IRRef);
+ else
+ ref = va_arg(argp, IRIns *) - as->ir;
+ if (ref >= REF_BIAS)
+ p += sprintf(p, "%04d", ref - REF_BIAS);
+ else
+ p += sprintf(p, "K%03d", REF_BIAS - ref);
+ } else if (e[1] == 's') {
+ uint32_t slot = va_arg(argp, uint32_t);
+ p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
+ } else if (e[1] == 'x') {
+ p += sprintf(p, "%08x", va_arg(argp, int32_t));
+ } else {
+ lua_assert(0);
+ }
+ fmt = e+2;
+ }
+ va_end(argp);
+ while (*fmt)
+ *p++ = *fmt++;
+ *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
+ if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
+ fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
+ p = ra_dbg_buf;
+ }
+ ra_dbg_p = p;
+}
+
+#define RA_DBG_START() ra_dstart()
+#define RA_DBG_FLUSH() ra_dflush()
+#define RA_DBG_REF() \
+ do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
+ ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
+#define RA_DBGX(x) ra_dprintf x
+
+#else
+#define RA_DBG_START() ((void)0)
+#define RA_DBG_FLUSH() ((void)0)
+#define RA_DBG_REF() ((void)0)
+#define RA_DBGX(x) ((void)0)
+#endif
+
+/* -- Register allocator -------------------------------------------------- */
+
+#define ra_free(as, r) rset_set(as->freeset, (r))
+#define ra_modified(as, r) rset_set(as->modset, (r))
+#define ra_weak(as, r) rset_set(as->weakset, (r))
+#define ra_noweak(as, r) rset_clear(as->weakset, (r))
+
+#define ra_used(ir) (ra_hasreg((ir)->r) || ra_hasspill((ir)->s))
+
+/* Setup register allocator. */
+static void ra_setup(ASMState *as)
+{
+ Reg r;
+ /* Initially all regs (except the stack pointer) are free for use. */
+ as->freeset = RSET_INIT;
+ as->modset = RSET_EMPTY;
+ as->weakset = RSET_EMPTY;
+ as->phiset = RSET_EMPTY;
+ memset(as->phireg, 0, sizeof(as->phireg));
+ for (r = RID_MIN_GPR; r < RID_MAX; r++)
+ as->cost[r] = REGCOST(~0u, 0u);
+}
+
+/* Rematerialize constants. */
+static Reg ra_rematk(ASMState *as, IRRef ref)
+{
+ IRIns *ir;
+ Reg r;
+ if (ra_iskref(ref)) {
+ r = ra_krefreg(ref);
+ lua_assert(!rset_test(as->freeset, r));
+ ra_free(as, r);
+ ra_modified(as, r);
+ emit_loadi(as, r, ra_krefk(as, ref));
+ return r;
+ }
+ ir = IR(ref);
+ r = ir->r;
+ lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
+ ra_free(as, r);
+ ra_modified(as, r);
+ ir->r = RID_INIT; /* Do not keep any hint. */
+ RA_DBGX((as, "remat $i $r", ir, r));
+#if !LJ_SOFTFP
+ if (ir->o == IR_KNUM) {
+ emit_loadn(as, r, ir_knum(ir));
+ } else
+#endif
+ if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
+ ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */
+ emit_getgl(as, r, jit_base);
+ } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
+ lua_assert(irt_isnil(ir->t)); /* REF_NIL stores ASMREF_L register. */
+ emit_getgl(as, r, jit_L);
+#if LJ_64
+ } else if (ir->o == IR_KINT64) {
+ emit_loadu64(as, r, ir_kint64(ir)->u64);
+#endif
+ } else {
+ lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
+ ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
+ emit_loadi(as, r, ir->i);
+ }
+ return r;
+}
+
+/* Force a spill. Allocate a new spill slot if needed. */
+static int32_t ra_spill(ASMState *as, IRIns *ir)
+{
+ int32_t slot = ir->s;
+ if (!ra_hasspill(slot)) {
+ if (irt_is64(ir->t)) {
+ slot = as->evenspill;
+ as->evenspill += 2;
+ } else if (as->oddspill) {
+ slot = as->oddspill;
+ as->oddspill = 0;
+ } else {
+ slot = as->evenspill;
+ as->oddspill = slot+1;
+ as->evenspill += 2;
+ }
+ if (as->evenspill > 256)
+ lj_trace_err(as->J, LJ_TRERR_SPILLOV);
+ ir->s = (uint8_t)slot;
+ }
+ return sps_scale(slot);
+}
+
+/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
+static Reg ra_releasetmp(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ Reg r = ir->r;
+ lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
+ ra_free(as, r);
+ ra_modified(as, r);
+ ir->r = RID_INIT;
+ return r;
+}
+
+/* Restore a register (marked as free). Rematerialize or force a spill. */
+static Reg ra_restore(ASMState *as, IRRef ref)
+{
+ if (emit_canremat(ref)) {
+ return ra_rematk(as, ref);
+ } else {
+ IRIns *ir = IR(ref);
+ int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */
+ Reg r = ir->r;
+ lua_assert(ra_hasreg(r));
+ ra_sethint(ir->r, r); /* Keep hint. */
+ ra_free(as, r);
+ if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */
+ ra_modified(as, r);
+ RA_DBGX((as, "restore $i $r", ir, r));
+ emit_spload(as, ir, r, ofs);
+ }
+ return r;
+ }
+}
+
+/* Save a register to a spill slot. */
+static void ra_save(ASMState *as, IRIns *ir, Reg r)
+{
+ RA_DBGX((as, "save $i $r", ir, r));
+ emit_spstore(as, ir, r, sps_scale(ir->s));
+}
+
+#define MINCOST(name) \
+ if (rset_test(RSET_ALL, RID_##name) && \
+ LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
+ cost = as->cost[RID_##name];
+
+/* Evict the register with the lowest cost, forcing a restore. */
+static Reg ra_evict(ASMState *as, RegSet allow)
+{
+ IRRef ref;
+ RegCost cost = ~(RegCost)0;
+ lua_assert(allow != RSET_EMPTY);
+ if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
+ GPRDEF(MINCOST)
+ } else {
+ FPRDEF(MINCOST)
+ }
+ ref = regcost_ref(cost);
+ lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins));
+ /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
+ if (!irref_isk(ref) && (as->weakset & allow)) {
+ IRIns *ir = IR(ref);
+ if (!rset_test(as->weakset, ir->r))
+ ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
+ }
+ return ra_restore(as, ref);
+}
+
+/* Pick any register (marked as free). Evict on-demand. */
+static Reg ra_pick(ASMState *as, RegSet allow)
+{
+ RegSet pick = as->freeset & allow;
+ if (!pick)
+ return ra_evict(as, allow);
+ else
+ return rset_picktop(pick);
+}
+
+/* Get a scratch register (marked as free). */
+static Reg ra_scratch(ASMState *as, RegSet allow)
+{
+ Reg r = ra_pick(as, allow);
+ ra_modified(as, r);
+ RA_DBGX((as, "scratch $r", r));
+ return r;
+}
+
+/* Evict all registers from a set (if not free). */
+static void ra_evictset(ASMState *as, RegSet drop)
+{
+ RegSet work;
+ as->modset |= drop;
+#if !LJ_SOFTFP
+ work = (drop & ~as->freeset) & RSET_FPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+#endif
+ work = (drop & ~as->freeset) & RSET_GPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+}
+
+/* Evict (rematerialize) all registers allocated to constants. */
+static void ra_evictk(ASMState *as)
+{
+ RegSet work;
+#if !LJ_SOFTFP
+ work = ~as->freeset & RSET_FPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ if (emit_canremat(ref) && irref_isk(ref)) {
+ ra_rematk(as, ref);
+ checkmclim(as);
+ }
+ rset_clear(work, r);
+ }
+#endif
+ work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ if (emit_canremat(ref) && irref_isk(ref)) {
+ ra_rematk(as, ref);
+ checkmclim(as);
+ }
+ rset_clear(work, r);
+ }
+}
+
+#ifdef RID_NUM_KREF
+/* Allocate a register for a constant. */
+static Reg ra_allock(ASMState *as, int32_t k, RegSet allow)
+{
+ /* First try to find a register which already holds the same constant. */
+ RegSet pick, work = ~as->freeset & RSET_GPR;
+ Reg r;
+ while (work) {
+ IRRef ref;
+ r = rset_pickbot(work);
+ ref = regcost_ref(as->cost[r]);
+ if (ref < ASMREF_L &&
+ k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
+ return r;
+ rset_clear(work, r);
+ }
+ pick = as->freeset & allow;
+ if (pick) {
+ /* Constants should preferably get unmodified registers. */
+ if ((pick & ~as->modset))
+ pick &= ~as->modset;
+ r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */
+ } else {
+ r = ra_evict(as, allow);
+ }
+ RA_DBGX((as, "allock $x $r", k, r));
+ ra_setkref(as, r, k);
+ rset_clear(as->freeset, r);
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate a specific register for a constant. */
+static void ra_allockreg(ASMState *as, int32_t k, Reg r)
+{
+ Reg kr = ra_allock(as, k, RID2RSET(r));
+ if (kr != r) {
+ IRIns irdummy;
+ irdummy.t.irt = IRT_INT;
+ ra_scratch(as, RID2RSET(r));
+ emit_movrr(as, &irdummy, r, kr);
+ }
+}
+#else
+#define ra_allockreg(as, k, r) emit_loadi(as, (r), (k))
+#endif
+
+/* Allocate a register for ref from the allowed set of registers.
+** Note: this function assumes the ref does NOT have a register yet!
+** Picks an optimal register, sets the cost and marks the register as non-free.
+*/
+static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ RegSet pick = as->freeset & allow;
+ Reg r;
+ lua_assert(ra_noreg(ir->r));
+ if (pick) {
+ /* First check register hint from propagation or PHI. */
+ if (ra_hashint(ir->r)) {
+ r = ra_gethint(ir->r);
+ if (rset_test(pick, r)) /* Use hint register if possible. */
+ goto found;
+ /* Rematerialization is cheaper than missing a hint. */
+ if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
+ ra_rematk(as, regcost_ref(as->cost[r]));
+ goto found;
+ }
+ RA_DBGX((as, "hintmiss $f $r", ref, r));
+ }
+ /* Invariants should preferably get unmodified registers. */
+ if (ref < as->loopref && !irt_isphi(ir->t)) {
+ if ((pick & ~as->modset))
+ pick &= ~as->modset;
+ r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */
+ } else {
+ /* We've got plenty of regs, so get callee-save regs if possible. */
+ if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
+ pick &= ~RSET_SCRATCH;
+ r = rset_picktop(pick);
+ }
+ } else {
+ r = ra_evict(as, allow);
+ }
+found:
+ RA_DBGX((as, "alloc $f $r", ref, r));
+ ir->r = (uint8_t)r;
+ rset_clear(as->freeset, r);
+ ra_noweak(as, r);
+ as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
+ return r;
+}
+
+/* Allocate a register on-demand. */
+static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ /* Note: allow is ignored if the register is already allocated. */
+ if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Rename register allocation and emit move. */
+static void ra_rename(ASMState *as, Reg down, Reg up)
+{
+ IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]);
+ IRIns *ir = IR(ref);
+ ir->r = (uint8_t)up;
+ as->cost[down] = 0;
+ lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR));
+ lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up));
+ ra_free(as, down); /* 'down' is free ... */
+ ra_modified(as, down);
+ rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */
+ ra_noweak(as, up);
+ RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
+ emit_movrr(as, ir, down, up); /* Backwards codegen needs inverse move. */
+ if (!ra_hasspill(IR(ref)->s)) { /* Add the rename to the IR. */
+ lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno);
+ ren = tref_ref(lj_ir_emit(as->J));
+ as->ir = as->T->ir; /* The IR may have been reallocated. */
+ IR(ren)->r = (uint8_t)down;
+ IR(ren)->s = SPS_NONE;
+ }
+}
+
+/* Pick a destination register (marked as free).
+** Caveat: allow is ignored if there's already a destination register.
+** Use ra_destreg() to get a specific register.
+*/
+static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
+{
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ } else {
+ if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
+ dest = ra_gethint(dest);
+ ra_modified(as, dest);
+ RA_DBGX((as, "dest $r", dest));
+ } else {
+ dest = ra_scratch(as, allow);
+ }
+ ir->r = dest;
+ }
+ if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
+ return dest;
+}
+
+/* Force a specific destination register (marked as free). */
+static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
+{
+ Reg dest = ra_dest(as, ir, RID2RSET(r));
+ if (dest != r) {
+ ra_scratch(as, RID2RSET(r));
+ emit_movrr(as, ir, dest, r);
+ }
+}
+
+#if LJ_TARGET_X86ORX64
+/* Propagate dest register to left reference. Emit moves as needed.
+** This is a required fixup step for all 2-operand machine instructions.
+*/
+static void ra_left(ASMState *as, Reg dest, IRRef lref)
+{
+ IRIns *ir = IR(lref);
+ Reg left = ir->r;
+ if (ra_noreg(left)) {
+ if (irref_isk(lref)) {
+ if (ir->o == IR_KNUM) {
+ cTValue *tv = ir_knum(ir);
+ /* FP remat needs a load except for +0. Still better than eviction. */
+ if (tvispzero(tv) || !(as->freeset & RSET_FPR)) {
+ emit_loadn(as, dest, tv);
+ return;
+ }
+#if LJ_64
+ } else if (ir->o == IR_KINT64) {
+ emit_loadu64(as, dest, ir_kint64(ir)->u64);
+ return;
+#endif
+ } else {
+ lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
+ ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
+ emit_loadi(as, dest, ir->i);
+ return;
+ }
+ }
+ if (!ra_hashint(left) && !iscrossref(as, lref))
+ ra_sethint(ir->r, dest); /* Propagate register hint. */
+ left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
+ }
+ ra_noweak(as, left);
+ /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
+ if (dest != left) {
+ /* Use register renaming if dest is the PHI reg. */
+ if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
+ ra_modified(as, left);
+ ra_rename(as, left, dest);
+ } else {
+ emit_movrr(as, ir, dest, left);
+ }
+ }
+}
+#else
+/* Similar to ra_left, except we override any hints. */
+static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
+{
+ IRIns *ir = IR(lref);
+ Reg left = ir->r;
+ if (ra_noreg(left)) {
+ ra_sethint(ir->r, dest); /* Propagate register hint. */
+ left = ra_allocref(as, lref,
+ (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
+ }
+ ra_noweak(as, left);
+ if (dest != left) {
+ /* Use register renaming if dest is the PHI reg. */
+ if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
+ ra_modified(as, left);
+ ra_rename(as, left, dest);
+ } else {
+ emit_movrr(as, ir, dest, left);
+ }
+ }
+}
+#endif
+
+#if !LJ_TARGET_X86ORX64
+/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
+static void ra_destpair(ASMState *as, IRIns *ir)
+{
+ Reg destlo = ir->r, desthi = (ir+1)->r;
+ /* First spill unrelated refs blocking the destination registers. */
+ if (!rset_test(as->freeset, RID_RETLO) &&
+ destlo != RID_RETLO && desthi != RID_RETLO)
+ ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
+ if (!rset_test(as->freeset, RID_RETHI) &&
+ destlo != RID_RETHI && desthi != RID_RETHI)
+ ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
+ /* Next free the destination registers (if any). */
+ if (ra_hasreg(destlo)) {
+ ra_free(as, destlo);
+ ra_modified(as, destlo);
+ } else {
+ destlo = RID_RETLO;
+ }
+ if (ra_hasreg(desthi)) {
+ ra_free(as, desthi);
+ ra_modified(as, desthi);
+ } else {
+ desthi = RID_RETHI;
+ }
+ /* Check for conflicts and shuffle the registers as needed. */
+ if (destlo == RID_RETHI) {
+ if (desthi == RID_RETLO) {
+ emit_movrr(as, ir, RID_RETHI, RID_TMP);
+ emit_movrr(as, ir, RID_RETLO, RID_RETHI);
+ emit_movrr(as, ir, RID_TMP, RID_RETLO);
+ } else {
+ emit_movrr(as, ir, RID_RETHI, RID_RETLO);
+ if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
+ }
+ } else if (desthi == RID_RETLO) {
+ emit_movrr(as, ir, RID_RETLO, RID_RETHI);
+ if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
+ } else {
+ if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
+ if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
+ }
+ /* Restore spill slots (if any). */
+ if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
+ if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
+}
+#endif
+
+/* -- Snapshot handling -------------------------------------------------- */
+
+/* Can we rematerialize a KNUM instead of forcing a spill? */
+static int asm_snap_canremat(ASMState *as)
+{
+ Reg r;
+ for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
+ if (irref_isk(regcost_ref(as->cost[r])))
+ return 1;
+ return 0;
+}
+
+/* Allocate register or spill slot for a ref that escapes to a snapshot. */
+static void asm_snap_alloc1(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (!ra_used(ir)) {
+ RegSet allow = (!LJ_SOFTFP && irt_isnum(ir->t)) ? RSET_FPR : RSET_GPR;
+ /* Get a weak register if we have a free one or can rematerialize. */
+ if ((as->freeset & allow) ||
+ (allow == RSET_FPR && asm_snap_canremat(as))) {
+ Reg r = ra_allocref(as, ref, allow); /* Allocate a register. */
+ if (!irt_isphi(ir->t))
+ ra_weak(as, r); /* But mark it as weakly referenced. */
+ checkmclim(as);
+ RA_DBGX((as, "snapreg $f $r", ref, ir->r));
+ } else {
+ ra_spill(as, ir); /* Otherwise force a spill slot. */
+ RA_DBGX((as, "snapspill $f $s", ref, ir->s));
+ }
+ }
+}
+
+/* Allocate refs escaping to a snapshot. */
+static void asm_snap_alloc(ASMState *as)
+{
+ SnapShot *snap = &as->T->snap[as->snapno];
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef ref = snap_ref(sn);
+ if (!irref_isk(ref)) {
+ asm_snap_alloc1(as, ref);
+ if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
+ lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP);
+ asm_snap_alloc1(as, ref+1);
+ }
+ }
+ }
+}
+
+/* All guards for a snapshot use the same exitno. This is currently the
+** same as the snapshot number. Since the exact origin of the exit cannot
+** be determined, all guards for the same snapshot must exit with the same
+** RegSP mapping.
+** A renamed ref which has been used in a prior guard for the same snapshot
+** would cause an inconsistency. The easy way out is to force a spill slot.
+*/
+static int asm_snap_checkrename(ASMState *as, IRRef ren)
+{
+ SnapShot *snap = &as->T->snap[as->snapno];
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef ref = snap_ref(sn);
+ if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) {
+ IRIns *ir = IR(ref);
+ ra_spill(as, ir); /* Register renamed, so force a spill slot. */
+ RA_DBGX((as, "snaprensp $f $s", ref, ir->s));
+ return 1; /* Found. */
+ }
+ }
+ return 0; /* Not found. */
+}
+
+/* Prepare snapshot for next guard instruction. */
+static void asm_snap_prep(ASMState *as)
+{
+ if (as->curins < as->snapref) {
+ do {
+ lua_assert(as->snapno != 0);
+ as->snapno--;
+ as->snapref = as->T->snap[as->snapno].ref;
+ } while (as->curins < as->snapref);
+ asm_snap_alloc(as);
+ as->snaprename = as->T->nins;
+ } else {
+ /* Process any renames above the highwater mark. */
+ for (; as->snaprename < as->T->nins; as->snaprename++) {
+ IRIns *ir = IR(as->snaprename);
+ if (asm_snap_checkrename(as, ir->op1))
+ ir->op2 = REF_BIAS-1; /* Kill rename. */
+ }
+ }
+}
+
+/* -- Miscellaneous helpers ----------------------------------------------- */
+
+/* Collect arguments from CALL* and CARG instructions. */
+static void asm_collectargs(ASMState *as, IRIns *ir,
+ const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n = CCI_NARGS(ci);
+ lua_assert(n <= CCI_NARGS_MAX);
+ if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
+ while (n-- > 1) {
+ ir = IR(ir->op1);
+ lua_assert(ir->o == IR_CARG);
+ args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
+ }
+ args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
+ lua_assert(IR(ir->op1)->o != IR_CARG);
+}
+
+/* Reconstruct CCallInfo flags for CALLX*. */
+static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
+{
+ uint32_t nargs = 0;
+ if (ir->op1 != REF_NIL) { /* Count number of arguments first. */
+ IRIns *ira = IR(ir->op1);
+ nargs++;
+ while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
+ }
+#if LJ_HASFFI
+ if (IR(ir->op2)->o == IR_CARG) { /* Copy calling convention info. */
+ CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
+ CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
+ nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
+#if LJ_TARGET_X86
+ nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
+#endif
+ }
+#endif
+ return (nargs | (ir->t.irt << CCI_OTSHIFT));
+}
+
+/* Calculate stack adjustment. */
+static int32_t asm_stack_adjust(ASMState *as)
+{
+ if (as->evenspill <= SPS_FIXED)
+ return 0;
+ return sps_scale(sps_align(as->evenspill));
+}
+
+/* Must match with hash*() in lj_tab.c. */
+static uint32_t ir_khash(IRIns *ir)
+{
+ uint32_t lo, hi;
+ if (irt_isstr(ir->t)) {
+ return ir_kstr(ir)->hash;
+ } else if (irt_isnum(ir->t)) {
+ lo = ir_knum(ir)->u32.lo;
+ hi = ir_knum(ir)->u32.hi << 1;
+ } else if (irt_ispri(ir->t)) {
+ lua_assert(!irt_isnil(ir->t));
+ return irt_type(ir->t)-IRT_FALSE;
+ } else {
+ lua_assert(irt_isgcv(ir->t));
+ lo = u32ptr(ir_kgc(ir));
+ hi = lo + HASH_BIAS;
+ }
+ return hashrot(lo, hi);
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);
+
+static void asm_snew(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
+ IRRef args[3];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* const char *str */
+ args[2] = ir->op2; /* size_t len */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+}
+
+static void asm_tnew(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
+ IRRef args[2];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* uint32_t ahsize */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCtab * */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
+}
+
+static void asm_tdup(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
+ IRRef args[2];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* const GCtab *kt */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCtab * */
+ asm_gencall(as, ci, args);
+}
+
+/* -- PHI and loop handling ----------------------------------------------- */
+
+/* Break a PHI cycle by renaming to a free register (evict if needed). */
+static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
+ RegSet allow)
+{
+ RegSet candidates = blocked & allow;
+ if (candidates) { /* If this register file has candidates. */
+ /* Note: the set for ra_pick cannot be empty, since each register file
+ ** has some registers never allocated to PHIs.
+ */
+ Reg down, up = ra_pick(as, ~blocked & allow); /* Get a free register. */
+ if (candidates & ~blockedby) /* Optimize shifts, else it's a cycle. */
+ candidates = candidates & ~blockedby;
+ down = rset_picktop(candidates); /* Pick candidate PHI register. */
+ ra_rename(as, down, up); /* And rename it to the free register. */
+ }
+}
+
+/* PHI register shuffling.
+**
+** The allocator tries hard to preserve PHI register assignments across
+** the loop body. Most of the time this loop does nothing, since there
+** are no register mismatches.
+**
+** If a register mismatch is detected and ...
+** - the register is currently free: rename it.
+** - the register is blocked by an invariant: restore/remat and rename it.
+** - Otherwise the register is used by another PHI, so mark it as blocked.
+**
+** The renames are order-sensitive, so just retry the loop if a register
+** is marked as blocked, but has been freed in the meantime. A cycle is
+** detected if all of the blocked registers are allocated. To break the
+** cycle rename one of them to a free register and retry.
+**
+** Note that PHI spill slots are kept in sync and don't need to be shuffled.
+*/
+static void asm_phi_shuffle(ASMState *as)
+{
+ RegSet work;
+
+ /* Find and resolve PHI register mismatches. */
+ for (;;) {
+ RegSet blocked = RSET_EMPTY;
+ RegSet blockedby = RSET_EMPTY;
+ RegSet phiset = as->phiset;
+ while (phiset) { /* Check all left PHI operand registers. */
+ Reg r = rset_pickbot(phiset);
+ IRIns *irl = IR(as->phireg[r]);
+ Reg left = irl->r;
+ if (r != left) { /* Mismatch? */
+ if (!rset_test(as->freeset, r)) { /* PHI register blocked? */
+ IRRef ref = regcost_ref(as->cost[r]);
+ /* Blocked by other PHI (w/reg)? */
+ if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
+ rset_set(blocked, r);
+ if (ra_hasreg(left))
+ rset_set(blockedby, left);
+ left = RID_NONE;
+ } else { /* Otherwise grab register from invariant. */
+ ra_restore(as, ref);
+ checkmclim(as);
+ }
+ }
+ if (ra_hasreg(left)) {
+ ra_rename(as, left, r);
+ checkmclim(as);
+ }
+ }
+ rset_clear(phiset, r);
+ }
+ if (!blocked) break; /* Finished. */
+ if (!(as->freeset & blocked)) { /* Break cycles if none are free. */
+ asm_phi_break(as, blocked, blockedby, RSET_GPR);
+ if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
+ checkmclim(as);
+ } /* Else retry some more renames. */
+ }
+
+ /* Restore/remat invariants whose registers are modified inside the loop. */
+ work = as->modset & ~(as->freeset | as->phiset);
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+
+ /* Allocate and save all unsaved PHI regs and clear marks. */
+ work = as->phiset;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef lref = as->phireg[r];
+ IRIns *ir = IR(lref);
+ if (ra_hasspill(ir->s)) { /* Left PHI gained a spill slot? */
+ irt_clearmark(ir->t); /* Handled here, so clear marker now. */
+ ra_alloc1(as, lref, RID2RSET(r));
+ ra_save(as, ir, r); /* Save to spill slot inside the loop. */
+ checkmclim(as);
+ }
+ rset_clear(work, r);
+ }
+}
+
+/* Copy unsynced left/right PHI spill slots. Rarely needed. */
+static void asm_phi_copyspill(ASMState *as)
+{
+ int need = 0;
+ IRIns *ir;
+ for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
+ if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
+ need |= irt_isfp(ir->t) ? 2 : 1; /* Unsynced spill slot? */
+ if ((need & 1)) { /* Copy integer spill slots. */
+#if !LJ_TARGET_X86ORX64
+ Reg r = RID_TMP;
+#else
+ Reg r = RID_RET;
+ if ((as->freeset & RSET_GPR))
+ r = rset_pickbot((as->freeset & RSET_GPR));
+ else
+ emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+#endif
+ for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
+ if (ra_hasspill(ir->s)) {
+ IRIns *irl = IR(ir->op1);
+ if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
+ emit_spstore(as, irl, r, sps_scale(irl->s));
+ emit_spload(as, ir, r, sps_scale(ir->s));
+ }
+ }
+ }
+#if LJ_TARGET_X86ORX64
+ if (!rset_test(as->freeset, r))
+ emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+#endif
+ }
+#if !LJ_SOFTFP
+ if ((need & 2)) { /* Copy FP spill slots. */
+#if LJ_TARGET_X86
+ Reg r = RID_XMM0;
+#else
+ Reg r = RID_FPRET;
+#endif
+ if ((as->freeset & RSET_FPR))
+ r = rset_pickbot((as->freeset & RSET_FPR));
+ if (!rset_test(as->freeset, r))
+ emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+ for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
+ if (ra_hasspill(ir->s)) {
+ IRIns *irl = IR(ir->op1);
+ if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
+ emit_spstore(as, irl, r, sps_scale(irl->s));
+ emit_spload(as, ir, r, sps_scale(ir->s));
+ }
+ }
+ }
+ if (!rset_test(as->freeset, r))
+ emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+ }
+#endif
+}
+
+/* Emit renames for left PHIs which are only spilled outside the loop. */
+static void asm_phi_fixup(ASMState *as)
+{
+ RegSet work = as->phiset;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef lref = as->phireg[r];
+ IRIns *ir = IR(lref);
+ /* Left PHI gained a spill slot before the loop? */
+ if (irt_ismarked(ir->t) && ra_hasspill(ir->s)) {
+ IRRef ren;
+ lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno);
+ ren = tref_ref(lj_ir_emit(as->J));
+ as->ir = as->T->ir; /* The IR may have been reallocated. */
+ IR(ren)->r = (uint8_t)r;
+ IR(ren)->s = SPS_NONE;
+ }
+ irt_clearmark(ir->t); /* Always clear marker. */
+ rset_clear(work, r);
+ }
+}
+
+/* Setup right PHI reference. */
+static void asm_phi(ASMState *as, IRIns *ir)
+{
+ RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
+ ~as->phiset;
+ RegSet afree = (as->freeset & allow);
+ IRIns *irl = IR(ir->op1);
+ IRIns *irr = IR(ir->op2);
+ /* Spill slot shuffling is not implemented yet (but rarely needed). */
+ if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
+ lj_trace_err(as->J, LJ_TRERR_NYIPHI);
+ /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
+ if ((afree & (afree-1))) { /* Two or more free registers? */
+ Reg r;
+ if (ra_noreg(irr->r)) { /* Get a register for the right PHI. */
+ r = ra_allocref(as, ir->op2, allow);
+ } else { /* Duplicate right PHI, need a copy (rare). */
+ r = ra_scratch(as, allow);
+ emit_movrr(as, irr, r, irr->r);
+ }
+ ir->r = (uint8_t)r;
+ rset_set(as->phiset, r);
+ as->phireg[r] = (IRRef1)ir->op1;
+ irt_setmark(irl->t); /* Marks left PHIs _with_ register. */
+ if (ra_noreg(irl->r))
+ ra_sethint(irl->r, r); /* Set register hint for left PHI. */
+ } else { /* Otherwise allocate a spill slot. */
+ /* This is overly restrictive, but it triggers only on synthetic code. */
+ if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
+ lj_trace_err(as->J, LJ_TRERR_NYIPHI);
+ ra_spill(as, ir);
+ irr->s = ir->s; /* Set right PHI spill slot. Sync left slot later. */
+ }
+}
+
+static void asm_gc_check(ASMState *as);
+static void asm_loop_fixup(ASMState *as);
+
+/* Middle part of a loop. */
+static void asm_loop(ASMState *as)
+{
+ MCode *mcspill;
+ /* LOOP is a guard, so the snapno is up to date. */
+ as->loopsnapno = as->snapno;
+ if (as->gcsteps)
+ asm_gc_check(as);
+ /* LOOP marks the transition from the variant to the invariant part. */
+ as->flagmcp = as->invmcp = NULL;
+ as->sectref = 0;
+ if (!neverfuse(as)) as->fuseref = 0;
+ asm_phi_shuffle(as);
+ mcspill = as->mcp;
+ asm_phi_copyspill(as);
+ asm_loop_fixup(as);
+ as->mcloop = as->mcp;
+ RA_DBGX((as, "===== LOOP ====="));
+ if (!as->realign) RA_DBG_FLUSH();
+ if (as->mcp != mcspill)
+ emit_jmp(as, mcspill);
+}
+
+/* -- Target-specific assembler ------------------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+#include "lj_asm_x86.h"
+#elif LJ_TARGET_ARM
+#include "lj_asm_arm.h"
+#elif LJ_TARGET_PPC
+#include "lj_asm_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "lj_asm_mips.h"
+#else
+#error "Missing assembler for target CPU"
+#endif
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Head of a root trace. */
+static void asm_head_root(ASMState *as)
+{
+ int32_t spadj;
+ asm_head_root_base(as);
+ emit_setvmstate(as, (int32_t)as->T->traceno);
+ spadj = asm_stack_adjust(as);
+ as->T->spadjust = (uint16_t)spadj;
+ emit_spsub(as, spadj);
+ /* Root traces assume a checked stack for the starting proto. */
+ as->T->topslot = gcref(as->T->startpt)->pt.framesize;
+}
+
+/* Get RegSP for parent slot. */
+static LJ_AINLINE RegSP asm_head_parentrs(ASMState *as, IRIns *ir)
+{
+#if LJ_SOFTFP
+ if (ir->o == IR_HIOP) return as->parentmaphi[(ir-1)->op1];
+#endif
+ return as->parentmap[ir->op1];
+}
+
+/* Head of a side trace.
+**
+** The current simplistic algorithm requires that all slots inherited
+** from the parent are live in a register between pass 2 and pass 3. This
+** avoids the complexity of stack slot shuffling. But of course this may
+** overflow the register set in some cases and cause the dreaded error:
+** "NYI: register coalescing too complex". A refined algorithm is needed.
+*/
+static void asm_head_side(ASMState *as)
+{
+ IRRef1 sloadins[RID_MAX];
+ RegSet allow = RSET_ALL; /* Inverse of all coalesced registers. */
+ RegSet live = RSET_EMPTY; /* Live parent registers. */
+ IRIns *irp = &as->parent->ir[REF_BASE]; /* Parent base. */
+ int32_t spadj, spdelta;
+ int pass2 = 0;
+ int pass3 = 0;
+ IRRef i;
+
+ allow = asm_head_side_base(as, irp, allow);
+
+ /* Scan all parent SLOADs and collect register dependencies. */
+ for (i = as->stopins; i > REF_BASE; i--) {
+ IRIns *ir = IR(i);
+ RegSP rs;
+ lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
+ (LJ_SOFTFP && ir->o == IR_HIOP));
+ rs = asm_head_parentrs(as, ir);
+ if (ra_hasreg(ir->r)) {
+ rset_clear(allow, ir->r);
+ if (ra_hasspill(ir->s)) {
+ ra_save(as, ir, ir->r);
+ checkmclim(as);
+ }
+ } else if (ra_hasspill(ir->s)) {
+ irt_setmark(ir->t);
+ pass2 = 1;
+ }
+ if (ir->r == rs) { /* Coalesce matching registers right now. */
+ ra_free(as, ir->r);
+ } else if (ra_hasspill(regsp_spill(rs))) {
+ if (ra_hasreg(ir->r))
+ pass3 = 1;
+ } else if (ra_used(ir)) {
+ sloadins[rs] = (IRRef1)i;
+ rset_set(live, rs); /* Block live parent register. */
+ }
+ }
+
+ /* Calculate stack frame adjustment. */
+ spadj = asm_stack_adjust(as);
+ spdelta = spadj - (int32_t)as->parent->spadjust;
+ if (spdelta < 0) { /* Don't shrink the stack frame. */
+ spadj = (int32_t)as->parent->spadjust;
+ spdelta = 0;
+ }
+ as->T->spadjust = (uint16_t)spadj;
+
+ /* Reload spilled target registers. */
+ if (pass2) {
+ for (i = as->stopins; i > REF_BASE; i--) {
+ IRIns *ir = IR(i);
+ if (irt_ismarked(ir->t)) {
+ RegSet mask;
+ Reg r;
+ RegSP rs;
+ irt_clearmark(ir->t);
+ rs = asm_head_parentrs(as, ir);
+ if (!ra_hasspill(regsp_spill(rs)))
+ ra_sethint(ir->r, rs); /* Hint may be gone, set it again. */
+ else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
+ continue; /* Same spill slot, do nothing. */
+ mask = ((!LJ_SOFTFP && irt_isnum(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
+ if (mask == RSET_EMPTY)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ r = ra_allocref(as, i, mask);
+ ra_save(as, ir, r);
+ rset_clear(allow, r);
+ if (r == rs) { /* Coalesce matching registers right now. */
+ ra_free(as, r);
+ rset_clear(live, r);
+ } else if (ra_hasspill(regsp_spill(rs))) {
+ pass3 = 1;
+ }
+ checkmclim(as);
+ }
+ }
+ }
+
+ /* Store trace number and adjust stack frame relative to the parent. */
+ emit_setvmstate(as, (int32_t)as->T->traceno);
+ emit_spsub(as, spdelta);
+
+#if !LJ_TARGET_X86ORX64
+ /* Restore BASE register from parent spill slot. */
+ if (ra_hasspill(irp->s))
+ emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
+#endif
+
+ /* Restore target registers from parent spill slots. */
+ if (pass3) {
+ RegSet work = ~as->freeset & RSET_ALL;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ IRIns *ir = IR(regcost_ref(as->cost[r]));
+ RegSP rs = asm_head_parentrs(as, ir);
+ rset_clear(work, r);
+ if (ra_hasspill(regsp_spill(rs))) {
+ int32_t ofs = sps_scale(regsp_spill(rs));
+ ra_free(as, r);
+ emit_spload(as, ir, r, ofs);
+ checkmclim(as);
+ }
+ }
+ }
+
+ /* Shuffle registers to match up target regs with parent regs. */
+ for (;;) {
+ RegSet work;
+
+ /* Repeatedly coalesce free live registers by moving to their target. */
+ while ((work = as->freeset & live) != RSET_EMPTY) {
+ Reg rp = rset_pickbot(work);
+ IRIns *ir = IR(sloadins[rp]);
+ rset_clear(live, rp);
+ rset_clear(allow, rp);
+ ra_free(as, ir->r);
+ emit_movrr(as, ir, ir->r, rp);
+ checkmclim(as);
+ }
+
+ /* We're done if no live registers remain. */
+ if (live == RSET_EMPTY)
+ break;
+
+ /* Break cycles by renaming one target to a temp. register. */
+ if (live & RSET_GPR) {
+ RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
+ if (tmpset == RSET_EMPTY)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
+ }
+ if (!LJ_SOFTFP && (live & RSET_FPR)) {
+ RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
+ if (tmpset == RSET_EMPTY)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
+ }
+ checkmclim(as);
+ /* Continue with coalescing to fix up the broken cycle(s). */
+ }
+
+ /* Inherit top stack slot already checked by parent trace. */
+ as->T->topslot = as->parent->topslot;
+ if (as->topslot > as->T->topslot) { /* Need to check for higher slot? */
+#ifdef EXITSTATE_CHECKEXIT
+ /* Highest exit + 1 indicates stack check. */
+ ExitNo exitno = as->T->nsnap;
+#else
+ /* Reuse the parent exit in the context of the parent trace. */
+ ExitNo exitno = as->J->exitno;
+#endif
+ as->T->topslot = (uint8_t)as->topslot; /* Remember for child traces. */
+ asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
+ }
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Get base slot for a snapshot. */
+static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ MSize n;
+ for (n = snap->nent; n > 0; n--) {
+ SnapEntry sn = map[n-1];
+ if ((sn & SNAP_FRAME)) {
+ *gotframe = 1;
+ return snap_slot(sn);
+ }
+ }
+ return 0;
+}
+
+/* Link to another trace. */
+static void asm_tail_link(ASMState *as)
+{
+ SnapNo snapno = as->T->nsnap-1; /* Last snapshot. */
+ SnapShot *snap = &as->T->snap[snapno];
+ int gotframe = 0;
+ BCReg baseslot = asm_baseslot(as, snap, &gotframe);
+
+ as->topslot = snap->topslot;
+ checkmclim(as);
+ ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));
+
+ if (as->T->link == 0) {
+ /* Setup fixed registers for exit to interpreter. */
+ const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]);
+ int32_t mres;
+ if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */
+ BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
+ if (bc_isret(bc_op(*retpc)))
+ pc = retpc;
+ }
+ ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
+ ra_allockreg(as, i32ptr(pc), RID_LPC);
+ mres = (int32_t)(snap->nslots - baseslot);
+ switch (bc_op(*pc)) {
+ case BC_CALLM: case BC_CALLMT:
+ mres -= (int32_t)(1 + bc_a(*pc) + bc_c(*pc)); break;
+ case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
+ case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
+ default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
+ }
+ ra_allockreg(as, mres, RID_RET); /* Return MULTRES or 0. */
+ } else if (baseslot) {
+ /* Save modified BASE for linking to trace with higher start frame. */
+ emit_setgl(as, RID_BASE, jit_base);
+ }
+ emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);
+
+ /* Sync the interpreter state with the on-trace state. */
+ asm_stack_restore(as, snap);
+
+ /* Root traces that add frames need to check the stack at the end. */
+ if (!as->parent && gotframe)
+ asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Clear reg/sp for all instructions and add register hints. */
+static void asm_setup_regsp(ASMState *as)
+{
+ GCtrace *T = as->T;
+ IRRef i, nins;
+ int inloop;
+#if LJ_TARGET_ARM
+ uint32_t rload = 0xa6402a64;
+#endif
+
+ ra_setup(as);
+
+ /* Clear reg/sp for constants. */
+ for (i = T->nk; i < REF_BIAS; i++)
+ IR(i)->prev = REGSP_INIT;
+
+ /* REF_BASE is used for implicit references to the BASE register. */
+ IR(REF_BASE)->prev = REGSP_HINT(RID_BASE);
+
+ nins = T->nins;
+ if (IR(nins-1)->o == IR_RENAME) {
+ do { nins--; } while (IR(nins-1)->o == IR_RENAME);
+ T->nins = nins; /* Remove any renames left over from ASM restart. */
+ }
+ as->snaprename = nins;
+ as->snapref = nins;
+ as->snapno = T->nsnap;
+
+ as->stopins = REF_BASE;
+ as->orignins = nins;
+ as->curins = nins;
+
+ inloop = 0;
+ as->evenspill = SPS_FIRST;
+ for (i = REF_FIRST; i < nins; i++) {
+ IRIns *ir = IR(i);
+ switch (ir->o) {
+ case IR_LOOP:
+ inloop = 1;
+ break;
+ /* Set hints for slot loads from a parent trace. */
+ case IR_SLOAD:
+ if ((ir->op2 & IRSLOAD_PARENT)) {
+ RegSP rs = as->parentmap[ir->op1];
+ lua_assert(regsp_used(rs));
+ as->stopins = i;
+ if (!ra_hasspill(regsp_spill(rs)) && ra_hasreg(regsp_reg(rs))) {
+ ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
+ continue;
+ }
+ }
+#if LJ_TARGET_ARM
+ if ((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP) {
+ ir->prev = (uint16_t)REGSP_HINT((rload & 15));
+ rload = lj_ror(rload, 4);
+ continue;
+ }
+#endif
+ break;
+#if LJ_TARGET_ARM
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ ir->prev = (uint16_t)REGSP_HINT((rload & 15));
+ rload = lj_ror(rload, 4);
+ continue;
+#endif
+ case IR_CALLXS: {
+ CCallInfo ci;
+ ci.flags = asm_callx_flags(as, ir);
+ ir->prev = asm_setup_call_slots(as, ir, &ci);
+ if (inloop)
+ as->modset |= RSET_SCRATCH;
+ continue;
+ }
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: {
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ ir->prev = asm_setup_call_slots(as, ir, ci);
+ if (inloop)
+ as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
+ (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
+ continue;
+ }
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+ case IR_HIOP:
+ switch ((ir-1)->o) {
+#if LJ_SOFTFP
+ case IR_SLOAD:
+ if (((ir-1)->op2 & IRSLOAD_PARENT)) {
+ RegSP rs = as->parentmaphi[(ir-1)->op1];
+ lua_assert(regsp_used(rs));
+ as->stopins = i;
+ if (!ra_hasspill(regsp_spill(rs)) && ra_hasreg(regsp_reg(rs))) {
+ ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
+ continue;
+ }
+ }
+#if LJ_TARGET_ARM
+ /* fallthrough */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ if (ra_hashint((ir-1)->r)) {
+ ir->prev = (ir-1)->prev + 1;
+ continue;
+ }
+#endif
+ break;
+#endif
+#if LJ_NEED_FP64
+ case IR_CONV:
+ if (irt_isfp((ir-1)->t)) {
+ ir->prev = REGSP_HINT(RID_FPRET);
+ continue;
+ }
+ /* fallthrough */
+#endif
+ case IR_CALLN: case IR_CALLXS:
+#if LJ_SOFTFP
+ case IR_MIN: case IR_MAX:
+#endif
+ (ir-1)->prev = REGSP_HINT(RID_RETLO);
+ ir->prev = REGSP_HINT(RID_RETHI);
+ continue;
+ default:
+ break;
+ }
+ break;
+#endif
+#if LJ_SOFTFP
+ case IR_MIN: case IR_MAX:
+ if ((ir+1)->o != IR_HIOP) break;
+ /* fallthrough */
+#endif
+ /* C calls evict all scratch regs and return results in RID_RET. */
+ case IR_SNEW: case IR_XSNEW: case IR_NEWREF:
+ if (REGARG_NUMGPR < 3 && as->evenspill < 3)
+ as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. */
+ case IR_TNEW: case IR_TDUP: case IR_CNEW: case IR_CNEWI: case IR_TOSTR:
+ ir->prev = REGSP_HINT(RID_RET);
+ if (inloop)
+ as->modset = RSET_SCRATCH;
+ continue;
+ case IR_STRTO: case IR_OBAR:
+ if (inloop)
+ as->modset = RSET_SCRATCH;
+ break;
+#if !LJ_TARGET_X86ORX64 && !LJ_SOFTFP
+ case IR_ATAN2: case IR_LDEXP:
+#endif
+ case IR_POW:
+ if (!LJ_SOFTFP && irt_isnum(ir->t)) {
+#if LJ_TARGET_X86ORX64
+ ir->prev = REGSP_HINT(RID_XMM0);
+ if (inloop)
+ as->modset |= RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
+#else
+ ir->prev = REGSP_HINT(RID_FPRET);
+ if (inloop)
+ as->modset |= RSET_SCRATCH;
+#endif
+ continue;
+ }
+ /* fallthrough for integer POW */
+ case IR_DIV: case IR_MOD:
+ if (!irt_isnum(ir->t)) {
+ ir->prev = REGSP_HINT(RID_RET);
+ if (inloop)
+ as->modset |= (RSET_SCRATCH & RSET_GPR);
+ continue;
+ }
+ break;
+ case IR_FPMATH:
+#if LJ_TARGET_X86ORX64
+ if (ir->op2 == IRFPM_EXP2) { /* May be joined to lj_vm_pow_sse. */
+ ir->prev = REGSP_HINT(RID_XMM0);
+#if !LJ_64
+ if (as->evenspill < 4) /* Leave room for 16 byte scratch area. */
+ as->evenspill = 4;
+#endif
+ if (inloop)
+ as->modset |= RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
+ continue;
+ } else if (ir->op2 <= IRFPM_TRUNC && !(as->flags & JIT_F_SSE4_1)) {
+ ir->prev = REGSP_HINT(RID_XMM0);
+ if (inloop)
+ as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
+ continue;
+ }
+ break;
+#else
+ ir->prev = REGSP_HINT(RID_FPRET);
+ if (inloop)
+ as->modset |= RSET_SCRATCH;
+ continue;
+#endif
+#if LJ_TARGET_X86ORX64
+ /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
+ case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR:
+ if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
+ IR(ir->op2)->r = REGSP_HINT(RID_ECX);
+ if (inloop)
+ rset_set(as->modset, RID_ECX);
+ }
+ break;
+#endif
+ /* Do not propagate hints across type conversions. */
+ case IR_TOBIT:
+ break;
+ case IR_CONV:
+ if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
+ (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
+ break;
+ /* fallthrough */
+ default:
+ /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
+ if (irref_isk(ir->op2) && !irref_isk(ir->op1)) {
+ ir->prev = IR(ir->op1)->prev;
+ continue;
+ }
+ break;
+ }
+ ir->prev = REGSP_INIT;
+ }
+ if ((as->evenspill & 1))
+ as->oddspill = as->evenspill++;
+ else
+ as->oddspill = 0;
+}
+
+/* -- Assembler core ------------------------------------------------------ */
+
+/* Assemble a trace. */
+void lj_asm_trace(jit_State *J, GCtrace *T)
+{
+ ASMState as_;
+ ASMState *as = &as_;
+ MCode *origtop;
+
+ /* Ensure an initialized instruction beyond the last one for HIOP checks. */
+ J->cur.nins = lj_ir_nextins(J);
+ J->cur.ir[J->cur.nins].o = IR_NOP;
+
+ /* Setup initial state. Copy some fields to reduce indirections. */
+ as->J = J;
+ as->T = T;
+ as->ir = T->ir;
+ as->flags = J->flags;
+ as->loopref = J->loopref;
+ as->realign = NULL;
+ as->loopinv = 0;
+ if (J->parent) {
+ as->parent = traceref(J, J->parent);
+ lj_snap_regspmap(as->parentmap, as->parent, J->exitno, 0);
+#if LJ_SOFTFP
+ lj_snap_regspmap(as->parentmaphi, as->parent, J->exitno, 1);
+#endif
+ } else {
+ as->parent = NULL;
+ }
+ /* Reserve MCode memory. */
+ as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot);
+ as->mcp = as->mctop;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ asm_setup_target(as);
+
+ do {
+ as->mcp = as->mctop;
+ as->curins = T->nins;
+ RA_DBG_START();
+ RA_DBGX((as, "===== STOP ====="));
+
+ /* General trace setup. Emit tail of trace. */
+ asm_tail_prep(as);
+ as->mcloop = NULL;
+ as->flagmcp = NULL;
+ as->topslot = 0;
+ as->gcsteps = 0;
+ as->sectref = as->loopref;
+ as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
+ asm_setup_regsp(as);
+ if (!as->loopref)
+ asm_tail_link(as);
+
+ /* Assemble a trace in linear backwards order. */
+ for (as->curins--; as->curins > as->stopins; as->curins--) {
+ IRIns *ir = IR(as->curins);
+ lua_assert(!(LJ_32 && irt_isint64(ir->t))); /* Handled by SPLIT. */
+ if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
+ continue; /* Dead-code elimination can be soooo easy. */
+ if (irt_isguard(ir->t))
+ asm_snap_prep(as);
+ RA_DBG_REF();
+ checkmclim(as);
+ asm_ir(as, ir);
+ }
+ } while (as->realign); /* Retry in case the MCode needs to be realigned. */
+
+ /* Emit head of trace. */
+ RA_DBG_REF();
+ checkmclim(as);
+ if (as->gcsteps) {
+ as->curins = as->T->snap[0].ref;
+ asm_snap_prep(as); /* The GC check is a guard. */
+ asm_gc_check(as);
+ }
+ ra_evictk(as);
+ if (as->parent)
+ asm_head_side(as);
+ else
+ asm_head_root(as);
+ asm_phi_fixup(as);
+
+ RA_DBGX((as, "===== START ===="));
+ RA_DBG_FLUSH();
+ if (as->freeset != RSET_ALL)
+ lj_trace_err(as->J, LJ_TRERR_BADRA); /* Ouch! Should never happen. */
+
+ /* Set trace entry point before fixing up tail to allow link to self. */
+ T->mcode = as->mcp;
+ T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
+ if (!as->loopref)
+ asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */
+ T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
+ lj_mcode_sync(T->mcode, origtop);
+}
+
+#undef IR
+
+#endif
diff --git a/src/LuaJIT/src/lj_asm.h b/src/LuaJIT/src/lj_asm.h
new file mode 100644
index 000000000..b9ad9a244
--- /dev/null
+++ b/src/LuaJIT/src/lj_asm.h
@@ -0,0 +1,17 @@
+/*
+** IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_ASM_H
+#define _LJ_ASM_H
+
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+LJ_FUNC void lj_asm_trace(jit_State *J, GCtrace *T);
+LJ_FUNC void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno,
+ MCode *target);
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_asm_arm.h b/src/LuaJIT/src/lj_asm_arm.h
new file mode 100644
index 000000000..5ec3d59fd
--- /dev/null
+++ b/src/LuaJIT/src/lj_asm_arm.h
@@ -0,0 +1,1785 @@
+/*
+** ARM IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate a scratch register pair. */
+static Reg ra_scratchpair(ASMState *as, RegSet allow)
+{
+ RegSet pick1 = as->freeset & allow;
+ RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
+ Reg r;
+ if (pick2) {
+ r = rset_picktop(pick2);
+ } else {
+ RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
+ if (pick) {
+ r = rset_picktop(pick);
+ ra_restore(as, regcost_ref(as->cost[r+1]));
+ } else {
+ pick = pick1 & (allow << 1) & RSET_GPRODD;
+ if (pick) {
+ r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
+ } else {
+ r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
+ ra_restore(as, regcost_ref(as->cost[r+1]));
+ }
+ }
+ }
+ lua_assert(rset_test(RSET_GPREVEN, r));
+ ra_modified(as, r);
+ ra_modified(as, r+1);
+ RA_DBGX((as, "scratchpair $r $r", r, r+1));
+ return r;
+}
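+
+/* Sketch of the even/odd pair test used above, assuming a plain uint32_t
+** bitset of free registers (the real RegSet/RSET_GPREVEN machinery is more
+** restrictive). Illustration only, kept out of the build:
+*/
+#if 0
+#include <stdint.h>
+/* Return the highest even register r with both r and r+1 free, else -1. */
+static int pick_even_pair(uint32_t freeset)
+{
+  uint32_t pairs = freeset & (freeset >> 1) & 0x55555555u;
+  int r;
+  for (r = 30; r >= 0; r -= 2)
+    if (pairs & (1u << r)) return r;
+  return -1;
+}
+#endif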
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Generate an exit stub group at the bottom of the reserved MCode memory. */
+static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
+{
+ MCode *mxp = as->mcbot;
+ int i;
+ if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
+ asm_mclimit(as);
+ /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
+ *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
+ *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
+ mxp++;
+ *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch); /* DISPATCH address */
+ *mxp++ = group*EXITSTUBS_PER_GROUP;
+ for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
+ *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
+ lj_mcode_commitbot(as->J, mxp);
+ as->mcbot = mxp;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ return mxp - EXITSTUBS_PER_GROUP;
+}
+
+/* Setup all needed exit stubs. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
+ lj_trace_err(as->J, LJ_TRERR_SNAPOV);
+ for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
+ if (as->J->exitstubgroup[i] == NULL)
+ as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
+}
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guardcc(ASMState *as, ARMCC cc)
+{
+ MCode *target = exitstub_addr(as->J, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
+ emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
+ return;
+ }
+ emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
+}
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if there's no conflicting instruction between curins and ref. */
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
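+/* (A NEWREF may resize the table and move a colocated array part away from
+** the GCtab, hence the noconflict() check below.) */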
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (ofs > -4096 && ofs < 4096) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (ofs < 4096) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
+ *ofsp = (ofs & 255); /* Mask out less bits to allow LDRD. */
+ return ra_allock(as, (ofs & ~255), allow);
+ }
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
+
+/* Fuse m operand into arithmetic/logic instructions. */
+static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_hasreg(ir->r)) {
+ ra_noweak(as, ir->r);
+ return ARMF_M(ir->r);
+ } else if (irref_isk(ref)) {
+ uint32_t k = emit_isk12(ai, ir->i);
+ if (k)
+ return k;
+ } else if (mayfuse(as, ref)) {
+ if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
+ Reg m = ra_alloc1(as, ir->op1, allow);
+ ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
+ ir->o == IR_BSHR ? ARMSH_LSR :
+ ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
+ if (irref_isk(ir->op2)) {
+ return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
+ } else {
+ Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
+ return m | ARMF_RSH(sh, s);
+ }
+ } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
+ Reg m = ra_alloc1(as, ir->op1, allow);
+ return m | ARMF_SH(ARMSH_LSL, 1);
+ }
+ }
+ return ra_allocref(as, ref, allow);
+}
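+
+/* Sketch of the shifter-operand field the fusion above produces, assuming
+** the standard ARM data-processing encoding (LSL=0, LSR=1, ASR=2, ROR=3);
+** this mirrors ARMF_M/ARMF_SH without the real macros. Illustration only:
+*/
+#if 0
+#include <stdint.h>
+/* Encode "Rm, <sh> #imm" as the low 12 bits of a data-processing insn. */
+static uint32_t shifter_operand(uint32_t rm, uint32_t sh, uint32_t imm)
+{
+  return ((imm & 31) << 7) | ((sh & 3) << 5) | (rm & 15);
+}
+/* e.g. fusing y<<2 into an add: ADD rd, rx, ry, LSL #2 uses
+** shifter_operand(ry, 0, 2) as its operand-2 field. */
+#endif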
+
+/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
+static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
+ irref_isk(ir->op2) && IR(ir->op2)->i == 2)
+ return ir->op1;
+ return 0; /* No fusion. */
+}
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
+ RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ int32_t ofs = 0;
+ Reg base;
+ if (ra_noreg(ir->r) && mayfuse(as, ref)) {
+ int32_t lim = (ai & 0x04000000) ? 4096 : 256;
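+    /* Bit 26 (0x04000000) is set for the word/byte LDR/STR format with a
+    ** 12-bit immediate offset; the halfword/doubleword format has it clear
+    ** and only allows an 8-bit immediate, hence the 4096 vs. 256 limit. */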
+ if (ir->o == IR_ADD) {
+ if (irref_isk(ir->op2) && (ofs = IR(ir->op2)->i) > -lim && ofs < lim) {
+ ref = ir->op1;
+ } else {
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg rn, rm;
+ if ((ai & 0x04000000)) {
+ IRRef sref = asm_fuselsl2(as, rref);
+ if (sref) {
+ rref = sref;
+ ai |= ARMF_SH(ARMSH_LSL, 2);
+ } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
+ lref = rref;
+ rref = sref;
+ ai |= ARMF_SH(ARMSH_LSL, 2);
+ }
+ }
+ rn = ra_alloc1(as, lref, allow);
+ rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
+ if ((ai & 0x04000000)) ai |= ARMI_LS_R;
+ emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
+ return;
+ }
+ } else if (ir->o == IR_STRREF) {
+ ofs = (int32_t)sizeof(GCstr);
+ if (irref_isk(ir->op2)) {
+ ofs += IR(ir->op2)->i;
+ ref = ir->op1;
+ } else if (irref_isk(ir->op1)) {
+ ofs += IR(ir->op1)->i;
+ ref = ir->op2;
+ } else {
+ /* NYI: Fuse ADD with constant. */
+ Reg rn = ra_alloc1(as, ir->op1, allow);
+ uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, rd, rd, ofs);
+ else
+ emit_lsox(as, ai, rd, rd, ofs);
+ emit_dn(as, ARMI_ADD^m, rd, rn);
+ return;
+ }
+ if (ofs <= -lim || ofs >= lim) {
+ Reg rn = ra_alloc1(as, ref, allow);
+ Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
+ if ((ai & 0x04000000)) ai |= ARMI_LS_R;
+ emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
+ return;
+ }
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, rd, base, ofs);
+ else
+ emit_lsox(as, ai, rd, base, ofs);
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_NARGS(ci);
+ int32_t ofs = 0;
+ Reg gpr = REGARG_FIRSTGPR;
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func);
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ IRIns *ir = IR(ref);
+ if (gpr <= REGARG_LASTGPR) {
+ lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */
+ if (ref) ra_leftov(as, gpr, ref);
+ gpr++;
+ } else {
+ if (ref) {
+ Reg r = ra_alloc1(as, ref, RSET_GPR);
+ emit_spstore(as, ir, r, ofs);
+ }
+ ofs += 4;
+ }
+ }
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP);
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lua_assert(!irt_ispri(ir->t));
+ if (hiop)
+ ra_destpair(as, ir);
+ else
+ ra_destreg(as, ir, RID_RET);
+ }
+ UNUSED(ci);
+}
+
+static void asm_call(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ asm_collectargs(as, ir, ci, args);
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(void *)(irf->i);
+ } else { /* Need a non-argument register for indirect calls. */
+ Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
+ emit_m(as, ARMI_BLXr, freg);
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ /* Need to force a spill on REF_BASE now to update the stack slot. */
+ emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+ emit_nm(as, ARMI_CMP, RID_TMP,
+ ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ /* FP conversions and 64 bit integer conversions are handled by SPLIT. */
+ lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
+ lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64));
+ if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
+ if ((as->flags & JIT_F_ARMV6)) {
+ ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
+ st == IRT_U8 ? ARMI_UXTB :
+ st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
+ emit_dm(as, ai, dest, left);
+ } else if (st == IRT_U8) {
+ emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
+ } else {
+ uint32_t shift = st == IRT_I8 ? 24 : 16;
+ ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
+ emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
+ emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
+ }
+ } else { /* Handle 32/32 bit no-op (cast). */
+ ra_leftov(as, dest, ir->op1); /* Do nothing, but may need to move regs. */
+ }
+}
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum];
+ IRRef args[2];
+ Reg rlo = 0, rhi = 0, tmp;
+ int destused = ra_used(ir);
+ int32_t ofs = 0;
+ ra_evictset(as, RSET_SCRATCH);
+ if (destused) {
+ if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
+ (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
+ int i;
+ for (i = 0; i < 2; i++) {
+ Reg r = (ir+i)->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ ra_modified(as, r);
+ emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
+ }
+ }
+ ofs = sps_scale(ir->s);
+ destused = 0;
+ } else {
+ rhi = ra_dest(as, ir+1, RSET_GPR);
+ rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
+ }
+ }
+ asm_guardcc(as, CC_EQ);
+ if (destused) {
+ emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
+ emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
+ }
+ emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ tmp = ra_releasetmp(as, ASMREF_TMP1);
+ if (ofs == 0)
+ emit_dm(as, ARMI_MOV, tmp, RID_SP);
+ else
+ emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
+}
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) { /* Use the number constant itself as a TValue. */
+ lua_assert(irref_isk(ref));
+ ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
+ } else {
+ /* Otherwise use [sp] and [sp+4] to hold the TValue. */
+ RegSet allow = rset_exclude(RSET_GPR, dest);
+ Reg type;
+ emit_dm(as, ARMI_MOV, dest, RID_SP);
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ emit_lso(as, ARMI_STR, src, RID_SP, 0);
+ }
+ if ((ir+1)->o == IR_HIOP)
+ type = ra_alloc1(as, ref+1, allow);
+ else
+ type = ra_allock(as, irt_toitype(ir->t), allow);
+ emit_lso(as, ARMI_STR, type, RID_SP, 4);
+ }
+}
+
+static void asm_tostr(ASMState *as, IRIns *ir)
+{
+ IRRef args[2];
+ args[0] = ASMREF_L;
+ as->gcsteps++;
+ if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
+ args[1] = ASMREF_TMP1; /* const lua_Number * */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
+ } else {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
+ args[1] = ir->op1; /* int32_t k */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ }
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
+ if (k) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_dn(as, ARMI_ADD^k, dest, base);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir, IROp merge)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ IRType1 kt = irkey->t;
+ int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
+ uint32_t khash;
+ MCLabel l_end, l_loop;
+ rset_clear(allow, tab);
+ if (!irref_isk(refkey) || irt_isstr(kt)) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ if (irkey[1].o == IR_HIOP) {
+ if (ra_hasreg((irkey+1)->r)) {
+ keynumhi = (irkey+1)->r;
+ keyhi = RID_TMP;
+ ra_noweak(as, keynumhi);
+ } else {
+ keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
+ }
+ rset_clear(allow, keynumhi);
+ khi = 0;
+ }
+ } else if (irt_isnum(kt)) {
+ int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
+ k = emit_isk12(ARMI_CMP, val);
+ if (!k) {
+ key = ra_allock(as, val, allow);
+ rset_clear(allow, key);
+ }
+ val = (int32_t)ir_knum(irkey)->u32.hi;
+ khi = emit_isk12(ARMI_CMP, val);
+ if (!khi) {
+ keyhi = ra_allock(as, val, allow);
+ rset_clear(allow, keyhi);
+ }
+ } else if (!irt_ispri(kt)) {
+ k = emit_isk12(ARMI_CMP, irkey->i);
+ if (!k) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+ }
+ if (!irt_ispri(kt))
+ tmp = ra_scratchpair(as, allow);
+
+ /* Key not found in chain: jump to exit (if merged) or load niltv. */
+ l_end = emit_label(as);
+ as->invmcp = NULL;
+ if (merge == IR_NE)
+ asm_guardcc(as, CC_AL);
+ else if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+
+ /* Follow hash chain until the end. */
+ l_loop = --as->mcp;
+ emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
+ emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));
+
+ /* Type and value comparison. */
+ if (merge == IR_EQ)
+ asm_guardcc(as, CC_EQ);
+ else
+ emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
+ if (!irt_ispri(kt)) {
+ emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
+ emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
+ emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
+ } else {
+ emit_n(as, ARMI_CMP^khi, tmp);
+ emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
+ }
+ *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
+ if (khash == 0) {
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ } else {
+ emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
+ emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
+ if (irt_isstr(kt)) { /* Fetch of str->hash is cheaper than ra_allock. */
+ emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash));
+ emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
+ } else if (irref_isk(refkey)) {
+ emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
+ rset_exclude(rset_exclude(RSET_GPR, tab), dest));
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ if (ra_hasreg(keynumhi)) { /* Canonicalize +-0.0 to 0.0. */
+ if (keyhi == RID_TMP)
+ emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
+ emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
+ }
+ emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
+ emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
+ tmp, tmp+1, tmp);
+ emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
+ emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
+ if (ra_hasreg(keynumhi)) {
+ emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
+ emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key); /* Test for +-0.0. */
+ emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
+ } else {
+ emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
+ emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
+ rset_exclude(rset_exclude(RSET_GPR, tab), key));
+ }
+ }
+ }
+}
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg key = RID_NONE, type = RID_TMP, idx = node;
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ lua_assert(ofs % sizeof(Node) == 0);
+ if (ofs > 4095) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
+ }
+ asm_guardcc(as, CC_NE);
+ if (!irt_ispri(irkey->t)) {
+ RegSet even = (as->freeset & allow);
+ even = even & (even >> 1) & RSET_GPREVEN;
+ if (even) {
+ key = ra_scratch(as, even);
+ if (rset_test(as->freeset, key+1)) {
+ type = key+1;
+ ra_modified(as, type);
+ }
+ } else {
+ key = ra_scratch(as, allow);
+ }
+ rset_clear(allow, key);
+ }
+ rset_clear(allow, type);
+ if (irt_isnum(irkey->t)) {
+ emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
+ (int32_t)ir_knum(irkey)->u32.hi, allow);
+ emit_opk(as, ARMI_CMP, 0, key,
+ (int32_t)ir_knum(irkey)->u32.lo, allow);
+ } else {
+ if (ra_hasreg(key))
+ emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
+ emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
+ }
+ emit_lso(as, ARMI_LDR, type, idx, kofs+4);
+ if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
+ if (ofs > 4095)
+ emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
+}
+
+static void asm_newref(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
+ IRRef args[3];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* GCtab *t */
+ args[2] = ASMREF_TMP1; /* cTValue *key */
+ asm_setupresult(as, ir, ci); /* TValue * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ /* NYI: Check that UREFO is still open and not aliasing a slot. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, ARMI_LDR, dest, v);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guardcc(as, CC_NE);
+ emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
+ emit_opk(as, ARMI_ADD, dest, uv,
+ (int32_t)offsetof(GCupval, tv), RSET_GPR);
+ emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_lso(as, ARMI_LDR, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lua_assert(!ra_used(ir));
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRRef ref = ir->op2, refk = ir->op1;
+ Reg r;
+ if (irref_isk(ref)) {
+ IRRef tmp = refk; refk = ref; ref = tmp;
+ } else if (!irref_isk(refk)) {
+ uint32_t k, m = ARMI_K12|sizeof(GCstr);
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ IRIns *irr = IR(ir->op2);
+ if (ra_hasreg(irr->r)) {
+ ra_noweak(as, irr->r);
+ right = irr->r;
+ } else if (mayfuse(as, irr->op2) &&
+ irr->o == IR_ADD && irref_isk(irr->op2) &&
+ (k = emit_isk12(ARMI_ADD,
+ (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
+ m = k;
+ right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
+ } else {
+ right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_dn(as, ARMI_ADD^m, dest, dest);
+ emit_dnm(as, ARMI_ADD, dest, left, right);
+ return;
+ }
+ r = ra_alloc1(as, ref, RSET_GPR);
+ emit_opk(as, ARMI_ADD, dest, r,
+ sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static ARMIns asm_fxloadins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return ARMI_LDRSB;
+ case IRT_U8: return ARMI_LDRB;
+ case IRT_I16: return ARMI_LDRSH;
+ case IRT_U16: return ARMI_LDRH;
+ case IRT_NUM: lua_assert(0);
+ case IRT_FLOAT:
+ default: return ARMI_LDR;
+ }
+}
+
+static ARMIns asm_fxstoreins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return ARMI_STRB;
+ case IRT_I16: case IRT_U16: return ARMI_STRH;
+ case IRT_NUM: lua_assert(0);
+ case IRT_FLOAT:
+ default: return ARMI_STR;
+ }
+}
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ ARMIns ai = asm_fxloadins(ir);
+ int32_t ofs;
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, dest, idx, ofs);
+ else
+ emit_lsox(as, ai, dest, idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ ARMIns ai = asm_fxstoreins(ir);
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, src, idx, ofs);
+ else
+ emit_lsox(as, ai, src, idx, ofs);
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
+ asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR);
+}
+
+static void asm_xstore(ASMState *as, IRIns *ir)
+{
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src));
+}
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ int hiop = ((ir+1)->o == IR_HIOP);
+ IRType t = hiop ? IRT_NUM : irt_type(ir->t);
+ Reg dest = RID_NONE, type = RID_NONE, idx;
+ RegSet allow = RSET_GPR;
+ int32_t ofs = 0;
+ if (hiop && ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+ if (ra_used(ir)) {
+ lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
+ dest = ra_dest(as, ir, allow);
+ rset_clear(allow, dest);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (!hiop || type == RID_NONE) {
+ rset_clear(allow, idx);
+ if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
+ rset_test((as->freeset & allow), dest+1)) {
+ type = dest+1;
+ ra_modified(as, type);
+ } else {
+ type = RID_TMP;
+ }
+ }
+ asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
+ emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
+ if (ra_hasreg(dest)) emit_lso(as, ARMI_LDR, dest, idx, ofs);
+ emit_lso(as, ARMI_LDR, type, idx, ofs+4);
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, type = RID_NONE;
+ int32_t ofs = 0;
+ int hiop = ((ir+1)->o == IR_HIOP);
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ }
+ if (hiop)
+ type = ra_alloc1(as, (ir+1)->op2, allow);
+ else
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type));
+ if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
+ emit_lso(as, ARMI_STR, type, idx, ofs+4);
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ int hiop = ((ir+1)->o == IR_HIOP);
+ IRType t = hiop ? IRT_NUM : irt_type(ir->t);
+ Reg dest = RID_NONE, type = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
+ lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
+ lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */
+ if (hiop && ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+ if (ra_used(ir)) {
+ lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
+ dest = ra_dest(as, ir, allow);
+ rset_clear(allow, dest);
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ if (ra_noreg(type)) {
+ rset_clear(allow, base);
+ if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
+ rset_test((as->freeset & allow), dest+1)) {
+ type = dest+1;
+ ra_modified(as, type);
+ } else {
+ type = RID_TMP;
+ }
+ }
+ asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
+ emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
+ }
+ if (ra_hasreg(dest)) emit_lso(as, ARMI_LDR, dest, base, ofs);
+ if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID typeid = (CTypeID)IR(ir->op1)->i;
+ CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
+ lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i;
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[2];
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ RegSet drop = RSET_SCRATCH;
+ lua_assert(sz != CTSIZE_INVALID);
+
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ as->gcsteps++;
+
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ if (ra_used(ir))
+ ra_destreg(as, ir, RID_RET); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ int32_t ofs = sizeof(GCcdata);
+ lua_assert(sz == 4 || sz == 8);
+ if (sz == 8) {
+ ofs += 4; ir++;
+ lua_assert(ir->o == IR_HIOP);
+ }
+ for (;;) {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_lso(as, ARMI_STR, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; ir--;
+ }
+ }
+ /* Initialize gct and typeid. lj_mem_newgco() already sets marked. */
+ {
+ uint32_t k = emit_isk12(ARMI_MOV, typeid);
+ Reg r = k ? RID_R1 : ra_allock(as, typeid, allow);
+ emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
+ emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, typeid));
+ emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
+ if (k) emit_d(as, ARMI_MOV^k, RID_R1);
+ }
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#else
+#define asm_cnew(as, ir) ((void)0)
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
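+/* Table write barrier (backward barrier): if the table is black, clear its
+** black bit and re-link it onto g->gc.grayagain so the GC revisits it. */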
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
+ rset_exclude(rset_exclude(RSET_GPR, tab), link));
+ Reg mark = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_lso(as, ARMI_STR, tab, gr,
+ (int32_t)offsetof(global_State, gc.grayagain));
+ emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
+ emit_lso(as, ARMI_LDR, link, gr,
+ (int32_t)offsetof(global_State, gc.grayagain));
+ emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
+ emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
+ emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lua_assert(IR(ir->op1)->o == IR_UREFC);
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ if ((l_end[-1] >> 28) == CC_AL)
+ l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
+ else
+ emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
+ ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
+ emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
+ emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_lso(as, ARMI_LDRB, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
+{
+ IRIns *ir;
+ if (irref_isk(rref))
+ return 0; /* Don't swap constants to the left. */
+ if (irref_isk(lref))
+ return 1; /* But swap constants to the right. */
+ ir = IR(rref);
+ if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
+ (ir->o == IR_ADD && ir->op1 == ir->op2))
+ return 0; /* Don't swap fusable operands to the left. */
+ ir = IR(lref);
+ if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
+ (ir->o == IR_ADD && ir->op1 == ir->op2))
+ return 1; /* But swap fusable operands to the right. */
+ return 0; /* Otherwise don't swap. */
+}
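+/* Note: only the second (m) operand of an ARM data-processing instruction
+** can be an immediate or a shifted register, so constants and fusable
+** operands must end up on the right; asm_intop() compensates for a swap by
+** flipping SUB/SBC to RSB/RSC. */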
+
+static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg left, dest = ra_dest(as, ir, RSET_GPR);
+ uint32_t m;
+ if (asm_swapops(as, lref, rref)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
+ ai ^= (ARMI_SUB^ARMI_RSB);
+ }
+ left = ra_hintalloc(as, lref, dest, RSET_GPR);
+ m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
+ if (irt_isguard(ir->t)) { /* For IR_ADDOV etc. */
+ asm_guardcc(as, CC_VS);
+ ai |= ARMI_S;
+ }
+ emit_dn(as, ai^m, dest, left);
+}
+
+static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ if (as->flagmcp == as->mcp) { /* Try to drop cmp r, #0. */
+ uint32_t cc = (as->mcp[1] >> 28);
+ as->flagmcp = NULL;
+ if (cc <= CC_NE) {
+ as->mcp++;
+ ai |= ARMI_S;
+ } else if (cc == CC_GE) {
+ *++as->mcp ^= ((CC_GE^CC_PL) << 28);
+ ai |= ARMI_S;
+ } else if (cc == CC_LT) {
+ *++as->mcp ^= ((CC_LT^CC_MI) << 28);
+ ai |= ARMI_S;
+ } /* else: other conds don't work with bit ops. */
+ }
+ if (ir->op2 == 0) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
+ emit_d(as, ai^m, dest);
+ } else {
+ /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
+ asm_intop(as, ir, ai);
+ }
+}
+
+static void asm_arithop(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ if (as->flagmcp == as->mcp) { /* Drop cmp r, #0. */
+ as->flagmcp = NULL;
+ as->mcp++;
+ ai |= ARMI_S;
+ }
+ asm_intop(as, ir, ai);
+}
+
+static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_dn(as, ai|ARMI_K12|0, dest, left);
+}
+
+/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
+static void asm_intmul(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ Reg tmp = RID_NONE;
+ /* ARMv5 restriction: dest != left and dest_hi != left. */
+ if (dest == left && left != right) { left = right; right = dest; }
+ if (irt_isguard(ir->t)) { /* IR_MULOV */
+ if (!(as->flags & JIT_F_ARMV6) && dest == left)
+ tmp = left = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ asm_guardcc(as, CC_NE);
+ emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
+ emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
+ } else {
+ if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
+ emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
+ }
+ /* Only need this for the dest == left == right case. */
+ if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
+}
+
+static void asm_intmod(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_bitswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if ((as->flags & JIT_F_ARMV6)) {
+ emit_dm(as, ARMI_REV, dest, left);
+ } else {
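+ /* No REV before ARMv6: synthesize the byte swap as tmp = x ^ (x ror 16); */
+ /* tmp &= ~0xff0000; dest = (x ror 8) ^ (tmp lsr 8). */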
+ Reg tmp2 = dest;
+ if (tmp2 == left)
+ tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
+ emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
+ emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
+ emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
+ emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
+ }
+}
+
+static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
+{
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
+ /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ int32_t shift = (IR(ir->op2)->i & 31);
+ emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
+ }
+}
+
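+/* emit_isk12 yields the ARM operand-2 immediate encoding (an 8 bit value */
+/* rotated right by an even amount) or 0 if the constant is not encodable, */
+/* so min/max with a constant can use CMP/MOV with an immediate operand. */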
+static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
+{
+ uint32_t kcmp = 0, kmov = 0;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ Reg right = 0;
+ if (irref_isk(ir->op2)) {
+ kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
+ if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
+ }
+ if (!kmov) {
+ kcmp = 0;
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ if (dest != right) {
+ emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
+ cc ^= 1; /* Must use opposite conditions for paired moves. */
+ } else {
+ cc ^= (CC_LT^CC_GT); /* Otherwise may swap CC_LT <-> CC_GT. */
+ }
+ if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, left);
+ emit_nm(as, ARMI_CMP^kcmp, left, right);
+}
+
+static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
+ RegSet drop = RSET_SCRATCH;
+ Reg r;
+ IRRef args[4];
+ args[0] = ir->op1; args[1] = (ir+1)->op1;
+ args[2] = ir->op2; args[3] = (ir+1)->op2;
+ /* __aeabi_cdcmple preserves r0-r3. */
+ if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
+ if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
+ if (!rset_test(as->freeset, RID_R2) &&
+ regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
+ if (!rset_test(as->freeset, RID_R3) &&
+ regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
+ ra_evictset(as, drop);
+ ra_destpair(as, ir);
+ emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
+ emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
+ emit_call(as, (void *)ci->func);
+ for (r = RID_R0; r <= RID_R3; r++)
+ ra_leftov(as, r, args[r-RID_R0]);
+}
+
+/* -- Comparisons --------------------------------------------------------- */
+
+/* Map of comparisons to flags. ORDER IR. */
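+/* Each entry holds the condition on which the guard exits, i.e. the */
+/* negation of the IR comparison (e.g. IR_LT maps to CC_GE). */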
+static const uint8_t asm_compmap[IR_ABC+1] = {
+ /* op FP swp int cc FP cc */
+ /* LT */ CC_GE + (CC_HS << 4),
+ /* GE x */ CC_LT + (CC_HI << 4),
+ /* LE */ CC_GT + (CC_HI << 4),
+ /* GT x */ CC_LE + (CC_HS << 4),
+ /* ULT x */ CC_HS + (CC_LS << 4),
+ /* UGE */ CC_LO + (CC_LO << 4),
+ /* ULE x */ CC_HI + (CC_LO << 4),
+ /* UGT */ CC_LS + (CC_LS << 4),
+ /* EQ */ CC_NE + (CC_NE << 4),
+ /* NE */ CC_EQ + (CC_EQ << 4),
+ /* ABC */ CC_LS + (CC_LS << 4) /* Same as UGT. */
+};
+
+/* FP comparisons. */
+static void asm_fpcomp(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
+ RegSet drop = RSET_SCRATCH;
+ Reg r;
+ IRRef args[4];
+ int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
+ args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
+ args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
+ /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
+ for (r = RID_R0; r <= RID_R3; r++)
+ if (!rset_test(as->freeset, r) &&
+ regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
+ ra_evictset(as, drop);
+ asm_guardcc(as, (asm_compmap[ir->o] >> 4));
+ emit_call(as, (void *)ci->func);
+ for (r = RID_R0; r <= RID_R3; r++)
+ ra_leftov(as, r, args[r-RID_R0]);
+}
+
+/* Integer comparisons. */
+static void asm_intcomp(ASMState *as, IRIns *ir)
+{
+ ARMCC cc = (asm_compmap[ir->o] & 15);
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg left;
+ uint32_t m;
+ int cmpprev0 = 0;
+ lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
+ if (asm_swapops(as, lref, rref)) {
+ Reg tmp = lref; lref = rref; rref = tmp;
+ if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */
+ else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */
+ }
+ if (irref_isk(rref) && IR(rref)->i == 0) {
+ IRIns *irl = IR(lref);
+ cmpprev0 = (irl+1 == ir);
+ /* Combine comp(BAND(left, right), 0) into tst left, right. */
+ if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
+ IRRef blref = irl->op1, brref = irl->op2;
+ uint32_t m2 = 0;
+ Reg bleft;
+ if (asm_swapops(as, blref, brref)) {
+ Reg tmp = blref; blref = brref; brref = tmp;
+ }
+ if (irref_isk(brref)) {
+ m2 = emit_isk12(ARMI_AND, IR(brref)->i);
+ if ((m2 & (ARMI_AND^ARMI_BIC)))
+ goto notst; /* Not beneficial if we miss a constant operand. */
+ }
+ if (cc == CC_GE) cc = CC_PL;
+ else if (cc == CC_LT) cc = CC_MI;
+ else if (cc > CC_NE) goto notst; /* Other conds don't work with tst. */
+ bleft = ra_alloc1(as, blref, RSET_GPR);
+ if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
+ asm_guardcc(as, cc);
+ emit_n(as, ARMI_TST^m2, bleft);
+ return;
+ }
+ }
+notst:
+ left = ra_alloc1(as, lref, RSET_GPR);
+ m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
+ asm_guardcc(as, cc);
+ emit_n(as, ARMI_CMP^m, left);
+ /* Signed comparison with zero and referencing previous ins? */
+ if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
+ as->flagmcp = as->mcp; /* Allow elimination of the compare. */
+}
+
+/* 64 bit integer comparisons. */
+static void asm_int64comp(ASMState *as, IRIns *ir)
+{
+ int signedcomp = (ir->o <= IR_GT);
+ ARMCC cclo, cchi;
+ Reg leftlo, lefthi;
+ uint32_t mlo, mhi;
+ RegSet allow = RSET_GPR, oldfree;
+
+ /* Always use unsigned comparison for loword. */
+ cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
+ leftlo = ra_alloc1(as, ir->op1, allow);
+ oldfree = as->freeset;
+ mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
+ allow &= ~(oldfree & ~as->freeset); /* Update for allocs of asm_fuseopm. */
+
+ /* Use signed or unsigned comparison for hiword. */
+ cchi = asm_compmap[ir->o] & 15;
+ lefthi = ra_alloc1(as, (ir+1)->op1, allow);
+ mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));
+
+ /* All register allocations must be performed _before_ this point. */
+ if (signedcomp) {
+ MCLabel l_around = emit_label(as);
+ asm_guardcc(as, cclo);
+ emit_n(as, ARMI_CMP^mlo, leftlo);
+ emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
+ if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6; /* GE -> GT, LE -> LT */
+ asm_guardcc(as, cchi);
+ } else {
+ asm_guardcc(as, cclo);
+ emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
+ }
+ emit_n(as, ARMI_CMP^mhi, lefthi);
+}
+
+/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
+
+/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+ if ((ir-1)->o <= IR_NE) { /* 64 bit integer or FP comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+ if (irt_isint(ir->t))
+ asm_int64comp(as, ir-1);
+ else
+ asm_fpcomp(as, ir-1);
+ return;
+ } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
+ as->curins--; /* Always skip the loword min/max. */
+ if (uselo || usehi)
+ asm_fpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO);
+ return;
+ }
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+#if LJ_HASFFI
+ case IR_ADD:
+ as->curins--;
+ asm_intop(as, ir, ARMI_ADC);
+ asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
+ break;
+ case IR_SUB:
+ as->curins--;
+ asm_intop(as, ir, ARMI_SBC);
+ asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
+ break;
+ case IR_NEG:
+ as->curins--;
+ asm_intneg(as, ir, ARMI_RSC);
+ asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
+ break;
+#endif
+ case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ case IR_STRTO:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
+ break;
+ case IR_CALLN:
+ case IR_CALLS:
+ case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE:
+ case IR_TOSTR: case IR_CNEWI:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+ default: lua_assert(0); break;
+ }
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
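+ /* In execution order: fetch L, load L->maxstack, subtract the frame base */
+ /* and call the exit stub if the difference is not above 8*topslot (CC_LS). */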
+ Reg pbase;
+ uint32_t k;
+ if (irp) {
+ if (!ra_hasspill(irp->s)) {
+ pbase = irp->r;
+ lua_assert(ra_hasreg(pbase));
+ } else if (allow) {
+ pbase = rset_pickbot(allow);
+ } else {
+ pbase = RID_RET;
+ emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0); /* Restore temp. register. */
+ }
+ } else {
+ pbase = RID_BASE;
+ }
+ emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
+ k = emit_isk12(0, (int32_t)(8*topslot));
+ lua_assert(k);
+ emit_n(as, ARMI_CMP^k, RID_TMP);
+ emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
+ emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
+ (int32_t)offsetof(lua_State, maxstack));
+ if (irp) { /* Must not spill arbitrary registers in head of side trace. */
+ int32_t i = i32ptr(&J2G(as->J)->jit_L);
+ if (ra_hasspill(irp->s))
+ emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
+ emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
+ if (ra_hasspill(irp->s) && !allow)
+ emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */
+ emit_loadi(as, RID_TMP, (i & ~4095));
+ } else {
+ emit_getgl(as, RID_TMP, jit_L);
+ }
+}
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+ RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
+ Reg tmp;
+ lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
+ rset_exclude(RSET_GPREVEN, RID_BASE));
+ emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
+ if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
+ emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
+ } else {
+ RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
+ Reg type;
+ lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
+ emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
+ if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s == 0) continue; /* Do not overwrite link to previous frame. */
+ type = ra_allock(as, (int32_t)(*flinks--), odd);
+ } else if ((sn & SNAP_SOFTFPNUM)) {
+ type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
+ }
+ emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
+ }
+ checkmclim(as);
+ }
+ lua_assert(map + nent == flinks);
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp1, tmp2;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
+ emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ tmp1 = ra_releasetmp(as, ASMREF_TMP1);
+ tmp2 = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp2, (int32_t)as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
+ emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
+ emit_lso(as, ARMI_LDR, tmp2, tmp1,
+ (int32_t)offsetof(global_State, gc.threshold));
+ emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
+ (int32_t)offsetof(global_State, gc.total));
+ ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guardcc already inverted the bcc and patched the final bl. */
+ p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
+ } else {
+ p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
+ }
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Reload L register from g->jit_L. */
+static void asm_head_lreg(ASMState *as)
+{
+ IRIns *ir = IR(ASMREF_L);
+ if (ra_used(ir)) {
+ Reg r = ra_dest(as, ir, RSET_GPR);
+ emit_getgl(as, r, jit_L);
+ ra_evictk(as);
+ }
+}
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir;
+ asm_head_lreg(as);
+ ir = IR(REF_BASE);
+ if (ra_hasreg(ir->r) && rset_test(as->modset, ir->r)) ra_spill(as, ir);
+ ra_destreg(as, ir, RID_BASE);
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir;
+ asm_head_lreg(as);
+ ir = IR(REF_BASE);
+ if (ra_hasreg(ir->r) && rset_test(as->modset, ir->r)) ra_spill(as, ir);
+ if (ra_hasspill(irp->s)) {
+ rset_clear(allow, ra_dest(as, ir, allow));
+ } else {
+ lua_assert(ra_hasreg(irp->r));
+ rset_clear(allow, irp->r);
+ ra_destreg(as, ir, irp->r);
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *p = as->mctop;
+ MCode *target;
+ int32_t spadj = as->T->spadjust;
+ if (spadj == 0) {
+ as->mctop = --p;
+ } else {
+ /* Patch stack adjustment. */
+ uint32_t k = emit_isk12(ARMI_ADD, spadj);
+ lua_assert(k);
+ p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop - 1; /* Leave room for exit branch. */
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ as->mcp = p-1; /* Leave room for stack pointer adjustment. */
+ as->invmcp = NULL;
+ }
+ *p = 0; /* Prevent load/store merging. */
+}
+
+/* -- Instruction dispatch ------------------------------------------------ */
+
+/* Assemble a single instruction. */
+static void asm_ir(ASMState *as, IRIns *ir)
+{
+ switch ((IROp)ir->o) {
+ /* Miscellaneous ops. */
+ case IR_LOOP: asm_loop(as); break;
+ case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
+ case IR_USE: ra_alloc1(as, ir->op1, RSET_GPR); break;
+ case IR_PHI: asm_phi(as, ir); break;
+ case IR_HIOP: asm_hiop(as, ir); break;
+
+ /* Guarded assertions. */
+ case IR_EQ: case IR_NE:
+ if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
+ as->curins--;
+ asm_href(as, ir-1, (IROp)ir->o);
+ break;
+ }
+ /* fallthrough */
+ case IR_LT: case IR_GE: case IR_LE: case IR_GT:
+ case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
+ case IR_ABC:
+ asm_intcomp(as, ir);
+ break;
+
+ case IR_RETF: asm_retf(as, ir); break;
+
+ /* Bit ops. */
+ case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
+ case IR_BSWAP: asm_bitswap(as, ir); break;
+
+ case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
+ case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
+ case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;
+
+ case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
+ case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
+ case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
+ case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
+ case IR_BROL: lua_assert(0); break;
+
+ /* Arithmetic ops. */
+ case IR_ADD: case IR_ADDOV: asm_arithop(as, ir, ARMI_ADD); break;
+ case IR_SUB: case IR_SUBOV: asm_arithop(as, ir, ARMI_SUB); break;
+ case IR_MUL: case IR_MULOV: asm_intmul(as, ir); break;
+ case IR_MOD: asm_intmod(as, ir); break;
+
+ case IR_NEG: asm_intneg(as, ir, ARMI_RSB); break;
+
+ case IR_MIN: asm_intmin_max(as, ir, CC_GT); break;
+ case IR_MAX: asm_intmin_max(as, ir, CC_LT); break;
+
+ case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
+ case IR_DIV: case IR_POW: case IR_ABS: case IR_TOBIT:
+ lua_assert(0); /* Unused for LJ_SOFTFP. */
+ break;
+
+ /* Memory references. */
+ case IR_AREF: asm_aref(as, ir); break;
+ case IR_HREF: asm_href(as, ir, 0); break;
+ case IR_HREFK: asm_hrefk(as, ir); break;
+ case IR_NEWREF: asm_newref(as, ir); break;
+ case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
+ case IR_FREF: asm_fref(as, ir); break;
+ case IR_STRREF: asm_strref(as, ir); break;
+
+ /* Loads and stores. */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ asm_ahuvload(as, ir);
+ break;
+ case IR_FLOAD: asm_fload(as, ir); break;
+ case IR_XLOAD: asm_xload(as, ir); break;
+ case IR_SLOAD: asm_sload(as, ir); break;
+
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
+ case IR_FSTORE: asm_fstore(as, ir); break;
+ case IR_XSTORE: asm_xstore(as, ir); break;
+
+ /* Allocations. */
+ case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
+ case IR_TNEW: asm_tnew(as, ir); break;
+ case IR_TDUP: asm_tdup(as, ir); break;
+ case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
+
+ /* Write barriers. */
+ case IR_TBAR: asm_tbar(as, ir); break;
+ case IR_OBAR: asm_obar(as, ir); break;
+
+ /* Type conversions. */
+ case IR_CONV: asm_conv(as, ir); break;
+ case IR_TOSTR: asm_tostr(as, ir); break;
+ case IR_STRTO: asm_strto(as, ir); break;
+
+ /* Calls. */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
+ case IR_CALLXS: asm_callx(as, ir); break;
+ case IR_CARG: break;
+
+ default:
+ setintV(&as->J->errinfo, ir->o);
+ lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
+ break;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX];
+ uint32_t i, nargs = (int)CCI_NARGS(ci);
+ int nslots = 0, ngpr = REGARG_NUMGPR;
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++)
+ if (!LJ_SOFTFP && args[i] && irt_isnum(IR(args[i])->t)) {
+ ngpr &= ~1;
+ if (ngpr > 0) ngpr -= 2; else nslots += 2;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ /* May need extra exit for asm_stack_check on side traces. */
+ asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *cstart = NULL, *cend = p;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MCode *px = exitstub_addr(J, exitno) - 2;
+ for (; p < pe; p++) {
+ /* Look for bl_cc exitstub, replace with b_cc target. */
+ uint32_t ins = *p;
+ if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
+ ((ins ^ (px-p)) & 0x00ffffffu) == 0) {
+ *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
+ cend = p+1;
+ if (!cstart) cstart = p;
+ }
+ }
+ lua_assert(cstart != NULL);
+ lj_mcode_sync(cstart, cend);
+ lj_mcode_patch(J, mcarea, 1);
+}
+
diff --git a/src/LuaJIT/src/lj_asm_mips.h b/src/LuaJIT/src/lj_asm_mips.h
new file mode 100644
index 000000000..9bae47782
--- /dev/null
+++ b/src/LuaJIT/src/lj_asm_mips.h
@@ -0,0 +1,1949 @@
+/*
+** MIPS IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate a register or RID_ZERO. */
+static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!(allow & RSET_FPR) && irref_isk(ref) && IR(ref)->i == 0)
+ return RID_ZERO;
+ r = ra_allocref(as, ref, allow);
+ } else {
+ ra_noweak(as, r);
+ }
+ return r;
+}
+
+/* Allocate two source registers for three-operand instructions. */
+static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ Reg left = irl->r, right = irr->r;
+ if (ra_hasreg(left)) {
+ ra_noweak(as, left);
+ if (ra_noreg(right))
+ right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
+ else
+ ra_noweak(as, right);
+ } else if (ra_hasreg(right)) {
+ ra_noweak(as, right);
+ left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
+ } else if (ra_hashint(right)) {
+ right = ra_alloc1z(as, ir->op2, allow);
+ left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
+ } else {
+ left = ra_alloc1z(as, ir->op1, allow);
+ right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
+ }
+ return left | (right << 8);
+}
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Need some spare long-range jump slots, for out-of-range branches. */
+#define MIPS_SPAREJUMP 4
+
+/* Setup spare long-range jump slots per mcarea. */
+static void asm_sparejump_setup(ASMState *as)
+{
+ MCode *mxp = as->mcbot;
+ /* Assumes sizeof(MCLink) == 8. */
+ if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == 8) {
+ lua_assert(MIPSI_NOP == 0);
+ memset(mxp+2, 0, MIPS_SPAREJUMP*8);
+ mxp += MIPS_SPAREJUMP*2;
+ lua_assert(mxp < as->mctop);
+ lj_mcode_commitbot(as->J, mxp);
+ as->mcbot = mxp;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ }
+}
+
+/* Setup exit stub after the end of each trace. */
+static void asm_exitstub_setup(ASMState *as)
+{
+ MCode *mxp = as->mctop;
+ /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
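+ /* The li sits in the delay slot of the j, so it still executes. */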
+ *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
+ *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
+ lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0);
+ *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
+ as->mctop = mxp;
+}
+
+/* Keep this in-sync with exitstub_trace_addr(). */
+#define asm_exitstub_addr(as) ((as)->mctop)
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
+{
+ MCode *target = asm_exitstub_addr(as);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->invmcp = NULL;
+ as->loopinv = 1;
+ as->mcp = p+1;
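+ /* Opcode 0x1x branches (BEQ/BNE/BLEZ/BGTZ) are inverted by flipping */
+ /* bit 26; REGIMM and COP1 branches (BLTZ/BGEZ, BC1F/BC1T) by flipping */
+ /* the low bit of the rt field (bit 16). */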
+ mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */
+ target = p; /* Patch target later in asm_loop_fixup. */
+ }
+ emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
+ emit_branch(as, mi, rs, rt, target);
+}
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if there's no conflicting instruction between curins and ref. */
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
+ int32_t jgl = (intptr_t)J2G(as->J);
+ if ((uint32_t)(ofs-jgl) < 65536) {
+ *ofsp = ofs-jgl-32768;
+ return RID_JGL;
+ } else {
+ *ofsp = (int16_t)ofs;
+ return ra_allock(as, ofs-(int16_t)ofs, allow);
+ }
+ }
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
+ RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ int32_t ofs = 0;
+ Reg base;
+ if (ra_noreg(ir->r) && mayfuse(as, ref)) {
+ if (ir->o == IR_ADD) {
+ int32_t ofs2;
+ if (irref_isk(ir->op2) && (ofs2 = IR(ir->op2)->i, checki16(ofs2))) {
+ ref = ir->op1;
+ ofs = ofs2;
+ }
+ } else if (ir->o == IR_STRREF) {
+ int32_t ofs2 = 65536;
+ ofs = (int32_t)sizeof(GCstr);
+ if (irref_isk(ir->op2)) {
+ ofs2 = ofs + IR(ir->op2)->i;
+ ref = ir->op1;
+ } else if (irref_isk(ir->op1)) {
+ ofs2 = ofs + IR(ir->op1)->i;
+ ref = ir->op2;
+ }
+ if (!checki16(ofs2)) {
+ /* NYI: Fuse ADD with constant. */
+ Reg right, left = ra_alloc2(as, ir, allow);
+ right = (left >> 8); left &= 255;
+ emit_hsi(as, mi, rt, RID_TMP, ofs);
+ emit_dst(as, MIPSI_ADDU, RID_TMP, left, right);
+ return;
+ }
+ ofs = ofs2;
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+ emit_hsi(as, mi, rt, base, ofs);
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_NARGS(ci);
+ int32_t ofs = 16;
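+ /* Stack arguments start at offset 16: the o32 ABI reserves a 16 byte */
+ /* home area for the four GPR argument registers. */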
+ Reg gpr = REGARG_FIRSTGPR, fpr = REGARG_FIRSTFPR;
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func);
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ if (ref) {
+ IRIns *ir = IR(ref);
+ if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
+ !(ci->flags & CCI_VARARG)) {
+ lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */
+ ra_leftov(as, fpr, ref);
+ fpr += 2;
+ gpr += irt_isnum(ir->t) ? 2 : 1;
+ } else {
+ fpr = REGARG_LASTFPR+1;
+ if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
+ if (gpr <= REGARG_LASTGPR) {
+ lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */
+ if (irt_isnum(ir->t)) {
+ Reg r = ra_alloc1(as, ref, RSET_FPR);
+ emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
+ emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
+ lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */
+ gpr += 2;
+ } else if (irt_isfloat(ir->t)) {
+ Reg r = ra_alloc1(as, ref, RSET_FPR);
+ emit_tg(as, MIPSI_MFC1, gpr, r);
+ gpr++;
+ } else {
+ ra_leftov(as, gpr, ref);
+ gpr++;
+ }
+ } else {
+ Reg r = ra_alloc1z(as, ref, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
+ emit_spstore(as, ir, r, ofs);
+ ofs += irt_isnum(ir->t) ? 8 : 4;
+ }
+ }
+ } else {
+ fpr = REGARG_LASTFPR+1;
+ if (gpr <= REGARG_LASTGPR)
+ gpr++;
+ else
+ ofs += 4;
+ }
+ }
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP);
+ if ((ci->flags & CCI_NOFPRCLOBBER))
+ drop &= ~RSET_FPR;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lua_assert(!irt_ispri(ir->t));
+ if (irt_isfp(ir->t)) {
+ if ((ci->flags & CCI_CASTU64)) {
+ int32_t ofs = sps_scale(ir->s);
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
+ emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
+ }
+ if (ofs) {
+ emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
+ emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
+ }
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ ra_destreg(as, ir, RID_RET);
+ }
+ }
+}
+
+static void asm_call(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ asm_collectargs(as, ir, ci, args);
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(void *)(irf->i);
+ } else { /* Need specific register for indirect calls. */
+ Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
+ MCode *p = as->mcp;
+ if (r == RID_CFUNCADDR)
+ *--p = MIPSI_NOP;
+ else
+ *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r);
+ *--p = MIPSI_JALR | MIPSF_S(r);
+ as->mcp = p;
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
+{
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_F2)|
+ RID2RSET(RID_F4)|RID2RSET(RID_F12)|RID2RSET(RID_F14);
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_FPRET);
+ asm_gencall(as, ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guard(as, MIPSI_BNE, RID_TMP,
+ ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_tsi(as, MIPSI_LW, RID_TMP, base, -8);
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_guard(as, MIPSI_BC1F, 0, 0);
+ emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
+ emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, MIPSI_CVT_W_D, tmp, left);
+}
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_FPR;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, allow);
+ Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
+ Reg tmp = ra_scratch(as, rset_clear(allow, right));
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
+}
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+ IRRef lref = ir->op1;
+ lua_assert(irt_type(ir->t) != st);
+ lua_assert(!(irt_isint64(ir->t) ||
+ (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S,
+ dest, ra_alloc1(as, lref, RSET_FPR));
+ } else if (st == IRT_U32) { /* U32 to FP conversion. */
+ /* y = (x ^ 0x80000000) + 2147483648.0 */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ emit_fgh(as, irt_isfloat(ir->t) ? MIPSI_ADD_S : MIPSI_ADD_D,
+ dest, dest, tmp);
+ emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
+ dest, dest);
+ if (irt_isfloat(ir->t))
+ emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
+ RSET_GPR);
+ else
+ emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
+ RSET_GPR);
+ emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
+ emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
+ emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
+ } else { /* Integer to FP conversion. */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
+ dest, dest);
+ emit_tg(as, MIPSI_MTC1, left, dest);
+ }
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lua_assert(irt_isint(ir->t) && st == IRT_NUM);
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, lref, RSET_FPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ if (irt_isu32(ir->t)) {
+ /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
+ emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
+ emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D,
+ tmp, tmp);
+ emit_fgh(as, st == IRT_FLOAT ? MIPSI_SUB_S : MIPSI_SUB_D,
+ tmp, left, tmp);
+ if (st == IRT_FLOAT)
+ emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
+ RSET_GPR);
+ else
+ emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
+ RSET_GPR);
+ } else {
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
+ tmp, left);
+ }
+ }
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
+ if ((ir->op2 & IRCONV_SEXT)) {
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
+ } else {
+ uint32_t shift = st == IRT_I8 ? 24 : 16;
+ emit_dta(as, MIPSI_SRA, dest, dest, shift);
+ emit_dta(as, MIPSI_SLL, dest, left, shift);
+ }
+ } else {
+ emit_tsi(as, MIPSI_ANDI, dest, left,
+ (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
+ }
+ } else { /* 32/64 bit integer conversions. */
+ /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+}
+
+#if LJ_HASFFI
+static void asm_conv64(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
+ IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
+ IRCallID id;
+ const CCallInfo *ci;
+ IRRef args[2];
+ args[LJ_BE?0:1] = ir->op1;
+ args[LJ_BE?1:0] = (ir-1)->op1;
+ if (st == IRT_NUM || st == IRT_FLOAT) {
+ id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
+ ir--;
+ } else {
+ id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
+ }
+ ci = &lj_ir_callinfo[id];
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+#endif
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum];
+ IRRef args[2];
+ RegSet drop = RSET_SCRATCH;
+ if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
+ ra_evictset(as, drop);
+ asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ /* Store the result to the spill slot or temp slots. */
+ emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1),
+ RID_SP, sps_scale(ir->s));
+}
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
+ ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
+ else /* Otherwise force a spill and use the spill slot. */
+ emit_tsi(as, MIPSI_ADDIU, dest, RID_SP, ra_spill(as, ir));
+ } else {
+ /* Otherwise use g->tmptv to hold the TValue. */
+ RegSet allow = rset_exclude(RSET_GPR, dest);
+ Reg type;
+ emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ emit_setgl(as, src, tmptv.gcr);
+ }
+ type = ra_allock(as, irt_toitype(ir->t), allow);
+ emit_setgl(as, type, tmptv.it);
+ }
+}
+
+static void asm_tostr(ASMState *as, IRIns *ir)
+{
+ IRRef args[2];
+ args[0] = ASMREF_L;
+ as->gcsteps++;
+ if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
+ args[1] = ASMREF_TMP1; /* const lua_Number * */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
+ } else {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
+ args[1] = ir->op1; /* int32_t k */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ }
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_tsi(as, MIPSI_ADDIU, dest, base, ofs);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ emit_dst(as, MIPSI_ADDU, dest, RID_TMP, base);
+ emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ IRType1 kt = irkey->t;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+
+ rset_clear(allow, tab);
+ if (irt_isnum(kt)) {
+ key = ra_alloc1(as, refkey, RSET_FPR);
+ tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
+ } else if (!irt_ispri(kt)) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ type = ra_allock(as, irt_toitype(irkey->t), allow);
+ rset_clear(allow, type);
+ }
+ tmp2 = ra_scratch(as, allow);
+ rset_clear(allow, tmp2);
+
+ /* Key not found in chain: load niltv. */
+ l_end = emit_label(as);
+ if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+ else
+ *--as->mcp = MIPSI_NOP;
+ /* Follow hash chain until the end. */
+ emit_move(as, dest, tmp1);
+ l_loop = --as->mcp;
+ emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (irt_isnum(kt)) {
+ emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
+ emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
+ emit_tg(as, MIPSI_MFC1, tmp1, key+1);
+ emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next);
+ emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM);
+ emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
+ } else {
+ if (irt_ispri(kt)) {
+ emit_branch(as, MIPSI_BEQ, tmp1, type, l_end);
+ } else {
+ emit_branch(as, MIPSI_BEQ, tmp2, key, l_end);
+ emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
+ emit_branch(as, MIPSI_BNE, tmp1, type, l_next);
+ }
+ }
+ emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it));
+ *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
+ if (khash == 0) {
+ emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
+ } else {
+ Reg tmphash = tmp1;
+ if (irref_isk(refkey))
+ tmphash = ra_allock(as, khash, allow);
+ emit_dst(as, MIPSI_ADDU, dest, dest, tmp1);
+ lua_assert(sizeof(Node) == 24);
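+ /* Scale the node index by sizeof(Node)==24 as (idx<<5)-(idx<<3); the */
+ /* emit_* calls here are in reverse order of execution. */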
+ emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
+ emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
+ emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
+ emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
+ emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
+ if (irref_isk(refkey)) {
+ /* Nothing to do. */
+ } else if (irt_isstr(kt)) {
+ emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
+ emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
+ emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
+ emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
+ emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
+ if (irt_isnum(kt)) {
+ emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
+ } else {
+ emit_dst(as, MIPSI_OR, dest, dest, tmp1);
+ emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1);
+ emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
+ }
+ emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
+ emit_tg(as, MIPSI_MFC1, tmp2, key);
+ emit_tg(as, MIPSI_MFC1, tmp1, key+1);
+ } else {
+ emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
+ emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
+ emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
+ }
+ }
+ }
+}
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg key = RID_NONE, type = RID_TMP, idx = node;
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ int32_t lo, hi;
+ lua_assert(ofs % sizeof(Node) == 0);
+ if (ofs > 32736) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_tsi(as, MIPSI_ADDIU, dest, node, ofs);
+ }
+ if (!irt_ispri(irkey->t)) {
+ key = ra_scratch(as, allow);
+ rset_clear(allow, key);
+ }
+ if (irt_isnum(irkey->t)) {
+ lo = (int32_t)ir_knum(irkey)->u32.lo;
+ hi = (int32_t)ir_knum(irkey)->u32.hi;
+ } else {
+ lo = irkey->i;
+ hi = irt_toitype(irkey->t);
+ if (!ra_hasreg(key))
+ goto nolo;
+ }
+ asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO);
+nolo:
+ asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
+ if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
+ emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
+ if (ofs > 32736)
+ emit_dst(as, MIPSI_ADDU, dest, node, ra_allock(as, ofs, allow));
+}
+
+static void asm_newref(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
+ IRRef args[3];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* GCtab *t */
+ args[2] = ASMREF_TMP1; /* cTValue *key */
+ asm_setupresult(as, ir, ci); /* TValue * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ /* NYI: Check that UREFO is still open and not aliasing a slot. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, MIPSI_LW, dest, v, RSET_GPR);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, MIPSI_ADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
+ emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_tsi(as, MIPSI_LW, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_tsi(as, MIPSI_LW, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lua_assert(!ra_used(ir));
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRRef ref = ir->op2, refk = ir->op1;
+ int32_t ofs = (int32_t)sizeof(GCstr);
+ Reg r;
+ if (irref_isk(ref)) {
+ IRRef tmp = refk; refk = ref; ref = tmp;
+ } else if (!irref_isk(refk)) {
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ IRIns *irr = IR(ir->op2);
+ if (ra_hasreg(irr->r)) {
+ ra_noweak(as, irr->r);
+ right = irr->r;
+ } else if (mayfuse(as, irr->op2) &&
+ irr->o == IR_ADD && irref_isk(irr->op2) &&
+ checki16(ofs + IR(irr->op2)->i)) {
+ ofs += IR(irr->op2)->i;
+ right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
+ } else {
+ right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs);
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+ return;
+ }
+ r = ra_alloc1(as, ref, RSET_GPR);
+ ofs += IR(refk)->i;
+ if (checki16(ofs))
+ emit_tsi(as, MIPSI_ADDIU, dest, r, ofs);
+ else
+ emit_dst(as, MIPSI_ADDU, dest, r,
+ ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static MIPSIns asm_fxloadins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return MIPSI_LB;
+ case IRT_U8: return MIPSI_LBU;
+ case IRT_I16: return MIPSI_LH;
+ case IRT_U16: return MIPSI_LHU;
+ case IRT_NUM: return MIPSI_LDC1;
+ case IRT_FLOAT: return MIPSI_LWC1;
+ default: return MIPSI_LW;
+ }
+}
+
+static MIPSIns asm_fxstoreins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return MIPSI_SB;
+ case IRT_I16: case IRT_U16: return MIPSI_SH;
+ case IRT_NUM: return MIPSI_SDC1;
+ case IRT_FLOAT: return MIPSI_SWC1;
+ default: return MIPSI_SW;
+ }
+}
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ MIPSIns mi = asm_fxloadins(ir);
+ int32_t ofs;
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_tsi(as, MIPSI_ADDIU, dest, idx, ofs);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ lua_assert(!irt_isfp(ir->t));
+ emit_tsi(as, mi, dest, idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ Reg src = ra_alloc1z(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ MIPSIns mi = asm_fxstoreins(ir);
+ lua_assert(!irt_isfp(ir->t));
+ emit_tsi(as, mi, src, idx, ofs);
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
+ asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR);
+}
+
+static void asm_xstore(ASMState *as, IRIns *ir)
+{
+ Reg src = ra_alloc1z(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src));
+}
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_TMP, idx;
+ RegSet allow = RSET_GPR;
+ int32_t ofs = 0;
+ if (ra_used(ir)) {
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
+ rset_clear(allow, dest);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ rset_clear(allow, idx);
+ if (irt_isnum(t)) {
+ asm_guard(as, MIPSI_BEQ, type, RID_ZERO);
+ emit_tsi(as, MIPSI_SLTIU, type, type, (int32_t)LJ_TISNUM);
+ if (ra_hasreg(dest))
+ emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
+ } else {
+ asm_guard(as, MIPSI_BNE, type, ra_allock(as, irt_toitype(t), allow));
+ if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
+ }
+ emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, type = RID_NONE;
+ int32_t ofs = 0;
+ if (irt_isnum(ir->t)) {
+ src = ra_alloc1(as, ir->op2, RSET_FPR);
+ } else {
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ }
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ rset_clear(allow, type);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (irt_isnum(ir->t)) {
+ emit_hsi(as, MIPSI_SDC1, src, idx, ofs);
+ } else {
+ if (ra_hasreg(src))
+ emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
+ emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
+ lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
+ lua_assert(!irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ dest = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, dest);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+ } else if (ra_used(ir)) {
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
+ rset_clear(allow, dest);
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+ if ((ir->op2 & IRSLOAD_CONVERT)) {
+ if (irt_isint(t)) {
+ Reg tmp = ra_scratch(as, RSET_FPR);
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, MIPSI_CVT_W_D, tmp, tmp);
+ dest = tmp;
+ t.irt = IRT_NUM; /* Check for original type. */
+ } else {
+ Reg tmp = ra_scratch(as, RSET_GPR);
+ emit_fg(as, MIPSI_CVT_D_W, dest, dest);
+ emit_tg(as, MIPSI_MTC1, tmp, dest);
+ dest = tmp;
+ t.irt = IRT_INT; /* Check for original type. */
+ }
+ }
+ goto dotypecheck;
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+dotypecheck:
+ if (irt_isnum(t)) {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
+ type = RID_TMP;
+ }
+ if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
+ } else {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ Reg ktype = ra_allock(as, irt_toitype(t), allow);
+ asm_guard(as, MIPSI_BNE, RID_TMP, ktype);
+ type = RID_TMP;
+ }
+ if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
+ }
+ if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID typeid = (CTypeID)IR(ir->op1)->i;
+ CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
+ lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i;
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[2];
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ RegSet drop = RSET_SCRATCH;
+ lua_assert(sz != CTSIZE_INVALID);
+
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ as->gcsteps++;
+
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ if (ra_used(ir))
+ ra_destreg(as, ir, RID_RET); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ int32_t ofs = sizeof(GCcdata);
+ lua_assert(sz == 4 || sz == 8);
+ if (sz == 8) {
+ ofs += 4;
+ lua_assert((ir+1)->o == IR_HIOP);
+ if (LJ_LE) ir++;
+ }
+ for (;;) {
+ Reg r = ra_alloc1z(as, ir->op2, allow);
+ emit_tsi(as, MIPSI_SW, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; if (LJ_BE) ir++; else ir--;
+ }
+ }
+ /* Initialize gct and typeid. lj_mem_newgco() already sets marked. */
+ emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
+ emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, typeid));
+ emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
+ emit_ti(as, MIPSI_LI, RID_TMP, typeid); /* Lower 16 bit used. Sign-ext ok. */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#else
+#define asm_cnew(as, ir) ((void)0)
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg link = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_tsi(as, MIPSI_SW, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_setgl(as, tab, gc.grayagain);
+ emit_getgl(as, link, gc.grayagain);
+ emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP); /* Clear black bit. */
+ emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK);
+ emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lua_assert(IR(ir->op1)->o == IR_UREFC);
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
+ emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
+ emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK);
+ emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_tsi(as, MIPSI_LBU, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ emit_fgh(as, mi, dest, left, right);
+}
+
+static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
+ emit_fg(as, mi, dest, left);
+}
+
+static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
+{
+ IRIns *irp = IR(ir->op1);
+ if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
+ IRIns *irpp = IR(irp->op1);
+ if (irpp == ir-2 && irpp->o == IR_FPMATH &&
+ irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
+ IRRef args[2];
+ args[0] = irpp->op1;
+ args[1] = irp->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, MIPSI_ADD_D);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+ }
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, MIPSI_SUB_D);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_SUBU, dest, left, right);
+ }
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, MIPSI_MUL_D);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_MUL, dest, left, right);
+ }
+}
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fpunary(as, ir, MIPSI_NEG_D);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
+ }
+}
+
+static void asm_arithov(ASMState *as, IRIns *ir)
+{
+ Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int k = IR(ir->op2)->i;
+ if (ir->o == IR_SUBOV) k = -k;
+ if (checki16(k)) { /* (dest < left) == (k >= 0 ? 1 : 0) */
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left);
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ if (dest == left) emit_move(as, RID_TMP, left);
+ return;
+ }
+ }
+ left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
+ right), dest));
+ asm_guard(as, MIPSI_BLTZ, RID_TMP, 0);
+ emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp);
+ if (ir->o == IR_ADDOV) { /* ((dest^left) & (dest^right)) < 0 */
+ emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right);
+ } else { /* ((dest^left) & (dest^~right)) < 0 */
+ emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest);
+ emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO);
+ }
+ emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left);
+ emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right);
+ if (dest == left || dest == right)
+ emit_move(as, RID_TMP, dest == left ? left : right);
+}
+
+static void asm_mulov(ASMState *as, IRIns *ir)
+{
+#if LJ_DUALNUM
+#error "NYI: MULOV"
+#else
+ UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused in single-number mode. */
+#endif
+}
+
+#if LJ_HASFFI
+static void asm_add64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k == 0) {
+ emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP);
+ goto loarith;
+ } else if (checki16(k)) {
+ emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ goto loarith;
+ }
+ }
+ emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+loarith:
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k == 0) {
+ if (dest != left)
+ emit_move(as, dest, left);
+ return;
+ } else if (checki16(k)) {
+ if (dest == left) {
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left));
+ emit_move(as, dest, tmp);
+ dest = tmp;
+ }
+ emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left);
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ if (dest == left && dest == right) {
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
+ emit_move(as, dest, tmp);
+ dest = tmp;
+ }
+ emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left);
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+}
+
+static void asm_sub64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
+ emit_dst(as, MIPSI_SUBU, dest, left, right);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (dest == left) {
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
+ emit_move(as, dest, tmp);
+ dest = tmp;
+ }
+ emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest);
+ emit_dst(as, MIPSI_SUBU, dest, left, right);
+}
+
+static void asm_neg64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
+ emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest);
+ emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
+}
+#endif
+
+static void asm_bitnot(ASMState *as, IRIns *ir)
+{
+ Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
+ IRIns *irl = IR(ir->op1);
+ if (mayfuse(as, ir->op1) && irl->o == IR_BOR) {
+ left = ra_alloc2(as, irl, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ } else {
+ left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ right = RID_ZERO;
+ }
+ emit_dst(as, MIPSI_NOR, dest, left, right);
+}
+
+static void asm_bitswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
+ emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
+ } else {
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest));
+ emit_dst(as, MIPSI_OR, dest, dest, tmp);
+ emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
+ emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00);
+ emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8);
+ emit_dta(as, MIPSI_SRL, dest, left, 8);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00);
+ emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP);
+ emit_dta(as, MIPSI_SRL, tmp, left, 24);
+ emit_dta(as, MIPSI_SLL, RID_TMP, left, 24);
+ }
+}
+
+static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checku16(k)) {
+ emit_tsi(as, mik, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dst(as, mi, dest, left, right);
+}
+
+static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
+ emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), shift);
+ } else {
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. */
+ }
+}
+
+static void asm_bitror(ASMState *as, IRIns *ir)
+{
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_rotr(as, dest, left, RID_TMP, shift);
+ } else {
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
+ emit_dst(as, MIPSI_SRLV, dest, right, left);
+ emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left);
+ emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right);
+ }
+ }
+}
+
+static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
+{
+ if (irt_isnum(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ if (dest == left) {
+ emit_fg(as, MIPSI_MOVT_D, dest, right);
+ } else {
+ emit_fg(as, MIPSI_MOVF_D, dest, left);
+ if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
+ }
+ emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? right : left);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (dest == left) {
+ emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
+ } else {
+ emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
+ if (dest != right) emit_move(as, dest, right);
+ }
+ emit_dst(as, MIPSI_SLT, RID_TMP,
+ ismax ? left : right, ismax ? right : left);
+ }
+}
+
+/* -- Comparisons --------------------------------------------------------- */
+
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+ /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
+ IROp op = ir->o;
+ if (irt_isnum(ir->t)) {
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
+ emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
+ } else {
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (op == IR_ABC) op = IR_UGT;
+ if ((op&4) == 0 && irref_isk(ir->op2) && IR(ir->op2)->i == 0) {
+ MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
+ ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
+ asm_guard(as, mi, left, 0);
+ } else {
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if ((op&2)) k++;
+ if (checki16(k)) {
+ asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI,
+ RID_TMP, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT,
+ RID_TMP, (op&2) ? right : left, (op&2) ? left : right);
+ }
+ }
+}
+
+static void asm_compeq(ASMState *as, IRIns *ir)
+{
+ Reg right, left = ra_alloc2(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (irt_isnum(ir->t)) {
+ asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
+ emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
+ } else {
+ asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
+ }
+}
+
+#if LJ_HASFFI
+/* 64 bit integer comparisons. */
+static void asm_comp64(ASMState *as, IRIns *ir)
+{
+ /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
+ IROp op = (ir-1)->o;
+ MCLabel l_end;
+ Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
+ righthi = (lefthi >> 8); lefthi &= 255;
+ leftlo = ra_alloc2(as, ir-1,
+ rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi));
+ rightlo = (leftlo >> 8); leftlo &= 255;
+ asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ l_end = emit_label(as);
+ if (lefthi != righthi)
+ emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP,
+ (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi);
+ emit_dst(as, MIPSI_SLTU, RID_TMP,
+ (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo);
+ if (lefthi != righthi)
+ emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end);
+}
+
+static void asm_comp64eq(ASMState *as, IRIns *ir)
+{
+ Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO);
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
+ emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
+ emit_dst(as, MIPSI_XOR, tmp, left, right);
+ left = ra_alloc2(as, ir-1, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_XOR, RID_TMP, left, right);
+}
+#endif
+
+/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
+
+/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+#if LJ_HASFFI
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+ if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
+ as->curins--; /* Always skip the CONV. */
+ if (usehi || uselo)
+ asm_conv64(as, ir);
+ return;
+ } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+ asm_comp64(as, ir);
+ return;
+ } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+ asm_comp64eq(as, ir);
+ return;
+ }
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+ case IR_ADD: as->curins--; asm_add64(as, ir); break;
+ case IR_SUB: as->curins--; asm_sub64(as, ir); break;
+ case IR_NEG: as->curins--; asm_neg64(as, ir); break;
+ case IR_CALLN:
+ case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+ default: lua_assert(0); break;
+ }
+#else
+ UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */
+#endif
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
+ Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
+ ExitNo oldsnap = as->snapno;
+ rset_clear(allow, pbase);
+ tmp = allow ? rset_pickbot(allow) :
+ (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
+ as->snapno = exitno;
+ asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
+ as->snapno = oldsnap;
+ if (allow == RSET_EMPTY) /* Restore temp. register. */
+ emit_tsi(as, MIPSI_LW, tmp, RID_SP, 0);
+ else
+ ra_modified(as, tmp);
+ emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
+ emit_dst(as, MIPSI_SUBU, RID_TMP, tmp, pbase);
+ emit_tsi(as, MIPSI_LW, tmp, tmp, offsetof(lua_State, maxstack));
+ if (pbase == RID_TMP)
+ emit_getgl(as, RID_TMP, jit_base);
+ emit_getgl(as, tmp, jit_L);
+ if (allow == RSET_EMPTY) /* Spill temp. register. */
+ emit_tsi(as, MIPSI_SW, tmp, RID_SP, 0);
+}
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
+ } else {
+ Reg type;
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ rset_clear(allow, src);
+ emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0));
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s == 0) continue; /* Do not overwrite link to previous frame. */
+ type = ra_allock(as, (int32_t)(*flinks--), allow);
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ }
+ emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
+ }
+ checkmclim(as);
+ }
+ lua_assert(map + nent == flinks);
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ /* Assumes asm_snap_prep() already done. */
+ asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ tmp = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp, (int32_t)as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end);
+ emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp);
+ emit_getgl(as, tmp, gc.threshold);
+ emit_getgl(as, RID_TMP, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ p[-1] = MIPSI_NOP;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guard already inverted the cond branch. Only patch the target. */
+ p[-3] |= ((target-p+2) & 0x0000ffffu);
+ } else {
+ p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
+ }
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (as->loopinv) as->mctop--;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (r != RID_BASE)
+ emit_move(as, r, RID_BASE);
+ }
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (as->loopinv) as->mctop--;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (irp->r == r) {
+ rset_clear(allow, r); /* Mark same BASE register as coalesced. */
+ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
+ rset_clear(allow, irp->r);
+ emit_move(as, r, irp->r); /* Move from coalesced parent reg. */
+ } else {
+ emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
+ }
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ int32_t spadj = as->T->spadjust;
+ MCode *p = as->mctop-1;
+ *p = spadj ? (MIPSI_ADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
+ p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ as->mcp = as->mctop-2; /* Leave room for branch plus nop or stack adj. */
+ as->invmcp = as->loopref ? as->mcp : NULL;
+}
+
+/* -- Instruction dispatch ------------------------------------------------ */
+
+/* Assemble a single instruction. */
+static void asm_ir(ASMState *as, IRIns *ir)
+{
+ switch ((IROp)ir->o) {
+ /* Miscellaneous ops. */
+ case IR_LOOP: asm_loop(as); break;
+ case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
+ case IR_USE:
+ ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
+ case IR_PHI: asm_phi(as, ir); break;
+ case IR_HIOP: asm_hiop(as, ir); break;
+
+ /* Guarded assertions. */
+ case IR_EQ: case IR_NE: asm_compeq(as, ir); break;
+ case IR_LT: case IR_GE: case IR_LE: case IR_GT:
+ case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
+ case IR_ABC:
+ asm_comp(as, ir);
+ break;
+
+ case IR_RETF: asm_retf(as, ir); break;
+
+ /* Bit ops. */
+ case IR_BNOT: asm_bitnot(as, ir); break;
+ case IR_BSWAP: asm_bitswap(as, ir); break;
+
+ case IR_BAND: asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI); break;
+ case IR_BOR: asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI); break;
+ case IR_BXOR: asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI); break;
+
+ case IR_BSHL: asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL); break;
+ case IR_BSHR: asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL); break;
+ case IR_BSAR: asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA); break;
+ case IR_BROL: lua_assert(0); break;
+ case IR_BROR: asm_bitror(as, ir); break;
+
+ /* Arithmetic ops. */
+ case IR_ADD: asm_add(as, ir); break;
+ case IR_SUB: asm_sub(as, ir); break;
+ case IR_MUL: asm_mul(as, ir); break;
+ case IR_DIV: asm_fparith(as, ir, MIPSI_DIV_D); break;
+ case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
+ case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
+ case IR_NEG: asm_neg(as, ir); break;
+
+ case IR_ABS: asm_fpunary(as, ir, MIPSI_ABS_D); break;
+ case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
+ case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
+ case IR_MIN: asm_min_max(as, ir, 0); break;
+ case IR_MAX: asm_min_max(as, ir, 1); break;
+ case IR_FPMATH:
+ if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
+ break;
+ if (ir->op2 <= IRFPM_TRUNC)
+ asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
+ else
+ asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
+ break;
+
+ /* Overflow-checking arithmetic ops. */
+ case IR_ADDOV: asm_arithov(as, ir); break;
+ case IR_SUBOV: asm_arithov(as, ir); break;
+ case IR_MULOV: asm_mulov(as, ir); break;
+
+ /* Memory references. */
+ case IR_AREF: asm_aref(as, ir); break;
+ case IR_HREF: asm_href(as, ir); break;
+ case IR_HREFK: asm_hrefk(as, ir); break;
+ case IR_NEWREF: asm_newref(as, ir); break;
+ case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
+ case IR_FREF: asm_fref(as, ir); break;
+ case IR_STRREF: asm_strref(as, ir); break;
+
+ /* Loads and stores. */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ asm_ahuvload(as, ir);
+ break;
+ case IR_FLOAD: asm_fload(as, ir); break;
+ case IR_XLOAD: asm_xload(as, ir); break;
+ case IR_SLOAD: asm_sload(as, ir); break;
+
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
+ case IR_FSTORE: asm_fstore(as, ir); break;
+ case IR_XSTORE: asm_xstore(as, ir); break;
+
+ /* Allocations. */
+ case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
+ case IR_TNEW: asm_tnew(as, ir); break;
+ case IR_TDUP: asm_tdup(as, ir); break;
+ case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
+
+ /* Write barriers. */
+ case IR_TBAR: asm_tbar(as, ir); break;
+ case IR_OBAR: asm_obar(as, ir); break;
+
+ /* Type conversions. */
+ case IR_CONV: asm_conv(as, ir); break;
+ case IR_TOBIT: asm_tobit(as, ir); break;
+ case IR_TOSTR: asm_tostr(as, ir); break;
+ case IR_STRTO: asm_strto(as, ir); break;
+
+ /* Calls. */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
+ case IR_CALLXS: asm_callx(as, ir); break;
+ case IR_CARG: break;
+
+ default:
+ setintV(&as->J->errinfo, ir->o);
+ lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
+ break;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX];
+ uint32_t i, nargs = (int)CCI_NARGS(ci);
+ int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++) {
+ if (args[i] && irt_isfp(IR(args[i])->t) &&
+ nfpr > 0 && !(ci->flags & CCI_VARARG)) {
+ nfpr--;
+ ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
+ } else if (args[i] && irt_isnum(IR(args[i])->t)) {
+ nfpr = 0;
+ ngpr = ngpr & ~1;
+ if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
+ } else {
+ nfpr = 0;
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ asm_sparejump_setup(as);
+ asm_exitstub_setup(as);
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *px = exitstub_trace_addr(T, exitno);
+ MCode *cstart = NULL, *cstop = NULL;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno;
+ MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
+ for (p++; p < pe; p++) {
+ if (*p == exitload) { /* Look for load of exit number. */
+ if (((p[-1] ^ (px-p)) & 0xffffu) == 0) { /* Look for exitstub branch. */
+ ptrdiff_t delta = target - p;
+ if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */
+ patchbranch:
+ p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu);
+ *p = MIPSI_NOP; /* Replace the load of the exit number. */
+ cstop = p;
+ if (!cstart) cstart = p-1;
+ } else { /* Branch out of range. Use spare jump slot in mcarea. */
+ int i;
+ for (i = 2; i < 2+MIPS_SPAREJUMP*2; i += 2) {
+ if (mcarea[i] == tjump) {
+ delta = mcarea+i - p;
+ goto patchbranch;
+ } else if (mcarea[i] == MIPSI_NOP) {
+ mcarea[i] = tjump;
+ cstart = mcarea+i;
+ delta = mcarea+i - p;
+ goto patchbranch;
+ }
+ }
+ /* Ignore jump slot overflow. Child trace is simply not attached. */
+ }
+ } else if (p+1 == pe) {
+ /* Patch NOP after code for inverted loop branch. Use of J is ok. */
+ lua_assert(p[1] == MIPSI_NOP);
+ p[1] = tjump;
+ *p = MIPSI_NOP; /* Replace the load of the exit number. */
+ cstop = p+2;
+ if (!cstart) cstart = p+1;
+ }
+ }
+ }
+ if (cstart) lj_mcode_sync(cstart, cstop);
+ lj_mcode_patch(J, mcarea, 1);
+}
+
diff --git a/src/LuaJIT/src/lj_asm_ppc.h b/src/LuaJIT/src/lj_asm_ppc.h
new file mode 100644
index 000000000..73942b8bf
--- /dev/null
+++ b/src/LuaJIT/src/lj_asm_ppc.h
@@ -0,0 +1,2144 @@
+/*
+** PPC IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate two source registers for three-operand instructions. */
+static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ Reg left = irl->r, right = irr->r;
+ if (ra_hasreg(left)) {
+ ra_noweak(as, left);
+ if (ra_noreg(right))
+ right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
+ else
+ ra_noweak(as, right);
+ } else if (ra_hasreg(right)) {
+ ra_noweak(as, right);
+ left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
+ } else if (ra_hashint(right)) {
+ right = ra_allocref(as, ir->op2, allow);
+ left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
+ } else {
+ left = ra_allocref(as, ir->op1, allow);
+ right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
+ }
+ return left | (right << 8);
+}
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Setup exit stubs after the end of each trace. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ MCode *mxp = as->mctop;
+ /* 1: mflr r0; bl ->vm_exit_handler; li r0, traceno; bl <1; bl <1; ... */
+ for (i = nexits-1; (int32_t)i >= 0; i--)
+ *--mxp = PPCI_BL|(((-3-i)&0x00ffffffu)<<2);
+ *--mxp = PPCI_LI|PPCF_T(RID_TMP)|as->T->traceno; /* Read by exit handler. */
+ mxp--;
+ *mxp = PPCI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)&0x00ffffffu)<<2);
+ *--mxp = PPCI_MFLR|PPCF_T(RID_TMP);
+ as->mctop = mxp;
+}
+
+static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
+{
+ /* Keep this in-sync with exitstub_trace_addr(). */
+ return as->mctop + exitno + 3;
+}
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guardcc(ASMState *as, PPCCC cc)
+{
+ MCode *target = asm_exitstub_addr(as, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *p = PPCI_B | (((target-p) & 0x00ffffffu) << 2);
+ emit_condbranch(as, PPCI_BC, cc^4, p);
+ return;
+ }
+ emit_condbranch(as, PPCI_BC, cc, target);
+}
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if there's no conflicting instruction between curins and ref. */
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+/* Indicates load/store indexed is ok. */
+#define AHUREF_LSX ((int32_t)0x80000000)
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ }
+ if (*ofsp == AHUREF_LSX) {
+ Reg base = ra_alloc1(as, ir->op1, allow);
+ Reg idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ return base | (idx << 8);
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
+ int32_t jgl = (intptr_t)J2G(as->J);
+ if ((uint32_t)(ofs-jgl) < 65536) {
+ *ofsp = ofs-jgl-32768;
+ return RID_JGL;
+ } else {
+ *ofsp = (int16_t)ofs;
+ return ra_allock(as, ofs-(int16_t)ofs, allow);
+ }
+ }
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
+ RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ int32_t ofs = 0;
+ Reg base;
+ if (ra_noreg(ir->r) && mayfuse(as, ref)) {
+ if (ir->o == IR_ADD) {
+ if (irref_isk(ir->op2) && (ofs = IR(ir->op2)->i, checki16(ofs))) {
+ ref = ir->op1;
+ } else {
+ Reg right, left = ra_alloc2(as, ir, allow);
+ right = (left >> 8); left &= 255;
+ emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
+ return;
+ }
+ } else if (ir->o == IR_STRREF) {
+ ofs = (int32_t)sizeof(GCstr);
+ if (irref_isk(ir->op2)) {
+ ofs += IR(ir->op2)->i;
+ ref = ir->op1;
+ } else if (irref_isk(ir->op1)) {
+ ofs += IR(ir->op1)->i;
+ ref = ir->op2;
+ } else {
+ /* NYI: Fuse ADD with constant. */
+ Reg tmp, right, left = ra_alloc2(as, ir, allow);
+ right = (left >> 8); left &= 255;
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(allow, left), right));
+ emit_fai(as, pi, rt, tmp, ofs);
+ emit_tab(as, PPCI_ADD, tmp, left, right);
+ return;
+ }
+ if (!checki16(ofs)) {
+ Reg left = ra_alloc1(as, ref, allow);
+ Reg right = ra_allock(as, ofs, rset_exclude(allow, left));
+ emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
+ return;
+ }
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+ emit_fai(as, pi, rt, base, ofs);
+}
+
+/* Fuse XLOAD/XSTORE reference into indexed-only load/store operand. */
+static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
+ RegSet allow)
+{
+ IRIns *ira = IR(ref);
+ Reg right, left;
+ if (mayfuse(as, ref) && ira->o == IR_ADD && ra_noreg(ira->r)) {
+ left = ra_alloc2(as, ira, allow);
+ right = (left >> 8); left &= 255;
+ } else {
+ right = ra_alloc1(as, ref, allow);
+ left = RID_R0;
+ }
+ emit_tab(as, pi, rt, left, right);
+}
+
+/* Fuse to multiply-add/sub instruction. */
+static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ IRIns *irm;
+ if (lref != rref &&
+ ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
+ ra_noreg(irm->r)) ||
+ (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
+ (rref = lref, pi = pir, ra_noreg(irm->r))))) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg add = ra_alloc1(as, rref, RSET_FPR);
+ Reg right, left = ra_alloc2(as, irm, rset_exclude(RSET_FPR, add));
+ right = (left >> 8); left &= 255;
+ emit_facb(as, pi, dest, left, right, add);
+ return 1;
+ }
+ return 0;
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_NARGS(ci);
+ int32_t ofs = 8;
+ Reg gpr = REGARG_FIRSTGPR, fpr = REGARG_FIRSTFPR;
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func);
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ if (ref) {
+ IRIns *ir = IR(ref);
+ if (irt_isfp(ir->t)) {
+ if (fpr <= REGARG_LASTFPR) {
+ lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */
+ ra_leftov(as, fpr, ref);
+ fpr++;
+ } else {
+ Reg r = ra_alloc1(as, ref, RSET_FPR);
+ if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
+ emit_spstore(as, ir, r, ofs);
+ ofs += irt_isnum(ir->t) ? 8 : 4;
+ }
+ } else {
+ if (gpr <= REGARG_LASTGPR) {
+ lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */
+ ra_leftov(as, gpr, ref);
+ gpr++;
+ } else {
+ Reg r = ra_alloc1(as, ref, RSET_GPR);
+ emit_spstore(as, ir, r, ofs);
+ ofs += 4;
+ }
+ }
+ } else {
+ if (gpr <= REGARG_LASTGPR)
+ gpr++;
+ else
+ ofs += 4;
+ }
+ }
+ if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */
+ emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6);
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP);
+ if ((ci->flags & CCI_NOFPRCLOBBER))
+ drop &= ~RSET_FPR;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lua_assert(!irt_ispri(ir->t));
+ if (irt_isfp(ir->t)) {
+ if ((ci->flags & CCI_CASTU64)) {
+ /* Use spill slot or temp slots. */
+ int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_fai(as, PPCI_LFD, dest, RID_SP, ofs);
+ }
+ emit_tai(as, PPCI_STW, RID_RETHI, RID_SP, ofs);
+ emit_tai(as, PPCI_STW, RID_RETLO, RID_SP, ofs+4);
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ ra_destreg(as, ir, RID_RET);
+ }
+ }
+}
+
+static void asm_call(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ asm_collectargs(as, ir, ci, args);
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(void *)(irf->i);
+ } else { /* Need a non-argument register for indirect calls. */
+ RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
+ Reg freg = ra_alloc1(as, func, allow);
+ *--as->mcp = PPCI_BCTRL;
+ *--as->mcp = PPCI_MTCTR | PPCF_T(freg);
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+ emit_ab(as, PPCI_CMPW, RID_TMP,
+ ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_tai(as, PPCI_LWZ, RID_TMP, base, -8);
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ RegSet allow = RSET_FPR;
+ Reg tmp = ra_scratch(as, rset_clear(allow, left));
+ Reg fbias = ra_scratch(as, rset_clear(allow, tmp));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg hibias = ra_allock(as, 0x43300000, rset_exclude(RSET_GPR, dest));
+ asm_guardcc(as, CC_NE);
+ emit_fab(as, PPCI_FCMPU, 0, tmp, left);
+ emit_fab(as, PPCI_FSUB, tmp, tmp, fbias);
+ emit_fai(as, PPCI_LFD, tmp, RID_SP, SPOFS_TMP);
+ emit_tai(as, PPCI_STW, RID_TMP, RID_SP, SPOFS_TMPLO);
+ emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
+ emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000);
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ emit_lsptr(as, PPCI_LFS, (fbias & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)),
+ RSET_GPR);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, tmp, left);
+}
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_FPR;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, allow);
+ Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
+ Reg tmp = ra_scratch(as, rset_clear(allow, right));
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fab(as, PPCI_FADD, tmp, left, right);
+}
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+ IRRef lref = ir->op1;
+ lua_assert(irt_type(ir->t) != st);
+ lua_assert(!(irt_isint64(ir->t) ||
+ (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ if (st == IRT_NUM) /* double -> float conversion. */
+ emit_fb(as, PPCI_FRSP, dest, ra_alloc1(as, lref, RSET_FPR));
+ else /* float -> double conversion is a no-op on PPC. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else { /* Integer to FP conversion. */
+ /* IRT_INT: Flip hibit, bias with 2^52, subtract 2^52+2^31. */
+ /* IRT_U32: Bias with 2^52, subtract 2^52. */
+ RegSet allow = RSET_GPR;
+ Reg left = ra_alloc1(as, lref, allow);
+ Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left));
+ Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ const float *kbias;
+ if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest);
+ emit_fab(as, PPCI_FSUB, dest, dest, fbias);
+ emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
+ kbias = (const float *)lj_ir_k64_find(as->J, U64x(59800004,59800000));
+ if (st == IRT_U32) kbias++;
+ emit_lsptr(as, PPCI_LFS, (fbias & 31), (void *)kbias,
+ rset_clear(allow, hibias));
+ emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP,
+ RID_SP, SPOFS_TMPLO);
+ emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
+ if (st != IRT_U32) emit_asi(as, PPCI_XORIS, RID_TMP, left, 0x8000);
+ }
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lua_assert(irt_isint(ir->t) && st == IRT_NUM);
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, lref, RSET_FPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ if (irt_isu32(ir->t)) {
+ /* Convert both x and x-2^31 to int and merge results. */
+ Reg tmpi = ra_scratch(as, rset_exclude(RSET_GPR, dest));
+ emit_asb(as, PPCI_OR, dest, dest, tmpi); /* Select with mask idiom. */
+ emit_asb(as, PPCI_AND, tmpi, tmpi, RID_TMP);
+ emit_asb(as, PPCI_ANDC, dest, dest, RID_TMP);
+ emit_tai(as, PPCI_LWZ, tmpi, RID_SP, SPOFS_TMPLO); /* tmp = (int)(x) */
+ emit_tai(as, PPCI_ADDIS, dest, dest, 0x8000); /* dest += 2^31 */
+ emit_asb(as, PPCI_SRAWI, RID_TMP, dest, 31); /* mask = -(dest < 0) */
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_tai(as, PPCI_LWZ, dest,
+ RID_SP, SPOFS_TMPLO); /* dest = (int)(x-2^31) */
+ emit_fb(as, PPCI_FCTIWZ, tmp, left);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, tmp, tmp);
+ emit_fab(as, PPCI_FSUB, tmp, left, tmp);
+ emit_lsptr(as, PPCI_LFS, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(4f000000,00000000)),
+ RSET_GPR);
+ } else {
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, tmp, left);
+ }
+ }
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
+ if ((ir->op2 & IRCONV_SEXT))
+ emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left);
+ else
+ emit_rot(as, PPCI_RLWINM, dest, left, 0, st == IRT_U8 ? 24 : 16, 31);
+ } else { /* 32/64 bit integer conversions. */
+ /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+}
+
+#if LJ_HASFFI
+static void asm_conv64(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
+ IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
+ IRCallID id;
+ const CCallInfo *ci;
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = (ir-1)->op1;
+ if (st == IRT_NUM || st == IRT_FLOAT) {
+ id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
+ ir--;
+ } else {
+ id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
+ }
+ ci = &lj_ir_callinfo[id];
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+#endif
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum];
+ IRRef args[2];
+ int32_t ofs;
+ RegSet drop = RSET_SCRATCH;
+ if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
+ ra_evictset(as, drop);
+ asm_guardcc(as, CC_EQ);
+ emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ /* Store the result to the spill slot or temp slots. */
+ ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
+ emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs);
+}
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
+ ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
+ else /* Otherwise force a spill and use the spill slot. */
+ emit_tai(as, PPCI_ADDI, dest, RID_SP, ra_spill(as, ir));
+ } else {
+ /* Otherwise use g->tmptv to hold the TValue. */
+ RegSet allow = rset_exclude(RSET_GPR, dest);
+ Reg type;
+ emit_tai(as, PPCI_ADDI, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ emit_setgl(as, src, tmptv.gcr);
+ }
+ type = ra_allock(as, irt_toitype(ir->t), allow);
+ emit_setgl(as, type, tmptv.it);
+ }
+}
+
+static void asm_tostr(ASMState *as, IRIns *ir)
+{
+ IRRef args[2];
+ args[0] = ASMREF_L;
+ as->gcsteps++;
+ if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
+ args[1] = ASMREF_TMP1; /* const lua_Number * */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
+ } else {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
+ args[1] = ir->op1; /* int32_t k */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ }
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_tai(as, PPCI_ADDI, dest, base, ofs);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ emit_tab(as, PPCI_ADD, dest, RID_TMP, base);
+ emit_slwi(as, RID_TMP, idx, 3);
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir, IROp merge)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = RID_NONE, tmp1 = RID_TMP, tmp2;
+ Reg tisnum = RID_NONE, tmpnum = RID_NONE;
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ IRType1 kt = irkey->t;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+
+ rset_clear(allow, tab);
+ if (irt_isnum(kt)) {
+ key = ra_alloc1(as, refkey, RSET_FPR);
+ tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
+ tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
+ rset_clear(allow, tisnum);
+ } else if (!irt_ispri(kt)) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+ tmp2 = ra_scratch(as, allow);
+ rset_clear(allow, tmp2);
+
+ /* Key not found in chain: jump to exit (if merged) or load niltv. */
+ l_end = emit_label(as);
+ as->invmcp = NULL;
+ if (merge == IR_NE)
+ asm_guardcc(as, CC_EQ);
+ else if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+
+ /* Follow hash chain until the end. */
+ l_loop = --as->mcp;
+ emit_ai(as, PPCI_CMPWI, dest, 0);
+ emit_tai(as, PPCI_LWZ, dest, dest, (int32_t)offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (merge == IR_EQ)
+ asm_guardcc(as, CC_EQ);
+ else
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
+ if (irt_isnum(kt)) {
+ emit_fab(as, PPCI_FCMPU, 0, tmpnum, key);
+ emit_condbranch(as, PPCI_BC, CC_GE, l_next);
+ emit_ab(as, PPCI_CMPLW, tmp1, tisnum);
+ emit_fai(as, PPCI_LFD, tmpnum, dest, (int32_t)offsetof(Node, key.n));
+ } else {
+ if (!irt_ispri(kt)) {
+ emit_ab(as, PPCI_CMPW, tmp2, key);
+ emit_condbranch(as, PPCI_BC, CC_NE, l_next);
+ }
+ emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t));
+ if (!irt_ispri(kt))
+ emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
+ }
+ emit_tai(as, PPCI_LWZ, tmp1, dest, (int32_t)offsetof(Node, key.it));
+ *l_loop = PPCI_BC | PPCF_Y | PPCF_CC(CC_NE) |
+ (((char *)as->mcp-(char *)l_loop) & 0xffffu);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
+ if (khash == 0) {
+ emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
+ } else {
+ Reg tmphash = tmp1;
+ if (irref_isk(refkey))
+ tmphash = ra_allock(as, khash, allow);
+ emit_tab(as, PPCI_ADD, dest, dest, tmp1);
+ emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node));
+ emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash);
+ emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
+ if (irref_isk(refkey)) {
+ /* Nothing to do. */
+ } else if (irt_isstr(kt)) {
+ emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, hash));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1);
+ emit_rotlwi(as, tmp2, tmp2, HASH_ROT3);
+ emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2);
+ emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31);
+ emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2);
+ if (irt_isnum(kt)) {
+ int32_t ofs = ra_spill(as, irkey);
+ emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1);
+ emit_rotlwi(as, dest, tmp1, HASH_ROT1);
+ emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1);
+ emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4);
+ emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs);
+ } else {
+ emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
+ emit_rotlwi(as, dest, tmp1, HASH_ROT1);
+ emit_tai(as, PPCI_ADDI, tmp1, tmp2, HASH_BIAS);
+ emit_tai(as, PPCI_ADDIS, tmp2, key, (HASH_BIAS + 32768)>>16);
+ }
+ }
+ }
+}
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg key = RID_NONE, type = RID_TMP, idx = node;
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ lua_assert(ofs % sizeof(Node) == 0);
+ if (ofs > 32736) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_tai(as, PPCI_ADDI, dest, node, ofs);
+ }
+ asm_guardcc(as, CC_NE);
+ if (!irt_ispri(irkey->t)) {
+ key = ra_scratch(as, allow);
+ rset_clear(allow, key);
+ }
+ rset_clear(allow, type);
+ if (irt_isnum(irkey->t)) {
+ emit_cmpi(as, key, (int32_t)ir_knum(irkey)->u32.lo);
+ asm_guardcc(as, CC_NE);
+ emit_cmpi(as, type, (int32_t)ir_knum(irkey)->u32.hi);
+ } else {
+ if (ra_hasreg(key)) {
+ emit_cmpi(as, key, irkey->i); /* May use RID_TMP, i.e. type. */
+ asm_guardcc(as, CC_NE);
+ }
+ emit_ai(as, PPCI_CMPWI, type, irt_toitype(irkey->t));
+ }
+ if (ra_hasreg(key)) emit_tai(as, PPCI_LWZ, key, idx, kofs+4);
+ emit_tai(as, PPCI_LWZ, type, idx, kofs);
+ if (ofs > 32736) {
+ emit_tai(as, PPCI_ADDIS, dest, dest, (ofs + 32768) >> 16);
+ emit_tai(as, PPCI_ADDI, dest, node, ofs);
+ }
+}
+
+static void asm_newref(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
+ IRRef args[3];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* GCtab *t */
+ args[2] = ASMREF_TMP1; /* cTValue *key */
+ asm_setupresult(as, ir, ci); /* TValue * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ /* NYI: Check that UREFO is still open and not aliasing a slot. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, PPCI_LWZ, dest, v, RSET_GPR);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guardcc(as, CC_NE);
+ emit_ai(as, PPCI_CMPWI, RID_TMP, 1);
+ emit_tai(as, PPCI_ADDI, dest, uv, (int32_t)offsetof(GCupval, tv));
+ emit_tai(as, PPCI_LBZ, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_tai(as, PPCI_LWZ, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_tai(as, PPCI_LWZ, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lua_assert(!ra_used(ir));
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRRef ref = ir->op2, refk = ir->op1;
+ int32_t ofs = (int32_t)sizeof(GCstr);
+ Reg r;
+ if (irref_isk(ref)) {
+ IRRef tmp = refk; refk = ref; ref = tmp;
+ } else if (!irref_isk(refk)) {
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ IRIns *irr = IR(ir->op2);
+ if (ra_hasreg(irr->r)) {
+ ra_noweak(as, irr->r);
+ right = irr->r;
+ } else if (mayfuse(as, irr->op2) &&
+ irr->o == IR_ADD && irref_isk(irr->op2) &&
+ checki16(ofs + IR(irr->op2)->i)) {
+ ofs += IR(irr->op2)->i;
+ right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
+ } else {
+ right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_tai(as, PPCI_ADDI, dest, dest, ofs);
+ emit_tab(as, PPCI_ADD, dest, left, right);
+ return;
+ }
+ r = ra_alloc1(as, ref, RSET_GPR);
+ ofs += IR(refk)->i;
+ if (checki16(ofs))
+ emit_tai(as, PPCI_ADDI, dest, r, ofs);
+ else
+ emit_tab(as, PPCI_ADD, dest, r,
+ ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static PPCIns asm_fxloadins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */
+ case IRT_U8: return PPCI_LBZ;
+ case IRT_I16: return PPCI_LHA;
+ case IRT_U16: return PPCI_LHZ;
+ case IRT_NUM: return PPCI_LFD;
+ case IRT_FLOAT: return PPCI_LFS;
+ default: return PPCI_LWZ;
+ }
+}
+
+static PPCIns asm_fxstoreins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return PPCI_STB;
+ case IRT_I16: case IRT_U16: return PPCI_STH;
+ case IRT_NUM: return PPCI_STFD;
+ case IRT_FLOAT: return PPCI_STFS;
+ default: return PPCI_STW;
+ }
+}
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ PPCIns pi = asm_fxloadins(ir);
+ int32_t ofs;
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_tai(as, PPCI_ADDI, dest, idx, ofs);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ lua_assert(!irt_isi8(ir->t));
+ emit_tai(as, pi, dest, idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ PPCIns pi = asm_fxstoreins(ir);
+ emit_tai(as, pi, src, idx, ofs);
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
+ if (irt_isi8(ir->t))
+ emit_as(as, PPCI_EXTSB, dest, dest);
+ asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR);
+}
+
+static void asm_xstore(ASMState *as, IRIns *ir)
+{
+ IRIns *irb;
+ if (mayfuse(as, ir->op2) && (irb = IR(ir->op2))->o == IR_BSWAP &&
+ ra_noreg(irb->r) && (irt_isint(ir->t) || irt_isu32(ir->t))) {
+ /* Fuse BSWAP with XSTORE to stwbrx. */
+ Reg src = ra_alloc1(as, irb->op1, RSET_GPR);
+ asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src));
+ } else {
+ Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src));
+ }
+}
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx;
+ RegSet allow = RSET_GPR;
+ int32_t ofs = AHUREF_LSX;
+ if (ra_used(ir)) {
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ if (!irt_isnum(t)) ofs = 0;
+ dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
+ rset_clear(allow, dest);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (irt_isnum(t)) {
+ Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, rset_exclude(allow, idx));
+ asm_guardcc(as, CC_GE);
+ emit_ab(as, PPCI_CMPLW, type, tisnum);
+ if (ra_hasreg(dest)) {
+ if (ofs == AHUREF_LSX) {
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR,
+ (idx&255)), (idx>>8)));
+ emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp);
+ } else {
+ emit_fai(as, PPCI_LFD, dest, idx, ofs);
+ }
+ }
+ } else {
+ asm_guardcc(as, CC_NE);
+ emit_ai(as, PPCI_CMPWI, type, irt_toitype(t));
+ if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, idx, ofs+4);
+ }
+ if (ofs == AHUREF_LSX) {
+ emit_tab(as, PPCI_LWZX, type, (idx&255), tmp);
+ emit_slwi(as, tmp, (idx>>8), 3);
+ } else {
+ emit_tai(as, PPCI_LWZ, type, idx, ofs);
+ }
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, type = RID_NONE;
+ int32_t ofs = AHUREF_LSX;
+ if (irt_isnum(ir->t)) {
+ src = ra_alloc1(as, ir->op2, RSET_FPR);
+ } else {
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ ofs = 0;
+ }
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ rset_clear(allow, type);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (irt_isnum(ir->t)) {
+ if (ofs == AHUREF_LSX) {
+ emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP);
+ emit_slwi(as, RID_TMP, (idx>>8), 3);
+ } else {
+ emit_fai(as, PPCI_STFD, src, idx, ofs);
+ }
+ } else {
+ if (ra_hasreg(src))
+ emit_tai(as, PPCI_STW, src, idx, ofs+4);
+ if (ofs == AHUREF_LSX) {
+ emit_tab(as, PPCI_STWX, type, (idx&255), RID_TMP);
+ emit_slwi(as, RID_TMP, (idx>>8), 3);
+ } else {
+ emit_tai(as, PPCI_STW, type, idx, ofs);
+ }
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 0 : 4);
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
+ lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
+ lua_assert(LJ_DUALNUM ||
+ !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ dest = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, dest);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+ } else if (ra_used(ir)) {
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
+ rset_clear(allow, dest);
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+ if ((ir->op2 & IRSLOAD_CONVERT)) {
+ if (irt_isint(t)) {
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ dest = ra_scratch(as, RSET_FPR);
+ emit_fai(as, PPCI_STFD, dest, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, dest, dest);
+ t.irt = IRT_NUM; /* Check for original type. */
+ } else {
+ Reg tmp = ra_scratch(as, allow);
+ Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, tmp));
+ Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ emit_fab(as, PPCI_FSUB, dest, dest, fbias);
+ emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
+ emit_lsptr(as, PPCI_LFS, (fbias & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)),
+ rset_clear(allow, hibias));
+ emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO);
+ emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
+ emit_asi(as, PPCI_XORIS, tmp, tmp, 0x8000);
+ dest = tmp;
+ t.irt = IRT_INT; /* Check for original type. */
+ }
+ }
+ goto dotypecheck;
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+dotypecheck:
+ if (irt_isnum(t)) {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
+ asm_guardcc(as, CC_GE);
+ emit_ab(as, PPCI_CMPLW, RID_TMP, tisnum);
+ type = RID_TMP;
+ }
+ if (ra_hasreg(dest)) emit_fai(as, PPCI_LFD, dest, base, ofs-4);
+ } else {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ asm_guardcc(as, CC_NE);
+ emit_ai(as, PPCI_CMPWI, RID_TMP, irt_toitype(t));
+ type = RID_TMP;
+ }
+ if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, base, ofs);
+ }
+ if (ra_hasreg(type)) emit_tai(as, PPCI_LWZ, type, base, ofs-4);
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID typeid = (CTypeID)IR(ir->op1)->i;
+ CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
+ lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i;
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[2];
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ RegSet drop = RSET_SCRATCH;
+ lua_assert(sz != CTSIZE_INVALID);
+
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ as->gcsteps++;
+
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ if (ra_used(ir))
+ ra_destreg(as, ir, RID_RET); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ int32_t ofs = sizeof(GCcdata);
+ lua_assert(sz == 4 || sz == 8);
+ if (sz == 8) {
+ ofs += 4;
+ lua_assert((ir+1)->o == IR_HIOP);
+ }
+ for (;;) {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_tai(as, PPCI_STW, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; ir++;
+ }
+ }
+ /* Initialize gct and typeid. lj_mem_newgco() already sets marked. */
+ emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
+ emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, typeid));
+ emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA);
+ emit_ti(as, PPCI_LI, RID_TMP, typeid); /* Lower 16 bit used. Sign-ext ok. */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#else
+#define asm_cnew(as, ir) ((void)0)
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg link = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_setgl(as, tab, gc.grayagain);
+ lua_assert(LJ_GC_BLACK == 0x04);
+ emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */
+ emit_getgl(as, link, gc.grayagain);
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
+ emit_asi(as, PPCI_ANDIDOT, RID_TMP, mark, LJ_GC_BLACK);
+ emit_tai(as, PPCI_LBZ, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lua_assert(IR(ir->op1)->o == IR_UREFC);
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
+ emit_asi(as, PPCI_ANDIDOT, tmp, tmp, LJ_GC_BLACK);
+ emit_condbranch(as, PPCI_BC, CC_EQ, l_end);
+ emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, LJ_GC_WHITES);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_tai(as, PPCI_LBZ, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_tai(as, PPCI_LBZ, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ if (pi == PPCI_FMUL)
+ emit_fac(as, pi, dest, left, right);
+ else
+ emit_fab(as, pi, dest, left, right);
+}
+
+static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
+ emit_fb(as, pi, dest, left);
+}
+
+static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
+{
+ IRIns *irp = IR(ir->op1);
+ if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
+ IRIns *irpp = IR(irp->op1);
+ if (irpp == ir-2 && irpp->o == IR_FPMATH &&
+ irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
+ IRRef args[2];
+ args[0] = irpp->op1;
+ args[1] = irp->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD))
+ asm_fparith(as, ir, PPCI_FADD);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ PPCIns pi;
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ pi = PPCI_ADDI;
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi = PPCI_ADDICDOT;
+ }
+ emit_tai(as, pi, dest, left, k);
+ return;
+ } else if ((k & 0xffff) == 0) {
+ emit_tai(as, PPCI_ADDIS, dest, left, (k >> 16));
+ return;
+ } else if (!as->sectref) {
+ emit_tai(as, PPCI_ADDIS, dest, dest, (k + 32768) >> 16);
+ emit_tai(as, PPCI_ADDI, dest, left, k);
+ return;
+ }
+ }
+ pi = PPCI_ADD;
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, pi, dest, left, right);
+ }
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB))
+ asm_fparith(as, ir, PPCI_FSUB);
+ } else {
+ PPCIns pi = PPCI_SUBF;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left, right;
+ if (irref_isk(ir->op1)) {
+ int32_t k = IR(ir->op1)->i;
+ if (checki16(k)) {
+ right = ra_alloc1(as, ir->op2, RSET_GPR);
+ emit_tai(as, PPCI_SUBFIC, dest, right, k);
+ return;
+ }
+ }
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
+ }
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, PPCI_FMUL);
+ } else {
+ PPCIns pi = PPCI_MULLW;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ emit_tai(as, PPCI_MULLI, dest, left, k);
+ return;
+ }
+ }
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, pi, dest, left, right);
+ }
+}
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fpunary(as, ir, PPCI_FNEG);
+ } else {
+ Reg dest, left;
+ PPCIns pi = PPCI_NEG;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_tab(as, pi, dest, left, 0);
+ }
+}
+
+static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
+{
+ Reg dest, left, right;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ }
+ asm_guardcc(as, CC_SO);
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (pi == PPCI_SUBFO) { Reg tmp = left; left = right; right = tmp; }
+ emit_tab(as, pi|PPCF_DOT, dest, left, right);
+}
+
+#if LJ_HASFFI
+static void asm_add64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ PPCIns pi = PPCI_ADDE;
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k == 0)
+ pi = PPCI_ADDZE;
+ else if (k == -1)
+ pi = PPCI_ADDME;
+ else
+ goto needright;
+ right = 0;
+ } else {
+ needright:
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_tab(as, pi, dest, left, right);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ emit_tai(as, PPCI_ADDIC, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, PPCI_ADDC, dest, left, right);
+}
+
+static void asm_sub64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left, right = ra_alloc1(as, ir->op2, RSET_GPR);
+ PPCIns pi = PPCI_SUBFE;
+ if (irref_isk(ir->op1)) {
+ int32_t k = IR(ir->op1)->i;
+ if (k == 0)
+ pi = PPCI_SUBFZE;
+ else if (k == -1)
+ pi = PPCI_SUBFME;
+ else
+ goto needleft;
+ left = 0;
+ } else {
+ needleft:
+ left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
+ }
+ emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ right = ra_alloc1(as, ir->op2, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ int32_t k = IR(ir->op1)->i;
+ if (checki16(k)) {
+ emit_tai(as, PPCI_SUBFIC, dest, right, k);
+ return;
+ }
+ }
+ left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
+ emit_tab(as, PPCI_SUBFC, dest, right, left);
+}
+
+static void asm_neg64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_tab(as, PPCI_SUBFZE, dest, left, 0);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_tai(as, PPCI_SUBFIC, dest, left, 0);
+}
+#endif
+
+static void asm_bitnot(ASMState *as, IRIns *ir)
+{
+ Reg dest, left, right;
+ PPCIns pi = PPCI_NOR;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ if (mayfuse(as, ir->op1)) {
+ IRIns *irl = IR(ir->op1);
+ if (irl->o == IR_BAND)
+ pi ^= (PPCI_NOR ^ PPCI_NAND);
+ else if (irl->o == IR_BXOR)
+ pi ^= (PPCI_NOR ^ PPCI_EQV);
+ else if (irl->o != IR_BOR)
+ goto nofuse;
+ left = ra_hintalloc(as, irl->op1, dest, RSET_GPR);
+ right = ra_alloc1(as, irl->op2, rset_exclude(RSET_GPR, left));
+ } else {
+nofuse:
+ left = right = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ }
+ emit_asb(as, pi, dest, left, right);
+}
+
+static void asm_bitswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRIns *irx;
+ if (mayfuse(as, ir->op1) && (irx = IR(ir->op1))->o == IR_XLOAD &&
+ ra_noreg(irx->r) && (irt_isint(irx->t) || irt_isu32(irx->t))) {
+ /* Fuse BSWAP with XLOAD to lwbrx. */
+ asm_fusexrefx(as, PPCI_LWBRX, dest, irx->op1, RSET_GPR);
+ } else {
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg tmp = dest;
+ if (tmp == left) {
+ tmp = RID_TMP;
+ emit_mr(as, dest, RID_TMP);
+ }
+ emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 16, 23);
+ emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 0, 7);
+ emit_rotlwi(as, tmp, left, 8);
+ }
+}
+
+static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ Reg tmp = left;
+ if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) {
+ if (!checku16(k)) {
+ emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16));
+ if ((k & 0xffff) == 0) return;
+ }
+ emit_asi(as, pik, dest, left, k);
+ return;
+ }
+ }
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_asb(as, pi, dest, left, right);
+}
+
+/* Fuse BAND with contiguous bitmask and a shift to rlwinm. */
+static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref)
+{
+ IRIns *ir;
+ Reg left;
+ if (mayfuse(as, ref) && (ir = IR(ref), ra_noreg(ir->r)) &&
+ irref_isk(ir->op2) && ir->o >= IR_BSHL && ir->o <= IR_BROR) {
+ int32_t sh = (IR(ir->op2)->i & 31);
+ switch (ir->o) {
+ case IR_BSHL:
+      if ((mask & ((1u<<sh)-1))) goto nofuse;
+      break;
+    case IR_BSHR:
+      if ((mask & ~((~0u)>>sh))) goto nofuse;
+ sh = ((32-sh)&31);
+ break;
+ case IR_BROL:
+ break;
+ default:
+ goto nofuse;
+ }
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ *--as->mcp = pi | PPCF_T(left) | PPCF_B(sh);
+ return;
+ }
+nofuse:
+ left = ra_alloc1(as, ref, RSET_GPR);
+ *--as->mcp = pi | PPCF_T(left);
+}
+
+static void asm_bitand(ASMState *as, IRIns *ir)
+{
+ Reg dest, left, right;
+ IRRef lref = ir->op1;
+ PPCIns dot = 0;
+ IRRef op2;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ dot = PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k) {
+ /* First check for a contiguous bitmask as used by rlwinm. */
+ uint32_t s1 = lj_ffs((uint32_t)k);
+ uint32_t k1 = ((uint32_t)k >> s1);
+ if ((k1 & (k1+1)) == 0) {
+ asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
+ PPCF_MB(31-lj_fls((uint32_t)k)) | PPCF_ME(31-s1),
+ k, lref);
+ return;
+ }
+ if (~(uint32_t)k) {
+ uint32_t s2 = lj_ffs(~(uint32_t)k);
+ uint32_t k2 = (~(uint32_t)k >> s2);
+ if ((k2 & (k2+1)) == 0) {
+ asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
+ PPCF_MB(32-s2) | PPCF_ME(30-lj_fls(~(uint32_t)k)),
+ k, lref);
+ return;
+ }
+ }
+ }
+ if (checku16(k)) {
+ left = ra_alloc1(as, lref, RSET_GPR);
+ emit_asi(as, PPCI_ANDIDOT, dest, left, k);
+ return;
+ } else if ((k & 0xffff) == 0) {
+ left = ra_alloc1(as, lref, RSET_GPR);
+ emit_asi(as, PPCI_ANDISDOT, dest, left, (k >> 16));
+ return;
+ }
+ }
+ op2 = ir->op2;
+ if (mayfuse(as, op2) && IR(op2)->o == IR_BNOT && ra_noreg(IR(op2)->r)) {
+ dot ^= (PPCI_AND ^ PPCI_ANDC);
+ op2 = IR(op2)->op1;
+ }
+ left = ra_hintalloc(as, lref, dest, RSET_GPR);
+ right = ra_alloc1(as, op2, rset_exclude(RSET_GPR, left));
+ emit_asb(as, PPCI_AND ^ dot, dest, left, right);
+}
+
+static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
+{
+ Reg dest, left;
+ Reg dot = 0;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ dot = PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ int32_t shift = (IR(ir->op2)->i & 31);
+ if (pik == 0) /* SLWI */
+ emit_rot(as, PPCI_RLWINM|dot, dest, left, shift, 0, 31-shift);
+ else if (pik == 1) /* SRWI */
+ emit_rot(as, PPCI_RLWINM|dot, dest, left, (32-shift)&31, shift, 31);
+ else
+ emit_asb(as, pik|dot, dest, left, shift);
+ } else {
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_asb(as, pi|dot, dest, left, right);
+ }
+}
+
+static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
+{
+ if (irt_isnum(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg tmp = dest;
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ if (tmp == left || tmp == right)
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR,
+ dest), left), right));
+ emit_facb(as, PPCI_FSEL, dest, tmp,
+ ismax ? left : right, ismax ? right : left);
+ emit_fab(as, PPCI_FSUB, tmp, left, right);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg tmp1 = RID_TMP, tmp2 = dest;
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (tmp2 == left || tmp2 == right)
+ tmp2 = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR,
+ dest), left), right));
+ emit_tab(as, PPCI_ADD, dest, tmp2, right);
+ emit_asb(as, ismax ? PPCI_ANDC : PPCI_AND, tmp2, tmp2, tmp1);
+ emit_tab(as, PPCI_SUBFE, tmp1, tmp1, tmp1);
+ emit_tab(as, PPCI_SUBFC, tmp2, tmp2, tmp1);
+ emit_asi(as, PPCI_XORIS, tmp2, right, 0x8000);
+ emit_asi(as, PPCI_XORIS, tmp1, left, 0x8000);
+ }
+}
+
+/* -- Comparisons --------------------------------------------------------- */
+
+#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */
+#define CC_TWO 0x80 /* Check two flags for FP comparison. */
+
+/* Map of comparisons to flags. ORDER IR. */
+static const uint8_t asm_compmap[IR_ABC+1] = {
+ /* op int cc FP cc */
+ /* LT */ CC_GE + (CC_GE<<4),
+ /* GE */ CC_LT + (CC_LE<<4) + CC_TWO,
+ /* LE */ CC_GT + (CC_GE<<4) + CC_TWO,
+ /* GT */ CC_LE + (CC_LE<<4),
+ /* ULT */ CC_GE + CC_UNSIGNED + (CC_GT<<4) + CC_TWO,
+ /* UGE */ CC_LT + CC_UNSIGNED + (CC_LT<<4),
+ /* ULE */ CC_GT + CC_UNSIGNED + (CC_GT<<4),
+ /* UGT */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO,
+ /* EQ */ CC_NE + (CC_NE<<4),
+ /* NE */ CC_EQ + (CC_EQ<<4),
+ /* ABC */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO /* Same as UGT. */
+};
+
+static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc)
+{
+ Reg right, left = ra_alloc1(as, lref, RSET_GPR);
+ if (irref_isk(rref)) {
+ int32_t k = IR(rref)->i;
+ if ((cc & CC_UNSIGNED) == 0) { /* Signed comparison with constant. */
+ if (checki16(k)) {
+ emit_tai(as, PPCI_CMPWI, cr, left, k);
+ /* Signed comparison with zero and referencing previous ins? */
+ if (k == 0 && lref == as->curins-1)
+ as->flagmcp = as->mcp; /* Allow elimination of the compare. */
+ return;
+ } else if ((cc & 3) == (CC_EQ & 3)) { /* Use CMPLWI for EQ or NE. */
+ if (checku16(k)) {
+ emit_tai(as, PPCI_CMPLWI, cr, left, k);
+ return;
+ } else if (!as->sectref && ra_noreg(IR(rref)->r)) {
+ emit_tai(as, PPCI_CMPLWI, cr, RID_TMP, k);
+ emit_asi(as, PPCI_XORIS, RID_TMP, left, (k >> 16));
+ return;
+ }
+ }
+ } else { /* Unsigned comparison with constant. */
+ if (checku16(k)) {
+ emit_tai(as, PPCI_CMPLWI, cr, left, k);
+ return;
+ }
+ }
+ }
+ right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
+ emit_tab(as, (cc & CC_UNSIGNED) ? PPCI_CMPLW : PPCI_CMPW, cr, left, right);
+}
+
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+ PPCCC cc = asm_compmap[ir->o];
+ if (irt_isnum(ir->t)) {
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ asm_guardcc(as, (cc >> 4));
+ if ((cc & CC_TWO))
+ emit_tab(as, PPCI_CROR, ((cc>>4)&3), ((cc>>4)&3), (CC_EQ&3));
+ emit_fab(as, PPCI_FCMPU, 0, left, right);
+ } else {
+ IRRef lref = ir->op1, rref = ir->op2;
+ if (irref_isk(lref) && !irref_isk(rref)) {
+ /* Swap constants to the right (only for ABC). */
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ if ((cc & 2) == 0) cc ^= 1; /* LT <-> GT, LE <-> GE */
+ }
+ asm_guardcc(as, cc);
+ asm_intcomp_(as, lref, rref, 0, cc);
+ }
+}
+
+#if LJ_HASFFI
+/* 64 bit integer comparisons. */
+static void asm_comp64(ASMState *as, IRIns *ir)
+{
+ PPCCC cc = asm_compmap[(ir-1)->o];
+ if ((cc&3) == (CC_EQ&3)) {
+ asm_guardcc(as, cc);
+ emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CROR,
+ (CC_EQ&3), (CC_EQ&3), 4+(CC_EQ&3));
+ } else {
+ asm_guardcc(as, CC_EQ);
+ emit_tab(as, PPCI_CROR, (CC_EQ&3), (CC_EQ&3), ((cc^~(cc>>2))&1));
+ emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CRANDC,
+ (CC_EQ&3), (CC_EQ&3), 4+(cc&3));
+ }
+ /* Loword comparison sets cr1 and is unsigned, except for equality. */
+ asm_intcomp_(as, (ir-1)->op1, (ir-1)->op2, 4,
+ cc | ((cc&3) == (CC_EQ&3) ? 0 : CC_UNSIGNED));
+ /* Hiword comparison sets cr0. */
+ asm_intcomp_(as, ir->op1, ir->op2, 0, cc);
+ as->flagmcp = NULL; /* Doesn't work here. */
+}
+#endif
+
+/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
+
+/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+#if LJ_HASFFI
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+ if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
+ as->curins--; /* Always skip the CONV. */
+ if (usehi || uselo)
+ asm_conv64(as, ir);
+ return;
+ } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+ asm_comp64(as, ir);
+ return;
+ }
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+ case IR_ADD: as->curins--; asm_add64(as, ir); break;
+ case IR_SUB: as->curins--; asm_sub64(as, ir); break;
+ case IR_NEG: as->curins--; asm_neg64(as, ir); break;
+ case IR_CALLN:
+ case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+ default: lua_assert(0); break;
+ }
+#else
+ UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */
+#endif
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
+ Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
+ rset_clear(allow, pbase);
+ tmp = allow ? rset_pickbot(allow) :
+ (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
+ emit_condbranch(as, PPCI_BC, CC_LT, asm_exitstub_addr(as, exitno));
+ if (allow == RSET_EMPTY) /* Restore temp. register. */
+ emit_tai(as, PPCI_LWZ, tmp, RID_SP, SPOFS_TMPW);
+ else
+ ra_modified(as, tmp);
+ emit_ai(as, PPCI_CMPLWI, RID_TMP, (int32_t)(8*topslot));
+ emit_tab(as, PPCI_SUBF, RID_TMP, pbase, tmp);
+ emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack));
+ if (pbase == RID_TMP)
+ emit_getgl(as, RID_TMP, jit_base);
+ emit_getgl(as, tmp, jit_L);
+ if (allow == RSET_EMPTY) /* Spill temp. register. */
+ emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW);
+}
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_fai(as, PPCI_STFD, src, RID_BASE, ofs);
+ } else {
+ Reg type;
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ rset_clear(allow, src);
+ emit_tai(as, PPCI_STW, src, RID_BASE, ofs+4);
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s == 0) continue; /* Do not overwrite link to previous frame. */
+ type = ra_allock(as, (int32_t)(*flinks--), allow);
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ }
+ emit_tai(as, PPCI_STW, type, RID_BASE, ofs);
+ }
+ checkmclim(as);
+ }
+ lua_assert(map + nent == flinks);
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
+ emit_ai(as, PPCI_CMPWI, RID_RET, 0);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ tmp = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp, (int32_t)as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_LT, l_end);
+ emit_ab(as, PPCI_CMPLW, RID_TMP, tmp);
+ emit_getgl(as, tmp, gc.threshold);
+ emit_getgl(as, RID_TMP, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guardcc already inverted the cond branch and patched the final b. */
+ p[-2] = (p[-2] & (0xffff0000u & ~PPCF_Y)) | (((target-p+2) & 0x3fffu) << 2);
+ } else {
+ p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
+ }
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (r != RID_BASE)
+ emit_mr(as, r, RID_BASE);
+ }
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (irp->r == r) {
+ rset_clear(allow, r); /* Mark same BASE register as coalesced. */
+ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
+ rset_clear(allow, irp->r);
+ emit_mr(as, r, irp->r); /* Move from coalesced parent reg. */
+ } else {
+ emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
+ }
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *p = as->mctop;
+ MCode *target;
+ int32_t spadj = as->T->spadjust;
+ if (spadj == 0) {
+ *--p = PPCI_NOP;
+ *--p = PPCI_NOP;
+ as->mctop = p;
+ } else {
+ /* Patch stack adjustment. */
+ lua_assert(checki16(CFRAME_SIZE+spadj));
+ p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj);
+ p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj;
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop - 1; /* Leave room for exit branch. */
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ as->mcp = p-2; /* Leave room for stack pointer adjustment. */
+ as->invmcp = NULL;
+ }
+}
+
+/* -- Instruction dispatch ------------------------------------------------ */
+
+/* Assemble a single instruction. */
+static void asm_ir(ASMState *as, IRIns *ir)
+{
+ switch ((IROp)ir->o) {
+ /* Miscellaneous ops. */
+ case IR_LOOP: asm_loop(as); break;
+ case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
+ case IR_USE:
+ ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
+ case IR_PHI: asm_phi(as, ir); break;
+ case IR_HIOP: asm_hiop(as, ir); break;
+
+ /* Guarded assertions. */
+ case IR_EQ: case IR_NE:
+ if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
+ as->curins--;
+ asm_href(as, ir-1, (IROp)ir->o);
+ break;
+ }
+ /* fallthrough */
+ case IR_LT: case IR_GE: case IR_LE: case IR_GT:
+ case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
+ case IR_ABC:
+ asm_comp(as, ir);
+ break;
+
+ case IR_RETF: asm_retf(as, ir); break;
+
+ /* Bit ops. */
+ case IR_BNOT: asm_bitnot(as, ir); break;
+ case IR_BSWAP: asm_bitswap(as, ir); break;
+
+ case IR_BAND: asm_bitand(as, ir); break;
+ case IR_BOR: asm_bitop(as, ir, PPCI_OR, PPCI_ORI); break;
+ case IR_BXOR: asm_bitop(as, ir, PPCI_XOR, PPCI_XORI); break;
+
+ case IR_BSHL: asm_bitshift(as, ir, PPCI_SLW, 0); break;
+ case IR_BSHR: asm_bitshift(as, ir, PPCI_SRW, 1); break;
+ case IR_BSAR: asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI); break;
+ case IR_BROL: asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31),
+ PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31)); break;
+ case IR_BROR: lua_assert(0); break;
+
+ /* Arithmetic ops. */
+ case IR_ADD: asm_add(as, ir); break;
+ case IR_SUB: asm_sub(as, ir); break;
+ case IR_MUL: asm_mul(as, ir); break;
+ case IR_DIV: asm_fparith(as, ir, PPCI_FDIV); break;
+ case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
+ case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
+ case IR_NEG: asm_neg(as, ir); break;
+
+ case IR_ABS: asm_fpunary(as, ir, PPCI_FABS); break;
+ case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
+ case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
+ case IR_MIN: asm_min_max(as, ir, 0); break;
+ case IR_MAX: asm_min_max(as, ir, 1); break;
+ case IR_FPMATH:
+ if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
+ break;
+ asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
+ break;
+
+ /* Overflow-checking arithmetic ops. */
+ case IR_ADDOV: asm_arithov(as, ir, PPCI_ADDO); break;
+ case IR_SUBOV: asm_arithov(as, ir, PPCI_SUBFO); break;
+ case IR_MULOV: asm_arithov(as, ir, PPCI_MULLWO); break;
+
+ /* Memory references. */
+ case IR_AREF: asm_aref(as, ir); break;
+ case IR_HREF: asm_href(as, ir, 0); break;
+ case IR_HREFK: asm_hrefk(as, ir); break;
+ case IR_NEWREF: asm_newref(as, ir); break;
+ case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
+ case IR_FREF: asm_fref(as, ir); break;
+ case IR_STRREF: asm_strref(as, ir); break;
+
+ /* Loads and stores. */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ asm_ahuvload(as, ir);
+ break;
+ case IR_FLOAD: asm_fload(as, ir); break;
+ case IR_XLOAD: asm_xload(as, ir); break;
+ case IR_SLOAD: asm_sload(as, ir); break;
+
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
+ case IR_FSTORE: asm_fstore(as, ir); break;
+ case IR_XSTORE: asm_xstore(as, ir); break;
+
+ /* Allocations. */
+ case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
+ case IR_TNEW: asm_tnew(as, ir); break;
+ case IR_TDUP: asm_tdup(as, ir); break;
+ case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
+
+ /* Write barriers. */
+ case IR_TBAR: asm_tbar(as, ir); break;
+ case IR_OBAR: asm_obar(as, ir); break;
+
+ /* Type conversions. */
+ case IR_CONV: asm_conv(as, ir); break;
+ case IR_TOBIT: asm_tobit(as, ir); break;
+ case IR_TOSTR: asm_tostr(as, ir); break;
+ case IR_STRTO: asm_strto(as, ir); break;
+
+ /* Calls. */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
+ case IR_CALLXS: asm_callx(as, ir); break;
+ case IR_CARG: break;
+
+ default:
+ setintV(&as->J->errinfo, ir->o);
+ lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
+ break;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX];
+ uint32_t i, nargs = (int)CCI_NARGS(ci);
+ int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++)
+ if (args[i] && irt_isfp(IR(args[i])->t)) {
+ if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *px = exitstub_trace_addr(T, exitno);
+ MCode *cstart = NULL;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ int clearso = 0;
+ for (; p < pe; p++) {
+ /* Look for exitstub branch, try to replace with branch to target. */
+ uint32_t ins = *p;
+ if ((ins & 0xfc000000u) == 0x40000000u &&
+ ((ins ^ ((char *)px-(char *)p)) & 0xffffu) == 0) {
+ ptrdiff_t delta = (char *)target - (char *)p;
+ if (((ins >> 16) & 3) == (CC_SO&3)) {
+ clearso = sizeof(MCode);
+ delta -= sizeof(MCode);
+ }
+ /* Many, but not all short-range branches can be patched directly. */
+ if (((delta + 0x8000) >> 16) == 0) {
+ *p = (ins & 0xffdf0000u) | ((uint32_t)delta & 0xffffu) |
+ ((delta & 0x8000) * (PPCF_Y/0x8000));
+ if (!cstart) cstart = p;
+ }
+ } else if ((ins & 0xfc000000u) == PPCI_B &&
+ ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) {
+ ptrdiff_t delta = (char *)target - (char *)p;
+ lua_assert(((delta + 0x02000000) >> 26) == 0);
+ *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
+ if (!cstart) cstart = p;
+ }
+ }
+ { /* Always patch long-range branch in exit stub itself. */
+ ptrdiff_t delta = (char *)target - (char *)px - clearso;
+ lua_assert(((delta + 0x02000000) >> 26) == 0);
+ *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
+ }
+ if (!cstart) cstart = px;
+ lj_mcode_sync(cstart, px+1);
+ if (clearso) { /* Extend the current trace. Ugly workaround. */
+ MCode *pp = J->cur.mcode;
+ J->cur.szmcode += sizeof(MCode);
+ *--pp = PPCI_MCRXR; /* Clear SO flag. */
+ J->cur.mcode = pp;
+ lj_mcode_sync(pp, pp+1);
+ }
+ lj_mcode_patch(J, mcarea, 1);
+}
+
diff --git a/src/LuaJIT/src/lj_asm_x86.h b/src/LuaJIT/src/lj_asm_x86.h
new file mode 100644
index 000000000..c4ebdb1fe
--- /dev/null
+++ b/src/LuaJIT/src/lj_asm_x86.h
@@ -0,0 +1,2751 @@
+/*
+** x86/x64 IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Generate an exit stub group at the bottom of the reserved MCode memory. */
+static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
+{
+ ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
+ MCode *mxp = as->mcbot;
+ MCode *mxpstart = mxp;
+ if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
+ asm_mclimit(as);
+ /* Push low byte of exitno for each exit stub. */
+ *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
+ for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
+ *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
+ *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
+ }
+ /* Push the high byte of the exitno for each exit stub group. */
+ *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
+ /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
+ *mxp++ = XI_MOVmi;
+ *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
+ *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ *mxp++ = 2*sizeof(void *);
+ *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
+ /* Jump to exit handler which fills in the ExitState. */
+ *mxp++ = XI_JMP; mxp += 4;
+ *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
+ /* Commit the code for this group (even if assembly fails later on). */
+ lj_mcode_commitbot(as->J, mxp);
+ as->mcbot = mxp;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ return mxpstart;
+}
+
+/* Setup all needed exit stubs. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
+ lj_trace_err(as->J, LJ_TRERR_SNAPOV);
+ for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
+ if (as->J->exitstubgroup[i] == NULL)
+ as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
+}
+
+/* Emit conditional branch to exit for guard.
+** It's important to emit this *after* all registers have been allocated,
+** because rematerializations may invalidate the flags.
+*/
+static void asm_guardcc(ASMState *as, int cc)
+{
+ MCode *target = exitstub_addr(as->J, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *(int32_t *)(p+1) = jmprel(p+5, target);
+ target = p;
+ cc ^= 1;
+ if (as->realign) {
+ emit_sjcc(as, cc, target);
+ return;
+ }
+ }
+ emit_jcc(as, cc, target);
+}
+
+/* -- Memory operand fusion ----------------------------------------------- */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if a reference is a signed 32 bit constant. */
+static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
+{
+ if (irref_isk(ref)) {
+ IRIns *ir = IR(ref);
+ if (ir->o != IR_KINT64) {
+ *k = ir->i;
+ return 1;
+ } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
+ *k = (int32_t)ir_kint64(ir)->u64;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Check if there's no conflicting instruction between curins and ref.
+** Also avoid fusing loads if there are multiple references.
+*/
+static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref) {
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
+ return 0;
+ }
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse array base into memory operand. */
+static IRRef asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *irb = IR(ref);
+ as->mrm.ofs = 0;
+ if (irb->o == IR_FLOAD) {
+ IRIns *ira = IR(irb->op1);
+ lua_assert(irb->op2 == IRFL_TAB_ARRAY);
+ /* We can avoid the FLOAD of t->array for colocated arrays. */
+ if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
+ as->mrm.ofs = (int32_t)sizeof(GCtab); /* Ofs to colocated array. */
+ return irb->op1; /* Table obj. */
+ }
+ } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
+ /* Fuse base offset (vararg load). */
+ as->mrm.ofs = IR(irb->op2)->i;
+ return irb->op1;
+ }
+ return ref; /* Otherwise use the given array base. */
+}
+
+/* Fuse array reference into memory operand. */
+static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irx;
+ lua_assert(ir->o == IR_AREF);
+ as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
+ irx = IR(ir->op2);
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs += 8*irx->i;
+ as->mrm.idx = RID_NONE;
+ } else {
+ rset_clear(allow, as->mrm.base);
+ as->mrm.scale = XM_SCALE8;
+ /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
+ ** Doesn't help much without ABCelim, but reduces register pressure.
+ */
+ if (!LJ_64 && /* Has bad effects with negative index on x64. */
+ mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
+ irx->o == IR_ADD && irref_isk(irx->op2)) {
+ as->mrm.ofs += 8*IR(irx->op2)->i;
+ as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
+ } else {
+ as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
+ }
+ }
+}
+
+/* Fuse array/hash/upvalue reference into memory operand.
+** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
+** pass the final allow mask, excluding any GPRs used for other inputs.
+** In particular: 2-operand GPR instructions need to call ra_dest() first!
+*/
+static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ switch ((IROp)ir->o) {
+ case IR_AREF:
+ if (mayfuse(as, ref)) {
+ asm_fusearef(as, ir, allow);
+ return;
+ }
+ break;
+ case IR_HREFK:
+ if (mayfuse(as, ref)) {
+ as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
+ as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ as->mrm.idx = RID_NONE;
+ return;
+ }
+ break;
+ case IR_UREFC:
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
+ as->mrm.ofs = ptr2addr(&uv->tv);
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ return;
+ }
+ break;
+ default:
+ lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
+ ir->o == IR_KKPTR);
+ break;
+ }
+ }
+ as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
+ as->mrm.ofs = 0;
+ as->mrm.idx = RID_NONE;
+}
+
+/* Fuse FLOAD/FREF reference into memory operand. */
+static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
+{
+ lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
+ as->mrm.ofs = field_ofs[ir->op2];
+ as->mrm.idx = RID_NONE;
+ if (irref_isk(ir->op1)) {
+ as->mrm.ofs += IR(ir->op1)->i;
+ as->mrm.base = RID_NONE;
+ } else {
+ as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
+ }
+}
+
+/* Fuse string reference into memory operand. */
+static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irr;
+ lua_assert(ir->o == IR_STRREF);
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ as->mrm.scale = XM_SCALE1;
+ as->mrm.ofs = sizeof(GCstr);
+ if (irref_isk(ir->op1)) {
+ as->mrm.ofs += IR(ir->op1)->i;
+ } else {
+ Reg r = ra_alloc1(as, ir->op1, allow);
+ rset_clear(allow, r);
+ as->mrm.base = (uint8_t)r;
+ }
+ irr = IR(ir->op2);
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs += irr->i;
+ } else {
+ Reg r;
+ /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
+ if (!LJ_64 && /* Has bad effects with negative index on x64. */
+ mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
+ as->mrm.ofs += IR(irr->op2)->i;
+ r = ra_alloc1(as, irr->op1, allow);
+ } else {
+ r = ra_alloc1(as, ir->op2, allow);
+ }
+ if (as->mrm.base == RID_NONE)
+ as->mrm.base = (uint8_t)r;
+ else
+ as->mrm.idx = (uint8_t)r;
+ }
+}
+
+static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ as->mrm.idx = RID_NONE;
+ if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
+ as->mrm.ofs = ir->i;
+ as->mrm.base = RID_NONE;
+ } else if (ir->o == IR_STRREF) {
+ asm_fusestrref(as, ir, allow);
+ } else {
+ as->mrm.ofs = 0;
+ if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
+ /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
+ IRIns *irx;
+ IRRef idx;
+ Reg r;
+ if (asm_isk32(as, ir->op2, &as->mrm.ofs)) { /* Recognize x+ofs. */
+ ref = ir->op1;
+ ir = IR(ref);
+ if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
+ goto noadd;
+ }
+ as->mrm.scale = XM_SCALE1;
+ idx = ir->op1;
+ ref = ir->op2;
+ irx = IR(idx);
+ if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) { /* Try other operand. */
+ idx = ir->op2;
+ ref = ir->op1;
+ irx = IR(idx);
+ }
+ if (canfuse(as, irx) && ra_noreg(irx->r)) {
+ if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
+          /* Recognize idx<<b with b = 0-3, corresponding to sz = (1,)2,4,8. */
+          idx = irx->op1;
+ as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
+ } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
+ /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
+ idx = irx->op1;
+ as->mrm.scale = XM_SCALE2;
+ }
+ }
+ r = ra_alloc1(as, idx, allow);
+ rset_clear(allow, r);
+ as->mrm.idx = (uint8_t)r;
+ }
+ noadd:
+ as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
+ }
+}
+
+/* Fuse load into memory operand. */
+static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_hasreg(ir->r)) {
+ if (allow != RSET_EMPTY) { /* Fast path. */
+ ra_noweak(as, ir->r);
+ return ir->r;
+ }
+ fusespill:
+ /* Force a spill if only memory operands are allowed (asm_x87load). */
+ as->mrm.base = RID_ESP;
+ as->mrm.ofs = ra_spill(as, ir);
+ as->mrm.idx = RID_NONE;
+ return RID_MRM;
+ }
+ if (ir->o == IR_KNUM) {
+ RegSet avail = as->freeset & ~as->modset & RSET_FPR;
+ lua_assert(allow != RSET_EMPTY);
+ if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */
+ as->mrm.ofs = ptr2addr(ir_knum(ir));
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ return RID_MRM;
+ }
+ } else if (mayfuse(as, ref)) {
+ RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
+ if (ir->o == IR_SLOAD) {
+ if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
+ noconflict(as, ref, IR_RETF, 0)) {
+ as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
+ as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
+ as->mrm.idx = RID_NONE;
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_FLOAD) {
+ /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
+ if ((irt_isint(ir->t) || irt_isaddr(ir->t)) &&
+ noconflict(as, ref, IR_FSTORE, 0)) {
+ asm_fusefref(as, ir, xallow);
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
+ if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
+ asm_fuseahuref(as, ir->op1, xallow);
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_XLOAD) {
+ /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
+ ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
+ */
+ if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
+ noconflict(as, ref, IR_XSTORE, 0)) {
+ asm_fusexref(as, ir->op1, xallow);
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_VLOAD) {
+ asm_fuseahuref(as, ir->op1, xallow);
+ return RID_MRM;
+ }
+ }
+ if (!(as->freeset & allow) &&
+ (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
+ goto fusespill;
+ return ra_allocref(as, ref, allow);
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Count the required number of stack slots for a call. */
+static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t i, nargs = CCI_NARGS(ci);
+ int nslots = 0;
+#if LJ_64
+ if (LJ_ABI_WIN) {
+ nslots = (int)(nargs*2); /* Only matters for more than four args. */
+ } else {
+ int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+ for (i = 0; i < nargs; i++)
+ if (args[i] && irt_isfp(IR(args[i])->t)) {
+ if (nfpr > 0) nfpr--; else nslots += 2;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots += 2;
+ }
+ }
+#else
+ int ngpr = 0;
+ if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
+ ngpr = 2;
+ else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
+ ngpr = 1;
+ for (i = 0; i < nargs; i++)
+ if (args[i] && irt_isfp(IR(args[i])->t)) {
+ nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+#endif
+ return nslots;
+}
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_NARGS(ci);
+ int32_t ofs = STACKARG_OFS;
+#if LJ_64
+ uint32_t gprs = REGARG_GPRS;
+ Reg fpr = REGARG_FIRSTFPR;
+#if !LJ_ABI_WIN
+ MCode *patchnfpr = NULL;
+#endif
+#else
+ uint32_t gprs = 0;
+ if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
+ if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
+ gprs = (REGARG_GPRS & 31);
+ else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
+ gprs = REGARG_GPRS;
+ }
+#endif
+ if ((void *)ci->func)
+ emit_call(as, ci->func);
+#if LJ_64
+ if ((ci->flags & CCI_VARARG)) { /* Special handling for vararg calls. */
+#if LJ_ABI_WIN
+ for (n = 0; n < 4 && n < nargs; n++) {
+ IRIns *ir = IR(args[n]);
+ if (irt_isfp(ir->t)) /* Duplicate FPRs in GPRs. */
+ emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
+ ((gprs >> (n*5)) & 31)); /* Either MOVD or MOVQ. */
+ }
+#else
+ patchnfpr = --as->mcp; /* Indicate number of used FPRs in register al. */
+ *--as->mcp = XI_MOVrib | RID_EAX;
+#endif
+ }
+#endif
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ IRIns *ir = IR(ref);
+ Reg r;
+#if LJ_64 && LJ_ABI_WIN
+ /* Windows/x64 argument registers are strictly positional. */
+ r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
+ fpr++; gprs >>= 5;
+#elif LJ_64
+ /* POSIX/x64 argument registers are used in order of appearance. */
+ if (irt_isfp(ir->t)) {
+ r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
+ } else {
+ r = gprs & 31; gprs >>= 5;
+ }
+#else
+ if (ref && irt_isfp(ir->t)) {
+ r = 0;
+ } else {
+ r = gprs & 31; gprs >>= 5;
+ if (!ref) continue;
+ }
+#endif
+ if (r) { /* Argument is in a register. */
+ if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
+#if LJ_64
+ if (ir->o == IR_KINT64)
+ emit_loadu64(as, r, ir_kint64(ir)->u64);
+ else
+#endif
+ emit_loadi(as, r, ir->i);
+ } else {
+ lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */
+ if (ra_hasreg(ir->r)) {
+ ra_noweak(as, ir->r);
+ emit_movrr(as, ir, r, ir->r);
+ } else {
+ ra_allocref(as, ref, RID2RSET(r));
+ }
+ }
+ } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */
+ lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. */
+ if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
+ /* Split stores for unaligned FP consts. */
+ emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
+ emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
+ } else {
+ r = ra_alloc1(as, ref, RSET_FPR);
+ emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
+ r, RID_ESP, ofs);
+ }
+ ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
+ } else { /* Non-FP argument is on stack. */
+ if (LJ_32 && ref < ASMREF_TMP1) {
+ emit_movmroi(as, RID_ESP, ofs, ir->i);
+ } else {
+ r = ra_alloc1(as, ref, RSET_GPR);
+ emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
+ }
+ ofs += sizeof(intptr_t);
+ }
+ }
+#if LJ_64 && !LJ_ABI_WIN
+ if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
+#endif
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ if ((ci->flags & CCI_NOFPRCLOBBER))
+ drop &= ~RSET_FPR;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ if (irt_isfp(ir->t)) {
+ int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
+#if LJ_64
+ if ((ci->flags & CCI_CASTU64)) {
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rr(as, XO_MOVD, dest|REX_64, RID_RET); /* Really MOVQ. */
+ }
+ if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+#else
+ /* Number result is in x87 st0 for x86 calling convention. */
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
+ dest, RID_ESP, ofs);
+ }
+ if ((ci->flags & CCI_CASTU64)) {
+ emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
+ emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
+ } else {
+ emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
+ irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
+ }
+#endif
+ } else {
+ lua_assert(!irt_ispri(ir->t));
+ ra_destreg(as, ir, RID_RET);
+ }
+ } else if (LJ_32 && irt_isfp(ir->t)) {
+ emit_x87op(as, XI_FPOP); /* Pop unused result from x87 st0. */
+ }
+}
+
+static void asm_call(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ asm_collectargs(as, ir, ci, args);
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+/* Return a constant function pointer or NULL for indirect calls. */
+static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
+{
+#if LJ_32
+ UNUSED(as);
+ if (irref_isk(func))
+ return (void *)irf->i;
+#else
+ if (irref_isk(func)) {
+ MCode *p;
+ if (irf->o == IR_KINT64)
+ p = (MCode *)(void *)ir_k64(irf)->u64;
+ else
+ p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
+ if (p - as->mcp == (int32_t)(p - as->mcp))
+ return p; /* Call target is still in +-2GB range. */
+ /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
+ }
+#endif
+ return NULL;
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ int32_t spadj = 0;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+#if LJ_32
+ /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
+ if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
+ spadj = 4 * asm_count_call_slots(as, &ci, args);
+#endif
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ ci.func = (ASMFunction)asm_callx_func(as, irf, func);
+ if (!(void *)ci.func) {
+ /* Use a (hoistable) non-scratch register for indirect calls. */
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ Reg r = ra_alloc1(as, func, allow);
+ if (LJ_32) emit_spsub(as, spadj); /* Above code may cause restores! */
+ emit_rr(as, XO_GROUP5, XOg_CALL, r);
+ } else if (LJ_32) {
+ emit_spsub(as, spadj);
+ }
+ asm_gencall(as, &ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_guardcc(as, CC_P);
+ asm_guardcc(as, CC_NE);
+ emit_rr(as, XO_UCOMISD, left, tmp);
+ emit_rr(as, XO_CVTSI2SD, tmp, dest);
+ if (!(as->flags & JIT_F_SPLIT_XMM))
+ emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */
+ emit_rr(as, XO_CVTTSD2SI, dest, left);
+ /* Can't fuse since left is needed twice. */
+}
+
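+/* Informal note on the TOBIT trick below: op2 is assumed to carry the usual
+** 2^52+2^51 bias constant, so after the ADDSD the low 32 mantissa bits
+** (extracted with MOVD) hold the operand as an int32, per BitOp semantics.
+*/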
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg tmp = ra_noreg(IR(ir->op1)->r) ?
+ ra_alloc1(as, ir->op1, RSET_FPR) :
+ ra_scratch(as, RSET_FPR);
+ Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
+ emit_rr(as, XO_MOVDto, tmp, dest);
+ emit_mrm(as, XO_ADDSD, tmp, right);
+ ra_left(as, tmp, ir->op1);
+}
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+ IRRef lref = ir->op1;
+ lua_assert(irt_type(ir->t) != st);
+ lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ Reg left = asm_fuseload(as, lref, RSET_FPR);
+ emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
+ if (left == dest) return; /* Avoid the XO_XORPS. */
+ } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */
+ /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
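+      /* Worked example (informal): the bias 0x43380000_00000000 equals
+      ** 2^52+2^51, an exponent where one mantissa ulp is exactly 1.0. The
+      ** MOVD result has zero upper bits and the bias a zero low word, so
+      ** the XORPS acts as an OR: u32=5 becomes 2^52+2^51+5, and subtracting
+      ** the bias leaves exactly 5.0.
+      */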
+ cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000));
+ Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ if (irt_isfloat(ir->t))
+ emit_rr(as, XO_CVTSD2SS, dest, dest);
+ emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */
+ emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */
+ emit_loadn(as, bias, k);
+ emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
+ return;
+ } else { /* Integer to FP conversion. */
+ Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
+ ra_alloc1(as, lref, RSET_GPR) :
+ asm_fuseload(as, lref, RSET_GPR);
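+      /* Informal note: CVTSI2SD treats its source as signed, so a u64 in
+      ** [2^63,2^64-1] converts to value-2^64; the conditional ADDSD of 2^64
+      ** (0x43f00000_00000000) below compensates when the sign bit was set.
+      */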
+ if (LJ_64 && st == IRT_U64) {
+ MCLabel l_end = emit_label(as);
+ const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000));
+ emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */
+ emit_sjcc(as, CC_NS, l_end);
+ emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */
+ }
+ emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
+ dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
+ }
+ if (!(as->flags & JIT_F_SPLIT_XMM))
+ emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lua_assert(irt_isint(ir->t) && st == IRT_NUM);
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ x86Op op = st == IRT_NUM ?
+ ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) :
+ ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI);
+ if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
+ /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
+ /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
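+        /* E.g. (32 bit, informal): x=3e9 overflows the first CVTTSD2SI; the
+        ** slow path then converts x-2^31 = 852516352 and adds 0x80000000
+        ** back, yielding the u32 bit pattern of 3000000000. */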
+ Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
+ ra_scratch(as, RSET_FPR);
+ MCLabel l_end = emit_label(as);
+ if (LJ_32)
+ emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
+ emit_rr(as, op, dest|REX_64, tmp);
+ if (st == IRT_NUM)
+ emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J,
+ LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000)));
+ else
+ emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J,
+ LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000)));
+ emit_sjcc(as, CC_NS, l_end);
+ emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest negative. */
+ emit_rr(as, op, dest|REX_64, tmp);
+ ra_left(as, tmp, lref);
+ } else {
+ Reg left = asm_fuseload(as, lref, RSET_FPR);
+ if (LJ_64 && irt_isu32(ir->t))
+ emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */
+ emit_mrm(as, op,
+ dest|((LJ_64 &&
+ (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
+ left);
+ }
+ }
+ } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left, dest = ra_dest(as, ir, RSET_GPR);
+ RegSet allow = RSET_GPR;
+ x86Op op;
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
+ if (st == IRT_I8) {
+ op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
+ } else if (st == IRT_U8) {
+ op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
+ } else if (st == IRT_I16) {
+ op = XO_MOVSXw;
+ } else {
+ op = XO_MOVZXw;
+ }
+ left = asm_fuseload(as, lref, allow);
+ /* Add extra MOV if source is already in wrong register. */
+ if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
+ Reg tmp = ra_scratch(as, allow);
+ emit_rr(as, op, dest, tmp);
+ emit_rr(as, XO_MOV, tmp, left);
+ } else {
+ emit_mrm(as, op, dest, left);
+ }
+ } else { /* 32/64 bit integer conversions. */
+ if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else if (irt_is64(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st64 || !(ir->op2 & IRCONV_SEXT)) {
+ /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
+ ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else { /* 32 to 64 bit sign extension. */
+ Reg left = asm_fuseload(as, lref, RSET_GPR);
+ emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
+ }
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st64) {
+ Reg left = asm_fuseload(as, lref, RSET_GPR);
+ /* This is either a 32 bit reg/reg mov which zeroes the hiword
+ ** or a load of the loword from a 64 bit address.
+ */
+ emit_mrm(as, XO_MOV, dest, left);
+ } else { /* 32/32 bit no-op (cast). */
+ ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+ }
+}
+
+#if LJ_32 && LJ_HASFFI
+/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */
+
+/* 64 bit integer to FP conversion in 32 bit mode. */
+static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
+{
+ Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
+ int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
+ dest, RID_ESP, ofs);
+ }
+ emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
+ irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
+ if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
+ /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
+ MCLabel l_end = emit_label(as);
+ emit_rma(as, XO_FADDq, XOg_FADDq,
+ lj_ir_k64_find(as->J, U64x(43f00000,00000000)));
+ emit_sjcc(as, CC_NS, l_end);
+ emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */
+ } else {
+ lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64);
+ }
+ emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
+ /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
+ emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
+ emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
+}
+
+/* FP to 64 bit integer conversion in 32 bit mode. */
+static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
+ IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
+ Reg lo, hi;
+ lua_assert(st == IRT_NUM || st == IRT_FLOAT);
+ lua_assert(dt == IRT_I64 || dt == IRT_U64);
+ lua_assert(((ir-1)->op2 & IRCONV_TRUNC));
+ hi = ra_dest(as, ir, RSET_GPR);
+ lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
+ if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
+ /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
+ if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */
+ emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
+ emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
+ emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
+ }
+ if (dt == IRT_U64) {
+ /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
+ MCLabel l_pop, l_end = emit_label(as);
+ emit_x87op(as, XI_FPOP);
+ l_pop = emit_label(as);
+ emit_sjmp(as, l_end);
+ emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
+ if ((as->flags & JIT_F_SSE3))
+ emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
+ else
+ emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
+ emit_rma(as, XO_FADDq, XOg_FADDq,
+ lj_ir_k64_find(as->J, U64x(c3f00000,00000000)));
+ emit_sjcc(as, CC_NS, l_pop);
+ emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */
+ }
+ emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
+ if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */
+ emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
+ } else { /* Otherwise set FPU rounding mode to truncate before the store. */
+ emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
+ emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
+ emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
+ emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
+ emit_loadi(as, lo, 0xc00);
+ emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
+ }
+ if (dt == IRT_U64)
+ emit_x87op(as, XI_FDUP);
+ emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
+ st == IRT_NUM ? XOg_FLDq: XOg_FLDd,
+ asm_fuseload(as, ir->op1, RSET_EMPTY));
+}
+#endif
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ /* Force a spill slot for the destination register (if any). */
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum];
+ IRRef args[2];
+ RegSet drop = RSET_SCRATCH;
+ if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
+ rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */
+ ra_evictset(as, drop);
+ asm_guardcc(as, CC_E);
+ emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ /* Store the result to the spill slot or temp slots. */
+ emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
+ RID_ESP, sps_scale(ir->s));
+}
+
+static void asm_tostr(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1);
+ IRRef args[2];
+ args[0] = ASMREF_L;
+ as->gcsteps++;
+ if (irt_isnum(irl->t)) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
+ args[1] = ASMREF_TMP1; /* const lua_Number * */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
+ RID_ESP, ra_spill(as, irl));
+ } else {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
+ args[1] = ir->op1; /* int32_t k */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ }
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_fusearef(as, ir, RSET_GPR);
+ if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+ else if (as->mrm.base != dest)
+ emit_rr(as, XO_MOV, dest, as->mrm.base);
+}
+
+/* Merge NE(HREF, niltv) check. */
+static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
+{
+ /* Assumes nothing else generates NE of HREF. */
+ if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins &&
+ ra_hasreg(ir->r)) {
+ MCode *p = as->mcp;
+ p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6;
+ /* Ensure no loop branch inversion happened. */
+ if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) {
+ as->mcp = p; /* Kill cmp reg, imm32 + jz exit. */
+ return p + *(int32_t *)(p-4); /* Return exit address. */
+ }
+ }
+ return NULL;
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir)
+{
+ MCode *nilexit = merge_href_niltv(as, ir); /* Do this before any restores. */
+ RegSet allow = RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = RID_NONE, tmp = RID_NONE;
+ IRIns *irkey = IR(ir->op2);
+ int isk = irref_isk(ir->op2);
+ IRType1 kt = irkey->t;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+
+ if (!isk) {
+ rset_clear(allow, tab);
+ key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
+ if (!irt_isstr(kt))
+ tmp = ra_scratch(as, rset_exclude(allow, key));
+ }
+
+ /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */
+ l_end = emit_label(as);
+ if (nilexit && ir[1].o == IR_NE) {
+ emit_jcc(as, CC_E, nilexit); /* XI_JMP is not found by lj_asm_patchexit. */
+ nilexit = NULL;
+ } else {
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+ }
+
+ /* Follow hash chain until the end. */
+ l_loop = emit_sjcc_label(as, CC_NZ);
+ emit_rr(as, XO_TEST, dest, dest);
+ emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (nilexit)
+ emit_jcc(as, CC_E, nilexit);
+ else
+ emit_sjcc(as, CC_E, l_end);
+ if (irt_isnum(kt)) {
+ if (isk) {
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
+ (int32_t)ir_knum(irkey)->u32.lo);
+ emit_sjcc(as, CC_NE, l_next);
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
+ (int32_t)ir_knum(irkey)->u32.hi);
+ } else {
+ emit_sjcc(as, CC_P, l_next);
+ emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
+ emit_sjcc(as, CC_AE, l_next);
+ /* The type check avoids NaN penalties and complaints from Valgrind. */
+#if LJ_64
+ emit_u32(as, LJ_TISNUM);
+ emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
+#else
+ emit_i8(as, LJ_TISNUM);
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
+#endif
+ }
+#if LJ_64
+ } else if (irt_islightud(kt)) {
+ emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
+#endif
+ } else {
+ if (!irt_ispri(kt)) {
+ lua_assert(irt_isaddr(kt));
+ if (isk)
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
+ ptr2addr(ir_kgc(irkey)));
+ else
+ emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
+ emit_sjcc(as, CC_NE, l_next);
+ }
+ lua_assert(!irt_isnil(kt));
+ emit_i8(as, irt_toitype(kt));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
+ }
+ emit_sfixup(as, l_loop);
+ checkmclim(as);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = isk ? ir_khash(irkey) : 1;
+ if (khash == 0) {
+ emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node));
+ } else {
+ emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node));
+ if ((as->flags & JIT_F_PREFER_IMUL)) {
+ emit_i8(as, sizeof(Node));
+ emit_rr(as, XO_IMULi8, dest, dest);
+ } else {
+ emit_shifti(as, XOg_SHL, dest, 3);
+ emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
+ }
+ if (isk) {
+ emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
+ emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
+ } else if (irt_isstr(kt)) {
+ emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash));
+ emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
+ } else { /* Must match with hashrot() in lj_tab.c. */
+ emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
+ emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
+ emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
+ emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
+ emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
+ emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
+ emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
+ emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
+ if (irt_isnum(kt)) {
+ emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
+#if LJ_64
+ emit_shifti(as, XOg_SHR|REX_64, dest, 32);
+ emit_rr(as, XO_MOV, tmp, dest);
+ emit_rr(as, XO_MOVDto, key|REX_64, dest);
+#else
+ emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
+ emit_rr(as, XO_MOVDto, key, tmp);
+#endif
+ } else {
+ emit_rr(as, XO_MOV, tmp, key);
+ emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
+ }
+ }
+ }
+}
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+#if !LJ_64
+ MCLabel l_exit;
+#endif
+ lua_assert(ofs % sizeof(Node) == 0);
+ if (ra_hasreg(dest)) {
+ if (ofs != 0) {
+ if (dest == node && !(as->flags & JIT_F_LEA_AGU))
+ emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs);
+ else
+ emit_rmro(as, XO_LEA, dest, node, ofs);
+ } else if (dest != node) {
+ emit_rr(as, XO_MOV, dest, node);
+ }
+ }
+ asm_guardcc(as, CC_NE);
+#if LJ_64
+ if (!irt_ispri(irkey->t)) {
+ Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
+ emit_rmro(as, XO_CMP, key|REX_64, node,
+ ofs + (int32_t)offsetof(Node, key.u64));
+ lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t));
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
+ ((uint64_t)irt_toitype(irkey->t) << 32) |
+ (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
+ } else {
+ lua_assert(!irt_isnil(irkey->t));
+ emit_i8(as, irt_toitype(irkey->t));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
+ ofs + (int32_t)offsetof(Node, key.it));
+ }
+#else
+ l_exit = emit_label(as);
+ if (irt_isnum(irkey->t)) {
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
+ ofs + (int32_t)offsetof(Node, key.u32.lo),
+ (int32_t)ir_knum(irkey)->u32.lo);
+ emit_sjcc(as, CC_NE, l_exit);
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
+ ofs + (int32_t)offsetof(Node, key.u32.hi),
+ (int32_t)ir_knum(irkey)->u32.hi);
+ } else {
+ if (!irt_ispri(irkey->t)) {
+ lua_assert(irt_isgcv(irkey->t));
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
+ ofs + (int32_t)offsetof(Node, key.gcr),
+ ptr2addr(ir_kgc(irkey)));
+ emit_sjcc(as, CC_NE, l_exit);
+ }
+ lua_assert(!irt_isnil(irkey->t));
+ emit_i8(as, irt_toitype(irkey->t));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
+ ofs + (int32_t)offsetof(Node, key.it));
+ }
+#endif
+}
+
+static void asm_newref(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
+ IRRef args[3];
+ IRIns *irkey;
+ Reg tmp;
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* GCtab *t */
+ args[2] = ASMREF_TMP1; /* cTValue *key */
+ asm_setupresult(as, ir, ci); /* TValue * */
+ asm_gencall(as, ci, args);
+ tmp = ra_releasetmp(as, ASMREF_TMP1);
+ irkey = IR(ir->op2);
+ if (irt_isnum(irkey->t)) {
+ /* For numbers use the constant itself or a spill slot as a TValue. */
+ if (irref_isk(ir->op2))
+ emit_loada(as, tmp, ir_knum(irkey));
+ else
+ emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey));
+ } else {
+ /* Otherwise use g->tmptv to hold the TValue. */
+ if (!irref_isk(ir->op2)) {
+ Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
+ emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
+ } else if (!irt_ispri(irkey->t)) {
+ emit_movmroi(as, tmp, 0, irkey->i);
+ }
+ if (!(LJ_64 && irt_islightud(irkey->t)))
+ emit_movmroi(as, tmp, 4, irt_toitype(irkey->t));
+ emit_loada(as, tmp, &J2G(as->J)->tmptv);
+ }
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ /* NYI: Check that UREFO is still open and not aliasing a slot. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_rma(as, XO_MOV, dest, v);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv));
+ asm_guardcc(as, CC_NE);
+ emit_i8(as, 1);
+ emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
+ } else {
+ emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v));
+ }
+ emit_rmro(as, XO_MOV, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_fusefref(as, ir, RSET_GPR);
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_fusestrref(as, ir, RSET_GPR);
+ if (as->mrm.base == RID_NONE)
+ emit_loadi(as, dest, as->mrm.ofs);
+ else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
+ emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs);
+ else
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static void asm_fxload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ x86Op xo;
+ if (ir->o == IR_FLOAD)
+ asm_fusefref(as, ir, RSET_GPR);
+ else
+ asm_fusexref(as, ir->op1, RSET_GPR);
+ /* ir->op2 is ignored -- unaligned loads are ok on x86. */
+ switch (irt_type(ir->t)) {
+ case IRT_I8: xo = XO_MOVSXb; break;
+ case IRT_U8: xo = XO_MOVZXb; break;
+ case IRT_I16: xo = XO_MOVSXw; break;
+ case IRT_U16: xo = XO_MOVZXw; break;
+ case IRT_NUM: xo = XMM_MOVRM(as); break;
+ case IRT_FLOAT: xo = XO_MOVSS; break;
+ default:
+ if (LJ_64 && irt_is64(ir->t))
+ dest |= REX_64;
+ else
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
+ xo = XO_MOV;
+ break;
+ }
+ emit_mrm(as, xo, dest, RID_MRM);
+}
+
+static void asm_fxstore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg src = RID_NONE, osrc = RID_NONE;
+ int32_t k = 0;
+ /* The IRT_I16/IRT_U16 stores should never be simplified for constant
+ ** values since mov word [mem], imm16 has a length-changing prefix.
+ */
+ if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
+ !asm_isk32(as, ir->op2, &k)) {
+ RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
+ (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
+ src = osrc = ra_alloc1(as, ir->op2, allow8);
+ if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */
+ rset_clear(allow, osrc);
+ src = ra_scratch(as, allow8);
+ }
+ rset_clear(allow, src);
+ }
+ if (ir->o == IR_FSTORE)
+ asm_fusefref(as, IR(ir->op1), allow);
+ else
+ asm_fusexref(as, ir->op1, allow);
+ /* ir->op2 is ignored -- unaligned stores are ok on x86. */
+ if (ra_hasreg(src)) {
+ x86Op xo;
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
+ case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
+ case IRT_NUM: xo = XO_MOVSDto; break;
+ case IRT_FLOAT: xo = XO_MOVSSto; break;
+#if LJ_64
+ case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */
+#endif
+ default:
+ if (LJ_64 && irt_is64(ir->t))
+ src |= REX_64;
+ else
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
+ xo = XO_MOVto;
+ break;
+ }
+ emit_mrm(as, xo, src, RID_MRM);
+ if (!LJ_64 && src != osrc) {
+ ra_noweak(as, osrc);
+ emit_rr(as, XO_MOV, src, osrc);
+ }
+ } else {
+ if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
+ emit_i8(as, k);
+ emit_mrm(as, XO_MOVmib, 0, RID_MRM);
+ } else {
+ lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
+ irt_isaddr(ir->t));
+ emit_i32(as, k);
+ emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
+ }
+ }
+}
+
+#if LJ_64
+static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
+{
+ if (ra_used(ir) || typecheck) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (typecheck) {
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
+ asm_guardcc(as, CC_NE);
+ emit_i8(as, -2);
+ emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
+ emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
+ emit_rr(as, XO_MOV, tmp|REX_64, dest);
+ }
+ return dest;
+ } else {
+ return RID_NONE;
+ }
+}
+#endif
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
+ (LJ_DUALNUM && irt_isint(ir->t)));
+#if LJ_64
+ if (irt_islightud(ir->t)) {
+ Reg dest = asm_load_lightud64(as, ir, 1);
+ if (ra_hasreg(dest)) {
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
+ }
+ return;
+ } else
+#endif
+ if (ra_used(ir)) {
+ RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM);
+ } else {
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ }
+ /* Always do the type check, even if the load result is unused. */
+ as->mrm.ofs += 4;
+ asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
+ if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
+ lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t));
+ emit_u32(as, LJ_TISNUM);
+ emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
+ } else {
+ emit_i8(as, irt_toitype(ir->t));
+ emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
+ }
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ emit_mrm(as, XO_MOVSDto, src, RID_MRM);
+#if LJ_64
+ } else if (irt_islightud(ir->t)) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
+ emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
+#endif
+ } else {
+ IRIns *irr = IR(ir->op2);
+ RegSet allow = RSET_GPR;
+ Reg src = RID_NONE;
+ if (!irref_isk(ir->op2)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ }
+ asm_fuseahuref(as, ir->op1, allow);
+ if (ra_hasreg(src)) {
+ emit_mrm(as, XO_MOVto, src, RID_MRM);
+ } else if (!irt_ispri(irr->t)) {
+ lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)));
+ emit_i32(as, irr->i);
+ emit_mrm(as, XO_MOVmi, 0, RID_MRM);
+ }
+ as->mrm.ofs += 4;
+ emit_i32(as, (int32_t)irt_toitype(ir->t));
+ emit_mrm(as, XO_MOVmi, 0, RID_MRM);
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ IRType1 t = ir->t;
+ Reg base;
+ lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
+ lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
+ lua_assert(LJ_DUALNUM ||
+ !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ Reg left = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ emit_rmro(as, XMM_MOVRM(as), left, base, ofs);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+#if LJ_64
+ } else if (irt_islightud(t)) {
+ Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
+ if (ra_hasreg(dest)) {
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
+ }
+ return;
+#endif
+ } else if (ra_used(ir)) {
+ RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ if ((ir->op2 & IRSLOAD_CONVERT)) {
+ t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */
+ emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs);
+ } else if (irt_isnum(t)) {
+ emit_rmro(as, XMM_MOVRM(as), dest, base, ofs);
+ } else {
+ emit_rmro(as, XO_MOV, dest, base, ofs);
+ }
+ } else {
+ if (!(ir->op2 & IRSLOAD_TYPECHECK))
+ return; /* No type check: avoid base alloc. */
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ }
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ /* Need type check, even if the load result is unused. */
+ asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
+ if (LJ_64 && irt_type(t) >= IRT_NUM) {
+ lua_assert(irt_isinteger(t) || irt_isnum(t));
+ emit_u32(as, LJ_TISNUM);
+ emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
+ } else {
+ emit_i8(as, irt_toitype(t));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
+ }
+ }
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID typeid = (CTypeID)IR(ir->op1)->i;
+ CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
+ lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i;
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[2];
+ lua_assert(sz != CTSIZE_INVALID);
+
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+#if LJ_64
+ Reg r64 = sz == 8 ? REX_64 : 0;
+ if (irref_isk(ir->op2)) {
+ IRIns *irk = IR(ir->op2);
+ uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 :
+ (uint64_t)(uint32_t)irk->i;
+ if (sz == 4 || checki32((int64_t)k)) {
+ emit_i32(as, (int32_t)k);
+ emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
+ } else {
+ emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
+ emit_loadu64(as, RID_ECX, k);
+ }
+ } else {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
+ }
+#else
+ int32_t ofs = sizeof(GCcdata);
+ if (sz == 8) {
+ ofs += 4; ir++;
+ lua_assert(ir->o == IR_HIOP);
+ }
+ do {
+ if (irref_isk(ir->op2)) {
+ emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
+ } else {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_movtomro(as, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ }
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; ir--;
+ } while (1);
+#endif
+ lua_assert(sz == 4 || sz == 8);
+ }
+
+ /* Combine initialization of marked, gct and typeid. */
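+  /* (Informally: marked, gct and typeid are adjacent fields in the GCcdata
+  ** header, so one 32 bit store of ECX initializes all three at once.) */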
+ emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
+ emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
+ (int32_t)((~LJ_TCDATA<<8)+(typeid<<16)));
+ emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
+ emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);
+
+ asm_gencall(as, ci, args);
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
+}
+#else
+#define asm_cnew(as, ir) ((void)0)
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ MCLabel l_end = emit_label(as);
+ emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist));
+ emit_setgl(as, tab, gc.grayagain);
+ emit_getgl(as, tmp, gc.grayagain);
+ emit_i8(as, ~LJ_GC_BLACK);
+ emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
+ emit_sjcc(as, CC_Z, l_end);
+ emit_i8(as, LJ_GC_BLACK);
+ emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj;
+ /* No need for other object barriers (yet). */
+ lua_assert(IR(ir->op1)->o == IR_UREFC);
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
+ obj = IR(ir->op1)->r;
+ emit_sjcc(as, CC_Z, l_end);
+ emit_i8(as, LJ_GC_WHITES);
+ if (irref_isk(ir->op2)) {
+ GCobj *vp = ir_kgc(IR(ir->op2));
+ emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
+ } else {
+ Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
+ emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
+ }
+ emit_sjcc(as, CC_Z, l_end);
+ emit_i8(as, LJ_GC_BLACK);
+ emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+}
+
+/* -- FP/int arithmetic and logic operations ------------------------------ */
+
+/* Load reference onto x87 stack. Force a spill to memory if needed. */
+static void asm_x87load(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_KNUM) {
+ cTValue *tv = ir_knum(ir);
+ if (tvispzero(tv)) /* Use fldz only for +0. */
+ emit_x87op(as, XI_FLDZ);
+ else if (tvispone(tv))
+ emit_x87op(as, XI_FLD1);
+ else
+ emit_rma(as, XO_FLDq, XOg_FLDq, tv);
+ } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
+ !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
+ IRIns *iri = IR(ir->op1);
+ emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
+ } else {
+ emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
+ }
+}
+
+/* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */
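+/* (Informally: x^y == 2^(y*log2(x)); when the LOG2, MUL and EXP2 results are
+** otherwise unused, they are folded back into a single lj_vm_pow_sse call.)
+*/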
+static int fpmjoin_pow(ASMState *as, IRIns *ir)
+{
+ IRIns *irp = IR(ir->op1);
+ if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
+ IRIns *irpp = IR(irp->op1);
+ if (irpp == ir-2 && irpp->o == IR_FPMATH &&
+ irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
+ IRIns *irx;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_XMM0);
+ emit_call(as, lj_vm_pow_sse);
+ irx = IR(irpp->op1);
+ if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1)
+ irx->r = RID_INIT; /* Avoid allocating xmm1 for x. */
+ ra_left(as, RID_XMM0, irpp->op1);
+ ra_left(as, RID_XMM1, irp->op2);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void asm_fpmath(ASMState *as, IRIns *ir)
+{
+ IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER;
+ if (fpm == IRFPM_SQRT) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
+ emit_mrm(as, XO_SQRTSD, dest, left);
+ } else if (fpm <= IRFPM_TRUNC) {
+ if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
+ /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
+ ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
+ ** This is atrocious, but the alternatives are much worse.
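+      ** (For reference: ROUNDSD encodes as 66 0F 3A 0B /r ib, which is why
+      ** the 0x66 prefix is prepended by hand after emit_mrm below.)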
+ */
+ /* Round down/up/trunc == 1001/1010/1011. */
+ emit_i8(as, 0x09 + fpm);
+ emit_mrm(as, XO_ROUNDSD, dest, left);
+ if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
+ as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */
+ }
+ *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */
+ } else { /* Call helper functions for SSE2 variant. */
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_XMM0);
+ emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse :
+ fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
+ ra_left(as, RID_XMM0, ir->op1);
+ }
+ } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) {
+ /* Rejoined to pow(). */
+ } else { /* Handle x87 ops. */
+ int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs);
+ }
+ emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
+ switch (fpm) { /* st0 = lj_vm_*(st0) */
+ case IRFPM_EXP: emit_call(as, lj_vm_exp_x87); break;
+ case IRFPM_EXP2: emit_call(as, lj_vm_exp2_x87); break;
+ case IRFPM_SIN: emit_x87op(as, XI_FSIN); break;
+ case IRFPM_COS: emit_x87op(as, XI_FCOS); break;
+ case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break;
+ case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10:
+ /* Note: the use of fyl2xp1 would be pointless here. When computing
+ ** log(1.0+eps) the precision is already lost after 1.0 is added.
+ ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense.
+ */
+ emit_x87op(as, XI_FYL2X); break;
+ case IRFPM_OTHER:
+ switch (ir->o) {
+ case IR_ATAN2:
+ emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break;
+ case IR_LDEXP:
+ emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break;
+ default: lua_assert(0); break;
+ }
+ break;
+ default: lua_assert(0); break;
+ }
+ asm_x87load(as, ir->op1);
+ switch (fpm) {
+ case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break;
+ case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break;
+ case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break;
+ case IRFPM_OTHER:
+ if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2);
+ break;
+ default: break;
+ }
+ }
+}
+
+static void asm_fppowi(ASMState *as, IRIns *ir)
+{
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_XMM0);
+ emit_call(as, lj_vm_powi_sse);
+ ra_left(as, RID_XMM0, ir->op1);
+ ra_left(as, RID_EAX, ir->op2);
+}
+
+#if LJ_64 && LJ_HASFFI
+static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+#endif
+
+static void asm_intmod(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static int asm_swapops(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1);
+ IRIns *irr = IR(ir->op2);
+ lua_assert(ra_noreg(irr->r));
+ if (!irm_iscomm(lj_ir_mode[ir->o]))
+ return 0; /* Can't swap non-commutative operations. */
+ if (irref_isk(ir->op2))
+ return 0; /* Don't swap constants to the left. */
+ if (ra_hasreg(irl->r))
+ return 1; /* Swap if left already has a register. */
+ if (ra_samehint(ir->r, irr->r))
+ return 1; /* Swap if dest and right have matching hints. */
+ if (as->curins > as->loopref) { /* In variant part? */
+ if (ir->op2 < as->loopref && !irt_isphi(irr->t))
+ return 0; /* Keep invariants on the right. */
+ if (ir->op1 < as->loopref && !irt_isphi(irl->t))
+ return 1; /* Swap invariants to the right. */
+ }
+ if (opisfusableload(irl->o))
+ return 1; /* Swap fusable loads to the right. */
+ return 0; /* Otherwise don't swap. */
+}
+
+static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
+{
+ IRRef lref = ir->op1;
+ IRRef rref = ir->op2;
+ RegSet allow = RSET_FPR;
+ Reg dest;
+ Reg right = IR(rref)->r;
+ if (ra_hasreg(right)) {
+ rset_clear(allow, right);
+ ra_noweak(as, right);
+ }
+ dest = ra_dest(as, ir, allow);
+ if (lref == rref) {
+ right = dest;
+ } else if (ra_noreg(right)) {
+ if (asm_swapops(as, ir)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ }
+ right = asm_fuseload(as, rref, rset_clear(allow, dest));
+ }
+ emit_mrm(as, xo, dest, right);
+ ra_left(as, dest, lref);
+}
+
+static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
+{
+ IRRef lref = ir->op1;
+ IRRef rref = ir->op2;
+ RegSet allow = RSET_GPR;
+ Reg dest, right;
+ int32_t k = 0;
+ if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */
+ as->flagmcp = NULL;
+ as->mcp += (LJ_64 && *as->mcp != XI_TEST) ? 3 : 2;
+ }
+ right = IR(rref)->r;
+ if (ra_hasreg(right)) {
+ rset_clear(allow, right);
+ ra_noweak(as, right);
+ }
+ dest = ra_dest(as, ir, allow);
+ if (lref == rref) {
+ right = dest;
+ } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
+ if (asm_swapops(as, ir)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ }
+ right = asm_fuseload(as, rref, rset_clear(allow, dest));
+ }
+ if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */
+ asm_guardcc(as, CC_O);
+ if (xa != XOg_X_IMUL) {
+ if (ra_hasreg(right))
+ emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
+ else
+ emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
+ } else if (ra_hasreg(right)) { /* IMUL r, mrm. */
+ emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
+ } else { /* IMUL r, r, k. */
+ /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
+ Reg left = asm_fuseload(as, lref, RSET_GPR);
+ x86Op xo;
+ if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
+ } else { emit_i32(as, k); xo = XO_IMULi; }
+ emit_mrm(as, xo, REX_64IR(ir, dest), left);
+ return;
+ }
+ ra_left(as, dest, lref);
+}
+
+/* LEA is really a 4-operand ADD with an independent destination register,
+** up to two source registers and an immediate. One register can be scaled
+** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
+** instructions.
+**
+** Currently only a few common cases are supported:
+** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated
+** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b
+** - Right ADD fusion: y = a+(b+k)
+** The omitted variants have already been reduced by FOLD.
+**
+** There are more fusion opportunities, like gathering shifts or joining
+** common references. But these are probably not worth the trouble, since
+** array indexing is not decomposed and already makes use of all fields
+** of the ModRM operand.
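+**
+** Informal example: y = (a+b)+k with a and b already in registers becomes a
+** single "lea y, [a+b+k]" instead of a register move plus two adds.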
+*/
+static int asm_lea(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1);
+ IRIns *irr = IR(ir->op2);
+ RegSet allow = RSET_GPR;
+ Reg dest;
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ as->mrm.scale = XM_SCALE1;
+ as->mrm.ofs = 0;
+ if (ra_hasreg(irl->r)) {
+ rset_clear(allow, irl->r);
+ ra_noweak(as, irl->r);
+ as->mrm.base = irl->r;
+ if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
+ /* The PHI renaming logic does a better job in some cases. */
+ if (ra_hasreg(ir->r) &&
+ ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
+ (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
+ return 0;
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs = irr->i;
+ } else {
+ rset_clear(allow, irr->r);
+ ra_noweak(as, irr->r);
+ as->mrm.idx = irr->r;
+ }
+ } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
+ irref_isk(irr->op2)) {
+ Reg idx = ra_alloc1(as, irr->op1, allow);
+ rset_clear(allow, idx);
+ as->mrm.idx = (uint8_t)idx;
+ as->mrm.ofs = IR(irr->op2)->i;
+ } else {
+ return 0;
+ }
+ } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
+ (irref_isk(ir->op2) || irref_isk(irl->op2))) {
+ Reg idx, base = ra_alloc1(as, irl->op1, allow);
+ rset_clear(allow, base);
+ as->mrm.base = (uint8_t)base;
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs = irr->i;
+ idx = ra_alloc1(as, irl->op2, allow);
+ } else {
+ as->mrm.ofs = IR(irl->op2)->i;
+ idx = ra_alloc1(as, ir->op2, allow);
+ }
+ rset_clear(allow, idx);
+ as->mrm.idx = (uint8_t)idx;
+ } else {
+ return 0;
+ }
+ dest = ra_dest(as, ir, allow);
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+ return 1; /* Success. */
+}
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_ADDSD);
+ else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp ||
+ irt_is64(ir->t) || !asm_lea(as, ir))
+ asm_intarith(as, ir, XOg_ADD);
+}
+
+static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
+ ra_left(as, dest, ir->op1);
+}
+
+static void asm_min_max(ASMState *as, IRIns *ir, int cc)
+{
+ Reg right, dest = ra_dest(as, ir, RSET_GPR);
+ IRRef lref = ir->op1, rref = ir->op2;
+ if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
+ right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
+ emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
+ emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
+ ra_left(as, dest, lref);
+}
+
+static void asm_bitswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
+ REX_64IR(ir, 0), dest, 0, as->mcp, 1);
+ ra_left(as, dest, ir->op1);
+}
+
+static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs)
+{
+ IRRef rref = ir->op2;
+ IRIns *irr = IR(rref);
+ Reg dest;
+ if (irref_isk(rref)) { /* Constant shifts. */
+ int shift;
+ dest = ra_dest(as, ir, RSET_GPR);
+ shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
+ switch (shift) {
+ case 0: break;
+ case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
+ default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
+ }
+ } else { /* Variable shifts implicitly use register cl (i.e. ecx). */
+ Reg right;
+ dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
+ if (dest == RID_ECX) {
+ dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
+ emit_rr(as, XO_MOV, RID_ECX, dest);
+ }
+ right = irr->r;
+ if (ra_noreg(right))
+ right = ra_allocref(as, rref, RID2RSET(RID_ECX));
+ else if (right != RID_ECX)
+ ra_scratch(as, RID2RSET(RID_ECX));
+ emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
+ if (right != RID_ECX) {
+ ra_noweak(as, right);
+ emit_rr(as, XO_MOV, RID_ECX, right);
+ }
+ }
+ ra_left(as, dest, ir->op1);
+ /*
+ ** Note: avoid using the flags resulting from a shift or rotate!
+ ** All of them cause a partial flag stall, except for r,1 shifts
+ ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
+ */
+}
+
+/* -- Comparisons --------------------------------------------------------- */
+
+/* Virtual flags for unordered FP comparisons. */
+#define VCC_U 0x1000 /* Unordered. */
+#define VCC_P 0x2000 /* Needs extra CC_P branch. */
+#define VCC_S 0x4000 /* Swap avoids CC_P branch. */
+#define VCC_PS (VCC_P|VCC_S)
+
+/* Map of comparisons to flags. ORDER IR. */
+#define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf))
+static const uint16_t asm_compmap[IR_ABC+1] = {
+ /* signed non-eq unsigned flags */
+ /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS),
+ /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0),
+ /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS),
+ /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0),
+ /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U),
+ /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS),
+ /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U),
+ /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS),
+ /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
+ /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P),
+ /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */
+};
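+
+/* Informal reading of asm_compmap: each entry stores the *negated* condition,
+** since the guard branches to the exit when the comparison fails on-trace.
+** E.g. IR_LT packs CC_GE (signed), CC_AE (unsigned/FP), CC_G for the hiword
+** compare, plus VCC_PS for handling unordered (NaN) results.
+*/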
+
+/* FP and integer comparisons. */
+static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
+{
+ if (irt_isnum(ir->t)) {
+ IRRef lref = ir->op1;
+ IRRef rref = ir->op2;
+ Reg left, right;
+ MCLabel l_around;
+ /*
+ ** An extra CC_P branch is required to preserve ordered/unordered
+ ** semantics for FP comparisons. This can be avoided by swapping
+ ** the operands and inverting the condition (except for EQ and UNE).
+ ** So always try to swap if possible.
+ **
+ ** Another option would be to swap operands to achieve better memory
+ ** operand fusion. But it's unlikely that this outweighs the cost
+ ** of the extra branches.
+ */
+ if (cc & VCC_S) { /* Swap? */
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */
+ }
+ left = ra_alloc1(as, lref, RSET_FPR);
+ right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
+ l_around = emit_label(as);
+ asm_guardcc(as, cc >> 4);
+ if (cc & VCC_P) { /* Extra CC_P branch required? */
+ if (!(cc & VCC_U)) {
+ asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */
+ } else if (l_around != as->invmcp) {
+ emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */
+ } else {
+ /* Patched to mcloop by asm_loop_fixup. */
+ as->loopinv = 2;
+ if (as->realign)
+ emit_sjcc(as, CC_P, as->mcp);
+ else
+ emit_jcc(as, CC_P, as->mcp);
+ }
+ }
+ emit_mrm(as, XO_UCOMISD, left, right);
+ } else {
+ IRRef lref = ir->op1, rref = ir->op2;
+ IROp leftop = (IROp)(IR(lref)->o);
+ Reg r64 = REX_64IR(ir, 0);
+ int32_t imm = 0;
+ lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isaddr(ir->t));
+ /* Swap constants (only for ABC) and fusable loads to the right. */
+ if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
+ if ((cc & 0xc) == 0xc) cc ^= 3; /* L <-> G, LE <-> GE */
+ else if ((cc & 0xa) == 0x2) cc ^= 5; /* A <-> B, AE <-> BE */
+ lref = ir->op2; rref = ir->op1;
+ }
+ if (asm_isk32(as, rref, &imm)) {
+ IRIns *irl = IR(lref);
+      /* Check whether we can use test ins. Not for unsigned, since CF=0. */
+ int usetest = (imm == 0 && (cc & 0xa) != 0x2);
+ if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
+ /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
+ Reg right, left = RID_NONE;
+ RegSet allow = RSET_GPR;
+ if (!asm_isk32(as, irl->op2, &imm)) {
+ left = ra_alloc1(as, irl->op2, allow);
+ rset_clear(allow, left);
+ } else { /* Try to Fuse IRT_I8/IRT_U8 loads, too. See below. */
+ IRIns *irll = IR(irl->op1);
+ if (opisfusableload((IROp)irll->o) &&
+ (irt_isi8(irll->t) || irt_isu8(irll->t))) {
+ IRType1 origt = irll->t; /* Temporarily flip types. */
+ irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
+ as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
+ right = asm_fuseload(as, irl->op1, RSET_GPR);
+ as->curins++;
+ irll->t = origt;
+ if (right != RID_MRM) goto test_nofuse;
+ /* Fusion succeeded, emit test byte mrm, imm8. */
+ asm_guardcc(as, cc);
+ emit_i8(as, (imm & 0xff));
+ emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
+ return;
+ }
+ }
+ as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
+ right = asm_fuseload(as, irl->op1, allow);
+ as->curins++; /* Undo the above. */
+ test_nofuse:
+ asm_guardcc(as, cc);
+ if (ra_noreg(left)) {
+ emit_i32(as, imm);
+ emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
+ } else {
+ emit_mrm(as, XO_TEST, r64 + left, right);
+ }
+ } else {
+ Reg left;
+ if (opisfusableload((IROp)irl->o) &&
+ ((irt_isu8(irl->t) && checku8(imm)) ||
+ ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
+ (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
+ /* Only the IRT_INT case is fused by asm_fuseload.
+ ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
+ ** are handled here.
+ ** Note that cmp word [mem], imm16 should not be generated,
+ ** since it has a length-changing prefix. Compares of a word
+ ** against a sign-extended imm8 are ok, however.
+ */
+ IRType1 origt = irl->t; /* Temporarily flip types. */
+ irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
+ left = asm_fuseload(as, lref, RSET_GPR);
+ irl->t = origt;
+ if (left == RID_MRM) { /* Fusion succeeded? */
+ if (irt_isu8(irl->t) || irt_isu16(irl->t))
+ cc >>= 4; /* Need unsigned compare. */
+ asm_guardcc(as, cc);
+ emit_i8(as, imm);
+ emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
+ XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
+ return;
+ } /* Otherwise handle register case as usual. */
+ } else {
+ left = asm_fuseload(as, lref, RSET_GPR);
+ }
+ asm_guardcc(as, cc);
+ if (usetest && left != RID_MRM) {
+ /* Use test r,r instead of cmp r,0. */
+ emit_rr(as, XO_TEST, r64 + left, left);
+ if (irl+1 == ir) /* Referencing previous ins? */
+ as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */
+ } else {
+ emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
+ }
+ }
+ } else {
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ Reg right = asm_fuseload(as, rref, rset_exclude(RSET_GPR, left));
+ asm_guardcc(as, cc);
+ emit_mrm(as, XO_CMP, r64 + left, right);
+ }
+ }
+}
+
+#if LJ_32 && LJ_HASFFI
+/* 64 bit integer comparisons in 32 bit mode. */
+static void asm_comp_int64(ASMState *as, IRIns *ir)
+{
+ uint32_t cc = asm_compmap[(ir-1)->o];
+ RegSet allow = RSET_GPR;
+ Reg lefthi = RID_NONE, leftlo = RID_NONE;
+ Reg righthi = RID_NONE, rightlo = RID_NONE;
+ MCLabel l_around;
+ x86ModRM mrm;
+
+ as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */
+
+ /* Allocate/fuse hiword operands. */
+ if (irref_isk(ir->op2)) {
+ lefthi = asm_fuseload(as, ir->op1, allow);
+ } else {
+ lefthi = ra_alloc1(as, ir->op1, allow);
+ righthi = asm_fuseload(as, ir->op2, allow);
+ if (righthi == RID_MRM) {
+ if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
+ if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
+ } else {
+ rset_clear(allow, righthi);
+ }
+ }
+ mrm = as->mrm; /* Save state for hiword instruction. */
+
+ /* Allocate/fuse loword operands. */
+ if (irref_isk((ir-1)->op2)) {
+ leftlo = asm_fuseload(as, (ir-1)->op1, allow);
+ } else {
+ leftlo = ra_alloc1(as, (ir-1)->op1, allow);
+ rightlo = asm_fuseload(as, (ir-1)->op2, allow);
+ if (rightlo == RID_MRM) {
+ if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
+ if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
+ } else {
+ rset_clear(allow, rightlo);
+ }
+ }
+
+ /* All register allocations must be performed _before_ this point. */
+ l_around = emit_label(as);
+ as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */
+
+ /* Loword comparison and branch. */
+ asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */
+ if (ra_noreg(rightlo)) {
+ int32_t imm = IR((ir-1)->op2)->i;
+ if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
+ emit_rr(as, XO_TEST, leftlo, leftlo);
+ else
+ emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
+ } else {
+ emit_mrm(as, XO_CMP, leftlo, rightlo);
+ }
+
+ /* Hiword comparison and branches. */
+ if ((cc & 15) != CC_NE)
+ emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */
+ if ((cc & 15) != CC_E)
+ asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */
+ as->mrm = mrm; /* Restore state. */
+ if (ra_noreg(righthi)) {
+ int32_t imm = IR(ir->op2)->i;
+ if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
+ emit_rr(as, XO_TEST, lefthi, lefthi);
+ else
+ emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
+ } else {
+ emit_mrm(as, XO_CMP, lefthi, righthi);
+ }
+}
+#endif
+
+/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
+
+/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+#if LJ_32 && LJ_HASFFI
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+ if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
+ if (usehi || uselo) {
+ if (irt_isfp(ir->t))
+ asm_conv_fp_int64(as, ir);
+ else
+ asm_conv_int64_fp(as, ir);
+ }
+ as->curins--; /* Always skip the CONV. */
+ return;
+ } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
+ asm_comp_int64(as, ir);
+ return;
+ }
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+ case IR_ADD:
+ as->flagmcp = NULL;
+ as->curins--;
+ asm_intarith(as, ir, XOg_ADC);
+ asm_intarith(as, ir-1, XOg_ADD);
+ break;
+ case IR_SUB:
+ as->flagmcp = NULL;
+ as->curins--;
+ asm_intarith(as, ir, XOg_SBB);
+ asm_intarith(as, ir-1, XOg_SUB);
+ break;
+ case IR_NEG: {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ emit_rr(as, XO_GROUP3, XOg_NEG, dest);
+ emit_i8(as, 0);
+ emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
+ ra_left(as, dest, ir->op1);
+ as->curins--;
+ asm_neg_not(as, ir-1, XOg_NEG);
+ break;
+ }
+ case IR_CALLN:
+ case IR_CALLXS:
+ ra_destreg(as, ir, RID_RETHI);
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark call as used. */
+ break;
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by CNEWI itself. */
+ break;
+ default: lua_assert(0); break;
+ }
+#else
+ UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */
+#endif
+}
+
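+/* Editorial note (not part of upstream LuaJIT): on x86 (LJ_32) a 64 bit IR op
+** is split into a loword op followed by a HIOP, and asm_hiop() above emits
+** them as a carry-chained pair. Because the assembler generates machine code
+** backwards, the hiword instruction (e.g. ADC/SBB for IR_ADD/IR_SUB) is
+** emitted first and the loword instruction (ADD/SUB), which actually sets the
+** carry flag at runtime, is emitted after it, so the pair ends up in the
+** correct execution order.
+*/
+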
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ /* Try to get an unused temp. register, otherwise spill/restore eax. */
+ Reg pbase = irp ? irp->r : RID_BASE;
+ Reg r = allow ? rset_pickbot(allow) : RID_EAX;
+ emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
+ if (allow == RSET_EMPTY) /* Restore temp. register. */
+ emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
+ else
+ ra_modified(as, r);
+ emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot));
+ if (ra_hasreg(pbase) && pbase != r)
+ emit_rr(as, XO_ARITH(XOg_SUB), r, pbase);
+ else
+ emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
+ ptr2addr(&J2G(as->J)->jit_base));
+ emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack));
+ emit_getgl(as, r, jit_L);
+ if (allow == RSET_EMPTY) /* Spill temp. register. */
+ emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
+}
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
+ } else {
+ lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
+ (LJ_DUALNUM && irt_isinteger(ir->t)));
+ if (!irref_isk(ref)) {
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
+ emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
+ } else if (!irt_ispri(ir->t)) {
+ emit_movmroi(as, RID_BASE, ofs, ir->i);
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s != 0) /* Do not overwrite link to previous frame. */
+ emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
+ } else {
+ if (!(LJ_64 && irt_islightud(ir->t)))
+ emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
+ }
+ }
+ checkmclim(as);
+ }
+ lua_assert(map + nent == flinks);
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
+ emit_rr(as, XO_TEST, RID_RET, RID_RET);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ tmp = ra_releasetmp(as, ASMREF_TMP1);
+ emit_loada(as, tmp, J2G(as->J));
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), (int32_t)as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_sjcc(as, CC_B, l_end);
+ emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold);
+ emit_getgl(as, tmp, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->realign) { /* Realigned loops use short jumps. */
+ as->realign = NULL; /* Stop another retry. */
+ lua_assert(((intptr_t)target & 15) == 0);
+ if (as->loopinv) { /* Inverted loop branch? */
+ p -= 5;
+ p[0] = XI_JMP;
+ lua_assert(target - p >= -128);
+ p[-1] = (MCode)(target - p); /* Patch sjcc. */
+ if (as->loopinv == 2)
+ p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */
+ } else {
+ lua_assert(target - p >= -128);
+ p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */
+ p[-2] = XI_JMPs;
+ }
+ } else {
+ MCode *newloop;
+ p[-5] = XI_JMP;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guardcc already inverted the jcc and patched the jmp. */
+ p -= 5;
+ newloop = target+4;
+ *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */
+ if (as->loopinv == 2) {
+ *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */
+ newloop = target+8;
+ }
+ } else { /* Otherwise just patch jmp. */
+ *(int32_t *)(p-4) = (int32_t)(target - p);
+ newloop = target+3;
+ }
+ /* Realign small loops and shorten the loop branch. */
+ if (newloop >= p - 128) {
+ as->realign = newloop; /* Force a retry and remember alignment. */
+ as->curins = as->stopins; /* Abort asm_trace now. */
+ as->T->nins = as->orignins; /* Remove any added renames. */
+ }
+ }
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (r != RID_BASE)
+ emit_rr(as, XO_MOV, r, RID_BASE);
+ }
+}
+
+/* Coalesce or reload BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (irp->r == r) {
+ rset_clear(allow, r); /* Mark same BASE register as coalesced. */
+ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
+ rset_clear(allow, irp->r);
+ emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */
+ } else {
+ emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
+ }
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
+ MCode *p = as->mctop;
+ MCode *target, *q;
+ int32_t spadj = as->T->spadjust;
+ if (spadj == 0) {
+ p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0);
+ } else {
+ MCode *p1;
+ /* Patch stack adjustment. */
+ if (checki8(spadj)) {
+ p -= 3;
+ p1 = p-6;
+ *p1 = (MCode)spadj;
+ } else {
+ p1 = p-9;
+ *(int32_t *)p1 = spadj;
+ }
+ if ((as->flags & JIT_F_LEA_AGU)) {
+#if LJ_64
+ p1[-4] = 0x48;
+#endif
+ p1[-3] = (MCode)XI_LEA;
+ p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP);
+ p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ } else {
+#if LJ_64
+ p1[-3] = 0x48;
+#endif
+ p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
+ p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
+ }
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ *(int32_t *)(p-4) = jmprel(p, target);
+ p[-5] = XI_JMP;
+ /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
+ for (q = as->mctop-1; q >= p; q--)
+ *q = XI_NOP;
+ as->mctop = p;
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop;
+ /* Realign and leave room for backwards loop branch or exit branch. */
+ if (as->realign) {
+ int i = ((int)(intptr_t)as->realign) & 15;
+ /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
+ while (i-- > 0)
+ *--p = XI_NOP;
+ as->mctop = p;
+ p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */
+ } else {
+ p -= 5; /* Space for exit branch (near jmp). */
+ }
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
+ as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0));
+ as->invmcp = NULL;
+ }
+}
+
+/* -- Instruction dispatch ------------------------------------------------ */
+
+/* Assemble a single instruction. */
+static void asm_ir(ASMState *as, IRIns *ir)
+{
+ switch ((IROp)ir->o) {
+ /* Miscellaneous ops. */
+ case IR_LOOP: asm_loop(as); break;
+ case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
+ case IR_USE:
+ ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
+ case IR_PHI: asm_phi(as, ir); break;
+ case IR_HIOP: asm_hiop(as, ir); break;
+
+ /* Guarded assertions. */
+ case IR_LT: case IR_GE: case IR_LE: case IR_GT:
+ case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
+ case IR_EQ: case IR_NE: case IR_ABC:
+ asm_comp(as, ir, asm_compmap[ir->o]);
+ break;
+
+ case IR_RETF: asm_retf(as, ir); break;
+
+ /* Bit ops. */
+ case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break;
+ case IR_BSWAP: asm_bitswap(as, ir); break;
+
+ case IR_BAND: asm_intarith(as, ir, XOg_AND); break;
+ case IR_BOR: asm_intarith(as, ir, XOg_OR); break;
+ case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break;
+
+ case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break;
+ case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break;
+ case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break;
+ case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break;
+ case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break;
+
+ /* Arithmetic ops. */
+ case IR_ADD: asm_add(as, ir); break;
+ case IR_SUB:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_SUBSD);
+ else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
+ asm_intarith(as, ir, XOg_SUB);
+ break;
+ case IR_MUL:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_MULSD);
+ else
+ asm_intarith(as, ir, XOg_X_IMUL);
+ break;
+ case IR_DIV:
+#if LJ_64 && LJ_HASFFI
+ if (!irt_isnum(ir->t))
+ asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
+ IRCALL_lj_carith_divu64);
+ else
+#endif
+ asm_fparith(as, ir, XO_DIVSD);
+ break;
+ case IR_MOD:
+#if LJ_64 && LJ_HASFFI
+ if (!irt_isint(ir->t))
+ asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
+ IRCALL_lj_carith_modu64);
+ else
+#endif
+ asm_intmod(as, ir);
+ break;
+
+ case IR_NEG:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_XORPS);
+ else
+ asm_neg_not(as, ir, XOg_NEG);
+ break;
+ case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break;
+
+ case IR_MIN:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_MINSD);
+ else
+ asm_min_max(as, ir, CC_G);
+ break;
+ case IR_MAX:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_MAXSD);
+ else
+ asm_min_max(as, ir, CC_L);
+ break;
+
+ case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
+ asm_fpmath(as, ir);
+ break;
+ case IR_POW:
+#if LJ_64 && LJ_HASFFI
+ if (!irt_isnum(ir->t))
+ asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
+ IRCALL_lj_carith_powu64);
+ else
+#endif
+ asm_fppowi(as, ir);
+ break;
+
+ /* Overflow-checking arithmetic ops. Note: don't use LEA here! */
+ case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break;
+ case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break;
+ case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break;
+
+ /* Memory references. */
+ case IR_AREF: asm_aref(as, ir); break;
+ case IR_HREF: asm_href(as, ir); break;
+ case IR_HREFK: asm_hrefk(as, ir); break;
+ case IR_NEWREF: asm_newref(as, ir); break;
+ case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
+ case IR_FREF: asm_fref(as, ir); break;
+ case IR_STRREF: asm_strref(as, ir); break;
+
+ /* Loads and stores. */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ asm_ahuvload(as, ir);
+ break;
+ case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break;
+ case IR_SLOAD: asm_sload(as, ir); break;
+
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
+ case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break;
+
+ /* Allocations. */
+ case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
+ case IR_TNEW: asm_tnew(as, ir); break;
+ case IR_TDUP: asm_tdup(as, ir); break;
+ case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
+
+ /* Write barriers. */
+ case IR_TBAR: asm_tbar(as, ir); break;
+ case IR_OBAR: asm_obar(as, ir); break;
+
+ /* Type conversions. */
+ case IR_TOBIT: asm_tobit(as, ir); break;
+ case IR_CONV: asm_conv(as, ir); break;
+ case IR_TOSTR: asm_tostr(as, ir); break;
+ case IR_STRTO: asm_strto(as, ir); break;
+
+ /* Calls. */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
+ case IR_CALLXS: asm_callx(as, ir); break;
+ case IR_CARG: break;
+
+ default:
+ setintV(&as->J->errinfo, ir->o);
+ lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
+ break;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX];
+ int nslots;
+ asm_collectargs(as, ir, ci, args);
+ nslots = asm_count_call_slots(as, ci, args);
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+#if LJ_64
+ return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
+#else
+ return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
+#endif
+}
+
+/* Target-specific setup. */
+static void asm_setup_target(ASMState *as)
+{
+ asm_exitstub_setup(as, as->T->nsnap);
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MSize len = T->szmcode;
+ MCode *px = exitstub_addr(J, exitno) - 6;
+ MCode *pe = p+len-6;
+ uint32_t stateaddr = u32ptr(&J2G(J)->vmstate);
+ if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
+ *(int32_t *)(p+len-4) = jmprel(p+len, target);
+ /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
+ for (; p < pe; p++)
+ if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) {
+ p += LJ_64 ? 11 : 10;
+ break;
+ }
+ lua_assert(p < pe);
+ for (; p < pe; p++) {
+ if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) {
+ *(int32_t *)(p+2) = jmprel(p+6, target);
+ p += 5;
+ }
+ }
+ lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
+ lj_mcode_patch(J, mcarea, 1);
+}
+
diff --git a/src/LuaJIT/src/lj_bc.c b/src/LuaJIT/src/lj_bc.c
new file mode 100644
index 000000000..1770273d6
--- /dev/null
+++ b/src/LuaJIT/src/lj_bc.c
@@ -0,0 +1,14 @@
+/*
+** Bytecode instruction modes.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_bc_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_bc.h"
+
+/* Bytecode offsets and bytecode instruction modes. */
+#include "lj_bcdef.h"
+
diff --git a/src/LuaJIT/src/lj_bc.h b/src/LuaJIT/src/lj_bc.h
new file mode 100644
index 000000000..0a82c9d45
--- /dev/null
+++ b/src/LuaJIT/src/lj_bc.h
@@ -0,0 +1,261 @@
+/*
+** Bytecode instruction format.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_BC_H
+#define _LJ_BC_H
+
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* Bytecode instruction format, 32 bit wide, fields of 8 or 16 bit:
+**
+** +----+----+----+----+
+** | B | C | A | OP | Format ABC
+** +----+----+----+----+
+** | D | A | OP | Format AD
+** +--------------------
+** MSB LSB
+**
+** In-memory instructions are always stored in host byte order.
+*/
+
+/* Operand ranges and related constants. */
+#define BCMAX_A 0xff
+#define BCMAX_B 0xff
+#define BCMAX_C 0xff
+#define BCMAX_D 0xffff
+#define BCBIAS_J 0x8000
+#define NO_REG BCMAX_A
+#define NO_JMP (~(BCPos)0)
+
+/* Macros to get instruction fields. */
+#define bc_op(i) ((BCOp)((i)&0xff))
+#define bc_a(i) ((BCReg)(((i)>>8)&0xff))
+#define bc_b(i) ((BCReg)((i)>>24))
+#define bc_c(i) ((BCReg)(((i)>>16)&0xff))
+#define bc_d(i) ((BCReg)((i)>>16))
+#define bc_j(i) ((ptrdiff_t)bc_d(i)-BCBIAS_J)
+
+/* Macros to set instruction fields. */
+#define setbc_byte(p, x, ofs) \
+ ((uint8_t *)(p))[LJ_ENDIAN_SELECT(ofs, 3-ofs)] = (uint8_t)(x)
+#define setbc_op(p, x) setbc_byte(p, (x), 0)
+#define setbc_a(p, x) setbc_byte(p, (x), 1)
+#define setbc_b(p, x) setbc_byte(p, (x), 3)
+#define setbc_c(p, x) setbc_byte(p, (x), 2)
+#define setbc_d(p, x) \
+ ((uint16_t *)(p))[LJ_ENDIAN_SELECT(1, 0)] = (uint16_t)(x)
+#define setbc_j(p, x) setbc_d(p, (BCPos)((int32_t)(x)+BCBIAS_J))
+
+/* Macros to compose instructions. */
+#define BCINS_ABC(o, a, b, c) \
+ (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(b)<<24)|((BCIns)(c)<<16))
+#define BCINS_AD(o, a, d) \
+ (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(d)<<16))
+#define BCINS_AJ(o, a, j) BCINS_AD(o, a, (BCPos)((int32_t)(j)+BCBIAS_J))
+
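+/* Editorial sketch (not part of upstream LuaJIT): how the accessor and
+** composer macros above fit together. Assuming the opcode enum generated from
+** BCDEF below, a KSHORT instruction loading the literal 7 into slot 2 could
+** be built and taken apart like this:
+**
+**   BCIns ins = BCINS_AD(BC_KSHORT, 2, 7);
+**   BCOp op = bc_op(ins);   => BC_KSHORT
+**   BCReg a = bc_a(ins);    => 2 (destination slot)
+**   BCReg d = bc_d(ins);    => 7 (short literal)
+**
+** Jump targets go through BCINS_AJ/bc_j, which add/remove the BCBIAS_J bias
+** so the 16 bit D field can also encode negative (backward) offsets.
+*/
+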
+/* Bytecode instruction definition. Order matters, see below.
+**
+** (name, filler, Amode, Bmode, Cmode or Dmode, metamethod)
+**
+** The opcode name suffixes specify the type for RB/RC or RD:
+** V = variable slot
+** S = string const
+** N = number const
+** P = primitive type (~itype)
+** B = unsigned byte literal
+** M = multiple args/results
+*/
+#define BCDEF(_) \
+ /* Comparison ops. ORDER OPR. */ \
+ _(ISLT, var, ___, var, lt) \
+ _(ISGE, var, ___, var, lt) \
+ _(ISLE, var, ___, var, le) \
+ _(ISGT, var, ___, var, le) \
+ \
+ _(ISEQV, var, ___, var, eq) \
+ _(ISNEV, var, ___, var, eq) \
+ _(ISEQS, var, ___, str, eq) \
+ _(ISNES, var, ___, str, eq) \
+ _(ISEQN, var, ___, num, eq) \
+ _(ISNEN, var, ___, num, eq) \
+ _(ISEQP, var, ___, pri, eq) \
+ _(ISNEP, var, ___, pri, eq) \
+ \
+ /* Unary test and copy ops. */ \
+ _(ISTC, dst, ___, var, ___) \
+ _(ISFC, dst, ___, var, ___) \
+ _(IST, ___, ___, var, ___) \
+ _(ISF, ___, ___, var, ___) \
+ \
+ /* Unary ops. */ \
+ _(MOV, dst, ___, var, ___) \
+ _(NOT, dst, ___, var, ___) \
+ _(UNM, dst, ___, var, unm) \
+ _(LEN, dst, ___, var, len) \
+ \
+ /* Binary ops. ORDER OPR. VV last, POW must be next. */ \
+ _(ADDVN, dst, var, num, add) \
+ _(SUBVN, dst, var, num, sub) \
+ _(MULVN, dst, var, num, mul) \
+ _(DIVVN, dst, var, num, div) \
+ _(MODVN, dst, var, num, mod) \
+ \
+ _(ADDNV, dst, var, num, add) \
+ _(SUBNV, dst, var, num, sub) \
+ _(MULNV, dst, var, num, mul) \
+ _(DIVNV, dst, var, num, div) \
+ _(MODNV, dst, var, num, mod) \
+ \
+ _(ADDVV, dst, var, var, add) \
+ _(SUBVV, dst, var, var, sub) \
+ _(MULVV, dst, var, var, mul) \
+ _(DIVVV, dst, var, var, div) \
+ _(MODVV, dst, var, var, mod) \
+ \
+ _(POW, dst, var, var, pow) \
+ _(CAT, dst, rbase, rbase, concat) \
+ \
+ /* Constant ops. */ \
+ _(KSTR, dst, ___, str, ___) \
+ _(KCDATA, dst, ___, cdata, ___) \
+ _(KSHORT, dst, ___, lits, ___) \
+ _(KNUM, dst, ___, num, ___) \
+ _(KPRI, dst, ___, pri, ___) \
+ _(KNIL, base, ___, base, ___) \
+ \
+ /* Upvalue and function ops. */ \
+ _(UGET, dst, ___, uv, ___) \
+ _(USETV, uv, ___, var, ___) \
+ _(USETS, uv, ___, str, ___) \
+ _(USETN, uv, ___, num, ___) \
+ _(USETP, uv, ___, pri, ___) \
+ _(UCLO, rbase, ___, jump, ___) \
+ _(FNEW, dst, ___, func, gc) \
+ \
+ /* Table ops. */ \
+ _(TNEW, dst, ___, lit, gc) \
+ _(TDUP, dst, ___, tab, gc) \
+ _(GGET, dst, ___, str, index) \
+ _(GSET, var, ___, str, newindex) \
+ _(TGETV, dst, var, var, index) \
+ _(TGETS, dst, var, str, index) \
+ _(TGETB, dst, var, lit, index) \
+ _(TSETV, var, var, var, newindex) \
+ _(TSETS, var, var, str, newindex) \
+ _(TSETB, var, var, lit, newindex) \
+ _(TSETM, base, ___, num, newindex) \
+ \
+ /* Calls and vararg handling. T = tail call. */ \
+ _(CALLM, base, lit, lit, call) \
+ _(CALL, base, lit, lit, call) \
+ _(CALLMT, base, ___, lit, call) \
+ _(CALLT, base, ___, lit, call) \
+ _(ITERC, base, lit, lit, call) \
+ _(ITERN, base, lit, lit, call) \
+ _(VARG, base, lit, lit, ___) \
+ _(ISNEXT, base, ___, jump, ___) \
+ \
+ /* Returns. */ \
+ _(RETM, base, ___, lit, ___) \
+ _(RET, rbase, ___, lit, ___) \
+ _(RET0, rbase, ___, lit, ___) \
+ _(RET1, rbase, ___, lit, ___) \
+ \
+ /* Loops and branches. I/J = interp/JIT, I/C/L = init/call/loop. */ \
+ _(FORI, base, ___, jump, ___) \
+ _(JFORI, base, ___, jump, ___) \
+ \
+ _(FORL, base, ___, jump, ___) \
+ _(IFORL, base, ___, jump, ___) \
+ _(JFORL, base, ___, lit, ___) \
+ \
+ _(ITERL, base, ___, jump, ___) \
+ _(IITERL, base, ___, jump, ___) \
+ _(JITERL, base, ___, lit, ___) \
+ \
+ _(LOOP, rbase, ___, jump, ___) \
+ _(ILOOP, rbase, ___, jump, ___) \
+ _(JLOOP, rbase, ___, lit, ___) \
+ \
+ _(JMP, rbase, ___, jump, ___) \
+ \
+ /* Function headers. I/J = interp/JIT, F/V/C = fixarg/vararg/C func. */ \
+ _(FUNCF, rbase, ___, ___, ___) \
+ _(IFUNCF, rbase, ___, ___, ___) \
+ _(JFUNCF, rbase, ___, lit, ___) \
+ _(FUNCV, rbase, ___, ___, ___) \
+ _(IFUNCV, rbase, ___, ___, ___) \
+ _(JFUNCV, rbase, ___, lit, ___) \
+ _(FUNCC, rbase, ___, ___, ___) \
+ _(FUNCCW, rbase, ___, ___, ___)
+
+/* Bytecode opcode numbers. */
+typedef enum {
+#define BCENUM(name, ma, mb, mc, mt) BC_##name,
+BCDEF(BCENUM)
+#undef BCENUM
+ BC__MAX
+} BCOp;
+
+LJ_STATIC_ASSERT((int)BC_ISEQV+1 == (int)BC_ISNEV);
+LJ_STATIC_ASSERT(((int)BC_ISEQV^1) == (int)BC_ISNEV);
+LJ_STATIC_ASSERT(((int)BC_ISEQS^1) == (int)BC_ISNES);
+LJ_STATIC_ASSERT(((int)BC_ISEQN^1) == (int)BC_ISNEN);
+LJ_STATIC_ASSERT(((int)BC_ISEQP^1) == (int)BC_ISNEP);
+LJ_STATIC_ASSERT(((int)BC_ISLT^1) == (int)BC_ISGE);
+LJ_STATIC_ASSERT(((int)BC_ISLE^1) == (int)BC_ISGT);
+LJ_STATIC_ASSERT(((int)BC_ISLT^3) == (int)BC_ISGT);
+LJ_STATIC_ASSERT((int)BC_IST-(int)BC_ISTC == (int)BC_ISF-(int)BC_ISFC);
+LJ_STATIC_ASSERT((int)BC_CALLT-(int)BC_CALL == (int)BC_CALLMT-(int)BC_CALLM);
+LJ_STATIC_ASSERT((int)BC_CALLMT + 1 == (int)BC_CALLT);
+LJ_STATIC_ASSERT((int)BC_RETM + 1 == (int)BC_RET);
+LJ_STATIC_ASSERT((int)BC_FORL + 1 == (int)BC_IFORL);
+LJ_STATIC_ASSERT((int)BC_FORL + 2 == (int)BC_JFORL);
+LJ_STATIC_ASSERT((int)BC_ITERL + 1 == (int)BC_IITERL);
+LJ_STATIC_ASSERT((int)BC_ITERL + 2 == (int)BC_JITERL);
+LJ_STATIC_ASSERT((int)BC_LOOP + 1 == (int)BC_ILOOP);
+LJ_STATIC_ASSERT((int)BC_LOOP + 2 == (int)BC_JLOOP);
+LJ_STATIC_ASSERT((int)BC_FUNCF + 1 == (int)BC_IFUNCF);
+LJ_STATIC_ASSERT((int)BC_FUNCF + 2 == (int)BC_JFUNCF);
+LJ_STATIC_ASSERT((int)BC_FUNCV + 1 == (int)BC_IFUNCV);
+LJ_STATIC_ASSERT((int)BC_FUNCV + 2 == (int)BC_JFUNCV);
+
+/* This solves a circular dependency problem, change as needed. */
+#define FF_next_N 15
+
+/* Stack slots used by FORI/FORL, relative to operand A. */
+enum {
+ FORL_IDX, FORL_STOP, FORL_STEP, FORL_EXT
+};
+
+/* Bytecode operand modes. ORDER BCMode */
+typedef enum {
+ BCMnone, BCMdst, BCMbase, BCMvar, BCMrbase, BCMuv, /* Mode A must be <= 7 */
+ BCMlit, BCMlits, BCMpri, BCMnum, BCMstr, BCMtab, BCMfunc, BCMjump, BCMcdata,
+ BCM_max
+} BCMode;
+#define BCM___ BCMnone
+
+#define bcmode_a(op) ((BCMode)(lj_bc_mode[op] & 7))
+#define bcmode_b(op) ((BCMode)((lj_bc_mode[op]>>3) & 15))
+#define bcmode_c(op) ((BCMode)((lj_bc_mode[op]>>7) & 15))
+#define bcmode_d(op) bcmode_c(op)
+#define bcmode_hasd(op) ((lj_bc_mode[op] & (15<<3)) == (BCMnone<<3))
+#define bcmode_mm(op) ((MMS)(lj_bc_mode[op]>>11))
+
+#define BCMODE(name, ma, mb, mc, mm) \
+ (BCM##ma|(BCM##mb<<3)|(BCM##mc<<7)|(MM_##mm<<11)),
+#define BCMODE_FF 0
+
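+/* Editorial note (not part of upstream LuaJIT): BCMODE packs the per-opcode
+** operand modes into one uint16_t of lj_bc_mode[]: mode A in bits 0-2, mode B
+** in bits 3-6, mode C/D in bits 7-10 and the metamethod in the remaining
+** bits. E.g. for the _(ADDVV, dst, var, var, add) entry above this yields
+** BCMdst | (BCMvar<<3) | (BCMvar<<7) | (MM_add<<11), which is exactly what
+** bcmode_a/b/c/mm() take apart again.
+*/
+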
+static LJ_AINLINE int bc_isret(BCOp op)
+{
+ return (op == BC_RETM || op == BC_RET || op == BC_RET0 || op == BC_RET1);
+}
+
+LJ_DATA const uint16_t lj_bc_mode[];
+LJ_DATA const uint16_t lj_bc_ofs[];
+
+#endif
diff --git a/src/LuaJIT/src/lj_bcdump.h b/src/LuaJIT/src/lj_bcdump.h
new file mode 100644
index 000000000..5b461cc26
--- /dev/null
+++ b/src/LuaJIT/src/lj_bcdump.h
@@ -0,0 +1,66 @@
+/*
+** Bytecode dump definitions.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_BCDUMP_H
+#define _LJ_BCDUMP_H
+
+#include "lj_obj.h"
+#include "lj_lex.h"
+
+/* -- Bytecode dump format ------------------------------------------------ */
+
+/*
+** dump = header proto+ 0U
+** header = ESC 'L' 'J' versionB flagsU [namelenU nameB*]
+** proto = lengthU pdata
+** pdata = phead bcinsW* uvdataH* kgc* knum* [debugB*]
+** phead = flagsB numparamsB framesizeB numuvB numkgcU numknU numbcU
+** [debuglenU [firstlineU numlineU]]
+** kgc = kgctypeU { ktab | (loU hiU) | (rloU rhiU iloU ihiU) | strB* }
+** knum = intU0 | (loU1 hiU)
+** ktab = narrayU nhashU karray* khash*
+** karray = ktabk
+** khash = ktabk ktabk
+** ktabk = ktabtypeU { intU | (loU hiU) | strB* }
+**
+** B = 8 bit, H = 16 bit, W = 32 bit, U = ULEB128 of W, U0/U1 = ULEB128 of W+1
+*/
+
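+/* Editorial example (not part of upstream LuaJIT): with the constants below,
+** every dump starts with the bytes 0x1b 0x4c 0x4a 0x01 (ESC 'L' 'J',
+** BCDUMP_VERSION), followed by the ULEB128-encoded compatibility flags.
+** Unless BCDUMP_F_STRIP is set, the ULEB128 chunk name length and the chunk
+** name bytes follow; the prototypes come after that, and a single 0 byte
+** terminates the dump (see the grammar above).
+*/
+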
+/* Bytecode dump header. */
+#define BCDUMP_HEAD1 0x1b
+#define BCDUMP_HEAD2 0x4c
+#define BCDUMP_HEAD3 0x4a
+
+/* If you perform *any* kind of private modifications to the bytecode itself
+** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher.
+*/
+#define BCDUMP_VERSION 1
+
+/* Compatibility flags. */
+#define BCDUMP_F_BE 0x01
+#define BCDUMP_F_STRIP 0x02
+#define BCDUMP_F_FFI 0x04
+
+#define BCDUMP_F_KNOWN (BCDUMP_F_FFI*2-1)
+
+/* Type codes for the GC constants of a prototype. Plus length for strings. */
+enum {
+ BCDUMP_KGC_CHILD, BCDUMP_KGC_TAB, BCDUMP_KGC_I64, BCDUMP_KGC_U64,
+ BCDUMP_KGC_COMPLEX, BCDUMP_KGC_STR
+};
+
+/* Type codes for the keys/values of a constant table. */
+enum {
+ BCDUMP_KTAB_NIL, BCDUMP_KTAB_FALSE, BCDUMP_KTAB_TRUE,
+ BCDUMP_KTAB_INT, BCDUMP_KTAB_NUM, BCDUMP_KTAB_STR
+};
+
+/* -- Bytecode reader/writer ---------------------------------------------- */
+
+LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer,
+ void *data, int strip);
+LJ_FUNC GCproto *lj_bcread(LexState *ls);
+
+#endif
diff --git a/src/LuaJIT/src/lj_bcread.c b/src/LuaJIT/src/lj_bcread.c
new file mode 100644
index 000000000..dfef3947c
--- /dev/null
+++ b/src/LuaJIT/src/lj_bcread.c
@@ -0,0 +1,466 @@
+/*
+** Bytecode reader.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_bcread_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#endif
+#include "lj_lex.h"
+#include "lj_bcdump.h"
+#include "lj_state.h"
+
+/* Reuse some lexer fields for our own purposes. */
+#define bcread_flags(ls) ls->level
+#define bcread_swap(ls) \
+ ((bcread_flags(ls) & BCDUMP_F_BE) != LJ_BE*BCDUMP_F_BE)
+#define bcread_oldtop(L, ls) restorestack(L, ls->lastline)
+#define bcread_savetop(L, ls, top) \
+ ls->lastline = (BCLine)savestack(L, (top))
+
+/* -- Input buffer handling ----------------------------------------------- */
+
+/* Throw reader error. */
+static LJ_NOINLINE void bcread_error(LexState *ls, ErrMsg em)
+{
+ lua_State *L = ls->L;
+ const char *name = ls->chunkarg;
+ if (*name == BCDUMP_HEAD1) name = "(binary)";
+ else if (*name == '@' || *name == '=') name++;
+ lj_str_pushf(L, "%s: %s", name, err2msg(em));
+ lj_err_throw(L, LUA_ERRSYNTAX);
+}
+
+/* Resize input buffer. */
+static void bcread_resize(LexState *ls, MSize len)
+{
+ if (ls->sb.sz < len) {
+ MSize sz = ls->sb.sz * 2;
+ while (len > sz) sz = sz * 2;
+ lj_str_resizebuf(ls->L, &ls->sb, sz);
+ /* Caveat: this may change ls->sb.buf which may affect ls->p. */
+ }
+}
+
+/* Refill buffer if needed. */
+static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need)
+{
+ lua_assert(len != 0);
+ if (len > LJ_MAX_MEM || ls->current < 0)
+ bcread_error(ls, LJ_ERR_BCBAD);
+ do {
+ const char *buf;
+ size_t size;
+ if (ls->n) { /* Copy remainder to buffer. */
+ if (ls->sb.n) { /* Move down in buffer. */
+ lua_assert(ls->p + ls->n == ls->sb.buf + ls->sb.n);
+ if (ls->n != ls->sb.n)
+ memmove(ls->sb.buf, ls->p, ls->n);
+ } else { /* Copy from buffer provided by reader. */
+ bcread_resize(ls, len);
+ memcpy(ls->sb.buf, ls->p, ls->n);
+ }
+ ls->p = ls->sb.buf;
+ }
+ ls->sb.n = ls->n;
+ buf = ls->rfunc(ls->L, ls->rdata, &size); /* Get more data from reader. */
+ if (buf == NULL || size == 0) { /* EOF? */
+ if (need) bcread_error(ls, LJ_ERR_BCBAD);
+ ls->current = -1; /* Only bad if we get called again. */
+ break;
+ }
+ if (ls->sb.n) { /* Append to buffer. */
+ MSize n = ls->sb.n + (MSize)size;
+ bcread_resize(ls, n < len ? len : n);
+ memcpy(ls->sb.buf + ls->sb.n, buf, size);
+ ls->n = ls->sb.n = n;
+ ls->p = ls->sb.buf;
+ } else { /* Return buffer provided by reader. */
+ ls->n = (MSize)size;
+ ls->p = buf;
+ }
+ } while (ls->n < len);
+}
+
+/* Need a certain number of bytes. */
+static LJ_AINLINE void bcread_need(LexState *ls, MSize len)
+{
+ if (LJ_UNLIKELY(ls->n < len))
+ bcread_fill(ls, len, 1);
+}
+
+/* Want to read up to a certain number of bytes, but may need less. */
+static LJ_AINLINE void bcread_want(LexState *ls, MSize len)
+{
+ if (LJ_UNLIKELY(ls->n < len))
+ bcread_fill(ls, len, 0);
+}
+
+#define bcread_dec(ls) check_exp(ls->n > 0, ls->n--)
+#define bcread_consume(ls, len) check_exp(ls->n >= (len), ls->n -= (len))
+
+/* Return memory block from buffer. */
+static uint8_t *bcread_mem(LexState *ls, MSize len)
+{
+ uint8_t *p = (uint8_t *)ls->p;
+ bcread_consume(ls, len);
+ ls->p = (char *)p + len;
+ return p;
+}
+
+/* Copy memory block from buffer. */
+static void bcread_block(LexState *ls, void *q, MSize len)
+{
+ memcpy(q, bcread_mem(ls, len), len);
+}
+
+/* Read byte from buffer. */
+static LJ_AINLINE uint32_t bcread_byte(LexState *ls)
+{
+ bcread_dec(ls);
+ return (uint32_t)(uint8_t)*ls->p++;
+}
+
+/* Read ULEB128 value from buffer. */
+static uint32_t bcread_uleb128(LexState *ls)
+{
+ const uint8_t *p = (const uint8_t *)ls->p;
+ uint32_t v = *p++;
+ if (LJ_UNLIKELY(v >= 0x80)) {
+ int sh = 0;
+ v &= 0x7f;
+ do {
+ v |= ((*p & 0x7f) << (sh += 7));
+ bcread_dec(ls);
+ } while (*p++ >= 0x80);
+ }
+ bcread_dec(ls);
+ ls->p = (char *)p;
+ return v;
+}
+
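+/* Editorial note (not part of upstream LuaJIT): ULEB128 stores a value in
+** little-endian groups of 7 bits, with the top bit of each byte acting as a
+** continuation flag. E.g. 300 (0x12c) is encoded as 0xac 0x02:
+**   0xac = 0x2c | 0x80  => low 7 bits, more bytes follow
+**   0x02                => next 7 bits, top bit clear, stop
+** bcread_uleb128() above reverses exactly this, shifting each 7 bit payload
+** in until it sees a byte below 0x80.
+*/
+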
+/* Read top 32 bits of 33 bit ULEB128 value from buffer. */
+static uint32_t bcread_uleb128_33(LexState *ls)
+{
+ const uint8_t *p = (const uint8_t *)ls->p;
+ uint32_t v = (*p++ >> 1);
+ if (LJ_UNLIKELY(v >= 0x40)) {
+ int sh = -1;
+ v &= 0x3f;
+ do {
+ v |= ((*p & 0x7f) << (sh += 7));
+ bcread_dec(ls);
+ } while (*p++ >= 0x80);
+ }
+ bcread_dec(ls);
+ ls->p = (char *)p;
+ return v;
+}
+
+/* -- Bytecode reader ----------------------------------------------------- */
+
+/* Read debug info of a prototype. */
+static void bcread_dbg(LexState *ls, GCproto *pt, MSize sizedbg)
+{
+ void *lineinfo = (void *)proto_lineinfo(pt);
+ bcread_block(ls, lineinfo, sizedbg);
+  /* Swap lineinfo if the endianness differs. */
+ if (bcread_swap(ls) && pt->numline >= 256) {
+ MSize i, n = pt->sizebc-1;
+ if (pt->numline < 65536) {
+ uint16_t *p = (uint16_t *)lineinfo;
+ for (i = 0; i < n; i++) p[i] = (uint16_t)((p[i] >> 8)|(p[i] << 8));
+ } else {
+ uint32_t *p = (uint32_t *)lineinfo;
+ for (i = 0; i < n; i++) p[i] = lj_bswap(p[i]);
+ }
+ }
+}
+
+/* Find pointer to varinfo. */
+static const void *bcread_varinfo(GCproto *pt)
+{
+ const uint8_t *p = proto_uvinfo(pt);
+ MSize n = pt->sizeuv;
+ if (n) while (*p++ || --n) ;
+ return p;
+}
+
+/* Read a single constant key/value of a template table. */
+static void bcread_ktabk(LexState *ls, TValue *o)
+{
+ MSize tp = bcread_uleb128(ls);
+ if (tp >= BCDUMP_KTAB_STR) {
+ MSize len = tp - BCDUMP_KTAB_STR;
+ const char *p = (const char *)bcread_mem(ls, len);
+ setstrV(ls->L, o, lj_str_new(ls->L, p, len));
+ } else if (tp == BCDUMP_KTAB_INT) {
+ setintV(o, (int32_t)bcread_uleb128(ls));
+ } else if (tp == BCDUMP_KTAB_NUM) {
+ o->u32.lo = bcread_uleb128(ls);
+ o->u32.hi = bcread_uleb128(ls);
+ } else {
+ lua_assert(tp <= BCDUMP_KTAB_TRUE);
+ setitype(o, ~tp);
+ }
+}
+
+/* Read a template table. */
+static GCtab *bcread_ktab(LexState *ls)
+{
+ MSize narray = bcread_uleb128(ls);
+ MSize nhash = bcread_uleb128(ls);
+ GCtab *t = lj_tab_new(ls->L, narray, hsize2hbits(nhash));
+ if (narray) { /* Read array entries. */
+ MSize i;
+ TValue *o = tvref(t->array);
+ for (i = 0; i < narray; i++, o++)
+ bcread_ktabk(ls, o);
+ }
+ if (nhash) { /* Read hash entries. */
+ MSize i;
+ for (i = 0; i < nhash; i++) {
+ TValue key;
+ bcread_ktabk(ls, &key);
+ lua_assert(!tvisnil(&key));
+ bcread_ktabk(ls, lj_tab_set(ls->L, t, &key));
+ }
+ }
+ return t;
+}
+
+/* Read GC constants of a prototype. */
+static void bcread_kgc(LexState *ls, GCproto *pt, MSize sizekgc)
+{
+ MSize i;
+ GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc;
+ for (i = 0; i < sizekgc; i++, kr++) {
+ MSize tp = bcread_uleb128(ls);
+ if (tp >= BCDUMP_KGC_STR) {
+ MSize len = tp - BCDUMP_KGC_STR;
+ const char *p = (const char *)bcread_mem(ls, len);
+ setgcref(*kr, obj2gco(lj_str_new(ls->L, p, len)));
+ } else if (tp == BCDUMP_KGC_TAB) {
+ setgcref(*kr, obj2gco(bcread_ktab(ls)));
+#if LJ_HASFFI
+ } else if (tp != BCDUMP_KGC_CHILD) {
+ CTypeID id = tp == BCDUMP_KGC_COMPLEX ? CTID_COMPLEX_DOUBLE :
+ tp == BCDUMP_KGC_I64 ? CTID_INT64 : CTID_UINT64;
+ CTSize sz = tp == BCDUMP_KGC_COMPLEX ? 16 : 8;
+ GCcdata *cd = lj_cdata_new_(ls->L, id, sz);
+ TValue *p = (TValue *)cdataptr(cd);
+ setgcref(*kr, obj2gco(cd));
+ p[0].u32.lo = bcread_uleb128(ls);
+ p[0].u32.hi = bcread_uleb128(ls);
+ if (tp == BCDUMP_KGC_COMPLEX) {
+ p[1].u32.lo = bcread_uleb128(ls);
+ p[1].u32.hi = bcread_uleb128(ls);
+ }
+#endif
+ } else {
+ lua_State *L = ls->L;
+ lua_assert(tp == BCDUMP_KGC_CHILD);
+ if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? */
+ bcread_error(ls, LJ_ERR_BCBAD);
+ L->top--;
+ setgcref(*kr, obj2gco(protoV(L->top)));
+ }
+ }
+}
+
+/* Read number constants of a prototype. */
+static void bcread_knum(LexState *ls, GCproto *pt, MSize sizekn)
+{
+ MSize i;
+ TValue *o = mref(pt->k, TValue);
+ for (i = 0; i < sizekn; i++, o++) {
+ int isnum = (ls->p[0] & 1);
+ uint32_t lo = bcread_uleb128_33(ls);
+ if (isnum) {
+ o->u32.lo = lo;
+ o->u32.hi = bcread_uleb128(ls);
+ } else {
+ setintV(o, lo);
+ }
+ }
+}
+
+/* Read bytecode instructions. */
+static void bcread_bytecode(LexState *ls, GCproto *pt, MSize sizebc)
+{
+ BCIns *bc = proto_bc(pt);
+ bc[0] = BCINS_AD((pt->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF,
+ pt->framesize, 0);
+ bcread_block(ls, bc+1, (sizebc-1)*(MSize)sizeof(BCIns));
+  /* Swap bytecode instructions if the endianness differs. */
+ if (bcread_swap(ls)) {
+ MSize i;
+ for (i = 1; i < sizebc; i++) bc[i] = lj_bswap(bc[i]);
+ }
+}
+
+/* Read upvalue refs. */
+static void bcread_uv(LexState *ls, GCproto *pt, MSize sizeuv)
+{
+ if (sizeuv) {
+ uint16_t *uv = proto_uv(pt);
+ bcread_block(ls, uv, sizeuv*2);
+    /* Swap upvalue refs if the endianness differs. */
+ if (bcread_swap(ls)) {
+ MSize i;
+ for (i = 0; i < sizeuv; i++)
+ uv[i] = (uint16_t)((uv[i] >> 8)|(uv[i] << 8));
+ }
+ }
+}
+
+/* Read a prototype. */
+static GCproto *bcread_proto(LexState *ls)
+{
+ GCproto *pt;
+ MSize framesize, numparams, flags, sizeuv, sizekgc, sizekn, sizebc, sizept;
+ MSize ofsk, ofsuv, ofsdbg;
+ MSize sizedbg = 0;
+ BCLine firstline = 0, numline = 0;
+ MSize len, startn;
+
+ /* Read length. */
+ if (ls->n > 0 && ls->p[0] == 0) { /* Shortcut EOF. */
+ ls->n--; ls->p++;
+ return NULL;
+ }
+ bcread_want(ls, 5);
+ len = bcread_uleb128(ls);
+ if (!len) return NULL; /* EOF */
+ bcread_need(ls, len);
+ startn = ls->n;
+
+ /* Read prototype header. */
+ flags = bcread_byte(ls);
+ numparams = bcread_byte(ls);
+ framesize = bcread_byte(ls);
+ sizeuv = bcread_byte(ls);
+ sizekgc = bcread_uleb128(ls);
+ sizekn = bcread_uleb128(ls);
+ sizebc = bcread_uleb128(ls) + 1;
+ if (!(bcread_flags(ls) & BCDUMP_F_STRIP)) {
+ sizedbg = bcread_uleb128(ls);
+ if (sizedbg) {
+ firstline = bcread_uleb128(ls);
+ numline = bcread_uleb128(ls);
+ }
+ }
+
+ /* Calculate total size of prototype including all colocated arrays. */
+ sizept = (MSize)sizeof(GCproto) +
+ sizebc*(MSize)sizeof(BCIns) +
+ sizekgc*(MSize)sizeof(GCRef);
+ sizept = (sizept + (MSize)sizeof(TValue)-1) & ~((MSize)sizeof(TValue)-1);
+ ofsk = sizept; sizept += sizekn*(MSize)sizeof(TValue);
+ ofsuv = sizept; sizept += ((sizeuv+1)&~1)*2;
+ ofsdbg = sizept; sizept += sizedbg;
+
+ /* Allocate prototype object and initialize its fields. */
+ pt = (GCproto *)lj_mem_newgco(ls->L, (MSize)sizept);
+ pt->gct = ~LJ_TPROTO;
+ pt->numparams = (uint8_t)numparams;
+ pt->framesize = (uint8_t)framesize;
+ pt->sizebc = sizebc;
+ setmref(pt->k, (char *)pt + ofsk);
+ setmref(pt->uv, (char *)pt + ofsuv);
+ pt->sizekgc = 0; /* Set to zero until fully initialized. */
+ pt->sizekn = sizekn;
+ pt->sizept = sizept;
+ pt->sizeuv = (uint8_t)sizeuv;
+ pt->flags = (uint8_t)flags;
+ pt->trace = 0;
+ setgcref(pt->chunkname, obj2gco(ls->chunkname));
+
+ /* Close potentially uninitialized gap between bc and kgc. */
+ *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(sizekgc+1)) = 0;
+
+ /* Read bytecode instructions and upvalue refs. */
+ bcread_bytecode(ls, pt, sizebc);
+ bcread_uv(ls, pt, sizeuv);
+
+ /* Read constants. */
+ bcread_kgc(ls, pt, sizekgc);
+ pt->sizekgc = sizekgc;
+ bcread_knum(ls, pt, sizekn);
+
+ /* Read and initialize debug info. */
+ pt->firstline = firstline;
+ pt->numline = numline;
+ if (sizedbg) {
+ MSize sizeli = (sizebc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2);
+ setmref(pt->lineinfo, (char *)pt + ofsdbg);
+ setmref(pt->uvinfo, (char *)pt + ofsdbg + sizeli);
+ bcread_dbg(ls, pt, sizedbg);
+ setmref(pt->varinfo, bcread_varinfo(pt));
+ } else {
+ setmref(pt->lineinfo, NULL);
+ setmref(pt->uvinfo, NULL);
+ setmref(pt->varinfo, NULL);
+ }
+
+ if (len != startn - ls->n)
+ bcread_error(ls, LJ_ERR_BCBAD);
+ return pt;
+}
+
+/* Read and check header of bytecode dump. */
+static int bcread_header(LexState *ls)
+{
+ uint32_t flags;
+ bcread_want(ls, 3+5+5);
+ if (bcread_byte(ls) != BCDUMP_HEAD2 ||
+ bcread_byte(ls) != BCDUMP_HEAD3 ||
+ bcread_byte(ls) != BCDUMP_VERSION) return 0;
+ bcread_flags(ls) = flags = bcread_uleb128(ls);
+ if ((flags & ~(BCDUMP_F_KNOWN)) != 0) return 0;
+#if !LJ_HASFFI
+ if ((flags & BCDUMP_F_FFI)) return 0;
+#endif
+ if ((flags & BCDUMP_F_STRIP)) {
+ ls->chunkname = lj_str_newz(ls->L, ls->chunkarg);
+ } else {
+ MSize len = bcread_uleb128(ls);
+ bcread_need(ls, len);
+ ls->chunkname = lj_str_new(ls->L, (const char *)bcread_mem(ls, len), len);
+ }
+ return 1; /* Ok. */
+}
+
+/* Read a bytecode dump. */
+GCproto *lj_bcread(LexState *ls)
+{
+ lua_State *L = ls->L;
+ lua_assert(ls->current == BCDUMP_HEAD1);
+ bcread_savetop(L, ls, L->top);
+ lj_str_resetbuf(&ls->sb);
+ /* Check for a valid bytecode dump header. */
+ if (!bcread_header(ls))
+ bcread_error(ls, LJ_ERR_BCFMT);
+ for (;;) { /* Process all prototypes in the bytecode dump. */
+ GCproto *pt = bcread_proto(ls);
+ if (!pt) break;
+ setprotoV(L, L->top, pt);
+ incr_top(L);
+ }
+ if ((int32_t)ls->n > 0 || L->top-1 != bcread_oldtop(L, ls))
+ bcread_error(ls, LJ_ERR_BCBAD);
+ /* Pop off last prototype. */
+ L->top--;
+ return protoV(L->top);
+}
+
diff --git a/src/LuaJIT/src/lj_bcwrite.c b/src/LuaJIT/src/lj_bcwrite.c
new file mode 100644
index 000000000..ae90727e5
--- /dev/null
+++ b/src/LuaJIT/src/lj_bcwrite.c
@@ -0,0 +1,395 @@
+/*
+** Bytecode writer.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_bcwrite_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_str.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#if LJ_HASJIT
+#include "lj_dispatch.h"
+#include "lj_jit.h"
+#endif
+#include "lj_bcdump.h"
+#include "lj_vm.h"
+
+/* Context for bytecode writer. */
+typedef struct BCWriteCtx {
+ SBuf sb; /* Output buffer. */
+ lua_State *L; /* Lua state. */
+ GCproto *pt; /* Root prototype. */
+ lua_Writer wfunc; /* Writer callback. */
+ void *wdata; /* Writer callback data. */
+ int strip; /* Strip debug info. */
+ int status; /* Status from writer callback. */
+} BCWriteCtx;
+
+/* -- Output buffer handling ---------------------------------------------- */
+
+/* Resize buffer if needed. */
+static LJ_NOINLINE void bcwrite_resize(BCWriteCtx *ctx, MSize len)
+{
+ MSize sz = ctx->sb.sz * 2;
+ while (ctx->sb.n + len > sz) sz = sz * 2;
+ lj_str_resizebuf(ctx->L, &ctx->sb, sz);
+}
+
+/* Need a certain amount of buffer space. */
+static LJ_AINLINE void bcwrite_need(BCWriteCtx *ctx, MSize len)
+{
+ if (LJ_UNLIKELY(ctx->sb.n + len > ctx->sb.sz))
+ bcwrite_resize(ctx, len);
+}
+
+/* Add memory block to buffer. */
+static void bcwrite_block(BCWriteCtx *ctx, const void *p, MSize len)
+{
+ uint8_t *q = (uint8_t *)(ctx->sb.buf + ctx->sb.n);
+ MSize i;
+ ctx->sb.n += len;
+ for (i = 0; i < len; i++) q[i] = ((uint8_t *)p)[i];
+}
+
+/* Add byte to buffer. */
+static LJ_AINLINE void bcwrite_byte(BCWriteCtx *ctx, uint8_t b)
+{
+ ctx->sb.buf[ctx->sb.n++] = b;
+}
+
+/* Add ULEB128 value to buffer. */
+static void bcwrite_uleb128(BCWriteCtx *ctx, uint32_t v)
+{
+ MSize n = ctx->sb.n;
+ uint8_t *p = (uint8_t *)ctx->sb.buf;
+ for (; v >= 0x80; v >>= 7)
+ p[n++] = (uint8_t)((v & 0x7f) | 0x80);
+ p[n++] = (uint8_t)v;
+ ctx->sb.n = n;
+}
+
+/* -- Bytecode writer ----------------------------------------------------- */
+
+/* Write a single constant key/value of a template table. */
+static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow)
+{
+ bcwrite_need(ctx, 1+10);
+ if (tvisstr(o)) {
+ const GCstr *str = strV(o);
+ MSize len = str->len;
+ bcwrite_need(ctx, 5+len);
+ bcwrite_uleb128(ctx, BCDUMP_KTAB_STR+len);
+ bcwrite_block(ctx, strdata(str), len);
+ } else if (tvisint(o)) {
+ bcwrite_byte(ctx, BCDUMP_KTAB_INT);
+ bcwrite_uleb128(ctx, intV(o));
+ } else if (tvisnum(o)) {
+ if (!LJ_DUALNUM && narrow) { /* Narrow number constants to integers. */
+ lua_Number num = numV(o);
+ int32_t k = lj_num2int(num);
+ if (num == (lua_Number)k) { /* -0 is never a constant. */
+ bcwrite_byte(ctx, BCDUMP_KTAB_INT);
+ bcwrite_uleb128(ctx, k);
+ return;
+ }
+ }
+ bcwrite_byte(ctx, BCDUMP_KTAB_NUM);
+ bcwrite_uleb128(ctx, o->u32.lo);
+ bcwrite_uleb128(ctx, o->u32.hi);
+ } else {
+ lua_assert(tvispri(o));
+ bcwrite_byte(ctx, BCDUMP_KTAB_NIL+~itype(o));
+ }
+}
+
+/* Write a template table. */
+static void bcwrite_ktab(BCWriteCtx *ctx, const GCtab *t)
+{
+ MSize narray = 0, nhash = 0;
+ if (t->asize > 0) { /* Determine max. length of array part. */
+ ptrdiff_t i;
+ TValue *array = tvref(t->array);
+ for (i = (ptrdiff_t)t->asize-1; i >= 0; i--)
+ if (!tvisnil(&array[i]))
+ break;
+ narray = (MSize)(i+1);
+ }
+ if (t->hmask > 0) { /* Count number of used hash slots. */
+ MSize i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ for (i = 0; i <= hmask; i++)
+ nhash += !tvisnil(&node[i].val);
+ }
+ /* Write number of array slots and hash slots. */
+ bcwrite_uleb128(ctx, narray);
+ bcwrite_uleb128(ctx, nhash);
+ if (narray) { /* Write array entries (may contain nil). */
+ MSize i;
+ TValue *o = tvref(t->array);
+ for (i = 0; i < narray; i++, o++)
+ bcwrite_ktabk(ctx, o, 1);
+ }
+ if (nhash) { /* Write hash entries. */
+ MSize i = nhash;
+ Node *node = noderef(t->node) + t->hmask;
+ for (;; node--)
+ if (!tvisnil(&node->val)) {
+ bcwrite_ktabk(ctx, &node->key, 0);
+ bcwrite_ktabk(ctx, &node->val, 1);
+ if (--i == 0) break;
+ }
+ }
+}
+
+/* Write GC constants of a prototype. */
+static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize i, sizekgc = pt->sizekgc;
+ GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc;
+ for (i = 0; i < sizekgc; i++, kr++) {
+ GCobj *o = gcref(*kr);
+ MSize tp, need = 1;
+ /* Determine constant type and needed size. */
+ if (o->gch.gct == ~LJ_TSTR) {
+ tp = BCDUMP_KGC_STR + gco2str(o)->len;
+ need = 5+gco2str(o)->len;
+ } else if (o->gch.gct == ~LJ_TPROTO) {
+ lua_assert((pt->flags & PROTO_CHILD));
+ tp = BCDUMP_KGC_CHILD;
+#if LJ_HASFFI
+ } else if (o->gch.gct == ~LJ_TCDATA) {
+ CTypeID id = gco2cd(o)->typeid;
+ need = 1+4*5;
+ if (id == CTID_INT64) {
+ tp = BCDUMP_KGC_I64;
+ } else if (id == CTID_UINT64) {
+ tp = BCDUMP_KGC_U64;
+ } else {
+ lua_assert(id == CTID_COMPLEX_DOUBLE);
+ tp = BCDUMP_KGC_COMPLEX;
+ }
+#endif
+ } else {
+ lua_assert(o->gch.gct == ~LJ_TTAB);
+ tp = BCDUMP_KGC_TAB;
+ }
+ /* Write constant type. */
+ bcwrite_need(ctx, need);
+ bcwrite_uleb128(ctx, tp);
+ /* Write constant data (if any). */
+ if (tp >= BCDUMP_KGC_STR) {
+ bcwrite_block(ctx, strdata(gco2str(o)), gco2str(o)->len);
+ } else if (tp == BCDUMP_KGC_TAB) {
+ bcwrite_ktab(ctx, gco2tab(o));
+#if LJ_HASFFI
+ } else if (tp != BCDUMP_KGC_CHILD) {
+ cTValue *p = (TValue *)cdataptr(gco2cd(o));
+ bcwrite_uleb128(ctx, p[0].u32.lo);
+ bcwrite_uleb128(ctx, p[0].u32.hi);
+ if (tp == BCDUMP_KGC_COMPLEX) {
+ bcwrite_uleb128(ctx, p[1].u32.lo);
+ bcwrite_uleb128(ctx, p[1].u32.hi);
+ }
+#endif
+ }
+ }
+}
+
+/* Write number constants of a prototype. */
+static void bcwrite_knum(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize i, sizekn = pt->sizekn;
+ cTValue *o = mref(pt->k, TValue);
+ bcwrite_need(ctx, 10*sizekn);
+ for (i = 0; i < sizekn; i++, o++) {
+ int32_t k;
+ if (tvisint(o)) {
+ k = intV(o);
+ goto save_int;
+ } else {
+ /* Write a 33 bit ULEB128 for the int (lsb=0) or loword (lsb=1). */
+ if (!LJ_DUALNUM) { /* Narrow number constants to integers. */
+ lua_Number num = numV(o);
+ k = lj_num2int(num);
+ if (num == (lua_Number)k) { /* -0 is never a constant. */
+ save_int:
+ bcwrite_uleb128(ctx, 2*(uint32_t)k | ((uint32_t)k & 0x80000000u));
+ if (k < 0) {
+ char *p = &ctx->sb.buf[ctx->sb.n-1];
+ *p = (*p & 7) | ((k>>27) & 0x18);
+ }
+ continue;
+ }
+ }
+ bcwrite_uleb128(ctx, 1+(2*o->u32.lo | (o->u32.lo & 0x80000000u)));
+ if (o->u32.lo >= 0x80000000u) {
+ char *p = &ctx->sb.buf[ctx->sb.n-1];
+ *p = (*p & 7) | ((o->u32.lo>>27) & 0x18);
+ }
+ bcwrite_uleb128(ctx, o->u32.hi);
+ }
+ }
+}
+
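+/* Editorial note (not part of upstream LuaJIT): the "33 bit ULEB128" trick in
+** bcwrite_knum() above stores the 32 bit payload shifted left by one, keeping
+** the integer-vs-number tag in the lowest bit (0 = integer constant, 1 = low
+** word of a double, followed by a plain ULEB128 for the high word). The
+** reader side checks that tag with (ls->p[0] & 1) and decodes the payload
+** with bcread_uleb128_33() in lj_bcread.c.
+*/
+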
+/* Write bytecode instructions. */
+static void bcwrite_bytecode(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize nbc = pt->sizebc-1; /* Omit the [JI]FUNC* header. */
+#if LJ_HASJIT
+ uint8_t *p = (uint8_t *)&ctx->sb.buf[ctx->sb.n];
+#endif
+ bcwrite_block(ctx, proto_bc(pt)+1, nbc*(MSize)sizeof(BCIns));
+#if LJ_HASJIT
+ /* Unpatch modified bytecode containing ILOOP/JLOOP etc. */
+ if ((pt->flags & PROTO_ILOOP) || pt->trace) {
+ jit_State *J = L2J(ctx->L);
+ MSize i;
+ for (i = 0; i < nbc; i++, p += sizeof(BCIns)) {
+ BCOp op = (BCOp)p[LJ_ENDIAN_SELECT(0, 3)];
+ if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP ||
+ op == BC_JFORI) {
+ p[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_IFORL+BC_FORL);
+ } else if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
+ BCReg rd = p[LJ_ENDIAN_SELECT(2, 1)] + (p[LJ_ENDIAN_SELECT(3, 0)] << 8);
+ BCIns ins = traceref(J, rd)->startins;
+ p[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_JFORL+BC_FORL);
+ p[LJ_ENDIAN_SELECT(2, 1)] = bc_c(ins);
+ p[LJ_ENDIAN_SELECT(3, 0)] = bc_b(ins);
+ }
+ }
+ }
+#endif
+}
+
+/* Write prototype. */
+static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize sizedbg = 0;
+
+ /* Recursively write children of prototype. */
+ if ((pt->flags & PROTO_CHILD)) {
+ ptrdiff_t i, n = pt->sizekgc;
+ GCRef *kr = mref(pt->k, GCRef) - 1;
+ for (i = 0; i < n; i++, kr--) {
+ GCobj *o = gcref(*kr);
+ if (o->gch.gct == ~LJ_TPROTO)
+ bcwrite_proto(ctx, gco2pt(o));
+ }
+ }
+
+ /* Start writing the prototype info to a buffer. */
+ lj_str_resetbuf(&ctx->sb);
+ ctx->sb.n = 5; /* Leave room for final size. */
+ bcwrite_need(ctx, 4+6*5+(pt->sizebc-1)*(MSize)sizeof(BCIns)+pt->sizeuv*2);
+
+ /* Write prototype header. */
+ bcwrite_byte(ctx, (pt->flags & (PROTO_CHILD|PROTO_VARARG|PROTO_FFI)));
+ bcwrite_byte(ctx, pt->numparams);
+ bcwrite_byte(ctx, pt->framesize);
+ bcwrite_byte(ctx, pt->sizeuv);
+ bcwrite_uleb128(ctx, pt->sizekgc);
+ bcwrite_uleb128(ctx, pt->sizekn);
+ bcwrite_uleb128(ctx, pt->sizebc-1);
+ if (!ctx->strip) {
+ if (proto_lineinfo(pt))
+ sizedbg = pt->sizept - (MSize)((char *)proto_lineinfo(pt) - (char *)pt);
+ bcwrite_uleb128(ctx, sizedbg);
+ if (sizedbg) {
+ bcwrite_uleb128(ctx, pt->firstline);
+ bcwrite_uleb128(ctx, pt->numline);
+ }
+ }
+
+ /* Write bytecode instructions and upvalue refs. */
+ bcwrite_bytecode(ctx, pt);
+ bcwrite_block(ctx, proto_uv(pt), pt->sizeuv*2);
+
+ /* Write constants. */
+ bcwrite_kgc(ctx, pt);
+ bcwrite_knum(ctx, pt);
+
+ /* Write debug info, if not stripped. */
+ if (sizedbg) {
+ bcwrite_need(ctx, sizedbg);
+ bcwrite_block(ctx, proto_lineinfo(pt), sizedbg);
+ }
+
+ /* Pass buffer to writer function. */
+ if (ctx->status == 0) {
+ MSize n = ctx->sb.n - 5;
+ MSize nn = 1 + lj_fls(n)/7;
+ ctx->sb.n = 5 - nn;
+ bcwrite_uleb128(ctx, n); /* Fill in final size. */
+ lua_assert(ctx->sb.n == 5);
+ ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf+5-nn, nn+n, ctx->wdata);
+ }
+}
+
+/* Write header of bytecode dump. */
+static void bcwrite_header(BCWriteCtx *ctx)
+{
+ GCstr *chunkname = proto_chunkname(ctx->pt);
+ const char *name = strdata(chunkname);
+ MSize len = chunkname->len;
+ lj_str_resetbuf(&ctx->sb);
+ bcwrite_need(ctx, 5+5+len);
+ bcwrite_byte(ctx, BCDUMP_HEAD1);
+ bcwrite_byte(ctx, BCDUMP_HEAD2);
+ bcwrite_byte(ctx, BCDUMP_HEAD3);
+ bcwrite_byte(ctx, BCDUMP_VERSION);
+ bcwrite_byte(ctx, (ctx->strip ? BCDUMP_F_STRIP : 0) +
+ (LJ_BE ? BCDUMP_F_BE : 0) +
+ ((ctx->pt->flags & PROTO_FFI) ? BCDUMP_F_FFI : 0));
+ if (!ctx->strip) {
+ bcwrite_uleb128(ctx, len);
+ bcwrite_block(ctx, name, len);
+ }
+ ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf, ctx->sb.n, ctx->wdata);
+}
+
+/* Write footer of bytecode dump. */
+static void bcwrite_footer(BCWriteCtx *ctx)
+{
+ if (ctx->status == 0) {
+ uint8_t zero = 0;
+ ctx->status = ctx->wfunc(ctx->L, &zero, 1, ctx->wdata);
+ }
+}
+
+/* Protected callback for bytecode writer. */
+static TValue *cpwriter(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ BCWriteCtx *ctx = (BCWriteCtx *)ud;
+ UNUSED(dummy);
+ lj_str_resizebuf(L, &ctx->sb, 1024); /* Avoids resize for most prototypes. */
+ bcwrite_header(ctx);
+ bcwrite_proto(ctx, ctx->pt);
+ bcwrite_footer(ctx);
+ return NULL;
+}
+
+/* Write bytecode for a prototype. */
+int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, void *data,
+ int strip)
+{
+ BCWriteCtx ctx;
+ int status;
+ ctx.L = L;
+ ctx.pt = pt;
+ ctx.wfunc = writer;
+ ctx.wdata = data;
+ ctx.strip = strip;
+ ctx.status = 0;
+ lj_str_initbuf(&ctx.sb);
+ status = lj_vm_cpcall(L, NULL, &ctx, cpwriter);
+ if (status == 0) status = ctx.status;
+ lj_str_freebuf(G(ctx.L), &ctx.sb);
+ return status;
+}
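+
+/* Editorial sketch (not part of upstream LuaJIT): lj_bcwrite() is driven by a
+** standard lua_Writer callback, the same interface lua_dump() uses. A caller
+** that only wants the dump in memory could pass something like the following
+** (names hypothetical); a nonzero return from the writer aborts the dump and
+** becomes lj_bcwrite()'s return status:
+**
+**   static int buf_writer(lua_State *L, const void *p, size_t sz, void *ud)
+**   {
+**     (void)L;
+**     luaL_addlstring((luaL_Buffer *)ud, (const char *)p, sz);
+**     return 0;
+**   }
+**
+** and then call lj_bcwrite(L, pt, buf_writer, &buf, strip).
+*/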
+
diff --git a/src/LuaJIT/src/lj_carith.c b/src/LuaJIT/src/lj_carith.c
new file mode 100644
index 000000000..56708bf67
--- /dev/null
+++ b/src/LuaJIT/src/lj_carith.c
@@ -0,0 +1,314 @@
+/*
+** C data arithmetic.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_carith.h"
+
+/* -- C data arithmetic --------------------------------------------------- */
+
+/* Binary operands of an operator converted to ctypes. */
+typedef struct CDArith {
+ uint8_t *p[2];
+ CType *ct[2];
+} CDArith;
+
+/* Check arguments for arithmetic metamethods. */
+static int carith_checkarg(lua_State *L, CTState *cts, CDArith *ca)
+{
+ TValue *o = L->base;
+ int ok = 1;
+ MSize i;
+ if (o+1 >= L->top)
+ lj_err_argt(L, 1, LUA_TCDATA);
+ for (i = 0; i < 2; i++, o++) {
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ CTypeID id = (CTypeID)cd->typeid;
+ CType *ct = ctype_raw(cts, id);
+ uint8_t *p = (uint8_t *)cdataptr(cd);
+ if (ctype_isptr(ct->info)) {
+ p = (uint8_t *)cdata_getptr(p, ct->size);
+ if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
+ } else if (ctype_isfunc(ct->info)) {
+ p = (uint8_t *)*(void **)p;
+ ct = ctype_get(cts,
+ lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR));
+ }
+ ca->ct[i] = ct;
+ ca->p[i] = p;
+ } else if (tvisint(o)) {
+ ca->ct[i] = ctype_get(cts, CTID_INT32);
+ ca->p[i] = (uint8_t *)&o->i;
+ } else if (tvisnum(o)) {
+ ca->ct[i] = ctype_get(cts, CTID_DOUBLE);
+ ca->p[i] = (uint8_t *)&o->n;
+ } else if (tvisnil(o)) {
+ ca->ct[i] = ctype_get(cts, CTID_P_VOID);
+ ca->p[i] = (uint8_t *)0;
+ } else {
+ ca->ct[i] = NULL;
+ ca->p[i] = NULL;
+ ok = 0;
+ }
+ }
+ return ok;
+}
+
+/* Pointer arithmetic. */
+static int carith_ptr(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
+{
+ CType *ctp = ca->ct[0];
+ uint8_t *pp = ca->p[0];
+ ptrdiff_t idx;
+ CTSize sz;
+ CTypeID id;
+ GCcdata *cd;
+ if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) {
+ if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) &&
+ (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) {
+ uint8_t *pp2 = ca->p[1];
+ if (mm == MM_eq) { /* Pointer equality. Incompatible pointers are ok. */
+ setboolV(L->top-1, (pp == pp2));
+ return 1;
+ }
+ if (!lj_cconv_compatptr(cts, ctp, ca->ct[1], CCF_IGNQUAL))
+ return 0;
+ if (mm == MM_sub) { /* Pointer difference. */
+ intptr_t diff;
+ sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */
+ if (sz == 0 || sz == CTSIZE_INVALID)
+ return 0;
+ diff = ((intptr_t)pp - (intptr_t)pp2) / (int32_t)sz;
+ /* All valid pointer differences on x64 are in (-2^47, +2^47),
+ ** which fits into a double without loss of precision.
+ */
+ setintptrV(L->top-1, (int32_t)diff);
+ return 1;
+ } else if (mm == MM_lt) { /* Pointer comparison (unsigned). */
+ setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2));
+ return 1;
+ } else {
+ lua_assert(mm == MM_le);
+ setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2));
+ return 1;
+ }
+ }
+ if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(ca->ct[1]->info)))
+ return 0;
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[1],
+ (uint8_t *)&idx, ca->p[1], 0);
+ if (mm == MM_sub) idx = -idx;
+ } else if (mm == MM_add && ctype_isnum(ctp->info) &&
+ (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) {
+ /* Swap pointer and index. */
+ ctp = ca->ct[1]; pp = ca->p[1];
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[0],
+ (uint8_t *)&idx, ca->p[0], 0);
+ } else {
+ return 0;
+ }
+ sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */
+ if (sz == CTSIZE_INVALID)
+ return 0;
+ pp += idx*(int32_t)sz; /* Compute pointer + index. */
+ id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)),
+ CTSIZE_PTR);
+ cd = lj_cdata_new(cts, id, CTSIZE_PTR);
+ *(uint8_t **)cdataptr(cd) = pp;
+ setcdataV(L, L->top-1, cd);
+ lj_gc_check(L);
+ return 1;
+}
+
+/* 64 bit integer arithmetic. */
+static int carith_int64(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
+{
+ if (ctype_isnum(ca->ct[0]->info) && ca->ct[0]->size <= 8 &&
+ ctype_isnum(ca->ct[1]->info) && ca->ct[1]->size <= 8) {
+ CTypeID id = (((ca->ct[0]->info & CTF_UNSIGNED) && ca->ct[0]->size == 8) ||
+ ((ca->ct[1]->info & CTF_UNSIGNED) && ca->ct[1]->size == 8)) ?
+ CTID_UINT64 : CTID_INT64;
+ CType *ct = ctype_get(cts, id);
+ GCcdata *cd;
+ uint64_t u0, u1, *up;
+ lj_cconv_ct_ct(cts, ct, ca->ct[0], (uint8_t *)&u0, ca->p[0], 0);
+ if (mm != MM_unm)
+ lj_cconv_ct_ct(cts, ct, ca->ct[1], (uint8_t *)&u1, ca->p[1], 0);
+ switch (mm) {
+ case MM_eq:
+ setboolV(L->top-1, (u0 == u1));
+ return 1;
+ case MM_lt:
+ setboolV(L->top-1,
+ id == CTID_INT64 ? ((int64_t)u0 < (int64_t)u1) : (u0 < u1));
+ return 1;
+ case MM_le:
+ setboolV(L->top-1,
+ id == CTID_INT64 ? ((int64_t)u0 <= (int64_t)u1) : (u0 <= u1));
+ return 1;
+ default: break;
+ }
+ cd = lj_cdata_new(cts, id, 8);
+ up = (uint64_t *)cdataptr(cd);
+ setcdataV(L, L->top-1, cd);
+ switch (mm) {
+ case MM_add: *up = u0 + u1; break;
+ case MM_sub: *up = u0 - u1; break;
+ case MM_mul: *up = u0 * u1; break;
+ case MM_div:
+ if (id == CTID_INT64)
+ *up = (uint64_t)lj_carith_divi64((int64_t)u0, (int64_t)u1);
+ else
+ *up = lj_carith_divu64(u0, u1);
+ break;
+ case MM_mod:
+ if (id == CTID_INT64)
+ *up = (uint64_t)lj_carith_modi64((int64_t)u0, (int64_t)u1);
+ else
+ *up = lj_carith_modu64(u0, u1);
+ break;
+ case MM_pow:
+ if (id == CTID_INT64)
+ *up = (uint64_t)lj_carith_powi64((int64_t)u0, (int64_t)u1);
+ else
+ *up = lj_carith_powu64(u0, u1);
+ break;
+ case MM_unm: *up = (uint64_t)-(int64_t)u0; break;
+ default: lua_assert(0); break;
+ }
+ lj_gc_check(L);
+ return 1;
+ }
+ return 0;
+}
+
+/* Handle ctype arithmetic metamethods. */
+static int lj_carith_meta(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
+{
+ cTValue *tv = NULL;
+ if (tviscdata(L->base))
+ tv = lj_ctype_meta(cts, cdataV(L->base)->typeid, mm);
+ if (!tv && L->base+1 < L->top && tviscdata(L->base+1))
+ tv = lj_ctype_meta(cts, cdataV(L->base+1)->typeid, mm);
+ if (!tv) {
+ const char *repr[2];
+ int i;
+ for (i = 0; i < 2; i++) {
+ if (ca->ct[i])
+ repr[i] = strdata(lj_ctype_repr(L, ctype_typeid(cts, ca->ct[i]), NULL));
+ else
+ repr[i] = typename(&L->base[i]);
+ }
+ lj_err_callerv(L, mm == MM_len ? LJ_ERR_FFI_BADLEN :
+ mm == MM_concat ? LJ_ERR_FFI_BADCONCAT :
+ mm < MM_add ? LJ_ERR_FFI_BADCOMP : LJ_ERR_FFI_BADARITH,
+ repr[0], repr[1]);
+ }
+ return lj_meta_tailcall(L, tv);
+}
+
+/* Arithmetic operators for cdata. */
+int lj_carith_op(lua_State *L, MMS mm)
+{
+ CTState *cts = ctype_cts(L);
+ CDArith ca;
+ if (carith_checkarg(L, cts, &ca)) {
+ if (carith_int64(L, cts, &ca, mm) || carith_ptr(L, cts, &ca, mm)) {
+ copyTV(L, &G(L)->tmptv2, L->top-1); /* Remember for trace recorder. */
+ return 1;
+ }
+ }
+ return lj_carith_meta(L, cts, &ca, mm);
+}
+
+/* -- 64 bit integer arithmetic helpers ----------------------------------- */
+
+#if LJ_32 && LJ_HASJIT
+/* Signed/unsigned 64 bit multiplication. */
+int64_t lj_carith_mul64(int64_t a, int64_t b)
+{
+ return a * b;
+}
+#endif
+
+/* Unsigned 64 bit division. */
+uint64_t lj_carith_divu64(uint64_t a, uint64_t b)
+{
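+ /* A zero divisor is defined to return 2^63 instead of raising a hardware trap. */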
+ if (b == 0) return U64x(80000000,00000000);
+ return a / b;
+}
+
+/* Signed 64 bit division. */
+int64_t lj_carith_divi64(int64_t a, int64_t b)
+{
+ if (b == 0 || (a == (int64_t)U64x(80000000,00000000) && b == -1))
+ return U64x(80000000,00000000);
+ return a / b;
+}
+
+/* Unsigned 64 bit modulo. */
+uint64_t lj_carith_modu64(uint64_t a, uint64_t b)
+{
+ if (b == 0) return U64x(80000000,00000000);
+ return a % b;
+}
+
+/* Signed 64 bit modulo. */
+int64_t lj_carith_modi64(int64_t a, int64_t b)
+{
+ if (b == 0) return U64x(80000000,00000000);
+ if (a == (int64_t)U64x(80000000,00000000) && b == -1) return 0;
+ return a % b;
+}
+
+/* Unsigned 64 bit x^k. */
+uint64_t lj_carith_powu64(uint64_t x, uint64_t k)
+{
+ uint64_t y;
+ if (k == 0)
+ return 1;
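+ /* Square-and-multiply: compute x^k one bit of k at a time. */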
+ for (; (k & 1) == 0; k >>= 1) x *= x;
+ y = x;
+ if ((k >>= 1) != 0) {
+ for (;;) {
+ x *= x;
+ if (k == 1) break;
+ if (k & 1) y *= x;
+ k >>= 1;
+ }
+ y *= x;
+ }
+ return y;
+}
+
+/* Signed 64 bit x^k. */
+int64_t lj_carith_powi64(int64_t x, int64_t k)
+{
+ if (k == 0)
+ return 1;
+ if (k < 0) {
+ if (x == 0)
+ return U64x(7fffffff,ffffffff);
+ else if (x == 1)
+ return 1;
+ else if (x == -1)
+ return (k & 1) ? -1 : 1;
+ else
+ return 0;
+ }
+ return (int64_t)lj_carith_powu64((uint64_t)x, (uint64_t)k);
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_carith.h b/src/LuaJIT/src/lj_carith.h
new file mode 100644
index 000000000..6e0f29bb7
--- /dev/null
+++ b/src/LuaJIT/src/lj_carith.h
@@ -0,0 +1,27 @@
+/*
+** C data arithmetic.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CARITH_H
+#define _LJ_CARITH_H
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+LJ_FUNC int lj_carith_op(lua_State *L, MMS mm);
+
+#if LJ_32 && LJ_HASJIT
+LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k);
+#endif
+LJ_FUNC uint64_t lj_carith_divu64(uint64_t a, uint64_t b);
+LJ_FUNC int64_t lj_carith_divi64(int64_t a, int64_t b);
+LJ_FUNC uint64_t lj_carith_modu64(uint64_t a, uint64_t b);
+LJ_FUNC int64_t lj_carith_modi64(int64_t a, int64_t b);
+LJ_FUNC uint64_t lj_carith_powu64(uint64_t x, uint64_t k);
+LJ_FUNC int64_t lj_carith_powi64(int64_t x, int64_t k);
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_ccall.c b/src/LuaJIT/src/lj_ccall.c
new file mode 100644
index 000000000..eb9f709ce
--- /dev/null
+++ b/src/LuaJIT/src/lj_ccall.c
@@ -0,0 +1,728 @@
+/*
+** FFI C call handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_ccall.h"
+#include "lj_trace.h"
+
+/* Target-specific handling of register arguments. */
+#if LJ_TARGET_X86
+/* -- x86 calling conventions --------------------------------------------- */
+
+#if LJ_ABI_WIN
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs bigger than 8 by reference (on stack only). */ \
+ cc->retref = (sz > 8); \
+ if (cc->retref) cc->stack[nsp++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET
+
+#else
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference (in reg or on stack). */ \
+ if (ngpr < maxgpr) \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ else \
+ cc->stack[nsp++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Return complex float in GPRs and complex double by reference. */ \
+ cc->retref = (sz > 8); \
+ if (cc->retref) { \
+ if (ngpr < maxgpr) \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ else \
+ cc->stack[nsp++] = (GPRArg)dp; \
+ }
+
+#endif
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (!cc->retref) \
+ *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ ngpr = maxgpr; /* Pass all structs by value on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ isfp = 1; /* Pass complex by value on stack. */
+
+#define CCALL_HANDLE_REGARG \
+ if (!isfp) { /* Only non-FP values may be passed in registers. */ \
+ if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \
+ if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \
+ } else if (ngpr + 1 <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_X64 && LJ_ABI_WIN
+/* -- Windows/x64 calling conventions ------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs of size 1, 2, 4 or 8 in a GPR. */ \
+ cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \
+ if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (!cc->retref) \
+ *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass structs of size 1, 2, 4 or 8 in a GPR by value. */ \
+ if (!(sz == 1 || sz == 2 || sz == 4 || sz == 8)) { \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; /* Pass all other structs by reference. */ \
+ }
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex float in a GPR and complex double by reference. */ \
+ if (sz != 2*sizeof(float)) { \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; \
+ }
+
+/* Windows/x64 argument registers are strictly positional (use ngpr). */
+#define CCALL_HANDLE_REGARG \
+ if (isfp) { \
+ if (ngpr < 4) { dp = &cc->fpr[ngpr++]; nfpr = ngpr; goto done; } \
+ } else { \
+ if (ngpr < 4) { dp = &cc->gpr[ngpr++]; goto done; } \
+ }
+
+#elif LJ_TARGET_X64
+/* -- POSIX/x64 calling conventions --------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ int rcl[2]; rcl[0] = rcl[1] = 0; \
+ if (ccall_classify_struct(cts, ctr, rcl, 0)) { \
+ cc->retref = 1; /* Return struct by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ } else { \
+ cc->retref = 0; /* Return small structs in registers. */ \
+ }
+
+#define CCALL_HANDLE_STRUCTRET2 \
+ int rcl[2]; rcl[0] = rcl[1] = 0; \
+ ccall_classify_struct(cts, ctr, rcl, 0); \
+ ccall_struct_ret(cc, rcl, dp, ctr->size);
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in one or two FPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPR. */ \
+ *(int64_t *)dp = cc->fpr[0].l[0]; \
+ } else { /* Copy non-contiguous complex double from FPRs. */ \
+ ((int64_t *)dp)[0] = cc->fpr[0].l[0]; \
+ ((int64_t *)dp)[1] = cc->fpr[1].l[0]; \
+ }
+
+#define CCALL_HANDLE_STRUCTARG \
+ int rcl[2]; rcl[0] = rcl[1] = 0; \
+ if (!ccall_classify_struct(cts, d, rcl, 0)) { \
+ cc->nsp = nsp; cc->ngpr = ngpr; cc->nfpr = nfpr; \
+ if (ccall_struct_arg(cc, cts, d, rcl, o, narg)) goto err_nyi; \
+ nsp = cc->nsp; ngpr = cc->ngpr; nfpr = cc->nfpr; \
+ continue; \
+ } /* Pass all other structs by value on stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ isfp = 2; /* Pass complex in FPRs or on stack. Needs postprocessing. */
+
+#define CCALL_HANDLE_REGARG \
+ if (isfp) { /* Try to pass argument in FPRs. */ \
+ if (nfpr + n <= CCALL_NARG_FPR) { \
+ dp = &cc->fpr[nfpr]; \
+ nfpr += n; \
+ goto done; \
+ } \
+ } else { /* Try to pass argument in GPRs. */ \
+ /* Note that reordering is explicitly allowed in the x64 ABI. */ \
+ if (n <= 2 && ngpr + n <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_ARM
+/* -- ARM calling conventions --------------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs of size <= 4 in a GPR. */ \
+ cc->retref = !(sz <= 4); \
+ if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ cc->retref = 1; /* Return all complex values by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ UNUSED(dp); /* Nothing to do. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass all structs by value in registers and/or on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+/* ARM has a softfp ABI. */
+#define CCALL_HANDLE_REGARG \
+ if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \
+ if (ngpr < maxgpr) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ else \
+ nsp = (nsp + 1u) & ~1u; /* Align argument on stack. */ \
+ } \
+ if (ngpr < maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ if (ngpr + n > maxgpr) { \
+ nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
+ if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
+ ngpr = maxgpr; \
+ } else { \
+ ngpr += n; \
+ } \
+ goto done; \
+ }
+
+#elif LJ_TARGET_PPC
+/* -- PPC calling conventions --------------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in 2 or 4 GPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; /* Pass all structs by reference. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+#define CCALL_HANDLE_REGARG \
+ if (isfp) { /* Try to pass argument in FPRs. */ \
+ if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ dp = &cc->fpr[nfpr]; \
+ nfpr += 1; \
+ d = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
+ goto done; \
+ } \
+ } else { /* Try to pass argument in GPRs. */ \
+ if (n > 1) { \
+ lua_assert(n == 2 || n == 4); /* int64_t or complex (float). */ \
+ if (ctype_isinteger(d->info)) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
+ else if (ngpr + n > maxgpr) \
+ ngpr = maxgpr; /* Prevent reordering. */ \
+ } \
+ if (ngpr + n <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#define CCALL_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ ctr = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */
+
+#elif LJ_TARGET_PPCSPE
+/* -- PPC/SPE calling conventions ----------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in 2 or 4 GPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; /* Pass all structs by reference. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+/* PPC/SPE has a softfp ABI. */
+#define CCALL_HANDLE_REGARG \
+ if (n > 1) { /* Doesn't fit in a single GPR? */ \
+ lua_assert(n == 2 || n == 4); /* int64_t, double or complex (float). */ \
+ if (n == 2) \
+ ngpr = (ngpr + 1u) & ~1u; /* Only align 64 bit value to regpair. */ \
+ else if (ngpr + n > maxgpr) \
+ ngpr = maxgpr; /* Prevent reordering. */ \
+ } \
+ if (ngpr + n <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ }
+
+#elif LJ_TARGET_MIPS
+/* -- MIPS calling conventions -------------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in 1 or 2 FPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \
+ ((float *)dp)[0] = cc->fpr[0].f; \
+ ((float *)dp)[1] = cc->fpr[1].f; \
+ } else { /* Copy complex double from FPRs. */ \
+ ((double *)dp)[0] = cc->fpr[0].d; \
+ ((double *)dp)[1] = cc->fpr[1].d; \
+ }
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass all structs by value in registers and/or on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+#define CCALL_HANDLE_REGARG \
+ if (isfp && nfpr < CCALL_NARG_FPR && !(ct->info & CTF_VARARG)) { \
+ /* Try to pass argument in FPRs. */ \
+ dp = n == 1 ? (void *)&cc->fpr[nfpr].f : (void *)&cc->fpr[nfpr].d; \
+ nfpr++; ngpr += n; \
+ goto done; \
+ } else { /* Try to pass argument in GPRs. */ \
+ nfpr = CCALL_NARG_FPR; \
+ if ((d->info & CTF_ALIGN) > CTALIGN_PTR) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr < maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ if (ngpr + n > maxgpr) { \
+ nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
+ if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
+ ngpr = maxgpr; \
+ } else { \
+ ngpr += n; \
+ } \
+ goto done; \
+ } \
+ }
+
+#define CCALL_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ sp = (uint8_t *)&cc->fpr[0].f;
+
+#else
+#error "Missing calling convention definitions for this architecture"
+#endif
+
+#ifndef CCALL_HANDLE_STRUCTRET2
+#define CCALL_HANDLE_STRUCTRET2 \
+ memcpy(dp, sp, ctr->size); /* Copy struct return value from GPRs. */
+#endif
+
+/* -- x64 struct classification ------------------------------------------- */
+
+#if LJ_TARGET_X64 && !LJ_ABI_WIN
+
+/* Register classes for x64 struct classification. */
+#define CCALL_RCL_INT 1
+#define CCALL_RCL_SSE 2
+#define CCALL_RCL_MEM 4
+/* NYI: classify vectors. */
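+/* rcl[0]/rcl[1] hold the register class of the low/high eightbyte of the type. */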
+
+static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs);
+
+/* Classify a C type. */
+static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
+{
+ if (ctype_isarray(ct->info)) {
+ CType *cct = ctype_rawchild(cts, ct);
+ CTSize eofs, esz = cct->size, asz = ct->size;
+ for (eofs = 0; eofs < asz; eofs += esz)
+ ccall_classify_ct(cts, cct, rcl, ofs+eofs);
+ } else if (ctype_isstruct(ct->info)) {
+ ccall_classify_struct(cts, ct, rcl, ofs);
+ } else {
+ int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT;
+ lua_assert(ctype_hassize(ct->info));
+ if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */
+ rcl[(ofs >= 8)] |= cl;
+ }
+}
+
+/* Recursively classify a struct based on its fields. */
+static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
+{
+ if (ct->size > 16) return CCALL_RCL_MEM; /* Too big, gets memory class. */
+ while (ct->sib) {
+ CTSize fofs;
+ ct = ctype_get(cts, ct->sib);
+ fofs = ofs+ct->size;
+ if (ctype_isfield(ct->info))
+ ccall_classify_ct(cts, ctype_rawchild(cts, ct), rcl, fofs);
+ else if (ctype_isbitfield(ct->info))
+ rcl[(fofs >= 8)] |= CCALL_RCL_INT; /* NYI: unaligned bitfields? */
+ else if (ctype_isxattrib(ct->info, CTA_SUBTYPE))
+ ccall_classify_struct(cts, ctype_child(cts, ct), rcl, fofs);
+ }
+ return ((rcl[0]|rcl[1]) & CCALL_RCL_MEM); /* Memory class? */
+}
+
+/* Try to split up a small struct into registers. */
+static int ccall_struct_reg(CCallState *cc, GPRArg *dp, int *rcl)
+{
+ MSize ngpr = cc->ngpr, nfpr = cc->nfpr;
+ uint32_t i;
+ for (i = 0; i < 2; i++) {
+ lua_assert(!(rcl[i] & CCALL_RCL_MEM));
+ if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */
+ if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */
+ cc->gpr[ngpr++] = dp[i];
+ } else if ((rcl[i] & CCALL_RCL_SSE)) {
+ if (nfpr >= CCALL_NARG_FPR) return 1; /* Register overflow. */
+ cc->fpr[nfpr++].l[0] = dp[i];
+ }
+ }
+ cc->ngpr = ngpr; cc->nfpr = nfpr;
+ return 0; /* Ok. */
+}
+
+/* Pass a small struct argument. */
+static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl,
+ TValue *o, int narg)
+{
+ GPRArg dp[2];
+ dp[0] = dp[1] = 0;
+ /* Convert to temp. struct. */
+ lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
+ if (ccall_struct_reg(cc, dp, rcl)) { /* Register overflow? Pass on stack. */
+ MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1;
+ if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */
+ cc->nsp = nsp + n;
+ memcpy(&cc->stack[nsp], dp, n*CTSIZE_PTR);
+ }
+ return 0; /* Ok. */
+}
+
+/* Combine returned small struct. */
+static void ccall_struct_ret(CCallState *cc, int *rcl, uint8_t *dp, CTSize sz)
+{
+ GPRArg sp[2];
+ MSize ngpr = 0, nfpr = 0;
+ uint32_t i;
+ for (i = 0; i < 2; i++) {
+ if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */
+ sp[i] = cc->gpr[ngpr++];
+ } else if ((rcl[i] & CCALL_RCL_SSE)) {
+ sp[i] = cc->fpr[nfpr++].l[0];
+ }
+ }
+ memcpy(dp, sp, sz);
+}
+#endif
+
+/* -- Common C call handling ---------------------------------------------- */
+
+/* Infer the destination CTypeID for a vararg argument. */
+CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o)
+{
+ if (tvisnumber(o)) {
+ return CTID_DOUBLE;
+ } else if (tviscdata(o)) {
+ CTypeID id = cdataV(o)->typeid;
+ CType *s = ctype_get(cts, id);
+ if (ctype_isrefarray(s->info)) {
+ return lj_ctype_intern(cts,
+ CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(s->info)), CTSIZE_PTR);
+ } else if (ctype_isstruct(s->info) || ctype_isfunc(s->info)) {
+ /* NYI: how to pass a struct by value in a vararg argument? */
+ return lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR);
+ } else if (ctype_isfp(s->info) && s->size == sizeof(float)) {
+ return CTID_DOUBLE;
+ } else {
+ return id;
+ }
+ } else if (tvisstr(o)) {
+ return CTID_P_CCHAR;
+ } else if (tvisbool(o)) {
+ return CTID_BOOL;
+ } else {
+ return CTID_P_VOID;
+ }
+}
+
+/* Setup arguments for C call. */
+static int ccall_set_args(lua_State *L, CTState *cts, CType *ct,
+ CCallState *cc)
+{
+ int gcsteps = 0;
+ TValue *o, *top = L->top;
+ CTypeID fid;
+ CType *ctr;
+ MSize maxgpr, ngpr = 0, nsp = 0, narg;
+#if CCALL_NARG_FPR
+ MSize nfpr = 0;
+#endif
+
+ /* Clear unused regs to get some determinism in case of misdeclaration. */
+ memset(cc->gpr, 0, sizeof(cc->gpr));
+#if CCALL_NUM_FPR
+ memset(cc->fpr, 0, sizeof(cc->fpr));
+#endif
+
+#if LJ_TARGET_X86
+ /* x86 has several different calling conventions. */
+ cc->resx87 = 0;
+ switch (ctype_cconv(ct->info)) {
+ case CTCC_FASTCALL: maxgpr = 2; break;
+ case CTCC_THISCALL: maxgpr = 1; break;
+ default: maxgpr = 0; break;
+ }
+#else
+ maxgpr = CCALL_NARG_GPR;
+#endif
+
+ /* Perform required setup for some result types. */
+ ctr = ctype_rawchild(cts, ct);
+ if (ctype_isvector(ctr->info)) {
+ if (!(CCALL_VECTOR_REG && (ctr->size == 8 || ctr->size == 16)))
+ goto err_nyi;
+ } else if (ctype_iscomplex(ctr->info) || ctype_isstruct(ctr->info)) {
+ /* Preallocate cdata object and anchor it after arguments. */
+ CTSize sz = ctr->size;
+ GCcdata *cd = lj_cdata_new(cts, ctype_cid(ct->info), sz);
+ void *dp = cdataptr(cd);
+ setcdataV(L, L->top++, cd);
+ if (ctype_isstruct(ctr->info)) {
+ CCALL_HANDLE_STRUCTRET
+ } else {
+ CCALL_HANDLE_COMPLEXRET
+ }
+#if LJ_TARGET_X86
+ } else if (ctype_isfp(ctr->info)) {
+ cc->resx87 = ctr->size == sizeof(float) ? 1 : 2;
+#endif
+ }
+
+ /* Skip initial attributes. */
+ fid = ct->sib;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) break;
+ fid = ctf->sib;
+ }
+
+ /* Walk through all passed arguments. */
+ for (o = L->base+1, narg = 1; o < top; o++, narg++) {
+ CTypeID did;
+ CType *d;
+ CTSize sz;
+ MSize n, isfp = 0, isva = 0;
+ void *dp, *rp = NULL;
+
+ if (fid) { /* Get argument type from field. */
+ CType *ctf = ctype_get(cts, fid);
+ fid = ctf->sib;
+ lua_assert(ctype_isfield(ctf->info));
+ did = ctype_cid(ctf->info);
+ } else {
+ if (!(ct->info & CTF_VARARG))
+ lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too many arguments. */
+ did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */
+ isva = 1;
+ }
+ d = ctype_raw(cts, did);
+ sz = d->size;
+
+ /* Find out how (by value/ref) and where (GPR/FPR) to pass an argument. */
+ if (ctype_isnum(d->info)) {
+ if (sz > 8) goto err_nyi;
+ if ((d->info & CTF_FP))
+ isfp = 1;
+ } else if (ctype_isvector(d->info)) {
+ if (CCALL_VECTOR_REG && (sz == 8 || sz == 16))
+ isfp = 1;
+ else
+ goto err_nyi;
+ } else if (ctype_isstruct(d->info)) {
+ CCALL_HANDLE_STRUCTARG
+ } else if (ctype_iscomplex(d->info)) {
+ CCALL_HANDLE_COMPLEXARG
+ } else {
+ sz = CTSIZE_PTR;
+ }
+ sz = (sz + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
+ n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */
+
+ CCALL_HANDLE_REGARG /* Handle register arguments. */
+
+ /* Otherwise pass argument on stack. */
+ if (CCALL_ALIGN_STACKARG && !rp && (d->info & CTF_ALIGN) > CTALIGN_PTR) {
+ MSize align = (1u << ctype_align(d->info-CTALIGN_PTR)) -1;
+ nsp = (nsp + align) & ~align; /* Align argument on stack. */
+ }
+ if (nsp + n > CCALL_MAXSTACK) { /* Too many arguments. */
+ err_nyi:
+ lj_err_caller(L, LJ_ERR_FFI_NYICALL);
+ }
+ dp = &cc->stack[nsp];
+ nsp += n;
+ isva = 0;
+
+ done:
+ if (rp) { /* Pass by reference. */
+ gcsteps++;
+ *(void **)dp = rp;
+ dp = rp;
+ }
+ lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
+ /* Extend passed integers to 32 bits at least. */
+ if (ctype_isinteger_or_bool(d->info) && d->size < 4) {
+ if (d->info & CTF_UNSIGNED)
+ *(uint32_t *)dp = d->size == 1 ? (uint32_t)*(uint8_t *)dp :
+ (uint32_t)*(uint16_t *)dp;
+ else
+ *(int32_t *)dp = d->size == 1 ? (int32_t)*(int8_t *)dp :
+ (int32_t)*(int16_t *)dp;
+ }
+#if LJ_TARGET_X64 && LJ_ABI_WIN
+ if (isva) { /* Windows/x64 mirrors varargs in both register sets. */
+ if (nfpr == ngpr)
+ cc->gpr[ngpr-1] = cc->fpr[ngpr-1].l[0];
+ else
+ cc->fpr[ngpr-1].l[0] = cc->gpr[ngpr-1];
+ }
+#else
+ UNUSED(isva);
+#endif
+#if LJ_TARGET_X64 && !LJ_ABI_WIN
+ if (isfp == 2 && n == 2 && (uint8_t *)dp == (uint8_t *)&cc->fpr[nfpr-2]) {
+ cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1]; /* Split complex double. */
+ cc->fpr[nfpr-2].d[1] = 0;
+ }
+#else
+ UNUSED(isfp);
+#endif
+ }
+ if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too few arguments. */
+
+#if LJ_TARGET_X64 || LJ_TARGET_PPC
+ cc->nfpr = nfpr; /* Required for vararg functions. */
+#endif
+ cc->nsp = nsp;
+ cc->spadj = (CCALL_SPS_FREE + CCALL_SPS_EXTRA)*CTSIZE_PTR;
+ if (nsp > CCALL_SPS_FREE)
+ cc->spadj += (((nsp-CCALL_SPS_FREE)*CTSIZE_PTR + 15u) & ~15u);
+ return gcsteps;
+}
+
+/* Get results from C call. */
+static int ccall_get_results(lua_State *L, CTState *cts, CType *ct,
+ CCallState *cc, int *ret)
+{
+ CType *ctr = ctype_rawchild(cts, ct);
+ uint8_t *sp = (uint8_t *)&cc->gpr[0];
+ if (ctype_isvoid(ctr->info)) {
+ *ret = 0; /* Zero results. */
+ return 0; /* No additional GC step. */
+ }
+ *ret = 1; /* One result. */
+ if (ctype_isstruct(ctr->info)) {
+ /* Return cdata object which is already on top of stack. */
+ if (!cc->retref) {
+ void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */
+ CCALL_HANDLE_STRUCTRET2
+ }
+ return 1; /* One GC step. */
+ }
+ if (ctype_iscomplex(ctr->info)) {
+ /* Return cdata object which is already on top of stack. */
+ void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */
+ CCALL_HANDLE_COMPLEXRET2
+ return 1; /* One GC step. */
+ }
+ if (LJ_BE && ctype_isinteger_or_bool(ctr->info) && ctr->size < CTSIZE_PTR)
+ sp += (CTSIZE_PTR - ctr->size);
+#if CCALL_NUM_FPR
+ if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info))
+ sp = (uint8_t *)&cc->fpr[0];
+#endif
+#ifdef CCALL_HANDLE_RET
+ CCALL_HANDLE_RET
+#endif
+ /* No reference types end up here, so there's no need for the CTypeID. */
+ lua_assert(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info)));
+ if (ctype_isenum(ctr->info)) ctr = ctype_child(cts, ctr);
+ return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp);
+}
+
+/* Call C function. */
+int lj_ccall_func(lua_State *L, GCcdata *cd)
+{
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, cd->typeid);
+ CTSize sz = CTSIZE_PTR;
+ if (ctype_isptr(ct->info)) {
+ sz = ct->size;
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_isfunc(ct->info)) {
+ CCallState cc;
+ int gcsteps, ret;
+ cc.func = (void (*)(void))cdata_getptr(cdataptr(cd), sz);
+ gcsteps = ccall_set_args(L, cts, ct, &cc);
+ cts->cb.slot = ~0u;
+ lj_vm_ffi_call(&cc);
+ if (cts->cb.slot != ~0u) { /* Blacklist function that called a callback. */
+ TValue tv;
+ setlightudV(&tv, (void *)cc.func);
+ setboolV(lj_tab_set(L, cts->miscmap, &tv), 1);
+ }
+ gcsteps += ccall_get_results(L, cts, ct, &cc, &ret);
+#if LJ_TARGET_X86 && LJ_ABI_WIN
+ /* Automatically detect __stdcall and fix up C function declaration. */
+ if (cc.spadj && ctype_cconv(ct->info) == CTCC_CDECL) {
+ CTF_INSERT(ct->info, CCONV, CTCC_STDCALL);
+ lj_trace_abort(G(L));
+ }
+#endif
+ while (gcsteps-- > 0)
+ lj_gc_check(L);
+ return ret;
+ }
+ return -1; /* Not a function. */
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_ccall.h b/src/LuaJIT/src/lj_ccall.h
new file mode 100644
index 000000000..5985c4a98
--- /dev/null
+++ b/src/LuaJIT/src/lj_ccall.h
@@ -0,0 +1,158 @@
+/*
+** FFI C call handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CCALL_H
+#define _LJ_CCALL_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* -- C calling conventions ----------------------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+
+#if LJ_TARGET_X86
+#define CCALL_NARG_GPR 2 /* For fastcall arguments. */
+#define CCALL_NARG_FPR 0
+#define CCALL_NRET_GPR 2
+#define CCALL_NRET_FPR 1 /* For FP results on x87 stack. */
+#define CCALL_ALIGN_STACKARG 0 /* Don't align argument on stack. */
+#elif LJ_ABI_WIN
+#define CCALL_NARG_GPR 4
+#define CCALL_NARG_FPR 4
+#define CCALL_NRET_GPR 1
+#define CCALL_NRET_FPR 1
+#define CCALL_SPS_EXTRA 4
+#else
+#define CCALL_NARG_GPR 6
+#define CCALL_NARG_FPR 8
+#define CCALL_NRET_GPR 2
+#define CCALL_NRET_FPR 2
+#define CCALL_VECTOR_REG 1 /* Pass vectors in registers. */
+#endif
+
+#define CCALL_SPS_FREE 1
+
+typedef LJ_ALIGN(16) union FPRArg {
+ double d[2];
+ float f[4];
+ uint8_t b[16];
+ uint16_t s[8];
+ int i[4];
+ int64_t l[2];
+} FPRArg;
+
+typedef intptr_t GPRArg;
+
+#elif LJ_TARGET_ARM
+
+#define CCALL_NARG_GPR 4
+#define CCALL_NARG_FPR 0
+#define CCALL_NRET_GPR 2 /* For softfp double. */
+#define CCALL_NRET_FPR 0
+#define CCALL_SPS_FREE 0
+
+typedef intptr_t GPRArg;
+
+#elif LJ_TARGET_PPC
+
+#define CCALL_NARG_GPR 8
+#define CCALL_NARG_FPR 8
+#define CCALL_NRET_GPR 4 /* For complex double. */
+#define CCALL_NRET_FPR 1
+#define CCALL_SPS_EXTRA 4
+#define CCALL_SPS_FREE 0
+
+typedef intptr_t GPRArg;
+typedef double FPRArg;
+
+#elif LJ_TARGET_PPCSPE
+
+#define CCALL_NARG_GPR 8
+#define CCALL_NARG_FPR 0
+#define CCALL_NRET_GPR 4 /* For softfp complex double. */
+#define CCALL_NRET_FPR 0
+#define CCALL_SPS_FREE 0 /* NYI */
+
+typedef intptr_t GPRArg;
+
+#elif LJ_TARGET_MIPS
+
+#define CCALL_NARG_GPR 4
+#define CCALL_NARG_FPR 2
+#define CCALL_NRET_GPR 2
+#define CCALL_NRET_FPR 2
+#define CCALL_SPS_EXTRA 7
+#define CCALL_SPS_FREE 1
+
+typedef intptr_t GPRArg;
+typedef union FPRArg {
+ double d;
+ struct { LJ_ENDIAN_LOHI(float f; , float g;) };
+} FPRArg;
+
+#else
+#error "Missing calling convention definitions for this architecture"
+#endif
+
+#ifndef CCALL_SPS_EXTRA
+#define CCALL_SPS_EXTRA 0
+#endif
+#ifndef CCALL_VECTOR_REG
+#define CCALL_VECTOR_REG 0
+#endif
+#ifndef CCALL_ALIGN_STACKARG
+#define CCALL_ALIGN_STACKARG 1
+#endif
+
+#define CCALL_NUM_GPR \
+ (CCALL_NARG_GPR > CCALL_NRET_GPR ? CCALL_NARG_GPR : CCALL_NRET_GPR)
+#define CCALL_NUM_FPR \
+ (CCALL_NARG_FPR > CCALL_NRET_FPR ? CCALL_NARG_FPR : CCALL_NRET_FPR)
+
+/* Check against constants in lj_ctype.h. */
+LJ_STATIC_ASSERT(CCALL_NUM_GPR <= CCALL_MAX_GPR);
+LJ_STATIC_ASSERT(CCALL_NUM_FPR <= CCALL_MAX_FPR);
+
+#define CCALL_MAXSTACK 32
+
+/* -- C call state -------------------------------------------------------- */
+
+typedef struct CCallState {
+ void (*func)(void); /* Pointer to called function. */
+ uint32_t spadj; /* Stack pointer adjustment. */
+ uint8_t nsp; /* Number of stack slots. */
+ uint8_t retref; /* Return value by reference. */
+#if LJ_TARGET_X64
+ uint8_t ngpr; /* Number of arguments in GPRs. */
+ uint8_t nfpr; /* Number of arguments in FPRs. */
+#elif LJ_TARGET_X86
+ uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */
+#elif LJ_TARGET_PPC
+ uint8_t nfpr; /* Number of arguments in FPRs. */
+#endif
+#if CCALL_NUM_FPR
+#if LJ_32
+ int32_t align1;
+#endif
+ FPRArg fpr[CCALL_NUM_FPR]; /* Arguments/results in FPRs. */
+#endif
+ GPRArg gpr[CCALL_NUM_GPR]; /* Arguments/results in GPRs. */
+ GPRArg stack[CCALL_MAXSTACK]; /* Stack slots. */
+} CCallState;
+
+/* -- C call handling ----------------------------------------------------- */
+
+/* Really belongs to lj_vm.h. */
+LJ_ASMF void LJ_FASTCALL lj_vm_ffi_call(CCallState *cc);
+
+LJ_FUNC CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o);
+LJ_FUNC int lj_ccall_func(lua_State *L, GCcdata *cd);
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_ccallback.c b/src/LuaJIT/src/lj_ccallback.c
new file mode 100644
index 000000000..482553f04
--- /dev/null
+++ b/src/LuaJIT/src/lj_ccallback.c
@@ -0,0 +1,597 @@
+/*
+** FFI C callback handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_ccall.h"
+#include "lj_ccallback.h"
+#include "lj_target.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+/* -- Target-specific handling of callback slots -------------------------- */
+
+#define CALLBACK_MCODE_SIZE (LJ_PAGESIZE * LJ_NUM_CBPAGE)
+
+#if LJ_TARGET_X86ORX64
+
+#define CALLBACK_MCODE_HEAD (LJ_64 ? 8 : 0)
+#define CALLBACK_MCODE_GROUP (-2+1+2+5+(LJ_64 ? 6 : 5))
+
+#define CALLBACK_SLOT2OFS(slot) \
+ (CALLBACK_MCODE_HEAD + CALLBACK_MCODE_GROUP*((slot)/32) + 4*(slot))
+
+static MSize CALLBACK_OFS2SLOT(MSize ofs)
+{
+ MSize group;
+ ofs -= CALLBACK_MCODE_HEAD;
+ group = ofs / (32*4 + CALLBACK_MCODE_GROUP);
+ return (ofs % (32*4 + CALLBACK_MCODE_GROUP))/4 + group*32;
+}
+
+#define CALLBACK_MAX_SLOT \
+ (((CALLBACK_MCODE_SIZE-CALLBACK_MCODE_HEAD)/(CALLBACK_MCODE_GROUP+4*32))*32)
+
+#elif LJ_TARGET_ARM
+
+#define CALLBACK_MCODE_HEAD 32
+#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
+#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
+
+#elif LJ_TARGET_PPC
+
+#define CALLBACK_MCODE_HEAD 24
+#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
+#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
+
+#elif LJ_TARGET_MIPS
+
+#define CALLBACK_MCODE_HEAD 24
+#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
+#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
+
+#else
+
+/* Missing support for this architecture. */
+#define CALLBACK_SLOT2OFS(slot) (0*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (0*(ofs))
+#define CALLBACK_MAX_SLOT 0
+
+#endif
+
+/* Convert callback slot number to callback function pointer. */
+static void *callback_slot2ptr(CTState *cts, MSize slot)
+{
+ return (uint8_t *)cts->cb.mcode + CALLBACK_SLOT2OFS(slot);
+}
+
+/* Convert callback function pointer to slot number. */
+MSize lj_ccallback_ptr2slot(CTState *cts, void *p)
+{
+ uintptr_t ofs = (uintptr_t)((uint8_t *)p -(uint8_t *)cts->cb.mcode);
+ if (ofs < CALLBACK_MCODE_SIZE) {
+ MSize slot = CALLBACK_OFS2SLOT((MSize)ofs);
+ if (CALLBACK_SLOT2OFS(slot) == (MSize)ofs)
+ return slot;
+ }
+ return ~0u; /* Not a known callback function pointer. */
+}
+
+/* Initialize machine code for callback function pointers. */
+#if LJ_TARGET_X86ORX64
+static void callback_mcode_init(global_State *g, uint8_t *page)
+{
+ uint8_t *p = page;
+ uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback;
+ MSize slot;
+#if LJ_64
+ *(void **)p = target; p += 8;
+#endif
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ /* mov al, slot; jmp group */
+ *p++ = XI_MOVrib | RID_EAX; *p++ = (uint8_t)slot;
+ if ((slot & 31) == 31 || slot == CALLBACK_MAX_SLOT-1) {
+ /* push ebp/rbp; mov ah, slot>>8; mov ebp, &g. */
+ *p++ = XI_PUSH + RID_EBP;
+ *p++ = XI_MOVrib | (RID_EAX+4); *p++ = (uint8_t)(slot >> 8);
+ *p++ = XI_MOVri | RID_EBP;
+ *(int32_t *)p = i32ptr(g); p += 4;
+#if LJ_64
+ /* jmp [rip-pageofs] where lj_vm_ffi_callback is stored. */
+ *p++ = XI_GROUP5; *p++ = XM_OFS0 + (XOg_JMP<<3) + RID_EBP;
+ *(int32_t *)p = (int32_t)(page-(p+4)); p += 4;
+#else
+ /* jmp lj_vm_ffi_callback. */
+ *p++ = XI_JMP; *(int32_t *)p = target-(p+4); p += 4;
+#endif
+ } else {
+ *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2);
+ }
+ }
+ lua_assert(p - page <= CALLBACK_MCODE_SIZE);
+}
+#elif LJ_TARGET_ARM
+static void callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ void *target = (void *)lj_vm_ffi_callback;
+ MSize slot;
+ /* This must match with the saveregs macro in buildvm_arm.dasc. */
+ *p++ = ARMI_SUB|ARMF_D(RID_R12)|ARMF_N(RID_R12)|ARMF_M(RID_PC);
+ *p++ = ARMI_PUSH|ARMF_N(RID_SP)|RSET_RANGE(RID_R4,RID_R11+1)|RID2RSET(RID_LR);
+ *p++ = ARMI_SUB|ARMI_K12|ARMF_D(RID_R12)|ARMF_N(RID_R12)|CALLBACK_MCODE_HEAD;
+ *p++ = ARMI_STR|ARMI_LS_P|ARMI_LS_W|ARMF_D(RID_R12)|ARMF_N(RID_SP)|(CFRAME_SIZE-4*9);
+ *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_R12)|ARMF_N(RID_PC);
+ *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_PC)|ARMF_N(RID_PC);
+ *p++ = u32ptr(g);
+ *p++ = u32ptr(target);
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p++ = ARMI_MOV|ARMF_D(RID_R12)|ARMF_M(RID_PC);
+ *p = ARMI_B | ((page-p-2) & 0x00ffffffu);
+ p++;
+ }
+ lua_assert(p - page <= CALLBACK_MCODE_SIZE);
+}
+#elif LJ_TARGET_PPC
+static void callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ void *target = (void *)lj_vm_ffi_callback;
+ MSize slot;
+ *p++ = PPCI_LIS | PPCF_T(RID_TMP) | (u32ptr(target) >> 16);
+ *p++ = PPCI_LIS | PPCF_T(RID_R12) | (u32ptr(g) >> 16);
+ *p++ = PPCI_ORI | PPCF_A(RID_TMP)|PPCF_T(RID_TMP) | (u32ptr(target) & 0xffff);
+ *p++ = PPCI_ORI | PPCF_A(RID_R12)|PPCF_T(RID_R12) | (u32ptr(g) & 0xffff);
+ *p++ = PPCI_MTCTR | PPCF_T(RID_TMP);
+ *p++ = PPCI_BCTR;
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p++ = PPCI_LI | PPCF_T(RID_R11) | slot;
+ *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2);
+ p++;
+ }
+ lua_assert(p - page <= CALLBACK_MCODE_SIZE);
+}
+#elif LJ_TARGET_MIPS
+static void callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ void *target = (void *)lj_vm_ffi_callback;
+ MSize slot;
+ *p++ = MIPSI_SW | MIPSF_T(RID_R1)|MIPSF_S(RID_SP) | 0;
+ *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (u32ptr(target) >> 16);
+ *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (u32ptr(g) >> 16);
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) |(u32ptr(target)&0xffff);
+ *p++ = MIPSI_JR | MIPSF_S(RID_R3);
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | (u32ptr(g)&0xffff);
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p = MIPSI_B | ((page-p-1) & 0x0000ffffu);
+ p++;
+ *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot;
+ }
+ lua_assert(p - page <= CALLBACK_MCODE_SIZE);
+}
+#else
+/* Missing support for this architecture. */
+#define callback_mcode_init(g, p) UNUSED(p)
+#endif
+
+/* -- Machine code management --------------------------------------------- */
+
+#if LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#elif LJ_TARGET_POSIX
+
+#include <sys/mman.h>
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#endif
+
+/* Allocate and initialize area for callback function pointers. */
+static void callback_mcode_new(CTState *cts)
+{
+ size_t sz = (size_t)CALLBACK_MCODE_SIZE;
+ void *p;
+ if (CALLBACK_MAX_SLOT == 0)
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+#if LJ_TARGET_WINDOWS
+ p = VirtualAlloc(NULL, sz, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+ if (!p)
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+#elif LJ_TARGET_POSIX
+ p = mmap(NULL, sz, (PROT_READ|PROT_WRITE), MAP_PRIVATE|MAP_ANONYMOUS,
+ -1, 0);
+ if (p == MAP_FAILED)
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+#else
+ /* Fallback allocator. Fails if memory is not executable by default. */
+ p = lj_mem_new(cts->L, sz);
+#endif
+ cts->cb.mcode = p;
+ callback_mcode_init(cts->g, p);
+ lj_mcode_sync(p, (char *)p + sz);
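+ /* Code is in place; flip the pages from writable to read/execute below. */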
+#if LJ_TARGET_WINDOWS
+ {
+ DWORD oprot;
+ VirtualProtect(p, sz, PAGE_EXECUTE_READ, &oprot);
+ }
+#elif LJ_TARGET_POSIX
+ mprotect(p, sz, (PROT_READ|PROT_EXEC));
+#endif
+}
+
+/* Free area for callback function pointers. */
+void lj_ccallback_mcode_free(CTState *cts)
+{
+ size_t sz = (size_t)CALLBACK_MCODE_SIZE;
+ void *p = cts->cb.mcode;
+ if (p == NULL) return;
+#if LJ_TARGET_WINDOWS
+ VirtualFree(p, 0, MEM_RELEASE);
+ UNUSED(sz);
+#elif LJ_TARGET_POSIX
+ munmap(p, sz);
+#else
+ lj_mem_free(cts->g, p, sz);
+#endif
+}
+
+/* -- C callback entry ---------------------------------------------------- */
+
+/* Target-specific handling of register arguments. Similar to lj_ccall.c. */
+#if LJ_TARGET_X86
+
+#define CALLBACK_HANDLE_REGARG \
+ if (!isfp) { /* Only non-FP values may be passed in registers. */ \
+ if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \
+ if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \
+ } else if (ngpr + 1 <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_X64 && LJ_ABI_WIN
+
+/* Windows/x64 argument registers are strictly positional (use ngpr). */
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (ngpr < 4) { sp = &cts->cb.fpr[ngpr++]; nfpr = ngpr; goto done; } \
+ } else { \
+ if (ngpr < 4) { sp = &cts->cb.gpr[ngpr++]; goto done; } \
+ }
+
+#elif LJ_TARGET_X64
+
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (nfpr + n <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr]; \
+ nfpr += n; \
+ goto done; \
+ } \
+ } else { \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_ARM
+
+#define CALLBACK_HANDLE_REGARG \
+ UNUSED(isfp); \
+ if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ }
+
+#elif LJ_TARGET_PPC
+
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr]; \
+ nfpr += 1; \
+ cta = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
+ goto done; \
+ } \
+ } else { /* Try to pass argument in GPRs. */ \
+ if (n > 1) { \
+ lua_assert(ctype_isinteger(cta->info) && n == 2); /* int64_t. */ \
+ ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
+ } \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#define CALLBACK_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ *(double *)dp = *(float *)dp; /* FPRs always hold doubles. */
+
+#elif LJ_TARGET_MIPS
+
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp && nfpr < CCALL_NARG_FPR) { /* Try to pass argument in FPRs. */ \
+ sp = (void *)((uint8_t *)&cts->cb.fpr[nfpr] + ((LJ_BE && n==1) ? 4 : 0)); \
+ nfpr++; ngpr += n; \
+ goto done; \
+ } else { /* Try to pass argument in GPRs. */ \
+ nfpr = CCALL_NARG_FPR; \
+ if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#define CALLBACK_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ ((float *)dp)[1] = *(float *)dp;
+
+#else
+#error "Missing calling convention definitions for this architecture"
+#endif
+
+/* Convert and push callback arguments to Lua stack. */
+static void callback_conv_args(CTState *cts, lua_State *L)
+{
+ TValue *o = L->top;
+ intptr_t *stack = cts->cb.stack;
+ MSize slot = cts->cb.slot;
+ CTypeID id = 0, rid, fid;
+ CType *ct;
+ GCfunc *fn;
+ MSize ngpr = 0, nsp = 0, maxgpr = CCALL_NARG_GPR;
+#if CCALL_NARG_FPR
+ MSize nfpr = 0;
+#endif
+
+ if (slot < cts->cb.sizeid && (id = cts->cb.cbid[slot]) != 0) {
+ ct = ctype_get(cts, id);
+ rid = ctype_cid(ct->info);
+ fn = funcV(lj_tab_getint(cts->miscmap, (int32_t)slot));
+ } else { /* Must set up frame first, before throwing the error. */
+ ct = NULL;
+ rid = 0;
+ fn = (GCfunc *)L;
+ }
+ o->u32.lo = LJ_CONT_FFI_CALLBACK; /* Continuation returns from callback. */
+ o->u32.hi = rid; /* Return type. x86: +(spadj<<16). */
+ o++;
+ setframe_gc(o, obj2gco(fn));
+ setframe_ftsz(o, (int)((char *)(o+1) - (char *)L->base) + FRAME_CONT);
+ L->top = L->base = ++o;
+ if (!ct)
+ lj_err_caller(cts->L, LJ_ERR_FFI_BADCBACK);
+ if (isluafunc(fn))
+ setcframe_pc(L->cframe, proto_bc(funcproto(fn))+1);
+ lj_state_checkstack(L, LUA_MINSTACK); /* May throw. */
+ o = L->base; /* Might have been reallocated. */
+
+#if LJ_TARGET_X86
+ /* x86 has several different calling conventions. */
+ switch (ctype_cconv(ct->info)) {
+ case CTCC_FASTCALL: maxgpr = 2; break;
+ case CTCC_THISCALL: maxgpr = 1; break;
+ default: maxgpr = 0; break;
+ }
+#endif
+
+ fid = ct->sib;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) {
+ CType *cta;
+ void *sp;
+ CTSize sz;
+ int isfp;
+ MSize n;
+ lua_assert(ctype_isfield(ctf->info));
+ cta = ctype_rawchild(cts, ctf);
+ if (ctype_isenum(cta->info)) cta = ctype_child(cts, cta);
+ isfp = ctype_isfp(cta->info);
+ sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
+ n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */
+
+ CALLBACK_HANDLE_REGARG /* Handle register arguments. */
+
+ /* Otherwise pass argument on stack. */
+ if (CCALL_ALIGN_STACKARG && LJ_32 && sz == 8)
+ nsp = (nsp + 1) & ~1u; /* Align 64 bit argument on stack. */
+ sp = &stack[nsp];
+ nsp += n;
+
+ done:
+ if (LJ_BE && cta->size < CTSIZE_PTR)
+ sp = (void *)((uint8_t *)sp + CTSIZE_PTR-cta->size);
+ lj_cconv_tv_ct(cts, cta, 0, o++, sp);
+ }
+ fid = ctf->sib;
+ }
+ L->top = o;
+#if LJ_TARGET_X86
+ /* Store stack adjustment for returns from fastcall/stdcall callbacks. */
+ switch (ctype_cconv(ct->info)) {
+ case CTCC_FASTCALL: case CTCC_STDCALL:
+ (L->base-2)->u32.hi |= (nsp << (16+2));
+ break;
+ }
+#endif
+}
+
+/* Convert Lua object to callback result. */
+static void callback_conv_result(CTState *cts, lua_State *L, TValue *o)
+{
+ CType *ctr = ctype_raw(cts, (uint16_t)(L->base-2)->u32.hi);
+#if LJ_TARGET_X86
+ cts->cb.gpr[2] = 0;
+#endif
+ if (!ctype_isvoid(ctr->info)) {
+ uint8_t *dp = (uint8_t *)&cts->cb.gpr[0];
+#if CCALL_NUM_FPR
+ if (ctype_isfp(ctr->info))
+ dp = (uint8_t *)&cts->cb.fpr[0];
+#endif
+ lj_cconv_ct_tv(cts, ctr, dp, o, 0);
+#ifdef CALLBACK_HANDLE_RET
+ CALLBACK_HANDLE_RET
+#endif
+ /* Extend returned integers to (at least) 32 bits. */
+ if (ctype_isinteger_or_bool(ctr->info) && ctr->size < 4) {
+ if (ctr->info & CTF_UNSIGNED)
+ *(uint32_t *)dp = ctr->size == 1 ? (uint32_t)*(uint8_t *)dp :
+ (uint32_t)*(uint16_t *)dp;
+ else
+ *(int32_t *)dp = ctr->size == 1 ? (int32_t)*(int8_t *)dp :
+ (int32_t)*(int16_t *)dp;
+ }
+#if LJ_TARGET_X86
+ if (ctype_isfp(ctr->info))
+ cts->cb.gpr[2] = ctr->size == sizeof(float) ? 1 : 2;
+#endif
+ }
+}
+
+/* Enter callback. */
+lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf)
+{
+ lua_State *L = cts->L;
+ lua_assert(L != NULL);
+ if (gcref(cts->g->jit_L))
+ lj_err_caller(gco2th(gcref(cts->g->jit_L)), LJ_ERR_FFI_BADCBACK);
+ lj_trace_abort(cts->g); /* Never record across callback. */
+ /* Setup C frame. */
+ cframe_prev(cf) = L->cframe;
+ setcframe_L(cf, L);
+ cframe_errfunc(cf) = -1;
+ cframe_nres(cf) = 0;
+ L->cframe = cf;
+ callback_conv_args(cts, L);
+ return L; /* Now call the function on this stack. */
+}
+
+/* Leave callback. */
+void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o)
+{
+ lua_State *L = cts->L;
+ GCfunc *fn;
+ TValue *obase = L->base;
+ L->base = L->top; /* Keep continuation frame for throwing errors. */
+ if (o >= L->base) {
+ /* PC of RET* is lost. Point to last line for result conv. errors. */
+ fn = curr_func(L);
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ setcframe_pc(L->cframe, proto_bc(pt)+pt->sizebc+1);
+ }
+ }
+ callback_conv_result(cts, L, o);
+ /* Finally drop C frame and continuation frame. */
+ L->cframe = cframe_prev(L->cframe);
+ L->top -= 2;
+ L->base = obase;
+ cts->cb.slot = 0; /* Blacklist C function that called the callback. */
+}
+
+/* -- C callback management ----------------------------------------------- */
+
+/* Get an unused slot in the callback slot table. */
+static MSize callback_slot_new(CTState *cts, CType *ct)
+{
+ CTypeID id = ctype_typeid(cts, ct);
+ CTypeID1 *cbid = cts->cb.cbid;
+ MSize top;
+ for (top = cts->cb.topid; top < cts->cb.sizeid; top++)
+ if (LJ_LIKELY(cbid[top] == 0))
+ goto found;
+#if CALLBACK_MAX_SLOT
+ if (top >= CALLBACK_MAX_SLOT)
+#endif
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+ if (!cts->cb.mcode)
+ callback_mcode_new(cts);
+ lj_mem_growvec(cts->L, cbid, cts->cb.sizeid, CALLBACK_MAX_SLOT, CTypeID1);
+ cts->cb.cbid = cbid;
+ memset(cbid+top, 0, (cts->cb.sizeid-top)*sizeof(CTypeID1));
+found:
+ cbid[top] = id;
+ cts->cb.topid = top+1;
+ return top;
+}
+
+/* Check for function pointer and supported argument/result types. */
+static CType *callback_checkfunc(CTState *cts, CType *ct)
+{
+ int narg = 0;
+ if (!ctype_isptr(ct->info) || (LJ_64 && ct->size != CTSIZE_PTR))
+ return NULL;
+ ct = ctype_rawchild(cts, ct);
+ if (ctype_isfunc(ct->info)) {
+ CType *ctr = ctype_rawchild(cts, ct);
+ CTypeID fid = ct->sib;
+ if (!(ctype_isvoid(ctr->info) || ctype_isenum(ctr->info) ||
+ ctype_isptr(ctr->info) || (ctype_isnum(ctr->info) && ctr->size <= 8)))
+ return NULL;
+ if ((ct->info & CTF_VARARG))
+ return NULL;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) {
+ CType *cta;
+ lua_assert(ctype_isfield(ctf->info));
+ cta = ctype_rawchild(cts, ctf);
+ if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) ||
+ (ctype_isnum(cta->info) && cta->size <= 8)) ||
+ ++narg >= LUA_MINSTACK-3)
+ return NULL;
+ }
+ fid = ctf->sib;
+ }
+ return ct;
+ }
+ return NULL;
+}
+
+/* Create a new callback and return the callback function pointer. */
+void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn)
+{
+ ct = callback_checkfunc(cts, ct);
+ if (ct) {
+ MSize slot = callback_slot_new(cts, ct);
+ GCtab *t = cts->miscmap;
+ setfuncV(cts->L, lj_tab_setint(cts->L, t, (int32_t)slot), fn);
+ lj_gc_anybarriert(cts->L, t);
+ return callback_slot2ptr(cts, slot);
+ }
+ return NULL; /* Bad conversion. */
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_ccallback.h b/src/LuaJIT/src/lj_ccallback.h
new file mode 100644
index 000000000..ba92cc9ee
--- /dev/null
+++ b/src/LuaJIT/src/lj_ccallback.h
@@ -0,0 +1,25 @@
+/*
+** FFI C callback handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CCALLBACK_H
+#define _LJ_CCALLBACK_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* Really belongs to lj_vm.h. */
+LJ_ASMF void lj_vm_ffi_callback(void);
+
+LJ_FUNC MSize lj_ccallback_ptr2slot(CTState *cts, void *p);
+LJ_FUNCA lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf);
+LJ_FUNCA void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o);
+LJ_FUNC void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn);
+LJ_FUNC void lj_ccallback_mcode_free(CTState *cts);
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_cconv.c b/src/LuaJIT/src/lj_cconv.c
new file mode 100644
index 000000000..240a8d10d
--- /dev/null
+++ b/src/LuaJIT/src/lj_cconv.c
@@ -0,0 +1,744 @@
+/*
+** C type conversions.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#include "lj_ccallback.h"
+
+/* -- Conversion errors --------------------------------------------------- */
+
+/* Bad conversion. */
+LJ_NORET static void cconv_err_conv(CTState *cts, CType *d, CType *s,
+ CTInfo flags)
+{
+ const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
+ const char *src;
+ if ((flags & CCF_FROMTV))
+ src = lj_obj_typename[1+(ctype_isnum(s->info) ? LUA_TNUMBER :
+ ctype_isarray(s->info) ? LUA_TSTRING : LUA_TNIL)];
+ else
+ src = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, s), NULL));
+ if (CCF_GETARG(flags))
+ lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst);
+ else
+ lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst);
+}
+
+/* Bad conversion from TValue. */
+LJ_NORET static void cconv_err_convtv(CTState *cts, CType *d, TValue *o,
+ CTInfo flags)
+{
+ const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
+ const char *src = typename(o);
+ if (CCF_GETARG(flags))
+ lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst);
+ else
+ lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst);
+}
+
+/* Initializer overflow. */
+LJ_NORET static void cconv_err_initov(CTState *cts, CType *d)
+{
+ const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
+ lj_err_callerv(cts->L, LJ_ERR_FFI_INITOV, dst);
+}
+
+/* -- C type compatibility checks ----------------------------------------- */
+
+/* Get raw type and qualifiers for a child type. Resolves enums, too. */
+static CType *cconv_childqual(CTState *cts, CType *ct, CTInfo *qual)
+{
+ ct = ctype_child(cts, ct);
+ for (;;) {
+ if (ctype_isattrib(ct->info)) {
+ if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
+ } else if (!ctype_isenum(ct->info)) {
+ break;
+ }
+ ct = ctype_child(cts, ct);
+ }
+ *qual |= (ct->info & CTF_QUAL);
+ return ct;
+}
+
+/* Check for compatible types when converting to a pointer.
+** Note: these checks are more relaxed than what C99 mandates.
+*/
+int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags)
+{
+ if (!((flags & CCF_CAST) || d == s)) {
+ CTInfo dqual = 0, squal = 0;
+ d = cconv_childqual(cts, d, &dqual);
+ if (!ctype_isstruct(s->info))
+ s = cconv_childqual(cts, s, &squal);
+ if ((flags & CCF_SAME)) {
+ if (dqual != squal)
+ return 0; /* Different qualifiers. */
+ } else if (!(flags & CCF_IGNQUAL)) {
+ if ((dqual & squal) != squal)
+ return 0; /* Discarded qualifiers. */
+ if (ctype_isvoid(d->info) || ctype_isvoid(s->info))
+ return 1; /* Converting to/from void * is always ok. */
+ }
+ if (ctype_type(d->info) != ctype_type(s->info) ||
+ d->size != s->size)
+ return 0; /* Different type or different size. */
+ if (ctype_isnum(d->info)) {
+ if (((d->info ^ s->info) & (CTF_BOOL|CTF_FP)))
+ return 0; /* Different numeric types. */
+ } else if (ctype_ispointer(d->info)) {
+ /* Check child types for compatibility. */
+ return lj_cconv_compatptr(cts, d, s, flags|CCF_SAME);
+ } else if (ctype_isstruct(d->info)) {
+ if (d != s)
+ return 0; /* Must be exact same type for struct/union. */
+ } else if (ctype_isfunc(d->info)) {
+ /* NYI: structural equality of functions. */
+ }
+ }
+ return 1; /* Types are compatible. */
+}
+
+/* -- C type to C type conversion ----------------------------------------- */
+
+/* Convert C type to C type. Caveat: expects to get the raw CType!
+**
+** Note: This is only used by the interpreter and not optimized at all.
+** The JIT compiler will do a much better job specializing for each case.
+*/
+void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
+ uint8_t *dp, uint8_t *sp, CTInfo flags)
+{
+ CTSize dsize = d->size, ssize = s->size;
+ CTInfo dinfo = d->info, sinfo = s->info;
+ void *tmpptr;
+
+ lua_assert(!ctype_isenum(dinfo) && !ctype_isenum(sinfo));
+ lua_assert(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo));
+
+ if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT)
+ goto err_conv;
+
+ /* Some basic sanity checks. */
+ lua_assert(!ctype_isnum(dinfo) || dsize > 0);
+ lua_assert(!ctype_isnum(sinfo) || ssize > 0);
+ lua_assert(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4);
+ lua_assert(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4);
+ lua_assert(!ctype_isinteger(dinfo) || (1u<<lj_fls(dsize)) == dsize);
+ lua_assert(!ctype_isinteger(sinfo) || (1u<<lj_fls(ssize)) == ssize);
+
+ switch (cconv_idx2(dinfo, sinfo)) {
+ /* Destination is an integer. */
+ case CCX(I, B):
+ case CCX(I, I):
+ conv_I_I:
+ if (dsize > ssize) { /* Zero-extend or sign-extend LSB. */
+#if LJ_LE
+ uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[ssize-1]&0x80)) ? 0xff : 0;
+ memcpy(dp, sp, ssize);
+ memset(dp + ssize, fill, dsize-ssize);
+#else
+ uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[0]&0x80)) ? 0xff : 0;
+ memset(dp, fill, dsize-ssize);
+ memcpy(dp + (dsize-ssize), sp, ssize);
+#endif
+ } else { /* Copy LSB. */
+#if LJ_LE
+ memcpy(dp, sp, dsize);
+#else
+ memcpy(dp, sp + (ssize-dsize), dsize);
+#endif
+ }
+ break;
+ case CCX(I, F): {
+ double n; /* Always convert via double. */
+ conv_I_F:
+ /* Convert source to double. */
+ if (ssize == sizeof(double)) n = *(double *)sp;
+ else if (ssize == sizeof(float)) n = (double)*(float *)sp;
+ else goto err_conv; /* NYI: long double. */
+ /* Then convert double to integer. */
+ /* The conversion must exactly match the semantics of JIT-compiled code! */
+ if (dsize < 4 || (dsize == 4 && !(dinfo & CTF_UNSIGNED))) {
+ int32_t i = (int32_t)n;
+ if (dsize == 4) *(int32_t *)dp = i;
+ else if (dsize == 2) *(int16_t *)dp = (int16_t)i;
+ else *(int8_t *)dp = (int8_t)i;
+ } else if (dsize == 4) {
+ *(uint32_t *)dp = (uint32_t)n;
+ } else if (dsize == 8) {
+ if (!(dinfo & CTF_UNSIGNED))
+ *(int64_t *)dp = (int64_t)n;
+ else
+ *(uint64_t *)dp = lj_num2u64(n);
+ } else {
+ goto err_conv; /* NYI: conversion to >64 bit integers. */
+ }
+ break;
+ }
+ case CCX(I, C):
+ s = ctype_child(cts, s);
+ sinfo = s->info;
+ ssize = s->size;
+ goto conv_I_F; /* Just convert re. */
+ case CCX(I, P):
+ if (!(flags & CCF_CAST)) goto err_conv;
+ sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ goto conv_I_I;
+ case CCX(I, A):
+ if (!(flags & CCF_CAST)) goto err_conv;
+ sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ ssize = CTSIZE_PTR;
+ tmpptr = sp;
+ sp = (uint8_t *)&tmpptr;
+ goto conv_I_I;
+
+ /* Destination is a floating-point number. */
+ case CCX(F, B):
+ case CCX(F, I): {
+ double n; /* Always convert via double. */
+ conv_F_I:
+ /* First convert source to double. */
+ /* The conversion must exactly match the semantics of JIT-compiled code! */
+ if (ssize < 4 || (ssize == 4 && !(sinfo & CTF_UNSIGNED))) {
+ int32_t i;
+ if (ssize == 4) {
+ i = *(int32_t *)sp;
+ } else if (!(sinfo & CTF_UNSIGNED)) {
+ if (ssize == 2) i = *(int16_t *)sp;
+ else i = *(int8_t *)sp;
+ } else {
+ if (ssize == 2) i = *(uint16_t *)sp;
+ else i = *(uint8_t *)sp;
+ }
+ n = (double)i;
+ } else if (ssize == 4) {
+ n = (double)*(uint32_t *)sp;
+ } else if (ssize == 8) {
+ if (!(sinfo & CTF_UNSIGNED)) n = (double)*(int64_t *)sp;
+ else n = (double)*(uint64_t *)sp;
+ } else {
+ goto err_conv; /* NYI: conversion from >64 bit integers. */
+ }
+ /* Convert double to destination. */
+ if (dsize == sizeof(double)) *(double *)dp = n;
+ else if (dsize == sizeof(float)) *(float *)dp = (float)n;
+ else goto err_conv; /* NYI: long double. */
+ break;
+ }
+ case CCX(F, F): {
+ double n; /* Always convert via double. */
+ conv_F_F:
+ if (ssize == dsize) goto copyval;
+ /* Convert source to double. */
+ if (ssize == sizeof(double)) n = *(double *)sp;
+ else if (ssize == sizeof(float)) n = (double)*(float *)sp;
+ else goto err_conv; /* NYI: long double. */
+ /* Convert double to destination. */
+ if (dsize == sizeof(double)) *(double *)dp = n;
+ else if (dsize == sizeof(float)) *(float *)dp = (float)n;
+ else goto err_conv; /* NYI: long double. */
+ break;
+ }
+ case CCX(F, C):
+ s = ctype_child(cts, s);
+ sinfo = s->info;
+ ssize = s->size;
+ goto conv_F_F; /* Ignore im, and convert from re. */
+
+ /* Destination is a complex number. */
+ case CCX(C, I):
+ d = ctype_child(cts, d);
+ dinfo = d->info;
+ dsize = d->size;
+ memset(dp + dsize, 0, dsize); /* Clear im. */
+ goto conv_F_I; /* Convert to re. */
+ case CCX(C, F):
+ d = ctype_child(cts, d);
+ dinfo = d->info;
+ dsize = d->size;
+ memset(dp + dsize, 0, dsize); /* Clear im. */
+ goto conv_F_F; /* Convert to re. */
+
+ case CCX(C, C):
+ if (dsize != ssize) { /* Different types: convert re/im separately. */
+ CType *dc = ctype_child(cts, d);
+ CType *sc = ctype_child(cts, s);
+ lj_cconv_ct_ct(cts, dc, sc, dp, sp, flags);
+ lj_cconv_ct_ct(cts, dc, sc, dp + dc->size, sp + sc->size, flags);
+ return;
+ }
+ goto copyval; /* Otherwise this is easy. */
+
+ /* Destination is a vector. */
+ case CCX(V, I):
+ case CCX(V, F):
+ case CCX(V, C): {
+ CType *dc = ctype_child(cts, d);
+ CTSize esize;
+ /* First convert the scalar to the first element. */
+ lj_cconv_ct_ct(cts, dc, s, dp, sp, flags);
+ /* Then replicate it to the other elements (splat). */
+ for (sp = dp, esize = dc->size; dsize > esize; dsize -= esize) {
+ dp += esize;
+ memcpy(dp, sp, esize);
+ }
+ break;
+ }
+
+ case CCX(V, V):
+ /* Copy same-sized vectors, even for different lengths/element-types. */
+ if (dsize != ssize) goto err_conv;
+ goto copyval;
+
+ /* Destination is a pointer. */
+ case CCX(P, I):
+ if (!(flags & CCF_CAST)) goto err_conv;
+ dinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ goto conv_I_I;
+
+ case CCX(P, F):
+ if (!(flags & CCF_CAST) || !(flags & CCF_FROMTV)) goto err_conv;
+ /* The signed conversion is cheaper. x64 really has 47 bit pointers. */
+ dinfo = CTINFO(CT_NUM, (LJ_64 && dsize == 8) ? 0 : CTF_UNSIGNED);
+ goto conv_I_F;
+
+ case CCX(P, P):
+ if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv;
+ cdata_setptr(dp, dsize, cdata_getptr(sp, ssize));
+ break;
+
+ case CCX(P, A):
+ case CCX(P, S):
+ if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv;
+ cdata_setptr(dp, dsize, sp);
+ break;
+
+ /* Destination is an array. */
+ case CCX(A, A):
+ if ((flags & CCF_CAST) || (d->info & CTF_VLA) || dsize != ssize ||
+ d->size == CTSIZE_INVALID || !lj_cconv_compatptr(cts, d, s, flags))
+ goto err_conv;
+ goto copyval;
+
+ /* Destination is a struct/union. */
+ case CCX(S, S):
+ if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s)
+ goto err_conv; /* Must be exact same type. */
+copyval: /* Copy value. */
+ lua_assert(dsize == ssize);
+ memcpy(dp, sp, dsize);
+ break;
+
+ default:
+ err_conv:
+ cconv_err_conv(cts, d, s, flags);
+ }
+}
+
+/* -- C type to TValue conversion ----------------------------------------- */
+
+/* Convert C type to TValue. Caveat: expects to get the raw CType! */
+int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
+ TValue *o, uint8_t *sp)
+{
+ CTInfo sinfo = s->info;
+ lua_assert(!ctype_isenum(sinfo));
+ if (ctype_isnum(sinfo)) {
+ if (!ctype_isbool(sinfo)) {
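+ /* Integers wider than 32 bits are boxed as cdata below rather than
+ ** converted to a Lua number, so no precision is lost.
+ */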
+ if (ctype_isinteger(sinfo) && s->size > 4) goto copyval;
+ if (LJ_DUALNUM && ctype_isinteger(sinfo)) {
+ int32_t i;
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT32), s,
+ (uint8_t *)&i, sp, 0);
+ if ((sinfo & CTF_UNSIGNED) && i < 0)
+ setnumV(o, (lua_Number)(uint32_t)i);
+ else
+ setintV(o, i);
+ } else {
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s,
+ (uint8_t *)&o->n, sp, 0);
+ /* Numbers are NOT canonicalized here! Beware of uninitialized data. */
+ lua_assert(tvisnum(o));
+ }
+ } else {
+ uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0);
+ setboolV(o, b);
+ setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */
+ }
+ return 0;
+ } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) {
+ /* Create reference. */
+ setcdataV(cts->L, o, lj_cdata_newref(cts, sp, sid));
+ return 1; /* Need GC step. */
+ } else {
+ GCcdata *cd;
+ CTSize sz;
+ copyval: /* Copy value. */
+ sz = s->size;
+ lua_assert(sz != CTSIZE_INVALID);
+ /* Attributes are stripped, qualifiers are kept (but mostly ignored). */
+ cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz);
+ setcdataV(cts->L, o, cd);
+ memcpy(cdataptr(cd), sp, sz);
+ return 1; /* Need GC step. */
+ }
+}
+
+/* Convert bitfield to TValue. */
+int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp)
+{
+ CTInfo info = s->info;
+ CTSize pos, bsz;
+ uint32_t val;
+ lua_assert(ctype_isbitfield(info));
+ /* NYI: packed bitfields may cause misaligned reads. */
+ switch (ctype_bitcsz(info)) {
+ case 4: val = *(uint32_t *)sp; break;
+ case 2: val = *(uint16_t *)sp; break;
+ case 1: val = *(uint8_t *)sp; break;
+ default: lua_assert(0); val = 0; break;
+ }
+ /* Check if a packed bitfield crosses a container boundary. */
+ pos = ctype_bitpos(info);
+ bsz = ctype_bitbsz(info);
+ lua_assert(pos < 8*ctype_bitcsz(info));
+ lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info));
+ if (pos + bsz > 8*ctype_bitcsz(info))
+ lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
+ if (!(info & CTF_BOOL)) {
+ CTSize shift = 32 - bsz;
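+ /* Shift the field up so its most significant bit lands at bit 31, then
+ ** shift back down: arithmetic shift sign-extends signed fields, logical
+ ** shift zero-extends unsigned fields.
+ */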
+ if (!(info & CTF_UNSIGNED)) {
+ setintV(o, (int32_t)(val << (shift-pos)) >> shift);
+ } else {
+ val = (val << (shift-pos)) >> shift;
+ if (!LJ_DUALNUM || (int32_t)val < 0)
+ setnumV(o, (lua_Number)(uint32_t)val);
+ else
+ setintV(o, (int32_t)val);
+ }
+ } else {
+ lua_assert(bsz == 1);
+ setboolV(o, (val >> pos) & 1);
+ }
+ return 0; /* No GC step needed. */
+}
+
+/* -- TValue to C type conversion ----------------------------------------- */
+
+/* Convert table to array. */
+static void cconv_array_tab(CTState *cts, CType *d,
+ uint8_t *dp, GCtab *t, CTInfo flags)
+{
+ int32_t i;
+ CType *dc = ctype_rawchild(cts, d); /* Array element type. */
+ CTSize size = d->size, esize = dc->size, ofs = 0;
+ for (i = 0; ; i++) {
+ TValue *tv = (TValue *)lj_tab_getint(t, i);
+ if (!tv || tvisnil(tv)) {
+ if (i == 0) continue; /* Try again for 1-based tables. */
+ break; /* Stop at first nil. */
+ }
+ if (ofs >= size)
+ cconv_err_initov(cts, d);
+ lj_cconv_ct_tv(cts, dc, dp + ofs, tv, flags);
+ ofs += esize;
+ }
+ if (size != CTSIZE_INVALID) { /* Only fill up arrays with known size. */
+ if (ofs == esize) { /* Replicate a single element. */
+ for (; ofs < size; ofs += esize) memcpy(dp + ofs, dp, esize);
+ } else { /* Otherwise fill the remainder with zero. */
+ memset(dp + ofs, 0, size - ofs);
+ }
+ }
+}
+
+/* Convert table to sub-struct/union. */
+static void cconv_substruct_tab(CTState *cts, CType *d, uint8_t *dp,
+ GCtab *t, int32_t *ip, CTInfo flags)
+{
+ CTypeID id = d->sib;
+ while (id) {
+ CType *df = ctype_get(cts, id);
+ id = df->sib;
+ if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) {
+ TValue *tv;
+ int32_t i = *ip;
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ if (i >= 0) {
+ retry:
+ tv = (TValue *)lj_tab_getint(t, i);
+ if (!tv || tvisnil(tv)) {
+ if (i == 0) { i = 1; goto retry; } /* 1-based tables. */
+ break; /* Stop at first nil. */
+ }
+ *ip = i + 1;
+ } else {
+ tv = (TValue *)lj_tab_getstr(t, gco2str(gcref(df->name)));
+ if (!tv || tvisnil(tv)) continue;
+ }
+ if (ctype_isfield(df->info))
+ lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, tv, flags);
+ else
+ lj_cconv_bf_tv(cts, df, dp+df->size, tv);
+ if ((d->info & CTF_UNION)) break;
+ } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) {
+ cconv_substruct_tab(cts, ctype_child(cts, df), dp+df->size, t, ip, flags);
+ } /* Ignore all other entries in the chain. */
+ }
+}
+
+/* Convert table to struct/union. */
+static void cconv_struct_tab(CTState *cts, CType *d,
+ uint8_t *dp, GCtab *t, CTInfo flags)
+{
+ int32_t i = 0;
+ memset(dp, 0, d->size); /* Much simpler to clear the struct first. */
+ if (t->hmask) i = -1; else if (t->asize == 0) return; /* Fast exit. */
+ cconv_substruct_tab(cts, d, dp, t, &i, flags);
+}
+
+/* Convert TValue to C type. Caveat: expects to get the raw CType! */
+void lj_cconv_ct_tv(CTState *cts, CType *d,
+ uint8_t *dp, TValue *o, CTInfo flags)
+{
+ CTypeID sid = CTID_P_VOID;
+ CType *s;
+ void *tmpptr;
+ uint8_t tmpbool, *sp = (uint8_t *)&tmpptr;
+ if (LJ_LIKELY(tvisint(o))) {
+ sp = (uint8_t *)&o->i;
+ sid = CTID_INT32;
+ flags |= CCF_FROMTV;
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ sp = (uint8_t *)&o->n;
+ sid = CTID_DOUBLE;
+ flags |= CCF_FROMTV;
+ } else if (tviscdata(o)) {
+ sp = cdataptr(cdataV(o));
+ sid = cdataV(o)->typeid;
+ s = ctype_get(cts, sid);
+ if (ctype_isref(s->info)) { /* Resolve reference for value. */
+ lua_assert(s->size == CTSIZE_PTR);
+ sp = *(void **)sp;
+ sid = ctype_cid(s->info);
+ }
+ s = ctype_raw(cts, sid);
+ if (ctype_isfunc(s->info)) {
+ sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR);
+ } else {
+ if (ctype_isenum(s->info)) s = ctype_child(cts, s);
+ goto doconv;
+ }
+ } else if (tvisstr(o)) {
+ GCstr *str = strV(o);
+ if (ctype_isenum(d->info)) { /* Match string against enum constant. */
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, d, str, &ofs);
+ if (!cct || !ctype_isconstval(cct->info))
+ goto err_conv;
+ lua_assert(d->size == 4);
+ sp = (uint8_t *)&cct->size;
+ sid = ctype_cid(cct->info);
+ } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */
+ CType *dc = ctype_rawchild(cts, d);
+ CTSize sz = str->len+1;
+ if (!ctype_isinteger(dc->info) || dc->size != 1)
+ goto err_conv;
+ if (d->size != 0 && d->size < sz)
+ sz = d->size;
+ memcpy(dp, strdata(str), sz);
+ return;
+ } else { /* Otherwise pass it as a const char[]. */
+ sp = (uint8_t *)strdata(str);
+ sid = CTID_A_CCHAR;
+ flags |= CCF_FROMTV;
+ }
+ } else if (tvistab(o)) {
+ if (ctype_isarray(d->info)) {
+ cconv_array_tab(cts, d, dp, tabV(o), flags);
+ return;
+ } else if (ctype_isstruct(d->info)) {
+ cconv_struct_tab(cts, d, dp, tabV(o), flags);
+ return;
+ } else {
+ goto err_conv;
+ }
+ } else if (tvisbool(o)) {
+ tmpbool = boolV(o);
+ sp = &tmpbool;
+ sid = CTID_BOOL;
+ } else if (tvisnil(o)) {
+ tmpptr = (void *)0;
+ flags |= CCF_FROMTV;
+ } else if (tvisudata(o)) {
+ tmpptr = uddata(udataV(o));
+ } else if (tvislightud(o)) {
+ tmpptr = lightudV(o);
+ } else if (tvisfunc(o)) {
+ void *p = lj_ccallback_new(cts, d, funcV(o));
+ if (p) {
+ *(void **)dp = p;
+ return;
+ }
+ goto err_conv;
+ } else {
+ err_conv:
+ cconv_err_convtv(cts, d, o, flags);
+ }
+ s = ctype_get(cts, sid);
+doconv:
+ if (ctype_isenum(d->info)) d = ctype_child(cts, d);
+ lj_cconv_ct_ct(cts, d, s, dp, sp, flags);
+}
+
+/* Convert TValue to bitfield. */
+void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o)
+{
+ CTInfo info = d->info;
+ CTSize pos, bsz;
+ uint32_t val, mask;
+ lua_assert(ctype_isbitfield(info));
+ if ((info & CTF_BOOL)) {
+ uint8_t tmpbool;
+ lua_assert(ctype_bitbsz(info) == 1);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0);
+ val = tmpbool;
+ } else {
+ CTypeID did = (info & CTF_UNSIGNED) ? CTID_UINT32 : CTID_INT32;
+ lj_cconv_ct_tv(cts, ctype_get(cts, did), (uint8_t *)&val, o, 0);
+ }
+ pos = ctype_bitpos(info);
+ bsz = ctype_bitbsz(info);
+ lua_assert(pos < 8*ctype_bitcsz(info));
+ lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info));
+ /* Check if a packed bitfield crosses a container boundary. */
+ if (pos + bsz > 8*ctype_bitcsz(info))
+ lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
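+ /* Mask off the destination bits and merge the shifted value, leaving all
+ ** other bits of the bitfield container untouched (read-modify-write).
+ */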
+ mask = ((1u << bsz) - 1u) << pos;
+ val = (val << pos) & mask;
+ /* NYI: packed bitfields may cause misaligned reads/writes. */
+ switch (ctype_bitcsz(info)) {
+ case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break;
+ case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break;
+ case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break;
+ default: lua_assert(0); break;
+ }
+}
+
+/* -- Initialize C type with TValues -------------------------------------- */
+
+/* Initialize an array with TValues. */
+static void cconv_array_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp,
+ TValue *o, MSize len)
+{
+ CType *dc = ctype_rawchild(cts, d); /* Array element type. */
+ CTSize ofs, esize = dc->size;
+ MSize i;
+ if (len*esize > sz)
+ cconv_err_initov(cts, d);
+ for (i = 0, ofs = 0; i < len; i++, ofs += esize)
+ lj_cconv_ct_tv(cts, dc, dp + ofs, o + i, 0);
+ if (ofs == esize) { /* Replicate a single element. */
+ for (; ofs < sz; ofs += esize) memcpy(dp + ofs, dp, esize);
+ } else { /* Otherwise fill the remainder with zero. */
+ memset(dp + ofs, 0, sz - ofs);
+ }
+}
+
+/* Initialize a sub-struct/union with TValues. */
+static void cconv_substruct_init(CTState *cts, CType *d, uint8_t *dp,
+ TValue *o, MSize len, MSize *ip)
+{
+ CTypeID id = d->sib;
+ while (id) {
+ CType *df = ctype_get(cts, id);
+ id = df->sib;
+ if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) {
+ MSize i = *ip;
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ if (i >= len) break;
+ *ip = i + 1;
+ if (ctype_isfield(df->info))
+ lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, o + i, 0);
+ else
+ lj_cconv_bf_tv(cts, df, dp+df->size, o + i);
+ if ((d->info & CTF_UNION)) break;
+ } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) {
+ cconv_substruct_init(cts, ctype_child(cts, df), dp+df->size, o, len, ip);
+ } /* Ignore all other entries in the chain. */
+ }
+}
+
+/* Initialize a struct/union with TValues. */
+static void cconv_struct_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp,
+ TValue *o, MSize len)
+{
+ MSize i = 0;
+ memset(dp, 0, sz); /* Much simpler to clear the struct first. */
+ cconv_substruct_init(cts, d, dp, o, len, &i);
+ if (i < len)
+ cconv_err_initov(cts, d);
+}
+
+/* Check whether to use a multi-value initializer.
+** This is true if an aggregate is to be initialized with a value.
+** Valarrays are treated as values here so ct_tv handles (V|C, I|F).
+*/
+int lj_cconv_multi_init(CType *d, TValue *o)
+{
+ if (!(ctype_isrefarray(d->info) || ctype_isstruct(d->info)))
+ return 0; /* Destination is not an aggregate. */
+ if (tvistab(o) || (tvisstr(o) && !ctype_isstruct(d->info)))
+ return 0; /* Initializer is not a value. */
+ return 1; /* Otherwise the initializer is a value. */
+}
+
+/* Initialize C type with TValues. Caveat: expects to get the raw CType! */
+void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz,
+ uint8_t *dp, TValue *o, MSize len)
+{
+ if (len == 0)
+ memset(dp, 0, sz);
+ else if (len == 1 && !lj_cconv_multi_init(d, o))
+ lj_cconv_ct_tv(cts, d, dp, o, 0);
+ else if (ctype_isarray(d->info)) /* Also handles valarray init with len>1. */
+ cconv_array_init(cts, d, sz, dp, o, len);
+ else if (ctype_isstruct(d->info))
+ cconv_struct_init(cts, d, sz, dp, o, len);
+ else
+ cconv_err_initov(cts, d);
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_cconv.h b/src/LuaJIT/src/lj_cconv.h
new file mode 100644
index 000000000..c53a7cf84
--- /dev/null
+++ b/src/LuaJIT/src/lj_cconv.h
@@ -0,0 +1,70 @@
+/*
+** C type conversions.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CCONV_H
+#define _LJ_CCONV_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* Compressed C type index. ORDER CCX. */
+enum {
+ CCX_B, /* Bool. */
+ CCX_I, /* Integer. */
+ CCX_F, /* Floating-point number. */
+ CCX_C, /* Complex. */
+ CCX_V, /* Vector. */
+ CCX_P, /* Pointer. */
+ CCX_A, /* Refarray. */
+ CCX_S /* Struct/union. */
+};
+
+/* Convert C type info to compressed C type index. ORDER CT. ORDER CCX. */
+static LJ_AINLINE uint32_t cconv_idx(CTInfo info)
+{
+ uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */
+ lua_assert(ctype_type(info) <= CT_MAYCONVERT);
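+ /* The magic constant is a 16-entry lookup table packed into 4-bit nibbles;
+ ** each nibble maps one dispatch-bit pattern to its compressed CCX index.
+ */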
+#if LJ_64
+ idx = ((U64x(f436fff5,fff7f021) >> 4*idx) & 15u);
+#else
+ idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u);
+#endif
+ lua_assert(idx < 8);
+ return idx;
+}
+
+#define cconv_idx2(dinfo, sinfo) \
+ ((cconv_idx((dinfo)) << 3) + cconv_idx((sinfo)))
+
+#define CCX(dst, src) ((CCX_##dst << 3) + CCX_##src)
+
+/* Conversion flags. */
+#define CCF_CAST 0x00000001u
+#define CCF_FROMTV 0x00000002u
+#define CCF_SAME 0x00000004u
+#define CCF_IGNQUAL 0x00000008u
+
+#define CCF_ARG_SHIFT 8
+#define CCF_ARG(n) ((n) << CCF_ARG_SHIFT)
+#define CCF_GETARG(f) ((f) >> CCF_ARG_SHIFT)
+
+LJ_FUNC int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags);
+LJ_FUNC void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
+ uint8_t *dp, uint8_t *sp, CTInfo flags);
+LJ_FUNC int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
+ TValue *o, uint8_t *sp);
+LJ_FUNC int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp);
+LJ_FUNC void lj_cconv_ct_tv(CTState *cts, CType *d,
+ uint8_t *dp, TValue *o, CTInfo flags);
+LJ_FUNC void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o);
+LJ_FUNC int lj_cconv_multi_init(CType *d, TValue *o);
+LJ_FUNC void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz,
+ uint8_t *dp, TValue *o, MSize len);
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_cdata.c b/src/LuaJIT/src/lj_cdata.c
new file mode 100644
index 000000000..482add4b5
--- /dev/null
+++ b/src/LuaJIT/src/lj_cdata.c
@@ -0,0 +1,284 @@
+/*
+** C data management.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+
+/* -- C data allocation --------------------------------------------------- */
+
+/* Allocate a new C data object holding a reference to another object. */
+GCcdata *lj_cdata_newref(CTState *cts, const void *p, CTypeID id)
+{
+ CTypeID refid = lj_ctype_intern(cts, CTINFO_REF(id), CTSIZE_PTR);
+ GCcdata *cd = lj_cdata_new(cts, refid, CTSIZE_PTR);
+ *(const void **)cdataptr(cd) = p;
+ return cd;
+}
+
+/* Allocate variable-sized or specially aligned C data object. */
+GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz, CTSize align)
+{
+ global_State *g;
+ MSize extra = sizeof(GCcdataVar) + sizeof(GCcdata) +
+ (align > CT_MEMALIGN ? (1u<<align) - (1u<<CT_MEMALIGN) : 0);
+ char *p = lj_mem_newt(cts->L, extra + sz, char);
+ uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata);
+ uintptr_t almask = (1u << align) - 1u;
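+ /* Round the payload address up to the requested alignment, then place the
+ ** GCcdata header immediately in front of the aligned payload.
+ */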
+ GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata));
+ lua_assert((char *)cd - p < 65536);
+ cdatav(cd)->offset = (uint16_t)((char *)cd - p);
+ cdatav(cd)->extra = extra;
+ cdatav(cd)->len = sz;
+ g = cts->g;
+ setgcrefr(cd->nextgc, g->gc.root);
+ setgcref(g->gc.root, obj2gco(cd));
+ newwhite(g, obj2gco(cd));
+ cd->marked |= 0x80;
+ cd->gct = ~LJ_TCDATA;
+ cd->typeid = id;
+ return cd;
+}
+
+/* Free a C data object. */
+void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd)
+{
+ if (LJ_UNLIKELY(cd->marked & LJ_GC_CDATA_FIN)) {
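+ /* Don't free the cdata yet: resurrect it onto the circular g->gc.mmudata
+ ** list so its finalizer can run before the object is actually reclaimed.
+ */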
+ GCobj *root;
+ makewhite(g, obj2gco(cd));
+ obj2gco(cd)->gch.marked |= LJ_GC_FINALIZED;
+ if ((root = gcref(g->gc.mmudata)) != NULL) {
+ setgcrefr(cd->nextgc, root->gch.nextgc);
+ setgcref(root->gch.nextgc, obj2gco(cd));
+ setgcref(g->gc.mmudata, obj2gco(cd));
+ } else {
+ setgcref(cd->nextgc, obj2gco(cd));
+ setgcref(g->gc.mmudata, obj2gco(cd));
+ }
+ } else if (LJ_LIKELY(!cdataisv(cd))) {
+ CType *ct = ctype_raw(ctype_ctsG(g), cd->typeid);
+ CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR;
+ lua_assert(ctype_hassize(ct->info) || ctype_isfunc(ct->info) ||
+ ctype_isextern(ct->info));
+ lj_mem_free(g, cd, sizeof(GCcdata) + sz);
+ } else {
+ lj_mem_free(g, memcdatav(cd), sizecdatav(cd));
+ }
+}
+
+TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd)
+{
+ global_State *g = G(L);
+ GCtab *t = ctype_ctsG(g)->finalizer;
+ if (gcref(t->metatable)) {
+ /* Add cdata to finalizer table, if still enabled. */
+ TValue *tv, tmp;
+ setcdataV(L, &tmp, cd);
+ lj_gc_anybarriert(L, t);
+ tv = lj_tab_set(L, t, &tmp);
+ cd->marked |= LJ_GC_CDATA_FIN;
+ return tv;
+ } else {
+ /* Otherwise return dummy TValue. */
+ return &g->tmptv;
+ }
+}
+
+/* -- C data indexing ----------------------------------------------------- */
+
+/* Index C data by a TValue. Return CType and pointer. */
+CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, uint8_t **pp,
+ CTInfo *qual)
+{
+ uint8_t *p = (uint8_t *)cdataptr(cd);
+ CType *ct = ctype_get(cts, cd->typeid);
+ ptrdiff_t idx;
+
+ /* Resolve reference for cdata object. */
+ if (ctype_isref(ct->info)) {
+ lua_assert(ct->size == CTSIZE_PTR);
+ p = *(uint8_t **)p;
+ ct = ctype_child(cts, ct);
+ }
+
+collect_attrib:
+ /* Skip attributes and collect qualifiers. */
+ while (ctype_isattrib(ct->info)) {
+ if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
+ ct = ctype_child(cts, ct);
+ }
+ lua_assert(!ctype_isref(ct->info)); /* Interning rejects refs to refs. */
+
+ if (tvisint(key)) {
+ idx = (ptrdiff_t)intV(key);
+ goto integer_key;
+ } else if (tvisnum(key)) { /* Numeric key. */
+ idx = LJ_64 ? (ptrdiff_t)numV(key) : (ptrdiff_t)lj_num2int(numV(key));
+ integer_key:
+ if (ctype_ispointer(ct->info)) {
+ CTSize sz = lj_ctype_size(cts, ctype_cid(ct->info)); /* Element size. */
+ if (sz != CTSIZE_INVALID) {
+ if (ctype_isptr(ct->info)) {
+ p = (uint8_t *)cdata_getptr(p, ct->size);
+ } else if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) {
+ if ((ct->info & CTF_COMPLEX)) idx &= 1;
+ *qual |= CTF_CONST; /* Valarray elements are constant. */
+ }
+ *pp = p + idx*(int32_t)sz;
+ return ct;
+ }
+ }
+ } else if (tviscdata(key)) { /* Integer cdata key. */
+ GCcdata *cdk = cdataV(key);
+ CType *ctk = ctype_raw(cts, cdk->typeid);
+ if (ctype_isenum(ctk->info)) ctk = ctype_child(cts, ctk);
+ if (ctype_isinteger(ctk->info)) {
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ctk,
+ (uint8_t *)&idx, cdataptr(cdk), 0);
+ goto integer_key;
+ }
+ } else if (tvisstr(key)) { /* String key. */
+ GCstr *name = strV(key);
+ if (ctype_isstruct(ct->info)) {
+ CTSize ofs;
+ CType *fct = lj_ctype_getfield(cts, ct, name, &ofs);
+ if (fct) {
+ *pp = p + ofs;
+ return fct;
+ }
+ } else if (ctype_iscomplex(ct->info)) {
+ if (name->len == 2) {
+ *qual |= CTF_CONST; /* Complex fields are constant. */
+ if (strdata(name)[0] == 'r' && strdata(name)[1] == 'e') {
+ *pp = p;
+ return ct;
+ } else if (strdata(name)[0] == 'i' && strdata(name)[1] == 'm') {
+ *pp = p + (ct->size >> 1);
+ return ct;
+ }
+ }
+ } else if (cd->typeid == CTID_CTYPEID) {
+ /* Allow indexing a (pointer to) struct constructor to get constants. */
+ CType *sct = ctype_raw(cts, *(CTypeID *)p);
+ if (ctype_isptr(sct->info))
+ sct = ctype_rawchild(cts, sct);
+ if (ctype_isstruct(sct->info)) {
+ CTSize ofs;
+ CType *fct = lj_ctype_getfield(cts, sct, name, &ofs);
+ if (fct && ctype_isconstval(fct->info))
+ return fct;
+ }
+ }
+ }
+ if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */
+ if (ctype_isstruct(ctype_rawchild(cts, ct)->info)) {
+ p = (uint8_t *)cdata_getptr(p, ct->size);
+ ct = ctype_child(cts, ct);
+ goto collect_attrib;
+ }
+ }
+ *qual |= 1; /* Lookup failed. */
+ return ct; /* But return the resolved raw type. */
+}
+
+/* -- C data getters ------------------------------------------------------ */
+
+/* Get constant value and convert to TValue. */
+static void cdata_getconst(CTState *cts, TValue *o, CType *ct)
+{
+ CType *ctt = ctype_child(cts, ct);
+ lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4);
+ /* Constants are already zero-extended/sign-extended to 32 bits. */
+ if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
+ setnumV(o, (lua_Number)(uint32_t)ct->size);
+ else
+ setintV(o, (int32_t)ct->size);
+}
+
+/* Get C data value and convert to TValue. */
+int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp)
+{
+ CTypeID sid;
+
+ if (ctype_isconstval(s->info)) {
+ cdata_getconst(cts, o, s);
+ return 0; /* No GC step needed. */
+ } else if (ctype_isbitfield(s->info)) {
+ return lj_cconv_tv_bf(cts, s, o, sp);
+ }
+
+ /* Get child type of pointer/array/field. */
+ lua_assert(ctype_ispointer(s->info) || ctype_isfield(s->info));
+ sid = ctype_cid(s->info);
+ s = ctype_get(cts, sid);
+
+ /* Resolve reference for field. */
+ if (ctype_isref(s->info)) {
+ lua_assert(s->size == CTSIZE_PTR);
+ sp = *(uint8_t **)sp;
+ sid = ctype_cid(s->info);
+ s = ctype_get(cts, sid);
+ }
+
+ /* Skip attributes and enums. */
+ while (ctype_isattrib(s->info) || ctype_isenum(s->info))
+ s = ctype_child(cts, s);
+
+ return lj_cconv_tv_ct(cts, s, sid, o, sp);
+}
+
+/* -- C data setters ------------------------------------------------------ */
+
+/* Convert TValue and set C data value. */
+void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual)
+{
+ if (ctype_isconstval(d->info)) {
+ goto err_const;
+ } else if (ctype_isbitfield(d->info)) {
+ if (((d->info|qual) & CTF_CONST)) goto err_const;
+ lj_cconv_bf_tv(cts, d, dp, o);
+ return;
+ }
+
+ /* Get child type of pointer/array/field. */
+ lua_assert(ctype_ispointer(d->info) || ctype_isfield(d->info));
+ d = ctype_child(cts, d);
+
+ /* Resolve reference for field. */
+ if (ctype_isref(d->info)) {
+ lua_assert(d->size == CTSIZE_PTR);
+ dp = *(uint8_t **)dp;
+ d = ctype_child(cts, d);
+ }
+
+ /* Skip attributes and collect qualifiers. */
+ for (;;) {
+ if (ctype_isattrib(d->info)) {
+ if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size;
+ } else {
+ break;
+ }
+ d = ctype_child(cts, d);
+ }
+
+ lua_assert(ctype_hassize(d->info) && !ctype_isvoid(d->info));
+
+ if (((d->info|qual) & CTF_CONST)) {
+ err_const:
+ lj_err_caller(cts->L, LJ_ERR_FFI_WRCONST);
+ }
+
+ lj_cconv_ct_tv(cts, d, dp, o, 0);
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_cdata.h b/src/LuaJIT/src/lj_cdata.h
new file mode 100644
index 000000000..4618bc99c
--- /dev/null
+++ b/src/LuaJIT/src/lj_cdata.h
@@ -0,0 +1,75 @@
+/*
+** C data management.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CDATA_H
+#define _LJ_CDATA_H
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* Get C data pointer. */
+static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz)
+{
+ if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
+ return ((void *)(uintptr_t)*(uint32_t *)p);
+ } else {
+ lua_assert(sz == CTSIZE_PTR);
+ return *(void **)p;
+ }
+}
+
+/* Set C data pointer. */
+static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v)
+{
+ if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
+ *(uint32_t *)p = (uint32_t)(uintptr_t)v;
+ } else {
+ lua_assert(sz == CTSIZE_PTR);
+ *(void **)p = (void *)v;
+ }
+}
+
+/* Allocate fixed-size C data object. */
+static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz)
+{
+ GCcdata *cd;
+#ifdef LUA_USE_ASSERT
+ CType *ct = ctype_raw(cts, id);
+ lua_assert((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz);
+#endif
+ cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz);
+ cd->gct = ~LJ_TCDATA;
+ cd->typeid = ctype_check(cts, id);
+ return cd;
+}
+
+/* Variant which works without a valid CTState. */
+static LJ_AINLINE GCcdata *lj_cdata_new_(lua_State *L, CTypeID id, CTSize sz)
+{
+ GCcdata *cd = (GCcdata *)lj_mem_newgco(L, sizeof(GCcdata) + sz);
+ cd->gct = ~LJ_TCDATA;
+ cd->typeid = id;
+ return cd;
+}
+
+LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id);
+LJ_FUNC GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz,
+ CTSize align);
+
+LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd);
+LJ_FUNCA TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd);
+
+LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key,
+ uint8_t **pp, CTInfo *qual);
+LJ_FUNC int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp);
+LJ_FUNC void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o,
+ CTInfo qual);
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_char.c b/src/LuaJIT/src/lj_char.c
new file mode 100644
index 000000000..11f23efe4
--- /dev/null
+++ b/src/LuaJIT/src/lj_char.c
@@ -0,0 +1,43 @@
+/*
+** Character types.
+** Donated to the public domain.
+**
+** This is intended to replace the problematic libc single-byte NLS functions.
+** These just don't make sense anymore with UTF-8 locales becoming the norm
+** on POSIX systems. It never worked too well on Windows systems since hardly
+** anyone bothered to call setlocale().
+**
+** This table is hardcoded for ASCII. Identifiers include the characters
+** 128-255, too. This allows for the use of all non-ASCII chars as identifiers
+** in the lexer. This is a broad definition, but works well in practice
+** for both UTF-8 locales and most single-byte locales (such as ISO-8859-*).
+**
+** If you really need proper character types for UTF-8 strings, please use
+** an add-on library such as slnunicode: http://luaforge.net/projects/sln/
+*/
+
+#define lj_char_c
+#define LUA_CORE
+
+#include "lj_char.h"
+
+LJ_DATADEF const uint8_t lj_char_bits[257] = {
+ 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 152,152,152,152,152,152,152,152,152,152, 4, 4, 4, 4, 4, 4,
+ 4,176,176,176,176,176,176,160,160,160,160,160,160,160,160,160,
+ 160,160,160,160,160,160,160,160,160,160,160, 4, 4, 4, 4,132,
+ 4,208,208,208,208,208,208,192,192,192,192,192,192,192,192,192,
+ 192,192,192,192,192,192,192,192,192,192,192, 4, 4, 4, 4, 1,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
+};
+
diff --git a/src/LuaJIT/src/lj_char.h b/src/LuaJIT/src/lj_char.h
new file mode 100644
index 000000000..7b7c1322e
--- /dev/null
+++ b/src/LuaJIT/src/lj_char.h
@@ -0,0 +1,42 @@
+/*
+** Character types.
+** Donated to the public domain.
+*/
+
+#ifndef _LJ_CHAR_H
+#define _LJ_CHAR_H
+
+#include "lj_def.h"
+
+#define LJ_CHAR_CNTRL 0x01
+#define LJ_CHAR_SPACE 0x02
+#define LJ_CHAR_PUNCT 0x04
+#define LJ_CHAR_DIGIT 0x08
+#define LJ_CHAR_XDIGIT 0x10
+#define LJ_CHAR_UPPER 0x20
+#define LJ_CHAR_LOWER 0x40
+#define LJ_CHAR_IDENT 0x80
+#define LJ_CHAR_ALPHA (LJ_CHAR_LOWER|LJ_CHAR_UPPER)
+#define LJ_CHAR_ALNUM (LJ_CHAR_ALPHA|LJ_CHAR_DIGIT)
+#define LJ_CHAR_GRAPH (LJ_CHAR_ALNUM|LJ_CHAR_PUNCT)
+
+/* Only pass -1 or 0..255 to these macros. Never pass a signed char! */
+#define lj_char_isa(c, t) (lj_char_bits[(c)+1] & t)
+#define lj_char_iscntrl(c) lj_char_isa((c), LJ_CHAR_CNTRL)
+#define lj_char_isspace(c) lj_char_isa((c), LJ_CHAR_SPACE)
+#define lj_char_ispunct(c) lj_char_isa((c), LJ_CHAR_PUNCT)
+#define lj_char_isdigit(c) lj_char_isa((c), LJ_CHAR_DIGIT)
+#define lj_char_isxdigit(c) lj_char_isa((c), LJ_CHAR_XDIGIT)
+#define lj_char_isupper(c) lj_char_isa((c), LJ_CHAR_UPPER)
+#define lj_char_islower(c) lj_char_isa((c), LJ_CHAR_LOWER)
+#define lj_char_isident(c) lj_char_isa((c), LJ_CHAR_IDENT)
+#define lj_char_isalpha(c) lj_char_isa((c), LJ_CHAR_ALPHA)
+#define lj_char_isalnum(c) lj_char_isa((c), LJ_CHAR_ALNUM)
+#define lj_char_isgraph(c) lj_char_isa((c), LJ_CHAR_GRAPH)
+
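+/* Branchless ASCII case conversion: LJ_CHAR_LOWER (0x40) shifted right by one
+** and LJ_CHAR_UPPER (0x20) both equal the 0x20 distance between the cases.
+*/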
+#define lj_char_toupper(c) ((c) - (lj_char_islower(c) >> 1))
+#define lj_char_tolower(c) ((c) + lj_char_isupper(c))
+
+LJ_DATA const uint8_t lj_char_bits[257];
+
+#endif
diff --git a/src/LuaJIT/src/lj_clib.c b/src/LuaJIT/src/lj_clib.c
new file mode 100644
index 000000000..3023b7354
--- /dev/null
+++ b/src/LuaJIT/src/lj_clib.c
@@ -0,0 +1,412 @@
+/*
+** FFI C library loader.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_str.h"
+#include "lj_udata.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_clib.h"
+
+/* -- OS-specific functions ----------------------------------------------- */
+
+#if LJ_TARGET_DLOPEN
+
+#include <dlfcn.h>
+#include <stdio.h>
+
+#if defined(RTLD_DEFAULT)
+#define CLIB_DEFHANDLE RTLD_DEFAULT
+#elif LJ_TARGET_OSX || LJ_TARGET_BSD
+#define CLIB_DEFHANDLE ((void *)(intptr_t)-2)
+#else
+#define CLIB_DEFHANDLE NULL
+#endif
+
+LJ_NORET LJ_NOINLINE static void clib_error_(lua_State *L)
+{
+ lj_err_callermsg(L, dlerror());
+}
+
+#define clib_error(L, fmt, name) clib_error_(L)
+
+#if defined(__CYGWIN__)
+#define CLIB_SOPREFIX "cyg"
+#else
+#define CLIB_SOPREFIX "lib"
+#endif
+
+#if LJ_TARGET_OSX
+#define CLIB_SOEXT "%s.dylib"
+#elif defined(__CYGWIN__)
+#define CLIB_SOEXT "%s.dll"
+#else
+#define CLIB_SOEXT "%s.so"
+#endif
+
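+/* Turn a plain library name into a platform filename: append the shared
+** library extension if the name has none and prepend the "lib"/"cyg" prefix
+** if missing, unless the name already contains a path separator.
+*/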
+static const char *clib_extname(lua_State *L, const char *name)
+{
+ if (!strchr(name, '/')
+#ifdef __CYGWIN__
+ && !strchr(name, '\\')
+#endif
+ ) {
+ if (!strchr(name, '.')) {
+ name = lj_str_pushf(L, CLIB_SOEXT, name);
+ L->top--;
+#ifdef __CYGWIN__
+ } else {
+ return name;
+#endif
+ }
+ if (!(name[0] == CLIB_SOPREFIX[0] && name[1] == CLIB_SOPREFIX[1] &&
+ name[2] == CLIB_SOPREFIX[2])) {
+ name = lj_str_pushf(L, CLIB_SOPREFIX "%s", name);
+ L->top--;
+ }
+ }
+ return name;
+}
+
+/* Check for a recognized ld script line. */
+static const char *clib_check_lds(lua_State *L, const char *buf)
+{
+ char *p, *e;
+ if ((!strncmp(buf, "GROUP", 5) || !strncmp(buf, "INPUT", 5)) &&
+ (p = strchr(buf, '('))) {
+ while (*++p == ' ') ;
+ for (e = p; *e && *e != ' ' && *e != ')'; e++) ;
+ return strdata(lj_str_new(L, p, e-p));
+ }
+ return NULL;
+}
+
+/* Quick and dirty solution to resolve shared library name from ld script. */
+static const char *clib_resolve_lds(lua_State *L, const char *name)
+{
+ FILE *fp = fopen(name, "r");
+ const char *p = NULL;
+ if (fp) {
+ char buf[256];
+ if (fgets(buf, sizeof(buf), fp)) {
+ if (!strncmp(buf, "/* GNU ld script", 16)) { /* ld script magic? */
+ while (fgets(buf, sizeof(buf), fp)) { /* Check all lines. */
+ p = clib_check_lds(L, buf);
+ if (p) break;
+ }
+ } else { /* Otherwise check only the first line. */
+ p = clib_check_lds(L, buf);
+ }
+ }
+ fclose(fp);
+ }
+ return p;
+}
+
+static void *clib_loadlib(lua_State *L, const char *name, int global)
+{
+ void *h = dlopen(clib_extname(L, name),
+ RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL));
+ if (!h) {
+ const char *e, *err = dlerror();
+ if (*err == '/' && (e = strchr(err, ':')) &&
+ (name = clib_resolve_lds(L, strdata(lj_str_new(L, err, e-err))))) {
+ h = dlopen(name, RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL));
+ if (h) return h;
+ err = dlerror();
+ }
+ lj_err_callermsg(L, err);
+ }
+ return h;
+}
+
+static void clib_unloadlib(CLibrary *cl)
+{
+ if (cl->handle && cl->handle != CLIB_DEFHANDLE)
+ dlclose(cl->handle);
+}
+
+static void *clib_getsym(CLibrary *cl, const char *name)
+{
+ void *p = dlsym(cl->handle, name);
+ return p;
+}
+
+#elif LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#ifndef WINVER
+#define WINVER 0x0500
+#endif
+#include <windows.h>
+
+#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS
+#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4
+#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2
+BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
+#endif
+
+#define CLIB_DEFHANDLE ((void *)-1)
+
+/* Default libraries. */
+enum {
+ CLIB_HANDLE_EXE,
+ CLIB_HANDLE_DLL,
+ CLIB_HANDLE_CRT,
+ CLIB_HANDLE_KERNEL32,
+ CLIB_HANDLE_USER32,
+ CLIB_HANDLE_GDI32,
+ CLIB_HANDLE_MAX
+};
+
+static void *clib_def_handle[CLIB_HANDLE_MAX];
+
+LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
+ const char *name)
+{
+ DWORD err = GetLastError();
+ char buf[128];
+ if (!FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, err, 0, buf, sizeof(buf), NULL))
+ buf[0] = '\0';
+ lj_err_callermsg(L, lj_str_pushf(L, fmt, name, buf));
+}
+
+static int clib_needext(const char *s)
+{
+ while (*s) {
+ if (*s == '/' || *s == '\\' || *s == '.') return 0;
+ s++;
+ }
+ return 1;
+}
+
+static const char *clib_extname(lua_State *L, const char *name)
+{
+ if (clib_needext(name)) {
+ name = lj_str_pushf(L, "%s.dll", name);
+ L->top--;
+ }
+ return name;
+}
+
+static void *clib_loadlib(lua_State *L, const char *name, int global)
+{
+ DWORD oldwerr = GetLastError();
+ void *h = (void *)LoadLibraryA(clib_extname(L, name));
+ if (!h) clib_error(L, "cannot load module " LUA_QS ": %s", name);
+ SetLastError(oldwerr);
+ UNUSED(global);
+ return h;
+}
+
+static void clib_unloadlib(CLibrary *cl)
+{
+ if (cl->handle == CLIB_DEFHANDLE) {
+ MSize i;
+ for (i = 0; i < CLIB_HANDLE_MAX; i++) {
+ void *h = clib_def_handle[i];
+ if (h) {
+ clib_def_handle[i] = NULL;
+ FreeLibrary((HINSTANCE)h);
+ }
+ }
+ } else if (cl->handle) {
+ FreeLibrary((HINSTANCE)cl->handle);
+ }
+}
+
+static void *clib_getsym(CLibrary *cl, const char *name)
+{
+ void *p = NULL;
+ if (cl->handle == CLIB_DEFHANDLE) { /* Search default libraries. */
+ MSize i;
+ for (i = 0; i < CLIB_HANDLE_MAX; i++) {
+ HINSTANCE h = (HINSTANCE)clib_def_handle[i];
+ if (!(void *)h) { /* Resolve default library handles (once). */
+ switch (i) {
+ case CLIB_HANDLE_EXE: GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, NULL, &h); break;
+ case CLIB_HANDLE_DLL:
+ GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const char *)clib_def_handle, &h);
+ break;
+ case CLIB_HANDLE_CRT:
+ GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const char *)&_fmode, &h);
+ break;
+ case CLIB_HANDLE_KERNEL32: h = LoadLibraryA("kernel32.dll"); break;
+ case CLIB_HANDLE_USER32: h = LoadLibraryA("user32.dll"); break;
+ case CLIB_HANDLE_GDI32: h = LoadLibraryA("gdi32.dll"); break;
+ }
+ if (!h) continue;
+ clib_def_handle[i] = (void *)h;
+ }
+ p = (void *)GetProcAddress(h, name);
+ if (p) break;
+ }
+ } else {
+ p = (void *)GetProcAddress((HINSTANCE)cl->handle, name);
+ }
+ return p;
+}
+
+#else
+
+#define CLIB_DEFHANDLE NULL
+
+LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
+ const char *name)
+{
+ lj_err_callermsg(L, lj_str_pushf(L, fmt, name, "no support for this OS"));
+}
+
+static void *clib_loadlib(lua_State *L, const char *name, int global)
+{
+ lj_err_callermsg(L, "no support for loading dynamic libraries for this OS");
+ UNUSED(name); UNUSED(global);
+ return NULL;
+}
+
+static void clib_unloadlib(CLibrary *cl)
+{
+ UNUSED(cl);
+}
+
+static void *clib_getsym(CLibrary *cl, const char *name)
+{
+ UNUSED(cl); UNUSED(name);
+ return NULL;
+}
+
+#endif
+
+/* -- C library indexing -------------------------------------------------- */
+
+#if LJ_TARGET_X86 && LJ_ABI_WIN
+/* Compute argument size for fastcall/stdcall functions. */
+static CTSize clib_func_argsize(CTState *cts, CType *ct)
+{
+ CTSize n = 0;
+ while (ct->sib) {
+ CType *d;
+ ct = ctype_get(cts, ct->sib);
+ if (ctype_isfield(ct->info)) {
+ d = ctype_rawchild(cts, ct);
+ n += ((d->size + 3) & ~3);
+ }
+ }
+ return n;
+}
+#endif
+
+/* Get redirected or mangled external symbol. */
+static const char *clib_extsym(CTState *cts, CType *ct, GCstr *name)
+{
+ if (ct->sib) {
+ CType *ctf = ctype_get(cts, ct->sib);
+ if (ctype_isxattrib(ctf->info, CTA_REDIR))
+ return strdata(gco2str(gcref(ctf->name)));
+ }
+ return strdata(name);
+}
+
+/* Index a C library by name. */
+TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name)
+{
+ TValue *tv = lj_tab_setstr(L, cl->cache, name);
+ if (LJ_UNLIKELY(tvisnil(tv))) {
+ CTState *cts = ctype_cts(L);
+ CType *ct;
+ CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX);
+ if (!id)
+ lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name));
+ if (ctype_isconstval(ct->info)) {
+ CType *ctt = ctype_child(cts, ct);
+ lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4);
+ if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
+ setnumV(tv, (lua_Number)(uint32_t)ct->size);
+ else
+ setintV(tv, (int32_t)ct->size);
+ } else {
+ const char *sym = clib_extsym(cts, ct, name);
+#if LJ_TARGET_WINDOWS
+ DWORD oldwerr = GetLastError();
+#endif
+ void *p = clib_getsym(cl, sym);
+ GCcdata *cd;
+ lua_assert(ctype_isfunc(ct->info) || ctype_isextern(ct->info));
+#if LJ_TARGET_X86 && LJ_ABI_WIN
+ /* Retry with decorated name for fastcall/stdcall functions. */
+ if (!p && ctype_isfunc(ct->info)) {
+ CTInfo cconv = ctype_cconv(ct->info);
+ if (cconv == CTCC_FASTCALL || cconv == CTCC_STDCALL) {
+ CTSize sz = clib_func_argsize(cts, ct);
+ const char *symd = lj_str_pushf(L,
+ cconv == CTCC_FASTCALL ? "@%s@%d" : "_%s@%d",
+ sym, sz);
+ L->top--;
+ p = clib_getsym(cl, symd);
+ }
+ }
+#endif
+ if (!p)
+ clib_error(L, "cannot resolve symbol " LUA_QS ": %s", sym);
+#if LJ_TARGET_WINDOWS
+ SetLastError(oldwerr);
+#endif
+ cd = lj_cdata_new(cts, id, CTSIZE_PTR);
+ *(void **)cdataptr(cd) = p;
+ setcdataV(L, tv, cd);
+ }
+ }
+ return tv;
+}
+
+/* -- C library management ------------------------------------------------ */
+
+/* Create a new CLibrary object and push it on the stack. */
+static CLibrary *clib_new(lua_State *L, GCtab *mt)
+{
+ GCtab *t = lj_tab_new(L, 0, 0);
+ GCudata *ud = lj_udata_new(L, sizeof(CLibrary), t);
+ CLibrary *cl = (CLibrary *)uddata(ud);
+ cl->cache = t;
+ ud->udtype = UDTYPE_FFI_CLIB;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcref(ud->metatable, obj2gco(mt));
+ setudataV(L, L->top++, ud);
+ return cl;
+}
+
+/* Load a C library. */
+void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global)
+{
+ void *handle = clib_loadlib(L, strdata(name), global);
+ CLibrary *cl = clib_new(L, mt);
+ cl->handle = handle;
+}
+
+/* Unload a C library. */
+void lj_clib_unload(CLibrary *cl)
+{
+ clib_unloadlib(cl);
+ cl->handle = NULL;
+}
+
+/* Create the default C library object. */
+void lj_clib_default(lua_State *L, GCtab *mt)
+{
+ CLibrary *cl = clib_new(L, mt);
+ cl->handle = CLIB_DEFHANDLE;
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_clib.h b/src/LuaJIT/src/lj_clib.h
new file mode 100644
index 000000000..f862bbacb
--- /dev/null
+++ b/src/LuaJIT/src/lj_clib.h
@@ -0,0 +1,29 @@
+/*
+** FFI C library loader.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CLIB_H
+#define _LJ_CLIB_H
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+/* Namespace for C library indexing. */
+#define CLNS_INDEX ((1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL))
+
+/* C library namespace. */
+typedef struct CLibrary {
+ void *handle; /* Opaque handle for dynamic library loader. */
+ GCtab *cache; /* Cache for resolved symbols. Anchored in ud->env. */
+} CLibrary;
+
+LJ_FUNC TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name);
+LJ_FUNC void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global);
+LJ_FUNC void lj_clib_unload(CLibrary *cl);
+LJ_FUNC void lj_clib_default(lua_State *L, GCtab *mt);
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_cparse.c b/src/LuaJIT/src/lj_cparse.c
new file mode 100644
index 000000000..ff5abc705
--- /dev/null
+++ b/src/LuaJIT/src/lj_cparse.c
@@ -0,0 +1,1844 @@
+/*
+** C declaration parser.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_ctype.h"
+#include "lj_cparse.h"
+#include "lj_frame.h"
+#include "lj_vm.h"
+#include "lj_char.h"
+
+/*
+** Important note: this is NOT a validating C parser! This is a minimal
+** C declaration parser, solely for use by the LuaJIT FFI.
+**
+** It ought to return correct results for properly formed C declarations,
+** but it may accept some invalid declarations, too (and return nonsense).
+** Also, it shows rather generic error messages to avoid unnecessary bloat.
+** If in doubt, please check the input against your favorite C compiler.
+*/
+
+/* -- C lexer ------------------------------------------------------------- */
+
+/* C lexer token names. */
+static const char *const ctoknames[] = {
+#define CTOKSTR(name, str) str,
+CTOKDEF(CTOKSTR)
+#undef CTOKSTR
+ NULL
+};
+
+/* Forward declaration. */
+LJ_NORET static void cp_err(CPState *cp, ErrMsg em);
+
+static const char *cp_tok2str(CPState *cp, CPToken tok)
+{
+ lua_assert(tok < CTOK_FIRSTDECL);
+ if (tok > CTOK_OFS)
+ return ctoknames[tok-CTOK_OFS-1];
+ else if (!lj_char_iscntrl(tok))
+ return lj_str_pushf(cp->L, "%c", tok);
+ else
+ return lj_str_pushf(cp->L, "char(%d)", tok);
+}
+
+/* End-of-line? */
+static LJ_AINLINE int cp_iseol(CPChar c)
+{
+ return (c == '\n' || c == '\r');
+}
+
+static LJ_AINLINE CPChar cp_get(CPState *cp);
+
+/* Peek next raw character. */
+static LJ_AINLINE CPChar cp_rawpeek(CPState *cp)
+{
+ return (CPChar)(uint8_t)(*cp->p);
+}
+
+/* Transparently skip backslash-escaped line breaks. */
+static LJ_NOINLINE CPChar cp_get_bs(CPState *cp)
+{
+ CPChar c2, c = cp_rawpeek(cp);
+ if (!cp_iseol(c)) return cp->c;
+ cp->p++;
+ c2 = cp_rawpeek(cp);
+ if (cp_iseol(c2) && c2 != c) cp->p++;
+ cp->linenumber++;
+ return cp_get(cp);
+}
+
+/* Get next character. */
+static LJ_AINLINE CPChar cp_get(CPState *cp)
+{
+ cp->c = (CPChar)(uint8_t)(*cp->p++);
+ if (LJ_LIKELY(cp->c != '\\')) return cp->c;
+ return cp_get_bs(cp);
+}
+
+/* Grow save buffer. */
+static LJ_NOINLINE void cp_save_grow(CPState *cp, CPChar c)
+{
+ MSize newsize;
+ if (cp->sb.sz >= CPARSE_MAX_BUF/2)
+ cp_err(cp, LJ_ERR_XELEM);
+ newsize = cp->sb.sz * 2;
+ lj_str_resizebuf(cp->L, &cp->sb, newsize);
+ cp->sb.buf[cp->sb.n++] = (char)c;
+}
+
+/* Save character in buffer. */
+static LJ_AINLINE void cp_save(CPState *cp, CPChar c)
+{
+ if (LJ_UNLIKELY(cp->sb.n + 1 > cp->sb.sz))
+ cp_save_grow(cp, c);
+ else
+ cp->sb.buf[cp->sb.n++] = (char)c;
+}
+
+/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */
+static void cp_newline(CPState *cp)
+{
+ CPChar c = cp_rawpeek(cp);
+ if (cp_iseol(c) && c != cp->c) cp->p++;
+ cp->linenumber++;
+}
+
+LJ_NORET static void cp_errmsg(CPState *cp, CPToken tok, ErrMsg em, ...)
+{
+ const char *msg, *tokstr;
+ lua_State *L;
+ va_list argp;
+ if (tok == 0) {
+ tokstr = NULL;
+ } else if (tok == CTOK_IDENT || tok == CTOK_INTEGER || tok == CTOK_STRING ||
+ tok >= CTOK_FIRSTDECL) {
+ cp_save(cp, '\0');
+ tokstr = cp->sb.buf;
+ } else {
+ tokstr = cp_tok2str(cp, tok);
+ }
+ L = cp->L;
+ va_start(argp, em);
+ msg = lj_str_pushvf(L, err2msg(em), argp);
+ va_end(argp);
+ if (tokstr)
+ msg = lj_str_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tokstr);
+ if (cp->linenumber > 1)
+ msg = lj_str_pushf(L, "%s at line %d", msg, cp->linenumber);
+ lj_err_callermsg(L, msg);
+}
+
+LJ_NORET LJ_NOINLINE static void cp_err_token(CPState *cp, CPToken tok)
+{
+ cp_errmsg(cp, cp->tok, LJ_ERR_XTOKEN, cp_tok2str(cp, tok));
+}
+
+LJ_NORET LJ_NOINLINE static void cp_err_badidx(CPState *cp, CType *ct)
+{
+ GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL);
+ cp_errmsg(cp, 0, LJ_ERR_FFI_BADIDX, strdata(s));
+}
+
+LJ_NORET LJ_NOINLINE static void cp_err(CPState *cp, ErrMsg em)
+{
+ cp_errmsg(cp, 0, em);
+}
+
+/* -- Main lexical scanner ------------------------------------------------ */
+
+/* Parse integer literal. */
+static CPToken cp_integer(CPState *cp)
+{
+ uint32_t n = 0;
+ cp->val.id = CTID_INT32;
+ if (cp->c != '0') { /* Decimal. */
+ do {
+ n = n*10 + (cp->c - '0');
+ } while (lj_char_isdigit(cp_get(cp)));
+ } else if ((cp_get(cp)& ~0x20) == 'X') { /* Hexadecimal. */
+ if (!lj_char_isxdigit(cp_get(cp)))
+ cp_err(cp, LJ_ERR_XNUMBER);
+ do {
+ n = n*16 + (cp->c & 15);
+ if (!lj_char_isdigit(cp->c)) n += 9;
+ } while (lj_char_isxdigit(cp_get(cp)));
+ if (n >= 0x80000000u) cp->val.id = CTID_UINT32;
+ } else { /* Octal. */
+ while (cp->c >= '0' && cp->c <= '7') {
+ n = n*8 + (cp->c - '0');
+ cp_get(cp);
+ }
+ if (n >= 0x80000000u) cp->val.id = CTID_UINT32;
+ }
+ cp->val.u32 = n;
+ for (;;) { /* Parse suffixes. */
+ if ((cp->c & ~0x20) == 'U')
+ cp->val.id = CTID_UINT32;
+ else if ((cp->c & ~0x20) != 'L')
+ break;
+ cp_get(cp);
+ }
+ if (lj_char_isident(cp->c) && !(cp->mode & CPARSE_MODE_SKIP))
+ cp_errmsg(cp, cp->c, LJ_ERR_XNUMBER);
+ return CTOK_INTEGER;
+}
+
+/* Parse identifier or keyword. */
+static CPToken cp_ident(CPState *cp)
+{
+ do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp)));
+ cp->str = lj_str_new(cp->L, cp->sb.buf, cp->sb.n);
+ cp->val.id = lj_ctype_getname(cp->cts, &cp->ct, cp->str, cp->tmask);
+ if (ctype_type(cp->ct->info) == CT_KW)
+ return ctype_cid(cp->ct->info);
+ return CTOK_IDENT;
+}
+
+/* Parse string or character constant. */
+static CPToken cp_string(CPState *cp)
+{
+ CPChar delim = cp->c;
+ cp_get(cp);
+ while (cp->c != delim) {
+ CPChar c = cp->c;
+ if (c == '\0') cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR);
+ if (c == '\\') {
+ c = cp_get(cp);
+ switch (c) {
+ case '\0': cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR); break;
+ case 'a': c = '\a'; break;
+ case 'b': c = '\b'; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ case 'v': c = '\v'; break;
+ case 'e': c = 27; break;
+ case 'x':
+ c = 0;
+ while (lj_char_isxdigit(cp_get(cp)))
+ c = (c<<4) + (lj_char_isdigit(cp->c) ? cp->c-'0' : (cp->c&15)+9);
+ cp_save(cp, (c & 0xff));
+ continue;
+ default:
+ if (lj_char_isdigit(c)) {
+ c -= '0';
+ if (lj_char_isdigit(cp_get(cp))) {
+ c = c*8 + (cp->c - '0');
+ if (lj_char_isdigit(cp_get(cp))) {
+ c = c*8 + (cp->c - '0');
+ cp_get(cp);
+ }
+ }
+ cp_save(cp, (c & 0xff));
+ continue;
+ }
+ break;
+ }
+ }
+ cp_save(cp, c);
+ cp_get(cp);
+ }
+ cp_get(cp);
+ if (delim == '"') {
+ cp->str = lj_str_new(cp->L, cp->sb.buf, cp->sb.n);
+ return CTOK_STRING;
+ } else {
+ if (cp->sb.n != 1) cp_err_token(cp, '\'');
+ cp->val.i32 = (int32_t)(char)cp->sb.buf[0];
+ cp->val.id = CTID_INT32;
+ return CTOK_INTEGER;
+ }
+}
+
+/* Skip C comment. */
+static void cp_comment_c(CPState *cp)
+{
+ do {
+ if (cp_get(cp) == '*') {
+ do {
+ if (cp_get(cp) == '/') { cp_get(cp); return; }
+ } while (cp->c == '*');
+ }
+ if (cp_iseol(cp->c)) cp_newline(cp);
+ } while (cp->c != '\0');
+}
+
+/* Skip C++ comment. */
+static void cp_comment_cpp(CPState *cp)
+{
+ while (!cp_iseol(cp_get(cp)) && cp->c != '\0')
+ ;
+}
+
+/* Lexical scanner for C. Only a minimal subset is implemented. */
+static CPToken cp_next_(CPState *cp)
+{
+ lj_str_resetbuf(&cp->sb);
+ for (;;) {
+ if (lj_char_isident(cp->c))
+ return lj_char_isdigit(cp->c) ? cp_integer(cp) : cp_ident(cp);
+ switch (cp->c) {
+ case '\n': case '\r': cp_newline(cp); /* fallthrough. */
+ case ' ': case '\t': case '\v': case '\f': cp_get(cp); break;
+ case '"': case '\'': return cp_string(cp);
+ case '/':
+ cp_get(cp);
+ if (cp->c == '*') cp_comment_c(cp);
+ else if (cp->c == '/') cp_comment_cpp(cp);
+ else return '/';
+ break;
+ case '|':
+ cp_get(cp); if (cp->c != '|') return '|'; cp_get(cp); return CTOK_OROR;
+ case '&':
+ cp_get(cp); if (cp->c != '&') return '&'; cp_get(cp); return CTOK_ANDAND;
+ case '=':
+ cp_get(cp); if (cp->c != '=') return '='; cp_get(cp); return CTOK_EQ;
+ case '!':
+ cp_get(cp); if (cp->c != '=') return '!'; cp_get(cp); return CTOK_NE;
+ case '<':
+ cp_get(cp);
+ if (cp->c == '=') { cp_get(cp); return CTOK_LE; }
+ else if (cp->c == '<') { cp_get(cp); return CTOK_SHL; }
+ return '<';
+ case '>':
+ cp_get(cp);
+ if (cp->c == '=') { cp_get(cp); return CTOK_GE; }
+ else if (cp->c == '>') { cp_get(cp); return CTOK_SHR; }
+ return '>';
+ case '-':
+ cp_get(cp); if (cp->c != '>') return '-'; cp_get(cp); return CTOK_DEREF;
+ case '\0': return CTOK_EOF;
+ default: { CPToken c = cp->c; cp_get(cp); return c; }
+ }
+ }
+}
+
+static LJ_NOINLINE CPToken cp_next(CPState *cp)
+{
+ return (cp->tok = cp_next_(cp));
+}
+
+/* -- C parser ------------------------------------------------------------ */
+
+/* Namespaces for resolving identifiers. */
+#define CPNS_DEFAULT \
+ ((1u<<CT_KW)|(1u<<CT_TYPEDEF)|(1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL))
+#define CPNS_STRUCT ((1u<<CT_KW)|(1u<<CT_STRUCT)|(1u<<CT_ENUM))
+
+typedef CTypeID CPDeclIdx; /* Index into declaration stack. */
+typedef uint32_t CPscl; /* Storage class flags. */
+
+/* Type declaration context. */
+typedef struct CPDecl {
+ CPDeclIdx top; /* Top of declaration stack. */
+ CPDeclIdx pos; /* Insertion position in declaration chain. */
+ CPDeclIdx specpos; /* Saved position for declaration specifier. */
+ uint32_t mode; /* Declarator mode. */
+ CPState *cp; /* C parser state. */
+ GCstr *name; /* Name of declared identifier (if direct). */
+ GCstr *redir; /* Redirected symbol name. */
+ CTypeID nameid; /* Existing typedef for declared identifier. */
+ CTInfo attr; /* Attributes. */
+ CTInfo fattr; /* Function attributes. */
+ CTInfo specattr; /* Saved attributes. */
+ CTInfo specfattr; /* Saved function attributes. */
+ CTSize bits; /* Field size in bits (if any). */
+ CType stack[CPARSE_MAX_DECLSTACK]; /* Type declaration stack. */
+} CPDecl;
+
+/* Forward declarations. */
+static CPscl cp_decl_spec(CPState *cp, CPDecl *decl, CPscl scl);
+static void cp_declarator(CPState *cp, CPDecl *decl);
+static CTypeID cp_decl_abstract(CPState *cp);
+
+/* Initialize C parser state. Caller must set up: L, p, srcname, mode. */
+static void cp_init(CPState *cp)
+{
+ cp->linenumber = 1;
+ cp->depth = 0;
+ cp->curpack = 0;
+ cp->packstack[0] = 255;
+ lj_str_initbuf(&cp->sb);
+ lj_str_resizebuf(cp->L, &cp->sb, LJ_MIN_SBUF);
+ lua_assert(cp->p != NULL);
+ cp_get(cp); /* Read-ahead first char. */
+ cp->tok = 0;
+ cp->tmask = CPNS_DEFAULT;
+ cp_next(cp); /* Read-ahead first token. */
+}
+
+/* Cleanup C parser state. */
+static void cp_cleanup(CPState *cp)
+{
+ global_State *g = G(cp->L);
+ lj_str_freebuf(g, &cp->sb);
+}
+
+/* Check and consume optional token. */
+static int cp_opt(CPState *cp, CPToken tok)
+{
+ if (cp->tok == tok) { cp_next(cp); return 1; }
+ return 0;
+}
+
+/* Check and consume token. */
+static void cp_check(CPState *cp, CPToken tok)
+{
+ if (cp->tok != tok) cp_err_token(cp, tok);
+ cp_next(cp);
+}
+
+/* Check if the next token may start a type declaration. */
+static int cp_istypedecl(CPState *cp)
+{
+ if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECL) return 1;
+ if (cp->tok == CTOK_IDENT && ctype_istypedef(cp->ct->info)) return 1;
+ return 0;
+}
+
+/* -- Constant expression evaluator --------------------------------------- */
+
+/* Forward declarations. */
+static void cp_expr_unary(CPState *cp, CPValue *k);
+static void cp_expr_sub(CPState *cp, CPValue *k, int pri);
+
+/* Please note that type handling is very weak here. Most ops simply
+** assume integer operands. Accessors are only needed to compute types and
+** return synthetic values. The only purpose of the expression evaluator
+** is to compute the values of constant expressions one would typically
+** find in C header files. And again: this is NOT a validating C parser!
+*/
+
+/* Parse comma separated expression and return last result. */
+static void cp_expr_comma(CPState *cp, CPValue *k)
+{
+ do { cp_expr_sub(cp, k, 0); } while (cp_opt(cp, ','));
+}
+
+/* Parse sizeof/alignof operator. */
+static void cp_expr_sizeof(CPState *cp, CPValue *k, int wantsz)
+{
+ CTSize sz;
+ CTInfo info;
+ if (cp_opt(cp, '(')) {
+ if (cp_istypedecl(cp))
+ k->id = cp_decl_abstract(cp);
+ else
+ cp_expr_comma(cp, k);
+ cp_check(cp, ')');
+ } else {
+ cp_expr_unary(cp, k);
+ }
+ info = lj_ctype_info(cp->cts, k->id, &sz);
+ if (wantsz) {
+ if (sz != CTSIZE_INVALID)
+ k->u32 = sz;
+ else if (k->id != CTID_A_CCHAR) /* Special case for sizeof("string"). */
+ cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ } else {
+ k->u32 = 1u << ctype_align(info);
+ }
+ k->id = CTID_UINT32; /* Really size_t. */
+}
+
+/* Parse prefix operators. */
+static void cp_expr_prefix(CPState *cp, CPValue *k)
+{
+ if (cp->tok == CTOK_INTEGER) {
+ *k = cp->val; cp_next(cp);
+ } else if (cp_opt(cp, '+')) {
+ cp_expr_unary(cp, k); /* Nothing to do (well, integer promotion). */
+ } else if (cp_opt(cp, '-')) {
+ cp_expr_unary(cp, k); k->i32 = -k->i32;
+ } else if (cp_opt(cp, '~')) {
+ cp_expr_unary(cp, k); k->i32 = ~k->i32;
+ } else if (cp_opt(cp, '!')) {
+ cp_expr_unary(cp, k); k->i32 = !k->i32; k->id = CTID_INT32;
+ } else if (cp_opt(cp, '(')) {
+ if (cp_istypedecl(cp)) { /* Cast operator. */
+ CTypeID id = cp_decl_abstract(cp);
+ cp_check(cp, ')');
+ cp_expr_unary(cp, k);
+ k->id = id; /* No conversion performed. */
+ } else { /* Sub-expression. */
+ cp_expr_comma(cp, k);
+ cp_check(cp, ')');
+ }
+ } else if (cp_opt(cp, '*')) { /* Indirection. */
+ CType *ct;
+ cp_expr_unary(cp, k);
+ ct = lj_ctype_rawref(cp->cts, k->id);
+ if (!ctype_ispointer(ct->info))
+ cp_err_badidx(cp, ct);
+ k->u32 = 0; k->id = ctype_cid(ct->info);
+ } else if (cp_opt(cp, '&')) { /* Address operator. */
+ cp_expr_unary(cp, k);
+ k->id = lj_ctype_intern(cp->cts, CTINFO(CT_PTR, CTALIGN_PTR+k->id),
+ CTSIZE_PTR);
+ } else if (cp_opt(cp, CTOK_SIZEOF)) {
+ cp_expr_sizeof(cp, k, 1);
+ } else if (cp_opt(cp, CTOK_ALIGNOF)) {
+ cp_expr_sizeof(cp, k, 0);
+ } else if (cp->tok == CTOK_IDENT) {
+ if (ctype_type(cp->ct->info) == CT_CONSTVAL) {
+ k->u32 = cp->ct->size; k->id = ctype_cid(cp->ct->info);
+ } else if (ctype_type(cp->ct->info) == CT_EXTERN) {
+ k->u32 = cp->val.id; k->id = ctype_cid(cp->ct->info);
+ } else if (ctype_type(cp->ct->info) == CT_FUNC) {
+ k->u32 = cp->val.id; k->id = cp->val.id;
+ } else {
+ goto err_expr;
+ }
+ cp_next(cp);
+ } else if (cp->tok == CTOK_STRING) {
+ CTSize sz = cp->str->len;
+ while (cp_next(cp) == CTOK_STRING)
+ sz += cp->str->len;
+ k->u32 = sz + 1;
+ k->id = CTID_A_CCHAR;
+ } else {
+ err_expr:
+ cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL);
+ }
+}
+
+/* Parse postfix operators. */
+static void cp_expr_postfix(CPState *cp, CPValue *k)
+{
+ for (;;) {
+ CType *ct;
+ if (cp_opt(cp, '[')) { /* Array/pointer index. */
+ CPValue k2;
+ cp_expr_comma(cp, &k2);
+ ct = lj_ctype_rawref(cp->cts, k->id);
+ if (!ctype_ispointer(ct->info)) {
+ ct = lj_ctype_rawref(cp->cts, k2.id);
+ if (!ctype_ispointer(ct->info))
+ cp_err_badidx(cp, ct);
+ }
+ cp_check(cp, ']');
+ k->u32 = 0;
+ } else if (cp->tok == '.' || cp->tok == CTOK_DEREF) { /* Struct deref. */
+ CTSize ofs;
+ CType *fct;
+ ct = lj_ctype_rawref(cp->cts, k->id);
+ if (cp->tok == CTOK_DEREF) {
+ if (!ctype_ispointer(ct->info))
+ cp_err_badidx(cp, ct);
+ ct = lj_ctype_rawref(cp->cts, ctype_cid(ct->info));
+ }
+ cp_next(cp);
+ if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT);
+ if (!ctype_isstruct(ct->info) || ct->size == CTSIZE_INVALID ||
+ !(fct = lj_ctype_getfield(cp->cts, ct, cp->str, &ofs)) ||
+ ctype_isbitfield(fct->info)) {
+ GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL);
+ cp_errmsg(cp, 0, LJ_ERR_FFI_BADMEMBER, strdata(s), strdata(cp->str));
+ }
+ ct = fct;
+ k->u32 = ctype_isconstval(ct->info) ? ct->size : 0;
+ cp_next(cp);
+ } else {
+ return;
+ }
+ k->id = ctype_cid(ct->info);
+ }
+}
+
+/* Parse infix operators. */
+static void cp_expr_infix(CPState *cp, CPValue *k, int pri)
+{
+ CPValue k2;
+ k2.u32 = 0; k2.id = 0; /* Silence the compiler. */
+ for (;;) {
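+ /* Precedence climbing: each case handles one binary-operator precedence
+ ** level and falls through to the next (tighter-binding) level when no
+ ** operator of its own level is present.
+ */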
+ switch (pri) {
+ case 0:
+ if (cp_opt(cp, '?')) {
+ CPValue k3;
+ cp_expr_comma(cp, &k2); /* Right-associative. */
+ cp_check(cp, ':');
+ cp_expr_sub(cp, &k3, 0);
+ k->u32 = k->u32 ? k2.u32 : k3.u32;
+ k->id = k2.id > k3.id ? k2.id : k3.id;
+ continue;
+ }
+ case 1:
+ if (cp_opt(cp, CTOK_OROR)) {
+ cp_expr_sub(cp, &k2, 2); k->i32 = k->u32 || k2.u32; k->id = CTID_INT32;
+ continue;
+ }
+ case 2:
+ if (cp_opt(cp, CTOK_ANDAND)) {
+ cp_expr_sub(cp, &k2, 3); k->i32 = k->u32 && k2.u32; k->id = CTID_INT32;
+ continue;
+ }
+ case 3:
+ if (cp_opt(cp, '|')) {
+ cp_expr_sub(cp, &k2, 4); k->u32 = k->u32 | k2.u32; goto arith_result;
+ }
+ case 4:
+ if (cp_opt(cp, '^')) {
+ cp_expr_sub(cp, &k2, 5); k->u32 = k->u32 ^ k2.u32; goto arith_result;
+ }
+ case 5:
+ if (cp_opt(cp, '&')) {
+ cp_expr_sub(cp, &k2, 6); k->u32 = k->u32 & k2.u32; goto arith_result;
+ }
+ case 6:
+ if (cp_opt(cp, CTOK_EQ)) {
+ cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 == k2.u32; k->id = CTID_INT32;
+ continue;
+ } else if (cp_opt(cp, CTOK_NE)) {
+ cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 != k2.u32; k->id = CTID_INT32;
+ continue;
+ }
+ case 7:
+ if (cp_opt(cp, '<')) {
+ cp_expr_sub(cp, &k2, 8);
+ if (k->id == CTID_INT32 && k2.id == CTID_INT32)
+ k->i32 = k->i32 < k2.i32;
+ else
+ k->i32 = k->u32 < k2.u32;
+ k->id = CTID_INT32;
+ continue;
+ } else if (cp_opt(cp, '>')) {
+ cp_expr_sub(cp, &k2, 8);
+ if (k->id == CTID_INT32 && k2.id == CTID_INT32)
+ k->i32 = k->i32 > k2.i32;
+ else
+ k->i32 = k->u32 > k2.u32;
+ k->id = CTID_INT32;
+ continue;
+ } else if (cp_opt(cp, CTOK_LE)) {
+ cp_expr_sub(cp, &k2, 8);
+ if (k->id == CTID_INT32 && k2.id == CTID_INT32)
+ k->i32 = k->i32 <= k2.i32;
+ else
+ k->i32 = k->u32 <= k2.u32;
+ k->id = CTID_INT32;
+ continue;
+ } else if (cp_opt(cp, CTOK_GE)) {
+ cp_expr_sub(cp, &k2, 8);
+ if (k->id == CTID_INT32 && k2.id == CTID_INT32)
+ k->i32 = k->i32 >= k2.i32;
+ else
+ k->i32 = k->u32 >= k2.u32;
+ k->id = CTID_INT32;
+ continue;
+ }
+ case 8:
+ if (cp_opt(cp, CTOK_SHL)) {
+ cp_expr_sub(cp, &k2, 9); k->u32 = k->u32 << k2.u32;
+ continue;
+ } else if (cp_opt(cp, CTOK_SHR)) {
+ cp_expr_sub(cp, &k2, 9);
+ if (k->id == CTID_INT32)
+ k->i32 = k->i32 >> k2.i32;
+ else
+ k->u32 = k->u32 >> k2.u32;
+ continue;
+ }
+ case 9:
+ if (cp_opt(cp, '+')) {
+ cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 + k2.u32;
+ arith_result:
+ if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */
+ continue;
+ } else if (cp_opt(cp, '-')) {
+ cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 - k2.u32; goto arith_result;
+ }
+ case 10:
+ if (cp_opt(cp, '*')) {
+ cp_expr_unary(cp, &k2); k->u32 = k->u32 * k2.u32; goto arith_result;
+ } else if (cp_opt(cp, '/')) {
+ cp_expr_unary(cp, &k2);
+ if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */
+ if (k2.u32 == 0 ||
+ (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1))
+ cp_err(cp, LJ_ERR_BADVAL);
+ if (k->id == CTID_INT32)
+ k->i32 = k->i32 / k2.i32;
+ else
+ k->u32 = k->u32 / k2.u32;
+ continue;
+ } else if (cp_opt(cp, '%')) {
+ cp_expr_unary(cp, &k2);
+ if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */
+ if (k2.u32 == 0 ||
+ (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1))
+ cp_err(cp, LJ_ERR_BADVAL);
+ if (k->id == CTID_INT32)
+ k->i32 = k->i32 % k2.i32;
+ else
+ k->u32 = k->u32 % k2.u32;
+ continue;
+ }
+ default:
+ return;
+ }
+ }
+}
+
+/* Parse and evaluate unary expression. */
+static void cp_expr_unary(CPState *cp, CPValue *k)
+{
+ if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS);
+ cp_expr_prefix(cp, k);
+ cp_expr_postfix(cp, k);
+ cp->depth--;
+}
+
+/* Parse and evaluate sub-expression. */
+static void cp_expr_sub(CPState *cp, CPValue *k, int pri)
+{
+ cp_expr_unary(cp, k);
+ cp_expr_infix(cp, k, pri);
+}
+
+/* Parse constant integer expression. */
+static void cp_expr_kint(CPState *cp, CPValue *k)
+{
+ CType *ct;
+ cp_expr_sub(cp, k, 0);
+ ct = ctype_raw(cp->cts, k->id);
+ if (!ctype_isinteger(ct->info)) cp_err(cp, LJ_ERR_BADVAL);
+}
+
+/* Parse (non-negative) size expression. */
+static CTSize cp_expr_ksize(CPState *cp)
+{
+ CPValue k;
+ cp_expr_kint(cp, &k);
+ if (k.u32 >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ return k.u32;
+}
+
+/* -- Type declaration stack management ----------------------------------- */
+
+/* Add declaration element behind the insertion position. */
+static CPDeclIdx cp_add(CPDecl *decl, CTInfo info, CTSize size)
+{
+ CPDeclIdx top = decl->top;
+ if (top >= CPARSE_MAX_DECLSTACK) cp_err(decl->cp, LJ_ERR_XLEVELS);
+ decl->stack[top].info = info;
+ decl->stack[top].size = size;
+ decl->stack[top].sib = 0;
+ setgcrefnull(decl->stack[top].name);
+ decl->stack[top].next = decl->stack[decl->pos].next;
+ decl->stack[decl->pos].next = (CTypeID1)top;
+ decl->top = top+1;
+ return top;
+}
+
+/* Push declaration element before the insertion position. */
+static CPDeclIdx cp_push(CPDecl *decl, CTInfo info, CTSize size)
+{
+ return (decl->pos = cp_add(decl, info, size));
+}
+
+/* Push or merge attributes. */
+static void cp_push_attributes(CPDecl *decl)
+{
+ CType *ct = &decl->stack[decl->pos];
+ if (ctype_isfunc(ct->info)) { /* Ok to modify in-place. */
+#if LJ_TARGET_X86
+ if ((decl->fattr & CTFP_CCONV))
+ ct->info = (ct->info & (CTMASK_NUM|CTF_VARARG|CTMASK_CID)) +
+ (decl->fattr & ~CTMASK_CID);
+#endif
+ } else {
+ if ((decl->attr & CTFP_ALIGNED) && !(decl->mode & CPARSE_MODE_FIELD))
+ cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_ALIGN)),
+ ctype_align(decl->attr));
+ }
+}
+
+/* Push unrolled type to declaration stack and merge qualifiers. */
+static void cp_push_type(CPDecl *decl, CTypeID id)
+{
+ CType *ct = ctype_get(decl->cp->cts, id);
+ CTInfo info = ct->info;
+ CTSize size = ct->size;
+ switch (ctype_type(info)) {
+ case CT_STRUCT: case CT_ENUM:
+ cp_push(decl, CTINFO(CT_TYPEDEF, id), 0); /* Don't copy unique types. */
+ if ((decl->attr & CTF_QUAL)) { /* Push unmerged qualifiers. */
+ cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_QUAL)),
+ (decl->attr & CTF_QUAL));
+ decl->attr &= ~CTF_QUAL;
+ }
+ break;
+ case CT_ATTRIB:
+ if (ctype_isxattrib(info, CTA_QUAL))
+ decl->attr &= ~size; /* Remove redundant qualifiers. */
+ cp_push_type(decl, ctype_cid(info)); /* Unroll. */
+ cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */
+ break;
+ case CT_ARRAY:
+ cp_push_type(decl, ctype_cid(info)); /* Unroll. */
+ cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */
+ decl->stack[decl->pos].sib = 1; /* Mark as already checked and sized. */
+ /* Note: this is not copied to the ct->sib in the C type table. */
+ break;
+ case CT_FUNC:
+ /* Copy type, link parameters (shared). */
+ decl->stack[cp_push(decl, info, size)].sib = ct->sib;
+ break;
+ default:
+ /* Copy type, merge common qualifiers. */
+ cp_push(decl, info|(decl->attr & CTF_QUAL), size);
+ decl->attr &= ~CTF_QUAL;
+ break;
+ }
+}
+
+/* Consume the declaration element chain and intern the C type. */
+static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl)
+{
+ CTypeID id = 0;
+ CPDeclIdx idx = 0;
+ CTSize csize = CTSIZE_INVALID;
+ CTSize cinfo = 0;
+ do {
+ CType *ct = &decl->stack[idx];
+ CTInfo info = ct->info;
+ CTInfo size = ct->size;
+ /* The cid is already part of info for copies of pointers/functions. */
+ idx = ct->next;
+ if (ctype_istypedef(info)) {
+ lua_assert(id == 0);
+ id = ctype_cid(info);
+ /* Always refetch info/size, since struct/enum may have been completed. */
+ cinfo = ctype_get(cp->cts, id)->info;
+ csize = ctype_get(cp->cts, id)->size;
+ lua_assert(ctype_isstruct(cinfo) || ctype_isenum(cinfo));
+ } else if (ctype_isfunc(info)) { /* Intern function. */
+ CType *fct;
+ CTypeID fid;
+ CTypeID sib;
+ if (id) {
+ CType *refct = ctype_raw(cp->cts, id);
+ /* Reject function or refarray return types. */
+ if (ctype_isfunc(refct->info) || ctype_isrefarray(refct->info))
+ cp_err(cp, LJ_ERR_FFI_INVTYPE);
+ }
+ /* No intervening attributes allowed, skip forward. */
+ while (idx) {
+ CType *ctn = &decl->stack[idx];
+ if (!ctype_isattrib(ctn->info)) break;
+ idx = ctn->next; /* Skip attribute. */
+ }
+ sib = ct->sib; /* Next line may reallocate the C type table. */
+ fid = lj_ctype_new(cp->cts, &fct);
+ csize = CTSIZE_INVALID;
+ fct->info = cinfo = info + id;
+ fct->size = size;
+ fct->sib = sib;
+ id = fid;
+ } else if (ctype_isattrib(info)) {
+ if (ctype_isxattrib(info, CTA_QUAL))
+ cinfo |= size;
+ else if (ctype_isxattrib(info, CTA_ALIGN))
+ CTF_INSERT(cinfo, ALIGN, size);
+ id = lj_ctype_intern(cp->cts, info+id, size);
+ /* Inherit csize/cinfo from original type. */
+ } else {
+ if (ctype_isnum(info)) { /* Handle mode/vector-size attributes. */
+ lua_assert(id == 0);
+ if (!(info & CTF_BOOL)) {
+ CTSize msize = ctype_msizeP(decl->attr);
+ CTSize vsize = ctype_vsizeP(decl->attr);
+ if (msize && (!(info & CTF_FP) || (msize == 4 || msize == 8))) {
+ CTSize malign = lj_fls(msize);
+ if (malign > 4) malign = 4; /* Limit alignment. */
+ CTF_INSERT(info, ALIGN, malign);
+ size = msize; /* Override size via mode. */
+ }
+ if (vsize) { /* Vector size set? */
+ CTSize esize = lj_fls(size);
+ if (vsize >= esize) {
+ /* Intern the element type first. */
+ id = lj_ctype_intern(cp->cts, info, size);
+ /* Then create a vector (array) with vsize alignment. */
+ size = (1u << vsize);
+ if (vsize > 4) vsize = 4; /* Limit alignment. */
+ if (ctype_align(info) > vsize) vsize = ctype_align(info);
+ info = CTINFO(CT_ARRAY, (info & CTF_QUAL) + CTF_VECTOR +
+ CTALIGN(vsize));
+ }
+ }
+ }
+ } else if (ctype_isptr(info)) {
+ /* Reject pointer/ref to ref. */
+ if (id && ctype_isref(ctype_raw(cp->cts, id)->info))
+ cp_err(cp, LJ_ERR_FFI_INVTYPE);
+ if (ctype_isref(info)) {
+ info &= ~CTF_VOLATILE; /* Refs are always const, never volatile. */
+ /* No intervening attributes allowed, skip forward. */
+ while (idx) {
+ CType *ctn = &decl->stack[idx];
+ if (!ctype_isattrib(ctn->info)) break;
+ idx = ctn->next; /* Skip attribute. */
+ }
+ }
+ } else if (ctype_isarray(info)) { /* Check for valid array size etc. */
+ if (ct->sib == 0) { /* Only check/size arrays not copied by unroll. */
+ if (ctype_isref(cinfo)) /* Reject arrays of refs. */
+ cp_err(cp, LJ_ERR_FFI_INVTYPE);
+ /* Reject VLS or unknown-sized types. */
+ if (ctype_isvltype(cinfo) || csize == CTSIZE_INVALID)
+ cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ /* a[] and a[?] keep their invalid size. */
+ if (size != CTSIZE_INVALID) {
+ uint64_t xsz = (uint64_t)size * csize;
+ if (xsz >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ size = (CTSize)xsz;
+ }
+ }
+ info |= (cinfo & (CTF_QUAL|CTF_ALIGN)); /* Inherit qual and align. */
+ } else {
+ lua_assert(ctype_isvoid(info));
+ }
+ csize = size;
+ cinfo = info+id;
+ id = lj_ctype_intern(cp->cts, info+id, size);
+ }
+ } while (idx);
+ return id;
+}
+
+/* -- C declaration parser ------------------------------------------------ */
+
+#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be)
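+/* Keyword hashes differ between little- and big-endian targets, hence two constants per name below. */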
+
+/* Reset declaration state to declaration specifier. */
+static void cp_decl_reset(CPDecl *decl)
+{
+ decl->pos = decl->specpos;
+ decl->top = decl->specpos+1;
+ decl->stack[decl->specpos].next = 0;
+ decl->attr = decl->specattr;
+ decl->fattr = decl->specfattr;
+ decl->name = NULL;
+ decl->redir = NULL;
+}
+
+/* Parse constant initializer. */
+/* NYI: FP constants and strings as initializers. */
+static CTypeID cp_decl_constinit(CPState *cp, CType **ctp, CTypeID typeid)
+{
+ CType *ctt = ctype_get(cp->cts, typeid);
+ CTInfo info;
+ CTSize size;
+ CPValue k;
+ CTypeID constid;
+ while (ctype_isattrib(ctt->info)) { /* Skip attributes. */
+ typeid = ctype_cid(ctt->info); /* Update ID, too. */
+ ctt = ctype_get(cp->cts, typeid);
+ }
+ info = ctt->info;
+ size = ctt->size;
+ if (!ctype_isinteger(info) || !(info & CTF_CONST) || size > 4)
+ cp_err(cp, LJ_ERR_FFI_INVTYPE);
+ cp_check(cp, '=');
+ cp_expr_sub(cp, &k, 0);
+ constid = lj_ctype_new(cp->cts, ctp);
+ (*ctp)->info = CTINFO(CT_CONSTVAL, CTF_CONST|typeid);
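+ /* Truncate the constant to the declared size, zero- or sign-extending it back to 32 bits. */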
+ k.u32 <<= 8*(4-size);
+ if ((info & CTF_UNSIGNED))
+ k.u32 >>= 8*(4-size);
+ else
+ k.u32 = (uint32_t)((int32_t)k.u32 >> 8*(4-size));
+ (*ctp)->size = k.u32;
+ return constid;
+}
+
+/* Parse size in parentheses as part of attribute. */
+static CTSize cp_decl_sizeattr(CPState *cp)
+{
+ CTSize sz;
+ uint32_t oldtmask = cp->tmask;
+ cp->tmask = CPNS_DEFAULT; /* Required for expression evaluator. */
+ cp_check(cp, '(');
+ sz = cp_expr_ksize(cp);
+ cp->tmask = oldtmask;
+ cp_check(cp, ')');
+ return sz;
+}
+
+/* Parse alignment attribute. */
+static void cp_decl_align(CPState *cp, CPDecl *decl)
+{
+ CTSize al = 4; /* Unspecified alignment is 16 bytes. */
+ if (cp->tok == '(') {
+ al = cp_decl_sizeattr(cp);
+ al = al ? lj_fls(al) : 0;
+ }
+ CTF_INSERT(decl->attr, ALIGN, al);
+ decl->attr |= CTFP_ALIGNED;
+}
+
+/* Parse GCC asm("name") redirect. */
+static void cp_decl_asm(CPState *cp, CPDecl *decl)
+{
+ UNUSED(decl);
+ cp_next(cp);
+ cp_check(cp, '(');
+ if (cp->tok == CTOK_STRING) {
+ GCstr *str = cp->str;
+ while (cp_next(cp) == CTOK_STRING) {
+ lj_str_pushf(cp->L, "%s%s", strdata(str), strdata(cp->str));
+ cp->L->top--;
+ str = strV(cp->L->top);
+ }
+ decl->redir = str;
+ }
+ cp_check(cp, ')');
+}
+
+/* Parse GCC __attribute__((mode(...))). */
+static void cp_decl_mode(CPState *cp, CPDecl *decl)
+{
+ cp_check(cp, '(');
+ if (cp->tok == CTOK_IDENT) {
+ const char *s = strdata(cp->str);
+ CTSize sz = 0, vlen = 0;
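+ /* Mode names have the form [__]<size><class>[__], e.g. __DI__ or SI, optionally prefixed with V<n> for vectors (e.g. V4SF); only integer and FP classes are honored. */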
+ if (s[0] == '_' && s[1] == '_') s += 2;
+ if (*s == 'V') {
+ s++;
+ vlen = *s++ - '0';
+ if (*s >= '0' && *s <= '9')
+ vlen = vlen*10 + (*s++ - '0');
+ }
+ switch (*s++) {
+ case 'Q': sz = 1; break;
+ case 'H': sz = 2; break;
+ case 'S': sz = 4; break;
+ case 'D': sz = 8; break;
+ case 'T': sz = 16; break;
+ case 'O': sz = 32; break;
+ default: goto bad_size;
+ }
+ if (*s == 'I' || *s == 'F') {
+ CTF_INSERT(decl->attr, MSIZEP, sz);
+ if (vlen) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vlen*sz));
+ }
+ bad_size:
+ cp_next(cp);
+ }
+ cp_check(cp, ')');
+}
+
+/* Parse GCC __attribute__((...)). */
+static void cp_decl_gccattribute(CPState *cp, CPDecl *decl)
+{
+ cp_next(cp);
+ cp_check(cp, '(');
+ cp_check(cp, '(');
+ while (cp->tok != ')') {
+ if (cp->tok == CTOK_IDENT) {
+ GCstr *attrstr = cp->str;
+ cp_next(cp);
+ switch (attrstr->hash) {
+ case H_(64a9208e,8ce14319): case H_(8e6331b2,95a282af): /* aligned */
+ cp_decl_align(cp, decl);
+ break;
+ case H_(42eb47de,f0ede26c): case H_(29f48a09,cf383e0c): /* packed */
+ decl->attr |= CTFP_PACKED;
+ break;
+ case H_(0a84eef6,8dfab04c): case H_(995cf92c,d5696591): /* mode */
+ cp_decl_mode(cp, decl);
+ break;
+ case H_(0ab31997,2d5213fa): case H_(bf875611,200e9990): /* vector_size */
+ {
+ CTSize vsize = cp_decl_sizeattr(cp);
+ if (vsize) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vsize));
+ }
+ break;
+#if LJ_TARGET_X86
+ case H_(5ad22db8,c689b848): case H_(439150fa,65ea78cb): /* regparm */
+ CTF_INSERT(decl->fattr, REGPARM, cp_decl_sizeattr(cp));
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case H_(18fc0b98,7ff4c074): case H_(4e62abed,0a747424): /* cdecl */
+ CTF_INSERT(decl->fattr, CCONV, CTCC_CDECL);
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case H_(72b2e41b,494c5a44): case H_(f2356d59,f25fc9bd): /* thiscall */
+ CTF_INSERT(decl->fattr, CCONV, CTCC_THISCALL);
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case H_(0d0ffc42,ab746f88): case H_(21c54ba1,7f0ca7e3): /* fastcall */
+ CTF_INSERT(decl->fattr, CCONV, CTCC_FASTCALL);
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case H_(ef76b040,9412e06a): case H_(de56697b,c750e6e1): /* stdcall */
+ CTF_INSERT(decl->fattr, CCONV, CTCC_STDCALL);
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case H_(ea78b622,f234bd8e): case H_(252ffb06,8d50f34b): /* sseregparm */
+ decl->fattr |= CTF_SSEREGPARM;
+ decl->fattr |= CTFP_CCONV;
+ break;
+#endif
+ default: /* Skip all other attributes. */
+ goto skip_attr;
+ }
+ } else if (cp->tok >= CTOK_FIRSTDECL) { /* For __attribute((const)) etc. */
+ cp_next(cp);
+ skip_attr:
+ if (cp_opt(cp, '(')) {
+ while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp);
+ cp_check(cp, ')');
+ }
+ } else {
+ break;
+ }
+ if (!cp_opt(cp, ',')) break;
+ }
+ cp_check(cp, ')');
+ cp_check(cp, ')');
+}
+
+/* Parse MSVC __declspec(...). */
+static void cp_decl_msvcattribute(CPState *cp, CPDecl *decl)
+{
+ cp_next(cp);
+ cp_check(cp, '(');
+ while (cp->tok == CTOK_IDENT) {
+ GCstr *attrstr = cp->str;
+ cp_next(cp);
+ switch (attrstr->hash) {
+ case H_(bc2395fa,98f267f8): /* align */
+ cp_decl_align(cp, decl);
+ break;
+ default: /* Ignore all other attributes. */
+ if (cp_opt(cp, '(')) {
+ while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp);
+ cp_check(cp, ')');
+ }
+ break;
+ }
+ }
+ cp_check(cp, ')');
+}
+
+/* Parse declaration attributes (and common qualifiers). */
+static void cp_decl_attributes(CPState *cp, CPDecl *decl)
+{
+ for (;;) {
+ switch (cp->tok) {
+ case CTOK_CONST: decl->attr |= CTF_CONST; break;
+ case CTOK_VOLATILE: decl->attr |= CTF_VOLATILE; break;
+ case CTOK_RESTRICT: break; /* Ignore. */
+ case CTOK_EXTENSION: break; /* Ignore. */
+ case CTOK_ATTRIBUTE: cp_decl_gccattribute(cp, decl); continue;
+ case CTOK_ASM: cp_decl_asm(cp, decl); continue;
+ case CTOK_DECLSPEC: cp_decl_msvcattribute(cp, decl); continue;
+ case CTOK_CCDECL:
+#if LJ_TARGET_X86
+ CTF_INSERT(decl->fattr, CCONV, cp->ct->size);
+ decl->fattr |= CTFP_CCONV;
+#endif
+ break;
+ case CTOK_PTRSZ:
+#if LJ_64
+ CTF_INSERT(decl->attr, MSIZEP, cp->ct->size);
+#endif
+ break;
+ default: return;
+ }
+ cp_next(cp);
+ }
+}
+
+/* Parse struct/union/enum name. */
+static CTypeID cp_struct_name(CPState *cp, CPDecl *sdecl, CTInfo info)
+{
+ CTypeID sid;
+ CType *ct;
+ cp->tmask = CPNS_STRUCT;
+ cp_next(cp);
+ cp_decl_attributes(cp, sdecl);
+ cp->tmask = CPNS_DEFAULT;
+ if (cp->tok != '{') {
+ if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT);
+ if (cp->val.id) { /* Name of existing struct/union/enum. */
+ sid = cp->val.id;
+ ct = cp->ct;
+ if ((ct->info ^ info) & (CTMASK_NUM|CTF_UNION)) /* Wrong type. */
+ cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name))));
+ } else { /* Create named, incomplete struct/union/enum. */
+ if ((cp->mode & CPARSE_MODE_NOIMPLICIT))
+ cp_errmsg(cp, 0, LJ_ERR_FFI_BADTAG, strdata(cp->str));
+ sid = lj_ctype_new(cp->cts, &ct);
+ ct->info = info;
+ ct->size = CTSIZE_INVALID;
+ ctype_setname(ct, cp->str);
+ lj_ctype_addname(cp->cts, ct, sid);
+ }
+ cp_next(cp);
+ } else { /* Create anonymous, incomplete struct/union/enum. */
+ sid = lj_ctype_new(cp->cts, &ct);
+ ct->info = info;
+ ct->size = CTSIZE_INVALID;
+ }
+ if (cp->tok == '{') {
+ if (ct->size != CTSIZE_INVALID || ct->sib)
+ cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name))));
+ ct->sib = 1; /* Indicate the type is currently being defined. */
+ }
+ return sid;
+}
+
+/* Determine field alignment. */
+static CTSize cp_field_align(CPState *cp, CType *ct, CTInfo info)
+{
+ CTSize align = ctype_align(info);
+ UNUSED(cp); UNUSED(ct);
+#if (LJ_TARGET_X86 && !LJ_ABI_WIN) || (LJ_TARGET_ARM && __APPLE__)
+ /* The SYSV i386 and iOS ABIs limit alignment of non-vector fields to 2^2. */
+ if (align > 2 && !(info & CTFP_ALIGNED)) {
+ if (ctype_isarray(info) && !(info & CTF_VECTOR)) {
+ do {
+ ct = ctype_rawchild(cp->cts, ct);
+ info = ct->info;
+ } while (ctype_isarray(info) && !(info & CTF_VECTOR));
+ }
+ if (ctype_isnum(info) || ctype_isenum(info))
+ align = 2;
+ }
+#endif
+ return align;
+}
+
+/* Layout struct/union fields. */
+static void cp_struct_layout(CPState *cp, CTypeID sid, CTInfo sattr)
+{
+ CTSize bofs = 0, bmaxofs = 0; /* Bit offset and max. bit offset. */
+ CTSize maxalign = ctype_align(sattr);
+ CType *sct = ctype_get(cp->cts, sid);
+ CTInfo sinfo = sct->info;
+ CTypeID fieldid = sct->sib;
+ while (fieldid) {
+ CType *ct = ctype_get(cp->cts, fieldid);
+ CTInfo attr = ct->size; /* Field declaration attributes (temp.). */
+
+ if (ctype_isfield(ct->info) ||
+ (ctype_isxattrib(ct->info, CTA_SUBTYPE) && attr)) {
+ CTSize align, amask; /* Alignment (pow2) and alignment mask (bits). */
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cp->cts, ctype_cid(ct->info), &sz);
+ CTSize bsz, csz = 8*sz; /* Field size and container size (in bits). */
+ sinfo |= (info & (CTF_QUAL|CTF_VLA)); /* Merge pseudo-qualifiers. */
+
+ /* Check for size overflow and determine alignment. */
+ if (sz >= 0x20000000u || bofs + csz < bofs) {
+ if (!(sz == CTSIZE_INVALID && ctype_isarray(info) &&
+ !(sinfo & CTF_UNION)))
+ cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ csz = sz = 0; /* Treat a[] and a[?] as zero-sized. */
+ }
+ align = cp_field_align(cp, ct, info);
+ if (((attr|sattr) & CTFP_PACKED) ||
+ ((attr & CTFP_ALIGNED) && ctype_align(attr) > align))
+ align = ctype_align(attr);
+ if (cp->packstack[cp->curpack] < align)
+ align = cp->packstack[cp->curpack];
+ if (align > maxalign) maxalign = align;
+ amask = (8u << align) - 1;
+
+ bsz = ctype_bitcsz(ct->info); /* Bitfield size (temp.). */
+ if (bsz == CTBSZ_FIELD || !ctype_isfield(ct->info)) {
+ bsz = csz; /* Regular fields or subtypes always fill the container. */
+ bofs = (bofs + amask) & ~amask; /* Start new aligned field. */
+ ct->size = (bofs >> 3); /* Store field offset. */
+ } else { /* Bitfield. */
+ if (bsz == 0 || (attr & CTFP_ALIGNED) ||
+ (!((attr|sattr) & CTFP_PACKED) && (bofs & amask) + bsz > csz))
+ bofs = (bofs + amask) & ~amask; /* Start new aligned field. */
+
+ /* Prefer regular field over bitfield. */
+ if (bsz == csz && (bofs & amask) == 0) {
+ ct->info = CTINFO(CT_FIELD, ctype_cid(ct->info));
+ ct->size = (bofs >> 3); /* Store field offset. */
+ } else {
+ ct->info = CTINFO(CT_BITFIELD,
+ (info & (CTF_QUAL|CTF_UNSIGNED|CTF_BOOL)) +
+ (csz << (CTSHIFT_BITCSZ-3)) + (bsz << CTSHIFT_BITBSZ));
+#if LJ_BE
+ ct->info += ((csz - (bofs & (csz-1)) - bsz) << CTSHIFT_BITPOS);
+#else
+ ct->info += ((bofs & (csz-1)) << CTSHIFT_BITPOS);
+#endif
+ ct->size = ((bofs & ~(csz-1)) >> 3); /* Store container offset. */
+ }
+ }
+
+ /* Determine next offset or max. offset. */
+ if ((sinfo & CTF_UNION)) {
+ if (bsz > bmaxofs) bmaxofs = bsz;
+ } else {
+ bofs += bsz;
+ }
+ } /* All other fields in the chain are already set up. */
+
+ fieldid = ct->sib;
+ }
+
+ /* Complete struct/union. */
+ sct->info = sinfo + CTALIGN(maxalign);
+ bofs = (sinfo & CTF_UNION) ? bmaxofs : bofs;
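+ /* Convert the log2 alignment to a bit mask (in bits) and round the total size up to it. */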
+ maxalign = (8u << maxalign) - 1;
+ sct->size = (((bofs + maxalign) & ~maxalign) >> 3);
+}
+
+/* Parse struct/union declaration. */
+static CTypeID cp_decl_struct(CPState *cp, CPDecl *sdecl, CTInfo sinfo)
+{
+ CTypeID sid = cp_struct_name(cp, sdecl, sinfo);
+ if (cp_opt(cp, '{')) { /* Struct/union definition. */
+ CTypeID lastid = sid;
+ int lastdecl = 0;
+ while (cp->tok != '}') {
+ CPDecl decl;
+ CPscl scl = cp_decl_spec(cp, &decl, CDF_STATIC);
+ decl.mode = scl ? CPARSE_MODE_DIRECT :
+ CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT|CPARSE_MODE_FIELD;
+
+ for (;;) {
+ CTypeID typeid;
+
+ if (lastdecl) cp_err_token(cp, '}');
+
+ /* Parse field declarator. */
+ decl.bits = CTSIZE_INVALID;
+ cp_declarator(cp, &decl);
+ typeid = cp_decl_intern(cp, &decl);
+
+ if ((scl & CDF_STATIC)) { /* Static constant in struct namespace. */
+ CType *ct;
+ CTypeID fieldid = cp_decl_constinit(cp, &ct, typeid);
+ ctype_get(cp->cts, lastid)->sib = fieldid;
+ lastid = fieldid;
+ ctype_setname(ct, decl.name);
+ } else {
+ CTSize bsz = CTBSZ_FIELD; /* Temp. for layout phase. */
+ CType *ct;
+ CTypeID fieldid = lj_ctype_new(cp->cts, &ct); /* Do this first. */
+ CType *tct = ctype_raw(cp->cts, typeid);
+
+ if (decl.bits == CTSIZE_INVALID) { /* Regular field. */
+ if (ctype_isarray(tct->info) && tct->size == CTSIZE_INVALID)
+ lastdecl = 1; /* a[] or a[?] must be the last declared field. */
+
+ /* Accept transparent struct/union/enum. */
+ if (!decl.name) {
+ if (!((ctype_isstruct(tct->info) && !(tct->info & CTF_VLA)) ||
+ ctype_isenum(tct->info)))
+ cp_err_token(cp, CTOK_IDENT);
+ ct->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_SUBTYPE) + typeid);
+ ct->size = ctype_isstruct(tct->info) ?
+ (decl.attr|0x80000000u) : 0; /* For layout phase. */
+ goto add_field;
+ }
+ } else { /* Bitfield. */
+ bsz = decl.bits;
+ if (!ctype_isinteger_or_bool(tct->info) ||
+ (bsz == 0 && decl.name) || 8*tct->size > CTBSZ_MAX ||
+ bsz > ((tct->info & CTF_BOOL) ? 1 : 8*tct->size))
+ cp_errmsg(cp, ':', LJ_ERR_BADVAL);
+ }
+
+ /* Create temporary field for layout phase. */
+ ct->info = CTINFO(CT_FIELD, typeid + (bsz << CTSHIFT_BITCSZ));
+ ct->size = decl.attr;
+ if (decl.name) ctype_setname(ct, decl.name);
+
+ add_field:
+ ctype_get(cp->cts, lastid)->sib = fieldid;
+ lastid = fieldid;
+ }
+ if (!cp_opt(cp, ',')) break;
+ cp_decl_reset(&decl);
+ }
+ cp_check(cp, ';');
+ }
+ cp_check(cp, '}');
+ ctype_get(cp->cts, lastid)->sib = 0; /* Drop sib = 1 for empty structs. */
+ cp_decl_attributes(cp, sdecl); /* Layout phase needs postfix attributes. */
+ cp_struct_layout(cp, sid, sdecl->attr);
+ }
+ return sid;
+}
+
+/* Parse enum declaration. */
+static CTypeID cp_decl_enum(CPState *cp, CPDecl *sdecl)
+{
+ CTypeID eid = cp_struct_name(cp, sdecl, CTINFO(CT_ENUM, CTID_VOID));
+ CTInfo einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_UINT32);
+ CTSize esize = 4; /* Only 32 bit enums are supported. */
+ if (cp_opt(cp, '{')) { /* Enum definition. */
+ CPValue k;
+ CTypeID lastid = eid;
+ k.u32 = 0;
+ k.id = CTID_INT32;
+ do {
+ GCstr *name = cp->str;
+ if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT);
+ if (cp->val.id) cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(name));
+ cp_next(cp);
+ if (cp_opt(cp, '=')) {
+ cp_expr_kint(cp, &k);
+ if (k.id == CTID_UINT32) {
+ /* C99 says that enum constants are always (signed) integers.
+ ** But since unsigned constants like 0x80000000 are quite common,
+ ** those are left as uint32_t.
+ */
+ if (k.i32 >= 0) k.id = CTID_INT32;
+ } else {
+ /* OTOH it's common practice and even mandated by some ABIs
+ ** that the enum type itself is unsigned, unless there are any
+ ** negative constants.
+ */
+ k.id = CTID_INT32;
+ if (k.i32 < 0) einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_INT32);
+ }
+ }
+ /* Add named enum constant. */
+ {
+ CType *ct;
+ CTypeID constid = lj_ctype_new(cp->cts, &ct);
+ ctype_get(cp->cts, lastid)->sib = constid;
+ lastid = constid;
+ ctype_setname(ct, name);
+ ct->info = CTINFO(CT_CONSTVAL, CTF_CONST|k.id);
+ ct->size = k.u32++;
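+ /* Once the next implicit value reaches 0x80000000, subsequent constants are unsigned. */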
+ if (k.u32 == 0x80000000u) k.id = CTID_UINT32;
+ lj_ctype_addname(cp->cts, ct, constid);
+ }
+ if (!cp_opt(cp, ',')) break;
+ } while (cp->tok != '}'); /* Trailing ',' is ok. */
+ cp_check(cp, '}');
+ /* Complete enum. */
+ ctype_get(cp->cts, eid)->info = einfo;
+ ctype_get(cp->cts, eid)->size = esize;
+ }
+ return eid;
+}
+
+/* Parse declaration specifiers. */
+static CPscl cp_decl_spec(CPState *cp, CPDecl *decl, CPscl scl)
+{
+ uint32_t cds = 0, sz = 0;
+ CTInfo tdef = 0;
+
+ decl->cp = cp;
+ decl->mode = cp->mode;
+ decl->name = NULL;
+ decl->redir = NULL;
+ decl->attr = 0;
+ decl->fattr = 0;
+ decl->pos = decl->top = 0;
+ decl->stack[0].next = 0;
+
+ for (;;) { /* Parse basic types. */
+ cp_decl_attributes(cp, decl);
+ switch (cp->tok) {
+ case CTOK_STRUCT:
+ tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, 0));
+ break;
+ case CTOK_UNION:
+ tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, CTF_UNION));
+ break;
+ case CTOK_ENUM:
+ tdef = cp_decl_enum(cp, decl);
+ break;
+ case CTOK_IDENT:
+ if (!ctype_istypedef(cp->ct->info) || sz || tdef ||
+ (cds & (CDF_SHORT|CDF_LONG|CDF_SIGNED|CDF_UNSIGNED|CDF_COMPLEX)))
+ goto end_decl;
+ tdef = ctype_cid(cp->ct->info); /* Get typedef. */
+ cp_next(cp);
+ break;
+ default:
+ if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECLFLAG) {
+ uint32_t cbit;
+ if (cp->ct->size) {
+ if (sz) goto end_decl;
+ sz = cp->ct->size;
+ }
+ cbit = (1u << (cp->tok - CTOK_FIRSTDECL));
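+ /* Seeing 'long' twice turns CDF_LONG into CDF_LONGLONG (the adjacent bit). */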
+ cds = cds | cbit | ((cbit & cds & CDF_LONG) << 1);
+ if (cp->tok >= CTOK_FIRSTSCL && !(scl & cbit))
+ cp_errmsg(cp, cp->tok, LJ_ERR_FFI_BADSCL);
+ cp_next(cp);
+ break;
+ }
+ goto end_decl;
+ }
+ }
+end_decl:
+
+ if ((cds & CDF_COMPLEX)) /* Use predefined complex types. */
+ tdef = sz == 4 ? CTID_COMPLEX_FLOAT : CTID_COMPLEX_DOUBLE;
+
+ if (tdef) {
+ cp_push_type(decl, tdef);
+ } else if ((cds & CDF_VOID)) {
+ cp_push(decl, CTINFO(CT_VOID, (decl->attr & CTF_QUAL)), CTSIZE_INVALID);
+ decl->attr &= ~CTF_QUAL;
+ } else {
+ /* Determine type info and size. */
+ CTInfo info = CTINFO(CT_NUM, (cds & CDF_UNSIGNED) ? CTF_UNSIGNED : 0);
+ if ((cds & CDF_BOOL)) {
+ if ((cds & ~(CDF_SCL|CDF_BOOL|CDF_INT|CDF_SIGNED|CDF_UNSIGNED)))
+ cp_errmsg(cp, 0, LJ_ERR_FFI_INVTYPE);
+ info |= CTF_BOOL;
+ if (!sz) {
+ if (!(cds & CDF_SIGNED)) info |= CTF_UNSIGNED;
+ sz = 1;
+ }
+ } else if ((cds & CDF_FP)) {
+ info = CTINFO(CT_NUM, CTF_FP);
+ if ((cds & CDF_LONG)) sz = sizeof(long double);
+ } else if ((cds & CDF_CHAR)) {
+ if ((cds & (CDF_CHAR|CDF_SIGNED|CDF_UNSIGNED)) == CDF_CHAR)
+ info |= CTF_UCHAR; /* Handle platforms where char is unsigned. */
+ } else if ((cds & CDF_SHORT)) {
+ sz = sizeof(short);
+ } else if ((cds & CDF_LONGLONG)) {
+ sz = 8;
+ } else if ((cds & CDF_LONG)) {
+ info |= CTF_LONG;
+ sz = sizeof(long);
+ } else if (!sz) {
+ if (!(cds & (CDF_SIGNED|CDF_UNSIGNED)))
+ cp_errmsg(cp, cp->tok, LJ_ERR_FFI_DECLSPEC);
+ sz = sizeof(int);
+ }
+ lua_assert(sz != 0);
+ info += CTALIGN(lj_fls(sz)); /* Use natural alignment. */
+ info += (decl->attr & CTF_QUAL); /* Merge qualifiers. */
+ cp_push(decl, info, sz);
+ decl->attr &= ~CTF_QUAL;
+ }
+ decl->specpos = decl->pos;
+ decl->specattr = decl->attr;
+ decl->specfattr = decl->fattr;
+ return (cds & CDF_SCL); /* Return storage class. */
+}
+
+/* Parse array declaration. */
+static void cp_decl_array(CPState *cp, CPDecl *decl)
+{
+ CTInfo info = CTINFO(CT_ARRAY, 0);
+ CTSize nelem = CTSIZE_INVALID; /* Default size for a[] or a[?]. */
+ cp_decl_attributes(cp, decl);
+ if (cp_opt(cp, '?'))
+ info |= CTF_VLA; /* Create variable-length array a[?]. */
+ else if (cp->tok != ']')
+ nelem = cp_expr_ksize(cp);
+ cp_check(cp, ']');
+ cp_add(decl, info, nelem);
+}
+
+/* Parse function declaration. */
+static void cp_decl_func(CPState *cp, CPDecl *fdecl)
+{
+ CTSize nargs = 0;
+ CTInfo info = CTINFO(CT_FUNC, 0);
+ CTypeID lastid = 0, anchor = 0;
+ if (cp->tok != ')') {
+ do {
+ CPDecl decl;
+ CTypeID typeid, fieldid;
+ CType *ct;
+ if (cp_opt(cp, '.')) { /* Vararg function. */
+ cp_check(cp, '.'); /* Workaround for the minimalistic lexer. */
+ cp_check(cp, '.');
+ info |= CTF_VARARG;
+ break;
+ }
+ cp_decl_spec(cp, &decl, CDF_REGISTER);
+ decl.mode = CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT;
+ cp_declarator(cp, &decl);
+ typeid = cp_decl_intern(cp, &decl);
+ ct = ctype_raw(cp->cts, typeid);
+ if (ctype_isvoid(ct->info))
+ break;
+ else if (ctype_isrefarray(ct->info))
+ typeid = lj_ctype_intern(cp->cts,
+ CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ct->info)), CTSIZE_PTR);
+ else if (ctype_isfunc(ct->info))
+ typeid = lj_ctype_intern(cp->cts,
+ CTINFO(CT_PTR, CTALIGN_PTR|typeid), CTSIZE_PTR);
+ /* Add new parameter. */
+ fieldid = lj_ctype_new(cp->cts, &ct);
+ if (anchor)
+ ctype_get(cp->cts, lastid)->sib = fieldid;
+ else
+ anchor = fieldid;
+ lastid = fieldid;
+ if (decl.name) ctype_setname(ct, decl.name);
+ ct->info = CTINFO(CT_FIELD, typeid);
+ ct->size = nargs++;
+ } while (cp_opt(cp, ','));
+ }
+ cp_check(cp, ')');
+ if (cp_opt(cp, '{')) { /* Skip function definition. */
+ int level = 1;
+ cp->mode |= CPARSE_MODE_SKIP;
+ for (;;) {
+ if (cp->tok == '{') level++;
+ else if (cp->tok == '}' && --level == 0) break;
+ else if (cp->tok == CTOK_EOF) cp_err_token(cp, '}');
+ cp_next(cp);
+ }
+ cp->mode &= ~CPARSE_MODE_SKIP;
+ cp->tok = ';'; /* Ok for cp_decl_multi(), error in cp_decl_single(). */
+ }
+ info |= (fdecl->fattr & ~CTMASK_CID);
+ fdecl->fattr = 0;
+ fdecl->stack[cp_add(fdecl, info, nargs)].sib = anchor;
+}
+
+/* Parse declarator. */
+static void cp_declarator(CPState *cp, CPDecl *decl)
+{
+ if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS);
+
+ for (;;) { /* Head of declarator. */
+ if (cp_opt(cp, '*')) { /* Pointer. */
+ CTSize sz;
+ CTInfo info;
+ cp_decl_attributes(cp, decl);
+ sz = CTSIZE_PTR;
+ info = CTINFO(CT_PTR, CTALIGN_PTR);
+#if LJ_64
+ if (ctype_msizeP(decl->attr) == 4) {
+ sz = 4;
+ info = CTINFO(CT_PTR, CTALIGN(2));
+ }
+#endif
+ info += (decl->attr & (CTF_QUAL|CTF_REF));
+ decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP << CTSHIFT_MSIZEP));
+ cp_push(decl, info, sz);
+ } else if (cp_opt(cp, '&') || cp_opt(cp, CTOK_ANDAND)) { /* Reference. */
+ decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP << CTSHIFT_MSIZEP));
+ cp_push(decl, CTINFO_REF(0), CTSIZE_PTR);
+ } else {
+ break;
+ }
+ }
+
+ if (cp_opt(cp, '(')) { /* Inner declarator. */
+ CPDeclIdx pos;
+ cp_decl_attributes(cp, decl);
+ /* Resolve ambiguity between inner declarator and 1st function parameter. */
+ if ((decl->mode & CPARSE_MODE_ABSTRACT) &&
+ (cp->tok == ')' || cp_istypedecl(cp))) goto func_decl;
+ pos = decl->pos;
+ cp_declarator(cp, decl);
+ cp_check(cp, ')');
+ decl->pos = pos;
+ } else if (cp->tok == CTOK_IDENT) { /* Direct declarator. */
+ if (!(decl->mode & CPARSE_MODE_DIRECT)) cp_err_token(cp, CTOK_EOF);
+ decl->name = cp->str;
+ decl->nameid = cp->val.id;
+ cp_next(cp);
+ } else { /* Abstract declarator. */
+ if (!(decl->mode & CPARSE_MODE_ABSTRACT)) cp_err_token(cp, CTOK_IDENT);
+ }
+
+ for (;;) { /* Tail of declarator. */
+ if (cp_opt(cp, '[')) { /* Array. */
+ cp_decl_array(cp, decl);
+ } else if (cp_opt(cp, '(')) { /* Function. */
+ func_decl:
+ cp_decl_func(cp, decl);
+ } else {
+ break;
+ }
+ }
+
+ if ((decl->mode & CPARSE_MODE_FIELD) && cp_opt(cp, ':')) /* Field width. */
+ decl->bits = cp_expr_ksize(cp);
+
+ /* Process postfix attributes. */
+ cp_decl_attributes(cp, decl);
+ cp_push_attributes(decl);
+
+ cp->depth--;
+}
+
+/* Parse an abstract type declaration and return its C type ID. */
+static CTypeID cp_decl_abstract(CPState *cp)
+{
+ CPDecl decl;
+ cp_decl_spec(cp, &decl, 0);
+ decl.mode = CPARSE_MODE_ABSTRACT;
+ cp_declarator(cp, &decl);
+ return cp_decl_intern(cp, &decl);
+}
+
+/* Handle pragmas. */
+static void cp_pragma(CPState *cp, BCLine pragmaline)
+{
+ cp_next(cp);
+ if (cp->tok == CTOK_IDENT &&
+ cp->str->hash == H_(e79b999f,42ca3e85)) { /* pack */
+ cp_next(cp);
+ cp_check(cp, '(');
+ if (cp->tok == CTOK_IDENT) {
+ if (cp->str->hash == H_(738e923c,a1b65954)) { /* push */
+ if (cp->curpack < CPARSE_MAX_PACKSTACK) {
+ cp->packstack[cp->curpack+1] = cp->packstack[cp->curpack];
+ cp->curpack++;
+ }
+ } else if (cp->str->hash == H_(6c71cf27,6c71cf27)) { /* pop */
+ if (cp->curpack > 0) cp->curpack--;
+ } else {
+ cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL);
+ }
+ cp_next(cp);
+ if (!cp_opt(cp, ',')) goto end_pack;
+ }
+ if (cp->tok == CTOK_INTEGER) {
+ cp->packstack[cp->curpack] = cp->val.u32 ? lj_fls(cp->val.u32) : 0;
+ cp_next(cp);
+ } else {
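+ /* No value given: store a huge log2 limit, i.e. effectively no packing restriction. */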
+ cp->packstack[cp->curpack] = 255;
+ }
+ end_pack:
+ cp_check(cp, ')');
+ } else { /* Ignore all other pragmas. */
+ while (cp->tok != CTOK_EOF && cp->linenumber == pragmaline)
+ cp_next(cp);
+ }
+}
+
+/* Parse multiple C declarations of types or extern identifiers. */
+static void cp_decl_multi(CPState *cp)
+{
+ int first = 1;
+ while (cp->tok != CTOK_EOF) {
+ CPDecl decl;
+ CPscl scl;
+ if (cp_opt(cp, ';')) { /* Skip empty statements. */
+ first = 0;
+ continue;
+ }
+ if (cp->tok == '#') { /* Workaround, since we have no preprocessor, yet. */
+ BCLine pragmaline = cp->linenumber;
+ if (!(cp_next(cp) == CTOK_IDENT &&
+ cp->str->hash == H_(f5e6b4f8,1d509107))) /* pragma */
+ cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL);
+ cp_pragma(cp, pragmaline);
+ continue;
+ }
+ scl = cp_decl_spec(cp, &decl, CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC);
+ if ((cp->tok == ';' || cp->tok == CTOK_EOF) &&
+ ctype_istypedef(decl.stack[0].info)) {
+ CTInfo info = ctype_rawchild(cp->cts, &decl.stack[0])->info;
+ if (ctype_isstruct(info) || ctype_isenum(info))
+ goto decl_end; /* Accept empty declaration of struct/union/enum. */
+ }
+ for (;;) {
+ CTypeID typeid;
+ cp_declarator(cp, &decl);
+ typeid = cp_decl_intern(cp, &decl);
+ if (decl.name && !decl.nameid) { /* NYI: redeclarations are ignored. */
+ CType *ct;
+ CTypeID id;
+ if ((scl & CDF_TYPEDEF)) { /* Create new typedef. */
+ id = lj_ctype_new(cp->cts, &ct);
+ ct->info = CTINFO(CT_TYPEDEF, typeid);
+ goto noredir;
+ } else if (ctype_isfunc(ctype_get(cp->cts, typeid)->info)) {
+ /* Treat both static and extern function declarations as extern. */
+ ct = ctype_get(cp->cts, typeid);
+ /* We always get new anonymous functions (typedefs are copied). */
+ lua_assert(gcref(ct->name) == NULL);
+ id = typeid; /* Just name it. */
+ } else if ((scl & CDF_STATIC)) { /* Accept static constants. */
+ id = cp_decl_constinit(cp, &ct, typeid);
+ goto noredir;
+ } else { /* External references have extern or no storage class. */
+ id = lj_ctype_new(cp->cts, &ct);
+ ct->info = CTINFO(CT_EXTERN, typeid);
+ }
+ if (decl.redir) { /* Add attribute for redirected symbol name. */
+ CType *cta;
+ CTypeID aid = lj_ctype_new(cp->cts, &cta);
+ ct = ctype_get(cp->cts, id); /* Table may have been reallocated. */
+ cta->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_REDIR));
+ cta->sib = ct->sib;
+ ct->sib = aid;
+ ctype_setname(cta, decl.redir);
+ }
+ noredir:
+ ctype_setname(ct, decl.name);
+ lj_ctype_addname(cp->cts, ct, id);
+ }
+ if (!cp_opt(cp, ',')) break;
+ cp_decl_reset(&decl);
+ }
+ decl_end:
+ if (cp->tok == CTOK_EOF && first) break; /* May omit ';' for 1 decl. */
+ first = 0;
+ cp_check(cp, ';');
+ }
+}
+
+/* Parse a single C type declaration. */
+static void cp_decl_single(CPState *cp)
+{
+ CPDecl decl;
+ cp_decl_spec(cp, &decl, 0);
+ cp_declarator(cp, &decl);
+ cp->val.id = cp_decl_intern(cp, &decl);
+ if (cp->tok != CTOK_EOF) cp_err_token(cp, CTOK_EOF);
+}
+
+#undef H_
+
+/* ------------------------------------------------------------------------ */
+
+/* Protected callback for C parser. */
+static TValue *cpcparser(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ CPState *cp = (CPState *)ud;
+ UNUSED(dummy);
+ cframe_errfunc(L->cframe) = -1; /* Inherit error function. */
+ cp_init(cp);
+ if ((cp->mode & CPARSE_MODE_MULTI))
+ cp_decl_multi(cp);
+ else
+ cp_decl_single(cp);
+ lua_assert(cp->depth == 0);
+ return NULL;
+}
+
+/* C parser. */
+int lj_cparse(CPState *cp)
+{
+ LJ_CTYPE_SAVE(cp->cts);
+ int errcode = lj_vm_cpcall(cp->L, NULL, cp, cpcparser);
+ if (errcode)
+ LJ_CTYPE_RESTORE(cp->cts);
+ cp_cleanup(cp);
+ return errcode;
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_cparse.h b/src/LuaJIT/src/lj_cparse.h
new file mode 100644
index 000000000..eaa1bdb53
--- /dev/null
+++ b/src/LuaJIT/src/lj_cparse.h
@@ -0,0 +1,64 @@
+/*
+** C declaration parser.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CPARSE_H
+#define _LJ_CPARSE_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* C parser limits. */
+#define CPARSE_MAX_BUF 32768 /* Max. token buffer size. */
+#define CPARSE_MAX_DECLSTACK 100 /* Max. declaration stack depth. */
+#define CPARSE_MAX_DECLDEPTH 20 /* Max. recursive declaration depth. */
+#define CPARSE_MAX_PACKSTACK 7 /* Max. pack pragma stack depth. */
+
+/* Flags for C parser mode. */
+#define CPARSE_MODE_MULTI 1 /* Process multiple declarations. */
+#define CPARSE_MODE_ABSTRACT 2 /* Accept abstract declarators. */
+#define CPARSE_MODE_DIRECT 4 /* Accept direct declarators. */
+#define CPARSE_MODE_FIELD 8 /* Accept field width in bits, too. */
+#define CPARSE_MODE_NOIMPLICIT 16 /* Reject implicit declarations. */
+#define CPARSE_MODE_SKIP 32 /* Skip definitions, ignore errors. */
+
+typedef int CPChar; /* C parser character. Unsigned ext. from char. */
+typedef int CPToken; /* C parser token. */
+
+/* C parser internal value representation. */
+typedef struct CPValue {
+ union {
+ int32_t i32; /* Value for CTID_INT32. */
+ uint32_t u32; /* Value for CTID_UINT32. */
+ };
+ CTypeID id; /* C Type ID of the value. */
+} CPValue;
+
+/* C parser state. */
+typedef struct CPState {
+ CPChar c; /* Current character. */
+ CPToken tok; /* Current token. */
+ CPValue val; /* Token value. */
+ GCstr *str; /* Interned string of identifier/keyword. */
+ CType *ct; /* C type table entry. */
+ const char *p; /* Current position in input buffer. */
+ SBuf sb; /* String buffer for tokens. */
+ lua_State *L; /* Lua state. */
+ CTState *cts; /* C type state. */
+ const char *srcname; /* Current source name. */
+ BCLine linenumber; /* Input line counter. */
+ int depth; /* Recursive declaration depth. */
+ uint32_t tmask; /* Type mask for next identifier. */
+ uint32_t mode; /* C parser mode. */
+ uint8_t packstack[CPARSE_MAX_PACKSTACK]; /* Stack for pack pragmas. */
+ uint8_t curpack; /* Current position in pack pragma stack. */
+} CPState;
+
+LJ_FUNC int lj_cparse(CPState *cp);
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_crecord.c b/src/LuaJIT/src/lj_crecord.c
new file mode 100644
index 000000000..81ab35409
--- /dev/null
+++ b/src/LuaJIT/src/lj_crecord.c
@@ -0,0 +1,1323 @@
+/*
+** Trace recorder for C data operations.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_ffrecord_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT && LJ_HASFFI
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_frame.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_cparse.h"
+#include "lj_cconv.h"
+#include "lj_clib.h"
+#include "lj_ccall.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_record.h"
+#include "lj_ffrecord.h"
+#include "lj_snap.h"
+#include "lj_crecord.h"
+#include "lj_dispatch.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+#define emitconv(a, dt, st, flags) \
+ emitir(IRT(IR_CONV, (dt)), (a), (st)|((dt) << 5)|(flags))
+
+/* -- C type checks ------------------------------------------------------- */
+
+static GCcdata *argv2cdata(jit_State *J, TRef tr, cTValue *o)
+{
+ GCcdata *cd;
+ TRef trtypeid;
+ if (!tref_iscdata(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ cd = cdataV(o);
+ /* Specialize to the CTypeID. */
+ trtypeid = emitir(IRT(IR_FLOAD, IRT_U16), tr, IRFL_CDATA_TYPEID);
+ emitir(IRTG(IR_EQ, IRT_INT), trtypeid, lj_ir_kint(J, (int32_t)cd->typeid));
+ return cd;
+}
+
+/* Specialize to the CTypeID held by a cdata constructor. */
+static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr)
+{
+ CTypeID id;
+ lua_assert(tref_iscdata(tr) && cd->typeid == CTID_CTYPEID);
+ id = *(CTypeID *)cdataptr(cd);
+ tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCcdata)));
+ tr = emitir(IRT(IR_XLOAD, IRT_INT), tr, 0);
+ emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id));
+ return id;
+}
+
+static CTypeID argv2ctype(jit_State *J, TRef tr, cTValue *o)
+{
+ if (tref_isstr(tr)) {
+ GCstr *s = strV(o);
+ CPState cp;
+ CTypeID oldtop;
+ /* Specialize to the string containing the C type declaration. */
+ emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, s));
+ cp.L = J->L;
+ cp.cts = ctype_ctsG(J2G(J));
+ oldtop = cp.cts->top;
+ cp.srcname = strdata(s);
+ cp.p = strdata(s);
+ cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT;
+ if (lj_cparse(&cp) || cp.cts->top > oldtop) /* Avoid new struct defs. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ return cp.val.id;
+ } else {
+ GCcdata *cd = argv2cdata(J, tr, o);
+ return cd->typeid == CTID_CTYPEID ? crec_constructor(J, cd, tr) :
+ cd->typeid;
+ }
+}
+
+/* -- Convert C type to C type -------------------------------------------- */
+
+/*
+** This code mirrors the code in lj_cconv.c. It performs the same steps
+** for the trace recorder that lj_cconv.c does for the interpreter.
+**
+** One major difference is that we can get away with far fewer checks
+** here. E.g. checks for casts, constness or correct types can often be
+** omitted, even if they might fail. The interpreter subsequently throws
+** an error, which aborts the trace.
+**
+** All operations are specialized to their C types, so the on-trace
+** outcome must be the same as the outcome in the interpreter. If the
+** interpreter doesn't throw an error, then the trace is correct, too.
+** Care must be taken not to generate invalid (temporary) IR or to
+** trigger asserts.
+*/
+
+/* Convert CType to IRType. */
+static IRType crec_ct2irt(CType *ct)
+{
+ if (LJ_LIKELY(ctype_isnum(ct->info))) {
+ if ((ct->info & CTF_FP)) {
+ if (ct->size == sizeof(double))
+ return IRT_NUM;
+ else if (ct->size == sizeof(float))
+ return IRT_FLOAT;
+ } else {
+ uint32_t b = lj_fls(ct->size);
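+ /* Sizes 1/2/4/8 map to IRT_I8/I16/I32/I64; the +1 picks the unsigned variant. */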
+ if (b <= 3)
+ return IRT_I8 + 2*b + ((ct->info & CTF_UNSIGNED) ? 1 : 0);
+ }
+ } else if (ctype_isptr(ct->info)) {
+ return (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ } else if (ctype_iscomplex(ct->info)) {
+ if (ct->size == 2*sizeof(double))
+ return IRT_NUM;
+ else if (ct->size == 2*sizeof(float))
+ return IRT_FLOAT;
+ }
+ return IRT_CDATA;
+}
+
+/* Determine whether a passed number or cdata number is non-zero. */
+static int crec_isnonzero(CType *s, void *p)
+{
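+ /* p is either a 0/1 truth tag or a real pointer to the source value (set up by crec_ct_tv). */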
+ if (p == (void *)0)
+ return 0;
+ if (p == (void *)1)
+ return 1;
+ if ((s->info & CTF_FP)) {
+ if (s->size == sizeof(float))
+ return (*(float *)p != 0);
+ else
+ return (*(double *)p != 0);
+ } else {
+ if (s->size == 1)
+ return (*(uint8_t *)p != 0);
+ else if (s->size == 2)
+ return (*(uint16_t *)p != 0);
+ else if (s->size == 4)
+ return (*(uint32_t *)p != 0);
+ else
+ return (*(uint64_t *)p != 0);
+ }
+}
+
+static TRef crec_ct_ct(jit_State *J, CType *d, CType *s, TRef dp, TRef sp,
+ void *svisnz)
+{
+ CTSize dsize = d->size, ssize = s->size;
+ CTInfo dinfo = d->info, sinfo = s->info;
+ IRType dt = crec_ct2irt(d);
+ IRType st = crec_ct2irt(s);
+
+ if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT)
+ goto err_conv;
+
+ /*
+ ** Note: Unlike lj_cconv_ct_ct(), sp holds the _value_ of pointers and
+ ** numbers up to 8 bytes. Otherwise sp holds a pointer.
+ */
+
+ switch (cconv_idx2(dinfo, sinfo)) {
+ /* Destination is a bool. */
+ case CCX(B, B):
+ goto xstore; /* Source operand is already normalized. */
+ case CCX(B, I):
+ case CCX(B, F):
+ if (st != IRT_CDATA) {
+ /* Specialize to the result of a comparison against 0. */
+ TRef zero = (st == IRT_NUM || st == IRT_FLOAT) ? lj_ir_knum(J, 0) :
+ (st == IRT_I64 || st == IRT_U64) ? lj_ir_kint64(J, 0) :
+ lj_ir_kint(J, 0);
+ int isnz = crec_isnonzero(s, svisnz);
+ emitir(IRTG(isnz ? IR_NE : IR_EQ, st), sp, zero);
+ sp = lj_ir_kint(J, isnz);
+ goto xstore;
+ }
+ goto err_nyi;
+
+ /* Destination is an integer. */
+ case CCX(I, B):
+ case CCX(I, I):
+ conv_I_I:
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ /* Extend 32 to 64 bit integer. */
+ if (dsize == 8 && ssize < 8 && !(LJ_64 && (sinfo & CTF_UNSIGNED)))
+ sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st,
+ (sinfo & CTF_UNSIGNED) ? 0 : IRCONV_SEXT);
+ else if (dsize < 8 && ssize == 8) /* Truncate from 64 bit integer. */
+ sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, 0);
+ else if (st == IRT_INT)
+ sp = lj_opt_narrow_toint(J, sp);
+ xstore:
+ if (dt == IRT_I64 || dt == IRT_U64) lj_needsplit(J);
+ if (dp == 0) return sp;
+ emitir(IRT(IR_XSTORE, dt), dp, sp);
+ break;
+ case CCX(I, C):
+ sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */
+ /* fallthrough */
+ case CCX(I, F):
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, IRCONV_TRUNC|IRCONV_ANY);
+ goto xstore;
+ case CCX(I, P):
+ case CCX(I, A):
+ sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ ssize = CTSIZE_PTR;
+ st = IRT_UINTP;
+ goto conv_I_I;
+
+ /* Destination is a floating-point number. */
+ case CCX(F, B):
+ case CCX(F, I):
+ conv_F_I:
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st, 0);
+ goto xstore;
+ case CCX(F, C):
+ sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */
+ /* fallthrough */
+ case CCX(F, F):
+ conv_F_F:
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ if (dt != st) sp = emitconv(sp, dt, st, 0);
+ goto xstore;
+
+ /* Destination is a complex number. */
+ case CCX(C, I):
+ case CCX(C, F):
+ { /* Clear im. */
+ TRef ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1)));
+ emitir(IRT(IR_XSTORE, dt), ptr, lj_ir_knum(J, 0));
+ }
+ /* Convert to re. */
+ if ((sinfo & CTF_FP)) goto conv_F_F; else goto conv_F_I;
+
+ case CCX(C, C):
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ {
+ TRef re, im, ptr;
+ re = emitir(IRT(IR_XLOAD, st), sp, 0);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, (ssize >> 1)));
+ im = emitir(IRT(IR_XLOAD, st), ptr, 0);
+ if (dt != st) {
+ re = emitconv(re, dt, st, 0);
+ im = emitconv(im, dt, st, 0);
+ }
+ emitir(IRT(IR_XSTORE, dt), dp, re);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1)));
+ emitir(IRT(IR_XSTORE, dt), ptr, im);
+ }
+ break;
+
+ /* Destination is a vector. */
+ case CCX(V, I):
+ case CCX(V, F):
+ case CCX(V, C):
+ case CCX(V, V):
+ goto err_nyi;
+
+ /* Destination is a pointer. */
+ case CCX(P, P):
+ case CCX(P, A):
+ case CCX(P, S):
+ /* There are only 32 bit pointers/addresses on 32 bit machines.
+ ** Also ok on x64, since all 32 bit ops clear the upper part of the reg.
+ */
+ goto xstore;
+ case CCX(P, I):
+ if (st == IRT_CDATA) goto err_nyi;
+ if (!LJ_64 && ssize == 8) /* Truncate from 64 bit integer. */
+ sp = emitconv(sp, IRT_U32, st, 0);
+ goto xstore;
+ case CCX(P, F):
+ if (st == IRT_CDATA) goto err_nyi;
+ /* The signed conversion is cheaper. x64 really has 47 bit pointers. */
+ sp = emitconv(sp, (LJ_64 && dsize == 8) ? IRT_I64 : IRT_U32,
+ st, IRCONV_TRUNC|IRCONV_ANY);
+ goto xstore;
+
+ /* Destination is an array. */
+ case CCX(A, A):
+ goto err_nyi;
+
+ /* Destination is a struct/union. */
+ case CCX(S, S):
+ goto err_nyi;
+
+ default:
+ err_conv:
+ err_nyi:
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ break;
+ }
+ return 0;
+}
+
+/* -- Convert C type to TValue (load) ------------------------------------- */
+
+static TRef crec_tv_ct(jit_State *J, CType *s, CTypeID sid, TRef sp)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTInfo sinfo = s->info;
+ lua_assert(!ctype_isenum(sinfo));
+ if (ctype_isnum(sinfo)) {
+ IRType t = crec_ct2irt(s);
+ TRef tr;
+ if (t == IRT_CDATA)
+ goto err_nyi; /* NYI: copyval of >64 bit integers. */
+ tr = emitir(IRT(IR_XLOAD, t), sp, 0);
+ if (t == IRT_FLOAT || t == IRT_U32) { /* Keep uint32_t/float as numbers. */
+ return emitconv(tr, IRT_NUM, t, 0);
+ } else if (t == IRT_I64 || t == IRT_U64) { /* Box 64 bit integer. */
+ sp = tr;
+ lj_needsplit(J);
+ } else if ((sinfo & CTF_BOOL)) {
+ /* Assume not equal to zero. Fixup and emit pending guard later. */
+ lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ } else {
+ return tr;
+ }
+ } else if (ctype_isptr(sinfo)) {
+ IRType t = (LJ_64 && s->size == 8) ? IRT_P64 : IRT_P32;
+ sp = emitir(IRT(IR_XLOAD, t), sp, 0);
+ } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) {
+ cts->L = J->L;
+ sid = lj_ctype_intern(cts, CTINFO_REF(sid), CTSIZE_PTR); /* Create ref. */
+ } else if (ctype_iscomplex(sinfo)) { /* Unbox/box complex. */
+ IRType t = s->size == 2*sizeof(double) ? IRT_NUM : IRT_FLOAT;
+ ptrdiff_t esz = (ptrdiff_t)(s->size >> 1);
+ TRef ptr, tr1, tr2, dp;
+ dp = emitir(IRTG(IR_CNEW, IRT_CDATA), lj_ir_kint(J, sid), TREF_NIL);
+ tr1 = emitir(IRT(IR_XLOAD, t), sp, 0);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, esz));
+ tr2 = emitir(IRT(IR_XLOAD, t), ptr, 0);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)));
+ emitir(IRT(IR_XSTORE, t), ptr, tr1);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)+esz));
+ emitir(IRT(IR_XSTORE, t), ptr, tr2);
+ return dp;
+ } else {
+ /* NYI: copyval of vectors. */
+ err_nyi:
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ }
+ /* Box pointer, ref or 64 bit integer. */
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, sid), sp);
+}
+
+/* -- Convert TValue to C type (store) ------------------------------------ */
+
+static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, cTValue *sval)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTypeID sid = CTID_P_VOID;
+ void *svisnz = 0;
+ CType *s;
+ if (LJ_LIKELY(tref_isinteger(sp))) {
+ sid = CTID_INT32;
+ svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval));
+ } else if (tref_isnum(sp)) {
+ sid = CTID_DOUBLE;
+ svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval));
+ } else if (tref_isbool(sp)) {
+ sp = lj_ir_kint(J, tref_istrue(sp) ? 1 : 0);
+ sid = CTID_BOOL;
+ } else if (tref_isnil(sp)) {
+ sp = lj_ir_kptr(J, NULL);
+ } else if (tref_isudata(sp)) {
+ sp = emitir(IRT(IR_ADD, IRT_P32), sp, lj_ir_kint(J, sizeof(GCudata)));
+ } else if (tref_isstr(sp)) {
+ if (ctype_isenum(d->info)) { /* Match string against enum constant. */
+ GCstr *str = strV(sval);
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, d, str, &ofs);
+ /* Specialize to the name of the enum constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str));
+ if (cct && ctype_isconstval(cct->info)) {
+ lua_assert(ctype_child(cts, cct)->size == 4);
+ svisnz = (void *)(intptr_t)(cct->size != 0);
+ sp = lj_ir_kint(J, (int32_t)cct->size);
+ sid = ctype_cid(cct->info);
+ } /* else: interpreter will throw. */
+ } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE); /* NYI */
+ } else { /* Otherwise pass the string data as a const char[]. */
+ sp = emitir(IRT(IR_STRREF, IRT_P32), sp, lj_ir_kint(J, 0));
+ sid = CTID_A_CCHAR;
+ }
+ } else { /* NYI: tref_istab(sp), tref_islightud(sp). */
+ sid = argv2cdata(J, sp, sval)->typeid;
+ s = ctype_raw(cts, sid);
+ svisnz = cdataptr(cdataV(sval));
+ if (ctype_isptr(s->info)) {
+ IRType t = (LJ_64 && s->size == 8) ? IRT_P64 : IRT_P32;
+ sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_PTR);
+ if (ctype_isref(s->info)) {
+ svisnz = *(void **)svisnz;
+ s = ctype_rawchild(cts, s);
+ } else {
+ goto doconv; /* The pointer value was loaded, don't load number. */
+ }
+ } else if (ctype_isinteger(s->info) && s->size == 8) {
+ IRType t = (s->info & CTF_UNSIGNED) ? IRT_U64 : IRT_I64;
+ sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT64);
+ lj_needsplit(J);
+ goto doconv;
+ } else {
+ sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCcdata)));
+ }
+ if (ctype_isenum(s->info)) s = ctype_child(cts, s);
+ if (ctype_isnum(s->info)) { /* Load number value. */
+ IRType t = crec_ct2irt(s);
+ if (t != IRT_CDATA) {
+ sp = emitir(IRT(IR_XLOAD, t), sp, 0);
+ if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J);
+ }
+ }
+ goto doconv;
+ }
+ s = ctype_get(cts, sid);
+doconv:
+ if (ctype_isenum(d->info)) d = ctype_child(cts, d);
+ return crec_ct_ct(J, d, s, dp, sp, svisnz);
+}
+
+/* -- C data metamethods -------------------------------------------------- */
+
+/* This would be rather difficult in FOLD, so do it here:
+** (base+k)+(idx*sz)+ofs ==> (base+idx*sz)+(ofs+k)
+** (base+(idx+k)*sz)+ofs ==> (base+idx*sz)+(ofs+k*sz)
+*/
+static TRef crec_reassoc_ofs(jit_State *J, TRef tr, ptrdiff_t *ofsp, MSize sz)
+{
+ IRIns *ir = IR(tref_ref(tr));
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && irref_isk(ir->op2) &&
+ (ir->o == IR_ADD || ir->o == IR_ADDOV || ir->o == IR_SUBOV)) {
+ IRIns *irk = IR(ir->op2);
+ ptrdiff_t k;
+ if (LJ_64 && irk->o == IR_KINT64)
+ k = (ptrdiff_t)ir_kint64(irk)->u64 * sz;
+ else
+ k = (ptrdiff_t)irk->i * sz;
+ if (ir->o == IR_SUBOV) *ofsp -= k; else *ofsp += k;
+ tr = ir->op1; /* Not a TRef, but the caller doesn't care. */
+ }
+ return tr;
+}
+
+/* Record ctype __index/__newindex metamethods. */
+static void crec_index_meta(jit_State *J, CTState *cts, CType *ct,
+ RecordFFData *rd)
+{
+ CTypeID id = ctype_typeid(cts, ct);
+ cTValue *tv = lj_ctype_meta(cts, id, rd->data ? MM_newindex : MM_index);
+ if (!tv)
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ if (tvisfunc(tv)) {
+ J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME;
+ rd->nres = -1; /* Pending tailcall. */
+ } else if (rd->data == 0 && tvistab(tv) && tref_isstr(J->base[1])) {
+ /* Specialize to result of __index lookup. */
+ cTValue *o = lj_tab_get(J->L, tabV(tv), &rd->argv[1]);
+ IRType t = itype2irt(o);
+ if (tvisgcv(o))
+ J->base[0] = lj_ir_kgc(J, gcV(o), t);
+ else if (tvisint(o))
+ J->base[0] = lj_ir_kint(J, intV(o));
+ else if (tvisnum(o))
+ J->base[0] = lj_ir_knumint(J, numV(o));
+ else if (tvisbool(o))
+ J->base[0] = TREF_PRI(t);
+ else
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ /* Always specialize to the key. */
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1])));
+ } else {
+ /* NYI: resolving of non-function metamethods. */
+ /* NYI: non-string keys for __index table. */
+ /* NYI: stores to __newindex table. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+}
+
+void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd)
+{
+ TRef idx, ptr = J->base[0];
+ ptrdiff_t ofs = sizeof(GCcdata);
+ GCcdata *cd = argv2cdata(J, ptr, &rd->argv[0]);
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ct = ctype_raw(cts, cd->typeid);
+ CTypeID sid = 0;
+
+ /* Resolve pointer or reference for cdata object. */
+ if (ctype_isptr(ct->info)) {
+ IRType t = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
+ ptr = emitir(IRT(IR_FLOAD, t), ptr, IRFL_CDATA_PTR);
+ ofs = 0;
+ ptr = crec_reassoc_ofs(J, ptr, &ofs, 1);
+ }
+
+again:
+ idx = J->base[1];
+ if (tref_isnumber(idx)) {
+ idx = lj_opt_narrow_cindex(J, idx);
+ if (ctype_ispointer(ct->info)) {
+ CTSize sz;
+ integer_key:
+ if ((ct->info & CTF_COMPLEX))
+ idx = emitir(IRT(IR_BAND, IRT_INTP), idx, lj_ir_kintp(J, 1));
+ sz = lj_ctype_size(cts, (sid = ctype_cid(ct->info)));
+ idx = crec_reassoc_ofs(J, idx, &ofs, sz);
+#if LJ_TARGET_ARM || LJ_TARGET_PPC
+ /* Hoist base add to allow fusion of index/shift into operands. */
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_LOOP) && ofs
+#if LJ_TARGET_ARM
+ && (sz == 1 || sz == 4)
+#endif
+ ) {
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
+ ofs = 0;
+ }
+#endif
+ idx = emitir(IRT(IR_MUL, IRT_INTP), idx, lj_ir_kintp(J, sz));
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), idx, ptr);
+ }
+ } else if (tref_iscdata(idx)) {
+ GCcdata *cdk = cdataV(&rd->argv[1]);
+ CType *ctk = ctype_raw(cts, cdk->typeid);
+ IRType t;
+ if (ctype_isenum(ctk->info)) ctk = ctype_child(cts, ctk);
+ if (ctype_ispointer(ct->info) &&
+ ctype_isinteger(ctk->info) && (t = crec_ct2irt(ctk)) != IRT_CDATA) {
+ if (ctk->size == 8) {
+ idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT64);
+ } else {
+ idx = emitir(IRT(IR_ADD, IRT_PTR), idx,
+ lj_ir_kintp(J, sizeof(GCcdata)));
+ idx = emitir(IRT(IR_XLOAD, t), idx, 0);
+ }
+ if (LJ_64 && ctk->size < sizeof(intptr_t) && !(ctk->info & CTF_UNSIGNED))
+ idx = emitconv(idx, IRT_INTP, IRT_INT, IRCONV_SEXT);
+ if (!LJ_64 && ctk->size > sizeof(intptr_t)) {
+ idx = emitconv(idx, IRT_INTP, t, 0);
+ lj_needsplit(J);
+ }
+ goto integer_key;
+ }
+ } else if (tref_isstr(idx)) {
+ GCstr *name = strV(&rd->argv[1]);
+ if (cd->typeid == CTID_CTYPEID)
+ ct = ctype_raw(cts, crec_constructor(J, cd, ptr));
+ if (ctype_isstruct(ct->info)) {
+ CTSize fofs;
+ CType *fct;
+ fct = lj_ctype_getfield(cts, ct, name, &fofs);
+ if (fct) {
+ /* Always specialize to the field name. */
+ emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name));
+ if (ctype_isconstval(fct->info)) {
+ if (fct->size >= 0x80000000u &&
+ (ctype_child(cts, fct)->info & CTF_UNSIGNED)) {
+ J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)fct->size);
+ return;
+ }
+ J->base[0] = lj_ir_kint(J, (int32_t)fct->size);
+ return; /* Interpreter will throw for newindex. */
+ } else if (ctype_isbitfield(fct->info)) {
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ } else {
+ lua_assert(ctype_isfield(fct->info));
+ sid = ctype_cid(fct->info);
+ }
+ ofs += (ptrdiff_t)fofs;
+ }
+ } else if (ctype_iscomplex(ct->info)) {
+ if (name->len == 2 &&
+ ((strdata(name)[0] == 'r' && strdata(name)[1] == 'e') ||
+ (strdata(name)[0] == 'i' && strdata(name)[1] == 'm'))) {
+ /* Always specialize to the field name. */
+ emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name));
+ if (strdata(name)[0] == 'i') ofs += (ct->size >> 1);
+ sid = ctype_cid(ct->info);
+ }
+ }
+ }
+ if (!sid) {
+ if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */
+ CType *cct = ctype_rawchild(cts, ct);
+ if (ctype_isstruct(cct->info)) {
+ ct = cct;
+ if (tref_isstr(idx)) goto again;
+ }
+ }
+ crec_index_meta(J, cts, ct, rd);
+ return;
+ }
+
+ if (ofs)
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
+
+ /* Resolve reference for field. */
+ ct = ctype_get(cts, sid);
+ if (ctype_isref(ct->info))
+ ptr = emitir(IRT(IR_XLOAD, IRT_PTR), ptr, 0);
+
+ while (ctype_isattrib(ct->info))
+ ct = ctype_child(cts, ct); /* Skip attributes. */
+
+ if (rd->data == 0) { /* __index metamethod. */
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); /* Skip enums. */
+ J->base[0] = crec_tv_ct(J, ct, sid, ptr);
+ } else { /* __newindex metamethod. */
+ rd->nres = 0;
+ J->needsnap = 1;
+ crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]);
+ }
+}
+
+/* Record cdata allocation. */
+static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ CType *d = ctype_raw(cts, id);
+ TRef trid;
+ if (sz == 0 || sz > 64 || (info & CTF_VLA) || ctype_align(info) > CT_MEMALIGN)
+ lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: large/special allocations. */
+ trid = lj_ir_kint(J, id);
+ /* Use special instruction to box pointer or 64 bit integer. */
+ if (ctype_isptr(info) || (ctype_isinteger(info) && sz == 8)) {
+ TRef sp = J->base[1] ? crec_ct_tv(J, d, 0, J->base[1], &rd->argv[1]) :
+ ctype_isptr(info) ? lj_ir_kptr(J, NULL) :
+ (lj_needsplit(J), lj_ir_kint64(J, 0));
+ J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, sp);
+ } else {
+ TRef trcd = emitir(IRTG(IR_CNEW, IRT_CDATA), trid, TREF_NIL);
+ cTValue *fin;
+ J->base[0] = trcd;
+ if (J->base[1] && !J->base[2] && !lj_cconv_multi_init(d, &rd->argv[1])) {
+ goto single_init;
+ } else if (ctype_isarray(d->info)) {
+ CType *dc = ctype_rawchild(cts, d); /* Array element type. */
+ CTSize ofs, esize = dc->size;
+ TRef sp = 0;
+ TValue tv;
+ TValue *sval = &tv;
+ MSize i;
+ tv.u64 = 0;
+ if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info)))
+ lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init array of aggregates. */
+ for (i = 1, ofs = 0; ofs < sz; ofs += esize) {
+ TRef dp = emitir(IRT(IR_ADD, IRT_PTR), trcd,
+ lj_ir_kintp(J, ofs + sizeof(GCcdata)));
+ if (J->base[i]) {
+ sp = J->base[i];
+ sval = &rd->argv[i];
+ i++;
+ } else if (i != 2) {
+ sp = ctype_isnum(dc->info) ? lj_ir_kint(J, 0) : TREF_NIL;
+ }
+ crec_ct_tv(J, dc, dp, sp, sval);
+ }
+ } else if (ctype_isstruct(d->info)) {
+ CTypeID fid = d->sib;
+ MSize i = 1;
+ while (fid) {
+ CType *df = ctype_get(cts, fid);
+ fid = df->sib;
+ if (ctype_isfield(df->info)) {
+ CType *dc;
+ TRef sp, dp;
+ TValue tv;
+ TValue *sval = &tv;
+ setintV(&tv, 0);
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ dc = ctype_rawchild(cts, df); /* Field type. */
+ if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info)))
+ lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init aggregates. */
+ if (J->base[i]) {
+ sp = J->base[i];
+ sval = &rd->argv[i];
+ i++;
+ } else {
+ sp = ctype_isnum(dc->info) ? lj_ir_kint(J, 0) : TREF_NIL;
+ }
+ dp = emitir(IRT(IR_ADD, IRT_PTR), trcd,
+ lj_ir_kintp(J, df->size + sizeof(GCcdata)));
+ crec_ct_tv(J, dc, dp, sp, sval);
+ } else if (!ctype_isconstval(df->info)) {
+ /* NYI: init bitfields and sub-structures. */
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ }
+ }
+ } else {
+ TRef dp;
+ single_init:
+ dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata)));
+ if (J->base[1]) {
+ crec_ct_tv(J, d, dp, J->base[1], &rd->argv[1]);
+ } else {
+ TValue tv;
+ tv.u64 = 0;
+ crec_ct_tv(J, d, dp, lj_ir_kint(J, 0), &tv);
+ }
+ }
+ /* Handle __gc metamethod. */
+ fin = lj_ctype_meta(cts, id, MM_gc);
+ if (fin) {
+ TRef trlo = lj_ir_call(J, IRCALL_lj_cdata_setfin, trcd);
+ TRef trhi = emitir(IRT(IR_ADD, IRT_P32), trlo, lj_ir_kint(J, 4));
+ if (LJ_BE) { TRef tmp = trlo; trlo = trhi; trhi = tmp; }
+ if (tvisfunc(fin)) {
+ emitir(IRT(IR_XSTORE, IRT_P32), trlo, lj_ir_kfunc(J, funcV(fin)));
+ emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TFUNC));
+ } else if (tviscdata(fin)) {
+ emitir(IRT(IR_XSTORE, IRT_P32), trlo,
+ lj_ir_kgc(J, obj2gco(cdataV(fin)), IRT_CDATA));
+ emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TCDATA));
+ } else {
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ J->needsnap = 1;
+ }
+ }
+}
+
+/* Record argument conversions. */
+static TRef crec_call_args(jit_State *J, RecordFFData *rd,
+ CTState *cts, CType *ct)
+{
+ TRef args[CCI_NARGS_MAX];
+ CTypeID fid;
+ MSize i, n;
+ TRef tr, *base;
+ cTValue *o;
+#if LJ_TARGET_X86
+#if LJ_ABI_WIN
+ TRef *arg0 = NULL, *arg1 = NULL;
+#endif
+ int ngpr = 0;
+ if (ctype_cconv(ct->info) == CTCC_THISCALL)
+ ngpr = 1;
+ else if (ctype_cconv(ct->info) == CTCC_FASTCALL)
+ ngpr = 2;
+#endif
+
+ /* Skip initial attributes. */
+ fid = ct->sib;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) break;
+ fid = ctf->sib;
+ }
+ args[0] = TREF_NIL;
+ for (n = 0, base = J->base+1, o = rd->argv+1; *base; n++, base++, o++) {
+ CTypeID did;
+ CType *d;
+
+ if (n >= CCI_NARGS_MAX)
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+
+ if (fid) { /* Get argument type from field. */
+ CType *ctf = ctype_get(cts, fid);
+ fid = ctf->sib;
+ lua_assert(ctype_isfield(ctf->info));
+ did = ctype_cid(ctf->info);
+ } else {
+ if (!(ct->info & CTF_VARARG))
+ lj_trace_err(J, LJ_TRERR_NYICALL); /* Too many arguments. */
+ did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */
+ }
+ d = ctype_raw(cts, did);
+ if (!(ctype_isnum(d->info) || ctype_isptr(d->info) ||
+ ctype_isenum(d->info)))
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ tr = crec_ct_tv(J, d, 0, *base, o);
+ if (ctype_isinteger_or_bool(d->info)) {
+ if (d->size < 4) {
+ if ((d->info & CTF_UNSIGNED))
+ tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_U8 : IRT_U16, 0);
+ else
+ tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_I8 : IRT_I16,IRCONV_SEXT);
+ }
+ } else if (LJ_SOFTFP && ctype_isfp(d->info) && d->size > 4) {
+ lj_needsplit(J);
+ }
+#if LJ_TARGET_X86
+ /* 64 bit args must not end up in registers for fastcall/thiscall. */
+#if LJ_ABI_WIN
+ if (!ctype_isfp(d->info)) {
+ /* Sigh, the Windows/x86 ABI allows reordering across 64 bit args. */
+ if (tref_typerange(tr, IRT_I64, IRT_U64)) {
+ if (ngpr) {
+ arg0 = &args[n]; args[n++] = TREF_NIL; ngpr--;
+ if (ngpr) {
+ arg1 = &args[n]; args[n++] = TREF_NIL; ngpr--;
+ }
+ }
+ } else {
+ if (arg0) { *arg0 = tr; arg0 = NULL; n--; continue; }
+ if (arg1) { *arg1 = tr; arg1 = NULL; n--; continue; }
+ if (ngpr) ngpr--;
+ }
+ }
+#else
+ if (!ctype_isfp(d->info) && ngpr) {
+ if (tref_typerange(tr, IRT_I64, IRT_U64)) {
+ /* No reordering for other x86 ABIs. Simply add alignment args. */
+ do { args[n++] = TREF_NIL; } while (--ngpr);
+ } else {
+ ngpr--;
+ }
+ }
+#endif
+#endif
+ args[n] = tr;
+ }
+ tr = args[0];
+ for (i = 1; i < n; i++)
+ tr = emitir(IRT(IR_CARG, IRT_NIL), tr, args[i]);
+ return tr;
+}
+
+/* Create a snapshot for the caller, simulating a 'false' return value. */
+static void crec_snap_caller(jit_State *J)
+{
+ lua_State *L = J->L;
+ TValue *base = L->base, *top = L->top;
+ const BCIns *pc = J->pc;
+ TRef ftr = J->base[-1];
+ ptrdiff_t delta;
+ if (!frame_islua(base-1))
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ J->pc = frame_pc(base-1); delta = 1+bc_a(J->pc[-1]);
+ L->top = base; L->base = base - delta;
+ J->base[-1] = TREF_FALSE;
+ J->base -= delta; J->baseslot -= (BCReg)delta;
+ J->maxslot = (BCReg)delta; J->framedepth--;
+ lj_snap_add(J);
+ L->base = base; L->top = top;
+ J->framedepth++; J->maxslot = 1;
+ J->base += delta; J->baseslot += (BCReg)delta;
+ J->base[-1] = ftr; J->pc = pc;
+}
+
+/* Record function call. */
+static int crec_call(jit_State *J, RecordFFData *rd, GCcdata *cd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ct = ctype_raw(cts, cd->typeid);
+ IRType tp = IRT_PTR;
+ if (ctype_isptr(ct->info)) {
+ tp = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_isfunc(ct->info)) {
+ TRef func = emitir(IRT(IR_FLOAD, tp), J->base[0], IRFL_CDATA_PTR);
+ CType *ctr = ctype_rawchild(cts, ct);
+ IRType t = crec_ct2irt(ctr);
+ TRef tr;
+ TValue tv;
+ /* Check for blacklisted C functions that might call a callback. */
+ setlightudV(&tv,
+ cdata_getptr(cdataptr(cd), (LJ_64 && tp == IRT_P64) ? 8 : 4));
+ if (tvistrue(lj_tab_get(J->L, cts->miscmap, &tv)))
+ lj_trace_err(J, LJ_TRERR_BLACKL);
+ if (ctype_isvoid(ctr->info)) {
+ t = IRT_NIL;
+ rd->nres = 0;
+ } else if (ctype_isenum(ctr->info)) {
+ ctr = ctype_child(cts, ctr);
+ }
+ if (!(ctype_isnum(ctr->info) || ctype_isptr(ctr->info) ||
+ ctype_isvoid(ctr->info)) || t == IRT_CDATA)
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ if ((ct->info & CTF_VARARG)
+#if LJ_TARGET_X86
+ || ctype_cconv(ct->info) != CTCC_CDECL
+#endif
+ )
+ func = emitir(IRT(IR_CARG, IRT_NIL), func,
+ lj_ir_kint(J, ctype_typeid(cts, ct)));
+ tr = emitir(IRT(IR_CALLXS, t), crec_call_args(J, rd, cts, ct), func);
+ if (ctype_isbool(ctr->info)) {
+ if (frame_islua(J->L->base-1) && bc_b(frame_pc(J->L->base-1)[-1]) == 1) {
+ /* Don't check result if ignored. */
+ tr = TREF_NIL;
+ } else {
+ crec_snap_caller(J);
+ lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
+ J->postproc = LJ_POST_FIXGUARDSNAP;
+ tr = TREF_TRUE;
+ }
+ } else if (t == IRT_FLOAT || t == IRT_U32) {
+ tr = emitconv(tr, IRT_NUM, t, 0);
+ } else if (t == IRT_I8 || t == IRT_I16) {
+ tr = emitconv(tr, IRT_INT, t, IRCONV_SEXT);
+ } else if (t == IRT_U8 || t == IRT_U16) {
+ tr = emitconv(tr, IRT_INT, t, 0);
+ } else if (t == IRT_PTR || (LJ_64 && t == IRT_P32) ||
+ (t == IRT_I64 || t == IRT_U64)) {
+ TRef trid = lj_ir_kint(J, ctype_cid(ct->info));
+ tr = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, tr);
+ if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J);
+ }
+ J->base[0] = tr;
+ J->needsnap = 1;
+ return 1;
+ }
+ return 0;
+}
+
+/* Record ctype call metamethod. */
+static void crec_call_meta(jit_State *J, RecordFFData *rd, CTypeID id)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ct = ctype_raw(cts, id);
+ cTValue *tv;
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, MM_call);
+ if (tv && tvisfunc(tv)) {
+ J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME;
+ rd->nres = -1; /* Pending tailcall. */
+ } else {
+ /* NYI: non-function metamethods. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+}
+
+void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd)
+{
+ GCcdata *cd = argv2cdata(J, J->base[0], &rd->argv[0]);
+ if (cd->typeid == CTID_CTYPEID)
+ crec_alloc(J, rd, crec_constructor(J, cd, J->base[0]));
+ else if (!crec_call(J, rd, cd))
+ crec_call_meta(J, rd, cd->typeid);
+}
+
+static TRef crec_arith_int64(jit_State *J, TRef *sp, CType **s, MMS mm)
+{
+ if (ctype_isnum(s[0]->info) && ctype_isnum(s[1]->info)) {
+ IRType dt;
+ CTypeID id;
+ TRef tr;
+ MSize i;
+ lj_needsplit(J);
+ if (((s[0]->info & CTF_UNSIGNED) && s[0]->size == 8) ||
+ ((s[1]->info & CTF_UNSIGNED) && s[1]->size == 8)) {
+ dt = IRT_U64; id = CTID_UINT64;
+ } else {
+ dt = IRT_I64; id = CTID_INT64;
+ }
+ for (i = 0; i < 2; i++) {
+ IRType st = tref_type(sp[i]);
+ if (st == IRT_NUM || st == IRT_FLOAT)
+ sp[i] = emitconv(sp[i], dt, st, IRCONV_TRUNC|IRCONV_ANY);
+ else if (!(st == IRT_I64 || st == IRT_U64))
+ sp[i] = emitconv(sp[i], dt, IRT_INT,
+ ((st - IRT_I8) & 1) ? 0 : IRCONV_SEXT);
+ }
+ if (mm < MM_add) {
+ /* Assume true comparison. Fixup and emit pending guard later. */
+ IROp op;
+ if (mm == MM_eq) {
+ op = IR_EQ;
+ } else {
+ op = mm == MM_lt ? IR_LT : IR_LE;
+ if (dt == IRT_U64)
+ op += (IR_ULT-IR_LT);
+ }
+ lj_ir_set(J, IRTG(op, dt), sp[0], sp[1]);
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ } else {
+ tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, dt), sp[0], sp[1]);
+ }
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
+ }
+ return 0;
+}
+
+static TRef crec_arith_ptr(jit_State *J, TRef *sp, CType **s, MMS mm)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ctp = s[0];
+ if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) {
+ if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) &&
+ (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) {
+ if (mm == MM_sub) { /* Pointer difference. */
+ TRef tr;
+ CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info));
+ if (sz == 0 || (sz & (sz-1)) != 0)
+ return 0; /* NYI: integer division. */
+ tr = emitir(IRT(IR_SUB, IRT_INTP), sp[0], sp[1]);
+ tr = emitir(IRT(IR_BSAR, IRT_INTP), tr, lj_ir_kint(J, lj_fls(sz)));
+#if LJ_64
+ tr = emitconv(tr, IRT_NUM, IRT_INTP, 0);
+#endif
+ return tr;
+ } else { /* Pointer comparison (unsigned). */
+ /* Assume true comparison. Fixup and emit pending guard later. */
+ IROp op = mm == MM_eq ? IR_EQ : mm == MM_lt ? IR_ULT : IR_ULE;
+ lj_ir_set(J, IRTG(op, IRT_PTR), sp[0], sp[1]);
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ }
+ }
+ if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(s[1]->info)))
+ return 0;
+ } else if (mm == MM_add && ctype_isnum(ctp->info) &&
+ (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) {
+ TRef tr = sp[0]; sp[0] = sp[1]; sp[1] = tr; /* Swap pointer and index. */
+ ctp = s[1];
+ } else {
+ return 0;
+ }
+ {
+ TRef tr = sp[1];
+ IRType t = tref_type(tr);
+ CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info));
+ CTypeID id;
+#if LJ_64
+ if (t == IRT_NUM || t == IRT_FLOAT)
+ tr = emitconv(tr, IRT_INTP, t, IRCONV_TRUNC|IRCONV_ANY);
+ else if (!(t == IRT_I64 || t == IRT_U64))
+ tr = emitconv(tr, IRT_INTP, IRT_INT,
+ ((t - IRT_I8) & 1) ? 0 : IRCONV_SEXT);
+#else
+ if (!tref_typerange(sp[1], IRT_I8, IRT_U32)) {
+ tr = emitconv(tr, IRT_INTP, t,
+ (t == IRT_NUM || t == IRT_FLOAT) ?
+ IRCONV_TRUNC|IRCONV_ANY : 0);
+ }
+#endif
+ tr = emitir(IRT(IR_MUL, IRT_INTP), tr, lj_ir_kintp(J, sz));
+ tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, IRT_PTR), sp[0], tr);
+ id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)),
+ CTSIZE_PTR);
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
+ }
+}
+
+/* Record ctype arithmetic metamethods. */
+static void crec_arith_meta(jit_State *J, CTState *cts, RecordFFData *rd)
+{
+ cTValue *tv = NULL;
+ if (J->base[0]) {
+ if (tviscdata(&rd->argv[0]))
+ tv = lj_ctype_meta(cts, argv2cdata(J, J->base[0], &rd->argv[0])->typeid,
+ (MMS)rd->data);
+ if (!tv && J->base[1] && tviscdata(&rd->argv[1]))
+ tv = lj_ctype_meta(cts, argv2cdata(J, J->base[1], &rd->argv[1])->typeid,
+ (MMS)rd->data);
+ }
+ if (tv && tvisfunc(tv)) {
+ J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME;
+ rd->nres = -1; /* Pending tailcall. */
+ } else {
+ /* NYI: non-function metamethods. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+}
+
+void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef sp[2];
+ CType *s[2];
+ MSize i;
+ for (i = 0; i < 2; i++) {
+ TRef tr = J->base[i];
+ CType *ct = ctype_get(cts, CTID_DOUBLE);
+ if (!tr) {
+ goto trymeta;
+ } else if (tref_iscdata(tr)) {
+ CTypeID id = argv2cdata(J, tr, &rd->argv[i])->typeid;
+ ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) { /* Resolve pointer or reference. */
+ IRType t = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
+ tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_PTR);
+ } else if (ctype_isinteger(ct->info) && ct->size == 8) {
+ IRType t = (ct->info & CTF_UNSIGNED) ? IRT_U64 : IRT_I64;
+ tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT64);
+ lj_needsplit(J);
+ goto ok;
+ } else if (ctype_isfunc(ct->info)) {
+ tr = emitir(IRT(IR_FLOAD, IRT_PTR), tr, IRFL_CDATA_PTR);
+ ct = ctype_get(cts,
+ lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR));
+ } else {
+ tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCcdata)));
+ }
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (ctype_isnum(ct->info)) {
+ IRType t = crec_ct2irt(ct);
+ if (t == IRT_CDATA) goto trymeta;
+ if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J);
+ tr = emitir(IRT(IR_XLOAD, t), tr, 0);
+ } else if (!(ctype_isptr(ct->info) || ctype_isrefarray(ct->info))) {
+ goto trymeta;
+ }
+ } else if (tref_isnil(tr)) {
+ tr = lj_ir_kptr(J, NULL);
+ ct = ctype_get(cts, CTID_P_VOID);
+ } else if (tref_isinteger(tr)) {
+ ct = ctype_get(cts, CTID_INT32);
+ } else if (!tref_isnum(tr)) {
+ goto trymeta;
+ }
+ ok:
+ s[i] = ct;
+ sp[i] = tr;
+ }
+ {
+ TRef tr;
+ if ((tr = crec_arith_int64(J, sp, s, (MMS)rd->data)) ||
+ (tr = crec_arith_ptr(J, sp, s, (MMS)rd->data))) {
+ J->base[0] = tr;
+ /* Fixup cdata comparisons, too. Avoids some cdata escapes. */
+ if (J->postproc == LJ_POST_FIXGUARD && frame_iscont(J->L->base-1) &&
+ !irt_isguard(J->guardemit)) {
+ const BCIns *pc = frame_contpc(J->L->base-1) - 1;
+ if (bc_op(*pc) <= BC_ISNEP) {
+ setframe_pc(&J2G(J)->tmptv, pc);
+ J2G(J)->tmptv.u32.lo = ((tref_istrue(tr) ^ bc_op(*pc)) & 1);
+ J->postproc = LJ_POST_FIXCOMP;
+ }
+ }
+ } else {
+ trymeta:
+ crec_arith_meta(J, cts, rd);
+ }
+ }
+}
+
+/* -- C library namespace metamethods ------------------------------------- */
+
+void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ if (tref_isudata(J->base[0]) && tref_isstr(J->base[1]) &&
+ udataV(&rd->argv[0])->udtype == UDTYPE_FFI_CLIB) {
+ CLibrary *cl = (CLibrary *)uddata(udataV(&rd->argv[0]));
+ GCstr *name = strV(&rd->argv[1]);
+ CType *ct;
+ CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX);
+ cTValue *tv = lj_tab_getstr(cl->cache, name);
+ rd->nres = rd->data;
+ if (id && tv && !tvisnil(tv)) {
+ /* Specialize to the symbol name and make the result a constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, name));
+ if (ctype_isconstval(ct->info)) {
+ if (ct->size >= 0x80000000u &&
+ (ctype_child(cts, ct)->info & CTF_UNSIGNED))
+ J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)ct->size);
+ else
+ J->base[0] = lj_ir_kint(J, (int32_t)ct->size);
+ } else if (ctype_isextern(ct->info)) {
+ CTypeID sid = ctype_cid(ct->info);
+ void *sp = *(void **)cdataptr(cdataV(tv));
+ TRef ptr;
+ ct = ctype_raw(cts, sid);
+ if (rd->data && ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (LJ_64 && !checkptr32(sp))
+ ptr = lj_ir_kintp(J, (uintptr_t)sp);
+ else
+ ptr = lj_ir_kptr(J, sp);
+ if (rd->data) {
+ J->base[0] = crec_tv_ct(J, ct, sid, ptr);
+ } else {
+ J->needsnap = 1;
+ crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]);
+ }
+ } else {
+ J->base[0] = lj_ir_kgc(J, obj2gco(cdataV(tv)), IRT_CDATA);
+ }
+ } else {
+ lj_trace_err(J, LJ_TRERR_NOCACHE);
+ }
+ } /* else: interpreter will throw. */
+}
+
+/* -- FFI library functions ----------------------------------------------- */
+
+static TRef crec_toint(jit_State *J, CTState *cts, TRef sp, TValue *sval)
+{
+ return crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, sp, sval);
+}
+
+void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd)
+{
+ crec_alloc(J, rd, argv2ctype(J, J->base[0], &rd->argv[0]));
+}
+
+void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd)
+{
+ UNUSED(rd);
+ if (J->base[0])
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ J->base[0] = lj_ir_call(J, IRCALL_lj_vm_errno);
+}
+
+void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef tr = J->base[0];
+ if (tr) {
+ TRef trlen = J->base[1];
+ if (trlen) {
+ trlen = crec_toint(J, cts, trlen, &rd->argv[1]);
+ tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, tr, &rd->argv[0]);
+ } else {
+ tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CCHAR), 0, tr, &rd->argv[0]);
+ trlen = lj_ir_call(J, IRCALL_strlen, tr);
+ }
+ J->base[0] = emitir(IRT(IR_XSNEW, IRT_STR), tr, trlen);
+ } /* else: interpreter will throw. */
+}
+
+void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef trdst = J->base[0], trsrc = J->base[1], trlen = J->base[2];
+ if (trdst && trsrc && (trlen || tref_isstr(trsrc))) {
+ trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]);
+ trsrc = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, trsrc, &rd->argv[1]);
+ if (trlen) {
+ trlen = crec_toint(J, cts, trlen, &rd->argv[2]);
+ } else {
+ trlen = emitir(IRTI(IR_FLOAD), J->base[1], IRFL_STR_LEN);
+ trlen = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1));
+ }
+ lj_ir_call(J, IRCALL_memcpy, trdst, trsrc, trlen);
+ emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+ rd->nres = 0;
+ } /* else: interpreter will throw. */
+}
+
+void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef tr = J->base[0], trlen = J->base[1], trfill = J->base[2];
+ if (tr && trlen) {
+ tr = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, tr, &rd->argv[0]);
+ trlen = crec_toint(J, cts, trlen, &rd->argv[1]);
+ if (trfill)
+ trfill = crec_toint(J, cts, trfill, &rd->argv[2]);
+ else
+ trfill = lj_ir_kint(J, 0);
+ lj_ir_call(J, IRCALL_memset, tr, trfill, trlen);
+ emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+ rd->nres = 0;
+ } /* else: interpreter will throw. */
+}
+
+void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd)
+{
+ argv2ctype(J, J->base[0], &rd->argv[0]);
+ if (tref_iscdata(J->base[1])) {
+ argv2ctype(J, J->base[1], &rd->argv[1]);
+ J->postproc = LJ_POST_FIXBOOL;
+ J->base[0] = TREF_TRUE;
+ } else {
+ J->base[0] = TREF_FALSE;
+ }
+}
+
+void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd)
+{
+ if (tref_isstr(J->base[0])) {
+ /* Specialize to the ABI string to make the boolean result a constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[0], lj_ir_kstr(J, strV(&rd->argv[0])));
+ J->postproc = LJ_POST_FIXBOOL;
+ J->base[0] = TREF_TRUE;
+ } /* else: interpreter will throw. */
+}
+
+/* -- Miscellaneous library functions ------------------------------------- */
+
+void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *d, *ct = lj_ctype_rawref(cts, cdataV(&rd->argv[0])->typeid);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) {
+ if (ctype_isinteger_or_bool(ct->info) && ct->size <= 4 &&
+ !(ct->size == 4 && (ct->info & CTF_UNSIGNED)))
+ d = ctype_get(cts, CTID_INT32);
+ else
+ d = ctype_get(cts, CTID_DOUBLE);
+ J->base[0] = crec_ct_tv(J, d, 0, J->base[0], &rd->argv[0]);
+ } else {
+ J->base[0] = TREF_NIL;
+ }
+}
+
+#undef IR
+#undef emitir
+#undef emitconv
+
+#endif
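
The indexing recorder above leans on crec_reassoc_ofs to pull compile-time-constant parts of an index into the pointer's byte offset before the scaled add is emitted. The standalone sketch below is not part of the patch and uses hypothetical names; it only restates the underlying arithmetic identity on plain C pointers.

    #include <stddef.h>
    #include <stdio.h>

    /* Direct form: the constant k stays inside the scaled index expression. */
    static const char *addr_direct(const char *ptr, ptrdiff_t idx,
                                   ptrdiff_t k, size_t sz)
    {
      return ptr + (idx + k) * (ptrdiff_t)sz;
    }

    /* Reassociated form: k*sz is folded into the base pointer up front, so
    ** only the variable part idx is scaled afterwards. */
    static const char *addr_reassoc(const char *ptr, ptrdiff_t idx,
                                    ptrdiff_t k, size_t sz)
    {
      const char *base = ptr + k * (ptrdiff_t)sz;
      return base + idx * (ptrdiff_t)sz;
    }

    int main(void)
    {
      char buf[64];
      /* Both forms address the same element; prints 1. */
      printf("%d\n", addr_direct(buf, 3, 2, 4) == addr_reassoc(buf, 3, 2, 4));
      return 0;
    }
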
diff --git a/src/LuaJIT/src/lj_crecord.h b/src/LuaJIT/src/lj_crecord.h
new file mode 100644
index 000000000..0f93e1452
--- /dev/null
+++ b/src/LuaJIT/src/lj_crecord.h
@@ -0,0 +1,40 @@
+/*
+** Trace recorder for C data operations.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CRECORD_H
+#define _LJ_CRECORD_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+#include "lj_ffrecord.h"
+
+#if LJ_HASJIT && LJ_HASFFI
+LJ_FUNC void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd);
+#else
+#define recff_cdata_index recff_nyi
+#define recff_cdata_call recff_nyi
+#define recff_cdata_arith recff_nyi
+#define recff_clib_index recff_nyi
+#define recff_ffi_new recff_nyi
+#define recff_ffi_errno recff_nyi
+#define recff_ffi_string recff_nyi
+#define recff_ffi_copy recff_nyi
+#define recff_ffi_fill recff_nyi
+#define recff_ffi_istype recff_nyi
+#define recff_ffi_abi recff_nyi
+#endif
+
+#endif
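
When the FFI or the JIT is compiled out, the header above aliases every recorder to the recff_nyi stub instead of declaring it. A minimal sketch of the same compile-out pattern, with made-up names and a hypothetical feature macro, might look like this:

    #include <stdio.h>

    #define MYLIB_HASFASTPATH 0

    #if MYLIB_HASFASTPATH
    void mylib_fastpath(int x);
    #else
    /* Without the feature, every entry point aliases one no-op stub. */
    static void mylib_nyi(int x) { (void)x; fprintf(stderr, "not compiled in\n"); }
    #define mylib_fastpath mylib_nyi
    #endif

    int main(void)
    {
      mylib_fastpath(42);  /* Resolves to mylib_nyi in this configuration. */
      return 0;
    }
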
diff --git a/src/LuaJIT/src/lj_ctype.c b/src/LuaJIT/src/lj_ctype.c
new file mode 100644
index 000000000..2ea08e25d
--- /dev/null
+++ b/src/LuaJIT/src/lj_ctype.c
@@ -0,0 +1,607 @@
+/*
+** C type management.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_ccallback.h"
+
+/* -- C type definitions -------------------------------------------------- */
+
+/* Predefined typedefs. */
+#define CTTDDEF(_) \
+ /* Vararg handling. */ \
+ _("va_list", P_VOID) \
+ _("__builtin_va_list", P_VOID) \
+ _("__gnuc_va_list", P_VOID) \
+ /* From stddef.h. */ \
+ _("ptrdiff_t", INT_PSZ) \
+ _("size_t", UINT_PSZ) \
+ _("wchar_t", WCHAR) \
+ /* Subset of stdint.h. */ \
+ _("int8_t", INT8) \
+ _("int16_t", INT16) \
+ _("int32_t", INT32) \
+ _("int64_t", INT64) \
+ _("uint8_t", UINT8) \
+ _("uint16_t", UINT16) \
+ _("uint32_t", UINT32) \
+ _("uint64_t", UINT64) \
+ _("intptr_t", INT_PSZ) \
+ _("uintptr_t", UINT_PSZ) \
+ /* End of typedef list. */
+
+/* Keywords (only the ones we actually care for). */
+#define CTKWDEF(_) \
+ /* Type specifiers. */ \
+ _("void", -1, CTOK_VOID) \
+ _("_Bool", 0, CTOK_BOOL) \
+ _("bool", 1, CTOK_BOOL) \
+ _("char", 1, CTOK_CHAR) \
+ _("int", 4, CTOK_INT) \
+ _("__int8", 1, CTOK_INT) \
+ _("__int16", 2, CTOK_INT) \
+ _("__int32", 4, CTOK_INT) \
+ _("__int64", 8, CTOK_INT) \
+ _("float", 4, CTOK_FP) \
+ _("double", 8, CTOK_FP) \
+ _("long", 0, CTOK_LONG) \
+ _("short", 0, CTOK_SHORT) \
+ _("_Complex", 0, CTOK_COMPLEX) \
+ _("complex", 0, CTOK_COMPLEX) \
+ _("__complex", 0, CTOK_COMPLEX) \
+ _("__complex__", 0, CTOK_COMPLEX) \
+ _("signed", 0, CTOK_SIGNED) \
+ _("__signed", 0, CTOK_SIGNED) \
+ _("__signed__", 0, CTOK_SIGNED) \
+ _("unsigned", 0, CTOK_UNSIGNED) \
+ /* Type qualifiers. */ \
+ _("const", 0, CTOK_CONST) \
+ _("__const", 0, CTOK_CONST) \
+ _("__const__", 0, CTOK_CONST) \
+ _("volatile", 0, CTOK_VOLATILE) \
+ _("__volatile", 0, CTOK_VOLATILE) \
+ _("__volatile__", 0, CTOK_VOLATILE) \
+ _("restrict", 0, CTOK_RESTRICT) \
+ _("__restrict", 0, CTOK_RESTRICT) \
+ _("__restrict__", 0, CTOK_RESTRICT) \
+ _("inline", 0, CTOK_INLINE) \
+ _("__inline", 0, CTOK_INLINE) \
+ _("__inline__", 0, CTOK_INLINE) \
+ /* Storage class specifiers. */ \
+ _("typedef", 0, CTOK_TYPEDEF) \
+ _("extern", 0, CTOK_EXTERN) \
+ _("static", 0, CTOK_STATIC) \
+ _("auto", 0, CTOK_AUTO) \
+ _("register", 0, CTOK_REGISTER) \
+ /* GCC Attributes. */ \
+ _("__extension__", 0, CTOK_EXTENSION) \
+ _("__attribute", 0, CTOK_ATTRIBUTE) \
+ _("__attribute__", 0, CTOK_ATTRIBUTE) \
+ _("asm", 0, CTOK_ASM) \
+ _("__asm", 0, CTOK_ASM) \
+ _("__asm__", 0, CTOK_ASM) \
+ /* MSVC Attributes. */ \
+ _("__declspec", 0, CTOK_DECLSPEC) \
+ _("__cdecl", CTCC_CDECL, CTOK_CCDECL) \
+ _("__thiscall", CTCC_THISCALL, CTOK_CCDECL) \
+ _("__fastcall", CTCC_FASTCALL, CTOK_CCDECL) \
+ _("__stdcall", CTCC_STDCALL, CTOK_CCDECL) \
+ _("__ptr32", 4, CTOK_PTRSZ) \
+ _("__ptr64", 8, CTOK_PTRSZ) \
+ /* Other type specifiers. */ \
+ _("struct", 0, CTOK_STRUCT) \
+ _("union", 0, CTOK_UNION) \
+ _("enum", 0, CTOK_ENUM) \
+ /* Operators. */ \
+ _("sizeof", 0, CTOK_SIZEOF) \
+ _("__alignof", 0, CTOK_ALIGNOF) \
+ _("__alignof__", 0, CTOK_ALIGNOF) \
+ /* End of keyword list. */
+
+/* Type info for predefined types. Size merged in. */
+static CTInfo lj_ctype_typeinfo[] = {
+#define CTTYINFODEF(id, sz, ct, info) CTINFO((ct),(((sz)&0x3fu)<<10)+(info)),
+#define CTTDINFODEF(name, id) CTINFO(CT_TYPEDEF, CTID_##id),
+#define CTKWINFODEF(name, sz, kw) CTINFO(CT_KW,(((sz)&0x3fu)<<10)+(kw)),
+CTTYDEF(CTTYINFODEF)
+CTTDDEF(CTTDINFODEF)
+CTKWDEF(CTKWINFODEF)
+#undef CTTYINFODEF
+#undef CTTDINFODEF
+#undef CTKWINFODEF
+ 0
+};
+
+/* Predefined type names collected in a single string. */
+static const char * const lj_ctype_typenames =
+#define CTTDNAMEDEF(name, id) name "\0"
+#define CTKWNAMEDEF(name, sz, cds) name "\0"
+CTTDDEF(CTTDNAMEDEF)
+CTKWDEF(CTKWNAMEDEF)
+#undef CTTDNAMEDEF
+#undef CTKWNAMEDEF
+;
+
+#define CTTYPEINFO_NUM (sizeof(lj_ctype_typeinfo)/sizeof(CTInfo)-1)
+#define CTTYPETAB_MIN 128
+
+/* -- C type interning ---------------------------------------------------- */
+
+#define ct_hashtype(info, size) (hashrot(info, size) & CTHASH_MASK)
+#define ct_hashname(name) \
+ (hashrot(u32ptr(name), u32ptr(name) + HASH_BIAS) & CTHASH_MASK)
+
+/* Create new type element. */
+CTypeID lj_ctype_new(CTState *cts, CType **ctp)
+{
+ CTypeID id = cts->top;
+ CType *ct;
+ lua_assert(cts->L);
+ if (LJ_UNLIKELY(id >= cts->sizetab)) {
+ if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV);
+ lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType);
+ }
+ cts->top = id+1;
+ *ctp = ct = &cts->tab[id];
+ ct->info = 0;
+ ct->size = 0;
+ ct->sib = 0;
+ ct->next = 0;
+ setgcrefnull(ct->name);
+ return id;
+}
+
+/* Intern a type element. */
+CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size)
+{
+ uint32_t h = ct_hashtype(info, size);
+ CTypeID id = cts->hash[h];
+ lua_assert(cts->L);
+ while (id) {
+ CType *ct = ctype_get(cts, id);
+ if (ct->info == info && ct->size == size)
+ return id;
+ id = ct->next;
+ }
+ id = cts->top;
+ if (LJ_UNLIKELY(id >= cts->sizetab)) {
+ if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV);
+ lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType);
+ }
+ cts->top = id+1;
+ cts->tab[id].info = info;
+ cts->tab[id].size = size;
+ cts->tab[id].sib = 0;
+ cts->tab[id].next = cts->hash[h];
+ setgcrefnull(cts->tab[id].name);
+ cts->hash[h] = (CTypeID1)id;
+ return id;
+}
+
+/* Add type element to hash table. */
+static void ctype_addtype(CTState *cts, CType *ct, CTypeID id)
+{
+ uint32_t h = ct_hashtype(ct->info, ct->size);
+ ct->next = cts->hash[h];
+ cts->hash[h] = (CTypeID1)id;
+}
+
+/* Add named element to hash table. */
+void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id)
+{
+ uint32_t h = ct_hashname(gcref(ct->name));
+ ct->next = cts->hash[h];
+ cts->hash[h] = (CTypeID1)id;
+}
+
+/* Get a C type by name, matching the type mask. */
+CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, uint32_t tmask)
+{
+ CTypeID id = cts->hash[ct_hashname(name)];
+ while (id) {
+ CType *ct = ctype_get(cts, id);
+ if (gcref(ct->name) == obj2gco(name) &&
+ ((tmask >> ctype_type(ct->info)) & 1)) {
+ *ctp = ct;
+ return id;
+ }
+ id = ct->next;
+ }
+ *ctp = &cts->tab[0]; /* Simplify caller logic. ctype_get() would assert. */
+ return 0;
+}
+
+/* Get a struct/union/enum/function field by name. */
+CType *lj_ctype_getfield(CTState *cts, CType *ct, GCstr *name, CTSize *ofs)
+{
+ while (ct->sib) {
+ ct = ctype_get(cts, ct->sib);
+ if (gcref(ct->name) == obj2gco(name)) {
+ *ofs = ct->size;
+ return ct;
+ }
+ if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
+ CType *fct = lj_ctype_getfield(cts, ctype_child(cts, ct), name, ofs);
+ if (fct) {
+ *ofs += ct->size;
+ return fct;
+ }
+ }
+ }
+ return NULL; /* Not found. */
+}
+
+/* -- C type information -------------------------------------------------- */
+
+/* Follow references and get raw type for a C type ID. */
+CType *lj_ctype_rawref(CTState *cts, CTypeID id)
+{
+ CType *ct = ctype_get(cts, id);
+ while (ctype_isattrib(ct->info) || ctype_isref(ct->info))
+ ct = ctype_child(cts, ct);
+ return ct;
+}
+
+/* Get size for a C type ID. Does NOT support VLA/VLS. */
+CTSize lj_ctype_size(CTState *cts, CTypeID id)
+{
+ CType *ct = ctype_raw(cts, id);
+ return ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID;
+}
+
+/* Get size for a variable-length C type. Does NOT support other C types. */
+CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem)
+{
+ uint64_t xsz = 0;
+ if (ctype_isstruct(ct->info)) {
+ CTypeID arrid = 0, fid = ct->sib;
+ xsz = ct->size; /* Add the struct size. */
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (ctype_type(ctf->info) == CT_FIELD)
+ arrid = ctype_cid(ctf->info); /* Remember last field of VLS. */
+ fid = ctf->sib;
+ }
+ ct = ctype_raw(cts, arrid);
+ }
+ lua_assert(ctype_isvlarray(ct->info)); /* Must be a VLA. */
+ ct = ctype_rawchild(cts, ct); /* Get array element. */
+ lua_assert(ctype_hassize(ct->info));
+ /* Calculate actual size of VLA and check for overflow. */
+ xsz += (uint64_t)ct->size * nelem;
+ return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID;
+}
+
+/* Get type, qualifiers, size and alignment for a C type ID. */
+CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp)
+{
+ CTInfo qual = 0;
+ CType *ct = ctype_get(cts, id);
+ for (;;) {
+ CTInfo info = ct->info;
+ if (ctype_isenum(info)) {
+ /* Follow child. Need to look at its attributes, too. */
+ } else if (ctype_isattrib(info)) {
+ if (ctype_isxattrib(info, CTA_QUAL))
+ qual |= ct->size;
+ else if (ctype_isxattrib(info, CTA_ALIGN) && !(qual & CTFP_ALIGNED))
+ qual |= CTFP_ALIGNED + CTALIGN(ct->size);
+ } else {
+ if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN);
+ qual |= (info & ~(CTF_ALIGN|CTMASK_CID));
+ lua_assert(ctype_hassize(info) || ctype_isfunc(info));
+ *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size;
+ break;
+ }
+ ct = ctype_get(cts, ctype_cid(info));
+ }
+ return qual;
+}
+
+/* Get ctype metamethod. */
+cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm)
+{
+ CType *ct = ctype_get(cts, id);
+ cTValue *tv;
+ while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) {
+ id = ctype_cid(ct->info);
+ ct = ctype_get(cts, id);
+ }
+ if (ctype_isptr(ct->info) &&
+ ctype_isfunc(ctype_get(cts, ctype_cid(ct->info))->info))
+ tv = lj_tab_getstr(cts->miscmap, &cts->g->strempty);
+ else
+ tv = lj_tab_getinth(cts->miscmap, -(int32_t)id);
+ if (tv && tvistab(tv) &&
+ (tv = lj_tab_getstr(tabV(tv), mmname_str(cts->g, mm))) && !tvisnil(tv))
+ return tv;
+ return NULL;
+}
+
+/* -- C type representation ----------------------------------------------- */
+
+/* Fixed max. length of a C type representation. */
+#define CTREPR_MAX 512
+
+typedef struct CTRepr {
+ char *pb, *pe;
+ CTState *cts;
+ lua_State *L;
+ int needsp;
+ int ok;
+ char buf[CTREPR_MAX];
+} CTRepr;
+
+/* Prepend string. */
+static void ctype_prepstr(CTRepr *ctr, const char *str, MSize len)
+{
+ char *p = ctr->pb;
+ if (ctr->buf + len+1 > p) { ctr->ok = 0; return; }
+ if (ctr->needsp) *--p = ' ';
+ ctr->needsp = 1;
+ p -= len;
+ while (len-- > 0) p[len] = str[len];
+ ctr->pb = p;
+}
+
+#define ctype_preplit(ctr, str) ctype_prepstr((ctr), "" str, sizeof(str)-1)
+
+/* Prepend char. */
+static void ctype_prepc(CTRepr *ctr, int c)
+{
+ if (ctr->buf >= ctr->pb) { ctr->ok = 0; return; }
+ *--ctr->pb = c;
+}
+
+/* Prepend number. */
+static void ctype_prepnum(CTRepr *ctr, uint32_t n)
+{
+ char *p = ctr->pb;
+ if (ctr->buf + 10+1 > p) { ctr->ok = 0; return; }
+ do { *--p = (char)('0' + n % 10); } while (n /= 10);
+ ctr->pb = p;
+ ctr->needsp = 0;
+}
+
+/* Append char. */
+static void ctype_appc(CTRepr *ctr, int c)
+{
+ if (ctr->pe >= ctr->buf + CTREPR_MAX) { ctr->ok = 0; return; }
+ *ctr->pe++ = c;
+}
+
+/* Append number. */
+static void ctype_appnum(CTRepr *ctr, uint32_t n)
+{
+ char buf[10];
+ char *p = buf+sizeof(buf);
+ char *q = ctr->pe;
+ if (q > ctr->buf + CTREPR_MAX - 10) { ctr->ok = 0; return; }
+ do { *--p = (char)('0' + n % 10); } while (n /= 10);
+ do { *q++ = *p++; } while (p < buf+sizeof(buf));
+ ctr->pe = q;
+}
+
+/* Prepend qualifiers. */
+static void ctype_prepqual(CTRepr *ctr, CTInfo info)
+{
+ if ((info & CTF_VOLATILE)) ctype_preplit(ctr, "volatile");
+ if ((info & CTF_CONST)) ctype_preplit(ctr, "const");
+}
+
+/* Prepend named type. */
+static void ctype_preptype(CTRepr *ctr, CType *ct, CTInfo qual, const char *t)
+{
+ if (gcref(ct->name)) {
+ GCstr *str = gco2str(gcref(ct->name));
+ ctype_prepstr(ctr, strdata(str), str->len);
+ } else {
+ if (ctr->needsp) ctype_prepc(ctr, ' ');
+ ctype_prepnum(ctr, ctype_typeid(ctr->cts, ct));
+ ctr->needsp = 1;
+ }
+ ctype_prepstr(ctr, t, (MSize)strlen(t));
+ ctype_prepqual(ctr, qual);
+}
+
+static void ctype_repr(CTRepr *ctr, CTypeID id)
+{
+ CType *ct = ctype_get(ctr->cts, id);
+ CTInfo qual = 0;
+ int ptrto = 0;
+ for (;;) {
+ CTInfo info = ct->info;
+ CTSize size = ct->size;
+ switch (ctype_type(info)) {
+ case CT_NUM:
+ if ((info & CTF_BOOL)) {
+ ctype_preplit(ctr, "bool");
+ } else if ((info & CTF_FP)) {
+ if (size == sizeof(double)) ctype_preplit(ctr, "double");
+ else if (size == sizeof(float)) ctype_preplit(ctr, "float");
+ else ctype_preplit(ctr, "long double");
+ } else if (size == 1) {
+ if (!((info ^ CTF_UCHAR) & CTF_UNSIGNED)) ctype_preplit(ctr, "char");
+ else if (CTF_UCHAR) ctype_preplit(ctr, "signed char");
+ else ctype_preplit(ctr, "unsigned char");
+ } else if (size < 8) {
+ if (size == 4) ctype_preplit(ctr, "int");
+ else ctype_preplit(ctr, "short");
+ if ((info & CTF_UNSIGNED)) ctype_preplit(ctr, "unsigned");
+ } else {
+ ctype_preplit(ctr, "_t");
+ ctype_prepnum(ctr, size*8);
+ ctype_preplit(ctr, "int");
+ if ((info & CTF_UNSIGNED)) ctype_prepc(ctr, 'u');
+ }
+ ctype_prepqual(ctr, (qual|info));
+ return;
+ case CT_VOID:
+ ctype_preplit(ctr, "void");
+ ctype_prepqual(ctr, (qual|info));
+ return;
+ case CT_STRUCT:
+ ctype_preptype(ctr, ct, qual, (info & CTF_UNION) ? "union" : "struct");
+ return;
+ case CT_ENUM:
+ ctype_preptype(ctr, ct, qual, "enum");
+ return;
+ case CT_ATTRIB:
+ if (ctype_attrib(info) == CTA_QUAL) qual |= size;
+ break;
+ case CT_PTR:
+ if ((info & CTF_REF)) {
+ ctype_prepc(ctr, '&');
+ } else {
+ ctype_prepqual(ctr, (qual|info));
+ if (LJ_64 && size == 4) ctype_preplit(ctr, "__ptr32");
+ ctype_prepc(ctr, '*');
+ }
+ qual = 0;
+ ptrto = 1;
+ ctr->needsp = 1;
+ break;
+ case CT_ARRAY:
+ if (ctype_isrefarray(info)) {
+ ctr->needsp = 1;
+ if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); }
+ ctype_appc(ctr, '[');
+ if (size != CTSIZE_INVALID) {
+ CTSize csize = ctype_child(ctr->cts, ct)->size;
+ ctype_appnum(ctr, csize ? size/csize : 0);
+ } else if ((info & CTF_VLA)) {
+ ctype_appc(ctr, '?');
+ }
+ ctype_appc(ctr, ']');
+ } else if ((info & CTF_COMPLEX)) {
+ if (size == 2*sizeof(float)) ctype_preplit(ctr, "float");
+ ctype_preplit(ctr, "complex");
+ return;
+ } else {
+ ctype_preplit(ctr, ")))");
+ ctype_prepnum(ctr, size);
+ ctype_preplit(ctr, "__attribute__((vector_size(");
+ }
+ break;
+ case CT_FUNC:
+ ctr->needsp = 1;
+ if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); }
+ ctype_appc(ctr, '(');
+ ctype_appc(ctr, ')');
+ break;
+ default:
+ lua_assert(0);
+ break;
+ }
+ ct = ctype_get(ctr->cts, ctype_cid(info));
+ }
+}
+
+/* Return a printable representation of a C type. */
+GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name)
+{
+ global_State *g = G(L);
+ CTRepr ctr;
+ ctr.pb = ctr.pe = &ctr.buf[CTREPR_MAX/2];
+ ctr.cts = ctype_ctsG(g);
+ ctr.L = L;
+ ctr.ok = 1;
+ ctr.needsp = 0;
+ if (name) ctype_prepstr(&ctr, strdata(name), name->len);
+ ctype_repr(&ctr, id);
+ if (LJ_UNLIKELY(!ctr.ok)) return lj_str_newlit(L, "?");
+ return lj_str_new(L, ctr.pb, ctr.pe - ctr.pb);
+}
+
+/* Convert int64_t/uint64_t to string with 'LL' or 'ULL' suffix. */
+GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned)
+{
+ char buf[1+20+3];
+ char *p = buf+sizeof(buf);
+ int sign = 0;
+ *--p = 'L'; *--p = 'L';
+ if (isunsigned) {
+ *--p = 'U';
+ } else if ((int64_t)n < 0) {
+ n = (uint64_t)-(int64_t)n;
+ sign = 1;
+ }
+ do { *--p = (char)('0' + n % 10); } while (n /= 10);
+ if (sign) *--p = '-';
+ return lj_str_new(L, p, (size_t)(buf+sizeof(buf)-p));
+}
+
+/* Convert complex to string with 'i' or 'I' suffix. */
+GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size)
+{
+ char buf[2*LJ_STR_NUMBUF+2+1];
+ TValue re, im;
+ size_t len;
+ if (size == 2*sizeof(double)) {
+ re.n = *(double *)sp; im.n = ((double *)sp)[1];
+ } else {
+ re.n = (double)*(float *)sp; im.n = (double)((float *)sp)[1];
+ }
+ len = lj_str_bufnum(buf, &re);
+ if (!(im.u32.hi & 0x80000000u) || im.n != im.n) buf[len++] = '+';
+ len += lj_str_bufnum(buf+len, &im);
+ buf[len] = buf[len-1] >= 'a' ? 'I' : 'i';
+ return lj_str_new(L, buf, len+1);
+}
+
+/* -- C type state -------------------------------------------------------- */
+
+/* Initialize C type table and state. */
+CTState *lj_ctype_init(lua_State *L)
+{
+ CTState *cts = lj_mem_newt(L, sizeof(CTState), CTState);
+ CType *ct = lj_mem_newvec(L, CTTYPETAB_MIN, CType);
+ const char *name = lj_ctype_typenames;
+ CTypeID id;
+ memset(cts, 0, sizeof(CTState));
+ cts->tab = ct;
+ cts->sizetab = CTTYPETAB_MIN;
+ cts->top = CTTYPEINFO_NUM;
+ cts->L = NULL;
+ cts->g = G(L);
+ for (id = 0; id < CTTYPEINFO_NUM; id++, ct++) {
+ CTInfo info = lj_ctype_typeinfo[id];
+ ct->size = (CTSize)((int32_t)(info << 16) >> 26);
+ ct->info = info & 0xffff03ffu;
+ if (ctype_type(info) == CT_KW || ctype_istypedef(info)) {
+ size_t len = strlen(name);
+ GCstr *str = lj_str_new(L, name, len);
+ ctype_setname(ct, str);
+ name += len+1;
+ lj_ctype_addname(cts, ct, id);
+ } else {
+ setgcrefnull(ct->name);
+ if (!ctype_isenum(info)) ctype_addtype(cts, ct, id);
+ }
+ }
+ setmref(G(L)->ctype_state, cts);
+ return cts;
+}
+
+/* Free C type table and state. */
+void lj_ctype_freestate(global_State *g)
+{
+ CTState *cts = ctype_ctsG(g);
+ if (cts) {
+ lj_ccallback_mcode_free(cts);
+ lj_mem_freevec(g, cts->tab, cts->sizetab, CType);
+ lj_mem_freevec(g, cts->cb.cbid, cts->cb.sizeid, CTypeID1);
+ lj_mem_freet(g, cts);
+ }
+}
+
+#endif
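
lj_ctype_intern above deduplicates type descriptors through a small array of hash-chain heads, appending a new slot only when no existing (info, size) pair matches. The sketch below is illustrative only, with invented names, sizes, and hash constant, but follows the same look-up-then-append shape.

    #include <stdint.h>
    #include <stdio.h>

    #define TAB_MAX   256
    #define HASH_SIZE 64
    #define HASH_MASK (HASH_SIZE-1)

    typedef struct Desc { uint32_t info, size, next; } Desc;  /* next: chain link. */

    static Desc tab[TAB_MAX];
    static uint32_t tabtop = 1;        /* Slot 0 is reserved as "invalid". */
    static uint32_t hash[HASH_SIZE];   /* Chain heads; 0 terminates a chain. */

    static uint32_t intern(uint32_t info, uint32_t size)
    {
      uint32_t h = (info ^ (size * 2654435761u)) & HASH_MASK;
      uint32_t id;
      for (id = hash[h]; id; id = tab[id].next)  /* Walk the hash chain. */
        if (tab[id].info == info && tab[id].size == size)
          return id;                             /* Already interned. */
      if (tabtop >= TAB_MAX) return 0;           /* Table full: error in real code. */
      id = tabtop++;                             /* Append a fresh slot. */
      tab[id].info = info; tab[id].size = size;
      tab[id].next = hash[h]; hash[h] = id;      /* Link as the new chain head. */
      return id;
    }

    int main(void)
    {
      uint32_t a = intern(7, 4);
      uint32_t b = intern(7, 8);
      uint32_t c = intern(7, 4);
      printf("%u %u %u\n", a, b, c);  /* Equal descriptors share an id: 1 2 1. */
      return 0;
    }
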
diff --git a/src/LuaJIT/src/lj_ctype.h b/src/LuaJIT/src/lj_ctype.h
new file mode 100644
index 000000000..7953654f2
--- /dev/null
+++ b/src/LuaJIT/src/lj_ctype.h
@@ -0,0 +1,459 @@
+/*
+** C type management.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CTYPE_H
+#define _LJ_CTYPE_H
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+
+#if LJ_HASFFI
+
+/* -- C type definitions -------------------------------------------------- */
+
+/* C type numbers. Highest 4 bits of C type info. ORDER CT. */
+enum {
+ /* Externally visible types. */
+ CT_NUM, /* Integer or floating-point numbers. */
+ CT_STRUCT, /* Struct or union. */
+ CT_PTR, /* Pointer or reference. */
+ CT_ARRAY, /* Array or complex type. */
+ CT_MAYCONVERT = CT_ARRAY,
+ CT_VOID, /* Void type. */
+ CT_ENUM, /* Enumeration. */
+ CT_HASSIZE = CT_ENUM, /* Last type where ct->size holds the actual size. */
+ CT_FUNC, /* Function. */
+ CT_TYPEDEF, /* Typedef. */
+ CT_ATTRIB, /* Miscellaneous attributes. */
+ /* Internal element types. */
+ CT_FIELD, /* Struct/union field or function parameter. */
+ CT_BITFIELD, /* Struct/union bitfield. */
+ CT_CONSTVAL, /* Constant value. */
+ CT_EXTERN, /* External reference. */
+ CT_KW /* Keyword. */
+};
+
+LJ_STATIC_ASSERT(((int)CT_PTR & (int)CT_ARRAY) == CT_PTR);
+LJ_STATIC_ASSERT(((int)CT_STRUCT & (int)CT_ARRAY) == CT_STRUCT);
+
+/*
+**  ---------- info ------------
+** |type      flags...  A   cid | size   |  sib  | next  | name  |
+** +----------------------------+--------+-------+-------+-------+--
+** |NUM       BFvcUL..  A       | size   |       | type  |       |
+** |STRUCT    ..vcU..V  A       | size   | field | name? | name? |
+** |PTR       ..vcR...  A   cid | size   |       | type  |       |
+** |ARRAY     VCvc...V  A   cid | size   |       | type  |       |
+** |VOID      ..vc....  A       | size   |       | type  |       |
+** |ENUM                A   cid | size   | const | name? | name? |
+** |FUNC      ....VS..  cc  cid | nargs  | field | name? | name? |
+** |TYPEDEF                 cid |        |       | name  | name  |
+** |ATTRIB        attrnum   cid | attr   | sib?  | type? |       |
+** |FIELD                   cid | offset | field |       | name? |
+** |BITFIELD  B.vcU csz bsz pos | offset | field |       | name? |
+** |CONSTVAL     c          cid | value  | const | name  | name  |
+** |EXTERN                  cid |        | sib?  | name  | name  |
+** |KW                      tok | size   |       | name  | name  |
+** +----------------------------+--------+-------+-------+-------+--
+**        ^^  ^^--- bits used for C type conversion dispatch
+*/
+
+/* C type info flags. TFFArrrr */
+#define CTF_BOOL 0x08000000u /* Boolean: NUM, BITFIELD. */
+#define CTF_FP 0x04000000u /* Floating-point: NUM. */
+#define CTF_CONST 0x02000000u /* Const qualifier. */
+#define CTF_VOLATILE 0x01000000u /* Volatile qualifier. */
+#define CTF_UNSIGNED 0x00800000u /* Unsigned: NUM, BITFIELD. */
+#define CTF_LONG 0x00400000u /* Long: NUM. */
+#define CTF_VLA 0x00100000u /* Variable-length: ARRAY, STRUCT. */
+#define CTF_REF 0x00800000u /* Reference: PTR. */
+#define CTF_VECTOR 0x08000000u /* Vector: ARRAY. */
+#define CTF_COMPLEX 0x04000000u /* Complex: ARRAY. */
+#define CTF_UNION 0x00800000u /* Union: STRUCT. */
+#define CTF_VARARG 0x00800000u /* Vararg: FUNC. */
+#define CTF_SSEREGPARM 0x00400000u /* SSE register parameters: FUNC. */
+
+#define CTF_QUAL (CTF_CONST|CTF_VOLATILE)
+#define CTF_ALIGN (CTMASK_ALIGN<<CTSHIFT_ALIGN)
+#define CTF_UCHAR ((char)-1 > 0 ? CTF_UNSIGNED : 0)
+
+/* Flags used in parser. .F.Ammvf cp->attr */
+#define CTFP_ALIGNED 0x00000001u /* cp->attr + ALIGN */
+#define CTFP_PACKED 0x00000002u /* cp->attr */
+/* ...C...f cp->fattr */
+#define CTFP_CCONV 0x00000001u /* cp->fattr + CCONV/[SSE]REGPARM */
+
+/* C type info bitfields. */
+#define CTMASK_CID 0x0000ffffu /* Max. 65536 type IDs. */
+#define CTMASK_NUM 0xf0000000u /* Max. 16 type numbers. */
+#define CTSHIFT_NUM 28
+#define CTMASK_ALIGN 15 /* Max. alignment is 2^15. */
+#define CTSHIFT_ALIGN 16
+#define CTMASK_ATTRIB 255 /* Max. 256 attributes. */
+#define CTSHIFT_ATTRIB 16
+#define CTMASK_CCONV 3 /* Max. 4 calling conventions. */
+#define CTSHIFT_CCONV 16
+#define CTMASK_REGPARM 3 /* Max. 0-3 regparms. */
+#define CTSHIFT_REGPARM 18
+/* Bitfields only used in parser. */
+#define CTMASK_VSIZEP 15 /* Max. vector size is 2^15. */
+#define CTSHIFT_VSIZEP 4
+#define CTMASK_MSIZEP 255 /* Max. type size (via mode) is 128. */
+#define CTSHIFT_MSIZEP 8
+
+/* Info bits for BITFIELD. Max. size of bitfield is 64 bits. */
+#define CTBSZ_MAX 32 /* Max. size of bitfield is 32 bit. */
+#define CTBSZ_FIELD 127 /* Temp. marker for regular field. */
+#define CTMASK_BITPOS 127
+#define CTMASK_BITBSZ 127
+#define CTMASK_BITCSZ 127
+#define CTSHIFT_BITPOS 0
+#define CTSHIFT_BITBSZ 8
+#define CTSHIFT_BITCSZ 16
+
+#define CTF_INSERT(info, field, val) \
+ info = (info & ~(CTMASK_##field<<CTSHIFT_##field)) | \
+ (((CTSize)(val) & CTMASK_##field) << CTSHIFT_##field)
+
+/* Calling conventions. ORDER CC. */
+enum { CTCC_CDECL, CTCC_THISCALL, CTCC_FASTCALL, CTCC_STDCALL };
+
+/* C type info macros. */
+#define CTINFO(ct, flags) (((CTInfo)(ct) << CTSHIFT_NUM) + (flags))
+#define CTALIGN(al) ((CTSize)(al) << CTSHIFT_ALIGN)
+#define CTATTRIB(at) ((CTInfo)(at) << CTSHIFT_ATTRIB)
+
+#define ctype_type(info) ((info) >> CTSHIFT_NUM)
+#define ctype_cid(info) ((CTypeID)((info) & CTMASK_CID))
+#define ctype_align(info) (((info) >> CTSHIFT_ALIGN) & CTMASK_ALIGN)
+#define ctype_attrib(info) (((info) >> CTSHIFT_ATTRIB) & CTMASK_ATTRIB)
+#define ctype_bitpos(info) (((info) >> CTSHIFT_BITPOS) & CTMASK_BITPOS)
+#define ctype_bitbsz(info) (((info) >> CTSHIFT_BITBSZ) & CTMASK_BITBSZ)
+#define ctype_bitcsz(info) (((info) >> CTSHIFT_BITCSZ) & CTMASK_BITCSZ)
+#define ctype_vsizeP(info) (((info) >> CTSHIFT_VSIZEP) & CTMASK_VSIZEP)
+#define ctype_msizeP(info) (((info) >> CTSHIFT_MSIZEP) & CTMASK_MSIZEP)
+#define ctype_cconv(info) (((info) >> CTSHIFT_CCONV) & CTMASK_CCONV)
+
+/* Simple type checks. */
+#define ctype_isnum(info) (ctype_type((info)) == CT_NUM)
+#define ctype_isvoid(info) (ctype_type((info)) == CT_VOID)
+#define ctype_isptr(info) (ctype_type((info)) == CT_PTR)
+#define ctype_isarray(info) (ctype_type((info)) == CT_ARRAY)
+#define ctype_isstruct(info) (ctype_type((info)) == CT_STRUCT)
+#define ctype_isfunc(info) (ctype_type((info)) == CT_FUNC)
+#define ctype_isenum(info) (ctype_type((info)) == CT_ENUM)
+#define ctype_istypedef(info) (ctype_type((info)) == CT_TYPEDEF)
+#define ctype_isattrib(info) (ctype_type((info)) == CT_ATTRIB)
+#define ctype_isfield(info) (ctype_type((info)) == CT_FIELD)
+#define ctype_isbitfield(info) (ctype_type((info)) == CT_BITFIELD)
+#define ctype_isconstval(info) (ctype_type((info)) == CT_CONSTVAL)
+#define ctype_isextern(info) (ctype_type((info)) == CT_EXTERN)
+#define ctype_hassize(info) (ctype_type((info)) <= CT_HASSIZE)
+
+/* Combined type and flag checks. */
+#define ctype_isinteger(info) \
+ (((info) & (CTMASK_NUM|CTF_BOOL|CTF_FP)) == CTINFO(CT_NUM, 0))
+#define ctype_isinteger_or_bool(info) \
+ (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, 0))
+#define ctype_isbool(info) \
+ (((info) & (CTMASK_NUM|CTF_BOOL)) == CTINFO(CT_NUM, CTF_BOOL))
+#define ctype_isfp(info) \
+ (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, CTF_FP))
+
+#define ctype_ispointer(info) \
+ ((ctype_type(info) >> 1) == (CT_PTR >> 1)) /* Pointer or array. */
+#define ctype_isref(info) \
+ (((info) & (CTMASK_NUM|CTF_REF)) == CTINFO(CT_PTR, CTF_REF))
+
+#define ctype_isrefarray(info) \
+ (((info) & (CTMASK_NUM|CTF_VECTOR|CTF_COMPLEX)) == CTINFO(CT_ARRAY, 0))
+#define ctype_isvector(info) \
+ (((info) & (CTMASK_NUM|CTF_VECTOR)) == CTINFO(CT_ARRAY, CTF_VECTOR))
+#define ctype_iscomplex(info) \
+ (((info) & (CTMASK_NUM|CTF_COMPLEX)) == CTINFO(CT_ARRAY, CTF_COMPLEX))
+
+#define ctype_isvltype(info) \
+ (((info) & ((CTMASK_NUM|CTF_VLA) - (2u<<CTSHIFT_NUM))) == \
+ CTINFO(CT_STRUCT, CTF_VLA)) /* VL array or VL struct. */
+#define ctype_isvlarray(info) \
+ (((info) & (CTMASK_NUM|CTF_VLA)) == CTINFO(CT_ARRAY, CTF_VLA))
+
+#define ctype_isxattrib(info, at) \
+ (((info) & (CTMASK_NUM|CTATTRIB(CTMASK_ATTRIB))) == \
+ CTINFO(CT_ATTRIB, CTATTRIB(at)))
+
+/* -- C tokens and keywords ----------------------------------------------- */
+
+/* C token names. */
+#define CTOKDEF(_) \
+ _(NUM, "<number>") _(IDENT, "<identifier>") _(STRING, "<string>") \
+ _(INTEGER, "<integer>") _(EOF, "<eof>") \
+ _(OROR, "||") _(ANDAND, "&&") _(EQ, "==") _(NE, "!=") \
+ _(LE, "<=") _(GE, ">=") _(SHL, "<<") _(SHR, ">>") _(DEREF, "->")
+
+/* Simple declaration specifiers. */
+#define CDSDEF(_) \
+ _(VOID) _(BOOL) _(CHAR) _(INT) _(FP) \
+ _(LONG) _(LONGLONG) _(SHORT) _(COMPLEX) _(SIGNED) _(UNSIGNED) \
+ _(CONST) _(VOLATILE) _(RESTRICT) _(INLINE) \
+ _(TYPEDEF) _(EXTERN) _(STATIC) _(AUTO) _(REGISTER)
+
+/* C keywords. */
+#define CKWDEF(_) \
+ CDSDEF(_) _(EXTENSION) _(ASM) _(ATTRIBUTE) \
+ _(DECLSPEC) _(CCDECL) _(PTRSZ) \
+ _(STRUCT) _(UNION) _(ENUM) \
+ _(SIZEOF) _(ALIGNOF)
+
+/* C token numbers. */
+enum {
+ CTOK_OFS = 255,
+#define CTOKNUM(name, sym) CTOK_##name,
+#define CKWNUM(name) CTOK_##name,
+CTOKDEF(CTOKNUM)
+CKWDEF(CKWNUM)
+#undef CTOKNUM
+#undef CKWNUM
+ CTOK_FIRSTDECL = CTOK_VOID,
+ CTOK_FIRSTSCL = CTOK_TYPEDEF,
+ CTOK_LASTDECLFLAG = CTOK_REGISTER,
+ CTOK_LASTDECL = CTOK_ENUM
+};
+
+/* Declaration specifier flags. */
+enum {
+#define CDSFLAG(name) CDF_##name = (1u << (CTOK_##name - CTOK_FIRSTDECL)),
+CDSDEF(CDSFLAG)
+#undef CDSFLAG
+ CDF__END
+};
+
+#define CDF_SCL (CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC|CDF_AUTO|CDF_REGISTER)
+
+/* -- C type management --------------------------------------------------- */
+
+#define ctype_ctsG(g) (mref((g)->ctype_state, CTState))
+
+/* Get C type state. */
+static LJ_AINLINE CTState *ctype_cts(lua_State *L)
+{
+ CTState *cts = ctype_ctsG(G(L));
+ cts->L = L; /* Save L for errors and allocations. */
+ return cts;
+}
+
+/* Save and restore state of C type table. */
+#define LJ_CTYPE_SAVE(cts) CTState savects_ = *(cts)
+#define LJ_CTYPE_RESTORE(cts) \
+ ((cts)->top = savects_.top, \
+ memcpy((cts)->hash, savects_.hash, sizeof(savects_.hash)))
+
+/* Check C type ID for validity when assertions are enabled. */
+static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id)
+{
+ lua_assert(id > 0 && id < cts->top); UNUSED(cts);
+ return id;
+}
+
+/* Get C type for C type ID. */
+static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id)
+{
+ return &cts->tab[ctype_check(cts, id)];
+}
+
+/* Get C type ID for a C type. */
+#define ctype_typeid(cts, ct) ((CTypeID)((ct) - (cts)->tab))
+
+/* Get child C type. */
+static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct)
+{
+ lua_assert(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) ||
+ ctype_isbitfield(ct->info))); /* These don't have children. */
+ return ctype_get(cts, ctype_cid(ct->info));
+}
+
+/* Get raw type for a C type ID. */
+static LJ_AINLINE CType *ctype_raw(CTState *cts, CTypeID id)
+{
+ CType *ct = ctype_get(cts, id);
+ while (ctype_isattrib(ct->info)) ct = ctype_child(cts, ct);
+ return ct;
+}
+
+/* Get raw type of the child of a C type. */
+static LJ_AINLINE CType *ctype_rawchild(CTState *cts, CType *ct)
+{
+ do { ct = ctype_child(cts, ct); } while (ctype_isattrib(ct->info));
+ return ct;
+}
+
+/* Set the name of a C type table element. */
+static LJ_AINLINE void ctype_setname(CType *ct, GCstr *s)
+{
+ /* NOBARRIER: mark string as fixed -- the C type table is never collected. */
+ fixstring(s);
+ setgcref(ct->name, obj2gco(s));
+}
+
+LJ_FUNC CTypeID lj_ctype_new(CTState *cts, CType **ctp);
+LJ_FUNC CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size);
+LJ_FUNC void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id);
+LJ_FUNC CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name,
+ uint32_t tmask);
+LJ_FUNC CType *lj_ctype_getfield(CTState *cts, CType *ct, GCstr *name,
+ CTSize *ofs);
+LJ_FUNC CType *lj_ctype_rawref(CTState *cts, CTypeID id);
+LJ_FUNC CTSize lj_ctype_size(CTState *cts, CTypeID id);
+LJ_FUNC CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem);
+LJ_FUNC CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp);
+LJ_FUNC cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm);
+LJ_FUNC GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name);
+LJ_FUNC GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned);
+LJ_FUNC GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size);
+LJ_FUNC CTState *lj_ctype_init(lua_State *L);
+LJ_FUNC void lj_ctype_freestate(global_State *g);
+
+#endif
+
+#endif
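
The ctype_type/ctype_cid/ctype_align accessors above all decode one packed 32-bit CTInfo word. As a rough illustration only (the masks, shifts, and flag value here are simplified stand-ins, not the header's real layout), the same pack-and-unpack pattern looks like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified layout: type number in the top nibble, child type id in the
    ** low 16 bits, flag bits in between. */
    #define MASK_CID  0x0000ffffu
    #define SHIFT_NUM 28

    static uint32_t pack(uint32_t typenum, uint32_t flags, uint32_t cid)
    {
      return (typenum << SHIFT_NUM) + flags + (cid & MASK_CID);
    }

    static uint32_t unpack_type(uint32_t info) { return info >> SHIFT_NUM; }
    static uint32_t unpack_cid(uint32_t info)  { return info & MASK_CID; }

    int main(void)
    {
      uint32_t info = pack(2 /* pointer-like */, 0x02000000u /* const */, 17);
      /* Prints: type=2 cid=17 const=1 */
      printf("type=%u cid=%u const=%d\n",
             unpack_type(info), unpack_cid(info), (info & 0x02000000u) != 0);
      return 0;
    }
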
diff --git a/src/LuaJIT/src/lj_debug.c b/src/LuaJIT/src/lj_debug.c
new file mode 100644
index 000000000..c2dc0dd89
--- /dev/null
+++ b/src/LuaJIT/src/lj_debug.c
@@ -0,0 +1,504 @@
+/*
+** Debugging and introspection.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_debug_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#endif
+
+/* -- Frames -------------------------------------------------------------- */
+
+/* Get frame corresponding to a level. */
+cTValue *lj_debug_frame(lua_State *L, int level, int *size)
+{
+ cTValue *frame, *nextframe, *bot = tvref(L->stack);
+ /* Traverse frames backwards. */
+ for (nextframe = frame = L->base-1; frame > bot; ) {
+ if (frame_gc(frame) == obj2gco(L))
+ level++; /* Skip dummy frames. See lj_meta_call(). */
+ if (level-- == 0) {
+ *size = (int)(nextframe - frame);
+ return frame; /* Level found. */
+ }
+ nextframe = frame;
+ if (frame_islua(frame)) {
+ frame = frame_prevl(frame);
+ } else {
+ if (frame_isvarg(frame))
+ level++; /* Skip vararg pseudo-frame. */
+ frame = frame_prevd(frame);
+ }
+ }
+ *size = level;
+ return NULL; /* Level not found. */
+}
+
+/* Invalid bytecode position. */
+#define NO_BCPOS (~(BCPos)0)
+
+/* Return bytecode position for function/frame or NO_BCPOS. */
+static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe)
+{
+ const BCIns *ins;
+ GCproto *pt;
+ BCPos pos;
+ lua_assert(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD);
+ if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */
+ return NO_BCPOS;
+ } else if (nextframe == NULL) { /* Lua function on top. */
+ void *cf = cframe_raw(L->cframe);
+ if (cf == NULL || (char *)cframe_pc(cf) == (char *)cframe_L(cf))
+ return NO_BCPOS;
+ ins = cframe_pc(cf); /* Only happens during error/hook handling. */
+ } else {
+ if (frame_islua(nextframe)) {
+ ins = frame_pc(nextframe);
+ } else if (frame_iscont(nextframe)) {
+ ins = frame_contpc(nextframe);
+ } else {
+ /* Lua function below errfunc/gc/hook: find cframe to get the PC. */
+ void *cf = cframe_raw(L->cframe);
+ TValue *f = L->base-1;
+ if (cf == NULL)
+ return NO_BCPOS;
+ while (f > nextframe) {
+ if (frame_islua(f)) {
+ f = frame_prevl(f);
+ } else {
+ if (frame_isc(f))
+ cf = cframe_raw(cframe_prev(cf));
+ f = frame_prevd(f);
+ }
+ }
+ if (cframe_prev(cf))
+ cf = cframe_raw(cframe_prev(cf));
+ ins = cframe_pc(cf);
+ }
+ }
+ pt = funcproto(fn);
+ pos = proto_bcpos(pt, ins) - 1;
+#if LJ_HASJIT
+ if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */
+ GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins));
+ lua_assert(bc_isret(bc_op(ins[-1])));
+ pos = proto_bcpos(pt, mref(T->startpc, const BCIns));
+ }
+#endif
+ return pos;
+}
+
+/* -- Line numbers -------------------------------------------------------- */
+
+/* Get line number for a bytecode position. */
+BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc)
+{
+ const void *lineinfo = proto_lineinfo(pt);
+ if (pc <= pt->sizebc && lineinfo) {
+ BCLine first = pt->firstline;
+ if (pc == pt->sizebc) return first + pt->numline;
+ if (pc-- == 0) return first;
+ if (pt->numline < 256)
+ return first + (BCLine)((const uint8_t *)lineinfo)[pc];
+ else if (pt->numline < 65536)
+ return first + (BCLine)((const uint16_t *)lineinfo)[pc];
+ else
+ return first + (BCLine)((const uint32_t *)lineinfo)[pc];
+ }
+ return 0;
+}
+
+/* Get line number for function/frame. */
+static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe)
+{
+ BCPos pc = debug_framepc(L, fn, nextframe);
+ if (pc != NO_BCPOS) {
+ GCproto *pt = funcproto(fn);
+ lua_assert(pc <= pt->sizebc);
+ return lj_debug_line(pt, pc);
+ }
+ return -1;
+}
+
+/* -- Variable names ------------------------------------------------------ */
+
+/* Read ULEB128 value. */
+static uint32_t debug_read_uleb128(const uint8_t **pp)
+{
+ const uint8_t *p = *pp;
+ uint32_t v = *p++;
+ if (LJ_UNLIKELY(v >= 0x80)) {
+ int sh = 0;
+ v &= 0x7f;
+ do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80);
+ }
+ *pp = p;
+ return v;
+}
+
+/* Get name of a local variable from slot number and PC. */
+static const char *debug_varname(const GCproto *pt, BCPos pc, BCReg slot)
+{
+ const uint8_t *p = proto_varinfo(pt);
+ if (p) {
+ BCPos lastpc = 0;
+ for (;;) {
+ const char *name = (const char *)p;
+ uint32_t vn = *p++;
+ BCPos startpc, endpc;
+ if (vn < VARNAME__MAX) {
+ if (vn == VARNAME_END) break; /* End of varinfo. */
+ } else {
+ while (*p++) ; /* Skip over variable name string. */
+ }
+ lastpc = startpc = lastpc + debug_read_uleb128(&p);
+ if (startpc > pc) break;
+ endpc = startpc + debug_read_uleb128(&p);
+ if (pc < endpc && slot-- == 0) {
+ if (vn < VARNAME__MAX) {
+#define VARNAMESTR(name, str) str "\0"
+ name = VARNAMEDEF(VARNAMESTR);
+#undef VARNAMESTR
+ if (--vn) while (*name++ || --vn) ;
+ }
+ return name;
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Get name of local variable from 1-based slot number and function/frame. */
+static TValue *debug_localname(lua_State *L, const lua_Debug *ar,
+ const char **name, BCReg slot1)
+{
+ uint32_t offset = (uint32_t)ar->i_ci & 0xffff;
+ uint32_t size = (uint32_t)ar->i_ci >> 16;
+ TValue *frame = tvref(L->stack) + offset;
+ TValue *nextframe = size ? frame + size : NULL;
+ GCfunc *fn = frame_func(frame);
+ BCPos pc = debug_framepc(L, fn, nextframe);
+ if (pc != NO_BCPOS &&
+ (*name = debug_varname(funcproto(fn), pc, slot1-1)) != NULL)
+ ;
+ else if (slot1 > 0 && frame + slot1 < (nextframe ? nextframe : L->top))
+ *name = "(*temporary)";
+ else
+ *name = NULL;
+ return frame+slot1;
+}
+
+/* Get name of upvalue. */
+const char *lj_debug_uvname(GCproto *pt, uint32_t idx)
+{
+ const uint8_t *p = proto_uvinfo(pt);
+ lua_assert(idx < pt->sizeuv);
+ if (!p) return "";
+ if (idx) while (*p++ || --idx) ;
+ return (const char *)p;
+}
+
+/* Get name and value of upvalue. */
+const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp)
+{
+ if (tvisfunc(o)) {
+ GCfunc *fn = funcV(o);
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ if (idx < pt->sizeuv) {
+ *tvp = uvval(&gcref(fn->l.uvptr[idx])->uv);
+ return lj_debug_uvname(pt, idx);
+ }
+ } else {
+ if (idx < fn->c.nupvalues) {
+ *tvp = &fn->c.upvalue[idx];
+ return "";
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Deduce name of an object from slot number and PC. */
+const char *lj_debug_slotname(GCproto *pt, const BCIns *ip, BCReg slot,
+ const char **name)
+{
+ const char *lname;
+restart:
+ lname = debug_varname(pt, proto_bcpos(pt, ip), slot);
+ if (lname != NULL) { *name = lname; return "local"; }
+ while (--ip > proto_bc(pt)) {
+ BCIns ins = *ip;
+ BCOp op = bc_op(ins);
+ BCReg ra = bc_a(ins);
+ if (bcmode_a(op) == BCMbase) {
+ if (slot >= ra && (op != BC_KNIL || slot <= bc_d(ins)))
+ return NULL;
+ } else if (bcmode_a(op) == BCMdst && ra == slot) {
+ switch (bc_op(ins)) {
+ case BC_MOV:
+ if (ra == slot) { slot = bc_d(ins); goto restart; }
+ break;
+ case BC_GGET:
+ *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_d(ins))));
+ return "global";
+ case BC_TGETS:
+ *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_c(ins))));
+ if (ip > proto_bc(pt)) {
+ BCIns insp = ip[-1];
+ if (bc_op(insp) == BC_MOV && bc_a(insp) == ra+1 &&
+ bc_d(insp) == bc_b(ins))
+ return "method";
+ }
+ return "field";
+ case BC_UGET:
+ *name = lj_debug_uvname(pt, bc_d(ins));
+ return "upvalue";
+ default:
+ return NULL;
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Deduce function name from caller of a frame. */
+const char *lj_debug_funcname(lua_State *L, TValue *frame, const char **name)
+{
+ TValue *pframe;
+ GCfunc *fn;
+ BCPos pc;
+ if (frame <= tvref(L->stack))
+ return NULL;
+ if (frame_isvarg(frame))
+ frame = frame_prevd(frame);
+ pframe = frame_prev(frame);
+ fn = frame_func(pframe);
+ pc = debug_framepc(L, fn, frame);
+ if (pc != NO_BCPOS) {
+ GCproto *pt = funcproto(fn);
+ const BCIns *ip = &proto_bc(pt)[check_exp(pc < pt->sizebc, pc)];
+ MMS mm = bcmode_mm(bc_op(*ip));
+ if (mm == MM_call) {
+ BCReg slot = bc_a(*ip);
+ if (bc_op(*ip) == BC_ITERC) slot -= 3;
+ return lj_debug_slotname(pt, ip, slot, name);
+ } else if (mm != MM__MAX) {
+ *name = strdata(mmname_str(G(L), mm));
+ return "metamethod";
+ }
+ }
+ return NULL;
+}
+
+/* -- Source code locations ----------------------------------------------- */
+
+/* Generate shortened source name. */
+void lj_debug_shortname(char *out, GCstr *str)
+{
+ const char *src = strdata(str);
+ if (*src == '=') {
+ strncpy(out, src+1, LUA_IDSIZE); /* Remove first char. */
+ out[LUA_IDSIZE-1] = '\0'; /* Ensures null termination. */
+ } else if (*src == '@') { /* Output "source", or "...source". */
+ size_t len = str->len-1;
+ src++; /* Skip the `@' */
+ if (len >= LUA_IDSIZE) {
+ src += len-(LUA_IDSIZE-4); /* Get last part of file name. */
+ *out++ = '.'; *out++ = '.'; *out++ = '.';
+ }
+ strcpy(out, src);
+ } else { /* Output [string "string"]. */
+ size_t len; /* Length, up to first control char. */
+ for (len = 0; len < LUA_IDSIZE-12; len++)
+ if (((const unsigned char *)src)[len] < ' ') break;
+ strcpy(out, "[string \""); out += 9;
+ if (src[len] != '\0') { /* Must truncate? */
+ if (len > LUA_IDSIZE-15) len = LUA_IDSIZE-15;
+ strncpy(out, src, len); out += len;
+ strcpy(out, "..."); out += 3;
+ } else {
+ strcpy(out, src); out += len;
+ }
+ strcpy(out, "\"]");
+ }
+}
+
+/* Add current location of a frame to error message. */
+void lj_debug_addloc(lua_State *L, const char *msg,
+ cTValue *frame, cTValue *nextframe)
+{
+ if (frame) {
+ GCfunc *fn = frame_func(frame);
+ if (isluafunc(fn)) {
+ BCLine line = debug_frameline(L, fn, nextframe);
+ if (line >= 0) {
+ char buf[LUA_IDSIZE];
+ lj_debug_shortname(buf, proto_chunkname(funcproto(fn)));
+ lj_str_pushf(L, "%s:%d: %s", buf, line, msg);
+ return;
+ }
+ }
+ }
+ lj_str_pushf(L, "%s", msg);
+}
+
+/* Push location string for a bytecode position to Lua stack. */
+void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc)
+{
+ GCstr *name = proto_chunkname(pt);
+ const char *s = strdata(name);
+ MSize i, len = name->len;
+ BCLine line = lj_debug_line(pt, pc);
+ if (*s == '@') {
+ s++; len--;
+ for (i = len; i > 0; i--)
+ if (s[i] == '/' || s[i] == '\\') {
+ s += i+1;
+ break;
+ }
+ lj_str_pushf(L, "%s:%d", s, line);
+ } else if (len > 40) {
+ lj_str_pushf(L, "%p:%d", pt, line);
+ } else if (*s == '=') {
+ lj_str_pushf(L, "%s:%d", s+1, line);
+ } else {
+ lj_str_pushf(L, "\"%s\":%d", s, line);
+ }
+}
+
+/* -- Public debug API ---------------------------------------------------- */
+
+/* lua_getupvalue() and lua_setupvalue() are in lj_api.c. */
+
+LUA_API const char *lua_getlocal(lua_State *L, const lua_Debug *ar, int n)
+{
+ const char *name;
+ TValue *o = debug_localname(L, ar, &name, (BCReg)n);
+ if (name) {
+ copyTV(L, L->top, o);
+ incr_top(L);
+ }
+ return name;
+}
+
+LUA_API const char *lua_setlocal(lua_State *L, const lua_Debug *ar, int n)
+{
+ const char *name;
+ TValue *o = debug_localname(L, ar, &name, (BCReg)n);
+ if (name)
+ copyTV(L, o, L->top-1);
+ L->top--;
+ return name;
+}
+
+LUA_API int lua_getinfo(lua_State *L, const char *what, lua_Debug *ar)
+{
+ int status = 1;
+ TValue *frame = NULL;
+ TValue *nextframe = NULL;
+ GCfunc *fn;
+ if (*what == '>') {
+ TValue *func = L->top - 1;
+ api_check(L, tvisfunc(func));
+ fn = funcV(func);
+ L->top--;
+ what++;
+ } else {
+ uint32_t offset = (uint32_t)ar->i_ci & 0xffff;
+ uint32_t size = (uint32_t)ar->i_ci >> 16;
+ lua_assert(offset != 0);
+ frame = tvref(L->stack) + offset;
+ if (size) nextframe = frame + size;
+ lua_assert(frame <= tvref(L->maxstack) &&
+ (!nextframe || nextframe <= tvref(L->maxstack)));
+ fn = frame_func(frame);
+ lua_assert(fn->c.gct == ~LJ_TFUNC);
+ }
+ for (; *what; what++) {
+ if (*what == 'S') {
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ BCLine firstline = pt->firstline;
+ GCstr *name = proto_chunkname(pt);
+ ar->source = strdata(name);
+ lj_debug_shortname(ar->short_src, name);
+ ar->linedefined = (int)firstline;
+ ar->lastlinedefined = (int)(firstline + pt->numline);
+ ar->what = firstline ? "Lua" : "main";
+ } else {
+ ar->source = "=[C]";
+ ar->short_src[0] = '[';
+ ar->short_src[1] = 'C';
+ ar->short_src[2] = ']';
+ ar->short_src[3] = '\0';
+ ar->linedefined = -1;
+ ar->lastlinedefined = -1;
+ ar->what = "C";
+ }
+ } else if (*what == 'l') {
+ ar->currentline = frame ? debug_frameline(L, fn, nextframe) : -1;
+ } else if (*what == 'u') {
+ ar->nups = fn->c.nupvalues;
+ } else if (*what == 'n') {
+ ar->namewhat = frame ? lj_debug_funcname(L, frame, &ar->name) : NULL;
+ if (ar->namewhat == NULL) {
+ ar->namewhat = "";
+ ar->name = NULL;
+ }
+ } else if (*what == 'f') {
+ setfuncV(L, L->top, fn);
+ incr_top(L);
+ } else if (*what == 'L') {
+ if (isluafunc(fn)) {
+ GCtab *t = lj_tab_new(L, 0, 0);
+ GCproto *pt = funcproto(fn);
+ const void *lineinfo = proto_lineinfo(pt);
+ if (lineinfo) {
+ BCLine first = pt->firstline;
+ int sz = pt->numline < 256 ? 1 : pt->numline < 65536 ? 2 : 4;
+ MSize i, szl = pt->sizebc-1;
+ for (i = 0; i < szl; i++) {
+ BCLine line = first +
+ (sz == 1 ? (BCLine)((const uint8_t *)lineinfo)[i] :
+ sz == 2 ? (BCLine)((const uint16_t *)lineinfo)[i] :
+ (BCLine)((const uint32_t *)lineinfo)[i]);
+ setboolV(lj_tab_setint(L, t, line), 1);
+ }
+ }
+ settabV(L, L->top, t);
+ } else {
+ setnilV(L->top);
+ }
+ incr_top(L);
+ } else {
+ status = 0; /* Bad option. */
+ }
+ }
+ return status;
+}
+
+LUA_API int lua_getstack(lua_State *L, int level, lua_Debug *ar)
+{
+ int size;
+ cTValue *frame = lj_debug_frame(L, level, &size);
+ if (frame) {
+ ar->i_ci = (size << 16) + (int)(frame - tvref(L->stack));
+ return 1;
+ } else {
+ ar->i_ci = level - size;
+ return 0;
+ }
+}
+
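The file above wires up the stock Lua 5.1 debug introspection API. As a rough usage sketch -- not part of the patch, and with an invented helper name print_traceback -- an embedder walks frames with lua_getstack() and fills a lua_Debug via lua_getinfo(), which is exactly the path backed by lj_debug_frame() and debug_frameline() above:

/* Illustrative sketch only (not in the patch). */
#include <stdio.h>
#include "lua.h"

static void print_traceback(lua_State *L)
{
  lua_Debug ar;
  int level;
  for (level = 0; lua_getstack(L, level, &ar); level++) {
    /* 'n' = name/namewhat, 'S' = source/what/line range, 'l' = currentline. */
    lua_getinfo(L, "nSl", &ar);
    printf("#%d %s:%d in %s '%s'\n", level, ar.short_src, ar.currentline,
           *ar.namewhat ? ar.namewhat : "function",
           ar.name ? ar.name : "?");
  }
}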
diff --git a/src/LuaJIT/src/lj_debug.h b/src/LuaJIT/src/lj_debug.h
new file mode 100644
index 000000000..e00769aea
--- /dev/null
+++ b/src/LuaJIT/src/lj_debug.h
@@ -0,0 +1,41 @@
+/*
+** Debugging and introspection.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_DEBUG_H
+#define _LJ_DEBUG_H
+
+#include "lj_obj.h"
+
+LJ_FUNC cTValue *lj_debug_frame(lua_State *L, int level, int *size);
+LJ_FUNC BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc);
+LJ_FUNC const char *lj_debug_uvname(GCproto *pt, uint32_t idx);
+LJ_FUNC const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp);
+LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc,
+ BCReg slot, const char **name);
+LJ_FUNC const char *lj_debug_funcname(lua_State *L, TValue *frame,
+ const char **name);
+LJ_FUNC void lj_debug_shortname(char *out, GCstr *str);
+LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg,
+ cTValue *frame, cTValue *nextframe);
+LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc);
+
+/* Fixed internal variable names. */
+#define VARNAMEDEF(_) \
+ _(FOR_IDX, "(for index)") \
+ _(FOR_STOP, "(for limit)") \
+ _(FOR_STEP, "(for step)") \
+ _(FOR_GEN, "(for generator)") \
+ _(FOR_STATE, "(for state)") \
+ _(FOR_CTL, "(for control)")
+
+enum {
+ VARNAME_END,
+#define VARNAMEENUM(name, str) VARNAME_##name,
+ VARNAMEDEF(VARNAMEENUM)
+#undef VARNAMEENUM
+ VARNAME__MAX
+};
+
+#endif
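VARNAMEDEF is consumed twice: once here to build the VARNAME_* enum and once in lj_debug.c to build a packed NUL-separated string table. A tiny self-contained sketch of that X-macro idiom; the DEMO_* names are made up and only mirror the pattern used above:

/* Illustrative sketch only (not in the patch). */
#define DEMO_NAMES(_) _(FOR_IDX, "(for index)") _(FOR_STOP, "(for limit)")

enum {
  DEMO_END,
#define DEMO_ENUM(name, str) DEMO_##name,
  DEMO_NAMES(DEMO_ENUM)
#undef DEMO_ENUM
  DEMO__MAX
};

#define DEMO_STR(name, str) str "\0"
static const char demo_strings[] = DEMO_NAMES(DEMO_STR);
#undef DEMO_STR
/* demo_strings == "(for index)\0(for limit)\0": entry n is found by skipping
** n-1 NUL-terminated strings, which is what debug_varname() does above.
*/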
diff --git a/src/LuaJIT/src/lj_def.h b/src/LuaJIT/src/lj_def.h
new file mode 100644
index 000000000..e00ed9396
--- /dev/null
+++ b/src/LuaJIT/src/lj_def.h
@@ -0,0 +1,307 @@
+/*
+** LuaJIT common internal definitions.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_DEF_H
+#define _LJ_DEF_H
+
+#include "lua.h"
+
+#if defined(_MSC_VER)
+/* MSVC is stuck in the last century and doesn't have C99's stdint.h. */
+typedef __int8 int8_t;
+typedef __int16 int16_t;
+typedef __int32 int32_t;
+typedef __int64 int64_t;
+typedef unsigned __int8 uint8_t;
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+#ifdef _WIN64
+typedef __int64 intptr_t;
+typedef unsigned __int64 uintptr_t;
+#else
+typedef __int32 intptr_t;
+typedef unsigned __int32 uintptr_t;
+#endif
+#elif defined(__symbian__)
+/* Cough. */
+typedef signed char int8_t;
+typedef short int int16_t;
+typedef int int32_t;
+typedef long long int64_t;
+typedef unsigned char uint8_t;
+typedef unsigned short int uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+typedef int intptr_t;
+typedef unsigned int uintptr_t;
+#else
+#include <stdint.h>
+#endif
+
+/* Needed everywhere. */
+#include <string.h>
+#include <stdlib.h>
+
+/* Various VM limits. */
+#define LJ_MAX_MEM 0x7fffff00 /* Max. total memory allocation. */
+#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */
+#define LJ_MAX_STR LJ_MAX_MEM /* Max. string length. */
+#define LJ_MAX_UDATA LJ_MAX_MEM /* Max. userdata length. */
+
+#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */
+#define LJ_MAX_HBITS 26 /* Max. hash bits. */
+#define LJ_MAX_ABITS 28 /* Max. bits of array key. */
+#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. array part size. */
+#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */
+
+#define LJ_MAX_LINE LJ_MAX_MEM /* Max. source code line number. */
+#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */
+#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */
+#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */
+#define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */
+#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */
+
+#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */
+#define LJ_STACK_EXTRA 5 /* Extra stack space (metamethods). */
+
+#define LJ_NUM_CBPAGE 1 /* Number of FFI callback pages. */
+
+/* Minimum table/buffer sizes. */
+#define LJ_MIN_GLOBAL 6 /* Min. global table size (hbits). */
+#define LJ_MIN_REGISTRY 2 /* Min. registry size (hbits). */
+#define LJ_MIN_STRTAB 256 /* Min. string table size (pow2). */
+#define LJ_MIN_SBUF 32 /* Min. string buffer length. */
+#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */
+#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */
+#define LJ_MIN_K64SZ 16 /* Min. size for chained K64Array. */
+
+/* JIT compiler limits. */
+#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */
+#define LJ_MAX_PHI 32 /* Max. # of PHIs for a loop. */
+#define LJ_MAX_EXITSTUBGR 16 /* Max. # of exit stub groups. */
+
+/* Various macros. */
+#ifndef UNUSED
+#define UNUSED(x) ((void)(x)) /* to avoid warnings */
+#endif
+
+#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo)
+#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p))
+#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p))
+
+#define checki8(x) ((x) == (int32_t)(int8_t)(x))
+#define checku8(x) ((x) == (int32_t)(uint8_t)(x))
+#define checki16(x) ((x) == (int32_t)(int16_t)(x))
+#define checku16(x) ((x) == (int32_t)(uint16_t)(x))
+#define checki32(x) ((x) == (int32_t)(x))
+#define checku32(x) ((x) == (uint32_t)(x))
+#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x))
+
+/* Every half-decent C compiler transforms this into a rotate instruction. */
+#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(8*sizeof(x)-(n))))
+#define lj_ror(x, n) (((x)<<(8*sizeof(x)-(n))) | ((x)>>(n)))
+
+/* A really naive Bloom filter. But sufficient for our needs. */
+typedef uintptr_t BloomFilter;
+#define BLOOM_MASK (8*sizeof(BloomFilter) - 1)
+#define bloombit(x) ((uintptr_t)1 << ((x) & BLOOM_MASK))
+#define bloomset(b, x) ((b) |= bloombit((x)))
+#define bloomtest(b, x) ((b) & bloombit((x)))
+
+#if defined(__GNUC__)
+
+#define LJ_NORET __attribute__((noreturn))
+#define LJ_ALIGN(n) __attribute__((aligned(n)))
+#define LJ_INLINE inline
+#define LJ_AINLINE inline __attribute__((always_inline))
+#define LJ_NOINLINE __attribute__((noinline))
+
+#if defined(__ELF__) || defined(__MACH__)
+#if !((defined(__sun__) && defined(__svr4__)) || defined(__solaris__))
+#define LJ_NOAPI extern __attribute__((visibility("hidden")))
+#endif
+#endif
+
+/* Note: it's only beneficial to use fastcall on x86 and then only for up to
+** two non-FP args. The amalgamated compile covers all LJ_FUNC cases. Only
+** indirect calls and related tail-called C functions are marked as fastcall.
+*/
+#if defined(__i386__)
+#define LJ_FASTCALL __attribute__((fastcall))
+#endif
+
+#define LJ_LIKELY(x) __builtin_expect(!!(x), 1)
+#define LJ_UNLIKELY(x) __builtin_expect(!!(x), 0)
+
+#define lj_ffs(x) ((uint32_t)__builtin_ctz(x))
+/* Don't ask ... */
+#if defined(__INTEL_COMPILER) && (defined(__i386__) || defined(__x86_64__))
+static LJ_AINLINE uint32_t lj_fls(uint32_t x)
+{
+ uint32_t r; __asm__("bsrl %1, %0" : "=r" (r) : "rm" (x) : "cc"); return r;
+}
+#else
+#define lj_fls(x) ((uint32_t)(__builtin_clz(x)^31))
+#endif
+
+#if defined(__arm__)
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ uint32_t r;
+#if __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6T2__ || __ARM_ARCH_6Z__ ||\
+ __ARM_ARCH_6ZK__ || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__
+ __asm__("rev %0, %1" : "=r" (r) : "r" (x));
+ return r;
+#else
+#ifdef __thumb__
+ r = x ^ lj_ror(x, 16);
+#else
+ __asm__("eor %0, %1, %1, ror #16" : "=r" (r) : "r" (x));
+#endif
+ return ((r & 0xff00ffffu) >> 8) ^ lj_ror(x, 8);
+#endif
+}
+
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
+}
+#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ return (uint32_t)__builtin_bswap32((int32_t)x);
+}
+
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return (uint64_t)__builtin_bswap64((int64_t)x);
+}
+#elif defined(__i386__) || defined(__x86_64__)
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ uint32_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
+}
+
+#if defined(__i386__)
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
+}
+#else
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ uint64_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
+}
+#endif
+#else
+#error "missing define for lj_bswap()"
+#endif
+
+typedef union __attribute__((packed)) Unaligned16 {
+ uint16_t u;
+ uint8_t b[2];
+} Unaligned16;
+
+typedef union __attribute__((packed)) Unaligned32 {
+ uint32_t u;
+ uint8_t b[4];
+} Unaligned32;
+
+/* Unaligned load of uint16_t. */
+static LJ_AINLINE uint16_t lj_getu16(const void *p)
+{
+ return ((const Unaligned16 *)p)->u;
+}
+
+/* Unaligned load of uint32_t. */
+static LJ_AINLINE uint32_t lj_getu32(const void *p)
+{
+ return ((const Unaligned32 *)p)->u;
+}
+
+#elif defined(_MSC_VER)
+
+#define LJ_NORET __declspec(noreturn)
+#define LJ_ALIGN(n) __declspec(align(n))
+#define LJ_INLINE __inline
+#define LJ_AINLINE __forceinline
+#define LJ_NOINLINE __declspec(noinline)
+#if defined(_M_IX86)
+#define LJ_FASTCALL __fastcall
+#endif
+
+static LJ_AINLINE uint32_t lj_ffs(uint32_t x)
+{
+ uint32_t r; _BitScanForward(&r, x); return r;
+}
+
+static LJ_AINLINE uint32_t lj_fls(uint32_t x)
+{
+ uint32_t r; _BitScanReverse(&r, x); return r;
+}
+
+#define lj_bswap(x) (_byteswap_ulong((x)))
+#define lj_bswap64(x) (_byteswap_uint64((x)))
+
+/* MSVC is only supported on x86/x64, where unaligned loads are always ok. */
+#define lj_getu16(p) (*(uint16_t *)(p))
+#define lj_getu32(p) (*(uint32_t *)(p))
+
+#else
+#error "missing defines for your compiler"
+#endif
+
+/* Optional defines. */
+#ifndef LJ_FASTCALL
+#define LJ_FASTCALL
+#endif
+#ifndef LJ_NORET
+#define LJ_NORET
+#endif
+#ifndef LJ_NOAPI
+#define LJ_NOAPI extern
+#endif
+#ifndef LJ_LIKELY
+#define LJ_LIKELY(x) (x)
+#define LJ_UNLIKELY(x) (x)
+#endif
+
+/* Attributes for internal functions. */
+#define LJ_DATA LJ_NOAPI
+#define LJ_DATADEF
+#define LJ_ASMF LJ_NOAPI
+#define LJ_FUNCA LJ_NOAPI
+#if defined(ljamalg_c)
+#define LJ_FUNC static
+#else
+#define LJ_FUNC LJ_NOAPI
+#endif
+#define LJ_FUNC_NORET LJ_FUNC LJ_NORET
+#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET
+#define LJ_ASMF_NORET LJ_ASMF LJ_NORET
+
+/* Runtime assertions. */
+#ifdef lua_assert
+#define check_exp(c, e) (lua_assert(c), (e))
+#define api_check(l, e) lua_assert(e)
+#else
+#define lua_assert(c) ((void)0)
+#define check_exp(c, e) (e)
+#define api_check luai_apicheck
+#endif
+
+/* Static assertions. */
+#define LJ_ASSERT_NAME2(name, line) name ## line
+#define LJ_ASSERT_NAME(line) LJ_ASSERT_NAME2(lj_assert_, line)
+#ifdef __COUNTER__
+#define LJ_STATIC_ASSERT(cond) \
+ extern void LJ_ASSERT_NAME(__COUNTER__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
+#else
+#define LJ_STATIC_ASSERT(cond) \
+ extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
+#endif
+
+#endif
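The Bloom filter macros near the end of lj_def.h give a one-sided guarantee: a zero bloomtest() means the key was never added, while a nonzero result may be a false positive because keys collide modulo the filter width. A minimal sketch assuming the definitions above (bloom_demo is an invented name):

/* Illustrative sketch only (not in the patch). */
#include <stdint.h>

static int bloom_demo(void)
{
  BloomFilter seen = 0;
  bloomset(seen, 3);
  bloomset(seen, 71);   /* On 64 bit this sets bit 71 & BLOOM_MASK == 7. */
  /* bloomtest(seen, 7) is nonzero here: a false positive from the collision. */
  return bloomtest(seen, 4) == 0;  /* Definitely absent: bit 4 was never set. */
}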
diff --git a/src/LuaJIT/src/lj_dispatch.c b/src/LuaJIT/src/lj_dispatch.c
new file mode 100644
index 000000000..427036b44
--- /dev/null
+++ b/src/LuaJIT/src/lj_dispatch.c
@@ -0,0 +1,486 @@
+/*
+** Instruction dispatch handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_dispatch_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_func.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_debug.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_ff.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#endif
+#if LJ_HASFFI
+#include "lj_ccallback.h"
+#endif
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "luajit.h"
+
+/* Bump GG_NUM_ASMFF in lj_dispatch.h as needed. Ugly. */
+LJ_STATIC_ASSERT(GG_NUM_ASMFF == FF_NUM_ASMFUNC);
+
+/* -- Dispatch table management ------------------------------------------- */
+
+#if LJ_TARGET_MIPS
+#include <math.h>
+LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
+ lua_State *co);
+
+#define GOTFUNC(name) (ASMFunction)name,
+static const ASMFunction dispatch_got[] = {
+ GOTDEF(GOTFUNC)
+};
+#undef GOTFUNC
+#endif
+
+/* Initialize instruction dispatch table and hot counters. */
+void lj_dispatch_init(GG_State *GG)
+{
+ uint32_t i;
+ ASMFunction *disp = GG->dispatch;
+ for (i = 0; i < GG_LEN_SDISP; i++)
+ disp[GG_LEN_DDISP+i] = disp[i] = makeasmfunc(lj_bc_ofs[i]);
+ for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
+ disp[i] = makeasmfunc(lj_bc_ofs[i]);
+ /* The JIT engine is off by default. luaopen_jit() turns it on. */
+ disp[BC_FORL] = disp[BC_IFORL];
+ disp[BC_ITERL] = disp[BC_IITERL];
+ disp[BC_LOOP] = disp[BC_ILOOP];
+ disp[BC_FUNCF] = disp[BC_IFUNCF];
+ disp[BC_FUNCV] = disp[BC_IFUNCV];
+ GG->g.bc_cfunc_ext = GG->g.bc_cfunc_int = BCINS_AD(BC_FUNCC, LUA_MINSTACK, 0);
+ for (i = 0; i < GG_NUM_ASMFF; i++)
+ GG->bcff[i] = BCINS_AD(BC__MAX+i, 0, 0);
+#if LJ_TARGET_MIPS
+ memcpy(GG->got, dispatch_got, LJ_GOT__MAX*4);
+#endif
+}
+
+#if LJ_HASJIT
+/* Initialize hotcount table. */
+void lj_dispatch_init_hotcount(global_State *g)
+{
+ int32_t hotloop = G2J(g)->param[JIT_P_hotloop];
+ HotCount start = (HotCount)(hotloop*HOTCOUNT_LOOP - 1);
+ HotCount *hotcount = G2GG(g)->hotcount;
+ uint32_t i;
+ for (i = 0; i < HOTCOUNT_SIZE; i++)
+ hotcount[i] = start;
+}
+#endif
+
+/* Internal dispatch mode bits. */
+#define DISPMODE_JIT 0x01 /* JIT compiler on. */
+#define DISPMODE_REC 0x02 /* Recording active. */
+#define DISPMODE_INS 0x04 /* Override instruction dispatch. */
+#define DISPMODE_CALL 0x08 /* Override call dispatch. */
+#define DISPMODE_RET 0x08 /* Override return dispatch. */
+
+/* Update dispatch table depending on various flags. */
+void lj_dispatch_update(global_State *g)
+{
+ uint8_t oldmode = g->dispatchmode;
+ uint8_t mode = 0;
+#if LJ_HASJIT
+ mode |= (G2J(g)->flags & JIT_F_ON) ? DISPMODE_JIT : 0;
+ mode |= G2J(g)->state != LJ_TRACE_IDLE ?
+ (DISPMODE_REC|DISPMODE_INS|DISPMODE_CALL) : 0;
+#endif
+ mode |= (g->hookmask & (LUA_MASKLINE|LUA_MASKCOUNT)) ? DISPMODE_INS : 0;
+ mode |= (g->hookmask & LUA_MASKCALL) ? DISPMODE_CALL : 0;
+ mode |= (g->hookmask & LUA_MASKRET) ? DISPMODE_RET : 0;
+ if (oldmode != mode) { /* Mode changed? */
+ ASMFunction *disp = G2GG(g)->dispatch;
+ ASMFunction f_forl, f_iterl, f_loop, f_funcf, f_funcv;
+ g->dispatchmode = mode;
+
+ /* Hotcount if JIT is on, but not while recording. */
+ if ((mode & (DISPMODE_JIT|DISPMODE_REC)) == DISPMODE_JIT) {
+ f_forl = makeasmfunc(lj_bc_ofs[BC_FORL]);
+ f_iterl = makeasmfunc(lj_bc_ofs[BC_ITERL]);
+ f_loop = makeasmfunc(lj_bc_ofs[BC_LOOP]);
+ f_funcf = makeasmfunc(lj_bc_ofs[BC_FUNCF]);
+ f_funcv = makeasmfunc(lj_bc_ofs[BC_FUNCV]);
+ } else { /* Otherwise use the non-hotcounting instructions. */
+ f_forl = disp[GG_LEN_DDISP+BC_IFORL];
+ f_iterl = disp[GG_LEN_DDISP+BC_IITERL];
+ f_loop = disp[GG_LEN_DDISP+BC_ILOOP];
+ f_funcf = makeasmfunc(lj_bc_ofs[BC_IFUNCF]);
+ f_funcv = makeasmfunc(lj_bc_ofs[BC_IFUNCV]);
+ }
+ /* Init static counting instruction dispatch first (may be copied below). */
+ disp[GG_LEN_DDISP+BC_FORL] = f_forl;
+ disp[GG_LEN_DDISP+BC_ITERL] = f_iterl;
+ disp[GG_LEN_DDISP+BC_LOOP] = f_loop;
+
+ /* Set dynamic instruction dispatch. */
+ if ((oldmode ^ mode) & (DISPMODE_REC|DISPMODE_INS)) {
+ /* Need to update the whole table. */
+ if (!(mode & (DISPMODE_REC|DISPMODE_INS))) { /* No ins dispatch? */
+ /* Copy static dispatch table to dynamic dispatch table. */
+ memcpy(&disp[0], &disp[GG_LEN_DDISP], GG_LEN_SDISP*sizeof(ASMFunction));
+ /* Overwrite with dynamic return dispatch. */
+ if ((mode & DISPMODE_RET)) {
+ disp[BC_RETM] = lj_vm_rethook;
+ disp[BC_RET] = lj_vm_rethook;
+ disp[BC_RET0] = lj_vm_rethook;
+ disp[BC_RET1] = lj_vm_rethook;
+ }
+ } else {
+ /* The recording dispatch also checks for hooks. */
+ ASMFunction f = (mode & DISPMODE_REC) ? lj_vm_record : lj_vm_inshook;
+ uint32_t i;
+ for (i = 0; i < GG_LEN_SDISP; i++)
+ disp[i] = f;
+ }
+ } else if (!(mode & (DISPMODE_REC|DISPMODE_INS))) {
+ /* Otherwise set dynamic counting ins. */
+ disp[BC_FORL] = f_forl;
+ disp[BC_ITERL] = f_iterl;
+ disp[BC_LOOP] = f_loop;
+ /* Set dynamic return dispatch. */
+ if ((mode & DISPMODE_RET)) {
+ disp[BC_RETM] = lj_vm_rethook;
+ disp[BC_RET] = lj_vm_rethook;
+ disp[BC_RET0] = lj_vm_rethook;
+ disp[BC_RET1] = lj_vm_rethook;
+ } else {
+ disp[BC_RETM] = disp[GG_LEN_DDISP+BC_RETM];
+ disp[BC_RET] = disp[GG_LEN_DDISP+BC_RET];
+ disp[BC_RET0] = disp[GG_LEN_DDISP+BC_RET0];
+ disp[BC_RET1] = disp[GG_LEN_DDISP+BC_RET1];
+ }
+ }
+
+ /* Set dynamic call dispatch. */
+ if ((oldmode ^ mode) & DISPMODE_CALL) { /* Update the whole table? */
+ uint32_t i;
+ if ((mode & 8) == 0) { /* No call hooks? */
+ for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
+ disp[i] = makeasmfunc(lj_bc_ofs[i]);
+ } else {
+ for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
+ disp[i] = lj_vm_callhook;
+ }
+ }
+ if (!(mode & DISPMODE_CALL)) { /* Overwrite dynamic counting ins. */
+ disp[BC_FUNCF] = f_funcf;
+ disp[BC_FUNCV] = f_funcv;
+ }
+
+#if LJ_HASJIT
+ /* Reset hotcounts for JIT off to on transition. */
+ if ((mode & DISPMODE_JIT) && !(oldmode & DISPMODE_JIT))
+ lj_dispatch_init_hotcount(g);
+#endif
+ }
+}
+
+/* -- JIT mode setting ---------------------------------------------------- */
+
+#if LJ_HASJIT
+/* Set JIT mode for a single prototype. */
+static void setptmode(global_State *g, GCproto *pt, int mode)
+{
+ if ((mode & LUAJIT_MODE_ON)) { /* (Re-)enable JIT compilation. */
+ pt->flags &= ~PROTO_NOJIT;
+ lj_trace_reenableproto(pt); /* Unpatch all ILOOP etc. bytecodes. */
+ } else { /* Flush and/or disable JIT compilation. */
+ if (!(mode & LUAJIT_MODE_FLUSH))
+ pt->flags |= PROTO_NOJIT;
+ lj_trace_flushproto(g, pt); /* Flush all traces of prototype. */
+ }
+}
+
+/* Recursively set the JIT mode for all children of a prototype. */
+static void setptmode_all(global_State *g, GCproto *pt, int mode)
+{
+ ptrdiff_t i;
+ if (!(pt->flags & PROTO_CHILD)) return;
+ for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) {
+ GCobj *o = proto_kgc(pt, i);
+ if (o->gch.gct == ~LJ_TPROTO) {
+ setptmode(g, gco2pt(o), mode);
+ setptmode_all(g, gco2pt(o), mode);
+ }
+ }
+}
+#endif
+
+/* Public API function: control the JIT engine. */
+int luaJIT_setmode(lua_State *L, int idx, int mode)
+{
+ global_State *g = G(L);
+ int mm = mode & LUAJIT_MODE_MASK;
+ lj_trace_abort(g); /* Abort recording on any state change. */
+ /* Avoid pulling the rug from under our own feet. */
+ if ((g->hookmask & HOOK_GC))
+ lj_err_caller(L, LJ_ERR_NOGCMM);
+ switch (mm) {
+#if LJ_HASJIT
+ case LUAJIT_MODE_ENGINE:
+ if ((mode & LUAJIT_MODE_FLUSH)) {
+ lj_trace_flushall(L);
+ } else {
+ if (!(mode & LUAJIT_MODE_ON))
+ G2J(g)->flags &= ~(uint32_t)JIT_F_ON;
+#if LJ_TARGET_X86ORX64
+ else if ((G2J(g)->flags & JIT_F_SSE2))
+ G2J(g)->flags |= (uint32_t)JIT_F_ON;
+ else
+ return 0; /* Don't turn on JIT compiler without SSE2 support. */
+#else
+ else
+ G2J(g)->flags |= (uint32_t)JIT_F_ON;
+#endif
+ lj_dispatch_update(g);
+ }
+ break;
+ case LUAJIT_MODE_FUNC:
+ case LUAJIT_MODE_ALLFUNC:
+ case LUAJIT_MODE_ALLSUBFUNC: {
+ cTValue *tv = idx == 0 ? frame_prev(L->base-1) :
+ idx > 0 ? L->base + (idx-1) : L->top + idx;
+ GCproto *pt;
+ if ((idx == 0 || tvisfunc(tv)) && isluafunc(&gcval(tv)->fn))
+ pt = funcproto(&gcval(tv)->fn); /* Cannot use funcV() for frame slot. */
+ else if (tvisproto(tv))
+ pt = protoV(tv);
+ else
+ return 0; /* Failed. */
+ if (mm != LUAJIT_MODE_ALLSUBFUNC)
+ setptmode(g, pt, mode);
+ if (mm != LUAJIT_MODE_FUNC)
+ setptmode_all(g, pt, mode);
+ break;
+ }
+ case LUAJIT_MODE_TRACE:
+ if (!(mode & LUAJIT_MODE_FLUSH))
+ return 0; /* Failed. */
+ lj_trace_flush(G2J(g), idx);
+ break;
+#else
+ case LUAJIT_MODE_ENGINE:
+ case LUAJIT_MODE_FUNC:
+ case LUAJIT_MODE_ALLFUNC:
+ case LUAJIT_MODE_ALLSUBFUNC:
+ UNUSED(idx);
+ if ((mode & LUAJIT_MODE_ON))
+ return 0; /* Failed. */
+ break;
+#endif
+ case LUAJIT_MODE_WRAPCFUNC:
+ if ((mode & LUAJIT_MODE_ON)) {
+ if (idx != 0) {
+ cTValue *tv = idx > 0 ? L->base + (idx-1) : L->top + idx;
+ if (tvislightud(tv))
+ g->wrapf = (lua_CFunction)lightudV(tv);
+ else
+ return 0; /* Failed. */
+ } else {
+ return 0; /* Failed. */
+ }
+ g->bc_cfunc_ext = BCINS_AD(BC_FUNCCW, 0, 0);
+ } else {
+ g->bc_cfunc_ext = BCINS_AD(BC_FUNCC, 0, 0);
+ }
+ break;
+ default:
+ return 0; /* Failed. */
+ }
+ return 1; /* OK. */
+}
+
+/* Enforce (dynamic) linker error for version mismatches. See luajit.c. */
+LUA_API void LUAJIT_VERSION_SYM(void)
+{
+}
+
+/* -- Hooks --------------------------------------------------------------- */
+
+/* This function can be called asynchronously (e.g. during a signal). */
+LUA_API int lua_sethook(lua_State *L, lua_Hook func, int mask, int count)
+{
+ global_State *g = G(L);
+ mask &= HOOK_EVENTMASK;
+ if (func == NULL || mask == 0) { mask = 0; func = NULL; } /* Consistency. */
+ g->hookf = func;
+ g->hookcount = g->hookcstart = (int32_t)count;
+ g->hookmask = (uint8_t)((g->hookmask & ~HOOK_EVENTMASK) | mask);
+ lj_trace_abort(g); /* Abort recording on any hook change. */
+ lj_dispatch_update(g);
+ return 1;
+}
+
+LUA_API lua_Hook lua_gethook(lua_State *L)
+{
+ return G(L)->hookf;
+}
+
+LUA_API int lua_gethookmask(lua_State *L)
+{
+ return G(L)->hookmask & HOOK_EVENTMASK;
+}
+
+LUA_API int lua_gethookcount(lua_State *L)
+{
+ return (int)G(L)->hookcstart;
+}
+
+/* Call a hook. */
+static void callhook(lua_State *L, int event, BCLine line)
+{
+ global_State *g = G(L);
+ lua_Hook hookf = g->hookf;
+ if (hookf && !hook_active(g)) {
+ lua_Debug ar;
+ lj_trace_abort(g); /* Abort recording on any hook call. */
+ ar.event = event;
+ ar.currentline = line;
+ /* Top frame, nextframe = NULL. */
+ ar.i_ci = (int)((L->base-1) - tvref(L->stack));
+ lj_state_checkstack(L, 1+LUA_MINSTACK);
+ hook_enter(g);
+ hookf(L, &ar);
+ lua_assert(hook_active(g));
+ hook_leave(g);
+ }
+}
+
+/* -- Dispatch callbacks -------------------------------------------------- */
+
+/* Calculate number of used stack slots in the current frame. */
+static BCReg cur_topslot(GCproto *pt, const BCIns *pc, uint32_t nres)
+{
+ BCIns ins = pc[-1];
+ if (bc_op(ins) == BC_UCLO)
+ ins = pc[bc_j(ins)];
+ switch (bc_op(ins)) {
+ case BC_CALLM: case BC_CALLMT: return bc_a(ins) + bc_c(ins) + nres-1+1;
+ case BC_RETM: return bc_a(ins) + bc_d(ins) + nres-1;
+ case BC_TSETM: return bc_a(ins) + nres-1;
+ default: return pt->framesize;
+ }
+}
+
+/* Instruction dispatch. Used by instr/line/return hooks or when recording. */
+void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc)
+{
+ ERRNO_SAVE
+ GCfunc *fn = curr_func(L);
+ GCproto *pt = funcproto(fn);
+ void *cf = cframe_raw(L->cframe);
+ const BCIns *oldpc = cframe_pc(cf);
+ global_State *g = G(L);
+ BCReg slots;
+ setcframe_pc(cf, pc);
+ slots = cur_topslot(pt, pc, cframe_multres_n(cf));
+ L->top = L->base + slots; /* Fix top. */
+#if LJ_HASJIT
+ {
+ jit_State *J = G2J(g);
+ if (J->state != LJ_TRACE_IDLE) {
+ J->L = L;
+ lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
+ }
+ }
+#endif
+ if ((g->hookmask & LUA_MASKCOUNT) && g->hookcount == 0) {
+ g->hookcount = g->hookcstart;
+ callhook(L, LUA_HOOKCOUNT, -1);
+ L->top = L->base + slots; /* Fix top again. */
+ }
+ if ((g->hookmask & LUA_MASKLINE)) {
+ BCPos npc = proto_bcpos(pt, pc) - 1;
+ BCPos opc = proto_bcpos(pt, oldpc) - 1;
+ BCLine line = lj_debug_line(pt, npc);
+ if (pc <= oldpc || opc >= pt->sizebc || line != lj_debug_line(pt, opc)) {
+ callhook(L, LUA_HOOKLINE, line);
+ L->top = L->base + slots; /* Fix top again. */
+ }
+ }
+ if ((g->hookmask & LUA_MASKRET) && bc_isret(bc_op(pc[-1])))
+ callhook(L, LUA_HOOKRET, -1);
+ ERRNO_RESTORE
+}
+
+/* Initialize call. Ensure stack space and return # of missing parameters. */
+static int call_init(lua_State *L, GCfunc *fn)
+{
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ int numparams = pt->numparams;
+ int gotparams = (int)(L->top - L->base);
+ int need = pt->framesize;
+ if ((pt->flags & PROTO_VARARG)) need += 1+gotparams;
+ lj_state_checkstack(L, (MSize)need);
+ numparams -= gotparams;
+ return numparams >= 0 ? numparams : 0;
+ } else {
+ lj_state_checkstack(L, LUA_MINSTACK);
+ return 0;
+ }
+}
+
+/* Call dispatch. Used by call hooks, hot calls or when recording. */
+ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc)
+{
+ ERRNO_SAVE
+ GCfunc *fn = curr_func(L);
+ BCOp op;
+ global_State *g = G(L);
+#if LJ_HASJIT
+ jit_State *J = G2J(g);
+#endif
+ int missing = call_init(L, fn);
+#if LJ_HASJIT
+ J->L = L;
+ if ((uintptr_t)pc & 1) { /* Marker for hot call. */
+ pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1);
+ lj_trace_hot(J, pc);
+ goto out;
+ } else if (J->state != LJ_TRACE_IDLE &&
+ !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
+#ifdef LUA_USE_ASSERT
+ ptrdiff_t delta = L->top - L->base;
+#endif
+ /* Record the FUNC* bytecodes, too. */
+ lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
+ lua_assert(L->top - L->base == delta);
+ }
+#endif
+ if ((g->hookmask & LUA_MASKCALL)) {
+ int i;
+ for (i = 0; i < missing; i++) /* Add missing parameters. */
+ setnilV(L->top++);
+ callhook(L, LUA_HOOKCALL, -1);
+ /* Preserve modifications of missing parameters by lua_setlocal(). */
+ while (missing-- > 0 && tvisnil(L->top - 1))
+ L->top--;
+ }
+#if LJ_HASJIT
+out:
+#endif
+ op = bc_op(pc[-1]); /* Get FUNC* op. */
+#if LJ_HASJIT
+ /* Use the non-hotcounting variants if JIT is off or while recording. */
+ if ((!(J->flags & JIT_F_ON) || J->state != LJ_TRACE_IDLE) &&
+ (op == BC_FUNCF || op == BC_FUNCV))
+ op = (BCOp)((int)op+(int)BC_IFUNCF-(int)BC_FUNCF);
+#endif
+ ERRNO_RESTORE
+ return makeasmfunc(lj_bc_ofs[op]); /* Return static dispatch target. */
+}
+
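Two public entry points end up driving lj_dispatch_update() above: the standard debug-hook API and luaJIT_setmode(). A hedged usage sketch from the embedder's side; count_hook and dispatch_demo are invented names, while the API calls themselves are the stock Lua/LuaJIT ones:

/* Illustrative sketch only (not in the patch). */
#include "lua.h"
#include "luajit.h"

static void count_hook(lua_State *L, lua_Debug *ar)
{
  (void)L; (void)ar;  /* Runs after every 1000 interpreted instructions. */
}

static void dispatch_demo(lua_State *L)
{
  /* A count hook sets LUA_MASKCOUNT in hookmask, so lj_dispatch_update()
  ** flips DISPMODE_INS and routes every instruction through lj_vm_inshook
  ** and lj_dispatch_ins(). */
  lua_sethook(L, count_hook, LUA_MASKCOUNT, 1000);

  /* Turning the engine off clears JIT_F_ON, dropping DISPMODE_JIT, so the
  ** non-hotcounting FORL/ITERL/LOOP variants are dispatched again. */
  luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE | LUAJIT_MODE_OFF);

  /* Clearing the hook restores the static dispatch table. */
  lua_sethook(L, NULL, 0, 0);
}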
diff --git a/src/LuaJIT/src/lj_dispatch.h b/src/LuaJIT/src/lj_dispatch.h
new file mode 100644
index 000000000..41f80d434
--- /dev/null
+++ b/src/LuaJIT/src/lj_dispatch.h
@@ -0,0 +1,131 @@
+/*
+** Instruction dispatch handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_DISPATCH_H
+#define _LJ_DISPATCH_H
+
+#include "lj_obj.h"
+#include "lj_bc.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#endif
+
+#if LJ_TARGET_MIPS
+/* Need our own global offset table for the dreaded MIPS calling conventions. */
+#if LJ_HASJIT
+#define JITGOTDEF(_) _(lj_trace_exit) _(lj_trace_hot)
+#else
+#define JITGOTDEF(_)
+#endif
+#if LJ_HASFFI
+#define FFIGOTDEF(_) \
+ _(lj_meta_equal_cd) _(lj_ccallback_enter) _(lj_ccallback_leave)
+#else
+#define FFIGOTDEF(_)
+#endif
+#define GOTDEF(_) \
+ _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \
+ _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \
+ _(pow) _(fmod) _(ldexp) \
+ _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_err_throw) \
+ _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \
+ _(lj_gc_barrieruv) _(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \
+ _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \
+ _(lj_meta_for) _(lj_meta_len) _(lj_meta_tget) _(lj_meta_tset) \
+ _(lj_state_growstack) _(lj_str_fromnum) _(lj_str_fromnumber) _(lj_str_new) \
+ _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) _(lj_tab_new) \
+ _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \
+ JITGOTDEF(_) FFIGOTDEF(_)
+
+enum {
+#define GOTENUM(name) LJ_GOT_##name,
+GOTDEF(GOTENUM)
+#undef GOTENUM
+ LJ_GOT__MAX
+};
+#endif
+
+/* Type of hot counter. Must match the code in the assembler VM. */
+/* 16 bits are sufficient. Only 0.0015% overhead with maximum slot penalty. */
+typedef uint16_t HotCount;
+
+/* Number of hot counter hash table entries (must be a power of two). */
+#define HOTCOUNT_SIZE 64
+#define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*sizeof(HotCount))
+
+/* Hotcount decrements. */
+#define HOTCOUNT_LOOP 2
+#define HOTCOUNT_CALL 1
+
+/* This solves a circular dependency problem -- bump as needed. Sigh. */
+#define GG_NUM_ASMFF 62
+
+#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF)
+#define GG_LEN_SDISP BC_FUNCF
+#define GG_LEN_DISP (GG_LEN_DDISP + GG_LEN_SDISP)
+
+/* Global state, main thread and extra fields are allocated together. */
+typedef struct GG_State {
+ lua_State L; /* Main thread. */
+ global_State g; /* Global state. */
+#if LJ_TARGET_MIPS
+ ASMFunction got[LJ_GOT__MAX]; /* Global offset table. */
+#endif
+#if LJ_HASJIT
+ jit_State J; /* JIT state. */
+ HotCount hotcount[HOTCOUNT_SIZE]; /* Hot counters. */
+#endif
+ ASMFunction dispatch[GG_LEN_DISP]; /* Instruction dispatch tables. */
+ BCIns bcff[GG_NUM_ASMFF]; /* Bytecode for ASM fast functions. */
+} GG_State;
+
+#define GG_OFS(field) ((int)offsetof(GG_State, field))
+#define G2GG(gl) ((GG_State *)((char *)(gl) - GG_OFS(g)))
+#define J2GG(j) ((GG_State *)((char *)(j) - GG_OFS(J)))
+#define L2GG(L) (G2GG(G(L)))
+#define J2G(J) (&J2GG(J)->g)
+#define G2J(gl) (&G2GG(gl)->J)
+#define L2J(L) (&L2GG(L)->J)
+#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g))
+#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch))
+#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch))
+#define GG_DISP2HOT (GG_OFS(hotcount) - GG_OFS(dispatch))
+#define GG_DISP2STATIC (GG_LEN_DDISP*(int)sizeof(ASMFunction))
+
+#define hotcount_get(gg, pc) \
+ (gg)->hotcount[(u32ptr(pc)>>2) & (HOTCOUNT_SIZE-1)]
+#define hotcount_set(gg, pc, val) \
+ (hotcount_get((gg), (pc)) = (HotCount)(val))
+
+/* Dispatch table management. */
+LJ_FUNC void lj_dispatch_init(GG_State *GG);
+#if LJ_HASJIT
+LJ_FUNC void lj_dispatch_init_hotcount(global_State *g);
+#endif
+LJ_FUNC void lj_dispatch_update(global_State *g);
+
+/* Instruction dispatch callback for hooks or when recording. */
+LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc);
+LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc);
+LJ_FUNCA void LJ_FASTCALL lj_dispatch_return(lua_State *L, const BCIns *pc);
+
+#if LJ_HASFFI && !defined(_BUILDVM_H)
+/* Save/restore errno and GetLastError() around hooks, exits and recording. */
+#include <errno.h>
+#if LJ_TARGET_WINDOWS
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#define ERRNO_SAVE int olderr = errno; DWORD oldwerr = GetLastError();
+#define ERRNO_RESTORE errno = olderr; SetLastError(oldwerr);
+#else
+#define ERRNO_SAVE int olderr = errno;
+#define ERRNO_RESTORE errno = olderr;
+#endif
+#else
+#define ERRNO_SAVE
+#define ERRNO_RESTORE
+#endif
+
+#endif
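Not part of the patch: a sketch of the hot-counter hashing defined above (hotcount_slot is an invented name). With the stock hotloop parameter of 56, lj_dispatch_init_hotcount() seeds every counter to 56*HOTCOUNT_LOOP-1 = 111 and the VM subtracts HOTCOUNT_LOOP per loop iteration until underflow hands the PC to the trace recorder; every PC that hashes to the same slot shares one counter, hence the "maximum slot penalty" wording above.

/* Illustrative sketch only (not in the patch); assumes HOTCOUNT_SIZE above. */
#include <stdint.h>

static unsigned hotcount_slot(const void *pc)
{
  /* Same hash as hotcount_get(): drop the two alignment bits of the
  ** bytecode PC, then keep the low log2(HOTCOUNT_SIZE) bits. */
  return (unsigned)(((uintptr_t)pc >> 2) & (HOTCOUNT_SIZE - 1));
}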
diff --git a/src/LuaJIT/src/lj_emit_arm.h b/src/LuaJIT/src/lj_emit_arm.h
new file mode 100644
index 000000000..21ece88e7
--- /dev/null
+++ b/src/LuaJIT/src/lj_emit_arm.h
@@ -0,0 +1,302 @@
+/*
+** ARM instruction emitter.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Constant encoding --------------------------------------------------- */
+
+static uint8_t emit_invai[16] = {
+ /* AND */ (ARMI_AND^ARMI_BIC) >> 21,
+ /* EOR */ 0,
+ /* SUB */ (ARMI_SUB^ARMI_ADD) >> 21,
+ /* RSB */ 0,
+ /* ADD */ (ARMI_ADD^ARMI_SUB) >> 21,
+ /* ADC */ (ARMI_ADC^ARMI_SBC) >> 21,
+ /* SBC */ (ARMI_SBC^ARMI_ADC) >> 21,
+ /* RSC */ 0,
+ /* TST */ 0,
+ /* TEQ */ 0,
+ /* CMP */ (ARMI_CMP^ARMI_CMN) >> 21,
+ /* CMN */ (ARMI_CMN^ARMI_CMP) >> 21,
+ /* ORR */ 0,
+ /* MOV */ (ARMI_MOV^ARMI_MVN) >> 21,
+ /* BIC */ (ARMI_BIC^ARMI_AND) >> 21,
+ /* MVN */ (ARMI_MVN^ARMI_MOV) >> 21
+};
+
+/* Encode constant in K12 format for data processing instructions. */
+static uint32_t emit_isk12(ARMIns ai, int32_t n)
+{
+ uint32_t invai, i, m = (uint32_t)n;
+ /* K12: unsigned 8 bit value, rotated in steps of two bits. */
+ for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
+ if (m <= 255) return ARMI_K12|m|i;
+ /* Otherwise try negation/complement with the inverse instruction. */
+ invai = emit_invai[((ai >> 21) & 15)];
+ if (!invai) return 0; /* Failed. No inverse instruction. */
+ m = ~(uint32_t)n;
+ if (invai == ((ARMI_SUB^ARMI_ADD) >> 21) ||
+ invai == (ARMI_CMP^ARMI_CMN) >> 21) m++;
+ for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
+ if (m <= 255) return ARMI_K12|(invai<<21)|m|i;
+ return 0; /* Failed. */
+}
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_dnm(ASMState *as, ARMIns ai, Reg rd, Reg rn, Reg rm)
+{
+ *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn) | ARMF_M(rm);
+}
+
+static void emit_dm(ASMState *as, ARMIns ai, Reg rd, Reg rm)
+{
+ *--as->mcp = ai | ARMF_D(rd) | ARMF_M(rm);
+}
+
+static void emit_dn(ASMState *as, ARMIns ai, Reg rd, Reg rn)
+{
+ *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn);
+}
+
+static void emit_nm(ASMState *as, ARMIns ai, Reg rn, Reg rm)
+{
+ *--as->mcp = ai | ARMF_N(rn) | ARMF_M(rm);
+}
+
+static void emit_d(ASMState *as, ARMIns ai, Reg rd)
+{
+ *--as->mcp = ai | ARMF_D(rd);
+}
+
+static void emit_n(ASMState *as, ARMIns ai, Reg rn)
+{
+ *--as->mcp = ai | ARMF_N(rn);
+}
+
+static void emit_m(ASMState *as, ARMIns ai, Reg rm)
+{
+ *--as->mcp = ai | ARMF_M(rm);
+}
+
+static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
+{
+ lua_assert(ofs >= -255 && ofs <= 255);
+ if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
+ *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) |
+ ((ofs & 0xf0) << 4) | (ofs & 0x0f);
+}
+
+static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
+{
+ lua_assert(ofs >= -4095 && ofs <= 4095);
+ /* Combine LDR/STR pairs to LDRD/STRD. */
+ if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) &&
+ (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn &&
+ (uint32_t)ofs <= 252 && !(ofs & 3) && !((rd ^ (ofs >>2)) & 1) &&
+ as->mcp != as->mcloop) {
+ as->mcp++;
+ emit_lsox(as, ai == ARMI_LDR ? ARMI_LDRD : ARMI_STRD, rd&~1, rn, ofs&~4);
+ return;
+ }
+ if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
+ *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd) | ARMF_N(rn) | ofs;
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer spills of BASE/L. */
+#define emit_canremat(ref) ((ref) < ASMREF_L)
+
+/* Try to find a one step delta relative to another constant. */
+static int emit_kdelta1(ASMState *as, Reg d, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lua_assert(r != d);
+ if (emit_canremat(ref)) {
+ int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
+ uint32_t k = emit_isk12(ARMI_ADD, delta);
+ if (k) {
+ if (k == ARMI_K12)
+ emit_dm(as, ARMI_MOV, d, r);
+ else
+ emit_dn(as, ARMI_ADD^k, d, r);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Try to find a two step delta relative to another constant. */
+static int emit_kdelta2(ASMState *as, Reg d, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lua_assert(r != d);
+ if (emit_canremat(ref)) {
+ int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
+ uint32_t sh, inv = 0, k2, k;
+ if (delta < 0) { delta = -delta; inv = ARMI_ADD^ARMI_SUB; }
+ sh = lj_ffs(delta) & ~1;
+ k2 = emit_isk12(0, delta & (255 << sh));
+ k = emit_isk12(0, delta & ~(255 << sh));
+ if (k) {
+ emit_dn(as, ARMI_ADD^k2^inv, d, d);
+ emit_dn(as, ARMI_ADD^k^inv, d, r);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Load a 32 bit constant into a GPR. */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ uint32_t k = emit_isk12(ARMI_MOV, i);
+ lua_assert(rset_test(as->freeset, r) || r == RID_TMP);
+ if (k) {
+ /* Standard K12 constant. */
+ emit_d(as, ARMI_MOV^k, r);
+ } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) {
+ /* 16 bit loword constant for ARMv6T2. */
+ emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r);
+ } else if (emit_kdelta1(as, r, i)) {
+ /* One step delta relative to another constant. */
+ } else if ((as->flags & JIT_F_ARMV6T2)) {
+ /* 32 bit hiword/loword constant for ARMv6T2. */
+ emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), r);
+ emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r);
+ } else if (emit_kdelta2(as, r, i)) {
+ /* Two step delta relative to another constant. */
+ } else {
+ /* Otherwise construct the constant with up to 4 instructions. */
+ /* NYI: use mvn+bic, use pc-relative loads. */
+ for (;;) {
+ uint32_t sh = lj_ffs(i) & ~1;
+ int32_t m = i & (255 << sh);
+ i &= ~(255 << sh);
+ if (i == 0) {
+ emit_d(as, ARMI_MOV ^ emit_isk12(0, m), r);
+ break;
+ }
+ emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), r, r);
+ }
+ }
+}
+
+#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
+
+static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p)
+{
+ int32_t i = i32ptr(p);
+ emit_lso(as, ai, r, ra_allock(as, (i & ~4095), rset_exclude(RSET_GPR, r)),
+ (i & 4095));
+}
+
+/* Get/set global_State fields. */
+#define emit_getgl(as, r, field) \
+ emit_lsptr(as, ARMI_LDR, (r), (void *)&J2G(as->J)->field)
+#define emit_setgl(as, r, field) \
+ emit_lsptr(as, ARMI_STR, (r), (void *)&J2G(as->J)->field)
+
+/* Trace number is determined from pc of exit instruction. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_branch(ASMState *as, ARMIns ai, MCode *target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = (target - p) - 1;
+ lua_assert(((delta + 0x00800000) >> 24) == 0);
+ *--p = ai | ((uint32_t)delta & 0x00ffffffu);
+ as->mcp = p;
+}
+
+#define emit_jmp(as, target) emit_branch(as, ARMI_B, (target))
+
+static void emit_call(ASMState *as, void *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = ((char *)target - (char *)p) - 8;
+ if ((((delta>>2) + 0x00800000) >> 24) == 0) {
+ if ((delta & 1))
+ *p = ARMI_BLX | ((uint32_t)(delta>>2) & 0x00ffffffu) | ((delta&2) << 27);
+ else
+ *p = ARMI_BL | ((uint32_t)(delta>>2) & 0x00ffffffu);
+ } else { /* Target out of range: need indirect call. But don't use R0-R3. */
+ Reg r = ra_allock(as, i32ptr(target), RSET_RANGE(RID_R4, RID_R12+1));
+ *p = ARMI_BLXr | ARMF_M(r);
+ }
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
+ if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */
+ MCode ins = *as->mcp, swp = (src^dst);
+ if ((ins & 0x0c000000) == 0x04000000 && (ins & 0x02000010) != 0x02000010) {
+ if (!((ins ^ (dst << 16)) & 0x000f0000))
+ *as->mcp = ins ^ (swp << 16); /* Swap N in load/store. */
+ if (!(ins & 0x00100000) && !((ins ^ (dst << 12)) & 0x0000f000))
+ *as->mcp = ins ^ (swp << 12); /* Swap D in store. */
+ }
+ }
+ emit_dm(as, ARMI_MOV, dst, src);
+}
+
+/* Generic load of register from stack slot. */
+static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
+ emit_lso(as, ARMI_LDR, r, RID_SP, ofs);
+}
+
+/* Generic store of register to stack slot. */
+static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
+ emit_lso(as, ARMI_STR, r, RID_SP, ofs);
+}
+
+/* Emit an arithmetic/logic operation with a constant operand. */
+static void emit_opk(ASMState *as, ARMIns ai, Reg dest, Reg src,
+ int32_t i, RegSet allow)
+{
+ uint32_t k = emit_isk12(ai, i);
+ if (k)
+ emit_dn(as, ai^k, dest, src);
+ else
+ emit_dnm(as, ai, dest, src, ra_allock(as, i, allow));
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs)
+ emit_opk(as, ARMI_ADD, r, r, ofs, rset_exclude(RSET_GPR, r));
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
+
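emit_isk12() above searches the ARM "modified immediate" (K12) space: an 8-bit value rotated right by an even amount. The same test restated as a standalone predicate; is_arm_k12 is an invented name and the loop mirrors the lj_rol(m, 2) walk in the emitter:

/* Illustrative sketch only (not in the patch). */
#include <stdint.h>

static int is_arm_k12(uint32_t n)
{
  int rot;
  for (rot = 0; rot < 32; rot += 2) {
    /* Rotating n left by rot undoes a right rotation by rot. */
    uint32_t m = rot ? ((n << rot) | (n >> (32 - rot))) : n;
    if (m <= 255) return 1;  /* Fits in 8 bits: encodable as K12. */
  }
  return 0;  /* Not encodable; emit_isk12() then tries the inverse op. */
}
/* e.g. is_arm_k12(0xff000000u) == 1 (255 ror 8), is_arm_k12(0x101) == 0. */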
diff --git a/src/LuaJIT/src/lj_emit_mips.h b/src/LuaJIT/src/lj_emit_mips.h
new file mode 100644
index 000000000..3edf8851e
--- /dev/null
+++ b/src/LuaJIT/src/lj_emit_mips.h
@@ -0,0 +1,211 @@
+/*
+** MIPS instruction emitter.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_dst(ASMState *as, MIPSIns mi, Reg rd, Reg rs, Reg rt)
+{
+ *--as->mcp = mi | MIPSF_D(rd) | MIPSF_S(rs) | MIPSF_T(rt);
+}
+
+static void emit_dta(ASMState *as, MIPSIns mi, Reg rd, Reg rt, uint32_t a)
+{
+ *--as->mcp = mi | MIPSF_D(rd) | MIPSF_T(rt) | MIPSF_A(a);
+}
+
+#define emit_ds(as, mi, rd, rs) emit_dst(as, (mi), (rd), (rs), 0)
+#define emit_tg(as, mi, rt, rg) emit_dst(as, (mi), (rg)&31, 0, (rt))
+
+static void emit_tsi(ASMState *as, MIPSIns mi, Reg rt, Reg rs, int32_t i)
+{
+ *--as->mcp = mi | MIPSF_T(rt) | MIPSF_S(rs) | (i & 0xffff);
+}
+
+#define emit_ti(as, mi, rt, i) emit_tsi(as, (mi), (rt), 0, (i))
+#define emit_hsi(as, mi, rh, rs, i) emit_tsi(as, (mi), (rh) & 31, (rs), (i))
+
+static void emit_fgh(ASMState *as, MIPSIns mi, Reg rf, Reg rg, Reg rh)
+{
+ *--as->mcp = mi | MIPSF_F(rf&31) | MIPSF_G(rg&31) | MIPSF_H(rh&31);
+}
+
+#define emit_fg(as, mi, rf, rg) emit_fgh(as, (mi), (rf), (rg), 0)
+
+static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift)
+{
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ emit_dta(as, MIPSI_ROTR, dest, src, shift);
+ } else {
+ emit_dst(as, MIPSI_OR, dest, dest, tmp);
+ emit_dta(as, MIPSI_SLL, dest, src, (-shift)&31);
+ emit_dta(as, MIPSI_SRL, tmp, src, shift);
+ }
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= REF_BASE)
+
+/* Try to find a one step delta relative to another constant. */
+static int emit_kdelta1(ASMState *as, Reg t, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lua_assert(r != t);
+ if (ref < ASMREF_L) {
+ int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
+ if (checki16(delta)) {
+ emit_tsi(as, MIPSI_ADDIU, t, r, delta);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Load a 32 bit constant into a GPR. */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ if (checki16(i)) {
+ emit_ti(as, MIPSI_LI, r, i);
+ } else {
+ if ((i & 0xffff)) {
+ int32_t jgl = i32ptr(J2G(as->J));
+ if ((uint32_t)(i-jgl) < 65536) {
+ emit_tsi(as, MIPSI_ADDIU, r, RID_JGL, i-jgl-32768);
+ return;
+ } else if (emit_kdelta1(as, r, i)) {
+ return;
+ } else if ((i >> 16) == 0) {
+ emit_tsi(as, MIPSI_ORI, r, RID_ZERO, i);
+ return;
+ }
+ emit_tsi(as, MIPSI_ORI, r, r, i);
+ }
+ emit_ti(as, MIPSI_LUI, r, (i >> 16));
+ }
+}
+
+#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
+
+static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
+static void ra_allockreg(ASMState *as, int32_t k, Reg r);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow)
+{
+ int32_t jgl = i32ptr(J2G(as->J));
+ int32_t i = i32ptr(p);
+ Reg base;
+ if ((uint32_t)(i-jgl) < 65536) {
+ i = i-jgl-32768;
+ base = RID_JGL;
+ } else {
+ base = ra_allock(as, i-(int16_t)i, allow);
+ }
+ emit_tsi(as, mi, r, base, i);
+}
+
+#define emit_loadn(as, r, tv) \
+ emit_lsptr(as, MIPSI_LDC1, ((r) & 31), (void *)(tv), RSET_GPR)
+
+/* Get/set global_State fields. */
+static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs)
+{
+ emit_tsi(as, mi, r, RID_JGL, ofs-32768);
+}
+
+#define emit_getgl(as, r, field) \
+ emit_lsglptr(as, MIPSI_LW, (r), (int32_t)offsetof(global_State, field))
+#define emit_setgl(as, r, field) \
+ emit_lsglptr(as, MIPSI_SW, (r), (int32_t)offsetof(global_State, field))
+
+/* Trace number is determined from per-trace exit stubs. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_branch(ASMState *as, MIPSIns mi, Reg rs, Reg rt, MCode *target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = target - p;
+ lua_assert(((delta + 0x8000) >> 16) == 0);
+ *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu);
+ as->mcp = p;
+}
+
+static void emit_jmp(ASMState *as, MCode *target)
+{
+ *--as->mcp = MIPSI_NOP;
+ emit_branch(as, MIPSI_B, RID_ZERO, RID_ZERO, (target));
+}
+
+static void emit_call(ASMState *as, void *target)
+{
+ MCode *p = as->mcp;
+ *--p = MIPSI_NOP;
+ if ((((uintptr_t)target ^ (uintptr_t)p) >> 28) == 0)
+ *--p = MIPSI_JAL | (((uintptr_t)target >>2) & 0x03ffffffu);
+ else /* Target out of range: need indirect call. */
+ *--p = MIPSI_JALR | MIPSF_S(RID_CFUNCADDR);
+ as->mcp = p;
+ ra_allockreg(as, i32ptr(target), RID_CFUNCADDR);
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+#define emit_move(as, dst, src) \
+ emit_ds(as, MIPSI_MOVE, (dst), (src))
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ if (dst < RID_MAX_GPR)
+ emit_move(as, dst, src);
+ else
+ emit_fg(as, irt_isnum(ir->t) ? MIPSI_MOV_D : MIPSI_MOV_S, dst, src);
+}
+
+/* Generic load of register from stack slot. */
+static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tsi(as, MIPSI_LW, r, RID_SP, ofs);
+ else
+ emit_tsi(as, irt_isnum(ir->t) ? MIPSI_LDC1 : MIPSI_LWC1,
+ (r & 31), RID_SP, ofs);
+}
+
+/* Generic store of register to stack slot. */
+static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tsi(as, MIPSI_SW, r, RID_SP, ofs);
+ else
+ emit_tsi(as, irt_isnum(ir->t) ? MIPSI_SDC1 : MIPSI_SWC1,
+ (r&31), RID_SP, ofs);
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs) {
+ lua_assert(checki16(ofs));
+ emit_tsi(as, MIPSI_ADDIU, r, r, ofs);
+ }
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
+
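Editorial note on the constant loader above (and the analogous PPC one below): emit_loadi splits a 32-bit constant that fits neither a sign-extended LI nor a zero-extended ORI into a LUI of the high halfword plus an ORI of the low halfword; because machine code is emitted backwards, the ORI is written first but executes second. A minimal sketch of that decomposition with a hypothetical constant — an illustration only, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
      int32_t i = 0x12345678;                      /* hypothetical constant */
      uint16_t ori_imm = (uint16_t)(i & 0xffff);   /* low half  -> ORI r, r, ori_imm */
      uint16_t lui_imm = (uint16_t)(i >> 16);      /* high half -> LUI r, lui_imm    */
      /* Executed order is LUI then ORI, reassembling the original constant. */
      assert((int32_t)(((uint32_t)lui_imm << 16) | ori_imm) == i);
      return 0;
    }
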
diff --git a/src/LuaJIT/src/lj_emit_ppc.h b/src/LuaJIT/src/lj_emit_ppc.h
new file mode 100644
index 000000000..f2bf0a944
--- /dev/null
+++ b/src/LuaJIT/src/lj_emit_ppc.h
@@ -0,0 +1,238 @@
+/*
+** PPC instruction emitter.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_tab(ASMState *as, PPCIns pi, Reg rt, Reg ra, Reg rb)
+{
+ *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | PPCF_B(rb);
+}
+
+#define emit_asb(as, pi, ra, rs, rb) emit_tab(as, (pi), (rs), (ra), (rb))
+#define emit_as(as, pi, ra, rs) emit_tab(as, (pi), (rs), (ra), 0)
+#define emit_ab(as, pi, ra, rb) emit_tab(as, (pi), 0, (ra), (rb))
+
+static void emit_tai(ASMState *as, PPCIns pi, Reg rt, Reg ra, int32_t i)
+{
+ *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | (i & 0xffff);
+}
+
+#define emit_ti(as, pi, rt, i) emit_tai(as, (pi), (rt), 0, (i))
+#define emit_ai(as, pi, ra, i) emit_tai(as, (pi), 0, (ra), (i))
+#define emit_asi(as, pi, ra, rs, i) emit_tai(as, (pi), (rs), (ra), (i))
+
+#define emit_fab(as, pi, rf, ra, rb) \
+ emit_tab(as, (pi), (rf)&31, (ra)&31, (rb)&31)
+#define emit_fb(as, pi, rf, rb) emit_tab(as, (pi), (rf)&31, 0, (rb)&31)
+#define emit_fac(as, pi, rf, ra, rc) \
+ emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, 0)
+#define emit_facb(as, pi, rf, ra, rc, rb) \
+ emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, (rb)&31)
+#define emit_fai(as, pi, rf, ra, i) emit_tai(as, (pi), (rf)&31, (ra), (i))
+
+static void emit_rot(ASMState *as, PPCIns pi, Reg ra, Reg rs,
+ int32_t n, int32_t b, int32_t e)
+{
+ *--as->mcp = pi | PPCF_T(rs) | PPCF_A(ra) | PPCF_B(n) |
+ PPCF_MB(b) | PPCF_ME(e);
+}
+
+static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n)
+{
+ lua_assert(n >= 0 && n < 32);
+ emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n);
+}
+
+static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n)
+{
+ lua_assert(n >= 0 && n < 32);
+ emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31);
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= REF_BASE)
+
+/* Try to find a one step delta relative to another constant. */
+static int emit_kdelta1(ASMState *as, Reg t, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lua_assert(r != t);
+ if (ref < ASMREF_L) {
+ int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
+ if (checki16(delta)) {
+ emit_tai(as, PPCI_ADDI, t, r, delta);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Load a 32 bit constant into a GPR. */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ if (checki16(i)) {
+ emit_ti(as, PPCI_LI, r, i);
+ } else {
+ if ((i & 0xffff)) {
+ int32_t jgl = i32ptr(J2G(as->J));
+ if ((uint32_t)(i-jgl) < 65536) {
+ emit_tai(as, PPCI_ADDI, r, RID_JGL, i-jgl-32768);
+ return;
+ } else if (emit_kdelta1(as, r, i)) {
+ return;
+ }
+ emit_asi(as, PPCI_ORI, r, r, i);
+ }
+ emit_ti(as, PPCI_LIS, r, (i >> 16));
+ }
+}
+
+#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
+
+static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow)
+{
+ int32_t jgl = i32ptr(J2G(as->J));
+ int32_t i = i32ptr(p);
+ Reg base;
+ if ((uint32_t)(i-jgl) < 65536) {
+ i = i-jgl-32768;
+ base = RID_JGL;
+ } else {
+ base = ra_allock(as, i-(int16_t)i, allow);
+ }
+ emit_tai(as, pi, r, base, i);
+}
+
+#define emit_loadn(as, r, tv) \
+ emit_lsptr(as, PPCI_LFD, ((r) & 31), (void *)(tv), RSET_GPR)
+
+/* Get/set global_State fields. */
+static void emit_lsglptr(ASMState *as, PPCIns pi, Reg r, int32_t ofs)
+{
+ emit_tai(as, pi, r, RID_JGL, ofs-32768);
+}
+
+#define emit_getgl(as, r, field) \
+ emit_lsglptr(as, PPCI_LWZ, (r), (int32_t)offsetof(global_State, field))
+#define emit_setgl(as, r, field) \
+ emit_lsglptr(as, PPCI_STW, (r), (int32_t)offsetof(global_State, field))
+
+/* Trace number is determined from per-trace exit stubs. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_condbranch(ASMState *as, PPCIns pi, PPCCC cc, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ lua_assert(((delta + 0x8000) >> 16) == 0);
+ pi ^= (delta & 0x8000) * (PPCF_Y/0x8000);
+ *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu);
+}
+
+static void emit_jmp(ASMState *as, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ *p = PPCI_B | (delta & 0x03fffffcu);
+}
+
+static void emit_call(ASMState *as, void *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ if ((((delta>>2) + 0x00800000) >> 24) == 0) {
+ *p = PPCI_BL | (delta & 0x03fffffcu);
+ } else { /* Target out of range: need indirect call. Don't use arg reg. */
+ RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
+ Reg r = ra_allock(as, i32ptr(target), allow);
+ *p = PPCI_BCTRL;
+ p[-1] = PPCI_MTCTR | PPCF_T(r);
+ as->mcp = p-1;
+ }
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+#define emit_mr(as, dst, src) \
+ emit_asb(as, PPCI_MR, (dst), (src), (src))
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ UNUSED(ir);
+ if (dst < RID_MAX_GPR)
+ emit_mr(as, dst, src);
+ else
+ emit_fb(as, PPCI_FMR, dst, src);
+}
+
+/* Generic load of register from stack slot. */
+static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tai(as, PPCI_LWZ, r, RID_SP, ofs);
+ else
+ emit_fai(as, irt_isnum(ir->t) ? PPCI_LFD : PPCI_LFS, r, RID_SP, ofs);
+}
+
+/* Generic store of register to stack slot. */
+static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tai(as, PPCI_STW, r, RID_SP, ofs);
+ else
+ emit_fai(as, irt_isnum(ir->t) ? PPCI_STFD : PPCI_STFS, r, RID_SP, ofs);
+}
+
+/* Emit a compare (for equality) with a constant operand. */
+static void emit_cmpi(ASMState *as, Reg r, int32_t k)
+{
+ if (checki16(k)) {
+ emit_ai(as, PPCI_CMPWI, r, k);
+ } else if (checku16(k)) {
+ emit_ai(as, PPCI_CMPLWI, r, k);
+ } else {
+ emit_ai(as, PPCI_CMPLWI, RID_TMP, k);
+ emit_asi(as, PPCI_XORIS, RID_TMP, r, (k >> 16));
+ }
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs) {
+ emit_tai(as, PPCI_ADDI, r, r, ofs);
+ if (!checki16(ofs))
+ emit_tai(as, PPCI_ADDIS, r, r, (ofs + 32768) >> 16);
+ }
+}
+
+static void emit_spsub(ASMState *as, int32_t ofs)
+{
+ if (ofs) {
+ emit_tai(as, PPCI_STWU, RID_TMP, RID_SP, -ofs);
+ emit_tai(as, PPCI_ADDI, RID_TMP, RID_SP,
+ CFRAME_SIZE + (as->parent ? as->parent->spadjust : 0));
+ }
+}
+
diff --git a/src/LuaJIT/src/lj_emit_x86.h b/src/LuaJIT/src/lj_emit_x86.h
new file mode 100644
index 000000000..dfb70574e
--- /dev/null
+++ b/src/LuaJIT/src/lj_emit_x86.h
@@ -0,0 +1,466 @@
+/*
+** x86/x64 instruction emitter.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+#define MODRM(mode, r1, r2) ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7)))
+
+#if LJ_64
+#define REXRB(p, rr, rb) \
+ { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \
+ if (rex != 0x40) *--(p) = rex; }
+#define FORCE_REX 0x200
+#define REX_64 (FORCE_REX|0x080000)
+#else
+#define REXRB(p, rr, rb) ((void)0)
+#define FORCE_REX 0
+#define REX_64 0
+#endif
+
+#define emit_i8(as, i) (*--as->mcp = (MCode)(i))
+#define emit_i32(as, i) (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4)
+#define emit_u32(as, u) (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4)
+
+#define emit_x87op(as, xo) \
+ (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2)
+
+/* op */
+static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx,
+ MCode *p, int delta)
+{
+ int n = (int8_t)xo;
+#if defined(__GNUC__)
+ if (__builtin_constant_p(xo) && n == -2)
+ p[delta-2] = (MCode)(xo >> 24);
+ else if (__builtin_constant_p(xo) && n == -3)
+ *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16);
+ else
+#endif
+ *(uint32_t *)(p+delta-5) = (uint32_t)xo;
+ p += n + delta;
+#if LJ_64
+ {
+ uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1);
+ if (rex != 0x40) {
+ rex |= (rr >> 16);
+ if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); }
+ else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; }
+ *--p = (MCode)rex;
+ }
+ }
+#else
+ UNUSED(rr); UNUSED(rb); UNUSED(rx);
+#endif
+ return p;
+}
+
+/* op + modrm */
+#define emit_opm(xo, mode, rr, rb, p, delta) \
+ (p[(delta)-1] = MODRM((mode), (rr), (rb)), \
+ emit_op((xo), (rr), (rb), 0, (p), (delta)))
+
+/* op + modrm + sib */
+#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \
+ (p[-1] = MODRM((scale), (rx), (rb)), \
+ p[-2] = MODRM((mode), (rr), RID_ESP), \
+ emit_op((xo), (rr), (rb), (rx), (p), -1))
+
+/* op r1, r2 */
+static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2)
+{
+ MCode *p = as->mcp;
+ as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0);
+}
+
+#if LJ_64 && defined(LUA_USE_ASSERT)
+/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */
+static int32_t ptr2addr(const void *p)
+{
+ lua_assert((uintptr_t)p < (uintptr_t)0x80000000);
+ return i32ptr(p);
+}
+#else
+#define ptr2addr(p) (i32ptr((p)))
+#endif
+
+/* op r, [addr] */
+static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
+{
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = ptr2addr(addr);
+#if LJ_64
+ p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
+ as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
+#else
+ as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
+#endif
+}
+
+/* op r, [base+ofs] */
+static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs)
+{
+ MCode *p = as->mcp;
+ x86Mode mode;
+ if (ra_hasreg(rb)) {
+ if (ofs == 0 && (rb&7) != RID_EBP) {
+ mode = XM_OFS0;
+ } else if (checki8(ofs)) {
+ *--p = (MCode)ofs;
+ mode = XM_OFS8;
+ } else {
+ p -= 4;
+ *(int32_t *)p = ofs;
+ mode = XM_OFS32;
+ }
+ if ((rb&7) == RID_ESP)
+ *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ } else {
+ *(int32_t *)(p-4) = ofs;
+#if LJ_64
+ p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
+ p -= 5;
+ rb = RID_ESP;
+#else
+ p -= 4;
+ rb = RID_EBP;
+#endif
+ mode = XM_OFS0;
+ }
+ as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
+}
+
+/* op r, [base+idx*scale+ofs] */
+static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx,
+ x86Mode scale, int32_t ofs)
+{
+ MCode *p = as->mcp;
+ x86Mode mode;
+ if (ofs == 0 && (rb&7) != RID_EBP) {
+ mode = XM_OFS0;
+ } else if (checki8(ofs)) {
+ mode = XM_OFS8;
+ *--p = (MCode)ofs;
+ } else {
+ mode = XM_OFS32;
+ p -= 4;
+ *(int32_t *)p = ofs;
+ }
+ as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p);
+}
+
+/* op r, i */
+static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i)
+{
+ MCode *p = as->mcp;
+ x86Op xo;
+ if (checki8(i)) {
+ *--p = (MCode)i;
+ xo = XG_TOXOi8(xg);
+ } else {
+ p -= 4;
+ *(int32_t *)p = i;
+ xo = XG_TOXOi(xg);
+ }
+ as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0);
+}
+
+/* op [base+ofs], i */
+static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs,
+ int32_t i)
+{
+ x86Op xo;
+ if (checki8(i)) {
+ emit_i8(as, i);
+ xo = XG_TOXOi8(xg);
+ } else {
+ emit_i32(as, i);
+ xo = XG_TOXOi(xg);
+ }
+ emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs);
+}
+
+#define emit_shifti(as, xg, r, i) \
+ (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r)))
+
+/* op r, rm/mrm */
+static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb)
+{
+ MCode *p = as->mcp;
+ x86Mode mode = XM_REG;
+ if (rb == RID_MRM) {
+ rb = as->mrm.base;
+ if (rb == RID_NONE) {
+ rb = RID_EBP;
+ mode = XM_OFS0;
+ p -= 4;
+ *(int32_t *)p = as->mrm.ofs;
+ if (as->mrm.idx != RID_NONE)
+ goto mrmidx;
+#if LJ_64
+ *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
+ rb = RID_ESP;
+#endif
+ } else {
+ if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) {
+ mode = XM_OFS0;
+ } else if (checki8(as->mrm.ofs)) {
+ *--p = (MCode)as->mrm.ofs;
+ mode = XM_OFS8;
+ } else {
+ p -= 4;
+ *(int32_t *)p = as->mrm.ofs;
+ mode = XM_OFS32;
+ }
+ if (as->mrm.idx != RID_NONE) {
+ mrmidx:
+ as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p);
+ return;
+ }
+ if ((rb&7) == RID_ESP)
+ *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ }
+ }
+ as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
+}
+
+/* op rm/mrm, i */
+static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i)
+{
+ x86Op xo;
+ if (checki8(i)) {
+ emit_i8(as, i);
+ xo = XG_TOXOi8(xg);
+ } else {
+ emit_i32(as, i);
+ xo = XG_TOXOi(xg);
+ }
+ emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64));
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Instruction selection for XMM moves. */
+#define XMM_MOVRR(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVSD : XO_MOVAPS)
+#define XMM_MOVRM(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVLPD : XO_MOVSD)
+
+/* mov [base+ofs], i */
+static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
+{
+ emit_i32(as, i);
+ emit_rmro(as, XO_MOVmi, 0, base, ofs);
+}
+
+/* mov [base+ofs], r */
+#define emit_movtomro(as, r, base, ofs) \
+ emit_rmro(as, XO_MOVto, (r), (base), (ofs))
+
+/* Get/set global_State fields. */
+#define emit_opgl(as, xo, r, field) \
+ emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field)
+#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r), field)
+#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r), field)
+
+#define emit_setvmstate(as, i) \
+ (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate))
+
+/* mov r, i / xor r, r */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */
+ if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP ||
+ (as->curins+1 < as->T->nins &&
+ IR(as->curins+1)->o == IR_HIOP)))) {
+ emit_rr(as, XO_ARITH(XOg_XOR), r, r);
+ } else {
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = i;
+ p[-5] = (MCode)(XI_MOVri+(r&7));
+ p -= 5;
+ REXRB(p, 0, r);
+ as->mcp = p;
+ }
+}
+
+/* mov r, addr */
+#define emit_loada(as, r, addr) \
+ emit_loadi(as, (r), ptr2addr((addr)))
+
+#if LJ_64
+/* mov r, imm64 or shorter 32 bit extended load. */
+static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
+{
+ if (checku32(u64)) { /* 32 bit load clears upper 32 bits. */
+ emit_loadi(as, r, (int32_t)u64);
+ } else if (checki32((int64_t)u64)) { /* Sign-extended 32 bit load. */
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = (int32_t)u64;
+ as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4);
+ } else { /* Full-size 64 bit load. */
+ MCode *p = as->mcp;
+ *(uint64_t *)(p-8) = u64;
+ p[-9] = (MCode)(XI_MOVri+(r&7));
+ p[-10] = 0x48 + ((r>>3)&1);
+ p -= 10;
+ as->mcp = p;
+ }
+}
+#endif
+
+/* movsd r, [&tv->n] / xorps r, r */
+static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
+{
+ if (tvispzero(tv)) /* Use xor only for +0. */
+ emit_rr(as, XO_XORPS, r, r);
+ else
+ emit_rma(as, XMM_MOVRM(as), r, &tv->n);
+}
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for short jumps. */
+typedef MCode *MCLabel;
+
+#if LJ_32 && LJ_HASFFI
+/* jmp short target */
+static void emit_sjmp(ASMState *as, MCLabel target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = target - p;
+ lua_assert(delta == (int8_t)delta);
+ p[-1] = (MCode)(int8_t)delta;
+ p[-2] = XI_JMPs;
+ as->mcp = p - 2;
+}
+#endif
+
+/* jcc short target */
+static void emit_sjcc(ASMState *as, int cc, MCLabel target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = target - p;
+ lua_assert(delta == (int8_t)delta);
+ p[-1] = (MCode)(int8_t)delta;
+ p[-2] = (MCode)(XI_JCCs+(cc&15));
+ as->mcp = p - 2;
+}
+
+/* jcc short (pending target) */
+static MCLabel emit_sjcc_label(ASMState *as, int cc)
+{
+ MCode *p = as->mcp;
+ p[-1] = 0;
+ p[-2] = (MCode)(XI_JCCs+(cc&15));
+ as->mcp = p - 2;
+ return p;
+}
+
+/* Fixup jcc short target. */
+static void emit_sfixup(ASMState *as, MCLabel source)
+{
+ source[-1] = (MCode)(as->mcp-source);
+}
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+/* Compute relative 32 bit offset for jump and call instructions. */
+static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target)
+{
+ ptrdiff_t delta = target - p;
+ lua_assert(delta == (int32_t)delta);
+ return (int32_t)delta;
+}
+
+/* jcc target */
+static void emit_jcc(ASMState *as, int cc, MCode *target)
+{
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = jmprel(p, target);
+ p[-5] = (MCode)(XI_JCCn+(cc&15));
+ p[-6] = 0x0f;
+ as->mcp = p - 6;
+}
+
+/* jmp target */
+static void emit_jmp(ASMState *as, MCode *target)
+{
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = jmprel(p, target);
+ p[-5] = XI_JMP;
+ as->mcp = p - 5;
+}
+
+/* call target */
+static void emit_call_(ASMState *as, MCode *target)
+{
+ MCode *p = as->mcp;
+#if LJ_64
+ if (target-p != (int32_t)(target-p)) {
+ /* Assumes RID_RET is never an argument to calls and always clobbered. */
+ emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET);
+ emit_loadu64(as, RID_RET, (uint64_t)target);
+ return;
+ }
+#endif
+ *(int32_t *)(p-4) = jmprel(p, target);
+ p[-5] = XI_CALL;
+ as->mcp = p - 5;
+}
+
+#define emit_call(as, f) emit_call_(as, (MCode *)(void *)(f))
+
+/* -- Emit generic operations --------------------------------------------- */
+
+/* Use 64 bit operations to handle 64 bit IR types. */
+#if LJ_64
+#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0))
+#else
+#define REX_64IR(ir, r) (r)
+#endif
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ UNUSED(ir);
+ if (dst < RID_MAX_GPR)
+ emit_rr(as, XO_MOV, REX_64IR(ir, dst), src);
+ else
+ emit_rr(as, XMM_MOVRR(as), dst, src);
+}
+
+/* Generic load of register from stack slot. */
+static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_rmro(as, XO_MOV, REX_64IR(ir, r), RID_ESP, ofs);
+ else
+ emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, r, RID_ESP, ofs);
+}
+
+/* Generic store of register to stack slot. */
+static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_rmro(as, XO_MOVto, REX_64IR(ir, r), RID_ESP, ofs);
+ else
+ emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, RID_ESP, ofs);
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs) {
+ if ((as->flags & JIT_F_LEA_AGU))
+ emit_rmro(as, XO_LEA, r, r, ofs);
+ else
+ emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs);
+ }
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_ESP|REX_64, -(ofs))
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= REF_BASE)
+
diff --git a/src/LuaJIT/src/lj_err.c b/src/LuaJIT/src/lj_err.c
new file mode 100644
index 000000000..05813cf83
--- /dev/null
+++ b/src/LuaJIT/src/lj_err.c
@@ -0,0 +1,781 @@
+/*
+** Error handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_err_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_func.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_ff.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+/*
+** LuaJIT can either use internal or external frame unwinding:
+**
+** - Internal frame unwinding (INT) is free-standing and doesn't require
+** any OS or library support.
+**
+** - External frame unwinding (EXT) uses the system-provided unwind handler.
+**
+** Pros and Cons:
+**
+** - EXT requires unwind tables for *all* functions on the C stack between
+** the pcall/catch and the error/throw. This is the default on x64,
+** but needs to be manually enabled on x86/PPC for non-C++ code.
+**
+** - INT is faster when actually throwing errors (but this happens rarely).
+** Setting up error handlers is zero-cost in any case.
+**
+** - EXT provides full interoperability with C++ exceptions. You can throw
+** Lua errors or C++ exceptions through a mix of Lua frames and C++ frames.
+** C++ destructors are called as needed. C++ exceptions caught by pcall
+** are converted to the string "C++ exception". Lua errors can be caught
+** with catch (...) in C++.
+**
+** - INT has only limited support for automatically catching C++ exceptions
+** on POSIX systems using DWARF2 stack unwinding. Other systems may use
+** the wrapper function feature. Lua errors thrown through C++ frames
+** cannot be caught by C++ code and C++ destructors are not run.
+**
+** EXT is the default on x64 systems, INT is the default on all other systems.
+**
+** EXT can be manually enabled on POSIX systems using GCC and DWARF2 stack
+** unwinding with -DLUAJIT_UNWIND_EXTERNAL. *All* C code must be compiled
+** with -funwind-tables (or -fexceptions). This includes LuaJIT itself (set
+** TARGET_CFLAGS), all of your C/Lua binding code, all loadable C modules
+** and all C libraries that have callbacks which may be used to call back
+** into Lua. C++ code must *not* be compiled with -fno-exceptions.
+**
+** EXT cannot be enabled on WIN32 since system exceptions use code-driven SEH.
+** EXT is mandatory on WIN64 since the calling convention has an abundance
+** of callee-saved registers (rbx, rbp, rsi, rdi, r12-r15, xmm6-xmm15).
+** EXT is mandatory on POSIX/x64 since the interpreter doesn't save r12/r13.
+*/
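Whichever unwinder is in effect, the boundary it services is the ordinary protected call: an error raised below a pcall/catch frame must land back at that frame. A self-contained sketch using only the public Lua C API — an editorial illustration, not part of the patch; `f` and `protected_demo` are hypothetical names:

    #include <stdio.h>
    #include <lua.h>
    #include <lauxlib.h>

    static int f(lua_State *L)
    {
      return luaL_error(L, "boom");            /* throws through the lj_err_* machinery */
    }

    static int protected_demo(lua_State *L)
    {
      lua_pushcfunction(L, f);
      if (lua_pcall(L, 0, 0, 0) != 0) {        /* error is caught at this frame */
        const char *msg = lua_tostring(L, -1); /* "<chunk>:<line>: boom" */
        lua_pop(L, 1);
        return msg != NULL;
      }
      return 0;
    }

    int main(void)
    {
      lua_State *L = luaL_newstate();
      printf("caught: %d\n", protected_demo(L));  /* expect 1 */
      lua_close(L);
      return 0;
    }
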
+
+#if defined(__GNUC__) && (LJ_TARGET_X64 || defined(LUAJIT_UNWIND_EXTERNAL))
+#define LJ_UNWIND_EXT 1
+#elif LJ_TARGET_X64 && LJ_TARGET_WINDOWS
+#define LJ_UNWIND_EXT 1
+#endif
+
+/* -- Error messages ------------------------------------------------------ */
+
+/* Error message strings. */
+LJ_DATADEF const char *lj_err_allmsg =
+#define ERRDEF(name, msg) msg "\0"
+#include "lj_errmsg.h"
+;
+
+/* -- Internal frame unwinding -------------------------------------------- */
+
+/* Unwind Lua stack and move error message to new top. */
+LJ_NOINLINE static void unwindstack(lua_State *L, TValue *top)
+{
+ lj_func_closeuv(L, top);
+ if (top < L->top-1) {
+ copyTV(L, top, L->top-1);
+ L->top = top+1;
+ }
+ lj_state_relimitstack(L);
+}
+
+/* Unwind until stop frame. Optionally cleanup frames. */
+static void *err_unwind(lua_State *L, void *stopcf, int errcode)
+{
+ TValue *frame = L->base-1;
+ void *cf = L->cframe;
+ while (cf) {
+ int32_t nres = cframe_nres(cframe_raw(cf));
+ if (nres < 0) { /* C frame without Lua frame? */
+ TValue *top = restorestack(L, -nres);
+ if (frame < top) { /* Frame reached? */
+ if (errcode) {
+ L->cframe = cframe_prev(cf);
+ L->base = frame+1;
+ unwindstack(L, top);
+ }
+ return cf;
+ }
+ }
+ if (frame <= tvref(L->stack))
+ break;
+ switch (frame_typep(frame)) {
+ case FRAME_LUA: /* Lua frame. */
+ case FRAME_LUAP:
+ frame = frame_prevl(frame);
+ break;
+ case FRAME_C: /* C frame. */
+#if LJ_HASFFI
+ unwind_c:
+#endif
+#if LJ_UNWIND_EXT
+ if (errcode) {
+ L->cframe = cframe_prev(cf);
+ L->base = frame_prevd(frame) + 1;
+ unwindstack(L, frame);
+ } else if (cf != stopcf) {
+ cf = cframe_prev(cf);
+ frame = frame_prevd(frame);
+ break;
+ }
+ return NULL; /* Continue unwinding. */
+#else
+ UNUSED(stopcf);
+ cf = cframe_prev(cf);
+ frame = frame_prevd(frame);
+ break;
+#endif
+ case FRAME_CP: /* Protected C frame. */
+ if (cframe_canyield(cf)) { /* Resume? */
+ if (errcode) {
+ L->cframe = NULL;
+ L->status = (uint8_t)errcode;
+ }
+ return cf;
+ }
+ if (errcode) {
+ L->cframe = cframe_prev(cf);
+ L->base = frame_prevd(frame) + 1;
+ unwindstack(L, frame);
+ }
+ return cf;
+ case FRAME_CONT: /* Continuation frame. */
+#if LJ_HASFFI
+ if ((frame-1)->u32.lo == LJ_CONT_FFI_CALLBACK)
+ goto unwind_c;
+#endif
+ case FRAME_VARG: /* Vararg frame. */
+ frame = frame_prevd(frame);
+ break;
+ case FRAME_PCALL: /* FF pcall() frame. */
+ case FRAME_PCALLH: /* FF pcall() frame inside hook. */
+ if (errcode) {
+ if (errcode == LUA_YIELD) {
+ frame = frame_prevd(frame);
+ break;
+ }
+ if (frame_typep(frame) == FRAME_PCALL)
+ hook_leave(G(L));
+ L->cframe = cf;
+ L->base = frame_prevd(frame) + 1;
+ unwindstack(L, L->base);
+ }
+ return (void *)((intptr_t)cf | CFRAME_UNWIND_FF);
+ }
+ }
+ /* No C frame. */
+ if (errcode) {
+ L->cframe = NULL;
+ L->base = tvref(L->stack)+1;
+ unwindstack(L, L->base);
+ if (G(L)->panic)
+ G(L)->panic(L);
+ exit(EXIT_FAILURE);
+ }
+ return L; /* Anything non-NULL will do. */
+}
+
+/* -- External frame unwinding -------------------------------------------- */
+
+#if defined(__GNUC__) && !defined(LUAJIT_NO_UNWIND)
+
+/*
+** We have to use our own definitions instead of the mandatory (!) unwind.h,
+** since various OS, distros and compilers mess up the header installation.
+*/
+
+typedef struct _Unwind_Exception
+{
+ uint64_t exclass;
+ void (*excleanup)(int, struct _Unwind_Exception *);
+ uintptr_t p1, p2;
+} __attribute__((__aligned__)) _Unwind_Exception;
+
+typedef struct _Unwind_Context _Unwind_Context;
+
+#define _URC_OK 0
+#define _URC_FATAL_PHASE1_ERROR 3
+#define _URC_HANDLER_FOUND 6
+#define _URC_INSTALL_CONTEXT 7
+#define _URC_CONTINUE_UNWIND 8
+#define _URC_FAILURE 9
+
+#if !LJ_TARGET_ARM
+
+extern uintptr_t _Unwind_GetCFA(_Unwind_Context *);
+extern void _Unwind_SetGR(_Unwind_Context *, int, uintptr_t);
+extern void _Unwind_SetIP(_Unwind_Context *, uintptr_t);
+extern void _Unwind_DeleteException(_Unwind_Exception *);
+extern int _Unwind_RaiseException(_Unwind_Exception *);
+
+#define _UA_SEARCH_PHASE 1
+#define _UA_CLEANUP_PHASE 2
+#define _UA_HANDLER_FRAME 4
+#define _UA_FORCE_UNWIND 8
+
+#define LJ_UEXCLASS 0x4c55414a49543200ULL /* LUAJIT2\0 */
+#define LJ_UEXCLASS_MAKE(c) (LJ_UEXCLASS | (uint64_t)(c))
+#define LJ_UEXCLASS_CHECK(cl) (((cl) ^ LJ_UEXCLASS) <= 0xff)
+#define LJ_UEXCLASS_ERRCODE(cl) ((int)((cl) & 0xff))
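As a quick sanity check of the macros just above (editorial illustration only, not part of the patch; uses the LJ_UEXCLASS_* definitions from this file and assumes the Lua 5.1 value LUA_ERRRUN == 2), the error code round-trips through the 64-bit exception class:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
      uint64_t cl = LJ_UEXCLASS_MAKE(2);     /* 2 == LUA_ERRRUN in Lua 5.1 */
      assert(LJ_UEXCLASS_CHECK(cl));         /* tag matches "LUAJIT2\0" */
      assert(LJ_UEXCLASS_ERRCODE(cl) == 2);  /* low byte carries the code */
      return 0;
    }
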
+
+/* DWARF2 personality handler referenced from interpreter .eh_frame. */
+LJ_FUNCA int lj_err_unwind_dwarf(int version, int actions,
+ uint64_t uexclass, _Unwind_Exception *uex, _Unwind_Context *ctx)
+{
+ void *cf;
+ lua_State *L;
+ if (version != 1)
+ return _URC_FATAL_PHASE1_ERROR;
+ UNUSED(uexclass);
+ cf = (void *)_Unwind_GetCFA(ctx);
+ L = cframe_L(cf);
+ if ((actions & _UA_SEARCH_PHASE)) {
+#if LJ_UNWIND_EXT
+ if (err_unwind(L, cf, 0) == NULL)
+ return _URC_CONTINUE_UNWIND;
+#endif
+ if (!LJ_UEXCLASS_CHECK(uexclass)) {
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
+ }
+ return _URC_HANDLER_FOUND;
+ }
+ if ((actions & _UA_CLEANUP_PHASE)) {
+ int errcode;
+ if (LJ_UEXCLASS_CHECK(uexclass)) {
+ errcode = LJ_UEXCLASS_ERRCODE(uexclass);
+ } else {
+ if ((actions & _UA_HANDLER_FRAME))
+ _Unwind_DeleteException(uex);
+ errcode = LUA_ERRRUN;
+ }
+#if LJ_UNWIND_EXT
+ cf = err_unwind(L, cf, errcode);
+ if ((actions & _UA_FORCE_UNWIND)) {
+ return _URC_CONTINUE_UNWIND;
+ } else if (cf) {
+ _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode);
+ _Unwind_SetIP(ctx, (uintptr_t)(cframe_unwind_ff(cf) ?
+ lj_vm_unwind_ff_eh :
+ lj_vm_unwind_c_eh));
+ return _URC_INSTALL_CONTEXT;
+ }
+#if LJ_TARGET_X86ORX64
+ else if ((actions & _UA_HANDLER_FRAME)) {
+ /* Workaround for ancient libgcc bug. Still present in RHEL 5.5. :-/
+ ** Real fix: http://gcc.gnu.org/viewcvs/trunk/gcc/unwind-dw2.c?r1=121165&r2=124837&pathrev=153877&diff_format=h
+ */
+ _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode);
+ _Unwind_SetIP(ctx, (uintptr_t)lj_vm_unwind_rethrow);
+ return _URC_INSTALL_CONTEXT;
+ }
+#endif
+#else
+ /* This is not the proper way to escape from the unwinder. We get away with
+ ** it on non-x64 because the interpreter restores all callee-saved regs.
+ */
+ lj_err_throw(L, errcode);
+#endif
+ }
+ return _URC_CONTINUE_UNWIND;
+}
+
+#if LJ_UNWIND_EXT
+#if LJ_TARGET_OSX || defined(__OpenBSD__)
+/* Sorry, no thread safety for OSX. Complain to Apple, not me. */
+static _Unwind_Exception static_uex;
+#else
+static __thread _Unwind_Exception static_uex;
+#endif
+
+/* Raise DWARF2 exception. */
+static void err_raise_ext(int errcode)
+{
+ static_uex.exclass = LJ_UEXCLASS_MAKE(errcode);
+ static_uex.excleanup = NULL;
+ _Unwind_RaiseException(&static_uex);
+}
+#endif
+
+#else
+
+extern void _Unwind_DeleteException(void *);
+extern int __gnu_unwind_frame (void *, _Unwind_Context *);
+extern int _Unwind_VRS_Set(_Unwind_Context *, int, uint32_t, int, void *);
+extern int _Unwind_VRS_Get(_Unwind_Context *, int, uint32_t, int, void *);
+
+static inline uint32_t _Unwind_GetGR(_Unwind_Context *ctx, int r)
+{
+ uint32_t v;
+ _Unwind_VRS_Get(ctx, 0, r, 0, &v);
+ return v;
+}
+
+static inline void _Unwind_SetGR(_Unwind_Context *ctx, int r, uint32_t v)
+{
+ _Unwind_VRS_Set(ctx, 0, r, 0, &v);
+}
+
+#define _US_VIRTUAL_UNWIND_FRAME 0
+#define _US_UNWIND_FRAME_STARTING 1
+#define _US_ACTION_MASK 3
+#define _US_FORCE_UNWIND 8
+
+/* ARM unwinder personality handler referenced from interpreter .ARM.extab. */
+LJ_FUNCA int lj_err_unwind_arm(int state, void *ucb, _Unwind_Context *ctx)
+{
+ void *cf = (void *)_Unwind_GetGR(ctx, 13);
+ lua_State *L = cframe_L(cf);
+ if ((state & _US_ACTION_MASK) == _US_VIRTUAL_UNWIND_FRAME) {
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
+ return _URC_HANDLER_FOUND;
+ }
+ if ((state&(_US_ACTION_MASK|_US_FORCE_UNWIND)) == _US_UNWIND_FRAME_STARTING) {
+ _Unwind_DeleteException(ucb);
+ _Unwind_SetGR(ctx, 15, (uint32_t)(void *)lj_err_throw);
+ _Unwind_SetGR(ctx, 0, (uint32_t)L);
+ _Unwind_SetGR(ctx, 1, (uint32_t)LUA_ERRRUN);
+ return _URC_INSTALL_CONTEXT;
+ }
+ if (__gnu_unwind_frame(ucb, ctx) != _URC_OK)
+ return _URC_FAILURE;
+ return _URC_CONTINUE_UNWIND;
+}
+
+#endif
+
+#elif LJ_TARGET_X64 && LJ_TARGET_WINDOWS
+
+/*
+** Someone in Redmond owes me several days of my life. A lot of this is
+** undocumented or just plain wrong on MSDN. Some of it can be gathered
+** from 3rd party docs or must be found by trial-and-error. They really
+** don't want you to write your own language-specific exception handler
+** or to interact gracefully with MSVC. :-(
+**
+** Apparently MSVC doesn't call C++ destructors for foreign exceptions
+** unless you compile your C++ code with /EHa. Unfortunately this means
+** catch (...) also catches things like access violations. The use of
+** _set_se_translator doesn't really help, because it requires /EHa, too.
+*/
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+/* Taken from: http://www.nynaeve.net/?p=99 */
+typedef struct UndocumentedDispatcherContext {
+ ULONG64 ControlPc;
+ ULONG64 ImageBase;
+ PRUNTIME_FUNCTION FunctionEntry;
+ ULONG64 EstablisherFrame;
+ ULONG64 TargetIp;
+ PCONTEXT ContextRecord;
+ PEXCEPTION_ROUTINE LanguageHandler;
+ PVOID HandlerData;
+ PUNWIND_HISTORY_TABLE HistoryTable;
+ ULONG ScopeIndex;
+ ULONG Fill0;
+} UndocumentedDispatcherContext;
+
+#ifdef _MSC_VER
+/* Another wild guess. */
+extern __DestructExceptionObject(EXCEPTION_RECORD *rec, int nothrow);
+#endif
+
+#define LJ_MSVC_EXCODE ((DWORD)0xe06d7363)
+
+#define LJ_EXCODE ((DWORD)0xe24c4a00)
+#define LJ_EXCODE_MAKE(c) (LJ_EXCODE | (DWORD)(c))
+#define LJ_EXCODE_CHECK(cl) (((cl) ^ LJ_EXCODE) <= 0xff)
+#define LJ_EXCODE_ERRCODE(cl) ((int)((cl) & 0xff))
+
+/* Win64 exception handler for interpreter frame. */
+LJ_FUNCA EXCEPTION_DISPOSITION lj_err_unwind_win64(EXCEPTION_RECORD *rec,
+ void *cf, CONTEXT *ctx, UndocumentedDispatcherContext *dispatch)
+{
+ lua_State *L = cframe_L(cf);
+ int errcode = LJ_EXCODE_CHECK(rec->ExceptionCode) ?
+ LJ_EXCODE_ERRCODE(rec->ExceptionCode) : LUA_ERRRUN;
+ if ((rec->ExceptionFlags & 6)) { /* EH_UNWINDING|EH_EXIT_UNWIND */
+ /* Unwind internal frames. */
+ err_unwind(L, cf, errcode);
+ } else {
+ void *cf2 = err_unwind(L, cf, 0);
+ if (cf2) { /* We catch it, so start unwinding the upper frames. */
+ if (rec->ExceptionCode == LJ_MSVC_EXCODE) {
+#ifdef _MSC_VER
+ __DestructExceptionObject(rec, 1);
+#endif
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
+ } else if (!LJ_EXCODE_CHECK(rec->ExceptionCode)) {
+ /* Don't catch access violations etc. */
+ return ExceptionContinueSearch;
+ }
+ /* Unwind the stack and call all handlers for all lower C frames
+ ** (including ourselves) again with EH_UNWINDING set. Then set
+ ** rsp = cf, rax = errcode and jump to the specified target.
+ */
+ RtlUnwindEx(cf, (void *)((cframe_unwind_ff(cf2) && errcode != LUA_YIELD) ?
+ lj_vm_unwind_ff_eh :
+ lj_vm_unwind_c_eh),
+ rec, (void *)errcode, ctx, dispatch->HistoryTable);
+ /* RtlUnwindEx should never return. */
+ }
+ }
+ return ExceptionContinueSearch;
+}
+
+/* Raise Windows exception. */
+static void err_raise_ext(int errcode)
+{
+ RaiseException(LJ_EXCODE_MAKE(errcode), 1 /* EH_NONCONTINUABLE */, 0, NULL);
+}
+
+#endif
+
+/* -- Error handling ------------------------------------------------------ */
+
+/* Throw error. Find catch frame, unwind stack and continue. */
+LJ_NOINLINE void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode)
+{
+ global_State *g = G(L);
+ lj_trace_abort(g);
+ setgcrefnull(g->jit_L);
+ L->status = 0;
+#if LJ_UNWIND_EXT
+ err_raise_ext(errcode);
+ /*
+ ** A return from this function signals a corrupt C stack that cannot be
+ ** unwound. We have no choice but to call the panic function and exit.
+ **
+ ** Usually this is caused by a C function without unwind information.
+ ** This should never happen on x64, but may happen if you've manually
+ ** enabled LUAJIT_UNWIND_EXTERNAL and forgot to recompile *every*
+ ** non-C++ file with -funwind-tables.
+ */
+ if (G(L)->panic)
+ G(L)->panic(L);
+#else
+ {
+ void *cf = err_unwind(L, NULL, errcode);
+ if (cframe_unwind_ff(cf))
+ lj_vm_unwind_ff(cframe_raw(cf));
+ else
+ lj_vm_unwind_c(cframe_raw(cf), errcode);
+ }
+#endif
+ exit(EXIT_FAILURE);
+}
+
+/* Return string object for error message. */
+LJ_NOINLINE GCstr *lj_err_str(lua_State *L, ErrMsg em)
+{
+ return lj_str_newz(L, err2msg(em));
+}
+
+/* Out-of-memory error. */
+LJ_NOINLINE void lj_err_mem(lua_State *L)
+{
+ if (L->status == LUA_ERRERR+1) /* Don't touch the stack during lua_open. */
+ lj_vm_unwind_c(L->cframe, LUA_ERRMEM);
+ L->top = L->base;
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRMEM));
+ lj_err_throw(L, LUA_ERRMEM);
+}
+
+/* Find error function for runtime errors. Requires an extra stack traversal. */
+static ptrdiff_t finderrfunc(lua_State *L)
+{
+ cTValue *frame = L->base-1, *bot = tvref(L->stack);
+ void *cf = L->cframe;
+ while (frame > bot) {
+ lua_assert(cf != NULL);
+ while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */
+ if (frame >= restorestack(L, -cframe_nres(cf)))
+ break;
+ if (cframe_errfunc(cf) >= 0) /* Error handler not inherited (-1)? */
+ return cframe_errfunc(cf);
+ cf = cframe_prev(cf); /* Else unwind cframe and continue searching. */
+ if (cf == NULL)
+ return 0;
+ }
+ switch (frame_typep(frame)) {
+ case FRAME_LUA:
+ case FRAME_LUAP:
+ frame = frame_prevl(frame);
+ break;
+ case FRAME_C:
+ cf = cframe_prev(cf);
+ /* fallthrough */
+ case FRAME_CONT:
+#if LJ_HASFFI
+ if ((frame-1)->u32.lo == LJ_CONT_FFI_CALLBACK)
+ cf = cframe_prev(cf);
+#endif
+ case FRAME_VARG:
+ frame = frame_prevd(frame);
+ break;
+ case FRAME_CP:
+ if (cframe_canyield(cf)) return 0;
+ if (cframe_errfunc(cf) >= 0)
+ return cframe_errfunc(cf);
+ frame = frame_prevd(frame);
+ break;
+ case FRAME_PCALL:
+ case FRAME_PCALLH:
+ if (frame_ftsz(frame) >= (ptrdiff_t)(2*sizeof(TValue))) /* xpcall? */
+ return savestack(L, frame-1); /* Point to xpcall's errorfunc. */
+ return 0;
+ default:
+ lua_assert(0);
+ return 0;
+ }
+ }
+ return 0;
+}
+
+/* Runtime error. */
+LJ_NOINLINE void lj_err_run(lua_State *L)
+{
+ ptrdiff_t ef = finderrfunc(L);
+ if (ef) {
+ TValue *errfunc = restorestack(L, ef);
+ TValue *top = L->top;
+ lj_trace_abort(G(L));
+ if (!tvisfunc(errfunc) || L->status == LUA_ERRERR) {
+ setstrV(L, top-1, lj_err_str(L, LJ_ERR_ERRERR));
+ lj_err_throw(L, LUA_ERRERR);
+ }
+ L->status = LUA_ERRERR;
+ copyTV(L, top, top-1);
+ copyTV(L, top-1, errfunc);
+ L->top = top+1;
+ lj_vm_call(L, top, 1+1); /* Stack: |errfunc|msg| -> |msg| */
+ }
+ lj_err_throw(L, LUA_ERRRUN);
+}
+
+/* Formatted runtime error message. */
+LJ_NORET LJ_NOINLINE static void err_msgv(lua_State *L, ErrMsg em, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, em);
+ if (curr_funcisL(L)) L->top = curr_topL(L);
+ msg = lj_str_pushvf(L, err2msg(em), argp);
+ va_end(argp);
+ lj_debug_addloc(L, msg, L->base-1, NULL);
+ lj_err_run(L);
+}
+
+/* Non-vararg variant for better calling conventions. */
+LJ_NOINLINE void lj_err_msg(lua_State *L, ErrMsg em)
+{
+ err_msgv(L, em);
+}
+
+/* Lexer error. */
+LJ_NOINLINE void lj_err_lex(lua_State *L, GCstr *src, const char *tok,
+ BCLine line, ErrMsg em, va_list argp)
+{
+ char buff[LUA_IDSIZE];
+ const char *msg;
+ lj_debug_shortname(buff, src);
+ msg = lj_str_pushvf(L, err2msg(em), argp);
+ msg = lj_str_pushf(L, "%s:%d: %s", buff, line, msg);
+ if (tok)
+ lj_str_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tok);
+ lj_err_throw(L, LUA_ERRSYNTAX);
+}
+
+/* Typecheck error for operands. */
+LJ_NOINLINE void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm)
+{
+ const char *tname = typename(o);
+ const char *opname = err2msg(opm);
+ if (curr_funcisL(L)) {
+ GCproto *pt = curr_proto(L);
+ const BCIns *pc = cframe_Lpc(L) - 1;
+ const char *oname = NULL;
+ const char *kind = lj_debug_slotname(pt, pc, (BCReg)(o-L->base), &oname);
+ if (kind)
+ err_msgv(L, LJ_ERR_BADOPRT, opname, kind, oname, tname);
+ }
+ err_msgv(L, LJ_ERR_BADOPRV, opname, tname);
+}
+
+/* Typecheck error for ordered comparisons. */
+LJ_NOINLINE void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2)
+{
+ const char *t1 = typename(o1);
+ const char *t2 = typename(o2);
+ err_msgv(L, t1 == t2 ? LJ_ERR_BADCMPV : LJ_ERR_BADCMPT, t1, t2);
+ /* This assumes the two "boolean" entries are commoned by the C compiler. */
+}
+
+/* Typecheck error for __call. */
+LJ_NOINLINE void lj_err_optype_call(lua_State *L, TValue *o)
+{
+ /* Gross hack if lua_[p]call or pcall/xpcall fail for a non-callable object:
+ ** L->base still points to the caller. So add a dummy frame with L instead
+ ** of a function. See lua_getstack().
+ */
+ const BCIns *pc = cframe_Lpc(L);
+ if (((ptrdiff_t)pc & FRAME_TYPE) != FRAME_LUA) {
+ const char *tname = typename(o);
+ setframe_pc(o, pc);
+ setframe_gc(o, obj2gco(L));
+ L->top = L->base = o+1;
+ err_msgv(L, LJ_ERR_BADCALL, tname);
+ }
+ lj_err_optype(L, o, LJ_ERR_OPCALL);
+}
+
+/* Error in context of caller. */
+LJ_NOINLINE void lj_err_callermsg(lua_State *L, const char *msg)
+{
+ TValue *frame = L->base-1;
+ TValue *pframe = NULL;
+ if (frame_islua(frame)) {
+ pframe = frame_prevl(frame);
+ } else if (frame_iscont(frame)) {
+#if LJ_HASFFI
+ if ((frame-1)->u32.lo == LJ_CONT_FFI_CALLBACK) {
+ pframe = frame;
+ frame = NULL;
+ } else
+#endif
+ {
+ pframe = frame_prevd(frame);
+#if LJ_HASFFI
+ /* Remove frame for FFI metamethods. */
+ if (frame_func(frame)->c.ffid >= FF_ffi_meta___index &&
+ frame_func(frame)->c.ffid <= FF_ffi_meta___tostring) {
+ L->base = pframe+1;
+ L->top = frame;
+ setcframe_pc(cframe_raw(L->cframe), frame_contpc(frame));
+ }
+#endif
+ }
+ }
+ lj_debug_addloc(L, msg, pframe, frame);
+ lj_err_run(L);
+}
+
+/* Formatted error in context of caller. */
+LJ_NOINLINE void lj_err_callerv(lua_State *L, ErrMsg em, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, em);
+ msg = lj_str_pushvf(L, err2msg(em), argp);
+ va_end(argp);
+ lj_err_callermsg(L, msg);
+}
+
+/* Error in context of caller. */
+LJ_NOINLINE void lj_err_caller(lua_State *L, ErrMsg em)
+{
+ lj_err_callermsg(L, err2msg(em));
+}
+
+/* Argument error message. */
+LJ_NORET LJ_NOINLINE static void err_argmsg(lua_State *L, int narg,
+ const char *msg)
+{
+ const char *fname = "?";
+ const char *ftype = lj_debug_funcname(L, L->base - 1, &fname);
+ if (narg < 0 && narg > LUA_REGISTRYINDEX)
+ narg = (int)(L->top - L->base) + narg + 1;
+ if (ftype && ftype[3] == 'h' && --narg == 0) /* Check for "method". */
+ msg = lj_str_pushf(L, err2msg(LJ_ERR_BADSELF), fname, msg);
+ else
+ msg = lj_str_pushf(L, err2msg(LJ_ERR_BADARG), narg, fname, msg);
+ lj_err_callermsg(L, msg);
+}
+
+/* Formatted argument error. */
+LJ_NOINLINE void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, em);
+ msg = lj_str_pushvf(L, err2msg(em), argp);
+ va_end(argp);
+ err_argmsg(L, narg, msg);
+}
+
+/* Argument error. */
+LJ_NOINLINE void lj_err_arg(lua_State *L, int narg, ErrMsg em)
+{
+ err_argmsg(L, narg, err2msg(em));
+}
+
+/* Typecheck error for arguments. */
+LJ_NOINLINE void lj_err_argtype(lua_State *L, int narg, const char *xname)
+{
+ TValue *o = narg < 0 ? L->top + narg : L->base + narg-1;
+ const char *tname = o < L->top ? typename(o) : lj_obj_typename[0];
+ const char *msg = lj_str_pushf(L, err2msg(LJ_ERR_BADTYPE), xname, tname);
+ err_argmsg(L, narg, msg);
+}
+
+/* Typecheck error for arguments. */
+LJ_NOINLINE void lj_err_argt(lua_State *L, int narg, int tt)
+{
+ lj_err_argtype(L, narg, lj_obj_typename[tt+1]);
+}
+
+/* -- Public error handling API ------------------------------------------- */
+
+LUA_API lua_CFunction lua_atpanic(lua_State *L, lua_CFunction panicf)
+{
+ lua_CFunction old = G(L)->panic;
+ G(L)->panic = panicf;
+ return old;
+}
+
+/* Forwarders for the public API (C calling convention and no LJ_NORET). */
+LUA_API int lua_error(lua_State *L)
+{
+ lj_err_run(L);
+ return 0; /* unreachable */
+}
+
+LUALIB_API int luaL_argerror(lua_State *L, int narg, const char *msg)
+{
+ err_argmsg(L, narg, msg);
+ return 0; /* unreachable */
+}
+
+LUALIB_API int luaL_typerror(lua_State *L, int narg, const char *xname)
+{
+ lj_err_argtype(L, narg, xname);
+ return 0; /* unreachable */
+}
+
+LUALIB_API void luaL_where(lua_State *L, int level)
+{
+ int size;
+ cTValue *frame = lj_debug_frame(L, level, &size);
+ lj_debug_addloc(L, "", frame, size ? frame+size : NULL);
+}
+
+LUALIB_API int luaL_error(lua_State *L, const char *fmt, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, fmt);
+ msg = lj_str_pushvf(L, fmt, argp);
+ va_end(argp);
+ lj_err_callermsg(L, msg);
+ return 0; /* unreachable */
+}
+
diff --git a/src/LuaJIT/src/lj_err.h b/src/LuaJIT/src/lj_err.h
new file mode 100644
index 000000000..064f4a486
--- /dev/null
+++ b/src/LuaJIT/src/lj_err.h
@@ -0,0 +1,41 @@
+/*
+** Error handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_ERR_H
+#define _LJ_ERR_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+
+typedef enum {
+#define ERRDEF(name, msg) \
+ LJ_ERR_##name, LJ_ERR_##name##_ = LJ_ERR_##name + sizeof(msg)-1,
+#include "lj_errmsg.h"
+ LJ_ERR__MAX
+} ErrMsg;
+
+LJ_DATA const char *lj_err_allmsg;
+#define err2msg(em) (lj_err_allmsg+(int)(em))
+
+LJ_FUNC GCstr *lj_err_str(lua_State *L, ErrMsg em);
+LJ_FUNCA_NORET void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode);
+LJ_FUNC_NORET void lj_err_mem(lua_State *L);
+LJ_FUNC_NORET void lj_err_run(lua_State *L);
+LJ_FUNC_NORET void lj_err_msg(lua_State *L, ErrMsg em);
+LJ_FUNC_NORET void lj_err_lex(lua_State *L, GCstr *src, const char *tok,
+ BCLine line, ErrMsg em, va_list argp);
+LJ_FUNC_NORET void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm);
+LJ_FUNC_NORET void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2);
+LJ_FUNC_NORET void lj_err_optype_call(lua_State *L, TValue *o);
+LJ_FUNC_NORET void lj_err_callermsg(lua_State *L, const char *msg);
+LJ_FUNC_NORET void lj_err_callerv(lua_State *L, ErrMsg em, ...);
+LJ_FUNC_NORET void lj_err_caller(lua_State *L, ErrMsg em);
+LJ_FUNC_NORET void lj_err_arg(lua_State *L, int narg, ErrMsg em);
+LJ_FUNC_NORET void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...);
+LJ_FUNC_NORET void lj_err_argtype(lua_State *L, int narg, const char *xname);
+LJ_FUNC_NORET void lj_err_argt(lua_State *L, int narg, int tt);
+
+#endif
diff --git a/src/LuaJIT/src/lj_errmsg.h b/src/LuaJIT/src/lj_errmsg.h
new file mode 100644
index 000000000..d1db43865
--- /dev/null
+++ b/src/LuaJIT/src/lj_errmsg.h
@@ -0,0 +1,174 @@
+/*
+** VM error messages.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* This file may be included multiple times with different ERRDEF macros. */
+
+/* Basic error handling. */
+ERRDEF(ERRMEM, "not enough memory")
+ERRDEF(ERRERR, "error in error handling")
+ERRDEF(ERRCPP, "C++ exception")
+
+/* Allocations. */
+ERRDEF(STROV, "string length overflow")
+ERRDEF(UDATAOV, "userdata length overflow")
+ERRDEF(STKOV, "stack overflow")
+ERRDEF(STKOVM, "stack overflow (%s)")
+ERRDEF(TABOV, "table overflow")
+
+/* Table indexing. */
+ERRDEF(NANIDX, "table index is NaN")
+ERRDEF(NILIDX, "table index is nil")
+ERRDEF(NEXTIDX, "invalid key to " LUA_QL("next"))
+
+/* Metamethod resolving. */
+ERRDEF(BADCALL, "attempt to call a %s value")
+ERRDEF(BADOPRT, "attempt to %s %s " LUA_QS " (a %s value)")
+ERRDEF(BADOPRV, "attempt to %s a %s value")
+ERRDEF(BADCMPT, "attempt to compare %s with %s")
+ERRDEF(BADCMPV, "attempt to compare two %s values")
+ERRDEF(GETLOOP, "loop in gettable")
+ERRDEF(SETLOOP, "loop in settable")
+ERRDEF(OPCALL, "call")
+ERRDEF(OPINDEX, "index")
+ERRDEF(OPARITH, "perform arithmetic on")
+ERRDEF(OPCAT, "concatenate")
+ERRDEF(OPLEN, "get length of")
+
+/* Type checks. */
+ERRDEF(BADSELF, "calling " LUA_QS " on bad self (%s)")
+ERRDEF(BADARG, "bad argument #%d to " LUA_QS " (%s)")
+ERRDEF(BADTYPE, "%s expected, got %s")
+ERRDEF(BADVAL, "invalid value")
+ERRDEF(NOVAL, "value expected")
+ERRDEF(NOCORO, "coroutine expected")
+ERRDEF(NOTABN, "nil or table expected")
+ERRDEF(NOFUNCL, "function or level expected")
+ERRDEF(NOSFT, "string/function/table expected")
+ERRDEF(NOPROXY, "boolean or proxy expected")
+ERRDEF(FORINIT, LUA_QL("for") " initial value must be a number")
+ERRDEF(FORLIM, LUA_QL("for") " limit must be a number")
+ERRDEF(FORSTEP, LUA_QL("for") " step must be a number")
+
+/* C API checks. */
+ERRDEF(NOENV, "no calling environment")
+ERRDEF(CYIELD, "attempt to yield across C-call boundary")
+ERRDEF(BADLU, "bad light userdata pointer")
+ERRDEF(NOGCMM, "bad action while in __gc metamethod")
+#if LJ_TARGET_WINDOWS
+ERRDEF(BADFPU, "bad FPU precision (use D3DCREATE_FPU_PRESERVE with DirectX)")
+#endif
+
+/* Standard library function errors. */
+ERRDEF(ASSERT, "assertion failed!")
+ERRDEF(PROTMT, "cannot change a protected metatable")
+ERRDEF(UNPACK, "too many results to unpack")
+ERRDEF(RDRSTR, "reader function must return a string")
+ERRDEF(PRTOSTR, LUA_QL("tostring") " must return a string to " LUA_QL("print"))
+ERRDEF(IDXRNG, "index out of range")
+ERRDEF(BASERNG, "base out of range")
+ERRDEF(LVLRNG, "level out of range")
+ERRDEF(INVLVL, "invalid level")
+ERRDEF(INVOPT, "invalid option")
+ERRDEF(INVOPTM, "invalid option " LUA_QS)
+ERRDEF(INVFMT, "invalid format")
+ERRDEF(SETFENV, LUA_QL("setfenv") " cannot change environment of given object")
+ERRDEF(CORUN, "cannot resume running coroutine")
+ERRDEF(CODEAD, "cannot resume dead coroutine")
+ERRDEF(COSUSP, "cannot resume non-suspended coroutine")
+ERRDEF(TABINS, "wrong number of arguments to " LUA_QL("insert"))
+ERRDEF(TABCAT, "invalid value (%s) at index %d in table for " LUA_QL("concat"))
+ERRDEF(TABSORT, "invalid order function for sorting")
+ERRDEF(IOCLFL, "attempt to use a closed file")
+ERRDEF(IOSTDCL, "standard file is closed")
+ERRDEF(OSUNIQF, "unable to generate a unique filename")
+ERRDEF(OSDATEF, "field " LUA_QS " missing in date table")
+ERRDEF(STRDUMP, "unable to dump given function")
+ERRDEF(STRSLC, "string slice too long")
+ERRDEF(STRPATB, "missing " LUA_QL("[") " after " LUA_QL("%f") " in pattern")
+ERRDEF(STRPATC, "invalid pattern capture")
+ERRDEF(STRPATE, "malformed pattern (ends with " LUA_QL("%") ")")
+ERRDEF(STRPATM, "malformed pattern (missing " LUA_QL("]") ")")
+ERRDEF(STRPATU, "unbalanced pattern")
+ERRDEF(STRCAPI, "invalid capture index")
+ERRDEF(STRCAPN, "too many captures")
+ERRDEF(STRCAPU, "unfinished capture")
+ERRDEF(STRFMTO, "invalid option " LUA_QL("%%%c") " to " LUA_QL("format"))
+ERRDEF(STRFMTR, "invalid format (repeated flags)")
+ERRDEF(STRFMTW, "invalid format (width or precision too long)")
+ERRDEF(STRGSRV, "invalid replacement value (a %s)")
+ERRDEF(BADMODN, "name conflict for module " LUA_QS)
+#if LJ_HASJIT
+ERRDEF(NOJIT, "JIT compiler disabled, CPU does not support SSE2")
+#elif defined(LJ_ARCH_NOJIT)
+ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)")
+#else
+ERRDEF(NOJIT, "JIT compiler permanently disabled by build option")
+#endif
+ERRDEF(JITOPT, "unknown or malformed optimization flag " LUA_QS)
+
+/* Lexer/parser errors. */
+ERRDEF(XNEAR, "%s near " LUA_QS)
+ERRDEF(XELEM, "lexical element too long")
+ERRDEF(XLINES, "chunk has too many lines")
+ERRDEF(XLEVELS, "chunk has too many syntax levels")
+ERRDEF(XNUMBER, "malformed number")
+ERRDEF(XLSTR, "unfinished long string")
+ERRDEF(XLCOM, "unfinished long comment")
+ERRDEF(XSTR, "unfinished string")
+ERRDEF(XESC, "invalid escape sequence")
+ERRDEF(XLDELIM, "invalid long string delimiter")
+ERRDEF(XTOKEN, LUA_QS " expected")
+ERRDEF(XJUMP, "control structure too long")
+ERRDEF(XSLOTS, "function or expression too complex")
+ERRDEF(XLIMC, "chunk has more than %d local variables")
+ERRDEF(XLIMM, "main function has more than %d %s")
+ERRDEF(XLIMF, "function at line %d has more than %d %s")
+ERRDEF(XMATCH, LUA_QS " expected (to close " LUA_QS " at line %d)")
+ERRDEF(XFIXUP, "function too long for return fixup")
+ERRDEF(XPARAM, " or " LUA_QL("...") " expected")
+ERRDEF(XAMBIG, "ambiguous syntax (function call x new statement)")
+ERRDEF(XFUNARG, "function arguments expected")
+ERRDEF(XSYMBOL, "unexpected symbol")
+ERRDEF(XDOTS, "cannot use " LUA_QL("...") " outside a vararg function")
+ERRDEF(XSYNTAX, "syntax error")
+ERRDEF(XBREAK, "no loop to break")
+ERRDEF(XFOR, LUA_QL("=") " or " LUA_QL("in") " expected")
+
+/* Bytecode reader errors. */
+ERRDEF(BCFMT, "cannot load incompatible bytecode")
+ERRDEF(BCBAD, "cannot load malformed bytecode")
+ERRDEF(BCHEAD, "attempt to load bytecode with extra header")
+
+#if LJ_HASFFI
+/* FFI errors. */
+ERRDEF(FFI_INVTYPE, "invalid C type")
+ERRDEF(FFI_INVSIZE, "size of C type is unknown or too large")
+ERRDEF(FFI_BADSCL, "bad storage class")
+ERRDEF(FFI_DECLSPEC, "declaration specifier expected")
+ERRDEF(FFI_BADTAG, "undeclared or implicit tag " LUA_QS)
+ERRDEF(FFI_REDEF, "attempt to redefine " LUA_QS)
+ERRDEF(FFI_INITOV, "too many initializers for " LUA_QS)
+ERRDEF(FFI_BADCONV, "cannot convert " LUA_QS " to " LUA_QS)
+ERRDEF(FFI_BADLEN, "attempt to get length of " LUA_QS)
+ERRDEF(FFI_BADCONCAT, "attempt to concatenate " LUA_QS " and " LUA_QS)
+ERRDEF(FFI_BADARITH, "attempt to perform arithmetic on " LUA_QS " and " LUA_QS)
+ERRDEF(FFI_BADCOMP, "attempt to compare " LUA_QS " with " LUA_QS)
+ERRDEF(FFI_BADCALL, LUA_QS " is not callable")
+ERRDEF(FFI_NUMARG, "wrong number of arguments for function call")
+ERRDEF(FFI_BADMEMBER, LUA_QS " has no member named " LUA_QS)
+ERRDEF(FFI_BADIDX, LUA_QS " cannot be indexed")
+ERRDEF(FFI_WRCONST, "attempt to write to constant location")
+ERRDEF(FFI_NODECL, "missing declaration for symbol " LUA_QS)
+ERRDEF(FFI_BADCBACK, "bad callback")
+ERRDEF(FFI_CBACKOV, "too many callbacks")
+ERRDEF(FFI_NYIPACKBIT, "NYI: packed bit fields")
+ERRDEF(FFI_NYICALL, "NYI: cannot call this C function (yet)")
+#endif
+
+#undef ERRDEF
+
+/* Detecting unused error messages:
+ awk -F, '/^ERRDEF/ { gsub(/ERRDEF./, ""); printf "grep -q LJ_ERR_%s *.[ch] || echo %s\n", $1, $1}' lj_errmsg.h | sh
+*/
diff --git a/src/LuaJIT/src/lj_ff.h b/src/LuaJIT/src/lj_ff.h
new file mode 100644
index 000000000..6473ec93c
--- /dev/null
+++ b/src/LuaJIT/src/lj_ff.h
@@ -0,0 +1,18 @@
+/*
+** Fast function IDs.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FF_H
+#define _LJ_FF_H
+
+/* Fast function ID. */
+typedef enum {
+ FF_LUA_ = FF_LUA, /* Lua function (must be 0). */
+ FF_C_ = FF_C, /* Regular C function (must be 1). */
+#define FFDEF(name) FF_##name,
+#include "lj_ffdef.h"
+ FF__MAX
+} FastFunc;
+
+#endif
diff --git a/src/LuaJIT/src/lj_ffrecord.c b/src/LuaJIT/src/lj_ffrecord.c
new file mode 100644
index 000000000..716226bc7
--- /dev/null
+++ b/src/LuaJIT/src/lj_ffrecord.c
@@ -0,0 +1,855 @@
+/*
+** Fast function call recorder.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_ffrecord_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_ff.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_record.h"
+#include "lj_ffrecord.h"
+#include "lj_crecord.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* -- Fast function recording handlers ------------------------------------ */
+
+/* Conventions for fast function call handlers:
+**
+** The argument slots start at J->base[0]. All of them are guaranteed to be
+** valid and type-specialized references. J->base[J->maxslot] is set to 0
+** as a sentinel. The runtime argument values start at rd->argv[0].
+**
+** In general fast functions should check for presence of all of their
+** arguments and for the correct argument types. Some simplifications
+** are allowed if the interpreter throws instead. But even if recording
+** is aborted, the generated IR must be consistent (no zero-refs).
+**
+** The number of results in rd->nres is set to 1. Handlers that return
+** a different number of results need to override it. A negative value
+** prevents return processing (e.g. for pending calls).
+**
+** Results need to be stored starting at J->base[0]. Return processing
+** moves them to the right slots later.
+**
+** The per-ffid auxiliary data is the value of the 2nd part of the
+** LJLIB_REC() annotation. This allows handling similar functionality
+** in a common handler.
+*/
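To make the conventions concrete, here is a hypothetical minimal handler (an editorial illustration, not part of the patch): it records a fast function that passes its first argument through unchanged, aborting the trace if the argument is missing.

    static void LJ_FASTCALL recff_identity_example(jit_State *J, RecordFFData *rd)
    {
      if (!J->base[0])                       /* argument slot empty? */
        lj_trace_err(J, LJ_TRERR_BADTYPE);   /* abort recording; IR stays consistent */
      /* J->base[0] already holds the single result; rd->nres defaults to 1. */
      UNUSED(rd);
    }
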
+
+/* Type of handler to record a fast function. */
+typedef void (LJ_FASTCALL *RecordFunc)(jit_State *J, RecordFFData *rd);
+
+/* Get runtime value of int argument. */
+static int32_t argv2int(jit_State *J, TValue *o)
+{
+ if (!tvisnumber(o) && !(tvisstr(o) && lj_str_tonumber(strV(o), o)))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ return tvisint(o) ? intV(o) : lj_num2int(numV(o));
+}
+
+/* Get runtime value of string argument. */
+static GCstr *argv2str(jit_State *J, TValue *o)
+{
+ if (LJ_LIKELY(tvisstr(o))) {
+ return strV(o);
+ } else {
+ GCstr *s;
+ if (!tvisnumber(o))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ if (tvisint(o))
+ s = lj_str_fromint(J->L, intV(o));
+ else
+ s = lj_str_fromnum(J->L, &o->n);
+ setstrV(J->L, o, s);
+ return s;
+ }
+}
+
+/* Return number of results wanted by caller. */
+static ptrdiff_t results_wanted(jit_State *J)
+{
+ TValue *frame = J->L->base-1;
+ if (frame_islua(frame))
+ return (ptrdiff_t)bc_b(frame_pc(frame)[-1]) - 1;
+ else
+ return -1;
+}
+
+/* Throw error for unsupported variant of fast function. */
+LJ_NORET static void recff_nyiu(jit_State *J)
+{
+ setfuncV(J->L, &J->errinfo, J->fn);
+ lj_trace_err_info(J, LJ_TRERR_NYIFFU);
+}
+
+/* Fallback handler for all fast functions that are not recorded (yet). */
+static void LJ_FASTCALL recff_nyi(jit_State *J, RecordFFData *rd)
+{
+ setfuncV(J->L, &J->errinfo, J->fn);
+ lj_trace_err_info(J, LJ_TRERR_NYIFF);
+ UNUSED(rd);
+}
+
+/* C functions can have arbitrary side-effects and are not recorded (yet). */
+static void LJ_FASTCALL recff_c(jit_State *J, RecordFFData *rd)
+{
+ setfuncV(J->L, &J->errinfo, J->fn);
+ lj_trace_err_info(J, LJ_TRERR_NYICF);
+ UNUSED(rd);
+}
+
+/* -- Base library fast functions ----------------------------------------- */
+
+static void LJ_FASTCALL recff_assert(jit_State *J, RecordFFData *rd)
+{
+ /* Arguments already specialized. The interpreter throws for nil/false. */
+ rd->nres = J->maxslot; /* Pass through all arguments. */
+}
+
+static void LJ_FASTCALL recff_type(jit_State *J, RecordFFData *rd)
+{
+ /* Arguments already specialized. Result is a constant string. Neat, huh? */
+ uint32_t t;
+ if (tvisnumber(&rd->argv[0]))
+ t = ~LJ_TNUMX;
+ else if (LJ_64 && tvislightud(&rd->argv[0]))
+ t = ~LJ_TLIGHTUD;
+ else
+ t = ~itype(&rd->argv[0]);
+ J->base[0] = lj_ir_kstr(J, strV(&J->fn->c.upvalue[t]));
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_getmetatable(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (tr) {
+ RecordIndex ix;
+ ix.tab = tr;
+ copyTV(J->L, &ix.tabv, &rd->argv[0]);
+ if (lj_record_mm_lookup(J, &ix, MM_metatable))
+ J->base[0] = ix.mobj;
+ else
+ J->base[0] = ix.mt;
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_setmetatable(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ TRef mt = J->base[1];
+ if (tref_istab(tr) && (tref_istab(mt) || (mt && tref_isnil(mt)))) {
+ TRef fref, mtref;
+ RecordIndex ix;
+ ix.tab = tr;
+ copyTV(J->L, &ix.tabv, &rd->argv[0]);
+ lj_record_mm_lookup(J, &ix, MM_metatable); /* Guard for no __metatable. */
+ fref = emitir(IRT(IR_FREF, IRT_P32), tr, IRFL_TAB_META);
+ mtref = tref_isnil(mt) ? lj_ir_knull(J, IRT_TAB) : mt;
+ emitir(IRT(IR_FSTORE, IRT_TAB), fref, mtref);
+ if (!tref_isnil(mt))
+ emitir(IRT(IR_TBAR, IRT_TAB), tr, 0);
+ J->base[0] = tr;
+ J->needsnap = 1;
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_rawget(jit_State *J, RecordFFData *rd)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0]; ix.key = J->base[1];
+ if (tref_istab(ix.tab) && ix.key) {
+ ix.val = 0; ix.idxchain = 0;
+ settabV(J->L, &ix.tabv, tabV(&rd->argv[0]));
+ copyTV(J->L, &ix.keyv, &rd->argv[1]);
+ J->base[0] = lj_record_idx(J, &ix);
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_rawset(jit_State *J, RecordFFData *rd)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0]; ix.key = J->base[1]; ix.val = J->base[2];
+ if (tref_istab(ix.tab) && ix.key && ix.val) {
+ ix.idxchain = 0;
+ settabV(J->L, &ix.tabv, tabV(&rd->argv[0]));
+ copyTV(J->L, &ix.keyv, &rd->argv[1]);
+ copyTV(J->L, &ix.valv, &rd->argv[2]);
+ lj_record_idx(J, &ix);
+ /* Pass through table at J->base[0] as result. */
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_rawequal(jit_State *J, RecordFFData *rd)
+{
+ TRef tra = J->base[0];
+ TRef trb = J->base[1];
+ if (tra && trb) {
+ int diff = lj_record_objcmp(J, tra, trb, &rd->argv[0], &rd->argv[1]);
+ J->base[0] = diff ? TREF_FALSE : TREF_TRUE;
+ } /* else: Interpreter will throw. */
+}
+
+/* Determine mode of select() call. */
+int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv)
+{
+ if (tref_isstr(tr) && *strVdata(tv) == '#') { /* select('#', ...) */
+ if (strV(tv)->len == 1) {
+ emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, strV(tv)));
+ } else {
+ TRef trptr = emitir(IRT(IR_STRREF, IRT_P32), tr, lj_ir_kint(J, 0));
+ TRef trchar = emitir(IRT(IR_XLOAD, IRT_U8), trptr, IRXLOAD_READONLY);
+ emitir(IRTG(IR_EQ, IRT_INT), trchar, lj_ir_kint(J, '#'));
+ }
+ return 0;
+ } else { /* select(n, ...) */
+ int32_t start = argv2int(J, tv);
+ if (start == 0) lj_trace_err(J, LJ_TRERR_BADTYPE); /* A bit misleading. */
+ return start;
+ }
+}
+
+static void LJ_FASTCALL recff_select(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (tr) {
+ ptrdiff_t start = lj_ffrecord_select_mode(J, tr, &rd->argv[0]);
+ if (start == 0) { /* select('#', ...) */
+ J->base[0] = lj_ir_kint(J, J->maxslot - 1);
+ } else if (tref_isk(tr)) { /* select(k, ...) */
+ ptrdiff_t n = (ptrdiff_t)J->maxslot;
+ if (start < 0) start += n;
+ else if (start > n) start = n;
+ rd->nres = n - start;
+ if (start >= 1) {
+ ptrdiff_t i;
+ for (i = 0; i < n - start; i++)
+ J->base[i] = J->base[start+i];
+ } /* else: Interpreter will throw. */
+ } else {
+ recff_nyiu(J);
+ }
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_tonumber(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ TRef base = J->base[1];
+ if (tr && base) {
+ base = lj_opt_narrow_toint(J, base);
+ if (!tref_isk(base) || IR(tref_ref(base))->i != 10)
+ recff_nyiu(J);
+ }
+ if (tref_isnumber_str(tr)) {
+ if (tref_isstr(tr)) {
+ TValue tmp;
+ if (!lj_str_tonum(strV(&rd->argv[0]), &tmp))
+ recff_nyiu(J); /* Would need an inverted STRTO for this case. */
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ }
+#if LJ_HASFFI
+ } else if (tref_iscdata(tr)) {
+ lj_crecord_tonumber(J, rd);
+ return;
+#endif
+ } else {
+ tr = TREF_NIL;
+ }
+ J->base[0] = tr;
+ UNUSED(rd);
+}
+
+static TValue *recff_metacall_cp(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ jit_State *J = (jit_State *)ud;
+ lj_record_tailcall(J, 0, 1);
+ UNUSED(L); UNUSED(dummy);
+ return NULL;
+}
+
+static int recff_metacall(jit_State *J, RecordFFData *rd, MMS mm)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0];
+ copyTV(J->L, &ix.tabv, &rd->argv[0]);
+ if (lj_record_mm_lookup(J, &ix, mm)) { /* Has metamethod? */
+ int errcode;
+ TValue argv0;
+ /* Temporarily insert metamethod below object. */
+ J->base[1] = J->base[0];
+ J->base[0] = ix.mobj;
+ copyTV(J->L, &argv0, &rd->argv[0]);
+ copyTV(J->L, &rd->argv[1], &rd->argv[0]);
+ copyTV(J->L, &rd->argv[0], &ix.mobjv);
+ /* Need to protect lj_record_tailcall because it may throw. */
+ errcode = lj_vm_cpcall(J->L, NULL, J, recff_metacall_cp);
+ /* Always undo Lua stack changes to avoid confusing the interpreter. */
+ copyTV(J->L, &rd->argv[0], &argv0);
+ if (errcode)
+ lj_err_throw(J->L, errcode); /* Propagate errors. */
+ rd->nres = -1; /* Pending call. */
+ return 1; /* Tailcalled to metamethod. */
+ }
+ return 0;
+}
+
+static void LJ_FASTCALL recff_tostring(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (tref_isstr(tr)) {
+ /* Ignore __tostring in the string base metatable. */
+ /* Pass on result in J->base[0]. */
+ } else if (!recff_metacall(J, rd, MM_tostring)) {
+ if (tref_isnumber(tr)) {
+ J->base[0] = emitir(IRT(IR_TOSTR, IRT_STR), tr, 0);
+ } else if (tref_ispri(tr)) {
+ J->base[0] = lj_ir_kstr(J, strV(&J->fn->c.upvalue[tref_type(tr)]));
+ } else {
+ recff_nyiu(J);
+ }
+ }
+}
+
+static void LJ_FASTCALL recff_ipairs_aux(jit_State *J, RecordFFData *rd)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0];
+ if (tref_istab(ix.tab)) {
+ if (!tvisnumber(&rd->argv[1])) /* No support for string coercion. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ setintV(&ix.keyv, numberVint(&rd->argv[1])+1);
+ settabV(J->L, &ix.tabv, tabV(&rd->argv[0]));
+ ix.val = 0; ix.idxchain = 0;
+ ix.key = lj_opt_narrow_toint(J, J->base[1]);
+ J->base[0] = ix.key = emitir(IRTI(IR_ADD), ix.key, lj_ir_kint(J, 1));
+ J->base[1] = lj_record_idx(J, &ix);
+ rd->nres = tref_isnil(J->base[1]) ? 0 : 2;
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_ipairs(jit_State *J, RecordFFData *rd)
+{
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ if (!recff_metacall(J, rd, MM_ipairs))
+#endif
+ {
+ TRef tab = J->base[0];
+ if (tref_istab(tab)) {
+ J->base[0] = lj_ir_kfunc(J, funcV(&J->fn->c.upvalue[0]));
+ J->base[1] = tab;
+ J->base[2] = lj_ir_kint(J, 0);
+ rd->nres = 3;
+ } /* else: Interpreter will throw. */
+ }
+}
+
+static void LJ_FASTCALL recff_pcall(jit_State *J, RecordFFData *rd)
+{
+ if (J->maxslot >= 1) {
+ lj_record_call(J, 0, J->maxslot - 1);
+ rd->nres = -1; /* Pending call. */
+ } /* else: Interpreter will throw. */
+}
+
+static TValue *recff_xpcall_cp(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ jit_State *J = (jit_State *)ud;
+ lj_record_call(J, 1, J->maxslot - 2);
+ UNUSED(L); UNUSED(dummy);
+ return NULL;
+}
+
+static void LJ_FASTCALL recff_xpcall(jit_State *J, RecordFFData *rd)
+{
+ if (J->maxslot >= 2) {
+ TValue argv0, argv1;
+ TRef tmp;
+ int errcode;
+ /* Swap function and traceback. */
+ tmp = J->base[0]; J->base[0] = J->base[1]; J->base[1] = tmp;
+ copyTV(J->L, &argv0, &rd->argv[0]);
+ copyTV(J->L, &argv1, &rd->argv[1]);
+ copyTV(J->L, &rd->argv[0], &argv1);
+ copyTV(J->L, &rd->argv[1], &argv0);
+ /* Need to protect lj_record_call because it may throw. */
+ errcode = lj_vm_cpcall(J->L, NULL, J, recff_xpcall_cp);
+ /* Always undo Lua stack swap to avoid confusing the interpreter. */
+ copyTV(J->L, &rd->argv[0], &argv0);
+ copyTV(J->L, &rd->argv[1], &argv1);
+ if (errcode)
+ lj_err_throw(J->L, errcode); /* Propagate errors. */
+ rd->nres = -1; /* Pending call. */
+ } /* else: Interpreter will throw. */
+}
+
+/* -- Math library fast functions ----------------------------------------- */
+
+static void LJ_FASTCALL recff_math_abs(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+ J->base[0] = emitir(IRTN(IR_ABS), tr, lj_ir_knum_abs(J));
+ UNUSED(rd);
+}
+
+/* Record rounding functions math.floor and math.ceil. */
+static void LJ_FASTCALL recff_math_round(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (!tref_isinteger(tr)) { /* Pass through integers unmodified. */
+ tr = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, tr), rd->data);
+ /* Result is integral (or NaN/Inf), but may not fit an int32_t. */
+ if (LJ_DUALNUM) { /* Try to narrow using a guarded conversion to int. */
+ lua_Number n = lj_vm_foldfpm(numberVnum(&rd->argv[0]), rd->data);
+ if (n == (lua_Number)lj_num2int(n))
+ tr = emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_CHECK);
+ }
+ J->base[0] = tr;
+ }
+}
+
+/* Record unary math.* functions, mapped to IR_FPMATH opcode. */
+static void LJ_FASTCALL recff_math_unary(jit_State *J, RecordFFData *rd)
+{
+ J->base[0] = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, J->base[0]), rd->data);
+}
+
+/* Record math.atan2. */
+static void LJ_FASTCALL recff_math_atan2(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+ TRef tr2 = lj_ir_tonum(J, J->base[1]);
+ J->base[0] = emitir(IRTN(IR_ATAN2), tr, tr2);
+ UNUSED(rd);
+}
+
+/* Record math.ldexp. */
+static void LJ_FASTCALL recff_math_ldexp(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+#if LJ_TARGET_X86ORX64
+ TRef tr2 = lj_ir_tonum(J, J->base[1]);
+#else
+ TRef tr2 = lj_opt_narrow_toint(J, J->base[1]);
+#endif
+ J->base[0] = emitir(IRTN(IR_LDEXP), tr, tr2);
+ UNUSED(rd);
+}
+
+/* Record math.asin, math.acos, math.atan. */
+static void LJ_FASTCALL recff_math_atrig(jit_State *J, RecordFFData *rd)
+{
+ TRef y = lj_ir_tonum(J, J->base[0]);
+ TRef x = lj_ir_knum_one(J);
+ uint32_t ffid = rd->data;
+ if (ffid != FF_math_atan) {
+ TRef tmp = emitir(IRTN(IR_MUL), y, y);
+ tmp = emitir(IRTN(IR_SUB), x, tmp);
+ tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_SQRT);
+ if (ffid == FF_math_asin) { x = tmp; } else { x = y; y = tmp; }
+ }
+ J->base[0] = emitir(IRTN(IR_ATAN2), y, x);
+}
+
+static void LJ_FASTCALL recff_math_htrig(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+ J->base[0] = emitir(IRTN(IR_CALLN), tr, rd->data);
+}
+
+static void LJ_FASTCALL recff_math_modf(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (tref_isinteger(tr)) {
+ J->base[0] = tr;
+ J->base[1] = lj_ir_kint(J, 0);
+ } else {
+ TRef trt;
+ tr = lj_ir_tonum(J, tr);
+ trt = emitir(IRTN(IR_FPMATH), tr, IRFPM_TRUNC);
+ J->base[0] = trt;
+ J->base[1] = emitir(IRTN(IR_SUB), tr, trt);
+ }
+ rd->nres = 2;
+}
+
+static void LJ_FASTCALL recff_math_degrad(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+ TRef trm = lj_ir_knum(J, numV(&J->fn->c.upvalue[0]));
+ J->base[0] = emitir(IRTN(IR_MUL), tr, trm);
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_math_pow(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+ if (!tref_isnumber_str(J->base[1]))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ J->base[0] = lj_opt_narrow_pow(J, tr, J->base[1], &rd->argv[1]);
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_math_minmax(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonumber(J, J->base[0]);
+ uint32_t op = rd->data;
+ BCReg i;
+ for (i = 1; J->base[i] != 0; i++) {
+ TRef tr2 = lj_ir_tonumber(J, J->base[i]);
+ IRType t = IRT_INT;
+ if (!(tref_isinteger(tr) && tref_isinteger(tr2))) {
+ if (tref_isinteger(tr)) tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
+ if (tref_isinteger(tr2)) tr2 = emitir(IRTN(IR_CONV), tr2, IRCONV_NUM_INT);
+ t = IRT_NUM;
+ }
+ tr = emitir(IRT(op, t), tr, tr2);
+ }
+ J->base[0] = tr;
+}
+
+static void LJ_FASTCALL recff_math_random(jit_State *J, RecordFFData *rd)
+{
+ GCudata *ud = udataV(&J->fn->c.upvalue[0]);
+ TRef tr, one;
+ lj_ir_kgc(J, obj2gco(ud), IRT_UDATA); /* Prevent collection. */
+ tr = lj_ir_call(J, IRCALL_lj_math_random_step, lj_ir_kptr(J, uddata(ud)));
+ one = lj_ir_knum_one(J);
+ tr = emitir(IRTN(IR_SUB), tr, one);
+ if (J->base[0]) {
+ TRef tr1 = lj_ir_tonum(J, J->base[0]);
+ if (J->base[1]) { /* d = floor(d*(r2-r1+1.0)) + r1 */
+ TRef tr2 = lj_ir_tonum(J, J->base[1]);
+ tr2 = emitir(IRTN(IR_SUB), tr2, tr1);
+ tr2 = emitir(IRTN(IR_ADD), tr2, one);
+ tr = emitir(IRTN(IR_MUL), tr, tr2);
+ tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR);
+ tr = emitir(IRTN(IR_ADD), tr, tr1);
+ } else { /* d = floor(d*r1) + 1.0 */
+ tr = emitir(IRTN(IR_MUL), tr, tr1);
+ tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR);
+ tr = emitir(IRTN(IR_ADD), tr, one);
+ }
+ }
+ J->base[0] = tr;
+ UNUSED(rd);
+}
+
+/* -- Bit library fast functions ------------------------------------------ */
+
+/* Record unary bit.tobit, bit.bnot, bit.bswap. */
+static void LJ_FASTCALL recff_bit_unary(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_opt_narrow_tobit(J, J->base[0]);
+ J->base[0] = (rd->data == IR_TOBIT) ? tr : emitir(IRTI(rd->data), tr, 0);
+}
+
+/* Record N-ary bit.band, bit.bor, bit.bxor. */
+static void LJ_FASTCALL recff_bit_nary(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_opt_narrow_tobit(J, J->base[0]);
+ uint32_t op = rd->data;
+ BCReg i;
+ for (i = 1; J->base[i] != 0; i++)
+ tr = emitir(IRTI(op), tr, lj_opt_narrow_tobit(J, J->base[i]));
+ J->base[0] = tr;
+}
+
+/* Record bit shifts. */
+static void LJ_FASTCALL recff_bit_shift(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_opt_narrow_tobit(J, J->base[0]);
+ TRef tsh = lj_opt_narrow_tobit(J, J->base[1]);
+ IROp op = (IROp)rd->data;
+ if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) &&
+ !tref_isk(tsh))
+ tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 31));
+#ifdef LJ_TARGET_UNIFYROT
+ if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) {
+ op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR;
+ tsh = emitir(IRTI(IR_NEG), tsh, tsh);
+ }
+#endif
+ J->base[0] = emitir(IRTI(op), tr, tsh);
+}
+
+/* -- String library fast functions --------------------------------------- */
+
+static void LJ_FASTCALL recff_string_len(jit_State *J, RecordFFData *rd)
+{
+ J->base[0] = emitir(IRTI(IR_FLOAD), lj_ir_tostr(J, J->base[0]), IRFL_STR_LEN);
+ UNUSED(rd);
+}
+
+/* Handle string.byte (rd->data = 0) and string.sub (rd->data = 1). */
+static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd)
+{
+ TRef trstr = lj_ir_tostr(J, J->base[0]);
+ TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN);
+ TRef tr0 = lj_ir_kint(J, 0);
+ TRef trstart, trend;
+ GCstr *str = argv2str(J, &rd->argv[0]);
+ int32_t start, end;
+ if (rd->data) { /* string.sub(str, start [,end]) */
+ start = argv2int(J, &rd->argv[1]);
+ trstart = lj_opt_narrow_toint(J, J->base[1]);
+ trend = J->base[2];
+ if (tref_isnil(trend)) {
+ trend = lj_ir_kint(J, -1);
+ end = -1;
+ } else {
+ trend = lj_opt_narrow_toint(J, trend);
+ end = argv2int(J, &rd->argv[2]);
+ }
+  } else {  /* string.byte(str [,start [,end]]) */
+ if (J->base[1]) {
+ start = argv2int(J, &rd->argv[1]);
+ trstart = lj_opt_narrow_toint(J, J->base[1]);
+ trend = J->base[2];
+ if (tref_isnil(trend)) {
+ trend = trstart;
+ end = start;
+ } else {
+ trend = lj_opt_narrow_toint(J, trend);
+ end = argv2int(J, &rd->argv[2]);
+ }
+ } else {
+ trend = trstart = lj_ir_kint(J, 1);
+ end = start = 1;
+ }
+ }
+ if (end < 0) {
+ emitir(IRTGI(IR_LT), trend, tr0);
+ trend = emitir(IRTI(IR_ADD), emitir(IRTI(IR_ADD), trlen, trend),
+ lj_ir_kint(J, 1));
+ end = end+(int32_t)str->len+1;
+ } else if ((MSize)end <= str->len) {
+ emitir(IRTGI(IR_ULE), trend, trlen);
+ } else {
+ emitir(IRTGI(IR_GT), trend, trlen);
+ end = (int32_t)str->len;
+ trend = trlen;
+ }
+ if (start < 0) {
+ emitir(IRTGI(IR_LT), trstart, tr0);
+ trstart = emitir(IRTI(IR_ADD), trlen, trstart);
+ start = start+(int32_t)str->len;
+ emitir(start < 0 ? IRTGI(IR_LT) : IRTGI(IR_GE), trstart, tr0);
+ if (start < 0) {
+ trstart = tr0;
+ start = 0;
+ }
+ } else {
+ if (start == 0) {
+ emitir(IRTGI(IR_EQ), trstart, tr0);
+ trstart = tr0;
+ } else {
+ trstart = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, -1));
+ emitir(IRTGI(IR_GE), trstart, tr0);
+ start--;
+ }
+ }
+ if (rd->data) { /* Return string.sub result. */
+ if (end - start >= 0) {
+ /* Also handle empty range here, to avoid extra traces. */
+ TRef trptr, trslen = emitir(IRTI(IR_SUB), trend, trstart);
+ emitir(IRTGI(IR_GE), trslen, tr0);
+ trptr = emitir(IRT(IR_STRREF, IRT_P32), trstr, trstart);
+ J->base[0] = emitir(IRT(IR_SNEW, IRT_STR), trptr, trslen);
+ } else { /* Range underflow: return empty string. */
+ emitir(IRTGI(IR_LT), trend, trstart);
+ J->base[0] = lj_ir_kstr(J, lj_str_new(J->L, strdata(str), 0));
+ }
+ } else { /* Return string.byte result(s). */
+ ptrdiff_t i, len = end - start;
+ if (len > 0) {
+ TRef trslen = emitir(IRTI(IR_SUB), trend, trstart);
+ emitir(IRTGI(IR_EQ), trslen, lj_ir_kint(J, (int32_t)len));
+ if (J->baseslot + len > LJ_MAX_JSLOTS)
+ lj_trace_err_info(J, LJ_TRERR_STACKOV);
+ rd->nres = len;
+ for (i = 0; i < len; i++) {
+ TRef tmp = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, (int32_t)i));
+ tmp = emitir(IRT(IR_STRREF, IRT_P32), trstr, tmp);
+ J->base[i] = emitir(IRT(IR_XLOAD, IRT_U8), tmp, IRXLOAD_READONLY);
+ }
+ } else { /* Empty range or range underflow: return no results. */
+ emitir(IRTGI(IR_LE), trend, trstart);
+ rd->nres = 0;
+ }
+ }
+}
+
+/* -- Table library fast functions ---------------------------------------- */
+
+static void LJ_FASTCALL recff_table_getn(jit_State *J, RecordFFData *rd)
+{
+ if (tref_istab(J->base[0]))
+ J->base[0] = lj_ir_call(J, IRCALL_lj_tab_len, J->base[0]);
+ /* else: Interpreter will throw. */
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_table_remove(jit_State *J, RecordFFData *rd)
+{
+ TRef tab = J->base[0];
+ rd->nres = 0;
+ if (tref_istab(tab)) {
+ if (!J->base[1] || tref_isnil(J->base[1])) { /* Simple pop: t[#t] = nil */
+ TRef trlen = lj_ir_call(J, IRCALL_lj_tab_len, tab);
+ GCtab *t = tabV(&rd->argv[0]);
+ MSize len = lj_tab_len(t);
+ emitir(IRTGI(len ? IR_NE : IR_EQ), trlen, lj_ir_kint(J, 0));
+ if (len) {
+ RecordIndex ix;
+ ix.tab = tab;
+ ix.key = trlen;
+ settabV(J->L, &ix.tabv, t);
+ setintV(&ix.keyv, len);
+ ix.idxchain = 0;
+ if (results_wanted(J) != 0) { /* Specialize load only if needed. */
+ ix.val = 0;
+ J->base[0] = lj_record_idx(J, &ix); /* Load previous value. */
+ rd->nres = 1;
+ /* Assumes ix.key/ix.tab is not modified for raw lj_record_idx(). */
+ }
+ ix.val = TREF_NIL;
+ lj_record_idx(J, &ix); /* Remove value. */
+ }
+ } else { /* Complex case: remove in the middle. */
+ recff_nyiu(J);
+ }
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0];
+ ix.val = J->base[1];
+ rd->nres = 0;
+ if (tref_istab(ix.tab) && ix.val) {
+ if (!J->base[2]) { /* Simple push: t[#t+1] = v */
+ TRef trlen = lj_ir_call(J, IRCALL_lj_tab_len, ix.tab);
+ GCtab *t = tabV(&rd->argv[0]);
+ ix.key = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1));
+ settabV(J->L, &ix.tabv, t);
+ setintV(&ix.keyv, lj_tab_len(t) + 1);
+ ix.idxchain = 0;
+ lj_record_idx(J, &ix); /* Set new value. */
+ } else { /* Complex case: insert in the middle. */
+ recff_nyiu(J);
+ }
+ } /* else: Interpreter will throw. */
+}
+
+/* -- I/O library fast functions ------------------------------------------ */
+
+/* Get FILE* for I/O function. Any I/O error aborts recording, so there's
+** no need to encode the alternate cases for any of the guards.
+*/
+static TRef recff_io_fp(jit_State *J, uint32_t id)
+{
+ TRef tr, ud, fp;
+ if (id) { /* io.func() */
+ tr = lj_ir_kptr(J, &J2G(J)->gcroot[id]);
+ ud = emitir(IRT(IR_XLOAD, IRT_UDATA), tr, 0);
+ } else { /* fp:method() */
+ ud = J->base[0];
+ if (!tref_isudata(ud))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ tr = emitir(IRT(IR_FLOAD, IRT_U8), ud, IRFL_UDATA_UDTYPE);
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, UDTYPE_IO_FILE));
+ }
+ fp = emitir(IRT(IR_FLOAD, IRT_PTR), ud, IRFL_UDATA_FILE);
+ emitir(IRTG(IR_NE, IRT_PTR), fp, lj_ir_knull(J, IRT_PTR));
+ return fp;
+}
+
+static void LJ_FASTCALL recff_io_write(jit_State *J, RecordFFData *rd)
+{
+ TRef fp = recff_io_fp(J, rd->data);
+ TRef zero = lj_ir_kint(J, 0);
+ TRef one = lj_ir_kint(J, 1);
+ ptrdiff_t i = rd->data == 0 ? 1 : 0;
+ for (; J->base[i]; i++) {
+ TRef str = lj_ir_tostr(J, J->base[i]);
+ TRef buf = emitir(IRT(IR_STRREF, IRT_P32), str, zero);
+ TRef len = emitir(IRTI(IR_FLOAD), str, IRFL_STR_LEN);
+ if (tref_isk(len) && IR(tref_ref(len))->i == 1) {
+ TRef tr = emitir(IRT(IR_XLOAD, IRT_U8), buf, IRXLOAD_READONLY);
+ tr = lj_ir_call(J, IRCALL_fputc, tr, fp);
+ if (results_wanted(J) != 0) /* Check result only if not ignored. */
+ emitir(IRTGI(IR_NE), tr, lj_ir_kint(J, -1));
+ } else {
+ TRef tr = lj_ir_call(J, IRCALL_fwrite, buf, one, len, fp);
+ if (results_wanted(J) != 0) /* Check result only if not ignored. */
+ emitir(IRTGI(IR_EQ), tr, len);
+ }
+ }
+ J->base[0] = TREF_TRUE;
+}
+
+static void LJ_FASTCALL recff_io_flush(jit_State *J, RecordFFData *rd)
+{
+ TRef fp = recff_io_fp(J, rd->data);
+ TRef tr = lj_ir_call(J, IRCALL_fflush, fp);
+ if (results_wanted(J) != 0) /* Check result only if not ignored. */
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
+ J->base[0] = TREF_TRUE;
+}
+
+/* -- Record calls to fast functions -------------------------------------- */
+
+#include "lj_recdef.h"
+
+static uint32_t recdef_lookup(GCfunc *fn)
+{
+ if (fn->c.ffid < sizeof(recff_idmap)/sizeof(recff_idmap[0]))
+ return recff_idmap[fn->c.ffid];
+ else
+ return 0;
+}
+
+/* Record entry to a fast function or C function. */
+void lj_ffrecord_func(jit_State *J)
+{
+ RecordFFData rd;
+ uint32_t m = recdef_lookup(J->fn);
+ rd.data = m & 0xff;
+ rd.nres = 1; /* Default is one result. */
+ rd.argv = J->L->base;
+ J->base[J->maxslot] = 0; /* Mark end of arguments. */
+ (recff_func[m >> 8])(J, &rd); /* Call recff_* handler. */
+ if (rd.nres >= 0) {
+ if (J->postproc == LJ_POST_NONE) J->postproc = LJ_POST_FFRETRY;
+ lj_record_ret(J, 0, rd.nres);
+ }
+}
+
+#undef IR
+#undef emitir
+
+#endif
diff --git a/src/LuaJIT/src/lj_ffrecord.h b/src/LuaJIT/src/lj_ffrecord.h
new file mode 100644
index 000000000..3a107d410
--- /dev/null
+++ b/src/LuaJIT/src/lj_ffrecord.h
@@ -0,0 +1,24 @@
+/*
+** Fast function call recorder.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FFRECORD_H
+#define _LJ_FFRECORD_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* Data used by handlers to record a fast function. */
+typedef struct RecordFFData {
+ TValue *argv; /* Runtime argument values. */
+ ptrdiff_t nres; /* Number of returned results (defaults to 1). */
+ uint32_t data; /* Per-ffid auxiliary data (opcode, literal etc.). */
+} RecordFFData;
+
+LJ_FUNC int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv);
+LJ_FUNC void lj_ffrecord_func(jit_State *J);
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_frame.h b/src/LuaJIT/src/lj_frame.h
new file mode 100644
index 000000000..e5a56897b
--- /dev/null
+++ b/src/LuaJIT/src/lj_frame.h
@@ -0,0 +1,159 @@
+/*
+** Stack frames.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FRAME_H
+#define _LJ_FRAME_H
+
+#include "lj_obj.h"
+#include "lj_bc.h"
+
+/* -- Lua stack frame ----------------------------------------------------- */
+
+/* Frame type markers in callee function slot (callee base-1). */
+enum {
+ FRAME_LUA, FRAME_C, FRAME_CONT, FRAME_VARG,
+ FRAME_LUAP, FRAME_CP, FRAME_PCALL, FRAME_PCALLH
+};
+#define FRAME_TYPE 3
+#define FRAME_P 4
+#define FRAME_TYPEP (FRAME_TYPE|FRAME_P)
+
+/* Macros to access and modify Lua frames. */
+#define frame_gc(f) (gcref((f)->fr.func))
+#define frame_func(f) (&frame_gc(f)->fn)
+#define frame_ftsz(f) ((f)->fr.tp.ftsz)
+
+#define frame_type(f) (frame_ftsz(f) & FRAME_TYPE)
+#define frame_typep(f) (frame_ftsz(f) & FRAME_TYPEP)
+#define frame_islua(f) (frame_type(f) == FRAME_LUA)
+#define frame_isc(f) (frame_type(f) == FRAME_C)
+#define frame_iscont(f) (frame_typep(f) == FRAME_CONT)
+#define frame_isvarg(f) (frame_typep(f) == FRAME_VARG)
+#define frame_ispcall(f) ((frame_ftsz(f) & 6) == FRAME_PCALL)
+
+#define frame_pc(f) (mref((f)->fr.tp.pcr, const BCIns))
+#define frame_contpc(f) (frame_pc((f)-1))
+#if LJ_64
+#define frame_contf(f) \
+ ((ASMFunction)(void *)((intptr_t)lj_vm_asm_begin + \
+ (intptr_t)(int32_t)((f)-1)->u32.lo))
+#else
+#define frame_contf(f) ((ASMFunction)gcrefp(((f)-1)->gcr, void))
+#endif
+#define frame_delta(f) (frame_ftsz(f) >> 3)
+#define frame_sized(f) (frame_ftsz(f) & ~FRAME_TYPEP)
+
+#define frame_prevl(f) ((f) - (1+bc_a(frame_pc(f)[-1])))
+#define frame_prevd(f) ((TValue *)((char *)(f) - frame_sized(f)))
+#define frame_prev(f) (frame_islua(f)?frame_prevl(f):frame_prevd(f))
+/* Note: this macro does not skip over FRAME_VARG. */
+
+#define setframe_pc(f, pc) (setmref((f)->fr.tp.pcr, (pc)))
+#define setframe_ftsz(f, sz) ((f)->fr.tp.ftsz = (sz))
+#define setframe_gc(f, p) (setgcref((f)->fr.func, (p)))
+
+/* -- C stack frame ------------------------------------------------------- */
+
+/* Macros to access and modify the C stack frame chain. */
+
+/* These definitions must match with the arch-specific *.dasc files. */
+#if LJ_TARGET_X86
+#define CFRAME_OFS_ERRF (15*4)
+#define CFRAME_OFS_NRES (14*4)
+#define CFRAME_OFS_PREV (13*4)
+#define CFRAME_OFS_L (12*4)
+#define CFRAME_OFS_PC (6*4)
+#define CFRAME_OFS_MULTRES (5*4)
+#define CFRAME_SIZE (12*4)
+#define CFRAME_SHIFT_MULTRES 0
+#elif LJ_TARGET_X64
+#if LJ_ABI_WIN
+#define CFRAME_OFS_PREV (13*8)
+#define CFRAME_OFS_PC (25*4)
+#define CFRAME_OFS_L (24*4)
+#define CFRAME_OFS_ERRF (23*4)
+#define CFRAME_OFS_NRES (22*4)
+#define CFRAME_OFS_MULTRES (21*4)
+#define CFRAME_SIZE (10*8)
+#define CFRAME_SIZE_JIT (CFRAME_SIZE + 9*16 + 4*8)
+#define CFRAME_SHIFT_MULTRES 0
+#else
+#define CFRAME_OFS_PREV (4*8)
+#define CFRAME_OFS_PC (7*4)
+#define CFRAME_OFS_L (6*4)
+#define CFRAME_OFS_ERRF (5*4)
+#define CFRAME_OFS_NRES (4*4)
+#define CFRAME_OFS_MULTRES (1*4)
+#define CFRAME_SIZE (10*8)
+#define CFRAME_SIZE_JIT (CFRAME_SIZE + 16)
+#define CFRAME_SHIFT_MULTRES 0
+#endif
+#elif LJ_TARGET_ARM
+#define CFRAME_OFS_ERRF 24
+#define CFRAME_OFS_NRES 20
+#define CFRAME_OFS_PREV 16
+#define CFRAME_OFS_L 12
+#define CFRAME_OFS_PC 8
+#define CFRAME_OFS_MULTRES 4
+#define CFRAME_SIZE 64
+#define CFRAME_SHIFT_MULTRES 3
+#elif LJ_TARGET_PPC
+#define CFRAME_OFS_ERRF 48
+#define CFRAME_OFS_NRES 44
+#define CFRAME_OFS_PREV 40
+#define CFRAME_OFS_L 36
+#define CFRAME_OFS_PC 32
+#define CFRAME_OFS_MULTRES 28
+#define CFRAME_SIZE 272
+#define CFRAME_SHIFT_MULTRES 3
+#elif LJ_TARGET_PPCSPE
+#define CFRAME_OFS_ERRF 28
+#define CFRAME_OFS_NRES 24
+#define CFRAME_OFS_PREV 20
+#define CFRAME_OFS_L 16
+#define CFRAME_OFS_PC 12
+#define CFRAME_OFS_MULTRES 8
+#define CFRAME_SIZE 184
+#define CFRAME_SHIFT_MULTRES 3
+#elif LJ_TARGET_MIPS
+#define CFRAME_OFS_ERRF 124
+#define CFRAME_OFS_NRES 120
+#define CFRAME_OFS_PREV 116
+#define CFRAME_OFS_L 112
+#define CFRAME_OFS_PC 20
+#define CFRAME_OFS_MULTRES 16
+#define CFRAME_SIZE 112
+#define CFRAME_SHIFT_MULTRES 3
+#else
+#error "Missing CFRAME_* definitions for this architecture"
+#endif
+
+#ifndef CFRAME_SIZE_JIT
+#define CFRAME_SIZE_JIT CFRAME_SIZE
+#endif
+
+#define CFRAME_RESUME 1
+#define CFRAME_UNWIND_FF 2 /* Only used in unwinder. */
+#define CFRAME_RAWMASK (~(intptr_t)(CFRAME_RESUME|CFRAME_UNWIND_FF))
+
+#define cframe_errfunc(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_ERRF))
+#define cframe_nres(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_NRES))
+#define cframe_prev(cf) (*(void **)(((char *)(cf))+CFRAME_OFS_PREV))
+#define cframe_multres(cf) (*(uint32_t *)(((char *)(cf))+CFRAME_OFS_MULTRES))
+#define cframe_multres_n(cf) (cframe_multres((cf)) >> CFRAME_SHIFT_MULTRES)
+#define cframe_L(cf) \
+ (&gcref(*(GCRef *)(((char *)(cf))+CFRAME_OFS_L))->th)
+#define cframe_pc(cf) \
+ (mref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), const BCIns))
+#define setcframe_L(cf, L) \
+ (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_L), (L)))
+#define setcframe_pc(cf, pc) \
+ (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), (pc)))
+#define cframe_canyield(cf) ((intptr_t)(cf) & CFRAME_RESUME)
+#define cframe_unwind_ff(cf) ((intptr_t)(cf) & CFRAME_UNWIND_FF)
+#define cframe_raw(cf) ((void *)((intptr_t)(cf) & CFRAME_RAWMASK))
+#define cframe_Lpc(L) cframe_pc(cframe_raw(L->cframe))
+
+#endif
diff --git a/src/LuaJIT/src/lj_func.c b/src/LuaJIT/src/lj_func.c
new file mode 100644
index 000000000..0c0b9014e
--- /dev/null
+++ b/src/LuaJIT/src/lj_func.c
@@ -0,0 +1,184 @@
+/*
+** Function handling (prototypes, functions and upvalues).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_func_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_func.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+/* -- Prototypes ---------------------------------------------------------- */
+
+void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt)
+{
+ lj_mem_free(g, pt, pt->sizept);
+}
+
+/* -- Upvalues ------------------------------------------------------------ */
+
+static void unlinkuv(GCupval *uv)
+{
+ lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
+ setgcrefr(uvnext(uv)->prev, uv->prev);
+ setgcrefr(uvprev(uv)->next, uv->next);
+}
+
+/* Find existing open upvalue for a stack slot or create a new one. */
+static GCupval *func_finduv(lua_State *L, TValue *slot)
+{
+ global_State *g = G(L);
+ GCRef *pp = &L->openupval;
+ GCupval *p;
+ GCupval *uv;
+ /* Search the sorted list of open upvalues. */
+ while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) {
+ lua_assert(!p->closed && uvval(p) != &p->tv);
+ if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */
+ if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */
+ flipwhite(obj2gco(p));
+ return p;
+ }
+ pp = &p->nextgc;
+ }
+ /* No matching upvalue found. Create a new one. */
+ uv = lj_mem_newt(L, sizeof(GCupval), GCupval);
+ newwhite(g, uv);
+ uv->gct = ~LJ_TUPVAL;
+ uv->closed = 0; /* Still open. */
+ setmref(uv->v, slot); /* Pointing to the stack slot. */
+ /* NOBARRIER: The GCupval is new (marked white) and open. */
+ setgcrefr(uv->nextgc, *pp); /* Insert into sorted list of open upvalues. */
+ setgcref(*pp, obj2gco(uv));
+ setgcref(uv->prev, obj2gco(&g->uvhead)); /* Insert into GC list, too. */
+ setgcrefr(uv->next, g->uvhead.next);
+ setgcref(uvnext(uv)->prev, obj2gco(uv));
+ setgcref(g->uvhead.next, obj2gco(uv));
+ lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
+ return uv;
+}
+
+/* Create an empty and closed upvalue. */
+static GCupval *func_emptyuv(lua_State *L)
+{
+ GCupval *uv = (GCupval *)lj_mem_newgco(L, sizeof(GCupval));
+ uv->gct = ~LJ_TUPVAL;
+ uv->closed = 1;
+ setnilV(&uv->tv);
+ setmref(uv->v, &uv->tv);
+ return uv;
+}
+
+/* Close all open upvalues pointing to some stack level or above. */
+void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level)
+{
+ GCupval *uv;
+ global_State *g = G(L);
+ while (gcref(L->openupval) != NULL &&
+ uvval((uv = gco2uv(gcref(L->openupval)))) >= level) {
+ GCobj *o = obj2gco(uv);
+ lua_assert(!isblack(o) && !uv->closed && uvval(uv) != &uv->tv);
+ setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */
+ if (isdead(g, o)) {
+ lj_func_freeuv(g, uv);
+ } else {
+ unlinkuv(uv);
+ lj_gc_closeuv(g, uv);
+ }
+ }
+}
+
+void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv)
+{
+ if (!uv->closed)
+ unlinkuv(uv);
+ lj_mem_freet(g, uv);
+}
+
+/* -- Functions (closures) ------------------------------------------------ */
+
+GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env)
+{
+ GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeCfunc(nelems));
+ fn->c.gct = ~LJ_TFUNC;
+ fn->c.ffid = FF_C;
+ fn->c.nupvalues = (uint8_t)nelems;
+ /* NOBARRIER: The GCfunc is new (marked white). */
+ setmref(fn->c.pc, &G(L)->bc_cfunc_ext);
+ setgcref(fn->c.env, obj2gco(env));
+ return fn;
+}
+
+static GCfunc *func_newL(lua_State *L, GCproto *pt, GCtab *env)
+{
+ uint32_t count;
+ GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeLfunc((MSize)pt->sizeuv));
+ fn->l.gct = ~LJ_TFUNC;
+ fn->l.ffid = FF_LUA;
+ fn->l.nupvalues = 0; /* Set to zero until upvalues are initialized. */
+ /* NOBARRIER: Really a setgcref. But the GCfunc is new (marked white). */
+ setmref(fn->l.pc, proto_bc(pt));
+ setgcref(fn->l.env, obj2gco(env));
+ /* Saturating 3 bit counter (0..7) for created closures. */
+ count = (uint32_t)pt->flags + PROTO_CLCOUNT;
+ pt->flags = (uint8_t)(count - ((count >> PROTO_CLC_BITS) & PROTO_CLCOUNT));
+ return fn;
+}
+
+/* Create a new Lua function with empty upvalues. */
+GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env)
+{
+ GCfunc *fn = func_newL(L, pt, env);
+ MSize i, nuv = pt->sizeuv;
+ /* NOBARRIER: The GCfunc is new (marked white). */
+ for (i = 0; i < nuv; i++) {
+ GCupval *uv = func_emptyuv(L);
+ uv->dhash = (uint32_t)(uintptr_t)pt ^ ((uint32_t)proto_uv(pt)[i] << 24);
+ setgcref(fn->l.uvptr[i], obj2gco(uv));
+ }
+ fn->l.nupvalues = (uint8_t)nuv;
+ return fn;
+}
+
+/* Do a GC check and create a new Lua function with inherited upvalues. */
+GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent)
+{
+ GCfunc *fn;
+ GCRef *puv;
+ MSize i, nuv;
+ TValue *base;
+ lj_gc_check_fixtop(L);
+ fn = func_newL(L, pt, tabref(parent->env));
+ /* NOBARRIER: The GCfunc is new (marked white). */
+ puv = parent->uvptr;
+ nuv = pt->sizeuv;
+ base = L->base;
+ for (i = 0; i < nuv; i++) {
+ uint32_t v = proto_uv(pt)[i];
+ GCupval *uv;
+ if ((v & 0x8000)) {
+ uv = func_finduv(L, base + (v & 0xff));
+ uv->dhash = (uint32_t)(uintptr_t)mref(parent->pc, char) ^ (v << 24);
+ } else {
+ uv = &gcref(puv[v])->uv;
+ }
+ setgcref(fn->l.uvptr[i], obj2gco(uv));
+ }
+ fn->l.nupvalues = (uint8_t)nuv;
+ return fn;
+}
+
+void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *fn)
+{
+ MSize size = isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
+ sizeCfunc((MSize)fn->c.nupvalues);
+ lj_mem_free(g, fn, size);
+}
+
diff --git a/src/LuaJIT/src/lj_func.h b/src/LuaJIT/src/lj_func.h
new file mode 100644
index 000000000..7c69e60fc
--- /dev/null
+++ b/src/LuaJIT/src/lj_func.h
@@ -0,0 +1,24 @@
+/*
+** Function handling (prototypes, functions and upvalues).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FUNC_H
+#define _LJ_FUNC_H
+
+#include "lj_obj.h"
+
+/* Prototypes. */
+LJ_FUNC void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt);
+
+/* Upvalues. */
+LJ_FUNCA void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level);
+LJ_FUNC void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv);
+
+/* Functions (closures). */
+LJ_FUNC GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env);
+LJ_FUNC GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env);
+LJ_FUNCA GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent);
+LJ_FUNC void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *c);
+
+#endif
diff --git a/src/LuaJIT/src/lj_gc.c b/src/LuaJIT/src/lj_gc.c
new file mode 100644
index 000000000..9d0e75599
--- /dev/null
+++ b/src/LuaJIT/src/lj_gc.c
@@ -0,0 +1,838 @@
+/*
+** Garbage collector.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_gc_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_udata.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#endif
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+#define GCSTEPSIZE 1024u
+#define GCSWEEPMAX 40
+#define GCSWEEPCOST 10
+#define GCFINALIZECOST 100
+
+/* Macros to set GCobj colors and flags. */
+#define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
+#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK)
+#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED)
+#define markfinalized(u) ((u)->marked |= LJ_GC_FINALIZED)
+
+/* -- Mark phase ---------------------------------------------------------- */
+
+/* Mark a TValue (if needed). */
+#define gc_marktv(g, tv) \
+ { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
+ if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
+
+/* Mark a GCobj (if needed). */
+#define gc_markobj(g, o) \
+ { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
+
+/* Mark a string object. */
+#define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
+
+/* Mark a white GCobj. */
+static void gc_mark(global_State *g, GCobj *o)
+{
+ lua_assert(iswhite(o) && !isdead(g, o));
+ white2gray(o);
+ if (LJ_UNLIKELY(o->gch.gct == ~LJ_TUDATA)) {
+ GCtab *mt = tabref(gco2ud(o)->metatable);
+ gray2black(o); /* Userdata are never gray. */
+ if (mt) gc_markobj(g, mt);
+ gc_markobj(g, tabref(gco2ud(o)->env));
+ } else if (LJ_UNLIKELY(o->gch.gct == ~LJ_TUPVAL)) {
+ GCupval *uv = gco2uv(o);
+ gc_marktv(g, uvval(uv));
+ if (uv->closed)
+ gray2black(o); /* Closed upvalues are never gray. */
+ } else if (o->gch.gct != ~LJ_TSTR && o->gch.gct != ~LJ_TCDATA) {
+ lua_assert(o->gch.gct == ~LJ_TFUNC || o->gch.gct == ~LJ_TTAB ||
+ o->gch.gct == ~LJ_TTHREAD || o->gch.gct == ~LJ_TPROTO);
+ setgcrefr(o->gch.gclist, g->gc.gray);
+ setgcref(g->gc.gray, o);
+ }
+}
+
+/* Mark GC roots. */
+static void gc_mark_gcroot(global_State *g)
+{
+ ptrdiff_t i;
+ for (i = 0; i < GCROOT_MAX; i++)
+ if (gcref(g->gcroot[i]) != NULL)
+ gc_markobj(g, gcref(g->gcroot[i]));
+}
+
+/* Start a GC cycle and mark the root set. */
+static void gc_mark_start(global_State *g)
+{
+ setgcrefnull(g->gc.gray);
+ setgcrefnull(g->gc.grayagain);
+ setgcrefnull(g->gc.weak);
+ gc_markobj(g, mainthread(g));
+ gc_markobj(g, tabref(mainthread(g)->env));
+ gc_marktv(g, &g->registrytv);
+ gc_mark_gcroot(g);
+ g->gc.state = GCSpropagate;
+}
+
+/* Mark open upvalues. */
+static void gc_mark_uv(global_State *g)
+{
+ GCupval *uv;
+ for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
+ lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
+ if (isgray(obj2gco(uv)))
+ gc_marktv(g, uvval(uv));
+ }
+}
+
+/* Mark userdata in mmudata list. */
+static void gc_mark_mmudata(global_State *g)
+{
+ GCobj *root = gcref(g->gc.mmudata);
+ GCobj *u = root;
+ if (u) {
+ do {
+ u = gcnext(u);
+ makewhite(g, u); /* Could be from previous GC. */
+ gc_mark(g, u);
+ } while (u != root);
+ }
+}
+
+/* Separate userdata which need finalization to the mmudata list. */
+size_t lj_gc_separateudata(global_State *g, int all)
+{
+ size_t m = 0;
+ GCRef *p = &mainthread(g)->nextgc;
+ GCobj *o;
+ while ((o = gcref(*p)) != NULL) {
+ if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
+ p = &o->gch.nextgc; /* Nothing to do. */
+ } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
+ markfinalized(gco2ud(o)); /* Done, as there's no __gc metamethod. */
+ p = &o->gch.nextgc;
+ } else { /* Otherwise move userdata to be finalized to mmudata list. */
+ m += sizeudata(gco2ud(o));
+ markfinalized(gco2ud(o));
+ *p = o->gch.nextgc;
+ if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */
+ GCobj *root = gcref(g->gc.mmudata);
+ setgcrefr(o->gch.nextgc, root->gch.nextgc);
+ setgcref(root->gch.nextgc, o);
+ setgcref(g->gc.mmudata, o);
+ } else { /* Create circular list. */
+ setgcref(o->gch.nextgc, o);
+ setgcref(g->gc.mmudata, o);
+ }
+ }
+ }
+ return m;
+}
+
+/* -- Propagation phase --------------------------------------------------- */
+
+/* Traverse a table. */
+static int gc_traverse_tab(global_State *g, GCtab *t)
+{
+ int weak = 0;
+ cTValue *mode;
+ GCtab *mt = tabref(t->metatable);
+ if (mt)
+ gc_markobj(g, mt);
+ mode = lj_meta_fastg(g, mt, MM_mode);
+ if (mode && tvisstr(mode)) { /* Valid __mode field? */
+ const char *modestr = strVdata(mode);
+ int c;
+ while ((c = *modestr++)) {
+ if (c == 'k') weak |= LJ_GC_WEAKKEY;
+ else if (c == 'v') weak |= LJ_GC_WEAKVAL;
+ else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL);
+ }
+ if (weak > 0) { /* Weak tables are cleared in the atomic phase. */
+ t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
+ setgcrefr(t->gclist, g->gc.weak);
+ setgcref(g->gc.weak, obj2gco(t));
+ }
+ }
+ if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */
+ return 1;
+ if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */
+ MSize i, asize = t->asize;
+ for (i = 0; i < asize; i++)
+ gc_marktv(g, arrayslot(t, i));
+ }
+ if (t->hmask > 0) { /* Mark hash part. */
+ Node *node = noderef(t->node);
+ MSize i, hmask = t->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (!tvisnil(&n->val)) { /* Mark non-empty slot. */
+ lua_assert(!tvisnil(&n->key));
+ if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
+ if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
+ }
+ }
+ }
+ return weak;
+}
+
+/* Traverse a function. */
+static void gc_traverse_func(global_State *g, GCfunc *fn)
+{
+ gc_markobj(g, tabref(fn->c.env));
+ if (isluafunc(fn)) {
+ uint32_t i;
+ lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
+ gc_markobj(g, funcproto(fn));
+ for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */
+ gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
+ } else {
+ uint32_t i;
+ for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */
+ gc_marktv(g, &fn->c.upvalue[i]);
+ }
+}
+
+#if LJ_HASJIT
+/* Mark a trace. */
+static void gc_marktrace(global_State *g, TraceNo traceno)
+{
+ GCobj *o = obj2gco(traceref(G2J(g), traceno));
+ lua_assert(traceno != G2J(g)->cur.traceno);
+ if (iswhite(o)) {
+ white2gray(o);
+ setgcrefr(o->gch.gclist, g->gc.gray);
+ setgcref(g->gc.gray, o);
+ }
+}
+
+/* Traverse a trace. */
+static void gc_traverse_trace(global_State *g, GCtrace *T)
+{
+ IRRef ref;
+ if (T->traceno == 0) return;
+ for (ref = T->nk; ref < REF_TRUE; ref++) {
+ IRIns *ir = &T->ir[ref];
+ if (ir->o == IR_KGC)
+ gc_markobj(g, ir_kgc(ir));
+ }
+ if (T->link) gc_marktrace(g, T->link);
+ if (T->nextroot) gc_marktrace(g, T->nextroot);
+ if (T->nextside) gc_marktrace(g, T->nextside);
+ gc_markobj(g, gcref(T->startpt));
+}
+
+/* The current trace is a GC root while not anchored in the prototype (yet). */
+#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur)
+#else
+#define gc_traverse_curtrace(g) UNUSED(g)
+#endif
+
+/* Traverse a prototype. */
+static void gc_traverse_proto(global_State *g, GCproto *pt)
+{
+ ptrdiff_t i;
+ gc_mark_str(proto_chunkname(pt));
+ for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */
+ gc_markobj(g, proto_kgc(pt, i));
+#if LJ_HASJIT
+ if (pt->trace) gc_marktrace(g, pt->trace);
+#endif
+}
+
+/* Traverse the frame structure of a stack. */
+static MSize gc_traverse_frames(global_State *g, lua_State *th)
+{
+ TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
+ /* Note: extra vararg frame not skipped, marks function twice (harmless). */
+ for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) {
+ GCfunc *fn = frame_func(frame);
+ TValue *ftop = frame;
+ if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
+ if (ftop > top) top = ftop;
+ gc_markobj(g, fn); /* Need to mark hidden function (or L). */
+ }
+ top++; /* Correct bias of -1 (frame == base-1). */
+ if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
+ return (MSize)(top - bot); /* Return minimum needed stack size. */
+}
+
+/* Traverse a thread object. */
+static void gc_traverse_thread(global_State *g, lua_State *th)
+{
+ TValue *o, *top = th->top;
+ for (o = tvref(th->stack)+1; o < top; o++)
+ gc_marktv(g, o);
+ if (g->gc.state == GCSatomic) {
+ top = tvref(th->stack) + th->stacksize;
+ for (; o < top; o++) /* Clear unmarked slots. */
+ setnilV(o);
+ }
+ gc_markobj(g, tabref(th->env));
+ lj_state_shrinkstack(th, gc_traverse_frames(g, th));
+}
+
+/* Propagate one gray object. Traverse it and turn it black. */
+static size_t propagatemark(global_State *g)
+{
+ GCobj *o = gcref(g->gc.gray);
+ lua_assert(isgray(o));
+ gray2black(o);
+ setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */
+ if (LJ_LIKELY(o->gch.gct == ~LJ_TTAB)) {
+ GCtab *t = gco2tab(o);
+ if (gc_traverse_tab(g, t) > 0)
+ black2gray(o); /* Keep weak tables gray. */
+ return sizeof(GCtab) + sizeof(TValue) * t->asize +
+ sizeof(Node) * (t->hmask + 1);
+ } else if (LJ_LIKELY(o->gch.gct == ~LJ_TFUNC)) {
+ GCfunc *fn = gco2func(o);
+ gc_traverse_func(g, fn);
+ return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
+ sizeCfunc((MSize)fn->c.nupvalues);
+ } else if (LJ_LIKELY(o->gch.gct == ~LJ_TPROTO)) {
+ GCproto *pt = gco2pt(o);
+ gc_traverse_proto(g, pt);
+ return pt->sizept;
+ } else if (LJ_LIKELY(o->gch.gct == ~LJ_TTHREAD)) {
+ lua_State *th = gco2th(o);
+ setgcrefr(th->gclist, g->gc.grayagain);
+ setgcref(g->gc.grayagain, o);
+ black2gray(o); /* Threads are never black. */
+ gc_traverse_thread(g, th);
+ return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
+ } else {
+#if LJ_HASJIT
+ GCtrace *T = gco2trace(o);
+ gc_traverse_trace(g, T);
+ return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
+ T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
+#else
+ lua_assert(0);
+ return 0;
+#endif
+ }
+}
+
+/* Propagate all gray objects. */
+static size_t gc_propagate_gray(global_State *g)
+{
+ size_t m = 0;
+ while (gcref(g->gc.gray) != NULL)
+ m += propagatemark(g);
+ return m;
+}
+
+/* -- Sweep phase --------------------------------------------------------- */
+
+/* Try to shrink some common data structures. */
+static void gc_shrink(global_State *g, lua_State *L)
+{
+ if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
+ lj_str_resize(L, g->strmask >> 1); /* Shrink string table. */
+ if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
+ lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1); /* Shrink temp buf. */
+}
+
+/* Type of GC free functions. */
+typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
+
+/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
+static const GCFreeFunc gc_freefunc[] = {
+ (GCFreeFunc)lj_str_free,
+ (GCFreeFunc)lj_func_freeuv,
+ (GCFreeFunc)lj_state_free,
+ (GCFreeFunc)lj_func_freeproto,
+ (GCFreeFunc)lj_func_free,
+#if LJ_HASJIT
+ (GCFreeFunc)lj_trace_free,
+#else
+ (GCFreeFunc)0,
+#endif
+#if LJ_HASFFI
+ (GCFreeFunc)lj_cdata_free,
+#else
+ (GCFreeFunc)0,
+#endif
+ (GCFreeFunc)lj_tab_free,
+ (GCFreeFunc)lj_udata_free
+};
+
+/* Full sweep of a GC list. */
+#define gc_fullsweep(g, p) gc_sweep(g, (p), LJ_MAX_MEM)
+
+/* Partial sweep of a GC list. */
+static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
+{
+ /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
+ int ow = otherwhite(g);
+ GCobj *o;
+ while ((o = gcref(*p)) != NULL && lim-- > 0) {
+ if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */
+ gc_fullsweep(g, &gco2th(o)->openupval);
+ if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
+ lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
+ makewhite(g, o); /* Value is alive, change to the current white. */
+ p = &o->gch.nextgc;
+ } else { /* Otherwise value is dead, free it. */
+ lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
+ setgcrefr(*p, o->gch.nextgc);
+ if (o == gcref(g->gc.root))
+ setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */
+ gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
+ }
+ }
+ return p;
+}
+
+/* Check whether we can clear a key or a value slot from a table. */
+static int gc_mayclear(cTValue *o, int val)
+{
+ if (tvisgcv(o)) { /* Only collectable objects can be weak references. */
+ if (tvisstr(o)) { /* But strings cannot be used as weak references. */
+ gc_mark_str(strV(o)); /* And need to be marked. */
+ return 0;
+ }
+ if (iswhite(gcV(o)))
+ return 1; /* Object is about to be collected. */
+ if (tvisudata(o) && val && isfinalized(udataV(o)))
+ return 1; /* Finalized userdata is dropped only from values. */
+ }
+ return 0; /* Cannot clear. */
+}
+
+/* Clear collected entries from weak tables. */
+static void gc_clearweak(GCobj *o)
+{
+ while (o) {
+ GCtab *t = gco2tab(o);
+ lua_assert((t->marked & LJ_GC_WEAK));
+ if ((t->marked & LJ_GC_WEAKVAL)) {
+ MSize i, asize = t->asize;
+ for (i = 0; i < asize; i++) {
+ /* Clear array slot when value is about to be collected. */
+ TValue *tv = arrayslot(t, i);
+ if (gc_mayclear(tv, 1))
+ setnilV(tv);
+ }
+ }
+ if (t->hmask > 0) {
+ Node *node = noderef(t->node);
+ MSize i, hmask = t->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ /* Clear hash slot when key or value is about to be collected. */
+ if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
+ gc_mayclear(&n->val, 1)))
+ setnilV(&n->val);
+ }
+ }
+ o = gcref(t->gclist);
+ }
+}
+
+/* Call a userdata or cdata finalizer. */
+static void gc_call_finalizer(global_State *g, lua_State *L,
+ cTValue *mo, GCobj *o)
+{
+ /* Save and restore lots of state around the __gc callback. */
+ uint8_t oldh = hook_save(g);
+ MSize oldt = g->gc.threshold;
+ int errcode;
+ TValue *top;
+ lj_trace_abort(g);
+ top = L->top;
+ L->top = top+2;
+ hook_entergc(g); /* Disable hooks and new traces during __gc. */
+ g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */
+ copyTV(L, top, mo);
+ setgcV(L, top+1, o, ~o->gch.gct);
+ errcode = lj_vm_pcall(L, top+1, 1+0, -1); /* Stack: |mo|o| -> | */
+ hook_restore(g, oldh);
+ g->gc.threshold = oldt; /* Restore GC threshold. */
+ if (errcode)
+ lj_err_throw(L, errcode); /* Propagate errors. */
+}
+
+/* Finalize one userdata or cdata object from the mmudata list. */
+static void gc_finalize(lua_State *L)
+{
+ global_State *g = G(L);
+ GCobj *o = gcnext(gcref(g->gc.mmudata));
+ cTValue *mo;
+ lua_assert(gcref(g->jit_L) == NULL); /* Must not be called on trace. */
+ /* Unchain from list of userdata to be finalized. */
+ if (o == gcref(g->gc.mmudata))
+ setgcrefnull(g->gc.mmudata);
+ else
+ setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
+#if LJ_HASFFI
+ if (o->gch.gct == ~LJ_TCDATA) {
+ TValue tmp, *tv;
+ /* Add cdata back to the GC list and make it white. */
+ setgcrefr(o->gch.nextgc, g->gc.root);
+ setgcref(g->gc.root, o);
+ makewhite(g, o);
+ o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
+ /* Resolve finalizer. */
+ setcdataV(L, &tmp, gco2cd(o));
+ tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
+ if (!tvisnil(tv)) {
+ copyTV(L, &tmp, tv);
+ setnilV(tv); /* Clear entry in finalizer table. */
+ gc_call_finalizer(g, L, &tmp, o);
+ }
+ return;
+ }
+#endif
+ /* Add userdata back to the main userdata list and make it white. */
+ setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
+ setgcref(mainthread(g)->nextgc, o);
+ makewhite(g, o);
+ /* Resolve the __gc metamethod. */
+ mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
+ if (mo)
+ gc_call_finalizer(g, L, mo, o);
+}
+
+/* Finalize all userdata objects from mmudata list. */
+void lj_gc_finalize_udata(lua_State *L)
+{
+ while (gcref(G(L)->gc.mmudata) != NULL)
+ gc_finalize(L);
+}
+
+#if LJ_HASFFI
+/* Finalize all cdata objects from finalizer table. */
+void lj_gc_finalize_cdata(lua_State *L)
+{
+ global_State *g = G(L);
+ CTState *cts = ctype_ctsG(g);
+ if (cts) {
+ GCtab *t = cts->finalizer;
+ Node *node = noderef(t->node);
+ ptrdiff_t i;
+ setgcrefnull(t->metatable); /* Mark finalizer table as disabled. */
+ for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
+ if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
+ GCobj *o = gcV(&node[i].key);
+ TValue tmp;
+ makewhite(g, o);
+ o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
+ copyTV(L, &tmp, &node[i].val);
+ setnilV(&node[i].val);
+ gc_call_finalizer(g, L, &tmp, o);
+ }
+ }
+}
+#endif
+
+/* Free all remaining GC objects. */
+void lj_gc_freeall(global_State *g)
+{
+ MSize i, strmask;
+ /* Free everything, except super-fixed objects (the main thread). */
+ g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
+ gc_fullsweep(g, &g->gc.root);
+ strmask = g->strmask;
+ for (i = 0; i <= strmask; i++) /* Free all string hash chains. */
+ gc_fullsweep(g, &g->strhash[i]);
+}
+
+/* -- Collector ----------------------------------------------------------- */
+
+/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
+static void atomic(global_State *g, lua_State *L)
+{
+ size_t udsize;
+
+ gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */
+ gc_propagate_gray(g); /* Propagate any left-overs. */
+
+ setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */
+ setgcrefnull(g->gc.weak);
+ lua_assert(!iswhite(obj2gco(mainthread(g))));
+ gc_markobj(g, L); /* Mark running thread. */
+ gc_traverse_curtrace(g); /* Traverse current trace. */
+ gc_mark_gcroot(g); /* Mark GC roots (again). */
+ gc_propagate_gray(g); /* Propagate all of the above. */
+
+ setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */
+ setgcrefnull(g->gc.grayagain);
+ gc_propagate_gray(g); /* Propagate it. */
+
+ udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */
+ gc_mark_mmudata(g); /* Mark them. */
+ udsize += gc_propagate_gray(g); /* And propagate the marks. */
+
+ /* All marking done, clear weak tables. */
+ gc_clearweak(gcref(g->gc.weak));
+
+ /* Prepare for sweep phase. */
+ g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */
+ g->strempty.marked = g->gc.currentwhite;
+ setmref(g->gc.sweep, &g->gc.root);
+ g->gc.estimate = g->gc.total - (MSize)udsize; /* Initial estimate. */
+}
+
+/* GC state machine. Returns a cost estimate for each step performed. */
+static size_t gc_onestep(lua_State *L)
+{
+ global_State *g = G(L);
+ switch (g->gc.state) {
+ case GCSpause:
+ gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */
+ return 0;
+ case GCSpropagate:
+ if (gcref(g->gc.gray) != NULL)
+ return propagatemark(g); /* Propagate one gray object. */
+ g->gc.state = GCSatomic; /* End of mark phase. */
+ return 0;
+ case GCSatomic:
+ if (gcref(g->jit_L)) /* Don't run atomic phase on trace. */
+ return LJ_MAX_MEM;
+ atomic(g, L);
+ g->gc.state = GCSsweepstring; /* Start of sweep phase. */
+ g->gc.sweepstr = 0;
+ return 0;
+ case GCSsweepstring: {
+ MSize old = g->gc.total;
+ gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */
+ if (g->gc.sweepstr > g->strmask)
+      g->gc.state = GCSsweep;  /* All string hash chains swept. */
+ lua_assert(old >= g->gc.total);
+ g->gc.estimate -= old - g->gc.total;
+ return GCSWEEPCOST;
+ }
+ case GCSsweep: {
+ MSize old = g->gc.total;
+ setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
+ if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
+ gc_shrink(g, L);
+ if (gcref(g->gc.mmudata)) { /* Need any finalizations? */
+ g->gc.state = GCSfinalize;
+ } else { /* Otherwise skip this phase to help the JIT. */
+ g->gc.state = GCSpause; /* End of GC cycle. */
+ g->gc.debt = 0;
+ }
+ }
+ lua_assert(old >= g->gc.total);
+ g->gc.estimate -= old - g->gc.total;
+ return GCSWEEPMAX*GCSWEEPCOST;
+ }
+ case GCSfinalize:
+ if (gcref(g->gc.mmudata) != NULL) {
+ if (gcref(g->jit_L)) /* Don't call finalizers on trace. */
+ return LJ_MAX_MEM;
+ gc_finalize(L); /* Finalize one userdata object. */
+ if (g->gc.estimate > GCFINALIZECOST)
+ g->gc.estimate -= GCFINALIZECOST;
+ return GCFINALIZECOST;
+ }
+ g->gc.state = GCSpause; /* End of GC cycle. */
+ g->gc.debt = 0;
+ return 0;
+ default:
+ lua_assert(0);
+ return 0;
+ }
+}
+
+/* Perform a limited amount of incremental GC steps. */
+int LJ_FASTCALL lj_gc_step(lua_State *L)
+{
+ global_State *g = G(L);
+ MSize lim;
+ int32_t ostate = g->vmstate;
+ setvmstate(g, GC);
+ lim = (GCSTEPSIZE/100) * g->gc.stepmul;
+ if (lim == 0)
+ lim = LJ_MAX_MEM;
+ g->gc.debt += g->gc.total - g->gc.threshold;
+ do {
+ lim -= (MSize)gc_onestep(L);
+ if (g->gc.state == GCSpause) {
+ g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
+ g->vmstate = ostate;
+ return 1; /* Finished a GC cycle. */
+ }
+ } while ((int32_t)lim > 0);
+ if (g->gc.debt < GCSTEPSIZE) {
+ g->gc.threshold = g->gc.total + GCSTEPSIZE;
+ } else {
+ g->gc.debt -= GCSTEPSIZE;
+ g->gc.threshold = g->gc.total;
+ }
+ g->vmstate = ostate;
+ return 0;
+}
+
+/* Ditto, but fix the stack top first. */
+void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
+{
+ if (curr_funcisL(L)) L->top = curr_topL(L);
+ lj_gc_step(L);
+}
+
+#if LJ_HASJIT
+/* Perform multiple GC steps. Called from JIT-compiled code. */
+int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
+{
+ lua_State *L = gco2th(gcref(g->jit_L));
+ L->base = mref(G(L)->jit_base, TValue);
+ L->top = curr_topL(L);
+ while (steps-- > 0 && lj_gc_step(L) == 0)
+ ;
+ /* Return 1 to force a trace exit. */
+ return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
+}
+#endif
+
+/* Perform a full GC cycle. */
+void lj_gc_fullgc(lua_State *L)
+{
+ global_State *g = G(L);
+ int32_t ostate = g->vmstate;
+ setvmstate(g, GC);
+ if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */
+ setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */
+ setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */
+ setgcrefnull(g->gc.grayagain);
+ setgcrefnull(g->gc.weak);
+ g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */
+ g->gc.sweepstr = 0;
+ }
+ while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
+ gc_onestep(L); /* Finish sweep. */
+ lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
+ /* Now perform a full GC. */
+ g->gc.state = GCSpause;
+ do { gc_onestep(L); } while (g->gc.state != GCSpause);
+ g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
+ g->vmstate = ostate;
+}
+
+/* -- Write barriers ------------------------------------------------------ */
+
+/* Move the GC propagation frontier forward. */
+void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
+{
+ lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
+ lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
+ lua_assert(o->gch.gct != ~LJ_TTAB);
+ /* Preserve invariant during propagation. Otherwise it doesn't matter. */
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
+ gc_mark(g, v); /* Move frontier forward. */
+ else
+ makewhite(g, o); /* Make it white to avoid the following barrier. */
+}
+
+/* Specialized barrier for closed upvalue. Pass &uv->tv. */
+void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
+{
+#define TV2MARKED(x) \
+ (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
+ gc_mark(g, gcV(tv));
+ else
+ TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
+#undef TV2MARKED
+}
+
+/* Close upvalue. Also needs a write barrier. */
+void lj_gc_closeuv(global_State *g, GCupval *uv)
+{
+ GCobj *o = obj2gco(uv);
+ /* Copy stack slot to upvalue itself and point to the copy. */
+ copyTV(mainthread(g), &uv->tv, uvval(uv));
+ setmref(uv->v, &uv->tv);
+ uv->closed = 1;
+ setgcrefr(o->gch.nextgc, g->gc.root);
+ setgcref(g->gc.root, o);
+ if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
+ gray2black(o); /* Make it black and preserve invariant. */
+ if (tviswhite(&uv->tv))
+ lj_gc_barrierf(g, o, gcV(&uv->tv));
+ } else {
+ makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */
+ lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
+ }
+ }
+}
+
+#if LJ_HASJIT
+/* Mark a trace if it's saved during the propagation phase. */
+void lj_gc_barriertrace(global_State *g, uint32_t traceno)
+{
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
+ gc_marktrace(g, traceno);
+}
+#endif
+
+/* -- Allocator ----------------------------------------------------------- */
+
+/* Call pluggable memory allocator to allocate or resize a fragment. */
+void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
+{
+ global_State *g = G(L);
+ lua_assert((osz == 0) == (p == NULL));
+ p = g->allocf(g->allocd, p, osz, nsz);
+ if (p == NULL && nsz > 0)
+ lj_err_mem(L);
+ lua_assert((nsz == 0) == (p == NULL));
+ lua_assert(checkptr32(p));
+ g->gc.total = (g->gc.total - osz) + nsz;
+ return p;
+}
+
+/* Allocate new GC object and link it to the root set. */
+void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size)
+{
+ global_State *g = G(L);
+ GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
+ if (o == NULL)
+ lj_err_mem(L);
+ lua_assert(checkptr32(o));
+ g->gc.total += size;
+ setgcrefr(o->gch.nextgc, g->gc.root);
+ setgcref(g->gc.root, o);
+ newwhite(g, o);
+ return o;
+}
+
+/* Resize growable vector. */
+void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
+{
+ MSize sz = (*szp) << 1;
+ if (sz < LJ_MIN_VECSZ)
+ sz = LJ_MIN_VECSZ;
+ if (sz > lim)
+ sz = lim;
+ p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
+ *szp = sz;
+ return p;
+}
+
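The stepper in lj_gc_step() above couples the collector to the allocator through two counters: gc.threshold, the allocation total at which lj_gc_check() triggers the next step, and gc.debt, the work carried over when a step runs out of budget. Below is a detached, simplified sketch of that accounting only; the MiniGC type, its field names and the constants are invented stand-ins for the real global_State fields and are not part of the patch.

    #include <stddef.h>

    /* Hypothetical miniature of the step/threshold/debt bookkeeping. */
    typedef struct MiniGC {
      size_t total;       /* Bytes currently allocated. */
      size_t threshold;   /* The next step fires once total >= threshold. */
      size_t debt;        /* Work owed by steps that ran out of budget. */
      size_t estimate;    /* Estimated live bytes after the last cycle. */
      size_t work_left;   /* Stand-in for the remaining mark/sweep work. */
      int pause, stepmul; /* Tuning knobs in percent, like gc.pause/gc.stepmul. */
    } MiniGC;

    enum { STEPSIZE = 1024, UNITCOST = 64 };

    /* Stand-in for gc_onestep(): do one unit of work, report the cycle end. */
    static size_t mini_onestep(MiniGC *g, int *cycle_done)
    {
      g->work_left = g->work_left > UNITCOST ? g->work_left - UNITCOST : 0;
      *cycle_done = (g->work_left == 0);
      return UNITCOST;
    }

    /* Same shape as lj_gc_step(): spend a bounded budget, then either finish
    ** the cycle and set the pause threshold, or carry the debt forward.
    ** Assumes it is only called once total >= threshold, as lj_gc_check does. */
    int mini_gc_step(MiniGC *g)
    {
      size_t lim = (STEPSIZE/100) * (size_t)g->stepmul;
      g->debt += g->total - g->threshold;
      do {
        int done = 0;
        size_t cost = mini_onestep(g, &done);
        lim = cost >= lim ? 0 : lim - cost;
        if (done) {  /* End of cycle: pause until estimate*pause% is exceeded. */
          g->threshold = (g->estimate/100) * (size_t)g->pause;
          return 1;
        }
      } while (lim > 0);
      if (g->debt < STEPSIZE) {
        g->threshold = g->total + STEPSIZE;  /* Small debt: postpone next step. */
      } else {
        g->debt -= STEPSIZE;
        g->threshold = g->total;             /* Large debt: step again at once. */
      }
      return 0;
    }
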
diff --git a/src/LuaJIT/src/lj_gc.h b/src/LuaJIT/src/lj_gc.h
new file mode 100644
index 000000000..df66259ed
--- /dev/null
+++ b/src/LuaJIT/src/lj_gc.h
@@ -0,0 +1,133 @@
+/*
+** Garbage collector.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_GC_H
+#define _LJ_GC_H
+
+#include "lj_obj.h"
+
+/* Garbage collector states. Order matters. */
+enum {
+ GCSpause, GCSpropagate, GCSatomic, GCSsweepstring, GCSsweep, GCSfinalize
+};
+
+/* Bitmasks for marked field of GCobj. */
+#define LJ_GC_WHITE0 0x01
+#define LJ_GC_WHITE1 0x02
+#define LJ_GC_BLACK 0x04
+#define LJ_GC_FINALIZED 0x08
+#define LJ_GC_WEAKKEY 0x08
+#define LJ_GC_WEAKVAL 0x10
+#define LJ_GC_CDATA_FIN 0x10
+#define LJ_GC_FIXED 0x20
+#define LJ_GC_SFIXED 0x40
+
+#define LJ_GC_WHITES (LJ_GC_WHITE0 | LJ_GC_WHITE1)
+#define LJ_GC_COLORS (LJ_GC_WHITES | LJ_GC_BLACK)
+#define LJ_GC_WEAK (LJ_GC_WEAKKEY | LJ_GC_WEAKVAL)
+
+/* Macros to test and set GCobj colors. */
+#define iswhite(x) ((x)->gch.marked & LJ_GC_WHITES)
+#define isblack(x) ((x)->gch.marked & LJ_GC_BLACK)
+#define isgray(x) (!((x)->gch.marked & (LJ_GC_BLACK|LJ_GC_WHITES)))
+#define tviswhite(x) (tvisgcv(x) && iswhite(gcV(x)))
+#define otherwhite(g) (g->gc.currentwhite ^ LJ_GC_WHITES)
+#define isdead(g, v) ((v)->gch.marked & otherwhite(g) & LJ_GC_WHITES)
+
+#define curwhite(g) ((g)->gc.currentwhite & LJ_GC_WHITES)
+#define newwhite(g, x) (obj2gco(x)->gch.marked = (uint8_t)curwhite(g))
+#define makewhite(g, x) \
+ ((x)->gch.marked = ((x)->gch.marked & (uint8_t)~LJ_GC_COLORS) | curwhite(g))
+#define flipwhite(x) ((x)->gch.marked ^= LJ_GC_WHITES)
+#define black2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_BLACK)
+#define fixstring(s) ((s)->marked |= LJ_GC_FIXED)
+
+/* Collector. */
+LJ_FUNC size_t lj_gc_separateudata(global_State *g, int all);
+LJ_FUNC void lj_gc_finalize_udata(lua_State *L);
+#if LJ_HASFFI
+LJ_FUNC void lj_gc_finalize_cdata(lua_State *L);
+#else
+#define lj_gc_finalize_cdata(L) UNUSED(L)
+#endif
+LJ_FUNC void lj_gc_freeall(global_State *g);
+LJ_FUNCA int LJ_FASTCALL lj_gc_step(lua_State *L);
+LJ_FUNCA void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L);
+#if LJ_HASJIT
+LJ_FUNC int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps);
+#endif
+LJ_FUNC void lj_gc_fullgc(lua_State *L);
+
+/* GC check: drive collector forward if the GC threshold has been reached. */
+#define lj_gc_check(L) \
+ { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
+ lj_gc_step(L); }
+#define lj_gc_check_fixtop(L) \
+ { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
+ lj_gc_step_fixtop(L); }
+
+/* Write barriers. */
+LJ_FUNC void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v);
+LJ_FUNCA void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv);
+LJ_FUNC void lj_gc_closeuv(global_State *g, GCupval *uv);
+#if LJ_HASJIT
+LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno);
+#endif
+
+/* Move the GC propagation frontier back for tables (make it gray again). */
+static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t)
+{
+ GCobj *o = obj2gco(t);
+ lua_assert(isblack(o) && !isdead(g, o));
+ lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
+ black2gray(o);
+ setgcrefr(t->gclist, g->gc.grayagain);
+ setgcref(g->gc.grayagain, o);
+}
+
+/* Barrier for stores to table objects. TValue and GCobj variant. */
+#define lj_gc_anybarriert(L, t) \
+ { if (LJ_UNLIKELY(isblack(obj2gco(t)))) lj_gc_barrierback(G(L), (t)); }
+#define lj_gc_barriert(L, t, tv) \
+ { if (tviswhite(tv) && isblack(obj2gco(t))) \
+ lj_gc_barrierback(G(L), (t)); }
+#define lj_gc_objbarriert(L, t, o) \
+ { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) \
+ lj_gc_barrierback(G(L), (t)); }
+
+/* Barrier for stores to any other object. TValue and GCobj variant. */
+#define lj_gc_barrier(L, p, tv) \
+ { if (tviswhite(tv) && isblack(obj2gco(p))) \
+ lj_gc_barrierf(G(L), obj2gco(p), gcV(tv)); }
+#define lj_gc_objbarrier(L, p, o) \
+ { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \
+ lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); }
+
+/* Allocator. */
+LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz);
+LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size);
+LJ_FUNC void *lj_mem_grow(lua_State *L, void *p,
+ MSize *szp, MSize lim, MSize esz);
+
+#define lj_mem_new(L, s) lj_mem_realloc(L, NULL, 0, (s))
+
+static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize)
+{
+ g->gc.total -= (MSize)osize;
+ g->allocf(g->allocd, p, osize, 0);
+}
+
+#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (MSize)((n)*sizeof(t))))
+#define lj_mem_reallocvec(L, p, on, n, t) \
+ ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (MSize)((n)*sizeof(t))))
+#define lj_mem_growvec(L, p, n, m, t) \
+ ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t)))
+#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t))
+
+#define lj_mem_newobj(L, t) ((t *)lj_mem_newgco(L, sizeof(t)))
+#define lj_mem_newt(L, s, t) ((t *)lj_mem_new(L, (s)))
+#define lj_mem_freet(g, p) lj_mem_free(g, (p), sizeof(*(p)))
+
+#endif
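The practical contract of this header is that every store of a collectable value into an object that may already be black has to be paired with one of the barrier macros above. A hedged usage sketch follows; the helper name is made up, and it assumes the usual LuaJIT internal headers are on the include path, mirroring how the C API pairs a slot store with the table barrier.

    #include "lj_obj.h"
    #include "lj_gc.h"

    /* Illustrative only: store a value into a table slot and keep the
    ** tri-color invariant by marking the table gray again if it was black. */
    void example_setslot(lua_State *L, GCtab *t, TValue *slot, cTValue *val)
    {
      copyTV(L, slot, val);      /* Do the store... */
      lj_gc_anybarriert(L, t);   /* ...then run the backward barrier for tables. */
    }
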
diff --git a/src/LuaJIT/src/lj_gdbjit.c b/src/LuaJIT/src/lj_gdbjit.c
new file mode 100644
index 000000000..0cff11b08
--- /dev/null
+++ b/src/LuaJIT/src/lj_gdbjit.c
@@ -0,0 +1,793 @@
+/*
+** Client for the GDB JIT API.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_gdbjit_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_frame.h"
+#include "lj_jit.h"
+#include "lj_dispatch.h"
+
+/* This is not compiled in by default.
+** Enable with -DLUAJIT_USE_GDBJIT in the Makefile and recompile everything.
+*/
+#ifdef LUAJIT_USE_GDBJIT
+
+/* The GDB JIT API allows JIT compilers to pass debug information about
+** JIT-compiled code back to GDB. You need at least GDB 7.0 or higher
+** to see it in action.
+**
+** This is a passive API, so it works even when not running under GDB
+** or when attaching to an already running process. Alas, this implies
+** enabling it always has a non-negligible overhead -- do not use in
+** release mode!
+**
+** The LuaJIT GDB JIT client is rather minimal at the moment. It gives
+** each trace a symbol name and adds a source location and frame unwind
+** information. Obviously LuaJIT itself and any embedding C application
+** should be compiled with debug symbols, too (see the Makefile).
+**
+** Traces are named TRACE_1, TRACE_2, ... these correspond to the trace
+** numbers from -jv or -jdump. Use "break TRACE_1" or "tbreak TRACE_1" etc.
+** to set breakpoints on specific traces (even ahead of their creation).
+**
+** The source location for each trace allows listing the corresponding
+** source lines with the GDB command "list" (but only if the Lua source
+** has been loaded from a file). Currently this is always set to the
+** location where the trace has been started.
+**
+** Frame unwind information can be inspected with the GDB command
+** "info frame". This also allows proper backtraces across JIT-compiled
+** code with the GDB command "bt".
+**
+** You probably want to add the following settings to a .gdbinit file
+** (or add them to ~/.gdbinit):
+** set disassembly-flavor intel
+** set breakpoint pending on
+**
+** Here's a sample GDB session:
+** ------------------------------------------------------------------------
+
+$ cat >x.lua
+for outer=1,100 do
+ for inner=1,100 do end
+end
+^D
+
+$ luajit -jv x.lua
+[TRACE 1 x.lua:2]
+[TRACE 2 (1/3) x.lua:1 -> 1]
+
+$ gdb --quiet --args luajit x.lua
+(gdb) tbreak TRACE_1
+Function "TRACE_1" not defined.
+Temporary breakpoint 1 (TRACE_1) pending.
+(gdb) run
+Starting program: luajit x.lua
+
+Temporary breakpoint 1, TRACE_1 () at x.lua:2
+2 for inner=1,100 do end
+(gdb) list
+1 for outer=1,100 do
+2 for inner=1,100 do end
+3 end
+(gdb) bt
+#0 TRACE_1 () at x.lua:2
+#1 0x08053690 in lua_pcall [...]
+[...]
+#7 0x0806ff90 in main [...]
+(gdb) disass TRACE_1
+Dump of assembler code for function TRACE_1:
+0xf7fd9fba <TRACE_1+0>: mov DWORD PTR ds:0xf7e0e2a0,0x1
+0xf7fd9fc4 <TRACE_1+10>: movsd xmm7,QWORD PTR [edx+0x20]
+[...]
+0xf7fd9ff8 <TRACE_1+62>: jmp 0xf7fd2014
+End of assembler dump.
+(gdb) tbreak TRACE_2
+Function "TRACE_2" not defined.
+Temporary breakpoint 2 (TRACE_2) pending.
+(gdb) cont
+Continuing.
+
+Temporary breakpoint 2, TRACE_2 () at x.lua:1
+1 for outer=1,100 do
+(gdb) info frame
+Stack level 0, frame at 0xffffd7c0:
+ eip = 0xf7fd9f60 in TRACE_2 (x.lua:1); saved eip 0x8053690
+ called by frame at 0xffffd7e0
+ source language unknown.
+ Arglist at 0xffffd78c, args:
+ Locals at 0xffffd78c, Previous frame's sp is 0xffffd7c0
+ Saved registers:
+ ebx at 0xffffd7ac, ebp at 0xffffd7b8, esi at 0xffffd7b0, edi at 0xffffd7b4,
+ eip at 0xffffd7bc
+(gdb)
+
+** ------------------------------------------------------------------------
+*/
+
+/* -- GDB JIT API --------------------------------------------------------- */
+
+/* GDB JIT actions. */
+enum {
+ GDBJIT_NOACTION = 0,
+ GDBJIT_REGISTER,
+ GDBJIT_UNREGISTER
+};
+
+/* GDB JIT entry. */
+typedef struct GDBJITentry {
+ struct GDBJITentry *next_entry;
+ struct GDBJITentry *prev_entry;
+ const char *symfile_addr;
+ uint64_t symfile_size;
+} GDBJITentry;
+
+/* GDB JIT descriptor. */
+typedef struct GDBJITdesc {
+ uint32_t version;
+ uint32_t action_flag;
+ GDBJITentry *relevant_entry;
+ GDBJITentry *first_entry;
+} GDBJITdesc;
+
+GDBJITdesc __jit_debug_descriptor = {
+ 1, GDBJIT_NOACTION, NULL, NULL
+};
+
+/* GDB sets a breakpoint at this function. */
+void LJ_NOINLINE __jit_debug_register_code()
+{
+ __asm__ __volatile__("");
+};
+
+/* -- In-memory ELF object definitions ------------------------------------ */
+
+/* ELF definitions. */
+typedef struct ELFheader {
+ uint8_t emagic[4];
+ uint8_t eclass;
+ uint8_t eendian;
+ uint8_t eversion;
+ uint8_t eosabi;
+ uint8_t eabiversion;
+ uint8_t epad[7];
+ uint16_t type;
+ uint16_t machine;
+ uint32_t version;
+ uintptr_t entry;
+ uintptr_t phofs;
+ uintptr_t shofs;
+ uint32_t flags;
+ uint16_t ehsize;
+ uint16_t phentsize;
+ uint16_t phnum;
+ uint16_t shentsize;
+ uint16_t shnum;
+ uint16_t shstridx;
+} ELFheader;
+
+typedef struct ELFsectheader {
+ uint32_t name;
+ uint32_t type;
+ uintptr_t flags;
+ uintptr_t addr;
+ uintptr_t ofs;
+ uintptr_t size;
+ uint32_t link;
+ uint32_t info;
+ uintptr_t align;
+ uintptr_t entsize;
+} ELFsectheader;
+
+#define ELFSECT_IDX_ABS 0xfff1
+
+enum {
+ ELFSECT_TYPE_PROGBITS = 1,
+ ELFSECT_TYPE_SYMTAB = 2,
+ ELFSECT_TYPE_STRTAB = 3,
+ ELFSECT_TYPE_NOBITS = 8
+};
+
+#define ELFSECT_FLAGS_WRITE 1
+#define ELFSECT_FLAGS_ALLOC 2
+#define ELFSECT_FLAGS_EXEC 4
+
+typedef struct ELFsymbol {
+#if LJ_64
+ uint32_t name;
+ uint8_t info;
+ uint8_t other;
+ uint16_t sectidx;
+ uintptr_t value;
+ uint64_t size;
+#else
+ uint32_t name;
+ uintptr_t value;
+ uint32_t size;
+ uint8_t info;
+ uint8_t other;
+ uint16_t sectidx;
+#endif
+} ELFsymbol;
+
+enum {
+ ELFSYM_TYPE_FUNC = 2,
+ ELFSYM_TYPE_FILE = 4,
+ ELFSYM_BIND_LOCAL = 0 << 4,
+ ELFSYM_BIND_GLOBAL = 1 << 4,
+};
+
+/* DWARF definitions. */
+#define DW_CIE_VERSION 1
+
+enum {
+ DW_CFA_nop = 0x0,
+ DW_CFA_offset_extended = 0x5,
+ DW_CFA_def_cfa = 0xc,
+ DW_CFA_def_cfa_offset = 0xe,
+ DW_CFA_offset_extended_sf = 0x11,
+ DW_CFA_advance_loc = 0x40,
+ DW_CFA_offset = 0x80
+};
+
+enum {
+ DW_EH_PE_udata4 = 3,
+ DW_EH_PE_textrel = 0x20
+};
+
+enum {
+ DW_TAG_compile_unit = 0x11
+};
+
+enum {
+ DW_children_no = 0,
+ DW_children_yes = 1
+};
+
+enum {
+ DW_AT_name = 0x03,
+ DW_AT_stmt_list = 0x10,
+ DW_AT_low_pc = 0x11,
+ DW_AT_high_pc = 0x12
+};
+
+enum {
+ DW_FORM_addr = 0x01,
+ DW_FORM_data4 = 0x06,
+ DW_FORM_string = 0x08
+};
+
+enum {
+ DW_LNS_extended_op = 0,
+ DW_LNS_copy = 1,
+ DW_LNS_advance_pc = 2,
+ DW_LNS_advance_line = 3
+};
+
+enum {
+ DW_LNE_end_sequence = 1,
+ DW_LNE_set_address = 2
+};
+
+enum {
+#if LJ_TARGET_X86
+ DW_REG_AX, DW_REG_CX, DW_REG_DX, DW_REG_BX,
+ DW_REG_SP, DW_REG_BP, DW_REG_SI, DW_REG_DI,
+ DW_REG_RA,
+#elif LJ_TARGET_X64
+ /* Yes, the order is strange, but correct. */
+ DW_REG_AX, DW_REG_DX, DW_REG_CX, DW_REG_BX,
+ DW_REG_SI, DW_REG_DI, DW_REG_BP, DW_REG_SP,
+ DW_REG_8, DW_REG_9, DW_REG_10, DW_REG_11,
+ DW_REG_12, DW_REG_13, DW_REG_14, DW_REG_15,
+ DW_REG_RA,
+#elif LJ_TARGET_ARM
+ DW_REG_SP = 13,
+ DW_REG_RA = 14,
+#elif LJ_TARGET_PPC
+ DW_REG_SP = 1,
+ DW_REG_RA = 65,
+ DW_REG_CR = 70,
+#elif LJ_TARGET_MIPS
+ DW_REG_SP = 29,
+ DW_REG_RA = 31,
+#else
+#error "Unsupported target architecture"
+#endif
+};
+
+/* Minimal list of sections for the in-memory ELF object. */
+enum {
+ GDBJIT_SECT_NULL,
+ GDBJIT_SECT_text,
+ GDBJIT_SECT_eh_frame,
+ GDBJIT_SECT_shstrtab,
+ GDBJIT_SECT_strtab,
+ GDBJIT_SECT_symtab,
+ GDBJIT_SECT_debug_info,
+ GDBJIT_SECT_debug_abbrev,
+ GDBJIT_SECT_debug_line,
+ GDBJIT_SECT__MAX
+};
+
+enum {
+ GDBJIT_SYM_UNDEF,
+ GDBJIT_SYM_FILE,
+ GDBJIT_SYM_FUNC,
+ GDBJIT_SYM__MAX
+};
+
+/* In-memory ELF object. */
+typedef struct GDBJITobj {
+ ELFheader hdr; /* ELF header. */
+ ELFsectheader sect[GDBJIT_SECT__MAX]; /* ELF sections. */
+ ELFsymbol sym[GDBJIT_SYM__MAX]; /* ELF symbol table. */
+ uint8_t space[4096]; /* Space for various section data. */
+} GDBJITobj;
+
+/* Combined structure for GDB JIT entry and ELF object. */
+typedef struct GDBJITentryobj {
+ GDBJITentry entry;
+ size_t sz;
+ GDBJITobj obj;
+} GDBJITentryobj;
+
+/* Template for in-memory ELF header. */
+static const ELFheader elfhdr_template = {
+ .emagic = { 0x7f, 'E', 'L', 'F' },
+ .eclass = LJ_64 ? 2 : 1,
+ .eendian = LJ_ENDIAN_SELECT(1, 2),
+ .eversion = 1,
+#if LJ_TARGET_LINUX
+ .eosabi = 0, /* Nope, it's not 3. */
+#elif defined(__FreeBSD__)
+ .eosabi = 9,
+#elif defined(__NetBSD__)
+ .eosabi = 2,
+#elif defined(__OpenBSD__)
+ .eosabi = 12,
+#elif (defined(__sun__) && defined(__svr4__)) || defined(__solaris__)
+ .eosabi = 6,
+#else
+ .eosabi = 0,
+#endif
+ .eabiversion = 0,
+ .epad = { 0, 0, 0, 0, 0, 0, 0 },
+ .type = 1,
+#if LJ_TARGET_X86
+ .machine = 3,
+#elif LJ_TARGET_X64
+ .machine = 62,
+#elif LJ_TARGET_ARM
+ .machine = 40,
+#elif LJ_TARGET_PPC
+ .machine = 20,
+#elif LJ_TARGET_MIPS
+ .machine = 8,
+#else
+#error "Unsupported target architecture"
+#endif
+ .version = 1,
+ .entry = 0,
+ .phofs = 0,
+ .shofs = offsetof(GDBJITobj, sect),
+ .flags = 0,
+ .ehsize = sizeof(ELFheader),
+ .phentsize = 0,
+ .phnum = 0,
+ .shentsize = sizeof(ELFsectheader),
+ .shnum = GDBJIT_SECT__MAX,
+ .shstridx = GDBJIT_SECT_shstrtab
+};
+
+/* -- In-memory ELF object generation ------------------------------------- */
+
+/* Context for generating the ELF object for the GDB JIT API. */
+typedef struct GDBJITctx {
+ uint8_t *p; /* Pointer to next address in obj.space. */
+ uint8_t *startp; /* Pointer to start address in obj.space. */
+ GCtrace *T; /* Generate symbols for this trace. */
+ uintptr_t mcaddr; /* Machine code address. */
+ MSize szmcode; /* Size of machine code. */
+ MSize spadjp; /* Stack adjustment for parent trace or interpreter. */
+ MSize spadj; /* Stack adjustment for trace itself. */
+ BCLine lineno; /* Starting line number. */
+ const char *filename; /* Starting file name. */
+ size_t objsize; /* Final size of ELF object. */
+ GDBJITobj obj; /* In-memory ELF object. */
+} GDBJITctx;
+
+/* Add a zero-terminated string. */
+static uint32_t gdbjit_strz(GDBJITctx *ctx, const char *str)
+{
+ uint8_t *p = ctx->p;
+ uint32_t ofs = (uint32_t)(p - ctx->startp);
+ do {
+ *p++ = (uint8_t)*str;
+ } while (*str++);
+ ctx->p = p;
+ return ofs;
+}
+
+/* Append a decimal number. */
+static void gdbjit_catnum(GDBJITctx *ctx, uint32_t n)
+{
+ if (n >= 10) { uint32_t m = n / 10; n = n % 10; gdbjit_catnum(ctx, m); }
+ *ctx->p++ = '0' + n;
+}
+
+/* Add a ULEB128 value. */
+static void gdbjit_uleb128(GDBJITctx *ctx, uint32_t v)
+{
+ uint8_t *p = ctx->p;
+ for (; v >= 0x80; v >>= 7)
+ *p++ = (uint8_t)((v & 0x7f) | 0x80);
+ *p++ = (uint8_t)v;
+ ctx->p = p;
+}
+
+/* Add a SLEB128 value. */
+static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v)
+{
+ uint8_t *p = ctx->p;
+ for (; (uint32_t)(v+0x40) >= 0x80; v >>= 7)
+ *p++ = (uint8_t)((v & 0x7f) | 0x80);
+ *p++ = (uint8_t)(v & 0x7f);
+ ctx->p = p;
+}
+
+/* Shortcuts to generate DWARF structures. */
+#define DB(x) (*p++ = (x))
+#define DI8(x) (*(int8_t *)p = (x), p++)
+#define DU16(x) (*(uint16_t *)p = (x), p += 2)
+#define DU32(x) (*(uint32_t *)p = (x), p += 4)
+#define DADDR(x) (*(uintptr_t *)p = (x), p += sizeof(uintptr_t))
+#define DUV(x) (ctx->p = p, gdbjit_uleb128(ctx, (x)), p = ctx->p)
+#define DSV(x) (ctx->p = p, gdbjit_sleb128(ctx, (x)), p = ctx->p)
+#define DSTR(str) (ctx->p = p, gdbjit_strz(ctx, (str)), p = ctx->p)
+#define DALIGNNOP(s) while ((uintptr_t)p & ((s)-1)) *p++ = DW_CFA_nop
+#define DSECT(name, stmt) \
+ { uint32_t *szp_##name = (uint32_t *)p; p += 4; stmt \
+ *szp_##name = (uint32_t)((p-(uint8_t *)szp_##name)-4); } \
+
+/* Initialize ELF section headers. */
+static void LJ_FASTCALL gdbjit_secthdr(GDBJITctx *ctx)
+{
+ ELFsectheader *sect;
+
+ *ctx->p++ = '\0'; /* Empty string at start of string table. */
+
+#define SECTDEF(id, tp, al) \
+ sect = &ctx->obj.sect[GDBJIT_SECT_##id]; \
+ sect->name = gdbjit_strz(ctx, "." #id); \
+ sect->type = ELFSECT_TYPE_##tp; \
+ sect->align = (al)
+
+ SECTDEF(text, NOBITS, 16);
+ sect->flags = ELFSECT_FLAGS_ALLOC|ELFSECT_FLAGS_EXEC;
+ sect->addr = ctx->mcaddr;
+ sect->ofs = 0;
+ sect->size = ctx->szmcode;
+
+ SECTDEF(eh_frame, PROGBITS, sizeof(uintptr_t));
+ sect->flags = ELFSECT_FLAGS_ALLOC;
+
+ SECTDEF(shstrtab, STRTAB, 1);
+ SECTDEF(strtab, STRTAB, 1);
+
+ SECTDEF(symtab, SYMTAB, sizeof(uintptr_t));
+ sect->ofs = offsetof(GDBJITobj, sym);
+ sect->size = sizeof(ctx->obj.sym);
+ sect->link = GDBJIT_SECT_strtab;
+ sect->entsize = sizeof(ELFsymbol);
+ sect->info = GDBJIT_SYM_FUNC;
+
+ SECTDEF(debug_info, PROGBITS, 1);
+ SECTDEF(debug_abbrev, PROGBITS, 1);
+ SECTDEF(debug_line, PROGBITS, 1);
+
+#undef SECTDEF
+}
+
+/* Initialize symbol table. */
+static void LJ_FASTCALL gdbjit_symtab(GDBJITctx *ctx)
+{
+ ELFsymbol *sym;
+
+ *ctx->p++ = '\0'; /* Empty string at start of string table. */
+
+ sym = &ctx->obj.sym[GDBJIT_SYM_FILE];
+ sym->name = gdbjit_strz(ctx, "JIT mcode");
+ sym->sectidx = ELFSECT_IDX_ABS;
+ sym->info = ELFSYM_TYPE_FILE|ELFSYM_BIND_LOCAL;
+
+ sym = &ctx->obj.sym[GDBJIT_SYM_FUNC];
+ sym->name = gdbjit_strz(ctx, "TRACE_"); ctx->p--;
+ gdbjit_catnum(ctx, ctx->T->traceno); *ctx->p++ = '\0';
+ sym->sectidx = GDBJIT_SECT_text;
+ sym->value = 0;
+ sym->size = ctx->szmcode;
+ sym->info = ELFSYM_TYPE_FUNC|ELFSYM_BIND_GLOBAL;
+}
+
+/* Initialize .eh_frame section. */
+static void LJ_FASTCALL gdbjit_ehframe(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+ uint8_t *framep = p;
+
+ /* Emit DWARF EH CIE. */
+ DSECT(CIE,
+ DU32(0); /* Offset to CIE itself. */
+ DB(DW_CIE_VERSION);
+ DSTR("zR"); /* Augmentation. */
+ DUV(1); /* Code alignment factor. */
+ DSV(-(int32_t)sizeof(uintptr_t)); /* Data alignment factor. */
+ DB(DW_REG_RA); /* Return address register. */
+ DB(1); DB(DW_EH_PE_textrel|DW_EH_PE_udata4); /* Augmentation data. */
+ DB(DW_CFA_def_cfa); DUV(DW_REG_SP); DUV(sizeof(uintptr_t));
+#if LJ_TARGET_PPC
+ DB(DW_CFA_offset_extended_sf); DB(DW_REG_RA); DSV(-1);
+#else
+ DB(DW_CFA_offset|DW_REG_RA); DUV(1);
+#endif
+ DALIGNNOP(sizeof(uintptr_t));
+ )
+
+ /* Emit DWARF EH FDE. */
+ DSECT(FDE,
+ DU32((uint32_t)(p-framep)); /* Offset to CIE. */
+ DU32(0); /* Machine code offset relative to .text. */
+ DU32(ctx->szmcode); /* Machine code length. */
+ DB(0); /* Augmentation data. */
+ /* Registers saved in CFRAME. */
+#if LJ_TARGET_X86
+ DB(DW_CFA_offset|DW_REG_BP); DUV(2);
+ DB(DW_CFA_offset|DW_REG_DI); DUV(3);
+ DB(DW_CFA_offset|DW_REG_SI); DUV(4);
+ DB(DW_CFA_offset|DW_REG_BX); DUV(5);
+#elif LJ_TARGET_X64
+ DB(DW_CFA_offset|DW_REG_BP); DUV(2);
+ DB(DW_CFA_offset|DW_REG_BX); DUV(3);
+ DB(DW_CFA_offset|DW_REG_15); DUV(4);
+ DB(DW_CFA_offset|DW_REG_14); DUV(5);
+ /* Extra registers saved for JIT-compiled code. */
+ DB(DW_CFA_offset|DW_REG_13); DUV(9);
+ DB(DW_CFA_offset|DW_REG_12); DUV(10);
+#elif LJ_TARGET_ARM
+ {
+ int i;
+ for (i = 11; i >= 4; i--) { DB(DW_CFA_offset|i); DUV(2+(11-i)); }
+ }
+#elif LJ_TARGET_PPC
+ {
+ int i;
+ DB(DW_CFA_offset_extended); DB(DW_REG_CR); DUV(55);
+ for (i = 14; i <= 31; i++) {
+ DB(DW_CFA_offset|i); DUV(37+(31-i));
+ DB(DW_CFA_offset|32|i); DUV(2+2*(31-i));
+ }
+ }
+#elif LJ_TARGET_MIPS
+ {
+ int i;
+ DB(DW_CFA_offset|30); DUV(2);
+ for (i = 23; i >= 16; i--) { DB(DW_CFA_offset|i); DUV(26-i); }
+ for (i = 30; i >= 20; i -= 2) { DB(DW_CFA_offset|32|i); DUV(42-i); }
+ }
+#else
+#error "Unsupported target architecture"
+#endif
+ if (ctx->spadjp != ctx->spadj) { /* Parent/interpreter stack frame size. */
+ DB(DW_CFA_def_cfa_offset); DUV(ctx->spadjp);
+ DB(DW_CFA_advance_loc|1); /* Only an approximation. */
+ }
+ DB(DW_CFA_def_cfa_offset); DUV(ctx->spadj); /* Trace stack frame size. */
+ DALIGNNOP(sizeof(uintptr_t));
+ )
+
+ ctx->p = p;
+}
+
+/* Initialize .debug_info section. */
+static void LJ_FASTCALL gdbjit_debuginfo(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+
+ DSECT(info,
+ DU16(2); /* DWARF version. */
+ DU32(0); /* Abbrev offset. */
+ DB(sizeof(uintptr_t)); /* Pointer size. */
+
+ DUV(1); /* Abbrev #1: DW_TAG_compile_unit. */
+ DSTR(ctx->filename); /* DW_AT_name. */
+ DADDR(ctx->mcaddr); /* DW_AT_low_pc. */
+ DADDR(ctx->mcaddr + ctx->szmcode); /* DW_AT_high_pc. */
+ DU32(0); /* DW_AT_stmt_list. */
+ )
+
+ ctx->p = p;
+}
+
+/* Initialize .debug_abbrev section. */
+static void LJ_FASTCALL gdbjit_debugabbrev(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+
+ /* Abbrev #1: DW_TAG_compile_unit. */
+ DUV(1); DUV(DW_TAG_compile_unit);
+ DB(DW_children_no);
+ DUV(DW_AT_name); DUV(DW_FORM_string);
+ DUV(DW_AT_low_pc); DUV(DW_FORM_addr);
+ DUV(DW_AT_high_pc); DUV(DW_FORM_addr);
+ DUV(DW_AT_stmt_list); DUV(DW_FORM_data4);
+ DB(0); DB(0);
+
+ ctx->p = p;
+}
+
+#define DLNE(op, s) (DB(DW_LNS_extended_op), DUV(1+(s)), DB((op)))
+
+/* Initialize .debug_line section. */
+static void LJ_FASTCALL gdbjit_debugline(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+
+ DSECT(line,
+ DU16(2); /* DWARF version. */
+ DSECT(header,
+ DB(1); /* Minimum instruction length. */
+ DB(1); /* is_stmt. */
+ DI8(0); /* Line base for special opcodes. */
+ DB(2); /* Line range for special opcodes. */
+ DB(3+1); /* Opcode base at DW_LNS_advance_line+1. */
+ DB(0); DB(1); DB(1); /* Standard opcode lengths. */
+ /* Directory table. */
+ DB(0);
+ /* File name table. */
+ DSTR(ctx->filename); DUV(0); DUV(0); DUV(0);
+ DB(0);
+ )
+
+ DLNE(DW_LNE_set_address, sizeof(uintptr_t)); DADDR(ctx->mcaddr);
+ if (ctx->lineno) {
+ DB(DW_LNS_advance_line); DSV(ctx->lineno-1);
+ }
+ DB(DW_LNS_copy);
+ DB(DW_LNS_advance_pc); DUV(ctx->szmcode);
+ DLNE(DW_LNE_end_sequence, 0);
+ )
+
+ ctx->p = p;
+}
+
+#undef DLNE
+
+/* Undef shortcuts. */
+#undef DB
+#undef DI8
+#undef DU16
+#undef DU32
+#undef DADDR
+#undef DUV
+#undef DSV
+#undef DSTR
+#undef DALIGNNOP
+#undef DSECT
+
+/* Type of a section initializer callback. */
+typedef void (LJ_FASTCALL *GDBJITinitf)(GDBJITctx *ctx);
+
+/* Call section initializer and set the section offset and size. */
+static void gdbjit_initsect(GDBJITctx *ctx, int sect, GDBJITinitf initf)
+{
+ ctx->startp = ctx->p;
+ ctx->obj.sect[sect].ofs = (uintptr_t)((char *)ctx->p - (char *)&ctx->obj);
+ initf(ctx);
+ ctx->obj.sect[sect].size = (uintptr_t)(ctx->p - ctx->startp);
+}
+
+#define SECTALIGN(p, a) \
+ ((p) = (uint8_t *)(((uintptr_t)(p) + ((a)-1)) & ~(uintptr_t)((a)-1)))
+
+/* Build in-memory ELF object. */
+static void gdbjit_buildobj(GDBJITctx *ctx)
+{
+ GDBJITobj *obj = &ctx->obj;
+ /* Fill in ELF header and clear structures. */
+ memcpy(&obj->hdr, &elfhdr_template, sizeof(ELFheader));
+ memset(&obj->sect, 0, sizeof(ELFsectheader)*GDBJIT_SECT__MAX);
+ memset(&obj->sym, 0, sizeof(ELFsymbol)*GDBJIT_SYM__MAX);
+ /* Initialize sections. */
+ ctx->p = obj->space;
+ gdbjit_initsect(ctx, GDBJIT_SECT_shstrtab, gdbjit_secthdr);
+ gdbjit_initsect(ctx, GDBJIT_SECT_strtab, gdbjit_symtab);
+ gdbjit_initsect(ctx, GDBJIT_SECT_debug_info, gdbjit_debuginfo);
+ gdbjit_initsect(ctx, GDBJIT_SECT_debug_abbrev, gdbjit_debugabbrev);
+ gdbjit_initsect(ctx, GDBJIT_SECT_debug_line, gdbjit_debugline);
+ SECTALIGN(ctx->p, sizeof(uintptr_t));
+ gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe);
+ ctx->objsize = (size_t)((char *)ctx->p - (char *)obj);
+ lua_assert(ctx->objsize < sizeof(GDBJITobj));
+}
+
+#undef SECTALIGN
+
+/* -- Interface to GDB JIT API -------------------------------------------- */
+
+/* Add new entry to GDB JIT symbol chain. */
+static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx)
+{
+ /* Allocate memory for GDB JIT entry and ELF object. */
+ MSize sz = (MSize)(sizeof(GDBJITentryobj) - sizeof(GDBJITobj) + ctx->objsize);
+ GDBJITentryobj *eo = lj_mem_newt(L, sz, GDBJITentryobj);
+ memcpy(&eo->obj, &ctx->obj, ctx->objsize); /* Copy ELF object. */
+ eo->sz = sz;
+ ctx->T->gdbjit_entry = (void *)eo;
+ /* Link new entry to chain and register it. */
+ eo->entry.prev_entry = NULL;
+ eo->entry.next_entry = __jit_debug_descriptor.first_entry;
+ if (eo->entry.next_entry)
+ eo->entry.next_entry->prev_entry = &eo->entry;
+ eo->entry.symfile_addr = (const char *)&eo->obj;
+ eo->entry.symfile_size = ctx->objsize;
+ __jit_debug_descriptor.first_entry = &eo->entry;
+ __jit_debug_descriptor.relevant_entry = &eo->entry;
+ __jit_debug_descriptor.action_flag = GDBJIT_REGISTER;
+ __jit_debug_register_code();
+}
+
+/* Add debug info for newly compiled trace and notify GDB. */
+void lj_gdbjit_addtrace(jit_State *J, GCtrace *T)
+{
+ GDBJITctx ctx;
+ GCproto *pt = &gcref(T->startpt)->pt;
+ TraceNo parent = T->ir[REF_BASE].op1;
+ const BCIns *startpc = mref(T->startpc, const BCIns);
+ ctx.T = T;
+ ctx.mcaddr = (uintptr_t)T->mcode;
+ ctx.szmcode = T->szmcode;
+ ctx.spadjp = CFRAME_SIZE_JIT +
+ (MSize)(parent ? traceref(J, parent)->spadjust : 0);
+ ctx.spadj = CFRAME_SIZE_JIT + T->spadjust;
+ lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc);
+ ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
+ ctx.filename = proto_chunknamestr(pt);
+ if (*ctx.filename == '@' || *ctx.filename == '=')
+ ctx.filename++;
+ else
+ ctx.filename = "(string)";
+ gdbjit_buildobj(&ctx);
+ gdbjit_newentry(J->L, &ctx);
+}
+
+/* Delete debug info for trace and notify GDB. */
+void lj_gdbjit_deltrace(jit_State *J, GCtrace *T)
+{
+ GDBJITentryobj *eo = (GDBJITentryobj *)T->gdbjit_entry;
+ if (eo) {
+ if (eo->entry.prev_entry)
+ eo->entry.prev_entry->next_entry = eo->entry.next_entry;
+ else
+ __jit_debug_descriptor.first_entry = eo->entry.next_entry;
+ if (eo->entry.next_entry)
+ eo->entry.next_entry->prev_entry = eo->entry.prev_entry;
+ __jit_debug_descriptor.relevant_entry = &eo->entry;
+ __jit_debug_descriptor.action_flag = GDBJIT_UNREGISTER;
+ __jit_debug_register_code();
+ lj_mem_free(J2G(J), eo, eo->sz);
+ }
+}
+
+#endif
+#endif
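Outside of LuaJIT, the registration half of this file follows the generic GDB JIT interface: the process exposes __jit_debug_descriptor plus an empty, non-inlined __jit_debug_register_code() that GDB breaks on, and links jit_code_entry records pointing at in-memory symbol files. A standalone sketch of just that protocol is shown below; the struct and enum names follow the GDB documentation, the attribute syntax assumes GCC/Clang, and the symfile buffer is a placeholder rather than a valid ELF image.

    #include <stddef.h>
    #include <stdint.h>

    typedef enum { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN } jit_actions_t;

    struct jit_code_entry {
      struct jit_code_entry *next_entry;
      struct jit_code_entry *prev_entry;
      const char *symfile_addr;
      uint64_t symfile_size;
    };

    struct jit_descriptor {
      uint32_t version;
      uint32_t action_flag;
      struct jit_code_entry *relevant_entry;
      struct jit_code_entry *first_entry;
    };

    /* GDB looks these two symbols up by name and sets a breakpoint on the
    ** function, so it must not be inlined or optimized away. */
    struct jit_descriptor __jit_debug_descriptor = { 1, JIT_NOACTION, NULL, NULL };
    void __attribute__((noinline)) __jit_debug_register_code(void)
    {
      __asm__ __volatile__("");
    }

    static struct jit_code_entry entry;
    static char fake_symfile[64];  /* Placeholder for an in-memory object file. */

    void register_fake_symfile(void)
    {
      entry.symfile_addr = fake_symfile;
      entry.symfile_size = sizeof(fake_symfile);
      entry.prev_entry = NULL;
      entry.next_entry = __jit_debug_descriptor.first_entry;
      if (entry.next_entry)
        entry.next_entry->prev_entry = &entry;
      __jit_debug_descriptor.first_entry = &entry;
      __jit_debug_descriptor.relevant_entry = &entry;
      __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
      __jit_debug_register_code();   /* The debugger reads the new entry here. */
    }
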
diff --git a/src/LuaJIT/src/lj_gdbjit.h b/src/LuaJIT/src/lj_gdbjit.h
new file mode 100644
index 000000000..66c57a78c
--- /dev/null
+++ b/src/LuaJIT/src/lj_gdbjit.h
@@ -0,0 +1,22 @@
+/*
+** Client for the GDB JIT API.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_GDBJIT_H
+#define _LJ_GDBJIT_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT && defined(LUAJIT_USE_GDBJIT)
+
+LJ_FUNC void lj_gdbjit_addtrace(jit_State *J, GCtrace *T);
+LJ_FUNC void lj_gdbjit_deltrace(jit_State *J, GCtrace *T);
+
+#else
+#define lj_gdbjit_addtrace(J, T) UNUSED(T)
+#define lj_gdbjit_deltrace(J, T) UNUSED(T)
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_ir.c b/src/LuaJIT/src/lj_ir.c
new file mode 100644
index 000000000..f198a6df1
--- /dev/null
+++ b/src/LuaJIT/src/lj_ir.c
@@ -0,0 +1,492 @@
+/*
+** SSA IR (Intermediate Representation) emitter.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_ir_c
+#define LUA_CORE
+
+/* For pointers to libc/libm functions. */
+#include <stdio.h>
+#include <math.h>
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_carith.h"
+#endif
+#include "lj_vm.h"
+#include "lj_lib.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* -- IR tables ----------------------------------------------------------- */
+
+/* IR instruction modes. */
+LJ_DATADEF const uint8_t lj_ir_mode[IR__MAX+1] = {
+IRDEF(IRMODE)
+ 0
+};
+
+/* C call info for CALL* instructions. */
+LJ_DATADEF const CCallInfo lj_ir_callinfo[] = {
+#define IRCALLCI(cond, name, nargs, kind, type, flags) \
+ { (ASMFunction)IRCALLCOND_##cond(name), \
+ (nargs)|(CCI_CALL_##kind)|(IRT_##type<<CCI_OTSHIFT)|(flags) },
+IRCALLDEF(IRCALLCI)
+#undef IRCALLCI
+ { NULL, 0 }
+};
+
+/* -- IR emitter ---------------------------------------------------------- */
+
+/* Grow IR buffer at the top. */
+void LJ_FASTCALL lj_ir_growtop(jit_State *J)
+{
+ IRIns *baseir = J->irbuf + J->irbotlim;
+ MSize szins = J->irtoplim - J->irbotlim;
+ if (szins) {
+ baseir = (IRIns *)lj_mem_realloc(J->L, baseir, szins*sizeof(IRIns),
+ 2*szins*sizeof(IRIns));
+ J->irtoplim = J->irbotlim + 2*szins;
+ } else {
+ baseir = (IRIns *)lj_mem_realloc(J->L, NULL, 0, LJ_MIN_IRSZ*sizeof(IRIns));
+ J->irbotlim = REF_BASE - LJ_MIN_IRSZ/4;
+ J->irtoplim = J->irbotlim + LJ_MIN_IRSZ;
+ }
+ J->cur.ir = J->irbuf = baseir - J->irbotlim;
+}
+
+/* Grow IR buffer at the bottom or shift it up. */
+static void lj_ir_growbot(jit_State *J)
+{
+ IRIns *baseir = J->irbuf + J->irbotlim;
+ MSize szins = J->irtoplim - J->irbotlim;
+ lua_assert(szins != 0);
+ lua_assert(J->cur.nk == J->irbotlim);
+ if (J->cur.nins + (szins >> 1) < J->irtoplim) {
+ /* More than half of the buffer is free on top: shift up by a quarter. */
+ MSize ofs = szins >> 2;
+ memmove(baseir + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
+ J->irbotlim -= ofs;
+ J->irtoplim -= ofs;
+ J->cur.ir = J->irbuf = baseir - J->irbotlim;
+ } else {
+ /* Double the buffer size, but split the growth amongst top/bottom. */
+ IRIns *newbase = lj_mem_newt(J->L, 2*szins*sizeof(IRIns), IRIns);
+ MSize ofs = szins >= 256 ? 128 : (szins >> 1); /* Limit bottom growth. */
+ memcpy(newbase + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
+ lj_mem_free(G(J->L), baseir, szins*sizeof(IRIns));
+ J->irbotlim -= ofs;
+ J->irtoplim = J->irbotlim + 2*szins;
+ J->cur.ir = J->irbuf = newbase - J->irbotlim;
+ }
+}
+
+/* Emit IR without any optimizations. */
+TRef LJ_FASTCALL lj_ir_emit(jit_State *J)
+{
+ IRRef ref = lj_ir_nextins(J);
+ IRIns *ir = IR(ref);
+ IROp op = fins->o;
+ ir->prev = J->chain[op];
+ J->chain[op] = (IRRef1)ref;
+ ir->o = op;
+ ir->op1 = fins->op1;
+ ir->op2 = fins->op2;
+ J->guardemit.irt |= fins->t.irt;
+ return TREF(ref, irt_t((ir->t = fins->t)));
+}
+
+/* Emit call to a C function. */
+TRef lj_ir_call(jit_State *J, IRCallID id, ...)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ uint32_t n = CCI_NARGS(ci);
+ TRef tr = TREF_NIL;
+ va_list argp;
+ va_start(argp, id);
+ if ((ci->flags & CCI_L)) n--;
+ if (n > 0)
+ tr = va_arg(argp, IRRef);
+ while (n-- > 1)
+ tr = emitir(IRT(IR_CARG, IRT_NIL), tr, va_arg(argp, IRRef));
+ va_end(argp);
+ if (CCI_OP(ci) == IR_CALLS)
+ J->needsnap = 1; /* Need snapshot after call with side effect. */
+ return emitir(CCI_OPTYPE(ci), tr, id);
+}
+
+/* -- Interning of constants ---------------------------------------------- */
+
+/*
+** IR instructions for constants are kept between J->cur.nk >= ref < REF_BIAS.
+** They are chained like all other instructions, but grow downwards.
+** They are interned (like strings in the VM) to facilitate reference
+** comparisons. The same constant must get the same reference.
+*/
+
+/* Get ref of next IR constant and optionally grow IR.
+** Note: this may invalidate all IRIns *!
+*/
+static LJ_AINLINE IRRef ir_nextk(jit_State *J)
+{
+ IRRef ref = J->cur.nk;
+ if (LJ_UNLIKELY(ref <= J->irbotlim)) lj_ir_growbot(J);
+ J->cur.nk = --ref;
+ return ref;
+}
+
+/* Intern int32_t constant. */
+TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ for (ref = J->chain[IR_KINT]; ref; ref = cir[ref].prev)
+ if (cir[ref].i == k)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ ir->i = k;
+ ir->t.irt = IRT_INT;
+ ir->o = IR_KINT;
+ ir->prev = J->chain[IR_KINT];
+ J->chain[IR_KINT] = (IRRef1)ref;
+found:
+ return TREF(ref, IRT_INT);
+}
+
+/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the
+** 64 bit constant. The constants themselves are stored in a chained array
+** and shared across traces.
+**
+** Rationale for choosing this data structure:
+** - The address of the constants is embedded in the generated machine code
+** and must never move. A resizable array or hash table wouldn't work.
+** - Most apps need very few non-32 bit integer constants (less than a dozen).
+** - Linear search is hard to beat in terms of speed and low complexity.
+*/
+typedef struct K64Array {
+ MRef next; /* Pointer to next list. */
+ MSize numk; /* Number of used elements in this array. */
+ TValue k[LJ_MIN_K64SZ]; /* Array of constants. */
+} K64Array;
+
+/* Free all chained arrays. */
+void lj_ir_k64_freeall(jit_State *J)
+{
+ K64Array *k;
+ for (k = mref(J->k64, K64Array); k; ) {
+ K64Array *next = mref(k->next, K64Array);
+ lj_mem_free(J2G(J), k, sizeof(K64Array));
+ k = next;
+ }
+}
+
+/* Find 64 bit constant in chained array or add it. */
+cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64)
+{
+ K64Array *k, *kp = NULL;
+ TValue *ntv;
+ MSize idx;
+ /* Search for the constant in the whole chain of arrays. */
+ for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) {
+ kp = k; /* Remember previous element in list. */
+ for (idx = 0; idx < k->numk; idx++) { /* Search one array. */
+ TValue *tv = &k->k[idx];
+ if (tv->u64 == u64) /* Needed for +-0/NaN/absmask. */
+ return tv;
+ }
+ }
+ /* Constant was not found, need to add it. */
+ if (!(kp && kp->numk < LJ_MIN_K64SZ)) { /* Allocate a new array. */
+ K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array);
+ setmref(kn->next, NULL);
+ kn->numk = 0;
+ if (kp)
+ setmref(kp->next, kn); /* Chain to the end of the list. */
+ else
+ setmref(J->k64, kn); /* Link first array. */
+ kp = kn;
+ }
+ ntv = &kp->k[kp->numk++]; /* Add to current array. */
+ ntv->u64 = u64;
+ return ntv;
+}
+
+/* Intern 64 bit constant, given by its address. */
+TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64;
+ for (ref = J->chain[op]; ref; ref = cir[ref].prev)
+ if (ir_k64(&cir[ref]) == tv)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ lua_assert(checkptr32(tv));
+ setmref(ir->ptr, tv);
+ ir->t.irt = t;
+ ir->o = op;
+ ir->prev = J->chain[op];
+ J->chain[op] = (IRRef1)ref;
+found:
+ return TREF(ref, t);
+}
+
+/* Intern FP constant, given by its 64 bit pattern. */
+TRef lj_ir_knum_u64(jit_State *J, uint64_t u64)
+{
+ return lj_ir_k64(J, IR_KNUM, lj_ir_k64_find(J, u64));
+}
+
+/* Intern 64 bit integer constant. */
+TRef lj_ir_kint64(jit_State *J, uint64_t u64)
+{
+ return lj_ir_k64(J, IR_KINT64, lj_ir_k64_find(J, u64));
+}
+
+/* Check whether a number is int and return it. -0 is NOT considered an int. */
+static int numistrueint(lua_Number n, int32_t *kp)
+{
+ int32_t k = lj_num2int(n);
+ if (n == (lua_Number)k) {
+ if (kp) *kp = k;
+ if (k == 0) { /* Special check for -0. */
+ TValue tv;
+ setnumV(&tv, n);
+ if (tv.u32.hi != 0)
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Intern number as int32_t constant if possible, otherwise as FP constant. */
+TRef lj_ir_knumint(jit_State *J, lua_Number n)
+{
+ int32_t k;
+ if (numistrueint(n, &k))
+ return lj_ir_kint(J, k);
+ else
+ return lj_ir_knum(J, n);
+}
+
+/* Intern GC object "constant". */
+TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ lua_assert(!isdead(J2G(J), o));
+ for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev)
+ if (ir_kgc(&cir[ref]) == o)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ /* NOBARRIER: Current trace is a GC root. */
+ setgcref(ir->gcr, o);
+ ir->t.irt = (uint8_t)t;
+ ir->o = IR_KGC;
+ ir->prev = J->chain[IR_KGC];
+ J->chain[IR_KGC] = (IRRef1)ref;
+found:
+ return TREF(ref, t);
+}
+
+/* Intern 32 bit pointer constant. */
+TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ lua_assert((void *)(intptr_t)i32ptr(ptr) == ptr);
+ for (ref = J->chain[op]; ref; ref = cir[ref].prev)
+ if (mref(cir[ref].ptr, void) == ptr)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ setmref(ir->ptr, ptr);
+ ir->t.irt = IRT_P32;
+ ir->o = op;
+ ir->prev = J->chain[op];
+ J->chain[op] = (IRRef1)ref;
+found:
+ return TREF(ref, IRT_P32);
+}
+
+/* Intern typed NULL constant. */
+TRef lj_ir_knull(jit_State *J, IRType t)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ for (ref = J->chain[IR_KNULL]; ref; ref = cir[ref].prev)
+ if (irt_t(cir[ref].t) == t)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ ir->i = 0;
+ ir->t.irt = (uint8_t)t;
+ ir->o = IR_KNULL;
+ ir->prev = J->chain[IR_KNULL];
+ J->chain[IR_KNULL] = (IRRef1)ref;
+found:
+ return TREF(ref, t);
+}
+
+/* Intern key slot. */
+TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot);
+ IRRef ref;
+ /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */
+ lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot);
+ for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev)
+ if (cir[ref].op12 == op12)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ ir->op12 = op12;
+ ir->t.irt = IRT_P32;
+ ir->o = IR_KSLOT;
+ ir->prev = J->chain[IR_KSLOT];
+ J->chain[IR_KSLOT] = (IRRef1)ref;
+found:
+ return TREF(ref, IRT_P32);
+}
+
+/* -- Access to IR constants ---------------------------------------------- */
+
+/* Copy value of IR constant. */
+void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir)
+{
+ UNUSED(L);
+ lua_assert(ir->o != IR_KSLOT); /* Common mistake. */
+ switch (ir->o) {
+ case IR_KPRI: setitype(tv, irt_toitype(ir->t)); break;
+ case IR_KINT: setintV(tv, ir->i); break;
+ case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break;
+ case IR_KPTR: case IR_KKPTR: case IR_KNULL:
+ setlightudV(tv, mref(ir->ptr, void));
+ break;
+ case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break;
+#if LJ_HASFFI
+ case IR_KINT64: {
+ GCcdata *cd = lj_cdata_new_(L, CTID_INT64, 8);
+ *(uint64_t *)cdataptr(cd) = ir_kint64(ir)->u64;
+ setcdataV(L, tv, cd);
+ break;
+ }
+#endif
+ default: lua_assert(0); break;
+ }
+}
+
+/* -- Convert IR operand types -------------------------------------------- */
+
+/* Convert from string to number. */
+TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr)
+{
+ if (!tref_isnumber(tr)) {
+ if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ else
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ return tr;
+}
+
+/* Convert from integer or string to number. */
+TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr)
+{
+ if (!tref_isnum(tr)) {
+ if (tref_isinteger(tr))
+ tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
+ else if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ else
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ return tr;
+}
+
+/* Convert from integer or number to string. */
+TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr)
+{
+ if (!tref_isstr(tr)) {
+ if (!tref_isnumber(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ tr = emitir(IRT(IR_TOSTR, IRT_STR), tr, 0);
+ }
+ return tr;
+}
+
+/* -- Miscellaneous IR ops ------------------------------------------------ */
+
+/* Evaluate numeric comparison. */
+int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op)
+{
+ switch (op) {
+ case IR_EQ: return (a == b);
+ case IR_NE: return (a != b);
+ case IR_LT: return (a < b);
+ case IR_GE: return (a >= b);
+ case IR_LE: return (a <= b);
+ case IR_GT: return (a > b);
+ case IR_ULT: return !(a >= b);
+ case IR_UGE: return !(a < b);
+ case IR_ULE: return !(a > b);
+ case IR_UGT: return !(a <= b);
+ default: lua_assert(0); return 0;
+ }
+}
+
+/* Evaluate string comparison. */
+int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op)
+{
+ int res = lj_str_cmp(a, b);
+ switch (op) {
+ case IR_LT: return (res < 0);
+ case IR_GE: return (res >= 0);
+ case IR_LE: return (res <= 0);
+ case IR_GT: return (res > 0);
+ default: lua_assert(0); return 0;
+ }
+}
+
+/* Rollback IR to previous state. */
+void lj_ir_rollback(jit_State *J, IRRef ref)
+{
+ IRRef nins = J->cur.nins;
+ while (nins > ref) {
+ IRIns *ir;
+ nins--;
+ ir = IR(nins);
+ J->chain[ir->o] = ir->prev;
+ }
+ J->cur.nins = nins;
+}
+
+#undef IR
+#undef fins
+#undef emitir
+
+#endif
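The interning scheme above boils down to one idea: each opcode has a chain head in J->chain[], constants grow down from cur.nk, and a lookup walks that chain linearly before a fresh slot is taken, so the same constant always yields the same reference. A detached miniature of the same pattern over a flat array is sketched below; all names are invented, and unlike the real emitter it simply appends instead of growing downwards.

    #include <stdint.h>

    /* Hypothetical miniature of per-opcode constant chains: return an existing
    ** reference for an int constant or append a new slot and chain it. */
    typedef struct MiniK {
      int32_t i;       /* Constant value. */
      uint16_t prev;   /* Previous constant in the same chain (0 = end). */
    } MiniK;

    typedef struct MiniJit {
      MiniK k[256];    /* Slot 0 is reserved as the chain terminator. */
      uint16_t nk;     /* Index of the last used slot. */
      uint16_t chain;  /* Head of the KINT-style chain. */
    } MiniJit;

    uint16_t mini_kint(MiniJit *J, int32_t v)
    {
      uint16_t ref;
      for (ref = J->chain; ref; ref = J->k[ref].prev)
        if (J->k[ref].i == v)
          return ref;                /* Same constant -> same reference. */
      if (J->nk >= 255)
        return 0;                    /* Out of slots (the real code grows). */
      ref = ++J->nk;                 /* Miss: take a fresh slot... */
      J->k[ref].i = v;
      J->k[ref].prev = J->chain;     /* ...and link it for later lookups. */
      J->chain = ref;
      return ref;
    }
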
diff --git a/src/LuaJIT/src/lj_ir.h b/src/LuaJIT/src/lj_ir.h
new file mode 100644
index 000000000..99ad8d9c4
--- /dev/null
+++ b/src/LuaJIT/src/lj_ir.h
@@ -0,0 +1,540 @@
+/*
+** SSA IR (Intermediate Representation) format.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IR_H
+#define _LJ_IR_H
+
+#include "lj_obj.h"
+
+/* -- IR instructions ----------------------------------------------------- */
+
+/* IR instruction definition. Order matters, see below. ORDER IR */
+#define IRDEF(_) \
+ /* Guarded assertions. */ \
+ /* Must be properly aligned to flip opposites (^1) and (un)ordered (^4). */ \
+ _(LT, N , ref, ref) \
+ _(GE, N , ref, ref) \
+ _(LE, N , ref, ref) \
+ _(GT, N , ref, ref) \
+ \
+ _(ULT, N , ref, ref) \
+ _(UGE, N , ref, ref) \
+ _(ULE, N , ref, ref) \
+ _(UGT, N , ref, ref) \
+ \
+ _(EQ, C , ref, ref) \
+ _(NE, C , ref, ref) \
+ \
+ _(ABC, N , ref, ref) \
+ _(RETF, S , ref, ref) \
+ \
+ /* Miscellaneous ops. */ \
+ _(NOP, N , ___, ___) \
+ _(BASE, N , lit, lit) \
+ _(HIOP, S , ref, ref) \
+ _(LOOP, S , ___, ___) \
+ _(USE, S , ref, ___) \
+ _(PHI, S , ref, ref) \
+ _(RENAME, S , ref, lit) \
+ \
+ /* Constants. */ \
+ _(KPRI, N , ___, ___) \
+ _(KINT, N , cst, ___) \
+ _(KGC, N , cst, ___) \
+ _(KPTR, N , cst, ___) \
+ _(KKPTR, N , cst, ___) \
+ _(KNULL, N , cst, ___) \
+ _(KNUM, N , cst, ___) \
+ _(KINT64, N , cst, ___) \
+ _(KSLOT, N , ref, lit) \
+ \
+ /* Bit ops. */ \
+ _(BNOT, N , ref, ___) \
+ _(BSWAP, N , ref, ___) \
+ _(BAND, C , ref, ref) \
+ _(BOR, C , ref, ref) \
+ _(BXOR, C , ref, ref) \
+ _(BSHL, N , ref, ref) \
+ _(BSHR, N , ref, ref) \
+ _(BSAR, N , ref, ref) \
+ _(BROL, N , ref, ref) \
+ _(BROR, N , ref, ref) \
+ \
+ /* Arithmetic ops. ORDER ARITH */ \
+ _(ADD, C , ref, ref) \
+ _(SUB, N , ref, ref) \
+ _(MUL, C , ref, ref) \
+ _(DIV, N , ref, ref) \
+ _(MOD, N , ref, ref) \
+ _(POW, N , ref, ref) \
+ _(NEG, N , ref, ref) \
+ \
+ _(ABS, N , ref, ref) \
+ _(ATAN2, N , ref, ref) \
+ _(LDEXP, N , ref, ref) \
+ _(MIN, C , ref, ref) \
+ _(MAX, C , ref, ref) \
+ _(FPMATH, N , ref, lit) \
+ \
+ /* Overflow-checking arithmetic ops. */ \
+ _(ADDOV, CW, ref, ref) \
+ _(SUBOV, NW, ref, ref) \
+ _(MULOV, CW, ref, ref) \
+ \
+ /* Memory ops. A = array, H = hash, U = upvalue, F = field, S = stack. */ \
+ \
+ /* Memory references. */ \
+ _(AREF, R , ref, ref) \
+ _(HREFK, R , ref, ref) \
+ _(HREF, L , ref, ref) \
+ _(NEWREF, S , ref, ref) \
+ _(UREFO, LW, ref, lit) \
+ _(UREFC, LW, ref, lit) \
+ _(FREF, R , ref, lit) \
+ _(STRREF, N , ref, ref) \
+ \
+ /* Loads and Stores. These must be in the same order. */ \
+ _(ALOAD, L , ref, ___) \
+ _(HLOAD, L , ref, ___) \
+ _(ULOAD, L , ref, ___) \
+ _(FLOAD, L , ref, lit) \
+ _(XLOAD, L , ref, lit) \
+ _(SLOAD, L , lit, lit) \
+ _(VLOAD, L , ref, ___) \
+ \
+ _(ASTORE, S , ref, ref) \
+ _(HSTORE, S , ref, ref) \
+ _(USTORE, S , ref, ref) \
+ _(FSTORE, S , ref, ref) \
+ _(XSTORE, S , ref, ref) \
+ \
+ /* Allocations. */ \
+ _(SNEW, N , ref, ref) /* CSE is ok, not marked as A. */ \
+ _(XSNEW, A , ref, ref) \
+ _(TNEW, AW, lit, lit) \
+ _(TDUP, AW, ref, ___) \
+ _(CNEW, AW, ref, ref) \
+ _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \
+ \
+ /* Barriers. */ \
+ _(TBAR, S , ref, ___) \
+ _(OBAR, S , ref, ref) \
+ _(XBAR, S , ___, ___) \
+ \
+ /* Type conversions. */ \
+ _(CONV, NW, ref, lit) \
+ _(TOBIT, N , ref, ref) \
+ _(TOSTR, N , ref, ___) \
+ _(STRTO, N , ref, ___) \
+ \
+ /* Calls. */ \
+ _(CALLN, N , ref, lit) \
+ _(CALLL, L , ref, lit) \
+ _(CALLS, S , ref, lit) \
+ _(CALLXS, S , ref, ref) \
+ _(CARG, N , ref, ref) \
+ \
+ /* End of list. */
+
+/* IR opcodes (max. 256). */
+typedef enum {
+#define IRENUM(name, m, m1, m2) IR_##name,
+IRDEF(IRENUM)
+#undef IRENUM
+ IR__MAX
+} IROp;
+
+/* Stored opcode. */
+typedef uint8_t IROp1;
+
+LJ_STATIC_ASSERT(((int)IR_EQ^1) == (int)IR_NE);
+LJ_STATIC_ASSERT(((int)IR_LT^1) == (int)IR_GE);
+LJ_STATIC_ASSERT(((int)IR_LE^1) == (int)IR_GT);
+LJ_STATIC_ASSERT(((int)IR_LT^3) == (int)IR_GT);
+LJ_STATIC_ASSERT(((int)IR_LT^4) == (int)IR_ULT);
+
+/* Delta between xLOAD and xSTORE. */
+#define IRDELTA_L2S ((int)IR_ASTORE - (int)IR_ALOAD)
+
+LJ_STATIC_ASSERT((int)IR_HLOAD + IRDELTA_L2S == (int)IR_HSTORE);
+LJ_STATIC_ASSERT((int)IR_ULOAD + IRDELTA_L2S == (int)IR_USTORE);
+LJ_STATIC_ASSERT((int)IR_FLOAD + IRDELTA_L2S == (int)IR_FSTORE);
+LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE);
+
+/* -- Named IR literals --------------------------------------------------- */
+
+/* FPMATH sub-functions. ORDER FPM. */
+#define IRFPMDEF(_) \
+ _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \
+ _(SQRT) _(EXP) _(EXP2) _(LOG) _(LOG2) _(LOG10) \
+ _(SIN) _(COS) _(TAN) \
+ _(OTHER)
+
+typedef enum {
+#define FPMENUM(name) IRFPM_##name,
+IRFPMDEF(FPMENUM)
+#undef FPMENUM
+ IRFPM__MAX
+} IRFPMathOp;
+
+/* FLOAD fields. */
+#define IRFLDEF(_) \
+ _(STR_LEN, offsetof(GCstr, len)) \
+ _(FUNC_ENV, offsetof(GCfunc, l.env)) \
+ _(FUNC_PC, offsetof(GCfunc, l.pc)) \
+ _(TAB_META, offsetof(GCtab, metatable)) \
+ _(TAB_ARRAY, offsetof(GCtab, array)) \
+ _(TAB_NODE, offsetof(GCtab, node)) \
+ _(TAB_ASIZE, offsetof(GCtab, asize)) \
+ _(TAB_HMASK, offsetof(GCtab, hmask)) \
+ _(TAB_NOMM, offsetof(GCtab, nomm)) \
+ _(UDATA_META, offsetof(GCudata, metatable)) \
+ _(UDATA_UDTYPE, offsetof(GCudata, udtype)) \
+ _(UDATA_FILE, sizeof(GCudata)) \
+ _(CDATA_TYPEID, offsetof(GCcdata, typeid)) \
+ _(CDATA_PTR, sizeof(GCcdata)) \
+ _(CDATA_INT64, sizeof(GCcdata)) \
+ _(CDATA_INT64_4, sizeof(GCcdata) + 4)
+
+typedef enum {
+#define FLENUM(name, ofs) IRFL_##name,
+IRFLDEF(FLENUM)
+#undef FLENUM
+ IRFL__MAX
+} IRFieldID;
+
+/* SLOAD mode bits, stored in op2. */
+#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */
+#define IRSLOAD_FRAME 0x02 /* Load hiword of frame. */
+#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */
+#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */
+#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */
+#define IRSLOAD_INHERIT 0x20 /* Inherited by exits/side traces. */
+
+/* XLOAD mode, stored in op2. */
+#define IRXLOAD_READONLY 1 /* Load from read-only data. */
+#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */
+#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */
+
+/* CONV mode, stored in op2. */
+#define IRCONV_SRCMASK 0x001f /* Source IRType. */
+#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */
+#define IRCONV_DSH 5
+#define IRCONV_NUM_INT ((IRT_NUM<<IRCONV_DSH)|IRT_INT)
+#define IRCONV_INT_NUM ((IRT_INT<<IRCONV_DSH)|IRT_NUM)
+#define IRCONV_TRUNC 0x0400 /* Truncate number to integer. */
+#define IRCONV_SEXT 0x0800 /* Sign-extend integer to integer. */
+#define IRCONV_MODEMASK 0x0fff
+#define IRCONV_CONVMASK 0xf000
+#define IRCONV_CSH 12
+/* Number to integer conversion mode. Ordered by strength of the checks. */
+#define IRCONV_TOBIT (0<<IRCONV_CSH) /* None. Cache only: TOBIT conv. */
+#define IRCONV_ANY (1<<IRCONV_CSH) /* Any FP number is ok. */
+#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */
+#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */
+
+/* -- IR operands --------------------------------------------------------- */
+
+/* IR operand mode (2 bit). */
+typedef enum {
+ IRMref, /* IR reference, must be in {op1,op2} range. */
+ IRMlit, /* 16 bit unsigned literal. */
+ IRMcst, /* Constant literal: i16, i32 or TValue. */
+ IRMnone /* Unused operand. */
+} IRMode;
+#define IRM___ IRMnone
+
+/* IR instruction mode flags. */
+#define IRM_C 0x10 /* Commutative. */
+
+#define IRM_N 0x00 /* Normal/reference. */
+#define IRM_R IRM_N
+#define IRM_A 0x20 /* Allocation. */
+#define IRM_L 0x40 /* Load. */
+#define IRM_S 0x60 /* Store. */
+
+#define IRM_W 0x80
+
+#define IRM_NW (IRM_N|IRM_W)
+#define IRM_CW (IRM_C|IRM_W)
+#define IRM_AW (IRM_A|IRM_W)
+#define IRM_LW (IRM_L|IRM_W)
+
+#define irm_op1(m) ((IRMode)((m)&3))
+#define irm_op2(m) ((IRMode)(((m)>>2)&3))
+#define irm_iscomm(m) ((m) & IRM_C)
+#define irm_kind(m) ((m) & IRM_S)
+
+#define IRMODE(name, m, m1, m2) (((IRM##m1)|((IRM##m2)<<2)|(IRM_##m))^IRM_W),
+
+LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1];
+
+/* -- IR instruction types ------------------------------------------------ */
+
+/* Map of itypes to non-negative numbers. ORDER LJ_T.
+** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for
+** IRT_P32 and IRT_P64, which never escape the IR.
+** The various integers are only used in the IR and can only escape to
+** a TValue after implicit or explicit conversion. Their types must be
+** contiguous and next to IRT_NUM (see the typerange macros below).
+*/
+#define IRTDEF(_) \
+ _(NIL) _(FALSE) _(TRUE) _(LIGHTUD) _(STR) _(P32) _(THREAD) \
+ _(PROTO) _(FUNC) _(P64) _(CDATA) _(TAB) _(UDATA) \
+ _(FLOAT) _(NUM) _(I8) _(U8) _(I16) _(U16) _(INT) _(U32) _(I64) _(U64) \
+ _(SOFTFP) /* There is room for 9 more types. */
+
+/* IR result type and flags (8 bit). */
+typedef enum {
+#define IRTENUM(name) IRT_##name,
+IRTDEF(IRTENUM)
+#undef IRTENUM
+
+ /* Native pointer type and the corresponding integer type. */
+ IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32,
+ IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT,
+ IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32,
+
+ /* Additional flags. */
+ IRT_MARK = 0x20, /* Marker for misc. purposes. */
+ IRT_ISPHI = 0x40, /* Instruction is left or right PHI operand. */
+ IRT_GUARD = 0x80, /* Instruction is a guard. */
+
+ /* Masks. */
+ IRT_TYPE = 0x1f,
+ IRT_T = 0xff
+} IRType;
+
+#define irtype_ispri(irt) ((uint32_t)(irt) <= IRT_TRUE)
+
+/* Stored IRType. */
+typedef struct IRType1 { uint8_t irt; } IRType1;
+
+#define IRT(o, t) ((uint32_t)(((o)<<8) | (t)))
+#define IRTI(o) (IRT((o), IRT_INT))
+#define IRTN(o) (IRT((o), IRT_NUM))
+#define IRTG(o, t) (IRT((o), IRT_GUARD|(t)))
+#define IRTGI(o) (IRT((o), IRT_GUARD|IRT_INT))
+
+#define irt_t(t) ((IRType)(t).irt)
+#define irt_type(t) ((IRType)((t).irt & IRT_TYPE))
+#define irt_sametype(t1, t2) ((((t1).irt ^ (t2).irt) & IRT_TYPE) == 0)
+#define irt_typerange(t, first, last) \
+ ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= (uint32_t)(last-first))
+
+#define irt_isnil(t) (irt_type(t) == IRT_NIL)
+#define irt_ispri(t) ((uint32_t)irt_type(t) <= IRT_TRUE)
+#define irt_islightud(t) (irt_type(t) == IRT_LIGHTUD)
+#define irt_isstr(t) (irt_type(t) == IRT_STR)
+#define irt_istab(t) (irt_type(t) == IRT_TAB)
+#define irt_isfloat(t) (irt_type(t) == IRT_FLOAT)
+#define irt_isnum(t) (irt_type(t) == IRT_NUM)
+#define irt_isint(t) (irt_type(t) == IRT_INT)
+#define irt_isi8(t) (irt_type(t) == IRT_I8)
+#define irt_isu8(t) (irt_type(t) == IRT_U8)
+#define irt_isi16(t) (irt_type(t) == IRT_I16)
+#define irt_isu16(t) (irt_type(t) == IRT_U16)
+#define irt_isu32(t) (irt_type(t) == IRT_U32)
+#define irt_isi64(t) (irt_type(t) == IRT_I64)
+#define irt_isu64(t) (irt_type(t) == IRT_U64)
+
+#define irt_isfp(t) (irt_isnum(t) || irt_isfloat(t))
+#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT))
+#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA))
+#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA))
+#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64))
+
+#if LJ_64
+#define IRT_IS64 \
+ ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|(1u<<IRT_LIGHTUD))
+#else
+#define IRT_IS64 \
+ ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64))
+#endif
+
+#define irt_is64(t) ((IRT_IS64 >> irt_type(t)) & 1)
+#define irt_is64orfp(t) (((IRT_IS64|(1u<<IRT_FLOAT))>>irt_type(t)) & 1)
+
+static LJ_AINLINE IRType itype2irt(const TValue *tv)
+{
+ if (tvisint(tv))
+ return IRT_INT;
+ else if (tvisnum(tv))
+ return IRT_NUM;
+#if LJ_64
+ else if (tvislightud(tv))
+ return IRT_LIGHTUD;
+#endif
+ else
+ return (IRType)~itype(tv);
+}
+
+static LJ_AINLINE uint32_t irt_toitype_(IRType t)
+{
+ lua_assert(!LJ_64 || t != IRT_LIGHTUD);
+ if (LJ_DUALNUM && t > IRT_NUM) {
+ return LJ_TISNUM;
+ } else {
+ lua_assert(t <= IRT_NUM);
+ return ~(uint32_t)t;
+ }
+}
+
+#define irt_toitype(t) irt_toitype_(irt_type((t)))
+
+#define irt_isguard(t) ((t).irt & IRT_GUARD)
+#define irt_ismarked(t) ((t).irt & IRT_MARK)
+#define irt_setmark(t) ((t).irt |= IRT_MARK)
+#define irt_clearmark(t) ((t).irt &= ~IRT_MARK)
+#define irt_isphi(t) ((t).irt & IRT_ISPHI)
+#define irt_setphi(t) ((t).irt |= IRT_ISPHI)
+#define irt_clearphi(t) ((t).irt &= ~IRT_ISPHI)
+
+/* Stored combined IR opcode and type. */
+typedef uint16_t IROpT;
+
+/* -- IR references ------------------------------------------------------- */
+
+/* IR references. */
+typedef uint16_t IRRef1; /* One stored reference. */
+typedef uint32_t IRRef2; /* Two stored references. */
+typedef uint32_t IRRef; /* Used to pass around references. */
+
+/* Fixed references. */
+enum {
+ REF_BIAS = 0x8000,
+ REF_TRUE = REF_BIAS-3,
+ REF_FALSE = REF_BIAS-2,
+ REF_NIL = REF_BIAS-1, /* \--- Constants grow downwards. */
+ REF_BASE = REF_BIAS, /* /--- IR grows upwards. */
+ REF_FIRST = REF_BIAS+1,
+ REF_DROP = 0xffff
+};
+
+/* Note: IRMlit operands must be < REF_BIAS, too!
+** This allows for fast and uniform manipulation of all operands
+** without looking up the operand mode in lj_ir_mode:
+** - CSE calculates the maximum reference of two operands.
+** This must work with mixed reference/literal operands, too.
+** - DCE marking only checks for operand >= REF_BIAS.
+** - LOOP needs to substitute reference operands.
+** Constant references and literals must not be modified.
+*/
+
+#define IRREF2(lo, hi) ((IRRef2)(lo) | ((IRRef2)(hi) << 16))
+
+#define irref_isk(ref) ((ref) < REF_BIAS)
+
+/* Tagged IR references (32 bit).
+**
+** +-------+-------+---------------+
+** | irt | flags | ref |
+** +-------+-------+---------------+
+**
+** The tag holds a copy of the IRType and speeds up IR type checks.
+*/
+typedef uint32_t TRef;
+
+#define TREF_REFMASK 0x0000ffff
+#define TREF_FRAME 0x00010000
+#define TREF_CONT 0x00020000
+
+#define TREF(ref, t) ((TRef)((ref) + ((t)<<24)))
+
+#define tref_ref(tr) ((IRRef1)(tr))
+#define tref_t(tr) ((IRType)((tr)>>24))
+#define tref_type(tr) ((IRType)(((tr)>>24) & IRT_TYPE))
+#define tref_typerange(tr, first, last) \
+ ((((tr)>>24) & IRT_TYPE) - (TRef)(first) <= (TRef)(last-first))
+
+#define tref_istype(tr, t) (((tr) & (IRT_TYPE<<24)) == ((t)<<24))
+#define tref_isnil(tr) (tref_istype((tr), IRT_NIL))
+#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE))
+#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE))
+#define tref_isstr(tr) (tref_istype((tr), IRT_STR))
+#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC))
+#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA))
+#define tref_istab(tr) (tref_istype((tr), IRT_TAB))
+#define tref_isudata(tr) (tref_istype((tr), IRT_UDATA))
+#define tref_isnum(tr) (tref_istype((tr), IRT_NUM))
+#define tref_isint(tr) (tref_istype((tr), IRT_INT))
+
+#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE))
+#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE))
+#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE))
+#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT))
+#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT))
+#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr)))
+#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA))
+
+#define tref_isk(tr) (irref_isk(tref_ref((tr))))
+#define tref_isk2(tr1, tr2) (irref_isk(tref_ref((tr1) | (tr2))))
+
+#define TREF_PRI(t) (TREF(REF_NIL-(t), (t)))
+#define TREF_NIL (TREF_PRI(IRT_NIL))
+#define TREF_FALSE (TREF_PRI(IRT_FALSE))
+#define TREF_TRUE (TREF_PRI(IRT_TRUE))
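+
+/* How a tagged reference round-trips through the macros above -- a hedged
+** sketch only; the values follow directly from TREF()/tref_ref()/tref_type():
+*/
+#if 0
+  TRef tr = TREF(REF_FIRST, IRT_INT);  /* Ref in low 16 bits, type in bits 24-31. */
+  IRRef1 ref = tref_ref(tr);           /* == REF_FIRST. */
+  IRType t = tref_type(tr);            /* == IRT_INT. */
+#endif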
+
+/* -- IR format ----------------------------------------------------------- */
+
+/* IR instruction format (64 bit).
+**
+** 16 16 8 8 8 8
+** +-------+-------+---+---+---+---+
+** | op1 | op2 | t | o | r | s |
+** +-------+-------+---+---+---+---+
+** | op12/i/gco | ot | prev | (alternative fields in union)
+** +---------------+-------+-------+
+** 32 16 16
+**
+** prev is only valid prior to register allocation and then reused for r + s.
+*/
+
+typedef union IRIns {
+ struct {
+ LJ_ENDIAN_LOHI(
+ IRRef1 op1; /* IR operand 1. */
+ , IRRef1 op2; /* IR operand 2. */
+ )
+ IROpT ot; /* IR opcode and type (overlaps t and o). */
+ IRRef1 prev; /* Previous ins in same chain (overlaps r and s). */
+ };
+ struct {
+ IRRef2 op12; /* IR operand 1 and 2 (overlaps op1 and op2). */
+ LJ_ENDIAN_LOHI(
+ IRType1 t; /* IR type. */
+ , IROp1 o; /* IR opcode. */
+ )
+ LJ_ENDIAN_LOHI(
+ uint8_t r; /* Register allocation (overlaps prev). */
+ , uint8_t s; /* Spill slot allocation (overlaps prev). */
+ )
+ };
+ int32_t i; /* 32 bit signed integer literal (overlaps op12). */
+ GCRef gcr; /* GCobj constant (overlaps op12). */
+ MRef ptr; /* Pointer constant (overlaps op12). */
+} IRIns;
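+
+/* The 'ot' field overlays 'o' and 't' via LJ_ENDIAN_LOHI, matching the layout
+** built by IRT() above. A sketch of the resulting invariant (illustrative):
+*/
+#if 0
+  lua_assert(ir->ot == (IROpT)(((uint32_t)ir->o << 8) | ir->t.irt));
+#endif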
+
+#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)->gcr))
+#define ir_kstr(ir) (gco2str(ir_kgc((ir))))
+#define ir_ktab(ir) (gco2tab(ir_kgc((ir))))
+#define ir_kfunc(ir) (gco2func(ir_kgc((ir))))
+#define ir_kcdata(ir) (gco2cd(ir_kgc((ir))))
+#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, mref((ir)->ptr, cTValue))
+#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
+#define ir_k64(ir) \
+ check_exp((ir)->o == IR_KNUM || (ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
+#define ir_kptr(ir) \
+ check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, mref((ir)->ptr, void))
+
+/* A store or any other op with a non-weak guard has a side-effect. */
+static LJ_AINLINE int ir_sideeff(IRIns *ir)
+{
+ return (((ir->t.irt | ~IRT_GUARD) & lj_ir_mode[ir->o]) >= IRM_S);
+}
+
+LJ_STATIC_ASSERT((int)IRT_GUARD == (int)IRM_W);
+
+#endif
diff --git a/src/LuaJIT/src/lj_ircall.h b/src/LuaJIT/src/lj_ircall.h
new file mode 100644
index 000000000..39f054a97
--- /dev/null
+++ b/src/LuaJIT/src/lj_ircall.h
@@ -0,0 +1,271 @@
+/*
+** IR CALL* instruction definitions.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IRCALL_H
+#define _LJ_IRCALL_H
+
+#include "lj_obj.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+
+/* C call info for CALL* instructions. */
+typedef struct CCallInfo {
+ ASMFunction func; /* Function pointer. */
+ uint32_t flags; /* Number of arguments and flags. */
+} CCallInfo;
+
+#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* Extract # of args. */
+#define CCI_NARGS_MAX 32 /* Max. # of args. */
+
+#define CCI_OTSHIFT 16
+#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */
+#define CCI_OPSHIFT 24
+#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */
+
+#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT)
+#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT)
+#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT)
+#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL)
+#define CCI_CALL_FL (CCI_CALL_L|CCI_CC_FASTCALL)
+#define CCI_CALL_FS (CCI_CALL_S|CCI_CC_FASTCALL)
+
+/* C call info flags. */
+#define CCI_L 0x0100 /* Implicit L arg. */
+#define CCI_CASTU64 0x0200 /* Cast u64 result to number. */
+#define CCI_NOFPRCLOBBER 0x0400 /* Does not clobber any FPRs. */
+#define CCI_VARARG 0x0800 /* Vararg function. */
+
+#define CCI_CC_MASK 0x3000 /* Calling convention mask. */
+#define CCI_CC_SHIFT 12
+/* ORDER CC */
+#define CCI_CC_CDECL 0x0000 /* Default cdecl calling convention. */
+#define CCI_CC_THISCALL 0x1000 /* Thiscall calling convention. */
+#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */
+#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */
+
+/* Helpers for conditional function definitions. */
+#define IRCALLCOND_ANY(x) x
+
+#if LJ_TARGET_X86ORX64
+#define IRCALLCOND_FPMATH(x) NULL
+#else
+#define IRCALLCOND_FPMATH(x) x
+#endif
+
+#if LJ_SOFTFP
+#define IRCALLCOND_SOFTFP(x) x
+#if LJ_HASFFI
+#define IRCALLCOND_SOFTFP_FFI(x) x
+#else
+#define IRCALLCOND_SOFTFP_FFI(x) NULL
+#endif
+#else
+#define IRCALLCOND_SOFTFP(x) NULL
+#define IRCALLCOND_SOFTFP_FFI(x) NULL
+#endif
+
+#define LJ_NEED_FP64 (LJ_TARGET_PPC || LJ_TARGET_MIPS)
+
+#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
+#define IRCALLCOND_FP64_FFI(x) x
+#else
+#define IRCALLCOND_FP64_FFI(x) NULL
+#endif
+
+#if LJ_HASFFI
+#define IRCALLCOND_FFI(x) x
+#if LJ_32
+#define IRCALLCOND_FFI32(x) x
+#else
+#define IRCALLCOND_FFI32(x) NULL
+#endif
+#else
+#define IRCALLCOND_FFI(x) NULL
+#define IRCALLCOND_FFI32(x) NULL
+#endif
+
+#if LJ_SOFTFP
+#define ARG1_FP 2 /* Treat as 2 32 bit arguments. */
+#else
+#define ARG1_FP 1
+#endif
+
+#if LJ_32
+#define ARG2_64 4 /* Treat as 4 32 bit arguments. */
+#else
+#define ARG2_64 2
+#endif
+
+/* Function definitions for CALL* instructions. */
+#define IRCALLDEF(_) \
+ _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \
+ _(ANY, lj_str_new, 3, S, STR, CCI_L) \
+ _(ANY, lj_str_tonum, 2, FN, INT, 0) \
+ _(ANY, lj_str_fromint, 2, FN, STR, CCI_L) \
+ _(ANY, lj_str_fromnum, 2, FN, STR, CCI_L) \
+ _(ANY, lj_tab_new1, 2, FS, TAB, CCI_L) \
+ _(ANY, lj_tab_dup, 2, FS, TAB, CCI_L) \
+ _(ANY, lj_tab_newkey, 3, S, P32, CCI_L) \
+ _(ANY, lj_tab_len, 1, FL, INT, 0) \
+ _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \
+ _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \
+ _(ANY, lj_mem_newgco, 2, FS, P32, CCI_L) \
+ _(ANY, lj_math_random_step, 1, FS, NUM, CCI_CASTU64|CCI_NOFPRCLOBBER) \
+ _(ANY, lj_vm_modi, 2, FN, INT, 0) \
+ _(ANY, sinh, ARG1_FP, N, NUM, 0) \
+ _(ANY, cosh, ARG1_FP, N, NUM, 0) \
+ _(ANY, tanh, ARG1_FP, N, NUM, 0) \
+ _(ANY, fputc, 2, S, INT, 0) \
+ _(ANY, fwrite, 4, S, INT, 0) \
+ _(ANY, fflush, 1, S, INT, 0) \
+ /* ORDER FPM */ \
+ _(FPMATH, lj_vm_floor, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_ceil, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_trunc, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, sqrt, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, exp, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_exp2, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, log, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_log2, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, log10, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, sin, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, cos, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, tan, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_powi, ARG1_FP+1, N, NUM, 0) \
+ _(FPMATH, pow, ARG1_FP*2, N, NUM, 0) \
+ _(FPMATH, atan2, ARG1_FP*2, N, NUM, 0) \
+ _(FPMATH, ldexp, ARG1_FP+1, N, NUM, 0) \
+ _(SOFTFP, lj_vm_tobit, 2, N, INT, 0) \
+ _(SOFTFP, softfp_add, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_sub, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_mul, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_div, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_cmp, 4, N, NIL, 0) \
+ _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \
+ _(SOFTFP, softfp_d2i, 2, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \
+ _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \
+ _(SOFTFP_FFI, softfp_d2ui, 2, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_d2f, 2, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \
+ _(FP64_FFI, fp64_l2d, 2, N, NUM, 0) \
+ _(FP64_FFI, fp64_ul2d, 2, N, NUM, 0) \
+ _(FP64_FFI, fp64_l2f, 2, N, FLOAT, 0) \
+ _(FP64_FFI, fp64_ul2f, 2, N, FLOAT, 0) \
+ _(FP64_FFI, fp64_d2l, ARG1_FP, N, I64, 0) \
+ _(FP64_FFI, fp64_d2ul, ARG1_FP, N, U64, 0) \
+ _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \
+ _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \
+ _(FFI, lj_carith_divi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_divu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_modi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_modu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_powi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_powu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_cdata_setfin, 2, FN, P32, CCI_L) \
+ _(FFI, strlen, 1, L, INTP, 0) \
+ _(FFI, memcpy, 3, S, PTR, 0) \
+ _(FFI, memset, 3, S, PTR, 0) \
+ _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \
+ _(FFI32, lj_carith_mul64, ARG2_64, N, I64, CCI_NOFPRCLOBBER)
+ \
+ /* End of list. */
+
+typedef enum {
+#define IRCALLENUM(cond, name, nargs, kind, type, flags) IRCALL_##name,
+IRCALLDEF(IRCALLENUM)
+#undef IRCALLENUM
+ IRCALL__MAX
+} IRCallID;
+
+LJ_FUNC TRef lj_ir_call(jit_State *J, IRCallID id, ...);
+
+LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1];
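+
+/* A sketch of how a CCallInfo table can be generated from IRCALLDEF above
+** (the actual lj_ir_callinfo[] definition lives elsewhere; this only
+** illustrates the X-macro pattern and is not compiled):
+*/
+#if 0
+#define IRCALLCI(cond, name, nargs, kind, type, flags) \
+  { (ASMFunction)IRCALLCOND_##cond(name), \
+    (nargs)|(CCI_CALL_##kind)|(IRT_##type<<CCI_OTSHIFT)|(flags) },
+const CCallInfo example_callinfo[] = { IRCALLDEF(IRCALLCI) { NULL, 0 } };
+#undef IRCALLCI
+#endif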
+
+/* Soft-float declarations. */
+#if LJ_SOFTFP
+#if LJ_TARGET_ARM
+#define softfp_add __aeabi_dadd
+#define softfp_sub __aeabi_dsub
+#define softfp_mul __aeabi_dmul
+#define softfp_div __aeabi_ddiv
+#define softfp_cmp __aeabi_cdcmple
+#define softfp_i2d __aeabi_i2d
+#define softfp_d2i __aeabi_d2iz
+#define softfp_ui2d __aeabi_ui2d
+#define softfp_f2d __aeabi_f2d
+#define softfp_d2ui __aeabi_d2uiz
+#define softfp_d2f __aeabi_d2f
+#define softfp_i2f __aeabi_i2f
+#define softfp_ui2f __aeabi_ui2f
+#define softfp_f2i __aeabi_f2iz
+#define softfp_f2ui __aeabi_f2uiz
+#define fp64_l2d __aeabi_l2d
+#define fp64_ul2d __aeabi_ul2d
+#define fp64_l2f __aeabi_l2f
+#define fp64_ul2f __aeabi_ul2f
+#if LJ_TARGET_OSX
+#define fp64_d2l __fixdfdi
+#define fp64_d2ul __fixunsdfdi
+#define fp64_f2l __fixsfdi
+#define fp64_f2ul __fixunssfdi
+#else
+#define fp64_d2l __aeabi_d2lz
+#define fp64_d2ul __aeabi_d2ulz
+#define fp64_f2l __aeabi_f2lz
+#define fp64_f2ul __aeabi_f2ulz
+#endif
+#else
+#error "Missing soft-float definitions for target architecture"
+#endif
+extern double softfp_add(double a, double b);
+extern double softfp_sub(double a, double b);
+extern double softfp_mul(double a, double b);
+extern double softfp_div(double a, double b);
+extern void softfp_cmp(double a, double b);
+extern double softfp_i2d(int32_t a);
+extern int32_t softfp_d2i(double a);
+#if LJ_HASFFI
+extern double softfp_ui2d(uint32_t a);
+extern double softfp_f2d(float a);
+extern uint32_t softfp_d2ui(double a);
+extern float softfp_d2f(double a);
+extern float softfp_i2f(int32_t a);
+extern float softfp_ui2f(uint32_t a);
+extern int32_t softfp_f2i(float a);
+extern uint32_t softfp_f2ui(float a);
+#endif
+#endif
+
+#if LJ_HASFFI && LJ_NEED_FP64
+#ifdef __GNUC__
+#define fp64_l2d __floatdidf
+#define fp64_ul2d __floatundidf
+#define fp64_l2f __floatdisf
+#define fp64_ul2f __floatundisf
+#define fp64_d2l __fixdfdi
+#define fp64_d2ul __fixunsdfdi
+#define fp64_f2l __fixsfdi
+#define fp64_f2ul __fixunssfdi
+#else
+#error "Missing fp64 helper definitions for this compiler"
+#endif
+#endif
+
+#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
+extern double fp64_l2d(int64_t a);
+extern double fp64_ul2d(uint64_t a);
+extern float fp64_l2f(int64_t a);
+extern float fp64_ul2f(uint64_t a);
+extern int64_t fp64_d2l(double a);
+extern uint64_t fp64_d2ul(double a);
+extern int64_t fp64_f2l(float a);
+extern uint64_t fp64_f2ul(float a);
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_iropt.h b/src/LuaJIT/src/lj_iropt.h
new file mode 100644
index 000000000..caa420f47
--- /dev/null
+++ b/src/LuaJIT/src/lj_iropt.h
@@ -0,0 +1,159 @@
+/*
+** Common header for IR emitter and optimizations.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IROPT_H
+#define _LJ_IROPT_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* IR emitter. */
+LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J);
+
+/* Save current IR in J->fold.ins, but do not emit it (yet). */
+static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b)
+{
+ J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b;
+}
+
+#define lj_ir_set(J, ot, a, b) \
+ lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b))
+
+/* Get ref of next IR instruction and optionally grow IR.
+** Note: this may invalidate all IRIns*!
+*/
+static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J)
+{
+ IRRef ref = J->cur.nins;
+ if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J);
+ J->cur.nins = ref + 1;
+ return ref;
+}
+
+/* Interning of constants. */
+LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
+LJ_FUNC void lj_ir_k64_freeall(jit_State *J);
+LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv);
+LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
+LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t);
+LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr);
+LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t);
+LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot);
+
+#if LJ_64
+#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k))
+#else
+#define lj_ir_kintp(J, k) lj_ir_kint(J, (int32_t)(k))
+#endif
+
+static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n)
+{
+ TValue tv;
+ tv.n = n;
+ return lj_ir_knum_u64(J, tv.u64);
+}
+
+#define lj_ir_kstr(J, str) lj_ir_kgc(J, obj2gco((str)), IRT_STR)
+#define lj_ir_ktab(J, tab) lj_ir_kgc(J, obj2gco((tab)), IRT_TAB)
+#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC)
+#define lj_ir_kptr(J, ptr) lj_ir_kptr_(J, IR_KPTR, (ptr))
+#define lj_ir_kkptr(J, ptr) lj_ir_kptr_(J, IR_KKPTR, (ptr))
+
+/* Special FP constants. */
+#define lj_ir_knum_zero(J) lj_ir_knum_u64(J, U64x(00000000,00000000))
+#define lj_ir_knum_one(J) lj_ir_knum_u64(J, U64x(3ff00000,00000000))
+#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000))
+
+/* Special 128 bit SIMD constants. */
+#define lj_ir_knum_abs(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_ABS))
+#define lj_ir_knum_neg(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_NEG))
+
+/* Access to constants. */
+LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir);
+
+/* Convert IR operand types. */
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr);
+
+/* Miscellaneous IR ops. */
+LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op);
+LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op);
+LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref);
+
+/* Emit IR instructions with on-the-fly optimizations. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim);
+
+/* Special return values for the fold functions. */
+enum {
+ NEXTFOLD, /* Couldn't fold, pass on. */
+ RETRYFOLD, /* Retry fold with modified fins. */
+ KINTFOLD, /* Return ref for int constant in fins->i. */
+ FAILFOLD, /* Guard would always fail. */
+ DROPFOLD, /* Guard eliminated. */
+ MAX_FOLD
+};
+
+#define INTFOLD(k) ((J->fold.ins.i = (k)), (TRef)KINTFOLD)
+#define INT64FOLD(k) (lj_ir_kint64(J, (k)))
+#define CONDFOLD(cond) ((TRef)FAILFOLD + (TRef)(cond))
+#define LEFTFOLD (J->fold.ins.op1)
+#define RIGHTFOLD (J->fold.ins.op2)
+#define CSEFOLD (lj_opt_cse(J))
+#define EMITFOLD (lj_ir_emit(J))
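+
+/* A sketch of how a fold rule uses these return values (the real rules live
+** in the fold engine; 'fold_example' and the constant are illustrative):
+*/
+#if 0
+static TRef fold_example(jit_State *J)
+{
+  if (irref_isk(J->fold.ins.op1) && irref_isk(J->fold.ins.op2))
+    return INTFOLD(42);  /* Fold to an integer constant. */
+  return NEXTFOLD;       /* Not handled here: pass on to the next rule. */
+}
+#endif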
+
+/* Load/store forwarding. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim);
+LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref);
+
+/* Dead-store elimination. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J);
+
+/* Narrowing. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef key);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr);
+#if LJ_HASFFI
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef key);
+#endif
+LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
+ TValue *vb, TValue *vc, IROp op);
+LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc);
+LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc);
+LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc);
+LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase);
+
+/* Optimization passes. */
+LJ_FUNC void lj_opt_dce(jit_State *J);
+LJ_FUNC int lj_opt_loop(jit_State *J);
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+LJ_FUNC void lj_opt_split(jit_State *J);
+#else
+#define lj_opt_split(J) UNUSED(J)
+#endif
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_jit.h b/src/LuaJIT/src/lj_jit.h
new file mode 100644
index 000000000..dd0c08d84
--- /dev/null
+++ b/src/LuaJIT/src/lj_jit.h
@@ -0,0 +1,400 @@
+/*
+** Common definitions for the JIT compiler.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_JIT_H
+#define _LJ_JIT_H
+
+#include "lj_obj.h"
+#include "lj_ir.h"
+
+/* JIT engine flags. */
+#define JIT_F_ON 0x00000001
+
+/* CPU-specific JIT engine flags. */
+#if LJ_TARGET_X86ORX64
+#define JIT_F_CMOV 0x00000010
+#define JIT_F_SSE2 0x00000020
+#define JIT_F_SSE3 0x00000040
+#define JIT_F_SSE4_1 0x00000080
+#define JIT_F_P4 0x00000100
+#define JIT_F_PREFER_IMUL 0x00000200
+#define JIT_F_SPLIT_XMM 0x00000400
+#define JIT_F_LEA_AGU 0x00000800
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_CMOV
+#define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM"
+#elif LJ_TARGET_ARM
+#define JIT_F_ARMV6 0x00000010
+#define JIT_F_ARMV6T2 0x00000020
+#define JIT_F_ARMV7 0x00000040
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_ARMV6
+#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7"
+#elif LJ_TARGET_MIPS
+#define JIT_F_MIPS32R2 0x00000010
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_MIPS32R2
+#define JIT_F_CPUSTRING "\010MIPS32R2"
+#else
+#define JIT_F_CPU_FIRST 0
+#define JIT_F_CPUSTRING ""
+#endif
+
+/* Optimization flags. */
+#define JIT_F_OPT_MASK 0x0fff0000
+
+#define JIT_F_OPT_FOLD 0x00010000
+#define JIT_F_OPT_CSE 0x00020000
+#define JIT_F_OPT_DCE 0x00040000
+#define JIT_F_OPT_FWD 0x00080000
+#define JIT_F_OPT_DSE 0x00100000
+#define JIT_F_OPT_NARROW 0x00200000
+#define JIT_F_OPT_LOOP 0x00400000
+#define JIT_F_OPT_ABC 0x00800000
+#define JIT_F_OPT_FUSE 0x01000000
+
+/* Optimizations names for -O. Must match the order above. */
+#define JIT_F_OPT_FIRST JIT_F_OPT_FOLD
+#define JIT_F_OPTSTRING \
+ "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4fuse"
+
+/* Optimization levels set a fixed combination of flags. */
+#define JIT_F_OPT_0 0
+#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE)
+#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP)
+#define JIT_F_OPT_3 \
+ (JIT_F_OPT_2|JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_FUSE)
+#define JIT_F_OPT_DEFAULT JIT_F_OPT_3
+
+#if LJ_TARGET_WINDOWS || LJ_64
+/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */
+#define JIT_P_sizemcode_DEFAULT 64
+#else
+/* Could go as low as 4K, but the mmap() overhead would be rather high. */
+#define JIT_P_sizemcode_DEFAULT 32
+#endif
+
+/* Optimization parameters and their defaults. Length is a char in octal! */
+#define JIT_PARAMDEF(_) \
+ _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \
+ _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \
+ _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \
+ _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \
+ _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \
+ \
+ _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \
+ _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \
+ _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \
+ \
+ _(\012, instunroll, 4) /* Max. unroll for instable loops. */ \
+ _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \
+ _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \
+ _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \
+ \
+ /* Size of each machine code area (in KBytes). */ \
+ _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \
+ /* Max. total size of all machine code areas (in KBytes). */ \
+ _(\010, maxmcode, 512) \
+ /* End of list. */
+
+enum {
+#define JIT_PARAMENUM(len, name, value) JIT_P_##name,
+JIT_PARAMDEF(JIT_PARAMENUM)
+#undef JIT_PARAMENUM
+ JIT_P__MAX
+};
+
+#define JIT_PARAMSTR(len, name, value) #len #name
+#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR)
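+
+/* JIT_P_STRING expands to "\010" "maxtrace" "\011" "maxrecord" ...: each name
+** is preceded by its length as a single char, so the packed string can be
+** scanned without separators. A sketch of walking it (illustrative only):
+*/
+#if 0
+  const char *p = JIT_P_STRING;
+  while (*p) {
+    size_t len = (size_t)(uint8_t)*p++;  /* Length byte (given in octal above). */
+    /* p[0..len-1] is the parameter name. */
+    p += len;
+  }
+#endif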
+
+/* Trace compiler state. */
+typedef enum {
+ LJ_TRACE_IDLE, /* Trace compiler idle. */
+ LJ_TRACE_ACTIVE = 0x10,
+ LJ_TRACE_RECORD, /* Bytecode recording active. */
+ LJ_TRACE_START, /* New trace started. */
+ LJ_TRACE_END, /* End of trace. */
+ LJ_TRACE_ASM, /* Assemble trace. */
+ LJ_TRACE_ERR /* Trace aborted with error. */
+} TraceState;
+
+/* Post-processing action. */
+typedef enum {
+ LJ_POST_NONE, /* No action. */
+ LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */
+ LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */
+ LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */
+ LJ_POST_FIXBOOL, /* Fixup boolean result. */
+ LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */
+} PostProc;
+
+/* Machine code type. */
+#if LJ_TARGET_X86ORX64
+typedef uint8_t MCode;
+#else
+typedef uint32_t MCode;
+#endif
+
+/* Stack snapshot header. */
+typedef struct SnapShot {
+ uint16_t mapofs; /* Offset into snapshot map. */
+ IRRef1 ref; /* First IR ref for this snapshot. */
+ uint8_t nslots; /* Number of valid slots. */
+ uint8_t topslot; /* Maximum frame extent. */
+ uint8_t nent; /* Number of compressed entries. */
+ uint8_t count; /* Count of taken exits for this snapshot. */
+} SnapShot;
+
+#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */
+
+/* Compressed snapshot entry. */
+typedef uint32_t SnapEntry;
+
+#define SNAP_FRAME 0x010000 /* Frame slot. */
+#define SNAP_CONT 0x020000 /* Continuation slot. */
+#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */
+#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */
+LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME);
+LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT);
+
+#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref))
+#define SNAP_TR(slot, tr) \
+ (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK)))
+#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc))
+#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz))
+#define snap_ref(sn) ((sn) & 0xffff)
+#define snap_slot(sn) ((BCReg)((sn) >> 24))
+#define snap_isframe(sn) ((sn) & SNAP_FRAME)
+#define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn))
+#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref))
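+
+/* How a snapshot entry packs and unpacks with the macros above -- a sketch;
+** the slot number and reference are arbitrary example values:
+*/
+#if 0
+  SnapEntry sn = SNAP(3, SNAP_FRAME, REF_FIRST);
+  BCReg slot = snap_slot(sn);     /* == 3 (top byte). */
+  IRRef ref = snap_ref(sn);       /* == REF_FIRST (low 16 bits). */
+  int isframe = snap_isframe(sn); /* Non-zero: SNAP_FRAME flag is set. */
+#endif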
+
+/* Snapshot and exit numbers. */
+typedef uint32_t SnapNo;
+typedef uint32_t ExitNo;
+
+/* Trace number. */
+typedef uint32_t TraceNo; /* Used to pass around trace numbers. */
+typedef uint16_t TraceNo1; /* Stored trace number. */
+
+/* Type of link. ORDER LJ_TRLINK */
+typedef enum {
+ LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */
+ LJ_TRLINK_ROOT, /* Link to other root trace. */
+ LJ_TRLINK_LOOP, /* Loop to same trace. */
+ LJ_TRLINK_TAILREC, /* Tail-recursion. */
+ LJ_TRLINK_UPREC, /* Up-recursion. */
+ LJ_TRLINK_DOWNREC, /* Down-recursion. */
+ LJ_TRLINK_INTERP, /* Fallback to interpreter. */
+ LJ_TRLINK_RETURN /* Return to interpreter. */
+} TraceLink;
+
+/* Trace object. */
+typedef struct GCtrace {
+ GCHeader;
+ uint8_t topslot; /* Top stack slot already checked to be allocated. */
+ uint8_t linktype; /* Type of link. */
+ IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */
+ GCRef gclist;
+ IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */
+ IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */
+ uint16_t nsnap; /* Number of snapshots. */
+ uint16_t nsnapmap; /* Number of snapshot map elements. */
+ SnapShot *snap; /* Snapshot array. */
+ SnapEntry *snapmap; /* Snapshot map. */
+ GCRef startpt; /* Starting prototype. */
+ MRef startpc; /* Bytecode PC of starting instruction. */
+ BCIns startins; /* Original bytecode of starting instruction. */
+ MSize szmcode; /* Size of machine code. */
+ MCode *mcode; /* Start of machine code. */
+ MSize mcloop; /* Offset of loop start in machine code. */
+ uint16_t nchild; /* Number of child traces (root trace only). */
+ uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */
+ TraceNo1 traceno; /* Trace number. */
+ TraceNo1 link; /* Linked trace (or self for loops). */
+ TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */
+ TraceNo1 nextroot; /* Next root trace for same prototype. */
+ TraceNo1 nextside; /* Next side trace of same root trace. */
+ uint16_t unused2;
+#ifdef LUAJIT_USE_GDBJIT
+ void *gdbjit_entry; /* GDB JIT entry. */
+#endif
+} GCtrace;
+
+#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o))
+#define traceref(J, n) \
+  check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)]))
+
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist));
+
+static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap)
+{
+ if (snap+1 == &T->snap[T->nsnap])
+ return T->nsnapmap;
+ else
+ return (snap+1)->mapofs;
+}
+
+/* Round-robin penalty cache for bytecodes leading to aborted traces. */
+typedef struct HotPenalty {
+ MRef pc; /* Starting bytecode PC. */
+ uint16_t val; /* Penalty value, i.e. hotcount start. */
+ uint16_t reason; /* Abort reason (really TraceErr). */
+} HotPenalty;
+
+#define PENALTY_SLOTS 64 /* Penalty cache slot. Must be a power of 2. */
+#define PENALTY_MIN (36*2) /* Minimum penalty value. */
+#define PENALTY_MAX 60000 /* Maximum penalty value. */
+#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */
+
+/* Round-robin backpropagation cache for narrowing conversions. */
+typedef struct BPropEntry {
+ IRRef1 key; /* Key: original reference. */
+ IRRef1 val; /* Value: reference after conversion. */
+ IRRef mode; /* Mode for this entry (currently IRCONV_*). */
+} BPropEntry;
+
+/* Number of slots for the backpropagation cache. Must be a power of 2. */
+#define BPROP_SLOTS 16
+
+/* Scalar evolution analysis cache. */
+typedef struct ScEvEntry {
+ IRRef1 idx; /* Index reference. */
+ IRRef1 start; /* Constant start reference. */
+ IRRef1 stop; /* Constant stop reference. */
+ IRRef1 step; /* Constant step reference. */
+ IRType1 t; /* Scalar type. */
+ uint8_t dir; /* Direction. 1: +, 0: -. */
+} ScEvEntry;
+
+/* 128 bit SIMD constants. */
+enum {
+ LJ_KSIMD_ABS,
+ LJ_KSIMD_NEG,
+ LJ_KSIMD__MAX
+};
+
+/* Get 16 byte aligned pointer to SIMD constant. */
+#define LJ_KSIMD(J, n) \
+ ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15))
+
+/* Set/reset flag to activate the SPLIT pass for the current trace. */
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+#define lj_needsplit(J) (J->needsplit = 1)
+#define lj_resetsplit(J) (J->needsplit = 0)
+#else
+#define lj_needsplit(J) UNUSED(J)
+#define lj_resetsplit(J) UNUSED(J)
+#endif
+
+/* Fold state is used to fold instructions on-the-fly. */
+typedef struct FoldState {
+ IRIns ins; /* Currently emitted instruction. */
+ IRIns left; /* Instruction referenced by left operand. */
+ IRIns right; /* Instruction referenced by right operand. */
+} FoldState;
+
+/* JIT compiler state. */
+typedef struct jit_State {
+ GCtrace cur; /* Current trace. */
+
+ lua_State *L; /* Current Lua state. */
+ const BCIns *pc; /* Current PC. */
+ GCfunc *fn; /* Current function. */
+ GCproto *pt; /* Current prototype. */
+ TRef *base; /* Current frame base, points into J->slots. */
+
+ uint32_t flags; /* JIT engine flags. */
+ BCReg maxslot; /* Relative to baseslot. */
+ BCReg baseslot; /* Current frame base, offset into J->slots. */
+
+ uint8_t mergesnap; /* Allowed to merge with next snapshot. */
+ uint8_t needsnap; /* Need snapshot before recording next bytecode. */
+ IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */
+ uint8_t bcskip; /* Number of bytecode instructions to skip. */
+
+ FoldState fold; /* Fold state. */
+
+ const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */
+ MSize bc_extent; /* Extent of the range. */
+
+ TraceState state; /* Trace compiler state. */
+
+ int32_t instunroll; /* Unroll counter for instable loops. */
+ int32_t loopunroll; /* Unroll counter for loop ops in side traces. */
+ int32_t tailcalled; /* Number of successive tailcalls. */
+ int32_t framedepth; /* Current frame depth. */
+ int32_t retdepth; /* Return frame depth (count of RETF). */
+
+ MRef k64; /* Pointer to chained array of 64 bit constants. */
+ TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */
+
+ IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */
+  IRRef irtoplim; /* Upper limit of instruction buffer (biased). */
+  IRRef irbotlim; /* Lower limit of instruction buffer (biased). */
+ IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */
+
+ MSize sizesnap; /* Size of temp. snapshot buffer. */
+ SnapShot *snapbuf; /* Temp. snapshot buffer. */
+ SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */
+ MSize sizesnapmap; /* Size of temp. snapshot map buffer. */
+
+ PostProc postproc; /* Required post-processing after execution. */
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+ int needsplit; /* Need SPLIT pass. */
+#endif
+
+ GCRef *trace; /* Array of traces. */
+ TraceNo freetrace; /* Start of scan for next free trace. */
+ MSize sizetrace; /* Size of trace array. */
+
+ IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */
+ TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */
+
+ int32_t param[JIT_P__MAX]; /* JIT engine parameters. */
+
+ MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */
+
+ HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */
+ uint32_t penaltyslot; /* Round-robin index into penalty slots. */
+ uint32_t prngstate; /* PRNG state. */
+
+ BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */
+ uint32_t bpropslot; /* Round-robin index into bpropcache slots. */
+
+ ScEvEntry scev; /* Scalar evolution analysis cache slots. */
+
+ const BCIns *startpc; /* Bytecode PC of starting instruction. */
+ TraceNo parent; /* Parent of current side trace (0 for root traces). */
+ ExitNo exitno; /* Exit number in parent of current side trace. */
+
+ BCIns *patchpc; /* PC for pending re-patch. */
+ BCIns patchins; /* Instruction for pending re-patch. */
+
+ int mcprot; /* Protection of current mcode area. */
+ MCode *mcarea; /* Base of current mcode area. */
+ MCode *mctop; /* Top of current mcode area. */
+ MCode *mcbot; /* Bottom of current mcode area. */
+ size_t szmcarea; /* Size of current mcode area. */
+ size_t szallmcarea; /* Total size of all allocated mcode areas. */
+
+ TValue errinfo; /* Additional info element for trace errors. */
+}
+#if LJ_TARGET_ARM
+LJ_ALIGN(16) /* For DISPATCH-relative addresses in assembler part. */
+#endif
+jit_State;
+
+/* Trivial PRNG e.g. used for penalty randomization. */
+static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
+{
+ /* Yes, this LCG is very weak, but that doesn't matter for our use case. */
+ J->prngstate = J->prngstate * 1103515245 + 12345;
+ return J->prngstate >> (32-bits);
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_lex.c b/src/LuaJIT/src/lj_lex.c
new file mode 100644
index 000000000..669d2dfe9
--- /dev/null
+++ b/src/LuaJIT/src/lj_lex.c
@@ -0,0 +1,524 @@
+/*
+** Lexical analyzer.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_lex_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#if LJ_HASFFI
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lualib.h"
+#endif
+#include "lj_state.h"
+#include "lj_lex.h"
+#include "lj_parse.h"
+#include "lj_char.h"
+
+/* Lua lexer token names. */
+static const char *const tokennames[] = {
+#define TKSTR1(name) #name,
+#define TKSTR2(name, sym) #sym,
+TKDEF(TKSTR1, TKSTR2)
+#undef TKSTR1
+#undef TKSTR2
+ NULL
+};
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+#define char2int(c) ((int)(uint8_t)(c))
+#define next(ls) \
+ (ls->current = (ls->n--) > 0 ? char2int(*ls->p++) : fillbuf(ls))
+#define save_and_next(ls) (save(ls, ls->current), next(ls))
+#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r')
+#define END_OF_STREAM (-1)
+
+static int fillbuf(LexState *ls)
+{
+ size_t sz;
+ const char *buf = ls->rfunc(ls->L, ls->rdata, &sz);
+ if (buf == NULL || sz == 0) return END_OF_STREAM;
+ ls->n = (MSize)sz - 1;
+ ls->p = buf;
+ return char2int(*(ls->p++));
+}
+
+static LJ_NOINLINE void save_grow(LexState *ls, int c)
+{
+ MSize newsize;
+ if (ls->sb.sz >= LJ_MAX_STR/2)
+ lj_lex_error(ls, 0, LJ_ERR_XELEM);
+ newsize = ls->sb.sz * 2;
+ lj_str_resizebuf(ls->L, &ls->sb, newsize);
+ ls->sb.buf[ls->sb.n++] = (char)c;
+}
+
+static LJ_AINLINE void save(LexState *ls, int c)
+{
+ if (LJ_UNLIKELY(ls->sb.n + 1 > ls->sb.sz))
+ save_grow(ls, c);
+ else
+ ls->sb.buf[ls->sb.n++] = (char)c;
+}
+
+static void inclinenumber(LexState *ls)
+{
+ int old = ls->current;
+ lua_assert(currIsNewline(ls));
+ next(ls); /* skip `\n' or `\r' */
+ if (currIsNewline(ls) && ls->current != old)
+ next(ls); /* skip `\n\r' or `\r\n' */
+ if (++ls->linenumber >= LJ_MAX_LINE)
+ lj_lex_error(ls, ls->token, LJ_ERR_XLINES);
+}
+
+/* -- Scanner for terminals ----------------------------------------------- */
+
+#if LJ_HASFFI
+/* Load FFI library on-demand. Needed if we create cdata objects. */
+static void lex_loadffi(lua_State *L)
+{
+ ptrdiff_t oldtop = savestack(L, L->top);
+ luaopen_ffi(L);
+ L->top = restorestack(L, oldtop);
+}
+
+/* Parse 64 bit integer. */
+static int lex_number64(LexState *ls, TValue *tv)
+{
+ uint64_t n = 0;
+ uint8_t *p = (uint8_t *)ls->sb.buf;
+ CTypeID id = CTID_INT64;
+ GCcdata *cd;
+ int numl = 0;
+ if (p[0] == '0' && (p[1] & ~0x20) == 'X') { /* Hexadecimal. */
+ p += 2;
+ if (!lj_char_isxdigit(*p)) return 0;
+ do {
+ n = n*16 + (*p & 15);
+ if (!lj_char_isdigit(*p)) n += 9;
+ p++;
+ } while (lj_char_isxdigit(*p));
+ } else { /* Decimal. */
+ if (!lj_char_isdigit(*p)) return 0;
+ do {
+ n = n*10 + (*p - '0');
+ p++;
+ } while (lj_char_isdigit(*p));
+ }
+ for (;;) { /* Parse suffixes. */
+ if ((*p & ~0x20) == 'U')
+ id = CTID_UINT64;
+ else if ((*p & ~0x20) == 'L')
+ numl++;
+ else
+ break;
+ p++;
+ }
+ if (numl != 2 || *p != '\0') return 0;
+ /* Return cdata holding a 64 bit integer. */
+ cd = lj_cdata_new_(ls->L, id, 8);
+ *(uint64_t *)cdataptr(cd) = n;
+ lj_parse_keepcdata(ls, tv, cd);
+ return 1; /* Ok. */
+}
+#endif
+
+/* Parse a number literal. */
+static void lex_number(LexState *ls, TValue *tv)
+{
+ int c, xp = 'E';
+ lua_assert(lj_char_isdigit(ls->current));
+ if ((c = ls->current) == '0') {
+ save_and_next(ls);
+ if ((ls->current & ~0x20) == 'X') xp = 'P';
+ }
+ while (lj_char_isident(ls->current) || ls->current == '.' ||
+ ((ls->current == '-' || ls->current == '+') && (c & ~0x20) == xp)) {
+ c = ls->current;
+ save_and_next(ls);
+ }
+#if LJ_HASFFI
+ c &= ~0x20;
+ if ((c == 'I' || c == 'L' || c == 'U') && !ctype_ctsG(G(ls->L)))
+ lex_loadffi(ls->L);
+ if (c == 'I') /* Parse imaginary part of complex number. */
+ ls->sb.n--;
+#endif
+ save(ls, '\0');
+#if LJ_HASFFI
+ if ((c == 'L' || c == 'U') && lex_number64(ls, tv)) { /* Parse 64 bit int. */
+ return;
+ } else
+#endif
+ if (lj_str_numconv(ls->sb.buf, tv)) {
+#if LJ_HASFFI
+ if (c == 'I') { /* Return cdata holding a complex number. */
+ GCcdata *cd = lj_cdata_new_(ls->L, CTID_COMPLEX_DOUBLE, 2*sizeof(double));
+ ((double *)cdataptr(cd))[0] = 0;
+ ((double *)cdataptr(cd))[1] = numberVnum(tv);
+ lj_parse_keepcdata(ls, tv, cd);
+ }
+#endif
+ if (LJ_DUALNUM && tvisnum(tv)) {
+ int32_t k = lj_num2int(numV(tv));
+ if ((lua_Number)k == numV(tv)) /* -0 cannot end up here. */
+ setintV(tv, k);
+ }
+ return;
+ }
+ lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER);
+}
+
+static int skip_sep(LexState *ls)
+{
+ int count = 0;
+ int s = ls->current;
+ lua_assert(s == '[' || s == ']');
+ save_and_next(ls);
+ while (ls->current == '=') {
+ save_and_next(ls);
+ count++;
+ }
+ return (ls->current == s) ? count : (-count) - 1;
+}
+
+static void read_long_string(LexState *ls, TValue *tv, int sep)
+{
+ save_and_next(ls); /* skip 2nd `[' */
+ if (currIsNewline(ls)) /* string starts with a newline? */
+ inclinenumber(ls); /* skip it */
+ for (;;) {
+ switch (ls->current) {
+ case END_OF_STREAM:
+ lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM);
+ break;
+ case ']':
+ if (skip_sep(ls) == sep) {
+ save_and_next(ls); /* skip 2nd `]' */
+ goto endloop;
+ }
+ break;
+ case '\n':
+ case '\r':
+ save(ls, '\n');
+ inclinenumber(ls);
+ if (!tv) lj_str_resetbuf(&ls->sb); /* avoid wasting space */
+ break;
+ default:
+ if (tv) save_and_next(ls);
+ else next(ls);
+ break;
+ }
+ } endloop:
+ if (tv) {
+ GCstr *str = lj_parse_keepstr(ls, ls->sb.buf + (2 + (MSize)sep),
+ ls->sb.n - 2*(2 + (MSize)sep));
+ setstrV(ls->L, tv, str);
+ }
+}
+
+static void read_string(LexState *ls, int delim, TValue *tv)
+{
+ save_and_next(ls);
+ while (ls->current != delim) {
+ switch (ls->current) {
+ case END_OF_STREAM:
+ lj_lex_error(ls, TK_eof, LJ_ERR_XSTR);
+ continue;
+ case '\n':
+ case '\r':
+ lj_lex_error(ls, TK_string, LJ_ERR_XSTR);
+ continue;
+ case '\\': {
+ int c = next(ls); /* Skip the '\\'. */
+ switch (c) {
+ case 'a': c = '\a'; break;
+ case 'b': c = '\b'; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ case 'v': c = '\v'; break;
+ case 'x': /* Hexadecimal escape '\xXX'. */
+ c = (next(ls) & 15u) << 4;
+ if (!lj_char_isdigit(ls->current)) {
+ if (!lj_char_isxdigit(ls->current)) goto err_xesc;
+ c += 9 << 4;
+ }
+ c += (next(ls) & 15u);
+ if (!lj_char_isdigit(ls->current)) {
+ if (!lj_char_isxdigit(ls->current)) goto err_xesc;
+ c += 9;
+ }
+ break;
+ case 'z': /* Skip whitespace. */
+ next(ls);
+ while (lj_char_isspace(ls->current))
+ if (currIsNewline(ls)) inclinenumber(ls); else next(ls);
+ continue;
+ case '\n': case '\r': save(ls, '\n'); inclinenumber(ls); continue;
+ case '\\': case '\"': case '\'': break;
+ case END_OF_STREAM: continue;
+ default:
+ if (!lj_char_isdigit(c))
+ goto err_xesc;
+ c -= '0'; /* Decimal escape '\ddd'. */
+ if (lj_char_isdigit(next(ls))) {
+ c = c*10 + (ls->current - '0');
+ if (lj_char_isdigit(next(ls))) {
+ c = c*10 + (ls->current - '0');
+ if (c > 255) {
+ err_xesc:
+ lj_lex_error(ls, TK_string, LJ_ERR_XESC);
+ }
+ next(ls);
+ }
+ }
+ save(ls, c);
+ continue;
+ }
+ save(ls, c);
+ next(ls);
+ continue;
+ }
+ default:
+ save_and_next(ls);
+ break;
+ }
+ }
+ save_and_next(ls); /* skip delimiter */
+ setstrV(ls->L, tv, lj_parse_keepstr(ls, ls->sb.buf + 1, ls->sb.n - 2));
+}
+
+/* -- Main lexical scanner ------------------------------------------------ */
+
+static int llex(LexState *ls, TValue *tv)
+{
+ lj_str_resetbuf(&ls->sb);
+ for (;;) {
+ if (lj_char_isident(ls->current)) {
+ GCstr *s;
+ if (lj_char_isdigit(ls->current)) { /* Numeric literal. */
+ lex_number(ls, tv);
+ return TK_number;
+ }
+ /* Identifier or reserved word. */
+ do {
+ save_and_next(ls);
+ } while (lj_char_isident(ls->current));
+ s = lj_parse_keepstr(ls, ls->sb.buf, ls->sb.n);
+ if (s->reserved > 0) /* Reserved word? */
+ return TK_OFS + s->reserved;
+ setstrV(ls->L, tv, s);
+ return TK_name;
+ }
+ switch (ls->current) {
+ case '\n':
+ case '\r':
+ inclinenumber(ls);
+ continue;
+ case ' ':
+ case '\t':
+ case '\v':
+ case '\f':
+ next(ls);
+ continue;
+ case '-':
+ next(ls);
+ if (ls->current != '-') return '-';
+ /* else is a comment */
+ next(ls);
+ if (ls->current == '[') {
+ int sep = skip_sep(ls);
+ lj_str_resetbuf(&ls->sb); /* `skip_sep' may dirty the buffer */
+ if (sep >= 0) {
+ read_long_string(ls, NULL, sep); /* long comment */
+ lj_str_resetbuf(&ls->sb);
+ continue;
+ }
+ }
+ /* else short comment */
+ while (!currIsNewline(ls) && ls->current != END_OF_STREAM)
+ next(ls);
+ continue;
+ case '[': {
+ int sep = skip_sep(ls);
+ if (sep >= 0) {
+ read_long_string(ls, tv, sep);
+ return TK_string;
+ } else if (sep == -1) {
+ return '[';
+ } else {
+ lj_lex_error(ls, TK_string, LJ_ERR_XLDELIM);
+ continue;
+ }
+ }
+ case '=':
+ next(ls);
+ if (ls->current != '=') return '='; else { next(ls); return TK_eq; }
+ case '<':
+ next(ls);
+ if (ls->current != '=') return '<'; else { next(ls); return TK_le; }
+ case '>':
+ next(ls);
+ if (ls->current != '=') return '>'; else { next(ls); return TK_ge; }
+ case '~':
+ next(ls);
+ if (ls->current != '=') return '~'; else { next(ls); return TK_ne; }
+ case '"':
+ case '\'':
+ read_string(ls, ls->current, tv);
+ return TK_string;
+ case '.':
+ save_and_next(ls);
+ if (ls->current == '.') {
+ next(ls);
+ if (ls->current == '.') {
+ next(ls);
+ return TK_dots; /* ... */
+ }
+ return TK_concat; /* .. */
+ } else if (!lj_char_isdigit(ls->current)) {
+ return '.';
+ } else {
+ lex_number(ls, tv);
+ return TK_number;
+ }
+ case END_OF_STREAM:
+ return TK_eof;
+ default: {
+ int c = ls->current;
+ next(ls);
+ return c; /* Single-char tokens (+ - / ...). */
+ }
+ }
+ }
+}
+
+/* -- Lexer API ----------------------------------------------------------- */
+
+/* Setup lexer state. */
+int lj_lex_setup(lua_State *L, LexState *ls)
+{
+ int header = 0;
+ ls->L = L;
+ ls->fs = NULL;
+ ls->n = 0;
+ ls->p = NULL;
+ ls->vstack = NULL;
+ ls->sizevstack = 0;
+ ls->vtop = 0;
+ ls->bcstack = NULL;
+ ls->sizebcstack = 0;
+ ls->lookahead = TK_eof; /* No look-ahead token. */
+ ls->linenumber = 1;
+ ls->lastline = 1;
+ lj_str_resizebuf(ls->L, &ls->sb, LJ_MIN_SBUF);
+ next(ls); /* Read-ahead first char. */
+ if (ls->current == 0xef && ls->n >= 2 && char2int(ls->p[0]) == 0xbb &&
+ char2int(ls->p[1]) == 0xbf) { /* Skip UTF-8 BOM (if buffered). */
+ ls->n -= 2;
+ ls->p += 2;
+ next(ls);
+ header = 1;
+ }
+ if (ls->current == '#') { /* Skip POSIX #! header line. */
+ do {
+ next(ls);
+ if (ls->current == END_OF_STREAM) return 0;
+ } while (!currIsNewline(ls));
+ inclinenumber(ls);
+ header = 1;
+ }
+ if (ls->current == LUA_SIGNATURE[0]) { /* Bytecode dump. */
+ if (header) {
+ /*
+ ** Loading bytecode with an extra header is disabled for security
+ ** reasons. This may circumvent the usual check for bytecode vs.
+ ** Lua code by looking at the first char. Since this is a potential
+ ** security violation no attempt is made to echo the chunkname either.
+ */
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_BCHEAD));
+ lj_err_throw(L, LUA_ERRSYNTAX);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Cleanup lexer state. */
+void lj_lex_cleanup(lua_State *L, LexState *ls)
+{
+ global_State *g = G(L);
+ lj_mem_freevec(g, ls->bcstack, ls->sizebcstack, BCInsLine);
+ lj_mem_freevec(g, ls->vstack, ls->sizevstack, VarInfo);
+ lj_str_freebuf(g, &ls->sb);
+}
+
+void lj_lex_next(LexState *ls)
+{
+ ls->lastline = ls->linenumber;
+ if (LJ_LIKELY(ls->lookahead == TK_eof)) { /* No lookahead token? */
+ ls->token = llex(ls, &ls->tokenval); /* Get next token. */
+ } else { /* Otherwise return lookahead token. */
+ ls->token = ls->lookahead;
+ ls->lookahead = TK_eof;
+ ls->tokenval = ls->lookaheadval;
+ }
+}
+
+LexToken lj_lex_lookahead(LexState *ls)
+{
+ lua_assert(ls->lookahead == TK_eof);
+ ls->lookahead = llex(ls, &ls->lookaheadval);
+ return ls->lookahead;
+}
+
+const char *lj_lex_token2str(LexState *ls, LexToken token)
+{
+ if (token > TK_OFS)
+ return tokennames[token-TK_OFS-1];
+ else if (!lj_char_iscntrl(token))
+ return lj_str_pushf(ls->L, "%c", token);
+ else
+ return lj_str_pushf(ls->L, "char(%d)", token);
+}
+
+void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...)
+{
+ const char *tok;
+ va_list argp;
+ if (token == 0) {
+ tok = NULL;
+ } else if (token == TK_name || token == TK_string || token == TK_number) {
+ save(ls, '\0');
+ tok = ls->sb.buf;
+ } else {
+ tok = lj_lex_token2str(ls, token);
+ }
+ va_start(argp, em);
+ lj_err_lex(ls->L, ls->chunkname, tok, ls->linenumber, em, argp);
+ va_end(argp);
+}
+
+void lj_lex_init(lua_State *L)
+{
+ uint32_t i;
+ for (i = 0; i < TK_RESERVED; i++) {
+ GCstr *s = lj_str_newz(L, tokennames[i]);
+ fixstring(s); /* Reserved words are never collected. */
+ s->reserved = (uint8_t)(i+1);
+ }
+}
+
diff --git a/src/LuaJIT/src/lj_lex.h b/src/LuaJIT/src/lj_lex.h
new file mode 100644
index 000000000..1ddf4b593
--- /dev/null
+++ b/src/LuaJIT/src/lj_lex.h
@@ -0,0 +1,82 @@
+/*
+** Lexical analyzer.
+** Major parts taken verbatim from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#ifndef _LJ_LEX_H
+#define _LJ_LEX_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+#include "lj_err.h"
+
+/* Lua lexer tokens. */
+#define TKDEF(_, __) \
+ _(and) _(break) _(do) _(else) _(elseif) _(end) _(false) \
+ _(for) _(function) _(if) _(in) _(local) _(nil) _(not) _(or) \
+ _(repeat) _(return) _(then) _(true) _(until) _(while) \
+ __(concat, ..) __(dots, ...) __(eq, ==) __(ge, >=) __(le, <=) __(ne, ~=) \
+  __(number, <number>) __(name, <name>) __(string, <string>) __(eof, <eof>)
+
+enum {
+ TK_OFS = 256,
+#define TKENUM1(name) TK_##name,
+#define TKENUM2(name, sym) TK_##name,
+TKDEF(TKENUM1, TKENUM2)
+#undef TKENUM1
+#undef TKENUM2
+ TK_RESERVED = TK_while - TK_OFS
+};
+
+typedef int LexToken;
+
+/* Combined bytecode ins/line. Only used during bytecode generation. */
+typedef struct BCInsLine {
+ BCIns ins; /* Bytecode instruction. */
+ BCLine line; /* Line number for this bytecode. */
+} BCInsLine;
+
+/* Info for local variables. Only used during bytecode generation. */
+typedef struct VarInfo {
+ GCRef name; /* Local variable name. */
+ BCPos startpc; /* First point where the local variable is active. */
+ BCPos endpc; /* First point where the local variable is dead. */
+} VarInfo;
+
+/* Lua lexer state. */
+typedef struct LexState {
+ struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */
+ struct lua_State *L; /* Lua state. */
+ TValue tokenval; /* Current token value. */
+ TValue lookaheadval; /* Lookahead token value. */
+ int current; /* Current character (charint). */
+ LexToken token; /* Current token. */
+ LexToken lookahead; /* Lookahead token. */
+ MSize n; /* Bytes left in input buffer. */
+ const char *p; /* Current position in input buffer. */
+ SBuf sb; /* String buffer for tokens. */
+ lua_Reader rfunc; /* Reader callback. */
+ void *rdata; /* Reader callback data. */
+ BCLine linenumber; /* Input line counter. */
+ BCLine lastline; /* Line of last token. */
+ GCstr *chunkname; /* Current chunk name (interned string). */
+ const char *chunkarg; /* Chunk name argument. */
+ VarInfo *vstack; /* Stack for names and extents of local variables. */
+ MSize sizevstack; /* Size of variable stack. */
+ MSize vtop; /* Top of variable stack. */
+ BCInsLine *bcstack; /* Stack for bytecode instructions/line numbers. */
+ MSize sizebcstack; /* Size of bytecode stack. */
+ uint32_t level; /* Syntactical nesting level. */
+} LexState;
+
+LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls);
+LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls);
+LJ_FUNC void lj_lex_next(LexState *ls);
+LJ_FUNC LexToken lj_lex_lookahead(LexState *ls);
+LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken token);
+LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...);
+LJ_FUNC void lj_lex_init(lua_State *L);
+
+#endif
diff --git a/src/LuaJIT/src/lj_lib.c b/src/LuaJIT/src/lj_lib.c
new file mode 100644
index 000000000..ae9b9301d
--- /dev/null
+++ b/src/LuaJIT/src/lj_lib.c
@@ -0,0 +1,260 @@
+/*
+** Library function support.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_lib_c
+#define LUA_CORE
+
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_bc.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_lib.h"
+
+/* -- Library initialization ---------------------------------------------- */
+
+static GCtab *lib_create_table(lua_State *L, const char *libname, int hsize)
+{
+ if (libname) {
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_getfield(L, -1, libname);
+ if (!tvistab(L->top-1)) {
+ L->top--;
+ if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, hsize) != NULL)
+ lj_err_callerv(L, LJ_ERR_BADMODN, libname);
+ settabV(L, L->top, tabV(L->top-1));
+ L->top++;
+ lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
+ }
+ L->top--;
+ settabV(L, L->top-1, tabV(L->top));
+ } else {
+ lua_createtable(L, 0, hsize);
+ }
+ return tabV(L->top-1);
+}
+
+void lj_lib_register(lua_State *L, const char *libname,
+ const uint8_t *p, const lua_CFunction *cf)
+{
+ GCtab *env = tabref(L->env);
+ GCfunc *ofn = NULL;
+ int ffid = *p++;
+ BCIns *bcff = &L2GG(L)->bcff[*p++];
+ GCtab *tab = lib_create_table(L, libname, *p++);
+ ptrdiff_t tpos = L->top - L->base;
+
+ /* Avoid barriers further down. */
+ lj_gc_anybarriert(L, tab);
+ tab->nomm = 0;
+
+ for (;;) {
+ uint32_t tag = *p++;
+ MSize len = tag & LIBINIT_LENMASK;
+ tag &= LIBINIT_TAGMASK;
+ if (tag != LIBINIT_STRING) {
+ const char *name;
+ MSize nuv = (MSize)(L->top - L->base - tpos);
+ GCfunc *fn = lj_func_newC(L, nuv, env);
+ if (nuv) {
+ L->top = L->base + tpos;
+ memcpy(fn->c.upvalue, L->top, sizeof(TValue)*nuv);
+ }
+ fn->c.ffid = (uint8_t)(ffid++);
+ name = (const char *)p;
+ p += len;
+ if (tag == LIBINIT_CF)
+ setmref(fn->c.pc, &G(L)->bc_cfunc_int);
+ else
+ setmref(fn->c.pc, bcff++);
+ if (tag == LIBINIT_ASM_)
+ fn->c.f = ofn->c.f; /* Copy handler from previous function. */
+ else
+ fn->c.f = *cf++; /* Get cf or handler from C function table. */
+ if (len) {
+ /* NOBARRIER: See above for common barrier. */
+ setfuncV(L, lj_tab_setstr(L, tab, lj_str_new(L, name, len)), fn);
+ }
+ ofn = fn;
+ } else {
+ switch (tag | len) {
+ case LIBINIT_SET:
+ L->top -= 2;
+ if (tvisstr(L->top+1) && strV(L->top+1)->len == 0)
+ env = tabV(L->top);
+ else /* NOBARRIER: See above for common barrier. */
+ copyTV(L, lj_tab_set(L, tab, L->top+1), L->top);
+ break;
+ case LIBINIT_NUMBER:
+ memcpy(&L->top->n, p, sizeof(double));
+ L->top++;
+ p += sizeof(double);
+ break;
+ case LIBINIT_COPY:
+ copyTV(L, L->top, L->top - *p++);
+ L->top++;
+ break;
+ case LIBINIT_LASTCL:
+ setfuncV(L, L->top++, ofn);
+ break;
+ case LIBINIT_FFID:
+ ffid++;
+ break;
+ case LIBINIT_END:
+ return;
+ default:
+ setstrV(L, L->top++, lj_str_new(L, (const char *)p, len));
+ p += len;
+ break;
+ }
+ }
+ }
+}
+
+/* -- Type checks --------------------------------------------------------- */
+
+TValue *lj_lib_checkany(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ return o;
+}
+
+GCstr *lj_lib_checkstr(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o < L->top) {
+ if (LJ_LIKELY(tvisstr(o))) {
+ return strV(o);
+ } else if (tvisnumber(o)) {
+ GCstr *s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ return s;
+ }
+ }
+ lj_err_argt(L, narg, LUA_TSTRING);
+ return NULL; /* unreachable */
+}
+
+GCstr *lj_lib_optstr(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ return (o < L->top && !tvisnil(o)) ? lj_lib_checkstr(L, narg) : NULL;
+}
+
+#if LJ_DUALNUM
+void lj_lib_checknumber(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top &&
+ (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o)))))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+}
+#endif
+
+lua_Number lj_lib_checknum(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top &&
+ (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o)))))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_UNLIKELY(tvisint(o))) {
+ lua_Number n = (lua_Number)intV(o);
+ setnumV(o, n);
+ return n;
+ } else {
+ return numV(o);
+ }
+}
+
+int32_t lj_lib_checkint(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top &&
+ (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o)))))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else {
+ int32_t i = lj_num2int(numV(o));
+ if (LJ_DUALNUM) setintV(o, i);
+ return i;
+ }
+}
+
+int32_t lj_lib_optint(lua_State *L, int narg, int32_t def)
+{
+ TValue *o = L->base + narg-1;
+ return (o < L->top && !tvisnil(o)) ? lj_lib_checkint(L, narg) : def;
+}
+
+int32_t lj_lib_checkbit(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top &&
+ (tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o)))))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else {
+ int32_t i = lj_num2bit(numV(o));
+ if (LJ_DUALNUM) setintV(o, i);
+ return i;
+ }
+}
+
+GCfunc *lj_lib_checkfunc(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tvisfunc(o)))
+ lj_err_argt(L, narg, LUA_TFUNCTION);
+ return funcV(o);
+}
+
+GCtab *lj_lib_checktab(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tvistab(o)))
+ lj_err_argt(L, narg, LUA_TTABLE);
+ return tabV(o);
+}
+
+GCtab *lj_lib_checktabornil(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o < L->top) {
+ if (tvistab(o))
+ return tabV(o);
+ else if (tvisnil(o))
+ return NULL;
+ }
+ lj_err_arg(L, narg, LJ_ERR_NOTABN);
+ return NULL; /* unreachable */
+}
+
+int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst)
+{
+ GCstr *s = def >= 0 ? lj_lib_optstr(L, narg) : lj_lib_checkstr(L, narg);
+ if (s) {
+ const char *opt = strdata(s);
+ MSize len = s->len;
+ int i;
+ for (i = 0; *(const uint8_t *)lst; i++) {
+ if (*(const uint8_t *)lst == len && memcmp(opt, lst+1, len) == 0)
+ return i;
+ lst += 1+*(const uint8_t *)lst;
+ }
+ lj_err_argv(L, narg, LJ_ERR_INVOPTM, opt);
+ }
+ return def;
+}
+
diff --git a/src/LuaJIT/src/lj_lib.h b/src/LuaJIT/src/lj_lib.h
new file mode 100644
index 000000000..81519d7e6
--- /dev/null
+++ b/src/LuaJIT/src/lj_lib.h
@@ -0,0 +1,112 @@
+/*
+** Library function support.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_LIB_H
+#define _LJ_LIB_H
+
+#include "lj_obj.h"
+
+/*
+** A fallback handler is called by the assembler VM if the fast path fails:
+**
+** - too few arguments: unrecoverable.
+** - wrong argument type: recoverable, if coercion succeeds.
+** - bad argument value: unrecoverable.
+** - stack overflow: recoverable, if stack reallocation succeeds.
+** - extra handling: recoverable.
+**
+** The unrecoverable cases throw an error with lj_err_arg(), lj_err_argtype(),
+** lj_err_caller() or lj_err_callermsg().
+** The recoverable cases return 0 or the number of results + 1.
+** The assembler VM retries the fast path only if 0 is returned.
+** This time the fallback must not be called again or it gets stuck in a loop.
+*/
+
+/* Return values from fallback handler. */
+#define FFH_RETRY 0
+#define FFH_UNREACHABLE FFH_RETRY
+#define FFH_RES(n) ((n)+1)
+#define FFH_TAILCALL (-1)
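+
+/* Illustrative sketch (not part of the upstream sources): a minimal
+** fallback handler for a hypothetical fast function that expects one
+** number argument, in the same style as the handlers in lib_math.c:
+**
+**   LJLIB_ASM(example)
+**   {
+**     lj_lib_checknumber(L, 1);
+**     return FFH_RETRY;
+**   }
+**
+** The check either throws (the unrecoverable case) or coerces a string
+** argument to a number in place; FFH_RETRY then makes the VM re-run the
+** fast path, which now succeeds. Handlers that compute a result in C
+** typically store it at L->base-1 and return FFH_RES(1) instead.
+*/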
+
+LJ_FUNC TValue *lj_lib_checkany(lua_State *L, int narg);
+LJ_FUNC GCstr *lj_lib_checkstr(lua_State *L, int narg);
+LJ_FUNC GCstr *lj_lib_optstr(lua_State *L, int narg);
+#if LJ_DUALNUM
+LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg);
+#else
+#define lj_lib_checknumber(L, narg) lj_lib_checknum((L), (narg))
+#endif
+LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg);
+LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg);
+LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def);
+LJ_FUNC int32_t lj_lib_checkbit(lua_State *L, int narg);
+LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg);
+LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg);
+LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg);
+LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst);
+
+/* Avoid including lj_frame.h. */
+#define lj_lib_upvalue(L, n) \
+ (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1])
+
+#if LJ_TARGET_WINDOWS
+#define lj_lib_checkfpu(L) \
+ do { setnumV(L->top++, (lua_Number)1437217655); \
+ if (lua_tointeger(L, -1) != 1437217655) lj_err_caller(L, LJ_ERR_BADFPU); \
+ L->top--; } while (0)
+#else
+#define lj_lib_checkfpu(L) UNUSED(L)
+#endif
+
+/* Push internal function on the stack. */
+static LJ_AINLINE void lj_lib_pushcc(lua_State *L, lua_CFunction f,
+ int id, int n)
+{
+ GCfunc *fn;
+ lua_pushcclosure(L, f, n);
+ fn = funcV(L->top-1);
+ fn->c.ffid = (uint8_t)id;
+ setmref(fn->c.pc, &G(L)->bc_cfunc_int);
+}
+
+#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0))
+
+/* Library function declarations. Scanned by buildvm. */
+#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L)
+#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L)
+#define LJLIB_ASM_(name)
+#define LJLIB_SET(name)
+#define LJLIB_PUSH(arg)
+#define LJLIB_REC(handler)
+#define LJLIB_NOREGUV
+#define LJLIB_NOREG
+
+#define LJ_LIB_REG(L, regname, name) \
+ lj_lib_register(L, regname, lj_lib_init_##name, lj_lib_cf_##name)
+
+LJ_FUNC void lj_lib_register(lua_State *L, const char *libname,
+ const uint8_t *init, const lua_CFunction *cf);
+
+/* Library init data tags. */
+#define LIBINIT_LENMASK 0x3f
+#define LIBINIT_TAGMASK 0xc0
+#define LIBINIT_CF 0x00
+#define LIBINIT_ASM 0x40
+#define LIBINIT_ASM_ 0x80
+#define LIBINIT_STRING 0xc0
+#define LIBINIT_MAXSTR 0x39
+#define LIBINIT_SET 0xfa
+#define LIBINIT_NUMBER 0xfb
+#define LIBINIT_COPY 0xfc
+#define LIBINIT_LASTCL 0xfd
+#define LIBINIT_FFID 0xfe
+#define LIBINIT_END 0xff
+
+/* Exported library functions. */
+
+typedef struct RandomState RandomState;
+LJ_FUNC uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs);
+
+#endif
diff --git a/src/LuaJIT/src/lj_mcode.c b/src/LuaJIT/src/lj_mcode.c
new file mode 100644
index 000000000..fb6b6dcee
--- /dev/null
+++ b/src/LuaJIT/src/lj_mcode.c
@@ -0,0 +1,356 @@
+/*
+** Machine code management.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_mcode_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#if LJ_HASJIT
+#include "lj_gc.h"
+#include "lj_jit.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#endif
+
+/* -- OS-specific functions ----------------------------------------------- */
+
+#if LJ_HASJIT || LJ_HASFFI
+
+/* Define this if you want to run LuaJIT with Valgrind. */
+#ifdef LUAJIT_USE_VALGRIND
+#include <valgrind/valgrind.h>
+#endif
+
+#if !LJ_TARGET_X86ORX64 && LJ_TARGET_OSX
+void sys_icache_invalidate(void *start, size_t len);
+#endif
+
+/* Synchronize data/instruction cache. */
+void lj_mcode_sync(void *start, void *end)
+{
+#ifdef LUAJIT_USE_VALGRIND
+ VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
+#endif
+#if LJ_TARGET_X86ORX64
+ UNUSED(start); UNUSED(end);
+#elif LJ_TARGET_OSX
+ sys_icache_invalidate(start, (char *)end-(char *)start);
+#elif LJ_TARGET_PPC
+ lj_vm_cachesync(start, end);
+#elif defined(__GNUC__)
+ __clear_cache(start, end);
+#else
+#error "Missing builtin to flush instruction cache"
+#endif
+}
+
+#endif
+
+#if LJ_HASJIT
+
+#if LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#define MCPROT_RW PAGE_READWRITE
+#define MCPROT_RX PAGE_EXECUTE_READ
+#define MCPROT_RWX PAGE_EXECUTE_READWRITE
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
+{
+ void *p = VirtualAlloc((void *)hint, sz,
+ MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
+ if (!p && !hint)
+ lj_trace_err(J, LJ_TRERR_MCODEAL);
+ return p;
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+ UNUSED(J); UNUSED(sz);
+ VirtualFree(p, 0, MEM_RELEASE);
+}
+
+static void mcode_setprot(void *p, size_t sz, DWORD prot)
+{
+ DWORD oprot;
+ VirtualProtect(p, sz, prot, &oprot);
+}
+
+#elif LJ_TARGET_POSIX
+
+#include <sys/mman.h>
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#define MCPROT_RW (PROT_READ|PROT_WRITE)
+#define MCPROT_RX (PROT_READ|PROT_EXEC)
+#define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC)
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
+{
+ void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (p == MAP_FAILED && !hint)
+ lj_trace_err(J, LJ_TRERR_MCODEAL);
+ return p;
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+ UNUSED(J);
+ munmap(p, sz);
+}
+
+static void mcode_setprot(void *p, size_t sz, int prot)
+{
+ mprotect(p, sz, prot);
+}
+
+#elif LJ_64
+
+#error "Missing OS support for explicit placement of executable memory"
+
+#else
+
+/* Fallback allocator. This will fail if memory is not executable by default. */
+#define LUAJIT_UNPROTECT_MCODE
+#define MCPROT_RW 0
+#define MCPROT_RX 0
+#define MCPROT_RWX 0
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
+{
+ UNUSED(hint); UNUSED(prot);
+ return lj_mem_new(J->L, sz);
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+ lj_mem_free(J2G(J), p, sz);
+}
+
+#define mcode_setprot(p, sz, prot) UNUSED(p)
+
+#endif
+
+/* -- MCode area protection ----------------------------------------------- */
+
+/* Define this ONLY if the page protection twiddling becomes a bottleneck. */
+#ifdef LUAJIT_UNPROTECT_MCODE
+
+/* It's generally considered to be a potential security risk to have
+** pages with simultaneous write *and* execute access in a process.
+**
+** Do not even think about using this mode for server processes or
+** apps handling untrusted external data (such as a browser).
+**
+** The security risk is not in LuaJIT itself -- but if an adversary finds
+** any *other* flaw in your C application logic, then any RWX memory page
+** simplifies writing an exploit considerably.
+*/
+#define MCPROT_GEN MCPROT_RWX
+#define MCPROT_RUN MCPROT_RWX
+
+static void mcode_protect(jit_State *J, int prot)
+{
+ UNUSED(J); UNUSED(prot);
+}
+
+#else
+
+/* This is the default behaviour and much safer:
+**
+** Most of the time the memory pages holding machine code are executable,
+** but NONE of them is writable.
+**
+** The current memory area is marked read-write (but NOT executable) only
+** during the short time window while the assembler generates machine code.
+*/
+#define MCPROT_GEN MCPROT_RW
+#define MCPROT_RUN MCPROT_RX
+
+/* Change protection of MCode area. */
+static void mcode_protect(jit_State *J, int prot)
+{
+ if (J->mcprot != prot) {
+ mcode_setprot(J->mcarea, J->szmcarea, prot);
+ J->mcprot = prot;
+ }
+}
+
+#endif
+
+/* -- MCode area allocation ----------------------------------------------- */
+
+#if LJ_TARGET_X64
+#define mcode_validptr(p) ((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
+#else
+#define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000)
+#endif
+
+#ifdef LJ_TARGET_JUMPRANGE
+
+/* Get memory within relative jump distance of our code in 64 bit mode. */
+static void *mcode_alloc(jit_State *J, size_t sz)
+{
+ /* Target an address in the static assembler code (64K aligned).
+ ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
+ */
+#if LJ_TARGET_MIPS
+ /* Use the middle of the 256MB-aligned region. */
+ uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
+ 0x08000000u;
+#else
+ uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
+#endif
+ const uintptr_t range = (1u << LJ_TARGET_JUMPRANGE) - (1u << 21);
+ /* First try a contiguous area below the last one. */
+ uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
+ int i;
+ for (i = 0; i < 32; i++) { /* 32 attempts ought to be enough ... */
+ if (mcode_validptr(hint)) {
+ void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);
+
+ if (mcode_validptr(p)) {
+ if ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range)
+ return p;
+ mcode_free(J, p, sz); /* Free badly placed area. */
+ }
+ }
+ /* Next try probing pseudo-random addresses. */
+ do {
+ hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16; /* 64K aligned. */
+ } while (!(hint + sz < range));
+ hint = target + hint - (range>>1);
+ }
+ lj_trace_err(J, LJ_TRERR_MCODEAL); /* Give up. OS probably ignores hints? */
+ return NULL;
+}
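+
+/* Note on the acceptance test above (illustration, not upstream code): the
+** comparisons rely on unsigned wraparound. With uintptr_t arithmetic,
+** p + sz - target < range holds iff the end of the area lies in
+** [target, target+range), and target - p < range holds iff its start lies
+** in (target-range, target]. Either way, relative jumps between the new
+** area and the static VM code near target stay within the branch range.
+** On x64, for example, LJ_TARGET_JUMPRANGE is 31 (+-2GB rel32 branches),
+** so range = 2^31 - 2^21, i.e. a 2MB safety margin is kept.
+*/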
+
+#else
+
+/* All memory addresses are reachable by relative jumps. */
+#define mcode_alloc(J, sz) mcode_alloc_at((J), 0, (sz), MCPROT_GEN)
+
+#endif
+
+/* -- MCode area management ----------------------------------------------- */
+
+/* Linked list of MCode areas. */
+typedef struct MCLink {
+ MCode *next; /* Next area. */
+ size_t size; /* Size of current area. */
+} MCLink;
+
+/* Allocate a new MCode area. */
+static void mcode_allocarea(jit_State *J)
+{
+ MCode *oldarea = J->mcarea;
+ size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
+ sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
+ J->mcarea = (MCode *)mcode_alloc(J, sz);
+ J->szmcarea = sz;
+ J->mcprot = MCPROT_GEN;
+ J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
+ J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
+ ((MCLink *)J->mcarea)->next = oldarea;
+ ((MCLink *)J->mcarea)->size = sz;
+ J->szallmcarea += sz;
+}
+
+/* Free all MCode areas. */
+void lj_mcode_free(jit_State *J)
+{
+ MCode *mc = J->mcarea;
+ J->mcarea = NULL;
+ J->szallmcarea = 0;
+ while (mc) {
+ MCode *next = ((MCLink *)mc)->next;
+ mcode_free(J, mc, ((MCLink *)mc)->size);
+ mc = next;
+ }
+}
+
+/* -- MCode transactions -------------------------------------------------- */
+
+/* Reserve the remainder of the current MCode area. */
+MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
+{
+ if (!J->mcarea)
+ mcode_allocarea(J);
+ else
+ mcode_protect(J, MCPROT_GEN);
+ *lim = J->mcbot;
+ return J->mctop;
+}
+
+/* Commit the top part of the current MCode area. */
+void lj_mcode_commit(jit_State *J, MCode *top)
+{
+ J->mctop = top;
+ mcode_protect(J, MCPROT_RUN);
+}
+
+/* Abort the reservation. */
+void lj_mcode_abort(jit_State *J)
+{
+ mcode_protect(J, MCPROT_RUN);
+}
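+
+/* Illustrative usage (a sketch, roughly the pattern the trace assembler in
+** lj_asm.c follows): reserve the area, emit machine code downwards from the
+** returned top pointer, then commit the finished code or abort on error.
+**
+**   MCode *bot, *top = lj_mcode_reserve(J, &bot);
+**   MCode *p = top;
+**   ... emit instructions at decreasing addresses, keeping p above bot ...
+**   lj_mcode_commit(J, p);
+**
+** Commit stores the new top and flips the area back to MCPROT_RUN.
+** lj_mcode_abort() only restores the protection; since J->mctop is left
+** unchanged, partially emitted code is simply abandoned.
+*/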
+
+/* Set/reset protection to allow patching of MCode areas. */
+MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
+{
+#ifdef LUAJIT_UNPROTECT_MCODE
+ UNUSED(J); UNUSED(ptr); UNUSED(finish);
+ return NULL;
+#else
+ if (finish) {
+ if (J->mcarea == ptr)
+ mcode_protect(J, MCPROT_RUN);
+ else
+ mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN);
+ return NULL;
+ } else {
+ MCode *mc = J->mcarea;
+ /* Try current area first to use the protection cache. */
+ if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
+ mcode_protect(J, MCPROT_GEN);
+ return mc;
+ }
+ /* Otherwise search through the list of MCode areas. */
+ for (;;) {
+ mc = ((MCLink *)mc)->next;
+ lua_assert(mc != NULL);
+ if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
+ mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN);
+ return mc;
+ }
+ }
+ }
+#endif
+}
+
+/* Limit of MCode reservation reached. */
+void lj_mcode_limiterr(jit_State *J, size_t need)
+{
+ size_t sizemcode, maxmcode;
+ lj_mcode_abort(J);
+ sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
+ sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
+ maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
+ if ((size_t)need > sizemcode)
+ lj_trace_err(J, LJ_TRERR_MCODEOV); /* Too long for any area. */
+ if (J->szallmcarea + sizemcode > maxmcode)
+ lj_trace_err(J, LJ_TRERR_MCODEAL);
+ mcode_allocarea(J);
+ lj_trace_err(J, LJ_TRERR_MCODELM); /* Retry with new area. */
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_mcode.h b/src/LuaJIT/src/lj_mcode.h
new file mode 100644
index 000000000..f4bffea87
--- /dev/null
+++ b/src/LuaJIT/src/lj_mcode.h
@@ -0,0 +1,30 @@
+/*
+** Machine code management.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_MCODE_H
+#define _LJ_MCODE_H
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT || LJ_HASFFI
+LJ_FUNC void lj_mcode_sync(void *start, void *end);
+#endif
+
+#if LJ_HASJIT
+
+#include "lj_jit.h"
+
+LJ_FUNC void lj_mcode_free(jit_State *J);
+LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim);
+LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m);
+LJ_FUNC void lj_mcode_abort(jit_State *J);
+LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish);
+LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need);
+
+#define lj_mcode_commitbot(J, m) (J->mcbot = (m))
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_meta.c b/src/LuaJIT/src/lj_meta.c
new file mode 100644
index 000000000..ab8099e8b
--- /dev/null
+++ b/src/LuaJIT/src/lj_meta.c
@@ -0,0 +1,468 @@
+/*
+** Metamethod handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_meta_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_vm.h"
+
+/* -- Metamethod handling ------------------------------------------------- */
+
+/* String interning of metamethod names for fast indexing. */
+void lj_meta_init(lua_State *L)
+{
+#define MMNAME(name) "__" #name
+ const char *metanames = MMDEF(MMNAME);
+#undef MMNAME
+ global_State *g = G(L);
+ const char *p, *q;
+ uint32_t mm;
+ for (mm = 0, p = metanames; *p; mm++, p = q) {
+ GCstr *s;
+ for (q = p+2; *q && *q != '_'; q++) ;
+ s = lj_str_new(L, p, (size_t)(q-p));
+ /* NOBARRIER: g->gcroot[] is a GC root. */
+ setgcref(g->gcroot[GCROOT_MMNAME+mm], obj2gco(s));
+ }
+}
+
+/* Negative caching of a few fast metamethods. See the lj_meta_fast() macro. */
+cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name)
+{
+ cTValue *mo = lj_tab_getstr(mt, name);
+ lua_assert(mm <= MM_FAST);
+ if (!mo || tvisnil(mo)) { /* No metamethod? */
+ mt->nomm |= (uint8_t)(1u<<mm); /* Set negative cache flag. */
+ return NULL;
+ }
+ return mo;
+}
+
+/* Lookup metamethod for object. */
+cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm)
+{
+ GCtab *mt;
+ if (tvistab(o))
+ mt = tabref(tabV(o)->metatable);
+ else if (tvisudata(o))
+ mt = tabref(udataV(o)->metatable);
+ else
+ mt = tabref(basemt_obj(G(L), o));
+ if (mt) {
+ cTValue *mo = lj_tab_getstr(mt, mmname_str(G(L), mm));
+ if (mo)
+ return mo;
+ }
+ return niltv(L);
+}
+
+#if LJ_HASFFI
+/* Tailcall from C function. */
+int lj_meta_tailcall(lua_State *L, cTValue *tv)
+{
+ TValue *base = L->base;
+ TValue *top = L->top;
+ const BCIns *pc = frame_pc(base-1); /* Preserve old PC from frame. */
+ copyTV(L, base-1, tv); /* Replace frame with new object. */
+ top->u32.lo = LJ_CONT_TAILCALL;
+ setframe_pc(top, pc);
+ setframe_gc(top+1, obj2gco(L)); /* Dummy frame object. */
+ setframe_ftsz(top+1, (int)((char *)(top+2) - (char *)base) + FRAME_CONT);
+ L->base = L->top = top+2;
+ /*
+ ** before: [old_mo|PC] [... ...]
+ ** ^base ^top
+ ** after: [new_mo|itype] [... ...] [NULL|PC] [dummy|delta]
+ ** ^base/top
+ ** tailcall: [new_mo|PC] [... ...]
+ ** ^base ^top
+ */
+ return 0;
+}
+#endif
+
+/* Setup call to metamethod to be run by Assembler VM. */
+static TValue *mmcall(lua_State *L, ASMFunction cont, cTValue *mo,
+ cTValue *a, cTValue *b)
+{
+ /*
+ ** |-- framesize -> top top+1 top+2 top+3
+ ** before: [func slots ...]
+ ** mm setup: [func slots ...] [cont|?] [mo|tmtype] [a] [b]
+ ** in asm: [func slots ...] [cont|PC] [mo|delta] [a] [b]
+ ** ^-- func base ^-- mm base
+ ** after mm: [func slots ...] [result]
+ ** ^-- copy to base[PC_RA] --/ for lj_cont_ra
+ ** istruecond + branch for lj_cont_cond*
+ ** ignore for lj_cont_nop
+ ** next PC: [func slots ...]
+ */
+ TValue *top = L->top;
+ if (curr_funcisL(L)) top = curr_topL(L);
+ setcont(top, cont); /* Assembler VM stores PC in upper word. */
+ copyTV(L, top+1, mo); /* Store metamethod and two arguments. */
+ copyTV(L, top+2, a);
+ copyTV(L, top+3, b);
+ return top+2; /* Return new base. */
+}
+
+/* -- C helpers for some instructions, called from assembler VM ----------- */
+
+/* Helper for TGET*. __index chain and metamethod. */
+cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k)
+{
+ int loop;
+ for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
+ cTValue *mo;
+ if (LJ_LIKELY(tvistab(o))) {
+ GCtab *t = tabV(o);
+ cTValue *tv = lj_tab_get(L, t, k);
+ if (!tvisnil(tv) ||
+ !(mo = lj_meta_fast(L, tabref(t->metatable), MM_index)))
+ return tv;
+ } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_index))) {
+ lj_err_optype(L, o, LJ_ERR_OPINDEX);
+ return NULL; /* unreachable */
+ }
+ if (tvisfunc(mo)) {
+ L->top = mmcall(L, lj_cont_ra, mo, o, k);
+ return NULL; /* Trigger metamethod call. */
+ }
+ o = mo;
+ }
+ lj_err_msg(L, LJ_ERR_GETLOOP);
+ return NULL; /* unreachable */
+}
+
+/* Helper for TSET*. __newindex chain and metamethod. */
+TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k)
+{
+ TValue tmp;
+ int loop;
+ for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
+ cTValue *mo;
+ if (LJ_LIKELY(tvistab(o))) {
+ GCtab *t = tabV(o);
+ cTValue *tv = lj_tab_get(L, t, k);
+ if (LJ_LIKELY(!tvisnil(tv))) {
+ t->nomm = 0; /* Invalidate negative metamethod cache. */
+ lj_gc_anybarriert(L, t);
+ return (TValue *)tv;
+ } else if (!(mo = lj_meta_fast(L, tabref(t->metatable), MM_newindex))) {
+ t->nomm = 0; /* Invalidate negative metamethod cache. */
+ lj_gc_anybarriert(L, t);
+ if (tv != niltv(L))
+ return (TValue *)tv;
+ if (tvisnil(k)) lj_err_msg(L, LJ_ERR_NILIDX);
+ else if (tvisint(k)) { setnumV(&tmp, (lua_Number)intV(k)); k = &tmp; }
+ else if (tvisnum(k) && tvisnan(k)) lj_err_msg(L, LJ_ERR_NANIDX);
+ return lj_tab_newkey(L, t, k);
+ }
+ } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_newindex))) {
+ lj_err_optype(L, o, LJ_ERR_OPINDEX);
+ return NULL; /* unreachable */
+ }
+ if (tvisfunc(mo)) {
+ L->top = mmcall(L, lj_cont_nop, mo, o, k);
+ /* L->top+2 = v filled in by caller. */
+ return NULL; /* Trigger metamethod call. */
+ }
+ copyTV(L, &tmp, mo);
+ o = &tmp;
+ }
+ lj_err_msg(L, LJ_ERR_SETLOOP);
+ return NULL; /* unreachable */
+}
+
+static cTValue *str2num(cTValue *o, TValue *n)
+{
+ if (tvisnum(o))
+ return o;
+ else if (tvisint(o))
+ return (setnumV(n, (lua_Number)intV(o)), n);
+ else if (tvisstr(o) && lj_str_tonum(strV(o), n))
+ return n;
+ else
+ return NULL;
+}
+
+/* Helper for arithmetic instructions. Coercion, metamethod. */
+TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, cTValue *rc,
+ BCReg op)
+{
+ MMS mm = bcmode_mm(op);
+ TValue tempb, tempc;
+ cTValue *b, *c;
+ if ((b = str2num(rb, &tempb)) != NULL &&
+ (c = str2num(rc, &tempc)) != NULL) { /* Try coercion first. */
+ setnumV(ra, lj_vm_foldarith(numV(b), numV(c), (int)mm-MM_add));
+ return NULL;
+ } else {
+ cTValue *mo = lj_meta_lookup(L, rb, mm);
+ if (tvisnil(mo)) {
+ mo = lj_meta_lookup(L, rc, mm);
+ if (tvisnil(mo)) {
+ if (str2num(rb, &tempb) == NULL) rc = rb;
+ lj_err_optype(L, rc, LJ_ERR_OPARITH);
+ return NULL; /* unreachable */
+ }
+ }
+ return mmcall(L, lj_cont_ra, mo, rb, rc);
+ }
+}
+
+/* In-place coercion of a number to a string. */
+static LJ_AINLINE int tostring(lua_State *L, TValue *o)
+{
+ if (tvisstr(o)) {
+ return 1;
+ } else if (tvisnumber(o)) {
+ setstrV(L, o, lj_str_fromnumber(L, o));
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */
+TValue *lj_meta_cat(lua_State *L, TValue *top, int left)
+{
+ int fromc = 0;
+ if (left < 0) { left = -left; fromc = 1; }
+ do {
+ int n = 1;
+ if (!(tvisstr(top-1) || tvisnumber(top-1)) || !tostring(L, top)) {
+ cTValue *mo = lj_meta_lookup(L, top-1, MM_concat);
+ if (tvisnil(mo)) {
+ mo = lj_meta_lookup(L, top, MM_concat);
+ if (tvisnil(mo)) {
+ if (tvisstr(top-1) || tvisnumber(top-1)) top++;
+ lj_err_optype(L, top-1, LJ_ERR_OPCAT);
+ return NULL; /* unreachable */
+ }
+ }
+ /* One of the top two elements is not a string, call __cat metamethod:
+ **
+ ** before: [...][CAT stack .........................]
+ ** top-1 top top+1 top+2
+ ** pick two: [...][CAT stack ...] [o1] [o2]
+ ** setup mm: [...][CAT stack ...] [cont|?] [mo|tmtype] [o1] [o2]
+ ** in asm: [...][CAT stack ...] [cont|PC] [mo|delta] [o1] [o2]
+ ** ^-- func base ^-- mm base
+ ** after mm: [...][CAT stack ...] <--push-- [result]
+ ** next step: [...][CAT stack .............]
+ */
+ copyTV(L, top+2, top); /* Careful with the order of stack copies! */
+ copyTV(L, top+1, top-1);
+ copyTV(L, top, mo);
+ setcont(top-1, lj_cont_cat);
+ return top+1; /* Trigger metamethod call. */
+ } else if (strV(top)->len == 0) { /* Shortcut. */
+ (void)tostring(L, top-1);
+ } else {
+ /* Pick as many strings as possible from the top and concatenate them:
+ **
+ ** before: [...][CAT stack ...........................]
+ ** pick str: [...][CAT stack ...] [...... strings ......]
+ ** concat: [...][CAT stack ...] [result]
+ ** next step: [...][CAT stack ............]
+ */
+ MSize tlen = strV(top)->len;
+ char *buffer;
+ int i;
+ for (n = 1; n <= left && tostring(L, top-n); n++) {
+ MSize len = strV(top-n)->len;
+ if (len >= LJ_MAX_STR - tlen)
+ lj_err_msg(L, LJ_ERR_STROV);
+ tlen += len;
+ }
+ buffer = lj_str_needbuf(L, &G(L)->tmpbuf, tlen);
+ n--;
+ tlen = 0;
+ for (i = n; i >= 0; i--) {
+ MSize len = strV(top-i)->len;
+ memcpy(buffer + tlen, strVdata(top-i), len);
+ tlen += len;
+ }
+ setstrV(L, top-n, lj_str_new(L, buffer, tlen));
+ }
+ left -= n;
+ top -= n;
+ } while (left >= 1);
+ if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) {
+ if (!fromc) L->top = curr_topL(L);
+ lj_gc_step(L);
+ }
+ return NULL;
+}
+
+/* Helper for LEN. __len metamethod. */
+TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o)
+{
+ cTValue *mo = lj_meta_lookup(L, o, MM_len);
+ if (tvisnil(mo)) {
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ if (tvistab(o))
+ tabref(tabV(o)->metatable)->nomm |= (uint8_t)(1u<<MM_len);
+ else
+#endif
+ lj_err_optype(L, o, LJ_ERR_OPLEN);
+ return NULL;
+ }
+ return mmcall(L, lj_cont_ra, mo, o, niltv(L));
+}
+
+/* Helper for equality comparisons. __eq metamethod. */
+TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne)
+{
+ /* Field metatable must be at same offset for GCtab and GCudata! */
+ cTValue *mo = lj_meta_fast(L, tabref(o1->gch.metatable), MM_eq);
+ if (mo) {
+ TValue *top;
+ uint32_t it;
+ if (tabref(o1->gch.metatable) != tabref(o2->gch.metatable)) {
+ cTValue *mo2 = lj_meta_fast(L, tabref(o2->gch.metatable), MM_eq);
+ if (mo2 == NULL || !lj_obj_equal(mo, mo2))
+ return (TValue *)(intptr_t)ne;
+ }
+ top = curr_top(L);
+ setcont(top, ne ? lj_cont_condf : lj_cont_condt);
+ copyTV(L, top+1, mo);
+ it = ~(uint32_t)o1->gch.gct;
+ setgcV(L, top+2, o1, it);
+ setgcV(L, top+3, o2, it);
+ return top+2; /* Trigger metamethod call. */
+ }
+ return (TValue *)(intptr_t)ne;
+}
+
+#if LJ_HASFFI
+TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins)
+{
+ ASMFunction cont = (bc_op(ins) & 1) ? lj_cont_condf : lj_cont_condt;
+ int op = (int)bc_op(ins) & ~1;
+ TValue tv;
+ cTValue *mo, *o2, *o1 = &L->base[bc_a(ins)];
+ cTValue *o1mm = o1;
+ if (op == BC_ISEQV) {
+ o2 = &L->base[bc_d(ins)];
+ if (!tviscdata(o1mm)) o1mm = o2;
+ } else if (op == BC_ISEQS) {
+ setstrV(L, &tv, gco2str(proto_kgc(curr_proto(L), ~(ptrdiff_t)bc_d(ins))));
+ o2 = &tv;
+ } else if (op == BC_ISEQN) {
+ o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)];
+ } else {
+ lua_assert(op == BC_ISEQP);
+ setitype(&tv, ~bc_d(ins));
+ o2 = &tv;
+ }
+ mo = lj_meta_lookup(L, o1mm, MM_eq);
+ if (LJ_LIKELY(!tvisnil(mo)))
+ return mmcall(L, cont, mo, o1, o2);
+ else
+ return (TValue *)(intptr_t)(bc_op(ins) & 1);
+}
+#endif
+
+/* Helper for ordered comparisons. String compare, __lt/__le metamethods. */
+TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op)
+{
+ if (LJ_HASFFI && (tviscdata(o1) || tviscdata(o2))) {
+ ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
+ MMS mm = (op & 2) ? MM_le : MM_lt;
+ cTValue *mo = lj_meta_lookup(L, tviscdata(o1) ? o1 : o2, mm);
+ if (LJ_UNLIKELY(tvisnil(mo))) goto err;
+ return mmcall(L, cont, mo, o1, o2);
+ } else if (itype(o1) == itype(o2)) { /* Never called with two numbers. */
+ if (tvisstr(o1) && tvisstr(o2)) {
+ int32_t res = lj_str_cmp(strV(o1), strV(o2));
+ return (TValue *)(intptr_t)(((op&2) ? res <= 0 : res < 0) ^ (op&1));
+ } else {
+ trymt:
+ while (1) {
+ ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
+ MMS mm = (op & 2) ? MM_le : MM_lt;
+ cTValue *mo = lj_meta_lookup(L, o1, mm);
+ cTValue *mo2 = lj_meta_lookup(L, o2, mm);
+ if (tvisnil(mo) || !lj_obj_equal(mo, mo2)) {
+ if (op & 2) { /* MM_le not found: retry with MM_lt. */
+ cTValue *ot = o1; o1 = o2; o2 = ot; /* Swap operands. */
+ op ^= 3; /* Use LT and flip condition. */
+ continue;
+ }
+ goto err;
+ }
+ return mmcall(L, cont, mo, o1, o2);
+ }
+ }
+ } else if (tvisbool(o1) && tvisbool(o2)) {
+ goto trymt;
+ } else {
+ err:
+ lj_err_comp(L, o1, o2);
+ return NULL;
+ }
+}
+
+/* Helper for calls. __call metamethod. */
+void lj_meta_call(lua_State *L, TValue *func, TValue *top)
+{
+ cTValue *mo = lj_meta_lookup(L, func, MM_call);
+ TValue *p;
+ if (!tvisfunc(mo))
+ lj_err_optype_call(L, func);
+ for (p = top; p > func; p--) copyTV(L, p, p-1);
+ copyTV(L, func, mo);
+}
+
+/* Helper for FORI. Coercion. */
+void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o)
+{
+ if (!(tvisnumber(o) || (tvisstr(o) && lj_str_tonumber(strV(o), o))))
+ lj_err_msg(L, LJ_ERR_FORINIT);
+ if (!(tvisnumber(o+1) || (tvisstr(o+1) && lj_str_tonumber(strV(o+1), o+1))))
+ lj_err_msg(L, LJ_ERR_FORLIM);
+ if (!(tvisnumber(o+2) || (tvisstr(o+2) && lj_str_tonumber(strV(o+2), o+2))))
+ lj_err_msg(L, LJ_ERR_FORSTEP);
+ if (LJ_DUALNUM) {
+ /* Ensure all slots are integers or all slots are numbers. */
+ int32_t k[3];
+ int nint = 0;
+ ptrdiff_t i;
+ for (i = 0; i <= 2; i++) {
+ if (tvisint(o+i)) {
+ k[i] = intV(o+i); nint++;
+ } else {
+ k[i] = lj_num2int(numV(o+i)); nint += ((lua_Number)k[i] == numV(o+i));
+ }
+ }
+ if (nint == 3) { /* Narrow to integers. */
+ setintV(o, k[0]);
+ setintV(o+1, k[1]);
+ setintV(o+2, k[2]);
+ } else if (nint != 0) { /* Widen to numbers. */
+ if (tvisint(o)) setnumV(o, (lua_Number)intV(o));
+ if (tvisint(o+1)) setnumV(o+1, (lua_Number)intV(o+1));
+ if (tvisint(o+2)) setnumV(o+2, (lua_Number)intV(o+2));
+ }
+ }
+}
+
diff --git a/src/LuaJIT/src/lj_meta.h b/src/LuaJIT/src/lj_meta.h
new file mode 100644
index 000000000..b6275c498
--- /dev/null
+++ b/src/LuaJIT/src/lj_meta.h
@@ -0,0 +1,37 @@
+/*
+** Metamethod handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_META_H
+#define _LJ_META_H
+
+#include "lj_obj.h"
+
+/* Metamethod handling */
+LJ_FUNC void lj_meta_init(lua_State *L);
+LJ_FUNC cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name);
+LJ_FUNC cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm);
+#if LJ_HASFFI
+LJ_FUNC int lj_meta_tailcall(lua_State *L, cTValue *tv);
+#endif
+
+#define lj_meta_fastg(g, mt, mm) \
+ ((mt) == NULL ? NULL : ((mt)->nomm & (1u<<(mm))) ? NULL : \
+ lj_meta_cache(mt, mm, mmname_str(g, mm)))
+#define lj_meta_fast(L, mt, mm) lj_meta_fastg(G(L), mt, mm)
+
+/* C helpers for some instructions, called from assembler VM. */
+LJ_FUNCA cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k);
+LJ_FUNCA TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k);
+LJ_FUNCA TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb,
+ cTValue *rc, BCReg op);
+LJ_FUNCA TValue *lj_meta_cat(lua_State *L, TValue *top, int left);
+LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o);
+LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne);
+LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins);
+LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op);
+LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top);
+LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o);
+
+#endif
diff --git a/src/LuaJIT/src/lj_obj.c b/src/LuaJIT/src/lj_obj.c
new file mode 100644
index 000000000..04aeb461e
--- /dev/null
+++ b/src/LuaJIT/src/lj_obj.c
@@ -0,0 +1,35 @@
+/*
+** Miscellaneous object handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_obj_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+/* Object type names. */
+LJ_DATADEF const char *const lj_obj_typename[] = { /* ORDER LUA_T */
+ "no value", "nil", "boolean", "userdata", "number", "string",
+ "table", "function", "userdata", "thread", "proto", "cdata"
+};
+
+LJ_DATADEF const char *const lj_obj_itypename[] = { /* ORDER LJ_T */
+ "nil", "boolean", "boolean", "userdata", "string", "upval", "thread",
+ "proto", "function", "trace", "cdata", "table", "userdata", "number"
+};
+
+/* Compare two objects without calling metamethods. */
+int lj_obj_equal(cTValue *o1, cTValue *o2)
+{
+ if (itype(o1) == itype(o2)) {
+ if (tvispri(o1))
+ return 1;
+ if (!tvisnum(o1))
+ return gcrefeq(o1->gcr, o2->gcr);
+ } else if (!tvisnumber(o1) || !tvisnumber(o2)) {
+ return 0;
+ }
+ return numberVnum(o1) == numberVnum(o2);
+}
+
diff --git a/src/LuaJIT/src/lj_obj.h b/src/LuaJIT/src/lj_obj.h
new file mode 100644
index 000000000..43ed9204b
--- /dev/null
+++ b/src/LuaJIT/src/lj_obj.h
@@ -0,0 +1,844 @@
+/*
+** LuaJIT VM tags, values and objects.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#ifndef _LJ_OBJ_H
+#define _LJ_OBJ_H
+
+#include "lua.h"
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* -- Memory references (32 bit address space) ---------------------------- */
+
+/* Memory size. */
+typedef uint32_t MSize;
+
+/* Memory reference */
+typedef struct MRef {
+ uint32_t ptr32; /* Pseudo 32 bit pointer. */
+} MRef;
+
+#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32)
+
+#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p))
+#define setmrefr(r, v) ((r).ptr32 = (v).ptr32)
+
+/* -- GC object references (32 bit address space) ------------------------- */
+
+/* GCobj reference */
+typedef struct GCRef {
+ uint32_t gcptr32; /* Pseudo 32 bit pointer. */
+} GCRef;
+
+/* Common GC header for all collectable objects. */
+#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct
+/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. */
+
+#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32)
+#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32)
+#define gcrefu(r) ((r).gcptr32)
+#define gcrefi(r) ((int32_t)(r).gcptr32)
+#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32)
+#define gcnext(gc) (gcref((gc)->gch.nextgc))
+
+#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch)
+#define setgcrefi(r, i) ((r).gcptr32 = (uint32_t)(i))
+#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p))
+#define setgcrefnull(r) ((r).gcptr32 = 0)
+#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32)
+
+/* IMPORTANT NOTE:
+**
+** All uses of the setgcref* macros MUST be accompanied with a write barrier.
+**
+** This is to ensure the integrity of the incremental GC. The invariant
+** to preserve is that a black object never points to a white object.
+** I.e. never store a white object into a field of a black object.
+**
+** It's ok to LEAVE OUT the write barrier ONLY in the following cases:
+** - The source is not a GC object (NULL).
+** - The target is a GC root. I.e. everything in global_State.
+** - The target is a lua_State field (threads are never black).
+** - The target is a stack slot, see setgcV et al.
+** - The target is an open upvalue, i.e. pointing to a stack slot.
+** - The target is a newly created object (i.e. marked white). But make
+** sure nothing invokes the GC inbetween.
+** - The target and the source are the same object (self-reference).
+** - The target already contains the object (e.g. moving elements around).
+**
+** The most common case is a store to a stack slot. All other cases where
+** a barrier has been omitted are annotated with a NOBARRIER comment.
+**
+** The same logic applies for stores to table slots (array part or hash
+** part). ALL uses of lj_tab_set* require a barrier for the stored value
+** *and* the stored key, based on the above rules. In practice this means
+** a barrier is needed if *either* of the key or value are a GC object.
+**
+** It's ok to LEAVE OUT the write barrier in the following special cases:
+** - The stored value is nil. The key doesn't matter because it's either
+** not resurrected or lj_tab_newkey() will take care of the key barrier.
+** - The key doesn't matter if the *previously* stored value is guaranteed
+** to be non-nil (because the key is kept alive in the table).
+** - The key doesn't matter if it's guaranteed not to be part of the table,
+** since lj_tab_newkey() takes care of the key barrier. This applies
+** trivially to new tables, but watch out for resurrected keys. Storing
+** a nil value leaves the key in the table!
+**
+** In case of doubt use lj_gc_anybarriert() as it's rather cheap. It's used
+** by the interpreter for all table stores.
+**
+** Note: In contrast to Lua's GC, LuaJIT's GC does *not* specially mark
+** dead keys in tables. The reference is left in, but it's guaranteed to
+** be never dereferenced as long as the value is nil. It's ok if the key is
+** freed or if any object subsequently gets the same address.
+**
+** Not destroying dead keys helps to keep key hash slots stable. This avoids
+** specialization back-off for HREFK when a value flips between nil and
+** non-nil and the GC gets in the way. It also allows safely hoisting
+** HREF/HREFK across GC steps. Dead keys are only removed if a table is
+** resized (i.e. by NEWREF) and xREF must not be CSEd across a resize.
+**
+** The trade-off is that a write barrier for tables must take the key into
+** account, too. Implicitly resurrecting the key by storing a non-nil value
+** may invalidate the incremental GC invariant.
+*/
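+
+/* Illustrative example (not part of the upstream sources): a generic table
+** store from C and the matching barrier, following the rules above:
+**
+**   TValue *dst = lj_tab_set(L, t, key);   (declared in lj_tab.h)
+**   copyTV(L, dst, val);
+**   lj_gc_anybarriert(L, t);
+**
+** The barrier may only be omitted in the special cases listed above, e.g.
+** when val is nil or when t is known to be newly allocated (still white).
+*/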
+
+/* -- Common type definitions --------------------------------------------- */
+
+/* Types for handling bytecodes. Need this here, details in lj_bc.h. */
+typedef uint32_t BCIns; /* Bytecode instruction. */
+typedef uint32_t BCPos; /* Bytecode position. */
+typedef uint32_t BCReg; /* Bytecode register. */
+typedef int32_t BCLine; /* Bytecode line number. */
+
+/* Internal assembler functions. Never call these directly from C. */
+typedef void (*ASMFunction)(void);
+
+/* Resizable string buffer. Need this here, details in lj_str.h. */
+typedef struct SBuf {
+ char *buf; /* String buffer base. */
+ MSize n; /* String buffer length. */
+ MSize sz; /* String buffer size. */
+} SBuf;
+
+/* -- Tags and values ----------------------------------------------------- */
+
+/* Frame link. */
+typedef union {
+ int32_t ftsz; /* Frame type and size of previous frame. */
+ MRef pcr; /* Overlaps PC for Lua frames. */
+} FrameLink;
+
+/* Tagged value. */
+typedef LJ_ALIGN(8) union TValue {
+ uint64_t u64; /* 64 bit pattern overlaps number. */
+ lua_Number n; /* Number object overlaps split tag/value object. */
+ struct {
+ LJ_ENDIAN_LOHI(
+ union {
+ GCRef gcr; /* GCobj reference (if any). */
+ int32_t i; /* Integer value. */
+ };
+ , uint32_t it; /* Internal object tag. Must overlap MSW of number. */
+ )
+ };
+ struct {
+ LJ_ENDIAN_LOHI(
+ GCRef func; /* Function for next frame (or dummy L). */
+ , FrameLink tp; /* Link to previous frame. */
+ )
+ } fr;
+ struct {
+ LJ_ENDIAN_LOHI(
+ uint32_t lo; /* Lower 32 bits of number. */
+ , uint32_t hi; /* Upper 32 bits of number. */
+ )
+ } u32;
+} TValue;
+
+typedef const TValue cTValue;
+
+#define tvref(r) (mref(r, TValue))
+
+/* More external and GCobj tags for internal objects. */
+#define LAST_TT LUA_TTHREAD
+#define LUA_TPROTO (LAST_TT+1)
+#define LUA_TCDATA (LAST_TT+2)
+
+/* Internal object tags.
+**
+** Internal tags overlap the MSW of a number object (must be a double).
+** Interpreted as a double these are special NaNs. The FPU only generates
+** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available
+** for use as internal tags. Small negative numbers are used to shorten the
+** encoding of type comparisons (reg/mem against sign-ext. 8 bit immediate).
+**
+** ---MSW---.---LSW---
+** primitive types | itype | |
+** lightuserdata | itype | void * | (32 bit platforms)
+** lightuserdata |ffff| void * | (64 bit platforms, 47 bit pointers)
+** GC objects | itype | GCRef |
+** int (LJ_DUALNUM)| itype | int |
+** number -------double------
+**
+** ORDER LJ_T
+** Primitive types nil/false/true must be first, lightuserdata next.
+** GC objects are at the end, table/userdata must be lowest.
+** Also check lj_ir.h for similar ordering constraints.
+*/
+#define LJ_TNIL (~0u)
+#define LJ_TFALSE (~1u)
+#define LJ_TTRUE (~2u)
+#define LJ_TLIGHTUD (~3u)
+#define LJ_TSTR (~4u)
+#define LJ_TUPVAL (~5u)
+#define LJ_TTHREAD (~6u)
+#define LJ_TPROTO (~7u)
+#define LJ_TFUNC (~8u)
+#define LJ_TTRACE (~9u)
+#define LJ_TCDATA (~10u)
+#define LJ_TTAB (~11u)
+#define LJ_TUDATA (~12u)
+/* This is just the canonical number type used in some places. */
+#define LJ_TNUMX (~13u)
+
+/* Integers have itype == LJ_TISNUM, doubles have itype < LJ_TISNUM */
+#if LJ_64
+#define LJ_TISNUM 0xfffeffffu
+#else
+#define LJ_TISNUM LJ_TNUMX
+#endif
+#define LJ_TISTRUECOND LJ_TFALSE
+#define LJ_TISPRI LJ_TTRUE
+#define LJ_TISGCV (LJ_TSTR+1)
+#define LJ_TISTABUD LJ_TTAB
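+
+/* Worked example (illustration only): a TValue holding a string has
+** it == LJ_TSTR == 0xfffffffb in the MSW and a GCRef to the GCstr in the
+** LSW. Read as a double this is a NaN above the canonical 0xfff80000
+** pattern, so it can never collide with a number the FPU produces. The
+** ordering of the tags lets type tests compile to a single unsigned
+** compare, e.g. tvisnumber() is itype(o) <= LJ_TISNUM and tvisnum() is
+** itype(o) < LJ_TISNUM (see the macros further below).
+*/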
+
+/* -- String object ------------------------------------------------------- */
+
+/* String object header. String payload follows. */
+typedef struct GCstr {
+ GCHeader;
+ uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */
+ uint8_t unused;
+ MSize hash; /* Hash of string. */
+ MSize len; /* Size of string. */
+} GCstr;
+
+#define strref(r) (&gcref((r))->str)
+#define strdata(s) ((const char *)((s)+1))
+#define strdatawr(s) ((char *)((s)+1))
+#define strVdata(o) strdata(strV(o))
+#define sizestring(s) (sizeof(struct GCstr)+(s)->len+1)
+
+/* -- Userdata object ----------------------------------------------------- */
+
+/* Userdata object. Payload follows. */
+typedef struct GCudata {
+ GCHeader;
+ uint8_t udtype; /* Userdata type. */
+ uint8_t unused2;
+ GCRef env; /* Should be at same offset in GCfunc. */
+ MSize len; /* Size of payload. */
+ GCRef metatable; /* Must be at same offset in GCtab. */
+ uint32_t align1; /* To force 8 byte alignment of the payload. */
+} GCudata;
+
+/* Userdata types. */
+enum {
+ UDTYPE_USERDATA, /* Regular userdata. */
+ UDTYPE_IO_FILE, /* I/O library FILE. */
+ UDTYPE_FFI_CLIB, /* FFI C library namespace. */
+ UDTYPE__MAX
+};
+
+#define uddata(u) ((void *)((u)+1))
+#define sizeudata(u) (sizeof(struct GCudata)+(u)->len)
+
+/* -- C data object ------------------------------------------------------- */
+
+/* C data object. Payload follows. */
+typedef struct GCcdata {
+ GCHeader;
+ uint16_t typeid; /* C type ID. */
+} GCcdata;
+
+/* Prepended to variable-sized or realigned C data objects. */
+typedef struct GCcdataVar {
+ uint16_t offset; /* Offset to allocated memory (relative to GCcdata). */
+ uint16_t extra; /* Extra space allocated (incl. GCcdata + GCcdatav). */
+ MSize len; /* Size of payload. */
+} GCcdataVar;
+
+#define cdataptr(cd) ((void *)((cd)+1))
+#define cdataisv(cd) ((cd)->marked & 0x80)
+#define cdatav(cd) ((GCcdataVar *)((char *)(cd) - sizeof(GCcdataVar)))
+#define cdatavlen(cd) check_exp(cdataisv(cd), cdatav(cd)->len)
+#define sizecdatav(cd) (cdatavlen(cd) + cdatav(cd)->extra)
+#define memcdatav(cd) ((void *)((char *)(cd) - cdatav(cd)->offset))
+
+/* -- Prototype object ---------------------------------------------------- */
+
+#define SCALE_NUM_GCO ((int32_t)sizeof(lua_Number)/sizeof(GCRef))
+#define round_nkgc(n) (((n) + SCALE_NUM_GCO-1) & ~(SCALE_NUM_GCO-1))
+
+typedef struct GCproto {
+ GCHeader;
+ uint8_t numparams; /* Number of parameters. */
+ uint8_t framesize; /* Fixed frame size. */
+ MSize sizebc; /* Number of bytecode instructions. */
+ GCRef gclist;
+ MRef k; /* Split constant array (points to the middle). */
+ MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */
+ MSize sizekgc; /* Number of collectable constants. */
+ MSize sizekn; /* Number of lua_Number constants. */
+ MSize sizept; /* Total size including colocated arrays. */
+ uint8_t sizeuv; /* Number of upvalues. */
+ uint8_t flags; /* Miscellaneous flags (see below). */
+ uint16_t trace; /* Anchor for chain of root traces. */
+ /* ------ The following fields are for debugging/tracebacks only ------ */
+ GCRef chunkname; /* Name of the chunk this function was defined in. */
+ BCLine firstline; /* First line of the function definition. */
+ BCLine numline; /* Number of lines for the function definition. */
+ MRef lineinfo; /* Compressed map from bytecode ins. to source line. */
+ MRef uvinfo; /* Upvalue names. */
+ MRef varinfo; /* Names and compressed extents of local variables. */
+} GCproto;
+
+/* Flags for prototype. */
+#define PROTO_CHILD 0x01 /* Has child prototypes. */
+#define PROTO_VARARG 0x02 /* Vararg function. */
+#define PROTO_FFI 0x04 /* Uses BC_KCDATA for FFI datatypes. */
+#define PROTO_NOJIT 0x08 /* JIT disabled for this function. */
+#define PROTO_ILOOP 0x10 /* Patched bytecode with ILOOP etc. */
+/* Only used during parsing. */
+#define PROTO_HAS_RETURN 0x20 /* Already emitted a return. */
+#define PROTO_FIXUP_RETURN 0x40 /* Need to fixup emitted returns. */
+/* Top bits used for counting created closures. */
+#define PROTO_CLCOUNT 0x20 /* Base of saturating 3 bit counter. */
+#define PROTO_CLC_BITS 3
+
+#define proto_kgc(pt, idx) \
+ check_exp((uintptr_t)(intptr_t)(idx) >= (uintptr_t)-(intptr_t)(pt)->sizekgc, \
+ gcref(mref((pt)->k, GCRef)[(idx)]))
+#define proto_knumtv(pt, idx) \
+ check_exp((uintptr_t)(idx) < (pt)->sizekn, &mref((pt)->k, TValue)[(idx)])
+#define proto_bc(pt) ((BCIns *)((char *)(pt) + sizeof(GCproto)))
+#define proto_bcpos(pt, pc) ((BCPos)((pc) - proto_bc(pt)))
+#define proto_uv(pt) (mref((pt)->uv, uint16_t))
+
+#define proto_chunkname(pt) (strref((pt)->chunkname))
+#define proto_chunknamestr(pt) (strdata(proto_chunkname((pt))))
+#define proto_lineinfo(pt) (mref((pt)->lineinfo, const void))
+#define proto_uvinfo(pt) (mref((pt)->uvinfo, const uint8_t))
+#define proto_varinfo(pt) (mref((pt)->varinfo, const uint8_t))
+
+/* -- Upvalue object ------------------------------------------------------ */
+
+typedef struct GCupval {
+ GCHeader;
+ uint8_t closed; /* Set if closed (i.e. uv->v == &uv->u.value). */
+ uint8_t unused;
+ union {
+ TValue tv; /* If closed: the value itself. */
+ struct { /* If open: double linked list, anchored at thread. */
+ GCRef prev;
+ GCRef next;
+ };
+ };
+ MRef v; /* Points to stack slot (open) or above (closed). */
+ uint32_t dhash; /* Disambiguation hash: dh1 != dh2 => cannot alias. */
+} GCupval;
+
+#define uvprev(uv_) (&gcref((uv_)->prev)->uv)
+#define uvnext(uv_) (&gcref((uv_)->next)->uv)
+#define uvval(uv_) (mref((uv_)->v, TValue))
+
+/* -- Function object (closures) ------------------------------------------ */
+
+/* Common header for functions. env should be at same offset in GCudata. */
+#define GCfuncHeader \
+ GCHeader; uint8_t ffid; uint8_t nupvalues; \
+ GCRef env; GCRef gclist; MRef pc
+
+typedef struct GCfuncC {
+ GCfuncHeader;
+ lua_CFunction f; /* C function to be called. */
+ TValue upvalue[1]; /* Array of upvalues (TValue). */
+} GCfuncC;
+
+typedef struct GCfuncL {
+ GCfuncHeader;
+ GCRef uvptr[1]; /* Array of _pointers_ to upvalue objects (GCupval). */
+} GCfuncL;
+
+typedef union GCfunc {
+ GCfuncC c;
+ GCfuncL l;
+} GCfunc;
+
+#define FF_LUA 0
+#define FF_C 1
+#define isluafunc(fn) ((fn)->c.ffid == FF_LUA)
+#define iscfunc(fn) ((fn)->c.ffid == FF_C)
+#define isffunc(fn) ((fn)->c.ffid > FF_C)
+#define funcproto(fn) \
+ check_exp(isluafunc(fn), (GCproto *)(mref((fn)->l.pc, char)-sizeof(GCproto)))
+#define sizeCfunc(n) (sizeof(GCfuncC)-sizeof(TValue)+sizeof(TValue)*(n))
+#define sizeLfunc(n) (sizeof(GCfuncL)-sizeof(GCRef)+sizeof(GCRef)*(n))
+
+/* -- Table object -------------------------------------------------------- */
+
+/* Hash node. */
+typedef struct Node {
+ TValue val; /* Value object. Must be first field. */
+ TValue key; /* Key object. */
+ MRef next; /* Hash chain. */
+ MRef freetop; /* Top of free elements (stored in t->node[0]). */
+} Node;
+
+LJ_STATIC_ASSERT(offsetof(Node, val) == 0);
+
+typedef struct GCtab {
+ GCHeader;
+ uint8_t nomm; /* Negative cache for fast metamethods. */
+ int8_t colo; /* Array colocation. */
+ MRef array; /* Array part. */
+ GCRef gclist;
+ GCRef metatable; /* Must be at same offset in GCudata. */
+ MRef node; /* Hash part. */
+ uint32_t asize; /* Size of array part (keys [0, asize-1]). */
+ uint32_t hmask; /* Hash part mask (size of hash part - 1). */
+} GCtab;
+
+#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab))
+#define tabref(r) (&gcref((r))->tab)
+#define noderef(r) (mref((r), Node))
+#define nextnode(n) (mref((n)->next, Node))
+
+/* -- State objects ------------------------------------------------------- */
+
+/* VM states. */
+enum {
+ LJ_VMST_INTERP, /* Interpreter. */
+ LJ_VMST_C, /* C function. */
+ LJ_VMST_GC, /* Garbage collector. */
+ LJ_VMST_EXIT, /* Trace exit handler. */
+ LJ_VMST_RECORD, /* Trace recorder. */
+ LJ_VMST_OPT, /* Optimizer. */
+ LJ_VMST_ASM, /* Assembler. */
+ LJ_VMST__MAX
+};
+
+#define setvmstate(g, st) ((g)->vmstate = ~LJ_VMST_##st)
+
+/* Metamethods. ORDER MM */
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+#define MMDEF_52(_) _(pairs) _(ipairs)
+#else
+#define MMDEF_52(_)
+#endif
+
+#define MMDEF(_) \
+ _(index) _(newindex) _(gc) _(mode) _(eq) _(len) \
+ /* Only the above (fast) metamethods are negative cached (max. 8). */ \
+ _(lt) _(le) _(concat) _(call) \
+ /* The following must be in ORDER ARITH. */ \
+ _(add) _(sub) _(mul) _(div) _(mod) _(pow) _(unm) \
+ /* The following are used in the standard libraries. */ \
+ _(metatable) _(tostring) MMDEF_52(_)
+
+typedef enum {
+#define MMENUM(name) MM_##name,
+MMDEF(MMENUM)
+#undef MMENUM
+ MM__MAX,
+ MM____ = MM__MAX,
+ MM_FAST = MM_len
+} MMS;
+
+/* GC root IDs. */
+typedef enum {
+ GCROOT_MMNAME, /* Metamethod names. */
+ GCROOT_MMNAME_LAST = GCROOT_MMNAME + MM__MAX-1,
+ GCROOT_BASEMT, /* Metatables for base types. */
+ GCROOT_BASEMT_NUM = GCROOT_BASEMT + ~LJ_TNUMX,
+ GCROOT_IO_INPUT, /* Userdata for default I/O input file. */
+ GCROOT_IO_OUTPUT, /* Userdata for default I/O output file. */
+ GCROOT_MAX
+} GCRootID;
+
+#define basemt_it(g, it) ((g)->gcroot[GCROOT_BASEMT+~(it)])
+#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)])
+#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)]))
+
+typedef struct GCState {
+ MSize total; /* Memory currently allocated. */
+ MSize threshold; /* Memory threshold. */
+ uint8_t currentwhite; /* Current white color. */
+ uint8_t state; /* GC state. */
+ uint8_t unused1;
+ uint8_t unused2;
+ MSize sweepstr; /* Sweep position in string table. */
+ GCRef root; /* List of all collectable objects. */
+ MRef sweep; /* Sweep position in root list. */
+ GCRef gray; /* List of gray objects. */
+ GCRef grayagain; /* List of objects for atomic traversal. */
+ GCRef weak; /* List of weak tables (to be cleared). */
+ GCRef mmudata; /* List of userdata (to be finalized). */
+ MSize stepmul; /* Incremental GC step granularity. */
+ MSize debt; /* Debt (how much GC is behind schedule). */
+ MSize estimate; /* Estimate of memory actually in use. */
+ MSize pause; /* Pause between successive GC cycles. */
+} GCState;
+
+/* Global state, shared by all threads of a Lua universe. */
+typedef struct global_State {
+ GCRef *strhash; /* String hash table (hash chain anchors). */
+ MSize strmask; /* String hash mask (size of hash table - 1). */
+ MSize strnum; /* Number of strings in hash table. */
+ lua_Alloc allocf; /* Memory allocator. */
+ void *allocd; /* Memory allocator data. */
+ GCState gc; /* Garbage collector. */
+ SBuf tmpbuf; /* Temporary buffer for string concatenation. */
+ Node nilnode; /* Fallback 1-element hash part (nil key and value). */
+ GCstr strempty; /* Empty string. */
+ uint8_t stremptyz; /* Zero terminator of empty string. */
+ uint8_t hookmask; /* Hook mask. */
+ uint8_t dispatchmode; /* Dispatch mode. */
+ uint8_t vmevmask; /* VM event mask. */
+ GCRef mainthref; /* Link to main thread. */
+ TValue registrytv; /* Anchor for registry. */
+ TValue tmptv, tmptv2; /* Temporary TValues. */
+ GCupval uvhead; /* Head of double-linked list of all open upvalues. */
+ int32_t hookcount; /* Instruction hook countdown. */
+ int32_t hookcstart; /* Start count for instruction hook counter. */
+ lua_Hook hookf; /* Hook function. */
+ lua_CFunction wrapf; /* Wrapper for C function calls. */
+ lua_CFunction panic; /* Called as a last resort for errors. */
+ volatile int32_t vmstate; /* VM state or current JIT code trace number. */
+ BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */
+ BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */
+ GCRef jit_L; /* Current JIT code lua_State or NULL. */
+ MRef jit_base; /* Current JIT code L->base. */
+ MRef ctype_state; /* Pointer to C type state. */
+ GCRef gcroot[GCROOT_MAX]; /* GC roots. */
+} global_State;
+
+#define mainthread(g) (&gcref(g->mainthref)->th)
+#define niltv(L) \
+ check_exp(tvisnil(&G(L)->nilnode.val), &G(L)->nilnode.val)
+#define niltvg(g) \
+ check_exp(tvisnil(&(g)->nilnode.val), &(g)->nilnode.val)
+
+/* Hook management. Hook event masks are defined in lua.h. */
+#define HOOK_EVENTMASK 0x0f
+#define HOOK_ACTIVE 0x10
+#define HOOK_ACTIVE_SHIFT 4
+#define HOOK_VMEVENT 0x20
+#define HOOK_GC 0x40
+#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE)
+#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE)
+#define hook_entergc(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_GC))
+#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT))
+#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE)
+#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK)
+#define hook_restore(g, h) \
+ ((g)->hookmask = ((g)->hookmask & HOOK_EVENTMASK) | (h))
+
+/* Per-thread state object. */
+struct lua_State {
+ GCHeader;
+ uint8_t dummy_ffid; /* Fake FF_C for curr_funcisL() on dummy frames. */
+ uint8_t status; /* Thread status. */
+ MRef glref; /* Link to global state. */
+ GCRef gclist; /* GC chain. */
+ TValue *base; /* Base of currently executing function. */
+ TValue *top; /* First free slot in the stack. */
+ MRef maxstack; /* Last free slot in the stack. */
+ MRef stack; /* Stack base. */
+ GCRef openupval; /* List of open upvalues in the stack. */
+ GCRef env; /* Thread environment (table of globals). */
+ void *cframe; /* End of C stack frame chain. */
+ MSize stacksize; /* True stack size (incl. LJ_STACK_EXTRA). */
+};
+
+#define G(L) (mref(L->glref, global_State))
+#define registry(L) (&G(L)->registrytv)
+
+/* Macros to access the currently executing (Lua) function. */
+#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn)
+#define curr_funcisL(L) (isluafunc(curr_func(L)))
+#define curr_proto(L) (funcproto(curr_func(L)))
+#define curr_topL(L) (L->base + curr_proto(L)->framesize)
+#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top)
+
+/* -- GC object definition and conversions -------------------------------- */
+
+/* GC header for generic access to common fields of GC objects. */
+typedef struct GChead {
+ GCHeader;
+ uint8_t unused1;
+ uint8_t unused2;
+ GCRef env;
+ GCRef gclist;
+ GCRef metatable;
+} GChead;
+
+/* The env field SHOULD be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCfuncL, env));
+LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCudata, env));
+
+/* The metatable field MUST be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCtab, metatable));
+LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCudata, metatable));
+
+/* The gclist field MUST be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(lua_State, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCproto, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCfuncL, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtab, gclist));
+
+typedef union GCobj {
+ GChead gch;
+ GCstr str;
+ GCupval uv;
+ lua_State th;
+ GCproto pt;
+ GCfunc fn;
+ GCcdata cd;
+ GCtab tab;
+ GCudata ud;
+} GCobj;
+
+/* Macros to convert a GCobj pointer into a specific value. */
+#define gco2str(o) check_exp((o)->gch.gct == ~LJ_TSTR, &(o)->str)
+#define gco2uv(o) check_exp((o)->gch.gct == ~LJ_TUPVAL, &(o)->uv)
+#define gco2th(o) check_exp((o)->gch.gct == ~LJ_TTHREAD, &(o)->th)
+#define gco2pt(o) check_exp((o)->gch.gct == ~LJ_TPROTO, &(o)->pt)
+#define gco2func(o) check_exp((o)->gch.gct == ~LJ_TFUNC, &(o)->fn)
+#define gco2cd(o) check_exp((o)->gch.gct == ~LJ_TCDATA, &(o)->cd)
+#define gco2tab(o) check_exp((o)->gch.gct == ~LJ_TTAB, &(o)->tab)
+#define gco2ud(o) check_exp((o)->gch.gct == ~LJ_TUDATA, &(o)->ud)
+
+/* Macro to convert any collectable object into a GCobj pointer. */
+#define obj2gco(v) ((GCobj *)(v))
+
+/* -- TValue getters/setters ---------------------------------------------- */
+
+#ifdef LUA_USE_ASSERT
+#include "lj_gc.h"
+#endif
+
+/* Macros to test types. */
+#define itype(o) ((o)->it)
+#define tvisnil(o) (itype(o) == LJ_TNIL)
+#define tvisfalse(o) (itype(o) == LJ_TFALSE)
+#define tvistrue(o) (itype(o) == LJ_TTRUE)
+#define tvisbool(o) (tvisfalse(o) || tvistrue(o))
+#if LJ_64
+#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2)
+#else
+#define tvislightud(o) (itype(o) == LJ_TLIGHTUD)
+#endif
+#define tvisstr(o) (itype(o) == LJ_TSTR)
+#define tvisfunc(o) (itype(o) == LJ_TFUNC)
+#define tvisthread(o) (itype(o) == LJ_TTHREAD)
+#define tvisproto(o) (itype(o) == LJ_TPROTO)
+#define tviscdata(o) (itype(o) == LJ_TCDATA)
+#define tvistab(o) (itype(o) == LJ_TTAB)
+#define tvisudata(o) (itype(o) == LJ_TUDATA)
+#define tvisnumber(o) (itype(o) <= LJ_TISNUM)
+#define tvisint(o) (LJ_DUALNUM && itype(o) == LJ_TISNUM)
+#define tvisnum(o) (itype(o) < LJ_TISNUM)
+
+#define tvistruecond(o) (itype(o) < LJ_TISTRUECOND)
+#define tvispri(o) (itype(o) >= LJ_TISPRI)
+#define tvistabud(o) (itype(o) <= LJ_TISTABUD) /* && !tvisnum() */
+#define tvisgcv(o) ((itype(o) - LJ_TISGCV) > (LJ_TNUMX - LJ_TISGCV))
+
+/* Special macros to test numbers for NaN, +0, -0, +1 and raw equality. */
+#define tvisnan(o) ((o)->n != (o)->n)
+#if LJ_64
+#define tviszero(o) (((o)->u64 << 1) == 0)
+#else
+#define tviszero(o) (((o)->u32.lo | ((o)->u32.hi << 1)) == 0)
+#endif
+#define tvispzero(o) ((o)->u64 == 0)
+#define tvismzero(o) ((o)->u64 == U64x(80000000,00000000))
+#define tvispone(o) ((o)->u64 == U64x(3ff00000,00000000))
+#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64)
+
+/* Macros to convert type ids. */
+#if LJ_64
+#define itypemap(o) \
+ (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o))
+#else
+#define itypemap(o) (tvisnumber(o) ? ~LJ_TNUMX : ~itype(o))
+#endif
+
+/* Macros to get tagged values. */
+#define gcval(o) (gcref((o)->gcr))
+#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - (o)->it))
+#if LJ_64
+#define lightudV(o) \
+ check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff)))
+#else
+#define lightudV(o) check_exp(tvislightud(o), gcrefp((o)->gcr, void))
+#endif
+#define gcV(o) check_exp(tvisgcv(o), gcval(o))
+#define strV(o) check_exp(tvisstr(o), &gcval(o)->str)
+#define funcV(o) check_exp(tvisfunc(o), &gcval(o)->fn)
+#define threadV(o) check_exp(tvisthread(o), &gcval(o)->th)
+#define protoV(o) check_exp(tvisproto(o), &gcval(o)->pt)
+#define cdataV(o) check_exp(tviscdata(o), &gcval(o)->cd)
+#define tabV(o) check_exp(tvistab(o), &gcval(o)->tab)
+#define udataV(o) check_exp(tvisudata(o), &gcval(o)->ud)
+#define numV(o) check_exp(tvisnum(o), (o)->n)
+#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i)
+
+/* Macros to set tagged values. */
+#define setitype(o, i) ((o)->it = (i))
+#define setnilV(o) ((o)->it = LJ_TNIL)
+#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x))
+
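+/* Note: on 64 bit targets a light userdata pointer must fit in 47 bits.
+** setlightudV() below packs it with the 0xffff tag in the upper 16 bits,
+** and checklightudptr() rejects any pointer wider than 47 bits.
+*/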
+static LJ_AINLINE void setlightudV(TValue *o, void *p)
+{
+#if LJ_64
+ o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48);
+#else
+ setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD);
+#endif
+}
+
+#if LJ_64
+#define checklightudptr(L, p) \
+ (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p))
+#define setcont(o, f) \
+ ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin)
+#else
+#define checklightudptr(L, p) (p)
+#define setcont(o, f) setlightudV((o), (void *)(f))
+#endif
+
+#define tvchecklive(L, o) \
+ UNUSED(L), lua_assert(!tvisgcv(o) || \
+ ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o))))
+
+static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t itype)
+{
+ setgcref(o->gcr, v); setitype(o, itype); tvchecklive(L, o);
+}
+
+#define define_setV(name, type, tag) \
+static LJ_AINLINE void name(lua_State *L, TValue *o, type *v) \
+{ \
+ setgcV(L, o, obj2gco(v), tag); \
+}
+define_setV(setstrV, GCstr, LJ_TSTR)
+define_setV(setthreadV, lua_State, LJ_TTHREAD)
+define_setV(setprotoV, GCproto, LJ_TPROTO)
+define_setV(setfuncV, GCfunc, LJ_TFUNC)
+define_setV(setcdataV, GCcdata, LJ_TCDATA)
+define_setV(settabV, GCtab, LJ_TTAB)
+define_setV(setudataV, GCudata, LJ_TUDATA)
+
+#define setnumV(o, x) ((o)->n = (x))
+#define setnanV(o) ((o)->u64 = U64x(fff80000,00000000))
+#define setpinfV(o) ((o)->u64 = U64x(7ff00000,00000000))
+#define setminfV(o) ((o)->u64 = U64x(fff00000,00000000))
+
+static LJ_AINLINE void setintV(TValue *o, int32_t i)
+{
+#if LJ_DUALNUM
+ o->i = (uint32_t)i; setitype(o, LJ_TISNUM);
+#else
+ o->n = (lua_Number)i;
+#endif
+}
+
+static LJ_AINLINE void setint64V(TValue *o, int64_t i)
+{
+ if (LJ_DUALNUM && LJ_LIKELY(i == (int64_t)(int32_t)i))
+ setintV(o, (int32_t)i);
+ else
+ setnumV(o, (lua_Number)i);
+}
+
+#if LJ_64
+#define setintptrV(o, i) setint64V((o), (i))
+#else
+#define setintptrV(o, i) setintV((o), (i))
+#endif
+
+/* Copy tagged values. */
+static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2)
+{
+ *o1 = *o2; tvchecklive(L, o1);
+}
+
+/* -- Number to integer conversion ---------------------------------------- */
+
+#if LJ_SOFTFP
+LJ_ASMF int32_t lj_vm_tobit(double x);
+#endif
+
+static LJ_AINLINE int32_t lj_num2bit(lua_Number n)
+{
+#if LJ_SOFTFP
+ return lj_vm_tobit(n);
+#else
+ TValue o;
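+  /* Adding 2^52+2^51 pins the exponent, so the integer value of n (wrapped
+  ** modulo 2^32) lands in the low 32 mantissa bits and is read back through
+  ** the union as o.u32.lo.
+  */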
+ o.n = n + 6755399441055744.0; /* 2^52 + 2^51 */
+ return (int32_t)o.u32.lo;
+#endif
+}
+
+#if LJ_TARGET_X86 && !defined(__SSE2__)
+#define lj_num2int(n) lj_num2bit((n))
+#else
+#define lj_num2int(n) ((int32_t)(n))
+#endif
+
+static LJ_AINLINE uint64_t lj_num2u64(lua_Number n)
+{
+#ifdef _MSC_VER
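+  /* MSVC converts double to uint64_t by way of int64_t, which breaks for
+  ** values >= 2^63; rebias by 2^64 and go through a signed conversion.
+  */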
+ if (n >= 9223372036854775808.0) /* They think it's a feature. */
+ return (uint64_t)(int64_t)(n - 18446744073709551616.0);
+ else
+#endif
+ return (uint64_t)n;
+}
+
+static LJ_AINLINE int32_t numberVint(cTValue *o)
+{
+ if (LJ_LIKELY(tvisint(o)))
+ return intV(o);
+ else
+ return lj_num2int(numV(o));
+}
+
+static LJ_AINLINE lua_Number numberVnum(cTValue *o)
+{
+ if (LJ_UNLIKELY(tvisint(o)))
+ return (lua_Number)intV(o);
+ else
+ return numV(o);
+}
+
+/* -- Miscellaneous object handling --------------------------------------- */
+
+/* Names and maps for internal and external object tags. */
+LJ_DATA const char *const lj_obj_typename[1+LUA_TCDATA+1];
+LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1];
+
+#define typename(o) (lj_obj_itypename[itypemap(o)])
+
+/* Compare two objects without calling metamethods. */
+LJ_FUNC int lj_obj_equal(cTValue *o1, cTValue *o2);
+
+#endif
diff --git a/src/LuaJIT/src/lj_opt_dce.c b/src/LuaJIT/src/lj_opt_dce.c
new file mode 100644
index 000000000..90a937372
--- /dev/null
+++ b/src/LuaJIT/src/lj_opt_dce.c
@@ -0,0 +1,77 @@
+/*
+** DCE: Dead Code Elimination. Pre-LOOP only -- ASM already performs DCE.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_dce_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Scan through all snapshots and mark all referenced instructions. */
+static void dce_marksnap(jit_State *J)
+{
+ SnapNo i, nsnap = J->cur.nsnap;
+ for (i = 0; i < nsnap; i++) {
+ SnapShot *snap = &J->cur.snap[i];
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ IRRef ref = snap_ref(map[n]);
+ if (ref >= REF_FIRST)
+ irt_setmark(IR(ref)->t);
+ }
+ }
+}
+
+/* Backwards propagate marks. Replace unused instructions with NOPs. */
+static void dce_propagate(jit_State *J)
+{
+ IRRef1 *pchain[IR__MAX];
+ IRRef ins;
+ uint32_t i;
+ for (i = 0; i < IR__MAX; i++) pchain[i] = &J->chain[i];
+ for (ins = J->cur.nins-1; ins >= REF_FIRST; ins--) {
+ IRIns *ir = IR(ins);
+ if (irt_ismarked(ir->t)) {
+ irt_clearmark(ir->t);
+ pchain[ir->o] = &ir->prev;
+ } else if (!ir_sideeff(ir)) {
+ *pchain[ir->o] = ir->prev; /* Reroute original instruction chain. */
+ ir->t.irt = IRT_NIL;
+ ir->o = IR_NOP; /* Replace instruction with NOP. */
+ ir->op1 = ir->op2 = 0;
+ ir->prev = 0;
+ continue;
+ }
+ if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
+ if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
+ }
+}
+
+/* Dead Code Elimination.
+**
+** First backpropagate marks for all used instructions. Then replace
+** the unused ones with a NOP. Note that compressing the IR to eliminate
+** the NOPs does not pay off.
+*/
+void lj_opt_dce(jit_State *J)
+{
+ if ((J->flags & JIT_F_OPT_DCE)) {
+ dce_marksnap(J);
+ dce_propagate(J);
+ }
+}
+
+#undef IR
+
+#endif
diff --git a/src/LuaJIT/src/lj_opt_fold.c b/src/LuaJIT/src/lj_opt_fold.c
new file mode 100644
index 000000000..9b0307fbe
--- /dev/null
+++ b/src/LuaJIT/src/lj_opt_fold.c
@@ -0,0 +1,2218 @@
+/*
+** FOLD: Constant Folding, Algebraic Simplifications and Reassociation.
+** ABCelim: Array Bounds Check Elimination.
+** CSE: Common-Subexpression Elimination.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_fold_c
+#define LUA_CORE
+
+#include <math.h>
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_carith.h"
+#include "lj_vm.h"
+
+/* Here's a short description how the FOLD engine processes instructions:
+**
+** The FOLD engine receives a single instruction stored in fins (J->fold.ins).
+** The instruction and its operands are used to select matching fold rules.
+** These are applied iteratively until a fixed point is reached.
+**
+** The 8 bit opcode of the instruction itself plus the opcodes of the
+** two instructions referenced by its operands form a 24 bit key
+** 'ins left right' (unused operands -> 0, literals -> lowest 8 bits).
+**
+** This key is used for partial matching against the fold rules. The
+** left/right operand fields of the key are successively masked with
+** the 'any' wildcard, from most specific to least specific:
+**
+** ins left right
+** ins any right
+** ins left any
+** ins any any
+**
+** The masked key is used to look up a matching fold rule in a semi-perfect
+** hash table. If a matching rule is found, the related fold function is run.
+** Multiple rules can share the same fold function. A fold rule may return
+** one of several special values:
+**
+** - NEXTFOLD means no folding was applied, because an additional test
+** inside the fold function failed. Matching continues against less
+** specific fold rules. Finally the instruction is passed on to CSE.
+**
+** - RETRYFOLD means the instruction was modified in-place. Folding is
+** retried as if this instruction had just been received.
+**
+** All other return values are terminal actions -- no further folding is
+** applied:
+**
+** - INTFOLD(i) returns a reference to the integer constant i.
+**
+** - LEFTFOLD and RIGHTFOLD return the left/right operand reference
+** without emitting an instruction.
+**
+** - CSEFOLD and EMITFOLD pass the instruction directly to CSE or emit
+** it without passing through any further optimizations.
+**
+** - FAILFOLD, DROPFOLD and CONDFOLD only apply to instructions which have
+** no result (e.g. guarded assertions): FAILFOLD means the guard would
+** always fail, i.e. the current trace is pointless. DROPFOLD means
+** the guard is always true and has been eliminated. CONDFOLD is a
+** shortcut for FAILFOLD + cond (i.e. drop if true, otherwise fail).
+**
+** - Any other return value is interpreted as an IRRef or TRef. This
+** can be a reference to an existing or a newly created instruction.
+** Only the least-significant 16 bits (IRRef1) are used to form a TRef
+** which is finally returned to the caller.
+**
+** The FOLD engine receives instructions both from the trace recorder and
+** substituted instructions from LOOP unrolling. This means all types
+** of instructions may end up here, even though the recorder bypasses
+** FOLD in some cases. Thus all loads, stores and allocations must have
+** an any/any rule to avoid being passed on to CSE.
+**
+** Carefully read the following requirements before adding or modifying
+** any fold rules:
+**
+** Requirement #1: All fold rules must preserve their destination type.
+**
+** Consistently use INTFOLD() (KINT result) or lj_ir_knum() (KNUM result).
+** Never use lj_ir_knumint() which can have either a KINT or KNUM result.
+**
+** Requirement #2: Fold rules should not create *new* instructions which
+** reference operands *across* PHIs.
+**
+** E.g. a RETRYFOLD with 'fins->op1 = fleft->op1' is invalid if the
+** left operand is a PHI. Then fleft->op1 would point across the PHI
+** frontier to an invariant instruction. Adding a PHI for this instruction
+** would be counterproductive. The solution is to add a barrier which
+** prevents folding across PHIs, i.e. 'PHIBARRIER(fleft)' in this case.
+** The only exception is for recurrences with high latencies like
+** repeated int->num->int conversions.
+**
+** One could relax this condition a bit if the referenced instruction is
+** a PHI, too. But this often leads to worse code due to excessive
+** register shuffling.
+**
+** Note: returning *existing* instructions (e.g. LEFTFOLD) is ok, though.
+** Even returning fleft->op1 would be ok, because a new PHI will be added,
+** if needed. But again, this leads to excessive register shuffling and
+** should be avoided.
+**
+** Requirement #3: The set of all fold rules must be monotonic to guarantee
+** termination.
+**
+** The goal is optimization, so one primarily wants to add strength-reducing
+** rules. This means eliminating an instruction or replacing an instruction
+** with one or more simpler instructions. Don't add fold rules which point
+** in the other direction.
+**
+** Some rules (like commutativity) do not directly reduce the strength of
+** an instruction, but enable other fold rules (e.g. by moving constants
+** to the right operand). These rules must be made unidirectional to avoid
+** cycles.
+**
+** Rule of thumb: the trace recorder expands the IR and FOLD shrinks it.
+*/
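+
+/* A rough sketch of the dispatch described above (illustrative only, hence
+** the #if 0): the FoldLookup callback and the 0xff 'any' encoding are
+** hypothetical stand-ins; the real engine matches against a generated
+** semi-perfect hash table and also handles literal operands and RETRYFOLD
+** restarts, which are omitted here.
+*/
+#if 0
+typedef TRef (*FoldLookup)(jit_State *J, uint32_t key);  /* Hypothetical. */
+
+static TRef fold_dispatch_sketch(jit_State *J, uint32_t ins, uint32_t left,
+                                 uint32_t right, FoldLookup lookup)
+{
+  /* 24 bit key 'ins left right'; 0xff stands in for the 'any' wildcard. */
+  uint32_t keys[4] = {
+    (ins << 16) | (left << 8) | right,   /* ins left right */
+    (ins << 16) | (0xffu << 8) | right,  /* ins any  right */
+    (ins << 16) | (left << 8) | 0xffu,   /* ins left any   */
+    (ins << 16) | (0xffu << 8) | 0xffu   /* ins any  any   */
+  };
+  uint32_t i;
+  for (i = 0; i < 4; i++) {       /* Try most specific to least specific. */
+    TRef ref = lookup(J, keys[i]);
+    if (ref != NEXTFOLD)          /* A rule matched and produced a result. */
+      return ref;
+  }
+  return lj_opt_cse(J);           /* No rule applied: fall back to CSE. */
+}
+#endif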
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+#define fleft (&J->fold.left)
+#define fright (&J->fold.right)
+#define knumleft (ir_knum(fleft)->n)
+#define knumright (ir_knum(fright)->n)
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Fold function type. Fastcall on x86 significantly reduces their size. */
+typedef IRRef (LJ_FASTCALL *FoldFunc)(jit_State *J);
+
+/* Macros for the fold specs, so buildvm can recognize them. */
+#define LJFOLD(x)
+#define LJFOLDX(x)
+#define LJFOLDF(name) static TRef LJ_FASTCALL fold_##name(jit_State *J)
+/* Note: They must be at the start of a line or buildvm ignores them! */
+
+/* Barrier to prevent using operands across PHIs. */
+#define PHIBARRIER(ir) if (irt_isphi((ir)->t)) return NEXTFOLD
+
+/* Barrier to prevent folding across a GC step.
+** GC steps can only happen at the head of a trace and at LOOP.
+** And the GC is only driven forward if there is at least one allocation.
+*/
+#define gcstep_barrier(J, ref) \
+ ((ref) < J->chain[IR_LOOP] && \
+ (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \
+ J->chain[IR_TNEW] || J->chain[IR_TDUP] || \
+ J->chain[IR_CNEW] || J->chain[IR_CNEWI] || J->chain[IR_TOSTR]))
+
+/* -- Constant folding for FP numbers ------------------------------------- */
+
+LJFOLD(ADD KNUM KNUM)
+LJFOLD(SUB KNUM KNUM)
+LJFOLD(MUL KNUM KNUM)
+LJFOLD(DIV KNUM KNUM)
+LJFOLD(NEG KNUM KNUM)
+LJFOLD(ABS KNUM KNUM)
+LJFOLD(ATAN2 KNUM KNUM)
+LJFOLD(LDEXP KNUM KNUM)
+LJFOLD(MIN KNUM KNUM)
+LJFOLD(MAX KNUM KNUM)
+LJFOLDF(kfold_numarith)
+{
+ lua_Number a = knumleft;
+ lua_Number b = knumright;
+ lua_Number y = lj_vm_foldarith(a, b, fins->o - IR_ADD);
+ return lj_ir_knum(J, y);
+}
+
+LJFOLD(LDEXP KNUM KINT)
+LJFOLDF(kfold_ldexp)
+{
+#if LJ_TARGET_X86ORX64
+ UNUSED(J);
+ return NEXTFOLD;
+#else
+ return lj_ir_knum(J, ldexp(knumleft, fright->i));
+#endif
+}
+
+LJFOLD(FPMATH KNUM any)
+LJFOLDF(kfold_fpmath)
+{
+ lua_Number a = knumleft;
+ lua_Number y = lj_vm_foldfpm(a, fins->op2);
+ return lj_ir_knum(J, y);
+}
+
+LJFOLD(POW KNUM KINT)
+LJFOLDF(kfold_numpow)
+{
+ lua_Number a = knumleft;
+ lua_Number b = (lua_Number)fright->i;
+ lua_Number y = lj_vm_foldarith(a, b, IR_POW - IR_ADD);
+ return lj_ir_knum(J, y);
+}
+
+/* Must not use kfold_kref for numbers (could be NaN). */
+LJFOLD(EQ KNUM KNUM)
+LJFOLD(NE KNUM KNUM)
+LJFOLD(LT KNUM KNUM)
+LJFOLD(GE KNUM KNUM)
+LJFOLD(LE KNUM KNUM)
+LJFOLD(GT KNUM KNUM)
+LJFOLD(ULT KNUM KNUM)
+LJFOLD(UGE KNUM KNUM)
+LJFOLD(ULE KNUM KNUM)
+LJFOLD(UGT KNUM KNUM)
+LJFOLDF(kfold_numcomp)
+{
+ return CONDFOLD(lj_ir_numcmp(knumleft, knumright, (IROp)fins->o));
+}
+
+/* -- Constant folding for 32 bit integers -------------------------------- */
+
+static int32_t kfold_intop(int32_t k1, int32_t k2, IROp op)
+{
+ switch (op) {
+ case IR_ADD: k1 += k2; break;
+ case IR_SUB: k1 -= k2; break;
+ case IR_MUL: k1 *= k2; break;
+ case IR_MOD: k1 = lj_vm_modi(k1, k2); break;
+ case IR_NEG: k1 = -k1; break;
+ case IR_BAND: k1 &= k2; break;
+ case IR_BOR: k1 |= k2; break;
+ case IR_BXOR: k1 ^= k2; break;
+ case IR_BSHL: k1 <<= (k2 & 31); break;
+ case IR_BSHR: k1 = (int32_t)((uint32_t)k1 >> (k2 & 31)); break;
+ case IR_BSAR: k1 >>= (k2 & 31); break;
+ case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 31)); break;
+ case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break;
+ case IR_MIN: k1 = k1 < k2 ? k1 : k2; break;
+ case IR_MAX: k1 = k1 > k2 ? k1 : k2; break;
+ default: lua_assert(0); break;
+ }
+ return k1;
+}
+
+LJFOLD(ADD KINT KINT)
+LJFOLD(SUB KINT KINT)
+LJFOLD(MUL KINT KINT)
+LJFOLD(MOD KINT KINT)
+LJFOLD(NEG KINT KINT)
+LJFOLD(BAND KINT KINT)
+LJFOLD(BOR KINT KINT)
+LJFOLD(BXOR KINT KINT)
+LJFOLD(BSHL KINT KINT)
+LJFOLD(BSHR KINT KINT)
+LJFOLD(BSAR KINT KINT)
+LJFOLD(BROL KINT KINT)
+LJFOLD(BROR KINT KINT)
+LJFOLD(MIN KINT KINT)
+LJFOLD(MAX KINT KINT)
+LJFOLDF(kfold_intarith)
+{
+ return INTFOLD(kfold_intop(fleft->i, fright->i, (IROp)fins->o));
+}
+
+LJFOLD(ADDOV KINT KINT)
+LJFOLD(SUBOV KINT KINT)
+LJFOLD(MULOV KINT KINT)
+LJFOLDF(kfold_intovarith)
+{
+ lua_Number n = lj_vm_foldarith((lua_Number)fleft->i, (lua_Number)fright->i,
+ fins->o - IR_ADDOV);
+ int32_t k = lj_num2int(n);
+ if (n != (lua_Number)k)
+ return FAILFOLD;
+ return INTFOLD(k);
+}
+
+LJFOLD(BNOT KINT)
+LJFOLDF(kfold_bnot)
+{
+ return INTFOLD(~fleft->i);
+}
+
+LJFOLD(BSWAP KINT)
+LJFOLDF(kfold_bswap)
+{
+ return INTFOLD((int32_t)lj_bswap((uint32_t)fleft->i));
+}
+
+LJFOLD(LT KINT KINT)
+LJFOLD(GE KINT KINT)
+LJFOLD(LE KINT KINT)
+LJFOLD(GT KINT KINT)
+LJFOLD(ULT KINT KINT)
+LJFOLD(UGE KINT KINT)
+LJFOLD(ULE KINT KINT)
+LJFOLD(UGT KINT KINT)
+LJFOLD(ABC KINT KINT)
+LJFOLDF(kfold_intcomp)
+{
+ int32_t a = fleft->i, b = fright->i;
+ switch ((IROp)fins->o) {
+ case IR_LT: return CONDFOLD(a < b);
+ case IR_GE: return CONDFOLD(a >= b);
+ case IR_LE: return CONDFOLD(a <= b);
+ case IR_GT: return CONDFOLD(a > b);
+ case IR_ULT: return CONDFOLD((uint32_t)a < (uint32_t)b);
+ case IR_UGE: return CONDFOLD((uint32_t)a >= (uint32_t)b);
+ case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b);
+ case IR_ABC:
+ case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b);
+ default: lua_assert(0); return FAILFOLD;
+ }
+}
+
+LJFOLD(UGE any KINT)
+LJFOLDF(kfold_intcomp0)
+{
+ if (fright->i == 0)
+ return DROPFOLD;
+ return NEXTFOLD;
+}
+
+/* -- Constant folding for 64 bit integers -------------------------------- */
+
+static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op)
+{
+ switch (op) {
+#if LJ_64 || LJ_HASFFI
+ case IR_ADD: k1 += k2; break;
+ case IR_SUB: k1 -= k2; break;
+#endif
+#if LJ_HASFFI
+ case IR_MUL: k1 *= k2; break;
+ case IR_BAND: k1 &= k2; break;
+ case IR_BOR: k1 |= k2; break;
+ case IR_BXOR: k1 ^= k2; break;
+#endif
+ default: UNUSED(k2); lua_assert(0); break;
+ }
+ return k1;
+}
+
+LJFOLD(ADD KINT64 KINT64)
+LJFOLD(SUB KINT64 KINT64)
+LJFOLD(MUL KINT64 KINT64)
+LJFOLD(BAND KINT64 KINT64)
+LJFOLD(BOR KINT64 KINT64)
+LJFOLD(BXOR KINT64 KINT64)
+LJFOLDF(kfold_int64arith)
+{
+ return INT64FOLD(kfold_int64arith(ir_k64(fleft)->u64,
+ ir_k64(fright)->u64, (IROp)fins->o));
+}
+
+LJFOLD(DIV KINT64 KINT64)
+LJFOLD(MOD KINT64 KINT64)
+LJFOLD(POW KINT64 KINT64)
+LJFOLDF(kfold_int64arith2)
+{
+#if LJ_HASFFI
+ uint64_t k1 = ir_k64(fleft)->u64, k2 = ir_k64(fright)->u64;
+ if (irt_isi64(fins->t)) {
+ k1 = fins->o == IR_DIV ? lj_carith_divi64((int64_t)k1, (int64_t)k2) :
+ fins->o == IR_MOD ? lj_carith_modi64((int64_t)k1, (int64_t)k2) :
+ lj_carith_powi64((int64_t)k1, (int64_t)k2);
+ } else {
+ k1 = fins->o == IR_DIV ? lj_carith_divu64(k1, k2) :
+ fins->o == IR_MOD ? lj_carith_modu64(k1, k2) :
+ lj_carith_powu64(k1, k2);
+ }
+ return INT64FOLD(k1);
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BSHL KINT64 KINT)
+LJFOLD(BSHR KINT64 KINT)
+LJFOLD(BSAR KINT64 KINT)
+LJFOLD(BROL KINT64 KINT)
+LJFOLD(BROR KINT64 KINT)
+LJFOLDF(kfold_int64shift)
+{
+#if LJ_HASFFI || LJ_64
+ uint64_t k = ir_k64(fleft)->u64;
+ int32_t sh = (fright->i & 63);
+ switch ((IROp)fins->o) {
+ case IR_BSHL: k <<= sh; break;
+#if LJ_HASFFI
+ case IR_BSHR: k >>= sh; break;
+ case IR_BSAR: k = (uint64_t)((int64_t)k >> sh); break;
+ case IR_BROL: k = lj_rol(k, sh); break;
+ case IR_BROR: k = lj_ror(k, sh); break;
+#endif
+ default: lua_assert(0); break;
+ }
+ return INT64FOLD(k);
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BNOT KINT64)
+LJFOLDF(kfold_bnot64)
+{
+#if LJ_HASFFI
+ return INT64FOLD(~ir_k64(fleft)->u64);
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BSWAP KINT64)
+LJFOLDF(kfold_bswap64)
+{
+#if LJ_HASFFI
+ return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64));
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(LT KINT64 KINT)
+LJFOLD(GE KINT64 KINT)
+LJFOLD(LE KINT64 KINT)
+LJFOLD(GT KINT64 KINT)
+LJFOLD(ULT KINT64 KINT)
+LJFOLD(UGE KINT64 KINT)
+LJFOLD(ULE KINT64 KINT)
+LJFOLD(UGT KINT64 KINT)
+LJFOLDF(kfold_int64comp)
+{
+#if LJ_HASFFI
+ uint64_t a = ir_k64(fleft)->u64, b = ir_k64(fright)->u64;
+ switch ((IROp)fins->o) {
+ case IR_LT: return CONDFOLD(a < b);
+ case IR_GE: return CONDFOLD(a >= b);
+ case IR_LE: return CONDFOLD(a <= b);
+ case IR_GT: return CONDFOLD(a > b);
+ case IR_ULT: return CONDFOLD((uint64_t)a < (uint64_t)b);
+ case IR_UGE: return CONDFOLD((uint64_t)a >= (uint64_t)b);
+ case IR_ULE: return CONDFOLD((uint64_t)a <= (uint64_t)b);
+ case IR_UGT: return CONDFOLD((uint64_t)a > (uint64_t)b);
+ default: lua_assert(0); return FAILFOLD;
+ }
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(UGE any KINT64)
+LJFOLDF(kfold_int64comp0)
+{
+#if LJ_HASFFI
+ if (ir_k64(fright)->u64 == 0)
+ return DROPFOLD;
+ return NEXTFOLD;
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+/* -- Constant folding for strings ---------------------------------------- */
+
+LJFOLD(SNEW KKPTR KINT)
+LJFOLDF(kfold_snew_kptr)
+{
+ GCstr *s = lj_str_new(J->L, (const char *)ir_kptr(fleft), (size_t)fright->i);
+ return lj_ir_kstr(J, s);
+}
+
+LJFOLD(SNEW any KINT)
+LJFOLDF(kfold_snew_empty)
+{
+ if (fright->i == 0)
+ return lj_ir_kstr(J, &J2G(J)->strempty);
+ return NEXTFOLD;
+}
+
+LJFOLD(STRREF KGC KINT)
+LJFOLDF(kfold_strref)
+{
+ GCstr *str = ir_kstr(fleft);
+ lua_assert((MSize)fright->i < str->len);
+ return lj_ir_kkptr(J, (char *)strdata(str) + fright->i);
+}
+
+LJFOLD(STRREF SNEW any)
+LJFOLDF(kfold_strref_snew)
+{
+ PHIBARRIER(fleft);
+ if (irref_isk(fins->op2) && fright->i == 0) {
+ return fleft->op1; /* strref(snew(ptr, len), 0) ==> ptr */
+ } else {
+ /* Reassociate: strref(snew(strref(str, a), len), b) ==> strref(str, a+b) */
+ IRIns *ir = IR(fleft->op1);
+ IRRef1 str = ir->op1; /* IRIns * is not valid across emitir. */
+ lua_assert(ir->o == IR_STRREF);
+ PHIBARRIER(ir);
+ fins->op2 = emitir(IRTI(IR_ADD), ir->op2, fins->op2); /* Clobbers fins! */
+ fins->op1 = str;
+ fins->ot = IRT(IR_STRREF, IRT_P32);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(CALLN CARG IRCALL_lj_str_cmp)
+LJFOLDF(kfold_strcmp)
+{
+ if (irref_isk(fleft->op1) && irref_isk(fleft->op2)) {
+ GCstr *a = ir_kstr(IR(fleft->op1));
+ GCstr *b = ir_kstr(IR(fleft->op2));
+ return INTFOLD(lj_str_cmp(a, b));
+ }
+ return NEXTFOLD;
+}
+
+/* -- Constant folding of pointer arithmetic ------------------------------ */
+
+LJFOLD(ADD KGC KINT)
+LJFOLD(ADD KGC KINT64)
+LJFOLDF(kfold_add_kgc)
+{
+ GCobj *o = ir_kgc(fleft);
+#if LJ_64
+ ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64;
+#else
+ ptrdiff_t ofs = fright->i;
+#endif
+ return lj_ir_kkptr(J, (char *)o + ofs);
+}
+
+LJFOLD(ADD KPTR KINT)
+LJFOLD(ADD KPTR KINT64)
+LJFOLD(ADD KKPTR KINT)
+LJFOLD(ADD KKPTR KINT64)
+LJFOLDF(kfold_add_kptr)
+{
+ void *p = ir_kptr(fleft);
+#if LJ_64
+ ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64;
+#else
+ ptrdiff_t ofs = fright->i;
+#endif
+ return lj_ir_kptr_(J, fleft->o, (char *)p + ofs);
+}
+
+/* -- Constant folding of conversions ------------------------------------- */
+
+LJFOLD(TOBIT KNUM KNUM)
+LJFOLDF(kfold_tobit)
+{
+ return INTFOLD(lj_num2bit(knumleft));
+}
+
+LJFOLD(CONV KINT IRCONV_NUM_INT)
+LJFOLDF(kfold_conv_kint_num)
+{
+ return lj_ir_knum(J, (lua_Number)fleft->i);
+}
+
+LJFOLD(CONV KINT IRCONV_NUM_U32)
+LJFOLDF(kfold_conv_kintu32_num)
+{
+ return lj_ir_knum(J, (lua_Number)(uint32_t)fleft->i);
+}
+
+LJFOLD(CONV KINT IRCONV_I64_INT)
+LJFOLD(CONV KINT IRCONV_U64_INT)
+LJFOLDF(kfold_conv_kint_i64)
+{
+ if ((fins->op2 & IRCONV_SEXT))
+ return INT64FOLD((uint64_t)(int64_t)fleft->i);
+ else
+ return INT64FOLD((uint64_t)(int64_t)(uint32_t)fleft->i);
+}
+
+LJFOLD(CONV KINT64 IRCONV_NUM_I64)
+LJFOLDF(kfold_conv_kint64_num_i64)
+{
+ return lj_ir_knum(J, (lua_Number)(int64_t)ir_kint64(fleft)->u64);
+}
+
+LJFOLD(CONV KINT64 IRCONV_NUM_U64)
+LJFOLDF(kfold_conv_kint64_num_u64)
+{
+ return lj_ir_knum(J, (lua_Number)ir_kint64(fleft)->u64);
+}
+
+LJFOLD(CONV KINT64 IRCONV_INT_I64)
+LJFOLD(CONV KINT64 IRCONV_U32_I64)
+LJFOLDF(kfold_conv_kint64_int_i64)
+{
+ return INTFOLD((int32_t)ir_kint64(fleft)->u64);
+}
+
+LJFOLD(CONV KNUM IRCONV_INT_NUM)
+LJFOLDF(kfold_conv_knum_int_num)
+{
+ lua_Number n = knumleft;
+ if (!(fins->op2 & IRCONV_TRUNC)) {
+ int32_t k = lj_num2int(n);
+ if (irt_isguard(fins->t) && n != (lua_Number)k) {
+ /* We're about to create a guard which always fails, like CONV +1.5.
+ ** Some pathological loops cause this during LICM, e.g.:
+ ** local x,k,t = 0,1.5,{1,[1.5]=2}
+ ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end
+ ** assert(x == 300)
+ */
+ return FAILFOLD;
+ }
+ return INTFOLD(k);
+ } else {
+ return INTFOLD((int32_t)n);
+ }
+}
+
+LJFOLD(CONV KNUM IRCONV_U32_NUM)
+LJFOLDF(kfold_conv_knum_u32_num)
+{
+ lua_assert((fins->op2 & IRCONV_TRUNC));
+ return INTFOLD((int32_t)(uint32_t)knumleft);
+}
+
+LJFOLD(CONV KNUM IRCONV_I64_NUM)
+LJFOLDF(kfold_conv_knum_i64_num)
+{
+ lua_assert((fins->op2 & IRCONV_TRUNC));
+ return INT64FOLD((uint64_t)(int64_t)knumleft);
+}
+
+LJFOLD(CONV KNUM IRCONV_U64_NUM)
+LJFOLDF(kfold_conv_knum_u64_num)
+{
+ lua_assert((fins->op2 & IRCONV_TRUNC));
+ return INT64FOLD(lj_num2u64(knumleft));
+}
+
+LJFOLD(TOSTR KNUM)
+LJFOLDF(kfold_tostr_knum)
+{
+ return lj_ir_kstr(J, lj_str_fromnum(J->L, &knumleft));
+}
+
+LJFOLD(TOSTR KINT)
+LJFOLDF(kfold_tostr_kint)
+{
+ return lj_ir_kstr(J, lj_str_fromint(J->L, fleft->i));
+}
+
+LJFOLD(STRTO KGC)
+LJFOLDF(kfold_strto)
+{
+ TValue n;
+ if (lj_str_tonum(ir_kstr(fleft), &n))
+ return lj_ir_knum(J, numV(&n));
+ return FAILFOLD;
+}
+
+/* -- Constant folding of equality checks --------------------------------- */
+
+/* Don't constant-fold away FLOAD checks against KNULL. */
+LJFOLD(EQ FLOAD KNULL)
+LJFOLD(NE FLOAD KNULL)
+LJFOLDX(lj_opt_cse)
+
+/* But fold all other KNULL compares, since only KNULL is equal to KNULL. */
+LJFOLD(EQ any KNULL)
+LJFOLD(NE any KNULL)
+LJFOLD(EQ KNULL any)
+LJFOLD(NE KNULL any)
+LJFOLD(EQ KINT KINT) /* Constants are unique, so same refs <==> same value. */
+LJFOLD(NE KINT KINT)
+LJFOLD(EQ KINT64 KINT64)
+LJFOLD(NE KINT64 KINT64)
+LJFOLD(EQ KGC KGC)
+LJFOLD(NE KGC KGC)
+LJFOLDF(kfold_kref)
+{
+ return CONDFOLD((fins->op1 == fins->op2) ^ (fins->o == IR_NE));
+}
+
+/* -- Algebraic shortcuts ------------------------------------------------- */
+
+LJFOLD(FPMATH FPMATH IRFPM_FLOOR)
+LJFOLD(FPMATH FPMATH IRFPM_CEIL)
+LJFOLD(FPMATH FPMATH IRFPM_TRUNC)
+LJFOLDF(shortcut_round)
+{
+ IRFPMathOp op = (IRFPMathOp)fleft->op2;
+ if (op == IRFPM_FLOOR || op == IRFPM_CEIL || op == IRFPM_TRUNC)
+ return LEFTFOLD; /* round(round_left(x)) = round_left(x) */
+ return NEXTFOLD;
+}
+
+LJFOLD(ABS ABS KNUM)
+LJFOLDF(shortcut_left)
+{
+ return LEFTFOLD; /* f(g(x)) ==> g(x) */
+}
+
+LJFOLD(ABS NEG KNUM)
+LJFOLDF(shortcut_dropleft)
+{
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1; /* abs(neg(x)) ==> abs(x) */
+ return RETRYFOLD;
+}
+
+/* Note: no safe shortcuts with STRTO and TOSTR ("1e2" ==> +100 ==> "100"). */
+LJFOLD(NEG NEG any)
+LJFOLD(BNOT BNOT)
+LJFOLD(BSWAP BSWAP)
+LJFOLDF(shortcut_leftleft)
+{
+ PHIBARRIER(fleft); /* See above. Fold would be ok, but not beneficial. */
+ return fleft->op1; /* f(g(x)) ==> x */
+}
+
+/* -- FP algebraic simplifications ---------------------------------------- */
+
+/* FP arithmetic is tricky -- there's not much to simplify.
+** Please note the following common pitfalls before sending "improvements":
+** x+0 ==> x is INVALID for x=-0
+** 0-x ==> -x is INVALID for x=+0
+** x*0 ==> 0 is INVALID for x=-0, x=+-Inf or x=NaN
+*/
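+/* Concretely: under IEEE 754 (-0)+0 evaluates to +0 and Inf*0 evaluates to
+** NaN, so both of the folds above would change observable results.
+*/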
+
+LJFOLD(ADD NEG any)
+LJFOLDF(simplify_numadd_negx)
+{
+ PHIBARRIER(fleft);
+ fins->o = IR_SUB; /* (-a) + b ==> b - a */
+ fins->op1 = fins->op2;
+ fins->op2 = fleft->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(ADD any NEG)
+LJFOLDF(simplify_numadd_xneg)
+{
+ PHIBARRIER(fright);
+ fins->o = IR_SUB; /* a + (-b) ==> a - b */
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(SUB any KNUM)
+LJFOLDF(simplify_numsub_k)
+{
+ lua_Number n = knumright;
+ if (n == 0.0) /* x - (+-0) ==> x */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB NEG KNUM)
+LJFOLDF(simplify_numsub_negk)
+{
+ PHIBARRIER(fleft);
+ fins->op2 = fleft->op1; /* (-x) - k ==> (-k) - x */
+ fins->op1 = (IRRef1)lj_ir_knum(J, -knumright);
+ return RETRYFOLD;
+}
+
+LJFOLD(SUB any NEG)
+LJFOLDF(simplify_numsub_xneg)
+{
+ PHIBARRIER(fright);
+ fins->o = IR_ADD; /* a - (-b) ==> a + b */
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(MUL any KNUM)
+LJFOLD(DIV any KNUM)
+LJFOLDF(simplify_nummuldiv_k)
+{
+ lua_Number n = knumright;
+ if (n == 1.0) { /* x o 1 ==> x */
+ return LEFTFOLD;
+ } else if (n == -1.0) { /* x o -1 ==> -x */
+ fins->o = IR_NEG;
+ fins->op2 = (IRRef1)lj_ir_knum_neg(J);
+ return RETRYFOLD;
+ } else if (fins->o == IR_MUL && n == 2.0) { /* x * 2 ==> x + x */
+ fins->o = IR_ADD;
+ fins->op2 = fins->op1;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MUL NEG KNUM)
+LJFOLD(DIV NEG KNUM)
+LJFOLDF(simplify_nummuldiv_negk)
+{
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1; /* (-a) o k ==> a o (-k) */
+ fins->op2 = (IRRef1)lj_ir_knum(J, -knumright);
+ return RETRYFOLD;
+}
+
+LJFOLD(MUL NEG NEG)
+LJFOLD(DIV NEG NEG)
+LJFOLDF(simplify_nummuldiv_negneg)
+{
+ PHIBARRIER(fleft);
+ PHIBARRIER(fright);
+ fins->op1 = fleft->op1; /* (-a) o (-b) ==> a o b */
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(POW any KINT)
+LJFOLDF(simplify_numpow_xk)
+{
+ int32_t k = fright->i;
+ TRef ref = fins->op1;
+ if (k == 0) /* x ^ 0 ==> 1 */
+ return lj_ir_knum_one(J); /* Result must be a number, not an int. */
+ if (k == 1) /* x ^ 1 ==> x */
+ return LEFTFOLD;
+ if ((uint32_t)(k+65536) > 2*65536u) /* Limit code explosion. */
+ return NEXTFOLD;
+ if (k < 0) { /* x ^ (-k) ==> (1/x) ^ k. */
+ ref = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), ref);
+ k = -k;
+ }
+ /* Unroll x^k for 1 <= k <= 65536. */
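+  /* E.g. k = 5 emits three MULs: tmp = x*x, tmp = tmp*tmp, ref = x*tmp. */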
+ for (; (k & 1) == 0; k >>= 1) /* Handle leading zeros. */
+ ref = emitir(IRTN(IR_MUL), ref, ref);
+ if ((k >>= 1) != 0) { /* Handle trailing bits. */
+ TRef tmp = emitir(IRTN(IR_MUL), ref, ref);
+ for (; k != 1; k >>= 1) {
+ if (k & 1)
+ ref = emitir(IRTN(IR_MUL), ref, tmp);
+ tmp = emitir(IRTN(IR_MUL), tmp, tmp);
+ }
+ ref = emitir(IRTN(IR_MUL), ref, tmp);
+ }
+ return ref;
+}
+
+LJFOLD(POW KNUM any)
+LJFOLDF(simplify_numpow_kx)
+{
+ lua_Number n = knumleft;
+ if (n == 2.0) { /* 2.0 ^ i ==> ldexp(1.0, tonum(i)) */
+ fins->o = IR_CONV;
+#if LJ_TARGET_X86ORX64
+ fins->op1 = fins->op2;
+ fins->op2 = IRCONV_NUM_INT;
+ fins->op2 = (IRRef1)lj_opt_fold(J);
+#endif
+ fins->op1 = (IRRef1)lj_ir_knum_one(J);
+ fins->o = IR_LDEXP;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+/* -- Simplify conversions ------------------------------------------------ */
+
+LJFOLD(CONV CONV IRCONV_NUM_INT) /* _NUM */
+LJFOLDF(shortcut_conv_num_int)
+{
+ PHIBARRIER(fleft);
+ /* Only safe with a guarded conversion to int. */
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_NUM && irt_isguard(fleft->t))
+ return fleft->op1; /* f(g(x)) ==> x */
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_INT_NUM) /* _INT */
+LJFOLDF(simplify_conv_int_num)
+{
+ /* Fold even across PHI to avoid expensive num->int conversions in loop. */
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT)
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_U32_NUM) /* _U32*/
+LJFOLDF(simplify_conv_u32_num)
+{
+ /* Fold even across PHI to avoid expensive num->int conversions in loop. */
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32)
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_I64_NUM) /* _INT or _U32*/
+LJFOLD(CONV CONV IRCONV_U64_NUM) /* _INT or _U32*/
+LJFOLDF(simplify_conv_i64_num)
+{
+ PHIBARRIER(fleft);
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) {
+ /* Reduce to a sign-extension. */
+ fins->op1 = fleft->op1;
+ fins->op2 = ((IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
+ return RETRYFOLD;
+ } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) {
+#if LJ_TARGET_X64
+ return fleft->op1;
+#else
+ /* Reduce to a zero-extension. */
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRT_I64<<5)|IRT_U32;
+ return RETRYFOLD;
+#endif
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_INT_I64) /* _INT */
+LJFOLD(CONV CONV IRCONV_INT_U64) /* _INT */
+LJFOLDF(simplify_conv_int_i64)
+{
+ PHIBARRIER(fleft);
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT)
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_FLOAT_NUM) /* _FLOAT */
+LJFOLDF(simplify_conv_flt_num)
+{
+ PHIBARRIER(fleft);
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+/* Shortcut TOBIT + IRT_NUM <- IRT_INT/IRT_U32 conversion. */
+LJFOLD(TOBIT CONV KNUM)
+LJFOLDF(simplify_tobit_conv)
+{
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT ||
+ (fleft->op2 & IRCONV_SRCMASK) == IRT_U32) {
+ /* Fold even across PHI to avoid expensive num->int conversions in loop. */
+ lua_assert(irt_isnum(fleft->t));
+ return fleft->op1;
+ }
+ return NEXTFOLD;
+}
+
+/* Shortcut floor/ceil/round + IRT_NUM <- IRT_INT/IRT_U32 conversion. */
+LJFOLD(FPMATH CONV IRFPM_FLOOR)
+LJFOLD(FPMATH CONV IRFPM_CEIL)
+LJFOLD(FPMATH CONV IRFPM_TRUNC)
+LJFOLDF(simplify_floor_conv)
+{
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT ||
+ (fleft->op2 & IRCONV_SRCMASK) == IRT_U32)
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+/* Strength reduction of widening. */
+LJFOLD(CONV any IRCONV_I64_INT)
+LJFOLD(CONV any IRCONV_U64_INT)
+LJFOLDF(simplify_conv_sext)
+{
+ IRRef ref = fins->op1;
+ int64_t ofs = 0;
+ if (!(fins->op2 & IRCONV_SEXT))
+ return NEXTFOLD;
+ PHIBARRIER(fleft);
+ if (fleft->o == IR_XLOAD && (irt_isu8(fleft->t) || irt_isu16(fleft->t)))
+ goto ok_reduce;
+ if (fleft->o == IR_ADD && irref_isk(fleft->op2)) {
+ ofs = (int64_t)IR(fleft->op2)->i;
+ ref = fleft->op1;
+ }
+ /* Use scalar evolution analysis results to strength-reduce sign-extension. */
+ if (ref == J->scev.idx) {
+ IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop;
+ lua_assert(irt_isint(J->scev.t));
+ if (lo && IR(lo)->i + ofs >= 0) {
+ ok_reduce:
+#if LJ_TARGET_X64
+ /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */
+ return LEFTFOLD;
+#else
+ /* Reduce to a (cheaper) zero-extension. */
+ fins->op2 &= ~IRCONV_SEXT;
+ return RETRYFOLD;
+#endif
+ }
+ }
+ return NEXTFOLD;
+}
+
+/* Strength reduction of narrowing. */
+LJFOLD(CONV ADD IRCONV_INT_I64)
+LJFOLD(CONV SUB IRCONV_INT_I64)
+LJFOLD(CONV MUL IRCONV_INT_I64)
+LJFOLD(CONV ADD IRCONV_INT_U64)
+LJFOLD(CONV SUB IRCONV_INT_U64)
+LJFOLD(CONV MUL IRCONV_INT_U64)
+LJFOLDF(simplify_conv_narrow)
+{
+ IROp op = (IROp)fleft->o;
+ IRRef op1 = fleft->op1, op2 = fleft->op2, mode = fins->op2;
+ PHIBARRIER(fleft);
+ op1 = emitir(IRTI(IR_CONV), op1, mode);
+ op2 = emitir(IRTI(IR_CONV), op2, mode);
+ fins->ot = IRTI(op);
+ fins->op1 = op1;
+ fins->op2 = op2;
+ return RETRYFOLD;
+}
+
+/* Special CSE rule for CONV. */
+LJFOLD(CONV any any)
+LJFOLDF(cse_conv)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ IRRef op1 = fins->op1, op2 = (fins->op2 & IRCONV_MODEMASK);
+ uint8_t guard = irt_isguard(fins->t);
+ IRRef ref = J->chain[IR_CONV];
+ while (ref > op1) {
+ IRIns *ir = IR(ref);
+ /* Commoning with stronger checks is ok. */
+ if (ir->op1 == op1 && (ir->op2 & IRCONV_MODEMASK) == op2 &&
+ irt_isguard(ir->t) >= guard)
+ return ref;
+ ref = ir->prev;
+ }
+ }
+ return EMITFOLD; /* No fallthrough to regular CSE. */
+}
+
+/* FP conversion narrowing. */
+LJFOLD(TOBIT ADD KNUM)
+LJFOLD(TOBIT SUB KNUM)
+LJFOLD(CONV ADD IRCONV_INT_NUM)
+LJFOLD(CONV SUB IRCONV_INT_NUM)
+LJFOLD(CONV ADD IRCONV_I64_NUM)
+LJFOLD(CONV SUB IRCONV_I64_NUM)
+LJFOLDF(narrow_convert)
+{
+ PHIBARRIER(fleft);
+ /* Narrowing ignores PHIs and repeating it inside the loop is not useful. */
+ if (J->chain[IR_LOOP])
+ return NEXTFOLD;
+ lua_assert(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT);
+ return lj_opt_narrow_convert(J);
+}
+
+/* -- Integer algebraic simplifications ----------------------------------- */
+
+LJFOLD(ADD any KINT)
+LJFOLD(ADDOV any KINT)
+LJFOLD(SUBOV any KINT)
+LJFOLDF(simplify_intadd_k)
+{
+ if (fright->i == 0) /* i o 0 ==> i */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(MULOV any KINT)
+LJFOLDF(simplify_intmul_k)
+{
+ if (fright->i == 0) /* i * 0 ==> 0 */
+ return RIGHTFOLD;
+ if (fright->i == 1) /* i * 1 ==> i */
+ return LEFTFOLD;
+ if (fright->i == 2) { /* i * 2 ==> i + i */
+ fins->o = IR_ADDOV;
+ fins->op2 = fins->op1;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any KINT)
+LJFOLDF(simplify_intsub_k)
+{
+ if (fright->i == 0) /* i - 0 ==> i */
+ return LEFTFOLD;
+ fins->o = IR_ADD; /* i - k ==> i + (-k) */
+ fins->op2 = (IRRef1)lj_ir_kint(J, -fright->i); /* Overflow for -2^31 ok. */
+ return RETRYFOLD;
+}
+
+LJFOLD(SUB KINT any)
+LJFOLD(SUB KINT64 any)
+LJFOLDF(simplify_intsub_kleft)
+{
+ if (fleft->o == IR_KINT ? (fleft->i == 0) : (ir_kint64(fleft)->u64 == 0)) {
+ fins->o = IR_NEG; /* 0 - i ==> -i */
+ fins->op1 = fins->op2;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(ADD any KINT64)
+LJFOLDF(simplify_intadd_k64)
+{
+ if (ir_kint64(fright)->u64 == 0) /* i + 0 ==> i */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any KINT64)
+LJFOLDF(simplify_intsub_k64)
+{
+ uint64_t k = ir_kint64(fright)->u64;
+ if (k == 0) /* i - 0 ==> i */
+ return LEFTFOLD;
+ fins->o = IR_ADD; /* i - k ==> i + (-k) */
+ fins->op2 = (IRRef1)lj_ir_kint64(J, (uint64_t)-(int64_t)k);
+ return RETRYFOLD;
+}
+
+static TRef simplify_intmul_k(jit_State *J, int32_t k)
+{
+ /* Note: many more simplifications are possible, e.g. 2^k1 +- 2^k2.
+ ** But this is mainly intended for simple address arithmetic.
+ ** Also it's easier for the backend to optimize the original multiplies.
+ */
+ if (k == 1) { /* i * 1 ==> i */
+ return LEFTFOLD;
+ } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */
+ fins->o = IR_BSHL;
+ fins->op2 = lj_ir_kint(J, lj_fls((uint32_t)k));
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MUL any KINT)
+LJFOLDF(simplify_intmul_k32)
+{
+ if (fright->i == 0) /* i * 0 ==> 0 */
+ return INTFOLD(0);
+ else if (fright->i > 0)
+ return simplify_intmul_k(J, fright->i);
+ return NEXTFOLD;
+}
+
+LJFOLD(MUL any KINT64)
+LJFOLDF(simplify_intmul_k64)
+{
+ if (ir_kint64(fright)->u64 == 0) /* i * 0 ==> 0 */
+ return INT64FOLD(0);
+#if LJ_64
+ /* NYI: SPLIT for BSHL and 32 bit backend support. */
+ else if (ir_kint64(fright)->u64 < 0x80000000u)
+ return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64);
+#endif
+ return NEXTFOLD;
+}
+
+LJFOLD(MOD any KINT)
+LJFOLDF(simplify_intmod_k)
+{
+ int32_t k = fright->i;
+ lua_assert(k != 0);
+ if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */
+ fins->o = IR_BAND;
+ fins->op2 = lj_ir_kint(J, k-1);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MOD KINT any)
+LJFOLDF(simplify_intmod_kleft)
+{
+ if (fleft->i == 0)
+ return INTFOLD(0);
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any any)
+LJFOLD(SUBOV any any)
+LJFOLDF(simplify_intsub)
+{
+ if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) /* i - i ==> 0 */
+ return irt_is64(fins->t) ? INT64FOLD(0) : INTFOLD(0);
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB ADD any)
+LJFOLDF(simplify_intsubadd_leftcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fleft);
+ if (fins->op2 == fleft->op1) /* (i + j) - i ==> j */
+ return fleft->op2;
+ if (fins->op2 == fleft->op2) /* (i + j) - j ==> i */
+ return fleft->op1;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB SUB any)
+LJFOLDF(simplify_intsubsub_leftcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fleft);
+ if (fins->op1 == fleft->op1) { /* (i - j) - i ==> 0 - j */
+ fins->op1 = (IRRef1)lj_ir_kint(J, 0);
+ fins->op2 = fleft->op2;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any SUB)
+LJFOLDF(simplify_intsubsub_rightcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fright);
+ if (fins->op1 == fright->op1) /* i - (i - j) ==> j */
+ return fright->op2;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any ADD)
+LJFOLDF(simplify_intsubadd_rightcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fright);
+ if (fins->op1 == fright->op1) { /* i - (i + j) ==> 0 - j */
+ fins->op2 = fright->op2;
+ fins->op1 = (IRRef1)lj_ir_kint(J, 0);
+ return RETRYFOLD;
+ }
+ if (fins->op1 == fright->op2) { /* i - (j + i) ==> 0 - j */
+ fins->op2 = fright->op1;
+ fins->op1 = (IRRef1)lj_ir_kint(J, 0);
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB ADD ADD)
+LJFOLDF(simplify_intsubaddadd_cancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fleft);
+ PHIBARRIER(fright);
+ if (fleft->op1 == fright->op1) { /* (i + j1) - (i + j2) ==> j1 - j2 */
+ fins->op1 = fleft->op2;
+ fins->op2 = fright->op2;
+ return RETRYFOLD;
+ }
+ if (fleft->op1 == fright->op2) { /* (i + j1) - (j2 + i) ==> j1 - j2 */
+ fins->op1 = fleft->op2;
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+ }
+ if (fleft->op2 == fright->op1) { /* (j1 + i) - (i + j2) ==> j1 - j2 */
+ fins->op1 = fleft->op1;
+ fins->op2 = fright->op2;
+ return RETRYFOLD;
+ }
+ if (fleft->op2 == fright->op2) { /* (j1 + i) - (j2 + i) ==> j1 - j2 */
+ fins->op1 = fleft->op1;
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND any KINT)
+LJFOLD(BAND any KINT64)
+LJFOLDF(simplify_band_k)
+{
+ int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
+ (int64_t)ir_k64(fright)->u64;
+ if (k == 0) /* i & 0 ==> 0 */
+ return RIGHTFOLD;
+ if (k == -1) /* i & -1 ==> i */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BOR any KINT)
+LJFOLD(BOR any KINT64)
+LJFOLDF(simplify_bor_k)
+{
+ int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
+ (int64_t)ir_k64(fright)->u64;
+ if (k == 0) /* i | 0 ==> i */
+ return LEFTFOLD;
+ if (k == -1) /* i | -1 ==> -1 */
+ return RIGHTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BXOR any KINT)
+LJFOLD(BXOR any KINT64)
+LJFOLDF(simplify_bxor_k)
+{
+ int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
+ (int64_t)ir_k64(fright)->u64;
+ if (k == 0) /* i xor 0 ==> i */
+ return LEFTFOLD;
+ if (k == -1) { /* i xor -1 ==> ~i */
+ fins->o = IR_BNOT;
+ fins->op2 = 0;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL any KINT)
+LJFOLD(BSHR any KINT)
+LJFOLD(BSAR any KINT)
+LJFOLD(BROL any KINT)
+LJFOLD(BROR any KINT)
+LJFOLDF(simplify_shift_ik)
+{
+ int32_t mask = irt_is64(fins->t) ? 63 : 31;
+ int32_t k = (fright->i & mask);
+ if (k == 0) /* i o 0 ==> i */
+ return LEFTFOLD;
+ if (k == 1 && fins->o == IR_BSHL) { /* i << 1 ==> i + i */
+ fins->o = IR_ADD;
+ fins->op2 = fins->op1;
+ return RETRYFOLD;
+ }
+ if (k != fright->i) { /* i o k ==> i o (k & mask) */
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ return RETRYFOLD;
+ }
+#ifndef LJ_TARGET_UNIFYROT
+ if (fins->o == IR_BROR) { /* bror(i, k) ==> brol(i, (-k)&mask) */
+ fins->o = IR_BROL;
+ fins->op2 = (IRRef1)lj_ir_kint(J, (-k)&mask);
+ return RETRYFOLD;
+ }
+#endif
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL any BAND)
+LJFOLD(BSHR any BAND)
+LJFOLD(BSAR any BAND)
+LJFOLD(BROL any BAND)
+LJFOLD(BROR any BAND)
+LJFOLDF(simplify_shift_andk)
+{
+ IRIns *irk = IR(fright->op2);
+ PHIBARRIER(fright);
+ if ((fins->o < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) &&
+ irk->o == IR_KINT) { /* i o (j & mask) ==> i o j */
+ int32_t mask = irt_is64(fins->t) ? 63 : 31;
+ int32_t k = irk->i & mask;
+ if (k == mask) {
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL KINT any)
+LJFOLD(BSHR KINT any)
+LJFOLD(BSHL KINT64 any)
+LJFOLD(BSHR KINT64 any)
+LJFOLDF(simplify_shift1_ki)
+{
+ int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i :
+ (int64_t)ir_k64(fleft)->u64;
+ if (k == 0) /* 0 o i ==> 0 */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BSAR KINT any)
+LJFOLD(BROL KINT any)
+LJFOLD(BROR KINT any)
+LJFOLD(BSAR KINT64 any)
+LJFOLD(BROL KINT64 any)
+LJFOLD(BROR KINT64 any)
+LJFOLDF(simplify_shift2_ki)
+{
+ int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i :
+ (int64_t)ir_k64(fleft)->u64;
+ if (k == 0 || k == -1) /* 0 o i ==> 0; -1 o i ==> -1 */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL BAND KINT)
+LJFOLD(BSHR BAND KINT)
+LJFOLD(BROL BAND KINT)
+LJFOLD(BROR BAND KINT)
+LJFOLDF(simplify_shiftk_andk)
+{
+ IRIns *irk = IR(fleft->op2);
+ PHIBARRIER(fleft);
+ if (irk->o == IR_KINT) { /* (i & k1) o k2 ==> (i o k2) & (k1 o k2) */
+ int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
+ fins->op1 = fleft->op1;
+ fins->op1 = (IRRef1)lj_opt_fold(J);
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ fins->ot = IRTI(IR_BAND);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND BSHL KINT)
+LJFOLD(BAND BSHR KINT)
+LJFOLDF(simplify_andk_shiftk)
+{
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT &&
+ kfold_intop(-1, irk->i, (IROp)fleft->o) == fright->i)
+ return LEFTFOLD; /* (i o k1) & k2 ==> i, if (-1 o k1) == k2 */
+ return NEXTFOLD;
+}
+
+/* -- Reassociation ------------------------------------------------------- */
+
+LJFOLD(ADD ADD KINT)
+LJFOLD(MUL MUL KINT)
+LJFOLD(BAND BAND KINT)
+LJFOLD(BOR BOR KINT)
+LJFOLD(BXOR BXOR KINT)
+LJFOLDF(reassoc_intarith_k)
+{
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT) {
+ int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
+ if (k == irk->i) /* (i o k1) o k2 ==> i o k1, if (k1 o k2) == k1. */
+ return LEFTFOLD;
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(ADD ADD KINT64)
+LJFOLD(MUL MUL KINT64)
+LJFOLD(BAND BAND KINT64)
+LJFOLD(BOR BOR KINT64)
+LJFOLD(BXOR BXOR KINT64)
+LJFOLDF(reassoc_intarith_k64)
+{
+#if LJ_HASFFI || LJ_64
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT64) {
+ uint64_t k = kfold_int64arith(ir_k64(irk)->u64,
+ ir_k64(fright)->u64, (IROp)fins->o);
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint64(J, k);
+ return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */
+ }
+ return NEXTFOLD;
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(MIN MIN any)
+LJFOLD(MAX MAX any)
+LJFOLD(BAND BAND any)
+LJFOLD(BOR BOR any)
+LJFOLDF(reassoc_dup)
+{
+ if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2)
+ return LEFTFOLD; /* (a o b) o a ==> a o b; (a o b) o b ==> a o b */
+ return NEXTFOLD;
+}
+
+LJFOLD(BXOR BXOR any)
+LJFOLDF(reassoc_bxor)
+{
+ PHIBARRIER(fleft);
+ if (fins->op2 == fleft->op1) /* (a xor b) xor a ==> b */
+ return fleft->op2;
+ if (fins->op2 == fleft->op2) /* (a xor b) xor b ==> a */
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL BSHL KINT)
+LJFOLD(BSHR BSHR KINT)
+LJFOLD(BSAR BSAR KINT)
+LJFOLD(BROL BROL KINT)
+LJFOLD(BROR BROR KINT)
+LJFOLDF(reassoc_shift)
+{
+ IRIns *irk = IR(fleft->op2);
+ PHIBARRIER(fleft); /* The (shift any KINT) rule covers k2 == 0 and more. */
+ if (irk->o == IR_KINT) { /* (i o k1) o k2 ==> i o (k1 + k2) */
+ int32_t mask = irt_is64(fins->t) ? 63 : 31;
+ int32_t k = (irk->i & mask) + (fright->i & mask);
+ if (k > mask) { /* Combined shift too wide? */
+ if (fins->o == IR_BSHL || fins->o == IR_BSHR)
+ return mask == 31 ? INTFOLD(0) : INT64FOLD(0);
+ else if (fins->o == IR_BSAR)
+ k = mask;
+ else
+ k &= mask;
+ }
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MIN MIN KNUM)
+LJFOLD(MAX MAX KNUM)
+LJFOLD(MIN MIN KINT)
+LJFOLD(MAX MAX KINT)
+LJFOLDF(reassoc_minmax_k)
+{
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KNUM) {
+ lua_Number a = ir_knum(irk)->n;
+ lua_Number y = lj_vm_foldarith(a, knumright, fins->o - IR_ADD);
+ if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */
+ return LEFTFOLD;
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_knum(J, y);
+ return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */
+ } else if (irk->o == IR_KINT) {
+ int32_t a = irk->i;
+ int32_t y = kfold_intop(a, fright->i, fins->o);
+ if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */
+ return LEFTFOLD;
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint(J, y);
+ return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MIN MAX any)
+LJFOLD(MAX MIN any)
+LJFOLDF(reassoc_minmax_left)
+{
+ if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2)
+ return RIGHTFOLD; /* (b o1 a) o2 b ==> b; (a o1 b) o2 b ==> b */
+ return NEXTFOLD;
+}
+
+LJFOLD(MIN any MAX)
+LJFOLD(MAX any MIN)
+LJFOLDF(reassoc_minmax_right)
+{
+ if (fins->op1 == fright->op1 || fins->op1 == fright->op2)
+ return LEFTFOLD; /* a o2 (a o1 b) ==> a; a o2 (b o1 a) ==> a */
+ return NEXTFOLD;
+}
+
+/* -- Array bounds check elimination -------------------------------------- */
+
+/* Eliminate ABC across PHIs to handle t[i-1] forwarding case.
+** ABC(asize, (i+k)+(-k)) ==> ABC(asize, i), but only if it already exists.
+** Could be generalized to (i+k1)+k2 ==> i+(k1+k2), but needs better disambig.
+*/
+LJFOLD(ABC any ADD)
+LJFOLDF(abc_fwd)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) {
+ if (irref_isk(fright->op2)) {
+ IRIns *add2 = IR(fright->op1);
+ if (add2->o == IR_ADD && irref_isk(add2->op2) &&
+ IR(fright->op2)->i == -IR(add2->op2)->i) {
+ IRRef ref = J->chain[IR_ABC];
+ IRRef lim = add2->op1;
+ if (fins->op1 > lim) lim = fins->op1;
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == fins->op1 && ir->op2 == add2->op1)
+ return DROPFOLD;
+ ref = ir->prev;
+ }
+ }
+ }
+ }
+ return NEXTFOLD;
+}
+
+/* Eliminate ABC for constants.
+** ABC(asize, k1), ABC(asize, k2) ==> ABC(asize, max(k1, k2))
+** Drop second ABC if k2 is lower. Otherwise patch first ABC with k2.
+*/
+LJFOLD(ABC any KINT)
+LJFOLDF(abc_k)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) {
+ IRRef ref = J->chain[IR_ABC];
+ IRRef asize = fins->op1;
+ while (ref > asize) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == asize && irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (fright->i > k)
+ ir->op2 = fins->op2;
+ return DROPFOLD;
+ }
+ ref = ir->prev;
+ }
+ return EMITFOLD; /* Already performed CSE. */
+ }
+ return NEXTFOLD;
+}
+
+/* Eliminate invariant ABC inside loop. */
+LJFOLD(ABC any any)
+LJFOLDF(abc_invar)
+{
+ if (!irt_isint(fins->t) && J->chain[IR_LOOP]) /* Currently marked as PTR. */
+ return DROPFOLD;
+ return NEXTFOLD;
+}
+
+/* -- Commutativity ------------------------------------------------------- */
+
+/* The refs of commutative ops are canonicalized. Lower refs go to the right.
+** Rationale behind this:
+** - It (also) moves constants to the right.
+** - It reduces the number of FOLD rules (e.g. (BOR any KINT) suffices).
+** - It helps CSE to find more matches.
+** - The assembler generates better code with constants at the right.
+*/
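+/* (Constant refs are always lower than instruction refs -- constants grow
+** downward from REF_BIAS -- so canonicalizing on ref order is what pushes
+** constants to the right in the first place.)
+*/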
+
+LJFOLD(ADD any any)
+LJFOLD(MUL any any)
+LJFOLD(ADDOV any any)
+LJFOLD(MULOV any any)
+LJFOLDF(comm_swap)
+{
+ if (fins->op1 < fins->op2) { /* Move lower ref to the right. */
+ IRRef1 tmp = fins->op1;
+ fins->op1 = fins->op2;
+ fins->op2 = tmp;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(EQ any any)
+LJFOLD(NE any any)
+LJFOLDF(comm_equal)
+{
+ /* For non-numbers only: x == x ==> drop; x ~= x ==> fail */
+ if (fins->op1 == fins->op2 && !irt_isnum(fins->t))
+ return CONDFOLD(fins->o == IR_EQ);
+ return fold_comm_swap(J);
+}
+
+LJFOLD(LT any any)
+LJFOLD(GE any any)
+LJFOLD(LE any any)
+LJFOLD(GT any any)
+LJFOLD(ULT any any)
+LJFOLD(UGE any any)
+LJFOLD(ULE any any)
+LJFOLD(UGT any any)
+LJFOLDF(comm_comp)
+{
+ /* For non-numbers only: x <=> x ==> drop; x <> x ==> fail */
+ if (fins->op1 == fins->op2 && !irt_isnum(fins->t))
+ return CONDFOLD((fins->o ^ (fins->o >> 1)) & 1);
+ if (fins->op1 < fins->op2) { /* Move lower ref to the right. */
+ IRRef1 tmp = fins->op1;
+ fins->op1 = fins->op2;
+ fins->op2 = tmp;
+ fins->o ^= 3; /* GT <-> LT, GE <-> LE, does not affect U */
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND any any)
+LJFOLD(BOR any any)
+LJFOLD(MIN any any)
+LJFOLD(MAX any any)
+LJFOLDF(comm_dup)
+{
+ if (fins->op1 == fins->op2) /* x o x ==> x */
+ return LEFTFOLD;
+ return fold_comm_swap(J);
+}
+
+LJFOLD(BXOR any any)
+LJFOLDF(comm_bxor)
+{
+ if (fins->op1 == fins->op2) /* i xor i ==> 0 */
+ return irt_is64(fins->t) ? INT64FOLD(0) : INTFOLD(0);
+ return fold_comm_swap(J);
+}
+
+/* -- Simplification of compound expressions ------------------------------ */
+
+static TRef kfold_xload(jit_State *J, IRIns *ir, const void *p)
+{
+ int32_t k;
+ switch (irt_type(ir->t)) {
+ case IRT_NUM: return lj_ir_knum_u64(J, *(uint64_t *)p);
+ case IRT_I8: k = (int32_t)*(int8_t *)p; break;
+ case IRT_U8: k = (int32_t)*(uint8_t *)p; break;
+ case IRT_I16: k = (int32_t)(int16_t)lj_getu16(p); break;
+ case IRT_U16: k = (int32_t)(uint16_t)lj_getu16(p); break;
+ case IRT_INT: case IRT_U32: k = (int32_t)lj_getu32(p); break;
+ case IRT_I64: case IRT_U64: return lj_ir_kint64(J, *(uint64_t *)p);
+ default: return 0;
+ }
+ return lj_ir_kint(J, k);
+}
+
+/* Turn: string.sub(str, a, b) == kstr
+** into: string.byte(str, a) == string.byte(kstr, 1) etc.
+** Note: this creates unaligned XLOADs on x86/x64.
+*/
+LJFOLD(EQ SNEW KGC)
+LJFOLD(NE SNEW KGC)
+LJFOLDF(merge_eqne_snew_kgc)
+{
+ GCstr *kstr = ir_kstr(fright);
+ int32_t len = (int32_t)kstr->len;
+ lua_assert(irt_isstr(fins->t));
+
+#if LJ_TARGET_X86ORX64
+#define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */
+#define FOLD_SNEW_TYPE8 IRT_I8 /* Creates shorter immediates. */
+#else
+#define FOLD_SNEW_MAX_LEN 1 /* Handle string lengths 0 or 1. */
+#define FOLD_SNEW_TYPE8 IRT_U8 /* Prefer unsigned loads. */
+#endif
+
+ if (len <= FOLD_SNEW_MAX_LEN) {
+ IROp op = (IROp)fins->o;
+ IRRef strref = fleft->op1;
+ lua_assert(IR(strref)->o == IR_STRREF);
+ if (op == IR_EQ) {
+ emitir(IRTGI(IR_EQ), fleft->op2, lj_ir_kint(J, len));
+ /* Caveat: fins/fleft/fright is no longer valid after emitir. */
+ } else {
+ /* NE is not expanded since this would need an OR of two conds. */
+ if (!irref_isk(fleft->op2)) /* Only handle the constant length case. */
+ return NEXTFOLD;
+ if (IR(fleft->op2)->i != len)
+ return DROPFOLD;
+ }
+ if (len > 0) {
+ /* A 4 byte load for length 3 is ok -- all strings have an extra NUL. */
+ uint16_t ot = (uint16_t)(len == 1 ? IRT(IR_XLOAD, FOLD_SNEW_TYPE8) :
+ len == 2 ? IRT(IR_XLOAD, IRT_U16) :
+ IRTI(IR_XLOAD));
+ TRef tmp = emitir(ot, strref,
+ IRXLOAD_READONLY | (len > 1 ? IRXLOAD_UNALIGNED : 0));
+ TRef val = kfold_xload(J, IR(tref_ref(tmp)), strdata(kstr));
+ if (len == 3)
+ tmp = emitir(IRTI(IR_BAND), tmp,
+ lj_ir_kint(J, LJ_ENDIAN_SELECT(0x00ffffff, 0xffffff00)));
+ fins->op1 = (IRRef1)tmp;
+ fins->op2 = (IRRef1)val;
+ fins->ot = (IROpT)IRTGI(op);
+ return RETRYFOLD;
+ } else {
+ return DROPFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+/* -- Loads --------------------------------------------------------------- */
+
+/* Loads cannot be folded or passed on to CSE in general.
+** Alias analysis is needed to check for forwarding opportunities.
+**
+** Caveat: *all* loads must be listed here or they end up at CSE!
+*/
+
+LJFOLD(ALOAD any)
+LJFOLDX(lj_opt_fwd_aload)
+
+/* From HREF fwd (see below). Must eliminate, not supported by fwd/backend. */
+LJFOLD(HLOAD KKPTR)
+LJFOLDF(kfold_hload_kkptr)
+{
+ UNUSED(J);
+ lua_assert(ir_kptr(fleft) == niltvg(J2G(J)));
+ return TREF_NIL;
+}
+
+LJFOLD(HLOAD any)
+LJFOLDX(lj_opt_fwd_hload)
+
+LJFOLD(ULOAD any)
+LJFOLDX(lj_opt_fwd_uload)
+
+LJFOLD(CALLL any IRCALL_lj_tab_len)
+LJFOLDX(lj_opt_fwd_tab_len)
+
+/* Upvalue refs are really loads, but there are no corresponding stores.
+** So CSE is ok for them, except for UREFO across a GC step (see below).
+** If the referenced function is const, its upvalue addresses are const, too.
+** This can be used to improve CSE by looking for the same address,
+** even if the upvalues originate from a different function.
+*/
+LJFOLD(UREFO KGC any)
+LJFOLD(UREFC KGC any)
+LJFOLDF(cse_uref)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ IRRef ref = J->chain[fins->o];
+ GCfunc *fn = ir_kfunc(fleft);
+ GCupval *uv = gco2uv(gcref(fn->l.uvptr[(fins->op2 >> 8)]));
+ while (ref > 0) {
+ IRIns *ir = IR(ref);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn2 = ir_kfunc(IR(ir->op1));
+ if (gco2uv(gcref(fn2->l.uvptr[(ir->op2 >> 8)])) == uv) {
+ if (fins->o == IR_UREFO && gcstep_barrier(J, ref))
+ break;
+ return ref;
+ }
+ }
+ ref = ir->prev;
+ }
+ }
+ return EMITFOLD;
+}
+
+LJFOLD(HREF TNEW any)
+LJFOLDF(fwd_href_tnew)
+{
+ if (lj_opt_fwd_href_nokey(J))
+ return lj_ir_kkptr(J, niltvg(J2G(J)));
+ return NEXTFOLD;
+}
+
+LJFOLD(HREF TDUP KPRI)
+LJFOLD(HREF TDUP KGC)
+LJFOLD(HREF TDUP KNUM)
+LJFOLDF(fwd_href_tdup)
+{
+ TValue keyv;
+ lj_ir_kvalue(J->L, &keyv, fright);
+ if (lj_tab_get(J->L, ir_ktab(IR(fleft->op1)), &keyv) == niltvg(J2G(J)) &&
+ lj_opt_fwd_href_nokey(J))
+ return lj_ir_kkptr(J, niltvg(J2G(J)));
+ return NEXTFOLD;
+}
+
+/* We can safely FOLD/CSE array/hash refs and field loads, since there
+** are no corresponding stores. But we need to check for any NEWREF with
+** an aliased table, as it may invalidate all of the pointers and fields.
+** Only HREF needs the NEWREF check -- AREF and HREFK already depend on
+** FLOADs. And NEWREF itself is treated like a store (see below).
+*/
+LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE)
+LJFOLDF(fload_tab_tnew_asize)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD(fleft->op1);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TNEW IRFL_TAB_HMASK)
+LJFOLDF(fload_tab_tnew_hmask)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD((1 << fleft->op2)-1);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TDUP IRFL_TAB_ASIZE)
+LJFOLDF(fload_tab_tdup_asize)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->asize);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TDUP IRFL_TAB_HMASK)
+LJFOLDF(fload_tab_tdup_hmask)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->hmask);
+ return NEXTFOLD;
+}
+
+LJFOLD(HREF any any)
+LJFOLD(FLOAD any IRFL_TAB_ARRAY)
+LJFOLD(FLOAD any IRFL_TAB_NODE)
+LJFOLD(FLOAD any IRFL_TAB_ASIZE)
+LJFOLD(FLOAD any IRFL_TAB_HMASK)
+LJFOLDF(fload_tab_ah)
+{
+ TRef tr = lj_opt_cse(J);
+ return lj_opt_fwd_tptr(J, tref_ref(tr)) ? tr : EMITFOLD;
+}
+
+/* Strings are immutable, so we can safely FOLD/CSE the related FLOAD. */
+LJFOLD(FLOAD KGC IRFL_STR_LEN)
+LJFOLDF(fload_str_len_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return INTFOLD((int32_t)ir_kstr(fleft)->len);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD SNEW IRFL_STR_LEN)
+LJFOLDF(fload_str_len_snew)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
+ PHIBARRIER(fleft);
+ return fleft->op2;
+ }
+ return NEXTFOLD;
+}
+
+/* The C type ID of cdata objects is immutable. */
+LJFOLD(FLOAD KGC IRFL_CDATA_TYPEID)
+LJFOLDF(fload_cdata_typeid_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return INTFOLD((int32_t)ir_kcdata(fleft)->typeid);
+ return NEXTFOLD;
+}
+
+/* Get the contents of immutable cdata objects. */
+LJFOLD(FLOAD KGC IRFL_CDATA_PTR)
+LJFOLD(FLOAD KGC IRFL_CDATA_INT64)
+LJFOLDF(fload_cdata_int64_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
+ void *p = cdataptr(ir_kcdata(fleft));
+ if (irt_is64(fins->t))
+ return INT64FOLD(*(uint64_t *)p);
+ else
+ return INTFOLD(*(int32_t *)p);
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD CNEW IRFL_CDATA_TYPEID)
+LJFOLD(FLOAD CNEWI IRFL_CDATA_TYPEID)
+LJFOLDF(fload_cdata_typeid_cnew)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return fleft->op1; /* No PHI barrier needed. CNEW/CNEWI op1 is const. */
+ return NEXTFOLD;
+}
+
+/* Pointer and int64 cdata objects are immutable. */
+LJFOLD(FLOAD CNEWI IRFL_CDATA_PTR)
+LJFOLD(FLOAD CNEWI IRFL_CDATA_INT64)
+LJFOLDF(fload_cdata_ptr_int64_cnew)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return fleft->op2; /* Fold even across PHI to avoid allocations. */
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD any IRFL_STR_LEN)
+LJFOLD(FLOAD any IRFL_CDATA_TYPEID)
+LJFOLD(FLOAD any IRFL_CDATA_PTR)
+LJFOLD(FLOAD any IRFL_CDATA_INT64)
+LJFOLD(VLOAD any any) /* Vararg loads have no corresponding stores. */
+LJFOLDX(lj_opt_cse)
+
+/* All other field loads need alias analysis. */
+LJFOLD(FLOAD any any)
+LJFOLDX(lj_opt_fwd_fload)
+
+/* This is for LOOP only. Recording handles SLOADs internally. */
+LJFOLD(SLOAD any any)
+LJFOLDF(fwd_sload)
+{
+ if ((fins->op2 & IRSLOAD_FRAME)) {
+ TRef tr = lj_opt_cse(J);
+ return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr;
+ } else {
+ lua_assert(J->slot[fins->op1] != 0);
+ return J->slot[fins->op1];
+ }
+}
+
+/* Only fold for KKPTR. The pointer _and_ the contents must be const. */
+LJFOLD(XLOAD KKPTR any)
+LJFOLDF(xload_kptr)
+{
+ TRef tr = kfold_xload(J, fins, ir_kptr(fleft));
+ return tr ? tr : NEXTFOLD;
+}
+
+LJFOLD(XLOAD any any)
+LJFOLDX(lj_opt_fwd_xload)
+
+/* -- Write barriers ------------------------------------------------------ */
+
+/* Write barriers are amenable to CSE, but not across any incremental
+** GC steps.
+**
+** The same logic applies to open upvalue references, because a stack
+** may be resized during a GC step (not the current stack, but maybe that
+** of a coroutine).
+*/
+LJFOLD(TBAR any)
+LJFOLD(OBAR any any)
+LJFOLD(UREFO any any)
+LJFOLDF(barrier_tab)
+{
+ TRef tr = lj_opt_cse(J);
+ if (gcstep_barrier(J, tref_ref(tr))) /* CSE across GC step? */
+ return EMITFOLD; /* Raw emit. Assumes fins is left intact by CSE. */
+ return tr;
+}
+
+LJFOLD(TBAR TNEW)
+LJFOLD(TBAR TDUP)
+LJFOLDF(barrier_tnew_tdup)
+{
+ /* New tables are always white and never need a barrier. */
+ if (fins->op1 < J->chain[IR_LOOP]) /* Except across a GC step. */
+ return NEXTFOLD;
+ return DROPFOLD;
+}
+
+/* -- Stores and allocations ---------------------------------------------- */
+
+/* Stores and allocations cannot be folded or passed on to CSE in general.
+** But some stores can be eliminated with dead-store elimination (DSE).
+**
+** Caveat: *all* stores and allocs must be listed here or they end up at CSE!
+*/
+
+LJFOLD(ASTORE any any)
+LJFOLD(HSTORE any any)
+LJFOLDX(lj_opt_dse_ahstore)
+
+LJFOLD(USTORE any any)
+LJFOLDX(lj_opt_dse_ustore)
+
+LJFOLD(FSTORE any any)
+LJFOLDX(lj_opt_dse_fstore)
+
+LJFOLD(XSTORE any any)
+LJFOLDX(lj_opt_dse_xstore)
+
+LJFOLD(NEWREF any any) /* Treated like a store. */
+LJFOLD(CALLS any any)
+LJFOLD(CALLL any any) /* Safeguard fallback. */
+LJFOLD(CALLXS any any)
+LJFOLD(XBAR)
+LJFOLD(RETF any any) /* Modifies BASE. */
+LJFOLD(TNEW any any)
+LJFOLD(TDUP any)
+LJFOLD(CNEW any any)
+LJFOLD(XSNEW any any)
+LJFOLDX(lj_ir_emit)
+
+/* ------------------------------------------------------------------------ */
+
+/* Every entry in the generated hash table is a 32 bit pattern:
+**
+** xxxxxxxx iiiiiii lllllll rrrrrrrrrr
+**
+** xxxxxxxx = 8 bit index into fold function table
+** iiiiiii = 7 bit folded instruction opcode
+** lllllll = 7 bit left instruction opcode
+** rrrrrrrrrr = 8 bit right instruction opcode or 10 bits from literal field
+*/
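+
+/* A rough worked example (assumed layout, not generated output): a rule
+** such as LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE) above is keyed as
+**   (IR_FLOAD << 17) + (IR_TNEW << 10) + IRFL_TAB_ASIZE
+** with the index of its fold function in the top 8 bits of the entry.
+** lj_opt_fold() below probes the exact key first and then retries with
+** the left operand, the right operand and finally both widened to
+** all-ones 'any' wildcards.
+*/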
+
+#include "lj_folddef.h"
+
+/* ------------------------------------------------------------------------ */
+
+/* Fold IR instruction. */
+TRef LJ_FASTCALL lj_opt_fold(jit_State *J)
+{
+ uint32_t key, any;
+ IRRef ref;
+
+ if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) {
+ lua_assert(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) |
+ JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT);
+ /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */
+ if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N)
+ return lj_opt_cse(J);
+
+ /* Forwarding or CSE disabled? Emit raw IR for loads, except for SLOAD. */
+ if ((J->flags & (JIT_F_OPT_FWD|JIT_F_OPT_CSE)) !=
+ (JIT_F_OPT_FWD|JIT_F_OPT_CSE) &&
+ irm_kind(lj_ir_mode[fins->o]) == IRM_L && fins->o != IR_SLOAD)
+ return lj_ir_emit(J);
+
+ /* DSE disabled? Emit raw IR for stores. */
+ if (!(J->flags & JIT_F_OPT_DSE) && irm_kind(lj_ir_mode[fins->o]) == IRM_S)
+ return lj_ir_emit(J);
+ }
+
+ /* Fold engine start/retry point. */
+retry:
+ /* Construct key from opcode and operand opcodes (unless literal/none). */
+ key = ((uint32_t)fins->o << 17);
+ if (fins->op1 >= J->cur.nk) {
+ key += (uint32_t)IR(fins->op1)->o << 10;
+ *fleft = *IR(fins->op1);
+ }
+ if (fins->op2 >= J->cur.nk) {
+ key += (uint32_t)IR(fins->op2)->o;
+ *fright = *IR(fins->op2);
+ } else {
+ key += (fins->op2 & 0x3ffu); /* Literal mask. Must include IRCONV_*MASK. */
+ }
+
+ /* Check for a match in order from most specific to least specific. */
+ any = 0;
+ for (;;) {
+ uint32_t k = key | (any & 0x1ffff);
+ uint32_t h = fold_hashkey(k);
+ uint32_t fh = fold_hash[h]; /* Lookup key in semi-perfect hash table. */
+ if ((fh & 0xffffff) == k || (fh = fold_hash[h+1], (fh & 0xffffff) == k)) {
+ ref = (IRRef)tref_ref(fold_func[fh >> 24](J));
+ if (ref != NEXTFOLD)
+ break;
+ }
+ if (any == 0xfffff) /* Exhausted folding. Pass on to CSE. */
+ return lj_opt_cse(J);
+ any = (any | (any >> 10)) ^ 0xffc00;
+ }
+
+ /* Return value processing, ordered by frequency. */
+ if (LJ_LIKELY(ref >= MAX_FOLD))
+ return TREF(ref, irt_t(IR(ref)->t));
+ if (ref == RETRYFOLD)
+ goto retry;
+ if (ref == KINTFOLD)
+ return lj_ir_kint(J, fins->i);
+ if (ref == FAILFOLD)
+ lj_trace_err(J, LJ_TRERR_GFAIL);
+ lua_assert(ref == DROPFOLD);
+ return REF_DROP;
+}
+
+/* -- Common-Subexpression Elimination ------------------------------------ */
+
+/* CSE an IR instruction. This is very fast due to the skip-list chains. */
+TRef LJ_FASTCALL lj_opt_cse(jit_State *J)
+{
+ /* Avoid narrow to wide store-to-load forwarding stall */
+ IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16);
+ IROp op = fins->o;
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ /* Limited search for same operands in per-opcode chain. */
+ IRRef ref = J->chain[op];
+ IRRef lim = fins->op1;
+ if (fins->op2 > lim) lim = fins->op2; /* Relies on lit < REF_BIAS. */
+ while (ref > lim) {
+ if (IR(ref)->op12 == op12)
+ return TREF(ref, irt_t(IR(ref)->t)); /* Common subexpression found. */
+ ref = IR(ref)->prev;
+ }
+ }
+ /* Otherwise emit IR (inlined for speed). */
+ {
+ IRRef ref = lj_ir_nextins(J);
+ IRIns *ir = IR(ref);
+ ir->prev = J->chain[op];
+ ir->op12 = op12;
+ J->chain[op] = (IRRef1)ref;
+ ir->o = fins->o;
+ J->guardemit.irt |= fins->t.irt;
+ return TREF(ref, irt_t((ir->t = fins->t)));
+ }
+}
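+
+/* A minimal illustration (hypothetical IR, assuming FOLD leaves the
+** instruction unchanged): emitting ADD a b twice in a row creates only one
+** IR instruction. The second call starts at J->chain[IR_ADD], walks the
+** per-opcode chain via ->prev, finds an ADD with the same packed operands
+** (op12) before hitting the search limit and returns its reference instead
+** of emitting a new instruction.
+*/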
+
+/* CSE with explicit search limit. */
+TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim)
+{
+ IRRef ref = J->chain[fins->o];
+ IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16);
+ while (ref > lim) {
+ if (IR(ref)->op12 == op12)
+ return ref;
+ ref = IR(ref)->prev;
+ }
+ return lj_ir_emit(J);
+}
+
+/* ------------------------------------------------------------------------ */
+
+#undef IR
+#undef fins
+#undef fleft
+#undef fright
+#undef knumleft
+#undef knumright
+#undef emitir
+
+#endif
diff --git a/src/LuaJIT/src/lj_opt_loop.c b/src/LuaJIT/src/lj_opt_loop.c
new file mode 100644
index 000000000..ee5c4d1e9
--- /dev/null
+++ b/src/LuaJIT/src/lj_opt_loop.c
@@ -0,0 +1,437 @@
+/*
+** LOOP: Loop Optimizations.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_loop_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_vm.h"
+
+/* Loop optimization:
+**
+** Traditional Loop-Invariant Code Motion (LICM) splits the instructions
+** of a loop into invariant and variant instructions. The invariant
+** instructions are hoisted out of the loop and only the variant
+** instructions remain inside the loop body.
+**
+** Unfortunately LICM is mostly useless for compiling dynamic languages.
+** The IR has many guards and most of the subsequent instructions are
+** control-dependent on them. The first non-hoistable guard would
+** effectively prevent hoisting of all subsequent instructions.
+**
+** That's why we use a special form of unrolling using copy-substitution,
+** combined with redundancy elimination:
+**
+** The recorded instruction stream is re-emitted to the compiler pipeline
+** with substituted operands. The substitution table is filled with the
+** refs returned by re-emitting each instruction. This can be done
+** on-the-fly, because the IR is in strict SSA form, where every ref is
+** defined before its use.
+**
+** This approach generates two code sections, separated by the LOOP
+** instruction:
+**
+** 1. The recorded instructions form a kind of pre-roll for the loop. It
+** contains a mix of invariant and variant instructions and performs
+** exactly one loop iteration (but not necessarily the 1st iteration).
+**
+** 2. The loop body contains only the variant instructions and performs
+** all remaining loop iterations.
+**
+** At first sight that looks like a waste of space, because the variant
+** instructions are present twice. But the key insight is that the
+** pre-roll honors the control-dependencies for *both* the pre-roll itself
+** *and* the loop body!
+**
+** It also means one doesn't have to explicitly model control-dependencies
+** (which, BTW, wouldn't help LICM much). And it's much easier to
+** integrate sparse snapshotting with this approach.
+**
+** One of the nicest aspects of this approach is that all of the
+** optimizations of the compiler pipeline (FOLD, CSE, FWD, etc.) can be
+** reused with only minor restrictions (e.g. one should not fold
+** instructions across loop-carried dependencies).
+**
+** But in general all optimizations can be applied which only need to look
+** backwards into the generated instruction stream. At any point in time
+** during the copy-substitution process this contains both a static loop
+** iteration (the pre-roll) and a dynamic one (from the to-be-copied
+** instruction up to the end of the partial loop body).
+**
+** Since control-dependencies are implicitly kept, CSE also applies to all
+** kinds of guards. The major advantage is that all invariant guards can
+** be hoisted, too.
+**
+** Load/store forwarding works across loop iterations, too. This is
+** important if loop-carried dependencies are kept in upvalues or tables.
+** E.g. 'self.idx = self.idx + 1' deep down in some OO-style method may
+** become a forwarded loop-recurrence after inlining.
+**
+** Since the IR is in SSA form, loop-carried dependencies have to be
+** modeled with PHI instructions. The potential candidates for PHIs are
+** collected on-the-fly during copy-substitution. After eliminating the
+** redundant ones, PHI instructions are emitted *below* the loop body.
+**
+** Note that this departure from traditional SSA form doesn't change the
+** semantics of the PHI instructions themselves. But it greatly simplifies
+** on-the-fly generation of the IR and the machine code.
+*/
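+
+/* A sketch of the resulting trace shape for a simple numeric loop such as
+**   local s = 0; for i = 1, n do s = s + t[i] end
+** (hand-written illustration, not actual recorder output):
+**
+**   pre-roll:  invariant guards and loads (type checks, array base of t)
+**              plus one full iteration:  s1 = s0 + t[i0],  i1 = i0 + 1
+**   LOOP
+**   body:      only the variant part:   s2 = s1' + t[i1'],  i2 = i1' + 1
+**   PHI s1 s2,  PHI i1 i2   (emitted below the body, see loop_emit_phi)
+**
+** where s1'/i1' stand for the values carried around the loop by the PHIs.
+*/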
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Emit raw IR without passing through optimizations. */
+#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- PHI elimination ----------------------------------------------------- */
+
+/* Emit or eliminate collected PHIs. */
+static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi,
+ SnapNo onsnap)
+{
+ int passx = 0;
+ IRRef i, nslots;
+ IRRef invar = J->chain[IR_LOOP];
+ /* Pass #1: mark redundant and potentially redundant PHIs. */
+ for (i = 0; i < nphi; i++) {
+ IRRef lref = phi[i];
+ IRRef rref = subst[lref];
+ if (lref == rref || rref == REF_DROP) { /* Invariants are redundant. */
+ irt_setmark(IR(lref)->t);
+ } else if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) {
+ /* Quick check for simple recurrences failed, need pass2. */
+ irt_setmark(IR(lref)->t);
+ passx = 1;
+ }
+ }
+ /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */
+ if (passx) {
+ SnapNo s;
+ for (i = J->cur.nins-1; i > invar; i--) {
+ IRIns *ir = IR(i);
+ if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
+ if (!irref_isk(ir->op1)) {
+ irt_clearmark(IR(ir->op1)->t);
+ if (ir->op1 < invar &&
+ ir->o >= IR_CALLN && ir->o <= IR_CARG) { /* ORDER IR */
+ ir = IR(ir->op1);
+ while (ir->o == IR_CARG) {
+ if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
+ if (irref_isk(ir->op1)) break;
+ ir = IR(ir->op1);
+ irt_clearmark(ir->t);
+ }
+ }
+ }
+ }
+ for (s = J->cur.nsnap-1; s >= onsnap; s--) {
+ SnapShot *snap = &J->cur.snap[s];
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ IRRef ref = snap_ref(map[n]);
+ if (!irref_isk(ref)) irt_clearmark(IR(ref)->t);
+ }
+ }
+ }
+ /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */
+ nslots = J->baseslot+J->maxslot;
+ for (i = 1; i < nslots; i++) {
+ IRRef ref = tref_ref(J->slot[i]);
+ while (!irref_isk(ref) && ref != subst[ref]) {
+ IRIns *ir = IR(ref);
+ irt_clearmark(ir->t); /* Unmark potential uses, too. */
+ if (irt_isphi(ir->t) || irt_ispri(ir->t))
+ break;
+ irt_setphi(ir->t);
+ if (nphi >= LJ_MAX_PHI)
+ lj_trace_err(J, LJ_TRERR_PHIOV);
+ phi[nphi++] = (IRRef1)ref;
+ ref = subst[ref];
+ if (ref > invar)
+ break;
+ }
+ }
+ /* Pass #4: propagate non-redundant PHIs. */
+ while (passx) {
+ passx = 0;
+ for (i = 0; i < nphi; i++) {
+ IRRef lref = phi[i];
+ IRIns *ir = IR(lref);
+ if (!irt_ismarked(ir->t)) { /* Propagate only from unmarked PHIs. */
+ IRRef rref = subst[lref];
+ if (lref == rref) { /* Mark redundant PHI. */
+ irt_setmark(ir->t);
+ } else {
+ IRIns *irr = IR(rref);
+ if (irt_ismarked(irr->t)) { /* Right ref points to other PHI? */
+ irt_clearmark(irr->t); /* Mark that PHI as non-redundant. */
+ passx = 1; /* Retry. */
+ }
+ }
+ }
+ }
+ }
+ /* Pass #5: emit PHI instructions or eliminate PHIs. */
+ for (i = 0; i < nphi; i++) {
+ IRRef lref = phi[i];
+ IRIns *ir = IR(lref);
+ if (!irt_ismarked(ir->t)) { /* Emit PHI if not marked. */
+ IRRef rref = subst[lref];
+ if (rref > invar)
+ irt_setphi(IR(rref)->t);
+ emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref);
+ } else { /* Otherwise eliminate PHI. */
+ irt_clearmark(ir->t);
+ irt_clearphi(ir->t);
+ }
+ }
+}
+
+/* -- Loop unrolling using copy-substitution ------------------------------ */
+
+/* Copy-substitute snapshot. */
+static void loop_subst_snap(jit_State *J, SnapShot *osnap,
+ SnapEntry *loopmap, IRRef1 *subst)
+{
+ SnapEntry *nmap, *omap = &J->cur.snapmap[osnap->mapofs];
+ SnapEntry *nextmap = &J->cur.snapmap[snap_nextofs(&J->cur, osnap)];
+ MSize nmapofs;
+ MSize on, ln, nn, onent = osnap->nent;
+ BCReg nslots = osnap->nslots;
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap];
+  if (irt_isguard(J->guardemit)) {  /* Guard in between? */
+ nmapofs = J->cur.nsnapmap;
+ J->cur.nsnap++; /* Add new snapshot. */
+ } else { /* Otherwise overwrite previous snapshot. */
+ snap--;
+ nmapofs = snap->mapofs;
+ }
+ J->guardemit.irt = 0;
+ /* Setup new snapshot. */
+ snap->mapofs = (uint16_t)nmapofs;
+ snap->ref = (IRRef1)J->cur.nins;
+ snap->nslots = nslots;
+ snap->topslot = osnap->topslot;
+ snap->count = 0;
+ nmap = &J->cur.snapmap[nmapofs];
+ /* Substitute snapshot slots. */
+ on = ln = nn = 0;
+ while (on < onent) {
+ SnapEntry osn = omap[on], lsn = loopmap[ln];
+ if (snap_slot(lsn) < snap_slot(osn)) { /* Copy slot from loop map. */
+ nmap[nn++] = lsn;
+ ln++;
+ } else { /* Copy substituted slot from snapshot map. */
+ if (snap_slot(lsn) == snap_slot(osn)) ln++; /* Shadowed loop slot. */
+ if (!irref_isk(snap_ref(osn)))
+ osn = snap_setref(osn, subst[snap_ref(osn)]);
+ nmap[nn++] = osn;
+ on++;
+ }
+ }
+ while (snap_slot(loopmap[ln]) < nslots) /* Copy remaining loop slots. */
+ nmap[nn++] = loopmap[ln++];
+ snap->nent = (uint8_t)nn;
+ omap += onent;
+ nmap += nn;
+ while (omap < nextmap) /* Copy PC + frame links. */
+ *nmap++ = *omap++;
+ J->cur.nsnapmap = (uint16_t)(nmap - J->cur.snapmap);
+}
+
+/* Unroll loop. */
+static void loop_unroll(jit_State *J)
+{
+ IRRef1 phi[LJ_MAX_PHI];
+ uint32_t nphi = 0;
+ IRRef1 *subst;
+ SnapNo onsnap;
+ SnapShot *osnap, *loopsnap;
+ SnapEntry *loopmap, *psentinel;
+ IRRef ins, invar;
+
+ /* Use temp buffer for substitution table.
+ ** Only non-constant refs in [REF_BIAS,invar) are valid indexes.
+ ** Caveat: don't call into the VM or run the GC or the buffer may be gone.
+ */
+ invar = J->cur.nins;
+ subst = (IRRef1 *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf,
+ (invar-REF_BIAS)*sizeof(IRRef1)) - REF_BIAS;
+ subst[REF_BASE] = REF_BASE;
+
+ /* LOOP separates the pre-roll from the loop body. */
+ emitir_raw(IRTG(IR_LOOP, IRT_NIL), 0, 0);
+
+ /* Grow snapshot buffer and map for copy-substituted snapshots.
+ ** Need up to twice the number of snapshots minus #0 and loop snapshot.
+ ** Need up to twice the number of entries plus fallback substitutions
+ ** from the loop snapshot entries for each new snapshot.
+ ** Caveat: both calls may reallocate J->cur.snap and J->cur.snapmap!
+ */
+ onsnap = J->cur.nsnap;
+ lj_snap_grow_buf(J, 2*onsnap-2);
+ lj_snap_grow_map(J, J->cur.nsnapmap*2+(onsnap-2)*J->cur.snap[onsnap-1].nent);
+
+ /* The loop snapshot is used for fallback substitutions. */
+ loopsnap = &J->cur.snap[onsnap-1];
+ loopmap = &J->cur.snapmap[loopsnap->mapofs];
+ /* The PC of snapshot #0 and the loop snapshot must match. */
+ psentinel = &loopmap[loopsnap->nent];
+ lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]);
+ *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */
+
+ /* Start substitution with snapshot #1 (#0 is empty for root traces). */
+ osnap = &J->cur.snap[1];
+
+ /* Copy and substitute all recorded instructions and snapshots. */
+ for (ins = REF_FIRST; ins < invar; ins++) {
+ IRIns *ir;
+ IRRef op1, op2;
+
+ if (ins >= osnap->ref) /* Instruction belongs to next snapshot? */
+ loop_subst_snap(J, osnap++, loopmap, subst); /* Copy-substitute it. */
+
+ /* Substitute instruction operands. */
+ ir = IR(ins);
+ op1 = ir->op1;
+ if (!irref_isk(op1)) op1 = subst[op1];
+ op2 = ir->op2;
+ if (!irref_isk(op2)) op2 = subst[op2];
+ if (irm_kind(lj_ir_mode[ir->o]) == IRM_N &&
+ op1 == ir->op1 && op2 == ir->op2) { /* Regular invariant ins? */
+ subst[ins] = (IRRef1)ins; /* Shortcut. */
+ } else {
+ /* Re-emit substituted instruction to the FOLD/CSE/etc. pipeline. */
+ IRType1 t = ir->t; /* Get this first, since emitir may invalidate ir. */
+ IRRef ref = tref_ref(emitir(ir->ot & ~IRT_ISPHI, op1, op2));
+ subst[ins] = (IRRef1)ref;
+ if (ref != ins) {
+ IRIns *irr = IR(ref);
+ if (ref < invar) { /* Loop-carried dependency? */
+ /* Potential PHI? */
+ if (!irref_isk(ref) && !irt_isphi(irr->t) && !irt_ispri(irr->t)) {
+ irt_setphi(irr->t);
+ if (nphi >= LJ_MAX_PHI)
+ lj_trace_err(J, LJ_TRERR_PHIOV);
+ phi[nphi++] = (IRRef1)ref;
+ }
+ /* Check all loop-carried dependencies for type instability. */
+ if (!irt_sametype(t, irr->t)) {
+ if (irt_isinteger(t) && irt_isinteger(irr->t))
+ continue;
+ else if (irt_isnum(t) && irt_isinteger(irr->t)) /* Fix int->num. */
+ ref = tref_ref(emitir(IRTN(IR_CONV), ref, IRCONV_NUM_INT));
+ else if (irt_isnum(irr->t) && irt_isinteger(t)) /* Fix num->int. */
+ ref = tref_ref(emitir(IRTGI(IR_CONV), ref,
+ IRCONV_INT_NUM|IRCONV_CHECK));
+ else
+ lj_trace_err(J, LJ_TRERR_TYPEINS);
+ subst[ins] = (IRRef1)ref;
+ irr = IR(ref);
+ goto phiconv;
+ }
+ } else if (ref != REF_DROP && irr->o == IR_CONV &&
+ ref > invar && irr->op1 < invar) {
+ /* May need an extra PHI for a CONV. */
+ ref = irr->op1;
+ irr = IR(ref);
+ phiconv:
+ if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) {
+ irt_setphi(irr->t);
+ if (nphi >= LJ_MAX_PHI)
+ lj_trace_err(J, LJ_TRERR_PHIOV);
+ phi[nphi++] = (IRRef1)ref;
+ }
+ }
+ }
+ }
+ }
+ if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */
+ J->cur.nsnapmap = (uint16_t)J->cur.snap[--J->cur.nsnap].mapofs;
+ lua_assert(J->cur.nsnapmap <= J->sizesnapmap);
+ *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */
+
+ loop_emit_phi(J, subst, phi, nphi, onsnap);
+}
+
+/* Undo any partial changes made by the loop optimization. */
+static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap, MSize nsnapmap)
+{
+ ptrdiff_t i;
+ SnapShot *snap = &J->cur.snap[nsnap-1];
+ SnapEntry *map = J->cur.snapmap;
+ map[snap->mapofs + snap->nent] = map[J->cur.snap[0].nent]; /* Restore PC. */
+ J->cur.nsnapmap = (uint16_t)nsnapmap;
+ J->cur.nsnap = nsnap;
+ J->guardemit.irt = 0;
+ lj_ir_rollback(J, ins);
+ for (i = 0; i < BPROP_SLOTS; i++) { /* Remove backprop. cache entries. */
+ BPropEntry *bp = &J->bpropcache[i];
+ if (bp->val >= ins)
+ bp->key = 0;
+ }
+ for (ins--; ins >= REF_FIRST; ins--) { /* Remove flags. */
+ IRIns *ir = IR(ins);
+ irt_clearphi(ir->t);
+ irt_clearmark(ir->t);
+ }
+}
+
+/* Protected callback for loop optimization. */
+static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ UNUSED(L); UNUSED(dummy);
+ loop_unroll((jit_State *)ud);
+ return NULL;
+}
+
+/* Loop optimization. */
+int lj_opt_loop(jit_State *J)
+{
+ IRRef nins = J->cur.nins;
+ SnapNo nsnap = J->cur.nsnap;
+ MSize nsnapmap = J->cur.nsnapmap;
+ int errcode = lj_vm_cpcall(J->L, NULL, J, cploop_opt);
+ if (LJ_UNLIKELY(errcode)) {
+ lua_State *L = J->L;
+ if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) { /* Trace error? */
+ int32_t e = numberVint(L->top-1);
+ switch ((TraceError)e) {
+ case LJ_TRERR_TYPEINS: /* Type instability. */
+ case LJ_TRERR_GFAIL: /* Guard would always fail. */
+ /* Unrolling via recording fixes many cases, e.g. a flipped boolean. */
+ if (--J->instunroll < 0) /* But do not unroll forever. */
+ break;
+ L->top--; /* Remove error object. */
+ loop_undo(J, nins, nsnap, nsnapmap);
+ return 1; /* Loop optimization failed, continue recording. */
+ default:
+ break;
+ }
+ }
+ lj_err_throw(L, errcode); /* Propagate all other errors. */
+ }
+ return 0; /* Loop optimization is ok. */
+}
+
+#undef IR
+#undef emitir
+#undef emitir_raw
+
+#endif
diff --git a/src/LuaJIT/src/lj_opt_mem.c b/src/LuaJIT/src/lj_opt_mem.c
new file mode 100644
index 000000000..17e29569f
--- /dev/null
+++ b/src/LuaJIT/src/lj_opt_mem.c
@@ -0,0 +1,874 @@
+/*
+** Memory access optimizations.
+** AA: Alias Analysis using high-level semantic disambiguation.
+** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
+** DSE: Dead-Store Elimination.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_mem_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+#define fright (&J->fold.right)
+
+/*
+** Caveat #1: return value is not always a TRef -- only use with tref_ref().
+** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
+*/
+
+/* Return values from alias analysis. */
+typedef enum {
+ ALIAS_NO, /* The two refs CANNOT alias (exact). */
+ ALIAS_MAY, /* The two refs MAY alias (inexact). */
+ ALIAS_MUST /* The two refs MUST alias (exact). */
+} AliasRet;
+
+/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */
+
+/* Simplified escape analysis: check for intervening stores. */
+static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
+{
+ IRRef ref = (IRRef)(ir - J->cur.ir); /* The ref that might be stored. */
+ for (ir++; ir < stop; ir++)
+ if (ir->op2 == ref &&
+ (ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
+ ir->o == IR_USTORE || ir->o == IR_FSTORE))
+ return ALIAS_MAY; /* Reference was stored and might alias. */
+ return ALIAS_NO; /* Reference was not stored. */
+}
+
+/* Alias analysis for two different table references. */
+static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
+{
+ IRIns *taba = IR(ta), *tabb = IR(tb);
+ int newa, newb;
+ lua_assert(ta != tb);
+ lua_assert(irt_istab(taba->t) && irt_istab(tabb->t));
+ /* Disambiguate new allocations. */
+ newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
+ newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
+ if (newa && newb)
+ return ALIAS_NO; /* Two different allocations never alias. */
+ if (newb) { /* At least one allocation? */
+ IRIns *tmp = taba; taba = tabb; tabb = tmp;
+ } else if (!newa) {
+ return ALIAS_MAY; /* Anything else: we just don't know. */
+ }
+ return aa_escape(J, taba, tabb);
+}
+
+/* Alias analysis for array and hash access using key-based disambiguation. */
+static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
+{
+ IRRef ka = refa->op2;
+ IRRef kb = refb->op2;
+ IRIns *keya, *keyb;
+ IRRef ta, tb;
+ if (refa == refb)
+ return ALIAS_MUST; /* Shortcut for same refs. */
+ keya = IR(ka);
+ if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
+ keyb = IR(kb);
+ if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
+ ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
+ tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
+ if (ka == kb) {
+ /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
+ if (ta == tb)
+ return ALIAS_MUST; /* Same key, same table. */
+ else
+ return aa_table(J, ta, tb); /* Same key, possibly different table. */
+ }
+ if (irref_isk(ka) && irref_isk(kb))
+ return ALIAS_NO; /* Different constant keys. */
+ if (refa->o == IR_AREF) {
+ /* Disambiguate array references based on index arithmetic. */
+ int32_t ofsa = 0, ofsb = 0;
+ IRRef basea = ka, baseb = kb;
+ lua_assert(refb->o == IR_AREF);
+ /* Gather base and offset from t[base] or t[base+-ofs]. */
+ if (keya->o == IR_ADD && irref_isk(keya->op2)) {
+ basea = keya->op1;
+ ofsa = IR(keya->op2)->i;
+ if (basea == kb && ofsa != 0)
+ return ALIAS_NO; /* t[base+-ofs] vs. t[base]. */
+ }
+ if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
+ baseb = keyb->op1;
+ ofsb = IR(keyb->op2)->i;
+ if (ka == baseb && ofsb != 0)
+ return ALIAS_NO; /* t[base] vs. t[base+-ofs]. */
+ }
+ if (basea == baseb && ofsa != ofsb)
+ return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
+ } else {
+ /* Disambiguate hash references based on the type of their keys. */
+ lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
+ (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF));
+ if (!irt_sametype(keya->t, keyb->t))
+ return ALIAS_NO; /* Different key types. */
+ }
+ if (ta == tb)
+ return ALIAS_MAY; /* Same table, cannot disambiguate keys. */
+ else
+ return aa_table(J, ta, tb); /* Try to disambiguate tables. */
+}
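+
+/* Rough examples of the disambiguation above (illustrative only, with t
+** denoting the same table reference in each line):
+**   t[1]  vs. t[2]    -> ALIAS_NO    (different constant keys)
+**   t[i]  vs. t[i+1]  -> ALIAS_NO    (same base, different offsets)
+**   t[i]  vs. t[j]    -> ALIAS_MAY   (keys cannot be disambiguated)
+**   t[i]  vs. t[i]    -> ALIAS_MUST  (same key and same table)
+*/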
+
+/* Array and hash load forwarding. */
+static TRef fwd_ahload(jit_State *J, IRRef xref)
+{
+ IRIns *xr = IR(xref);
+ IRRef lim = xref; /* Search limit. */
+ IRRef ref;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[fins->o+IRDELTA_L2S];
+ while (ref > xref) {
+ IRIns *store = IR(ref);
+ switch (aa_ahref(J, xr, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+ /* No conflicting store (yet): const-fold loads from allocations. */
+ {
+ IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
+ IRRef tab = ir->op1;
+ ir = IR(tab);
+ if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) {
+ /* A NEWREF with a number key may end up pointing to the array part.
+ ** But it's referenced from HSTORE and not found in the ASTORE chain.
+ ** For now simply consider this a conflict without forwarding anything.
+ */
+ if (xr->o == IR_AREF) {
+ IRRef ref2 = J->chain[IR_NEWREF];
+ while (ref2 > tab) {
+ IRIns *newref = IR(ref2);
+ if (irt_isnum(IR(newref->op2)->t))
+ goto cselim;
+ ref2 = newref->prev;
+ }
+ }
+ /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
+ ** But the above search for conflicting stores was limited by xref.
+ ** So continue searching, limited by the TNEW/TDUP. Store forwarding
+ ** is ok, too. A conflict does NOT limit the search for a matching load.
+ */
+ while (ref > tab) {
+ IRIns *store = IR(ref);
+ switch (aa_ahref(J, xr, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: goto cselim; /* Conflicting store. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+ lua_assert(ir->o != IR_TNEW || irt_isnil(fins->t));
+ if (irt_ispri(fins->t)) {
+ return TREF_PRI(irt_type(fins->t));
+ } else if (irt_isnum(fins->t) || irt_isstr(fins->t)) {
+ TValue keyv;
+ cTValue *tv;
+ IRIns *key = IR(xr->op2);
+ if (key->o == IR_KSLOT) key = IR(key->op1);
+ lj_ir_kvalue(J->L, &keyv, key);
+ tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
+ lua_assert(itype2irt(tv) == irt_type(fins->t));
+ if (irt_isnum(fins->t))
+ return lj_ir_knum_u64(J, tv->u64);
+ else
+ return lj_ir_kstr(J, strV(tv));
+ }
+      /* Otherwise: don't intern as a constant. */
+ }
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ ref = J->chain[fins->o];
+ while (ref > lim) {
+ IRIns *load = IR(ref);
+ if (load->op1 == xref)
+ return ref; /* Load forwarding. */
+ ref = load->prev;
+ }
+ return 0; /* Conflict or no match. */
+}
+
+/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
+static TRef fwd_aload_reassoc(jit_State *J)
+{
+ IRIns *irx = IR(fins->op1);
+ IRIns *key = IR(irx->op2);
+ if (key->o == IR_ADD && irref_isk(key->op2)) {
+ IRIns *add2 = IR(key->op1);
+ if (add2->o == IR_ADD && irref_isk(add2->op2) &&
+ IR(key->op2)->i == -IR(add2->op2)->i) {
+ IRRef ref = J->chain[IR_AREF];
+ IRRef lim = add2->op1;
+ if (irx->op1 > lim) lim = irx->op1;
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
+ return fwd_ahload(J, ref);
+ ref = ir->prev;
+ }
+ }
+ }
+ return 0;
+}
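+
+/* Illustrative case for the reassociation above: in a loop like
+**   for i = 2, n do t[i] = t[i-1] end
+** the copy-substituted load address is AREF(t, ADD(ADD(i, +1), -1)).
+** The +1/-1 pair cancels, so the code searches for an existing
+** AREF(t, i) -- the address of the previous iteration's store -- and
+** fwd_ahload() can then forward the stored value across the iteration.
+*/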
+
+/* ALOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
+{
+ IRRef ref;
+ if ((ref = fwd_ahload(J, fins->op1)) ||
+ (ref = fwd_aload_reassoc(J)))
+ return ref;
+ return EMITFOLD;
+}
+
+/* HLOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
+{
+ IRRef ref = fwd_ahload(J, fins->op1);
+ if (ref)
+ return ref;
+ return EMITFOLD;
+}
+
+/* Check whether HREF of TNEW/TDUP can be folded to niltv. */
+int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
+{
+ IRRef lim = fins->op1; /* Search limit. */
+ IRRef ref;
+
+ /* The key for an ASTORE may end up in the hash part after a NEWREF. */
+ if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
+ ref = J->chain[IR_ASTORE];
+ while (ref > lim) {
+ if (ref < J->chain[IR_NEWREF])
+ return 0; /* Conflict. */
+ ref = IR(ref)->prev;
+ }
+ }
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_HSTORE];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
+ return 0; /* Conflict. */
+ ref = store->prev;
+ }
+
+ return 1; /* No conflict. Can fold to niltv. */
+}
+
+/* Check whether there's no aliasing NEWREF for the left operand. */
+int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
+{
+ IRRef ta = fins->op1;
+ IRRef ref = J->chain[IR_NEWREF];
+ while (ref > lim) {
+ IRIns *newref = IR(ref);
+ if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
+ return 0; /* Conflict. */
+ ref = newref->prev;
+ }
+ return 1; /* No conflict. Can safely FOLD/CSE. */
+}
+
+/* ASTORE/HSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
+{
+ IRRef xref = fins->op1; /* xREF reference. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRIns *xr = IR(xref);
+ IRRef1 *refp = &J->chain[fins->o];
+ IRRef ref = *refp;
+ while (ref > xref) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_ahref(J, xr, IR(store->op1))) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY: /* Store to MAYBE the same location. */
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST: /* Store to the same location. */
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards (includes conflicting loads). */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t))
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ store->o = IR_NOP;
+ store->t.irt = IRT_NIL;
+ store->op1 = store->op2 = 0;
+ store->prev = 0;
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
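+
+/* A minimal example of the elimination above: for two consecutive stores
+**   t[k] = a; t[k] = b
+** with no guard or conflicting access in between, the first store is
+** unlinked from its chain and turned into a NOP, and only the second
+** store is emitted.
+*/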
+
+/* -- ULOAD forwarding ---------------------------------------------------- */
+
+/* The current alias analysis for upvalues is very simplistic. It only
+** disambiguates between the unique upvalues of the same function.
+** This is good enough for now, since most upvalues are read-only.
+**
+** A more precise analysis would be feasible with the help of the parser:
+** generate a unique key for every upvalue, even across all prototypes.
+** Lacking a realistic use-case, it's unclear whether this is beneficial.
+*/
+static AliasRet aa_uref(IRIns *refa, IRIns *refb)
+{
+ if (refa->o != refb->o)
+ return ALIAS_NO; /* Different UREFx type. */
+ if (refa->op1 == refb->op1) { /* Same function. */
+ if (refa->op2 == refb->op2)
+ return ALIAS_MUST; /* Same function, same upvalue idx. */
+ else
+ return ALIAS_NO; /* Same function, different upvalue idx. */
+ } else { /* Different functions, check disambiguation hash values. */
+ if (((refa->op2 ^ refb->op2) & 0xff))
+ return ALIAS_NO; /* Upvalues with different hash values cannot alias. */
+ else
+ return ALIAS_MAY; /* No conclusion can be drawn for same hash value. */
+ }
+}
+
+/* ULOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
+{
+ IRRef uref = fins->op1;
+ IRRef lim = uref; /* Search limit. */
+ IRIns *xr = IR(uref);
+ IRRef ref;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_USTORE];
+ while (ref > uref) {
+ IRIns *store = IR(ref);
+ switch (aa_uref(xr, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ return lj_opt_cselim(J, lim);
+}
+
+/* USTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
+{
+ IRRef xref = fins->op1; /* xREF reference. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRIns *xr = IR(xref);
+ IRRef1 *refp = &J->chain[IR_USTORE];
+ IRRef ref = *refp;
+ while (ref > xref) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_uref(xr, IR(store->op1))) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY: /* Store to MAYBE the same location. */
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST: /* Store to the same location. */
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards (includes conflicting loads). */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t))
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ store->o = IR_NOP;
+ store->t.irt = IRT_NIL;
+ store->op1 = store->op2 = 0;
+ store->prev = 0;
+ if (ref+1 < J->cur.nins &&
+ store[1].o == IR_OBAR && store[1].op1 == xref) {
+ IRRef1 *bp = &J->chain[IR_OBAR];
+ IRIns *obar;
+ for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
+ bp = &obar->prev;
+ /* Remove OBAR, too. */
+ *bp = obar->prev;
+ obar->o = IR_NOP;
+ obar->t.irt = IRT_NIL;
+ obar->op1 = obar->op2 = 0;
+ obar->prev = 0;
+ }
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */
+
+/* Alias analysis for field access.
+** Field loads are cheap and field stores are rare.
+** Simple disambiguation based on field types is good enough.
+*/
+static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
+{
+ if (refa->op2 != refb->op2)
+ return ALIAS_NO; /* Different fields. */
+ if (refa->op1 == refb->op1)
+ return ALIAS_MUST; /* Same field, same object. */
+ else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
+ return aa_table(J, refa->op1, refb->op1); /* Disambiguate tables. */
+ else
+ return ALIAS_MAY; /* Same field, possibly different object. */
+}
+
+/* Only the loads for mutable fields end up here (see FOLD). */
+TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
+{
+ IRRef oref = fins->op1; /* Object reference. */
+ IRRef fid = fins->op2; /* Field ID. */
+ IRRef lim = oref; /* Search limit. */
+ IRRef ref;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_FSTORE];
+ while (ref > oref) {
+ IRIns *store = IR(ref);
+ switch (aa_fref(J, fins, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+ /* No conflicting store: const-fold field loads from allocations. */
+ if (fid == IRFL_TAB_META) {
+ IRIns *ir = IR(oref);
+ if (ir->o == IR_TNEW || ir->o == IR_TDUP)
+ return lj_ir_knull(J, IRT_TAB);
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ return lj_opt_cselim(J, lim);
+}
+
+/* FSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
+{
+ IRRef fref = fins->op1; /* FREF reference. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRIns *xr = IR(fref);
+ IRRef1 *refp = &J->chain[IR_FSTORE];
+ IRRef ref = *refp;
+ while (ref > fref) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_fref(J, xr, IR(store->op1))) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY:
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST:
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards or conflicting loads. */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ store->o = IR_NOP;
+ store->t.irt = IRT_NIL;
+ store->op1 = store->op2 = 0;
+ store->prev = 0;
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */
+
+/* Find cdata allocation for a reference (if any). */
+static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
+{
+ while (ir->o == IR_ADD) {
+ if (!irref_isk(ir->op1)) {
+ IRIns *ir1 = aa_findcnew(J, IR(ir->op1)); /* Left-recursion. */
+ if (ir1) return ir1;
+ }
+ if (irref_isk(ir->op2)) return NULL;
+ ir = IR(ir->op2); /* Flatten right-recursion. */
+ }
+ return ir->o == IR_CNEW ? ir : NULL;
+}
+
+/* Alias analysis for two cdata allocations. */
+static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
+{
+ IRIns *cnewa = aa_findcnew(J, refa);
+ IRIns *cnewb = aa_findcnew(J, refb);
+ if (cnewa == cnewb)
+ return ALIAS_MAY; /* Same allocation or neither is an allocation. */
+ if (cnewa && cnewb)
+ return ALIAS_NO; /* Two different allocations never alias. */
+ if (cnewb) { cnewa = cnewb; refb = refa; }
+ return aa_escape(J, cnewa, refb);
+}
+
+/* Alias analysis for XLOAD/XSTORE. */
+static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
+{
+ ptrdiff_t ofsa = 0, ofsb = 0;
+ IRIns *refb = IR(xb->op1);
+ IRIns *basea = refa, *baseb = refb;
+ /* This implements (very) strict aliasing rules.
+ ** Different types do NOT alias, except for differences in signedness.
+ ** NYI: this also prevents type punning through unions.
+ */
+ if (irt_sametype(xa->t, xb->t)) {
+ if (refa == refb)
+ return ALIAS_MUST; /* Shortcut for same refs with identical type. */
+ } else if (!(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
+ ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1)) {
+ return ALIAS_NO;
+ }
+ /* Offset-based disambiguation. */
+ if (refa->o == IR_ADD && irref_isk(refa->op2)) {
+ IRIns *irk = IR(refa->op2);
+ basea = IR(refa->op1);
+ ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+ (ptrdiff_t)irk->i;
+ if (basea == refb && ofsa != 0)
+ return ALIAS_NO; /* base+-ofs vs. base. */
+ }
+ if (refb->o == IR_ADD && irref_isk(refb->op2)) {
+ IRIns *irk = IR(refb->op2);
+ baseb = IR(refb->op1);
+ ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+ (ptrdiff_t)irk->i;
+ if (refa == baseb && ofsb != 0)
+ return ALIAS_NO; /* base vs. base+-ofs. */
+ }
+ if (basea == baseb) {
+ /* This assumes strictly-typed, non-overlapping accesses. */
+ if (ofsa != ofsb)
+ return ALIAS_NO; /* base+-o1 vs. base+-o2 and o1 != o2. */
+ return ALIAS_MUST; /* Unsigned vs. signed access to the same address. */
+ }
+ /* NYI: structural disambiguation. */
+ return aa_cnew(J, basea, baseb); /* Try to disambiguate allocations. */
+}
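+
+/* Rough examples of the rules above (illustrative only):
+**   int32_t load from p    vs. double store to p      -> ALIAS_NO
+**   int32_t load from p    vs. uint32_t store to p    -> ALIAS_MUST
+**   int32_t load from p+4  vs. int32_t store to p     -> ALIAS_NO
+**   same type and offset, different base allocations  -> aa_cnew() decides
+*/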
+
+/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */
+static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
+{
+ IRRef ref = J->chain[op];
+ IRRef lim = op1;
+ if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == op1 && ir->op2 == op2)
+ return ref;
+ ref = ir->prev;
+ }
+ return 0;
+}
+
+/* Reassociate index references. */
+static IRRef reassoc_xref(jit_State *J, IRIns *ir)
+{
+ ptrdiff_t ofs = 0;
+ if (ir->o == IR_ADD && irref_isk(ir->op2)) { /* Get constant offset. */
+ IRIns *irk = IR(ir->op2);
+ ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+ (ptrdiff_t)irk->i;
+ ir = IR(ir->op1);
+ }
+ if (ir->o == IR_ADD) { /* Add of base + index. */
+ /* Index ref > base ref for loop-carried dependences. Only check op1. */
+ IRIns *ir2, *ir1 = IR(ir->op1);
+ int32_t shift = 0;
+ IRRef idxref;
+ /* Determine index shifts. Don't bother with IR_MUL here. */
+ if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
+ shift = IR(ir1->op2)->i;
+ else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
+ shift = 1;
+ else
+ ir1 = ir;
+ ir2 = IR(ir1->op1);
+ /* A non-reassociated add. Must be a loop-carried dependence. */
+ if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
+ ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
+ else
+ return 0;
+ idxref = ir2->op1;
+ /* Try to CSE the reassociated chain. Give up if not found. */
+ if (ir1 != ir &&
+ !(idxref = reassoc_trycse(J, ir1->o, idxref,
+ ir1->o == IR_BSHL ? ir1->op2 : idxref)))
+ return 0;
+ if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
+ return 0;
+ if (ofs != 0) {
+ IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
+ if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
+ return 0;
+ }
+ return idxref; /* Success, found a reassociated index reference. Phew. */
+ }
+ return 0; /* Failure. */
+}
+
+/* XLOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
+{
+ IRRef xref = fins->op1;
+ IRIns *xr = IR(xref);
+ IRRef lim = xref; /* Search limit. */
+ IRRef ref;
+
+ if ((fins->op2 & IRXLOAD_READONLY))
+ goto cselim;
+ if ((fins->op2 & IRXLOAD_VOLATILE))
+ goto doemit;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_XSTORE];
+retry:
+ if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
+ if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ switch (aa_xref(J, xr, fins, store)) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST:
+ /* Emit conversion if the loaded type doesn't match the forwarded type. */
+ if (!irt_sametype(fins->t, IR(store->op2)->t)) {
+ IRType st = irt_type(fins->t);
+ if (st == IRT_I8 || st == IRT_I16) { /* Trunc + sign-extend. */
+ st |= IRCONV_SEXT;
+ } else if (st == IRT_U8 || st == IRT_U16) { /* Trunc + zero-extend. */
+ } else if (st == IRT_INT && !irt_isint(IR(store->op2)->t)) {
+ st = irt_type(IR(store->op2)->t); /* Needs dummy CONV.int.*. */
+ } else { /* I64/U64 are boxed, U32 is hidden behind a CONV.num.u32. */
+ goto store_fwd;
+ }
+ fins->ot = IRTI(IR_CONV);
+ fins->op1 = store->op2;
+ fins->op2 = (IRT_INT<<5)|st;
+ return RETRYFOLD;
+ }
+ store_fwd:
+ return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ ref = J->chain[IR_XLOAD];
+ while (ref > lim) {
+ /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
+ if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
+ return ref;
+ ref = IR(ref)->prev;
+ }
+
+ /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
+ if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
+ xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
+ ref = J->chain[IR_XSTORE];
+ while (ref > lim) /* Skip stores that have already been checked. */
+ ref = IR(ref)->prev;
+ lim = xref;
+ xr = IR(xref);
+ goto retry; /* Retry with the reassociated reference. */
+ }
+doemit:
+ return EMITFOLD;
+}
+
+/* XSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
+{
+ IRRef xref = fins->op1;
+ IRIns *xr = IR(xref);
+ IRRef lim = xref; /* Search limit. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRRef1 *refp = &J->chain[IR_XSTORE];
+ IRRef ref = *refp;
+ if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
+ if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
+ while (ref > lim) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_xref(J, xr, fins, store)) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY:
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST:
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards or any XLOADs (no AA performed). */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ store->o = IR_NOP;
+ store->t.irt = IRT_NIL;
+ store->op1 = store->op2 = 0;
+ store->prev = 0;
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- Forwarding of lj_tab_len -------------------------------------------- */
+
+/* This is rather simplistic right now, but better than nothing. */
+TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J)
+{
+ IRRef tab = fins->op1; /* Table reference. */
+ IRRef lim = tab; /* Search limit. */
+ IRRef ref;
+
+ /* Any ASTORE is a conflict and limits the search. */
+ if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];
+
+ /* Search for conflicting HSTORE with numeric key. */
+ ref = J->chain[IR_HSTORE];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ IRIns *href = IR(store->op1);
+ IRIns *key = IR(href->op2);
+ if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
+ lim = ref; /* Conflicting store found, limits search for TLEN. */
+ break;
+ }
+ ref = store->prev;
+ }
+
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ return lj_opt_cselim(J, lim);
+}
+
+/* -- ASTORE/HSTORE previous type analysis -------------------------------- */
+
+/* Check whether the previous value for a table store is non-nil.
+** This can be derived either from a previous store or from a previous
+** load (because all loads from tables perform a type check).
+**
+** The result of the analysis can be used to avoid the metatable check
+** and the guard against HREF returning niltv. Both of these are cheap,
+** so let's not spend too much effort on the analysis.
+**
+** A result of 1 is exact: previous value CANNOT be nil.
+** A result of 0 is inexact: previous value MAY be nil.
+*/
+int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
+{
+ /* First check stores. */
+ IRRef ref = J->chain[loadop+IRDELTA_L2S];
+ while (ref > xref) {
+ IRIns *store = IR(ref);
+ if (store->op1 == xref) { /* Same xREF. */
+ /* A nil store MAY alias, but a non-nil store MUST alias. */
+ return !irt_isnil(store->t);
+ } else if (irt_isnil(store->t)) { /* Must check any nil store. */
+ IRRef skref = IR(store->op1)->op2;
+ IRRef xkref = IR(xref)->op2;
+ /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
+ if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
+ if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
+ return 0; /* A nil store with same const key or var key MAY alias. */
+ /* Different const keys CANNOT alias. */
+ } /* Different key types CANNOT alias. */
+ } /* Other non-nil stores MAY alias. */
+ ref = store->prev;
+ }
+
+ /* Check loads since nothing could be derived from stores. */
+ ref = J->chain[loadop];
+ while (ref > xref) {
+ IRIns *load = IR(ref);
+ if (load->op1 == xref) { /* Same xREF. */
+ /* A nil load MAY alias, but a non-nil load MUST alias. */
+ return !irt_isnil(load->t);
+ } /* Other non-nil loads MAY alias. */
+ ref = load->prev;
+ }
+ return 0; /* Nothing derived at all, previous value MAY be nil. */
+}
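+
+/* A small example of what the analysis above catches:
+**   if t.x then t.x = t.x + 1 end
+** The store to t.x is preceded on the trace by a load of the same
+** reference with a non-nil recorded type, so the function returns 1 and
+** the metatable check and niltv guard can be omitted for the store.
+*/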
+
+/* ------------------------------------------------------------------------ */
+
+#undef IR
+#undef fins
+#undef fright
+
+#endif
diff --git a/src/LuaJIT/src/lj_opt_narrow.c b/src/LuaJIT/src/lj_opt_narrow.c
new file mode 100644
index 000000000..9bbb20835
--- /dev/null
+++ b/src/LuaJIT/src/lj_opt_narrow.c
@@ -0,0 +1,648 @@
+/*
+** NARROW: Narrowing of numbers to integers (double to int32_t).
+** STRIPOV: Stripping of overflow checks.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_narrow_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_str.h"
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+/* Rationale for narrowing optimizations:
+**
+** Lua has only a single number type and this is a FP double by default.
+** Narrowing doubles to integers does not pay off for the interpreter on a
+** current-generation x86/x64 machine. Most FP operations need the same
+** amount of execution resources as their integer counterparts, except
+** with slightly longer latencies. Longer latencies are a non-issue for
+** the interpreter, since they are usually hidden by other overhead.
+**
+** The total CPU execution bandwidth is the sum of the bandwidth of the FP
+** and the integer units, because they execute in parallel. The FP units
+** have an equal or higher bandwidth than the integer units. Not using
+** them means losing execution bandwidth. Moving work away from them to
+** the already quite busy integer units is a losing proposition.
+**
+** The situation for JIT-compiled code is a bit different: the higher code
+** density makes the extra latencies much more visible. Tight loops expose
+** the latencies for updating the induction variables. Array indexing
+** requires narrowing conversions with high latencies and additional
+** guards (to check that the index is really an integer). And many common
+** optimizations only work on integers.
+**
+** One solution would be speculative, eager narrowing of all number loads.
+** This causes many problems, like losing -0 or the need to resolve type
+** mismatches between traces. It also effectively forces the integer type
+** to have overflow-checking semantics. This impedes many basic
+** optimizations and requires adding overflow checks to all integer
+** arithmetic operations (whereas FP arithmetic can do without).
+**
+** Always replacing an FP op with an integer op plus an overflow check is
+** counter-productive on a current-generation super-scalar CPU. Although
+** the overflow check branches are highly predictable, they will clog the
+** execution port for the branch unit and tie up reorder buffers. This is
+** turning a pure data-flow dependency into a different data-flow
+** dependency (with slightly lower latency) *plus* a control dependency.
+** In general, you don't want to do this since latencies due to data-flow
+** dependencies can be well hidden by out-of-order execution.
+**
+** A better solution is to keep all numbers as FP values and only narrow
+** when it's beneficial to do so. LuaJIT uses predictive narrowing for
+** induction variables and demand-driven narrowing for index expressions,
+** integer arguments and bit operations. Additionally it can eliminate or
+** hoist most of the resulting overflow checks. Regular arithmetic
+** computations are never narrowed to integers.
+**
+** The integer type in the IR has convenient wrap-around semantics and
+** ignores overflow. Extra operations have been added for
+** overflow-checking arithmetic (ADDOV/SUBOV) instead of an extra type.
+** Apart from reducing overall complexity of the compiler, this also
+** nicely solves the problem where you want to apply algebraic
+** simplifications to ADD, but not to ADDOV. And the x86/x64 assembler can
+** use lea instead of an add for integer ADD, but not for ADDOV (lea does
+** not affect the flags, but it helps to avoid register moves).
+**
+**
+** All of the above has to be reconsidered for architectures with slow FP
+** operations or without a hardware FPU. The dual-number mode of LuaJIT
+** addresses this issue. Arithmetic operations are performed on integers
+** as far as possible and overflow checks are added as needed.
+**
+** This implies that narrowing for integer arguments and bit operations
+** should also strip overflow checks, e.g. replace ADDOV with ADD. The
+** original overflow guards are weak and can be eliminated by DCE, if
+** there's no other use.
+**
+** A slight twist is that it's usually beneficial to use overflow-checked
+** integer arithmetic if all inputs are already integers. This is the only
+** change that affects the single-number mode, too.
+*/
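The ADD vs. ADDOV distinction above can be pictured with a small standalone C sketch (illustrative only, not taken from the patch; the helper names are invented and two's-complement conversion is assumed):

#include <assert.h>
#include <stdint.h>

static int32_t add_wrap(int32_t a, int32_t b)   /* like IR ADD: overflow wraps */
{
  return (int32_t)((uint32_t)a + (uint32_t)b);  /* unsigned add avoids UB */
}

static int add_ov(int32_t a, int32_t b, int32_t *res)  /* like ADDOV: guarded */
{
  int64_t s = (int64_t)a + (int64_t)b;
  if (s < INT32_MIN || s > INT32_MAX) return 1;  /* guard would trigger here */
  *res = (int32_t)s;
  return 0;
}

int main(void)
{
  int32_t r;
  assert(add_wrap(INT32_MAX, 1) == INT32_MIN);   /* silently wraps around */
  assert(add_ov(INT32_MAX, 1, &r) == 1);         /* overflow is reported */
  assert(add_ov(1, 2, &r) == 0 && r == 3);
  return 0;
}

Keeping the two as separate opcodes is what lets FOLD freely simplify the wrapping variant while leaving the checked one alone, as noted above.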
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- Elimination of narrowing type conversions --------------------------- */
+
+/* Narrowing of index expressions and bit operations is demand-driven. The
+** trace recorder emits a narrowing type conversion (CONV.int.num or TOBIT)
+** in all of these cases (e.g. array indexing or string indexing). FOLD
+** already takes care of eliminating simple redundant conversions like
+** CONV.int.num(CONV.num.int(x)) ==> x.
+**
+** But the surrounding code is FP-heavy and arithmetic operations are
+** performed on FP numbers (for the single-number mode). Consider a common
+** example such as 'x=t[i+1]', with 'i' already an integer (due to induction
+** variable narrowing). The index expression would be recorded as
+** CONV.int.num(ADD(CONV.num.int(i), 1))
+** which is clearly suboptimal.
+**
+** One can do better by recursively backpropagating the narrowing type
+** conversion across FP arithmetic operations. This turns FP ops into
+** their corresponding integer counterparts. Depending on the semantics of
+** the conversion they also need to check for overflow. Currently only ADD
+** and SUB are supported.
+**
+** The above example can be rewritten as
+** ADDOV(CONV.int.num(CONV.num.int(i)), 1)
+** and then into ADDOV(i, 1) after folding of the conversions. The original
+** FP ops remain in the IR and are eliminated by DCE since all references to
+** them are gone.
+**
+** [In dual-number mode the trace recorder already emits ADDOV etc., but
+** this can be further reduced. See below.]
+**
+** Special care has to be taken to avoid narrowing across an operation
+** which is potentially operating on non-integral operands. One obvious
+** case is when an expression contains a non-integral constant, but ends
+** up as an integer index at runtime (like t[x+1.5] with x=0.5).
+**
+** Operations with two non-constant operands illustrate a similar problem
+** (like t[a+b] with a=1.5 and b=2.5). Backpropagation has to stop there,
+** unless it can be proven that either operand is integral (e.g. by CSEing
+** a previous conversion). As a not-so-obvious corollary this logic also
+** applies to a whole expression tree (e.g. t[(a+1)+(b+1)]).
+**
+** Correctness of the transformation is guaranteed by never expanding the
+** tree with more conversions than the single one we would have to emit
+** without backpropagating. TOBIT employs a more optimistic rule, because
+** the conversion has special semantics, designed to make the life of the
+** compiler writer easier. ;-)
+**
+** Using on-the-fly backpropagation of an expression tree doesn't work
+** because it's unknown whether the transform is correct until the end.
+** This either requires IR rollback and cache invalidation for every
+** subtree or a two-pass algorithm. The former didn't work out too well,
+** so the code now combines a recursive collector with a stack-based
+** emitter.
+**
+** [A recursive backpropagation algorithm with backtracking, employing
+** skip-list lookup and round-robin caching, emitting stack operations
+** on-the-fly for a stack-based interpreter -- and all of that in a meager
+** kilobyte? Yep, compilers are a great treasure chest. Throw away your
+** textbooks and read the codebase of a compiler today!]
+**
+** There's another optimization opportunity for array indexing: it's
+** always accompanied by an array bounds-check. The outermost overflow
+** check may be delegated to the ABC operation. This works because ABC is
+** an unsigned comparison and wrap-around due to overflow creates negative
+** numbers.
+**
+** But this optimization is only valid for constants that cannot overflow
+** an int32_t into the range of valid array indexes [0..2^27+1). A check
+** for +-2^30 is safe since -2^31 - 2^30 wraps to 2^30 and 2^31-1 + 2^30
+** wraps to -2^30-1.
+**
+** It's also good enough in practice, since e.g. t[i+1] or t[i-10] are
+** quite common. So the above example finally ends up as ADD(i, 1)!
+**
+** Later on, the assembler is able to fuse the whole array reference and
+** the ADD into the memory operands of loads and other instructions. This
+** is why LuaJIT is able to generate very pretty (and fast) machine code
+** for array indexing. And that, my dear, concludes another story about
+** one of the hidden secrets of LuaJIT ...
+*/
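The "+-2^30" constant test shows up further down as '(uint32_t)k + 0x40000000u < 0x80000000u'. A throwaway sketch of why one unsigned compare captures the window -2^30 <= k < 2^30 (standalone, with an invented function name):

#include <assert.h>
#include <stdint.h>

static int small_enough(int32_t k)   /* same test as used in the code below */
{
  return (uint32_t)k + 0x40000000u < 0x80000000u;  /* shifts window to [0,2^31) */
}

int main(void)
{
  assert(small_enough(1) && small_enough(-10));
  assert(small_enough(-(1 << 30)) && !small_enough(1 << 30));
  assert(!small_enough(INT32_MIN) && !small_enough(INT32_MAX));
  return 0;
}

Constants outside that window do not get the relaxed treatment; their overflow check stays.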
+
+/* Maximum backpropagation depth and maximum stack size. */
+#define NARROW_MAX_BACKPROP 100
+#define NARROW_MAX_STACK 256
+
+/* The stack machine has a 32 bit instruction format: [IROpT | IRRef1]
+** The lower 16 bits hold a reference (or 0). The upper 16 bits hold
+** the IR opcode + type or one of the following special opcodes:
+*/
+enum {
+ NARROW_REF, /* Push ref. */
+ NARROW_CONV, /* Push conversion of ref. */
+ NARROW_SEXT, /* Push sign-extension of ref. */
+ NARROW_INT /* Push KINT ref. The next code holds an int32_t. */
+};
+
+typedef uint32_t NarrowIns;
+
+#define NARROWINS(op, ref) (((op) << 16) + (ref))
+#define narrow_op(ins) ((IROpT)((ins) >> 16))
+#define narrow_ref(ins) ((IRRef1)(ins))
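A quick round trip of this 16+16 bit packing (standalone check, not from the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
  uint32_t op = 0x1234, ref = 0xbeef;
  uint32_t ins = (op << 16) + ref;       /* NARROWINS(op, ref) */
  assert((uint16_t)(ins >> 16) == op);   /* narrow_op(ins) */
  assert((uint16_t)ins == ref);          /* narrow_ref(ins) */
  return 0;
}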
+
+/* Context used for narrowing of type conversions. */
+typedef struct NarrowConv {
+ jit_State *J; /* JIT compiler state. */
+ NarrowIns *sp; /* Current stack pointer. */
+ NarrowIns *maxsp; /* Maximum stack pointer minus redzone. */
+ int lim; /* Limit on the number of emitted conversions. */
+ IRRef mode; /* Conversion mode (IRCONV_*). */
+ IRType t; /* Destination type: IRT_INT or IRT_I64. */
+ NarrowIns stack[NARROW_MAX_STACK]; /* Stack holding stack-machine code. */
+} NarrowConv;
+
+/* Lookup a reference in the backpropagation cache. */
+static BPropEntry *narrow_bpc_get(jit_State *J, IRRef1 key, IRRef mode)
+{
+ ptrdiff_t i;
+ for (i = 0; i < BPROP_SLOTS; i++) {
+ BPropEntry *bp = &J->bpropcache[i];
+ /* Stronger checks are ok, too. */
+ if (bp->key == key && bp->mode >= mode &&
+ ((bp->mode ^ mode) & IRCONV_MODEMASK) == 0)
+ return bp;
+ }
+ return NULL;
+}
+
+/* Add an entry to the backpropagation cache. */
+static void narrow_bpc_set(jit_State *J, IRRef1 key, IRRef1 val, IRRef mode)
+{
+ uint32_t slot = J->bpropslot;
+ BPropEntry *bp = &J->bpropcache[slot];
+ J->bpropslot = (slot + 1) & (BPROP_SLOTS-1);
+ bp->key = key;
+ bp->val = val;
+ bp->mode = mode;
+}
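The cache is overwritten round-robin, and the '& (BPROP_SLOTS-1)' wrap only works for a power-of-two slot count. A standalone illustration (16 is merely a stand-in value here):

#include <assert.h>

#define SLOTS 16  /* stand-in; must be a power of two for the mask to work */

int main(void)
{
  unsigned slot = 0, i;
  for (i = 0; i < 40; i++)
    slot = (slot + 1) & (SLOTS - 1);  /* wraps 15 -> 0 without a modulo */
  assert(slot == 40 % SLOTS);
  return 0;
}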
+
+/* Backpropagate overflow stripping. */
+static void narrow_stripov_backprop(NarrowConv *nc, IRRef ref, int depth)
+{
+ jit_State *J = nc->J;
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_ADDOV || ir->o == IR_SUBOV ||
+ (ir->o == IR_MULOV && (nc->mode & IRCONV_CONVMASK) == IRCONV_ANY)) {
+ BPropEntry *bp = narrow_bpc_get(nc->J, ref, IRCONV_TOBIT);
+ if (bp) {
+ ref = bp->val;
+ } else if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
+ narrow_stripov_backprop(nc, ir->op1, depth);
+ narrow_stripov_backprop(nc, ir->op2, depth);
+ *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref);
+ return;
+ }
+ }
+ *nc->sp++ = NARROWINS(NARROW_REF, ref);
+}
+
+/* Backpropagate narrowing conversion. Return number of needed conversions. */
+static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth)
+{
+ jit_State *J = nc->J;
+ IRIns *ir = IR(ref);
+ IRRef cref;
+
+ /* Check the easy cases first. */
+ if (ir->o == IR_CONV && (ir->op2 & IRCONV_SRCMASK) == IRT_INT) {
+ if ((nc->mode & IRCONV_CONVMASK) <= IRCONV_ANY)
+ narrow_stripov_backprop(nc, ir->op1, depth+1);
+ else
+ *nc->sp++ = NARROWINS(NARROW_REF, ir->op1); /* Undo conversion. */
+ if (nc->t == IRT_I64)
+ *nc->sp++ = NARROWINS(NARROW_SEXT, 0); /* Sign-extend integer. */
+ return 0;
+ } else if (ir->o == IR_KNUM) { /* Narrow FP constant. */
+ lua_Number n = ir_knum(ir)->n;
+ if ((nc->mode & IRCONV_CONVMASK) == IRCONV_TOBIT) {
+ /* Allows a wider range of constants. */
+ int64_t k64 = (int64_t)n;
+ if (n == (lua_Number)k64) { /* Only if const doesn't lose precision. */
+ *nc->sp++ = NARROWINS(NARROW_INT, 0);
+ *nc->sp++ = (NarrowIns)k64; /* But always truncate to 32 bits. */
+ return 0;
+ }
+ } else {
+ int32_t k = lj_num2int(n);
+ /* Only if constant is a small integer. */
+ if (checki16(k) && n == (lua_Number)k) {
+ *nc->sp++ = NARROWINS(NARROW_INT, 0);
+ *nc->sp++ = (NarrowIns)k;
+ return 0;
+ }
+ }
+ return 10; /* Never narrow other FP constants (this is rare). */
+ }
+
+ /* Try to CSE the conversion. Stronger checks are ok, too. */
+ cref = J->chain[fins->o];
+ while (cref > ref) {
+ IRIns *cr = IR(cref);
+ if (cr->op1 == ref &&
+ (fins->o == IR_TOBIT ||
+ ((cr->op2 & IRCONV_MODEMASK) == (nc->mode & IRCONV_MODEMASK) &&
+ irt_isguard(cr->t) >= irt_isguard(fins->t)))) {
+ *nc->sp++ = NARROWINS(NARROW_REF, cref);
+ return 0; /* Already there, no additional conversion needed. */
+ }
+ cref = cr->prev;
+ }
+
+ /* Backpropagate across ADD/SUB. */
+ if (ir->o == IR_ADD || ir->o == IR_SUB) {
+ /* Try cache lookup first. */
+ IRRef mode = nc->mode;
+ BPropEntry *bp;
+ /* Inner conversions need a stronger check. */
+ if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX && depth > 0)
+ mode += IRCONV_CHECK-IRCONV_INDEX;
+ bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
+ if (bp) {
+ *nc->sp++ = NARROWINS(NARROW_REF, bp->val);
+ return 0;
+ } else if (nc->t == IRT_I64) {
+ /* Try sign-extending from an existing (checked) conversion to int. */
+ mode = (IRT_INT<<5)|IRT_NUM|IRCONV_INDEX;
+ bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
+ if (bp) {
+ *nc->sp++ = NARROWINS(NARROW_REF, bp->val);
+ *nc->sp++ = NARROWINS(NARROW_SEXT, 0);
+ return 0;
+ }
+ }
+ if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
+ NarrowIns *savesp = nc->sp;
+ int count = narrow_conv_backprop(nc, ir->op1, depth);
+ count += narrow_conv_backprop(nc, ir->op2, depth);
+ if (count <= nc->lim) { /* Limit total number of conversions. */
+ *nc->sp++ = NARROWINS(IRT(ir->o, nc->t), ref);
+ return count;
+ }
+ nc->sp = savesp; /* Too many conversions, need to backtrack. */
+ }
+ }
+
+ /* Otherwise add a conversion. */
+ *nc->sp++ = NARROWINS(NARROW_CONV, ref);
+ return 1;
+}
+
+/* Emit the conversions collected during backpropagation. */
+static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc)
+{
+ /* The fins fields must be saved now -- emitir() overwrites them. */
+ IROpT guardot = irt_isguard(fins->t) ? IRTG(IR_ADDOV-IR_ADD, 0) : 0;
+ IROpT convot = fins->ot;
+ IRRef1 convop2 = fins->op2;
+ NarrowIns *next = nc->stack; /* List of instructions from backpropagation. */
+ NarrowIns *last = nc->sp;
+ NarrowIns *sp = nc->stack; /* Recycle the stack to store operands. */
+ while (next < last) { /* Simple stack machine to process the ins. list. */
+ NarrowIns ref = *next++;
+ IROpT op = narrow_op(ref);
+ if (op == NARROW_REF) {
+ *sp++ = ref;
+ } else if (op == NARROW_CONV) {
+ *sp++ = emitir_raw(convot, ref, convop2); /* Raw emit avoids a loop. */
+ } else if (op == NARROW_SEXT) {
+ lua_assert(sp >= nc->stack+1);
+ sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1],
+ (IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
+ } else if (op == NARROW_INT) {
+ lua_assert(next < last);
+ *sp++ = nc->t == IRT_I64 ?
+ lj_ir_kint64(J, (int64_t)(int32_t)*next++) :
+ lj_ir_kint(J, *next++);
+ } else { /* Regular IROpT. Pops two operands and pushes one result. */
+ IRRef mode = nc->mode;
+ lua_assert(sp >= nc->stack+2);
+ sp--;
+ /* Omit some overflow checks for array indexing. See comments above. */
+ if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) {
+ if (next == last && irref_isk(narrow_ref(sp[0])) &&
+ (uint32_t)IR(narrow_ref(sp[0]))->i + 0x40000000u < 0x80000000u)
+ guardot = 0;
+ else /* Otherwise cache a stronger check. */
+ mode += IRCONV_CHECK-IRCONV_INDEX;
+ }
+ sp[-1] = emitir(op+guardot, sp[-1], sp[0]);
+ /* Add to cache. */
+ if (narrow_ref(ref))
+ narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode);
+ }
+ }
+ lua_assert(sp == nc->stack+1);
+ return nc->stack[0];
+}
+
+/* Narrow a type conversion of an arithmetic operation. */
+TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J)
+{
+ if ((J->flags & JIT_F_OPT_NARROW)) {
+ NarrowConv nc;
+ nc.J = J;
+ nc.sp = nc.stack;
+ nc.maxsp = &nc.stack[NARROW_MAX_STACK-4];
+ nc.t = irt_type(fins->t);
+ if (fins->o == IR_TOBIT) {
+ nc.mode = IRCONV_TOBIT; /* Used only in the backpropagation cache. */
+ nc.lim = 2; /* TOBIT can use a more optimistic rule. */
+ } else {
+ nc.mode = fins->op2;
+ nc.lim = 1;
+ }
+ if (narrow_conv_backprop(&nc, fins->op1, 0) <= nc.lim)
+ return narrow_conv_emit(J, &nc);
+ }
+ return NEXTFOLD;
+}
+
+/* -- Narrowing of implicit conversions ----------------------------------- */
+
+/* Recursively strip overflow checks. */
+static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode)
+{
+ IRRef ref = tref_ref(tr);
+ IRIns *ir = IR(ref);
+ int op = ir->o;
+ if (op >= IR_ADDOV && op <= lastop) {
+ BPropEntry *bp = narrow_bpc_get(J, ref, mode);
+ if (bp) {
+ return TREF(bp->val, irt_t(IR(bp->val)->t));
+ } else {
+ IRRef op1 = ir->op1, op2 = ir->op2; /* The IR may be reallocated. */
+ op1 = narrow_stripov(J, op1, lastop, mode);
+ op2 = narrow_stripov(J, op2, lastop, mode);
+ tr = emitir(IRT(op - IR_ADDOV + IR_ADD,
+ ((mode & IRCONV_DSTMASK) >> IRCONV_DSH)), op1, op2);
+ narrow_bpc_set(J, ref, tref_ref(tr), mode);
+ }
+ } else if (LJ_64 && (mode & IRCONV_SEXT) && !irt_is64(ir->t)) {
+ tr = emitir(IRT(IR_CONV, IRT_INTP), tr, mode);
+ }
+ return tr;
+}
+
+/* Narrow array index. */
+TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr)
+{
+ IRIns *ir;
+ lua_assert(tref_isnumber(tr));
+ if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */
+ return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX);
+ /* Omit some overflow checks for array indexing. See comments above. */
+ ir = IR(tref_ref(tr));
+ if ((ir->o == IR_ADDOV || ir->o == IR_SUBOV) && irref_isk(ir->op2) &&
+ (uint32_t)IR(ir->op2)->i + 0x40000000u < 0x80000000u)
+ return emitir(IRTI(ir->o - IR_ADDOV + IR_ADD), ir->op1, ir->op2);
+ return tr;
+}
+
+/* Narrow conversion to integer operand (overflow undefined). */
+TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr)
+{
+ if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */
+ return emitir(IRTI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_ANY);
+ if (!tref_isinteger(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ /*
+ ** Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV.
+ ** Use IRCONV_TOBIT for the cache entries, since the semantics are the same.
+ */
+ return narrow_stripov(J, tr, IR_MULOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
+}
+
+/* Narrow conversion to bitop operand (overflow wrapped). */
+TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr)
+{
+ if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */
+ return emitir(IRTI(IR_TOBIT), tr, lj_ir_knum_tobit(J));
+ if (!tref_isinteger(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ /*
+ ** Wrapped overflow semantics allow stripping of ADDOV and SUBOV.
+ ** MULOV cannot be stripped due to precision widening.
+ */
+ return narrow_stripov(J, tr, IR_SUBOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
+}
+
+#if LJ_HASFFI
+/* Narrow C array index (overflow undefined). */
+TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr)
+{
+ lua_assert(tref_isnumber(tr));
+ if (tref_isnum(tr))
+ return emitir(IRT(IR_CONV, IRT_INTP), tr,
+ (IRT_INTP<<5)|IRT_NUM|IRCONV_TRUNC|IRCONV_ANY);
+ /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */
+ return narrow_stripov(J, tr, IR_MULOV,
+ LJ_64 ? ((IRT_INTP<<5)|IRT_INT|IRCONV_SEXT) :
+ ((IRT_INTP<<5)|IRT_INT|IRCONV_TOBIT));
+}
+#endif
+
+/* -- Narrowing of arithmetic operators ----------------------------------- */
+
+/* Check whether a number fits into an int32_t (-0 is ok, too). */
+static int numisint(lua_Number n)
+{
+ return (n == (lua_Number)lj_num2int(n));
+}
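A self-contained approximation of this predicate (numisint_sketch is not a LuaJIT function): a double passes iff it is an integral value in int32_t range, and -0.0 passes as promised by the comment.

#include <assert.h>
#include <math.h>     /* link with -lm if needed */
#include <stdint.h>

static int numisint_sketch(double n)
{
  return n >= -2147483648.0 && n <= 2147483647.0 && floor(n) == n;
}

int main(void)
{
  assert(numisint_sketch(-0.0) && numisint_sketch(42.0));
  assert(!numisint_sketch(0.5) && !numisint_sketch(2147483648.0));
  return 0;
}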
+
+/* Narrowing of arithmetic operations. */
+TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
+ TValue *vb, TValue *vc, IROp op)
+{
+ if (tref_isstr(rb)) {
+ rb = emitir(IRTG(IR_STRTO, IRT_NUM), rb, 0);
+ lj_str_tonum(strV(vb), vb);
+ }
+ if (tref_isstr(rc)) {
+ rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
+ lj_str_tonum(strV(vc), vc);
+ }
+ /* Must not narrow MUL in non-DUALNUM variant, because it loses -0. */
+ if ((op >= IR_ADD && op <= (LJ_DUALNUM ? IR_MUL : IR_SUB)) &&
+ tref_isinteger(rb) && tref_isinteger(rc) &&
+ numisint(lj_vm_foldarith(numberVnum(vb), numberVnum(vc),
+ (int)op - (int)IR_ADD)))
+ return emitir(IRTGI((int)op - (int)IR_ADD + (int)IR_ADDOV), rb, rc);
+ if (!tref_isnum(rb)) rb = emitir(IRTN(IR_CONV), rb, IRCONV_NUM_INT);
+ if (!tref_isnum(rc)) rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
+ return emitir(IRTN(op), rb, rc);
+}
+
+/* Narrowing of unary minus operator. */
+TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc)
+{
+ if (tref_isstr(rc)) {
+ rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
+ lj_str_tonum(strV(vc), vc);
+ }
+ if (tref_isinteger(rc)) {
+ if ((uint32_t)numberVint(vc) != 0x80000000u)
+ return emitir(IRTGI(IR_SUBOV), lj_ir_kint(J, 0), rc);
+ rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
+ }
+ return emitir(IRTN(IR_NEG), rc, lj_ir_knum_neg(J));
+}
+
+/* Narrowing of modulo operator. */
+TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc)
+{
+ TRef tmp;
+ if (tvisstr(vc) && !lj_str_tonum(strV(vc), vc))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ if ((LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) &&
+ tref_isinteger(rb) && tref_isinteger(rc) &&
+ (tvisint(vc) ? intV(vc) != 0 : !tviszero(vc))) {
+ emitir(IRTGI(IR_NE), rc, lj_ir_kint(J, 0));
+ return emitir(IRTI(IR_MOD), rb, rc);
+ }
+ /* b % c ==> b - floor(b/c)*c */
+ rb = lj_ir_tonum(J, rb);
+ rc = lj_ir_tonum(J, rc);
+ tmp = emitir(IRTN(IR_DIV), rb, rc);
+ tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_FLOOR);
+ tmp = emitir(IRTN(IR_MUL), tmp, rc);
+ return emitir(IRTN(IR_SUB), rb, tmp);
+}
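The floor-based fallback matches Lua's modulo semantics, where the result takes the sign of the divisor, unlike C's fmod. A tiny standalone check (the function name is invented):

#include <assert.h>
#include <math.h>   /* link with -lm if needed */

static double lua_mod_sketch(double b, double c)
{
  return b - floor(b / c) * c;   /* b % c ==> b - floor(b/c)*c */
}

int main(void)
{
  assert(lua_mod_sketch(5.0, 3.0) == 2.0);
  assert(lua_mod_sketch(-5.0, 3.0) == 1.0);   /* fmod would give -2 */
  assert(lua_mod_sketch(5.0, -3.0) == -1.0);  /* fmod would give 2 */
  return 0;
}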
+
+/* Narrowing of power operator or math.pow. */
+TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc)
+{
+ if (tvisstr(vc) && !lj_str_tonum(strV(vc), vc))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ /* Narrowing must be unconditional to preserve (-x)^i semantics. */
+ if (tvisint(vc) || numisint(numV(vc))) {
+ int checkrange = 0;
+ /* Split pow is faster for bigger exponents. But do this only for (+k)^i. */
+ if (tref_isk(rb) && (int32_t)ir_knum(IR(tref_ref(rb)))->u32.hi >= 0) {
+ int32_t k = numberVint(vc);
+ if (!(k >= -65536 && k <= 65536)) goto split_pow;
+ checkrange = 1;
+ }
+ if (!tref_isinteger(rc)) {
+ if (tref_isstr(rc))
+ rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
+ /* Guarded conversion to integer! */
+ rc = emitir(IRTGI(IR_CONV), rc, IRCONV_INT_NUM|IRCONV_CHECK);
+ }
+ if (checkrange && !tref_isk(rc)) { /* Range guard: -65536 <= i <= 65536 */
+ TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536));
+ emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536));
+ }
+ return emitir(IRTN(IR_POW), rb, rc);
+ }
+split_pow:
+ /* FOLD covers most cases, but some are easier to do here. */
+ if (tref_isk(rb) && tvispone(ir_knum(IR(tref_ref(rb)))))
+ return rb; /* 1 ^ x ==> 1 */
+ rc = lj_ir_tonum(J, rc);
+ if (tref_isk(rc) && ir_knum(IR(tref_ref(rc)))->n == 0.5)
+ return emitir(IRTN(IR_FPMATH), rb, IRFPM_SQRT); /* x ^ 0.5 ==> sqrt(x) */
+ /* Split up b^c into exp2(c*log2(b)). Assembler may rejoin later. */
+ rb = emitir(IRTN(IR_FPMATH), rb, IRFPM_LOG2);
+ rc = emitir(IRTN(IR_MUL), rb, rc);
+ return emitir(IRTN(IR_FPMATH), rc, IRFPM_EXP2);
+}
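The split path relies on the identity b^c == exp2(c*log2(b)) for positive b; a quick standalone sanity check:

#include <assert.h>
#include <math.h>   /* link with -lm if needed */

int main(void)
{
  double b = 3.0, c = 4.0;  /* 3^4 == 81 */
  assert(fabs(exp2(c * log2(b)) - pow(b, c)) < 1e-9);
  return 0;
}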
+
+/* -- Predictive narrowing of induction variables ------------------------- */
+
+/* Narrow a single runtime value. */
+static int narrow_forl(jit_State *J, cTValue *o)
+{
+ if (tvisint(o)) return 1;
+ if (LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) return numisint(numV(o));
+ return 0;
+}
+
+/* Narrow the FORL index type by looking at the runtime values. */
+IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv)
+{
+ lua_assert(tvisnumber(&tv[FORL_IDX]) &&
+ tvisnumber(&tv[FORL_STOP]) &&
+ tvisnumber(&tv[FORL_STEP]));
+ /* Narrow only if the runtime values of start/stop/step are all integers. */
+ if (narrow_forl(J, &tv[FORL_IDX]) &&
+ narrow_forl(J, &tv[FORL_STOP]) &&
+ narrow_forl(J, &tv[FORL_STEP])) {
+ /* And if the loop index can't possibly overflow. */
+ lua_Number step = numberVnum(&tv[FORL_STEP]);
+ lua_Number sum = numberVnum(&tv[FORL_STOP]) + step;
+ if (0 <= step ? (sum <= 2147483647.0) : (sum >= -2147483648.0))
+ return IRT_INT;
+ }
+ return IRT_NUM;
+}
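A standalone restatement of the overflow test (names invented): in a narrowed integer loop the index is stepped once past stop before the loop terminates, so stop+step itself must still fit in int32_t.

#include <assert.h>

static int forl_fits_int(double stop, double step)
{
  double sum = stop + step;   /* mirrors the check above */
  return step >= 0 ? (sum <= 2147483647.0) : (sum >= -2147483648.0);
}

int main(void)
{
  assert(forl_fits_int(100.0, 1.0));
  assert(!forl_fits_int(2147483647.0, 1.0));  /* would step to 2^31 */
  return 0;
}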
+
+#undef IR
+#undef fins
+#undef emitir
+#undef emitir_raw
+
+#endif
diff --git a/src/LuaJIT/src/lj_opt_split.c b/src/LuaJIT/src/lj_opt_split.c
new file mode 100644
index 000000000..72720e86e
--- /dev/null
+++ b/src/LuaJIT/src/lj_opt_split.c
@@ -0,0 +1,725 @@
+/*
+** SPLIT: Split 64 bit IR instructions into 32 bit IR instructions.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_split_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT && (LJ_SOFTFP || (LJ_32 && LJ_HASFFI))
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_vm.h"
+
+/* SPLIT pass:
+**
+** This pass splits up 64 bit IR instructions into multiple 32 bit IR
+** instructions. It's only active for soft-float targets or for 32 bit CPUs
+** which lack native 64 bit integer operations (the FFI is currently the
+** only emitter for 64 bit integer instructions).
+**
+** Splitting the IR in a separate pass keeps each 32 bit IR assembler
+** backend simple. Only a small amount of extra functionality needs to be
+** implemented. This is much easier than adding support for allocating
+** register pairs to each backend (believe me, I tried). A few simple, but
+** important optimizations can be performed by the SPLIT pass, which would
+** be tedious to do in the backend.
+**
+** The basic idea is to replace each 64 bit IR instruction with its 32 bit
+** equivalent plus an extra HIOP instruction. The split IR is not passed
+** through FOLD or any other optimizations, so each HIOP is guaranteed to
+** immediately follow its counterpart. The actual functionality of HIOP is
+** inferred from the previous instruction.
+**
+** The operands of HIOP hold the hiword input references. The output of HIOP
+** is the hiword output reference, which is also used to hold the hiword
+** register or spill slot information. The register allocator treats this
+** instruction independently of any other instruction, which improves code
+** quality compared to using fixed register pairs.
+**
+** It's easier to split up some instructions into two regular 32 bit
+** instructions. E.g. XLOAD is split up into two XLOADs with two different
+** addresses. Obviously 64 bit constants need to be split up into two 32 bit
+** constants, too. Some hiword instructions can be entirely omitted, e.g.
+** when zero-extending a 32 bit value to 64 bits. 64 bit arguments for calls
+** are split up into two 32 bit arguments each.
+**
+** On soft-float targets, floating-point instructions are directly converted
+** to soft-float calls by the SPLIT pass (except for comparisons and MIN/MAX).
+** HIOP for number results has the type IRT_SOFTFP ("sfp" in -jdump).
+**
+** Here's the IR and x64 machine code for 'x.b = x.a + 1' for a struct with
+** two int64_t fields:
+**
+** 0100 p32 ADD base +8
+** 0101 i64 XLOAD 0100
+** 0102 i64 ADD 0101 +1
+** 0103 p32 ADD base +16
+** 0104 i64 XSTORE 0103 0102
+**
+** mov rax, [esi+0x8]
+** add rax, +0x01
+** mov [esi+0x10], rax
+**
+** Here's the transformed IR and the x86 machine code after the SPLIT pass:
+**
+** 0100 p32 ADD base +8
+** 0101 int XLOAD 0100
+** 0102 p32 ADD base +12
+** 0103 int XLOAD 0102
+** 0104 int ADD 0101 +1
+** 0105 int HIOP 0103 +0
+** 0106 p32 ADD base +16
+** 0107 int XSTORE 0106 0104
+** 0108 p32 ADD base +20
+** 0109 int XSTORE 0108 0105
+**
+** mov eax, [esi+0x8]
+** mov ecx, [esi+0xc]
+** add eax, +0x01
+** adc ecx, +0x00
+** mov [esi+0x10], eax
+** mov [esi+0x14], ecx
+**
+** You may notice the reassociated hiword address computation, which is
+** later fused into the mov operands by the assembler.
+*/
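The ADD/HIOP pair in the example corresponds to an add-with-carry. In portable C, the same decomposition reads as follows (standalone sketch, names invented):

#include <assert.h>
#include <stdint.h>

static uint64_t add64_split(uint32_t alo, uint32_t ahi,
                            uint32_t blo, uint32_t bhi)
{
  uint32_t lo = alo + blo;               /* 0104 int ADD          */
  uint32_t hi = ahi + bhi + (lo < alo);  /* 0105 int HIOP ('adc') */
  return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
  uint64_t a = 0x00000001ffffffffULL, b = 1;
  assert(add64_split((uint32_t)a, (uint32_t)(a >> 32),
                     (uint32_t)b, (uint32_t)(b >> 32)) == a + b);
  return 0;
}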
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Directly emit the transformed IR without updating chains etc. */
+static IRRef split_emit(jit_State *J, uint16_t ot, IRRef1 op1, IRRef1 op2)
+{
+ IRRef nref = lj_ir_nextins(J);
+ IRIns *ir = IR(nref);
+ ir->ot = ot;
+ ir->op1 = op1;
+ ir->op2 = op2;
+ return nref;
+}
+
+#if LJ_SOFTFP
+/* Emit a (checked) number to integer conversion. */
+static IRRef split_num2int(jit_State *J, IRRef lo, IRRef hi, int check)
+{
+ IRRef tmp, res;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), lo, hi);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hi, lo);
+#endif
+ res = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_softfp_d2i);
+ if (check) {
+ tmp = split_emit(J, IRTI(IR_CALLN), res, IRCALL_softfp_i2d);
+ split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+ split_emit(J, IRTGI(IR_EQ), tmp, lo);
+ split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), tmp+1, hi);
+ }
+ return res;
+}
+
+/* Emit a CALLN with one split 64 bit argument. */
+static IRRef split_call_l(jit_State *J, IRRef1 *hisubst, IRIns *oir,
+ IRIns *ir, IRCallID id)
+{
+ IRRef tmp, op1 = ir->op1;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+#endif
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
+ return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+}
+
+/* Emit a CALLN with one split 64 bit argument and a 32 bit argument. */
+static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir,
+ IRIns *ir, IRCallID id)
+{
+ IRRef tmp, op1 = ir->op1, op2 = ir->op2;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+#endif
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
+ return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+}
+#endif
+
+/* Emit a CALLN with two split 64 bit arguments. */
+static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir,
+ IRIns *ir, IRCallID id)
+{
+ IRRef tmp, op1 = ir->op1, op2 = ir->op2;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
+#endif
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
+ return split_emit(J,
+ IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
+ tmp, tmp);
+}
+
+/* Get a pointer to the other 32 bit word (LE: hiword, BE: loword). */
+static IRRef split_ptr(jit_State *J, IRIns *oir, IRRef ref)
+{
+ IRRef nref = oir[ref].prev;
+ IRIns *ir = IR(nref);
+ int32_t ofs = 4;
+ if (ir->o == IR_ADD && irref_isk(ir->op2) && !irt_isphi(oir[ref].t)) {
+ /* Reassociate address. */
+ ofs += IR(ir->op2)->i;
+ nref = ir->op1;
+ if (ofs == 0) return nref;
+ }
+ return split_emit(J, IRTI(IR_ADD), nref, lj_ir_kint(J, ofs));
+}
+
+/* Transform the old IR to the new IR. */
+static void split_ir(jit_State *J)
+{
+ IRRef nins = J->cur.nins, nk = J->cur.nk;
+ MSize irlen = nins - nk;
+ MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1));
+ IRIns *oir = (IRIns *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf, need);
+ IRRef1 *hisubst;
+ IRRef ref;
+
+ /* Copy old IR to buffer. */
+ memcpy(oir, IR(nk), irlen*sizeof(IRIns));
+ /* Bias hiword substitution table and old IR. Loword kept in field prev. */
+ hisubst = (IRRef1 *)&oir[irlen] - nk;
+ oir -= nk;
+
+ /* Remove all IR instructions, but retain IR constants. */
+ J->cur.nins = REF_FIRST;
+ J->loopref = 0;
+
+ /* Process constants and fixed references. */
+ for (ref = nk; ref <= REF_BASE; ref++) {
+ IRIns *ir = &oir[ref];
+ if ((LJ_SOFTFP && ir->o == IR_KNUM) || ir->o == IR_KINT64) {
+ /* Split up 64 bit constant. */
+ TValue tv = *ir_k64(ir);
+ ir->prev = lj_ir_kint(J, (int32_t)tv.u32.lo);
+ hisubst[ref] = lj_ir_kint(J, (int32_t)tv.u32.hi);
+ } else {
+ ir->prev = ref; /* Identity substitution for loword. */
+ hisubst[ref] = 0;
+ }
+ }
+
+ /* Process old IR instructions. */
+ for (ref = REF_FIRST; ref < nins; ref++) {
+ IRIns *ir = &oir[ref];
+ IRRef nref = lj_ir_nextins(J);
+ IRIns *nir = IR(nref);
+ IRRef hi = 0;
+
+ /* Copy-substitute old instruction to new instruction. */
+ nir->op1 = ir->op1 < nk ? ir->op1 : oir[ir->op1].prev;
+ nir->op2 = ir->op2 < nk ? ir->op2 : oir[ir->op2].prev;
+ ir->prev = nref; /* Loword substitution. */
+ nir->o = ir->o;
+ nir->t.irt = ir->t.irt & ~(IRT_MARK|IRT_ISPHI);
+ hisubst[ref] = 0;
+
+ /* Split 64 bit instructions. */
+#if LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */
+ /* Note: hi ref = lo ref + 1! Required for SNAP_SOFTFPNUM logic. */
+ switch (ir->o) {
+ case IR_ADD:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_add);
+ break;
+ case IR_SUB:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_sub);
+ break;
+ case IR_MUL:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_mul);
+ break;
+ case IR_DIV:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_div);
+ break;
+ case IR_POW:
+ hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi);
+ break;
+ case IR_FPMATH:
+ /* Try to rejoin pow from EXP2, MUL and LOG2. */
+ if (nir->op2 == IRFPM_EXP2 && nir->op1 > J->loopref) {
+ IRIns *irp = IR(nir->op1);
+ if (irp->o == IR_CALLN && irp->op2 == IRCALL_softfp_mul) {
+ IRIns *irm4 = IR(irp->op1);
+ IRIns *irm3 = IR(irm4->op1);
+ IRIns *irm12 = IR(irm3->op1);
+ IRIns *irl1 = IR(irm12->op1);
+ if (irm12->op1 > J->loopref && irl1->o == IR_CALLN &&
+ irl1->op2 == IRCALL_lj_vm_log2) {
+ IRRef tmp = irl1->op1; /* Recycle first two args from LOG2. */
+ IRRef arg3 = irm3->op2, arg4 = irm4->op2;
+ J->cur.nins--;
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg3);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg4);
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_pow);
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+ break;
+ }
+ }
+ }
+ hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2);
+ break;
+ case IR_ATAN2:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_atan2);
+ break;
+ case IR_LDEXP:
+ hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp);
+ break;
+ case IR_NEG: case IR_ABS:
+ nir->o = IR_CONV; /* Pass through loword. */
+ nir->op2 = (IRT_INT << 5) | IRT_INT;
+ hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND, IRT_SOFTFP),
+ hisubst[ir->op1], hisubst[ir->op2]);
+ break;
+ case IR_SLOAD:
+ if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from int to number. */
+ nir->op2 &= ~IRSLOAD_CONVERT;
+ ir->prev = nref = split_emit(J, IRTI(IR_CALLN), nref,
+ IRCALL_softfp_i2d);
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ break;
+ }
+ /* fallthrough */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ case IR_STRTO:
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ break;
+ case IR_XLOAD: {
+ IRIns inslo = *nir; /* Save/undo the emit of the lo XLOAD. */
+ J->cur.nins--;
+ hi = split_ptr(J, oir, ir->op1); /* Insert the hiref ADD. */
+ nref = lj_ir_nextins(J);
+ nir = IR(nref);
+ *nir = inslo; /* Re-emit lo XLOAD immediately before hi XLOAD. */
+ hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), hi, ir->op2);
+#if LJ_LE
+ ir->prev = nref;
+#else
+ ir->prev = hi; hi = nref;
+#endif
+ break;
+ }
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE:
+ split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nir->op1, hisubst[ir->op2]);
+ break;
+ case IR_XSTORE: {
+#if LJ_LE
+ IRRef hiref = hisubst[ir->op2];
+#else
+ IRRef hiref = nir->op2; nir->op2 = hisubst[ir->op2];
+#endif
+ split_emit(J, IRT(IR_XSTORE, IRT_SOFTFP),
+ split_ptr(J, oir, ir->op1), hiref);
+ break;
+ }
+ case IR_CONV: { /* Conversion to number. Others handled below. */
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ UNUSED(st);
+#if LJ_32 && LJ_HASFFI
+ if (st == IRT_I64 || st == IRT_U64) {
+ hi = split_call_l(J, hisubst, oir, ir,
+ st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d);
+ break;
+ }
+#endif
+ lua_assert(st == IRT_INT ||
+ (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT)));
+ nir->o = IR_CALLN;
+#if LJ_32 && LJ_HASFFI
+ nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d :
+ st == IRT_FLOAT ? IRCALL_softfp_f2d :
+ IRCALL_softfp_ui2d;
+#else
+ nir->op2 = IRCALL_softfp_i2d;
+#endif
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ break;
+ }
+ case IR_CALLN:
+ case IR_CALLL:
+ case IR_CALLS:
+ case IR_CALLXS:
+ goto split_call;
+ case IR_PHI:
+ if (nir->op1 == nir->op2)
+ J->cur.nins--; /* Drop useless PHIs. */
+ if (hisubst[ir->op1] != hisubst[ir->op2])
+ split_emit(J, IRT(IR_PHI, IRT_SOFTFP),
+ hisubst[ir->op1], hisubst[ir->op2]);
+ break;
+ default:
+ lua_assert(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX);
+ hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP),
+ hisubst[ir->op1], hisubst[ir->op2]);
+ break;
+ }
+ } else
+#endif
+#if LJ_32 && LJ_HASFFI
+ if (irt_isint64(ir->t)) {
+ IRRef hiref = hisubst[ir->op1];
+ nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */
+ switch (ir->o) {
+ case IR_ADD:
+ case IR_SUB:
+ /* Use plain op for hiword if loword cannot produce a carry/borrow. */
+ if (irref_isk(nir->op2) && IR(nir->op2)->i == 0) {
+ ir->prev = nir->op1; /* Pass through loword. */
+ nir->op1 = hiref; nir->op2 = hisubst[ir->op2];
+ hi = nref;
+ break;
+ }
+ /* fallthrough */
+ case IR_NEG:
+ hi = split_emit(J, IRTI(IR_HIOP), hiref, hisubst[ir->op2]);
+ break;
+ case IR_MUL:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_lj_carith_mul64);
+ break;
+ case IR_DIV:
+ hi = split_call_ll(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
+ IRCALL_lj_carith_divu64);
+ break;
+ case IR_MOD:
+ hi = split_call_ll(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
+ IRCALL_lj_carith_modu64);
+ break;
+ case IR_POW:
+ hi = split_call_ll(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
+ IRCALL_lj_carith_powu64);
+ break;
+ case IR_FLOAD:
+ lua_assert(ir->op2 == IRFL_CDATA_INT64);
+ hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4);
+#if LJ_BE
+ ir->prev = hi; hi = nref;
+#endif
+ break;
+ case IR_XLOAD:
+ hi = split_emit(J, IRTI(IR_XLOAD), split_ptr(J, oir, ir->op1), ir->op2);
+#if LJ_BE
+ ir->prev = hi; hi = nref;
+#endif
+ break;
+ case IR_XSTORE:
+#if LJ_LE
+ hiref = hisubst[ir->op2];
+#else
+ hiref = nir->op2; nir->op2 = hisubst[ir->op2];
+#endif
+ split_emit(J, IRTI(IR_XSTORE), split_ptr(J, oir, ir->op1), hiref);
+ break;
+ case IR_CONV: { /* Conversion to 64 bit integer. Others handled below. */
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if LJ_SOFTFP
+ if (st == IRT_NUM) { /* NUM to 64 bit int conv. */
+ hi = split_call_l(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul);
+ } else if (st == IRT_FLOAT) { /* FLOAT to 64 bit int conv. */
+ nir->o = IR_CALLN;
+ nir->op2 = irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul;
+ hi = split_emit(J, IRTI(IR_HIOP), nref, nref);
+ }
+#else
+ if (st == IRT_NUM || st == IRT_FLOAT) { /* FP to 64 bit int conv. */
+ hi = split_emit(J, IRTI(IR_HIOP), nir->op1, nref);
+ }
+#endif
+ else if (st == IRT_I64 || st == IRT_U64) { /* 64/64 bit cast. */
+ /* Drop cast, since assembler doesn't care. */
+ goto fwdlo;
+ } else if ((ir->op2 & IRCONV_SEXT)) { /* Sign-extend to 64 bit. */
+ IRRef k31 = lj_ir_kint(J, 31);
+ nir = IR(nref); /* May have been reallocated. */
+ ir->prev = nir->op1; /* Pass through loword. */
+ nir->o = IR_BSAR; /* hi = bsar(lo, 31). */
+ nir->op2 = k31;
+ hi = nref;
+ } else { /* Zero-extend to 64 bit. */
+ hi = lj_ir_kint(J, 0);
+ goto fwdlo;
+ }
+ break;
+ }
+ case IR_CALLXS:
+ goto split_call;
+ case IR_PHI: {
+ IRRef hiref2;
+ if ((irref_isk(nir->op1) && irref_isk(nir->op2)) ||
+ nir->op1 == nir->op2)
+ J->cur.nins--; /* Drop useless PHIs. */
+ hiref2 = hisubst[ir->op2];
+ if (!((irref_isk(hiref) && irref_isk(hiref2)) || hiref == hiref2))
+ split_emit(J, IRTI(IR_PHI), hiref, hiref2);
+ break;
+ }
+ default:
+ lua_assert(ir->o <= IR_NE); /* Comparisons. */
+ split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]);
+ break;
+ }
+ } else
+#endif
+#if LJ_SOFTFP
+ if (ir->o == IR_SLOAD) {
+ if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from number to int. */
+ nir->op2 &= ~IRSLOAD_CONVERT;
+ if (!(nir->op2 & IRSLOAD_TYPECHECK))
+ nir->t.irt = IRT_INT; /* Drop guard. */
+ split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ ir->prev = split_num2int(J, nref, nref+1, irt_isguard(ir->t));
+ }
+ } else if (ir->o == IR_TOBIT) {
+ IRRef tmp, op1 = ir->op1;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+#endif
+ ir->prev = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_lj_vm_tobit);
+ } else if (ir->o == IR_TOSTR) {
+ if (hisubst[ir->op1]) {
+ if (irref_isk(ir->op1))
+ nir->op1 = ir->op1;
+ else
+ split_emit(J, IRT(IR_HIOP, IRT_NIL), hisubst[ir->op1], nref);
+ }
+ } else if (ir->o == IR_HREF || ir->o == IR_NEWREF) {
+ if (irref_isk(ir->op2) && hisubst[ir->op2])
+ nir->op2 = ir->op2;
+ } else
+#endif
+ if (ir->o == IR_CONV) { /* See above, too. */
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if LJ_32 && LJ_HASFFI
+ if (st == IRT_I64 || st == IRT_U64) { /* Conversion from 64 bit int. */
+#if LJ_SOFTFP
+ if (irt_isfloat(ir->t)) {
+ split_call_l(J, hisubst, oir, ir,
+ st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f);
+ J->cur.nins--; /* Drop unused HIOP. */
+ }
+#else
+ if (irt_isfp(ir->t)) { /* 64 bit integer to FP conversion. */
+ ir->prev = split_emit(J, IRT(IR_HIOP, irt_type(ir->t)),
+ hisubst[ir->op1], nref);
+ }
+#endif
+ else { /* Truncate to lower 32 bits. */
+ fwdlo:
+ ir->prev = nir->op1; /* Forward loword. */
+ /* Replace with NOP to avoid messing up the snapshot logic. */
+ nir->ot = IRT(IR_NOP, IRT_NIL);
+ nir->op1 = nir->op2 = 0;
+ }
+ }
+#endif
+#if LJ_SOFTFP && LJ_32 && LJ_HASFFI
+ else if (irt_isfloat(ir->t)) {
+ if (st == IRT_NUM) {
+ split_call_l(J, hisubst, oir, ir, IRCALL_softfp_d2f);
+ J->cur.nins--; /* Drop unused HIOP. */
+ } else {
+ nir->o = IR_CALLN;
+ nir->op2 = st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f;
+ }
+ } else if (st == IRT_FLOAT) {
+ nir->o = IR_CALLN;
+ nir->op2 = irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui;
+ } else
+#endif
+#if LJ_SOFTFP
+ if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) {
+ if (irt_isguard(ir->t)) {
+ lua_assert(st == IRT_NUM && irt_isint(ir->t));
+ J->cur.nins--;
+ ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1);
+ } else {
+ split_call_l(J, hisubst, oir, ir,
+#if LJ_32 && LJ_HASFFI
+ st == IRT_NUM ?
+ (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) :
+ (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui)
+#else
+ IRCALL_softfp_d2i
+#endif
+ );
+ J->cur.nins--; /* Drop unused HIOP. */
+ }
+ }
+#endif
+ } else if (ir->o == IR_CALLXS) {
+ IRRef hiref;
+ split_call:
+ hiref = hisubst[ir->op1];
+ if (hiref) {
+ IROpT ot = nir->ot;
+ IRRef op2 = nir->op2;
+ nir->ot = IRT(IR_CARG, IRT_NIL);
+#if LJ_LE
+ nir->op2 = hiref;
+#else
+ nir->op2 = nir->op1; nir->op1 = hiref;
+#endif
+ ir->prev = nref = split_emit(J, ot, nref, op2);
+ }
+ if (LJ_SOFTFP ? irt_is64(ir->t) : irt_isint64(ir->t))
+ hi = split_emit(J,
+ IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
+ nref, nref);
+ } else if (ir->o == IR_CARG) {
+ IRRef hiref = hisubst[ir->op1];
+ if (hiref) {
+ IRRef op2 = nir->op2;
+#if LJ_LE
+ nir->op2 = hiref;
+#else
+ nir->op2 = nir->op1; nir->op1 = hiref;
+#endif
+ ir->prev = nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
+ nir = IR(nref);
+ }
+ hiref = hisubst[ir->op2];
+ if (hiref) {
+#if !LJ_TARGET_X86
+ int carg = 0;
+ IRIns *cir;
+ for (cir = IR(nir->op1); cir->o == IR_CARG; cir = IR(cir->op1))
+ carg++;
+ if ((carg & 1) == 0) { /* Align 64 bit arguments. */
+ IRRef op2 = nir->op2;
+ nir->op2 = REF_NIL;
+ nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
+ nir = IR(nref);
+ }
+#endif
+#if LJ_BE
+ { IRRef tmp = nir->op2; nir->op2 = hiref; hiref = tmp; }
+#endif
+ ir->prev = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, hiref);
+ }
+ } else if (ir->o == IR_CNEWI) {
+ if (hisubst[ir->op2])
+ split_emit(J, IRT(IR_HIOP, IRT_NIL), nref, hisubst[ir->op2]);
+ } else if (ir->o == IR_LOOP) {
+ J->loopref = nref; /* Needed by assembler. */
+ }
+ hisubst[ref] = hi; /* Store hiword substitution. */
+ }
+
+ /* Add PHI marks. */
+ for (ref = J->cur.nins-1; ref >= REF_FIRST; ref--) {
+ IRIns *ir = IR(ref);
+ if (ir->o != IR_PHI) break;
+ if (!irref_isk(ir->op1)) irt_setphi(IR(ir->op1)->t);
+ if (ir->op2 > J->loopref) irt_setphi(IR(ir->op2)->t);
+ }
+
+ /* Substitute snapshot maps. */
+ oir[nins].prev = J->cur.nins; /* Substitution for last snapshot. */
+ {
+ SnapNo i, nsnap = J->cur.nsnap;
+ for (i = 0; i < nsnap; i++) {
+ SnapShot *snap = &J->cur.snap[i];
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ snap->ref = snap->ref == REF_FIRST ? REF_FIRST : oir[snap->ref].prev;
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRIns *ir = &oir[snap_ref(sn)];
+ if (!(LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && irref_isk(snap_ref(sn))))
+ map[n] = ((sn & 0xffff0000) | ir->prev);
+ }
+ }
+ }
+}
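One detail from the CONV/IRCONV_SEXT case above: the hiword of a sign-extended value is simply the loword shifted right arithmetically by 31, which is what the emitted BSAR computes. A standalone check (assumes arithmetic right shift of negative signed values, as LuaJIT itself does):

#include <assert.h>
#include <stdint.h>

int main(void)
{
  int32_t lo_pos = 123, lo_neg = -123;
  assert((int32_t)((int64_t)lo_pos >> 32) == (lo_pos >> 31));  /* hi == 0  */
  assert((int32_t)((int64_t)lo_neg >> 32) == (lo_neg >> 31));  /* hi == -1 */
  return 0;
}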
+
+/* Protected callback for split pass. */
+static TValue *cpsplit(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ jit_State *J = (jit_State *)ud;
+ split_ir(J);
+ UNUSED(L); UNUSED(dummy);
+ return NULL;
+}
+
+#if defined(LUA_USE_ASSERT) || LJ_SOFTFP
+/* Slow, but sure way to check whether a SPLIT pass is needed. */
+static int split_needsplit(jit_State *J)
+{
+ IRIns *ir, *irend;
+ IRRef ref;
+ for (ir = IR(REF_FIRST), irend = IR(J->cur.nins); ir < irend; ir++)
+ if (LJ_SOFTFP ? irt_is64orfp(ir->t) : irt_isint64(ir->t))
+ return 1;
+ if (LJ_SOFTFP) {
+ for (ref = J->chain[IR_SLOAD]; ref; ref = IR(ref)->prev)
+ if ((IR(ref)->op2 & IRSLOAD_CONVERT))
+ return 1;
+ }
+ for (ref = J->chain[IR_CONV]; ref; ref = IR(ref)->prev) {
+ IRType st = (IR(ref)->op2 & IRCONV_SRCMASK);
+ if ((LJ_SOFTFP && (st == IRT_NUM || st == IRT_FLOAT)) ||
+ st == IRT_I64 || st == IRT_U64)
+ return 1;
+ }
+ return 0; /* Nope. */
+}
+#endif
+
+/* SPLIT pass. */
+void lj_opt_split(jit_State *J)
+{
+#if LJ_SOFTFP
+ if (!J->needsplit)
+ J->needsplit = split_needsplit(J);
+#else
+ lua_assert(J->needsplit >= split_needsplit(J)); /* Verify flag. */
+#endif
+ if (J->needsplit) {
+ int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit);
+ if (errcode) {
+ /* Completely reset the trace to avoid inconsistent dump on abort. */
+ J->cur.nins = J->cur.nk = REF_BASE;
+ J->cur.nsnap = 0;
+ lj_err_throw(J->L, errcode); /* Propagate errors. */
+ }
+ }
+}
+
+#undef IR
+
+#endif
diff --git a/src/LuaJIT/src/lj_parse.c b/src/LuaJIT/src/lj_parse.c
new file mode 100644
index 000000000..2835aef08
--- /dev/null
+++ b/src/LuaJIT/src/lj_parse.c
@@ -0,0 +1,2515 @@
+/*
+** Lua parser (source code -> bytecode).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_parse_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_state.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_lex.h"
+#include "lj_parse.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+
+/* -- Parser structures and definitions ----------------------------------- */
+
+/* Expression kinds. */
+typedef enum {
+ /* Constant expressions must be first and in this order: */
+ VKNIL,
+ VKFALSE,
+ VKTRUE,
+ VKSTR, /* sval = string value */
+ VKNUM, /* nval = number value */
+ VKLAST = VKNUM,
+ VKCDATA, /* nval = cdata value, not treated as a constant expression */
+ /* Non-constant expressions follow: */
+ VLOCAL, /* info = local register */
+ VUPVAL, /* info = upvalue index */
+ VGLOBAL, /* sval = string value */
+ VINDEXED, /* info = table register, aux = index reg/byte/string const */
+ VJMP, /* info = instruction PC */
+ VRELOCABLE, /* info = instruction PC */
+ VNONRELOC, /* info = result register */
+ VCALL, /* info = instruction PC, aux = base */
+ VVOID
+} ExpKind;
+
+/* Expression descriptor. */
+typedef struct ExpDesc {
+ union {
+ struct {
+ uint32_t info; /* Primary info. */
+ uint32_t aux; /* Secondary info. */
+ } s;
+ TValue nval; /* Number value. */
+ GCstr *sval; /* String value. */
+ } u;
+ ExpKind k;
+ BCPos t; /* True condition jump list. */
+ BCPos f; /* False condition jump list. */
+} ExpDesc;
+
+/* Macros for expressions. */
+#define expr_hasjump(e) ((e)->t != (e)->f)
+
+#define expr_isk(e) ((e)->k <= VKLAST)
+#define expr_isk_nojump(e) (expr_isk(e) && !expr_hasjump(e))
+#define expr_isnumk(e) ((e)->k == VKNUM)
+#define expr_isnumk_nojump(e) (expr_isnumk(e) && !expr_hasjump(e))
+#define expr_isstrk(e) ((e)->k == VKSTR)
+
+#define expr_numtv(e) check_exp(expr_isnumk((e)), &(e)->u.nval)
+#define expr_numberV(e) numberVnum(expr_numtv((e)))
+
+/* Initialize expression. */
+static LJ_AINLINE void expr_init(ExpDesc *e, ExpKind k, uint32_t info)
+{
+ e->k = k;
+ e->u.s.info = info;
+ e->f = e->t = NO_JMP;
+}
+
+/* Check number constant for +-0. */
+static int expr_numiszero(ExpDesc *e)
+{
+ TValue *o = expr_numtv(e);
+ return tvisint(o) ? (intV(o) == 0) : tviszero(o);
+}
+
+/* Per-function linked list of scope blocks. */
+typedef struct FuncScope {
+ struct FuncScope *prev; /* Link to outer scope. */
+ BCPos breaklist; /* Jump list for loop breaks. */
+ uint8_t nactvar; /* Number of active vars outside the scope. */
+ uint8_t upval; /* Some variable in the scope is an upvalue. */
+ uint8_t isbreakable; /* Scope is a loop and allows a break. */
+} FuncScope;
+
+/* Index into variable stack. */
+typedef uint16_t VarIndex;
+#define LJ_MAX_VSTACK 65536
+
+/* Upvalue map. */
+typedef struct UVMap {
+ VarIndex vidx; /* Varinfo index. */
+ uint16_t slot; /* Slot or parent upvalue index. */
+} UVMap;
+
+/* Per-function state. */
+typedef struct FuncState {
+ GCtab *kt; /* Hash table for constants. */
+ LexState *ls; /* Lexer state. */
+ lua_State *L; /* Lua state. */
+ FuncScope *bl; /* Current scope. */
+ struct FuncState *prev; /* Enclosing function. */
+ BCPos pc; /* Next bytecode position. */
+ BCPos lasttarget; /* Bytecode position of last jump target. */
+ BCPos jpc; /* Pending jump list to next bytecode. */
+ BCReg freereg; /* First free register. */
+ BCReg nactvar; /* Number of active local variables. */
+ BCReg nkn, nkgc; /* Number of lua_Number/GCobj constants */
+ BCLine linedefined; /* First line of the function definition. */
+ BCInsLine *bcbase; /* Base of bytecode stack. */
+ BCPos bclim; /* Limit of bytecode stack. */
+ MSize vbase; /* Base of variable stack for this function. */
+ uint8_t flags; /* Prototype flags. */
+ uint8_t numparams; /* Number of parameters. */
+ uint8_t framesize; /* Fixed frame size. */
+ uint8_t nuv; /* Number of upvalues */
+ VarIndex varmap[LJ_MAX_LOCVAR]; /* Map from register to variable idx. */
+ UVMap uvloc[LJ_MAX_UPVAL]; /* Map from upvalue to variable idx and slot. */
+} FuncState;
+
+/* Binary and unary operators. ORDER OPR */
+typedef enum BinOpr {
+ OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, /* ORDER ARITH */
+ OPR_CONCAT,
+ OPR_NE, OPR_EQ,
+ OPR_LT, OPR_GE, OPR_LE, OPR_GT,
+ OPR_AND, OPR_OR,
+ OPR_NOBINOPR
+} BinOpr;
+
+LJ_STATIC_ASSERT((int)BC_ISGE-(int)BC_ISLT == (int)OPR_GE-(int)OPR_LT);
+LJ_STATIC_ASSERT((int)BC_ISLE-(int)BC_ISLT == (int)OPR_LE-(int)OPR_LT);
+LJ_STATIC_ASSERT((int)BC_ISGT-(int)BC_ISLT == (int)OPR_GT-(int)OPR_LT);
+LJ_STATIC_ASSERT((int)BC_SUBVV-(int)BC_ADDVV == (int)OPR_SUB-(int)OPR_ADD);
+LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD);
+LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD);
+LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD);
+
+/* -- Error handling ------------------------------------------------------ */
+
+LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em)
+{
+ lj_lex_error(ls, ls->token, em);
+}
+
+LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken token)
+{
+ lj_lex_error(ls, ls->token, LJ_ERR_XTOKEN, lj_lex_token2str(ls, token));
+}
+
+LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what)
+{
+ if (fs->linedefined == 0)
+ lj_lex_error(fs->ls, 0, LJ_ERR_XLIMM, limit, what);
+ else
+ lj_lex_error(fs->ls, 0, LJ_ERR_XLIMF, fs->linedefined, limit, what);
+}
+
+#define checklimit(fs, v, l, m) if ((v) >= (l)) err_limit(fs, l, m)
+#define checklimitgt(fs, v, l, m) if ((v) > (l)) err_limit(fs, l, m)
+#define checkcond(ls, c, em) { if (!(c)) err_syntax(ls, em); }
+
+/* -- Management of constants --------------------------------------------- */
+
+/* Return bytecode encoding for primitive constant. */
+#define const_pri(e) check_exp((e)->k <= VKTRUE, (e)->k)
+
+#define tvhaskslot(o) ((o)->u32.hi == 0)
+#define tvkslot(o) ((o)->u32.lo)
+
+/* Add a number constant. */
+static BCReg const_num(FuncState *fs, ExpDesc *e)
+{
+ lua_State *L = fs->L;
+ TValue *o;
+ lua_assert(expr_isnumk(e));
+ o = lj_tab_set(L, fs->kt, &e->u.nval);
+ if (tvhaskslot(o))
+ return tvkslot(o);
+ o->u64 = fs->nkn;
+ return fs->nkn++;
+}
+
+/* Add a GC object constant. */
+static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype)
+{
+ lua_State *L = fs->L;
+ TValue key, *o;
+ setgcV(L, &key, gc, itype);
+ /* NOBARRIER: the key is new or kept alive. */
+ o = lj_tab_set(L, fs->kt, &key);
+ if (tvhaskslot(o))
+ return tvkslot(o);
+ o->u64 = fs->nkgc;
+ return fs->nkgc++;
+}
+
+/* Add a string constant. */
+static BCReg const_str(FuncState *fs, ExpDesc *e)
+{
+ lua_assert(expr_isstrk(e) || e->k == VGLOBAL);
+ return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR);
+}
+
+/* Anchor string constant to avoid GC. */
+GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t len)
+{
+ /* NOBARRIER: the key is new or kept alive. */
+ lua_State *L = ls->L;
+ GCstr *s = lj_str_new(L, str, len);
+ TValue *tv = lj_tab_setstr(L, ls->fs->kt, s);
+ if (tvisnil(tv)) setboolV(tv, 1);
+ lj_gc_check(L);
+ return s;
+}
+
+#if LJ_HASFFI
+/* Anchor cdata to avoid GC. */
+void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd)
+{
+ /* NOBARRIER: the key is new or kept alive. */
+ lua_State *L = ls->L;
+ setcdataV(L, tv, cd);
+ setboolV(lj_tab_set(L, ls->fs->kt, tv), 1);
+}
+#endif
+
+/* -- Jump list handling -------------------------------------------------- */
+
+/* Get next element in jump list. */
+static BCPos jmp_next(FuncState *fs, BCPos pc)
+{
+ ptrdiff_t delta = bc_j(fs->bcbase[pc].ins);
+ if ((BCPos)delta == NO_JMP)
+ return NO_JMP;
+ else
+ return (BCPos)(((ptrdiff_t)pc+1)+delta);
+}
+
+/* Check if any of the instructions on the jump list produce no value. */
+static int jmp_novalue(FuncState *fs, BCPos list)
+{
+ for (; list != NO_JMP; list = jmp_next(fs, list)) {
+ BCIns p = fs->bcbase[list >= 1 ? list-1 : list].ins;
+ if (!(bc_op(p) == BC_ISTC || bc_op(p) == BC_ISFC || bc_a(p) == NO_REG))
+ return 1;
+ }
+ return 0;
+}
+
+/* Patch register of test instructions. */
+static int jmp_patchtestreg(FuncState *fs, BCPos pc, BCReg reg)
+{
+ BCInsLine *ilp = &fs->bcbase[pc >= 1 ? pc-1 : pc];
+ BCOp op = bc_op(ilp->ins);
+ if (op == BC_ISTC || op == BC_ISFC) {
+ if (reg != NO_REG && reg != bc_d(ilp->ins)) {
+ setbc_a(&ilp->ins, reg);
+ } else { /* Nothing to store or already in the right register. */
+ setbc_op(&ilp->ins, op+(BC_IST-BC_ISTC));
+ setbc_a(&ilp->ins, 0);
+ }
+ } else if (bc_a(ilp->ins) == NO_REG) {
+ if (reg == NO_REG) {
+ ilp->ins = BCINS_AJ(BC_JMP, bc_a(fs->bcbase[pc].ins), 0);
+ } else {
+ setbc_a(&ilp->ins, reg);
+ if (reg >= bc_a(ilp[1].ins))
+ setbc_a(&ilp[1].ins, reg+1);
+ }
+ } else {
+ return 0; /* Cannot patch other instructions. */
+ }
+ return 1;
+}
+
+/* Drop values for all instructions on jump list. */
+static void jmp_dropval(FuncState *fs, BCPos list)
+{
+ for (; list != NO_JMP; list = jmp_next(fs, list))
+ jmp_patchtestreg(fs, list, NO_REG);
+}
+
+/* Patch jump instruction to target. */
+static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest)
+{
+ BCIns *jmp = &fs->bcbase[pc].ins;
+ BCPos offset = dest-(pc+1)+BCBIAS_J;
+ lua_assert(dest != NO_JMP);
+ if (offset > BCMAX_D)
+ err_syntax(fs->ls, LJ_ERR_XJUMP);
+ setbc_d(jmp, offset);
+}
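A round trip of the biased jump encoding handled by jmp_patchins() and jmp_next() (standalone sketch; BCBIAS_J is defined elsewhere and assumed to be 0x8000 here):

#include <assert.h>
#include <stdint.h>

#define BIAS 0x8000u   /* stand-in for BCBIAS_J */

int main(void)
{
  uint32_t pc = 100, dest = 60;                /* a backward jump */
  uint32_t d = dest - (pc + 1) + BIAS;         /* encode: jmp_patchins() */
  int32_t delta = (int32_t)d - (int32_t)BIAS;  /* decode: like bc_j()    */
  assert(d <= 0xffff);                         /* fits the 16 bit D field */
  assert(pc + 1 + (uint32_t)delta == dest);    /* walk: jmp_next()        */
  return 0;
}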
+
+/* Append to jump list. */
+static void jmp_append(FuncState *fs, BCPos *l1, BCPos l2)
+{
+ if (l2 == NO_JMP) {
+ return;
+ } else if (*l1 == NO_JMP) {
+ *l1 = l2;
+ } else {
+ BCPos list = *l1;
+ BCPos next;
+ while ((next = jmp_next(fs, list)) != NO_JMP) /* Find last element. */
+ list = next;
+ jmp_patchins(fs, list, l2);
+ }
+}
+
+/* Patch jump list and preserve produced values. */
+static void jmp_patchval(FuncState *fs, BCPos list, BCPos vtarget,
+ BCReg reg, BCPos dtarget)
+{
+ while (list != NO_JMP) {
+ BCPos next = jmp_next(fs, list);
+ if (jmp_patchtestreg(fs, list, reg))
+ jmp_patchins(fs, list, vtarget); /* Jump to target with value. */
+ else
+ jmp_patchins(fs, list, dtarget); /* Jump to default target. */
+ list = next;
+ }
+}
+
+/* Jump to following instruction. Append to list of pending jumps. */
+static void jmp_tohere(FuncState *fs, BCPos list)
+{
+ fs->lasttarget = fs->pc;
+ jmp_append(fs, &fs->jpc, list);
+}
+
+/* Patch jump list to target. */
+static void jmp_patch(FuncState *fs, BCPos list, BCPos target)
+{
+ if (target == fs->pc) {
+ jmp_tohere(fs, list);
+ } else {
+ lua_assert(target < fs->pc);
+ jmp_patchval(fs, list, target, NO_REG, target);
+ }
+}
+
+/* -- Bytecode register allocator ----------------------------------------- */
+
+/* Bump frame size. */
+static void bcreg_bump(FuncState *fs, BCReg n)
+{
+ BCReg sz = fs->freereg + n;
+ if (sz > fs->framesize) {
+ if (sz >= LJ_MAX_SLOTS)
+ err_syntax(fs->ls, LJ_ERR_XSLOTS);
+ fs->framesize = (uint8_t)sz;
+ }
+}
+
+/* Reserve registers. */
+static void bcreg_reserve(FuncState *fs, BCReg n)
+{
+ bcreg_bump(fs, n);
+ fs->freereg += n;
+}
+
+/* Free register. */
+static void bcreg_free(FuncState *fs, BCReg reg)
+{
+ if (reg >= fs->nactvar) {
+ fs->freereg--;
+ lua_assert(reg == fs->freereg);
+ }
+}
+
+/* Free register for expression. */
+static void expr_free(FuncState *fs, ExpDesc *e)
+{
+ if (e->k == VNONRELOC)
+ bcreg_free(fs, e->u.s.info);
+}
+
+/* -- Bytecode emitter ---------------------------------------------------- */
+
+/* Emit bytecode instruction. */
+static BCPos bcemit_INS(FuncState *fs, BCIns ins)
+{
+ BCPos pc = fs->pc;
+ LexState *ls = fs->ls;
+ jmp_patchval(fs, fs->jpc, pc, NO_REG, pc);
+ fs->jpc = NO_JMP;
+ if (LJ_UNLIKELY(pc >= fs->bclim)) {
+ ptrdiff_t base = fs->bcbase - ls->bcstack;
+ checklimit(fs, ls->sizebcstack, LJ_MAX_BCINS, "bytecode instructions");
+ lj_mem_growvec(fs->L, ls->bcstack, ls->sizebcstack, LJ_MAX_BCINS,BCInsLine);
+ fs->bclim = (BCPos)(ls->sizebcstack - base);
+ fs->bcbase = ls->bcstack + base;
+ }
+ fs->bcbase[pc].ins = ins;
+ fs->bcbase[pc].line = ls->lastline;
+ fs->pc = pc+1;
+ return pc;
+}
+
+#define bcemit_ABC(fs, o, a, b, c) bcemit_INS(fs, BCINS_ABC(o, a, b, c))
+#define bcemit_AD(fs, o, a, d) bcemit_INS(fs, BCINS_AD(o, a, d))
+#define bcemit_AJ(fs, o, a, j) bcemit_INS(fs, BCINS_AJ(o, a, j))
+
+#define bcptr(fs, e) (&(fs)->bcbase[(e)->u.s.info].ins)
+
+/* -- Bytecode emitter for expressions ------------------------------------ */
+
+/* Discharge non-constant expression to any register. */
+static void expr_discharge(FuncState *fs, ExpDesc *e)
+{
+ BCIns ins;
+ if (e->k == VUPVAL) {
+ ins = BCINS_AD(BC_UGET, 0, e->u.s.info);
+ } else if (e->k == VGLOBAL) {
+ ins = BCINS_AD(BC_GGET, 0, const_str(fs, e));
+ } else if (e->k == VINDEXED) {
+ BCReg rc = e->u.s.aux;
+ if ((int32_t)rc < 0) {
+ ins = BCINS_ABC(BC_TGETS, 0, e->u.s.info, ~rc);
+ } else if (rc > BCMAX_C) {
+ ins = BCINS_ABC(BC_TGETB, 0, e->u.s.info, rc-(BCMAX_C+1));
+ } else {
+ bcreg_free(fs, rc);
+ ins = BCINS_ABC(BC_TGETV, 0, e->u.s.info, rc);
+ }
+ bcreg_free(fs, e->u.s.info);
+ } else if (e->k == VCALL) {
+ e->u.s.info = e->u.s.aux;
+ e->k = VNONRELOC;
+ return;
+ } else if (e->k == VLOCAL) {
+ e->k = VNONRELOC;
+ return;
+ } else {
+ return;
+ }
+ e->u.s.info = bcemit_INS(fs, ins);
+ e->k = VRELOCABLE;
+}
+
+/* Emit bytecode to set a range of registers to nil. */
+static void bcemit_nil(FuncState *fs, BCReg from, BCReg n)
+{
+ if (fs->pc > fs->lasttarget) { /* No jumps to current position? */
+ BCIns *ip = &fs->bcbase[fs->pc-1].ins;
+ BCReg pto, pfrom = bc_a(*ip);
+ switch (bc_op(*ip)) { /* Try to merge with the previous instruction. */
+ case BC_KPRI:
+ if (bc_d(*ip) != ~LJ_TNIL) break;
+ if (from == pfrom) {
+ if (n == 1) return;
+ } else if (from == pfrom+1) {
+ from = pfrom;
+ n++;
+ } else {
+ break;
+ }
+ fs->pc--; /* Drop KPRI. */
+ break;
+ case BC_KNIL:
+ pto = bc_d(*ip);
+ if (pfrom <= from && from <= pto+1) { /* Can we connect both ranges? */
+ if (from+n-1 > pto)
+ setbc_d(ip, from+n-1); /* Patch previous instruction range. */
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ /* Emit new instruction or replace old instruction. */
+ bcemit_INS(fs, n == 1 ? BCINS_AD(BC_KPRI, from, VKNIL) :
+ BCINS_AD(BC_KNIL, from, from+n-1));
+}
+
+/* Discharge an expression to a specific register. Ignore branches. */
+static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg)
+{
+ BCIns ins;
+ expr_discharge(fs, e);
+ if (e->k == VKSTR) {
+ ins = BCINS_AD(BC_KSTR, reg, const_str(fs, e));
+ } else if (e->k == VKNUM) {
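+ /* Small integral numbers become KSHORT; everything else uses a numeric constant slot (KNUM). */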
+#if LJ_DUALNUM
+ cTValue *tv = expr_numtv(e);
+ if (tvisint(tv) && checki16(intV(tv)))
+ ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)intV(tv));
+ else
+#else
+ lua_Number n = expr_numberV(e);
+ int32_t k = lj_num2int(n);
+ if (checki16(k) && n == (lua_Number)k)
+ ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)k);
+ else
+#endif
+ ins = BCINS_AD(BC_KNUM, reg, const_num(fs, e));
+#if LJ_HASFFI
+ } else if (e->k == VKCDATA) {
+ fs->flags |= PROTO_FFI;
+ ins = BCINS_AD(BC_KCDATA, reg,
+ const_gc(fs, obj2gco(cdataV(&e->u.nval)), LJ_TCDATA));
+#endif
+ } else if (e->k == VRELOCABLE) {
+ setbc_a(bcptr(fs, e), reg);
+ goto noins;
+ } else if (e->k == VNONRELOC) {
+ if (reg == e->u.s.info)
+ goto noins;
+ ins = BCINS_AD(BC_MOV, reg, e->u.s.info);
+ } else if (e->k == VKNIL) {
+ bcemit_nil(fs, reg, 1);
+ goto noins;
+ } else if (e->k <= VKTRUE) {
+ ins = BCINS_AD(BC_KPRI, reg, const_pri(e));
+ } else {
+ lua_assert(e->k == VVOID || e->k == VJMP);
+ return;
+ }
+ bcemit_INS(fs, ins);
+noins:
+ e->u.s.info = reg;
+ e->k = VNONRELOC;
+}
+
+/* Forward declaration. */
+static BCPos bcemit_jmp(FuncState *fs);
+
+/* Discharge an expression to a specific register. */
+static void expr_toreg(FuncState *fs, ExpDesc *e, BCReg reg)
+{
+ expr_toreg_nobranch(fs, e, reg);
+ if (e->k == VJMP)
+ jmp_append(fs, &e->t, e->u.s.info); /* Add it to the true jump list. */
+ if (expr_hasjump(e)) { /* Discharge expression with branches. */
+ BCPos jend, jfalse = NO_JMP, jtrue = NO_JMP;
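+ /* Some pending branch produces no value: emit KPRI false/true landing pads for it. */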
+ if (jmp_novalue(fs, e->t) || jmp_novalue(fs, e->f)) {
+ BCPos jval = (e->k == VJMP) ? NO_JMP : bcemit_jmp(fs);
+ jfalse = bcemit_AD(fs, BC_KPRI, reg, VKFALSE);
+ bcemit_AJ(fs, BC_JMP, fs->freereg, 1);
+ jtrue = bcemit_AD(fs, BC_KPRI, reg, VKTRUE);
+ jmp_tohere(fs, jval);
+ }
+ jend = fs->pc;
+ fs->lasttarget = jend;
+ jmp_patchval(fs, e->f, jend, reg, jfalse);
+ jmp_patchval(fs, e->t, jend, reg, jtrue);
+ }
+ e->f = e->t = NO_JMP;
+ e->u.s.info = reg;
+ e->k = VNONRELOC;
+}
+
+/* Discharge an expression to the next free register. */
+static void expr_tonextreg(FuncState *fs, ExpDesc *e)
+{
+ expr_discharge(fs, e);
+ expr_free(fs, e);
+ bcreg_reserve(fs, 1);
+ expr_toreg(fs, e, fs->freereg - 1);
+}
+
+/* Discharge an expression to any register. */
+static BCReg expr_toanyreg(FuncState *fs, ExpDesc *e)
+{
+ expr_discharge(fs, e);
+ if (e->k == VNONRELOC) {
+ if (!expr_hasjump(e)) return e->u.s.info; /* Already in a register. */
+ if (e->u.s.info >= fs->nactvar) {
+ expr_toreg(fs, e, e->u.s.info); /* Discharge to temp. register. */
+ return e->u.s.info;
+ }
+ }
+ expr_tonextreg(fs, e); /* Discharge to next register. */
+ return e->u.s.info;
+}
+
+/* Partially discharge expression to a value. */
+static void expr_toval(FuncState *fs, ExpDesc *e)
+{
+ if (expr_hasjump(e))
+ expr_toanyreg(fs, e);
+ else
+ expr_discharge(fs, e);
+}
+
+/* Emit store for LHS expression. */
+static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e)
+{
+ BCIns ins;
+ if (var->k == VLOCAL) {
+ expr_free(fs, e);
+ expr_toreg(fs, e, var->u.s.info);
+ return;
+ } else if (var->k == VUPVAL) {
+ expr_toval(fs, e);
+ if (e->k <= VKTRUE)
+ ins = BCINS_AD(BC_USETP, var->u.s.info, const_pri(e));
+ else if (e->k == VKSTR)
+ ins = BCINS_AD(BC_USETS, var->u.s.info, const_str(fs, e));
+ else if (e->k == VKNUM)
+ ins = BCINS_AD(BC_USETN, var->u.s.info, const_num(fs, e));
+ else
+ ins = BCINS_AD(BC_USETV, var->u.s.info, expr_toanyreg(fs, e));
+ } else if (var->k == VGLOBAL) {
+ BCReg ra = expr_toanyreg(fs, e);
+ ins = BCINS_AD(BC_GSET, ra, const_str(fs, var));
+ } else {
+ BCReg ra, rc;
+ lua_assert(var->k == VINDEXED);
+ ra = expr_toanyreg(fs, e);
+ rc = var->u.s.aux;
+ if ((int32_t)rc < 0) {
+ ins = BCINS_ABC(BC_TSETS, ra, var->u.s.info, ~rc);
+ } else if (rc > BCMAX_C) {
+ ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1));
+ } else {
+ /* Free late allocated key reg to avoid assert on free of value reg. */
+ /* This can only happen when called from expr_table(). */
+ lua_assert(e->k != VNONRELOC || ra < fs->nactvar ||
+ rc < ra || (bcreg_free(fs, rc),1));
+ ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc);
+ }
+ }
+ bcemit_INS(fs, ins);
+ expr_free(fs, e);
+}
+
+/* Emit method lookup expression. */
+static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key)
+{
+ BCReg idx, func, obj = expr_toanyreg(fs, e);
+ expr_free(fs, e);
+ func = fs->freereg;
+ bcemit_AD(fs, BC_MOV, func+1, obj); /* Copy object to first argument. */
+ lua_assert(expr_isstrk(key));
+ idx = const_str(fs, key);
+ if (idx <= BCMAX_C) {
+ bcreg_reserve(fs, 2);
+ bcemit_ABC(fs, BC_TGETS, func, obj, idx);
+ } else {
+ bcreg_reserve(fs, 3);
+ bcemit_AD(fs, BC_KSTR, func+2, idx);
+ bcemit_ABC(fs, BC_TGETV, func, obj, func+2);
+ fs->freereg--;
+ }
+ e->u.s.info = func;
+ e->k = VNONRELOC;
+}
+
+/* -- Bytecode emitter for branches --------------------------------------- */
+
+/* Emit unconditional branch. */
+static BCPos bcemit_jmp(FuncState *fs)
+{
+ BCPos jpc = fs->jpc;
+ BCPos j = fs->pc - 1;
+ BCIns *ip = &fs->bcbase[j].ins;
+ fs->jpc = NO_JMP;
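+ /* If possible, fuse the jump with a trailing UCLO into a combined UCLO+JMP. */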
+ if ((int32_t)j >= (int32_t)fs->lasttarget &&
+ bc_op(*ip) == BC_UCLO)
+ setbc_j(ip, NO_JMP);
+ else
+ j = bcemit_AJ(fs, BC_JMP, fs->freereg, NO_JMP);
+ jmp_append(fs, &j, jpc);
+ return j;
+}
+
+/* Invert branch condition of bytecode instruction. */
+static void invertcond(FuncState *fs, ExpDesc *e)
+{
+ BCIns *ip = &fs->bcbase[e->u.s.info - 1].ins;
+ setbc_op(ip, bc_op(*ip)^1);
+}
+
+/* Emit conditional branch. */
+static BCPos bcemit_branch(FuncState *fs, ExpDesc *e, int cond)
+{
+ BCPos pc;
+ if (e->k == VRELOCABLE) {
+ BCIns *ip = bcptr(fs, e);
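+ /* Fold 'not x': test the operand of the NOT with an inverted condition. */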
+ if (bc_op(*ip) == BC_NOT) {
+ *ip = BCINS_AD(cond ? BC_ISF : BC_IST, 0, bc_d(*ip));
+ return bcemit_jmp(fs);
+ }
+ }
+ if (e->k != VNONRELOC) {
+ bcreg_reserve(fs, 1);
+ expr_toreg_nobranch(fs, e, fs->freereg-1);
+ }
+ bcemit_AD(fs, cond ? BC_ISTC : BC_ISFC, NO_REG, e->u.s.info);
+ pc = bcemit_jmp(fs);
+ expr_free(fs, e);
+ return pc;
+}
+
+/* Emit branch on true condition. */
+static void bcemit_branch_t(FuncState *fs, ExpDesc *e)
+{
+ BCPos pc;
+ expr_discharge(fs, e);
+ if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE)
+ pc = NO_JMP; /* Never jump. */
+ else if (e->k == VJMP)
+ invertcond(fs, e), pc = e->u.s.info;
+ else if (e->k == VKFALSE || e->k == VKNIL)
+ expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs);
+ else
+ pc = bcemit_branch(fs, e, 0);
+ jmp_append(fs, &e->f, pc);
+ jmp_tohere(fs, e->t);
+ e->t = NO_JMP;
+}
+
+/* Emit branch on false condition. */
+static void bcemit_branch_f(FuncState *fs, ExpDesc *e)
+{
+ BCPos pc;
+ expr_discharge(fs, e);
+ if (e->k == VKNIL || e->k == VKFALSE)
+ pc = NO_JMP; /* Never jump. */
+ else if (e->k == VJMP)
+ pc = e->u.s.info;
+ else if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE)
+ expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs);
+ else
+ pc = bcemit_branch(fs, e, 1);
+ jmp_append(fs, &e->t, pc);
+ jmp_tohere(fs, e->f);
+ e->f = NO_JMP;
+}
+
+/* -- Bytecode emitter for operators -------------------------------------- */
+
+/* Try constant-folding of arithmetic operators. */
+static int foldarith(BinOpr opr, ExpDesc *e1, ExpDesc *e2)
+{
+ TValue o;
+ lua_Number n;
+ if (!expr_isnumk_nojump(e1) || !expr_isnumk_nojump(e2)) return 0;
+ n = lj_vm_foldarith(expr_numberV(e1), expr_numberV(e2), (int)opr-OPR_ADD);
+ setnumV(&o, n);
+ if (tvisnan(&o) || tvismzero(&o)) return 0; /* Avoid NaN and -0 as consts. */
+ if (LJ_DUALNUM) {
+ int32_t k = lj_num2int(n);
+ if ((lua_Number)k == n) {
+ setintV(&e1->u.nval, k);
+ return 1;
+ }
+ }
+ setnumV(&e1->u.nval, n);
+ return 1;
+}
+
+/* Emit arithmetic operator. */
+static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2)
+{
+ BCReg rb, rc, t;
+ uint32_t op;
+ if (foldarith(opr, e1, e2))
+ return;
+ if (opr == OPR_POW) {
+ op = BC_POW;
+ rc = expr_toanyreg(fs, e2);
+ rb = expr_toanyreg(fs, e1);
+ } else {
+ op = opr-OPR_ADD+BC_ADDVV;
+ /* Must discharge 2nd operand first since VINDEXED might free regs. */
+ expr_toval(fs, e2);
+ if (expr_isnumk(e2) && (rc = const_num(fs, e2)) <= BCMAX_C)
+ op -= BC_ADDVV-BC_ADDVN;
+ else
+ rc = expr_toanyreg(fs, e2);
+ /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */
+ lua_assert(expr_isnumk(e1) || e1->k == VNONRELOC);
+ expr_toval(fs, e1);
+ /* Avoid two consts to satisfy bytecode constraints. */
+ if (expr_isnumk(e1) && !expr_isnumk(e2) &&
+ (t = const_num(fs, e1)) <= BCMAX_B) {
+ rb = rc; rc = t; op -= BC_ADDVV-BC_ADDNV;
+ } else {
+ rb = expr_toanyreg(fs, e1);
+ }
+ }
+ /* Using expr_free might cause asserts if the order is wrong. */
+ if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--;
+ if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--;
+ e1->u.s.info = bcemit_ABC(fs, op, 0, rb, rc);
+ e1->k = VRELOCABLE;
+}
+
+/* Emit comparison operator. */
+static void bcemit_comp(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2)
+{
+ ExpDesc *eret = e1;
+ BCIns ins;
+ expr_toval(fs, e1);
+ if (opr == OPR_EQ || opr == OPR_NE) {
+ BCOp op = opr == OPR_EQ ? BC_ISEQV : BC_ISNEV;
+ BCReg ra;
+ if (expr_isk(e1)) { e1 = e2; e2 = eret; } /* Need constant in 2nd arg. */
+ ra = expr_toanyreg(fs, e1); /* First arg must be in a reg. */
+ expr_toval(fs, e2);
+ switch (e2->k) {
+ case VKNIL: case VKFALSE: case VKTRUE:
+ ins = BCINS_AD(op+(BC_ISEQP-BC_ISEQV), ra, const_pri(e2));
+ break;
+ case VKSTR:
+ ins = BCINS_AD(op+(BC_ISEQS-BC_ISEQV), ra, const_str(fs, e2));
+ break;
+ case VKNUM:
+ ins = BCINS_AD(op+(BC_ISEQN-BC_ISEQV), ra, const_num(fs, e2));
+ break;
+ default:
+ ins = BCINS_AD(op, ra, expr_toanyreg(fs, e2));
+ break;
+ }
+ } else {
+ uint32_t op = opr-OPR_LT+BC_ISLT;
+ BCReg ra, rd;
+ if ((op-BC_ISLT) & 1) { /* GT -> LT, GE -> LE */
+ e1 = e2; e2 = eret; /* Swap operands. */
+ op = ((op-BC_ISLT)^3)+BC_ISLT;
+ }
+ rd = expr_toanyreg(fs, e2);
+ ra = expr_toanyreg(fs, e1);
+ ins = BCINS_AD(op, ra, rd);
+ }
+ /* Using expr_free might cause asserts if the order is wrong. */
+ if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--;
+ if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--;
+ bcemit_INS(fs, ins);
+ eret->u.s.info = bcemit_jmp(fs);
+ eret->k = VJMP;
+}
+
+/* Fixup left side of binary operator. */
+static void bcemit_binop_left(FuncState *fs, BinOpr op, ExpDesc *e)
+{
+ if (op == OPR_AND) {
+ bcemit_branch_t(fs, e);
+ } else if (op == OPR_OR) {
+ bcemit_branch_f(fs, e);
+ } else if (op == OPR_CONCAT) {
+ expr_tonextreg(fs, e);
+ } else if (op == OPR_EQ || op == OPR_NE) {
+ if (!expr_isk_nojump(e)) expr_toanyreg(fs, e);
+ } else {
+ if (!expr_isnumk_nojump(e)) expr_toanyreg(fs, e);
+ }
+}
+
+/* Emit binary operator. */
+static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2)
+{
+ if (op <= OPR_POW) {
+ bcemit_arith(fs, op, e1, e2);
+ } else if (op == OPR_AND) {
+ lua_assert(e1->t == NO_JMP); /* List must be closed. */
+ expr_discharge(fs, e2);
+ jmp_append(fs, &e2->f, e1->f);
+ *e1 = *e2;
+ } else if (op == OPR_OR) {
+ lua_assert(e1->f == NO_JMP); /* List must be closed. */
+ expr_discharge(fs, e2);
+ jmp_append(fs, &e2->t, e1->t);
+ *e1 = *e2;
+ } else if (op == OPR_CONCAT) {
+ expr_toval(fs, e2);
+ if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) {
+ lua_assert(e1->u.s.info == bc_b(*bcptr(fs, e2))-1);
+ expr_free(fs, e1);
+ setbc_b(bcptr(fs, e2), e1->u.s.info);
+ e1->u.s.info = e2->u.s.info;
+ } else {
+ expr_tonextreg(fs, e2);
+ expr_free(fs, e2);
+ expr_free(fs, e1);
+ e1->u.s.info = bcemit_ABC(fs, BC_CAT, 0, e1->u.s.info, e2->u.s.info);
+ }
+ e1->k = VRELOCABLE;
+ } else {
+ lua_assert(op == OPR_NE || op == OPR_EQ ||
+ op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT);
+ bcemit_comp(fs, op, e1, e2);
+ }
+}
+
+/* Emit unary operator. */
+static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e)
+{
+ if (op == BC_NOT) {
+ /* Swap true and false lists. */
+ { BCPos temp = e->f; e->f = e->t; e->t = temp; }
+ jmp_dropval(fs, e->f);
+ jmp_dropval(fs, e->t);
+ expr_discharge(fs, e);
+ if (e->k == VKNIL || e->k == VKFALSE) {
+ e->k = VKTRUE;
+ return;
+ } else if (expr_isk(e) || (LJ_HASFFI && e->k == VKCDATA)) {
+ e->k = VKFALSE;
+ return;
+ } else if (e->k == VJMP) {
+ invertcond(fs, e);
+ return;
+ } else if (e->k == VRELOCABLE) {
+ bcreg_reserve(fs, 1);
+ setbc_a(bcptr(fs, e), fs->freereg-1);
+ e->u.s.info = fs->freereg-1;
+ e->k = VNONRELOC;
+ } else {
+ lua_assert(e->k == VNONRELOC);
+ }
+ } else {
+ lua_assert(op == BC_UNM || op == BC_LEN);
+ if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */
+#if LJ_HASFFI
+ if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. */
+ GCcdata *cd = cdataV(&e->u.nval);
+ int64_t *p = (int64_t *)cdataptr(cd);
+ if (cd->typeid == CTID_COMPLEX_DOUBLE)
+ p[1] ^= (int64_t)U64x(80000000,00000000);
+ else
+ *p = -*p;
+ return;
+ } else
+#endif
+ if (expr_isnumk(e) && !expr_numiszero(e)) { /* Avoid folding to -0. */
+ TValue *o = expr_numtv(e);
+ if (tvisint(o)) {
+ int32_t k = intV(o);
+ if (k == -k)
+ setnumV(o, -(lua_Number)k);
+ else
+ setintV(o, -k);
+ return;
+ } else {
+ o->u64 ^= U64x(80000000,00000000);
+ return;
+ }
+ }
+ }
+ expr_toanyreg(fs, e);
+ }
+ expr_free(fs, e);
+ e->u.s.info = bcemit_AD(fs, op, 0, e->u.s.info);
+ e->k = VRELOCABLE;
+}
+
+/* -- Lexer support ------------------------------------------------------- */
+
+/* Check and consume optional token. */
+static int lex_opt(LexState *ls, LexToken tok)
+{
+ if (ls->token == tok) {
+ lj_lex_next(ls);
+ return 1;
+ }
+ return 0;
+}
+
+/* Check and consume token. */
+static void lex_check(LexState *ls, LexToken tok)
+{
+ if (ls->token != tok)
+ err_token(ls, tok);
+ lj_lex_next(ls);
+}
+
+/* Check for matching token. */
+static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line)
+{
+ if (!lex_opt(ls, what)) {
+ if (line == ls->linenumber) {
+ err_token(ls, what);
+ } else {
+ const char *swhat = lj_lex_token2str(ls, what);
+ const char *swho = lj_lex_token2str(ls, who);
+ lj_lex_error(ls, ls->token, LJ_ERR_XMATCH, swhat, swho, line);
+ }
+ }
+}
+
+/* Check for string token. */
+static GCstr *lex_str(LexState *ls)
+{
+ GCstr *s;
+ if (ls->token != TK_name)
+ err_token(ls, TK_name);
+ s = strV(&ls->tokenval);
+ lj_lex_next(ls);
+ return s;
+}
+
+/* -- Variable handling --------------------------------------------------- */
+
+#define var_get(ls, fs, i) ((ls)->vstack[(fs)->varmap[(i)]])
+
+/* Define a new local variable. */
+static void var_new(LexState *ls, BCReg n, GCstr *name)
+{
+ FuncState *fs = ls->fs;
+ MSize vtop = ls->vtop;
+ checklimit(fs, fs->nactvar+n, LJ_MAX_LOCVAR, "local variables");
+ if (LJ_UNLIKELY(vtop >= ls->sizevstack)) {
+ if (ls->sizevstack >= LJ_MAX_VSTACK)
+ lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK);
+ lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo);
+ }
+ lua_assert((uintptr_t)name < VARNAME__MAX ||
+ lj_tab_getstr(fs->kt, name) != NULL);
+ /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */
+ setgcref(ls->vstack[vtop].name, obj2gco(name));
+ fs->varmap[fs->nactvar+n] = (uint16_t)vtop;
+ ls->vtop = vtop+1;
+}
+
+#define var_new_lit(ls, n, v) \
+ var_new(ls, (n), lj_parse_keepstr(ls, "" v, sizeof(v)-1))
+
+#define var_new_fixed(ls, n, vn) \
+ var_new(ls, (n), (GCstr *)(uintptr_t)(vn))
+
+/* Add local variables. */
+static void var_add(LexState *ls, BCReg nvars)
+{
+ FuncState *fs = ls->fs;
+ fs->nactvar = (uint8_t)(fs->nactvar + nvars);
+ for (; nvars; nvars--)
+ var_get(ls, fs, fs->nactvar - nvars).startpc = fs->pc;
+}
+
+/* Remove local variables. */
+static void var_remove(LexState *ls, BCReg tolevel)
+{
+ FuncState *fs = ls->fs;
+ while (fs->nactvar > tolevel)
+ var_get(ls, fs, --fs->nactvar).endpc = fs->pc;
+}
+
+/* Lookup local variable name. */
+static BCReg var_lookup_local(FuncState *fs, GCstr *n)
+{
+ int i;
+ for (i = fs->nactvar-1; i >= 0; i--) {
+ if (n == strref(var_get(fs->ls, fs, i).name))
+ return (BCReg)i;
+ }
+ return (BCReg)-1; /* Not found. */
+}
+
+/* Lookup or add upvalue index. */
+static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e)
+{
+ MSize i, n = fs->nuv;
+ for (i = 0; i < n; i++)
+ if (fs->uvloc[i].vidx == vidx)
+ return i; /* Already exists. */
+ /* Otherwise create a new one. */
+ checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues");
+ lua_assert(e->k == VLOCAL || e->k == VUPVAL);
+ fs->uvloc[n].vidx = (uint16_t)vidx;
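+ /* slot: register of the enclosing local (high bit set) or index of the enclosing upvalue. */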
+ fs->uvloc[n].slot = (uint16_t)(e->u.s.info | (e->k == VLOCAL ? 0x8000 : 0));
+ fs->nuv = n+1;
+ return n;
+}
+
+/* Forward declaration. */
+static void scope_uvmark(FuncState *fs, BCReg level);
+
+/* Recursively lookup variables in enclosing functions. */
+static MSize var_lookup_(FuncState *fs, GCstr *name, ExpDesc *e, int first)
+{
+ if (fs) {
+ BCReg reg = var_lookup_local(fs, name);
+ if ((int32_t)reg >= 0) { /* Local in this function? */
+ expr_init(e, VLOCAL, reg);
+ if (!first)
+ scope_uvmark(fs, reg); /* Scope now has an upvalue. */
+ return (MSize)fs->varmap[reg];
+ } else {
+ MSize vidx = var_lookup_(fs->prev, name, e, 0); /* Var in outer func? */
+ if ((int32_t)vidx >= 0) { /* Yes, make it an upvalue here. */
+ e->u.s.info = (uint8_t)var_lookup_uv(fs, vidx, e);
+ e->k = VUPVAL;
+ return vidx;
+ }
+ }
+ } else { /* Not found in any function, must be a global. */
+ expr_init(e, VGLOBAL, 0);
+ e->u.sval = name;
+ }
+ return (MSize)-1; /* Global. */
+}
+
+/* Lookup variable name. */
+#define var_lookup(ls, e) \
+ var_lookup_((ls)->fs, lex_str(ls), (e), 1)
+
+/* -- Function state management ------------------------------------------- */
+
+/* Fixup bytecode for prototype. */
+static void fs_fixup_bc(FuncState *fs, GCproto *pt, BCIns *bc, MSize n)
+{
+ BCInsLine *base = fs->bcbase;
+ MSize i;
+ pt->sizebc = n;
+ bc[0] = BCINS_AD((fs->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF,
+ fs->framesize, 0);
+ for (i = 1; i < n; i++)
+ bc[i] = base[i].ins;
+}
+
+/* Fixup constants for prototype. */
+static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr)
+{
+ GCtab *kt;
+ TValue *array;
+ Node *node;
+ MSize i, hmask;
+ checklimitgt(fs, fs->nkn, BCMAX_D+1, "constants");
+ checklimitgt(fs, fs->nkgc, BCMAX_D+1, "constants");
+ setmref(pt->k, kptr);
+ pt->sizekn = fs->nkn;
+ pt->sizekgc = fs->nkgc;
+ kt = fs->kt;
+ array = tvref(kt->array);
+ for (i = 0; i < kt->asize; i++)
+ if (tvhaskslot(&array[i])) {
+ TValue *tv = &((TValue *)kptr)[tvkslot(&array[i])];
+ if (LJ_DUALNUM)
+ setintV(tv, (int32_t)i);
+ else
+ setnumV(tv, (lua_Number)i);
+ }
+ node = noderef(kt->node);
+ hmask = kt->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (tvhaskslot(&n->val)) {
+ ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val);
+ lua_assert(!tvisint(&n->key));
+ if (tvisnum(&n->key)) {
+ TValue *tv = &((TValue *)kptr)[kidx];
+ if (LJ_DUALNUM) {
+ lua_Number nn = numV(&n->key);
+ int32_t k = lj_num2int(nn);
+ lua_assert(!tvismzero(&n->key));
+ if ((lua_Number)k == nn)
+ setintV(tv, k);
+ else
+ *tv = n->key;
+ } else {
+ *tv = n->key;
+ }
+ } else {
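+ /* GC constants are indexed downwards from kptr, so ~kidx addresses the GCRef array. */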
+ GCobj *o = gcV(&n->key);
+ setgcref(((GCRef *)kptr)[~kidx], o);
+ lj_gc_objbarrier(fs->L, pt, o);
+ }
+ }
+ }
+}
+
+/* Fixup upvalues for prototype. */
+static void fs_fixup_uv(FuncState *fs, GCproto *pt, uint16_t *uv)
+{
+ MSize i, n = fs->nuv;
+ setmref(pt->uv, uv);
+ pt->sizeuv = n;
+ for (i = 0; i < n; i++)
+ uv[i] = fs->uvloc[i].slot;
+}
+
+#ifndef LUAJIT_DISABLE_DEBUGINFO
+/* Prepare lineinfo for prototype. */
+static size_t fs_prep_line(FuncState *fs, BCLine numline)
+{
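+ /* One line delta per instruction (minus the header); entries are 1, 2 or 4 bytes wide. */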
+ return (fs->pc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2);
+}
+
+/* Fixup lineinfo for prototype. */
+static void fs_fixup_line(FuncState *fs, GCproto *pt,
+ void *lineinfo, BCLine numline)
+{
+ BCInsLine *base = fs->bcbase + 1;
+ BCLine first = fs->linedefined;
+ MSize i = 0, n = fs->pc-1;
+ pt->firstline = fs->linedefined;
+ pt->numline = numline;
+ setmref(pt->lineinfo, lineinfo);
+ if (LJ_LIKELY(numline < 256)) {
+ uint8_t *li = (uint8_t *)lineinfo;
+ do {
+ BCLine delta = base[i].line - first;
+ lua_assert(delta >= 0 && delta < 256);
+ li[i] = (uint8_t)delta;
+ } while (++i < n);
+ } else if (LJ_LIKELY(numline < 65536)) {
+ uint16_t *li = (uint16_t *)lineinfo;
+ do {
+ BCLine delta = base[i].line - first;
+ lua_assert(delta >= 0 && delta < 65536);
+ li[i] = (uint16_t)delta;
+ } while (++i < n);
+ } else {
+ uint32_t *li = (uint32_t *)lineinfo;
+ do {
+ BCLine delta = base[i].line - first;
+ lua_assert(delta >= 0);
+ li[i] = (uint32_t)delta;
+ } while (++i < n);
+ }
+}
+
+/* Resize buffer if needed. */
+static LJ_NOINLINE void fs_buf_resize(LexState *ls, MSize len)
+{
+ MSize sz = ls->sb.sz * 2;
+ while (ls->sb.n + len > sz) sz = sz * 2;
+ lj_str_resizebuf(ls->L, &ls->sb, sz);
+}
+
+static LJ_AINLINE void fs_buf_need(LexState *ls, MSize len)
+{
+ if (LJ_UNLIKELY(ls->sb.n + len > ls->sb.sz))
+ fs_buf_resize(ls, len);
+}
+
+/* Add string to buffer. */
+static void fs_buf_str(LexState *ls, const char *str, MSize len)
+{
+ char *p = ls->sb.buf + ls->sb.n;
+ MSize i;
+ ls->sb.n += len;
+ for (i = 0; i < len; i++) p[i] = str[i];
+}
+
+/* Add ULEB128 value to buffer. */
+static void fs_buf_uleb128(LexState *ls, uint32_t v)
+{
+ MSize n = ls->sb.n;
+ uint8_t *p = (uint8_t *)ls->sb.buf;
+ for (; v >= 0x80; v >>= 7)
+ p[n++] = (uint8_t)((v & 0x7f) | 0x80);
+ p[n++] = (uint8_t)v;
+ ls->sb.n = n;
+}
+
+/* Prepare variable info for prototype. */
+static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar)
+{
+ VarInfo *vstack = fs->ls->vstack;
+ MSize i, n;
+ BCPos lastpc;
+ lj_str_resetbuf(&ls->sb); /* Copy to temp. string buffer. */
+ /* Store upvalue names. */
+ for (i = 0, n = fs->nuv; i < n; i++) {
+ GCstr *s = strref(vstack[fs->uvloc[i].vidx].name);
+ MSize len = s->len+1;
+ fs_buf_need(ls, len);
+ fs_buf_str(ls, strdata(s), len);
+ }
+ *ofsvar = ls->sb.n;
+ vstack += fs->vbase;
+ lastpc = 0;
+ /* Store local variable names and compressed ranges. */
+ for (i = 0, n = ls->vtop - fs->vbase; i < n; i++) {
+ GCstr *s = strref(vstack[i].name);
+ BCPos startpc = vstack[i].startpc, endpc = vstack[i].endpc;
+ if ((uintptr_t)s < VARNAME__MAX) {
+ fs_buf_need(ls, 1 + 2*5);
+ ls->sb.buf[ls->sb.n++] = (uint8_t)(uintptr_t)s;
+ } else {
+ MSize len = s->len+1;
+ fs_buf_need(ls, len + 2*5);
+ fs_buf_str(ls, strdata(s), len);
+ }
+ fs_buf_uleb128(ls, startpc-lastpc);
+ fs_buf_uleb128(ls, endpc-startpc);
+ lastpc = startpc;
+ }
+ fs_buf_need(ls, 1);
+ ls->sb.buf[ls->sb.n++] = '\0'; /* Terminator for varinfo. */
+ return ls->sb.n;
+}
+
+/* Fixup variable info for prototype. */
+static void fs_fixup_var(LexState *ls, GCproto *pt, uint8_t *p, size_t ofsvar)
+{
+ setmref(pt->uvinfo, p);
+ setmref(pt->varinfo, (char *)p + ofsvar);
+ memcpy(p, ls->sb.buf, ls->sb.n); /* Copy from temp. string buffer. */
+}
+#else
+
+/* Initialize with empty debug info, if disabled. */
+#define fs_prep_line(fs, numline) (UNUSED(numline), 0)
+#define fs_fixup_line(fs, pt, li, numline) \
+ pt->firstline = pt->numline = 0, setmref((pt)->lineinfo, NULL)
+#define fs_prep_var(ls, fs, ofsvar) (UNUSED(ofsvar), 0)
+#define fs_fixup_var(ls, pt, p, ofsvar) \
+ setmref((pt)->uvinfo, NULL), setmref((pt)->varinfo, NULL)
+
+#endif
+
+/* Check if bytecode op returns. */
+static int bcopisret(BCOp op)
+{
+ switch (op) {
+ case BC_CALLMT: case BC_CALLT:
+ case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Fixup return instruction for prototype. */
+static void fs_fixup_ret(FuncState *fs)
+{
+ BCPos lastpc = fs->pc;
+ if (lastpc <= fs->lasttarget || !bcopisret(bc_op(fs->bcbase[lastpc-1].ins))) {
+ if (fs->flags & PROTO_CHILD)
+ bcemit_AJ(fs, BC_UCLO, 0, 0);
+ bcemit_AD(fs, BC_RET0, 0, 1); /* Need final return. */
+ }
+ /* May need to fixup returns encoded before first function was created. */
+ if (fs->flags & PROTO_FIXUP_RETURN) {
+ BCPos pc;
+ for (pc = 0; pc < lastpc; pc++) {
+ BCIns ins = fs->bcbase[pc].ins;
+ BCPos offset;
+ switch (bc_op(ins)) {
+ case BC_CALLMT: case BC_CALLT:
+ case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
+ offset = bcemit_INS(fs, ins)-(pc+1)+BCBIAS_J; /* Copy return ins. */
+ if (offset > BCMAX_D)
+ err_syntax(fs->ls, LJ_ERR_XFIXUP);
+ /* Replace with UCLO plus branch. */
+ fs->bcbase[pc].ins = BCINS_AD(BC_UCLO, 0, offset);
+ break;
+ case BC_UCLO:
+ return; /* We're done. */
+ default:
+ break;
+ }
+ }
+ }
+}
+
+/* Finish a FuncState and return the new prototype. */
+static GCproto *fs_finish(LexState *ls, BCLine line)
+{
+ lua_State *L = ls->L;
+ FuncState *fs = ls->fs;
+ BCLine numline = line - fs->linedefined;
+ size_t sizept, ofsk, ofsuv, ofsli, ofsdbg, ofsvar;
+ GCproto *pt;
+
+ /* Apply final fixups. */
+ lua_assert(fs->bl == NULL);
+ fs_fixup_ret(fs);
+ var_remove(ls, 0);
+
+ /* Calculate total size of prototype including all colocated arrays. */
+ sizept = sizeof(GCproto) + fs->pc*sizeof(BCIns) + fs->nkgc*sizeof(GCRef);
+ sizept = (sizept + sizeof(TValue)-1) & ~(sizeof(TValue)-1);
+ ofsk = sizept; sizept += fs->nkn*sizeof(TValue);
+ ofsuv = sizept; sizept += ((fs->nuv+1)&~1)*2;
+ ofsli = sizept; sizept += fs_prep_line(fs, numline);
+ ofsdbg = sizept; sizept += fs_prep_var(ls, fs, &ofsvar);
+
+ /* Allocate prototype and initialize its fields. */
+ pt = (GCproto *)lj_mem_newgco(L, (MSize)sizept);
+ pt->gct = ~LJ_TPROTO;
+ pt->sizept = (MSize)sizept;
+ pt->trace = 0;
+ pt->flags = (uint8_t)(fs->flags & ~(PROTO_HAS_RETURN|PROTO_FIXUP_RETURN));
+ pt->numparams = fs->numparams;
+ pt->framesize = fs->framesize;
+ setgcref(pt->chunkname, obj2gco(ls->chunkname));
+
+ /* Close potentially uninitialized gap between bc and kgc. */
+ *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(fs->nkgc+1)) = 0;
+ fs_fixup_bc(fs, pt, (BCIns *)((char *)pt + sizeof(GCproto)), fs->pc);
+ fs_fixup_k(fs, pt, (void *)((char *)pt + ofsk));
+ fs_fixup_uv(fs, pt, (uint16_t *)((char *)pt + ofsuv));
+ fs_fixup_line(fs, pt, (void *)((char *)pt + ofsli), numline);
+ fs_fixup_var(ls, pt, (uint8_t *)((char *)pt + ofsdbg), ofsvar);
+
+ lj_vmevent_send(L, BC,
+ setprotoV(L, L->top++, pt);
+ );
+
+ L->top--; /* Pop table of constants. */
+ ls->vtop = fs->vbase; /* Reset variable stack. */
+ ls->fs = fs->prev;
+ lua_assert(ls->fs != NULL || ls->token == TK_eof);
+ return pt;
+}
+
+/* Initialize a new FuncState. */
+static void fs_init(LexState *ls, FuncState *fs)
+{
+ lua_State *L = ls->L;
+ fs->prev = ls->fs; ls->fs = fs; /* Append to list. */
+ fs->ls = ls;
+ fs->vbase = ls->vtop;
+ fs->L = L;
+ fs->pc = 0;
+ fs->lasttarget = 0;
+ fs->jpc = NO_JMP;
+ fs->freereg = 0;
+ fs->nkgc = 0;
+ fs->nkn = 0;
+ fs->nactvar = 0;
+ fs->nuv = 0;
+ fs->bl = NULL;
+ fs->flags = 0;
+ fs->framesize = 1; /* Minimum frame size. */
+ fs->kt = lj_tab_new(L, 0, 0);
+ /* Anchor table of constants in stack to avoid being collected. */
+ settabV(L, L->top, fs->kt);
+ incr_top(L);
+}
+
+/* -- Expressions --------------------------------------------------------- */
+
+/* Forward declaration. */
+static void expr(LexState *ls, ExpDesc *v);
+
+/* Return string expression. */
+static void expr_str(LexState *ls, ExpDesc *e)
+{
+ expr_init(e, VKSTR, 0);
+ e->u.sval = lex_str(ls);
+}
+
+/* Return index expression. */
+static void expr_index(FuncState *fs, ExpDesc *t, ExpDesc *e)
+{
+ /* Already called: expr_toval(fs, e). */
+ t->k = VINDEXED;
+ if (expr_isnumk(e)) {
+#if LJ_DUALNUM
+ if (tvisint(expr_numtv(e))) {
+ int32_t k = intV(expr_numtv(e));
+ if (checku8(k)) {
+ t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */
+ return;
+ }
+ }
+#else
+ lua_Number n = expr_numberV(e);
+ int32_t k = lj_num2int(n);
+ if (checku8(k) && n == (lua_Number)k) {
+ t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */
+ return;
+ }
+#endif
+ } else if (expr_isstrk(e)) {
+ BCReg idx = const_str(fs, e);
+ if (idx <= BCMAX_C) {
+ t->u.s.aux = ~idx; /* -256..-1: const string key */
+ return;
+ }
+ }
+ t->u.s.aux = expr_toanyreg(fs, e); /* 0..255: register */
+}
+
+/* Parse index expression with named field. */
+static void expr_field(LexState *ls, ExpDesc *v)
+{
+ FuncState *fs = ls->fs;
+ ExpDesc key;
+ expr_toanyreg(fs, v);
+ lj_lex_next(ls); /* Skip dot or colon. */
+ expr_str(ls, &key);
+ expr_index(fs, v, &key);
+}
+
+/* Parse index expression with brackets. */
+static void expr_bracket(LexState *ls, ExpDesc *v)
+{
+ lj_lex_next(ls); /* Skip '['. */
+ expr(ls, v);
+ expr_toval(ls->fs, v);
+ lex_check(ls, ']');
+}
+
+/* Get value of constant expression. */
+static void expr_kvalue(TValue *v, ExpDesc *e)
+{
+ if (e->k <= VKTRUE) {
+ setitype(v, ~(uint32_t)e->k);
+ } else if (e->k == VKSTR) {
+ setgcref(v->gcr, obj2gco(e->u.sval));
+ setitype(v, LJ_TSTR);
+ } else {
+ lua_assert(tvisnumber(expr_numtv(e)));
+ *v = *expr_numtv(e);
+ }
+}
+
+/* Parse table constructor expression. */
+static void expr_table(LexState *ls, ExpDesc *e)
+{
+ FuncState *fs = ls->fs;
+ BCLine line = ls->linenumber;
+ GCtab *t = NULL;
+ int vcall = 0, needarr = 0;
+ int32_t narr = 1; /* First array index. */
+ uint32_t nhash = 0; /* Number of hash entries. */
+ BCReg freg = fs->freereg;
+ BCPos pc = bcemit_AD(fs, BC_TNEW, freg, 0);
+ expr_init(e, VNONRELOC, freg);
+ bcreg_reserve(fs, 1);
+ freg++;
+ lex_check(ls, '{');
+ while (ls->token != '}') {
+ ExpDesc key, val;
+ vcall = 0;
+ if (ls->token == '[') {
+ expr_bracket(ls, &key); /* Already calls expr_toval. */
+ if (!expr_isk(&key)) expr_index(fs, e, &key);
+ if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++;
+ lex_check(ls, '=');
+ } else if (ls->token == TK_name && lj_lex_lookahead(ls) == '=') {
+ expr_str(ls, &key);
+ lex_check(ls, '=');
+ nhash++;
+ } else {
+ expr_init(&key, VKNUM, 0);
+ setintV(&key.u.nval, narr);
+ narr++;
+ needarr = vcall = 1;
+ }
+ expr(ls, &val);
+ if (expr_isk_nojump(&val) && expr_isk(&key) && key.k != VKNIL) {
+ TValue k;
+ if (!t) { /* Create template table on demand. */
+ BCReg kidx;
+ t = lj_tab_new(fs->L, 0, 0);
+ kidx = const_gc(fs, obj2gco(t), LJ_TTAB);
+ fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx);
+ }
+ vcall = 0;
+ expr_kvalue(&k, &key);
+ expr_kvalue(lj_tab_set(fs->L, t, &k), &val);
+ lj_gc_anybarriert(fs->L, t);
+ } else {
+ if (val.k != VCALL) { expr_toanyreg(fs, &val); vcall = 0; }
+ if (expr_isk(&key)) expr_index(fs, e, &key);
+ bcemit_store(fs, e, &val);
+ }
+ fs->freereg = freg;
+ if (!lex_opt(ls, ',') && !lex_opt(ls, ';')) break;
+ }
+ lex_match(ls, '}', '{', line);
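+ /* The last positional initializer was an open call or vararg: turn the final store into TSETM. */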
+ if (vcall) {
+ BCInsLine *ilp = &fs->bcbase[fs->pc-1];
+ ExpDesc en;
+ lua_assert(bc_a(ilp->ins) == freg &&
+ bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB));
+ expr_init(&en, VKNUM, 0);
+ en.u.nval.u32.lo = narr-1;
+ en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */
+ if (narr > 256) { fs->pc--; ilp--; }
+ ilp->ins = BCINS_AD(BC_TSETM, freg, const_num(fs, &en));
+ setbc_b(&ilp[-1].ins, 0);
+ }
+ if (pc == fs->pc-1) { /* Make expr relocable if possible. */
+ e->u.s.info = pc;
+ fs->freereg--;
+ e->k = VRELOCABLE;
+ } else {
+ e->k = VNONRELOC; /* May have been changed by expr_index. */
+ }
+ if (!t) { /* Construct TNEW RD: hhhhhaaaaaaaaaaa. */
+ BCIns *ip = &fs->bcbase[pc].ins;
+ if (!needarr) narr = 0;
+ else if (narr < 3) narr = 3;
+ else if (narr > 0x7ff) narr = 0x7ff;
+ setbc_d(ip, (uint32_t)narr|(hsize2hbits(nhash)<<11));
+ }
+}
+
+/* Parse function parameters. */
+static BCReg parse_params(LexState *ls, int needself)
+{
+ FuncState *fs = ls->fs;
+ BCReg nparams = 0;
+ lex_check(ls, '(');
+ if (needself)
+ var_new_lit(ls, nparams++, "self");
+ if (ls->token != ')') {
+ do {
+ if (ls->token == TK_name) {
+ var_new(ls, nparams++, lex_str(ls));
+ } else if (ls->token == TK_dots) {
+ lj_lex_next(ls);
+ fs->flags |= PROTO_VARARG;
+ break;
+ } else {
+ err_syntax(ls, LJ_ERR_XPARAM);
+ }
+ } while (lex_opt(ls, ','));
+ }
+ var_add(ls, nparams);
+ lua_assert(fs->nactvar == nparams);
+ bcreg_reserve(fs, nparams);
+ lex_check(ls, ')');
+ return nparams;
+}
+
+/* Forward declaration. */
+static void parse_chunk(LexState *ls);
+
+/* Parse body of a function. */
+static void parse_body(LexState *ls, ExpDesc *e, int needself, BCLine line)
+{
+ FuncState fs, *pfs = ls->fs;
+ GCproto *pt;
+ ptrdiff_t oldbase = pfs->bcbase - ls->bcstack;
+ fs_init(ls, &fs);
+ fs.linedefined = line;
+ fs.numparams = (uint8_t)parse_params(ls, needself);
+ fs.bcbase = pfs->bcbase + pfs->pc;
+ fs.bclim = pfs->bclim - pfs->pc;
+ bcemit_AD(&fs, BC_FUNCF, 0, 0); /* Placeholder. */
+ parse_chunk(ls);
+ if (ls->token != TK_end) lex_match(ls, TK_end, TK_function, line);
+ pt = fs_finish(ls, (ls->lastline = ls->linenumber));
+ pfs->bcbase = ls->bcstack + oldbase; /* May have been reallocated. */
+ pfs->bclim = (BCPos)(ls->sizebcstack - oldbase);
+ /* Store new prototype in the constant array of the parent. */
+ expr_init(e, VRELOCABLE,
+ bcemit_AD(pfs, BC_FNEW, 0, const_gc(pfs, obj2gco(pt), LJ_TPROTO)));
+#if LJ_HASFFI
+ pfs->flags |= (fs.flags & PROTO_FFI);
+#endif
+ if (!(pfs->flags & PROTO_CHILD)) {
+ if (pfs->flags & PROTO_HAS_RETURN)
+ pfs->flags |= PROTO_FIXUP_RETURN;
+ pfs->flags |= PROTO_CHILD;
+ }
+ lj_lex_next(ls);
+}
+
+/* Parse expression list. Last expression is left open. */
+static BCReg expr_list(LexState *ls, ExpDesc *v)
+{
+ BCReg n = 1;
+ expr(ls, v);
+ while (lex_opt(ls, ',')) {
+ expr_tonextreg(ls->fs, v);
+ expr(ls, v);
+ n++;
+ }
+ return n;
+}
+
+/* Parse function argument list. */
+static void parse_args(LexState *ls, ExpDesc *e)
+{
+ FuncState *fs = ls->fs;
+ ExpDesc args;
+ BCIns ins;
+ BCReg base;
+ BCLine line = ls->linenumber;
+ if (ls->token == '(') {
+ if (line != ls->lastline)
+ err_syntax(ls, LJ_ERR_XAMBIG);
+ lj_lex_next(ls);
+ if (ls->token == ')') { /* f(). */
+ args.k = VVOID;
+ } else {
+ expr_list(ls, &args);
+ if (args.k == VCALL) /* f(a, b, g()) or f(a, b, ...). */
+ setbc_b(bcptr(fs, &args), 0); /* Pass on multiple results. */
+ }
+ lex_match(ls, ')', '(', line);
+ } else if (ls->token == '{') {
+ expr_table(ls, &args);
+ } else if (ls->token == TK_string) {
+ expr_init(&args, VKSTR, 0);
+ args.u.sval = strV(&ls->tokenval);
+ lj_lex_next(ls);
+ } else {
+ err_syntax(ls, LJ_ERR_XFUNARG);
+ return; /* Silence compiler. */
+ }
+ lua_assert(e->k == VNONRELOC);
+ base = e->u.s.info; /* Base register for call. */
+ if (args.k == VCALL) {
+ ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1);
+ } else {
+ if (args.k != VVOID)
+ expr_tonextreg(fs, &args);
+ ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base);
+ }
+ expr_init(e, VCALL, bcemit_INS(fs, ins));
+ e->u.s.aux = base;
+ fs->bcbase[fs->pc - 1].line = line;
+ fs->freereg = base+1; /* Leave one result by default. */
+}
+
+/* Parse primary expression. */
+static void expr_primary(LexState *ls, ExpDesc *v)
+{
+ FuncState *fs = ls->fs;
+ /* Parse prefix expression. */
+ if (ls->token == '(') {
+ BCLine line = ls->linenumber;
+ lj_lex_next(ls);
+ expr(ls, v);
+ lex_match(ls, ')', '(', line);
+ expr_discharge(ls->fs, v);
+ } else if (ls->token == TK_name) {
+ var_lookup(ls, v);
+ } else {
+ err_syntax(ls, LJ_ERR_XSYMBOL);
+ }
+ for (;;) { /* Parse multiple expression suffixes. */
+ if (ls->token == '.') {
+ expr_field(ls, v);
+ } else if (ls->token == '[') {
+ ExpDesc key;
+ expr_toanyreg(fs, v);
+ expr_bracket(ls, &key);
+ expr_index(fs, v, &key);
+ } else if (ls->token == ':') {
+ ExpDesc key;
+ lj_lex_next(ls);
+ expr_str(ls, &key);
+ bcemit_method(fs, v, &key);
+ parse_args(ls, v);
+ } else if (ls->token == '(' || ls->token == TK_string || ls->token == '{') {
+ expr_tonextreg(fs, v);
+ parse_args(ls, v);
+ } else {
+ break;
+ }
+ }
+}
+
+/* Parse simple expression. */
+static void expr_simple(LexState *ls, ExpDesc *v)
+{
+ switch (ls->token) {
+ case TK_number:
+ expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokenval)) ? VKCDATA : VKNUM, 0);
+ copyTV(ls->L, &v->u.nval, &ls->tokenval);
+ break;
+ case TK_string:
+ expr_init(v, VKSTR, 0);
+ v->u.sval = strV(&ls->tokenval);
+ break;
+ case TK_nil:
+ expr_init(v, VKNIL, 0);
+ break;
+ case TK_true:
+ expr_init(v, VKTRUE, 0);
+ break;
+ case TK_false:
+ expr_init(v, VKFALSE, 0);
+ break;
+ case TK_dots: { /* Vararg. */
+ FuncState *fs = ls->fs;
+ BCReg base;
+ checkcond(ls, fs->flags & PROTO_VARARG, LJ_ERR_XDOTS);
+ bcreg_reserve(fs, 1);
+ base = fs->freereg-1;
+ expr_init(v, VCALL, bcemit_ABC(fs, BC_VARG, base, 2, fs->numparams));
+ v->u.s.aux = base;
+ break;
+ }
+ case '{': /* Table constructor. */
+ expr_table(ls, v);
+ return;
+ case TK_function:
+ lj_lex_next(ls);
+ parse_body(ls, v, 0, ls->linenumber);
+ return;
+ default:
+ expr_primary(ls, v);
+ return;
+ }
+ lj_lex_next(ls);
+}
+
+/* Manage syntactic levels to avoid blowing up the stack. */
+static void synlevel_begin(LexState *ls)
+{
+ if (++ls->level >= LJ_MAX_XLEVEL)
+ lj_lex_error(ls, 0, LJ_ERR_XLEVELS);
+}
+
+#define synlevel_end(ls) ((ls)->level--)
+
+/* Convert token to binary operator. */
+static BinOpr token2binop(LexToken tok)
+{
+ switch (tok) {
+ case '+': return OPR_ADD;
+ case '-': return OPR_SUB;
+ case '*': return OPR_MUL;
+ case '/': return OPR_DIV;
+ case '%': return OPR_MOD;
+ case '^': return OPR_POW;
+ case TK_concat: return OPR_CONCAT;
+ case TK_ne: return OPR_NE;
+ case TK_eq: return OPR_EQ;
+ case '<': return OPR_LT;
+ case TK_le: return OPR_LE;
+ case '>': return OPR_GT;
+ case TK_ge: return OPR_GE;
+ case TK_and: return OPR_AND;
+ case TK_or: return OPR_OR;
+ default: return OPR_NOBINOPR;
+ }
+}
+
+/* Priorities for each binary operator. ORDER OPR. */
+static const struct {
+ uint8_t left; /* Left priority. */
+ uint8_t right; /* Right priority. */
+} priority[] = {
+ {6,6}, {6,6}, {7,7}, {7,7}, {7,7}, /* ADD SUB MUL DIV MOD */
+ {10,9}, {5,4}, /* POW CONCAT (right associative) */
+ {3,3}, {3,3}, /* EQ NE */
+ {3,3}, {3,3}, {3,3}, {3,3}, /* LT GE GT LE */
+ {2,2}, {1,1} /* AND OR */
+};
+
+#define UNARY_PRIORITY 8 /* Priority for unary operators. */
+
+/* Forward declaration. */
+static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit);
+
+/* Parse unary expression. */
+static void expr_unop(LexState *ls, ExpDesc *v)
+{
+ BCOp op;
+ if (ls->token == TK_not) {
+ op = BC_NOT;
+ } else if (ls->token == '-') {
+ op = BC_UNM;
+ } else if (ls->token == '#') {
+ op = BC_LEN;
+ } else {
+ expr_simple(ls, v);
+ return;
+ }
+ lj_lex_next(ls);
+ expr_binop(ls, v, UNARY_PRIORITY);
+ bcemit_unop(ls->fs, op, v);
+}
+
+/* Parse binary expressions with priority higher than the limit. */
+static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit)
+{
+ BinOpr op;
+ synlevel_begin(ls);
+ expr_unop(ls, v);
+ op = token2binop(ls->token);
+ while (op != OPR_NOBINOPR && priority[op].left > limit) {
+ ExpDesc v2;
+ BinOpr nextop;
+ lj_lex_next(ls);
+ bcemit_binop_left(ls->fs, op, v);
+ /* Parse binary expression with higher priority. */
+ nextop = expr_binop(ls, &v2, priority[op].right);
+ bcemit_binop(ls->fs, op, v, &v2);
+ op = nextop;
+ }
+ synlevel_end(ls);
+ return op; /* Return unconsumed binary operator (if any). */
+}
+
+/* Parse expression. */
+static void expr(LexState *ls, ExpDesc *v)
+{
+ expr_binop(ls, v, 0); /* Priority 0: parse whole expression. */
+}
+
+/* Assign expression to the next register. */
+static void expr_next(LexState *ls)
+{
+ ExpDesc e;
+ expr(ls, &e);
+ expr_tonextreg(ls->fs, &e);
+}
+
+/* Parse conditional expression. */
+static BCPos expr_cond(LexState *ls)
+{
+ ExpDesc v;
+ expr(ls, &v);
+ if (v.k == VKNIL) v.k = VKFALSE;
+ bcemit_branch_t(ls->fs, &v);
+ return v.f;
+}
+
+/* -- Scope handling ------------------------------------------------------ */
+
+/* Begin a scope. */
+static void scope_begin(FuncState *fs, FuncScope *bl, int isbreakable)
+{
+ bl->breaklist = NO_JMP;
+ bl->isbreakable = (uint8_t)isbreakable;
+ bl->nactvar = (uint8_t)fs->nactvar;
+ bl->upval = 0;
+ bl->prev = fs->bl;
+ fs->bl = bl;
+ lua_assert(fs->freereg == fs->nactvar);
+}
+
+/* End a scope. */
+static void scope_end(FuncState *fs)
+{
+ FuncScope *bl = fs->bl;
+ fs->bl = bl->prev;
+ var_remove(fs->ls, bl->nactvar);
+ fs->freereg = fs->nactvar;
+ lua_assert(bl->nactvar == fs->nactvar);
+ /* A scope is either breakable or has upvalues. */
+ lua_assert(!bl->isbreakable || !bl->upval);
+ if (bl->upval)
+ bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0);
+ else /* Avoid in upval case, it clears lasttarget and kills UCLO+JMP join. */
+ jmp_tohere(fs, bl->breaklist);
+}
+
+/* Mark scope as having an upvalue. */
+static void scope_uvmark(FuncState *fs, BCReg level)
+{
+ FuncScope *bl;
+ for (bl = fs->bl; bl && bl->nactvar > level; bl = bl->prev)
+ ;
+ if (bl)
+ bl->upval = 1;
+}
+
+/* Parse 'break' statement. */
+static void parse_break(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ FuncScope *bl;
+ BCReg savefr;
+ int upval = 0;
+ for (bl = fs->bl; bl && !bl->isbreakable; bl = bl->prev)
+ upval |= bl->upval; /* Collect upvalues in intervening scopes. */
+ if (!bl) /* Error if no breakable scope found. */
+ err_syntax(ls, LJ_ERR_XBREAK);
+ savefr = fs->freereg;
+ fs->freereg = bl->nactvar; /* Shrink slots to help data-flow analysis. */
+ if (upval)
+ bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0); /* Close upvalues. */
+ jmp_append(fs, &bl->breaklist, bcemit_jmp(fs));
+ fs->freereg = savefr;
+}
+
+/* Check for end of block. */
+static int endofblock(LexToken token)
+{
+ switch (token) {
+ case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Parse 'return' statement. */
+static void parse_return(LexState *ls)
+{
+ BCIns ins;
+ FuncState *fs = ls->fs;
+ lj_lex_next(ls); /* Skip 'return'. */
+ fs->flags |= PROTO_HAS_RETURN;
+ if (endofblock(ls->token) || ls->token == ';') { /* Bare return. */
+ ins = BCINS_AD(BC_RET0, 0, 1);
+ } else { /* Return with one or more values. */
+ ExpDesc e; /* Receives the _last_ expression in the list. */
+ BCReg nret = expr_list(ls, &e);
+ if (nret == 1) { /* Return one result. */
+ if (e.k == VCALL) { /* Check for tail call. */
+ BCIns *ip = bcptr(fs, &e);
+ /* It doesn't pay off to add BC_VARGT just for 'return ...'. */
+ if (bc_op(*ip) == BC_VARG) goto notailcall;
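+ /* Convert the CALL/CALLM into its tail-call form (CALLT/CALLMT). */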
+ fs->pc--;
+ ins = BCINS_AD(bc_op(*ip)-BC_CALL+BC_CALLT, bc_a(*ip), bc_c(*ip));
+ } else { /* Can return the result from any register. */
+ ins = BCINS_AD(BC_RET1, expr_toanyreg(fs, &e), 2);
+ }
+ } else {
+ if (e.k == VCALL) { /* Append all results from a call. */
+ notailcall:
+ setbc_b(bcptr(fs, &e), 0);
+ ins = BCINS_AD(BC_RETM, fs->nactvar, e.u.s.aux - fs->nactvar);
+ } else {
+ expr_tonextreg(fs, &e); /* Force contiguous registers. */
+ ins = BCINS_AD(BC_RET, fs->nactvar, nret+1);
+ }
+ }
+ }
+ if (fs->flags & PROTO_CHILD)
+ bcemit_AJ(fs, BC_UCLO, 0, 0); /* May need to close upvalues first. */
+ bcemit_INS(fs, ins);
+}
+
+/* Parse a block. */
+static void parse_block(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ FuncScope bl;
+ scope_begin(fs, &bl, 0);
+ parse_chunk(ls);
+ lua_assert(bl.breaklist == NO_JMP);
+ scope_end(fs);
+}
+
+/* -- Assignments --------------------------------------------------------- */
+
+/* List of LHS variables. */
+typedef struct LHSVarList {
+ ExpDesc v; /* LHS variable. */
+ struct LHSVarList *prev; /* Link to previous LHS variable. */
+} LHSVarList;
+
+/* Eliminate write-after-read hazards for local variable assignment. */
+static void assign_hazard(LexState *ls, LHSVarList *lh, const ExpDesc *v)
+{
+ FuncState *fs = ls->fs;
+ BCReg reg = v->u.s.info; /* Check against this variable. */
+ BCReg tmp = fs->freereg; /* Rename to this temp. register (if needed). */
+ int hazard = 0;
+ for (; lh; lh = lh->prev) {
+ if (lh->v.k == VINDEXED) {
+ if (lh->v.u.s.info == reg) { /* t[i], t = 1, 2 */
+ hazard = 1;
+ lh->v.u.s.info = tmp;
+ }
+ if (lh->v.u.s.aux == reg) { /* t[i], i = 1, 2 */
+ hazard = 1;
+ lh->v.u.s.aux = tmp;
+ }
+ }
+ }
+ if (hazard) {
+ bcemit_AD(fs, BC_MOV, tmp, reg); /* Rename conflicting variable. */
+ bcreg_reserve(fs, 1);
+ }
+}
+
+/* Adjust LHS/RHS of an assignment. */
+static void assign_adjust(LexState *ls, BCReg nvars, BCReg nexps, ExpDesc *e)
+{
+ FuncState *fs = ls->fs;
+ int32_t extra = (int32_t)nvars - (int32_t)nexps;
+ if (e->k == VCALL) {
+ extra++; /* Compensate for the VCALL itself. */
+ if (extra < 0) extra = 0;
+ setbc_b(bcptr(fs, e), extra+1); /* Fixup call results. */
+ if (extra > 1) bcreg_reserve(fs, (BCReg)extra-1);
+ } else {
+ if (e->k != VVOID)
+ expr_tonextreg(fs, e); /* Close last expression. */
+ if (extra > 0) { /* Leftover LHS are set to nil. */
+ BCReg reg = fs->freereg;
+ bcreg_reserve(fs, (BCReg)extra);
+ bcemit_nil(fs, reg, (BCReg)extra);
+ }
+ }
+}
+
+/* Recursively parse assignment statement. */
+static void parse_assignment(LexState *ls, LHSVarList *lh, BCReg nvars)
+{
+ ExpDesc e;
+ checkcond(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, LJ_ERR_XSYNTAX);
+ if (lex_opt(ls, ',')) { /* Collect LHS list and recurse upwards. */
+ LHSVarList vl;
+ vl.prev = lh;
+ expr_primary(ls, &vl.v);
+ if (vl.v.k == VLOCAL)
+ assign_hazard(ls, lh, &vl.v);
+ checklimit(ls->fs, ls->level + nvars, LJ_MAX_XLEVEL, "variable names");
+ parse_assignment(ls, &vl, nvars+1);
+ } else { /* Parse RHS. */
+ BCReg nexps;
+ lex_check(ls, '=');
+ nexps = expr_list(ls, &e);
+ if (nexps == nvars) {
+ if (e.k == VCALL) {
+ if (bc_op(*bcptr(ls->fs, &e)) == BC_VARG) { /* Vararg assignment. */
+ ls->fs->freereg--;
+ e.k = VRELOCABLE;
+ } else { /* Multiple call results. */
+ e.u.s.info = e.u.s.aux; /* Base of call is not relocatable. */
+ e.k = VNONRELOC;
+ }
+ }
+ bcemit_store(ls->fs, &lh->v, &e);
+ return;
+ }
+ assign_adjust(ls, nvars, nexps, &e);
+ if (nexps > nvars)
+ ls->fs->freereg -= nexps - nvars; /* Drop leftover regs. */
+ }
+ /* Assign RHS to LHS and recurse downwards. */
+ expr_init(&e, VNONRELOC, ls->fs->freereg-1);
+ bcemit_store(ls->fs, &lh->v, &e);
+}
+
+/* Parse call statement or assignment. */
+static void parse_call_assign(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ LHSVarList vl;
+ expr_primary(ls, &vl.v);
+ if (vl.v.k == VCALL) { /* Function call statement. */
+ setbc_b(bcptr(fs, &vl.v), 1); /* No results. */
+ } else { /* Start of an assignment. */
+ vl.prev = NULL;
+ parse_assignment(ls, &vl, 1);
+ }
+}
+
+/* Parse 'local' statement. */
+static void parse_local(LexState *ls)
+{
+ if (lex_opt(ls, TK_function)) { /* Local function declaration. */
+ ExpDesc v, b;
+ FuncState *fs = ls->fs;
+ var_new(ls, 0, lex_str(ls));
+ expr_init(&v, VLOCAL, fs->freereg);
+ bcreg_reserve(fs, 1);
+ var_add(ls, 1);
+ parse_body(ls, &b, 0, ls->linenumber);
+ bcemit_store(fs, &v, &b);
+ /* The upvalue is in scope, but the local is only valid after the store. */
+ var_get(ls, fs, fs->nactvar - 1).startpc = fs->pc;
+ } else { /* Local variable declaration. */
+ ExpDesc e;
+ BCReg nexps, nvars = 0;
+ do { /* Collect LHS. */
+ var_new(ls, nvars++, lex_str(ls));
+ } while (lex_opt(ls, ','));
+ if (lex_opt(ls, '=')) { /* Optional RHS. */
+ nexps = expr_list(ls, &e);
+ } else { /* Or implicitly set to nil. */
+ e.k = VVOID;
+ nexps = 0;
+ }
+ assign_adjust(ls, nvars, nexps, &e);
+ var_add(ls, nvars);
+ }
+}
+
+/* Parse 'function' statement. */
+static void parse_func(LexState *ls, BCLine line)
+{
+ FuncState *fs;
+ ExpDesc v, b;
+ int needself = 0;
+ lj_lex_next(ls); /* Skip 'function'. */
+ /* Parse function name. */
+ var_lookup(ls, &v);
+ while (ls->token == '.') /* Multiple dot-separated fields. */
+ expr_field(ls, &v);
+ if (ls->token == ':') { /* Optional colon to signify method call. */
+ needself = 1;
+ expr_field(ls, &v);
+ }
+ parse_body(ls, &b, needself, line);
+ fs = ls->fs;
+ bcemit_store(fs, &v, &b);
+ fs->bcbase[fs->pc - 1].line = line; /* Set line for the store. */
+}
+
+/* -- Loop and conditional statements ------------------------------------- */
+
+/* Parse 'while' statement. */
+static void parse_while(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCPos start, loop, condexit;
+ FuncScope bl;
+ lj_lex_next(ls); /* Skip 'while'. */
+ start = fs->lasttarget = fs->pc;
+ condexit = expr_cond(ls);
+ scope_begin(fs, &bl, 1);
+ lex_check(ls, TK_do);
+ loop = bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
+ parse_block(ls);
+ jmp_patch(fs, bcemit_jmp(fs), start);
+ lex_match(ls, TK_end, TK_while, line);
+ scope_end(fs);
+ jmp_tohere(fs, condexit);
+ jmp_patchins(fs, loop, fs->pc);
+}
+
+/* Parse 'repeat' statement. */
+static void parse_repeat(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCPos loop = fs->lasttarget = fs->pc;
+ BCPos condexit;
+ FuncScope bl1, bl2;
+ scope_begin(fs, &bl1, 1); /* Breakable loop scope. */
+ scope_begin(fs, &bl2, 0); /* Inner scope. */
+ lj_lex_next(ls); /* Skip 'repeat'. */
+ bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
+ parse_chunk(ls);
+ lex_match(ls, TK_until, TK_repeat, line);
+ condexit = expr_cond(ls); /* Parse condition (still inside inner scope). */
+ if (!bl2.upval) { /* No upvalues? Just end inner scope. */
+ scope_end(fs);
+ } else { /* Otherwise generate: cond: UCLO+JMP out, !cond: UCLO+JMP loop. */
+ parse_break(ls); /* Break from loop and close upvalues. */
+ jmp_tohere(fs, condexit);
+ scope_end(fs); /* End inner scope and close upvalues. */
+ condexit = bcemit_jmp(fs);
+ }
+ jmp_patch(fs, condexit, loop); /* Jump backwards if !cond. */
+ jmp_patchins(fs, loop, fs->pc);
+ scope_end(fs); /* End loop scope. */
+}
+
+/* Parse numeric 'for'. */
+static void parse_for_num(LexState *ls, GCstr *varname, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCReg base = fs->freereg;
+ FuncScope bl;
+ BCPos loop, loopend;
+ /* Hidden control variables. */
+ var_new_fixed(ls, FORL_IDX, VARNAME_FOR_IDX);
+ var_new_fixed(ls, FORL_STOP, VARNAME_FOR_STOP);
+ var_new_fixed(ls, FORL_STEP, VARNAME_FOR_STEP);
+ /* Visible copy of index variable. */
+ var_new(ls, FORL_EXT, varname);
+ lex_check(ls, '=');
+ expr_next(ls);
+ lex_check(ls, ',');
+ expr_next(ls);
+ if (lex_opt(ls, ',')) {
+ expr_next(ls);
+ } else {
+ bcemit_AD(fs, BC_KSHORT, fs->freereg, 1); /* Default step is 1. */
+ bcreg_reserve(fs, 1);
+ }
+ var_add(ls, 3); /* Hidden control variables. */
+ lex_check(ls, TK_do);
+ loop = bcemit_AJ(fs, BC_FORI, base, NO_JMP);
+ scope_begin(fs, &bl, 0); /* Scope for visible variables. */
+ var_add(ls, 1);
+ bcreg_reserve(fs, 1);
+ parse_block(ls);
+ scope_end(fs);
+ /* Perform loop inversion. Loop control instructions are at the end. */
+ loopend = bcemit_AJ(fs, BC_FORL, base, NO_JMP);
+ fs->bcbase[loopend].line = line; /* Fix line for control ins. */
+ jmp_patchins(fs, loopend, loop+1);
+ jmp_patchins(fs, loop, fs->pc);
+}
+
+/* Try to predict whether the iterator is next() and specialize the bytecode.
+** Detecting next() and pairs() by name is simplistic, but quite effective.
+** The interpreter backs off if the check for the closure fails at runtime.
+*/
+static int predict_next(LexState *ls, FuncState *fs, BCPos pc)
+{
+ BCIns ins = fs->bcbase[pc].ins;
+ GCstr *name;
+ cTValue *o;
+ switch (bc_op(ins)) {
+ case BC_MOV:
+ name = gco2str(gcref(var_get(ls, fs, bc_d(ins)).name));
+ break;
+ case BC_UGET:
+ name = gco2str(gcref(ls->vstack[fs->uvloc[bc_d(ins)].vidx].name));
+ break;
+ case BC_GGET:
+ /* There's no inverse index (yet), so lookup the strings. */
+ o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "pairs"));
+ if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins))
+ return 1;
+ o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "next"));
+ if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins))
+ return 1;
+ return 0;
+ default:
+ return 0;
+ }
+ return (name->len == 5 && !strcmp(strdata(name), "pairs")) ||
+ (name->len == 4 && !strcmp(strdata(name), "next"));
+}
+
+/* Parse 'for' iterator. */
+static void parse_for_iter(LexState *ls, GCstr *indexname)
+{
+ FuncState *fs = ls->fs;
+ ExpDesc e;
+ BCReg nvars = 0;
+ BCLine line;
+ BCReg base = fs->freereg + 3;
+ BCPos loop, loopend, exprpc = fs->pc;
+ FuncScope bl;
+ int isnext;
+ /* Hidden control variables. */
+ var_new_fixed(ls, nvars++, VARNAME_FOR_GEN);
+ var_new_fixed(ls, nvars++, VARNAME_FOR_STATE);
+ var_new_fixed(ls, nvars++, VARNAME_FOR_CTL);
+ /* Visible variables returned from iterator. */
+ var_new(ls, nvars++, indexname);
+ while (lex_opt(ls, ','))
+ var_new(ls, nvars++, lex_str(ls));
+ lex_check(ls, TK_in);
+ line = ls->linenumber;
+ assign_adjust(ls, 3, expr_list(ls, &e), &e);
+ bcreg_bump(fs, 3); /* The iterator needs another 3 slots (func + 2 args). */
+ isnext = (nvars <= 5 && predict_next(ls, fs, exprpc));
+ var_add(ls, 3); /* Hidden control variables. */
+ lex_check(ls, TK_do);
+ loop = bcemit_AJ(fs, isnext ? BC_ISNEXT : BC_JMP, base, NO_JMP);
+ scope_begin(fs, &bl, 0); /* Scope for visible variables. */
+ var_add(ls, nvars-3);
+ bcreg_reserve(fs, nvars-3);
+ parse_block(ls);
+ scope_end(fs);
+ /* Perform loop inversion. Loop control instructions are at the end. */
+ jmp_patchins(fs, loop, fs->pc);
+ bcemit_ABC(fs, isnext ? BC_ITERN : BC_ITERC, base, nvars-3+1, 2+1);
+ loopend = bcemit_AJ(fs, BC_ITERL, base, NO_JMP);
+ fs->bcbase[loopend-1].line = line; /* Fix line for control ins. */
+ fs->bcbase[loopend].line = line;
+ jmp_patchins(fs, loopend, loop+1);
+}
+
+/* Parse 'for' statement. */
+static void parse_for(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ GCstr *varname;
+ FuncScope bl;
+ scope_begin(fs, &bl, 1); /* Breakable loop scope. */
+ lj_lex_next(ls); /* Skip 'for'. */
+ varname = lex_str(ls); /* Get first variable name. */
+ if (ls->token == '=')
+ parse_for_num(ls, varname, line);
+ else if (ls->token == ',' || ls->token == TK_in)
+ parse_for_iter(ls, varname);
+ else
+ err_syntax(ls, LJ_ERR_XFOR);
+ lex_match(ls, TK_end, TK_for, line);
+ scope_end(fs); /* Resolve break list. */
+}
+
+/* Parse condition and 'then' block. */
+static BCPos parse_then(LexState *ls)
+{
+ BCPos condexit;
+ lj_lex_next(ls); /* Skip 'if' or 'elseif'. */
+ condexit = expr_cond(ls);
+ lex_check(ls, TK_then);
+ parse_block(ls);
+ return condexit;
+}
+
+/* Parse 'if' statement. */
+static void parse_if(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCPos flist;
+ BCPos escapelist = NO_JMP;
+ flist = parse_then(ls);
+ while (ls->token == TK_elseif) { /* Parse multiple 'elseif' blocks. */
+ jmp_append(fs, &escapelist, bcemit_jmp(fs));
+ jmp_tohere(fs, flist);
+ flist = parse_then(ls);
+ }
+ if (ls->token == TK_else) { /* Parse optional 'else' block. */
+ jmp_append(fs, &escapelist, bcemit_jmp(fs));
+ jmp_tohere(fs, flist);
+ lj_lex_next(ls); /* Skip 'else'. */
+ parse_block(ls);
+ } else {
+ jmp_append(fs, &escapelist, flist);
+ }
+ jmp_tohere(fs, escapelist);
+ lex_match(ls, TK_end, TK_if, line);
+}
+
+/* -- Parse statements ---------------------------------------------------- */
+
+/* Parse a statement. Returns 1 if it must be the last one in a chunk. */
+static int parse_stmt(LexState *ls)
+{
+ BCLine line = ls->linenumber;
+ switch (ls->token) {
+ case TK_if:
+ parse_if(ls, line);
+ break;
+ case TK_while:
+ parse_while(ls, line);
+ break;
+ case TK_do:
+ lj_lex_next(ls);
+ parse_block(ls);
+ lex_match(ls, TK_end, TK_do, line);
+ break;
+ case TK_for:
+ parse_for(ls, line);
+ break;
+ case TK_repeat:
+ parse_repeat(ls, line);
+ break;
+ case TK_function:
+ parse_func(ls, line);
+ break;
+ case TK_local:
+ lj_lex_next(ls);
+ parse_local(ls);
+ break;
+ case TK_return:
+ parse_return(ls);
+ return 1; /* Must be last. */
+ case TK_break:
+ lj_lex_next(ls);
+ parse_break(ls);
+ return 1; /* Must be last. */
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ case ';':
+ lj_lex_next(ls);
+ break;
+#endif
+ default:
+ parse_call_assign(ls);
+ break;
+ }
+ return 0;
+}
+
+/* A chunk is a list of statements optionally separated by semicolons. */
+static void parse_chunk(LexState *ls)
+{
+ int islast = 0;
+ synlevel_begin(ls);
+ while (!islast && !endofblock(ls->token)) {
+ islast = parse_stmt(ls);
+ lex_opt(ls, ';');
+ lua_assert(ls->fs->framesize >= ls->fs->freereg &&
+ ls->fs->freereg >= ls->fs->nactvar);
+ ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */
+ }
+ synlevel_end(ls);
+}
+
+/* Entry point of bytecode parser. */
+GCproto *lj_parse(LexState *ls)
+{
+ FuncState fs;
+ GCproto *pt;
+ lua_State *L = ls->L;
+#ifdef LUAJIT_DISABLE_DEBUGINFO
+ ls->chunkname = lj_str_newlit(L, "=");
+#else
+ ls->chunkname = lj_str_newz(L, ls->chunkarg);
+#endif
+ setstrV(L, L->top, ls->chunkname); /* Anchor chunkname string. */
+ incr_top(L);
+ ls->level = 0;
+ fs_init(ls, &fs);
+ fs.linedefined = 0;
+ fs.numparams = 0;
+ fs.bcbase = NULL;
+ fs.bclim = 0;
+ fs.flags |= PROTO_VARARG; /* Main chunk is always a vararg func. */
+ bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */
+ lj_lex_next(ls); /* Read-ahead first token. */
+ parse_chunk(ls);
+ if (ls->token != TK_eof)
+ err_token(ls, TK_eof);
+ pt = fs_finish(ls, ls->linenumber);
+ L->top--; /* Drop chunkname. */
+ lua_assert(fs.prev == NULL);
+ lua_assert(ls->fs == NULL);
+ lua_assert(pt->sizeuv == 0);
+ return pt;
+}
+
diff --git a/src/LuaJIT/src/lj_parse.h b/src/LuaJIT/src/lj_parse.h
new file mode 100644
index 000000000..b56e5db62
--- /dev/null
+++ b/src/LuaJIT/src/lj_parse.h
@@ -0,0 +1,18 @@
+/*
+** Lua parser (source code -> bytecode).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_PARSE_H
+#define _LJ_PARSE_H
+
+#include "lj_obj.h"
+#include "lj_lex.h"
+
+LJ_FUNC GCproto *lj_parse(LexState *ls);
+LJ_FUNC GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t l);
+#if LJ_HASFFI
+LJ_FUNC void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd);
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_record.c b/src/LuaJIT/src/lj_record.c
new file mode 100644
index 000000000..837f61f4f
--- /dev/null
+++ b/src/LuaJIT/src/lj_record.c
@@ -0,0 +1,2228 @@
+/*
+** Trace recorder (bytecode -> SSA IR).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_record_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_ff.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_record.h"
+#include "lj_ffrecord.h"
+#include "lj_snap.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Emit raw IR without passing through optimizations. */
+#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- Sanity checks ------------------------------------------------------- */
+
+#ifdef LUA_USE_ASSERT
+/* Sanity check the whole IR -- sloooow. */
+static void rec_check_ir(jit_State *J)
+{
+ IRRef i, nins = J->cur.nins, nk = J->cur.nk;
+ lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536);
+ for (i = nins-1; i >= nk; i--) {
+ IRIns *ir = IR(i);
+ uint32_t mode = lj_ir_mode[ir->o];
+ IRRef op1 = ir->op1;
+ IRRef op2 = ir->op2;
+ switch (irm_op1(mode)) {
+ case IRMnone: lua_assert(op1 == 0); break;
+ case IRMref: lua_assert(op1 >= nk);
+ lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break;
+ case IRMlit: break;
+ case IRMcst: lua_assert(i < REF_BIAS); continue;
+ }
+ switch (irm_op2(mode)) {
+ case IRMnone: lua_assert(op2 == 0); break;
+ case IRMref: lua_assert(op2 >= nk);
+ lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break;
+ case IRMlit: break;
+ case IRMcst: lua_assert(0); break;
+ }
+ if (ir->prev) {
+ lua_assert(ir->prev >= nk);
+ lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i);
+ lua_assert(ir->o == IR_NOP || IR(ir->prev)->o == ir->o);
+ }
+ }
+}
+
+/* Compare stack slots and frames of the recorder and the VM. */
+static void rec_check_slots(jit_State *J)
+{
+ BCReg s, nslots = J->baseslot + J->maxslot;
+ int32_t depth = 0;
+ cTValue *base = J->L->base - J->baseslot;
+ lua_assert(J->baseslot >= 1 && J->baseslot < LJ_MAX_JSLOTS);
+ lua_assert(J->baseslot == 1 || (J->slot[J->baseslot-1] & TREF_FRAME));
+ lua_assert(nslots < LJ_MAX_JSLOTS);
+ for (s = 0; s < nslots; s++) {
+ TRef tr = J->slot[s];
+ if (tr) {
+ cTValue *tv = &base[s];
+ IRRef ref = tref_ref(tr);
+ IRIns *ir;
+ lua_assert(ref >= J->cur.nk && ref < J->cur.nins);
+ ir = IR(ref);
+ lua_assert(irt_t(ir->t) == tref_t(tr));
+ if (s == 0) {
+ lua_assert(tref_isfunc(tr));
+ } else if ((tr & TREF_FRAME)) {
+ GCfunc *fn = gco2func(frame_gc(tv));
+ BCReg delta = (BCReg)(tv - frame_prev(tv));
+ lua_assert(tref_isfunc(tr));
+ if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir));
+ lua_assert(s > delta ? (J->slot[s-delta] & TREF_FRAME) : (s == delta));
+ depth++;
+ } else if ((tr & TREF_CONT)) {
+ lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void));
+ lua_assert((J->slot[s+1] & TREF_FRAME));
+ depth++;
+ } else {
+ if (tvisnumber(tv))
+ lua_assert(tref_isnumber(tr)); /* Could be IRT_INT etc., too. */
+ else
+ lua_assert(itype2irt(tv) == tref_type(tr));
+ if (tref_isk(tr)) { /* Compare constants. */
+ TValue tvk;
+ lj_ir_kvalue(J->L, &tvk, ir);
+ if (!(tvisnum(&tvk) && tvisnan(&tvk)))
+ lua_assert(lj_obj_equal(tv, &tvk));
+ else
+ lua_assert(tvisnum(tv) && tvisnan(tv));
+ }
+ }
+ }
+ }
+ lua_assert(J->framedepth == depth);
+}
+#endif
+
+/* -- Type handling and specialization ------------------------------------ */
+
+/* Note: these functions return tagged references (TRef). */
+
+/* Specialize a slot to a specific type. Note: slot can be negative! */
+static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode)
+{
+ /* Caller may set IRT_GUARD in t. */
+ TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode);
+ J->base[slot] = ref;
+ return ref;
+}
+
+/* Specialize a slot to the runtime type. Note: slot can be negative! */
+static TRef sload(jit_State *J, int32_t slot)
+{
+ IRType t = itype2irt(&J->L->base[slot]);
+ TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot,
+ IRSLOAD_TYPECHECK);
+ if (irtype_ispri(t)) ref = TREF_PRI(t); /* Canonicalize primitive refs. */
+ J->base[slot] = ref;
+ return ref;
+}
+
+/* Get TRef from slot. Load slot and specialize if not done already. */
+#define getslot(J, s) (J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s)))
+
+/* Get TRef for current function. */
+static TRef getcurrf(jit_State *J)
+{
+ if (J->base[-1])
+ return J->base[-1];
+ lua_assert(J->baseslot == 1);
+ return sloadt(J, -1, IRT_FUNC, IRSLOAD_READONLY);
+}
+
+/* Compare for raw object equality.
+** Returns 0 if the objects are the same.
+** Returns 1 if they are different, but the same type.
+** Returns 2 for two different types.
+** Comparisons between primitives always return 1 -- no caller cares about it.
+*/
+int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv)
+{
+ int diff = !lj_obj_equal(av, bv);
+ if (!tref_isk2(a, b)) { /* Shortcut, also handles primitives. */
+ IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a);
+ IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b);
+ if (ta != tb) {
+ /* Widen mixed number/int comparisons to number/number comparison. */
+ if (ta == IRT_INT && tb == IRT_NUM) {
+ a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT);
+ ta = IRT_NUM;
+ } else if (ta == IRT_NUM && tb == IRT_INT) {
+ b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT);
+ } else {
+ return 2; /* Two different types are never equal. */
+ }
+ }
+ emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b);
+ }
+ return diff;
+}
+
+/* -- Record loop ops ----------------------------------------------------- */
+
+/* Loop event. */
+typedef enum {
+ LOOPEV_LEAVE, /* Loop is left or not entered. */
+ LOOPEV_ENTERLO, /* Loop is entered with a low iteration count left. */
+ LOOPEV_ENTER /* Loop is entered. */
+} LoopEvent;
+
+/* Canonicalize slots: convert integers to numbers. */
+static void canonicalize_slots(jit_State *J)
+{
+ BCReg s;
+ if (LJ_DUALNUM) return;
+ for (s = J->baseslot+J->maxslot-1; s >= 1; s--) {
+ TRef tr = J->slot[s];
+ if (tref_isinteger(tr)) {
+ IRIns *ir = IR(tref_ref(tr));
+ if (!(ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_READONLY)))
+ J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
+ }
+ }
+}
+
+/* Stop recording. */
+static void rec_stop(jit_State *J, TraceLink linktype, TraceNo lnk)
+{
+ lj_trace_end(J);
+ J->cur.linktype = (uint8_t)linktype;
+ J->cur.link = (uint16_t)lnk;
+ /* Looping back at the same stack level? */
+ if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) {
+ if ((J->flags & JIT_F_OPT_LOOP)) /* Shall we try to create a loop? */
+ goto nocanon; /* Do not canonicalize or we lose the narrowing. */
+ if (J->cur.root) /* Otherwise ensure we always link to the root trace. */
+ J->cur.link = J->cur.root;
+ }
+ canonicalize_slots(J);
+nocanon:
+ /* Note: all loop ops must set J->pc to the following instruction! */
+ lj_snap_add(J); /* Add loop snapshot. */
+ J->needsnap = 0;
+ J->mergesnap = 1; /* In case recording continues. */
+}
+
+/* Search bytecode backwards for a int/num constant slot initializer. */
+static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t)
+{
+ /* This algorithm is rather simplistic and assumes quite a bit about
+ ** how the bytecode is generated. It works fine for FORI initializers,
+ ** but it won't necessarily work in other cases (e.g. iterator arguments).
+ ** It doesn't do anything fancy, either (like backpropagating MOVs).
+ */
+ const BCIns *pc, *startpc = proto_bc(J->pt);
+ for (pc = endpc-1; pc > startpc; pc--) {
+ BCIns ins = *pc;
+ BCOp op = bc_op(ins);
+ /* First try to find the last instruction that stores to this slot. */
+ if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) {
+ return 0; /* Multiple results, e.g. from a CALL or KNIL. */
+ } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) {
+ if (op == BC_KSHORT || op == BC_KNUM) { /* Found const. initializer. */
+ /* Now try to verify there's no forward jump across it. */
+ const BCIns *kpc = pc;
+ for (; pc > startpc; pc--)
+ if (bc_op(*pc) == BC_JMP) {
+ const BCIns *target = pc+bc_j(*pc)+1;
+ if (target > kpc && target <= endpc)
+ return 0; /* Conditional assignment. */
+ }
+ if (op == BC_KSHORT) {
+ int32_t k = (int32_t)(int16_t)bc_d(ins);
+ return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k);
+ } else {
+ cTValue *tv = proto_knumtv(J->pt, bc_d(ins));
+ if (t == IRT_INT) {
+ int32_t k = numberVint(tv);
+ if (tvisint(tv) || numV(tv) == (lua_Number)k) /* -0 is ok here. */
+ return lj_ir_kint(J, k);
+ return 0; /* Type mismatch. */
+ } else {
+ return lj_ir_knum(J, numberVnum(tv));
+ }
+ }
+ }
+ return 0; /* Non-constant initializer. */
+ }
+ }
+ return 0; /* No assignment to this slot found? */
+}
+
+/* Load and optionally convert a FORI argument from a slot. */
+static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode)
+{
+ int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0;
+ return sloadt(J, (int32_t)slot,
+ t + (((mode & IRSLOAD_TYPECHECK) ||
+ (conv && t == IRT_INT && !(mode >> 16))) ?
+ IRT_GUARD : 0),
+ mode + conv);
+}
+
+/* Peek before FORI to find a const initializer. Otherwise load from slot. */
+static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot,
+ IRType t, int mode)
+{
+ TRef tr = J->base[slot];
+ if (!tr) {
+ tr = find_kinit(J, fori, slot, t);
+ if (!tr)
+ tr = fori_load(J, slot, t, mode);
+ }
+ return tr;
+}
+
+/* Return the direction of the FOR loop iterator.
+** It's important to exactly reproduce the semantics of the interpreter.
+*/
+static int rec_for_direction(cTValue *o)
+{
+ return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0;
+}
+
+/* Simulate the runtime behavior of the FOR loop iterator. */
+static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl)
+{
+ lua_Number stopv = numberVnum(&o[FORL_STOP]);
+ lua_Number idxv = numberVnum(&o[FORL_IDX]);
+ lua_Number stepv = numberVnum(&o[FORL_STEP]);
+ if (isforl)
+ idxv += stepv;
+ if (rec_for_direction(&o[FORL_STEP])) {
+ if (idxv <= stopv) {
+ *op = IR_LE;
+ return idxv + 2*stepv > stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
+ }
+ *op = IR_GT; return LOOPEV_LEAVE;
+ } else {
+ if (stopv <= idxv) {
+ *op = IR_GE;
+ return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
+ }
+ *op = IR_LT; return LOOPEV_LEAVE;
+ }
+}
+
+/* Record checks for FOR loop overflow and step direction. */
+static void rec_for_check(jit_State *J, IRType t, int dir,
+ TRef stop, TRef step, int init)
+{
+ if (!tref_isk(step)) {
+ /* Non-constant step: need a guard for the direction. */
+ TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J);
+ emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero);
+ /* Add hoistable overflow checks for a narrowed FORL index. */
+ if (init && t == IRT_INT) {
+ if (tref_isk(stop)) {
+ /* Constant stop: optimize check away or to a range check for step. */
+ int32_t k = IR(tref_ref(stop))->i;
+ if (dir) {
+ if (k > 0)
+ emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k));
+ } else {
+ if (k < 0)
+ emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k));
+ }
+ } else {
+ /* Stop+step variable: need full overflow check. */
+ TRef tr = emitir(IRTGI(IR_ADDOV), step, stop);
+ emitir(IRTI(IR_USE), tr, 0); /* ADDOV is weak. Avoid dead result. */
+ }
+ }
+ } else if (init && t == IRT_INT && !tref_isk(stop)) {
+ /* Constant step: optimize overflow check to a range check for stop. */
+ int32_t k = IR(tref_ref(step))->i;
+ k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k;
+ emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k));
+ }
+}
+
+/* Record a FORL instruction. */
+static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev,
+ int init)
+{
+ BCReg ra = bc_a(*fori);
+ cTValue *tv = &J->L->base[ra];
+ TRef idx = J->base[ra+FORL_IDX];
+ IRType t = idx ? tref_type(idx) :
+ (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM;
+ int mode = IRSLOAD_INHERIT +
+ ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0);
+ TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode);
+ TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode);
+ int tc, dir = rec_for_direction(&tv[FORL_STEP]);
+ lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI);
+ scev->t.irt = t;
+ scev->dir = dir;
+ scev->stop = tref_ref(stop);
+ scev->step = tref_ref(step);
+ rec_for_check(J, t, dir, stop, step, init);
+ scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT));
+ tc = (LJ_DUALNUM &&
+ !(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) &&
+ tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ?
+ IRSLOAD_TYPECHECK : 0;
+ if (tc) {
+ J->base[ra+FORL_STOP] = stop;
+ J->base[ra+FORL_STEP] = step;
+ }
+ if (!idx)
+ idx = fori_load(J, ra+FORL_IDX, t,
+ IRSLOAD_INHERIT + tc + (J->scev.start << 16));
+ if (!init)
+ J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step);
+ J->base[ra+FORL_EXT] = idx;
+ scev->idx = tref_ref(idx);
+ J->maxslot = ra+FORL_EXT+1;
+}
+
+/* Record FORL/JFORL or FORI/JFORI. */
+static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl)
+{
+ BCReg ra = bc_a(*fori);
+ TValue *tv = &J->L->base[ra];
+ TRef *tr = &J->base[ra];
+ IROp op;
+ LoopEvent ev;
+ TRef stop;
+ IRType t;
+ if (isforl) { /* Handle FORL/JFORL opcodes. */
+ TRef idx = tr[FORL_IDX];
+ if (tref_ref(idx) == J->scev.idx) {
+ t = J->scev.t.irt;
+ stop = J->scev.stop;
+ idx = emitir(IRT(IR_ADD, t), idx, J->scev.step);
+ tr[FORL_EXT] = tr[FORL_IDX] = idx;
+ } else {
+ ScEvEntry scev;
+ rec_for_loop(J, fori, &scev, 0);
+ t = scev.t.irt;
+ stop = scev.stop;
+ }
+ } else { /* Handle FORI/JFORI opcodes. */
+ BCReg i;
+ lj_meta_for(J->L, tv);
+ t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) :
+ IRT_NUM;
+ for (i = FORL_IDX; i <= FORL_STEP; i++) {
+ if (!tr[i]) sload(J, ra+i);
+ lua_assert(tref_isnumber_str(tr[i]));
+ if (tref_isstr(tr[i]))
+ tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0);
+ if (t == IRT_INT) {
+ if (!tref_isinteger(tr[i]))
+ tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK);
+ } else {
+ if (!tref_isnum(tr[i]))
+ tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT);
+ }
+ }
+ tr[FORL_EXT] = tr[FORL_IDX];
+ stop = tr[FORL_STOP];
+ rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]),
+ stop, tr[FORL_STEP], 1);
+ }
+
+ ev = rec_for_iter(&op, tv, isforl);
+ if (ev == LOOPEV_LEAVE) {
+ J->maxslot = ra+FORL_EXT+1;
+ J->pc = fori+1;
+ } else {
+ J->maxslot = ra;
+ J->pc = fori+bc_j(*fori)+1;
+ }
+ lj_snap_add(J);
+
+ emitir(IRTG(op, t), tr[FORL_IDX], stop);
+
+ if (ev == LOOPEV_LEAVE) {
+ J->maxslot = ra;
+ J->pc = fori+bc_j(*fori)+1;
+ } else {
+ J->maxslot = ra+FORL_EXT+1;
+ J->pc = fori+1;
+ }
+ J->needsnap = 1;
+ return ev;
+}
+
+/* Record ITERL/JITERL. */
+static LoopEvent rec_iterl(jit_State *J, const BCIns iterins)
+{
+ BCReg ra = bc_a(iterins);
+ lua_assert(J->base[ra] != 0);
+ if (!tref_isnil(J->base[ra])) { /* Looping back? */
+ J->base[ra-1] = J->base[ra]; /* Copy result of ITERC to control var. */
+ J->maxslot = ra-1+bc_b(J->pc[-1]);
+ J->pc += bc_j(iterins)+1;
+ return LOOPEV_ENTER;
+ } else {
+ J->maxslot = ra-3;
+ J->pc++;
+ return LOOPEV_LEAVE;
+ }
+}
+
+/* Record LOOP/JLOOP. Now, that was easy. */
+static LoopEvent rec_loop(jit_State *J, BCReg ra)
+{
+ if (ra < J->maxslot) J->maxslot = ra;
+ J->pc++;
+ return LOOPEV_ENTER;
+}
+
+/* Check if a loop repeatedly failed to trace because it didn't loop back. */
+static int innerloopleft(jit_State *J, const BCIns *pc)
+{
+ ptrdiff_t i;
+ for (i = 0; i < PENALTY_SLOTS; i++)
+ if (mref(J->penalty[i].pc, const BCIns) == pc) {
+ if ((J->penalty[i].reason == LJ_TRERR_LLEAVE ||
+ J->penalty[i].reason == LJ_TRERR_LINNER) &&
+ J->penalty[i].val >= 2*PENALTY_MIN)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+/* Handle the case when an interpreted loop op is hit. */
+static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev)
+{
+ if (J->parent == 0) {
+ if (pc == J->startpc && J->framedepth + J->retdepth == 0) {
+ /* Same loop? */
+ if (ev == LOOPEV_LEAVE) /* Must loop back to form a root trace. */
+ lj_trace_err(J, LJ_TRERR_LLEAVE);
+ rec_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Looping root trace. */
+ } else if (ev != LOOPEV_LEAVE) { /* Entering inner loop? */
+ /* It's usually better to abort here and wait until the inner loop
+ ** is traced. But if the inner loop repeatedly didn't loop back,
+ ** this indicates a low trip count. In this case try unrolling
+ ** an inner loop even in a root trace. But it's better to be a bit
+ ** more conservative here and only do it for very short loops.
+ */
+ if (!innerloopleft(J, pc))
+ lj_trace_err(J, LJ_TRERR_LINNER); /* Root trace hit an inner loop. */
+ if ((ev != LOOPEV_ENTERLO &&
+ J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0)
+ lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */
+ J->loopref = J->cur.nins;
+ }
+ } else if (ev != LOOPEV_LEAVE) { /* Side trace enters an inner loop. */
+ J->loopref = J->cur.nins;
+ if (--J->loopunroll < 0)
+ lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */
+ } /* Side trace continues across a loop that's left or not entered. */
+}
+
+/* Handle the case when an already compiled loop op is hit. */
+static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev)
+{
+ if (J->parent == 0) { /* Root trace hit an inner loop. */
+ /* Better let the inner loop spawn a side trace back here. */
+ lj_trace_err(J, LJ_TRERR_LINNER);
+ } else if (ev != LOOPEV_LEAVE) { /* Side trace enters a compiled loop. */
+ J->instunroll = 0; /* Cannot continue across a compiled loop op. */
+ if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
+ rec_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Form an extra loop. */
+ else
+ rec_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the loop. */
+ } /* Side trace continues across a loop that's left or not entered. */
+}
+
+/* -- Record calls and returns -------------------------------------------- */
+
+/* Specialize to the runtime value of the called function or its prototype. */
+static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr)
+{
+ TRef kfunc;
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ /* 3 or more closures created? Probably not a monomorphic function. */
+ if (pt->flags >= 3*PROTO_CLCOUNT) { /* Specialize to prototype instead. */
+ TRef trpt = emitir(IRT(IR_FLOAD, IRT_P32), tr, IRFL_FUNC_PC);
+ emitir(IRTG(IR_EQ, IRT_P32), trpt, lj_ir_kptr(J, proto_bc(pt)));
+ (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); /* Prevent GC of proto. */
+ return tr;
+ }
+ }
+ /* Otherwise specialize to the function (closure) value itself. */
+ kfunc = lj_ir_kfunc(J, fn);
+ emitir(IRTG(IR_EQ, IRT_FUNC), tr, kfunc);
+ return kfunc;
+}
+
+/* Record call setup. */
+static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs)
+{
+ RecordIndex ix;
+ TValue *functv = &J->L->base[func];
+ TRef *fbase = &J->base[func];
+ ptrdiff_t i;
+ for (i = 0; i <= nargs; i++)
+ (void)getslot(J, func+i); /* Ensure func and all args have a reference. */
+ if (!tref_isfunc(fbase[0])) { /* Resolve __call metamethod. */
+ ix.tab = fbase[0];
+ copyTV(J->L, &ix.tabv, functv);
+ if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj))
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ for (i = ++nargs; i > 0; i--) /* Shift arguments up. */
+ fbase[i] = fbase[i-1];
+ fbase[0] = ix.mobj; /* Replace function. */
+ functv = &ix.mobjv;
+ }
+ fbase[0] = TREF_FRAME | rec_call_specialize(J, funcV(functv), fbase[0]);
+ J->maxslot = (BCReg)nargs;
+}
+
+/* Record call. */
+void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs)
+{
+ rec_call_setup(J, func, nargs);
+ /* Bump frame. */
+ J->framedepth++;
+ J->base += func+1;
+ J->baseslot += func+1;
+}
+
+/* Record tail call. */
+void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs)
+{
+ rec_call_setup(J, func, nargs);
+ if (frame_isvarg(J->L->base - 1)) {
+ BCReg cbase = (BCReg)frame_delta(J->L->base - 1);
+ if (--J->framedepth < 0)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ func += cbase;
+ }
+ /* Move func + args down. */
+ memmove(&J->base[-1], &J->base[func], sizeof(TRef)*(J->maxslot+1));
+ /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */
+ /* Tailcalls can form a loop, so count towards the loop unroll limit. */
+ if (++J->tailcalled > J->loopunroll)
+ lj_trace_err(J, LJ_TRERR_LUNROLL);
+}
+
+/* Check unroll limits for down-recursion. */
+static int check_downrec_unroll(jit_State *J, GCproto *pt)
+{
+ IRRef ptref;
+ for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev)
+ if (ir_kgc(IR(ptref)) == obj2gco(pt)) {
+ int count = 0;
+ IRRef ref;
+ for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev)
+ if (IR(ref)->op1 == ptref)
+ count++;
+ if (count) {
+ if (J->pc == J->startpc) {
+ if (count + J->tailcalled > J->param[JIT_P_recunroll])
+ return 1;
+ } else {
+ lj_trace_err(J, LJ_TRERR_DOWNREC);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Record return. */
+void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
+{
+ TValue *frame = J->L->base - 1;
+ ptrdiff_t i;
+ for (i = 0; i < gotresults; i++)
+ (void)getslot(J, rbase+i); /* Ensure all results have a reference. */
+ while (frame_ispcall(frame)) { /* Immediately resolve pcall() returns. */
+ BCReg cbase = (BCReg)frame_delta(frame);
+ if (--J->framedepth < 0)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ lua_assert(J->baseslot > 1);
+ gotresults++;
+ rbase += cbase;
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ J->base[--rbase] = TREF_TRUE; /* Prepend true to results. */
+ frame = frame_prevd(frame);
+ }
+ /* Return to lower frame via interpreter for unhandled cases. */
+ if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) &&
+ (!frame_islua(frame) ||
+ (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))))) {
+ /* NYI: specialize to frame type and return directly, not via RET*. */
+ for (i = -1; i < (ptrdiff_t)rbase; i++)
+ J->base[i] = 0; /* Purge dead slots. */
+ J->maxslot = rbase + (BCReg)gotresults;
+ rec_stop(J, LJ_TRLINK_RETURN, 0); /* Return to interpreter. */
+ return;
+ }
+ if (frame_isvarg(frame)) {
+ BCReg cbase = (BCReg)frame_delta(frame);
+ if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ lua_assert(J->baseslot > 1);
+ rbase += cbase;
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ frame = frame_prevd(frame);
+ }
+ if (frame_islua(frame)) { /* Return to Lua frame. */
+ BCIns callins = *(frame_pc(frame)-1);
+ ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 :gotresults;
+ BCReg cbase = bc_a(callins);
+ GCproto *pt = funcproto(frame_func(frame - (cbase+1)));
+ if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) {
+ if (check_downrec_unroll(J, pt)) {
+ J->maxslot = (BCReg)(rbase + gotresults);
+ lj_snap_purge(J);
+ rec_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno); /* Down-recursion. */
+ return;
+ }
+ lj_snap_add(J);
+ }
+ for (i = 0; i < nresults; i++) /* Adjust results. */
+ J->base[i-1] = i < gotresults ? J->base[rbase+i] : TREF_NIL;
+ J->maxslot = cbase+(BCReg)nresults;
+ if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */
+ J->framedepth--;
+ lua_assert(J->baseslot > cbase+1);
+ J->baseslot -= cbase+1;
+ J->base -= cbase+1;
+ } else if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
+ /* Return to lower frame would leave the loop in a root trace. */
+ lj_trace_err(J, LJ_TRERR_LLEAVE);
+ } else { /* Return to lower frame. Guard for the target we return to. */
+ TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);
+ TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame));
+ emitir(IRTG(IR_RETF, IRT_P32), trpt, trpc);
+ J->retdepth++;
+ J->needsnap = 1;
+ lua_assert(J->baseslot == 1);
+ /* Shift result slots up and clear the slots of the new frame below. */
+ memmove(J->base + cbase, J->base-1, sizeof(TRef)*nresults);
+ memset(J->base-1, 0, sizeof(TRef)*(cbase+1));
+ }
+ } else if (frame_iscont(frame)) { /* Return to continuation frame. */
+ ASMFunction cont = frame_contf(frame);
+ BCReg cbase = (BCReg)frame_delta(frame);
+ if ((J->framedepth -= 2) < 0)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ J->maxslot = cbase-2;
+ if (cont == lj_cont_ra) {
+ /* Copy result to destination slot. */
+ BCReg dst = bc_a(*(frame_contpc(frame)-1));
+ J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL;
+ if (dst >= J->maxslot) J->maxslot = dst+1;
+ } else if (cont == lj_cont_nop) {
+ /* Nothing to do here. */
+ } else if (cont == lj_cont_cat) {
+ lua_assert(0);
+ } else {
+ /* Result type already specialized. */
+ lua_assert(cont == lj_cont_condf || cont == lj_cont_condt);
+ }
+ } else {
+ lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */
+ }
+ lua_assert(J->baseslot >= 1);
+}
+
+/* -- Metamethod handling ------------------------------------------------- */
+
+/* Prepare to record call to metamethod. */
+static BCReg rec_mm_prep(jit_State *J, ASMFunction cont)
+{
+ BCReg s, top = curr_proto(J->L)->framesize;
+ TRef trcont;
+ setcont(&J->L->base[top], cont);
+#if LJ_64
+ trcont = lj_ir_kptr(J, (void *)((int64_t)cont - (int64_t)lj_vm_asm_begin));
+#else
+ trcont = lj_ir_kptr(J, (void *)cont);
+#endif
+ J->base[top] = trcont | TREF_CONT;
+ J->framedepth++;
+ for (s = J->maxslot; s < top; s++)
+ J->base[s] = 0; /* Clear frame gap to avoid resurrecting previous refs. */
+ return top+1;
+}
+
+/* Record metamethod lookup. */
+int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm)
+{
+ RecordIndex mix;
+ GCtab *mt;
+ if (tref_istab(ix->tab)) {
+ mt = tabref(tabV(&ix->tabv)->metatable);
+ mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
+ } else if (tref_isudata(ix->tab)) {
+ int udtype = udataV(&ix->tabv)->udtype;
+ mt = tabref(udataV(&ix->tabv)->metatable);
+ /* The metatables of special userdata objects are treated as immutable. */
+ if (udtype != UDTYPE_USERDATA) {
+ cTValue *mo;
+ if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) {
+ /* Specialize to the C library namespace object. */
+ emitir(IRTG(IR_EQ, IRT_P32), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv)));
+ } else {
+ /* Specialize to the type of userdata. */
+ TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE);
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype));
+ }
+ immutable_mt:
+ mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm));
+ if (!mo || tvisnil(mo))
+ return 0; /* No metamethod. */
+ /* Treat metamethod or index table as immutable, too. */
+ if (!(tvisfunc(mo) || tvistab(mo)))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ copyTV(J->L, &ix->mobjv, mo);
+ ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB);
+ ix->mtv = mt;
+ ix->mt = TREF_NIL; /* Dummy value for comparison semantics. */
+ return 1; /* Got metamethod or index table. */
+ }
+ mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META);
+ } else {
+ /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */
+ mt = tabref(basemt_obj(J2G(J), &ix->tabv));
+ if (mt == NULL) {
+ ix->mt = TREF_NIL;
+ return 0; /* No metamethod. */
+ }
+ /* The cdata metatable is treated as immutable. */
+ if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt;
+ ix->mt = mix.tab = lj_ir_ktab(J, mt);
+ goto nocheck;
+ }
+ ix->mt = mt ? mix.tab : TREF_NIL;
+ emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB));
+nocheck:
+ if (mt) {
+ GCstr *mmstr = mmname_str(J2G(J), mm);
+ cTValue *mo = lj_tab_getstr(mt, mmstr);
+ if (mo && !tvisnil(mo))
+ copyTV(J->L, &ix->mobjv, mo);
+ ix->mtv = mt;
+ settabV(J->L, &mix.tabv, mt);
+ setstrV(J->L, &mix.keyv, mmstr);
+ mix.key = lj_ir_kstr(J, mmstr);
+ mix.val = 0;
+ mix.idxchain = 0;
+ ix->mobj = lj_record_idx(J, &mix);
+ return !tref_isnil(ix->mobj); /* 1 if metamethod found, 0 if not. */
+ }
+ return 0; /* No metamethod. */
+}
+
+/* Record call to arithmetic metamethod. */
+static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm)
+{
+ /* Set up metamethod call first to save ix->tab and ix->tabv. */
+ BCReg func = rec_mm_prep(J, lj_cont_ra);
+ TRef *base = J->base + func;
+ TValue *basev = J->L->base + func;
+ base[1] = ix->tab; base[2] = ix->key;
+ copyTV(J->L, basev+1, &ix->tabv);
+ copyTV(J->L, basev+2, &ix->keyv);
+ if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
+ if (mm != MM_unm) {
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ if (lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */
+ goto ok;
+ }
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ }
+ok:
+ base[0] = ix->mobj;
+ copyTV(J->L, basev+0, &ix->mobjv);
+ lj_record_call(J, func, 2);
+ return 0; /* No result yet. */
+}
+
+/* Record call to __len metamethod. */
+static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
+{
+ RecordIndex ix;
+ ix.tab = tr;
+ copyTV(J->L, &ix.tabv, tv);
+ if (lj_record_mm_lookup(J, &ix, MM_len)) {
+ BCReg func = rec_mm_prep(J, lj_cont_ra);
+ TRef *base = J->base + func;
+ TValue *basev = J->L->base + func;
+ base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv);
+ base[1] = tr; copyTV(J->L, basev+1, tv);
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ base[2] = tr; copyTV(J->L, basev+2, tv);
+#else
+ base[2] = TREF_NIL; setnilV(basev+2);
+#endif
+ lj_record_call(J, func, 2);
+ } else {
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+ if (tref_istab(tr))
+ return lj_ir_call(J, IRCALL_lj_tab_len, tr);
+#endif
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ }
+ return 0; /* No result yet. */
+}
+
+/* Call a comparison metamethod. */
+static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op)
+{
+ BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt);
+ TRef *base = J->base + func;
+ TValue *tv = J->L->base + func;
+ base[0] = ix->mobj; base[1] = ix->val; base[2] = ix->key;
+ copyTV(J->L, tv+0, &ix->mobjv);
+ copyTV(J->L, tv+1, &ix->valv);
+ copyTV(J->L, tv+2, &ix->keyv);
+ lj_record_call(J, func, 2);
+}
+
+/* Record call to equality comparison metamethod (for tab and udata only). */
+static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op)
+{
+ ix->tab = ix->val;
+ copyTV(J->L, &ix->tabv, &ix->valv);
+ if (lj_record_mm_lookup(J, ix, MM_eq)) { /* Lookup mm on 1st operand. */
+ cTValue *bv;
+ TRef mo1 = ix->mobj;
+ TValue mo1v;
+ copyTV(J->L, &mo1v, &ix->mobjv);
+ /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
+ bv = &ix->keyv;
+ if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else { /* Lookup metamethod on 2nd operand and compare both. */
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, bv);
+ if (!lj_record_mm_lookup(J, ix, MM_eq) ||
+ lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
+ return;
+ }
+ rec_mm_callcomp(J, ix, op);
+ }
+}
+
+/* Record call to ordered comparison metamethods (for arbitrary objects). */
+static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op)
+{
+ ix->tab = ix->val;
+ copyTV(J->L, &ix->tabv, &ix->valv);
+ while (1) {
+ MMS mm = (op & 2) ? MM_le : MM_lt; /* Try __le + __lt or only __lt. */
+ if (lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
+ cTValue *bv;
+ TRef mo1 = ix->mobj;
+ TValue mo1v;
+ copyTV(J->L, &mo1v, &ix->mobjv);
+ /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
+ bv = &ix->keyv;
+ if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else { /* Lookup metamethod on 2nd operand and compare both. */
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, bv);
+ if (!lj_record_mm_lookup(J, ix, mm) ||
+ lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
+ goto nomatch;
+ }
+ rec_mm_callcomp(J, ix, op);
+ return;
+ }
+ nomatch:
+ /* First lookup failed. Retry with __lt and swapped operands. */
+ if (!(op & 2)) break; /* Already at __lt. Interpreter will throw. */
+ ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ copyTV(J->L, &ix->keyv, &ix->valv);
+ copyTV(J->L, &ix->valv, &ix->tabv);
+ op ^= 3;
+ }
+}
+
+#if LJ_HASFFI
+/* Setup call to cdata comparison metamethod. */
+static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm)
+{
+ lj_snap_add(J);
+ if (tref_iscdata(ix->val)) {
+ ix->tab = ix->val;
+ copyTV(J->L, &ix->tabv, &ix->valv);
+ } else {
+ lua_assert(tref_iscdata(ix->key));
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ }
+ lj_record_mm_lookup(J, ix, mm);
+ rec_mm_callcomp(J, ix, op);
+}
+#endif
+
+/* -- Indexed access ------------------------------------------------------ */
+
+/* Record bounds-check. */
+static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
+{
+ /* Try to emit invariant bounds checks. */
+ if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) ==
+ (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) {
+ IRRef ref = tref_ref(ikey);
+ IRIns *ir = IR(ref);
+ int32_t ofs = 0;
+ IRRef ofsref = 0;
+ /* Handle constant offsets. */
+ if (ir->o == IR_ADD && irref_isk(ir->op2)) {
+ ofsref = ir->op2;
+ ofs = IR(ofsref)->i;
+ ref = ir->op1;
+ ir = IR(ref);
+ }
+ /* Got scalar evolution analysis results for this reference? */
+ if (ref == J->scev.idx) {
+ int32_t stop;
+ lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD);
+ stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]);
+ /* Runtime value for stop of loop is within bounds? */
+ if ((int64_t)stop + ofs < (int64_t)asize) {
+ /* Emit invariant bounds check for stop. */
+ emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop :
+ emitir(IRTI(IR_ADD), J->scev.stop, ofsref));
+ /* Emit invariant bounds check for start, if not const or negative. */
+ if (!(J->scev.dir && J->scev.start &&
+ (int64_t)IR(J->scev.start)->i + ofs >= 0))
+ emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey);
+ return;
+ }
+ }
+ }
+ emitir(IRTGI(IR_ABC), asizeref, ikey); /* Emit regular bounds check. */
+}
+
+/* Record indexed key lookup. */
+static TRef rec_idx_key(jit_State *J, RecordIndex *ix)
+{
+ TRef key;
+ GCtab *t = tabV(&ix->tabv);
+ ix->oldv = lj_tab_get(J->L, t, &ix->keyv); /* Lookup previous value. */
+
+ /* Integer keys are looked up in the array part first. */
+ key = ix->key;
+ if (tref_isnumber(key)) {
+ int32_t k = numberVint(&ix->keyv);
+ if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k)
+ k = LJ_MAX_ASIZE;
+ if ((MSize)k < LJ_MAX_ASIZE) { /* Potential array key? */
+ TRef ikey = lj_opt_narrow_index(J, key);
+ TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
+ if ((MSize)k < t->asize) { /* Currently an array key? */
+ TRef arrayref;
+ rec_idx_abc(J, asizeref, ikey, t->asize);
+ arrayref = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_ARRAY);
+ return emitir(IRT(IR_AREF, IRT_P32), arrayref, ikey);
+ } else { /* Currently not in array (may be an array extension)? */
+ emitir(IRTGI(IR_ULE), asizeref, ikey); /* Inv. bounds check. */
+ if (k == 0 && tref_isk(key))
+ key = lj_ir_knum_zero(J); /* Canonicalize 0 or +-0.0 to +0.0. */
+ /* And continue with the hash lookup. */
+ }
+ } else if (!tref_isk(key)) {
+ /* We can rule out const numbers which failed the integerness test
+ ** above. But all other numbers are potential array keys.
+ */
+ if (t->asize == 0) { /* True sparse tables have an empty array part. */
+ /* Guard that the array part stays empty. */
+ TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
+ emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
+ } else {
+ lj_trace_err(J, LJ_TRERR_NYITMIX);
+ }
+ }
+ }
+
+ /* Otherwise the key is located in the hash part. */
+ if (t->hmask == 0) { /* Shortcut for empty hash part. */
+ /* Guard that the hash part stays empty. */
+ TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
+ emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
+ return lj_ir_kkptr(J, niltvg(J2G(J)));
+ }
+ if (tref_isinteger(key)) /* Hash keys are based on numbers, not ints. */
+ ix->key = key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
+ if (tref_isk(key)) {
+ /* Optimize lookup of constant hash keys. */
+ MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val);
+ if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) &&
+ hslot <= 65535*(MSize)sizeof(Node)) {
+ TRef node, kslot;
+ TRef hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
+ emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask));
+ node = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_NODE);
+ kslot = lj_ir_kslot(J, key, hslot / sizeof(Node));
+ return emitir(IRTG(IR_HREFK, IRT_P32), node, kslot);
+ }
+ }
+ /* Fall back to a regular hash lookup. */
+ return emitir(IRT(IR_HREF, IRT_P32), ix->tab, key);
+}
+
+/* Determine whether a key is NOT one of the fast metamethod names. */
+static int nommstr(jit_State *J, TRef key)
+{
+ if (tref_isstr(key)) {
+ if (tref_isk(key)) {
+ GCstr *str = ir_kstr(IR(tref_ref(key)));
+ uint32_t mm;
+ for (mm = 0; mm <= MM_FAST; mm++)
+ if (mmname_str(J2G(J), mm) == str)
+          return 0;  /* MUST be one of the fast metamethod names. */
+ } else {
+ return 0; /* Variable string key MAY be a metamethod name. */
+ }
+ }
+ return 1; /* CANNOT be a metamethod name. */
+}
+
+/* Record indexed load/store. */
+TRef lj_record_idx(jit_State *J, RecordIndex *ix)
+{
+ TRef xref;
+ IROp xrefop, loadop;
+ cTValue *oldv;
+
+ while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */
+ /* Never call raw lj_record_idx() on non-table. */
+ lua_assert(ix->idxchain != 0);
+ if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index))
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ handlemm:
+ if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */
+ BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra);
+ TRef *base = J->base + func;
+ TValue *tv = J->L->base + func;
+ base[0] = ix->mobj; base[1] = ix->tab; base[2] = ix->key;
+ setfuncV(J->L, tv+0, funcV(&ix->mobjv));
+ copyTV(J->L, tv+1, &ix->tabv);
+ copyTV(J->L, tv+2, &ix->keyv);
+ if (ix->val) {
+ base[3] = ix->val;
+ copyTV(J->L, tv+3, &ix->valv);
+ lj_record_call(J, func, 3); /* mobj(tab, key, val) */
+ return 0;
+ } else {
+ lj_record_call(J, func, 2); /* res = mobj(tab, key) */
+ return 0; /* No result yet. */
+ }
+ }
+ /* Otherwise retry lookup with metaobject. */
+ ix->tab = ix->mobj;
+ copyTV(J->L, &ix->tabv, &ix->mobjv);
+ if (--ix->idxchain == 0)
+ lj_trace_err(J, LJ_TRERR_IDXLOOP);
+ }
+
+ /* First catch nil and NaN keys for tables. */
+ if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) {
+ if (ix->val) /* Better fail early. */
+ lj_trace_err(J, LJ_TRERR_STORENN);
+ if (tref_isk(ix->key)) {
+ if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
+ goto handlemm;
+ return TREF_NIL;
+ }
+ }
+
+ /* Record the key lookup. */
+ xref = rec_idx_key(J, ix);
+ xrefop = IR(tref_ref(xref))->o;
+ loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD;
+ /* The lj_meta_tset() inconsistency is gone, but better play safe. */
+ oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv;
+
+ if (ix->val == 0) { /* Indexed load */
+ IRType t = itype2irt(oldv);
+ TRef res;
+ if (oldv == niltvg(J2G(J))) {
+ emitir(IRTG(IR_EQ, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
+ res = TREF_NIL;
+ } else {
+ res = emitir(IRTG(loadop, t), xref, 0);
+ }
+ if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
+ goto handlemm;
+ if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitives. */
+ return res;
+ } else { /* Indexed store. */
+ GCtab *mt = tabref(tabV(&ix->tabv)->metatable);
+ int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val);
+ if (tvisnil(oldv)) { /* Previous value was nil? */
+ /* Need to duplicate the hasmm check for the early guards. */
+ int hasmm = 0;
+ if (ix->idxchain && mt) {
+ cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex));
+ hasmm = mo && !tvisnil(mo);
+ }
+ if (hasmm)
+ emitir(IRTG(loadop, IRT_NIL), xref, 0); /* Guard for nil value. */
+ else if (xrefop == IR_HREF)
+ emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_P32),
+ xref, lj_ir_kkptr(J, niltvg(J2G(J))));
+ if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) {
+ lua_assert(hasmm);
+ goto handlemm;
+ }
+ lua_assert(!hasmm);
+ if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */
+ TRef key = ix->key;
+ if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */
+ key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
+ xref = emitir(IRT(IR_NEWREF, IRT_P32), ix->tab, key);
+ keybarrier = 0; /* NEWREF already takes care of the key barrier. */
+ }
+ } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) {
+ /* Cannot derive that the previous value was non-nil, must do checks. */
+ if (xrefop == IR_HREF) /* Guard against store to niltv. */
+ emitir(IRTG(IR_NE, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
+ if (ix->idxchain) { /* Metamethod lookup required? */
+ /* A check for NULL metatable is cheaper (hoistable) than a load. */
+ if (!mt) {
+ TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB));
+ } else {
+ IRType t = itype2irt(oldv);
+ emitir(IRTG(loadop, t), xref, 0); /* Guard for non-nil value. */
+ }
+ }
+ } else {
+ keybarrier = 0; /* Previous non-nil value kept the key alive. */
+ }
+ /* Convert int to number before storing. */
+ if (!LJ_DUALNUM && tref_isinteger(ix->val))
+ ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT);
+ emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val);
+ if (keybarrier || tref_isgcv(ix->val))
+ emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0);
+ /* Invalidate neg. metamethod cache for stores with certain string keys. */
+ if (!nommstr(J, ix->key)) {
+ TRef fref = emitir(IRT(IR_FREF, IRT_P32), ix->tab, IRFL_TAB_NOMM);
+ emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0));
+ }
+ J->needsnap = 1;
+ return 0;
+ }
+}
+
+/* -- Upvalue access ------------------------------------------------------ */
+
+/* Record upvalue load/store. */
+static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val)
+{
+ GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv;
+ TRef fn = getcurrf(J);
+ IRRef uref;
+ int needbarrier = 0;
+ /* Note: this effectively limits LJ_MAX_UPVAL to 127. */
+ uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff);
+ if (!uvp->closed) {
+ /* In current stack? */
+ if (uvval(uvp) >= tvref(J->L->stack) &&
+ uvval(uvp) < tvref(J->L->maxstack)) {
+ int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot));
+ if (slot >= 0) { /* Aliases an SSA slot? */
+ slot -= (int32_t)J->baseslot; /* Note: slot number may be negative! */
+ /* NYI: add IR to guard that it's still aliasing the same slot. */
+ if (val == 0) {
+ return getslot(J, slot);
+ } else {
+ J->base[slot] = val;
+ if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1);
+ return 0;
+ }
+ }
+ }
+ uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_P32), fn, uv));
+ } else {
+ needbarrier = 1;
+ uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_P32), fn, uv));
+ }
+ if (val == 0) { /* Upvalue load */
+ IRType t = itype2irt(uvval(uvp));
+ TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0);
+ if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitive refs. */
+ return res;
+ } else { /* Upvalue store. */
+ /* Convert int to number before storing. */
+ if (!LJ_DUALNUM && tref_isinteger(val))
+ val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
+ emitir(IRT(IR_USTORE, tref_type(val)), uref, val);
+ if (needbarrier && tref_isgcv(val))
+ emitir(IRT(IR_OBAR, IRT_NIL), uref, val);
+ J->needsnap = 1;
+ return 0;
+ }
+}
+
+/* -- Record calls to Lua functions --------------------------------------- */
+
+/* Check unroll limits for calls. */
+static void check_call_unroll(jit_State *J, TraceNo lnk)
+{
+ cTValue *frame = J->L->base - 1;
+ void *pc = mref(frame_func(frame)->l.pc, void);
+ int32_t depth = J->framedepth;
+ int32_t count = 0;
+ if ((J->pt->flags & PROTO_VARARG)) depth--; /* Vararg frame still missing. */
+ for (; depth > 0; depth--) { /* Count frames with same prototype. */
+ frame = frame_prev(frame);
+ if (mref(frame_func(frame)->l.pc, void) == pc)
+ count++;
+ }
+ if (J->pc == J->startpc) {
+ if (count + J->tailcalled > J->param[JIT_P_recunroll]) {
+ J->pc++;
+ if (J->framedepth + J->retdepth == 0)
+ rec_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Tail-recursion. */
+ else
+ rec_stop(J, LJ_TRLINK_UPREC, J->cur.traceno); /* Up-recursion. */
+ }
+ } else {
+ if (count > J->param[JIT_P_callunroll]) {
+ if (lnk) { /* Possible tail- or up-recursion. */
+ lj_trace_flush(J, lnk); /* Flush trace that only returns. */
+ /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */
+ hotcount_set(J2GG(J), J->pc+1, LJ_PRNG_BITS(J, 4));
+ }
+ lj_trace_err(J, LJ_TRERR_CUNROLL);
+ }
+ }
+}
+
+/* Record Lua function setup. */
+static void rec_func_setup(jit_State *J)
+{
+ GCproto *pt = J->pt;
+ BCReg s, numparams = pt->numparams;
+ if ((pt->flags & PROTO_NOJIT))
+ lj_trace_err(J, LJ_TRERR_CJITOFF);
+ if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+ /* Fill up missing parameters with nil. */
+ for (s = J->maxslot; s < numparams; s++)
+ J->base[s] = TREF_NIL;
+ /* The remaining slots should never be read before they are written. */
+ J->maxslot = numparams;
+}
+
+/* Record Lua vararg function setup. */
+static void rec_func_vararg(jit_State *J)
+{
+ GCproto *pt = J->pt;
+ BCReg s, fixargs, vframe = J->maxslot+1;
+ lua_assert((pt->flags & PROTO_VARARG));
+ if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+ J->base[vframe-1] = J->base[-1]; /* Copy function up. */
+ /* Copy fixarg slots up and set their original slots to nil. */
+ fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot;
+ for (s = 0; s < fixargs; s++) {
+ J->base[vframe+s] = J->base[s];
+ J->base[s] = TREF_NIL;
+ }
+ J->maxslot = fixargs;
+ J->framedepth++;
+ J->base += vframe;
+ J->baseslot += vframe;
+}
+
+/* Record entry to a Lua function. */
+static void rec_func_lua(jit_State *J)
+{
+ rec_func_setup(J);
+ check_call_unroll(J, 0);
+}
+
+/* Record entry to an already compiled function. */
+static void rec_func_jit(jit_State *J, TraceNo lnk)
+{
+ GCtrace *T;
+ rec_func_setup(J);
+ T = traceref(J, lnk);
+ if (T->linktype == LJ_TRLINK_RETURN) { /* Trace returns to interpreter? */
+ check_call_unroll(J, lnk);
+ /* Temporarily unpatch JFUNC* to continue recording across function. */
+ J->patchins = *J->pc;
+ J->patchpc = (BCIns *)J->pc;
+ *J->patchpc = T->startins;
+ return;
+ }
+ J->instunroll = 0; /* Cannot continue across a compiled function. */
+ if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
+ rec_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Extra tail-recursion. */
+ else
+ rec_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the function. */
+}
+
+/* -- Vararg handling ----------------------------------------------------- */
+
+/* Detect y = select(x, ...) idiom. */
+static int select_detect(jit_State *J)
+{
+ BCIns ins = J->pc[1];
+ if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) {
+ cTValue *func = &J->L->base[bc_a(ins)];
+ if (tvisfunc(func) && funcV(func)->c.ffid == FF_select)
+ return 1;
+ }
+ return 0;
+}
+
+/* Record vararg instruction. */
+static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
+{
+ int32_t numparams = J->pt->numparams;
+ ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1;
+ lua_assert(frame_isvarg(J->L->base-1));
+ if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */
+ ptrdiff_t i;
+ if (nvararg < 0) nvararg = 0;
+ if (nresults == -1) {
+ nresults = nvararg;
+ J->maxslot = dst + (BCReg)nvararg;
+ } else if (dst + nresults > J->maxslot) {
+ J->maxslot = dst + (BCReg)nresults;
+ }
+ for (i = 0; i < nresults; i++) {
+ J->base[dst+i] = i < nvararg ? J->base[i - nvararg - 1] : TREF_NIL;
+ lua_assert(J->base[dst+i] != 0);
+ }
+ } else { /* Unknown number of varargs passed to trace. */
+ TRef fr = emitir(IRTI(IR_SLOAD), 0, IRSLOAD_READONLY|IRSLOAD_FRAME);
+ int32_t frofs = 8*(1+numparams)+FRAME_VARG;
+ if (nresults >= 0) { /* Known fixed number of results. */
+ ptrdiff_t i;
+ if (nvararg > 0) {
+ ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg;
+ TRef vbase;
+ if (nvararg >= nresults)
+ emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults));
+ else
+ emitir(IRTGI(IR_EQ), fr, lj_ir_kint(J, frame_ftsz(J->L->base-1)));
+ vbase = emitir(IRTI(IR_SUB), REF_BASE, fr);
+ vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8));
+ for (i = 0; i < nload; i++) {
+ IRType t = itype2irt(&J->L->base[i-1-nvararg]);
+ TRef aref = emitir(IRT(IR_AREF, IRT_P32),
+ vbase, lj_ir_kint(J, (int32_t)i));
+ TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
+ if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */
+ J->base[dst+i] = tr;
+ }
+ } else {
+ emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs));
+ nvararg = 0;
+ }
+ for (i = nvararg; i < nresults; i++)
+ J->base[dst+i] = TREF_NIL;
+ if (dst + (BCReg)nresults > J->maxslot)
+ J->maxslot = dst + (BCReg)nresults;
+ } else if (select_detect(J)) { /* y = select(x, ...) */
+ TRef tridx = J->base[dst-1];
+ TRef tr = TREF_NIL;
+ ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]);
+ if (idx < 0) goto nyivarg;
+ if (idx != 0 && !tref_isinteger(tridx))
+ tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX);
+ if (idx != 0 && tref_isk(tridx)) {
+ emitir(IRTGI(idx <= nvararg ? IR_GE : IR_LT),
+ fr, lj_ir_kint(J, frofs+8*(int32_t)idx));
+ frofs -= 8; /* Bias for 1-based index. */
+ } else if (idx <= nvararg) { /* Compute size. */
+ TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs));
+ if (numparams)
+ emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0));
+ tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3));
+ if (idx != 0) {
+ tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1));
+ rec_idx_abc(J, tr, tridx, (uint32_t)nvararg);
+ }
+ } else {
+ TRef tmp = lj_ir_kint(J, frofs);
+ if (idx != 0) {
+ TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3));
+ tmp = emitir(IRTI(IR_ADD), tmp2, tmp);
+ } else {
+ tr = lj_ir_kint(J, 0);
+ }
+ emitir(IRTGI(IR_LT), fr, tmp);
+ }
+ if (idx != 0 && idx <= nvararg) {
+ IRType t;
+ TRef aref, vbase = emitir(IRTI(IR_SUB), REF_BASE, fr);
+ vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8));
+ t = itype2irt(&J->L->base[idx-2-nvararg]);
+ aref = emitir(IRT(IR_AREF, IRT_P32), vbase, tridx);
+ tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
+ if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */
+ }
+ J->base[dst-2] = tr;
+ J->maxslot = dst-1;
+ J->bcskip = 2; /* Skip CALLM + select. */
+ } else {
+ nyivarg:
+ setintV(&J->errinfo, BC_VARG);
+ lj_trace_err_info(J, LJ_TRERR_NYIBC);
+ }
+ }
+}
+
+/* -- Record allocations -------------------------------------------------- */
+
+static TRef rec_tnew(jit_State *J, uint32_t ah)
+{
+ uint32_t asize = ah & 0x7ff;
+ uint32_t hbits = ah >> 11;
+ if (asize == 0x7ff) asize = 0x801;
+ return emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits);
+}
+
+/* -- Record bytecode ops ------------------------------------------------- */
+
+/* Prepare for comparison. */
+static void rec_comp_prep(jit_State *J)
+{
+ /* Prevent merging with snapshot #0 (GC exit) since we fixup the PC. */
+ if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins)
+ emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
+ lj_snap_add(J);
+}
+
+/* Fixup comparison. */
+static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond)
+{
+ BCIns jmpins = pc[1];
+ const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0);
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
+ /* Set PC to opposite target to avoid re-recording the comp. in side trace. */
+ J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc);
+ J->needsnap = 1;
+ if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins);
+ lj_snap_shrink(J); /* Shrink last snapshot if possible. */
+}
+
+/* Record the next bytecode instruction (_before_ it's executed). */
+void lj_record_ins(jit_State *J)
+{
+ cTValue *lbase;
+ RecordIndex ix;
+ const BCIns *pc;
+ BCIns ins;
+ BCOp op;
+ TRef ra, rb, rc;
+
+ /* Perform post-processing action before recording the next instruction. */
+ if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) {
+ switch (J->postproc) {
+ case LJ_POST_FIXCOMP: /* Fixup comparison. */
+ pc = frame_pc(&J2G(J)->tmptv);
+ rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1)));
+ /* fallthrough */
+ case LJ_POST_FIXGUARD: /* Fixup and emit pending guard. */
+ case LJ_POST_FIXGUARDSNAP: /* Fixup and emit pending guard and snapshot. */
+ if (!tvistruecond(&J2G(J)->tmptv2)) {
+ J->fold.ins.o ^= 1; /* Flip guard to opposite. */
+ if (J->postproc == LJ_POST_FIXGUARDSNAP) {
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
+ J->cur.snapmap[snap->mapofs+snap->nent-1]--; /* False -> true. */
+ }
+ }
+ lj_opt_fold(J); /* Emit pending guard. */
+ /* fallthrough */
+ case LJ_POST_FIXBOOL:
+ if (!tvistruecond(&J2G(J)->tmptv2)) {
+ BCReg s;
+ for (s = 0; s < J->maxslot; s++) /* Fixup stack slot (if any). */
+ if (J->base[s] == TREF_TRUE && tvisfalse(&J->L->base[s])) {
+ J->base[s] = TREF_FALSE;
+ break;
+ }
+ }
+ break;
+ case LJ_POST_FFRETRY: /* Suppress recording of retried fast function. */
+ if (bc_op(*J->pc) >= BC__MAX)
+ return;
+ break;
+ default: lua_assert(0); break;
+ }
+ J->postproc = LJ_POST_NONE;
+ }
+
+ /* Need snapshot before recording next bytecode (e.g. after a store). */
+ if (J->needsnap) {
+ J->needsnap = 0;
+ lj_snap_purge(J);
+ lj_snap_add(J);
+ J->mergesnap = 1;
+ }
+
+ /* Skip some bytecodes. */
+ if (LJ_UNLIKELY(J->bcskip > 0)) {
+ J->bcskip--;
+ return;
+ }
+
+ /* Record only closed loops for root traces. */
+ pc = J->pc;
+ if (J->framedepth == 0 &&
+ (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent)
+ lj_trace_err(J, LJ_TRERR_LLEAVE);
+
+#ifdef LUA_USE_ASSERT
+ rec_check_slots(J);
+ rec_check_ir(J);
+#endif
+
+ /* Keep a copy of the runtime values of var/num/str operands. */
+#define rav (&ix.valv)
+#define rbv (&ix.tabv)
+#define rcv (&ix.keyv)
+
+ lbase = J->L->base;
+ ins = *pc;
+ op = bc_op(ins);
+ ra = bc_a(ins);
+ ix.val = 0;
+ switch (bcmode_a(op)) {
+ case BCMvar:
+ copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break;
+ default: break; /* Handled later. */
+ }
+ rb = bc_b(ins);
+ rc = bc_c(ins);
+ switch (bcmode_b(op)) {
+ case BCMnone: rb = 0; rc = bc_d(ins); break; /* Upgrade rc to 'rd'. */
+ case BCMvar:
+ copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break;
+ default: break; /* Handled later. */
+ }
+ switch (bcmode_c(op)) {
+ case BCMvar:
+ copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break;
+ case BCMpri: setitype(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break;
+ case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc);
+ copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) :
+ lj_ir_knumint(J, numV(tv)); } break;
+ case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc));
+ setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break;
+ default: break; /* Handled later. */
+ }
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+#if LJ_HASFFI
+ if (tref_iscdata(ra) || tref_iscdata(rc)) {
+ rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt);
+ break;
+ }
+#endif
+ /* Emit nothing for two numeric or string consts. */
+ if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) {
+ IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra);
+ IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc);
+ int irop;
+ if (ta != tc) {
+ /* Widen mixed number/int comparisons to number/number comparison. */
+ if (ta == IRT_INT && tc == IRT_NUM) {
+ ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT);
+ ta = IRT_NUM;
+ } else if (ta == IRT_NUM && tc == IRT_INT) {
+ rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
+ } else if (!((ta == IRT_FALSE || ta == IRT_TRUE) &&
+ (tc == IRT_FALSE || tc == IRT_TRUE))) {
+ break; /* Interpreter will throw for two different types. */
+ }
+ }
+ rec_comp_prep(J);
+ irop = (int)op - (int)BC_ISLT + (int)IR_LT;
+ if (ta == IRT_NUM) {
+ if ((irop & 1)) irop ^= 4; /* ISGE/ISGT are unordered. */
+ if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
+ irop ^= 5;
+ } else if (ta == IRT_INT) {
+ if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
+ irop ^= 1;
+ } else if (ta == IRT_STR) {
+ if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1;
+ ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc);
+ rc = lj_ir_kint(J, 0);
+ ta = IRT_INT;
+ } else {
+ rec_mm_comp(J, &ix, (int)op);
+ break;
+ }
+ emitir(IRTG(irop, ta), ra, rc);
+ rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1);
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ case BC_ISEQS: case BC_ISNES:
+ case BC_ISEQN: case BC_ISNEN:
+ case BC_ISEQP: case BC_ISNEP:
+#if LJ_HASFFI
+ if (tref_iscdata(ra) || tref_iscdata(rc)) {
+ rec_mm_comp_cdata(J, &ix, op, MM_eq);
+ break;
+ }
+#endif
+ /* Emit nothing for two non-table, non-udata consts. */
+ if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) {
+ int diff;
+ rec_comp_prep(J);
+ diff = lj_record_objcmp(J, ra, rc, rav, rcv);
+ if (diff == 1 && (tref_istab(ra) || tref_isudata(ra))) {
+ /* Only check __eq if different, but the same type (table or udata). */
+ rec_mm_equal(J, &ix, (int)op);
+ break;
+ }
+ rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff);
+ }
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC:
+ if ((op & 1) == tref_istruecond(rc))
+ rc = 0; /* Don't store if condition is not true. */
+ /* fallthrough */
+ case BC_IST: case BC_ISF: /* Type specialization suffices. */
+ if (bc_a(pc[1]) < J->maxslot)
+ J->maxslot = bc_a(pc[1]); /* Shrink used slots. */
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_NOT:
+ /* Type specialization already forces const result. */
+ rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE;
+ break;
+
+ case BC_LEN:
+ if (tref_isstr(rc))
+ rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN);
+#ifndef LUAJIT_ENABLE_LUA52COMPAT
+ else if (tref_istab(rc))
+ rc = lj_ir_call(J, IRCALL_lj_tab_len, rc);
+#endif
+ else
+ rc = rec_mm_len(J, rc, rcv);
+ break;
+
+ /* -- Arithmetic ops ---------------------------------------------------- */
+
+ case BC_UNM:
+ if (tref_isnumber_str(rc)) {
+ rc = lj_opt_narrow_unm(J, rc, rcv);
+ } else {
+ ix.tab = rc;
+ copyTV(J->L, &ix.tabv, rcv);
+ rc = rec_mm_arith(J, &ix, MM_unm);
+ }
+ break;
+
+ case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV:
+ /* Swap rb/rc and rbv/rcv. rav is temp. */
+ ix.tab = rc; ix.key = rc = rb; rb = ix.tab;
+ copyTV(J->L, rav, rbv);
+ copyTV(J->L, rbv, rcv);
+ copyTV(J->L, rcv, rav);
+ if (op == BC_MODNV)
+ goto recmod;
+ /* fallthrough */
+ case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN:
+ case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: {
+ MMS mm = bcmode_mm(op);
+ if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
+ rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv,
+ (int)mm - (int)MM_add + (int)IR_ADD);
+ else
+ rc = rec_mm_arith(J, &ix, mm);
+ break;
+ }
+
+ case BC_MODVN: case BC_MODVV:
+ recmod:
+ if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
+ rc = lj_opt_narrow_mod(J, rb, rc, rcv);
+ else
+ rc = rec_mm_arith(J, &ix, MM_mod);
+ break;
+
+ case BC_POW:
+ if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
+ rc = lj_opt_narrow_pow(J, lj_ir_tonum(J, rb), rc, rcv);
+ else
+ rc = rec_mm_arith(J, &ix, MM_pow);
+ break;
+
+ /* -- Constant and move ops --------------------------------------------- */
+
+ case BC_MOV:
+ /* Clear gap of method call to avoid resurrecting previous refs. */
+ if (ra > J->maxslot) J->base[ra-1] = 0;
+ break;
+ case BC_KSTR: case BC_KNUM: case BC_KPRI:
+ break;
+ case BC_KSHORT:
+ rc = lj_ir_kint(J, (int32_t)(int16_t)rc);
+ break;
+ case BC_KNIL:
+ while (ra <= rc)
+ J->base[ra++] = TREF_NIL;
+ if (rc >= J->maxslot) J->maxslot = rc+1;
+ break;
+#if LJ_HASFFI
+ case BC_KCDATA:
+ rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA);
+ break;
+#endif
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ rc = rec_upvalue(J, rc, 0);
+ break;
+ case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP:
+ rec_upvalue(J, ra, rc);
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_GGET: case BC_GSET:
+ settabV(J->L, &ix.tabv, tabref(J->fn->l.env));
+ ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV);
+ ix.idxchain = LJ_MAX_IDXCHAIN;
+ rc = lj_record_idx(J, &ix);
+ break;
+
+ case BC_TGETB: case BC_TSETB:
+ setintV(&ix.keyv, (int32_t)rc);
+ ix.key = lj_ir_kint(J, (int32_t)rc);
+ /* fallthrough */
+ case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS:
+ ix.idxchain = LJ_MAX_IDXCHAIN;
+ rc = lj_record_idx(J, &ix);
+ break;
+
+ case BC_TNEW:
+ rc = rec_tnew(J, rc);
+ break;
+ case BC_TDUP:
+ rc = emitir(IRTG(IR_TDUP, IRT_TAB),
+ lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0);
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_ITERC:
+ J->base[ra] = getslot(J, ra-3);
+ J->base[ra+1] = getslot(J, ra-2);
+ J->base[ra+2] = getslot(J, ra-1);
+ { /* Do the actual copy now because lj_record_call needs the values. */
+ TValue *b = &J->L->base[ra];
+ copyTV(J->L, b, b-3);
+ copyTV(J->L, b+1, b-2);
+ copyTV(J->L, b+2, b-1);
+ }
+ lj_record_call(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */
+ case BC_CALLM:
+ rc = (BCReg)(J->L->top - J->L->base) - ra;
+ /* fallthrough */
+ case BC_CALL:
+ lj_record_call(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ case BC_CALLMT:
+ rc = (BCReg)(J->L->top - J->L->base) - ra;
+ /* fallthrough */
+ case BC_CALLT:
+ lj_record_tailcall(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ case BC_VARG:
+ rec_varg(J, ra, (ptrdiff_t)rb-1);
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */
+ rc = (BCReg)(J->L->top - J->L->base) - ra + 1;
+ /* fallthrough */
+ case BC_RET: case BC_RET0: case BC_RET1:
+ lj_record_ret(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORI:
+ if (rec_for(J, pc, 0) != LOOPEV_LEAVE)
+ J->loopref = J->cur.nins;
+ break;
+ case BC_JFORI:
+ lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL);
+ if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. */
+ rec_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J]));
+ /* Continue tracing if the loop is not entered. */
+ break;
+
+ case BC_FORL:
+ rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1));
+ break;
+ case BC_ITERL:
+ rec_loop_interp(J, pc, rec_iterl(J, *pc));
+ break;
+ case BC_LOOP:
+ rec_loop_interp(J, pc, rec_loop(J, ra));
+ break;
+
+ case BC_JFORL:
+ rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1));
+ break;
+ case BC_JITERL:
+ rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins));
+ break;
+ case BC_JLOOP:
+ rec_loop_jit(J, rc, rec_loop(J, ra));
+ break;
+
+ case BC_IFORL:
+ case BC_IITERL:
+ case BC_ILOOP:
+ case BC_IFUNCF:
+ case BC_IFUNCV:
+ lj_trace_err(J, LJ_TRERR_BLACKL);
+ break;
+
+ case BC_JMP:
+ if (ra < J->maxslot)
+ J->maxslot = ra; /* Shrink used slots. */
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ rec_func_lua(J);
+ break;
+ case BC_JFUNCF:
+ rec_func_jit(J, rc);
+ break;
+
+ case BC_FUNCV:
+ rec_func_vararg(J);
+ rec_func_lua(J);
+ break;
+ case BC_JFUNCV:
+ lua_assert(0); /* Cannot happen. No hotcall counting for vararg funcs. */
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ lj_ffrecord_func(J);
+ break;
+
+ default:
+ if (op >= BC__MAX) {
+ lj_ffrecord_func(J);
+ break;
+ }
+ /* fallthrough */
+ case BC_ITERN:
+ case BC_ISNEXT:
+ case BC_CAT:
+ case BC_UCLO:
+ case BC_FNEW:
+ case BC_TSETM:
+ setintV(&J->errinfo, (int32_t)op);
+ lj_trace_err_info(J, LJ_TRERR_NYIBC);
+ break;
+ }
+
+ /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */
+ if (bcmode_a(op) == BCMdst && rc) {
+ J->base[ra] = rc;
+ if (ra >= J->maxslot) J->maxslot = ra+1;
+ }
+
+#undef rav
+#undef rbv
+#undef rcv
+
+ /* Limit the number of recorded IR instructions. */
+ if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord])
+ lj_trace_err(J, LJ_TRERR_TRACEOV);
+}
+
+/* -- Recording setup ----------------------------------------------------- */
+
+/* Setup recording for a root trace started by a hot loop. */
+static const BCIns *rec_setup_root(jit_State *J)
+{
+ /* Determine the next PC and the bytecode range for the loop. */
+ const BCIns *pcj, *pc = J->pc;
+ BCIns ins = *pc;
+ BCReg ra = bc_a(ins);
+ switch (bc_op(ins)) {
+ case BC_FORL:
+ J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
+ pc += 1+bc_j(ins);
+ J->bc_min = pc;
+ break;
+ case BC_ITERL:
+ lua_assert(bc_op(pc[-1]) == BC_ITERC);
+ J->maxslot = ra + bc_b(pc[-1]) - 1;
+ J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
+ pc += 1+bc_j(ins);
+ lua_assert(bc_op(pc[-1]) == BC_JMP);
+ J->bc_min = pc;
+ break;
+ case BC_LOOP:
+ /* Only check BC range for real loops, but not for "repeat until true". */
+ pcj = pc + bc_j(ins);
+ ins = *pcj;
+ if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) {
+ J->bc_min = pcj+1 + bc_j(ins);
+ J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
+ }
+ J->maxslot = ra;
+ pc++;
+ break;
+ case BC_RET:
+ case BC_RET0:
+ case BC_RET1:
+ /* No bytecode range check for down-recursive root traces. */
+ J->maxslot = ra + bc_d(ins);
+ break;
+ case BC_FUNCF:
+ /* No bytecode range check for root traces started by a hot call. */
+ J->maxslot = J->pt->numparams;
+ pc++;
+ break;
+ default:
+ lua_assert(0);
+ break;
+ }
+ return pc;
+}
+
+/* Setup recording for a side trace. */
+static void rec_setup_side(jit_State *J, GCtrace *T)
+{
+ SnapShot *snap = &T->snap[J->exitno];
+ SnapEntry *map = &T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ BloomFilter seen = 0;
+ J->framedepth = 0;
+ /* Emit IR for slots inherited from parent snapshot. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef ref = snap_ref(sn);
+ BCReg s = snap_slot(sn);
+ IRIns *ir = &T->ir[ref];
+ IRType t = irt_type(ir->t);
+ TRef tr;
+ /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */
+ if (bloomtest(seen, ref)) {
+ MSize j;
+ for (j = 0; j < n; j++)
+ if (snap_ref(map[j]) == ref) {
+ tr = J->slot[snap_slot(map[j])];
+ goto setslot;
+ }
+ }
+ bloomset(seen, ref);
+ switch ((IROp)ir->o) {
+ /* Only have to deal with constants that can occur in stack slots. */
+ case IR_KPRI: tr = TREF_PRI(t); break;
+ case IR_KINT: tr = lj_ir_kint(J, ir->i); break;
+ case IR_KGC: tr = lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t)); break;
+ case IR_KNUM: tr = lj_ir_k64(J, IR_KNUM, ir_knum(ir)); break;
+ case IR_KINT64: tr = lj_ir_k64(J, IR_KINT64, ir_kint64(ir)); break;
+ case IR_KPTR: tr = lj_ir_kptr(J, ir_kptr(ir)); break; /* Continuation. */
+ /* Inherited SLOADs don't need a guard or type check. */
+ case IR_SLOAD:
+ if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM;
+ tr = emitir_raw(IRT(IR_SLOAD, t), s,
+ (ir->op2&IRSLOAD_READONLY) | IRSLOAD_INHERIT|IRSLOAD_PARENT);
+ break;
+ /* Parent refs are already typed and don't need a guard. */
+ default:
+ if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM;
+ tr = emitir_raw(IRT(IR_SLOAD, t), s, IRSLOAD_INHERIT|IRSLOAD_PARENT);
+ break;
+ }
+ setslot:
+ J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME)); /* Same as TREF_* flags. */
+ J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && s);
+ if ((sn & SNAP_FRAME))
+ J->baseslot = s+1;
+ }
+ J->base = J->slot + J->baseslot;
+ J->maxslot = snap->nslots - J->baseslot;
+ lj_snap_add(J);
+}
+
+/* Setup for recording a new trace. */
+void lj_record_setup(jit_State *J)
+{
+ uint32_t i;
+
+ /* Initialize state related to current trace. */
+ memset(J->slot, 0, sizeof(J->slot));
+ memset(J->chain, 0, sizeof(J->chain));
+ memset(J->bpropcache, 0, sizeof(J->bpropcache));
+ J->scev.idx = REF_NIL;
+
+ J->baseslot = 1; /* Invoking function is at base[-1]. */
+ J->base = J->slot + J->baseslot;
+ J->maxslot = 0;
+ J->framedepth = 0;
+ J->retdepth = 0;
+
+ J->instunroll = J->param[JIT_P_instunroll];
+ J->loopunroll = J->param[JIT_P_loopunroll];
+ J->tailcalled = 0;
+ J->loopref = 0;
+
+ J->bc_min = NULL; /* Means no limit. */
+ J->bc_extent = ~(MSize)0;
+
+ /* Emit instructions for fixed references. Also triggers initial IR alloc. */
+ emitir_raw(IRT(IR_BASE, IRT_P32), J->parent, J->exitno);
+ for (i = 0; i <= 2; i++) {
+ IRIns *ir = IR(REF_NIL-i);
+ ir->i = 0;
+ ir->t.irt = (uint8_t)(IRT_NIL+i);
+ ir->o = IR_KPRI;
+ ir->prev = 0;
+ }
+ J->cur.nk = REF_TRUE;
+
+ J->startpc = J->pc;
+ setmref(J->cur.startpc, J->pc);
+ if (J->parent) { /* Side trace. */
+ GCtrace *T = traceref(J, J->parent);
+ TraceNo root = T->root ? T->root : J->parent;
+ J->cur.root = (uint16_t)root;
+ J->cur.startins = BCINS_AD(BC_JMP, 0, 0);
+ /* Check whether we could at least potentially form an extra loop. */
+ if (J->exitno == 0 && T->snap[0].nent == 0) {
+ /* We can narrow a FORL for some side traces, too. */
+ if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI &&
+ bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) {
+ lj_snap_add(J);
+ rec_for_loop(J, J->pc-1, &J->scev, 1);
+ goto sidecheck;
+ }
+ } else {
+ J->startpc = NULL; /* Prevent forming an extra loop. */
+ }
+ rec_setup_side(J, T);
+ sidecheck:
+ if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] ||
+ T->snap[J->exitno].count >= J->param[JIT_P_hotexit] +
+ J->param[JIT_P_tryside]) {
+ rec_stop(J, LJ_TRLINK_INTERP, 0);
+ }
+ } else { /* Root trace. */
+ J->cur.root = 0;
+ J->cur.startins = *J->pc;
+ J->pc = rec_setup_root(J);
+ /* Note: the loop instruction itself is recorded at the end and not
+ ** at the start! So snapshot #0 needs to point to the *next* instruction.
+ */
+ lj_snap_add(J);
+ if (bc_op(J->cur.startins) == BC_FORL)
+ rec_for_loop(J, J->pc-1, &J->scev, 1);
+ if (1 + J->pt->framesize >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+ }
+#ifdef LUAJIT_ENABLE_CHECKHOOK
+ /* Regularly check for instruction/line hooks from compiled code and
+ ** exit to the interpreter if the hooks are set.
+ **
+ ** This is a compile-time option and disabled by default, since the
+ ** hook checks may be quite expensive in tight loops.
+ **
+ ** Note this is only useful if hooks are *not* set most of the time.
+ ** Use this only if you want to *asynchronously* interrupt the execution.
+ **
+ ** You can set the instruction hook via lua_sethook() with a count of 1
+ ** from a signal handler or another native thread. Please have a look
+ ** at the first few functions in luajit.c for an example (Ctrl-C handler).
+ */
+ {
+ TRef tr = emitir(IRT(IR_XLOAD, IRT_U8),
+ lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE);
+ tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT)));
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
+ }
+#endif
+}
+
+#undef IR
+#undef emitir_raw
+#undef emitir
+
+#endif
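Annotation on rec_tnew() above: the single BC_TNEW operand packs both table size hints, with the low 11 bits giving the array-part hint and the upper bits the log2 of the hash-part size, and 0x7ff acting as an "unknown/oversized" sentinel. A minimal standalone sketch of that decoding follows; the operand value and printed output are illustrative, not taken from a real trace.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t ah = (5u << 11) | 16u;     /* hypothetical operand: asize=16, hbits=5 */
  uint32_t asize = ah & 0x7ff;        /* low 11 bits: array-part size hint */
  uint32_t hbits = ah >> 11;          /* upper bits: log2 of hash-part size */
  if (asize == 0x7ff) asize = 0x801;  /* sentinel: fall back to a large hint */
  printf("asize=%u hbits=%u\n", asize, hbits);  /* prints: asize=16 hbits=5 */
  return 0;
}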
diff --git a/src/LuaJIT/src/lj_record.h b/src/LuaJIT/src/lj_record.h
new file mode 100644
index 000000000..40ffcb97a
--- /dev/null
+++ b/src/LuaJIT/src/lj_record.h
@@ -0,0 +1,43 @@
+/*
+** Trace recorder (bytecode -> SSA IR).
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_RECORD_H
+#define _LJ_RECORD_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* Context for recording an indexed load/store. */
+typedef struct RecordIndex {
+ TValue tabv; /* Runtime value of table (or indexed object). */
+ TValue keyv; /* Runtime value of key. */
+ TValue valv; /* Runtime value of stored value. */
+ TValue mobjv; /* Runtime value of metamethod object. */
+ GCtab *mtv; /* Runtime value of metatable object. */
+ cTValue *oldv; /* Runtime value of previously stored value. */
+ TRef tab; /* Table (or indexed object) reference. */
+ TRef key; /* Key reference. */
+ TRef val; /* Value reference for a store or 0 for a load. */
+ TRef mt; /* Metatable reference. */
+ TRef mobj; /* Metamethod object reference. */
+ int idxchain; /* Index indirections left or 0 for raw lookup. */
+} RecordIndex;
+
+LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b,
+ cTValue *av, cTValue *bv);
+
+LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs);
+LJ_FUNC void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs);
+LJ_FUNC void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults);
+
+LJ_FUNC int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm);
+LJ_FUNC TRef lj_record_idx(jit_State *J, RecordIndex *ix);
+
+LJ_FUNC void lj_record_ins(jit_State *J);
+LJ_FUNC void lj_record_setup(jit_State *J);
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_snap.c b/src/LuaJIT/src/lj_snap.c
new file mode 100644
index 000000000..ddf07b6a3
--- /dev/null
+++ b/src/LuaJIT/src/lj_snap.c
@@ -0,0 +1,446 @@
+/*
+** Snapshot handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_snap_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_target.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* -- Snapshot buffer allocation ------------------------------------------ */
+
+/* Grow snapshot buffer. */
+void lj_snap_grow_buf_(jit_State *J, MSize need)
+{
+ MSize maxsnap = (MSize)J->param[JIT_P_maxsnap];
+ if (need > maxsnap)
+ lj_trace_err(J, LJ_TRERR_SNAPOV);
+ lj_mem_growvec(J->L, J->snapbuf, J->sizesnap, maxsnap, SnapShot);
+ J->cur.snap = J->snapbuf;
+}
+
+/* Grow snapshot map buffer. */
+void lj_snap_grow_map_(jit_State *J, MSize need)
+{
+ if (need < 2*J->sizesnapmap)
+ need = 2*J->sizesnapmap;
+ else if (need < 64)
+ need = 64;
+ J->snapmapbuf = (SnapEntry *)lj_mem_realloc(J->L, J->snapmapbuf,
+ J->sizesnapmap*sizeof(SnapEntry), need*sizeof(SnapEntry));
+ J->cur.snapmap = J->snapmapbuf;
+ J->sizesnapmap = need;
+}
+
+/* -- Snapshot generation ------------------------------------------------- */
+
+/* Add all modified slots to the snapshot. */
+static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
+{
+ IRRef retf = J->chain[IR_RETF]; /* Limits SLOAD restore elimination. */
+ BCReg s;
+ MSize n = 0;
+ for (s = 0; s < nslots; s++) {
+ TRef tr = J->slot[s];
+ IRRef ref = tref_ref(tr);
+ if (ref) {
+ SnapEntry sn = SNAP_TR(s, tr);
+ IRIns *ir = IR(ref);
+ if (!(sn & (SNAP_CONT|SNAP_FRAME)) &&
+ ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
+ /* No need to snapshot unmodified non-inherited slots. */
+ if (!(ir->op2 & IRSLOAD_INHERIT))
+ continue;
+ /* No need to restore readonly slots and unmodified non-parent slots. */
+ if (!(LJ_DUALNUM && (ir->op2 & IRSLOAD_CONVERT)) &&
+ (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT)
+ sn |= SNAP_NORESTORE;
+ }
+ if (LJ_SOFTFP && irt_isnum(ir->t))
+ sn |= SNAP_SOFTFPNUM;
+ map[n++] = sn;
+ }
+ }
+ return n;
+}
+
+/* Add frame links at the end of the snapshot. */
+static BCReg snapshot_framelinks(jit_State *J, SnapEntry *map)
+{
+ cTValue *frame = J->L->base - 1;
+ cTValue *lim = J->L->base - J->baseslot;
+ cTValue *ftop = frame + funcproto(frame_func(frame))->framesize;
+ MSize f = 0;
+ map[f++] = SNAP_MKPC(J->pc); /* The current PC is always the first entry. */
+ while (frame > lim) { /* Backwards traversal of all frames above base. */
+ if (frame_islua(frame)) {
+ map[f++] = SNAP_MKPC(frame_pc(frame));
+ frame = frame_prevl(frame);
+ if (frame + funcproto(frame_func(frame))->framesize > ftop)
+ ftop = frame + funcproto(frame_func(frame))->framesize;
+ } else if (frame_iscont(frame)) {
+ map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
+ map[f++] = SNAP_MKPC(frame_contpc(frame));
+ frame = frame_prevd(frame);
+ } else {
+ lua_assert(!frame_isc(frame));
+ map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
+ frame = frame_prevd(frame);
+ }
+ }
+ lua_assert(f == (MSize)(1 + J->framedepth));
+ return (BCReg)(ftop - lim);
+}
+
+/* Take a snapshot of the current stack. */
+static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap)
+{
+ BCReg nslots = J->baseslot + J->maxslot;
+ MSize nent;
+ SnapEntry *p;
+ /* Conservative estimate. */
+ lj_snap_grow_map(J, nsnapmap + nslots + (MSize)J->framedepth+1);
+ p = &J->cur.snapmap[nsnapmap];
+ nent = snapshot_slots(J, p, nslots);
+ snap->topslot = (uint8_t)snapshot_framelinks(J, p + nent);
+ snap->mapofs = (uint16_t)nsnapmap;
+ snap->ref = (IRRef1)J->cur.nins;
+ snap->nent = (uint8_t)nent;
+ snap->nslots = (uint8_t)nslots;
+ snap->count = 0;
+ J->cur.nsnapmap = (uint16_t)(nsnapmap + nent + 1 + J->framedepth);
+}
+
+/* Add or merge a snapshot. */
+void lj_snap_add(jit_State *J)
+{
+ MSize nsnap = J->cur.nsnap;
+ MSize nsnapmap = J->cur.nsnapmap;
+ /* Merge if no ins. in between or if requested and no guard in between. */
+ if (J->mergesnap ? !irt_isguard(J->guardemit) :
+ (nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins)) {
+ nsnapmap = J->cur.snap[--nsnap].mapofs;
+ } else {
+ lj_snap_grow_buf(J, nsnap+1);
+ J->cur.nsnap = (uint16_t)(nsnap+1);
+ }
+ J->mergesnap = 0;
+ J->guardemit.irt = 0;
+ snapshot_stack(J, &J->cur.snap[nsnap], nsnapmap);
+}
+
+/* -- Snapshot modification ----------------------------------------------- */
+
+#define SNAP_USEDEF_SLOTS (LJ_MAX_JSLOTS+LJ_STACK_EXTRA)
+
+/* Find unused slots with reaching-definitions bytecode data-flow analysis. */
+static BCReg snap_usedef(jit_State *J, uint8_t *udf,
+ const BCIns *pc, BCReg maxslot)
+{
+ BCReg s;
+ GCobj *o;
+
+ if (maxslot == 0) return 0;
+#ifdef LUAJIT_USE_VALGRIND
+ /* Avoid errors for harmless reads beyond maxslot. */
+ memset(udf, 1, SNAP_USEDEF_SLOTS);
+#else
+ memset(udf, 1, maxslot);
+#endif
+
+ /* Treat open upvalues as used. */
+ o = gcref(J->L->openupval);
+ while (o) {
+ if (uvval(gco2uv(o)) < J->L->base) break;
+ udf[uvval(gco2uv(o)) - J->L->base] = 0;
+ o = gcref(o->gch.nextgc);
+ }
+
+#define USE_SLOT(s) udf[(s)] &= ~1
+#define DEF_SLOT(s) udf[(s)] *= 3
+
+ /* Scan through following bytecode and check for uses/defs. */
+ lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc);
+ for (;;) {
+ BCIns ins = *pc++;
+ BCOp op = bc_op(ins);
+ switch (bcmode_b(op)) {
+ case BCMvar: USE_SLOT(bc_b(ins)); break;
+ default: break;
+ }
+ switch (bcmode_c(op)) {
+ case BCMvar: USE_SLOT(bc_c(ins)); break;
+ case BCMrbase:
+ lua_assert(op == BC_CAT);
+ for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s);
+ for (; s < maxslot; s++) DEF_SLOT(s);
+ break;
+ case BCMjump:
+ handle_jump: {
+ BCReg minslot = bc_a(ins);
+ if (op >= BC_FORI && op <= BC_JFORL) minslot += FORL_EXT;
+ else if (op >= BC_ITERL && op <= BC_JITERL) minslot += bc_b(pc[-2])-1;
+ else if (op == BC_UCLO) { pc += bc_j(ins); break; }
+ for (s = minslot; s < maxslot; s++) DEF_SLOT(s);
+ return minslot < maxslot ? minslot : maxslot;
+ }
+ case BCMlit:
+ if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
+ goto handle_jump;
+ } else if (bc_isret(op)) {
+ BCReg top = op == BC_RETM ? maxslot : (bc_a(ins) + bc_d(ins)-1);
+ for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
+ for (; s < top; s++) USE_SLOT(s);
+ for (; s < maxslot; s++) DEF_SLOT(s);
+ return 0;
+ }
+ break;
+ case BCMfunc: return maxslot; /* NYI: will abort, anyway. */
+ default: break;
+ }
+ switch (bcmode_a(op)) {
+ case BCMvar: USE_SLOT(bc_a(ins)); break;
+ case BCMdst:
+ if (!(op == BC_ISTC || op == BC_ISFC)) DEF_SLOT(bc_a(ins));
+ break;
+ case BCMbase:
+ if (op >= BC_CALLM && op <= BC_VARG) {
+ BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ?
+ maxslot : (bc_a(ins) + bc_c(ins));
+ s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 3 : 0);
+ for (; s < top; s++) USE_SLOT(s);
+ for (; s < maxslot; s++) DEF_SLOT(s);
+ if (op == BC_CALLT || op == BC_CALLMT) {
+ for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
+ return 0;
+ }
+ } else if (op == BC_KNIL) {
+ for (s = bc_a(ins); s <= bc_d(ins); s++) DEF_SLOT(s);
+ } else if (op == BC_TSETM) {
+ for (s = bc_a(ins)-1; s < maxslot; s++) USE_SLOT(s);
+ }
+ break;
+ default: break;
+ }
+ lua_assert(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc);
+ }
+
+#undef USE_SLOT
+#undef DEF_SLOT
+
+ return 0; /* unreachable */
+}
+
+/* Purge dead slots before the next snapshot. */
+void lj_snap_purge(jit_State *J)
+{
+ uint8_t udf[SNAP_USEDEF_SLOTS];
+ BCReg maxslot = J->maxslot;
+ BCReg s = snap_usedef(J, udf, J->pc, maxslot);
+ for (; s < maxslot; s++)
+ if (udf[s] != 0)
+ J->base[s] = 0; /* Purge dead slots. */
+}
+
+/* Shrink last snapshot. */
+void lj_snap_shrink(jit_State *J)
+{
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, m, nlim, nent = snap->nent;
+ uint8_t udf[SNAP_USEDEF_SLOTS];
+ BCReg maxslot = J->maxslot;
+ BCReg minslot = snap_usedef(J, udf, snap_pc(map[nent]), maxslot);
+ BCReg baseslot = J->baseslot;
+ maxslot += baseslot;
+ minslot += baseslot;
+ snap->nslots = (uint8_t)maxslot;
+ for (n = m = 0; n < nent; n++) { /* Remove unused slots from snapshot. */
+ BCReg s = snap_slot(map[n]);
+ if (s < minslot || (s < maxslot && udf[s-baseslot] == 0))
+ map[m++] = map[n]; /* Only copy used slots. */
+ }
+ snap->nent = (uint8_t)m;
+ nlim = J->cur.nsnapmap - snap->mapofs - 1;
+ while (n <= nlim) map[m++] = map[n++]; /* Move PC + frame links down. */
+ J->cur.nsnapmap = (uint16_t)(snap->mapofs + m); /* Free up space in map. */
+}
+
+/* -- Snapshot access ----------------------------------------------------- */
+
+/* Initialize a Bloom Filter with all renamed refs.
+** There are very few renames (often none), so the filter has
+** very few bits set. This makes it suitable for negative filtering.
+*/
+static BloomFilter snap_renamefilter(GCtrace *T, SnapNo lim)
+{
+ BloomFilter rfilt = 0;
+ IRIns *ir;
+ for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
+ if (ir->op2 <= lim)
+ bloomset(rfilt, ir->op1);
+ return rfilt;
+}
+
+/* Process matching renames to find the original RegSP. */
+static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs)
+{
+ IRIns *ir;
+ for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
+ if (ir->op1 == ref && ir->op2 <= lim)
+ rs = ir->prev;
+ return rs;
+}
+
+/* Convert a snapshot into a linear slot -> RegSP map.
+** Note: unused slots are not initialized!
+*/
+void lj_snap_regspmap(uint16_t *rsmap, GCtrace *T, SnapNo snapno, int hi)
+{
+ SnapShot *snap = &T->snap[snapno];
+ MSize n, nent = snap->nent;
+ SnapEntry *map = &T->snapmap[snap->mapofs];
+ BloomFilter rfilt = snap_renamefilter(T, snapno);
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef ref = snap_ref(sn);
+ if (!irref_isk(ref) &&
+ ((LJ_SOFTFP && hi) ? (ref++, (sn & SNAP_SOFTFPNUM)) : 1)) {
+ IRIns *ir = &T->ir[ref];
+ uint32_t rs = ir->prev;
+ if (bloomtest(rfilt, ref))
+ rs = snap_renameref(T, snapno, ref, rs);
+ rsmap[snap_slot(sn)] = (uint16_t)rs;
+ }
+ }
+}
+
+/* Restore interpreter state from exit state with the help of a snapshot. */
+const BCIns *lj_snap_restore(jit_State *J, void *exptr)
+{
+ ExitState *ex = (ExitState *)exptr;
+ SnapNo snapno = J->exitno; /* For now, snapno == exitno. */
+ GCtrace *T = traceref(J, J->parent);
+ SnapShot *snap = &T->snap[snapno];
+ MSize n, nent = snap->nent;
+ SnapEntry *map = &T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1];
+ int32_t ftsz0;
+ TValue *frame;
+ BloomFilter rfilt = snap_renamefilter(T, snapno);
+ const BCIns *pc = snap_pc(map[nent]);
+ lua_State *L = J->L;
+
+ /* Set interpreter PC to the next PC to get correct error messages. */
+ setcframe_pc(cframe_raw(L->cframe), pc+1);
+
+ /* Make sure the stack is big enough for the slots from the snapshot. */
+ if (LJ_UNLIKELY(L->base + snap->topslot >= tvref(L->maxstack))) {
+ L->top = curr_topL(L);
+ lj_state_growstack(L, snap->topslot - curr_proto(L)->framesize);
+ }
+
+ /* Fill stack slots with data from the registers and spill slots. */
+ frame = L->base-1;
+ ftsz0 = frame_ftsz(frame); /* Preserve link to previous frame in slot #0. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef ref = snap_ref(sn);
+ BCReg s = snap_slot(sn);
+ TValue *o = &frame[s]; /* Stack slots are relative to start frame. */
+ IRIns *ir = &T->ir[ref];
+ if (irref_isk(ref)) { /* Restore constant slot. */
+ lj_ir_kvalue(L, o, ir);
+ } else if (!(sn & SNAP_NORESTORE)) {
+ IRType1 t = ir->t;
+ RegSP rs = ir->prev;
+ if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
+ rs = snap_renameref(T, snapno, ref, rs);
+ if (ra_hasspill(regsp_spill(rs))) { /* Restore from spill slot. */
+ int32_t *sps = &ex->spill[regsp_spill(rs)];
+ if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
+ o->u32.lo = (uint32_t)*sps;
+ } else if (irt_isinteger(t)) {
+ setintV(o, *sps);
+#if !LJ_SOFTFP
+ } else if (irt_isnum(t)) {
+ o->u64 = *(uint64_t *)sps;
+#endif
+#if LJ_64
+ } else if (irt_islightud(t)) {
+ /* 64 bit lightuserdata which may escape already has the tag bits. */
+ o->u64 = *(uint64_t *)sps;
+#endif
+ } else {
+ lua_assert(!irt_ispri(t)); /* PRI refs never have a spill slot. */
+ setgcrefi(o->gcr, *sps);
+ setitype(o, irt_toitype(t));
+ }
+ } else { /* Restore from register. */
+ Reg r = regsp_reg(rs);
+ lua_assert(ra_hasreg(r));
+ if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
+ o->u32.lo = (uint32_t)ex->gpr[r-RID_MIN_GPR];
+ } else if (irt_isinteger(t)) {
+ setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]);
+#if !LJ_SOFTFP
+ } else if (irt_isnum(t)) {
+ setnumV(o, ex->fpr[r-RID_MIN_FPR]);
+#endif
+#if LJ_64
+ } else if (irt_islightud(t)) {
+ /* 64 bit lightuserdata which may escape already has the tag bits. */
+ o->u64 = ex->gpr[r-RID_MIN_GPR];
+#endif
+ } else {
+ if (!irt_ispri(t))
+ setgcrefi(o->gcr, ex->gpr[r-RID_MIN_GPR]);
+ setitype(o, irt_toitype(t));
+ }
+ }
+ if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
+ rs = (ir+1)->prev;
+ if (LJ_UNLIKELY(bloomtest(rfilt, ref+1)))
+ rs = snap_renameref(T, snapno, ref+1, rs);
+ o->u32.hi = (ra_hasspill(regsp_spill(rs))) ?
+ (uint32_t)*&ex->spill[regsp_spill(rs)] :
+ (uint32_t)ex->gpr[regsp_reg(rs)-RID_MIN_GPR];
+ }
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) { /* Overwrite tag with frame link. */
+ o->fr.tp.ftsz = s != 0 ? (int32_t)*flinks-- : ftsz0;
+ L->base = o+1;
+ }
+ }
+ switch (bc_op(*pc)) {
+ case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM:
+ L->top = frame + snap->nslots;
+ break;
+ default:
+ L->top = curr_topL(L);
+ break;
+ }
+ lua_assert(map + nent == flinks);
+ return pc;
+}
+
+#undef IR
+
+#endif
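Annotation on snap_renamefilter()/snap_renameref() above: a one-word Bloom filter can give false positives but never false negatives, so the common no-renames case skips the linear scan over the IR entirely and only a set bit forces the slow lookup. A minimal standalone sketch of that idea; the names below only mirror (and are not) the bloom macros from LuaJIT's lj_def.h.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t Bloom;                        /* one machine word as the filter */
#define bloom_bit(x)     ((Bloom)1u << ((x) & 63))
#define bloom_set(b, x)  ((b) |= bloom_bit(x))
#define bloom_test(b, x) (((b) & bloom_bit(x)) != 0)

int main(void)
{
  Bloom seen = 0;
  bloom_set(seen, 100);                        /* pretend IR ref 100 was renamed */
  /* A clear bit is a guaranteed miss; a set bit still needs the exact check. */
  printf("%d %d\n", bloom_test(seen, 101), bloom_test(seen, 100));  /* 0 1 */
  return 0;
}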
diff --git a/src/LuaJIT/src/lj_snap.h b/src/LuaJIT/src/lj_snap.h
new file mode 100644
index 000000000..9ec1a78eb
--- /dev/null
+++ b/src/LuaJIT/src/lj_snap.h
@@ -0,0 +1,34 @@
+/*
+** Snapshot handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_SNAP_H
+#define _LJ_SNAP_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+LJ_FUNC void lj_snap_add(jit_State *J);
+LJ_FUNC void lj_snap_purge(jit_State *J);
+LJ_FUNC void lj_snap_shrink(jit_State *J);
+LJ_FUNC void lj_snap_regspmap(uint16_t *rsmap, GCtrace *T, SnapNo snapno,
+ int hi);
+LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr);
+LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need);
+LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need);
+
+static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need)
+{
+ if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need);
+}
+
+static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need)
+{
+ if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need);
+}
+
+#endif
+
+#endif
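Annotation on the grow helpers above: the inline lj_snap_grow_buf()/lj_snap_grow_map() keep only the cheap "already big enough" check in the fast path, while the underscore-suffixed out-of-line functions do the rare reallocation. A generic standalone sketch of that split, with illustrative names and no claim to match LuaJIT's allocator behavior.

#include <stdlib.h>

typedef struct Buf { int *p; size_t sz; } Buf;

static void buf_grow_(Buf *b, size_t need)        /* rare, out-of-line slow path */
{
  size_t sz = b->sz ? 2*b->sz : 16;
  if (sz < need) sz = need;
  b->p = (int *)realloc(b->p, sz*sizeof(int));    /* failure handling omitted */
  b->sz = sz;
}

static inline void buf_grow(Buf *b, size_t need)  /* hot, inlined check */
{
  if (need > b->sz) buf_grow_(b, need);
}

int main(void)
{
  Buf b = { NULL, 0 };
  buf_grow(&b, 100);   /* grows once; subsequent calls with need <= 100 are free */
  free(b.p);
  return 0;
}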
diff --git a/src/LuaJIT/src/lj_state.c b/src/LuaJIT/src/lj_state.c
new file mode 100644
index 000000000..77c6df678
--- /dev/null
+++ b/src/LuaJIT/src/lj_state.c
@@ -0,0 +1,287 @@
+/*
+** State and stack handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_state_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_lex.h"
+#include "lj_alloc.h"
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Stack sizes. */
+#define LJ_STACK_MIN LUA_MINSTACK /* Min. stack size. */
+#define LJ_STACK_MAX LUAI_MAXSTACK /* Max. stack size. */
+#define LJ_STACK_START (2*LJ_STACK_MIN) /* Starting stack size. */
+#define LJ_STACK_MAXEX (LJ_STACK_MAX + 1 + LJ_STACK_EXTRA)
+
+/* Explanation of LJ_STACK_EXTRA:
+**
+** Calls to metamethods store their arguments beyond the current top
+** without checking for the stack limit. This avoids stack resizes which
+** would invalidate passed TValue pointers. The stack check is performed
+** later by the function header. This can safely resize the stack or raise
+** an error. Thus we need some extra slots beyond the current stack limit.
+**
+** Most metamethods need 4 slots above top (cont, mobj, arg1, arg2) plus
+** one extra slot if mobj is not a function. Only lj_meta_tset needs 5
+** slots above top, but then mobj is always a function. So we can get by
+** with 5 extra slots.
+*/
+
+/* Resize stack slots and adjust pointers in state. */
+static void resizestack(lua_State *L, MSize n)
+{
+ TValue *st, *oldst = tvref(L->stack);
+ ptrdiff_t delta;
+ MSize oldsize = L->stacksize;
+ MSize realsize = n + 1 + LJ_STACK_EXTRA;
+ GCobj *up;
+ lua_assert((MSize)(tvref(L->maxstack)-oldst)==L->stacksize-LJ_STACK_EXTRA-1);
+ st = (TValue *)lj_mem_realloc(L, tvref(L->stack),
+ (MSize)(L->stacksize*sizeof(TValue)),
+ (MSize)(realsize*sizeof(TValue)));
+ setmref(L->stack, st);
+ delta = (char *)st - (char *)oldst;
+ setmref(L->maxstack, st + n);
+ while (oldsize < realsize) /* Clear new slots. */
+ setnilV(st + oldsize++);
+ L->stacksize = realsize;
+ L->base = (TValue *)((char *)L->base + delta);
+ L->top = (TValue *)((char *)L->top + delta);
+ for (up = gcref(L->openupval); up != NULL; up = gcnext(up))
+ setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta));
+ if (obj2gco(L) == gcref(G(L)->jit_L))
+ setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta);
+}
+
+/* Relimit stack after error, in case the limit was overdrawn. */
+void lj_state_relimitstack(lua_State *L)
+{
+ if (L->stacksize > LJ_STACK_MAXEX && L->top-tvref(L->stack) < LJ_STACK_MAX-1)
+ resizestack(L, LJ_STACK_MAX);
+}
+
+/* Try to shrink the stack (called from GC). */
+void lj_state_shrinkstack(lua_State *L, MSize used)
+{
+ if (L->stacksize > LJ_STACK_MAXEX)
+ return; /* Avoid stack shrinking while handling stack overflow. */
+ if (4*used < L->stacksize &&
+ 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize &&
+ obj2gco(L) != gcref(G(L)->jit_L)) /* Don't shrink stack of live trace. */
+ resizestack(L, L->stacksize >> 1);
+}
+
+/* Try to grow stack. */
+void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need)
+{
+ MSize n;
+ if (L->stacksize > LJ_STACK_MAXEX) /* Overflow while handling overflow? */
+ lj_err_throw(L, LUA_ERRERR);
+ n = L->stacksize + need;
+ if (n > LJ_STACK_MAX) {
+ n += 2*LUA_MINSTACK;
+ } else if (n < 2*L->stacksize) {
+ n = 2*L->stacksize;
+ if (n >= LJ_STACK_MAX)
+ n = LJ_STACK_MAX;
+ }
+ resizestack(L, n);
+ if (L->stacksize > LJ_STACK_MAXEX)
+ lj_err_msg(L, LJ_ERR_STKOV);
+}
+
+void LJ_FASTCALL lj_state_growstack1(lua_State *L)
+{
+ lj_state_growstack(L, 1);
+}
+
+/* Allocate basic stack for new state. */
+static void stack_init(lua_State *L1, lua_State *L)
+{
+ TValue *stend, *st = lj_mem_newvec(L, LJ_STACK_START+LJ_STACK_EXTRA, TValue);
+ setmref(L1->stack, st);
+ L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA;
+ stend = st + L1->stacksize;
+ setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1);
+ L1->base = L1->top = st+1;
+ setthreadV(L1, st, L1); /* Needed for curr_funcisL() on empty stack. */
+ while (st < stend) /* Clear new slots. */
+ setnilV(st++);
+}
+
+/* -- State handling ------------------------------------------------------ */
+
+/* Open parts that may cause memory-allocation errors. */
+static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ global_State *g = G(L);
+ UNUSED(dummy);
+ UNUSED(ud);
+ stack_init(L, L);
+ /* NOBARRIER: State initialization, all objects are white. */
+ setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL)));
+ settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY));
+ lj_str_resize(L, LJ_MIN_STRTAB-1);
+ lj_meta_init(L);
+ lj_lex_init(L);
+ fixstring(lj_err_str(L, LJ_ERR_ERRMEM)); /* Preallocate memory error msg. */
+ g->gc.threshold = 4*g->gc.total;
+ lj_trace_initstate(g);
+ return NULL;
+}
+
+static void close_state(lua_State *L)
+{
+ global_State *g = G(L);
+ lj_func_closeuv(L, tvref(L->stack));
+ lj_gc_freeall(g);
+ lua_assert(gcref(g->gc.root) == obj2gco(L));
+ lua_assert(g->strnum == 0);
+ lj_trace_freestate(g);
+#if LJ_HASFFI
+ lj_ctype_freestate(g);
+#endif
+ lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef);
+ lj_str_freebuf(g, &g->tmpbuf);
+ lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
+ lua_assert(g->gc.total == sizeof(GG_State));
+#ifndef LUAJIT_USE_SYSMALLOC
+ if (g->allocf == lj_alloc_f)
+ lj_alloc_destroy(g->allocd);
+ else
+#endif
+ g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0);
+}
+
+#if LJ_64
+lua_State *lj_state_newstate(lua_Alloc f, void *ud)
+#else
+LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
+#endif
+{
+ GG_State *GG = (GG_State *)f(ud, NULL, 0, sizeof(GG_State));
+ lua_State *L = &GG->L;
+ global_State *g = &GG->g;
+ if (GG == NULL || !checkptr32(GG)) return NULL;
+ memset(GG, 0, sizeof(GG_State));
+ L->gct = ~LJ_TTHREAD;
+ L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED; /* Prevent free. */
+ L->dummy_ffid = FF_C;
+ setmref(L->glref, g);
+ g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED;
+ g->strempty.marked = LJ_GC_WHITE0;
+ g->strempty.gct = ~LJ_TSTR;
+ g->allocf = f;
+ g->allocd = ud;
+ setgcref(g->mainthref, obj2gco(L));
+ setgcref(g->uvhead.prev, obj2gco(&g->uvhead));
+ setgcref(g->uvhead.next, obj2gco(&g->uvhead));
+ g->strmask = ~(MSize)0;
+ setnilV(registry(L));
+ setnilV(&g->nilnode.val);
+ setnilV(&g->nilnode.key);
+ setmref(g->nilnode.freetop, &g->nilnode);
+ lj_str_initbuf(&g->tmpbuf);
+ g->gc.state = GCSpause;
+ setgcref(g->gc.root, obj2gco(L));
+ setmref(g->gc.sweep, &g->gc.root);
+ g->gc.total = sizeof(GG_State);
+ g->gc.pause = LUAI_GCPAUSE;
+ g->gc.stepmul = LUAI_GCMUL;
+ lj_dispatch_init((GG_State *)L);
+ L->status = LUA_ERRERR+1; /* Avoid touching the stack upon memory error. */
+ if (lj_vm_cpcall(L, NULL, NULL, cpluaopen) != 0) {
+ /* Memory allocation error: free partial state. */
+ close_state(L);
+ return NULL;
+ }
+ L->status = 0;
+ return L;
+}
+
+static TValue *cpfinalize(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ UNUSED(dummy);
+ UNUSED(ud);
+ lj_gc_finalize_cdata(L);
+ lj_gc_finalize_udata(L);
+ /* Frame pop omitted. */
+ return NULL;
+}
+
+LUA_API void lua_close(lua_State *L)
+{
+ global_State *g = G(L);
+ int i;
+ L = mainthread(g); /* Only the main thread can be closed. */
+ lj_func_closeuv(L, tvref(L->stack));
+ lj_gc_separateudata(g, 1); /* Separate udata which have GC metamethods. */
+#if LJ_HASJIT
+ G2J(g)->flags &= ~JIT_F_ON;
+ G2J(g)->state = LJ_TRACE_IDLE;
+ lj_dispatch_update(g);
+#endif
+ for (i = 0; i < 10; ) {
+ hook_enter(g);
+ L->status = 0;
+ L->cframe = NULL;
+ L->base = L->top = tvref(L->stack) + 1;
+ if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == 0) {
+ lj_gc_separateudata(g, 1); /* Separate udata again. */
+ if (gcref(g->gc.mmudata) == NULL) /* Until nothing is left to do. */
+ break;
+ i++;
+ }
+ }
+ close_state(L);
+}
+
+lua_State *lj_state_new(lua_State *L)
+{
+ lua_State *L1 = lj_mem_newobj(L, lua_State);
+ L1->gct = ~LJ_TTHREAD;
+ L1->dummy_ffid = FF_C;
+ L1->status = 0;
+ L1->stacksize = 0;
+ setmref(L1->stack, NULL);
+ L1->cframe = NULL;
+ /* NOBARRIER: The lua_State is new (marked white). */
+ setgcrefnull(L1->openupval);
+ setmrefr(L1->glref, L->glref);
+ setgcrefr(L1->env, L->env);
+ stack_init(L1, L); /* init stack */
+ lua_assert(iswhite(obj2gco(L1)));
+ return L1;
+}
+
+void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L)
+{
+ lua_assert(L != mainthread(g));
+ lj_func_closeuv(L, tvref(L->stack));
+ lua_assert(gcref(L->openupval) == NULL);
+ lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
+ lj_mem_freet(g, L);
+}
+
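Annotation on lj_state_growstack() above: the sizing policy at least doubles the stack, clamps at the maximum, and deliberately overdraws past the limit so the stack-overflow error itself still has slots to run in (lj_state_relimitstack() shrinks it back afterwards). A standalone sketch of just that arithmetic, with illustrative constants in place of the LJ_STACK_* values.

#include <stdio.h>

#define STACK_MAX 65500u   /* illustrative, not LJ_STACK_MAX */
#define STACK_MIN 20u      /* illustrative, not LUA_MINSTACK */

static unsigned newsize(unsigned cur, unsigned need)
{
  unsigned n = cur + need;
  if (n > STACK_MAX) {
    n += 2*STACK_MIN;              /* Overdraw; relimited after the error. */
  } else if (n < 2*cur) {
    n = 2*cur;                     /* Grow by at least a factor of two. */
    if (n >= STACK_MAX) n = STACK_MAX;
  }
  return n;
}

int main(void)
{
  printf("%u %u\n", newsize(88, 1), newsize(60000, 30000));  /* 176 90040 */
  return 0;
}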
diff --git a/src/LuaJIT/src/lj_state.h b/src/LuaJIT/src/lj_state.h
new file mode 100644
index 000000000..b927d7604
--- /dev/null
+++ b/src/LuaJIT/src/lj_state.h
@@ -0,0 +1,35 @@
+/*
+** State and stack handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STATE_H
+#define _LJ_STATE_H
+
+#include "lj_obj.h"
+
+#define incr_top(L) \
+ (++L->top >= tvref(L->maxstack) && (lj_state_growstack1(L), 0))
+
+#define savestack(L, p) ((char *)(p) - mref(L->stack, char))
+#define restorestack(L, n) ((TValue *)(mref(L->stack, char) + (n)))
+
+LJ_FUNC void lj_state_relimitstack(lua_State *L);
+LJ_FUNC void lj_state_shrinkstack(lua_State *L, MSize used);
+LJ_FUNCA void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need);
+LJ_FUNC void LJ_FASTCALL lj_state_growstack1(lua_State *L);
+
+static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need)
+{
+ if ((mref(L->maxstack, char) - (char *)L->top) <=
+ (ptrdiff_t)need*(ptrdiff_t)sizeof(TValue))
+ lj_state_growstack(L, need);
+}
+
+LJ_FUNC lua_State *lj_state_new(lua_State *L);
+LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L);
+#if LJ_64
+LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud);
+#endif
+
+#endif
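Annotation on incr_top() above: the "(lj_state_growstack1(L), 0)" comma expression lets the growth call sit inside a boolean whose value is discarded, so the whole macro stays a single expression-statement. A generic standalone illustration of the idiom, with illustrative names and a made-up limit.

#include <stdio.h>

static int top, limit = 2;

static void grow(void) { limit *= 2; printf("grew to %d\n", limit); }

/* Same shape as incr_top(): bump, then grow iff the limit was reached. */
#define incr(x) (++(x) >= limit && (grow(), 0))

int main(void)
{
  incr(top); incr(top); incr(top);          /* grows once top reaches the limit */
  printf("top=%d limit=%d\n", top, limit);  /* top=3 limit=4 */
  return 0;
}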
diff --git a/src/LuaJIT/src/lj_str.c b/src/LuaJIT/src/lj_str.c
new file mode 100644
index 000000000..7bf4848af
--- /dev/null
+++ b/src/LuaJIT/src/lj_str.c
@@ -0,0 +1,415 @@
+/*
+** String handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+
+#define lj_str_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_state.h"
+#include "lj_char.h"
+
+/* -- String interning ---------------------------------------------------- */
+
+/* Ordered compare of strings. Assumes string data is 4-byte aligned. */
+int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b)
+{
+ MSize i, n = a->len > b->len ? b->len : a->len;
+ for (i = 0; i < n; i += 4) {
+ /* Note: innocuous access up to end of string + 3. */
+ uint32_t va = *(const uint32_t *)(strdata(a)+i);
+ uint32_t vb = *(const uint32_t *)(strdata(b)+i);
+ if (va != vb) {
+#if LJ_LE
+ va = lj_bswap(va); vb = lj_bswap(vb);
+#endif
+ i -= n;
+ if ((int32_t)i >= -3) {
+ va >>= 32+(i<<3); vb >>= 32+(i<<3);
+ if (va == vb) break;
+ }
+ return va < vb ? -1 : 1;
+ }
+ }
+ return (int32_t)(a->len - b->len);
+}
+
+/* Fast string data comparison. Caveat: unaligned access to 1st string! */
+static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len)
+{
+ MSize i = 0;
+ lua_assert(len > 0);
+ lua_assert((((uintptr_t)a + len) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4);
+ do { /* Note: innocuous access up to end of string + 3. */
+ uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i);
+ if (v) {
+ i -= len;
+#if LJ_LE
+ return (int32_t)i >= -3 ? (v << (32+(i<<3))) : 1;
+#else
+ return (int32_t)i >= -3 ? (v >> (32+(i<<3))) : 1;
+#endif
+ }
+ i += 4;
+ } while (i < len);
+ return 0;
+}
+
+/* Resize the string hash table (grow and shrink). */
+void lj_str_resize(lua_State *L, MSize newmask)
+{
+ global_State *g = G(L);
+ GCRef *newhash;
+ MSize i;
+ if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1)
+ return; /* No resizing during GC traversal or if already too big. */
+ newhash = lj_mem_newvec(L, newmask+1, GCRef);
+ memset(newhash, 0, (newmask+1)*sizeof(GCRef));
+ for (i = g->strmask; i != ~(MSize)0; i--) { /* Rehash old table. */
+ GCobj *p = gcref(g->strhash[i]);
+ while (p) { /* Follow each hash chain and reinsert all strings. */
+ MSize h = gco2str(p)->hash & newmask;
+ GCobj *next = gcnext(p);
+ /* NOBARRIER: The string table is a GC root. */
+ setgcrefr(p->gch.nextgc, newhash[h]);
+ setgcref(newhash[h], p);
+ p = next;
+ }
+ }
+ lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef);
+ g->strmask = newmask;
+ g->strhash = newhash;
+}
+
+/* Intern a string and return string object. */
+GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx)
+{
+ global_State *g;
+ GCstr *s;
+ GCobj *o;
+ MSize len = (MSize)lenx;
+ MSize a, b, h = len;
+ if (lenx >= LJ_MAX_STR)
+ lj_err_msg(L, LJ_ERR_STROV);
+ g = G(L);
+ /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */
+ if (len >= 4) { /* Caveat: unaligned access! */
+ a = lj_getu32(str);
+ h ^= lj_getu32(str+len-4);
+ b = lj_getu32(str+(len>>1)-2);
+ h ^= b; h -= lj_rol(b, 14);
+ b += lj_getu32(str+(len>>2)-1);
+ } else if (len > 0) {
+ a = *(const uint8_t *)str;
+ h ^= *(const uint8_t *)(str+len-1);
+ b = *(const uint8_t *)(str+(len>>1));
+ h ^= b; h -= lj_rol(b, 14);
+ } else {
+ return &g->strempty;
+ }
+ a ^= h; a -= lj_rol(h, 11);
+ b ^= a; b -= lj_rol(a, 25);
+ h ^= b; h -= lj_rol(b, 16);
+ /* Check if the string has already been interned. */
+ o = gcref(g->strhash[h & g->strmask]);
+ if (LJ_LIKELY((((uintptr_t)str + len) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) {
+ while (o != NULL) {
+ GCstr *sx = gco2str(o);
+ if (sx->len == len && str_fastcmp(str, strdata(sx), len) == 0) {
+ /* Resurrect if dead. Can only happen with fixstring() (keywords). */
+ if (isdead(g, o)) flipwhite(o);
+ return sx; /* Return existing string. */
+ }
+ o = gcnext(o);
+ }
+ } else { /* Slow path: end of string is too close to a page boundary. */
+ while (o != NULL) {
+ GCstr *sx = gco2str(o);
+ if (sx->len == len && memcmp(str, strdata(sx), len) == 0) {
+ /* Resurrect if dead. Can only happen with fixstring() (keywords). */
+ if (isdead(g, o)) flipwhite(o);
+ return sx; /* Return existing string. */
+ }
+ o = gcnext(o);
+ }
+ }
+ /* Nope, create a new string. */
+ s = lj_mem_newt(L, sizeof(GCstr)+len+1, GCstr);
+ newwhite(g, s);
+ s->gct = ~LJ_TSTR;
+ s->len = len;
+ s->hash = h;
+ s->reserved = 0;
+ memcpy(strdatawr(s), str, len);
+ strdatawr(s)[len] = '\0'; /* Zero-terminate string. */
+ /* Add it to string hash table. */
+ h &= g->strmask;
+ s->nextgc = g->strhash[h];
+ /* NOBARRIER: The string table is a GC root. */
+ setgcref(g->strhash[h], obj2gco(s));
+ if (g->strnum++ > g->strmask) /* Allow a 100% load factor. */
+ lj_str_resize(L, (g->strmask<<1)+1); /* Grow string table. */
+ return s; /* Return newly interned string. */
+}
+
+void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s)
+{
+ g->strnum--;
+ lj_mem_free(g, s, sizestring(s));
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+/* Convert string object to number. */
+int LJ_FASTCALL lj_str_tonum(GCstr *str, TValue *n)
+{
+ int ok = lj_str_numconv(strdata(str), n);
+ if (ok && tvisint(n))
+ setnumV(n, (lua_Number)intV(n));
+ return ok;
+}
+
+int LJ_FASTCALL lj_str_tonumber(GCstr *str, TValue *n)
+{
+ return lj_str_numconv(strdata(str), n);
+}
+
+/* Convert string to number. */
+int LJ_FASTCALL lj_str_numconv(const char *s, TValue *n)
+{
+#if LJ_DUALNUM
+ int sign = 1;
+#else
+ lua_Number sign = 1;
+#endif
+ const uint8_t *p = (const uint8_t *)s;
+ while (lj_char_isspace(*p)) p++;
+ if (*p == '-') { p++; sign = -1; } else if (*p == '+') { p++; }
+ if ((uint32_t)(*p - '0') < 10) {
+ uint32_t k = (uint32_t)(*p++ - '0');
+ if (k == 0 && ((*p & ~0x20) == 'X')) {
+ p++;
+ if (!lj_char_isxdigit(*p))
+ return 0; /* Don't accept '0x' without hex digits. */
+ do {
+ if (k >= 0x10000000u) goto parsedbl;
+ k = (k << 4) + (*p & 15u);
+ if (!lj_char_isdigit(*p)) k += 9;
+ p++;
+ } while (lj_char_isxdigit(*p));
+ } else {
+ while ((uint32_t)(*p - '0') < 10) {
+ if (LJ_UNLIKELY(k >= 429496729) && (k != 429496729 || *p > '5'))
+ goto parsedbl;
+ k = k * 10u + (uint32_t)(*p++ - '0');
+ }
+ }
+ while (LJ_UNLIKELY(lj_char_isspace(*p))) p++;
+ if (LJ_LIKELY(*p == '\0')) {
+#if LJ_DUALNUM
+ if (sign == 1) {
+ if (k < 0x80000000u) {
+ setintV(n, (int32_t)k);
+ return 1;
+ }
+ } else if (k <= 0x80000000u) {
+ setintV(n, -(int32_t)k);
+ return 1;
+ }
+#endif
+ setnumV(n, sign * (lua_Number)k);
+ return 1;
+ }
+ }
+parsedbl:
+ {
+ TValue tv;
+ char *endptr;
+ setnumV(&tv, lua_str2number(s, &endptr));
+ if (endptr == s) return 0; /* Conversion failed. */
+ if (LJ_UNLIKELY(*endptr != '\0')) {
+ while (lj_char_isspace((uint8_t)*endptr)) endptr++;
+ if (*endptr != '\0') return 0; /* Invalid trailing characters? */
+ }
+ if (LJ_LIKELY(!tvisnan(&tv)))
+ setnumV(n, numV(&tv));
+ else
+ setnanV(n); /* Canonicalize injected NaNs. */
+ return 1;
+ }
+}
+
+/* Print number to buffer. Canonicalizes non-finite values. */
+size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o)
+{
+ if (LJ_LIKELY((o->u32.hi << 1) < 0xffe00000)) { /* Finite? */
+ lua_Number n = o->n;
+ return (size_t)lua_number2str(s, n);
+ } else if (((o->u32.hi & 0x000fffff) | o->u32.lo) != 0) {
+ s[0] = 'n'; s[1] = 'a'; s[2] = 'n'; return 3;
+ } else if ((o->u32.hi & 0x80000000) == 0) {
+ s[0] = 'i'; s[1] = 'n'; s[2] = 'f'; return 3;
+ } else {
+ s[0] = '-'; s[1] = 'i'; s[2] = 'n'; s[3] = 'f'; return 4;
+ }
+}
+
+/* Print integer to buffer. Returns pointer to start. */
+char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k)
+{
+ uint32_t u = (uint32_t)(k < 0 ? -k : k);
+ p += 1+10;
+ do { *--p = (char)('0' + u % 10); } while (u /= 10);
+ if (k < 0) *--p = '-';
+ return p;
+}
+
+/* Convert number to string. */
+GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np)
+{
+ char buf[LJ_STR_NUMBUF];
+ size_t len = lj_str_bufnum(buf, (TValue *)np);
+ return lj_str_new(L, buf, len);
+}
+
+/* Convert integer to string. */
+GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k)
+{
+ char s[1+10];
+ char *p = lj_str_bufint(s, k);
+ return lj_str_new(L, p, (size_t)(s+sizeof(s)-p));
+}
+
+GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o)
+{
+ return tvisint(o) ? lj_str_fromint(L, intV(o)) : lj_str_fromnum(L, &o->n);
+}
+
+/* -- String formatting --------------------------------------------------- */
+
+static void addstr(lua_State *L, SBuf *sb, const char *str, MSize len)
+{
+ char *p;
+ MSize i;
+ if (sb->n + len > sb->sz) {
+ MSize sz = sb->sz * 2;
+ while (sb->n + len > sz) sz = sz * 2;
+ lj_str_resizebuf(L, sb, sz);
+ }
+ p = sb->buf + sb->n;
+ sb->n += len;
+ for (i = 0; i < len; i++) p[i] = str[i];
+}
+
+static void addchar(lua_State *L, SBuf *sb, int c)
+{
+ if (sb->n + 1 > sb->sz) {
+ MSize sz = sb->sz * 2;
+ lj_str_resizebuf(L, sb, sz);
+ }
+ sb->buf[sb->n++] = (char)c;
+}
+
+/* Push formatted message as a string object to Lua stack. va_list variant. */
+const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp)
+{
+ SBuf *sb = &G(L)->tmpbuf;
+ lj_str_needbuf(L, sb, (MSize)strlen(fmt));
+ lj_str_resetbuf(sb);
+ for (;;) {
+ const char *e = strchr(fmt, '%');
+ if (e == NULL) break;
+ addstr(L, sb, fmt, (MSize)(e-fmt));
+ /* This function only handles %s, %c, %d, %f and %p formats. */
+ switch (e[1]) {
+ case 's': {
+ const char *s = va_arg(argp, char *);
+ if (s == NULL) s = "(null)";
+ addstr(L, sb, s, (MSize)strlen(s));
+ break;
+ }
+ case 'c':
+ addchar(L, sb, va_arg(argp, int));
+ break;
+ case 'd': {
+ char buf[LJ_STR_INTBUF];
+ char *p = lj_str_bufint(buf, va_arg(argp, int32_t));
+ addstr(L, sb, p, (MSize)(buf+LJ_STR_INTBUF-p));
+ break;
+ }
+ case 'f': {
+ char buf[LJ_STR_NUMBUF];
+ TValue tv;
+ MSize len;
+ tv.n = (lua_Number)(va_arg(argp, LUAI_UACNUMBER));
+ len = (MSize)lj_str_bufnum(buf, &tv);
+ addstr(L, sb, buf, len);
+ break;
+ }
+ case 'p': {
+#define FMTP_CHARS (2*sizeof(ptrdiff_t))
+ char buf[2+FMTP_CHARS];
+ ptrdiff_t p = (ptrdiff_t)(va_arg(argp, void *));
+ ptrdiff_t i, lasti = 2+FMTP_CHARS;
+ if (p == 0) {
+ addstr(L, sb, "NULL", 4);
+ break;
+ }
+#if LJ_64
+ /* Shorten output for 64 bit pointers. */
+ lasti = 2+2*4+((p >> 32) ? 2+2*(lj_fls((uint32_t)(p >> 32))>>3) : 0);
+#endif
+ buf[0] = '0';
+ buf[1] = 'x';
+ for (i = lasti-1; i >= 2; i--, p >>= 4)
+ buf[i] = "0123456789abcdef"[(p & 15)];
+ addstr(L, sb, buf, (MSize)lasti);
+ break;
+ }
+ case '%':
+ addchar(L, sb, '%');
+ break;
+ default:
+ addchar(L, sb, '%');
+ addchar(L, sb, e[1]);
+ break;
+ }
+ fmt = e+2;
+ }
+ addstr(L, sb, fmt, (MSize)strlen(fmt));
+ setstrV(L, L->top, lj_str_new(L, sb->buf, sb->n));
+ incr_top(L);
+ return strVdata(L->top - 1);
+}
+
+/* Push formatted message as a string object to Lua stack. Vararg variant. */
+const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, fmt);
+ msg = lj_str_pushvf(L, fmt, argp);
+ va_end(argp);
+ return msg;
+}
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz)
+{
+ if (sz > sb->sz) {
+ if (sz < LJ_MIN_SBUF) sz = LJ_MIN_SBUF;
+ lj_str_resizebuf(L, sb, sz);
+ }
+ return sb->buf;
+}
+
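Annotation on lj_str_bufint() above: integers are formatted by writing digits backwards from the end of a fixed 1+10 byte buffer and returning a pointer to the first character, which is why lj_str_fromint() computes the length as s+sizeof(s)-p. A standalone sketch of the same pattern; the function name here is illustrative.

#include <stdint.h>
#include <stdio.h>

static char *bufint(char *p, int32_t k)       /* p points at an 11-byte buffer */
{
  uint32_t u = (uint32_t)(k < 0 ? -k : k);
  p += 1+10;                                  /* start one past the end */
  do { *--p = (char)('0' + u % 10); } while (u /= 10);
  if (k < 0) *--p = '-';
  return p;                                   /* first character, not buf[0] */
}

int main(void)
{
  char buf[1+10];
  char *p = bufint(buf, -42);
  printf("%.*s\n", (int)(buf+sizeof(buf)-p), p);  /* prints: -42 */
  return 0;
}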
diff --git a/src/LuaJIT/src/lj_str.h b/src/LuaJIT/src/lj_str.h
new file mode 100644
index 000000000..3d9be4f34
--- /dev/null
+++ b/src/LuaJIT/src/lj_str.h
@@ -0,0 +1,53 @@
+/*
+** String handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STR_H
+#define _LJ_STR_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+
+/* String interning. */
+LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b);
+LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask);
+LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len);
+LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s);
+
+#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s)))
+#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1))
+
+/* Type conversions. */
+LJ_FUNC int LJ_FASTCALL lj_str_numconv(const char *s, TValue *n);
+LJ_FUNC int LJ_FASTCALL lj_str_tonum(GCstr *str, TValue *n);
+LJ_FUNC int LJ_FASTCALL lj_str_tonumber(GCstr *str, TValue *n);
+LJ_FUNC size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o);
+LJ_FUNC char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k);
+LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np);
+LJ_FUNC GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k);
+LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o);
+
+#define LJ_STR_INTBUF (1+10)
+#define LJ_STR_NUMBUF LUAI_MAXNUMBER2STR
+
+/* String formatting. */
+LJ_FUNC const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp);
+LJ_FUNC const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
+#if defined(__GNUC__)
+ __attribute__ ((format (printf, 2, 3)))
+#endif
+ ;
+
+/* Resizable string buffers. Struct definition in lj_obj.h. */
+LJ_FUNC char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz);
+
+#define lj_str_initbuf(sb) ((sb)->buf = NULL, (sb)->sz = 0)
+#define lj_str_resetbuf(sb) ((sb)->n = 0)
+#define lj_str_resizebuf(L, sb, size) \
+ ((sb)->buf = (char *)lj_mem_realloc(L, (sb)->buf, (sb)->sz, (size)), \
+ (sb)->sz = (size))
+#define lj_str_freebuf(g, sb) lj_mem_free(g, (void *)(sb)->buf, (sb)->sz)
+
+#endif
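The SBuf macros above, together with lj_str_needbuf(), are the complete buffer API: lj_str_initbuf() clears the descriptor, lj_str_needbuf() and lj_str_resizebuf() grow the backing store via lj_mem_realloc(), lj_str_resetbuf() merely rewinds the write position, and lj_str_freebuf() hands the memory back to the GC-accounted allocator. A sketch of the intended call pattern, assuming lj_obj.h's strdata() and the per-VM G(L)->tmpbuf scratch buffer (the helper name is hypothetical):

    /* Sketch: build a concatenation in the scratch SBuf, then intern it. */
    static GCstr *concat_two(lua_State *L, GCstr *a, GCstr *b)
    {
      SBuf *sb = &G(L)->tmpbuf;
      MSize need = a->len + b->len;
      lj_str_needbuf(L, sb, need);           /* Ensure capacity >= need. */
      lj_str_resetbuf(sb);                   /* Restart writing at offset 0. */
      memcpy(sb->buf, strdata(a), a->len);
      memcpy(sb->buf + a->len, strdata(b), b->len);
      sb->n = need;
      return lj_str_new(L, sb->buf, sb->n);  /* Intern the accumulated bytes. */
    }
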
diff --git a/src/LuaJIT/src/lj_tab.c b/src/LuaJIT/src/lj_tab.c
new file mode 100644
index 000000000..40f75269a
--- /dev/null
+++ b/src/LuaJIT/src/lj_tab.c
@@ -0,0 +1,622 @@
+/*
+** Table handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_tab_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+
+/* -- Object hashing ------------------------------------------------------ */
+
+/* Hash values are masked with the table hash mask and used as an index. */
+static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash)
+{
+ Node *n = noderef(t->node);
+ return &n[hash & t->hmask];
+}
+
+/* String hashes are precomputed when they are interned. */
+#define hashstr(t, s) hashmask(t, (s)->hash)
+
+#define hashlohi(t, lo, hi) hashmask((t), hashrot((lo), (hi)))
+#define hashnum(t, o) hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1))
+#define hashptr(t, p) hashlohi((t), u32ptr(p), u32ptr(p) + HASH_BIAS)
+#define hashgcref(t, r) hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS)
+
+/* Hash an arbitrary key and return its anchor position in the hash table. */
+static Node *hashkey(const GCtab *t, cTValue *key)
+{
+ lua_assert(!tvisint(key));
+ if (tvisstr(key))
+ return hashstr(t, strV(key));
+ else if (tvisnum(key))
+ return hashnum(t, key);
+ else if (tvisbool(key))
+ return hashmask(t, boolV(key));
+ else
+ return hashgcref(t, key->gcr);
+ /* Only hash 32 bits of lightuserdata on a 64 bit CPU. Good enough? */
+}
+
+/* -- Table creation and destruction -------------------------------------- */
+
+/* Create new hash part for table. */
+static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits)
+{
+ uint32_t hsize;
+ Node *node;
+ lua_assert(hbits != 0);
+ if (hbits > LJ_MAX_HBITS)
+ lj_err_msg(L, LJ_ERR_TABOV);
+ hsize = 1u << hbits;
+ node = lj_mem_newvec(L, hsize, Node);
+ setmref(node->freetop, &node[hsize]);
+ setmref(t->node, node);
+ t->hmask = hsize-1;
+}
+
+/*
+** Q: Why all of these copies of t->hmask, t->node etc. to local variables?
+** A: Because alias analysis for C is _really_ tough.
+** Even state-of-the-art C compilers won't produce good code without this.
+*/
+
+/* Clear hash part of table. */
+static LJ_AINLINE void clearhpart(GCtab *t)
+{
+ uint32_t i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ lua_assert(t->hmask != 0);
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ setmref(n->next, NULL);
+ setnilV(&n->key);
+ setnilV(&n->val);
+ }
+}
+
+/* Clear array part of table. */
+static LJ_AINLINE void clearapart(GCtab *t)
+{
+ uint32_t i, asize = t->asize;
+ TValue *array = tvref(t->array);
+ for (i = 0; i < asize; i++)
+ setnilV(&array[i]);
+}
+
+/* Create a new table. Note: the slots are not initialized (yet). */
+static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits)
+{
+ GCtab *t;
+ /* First try to colocate the array part. */
+ if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) {
+ lua_assert((sizeof(GCtab) & 7) == 0);
+ t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize));
+ t->gct = ~LJ_TTAB;
+ t->nomm = (uint8_t)~0;
+ t->colo = (int8_t)asize;
+ setmref(t->array, (TValue *)((char *)t + sizeof(GCtab)));
+ setgcrefnull(t->metatable);
+ t->asize = asize;
+ t->hmask = 0;
+ setmref(t->node, &G(L)->nilnode);
+ } else { /* Otherwise separately allocate the array part. */
+ t = lj_mem_newobj(L, GCtab);
+ t->gct = ~LJ_TTAB;
+ t->nomm = (uint8_t)~0;
+ t->colo = 0;
+ setmref(t->array, NULL);
+ setgcrefnull(t->metatable);
+ t->asize = 0; /* In case the array allocation fails. */
+ t->hmask = 0;
+ setmref(t->node, &G(L)->nilnode);
+ if (asize > 0) {
+ if (asize > LJ_MAX_ASIZE)
+ lj_err_msg(L, LJ_ERR_TABOV);
+ setmref(t->array, lj_mem_newvec(L, asize, TValue));
+ t->asize = asize;
+ }
+ }
+ if (hbits)
+ newhpart(L, t, hbits);
+ return t;
+}
+
+/* Create a new table.
+**
+** IMPORTANT NOTE: The API differs from lua_createtable()!
+**
+** The array size is non-inclusive. E.g. asize=128 creates array slots
+** for 0..127, but not for 128. If you need slots 1..128, pass asize=129
+** (slot 0 is wasted in this case).
+**
+** The hash size is given in hash bits. hbits=0 means no hash part.
+** hbits=1 creates 2 hash slots, hbits=2 creates 4 hash slots and so on.
+*/
+GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits)
+{
+ GCtab *t = newtab(L, asize, hbits);
+ clearapart(t);
+ if (t->hmask > 0) clearhpart(t);
+ return t;
+}
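The non-inclusive array size described in the comment above trips up callers coming from lua_createtable(): to reach keys 1..n through the array part you must pass asize = n+1 and accept that slot 0 is wasted. A small sketch under that rule (the helper name and the choice of 8 hash slots are arbitrary):

    /* Sketch: preallocate array slots for keys 1..n plus a small hash part. */
    static GCtab *new_seq_table(lua_State *L, uint32_t n)
    {
      return lj_tab_new(L, n+1, 3);  /* Slots 0..n in the array, 2^3 hash slots. */
    }
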
+
+#if LJ_HASJIT
+GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize)
+{
+ GCtab *t = newtab(L, ahsize & 0xffffff, ahsize >> 24);
+ clearapart(t);
+ if (t->hmask > 0) clearhpart(t);
+ return t;
+}
+#endif
+
+/* Duplicate a table. */
+GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt)
+{
+ GCtab *t;
+ uint32_t asize, hmask;
+ t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0);
+ lua_assert(kt->asize == t->asize && kt->hmask == t->hmask);
+ t->nomm = 0; /* Keys with metamethod names may be present. */
+ asize = kt->asize;
+ if (asize > 0) {
+ TValue *array = tvref(t->array);
+ TValue *karray = tvref(kt->array);
+ if (asize < 64) { /* An inlined loop beats memcpy for < 512 bytes. */
+ uint32_t i;
+ for (i = 0; i < asize; i++)
+ copyTV(L, &array[i], &karray[i]);
+ } else {
+ memcpy(array, karray, asize*sizeof(TValue));
+ }
+ }
+ hmask = kt->hmask;
+ if (hmask > 0) {
+ uint32_t i;
+ Node *node = noderef(t->node);
+ Node *knode = noderef(kt->node);
+ ptrdiff_t d = (char *)node - (char *)knode;
+ setmref(node->freetop, (Node *)((char *)noderef(knode->freetop) + d));
+ for (i = 0; i <= hmask; i++) {
+ Node *kn = &knode[i];
+ Node *n = &node[i];
+ Node *next = nextnode(kn);
+ /* Don't use copyTV here, since it asserts on a copy of a dead key. */
+ n->val = kn->val; n->key = kn->key;
+ setmref(n->next, next == NULL? next : (Node *)((char *)next + d));
+ }
+ }
+ return t;
+}
+
+/* Free a table. */
+void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t)
+{
+ if (t->hmask > 0)
+ lj_mem_freevec(g, noderef(t->node), t->hmask+1, Node);
+ if (t->asize > 0 && LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
+ lj_mem_freevec(g, tvref(t->array), t->asize, TValue);
+ if (LJ_MAX_COLOSIZE != 0 && t->colo)
+ lj_mem_free(g, t, sizetabcolo((uint32_t)t->colo & 0x7f));
+ else
+ lj_mem_freet(g, t);
+}
+
+/* -- Table resizing ------------------------------------------------------ */
+
+/* Resize a table to fit the new array/hash part sizes. */
+static void resizetab(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits)
+{
+ Node *oldnode = noderef(t->node);
+ uint32_t oldasize = t->asize;
+ uint32_t oldhmask = t->hmask;
+ if (asize > oldasize) { /* Array part grows? */
+ TValue *array;
+ uint32_t i;
+ if (asize > LJ_MAX_ASIZE)
+ lj_err_msg(L, LJ_ERR_TABOV);
+ if (LJ_MAX_COLOSIZE != 0 && t->colo > 0) {
+ /* A colocated array must be separated and copied. */
+ TValue *oarray = tvref(t->array);
+ array = lj_mem_newvec(L, asize, TValue);
+ t->colo = (int8_t)(t->colo | 0x80); /* Mark as separated (colo < 0). */
+ for (i = 0; i < oldasize; i++)
+ copyTV(L, &array[i], &oarray[i]);
+ } else {
+ array = (TValue *)lj_mem_realloc(L, tvref(t->array),
+ oldasize*sizeof(TValue), asize*sizeof(TValue));
+ }
+ setmref(t->array, array);
+ t->asize = asize;
+ for (i = oldasize; i < asize; i++) /* Clear newly allocated slots. */
+ setnilV(&array[i]);
+ }
+ /* Create new (empty) hash part. */
+ if (hbits) {
+ newhpart(L, t, hbits);
+ clearhpart(t);
+ } else {
+ global_State *g = G(L);
+ setmref(t->node, &g->nilnode);
+ t->hmask = 0;
+ }
+ if (asize < oldasize) { /* Array part shrinks? */
+ TValue *array = tvref(t->array);
+ uint32_t i;
+ t->asize = asize; /* Note: This 'shrinks' even colocated arrays. */
+ for (i = asize; i < oldasize; i++) /* Reinsert old array values. */
+ if (!tvisnil(&array[i]))
+ copyTV(L, lj_tab_setinth(L, t, (int32_t)i), &array[i]);
+ /* Physically shrink only separated arrays. */
+ if (LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
+ setmref(t->array, lj_mem_realloc(L, array,
+ oldasize*sizeof(TValue), asize*sizeof(TValue)));
+ }
+ if (oldhmask > 0) { /* Reinsert pairs from old hash part. */
+ global_State *g;
+ uint32_t i;
+ for (i = 0; i <= oldhmask; i++) {
+ Node *n = &oldnode[i];
+ if (!tvisnil(&n->val))
+ copyTV(L, lj_tab_set(L, t, &n->key), &n->val);
+ }
+ g = G(L);
+ lj_mem_freevec(g, oldnode, oldhmask+1, Node);
+ }
+}
+
+static uint32_t countint(cTValue *key, uint32_t *bins)
+{
+ lua_assert(!tvisint(key));
+ if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if ((uint32_t)k < LJ_MAX_ASIZE && nk == (lua_Number)k) {
+ bins[(k > 2 ? lj_fls((uint32_t)(k-1)) : 0)]++;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static uint32_t countarray(const GCtab *t, uint32_t *bins)
+{
+ uint32_t na, b, i;
+ if (t->asize == 0) return 0;
+ for (na = i = b = 0; b < LJ_MAX_ABITS; b++) {
+ uint32_t n, top = 2u << b;
+ TValue *array;
+ if (top >= t->asize) {
+ top = t->asize-1;
+ if (i > top)
+ break;
+ }
+ array = tvref(t->array);
+ for (n = 0; i <= top; i++)
+ if (!tvisnil(&array[i]))
+ n++;
+ bins[b] += n;
+ na += n;
+ }
+ return na;
+}
+
+static uint32_t counthash(const GCtab *t, uint32_t *bins, uint32_t *narray)
+{
+ uint32_t total, na, i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ for (total = na = 0, i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (!tvisnil(&n->val)) {
+ na += countint(&n->key, bins);
+ total++;
+ }
+ }
+ *narray += na;
+ return total;
+}
+
+static uint32_t bestasize(uint32_t bins[], uint32_t *narray)
+{
+ uint32_t b, sum, na = 0, sz = 0, nn = *narray;
+ for (b = 0, sum = 0; 2*nn > (1u<<b) && sum != nn; b++)
+ if (bins[b] > 0 && 2*(sum += bins[b]) > (1u<<b)) {
+ sz = (2u<<b)+1;
+ na = sum;
+ }
+ *narray = sz;
+ return na;
+}
+
+static void rehashtab(lua_State *L, GCtab *t, cTValue *ek)
+{
+ uint32_t bins[LJ_MAX_ABITS];
+ uint32_t total, asize, na, i;
+ for (i = 0; i < LJ_MAX_ABITS; i++) bins[i] = 0;
+ asize = countarray(t, bins);
+ total = 1 + asize;
+ total += counthash(t, bins, &asize);
+ asize += countint(ek, bins);
+ na = bestasize(bins, &asize);
+ total -= na;
+ resizetab(L, t, asize, hsize2hbits(total));
+}
+
+void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize)
+{
+ resizetab(L, t, nasize+1, t->hmask > 0 ? lj_fls(t->hmask)+1 : 0);
+}
+
+/* -- Table getters ------------------------------------------------------- */
+
+cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key)
+{
+ TValue k;
+ Node *n;
+ k.n = (lua_Number)key;
+ n = hashnum(t, &k);
+ do {
+ if (tvisnum(&n->key) && n->key.n == k.n)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return NULL;
+}
+
+cTValue *lj_tab_getstr(GCtab *t, GCstr *key)
+{
+ Node *n = hashstr(t, key);
+ do {
+ if (tvisstr(&n->key) && strV(&n->key) == key)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return NULL;
+}
+
+cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key)
+{
+ if (tvisstr(key)) {
+ cTValue *tv = lj_tab_getstr(t, strV(key));
+ if (tv)
+ return tv;
+ } else if (tvisint(key)) {
+ cTValue *tv = lj_tab_getint(t, intV(key));
+ if (tv)
+ return tv;
+ } else if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if (nk == (lua_Number)k) {
+ cTValue *tv = lj_tab_getint(t, k);
+ if (tv)
+ return tv;
+ } else {
+ goto genlookup; /* Else use the generic lookup. */
+ }
+ } else if (!tvisnil(key)) {
+ Node *n;
+ genlookup:
+ n = hashkey(t, key);
+ do {
+ if (lj_obj_equal(&n->key, key))
+ return &n->val;
+ } while ((n = nextnode(n)));
+ }
+ return niltv(L);
+}
+
+/* -- Table setters ------------------------------------------------------- */
+
+/* Insert new key. Use Brent's variation to optimize the chain length. */
+TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key)
+{
+ Node *n = hashkey(t, key);
+ if (!tvisnil(&n->val) || t->hmask == 0) {
+ Node *nodebase = noderef(t->node);
+ Node *collide, *freenode = noderef(nodebase->freetop);
+ lua_assert(freenode >= nodebase && freenode <= nodebase+t->hmask+1);
+ do {
+ if (freenode == nodebase) { /* No free node found? */
+ rehashtab(L, t, key); /* Rehash table. */
+ return lj_tab_set(L, t, key); /* Retry key insertion. */
+ }
+ } while (!tvisnil(&(--freenode)->key));
+ setmref(nodebase->freetop, freenode);
+ lua_assert(freenode != &G(L)->nilnode);
+ collide = hashkey(t, &n->key);
+ if (collide != n) { /* Colliding node not the main node? */
+ while (noderef(collide->next) != n) /* Find predecessor. */
+ collide = nextnode(collide);
+ setmref(collide->next, freenode); /* Relink chain. */
+ /* Copy colliding node into free node and free main node. */
+ freenode->val = n->val;
+ freenode->key = n->key;
+ freenode->next = n->next;
+ setmref(n->next, NULL);
+ setnilV(&n->val);
+ /* Rechain pseudo-resurrected string keys with colliding hashes. */
+ while (nextnode(freenode)) {
+ Node *nn = nextnode(freenode);
+ if (tvisstr(&nn->key) && !tvisnil(&nn->val) &&
+ hashstr(t, strV(&nn->key)) == n) {
+ freenode->next = nn->next;
+ nn->next = n->next;
+ setmref(n->next, nn);
+ } else {
+ freenode = nn;
+ }
+ }
+ } else { /* Otherwise use free node. */
+ setmrefr(freenode->next, n->next); /* Insert into chain. */
+ setmref(n->next, freenode);
+ n = freenode;
+ }
+ }
+ n->key.u64 = key->u64;
+ if (LJ_UNLIKELY(tvismzero(&n->key)))
+ n->key.u64 = 0;
+ lj_gc_anybarriert(L, t);
+ lua_assert(tvisnil(&n->val));
+ return &n->val;
+}
+
+TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key)
+{
+ TValue k;
+ Node *n;
+ k.n = (lua_Number)key;
+ n = hashnum(t, &k);
+ do {
+ if (tvisnum(&n->key) && n->key.n == k.n)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return lj_tab_newkey(L, t, &k);
+}
+
+TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key)
+{
+ TValue k;
+ Node *n = hashstr(t, key);
+ do {
+ if (tvisstr(&n->key) && strV(&n->key) == key)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ setstrV(L, &k, key);
+ return lj_tab_newkey(L, t, &k);
+}
+
+TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key)
+{
+ Node *n;
+ t->nomm = 0; /* Invalidate negative metamethod cache. */
+ if (tvisstr(key)) {
+ return lj_tab_setstr(L, t, strV(key));
+ } else if (tvisint(key)) {
+ return lj_tab_setint(L, t, intV(key));
+ } else if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if (nk == (lua_Number)k)
+ return lj_tab_setint(L, t, k);
+ if (tvisnan(key))
+ lj_err_msg(L, LJ_ERR_NANIDX);
+ /* Else use the generic lookup. */
+ } else if (tvisnil(key)) {
+ lj_err_msg(L, LJ_ERR_NILIDX);
+ }
+ n = hashkey(t, key);
+ do {
+ if (lj_obj_equal(&n->key, key))
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return lj_tab_newkey(L, t, key);
+}
+
+/* -- Table traversal ----------------------------------------------------- */
+
+/* Get the traversal index of a key. */
+static uint32_t keyindex(lua_State *L, GCtab *t, cTValue *key)
+{
+ TValue tmp;
+ if (tvisint(key)) {
+ int32_t k = intV(key);
+ if ((uint32_t)k < t->asize)
+ return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */
+ setnumV(&tmp, (lua_Number)k);
+ key = &tmp;
+ } else if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if ((uint32_t)k < t->asize && nk == (lua_Number)k)
+ return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */
+ }
+ if (!tvisnil(key)) {
+ Node *n = hashkey(t, key);
+ do {
+ if (lj_obj_equal(&n->key, key))
+ return t->asize + (uint32_t)(n - noderef(t->node));
+ /* Hash key indexes: [t->asize..t->asize+t->hmask] */
+ } while ((n = nextnode(n)));
+ lj_err_msg(L, LJ_ERR_NEXTIDX);
+ return 0; /* unreachable */
+ }
+ return ~0u; /* A nil key starts the traversal. */
+}
+
+/* Advance to the next step in a table traversal. */
+int lj_tab_next(lua_State *L, GCtab *t, TValue *key)
+{
+ uint32_t i = keyindex(L, t, key); /* Find predecessor key index. */
+ for (i++; i < t->asize; i++) /* First traverse the array keys. */
+ if (!tvisnil(arrayslot(t, i))) {
+ setintV(key, i);
+ copyTV(L, key+1, arrayslot(t, i));
+ return 1;
+ }
+ for (i -= t->asize; i <= t->hmask; i++) { /* Then traverse the hash keys. */
+ Node *n = &noderef(t->node)[i];
+ if (!tvisnil(&n->val)) {
+ copyTV(L, key, &n->key);
+ copyTV(L, key+1, &n->val);
+ return 1;
+ }
+ }
+ return 0; /* End of traversal. */
+}
+
+/* -- Table length calculation -------------------------------------------- */
+
+static MSize unbound_search(GCtab *t, MSize j)
+{
+ cTValue *tv;
+ MSize i = j; /* i is zero or a present index */
+ j++;
+ /* find `i' and `j' such that i is present and j is not */
+ while ((tv = lj_tab_getint(t, (int32_t)j)) && !tvisnil(tv)) {
+ i = j;
+ j *= 2;
+ if (j > (MSize)(INT_MAX-2)) { /* overflow? */
+ /* table was built with bad purposes: resort to linear search */
+ i = 1;
+ while ((tv = lj_tab_getint(t, (int32_t)i)) && !tvisnil(tv)) i++;
+ return i - 1;
+ }
+ }
+ /* now do a binary search between them */
+ while (j - i > 1) {
+ MSize m = (i+j)/2;
+ cTValue *tvb = lj_tab_getint(t, (int32_t)m);
+ if (tvb && !tvisnil(tvb)) i = m; else j = m;
+ }
+ return i;
+}
+
+/*
+** Try to find a boundary in table `t'. A `boundary' is an integer index
+** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
+*/
+MSize LJ_FASTCALL lj_tab_len(GCtab *t)
+{
+ MSize j = (MSize)t->asize;
+ if (j > 1 && tvisnil(arrayslot(t, j-1))) {
+ MSize i = 1;
+ while (j - i > 1) {
+ MSize m = (i+j)/2;
+ if (tvisnil(arrayslot(t, m-1))) j = m; else i = m;
+ }
+ return i-1;
+ }
+ if (j) j--;
+ if (t->hmask <= 0)
+ return j;
+ return unbound_search(t, j);
+}
+
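As the boundary comment above lj_tab_len() notes, any index i with t[i] non-nil and t[i+1] nil qualifies, so a table with an interior nil hole has more than one correct answer and the binary search is free to return any of them. A sketch of that ambiguity, using the allocators and setters from this patch (sizes and values are arbitrary):

    /* Sketch: a nil hole leaves two valid boundaries, 2 and 4. */
    static void boundary_demo(lua_State *L)
    {
      GCtab *t = lj_tab_new(L, 6, 0);          /* Array slots 0..5, no hash part. */
      setintV(lj_tab_setint(L, t, 1), 10);
      setintV(lj_tab_setint(L, t, 2), 20);     /* t[3] stays nil: the hole. */
      setintV(lj_tab_setint(L, t, 4), 40);
      lua_assert(lj_tab_len(t) == 2 || lj_tab_len(t) == 4);  /* Either is legal. */
    }
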
diff --git a/src/LuaJIT/src/lj_tab.h b/src/LuaJIT/src/lj_tab.h
new file mode 100644
index 000000000..c33363e20
--- /dev/null
+++ b/src/LuaJIT/src/lj_tab.h
@@ -0,0 +1,67 @@
+/*
+** Table handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TAB_H
+#define _LJ_TAB_H
+
+#include "lj_obj.h"
+
+/* Hash constants. Tuned using a brute force search. */
+#define HASH_BIAS (-0x04c11db7)
+#define HASH_ROT1 14
+#define HASH_ROT2 5
+#define HASH_ROT3 13
+
+/* Scramble the bits of numbers and pointers. */
+static LJ_AINLINE uint32_t hashrot(uint32_t lo, uint32_t hi)
+{
+#if LJ_TARGET_X86ORX64
+ /* Prefer variant that compiles well for a 2-operand CPU. */
+ lo ^= hi; hi = lj_rol(hi, HASH_ROT1);
+ lo -= hi; hi = lj_rol(hi, HASH_ROT2);
+ hi ^= lo; hi -= lj_rol(lo, HASH_ROT3);
+#else
+ lo ^= hi;
+ lo = lo - lj_rol(hi, HASH_ROT1);
+ hi = lo ^ lj_rol(hi, HASH_ROT1 + HASH_ROT2);
+ hi = hi - lj_rol(lo, HASH_ROT3);
+#endif
+ return hi;
+}
+
+#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0)
+
+LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits);
+#if LJ_HASJIT
+LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize);
+#endif
+LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt);
+LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t);
+LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize);
+
+/* Caveat: all getters except lj_tab_get() can return NULL! */
+
+LJ_FUNCA cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key);
+LJ_FUNC cTValue *lj_tab_getstr(GCtab *t, GCstr *key);
+LJ_FUNCA cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key);
+
+/* Caveat: all setters require a write barrier for the stored value. */
+
+LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key);
+LJ_FUNC TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key);
+LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key);
+LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key);
+
+#define inarray(t, key) ((MSize)(key) < (MSize)(t)->asize)
+#define arrayslot(t, i) (&tvref((t)->array)[(i)])
+#define lj_tab_getint(t, key) \
+ (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_getinth((t), (key)))
+#define lj_tab_setint(L, t, key) \
+ (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_setinth(L, (t), (key)))
+
+LJ_FUNCA int lj_tab_next(lua_State *L, GCtab *t, TValue *key);
+LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t);
+
+#endif
diff --git a/src/LuaJIT/src/lj_target.h b/src/LuaJIT/src/lj_target.h
new file mode 100644
index 000000000..13de8fc6d
--- /dev/null
+++ b/src/LuaJIT/src/lj_target.h
@@ -0,0 +1,160 @@
+/*
+** Definitions for target CPU.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_H
+#define _LJ_TARGET_H
+
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* -- Registers and spill slots ------------------------------------------- */
+
+/* Register type (uint8_t in ir->r). */
+typedef uint32_t Reg;
+
+/* The hi-bit is NOT set for an allocated register. This means the value
+** can be directly used without masking. The hi-bit is set for a register
+** allocation hint or for RID_INIT.
+*/
+#define RID_NONE 0x80
+#define RID_MASK 0x7f
+#define RID_INIT (RID_NONE|RID_MASK)
+
+#define ra_noreg(r) ((r) & RID_NONE)
+#define ra_hasreg(r) (!((r) & RID_NONE))
+
+/* The ra_hashint() macro assumes a previous test for ra_noreg(). */
+#define ra_hashint(r) ((r) != RID_INIT)
+#define ra_gethint(r) ((Reg)((r) & RID_MASK))
+#define ra_sethint(rr, r) rr = (uint8_t)((r)|RID_NONE)
+#define ra_samehint(r1, r2) (ra_gethint((r1)^(r2)) == 0)
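The hint encoding above relies on the high bit: a value with RID_NONE set is not an allocation, but its low bits may still carry the preferred register. A short sketch of how the macros compose (RID_RET comes from the target-specific header included further below):

    /* Sketch: record a register hint, then query it the way the allocator does. */
    static void hint_demo(void)
    {
      uint8_t r = RID_INIT;                    /* No register, no hint. */
      ra_sethint(r, RID_RET);                  /* Prefer the return register. */
      lua_assert(ra_noreg(r));                 /* Still not a real allocation. */
      lua_assert(ra_hashint(r) && ra_gethint(r) == RID_RET);
    }
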
+
+/* Spill slot 0 means no spill slot has been allocated. */
+#define SPS_NONE 0
+
+#define ra_hasspill(s) ((s) != SPS_NONE)
+
+/* Combined register and spill slot (uint16_t in ir->prev). */
+typedef uint32_t RegSP;
+
+#define REGSP(r, s) ((r) + ((s) << 8))
+#define REGSP_HINT(r) ((r)|RID_NONE)
+#define REGSP_INIT REGSP(RID_INIT, 0)
+
+#define regsp_reg(rs) ((rs) & 255)
+#define regsp_spill(rs) ((rs) >> 8)
+#define regsp_used(rs) \
+ (((rs) & ~REGSP(RID_MASK, 0)) != REGSP(RID_NONE, 0))
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Bitset for registers. 32 registers suffice for most architectures.
+** Note that one set holds bits for both GPRs and FPRs.
+*/
+#if LJ_TARGET_PPC || LJ_TARGET_MIPS
+typedef uint64_t RegSet;
+#else
+typedef uint32_t RegSet;
+#endif
+
+#define RID2RSET(r) (((RegSet)1) << (r))
+#define RSET_EMPTY ((RegSet)0)
+#define RSET_RANGE(lo, hi) ((RID2RSET((hi)-(lo))-1) << (lo))
+
+#define rset_test(rs, r) ((int)((rs) >> (r)) & 1)
+#define rset_set(rs, r) (rs |= RID2RSET(r))
+#define rset_clear(rs, r) (rs &= ~RID2RSET(r))
+#define rset_exclude(rs, r) (rs & ~RID2RSET(r))
+#if LJ_TARGET_PPC || LJ_TARGET_MIPS
+#define rset_picktop(rs) ((Reg)(__builtin_clzll(rs)^63))
+#define rset_pickbot(rs) ((Reg)__builtin_ctzll(rs))
+#else
+#define rset_picktop(rs) ((Reg)lj_fls(rs))
+#define rset_pickbot(rs) ((Reg)lj_ffs(rs))
+#endif
+
+/* -- Register allocation cost -------------------------------------------- */
+
+/* The register allocation heuristic keeps track of the cost for allocating
+** a specific register:
+**
+** A free register (obviously) has a cost of 0 and a 1-bit in the free mask.
+**
+** An already allocated register has the (non-zero) IR reference in the lowest
+** bits and the result of a blended cost-model in the higher bits.
+**
+** The allocator first checks the free mask for a hit. Otherwise an (unrolled)
+** linear search for the minimum cost is used. The search doesn't need to
+** keep track of the position of the minimum, which makes it very fast.
+** The lowest bits of the minimum cost show the desired IR reference whose
+** register is the one to evict.
+**
+** Without the cost-model this degenerates to the standard heuristics for
+** (reverse) linear-scan register allocation. Since code generation is done
+** in reverse, a live interval extends from the last use to the first def.
+** For an SSA IR the IR reference is the first (and only) def and thus
+** trivially marks the end of the interval. The LSRA heuristics says to pick
+** the register whose live interval has the furthest extent, i.e. the lowest
+** IR reference in our case.
+**
+** A cost-model should take into account other factors, like spill-cost and
+** restore- or rematerialization-cost, which depend on the kind of instruction.
+** E.g. constants have zero spill costs, variant instructions have higher
+** costs than invariants and PHIs should preferably never be spilled.
+**
+** Here's a first cut at simple, but effective blended cost-model for R-LSRA:
+** - Due to careful design of the IR, constants already have lower IR
+** references than invariants and invariants have lower IR references
+** than variants.
+** - The cost in the upper 16 bits is the sum of the IR reference and a
+** weighted score. The score currently only takes into account whether
+** the IRT_ISPHI bit is set in the instruction type.
+** - The PHI weight is the minimum distance (in IR instructions) a PHI
+** reference has to be further apart from a non-PHI reference to be spilled.
+** - It should be a power of two (for speed) and must be between 2 and 32768.
+** Good values for the PHI weight seem to be between 40 and 150.
+** - Further study is required.
+*/
+#define REGCOST_PHI_WEIGHT 64
+
+/* Cost for allocating a specific register. */
+typedef uint32_t RegCost;
+
+/* Note: assumes 16 bit IRRef1. */
+#define REGCOST(cost, ref) ((RegCost)(ref) + ((RegCost)(cost) << 16))
+#define regcost_ref(rc) ((IRRef1)(rc))
+
+#define REGCOST_T(t) \
+ ((RegCost)((t)&IRT_ISPHI) * (((RegCost)(REGCOST_PHI_WEIGHT)<<16)/IRT_ISPHI))
+#define REGCOST_REF_T(ref, t) (REGCOST((ref), (ref)) + REGCOST_T((t)))
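In other words, the low 16 bits of a RegCost always hold the IR reference and the upper bits hold the blended score, so an ordinary integer comparison prefers to evict low-score, low-reference instructions first. A sketch of the packing arithmetic (the reference value 100 is made up; regcost_ref() assumes IRRef1 from lj_ir.h):

    /* Sketch: a PHI's cost is boosted by REGCOST_PHI_WEIGHT in the upper half. */
    static void regcost_demo(void)
    {
      RegCost plain = REGCOST(100, 100);                     /* Non-PHI: score == ref. */
      RegCost phi = REGCOST(100 + REGCOST_PHI_WEIGHT, 100);  /* Same ref, higher score. */
      lua_assert(regcost_ref(plain) == 100 && regcost_ref(phi) == 100);
      lua_assert(phi > plain);   /* The minimum-cost search evicts 'plain' first. */
    }
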
+
+/* -- Target-specific definitions ----------------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+#include "lj_target_x86.h"
+#elif LJ_TARGET_ARM
+#include "lj_target_arm.h"
+#elif LJ_TARGET_PPC
+#include "lj_target_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "lj_target_mips.h"
+#else
+#error "Missing include for target CPU"
+#endif
+
+#ifdef EXITSTUBS_PER_GROUP
+/* Return the address of an exit stub. */
+static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno)
+{
+ lua_assert(group[exitno / EXITSTUBS_PER_GROUP] != NULL);
+ return (char *)group[exitno / EXITSTUBS_PER_GROUP] +
+ EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP);
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_addr(J, exitno) \
+ ((MCode *)exitstub_addr_((char **)((J)->exitstubgroup), (exitno)))
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_target_arm.h b/src/LuaJIT/src/lj_target_arm.h
new file mode 100644
index 000000000..a24fc8192
--- /dev/null
+++ b/src/LuaJIT/src/lj_target_arm.h
@@ -0,0 +1,212 @@
+/*
+** Definitions for ARM CPUs.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_ARM_H
+#define _LJ_TARGET_ARM_H
+
+/* -- Registers IDs ------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
+ _(R8) _(R9) _(R10) _(R11) _(R12) _(SP) _(LR) _(PC)
+#if LJ_SOFTFP
+#define FPRDEF(_)
+#else
+#error "NYI: hard-float support for ARM"
+#endif
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_TMP = RID_LR,
+
+ /* Calling conventions. */
+ RID_RET = RID_R0,
+ RID_RETLO = RID_R0,
+ RID_RETHI = RID_R1,
+ RID_FPRET = RID_R0,
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_R9, /* Interpreter BASE. */
+ RID_LPC = RID_R6, /* Interpreter PC. */
+ RID_DISPATCH = RID_R7, /* Interpreter DISPATCH table. */
+ RID_LREG = RID_R8, /* Interpreter L. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_R0,
+ RID_MAX_GPR = RID_PC+1,
+ RID_MIN_FPR = RID_MAX_GPR,
+#if LJ_SOFTFP
+ RID_MAX_FPR = RID_MIN_FPR,
+#else
+#error "NYI: VFP support for ARM"
+#endif
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_R0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except sp, lr and pc. */
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_R12+1))
+#define RSET_GPREVEN \
+ (RID2RSET(RID_R0)|RID2RSET(RID_R2)|RID2RSET(RID_R4)|RID2RSET(RID_R6)| \
+ RID2RSET(RID_R8)|RID2RSET(RID_R10))
+#define RSET_GPRODD \
+ (RID2RSET(RID_R1)|RID2RSET(RID_R3)|RID2RSET(RID_R5)|RID2RSET(RID_R7)| \
+ RID2RSET(RID_R9)|RID2RSET(RID_R11))
+#if LJ_SOFTFP
+#define RSET_FPR 0
+#define RSET_ALL RSET_GPR
+#else
+#error "NYI: VFP support for ARM"
+#endif
+#define RSET_INIT RSET_ALL
+
+/* ABI-specific register sets. lr is an implicit scratch register. */
+#define RSET_SCRATCH_GPR_ (RSET_RANGE(RID_R0, RID_R3+1)|RID2RSET(RID_R12))
+#ifdef __APPLE__
+#define RSET_SCRATCH_GPR (RSET_SCRATCH_GPR_|RID2RSET(RID_R9))
+#else
+#define RSET_SCRATCH_GPR RSET_SCRATCH_GPR_
+#endif
+#if LJ_SOFTFP
+#define RSET_SCRATCH_FPR 0
+#else
+#error "NYI: VFP support for ARM"
+#endif
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_R0
+#define REGARG_LASTGPR RID_R3
+#define REGARG_NUMGPR 4
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
+*/
+#define SPS_FIXED 2
+#define SPS_FIRST 2
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+#if !LJ_SOFTFP
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+#endif
+ int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* PC after instruction that caused an exit. Used to find the trace number. */
+#define EXITSTATE_PCREG RID_PC
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+#define EXITSTUB_SPACING 4
+#define EXITSTUBS_PER_GROUP 32
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* Instruction fields. */
+#define ARMF_CC(ai, cc) (((ai) ^ ARMI_CCAL) | ((cc) << 28))
+#define ARMF_N(r) ((r) << 16)
+#define ARMF_D(r) ((r) << 12)
+#define ARMF_S(r) ((r) << 8)
+#define ARMF_M(r) (r)
+#define ARMF_SH(sh, n) (((sh) << 5) | ((n) << 7))
+#define ARMF_RSH(sh, r) (0x10 | ((sh) << 5) | ARMF_S(r))
+
+typedef enum ARMIns {
+ ARMI_CCAL = 0xe0000000,
+ ARMI_S = 0x000100000,
+ ARMI_K12 = 0x02000000,
+ ARMI_KNEG = 0x00200000,
+ ARMI_LS_W = 0x00200000,
+ ARMI_LS_U = 0x00800000,
+ ARMI_LS_P = 0x01000000,
+ ARMI_LS_R = 0x02000000,
+ ARMI_LSX_I = 0x00400000,
+
+ ARMI_AND = 0xe0000000,
+ ARMI_EOR = 0xe0200000,
+ ARMI_SUB = 0xe0400000,
+ ARMI_RSB = 0xe0600000,
+ ARMI_ADD = 0xe0800000,
+ ARMI_ADC = 0xe0a00000,
+ ARMI_SBC = 0xe0c00000,
+ ARMI_RSC = 0xe0e00000,
+ ARMI_TST = 0xe1100000,
+ ARMI_TEQ = 0xe1300000,
+ ARMI_CMP = 0xe1500000,
+ ARMI_CMN = 0xe1700000,
+ ARMI_ORR = 0xe1800000,
+ ARMI_MOV = 0xe1a00000,
+ ARMI_BIC = 0xe1c00000,
+ ARMI_MVN = 0xe1e00000,
+
+ ARMI_NOP = 0xe1a00000,
+
+ ARMI_MUL = 0xe0000090,
+ ARMI_SMULL = 0xe0c00090,
+
+ ARMI_LDR = 0xe4100000,
+ ARMI_LDRB = 0xe4500000,
+ ARMI_LDRH = 0xe01000b0,
+ ARMI_LDRSB = 0xe01000d0,
+ ARMI_LDRSH = 0xe01000f0,
+ ARMI_LDRD = 0xe00000d0,
+ ARMI_STR = 0xe4000000,
+ ARMI_STRB = 0xe4400000,
+ ARMI_STRH = 0xe00000b0,
+ ARMI_STRD = 0xe00000f0,
+ ARMI_PUSH = 0xe92d0000,
+
+ ARMI_B = 0xea000000,
+ ARMI_BL = 0xeb000000,
+ ARMI_BLX = 0xfa000000,
+ ARMI_BLXr = 0xe12fff30,
+
+ /* ARMv6 */
+ ARMI_REV = 0xe6bf0f30,
+ ARMI_SXTB = 0xe6af0070,
+ ARMI_SXTH = 0xe6bf0070,
+ ARMI_UXTB = 0xe6ef0070,
+ ARMI_UXTH = 0xe6ff0070,
+
+ /* ARMv6T2 */
+ ARMI_MOVW = 0xe3000000,
+ ARMI_MOVT = 0xe3400000,
+} ARMIns;
+
+typedef enum ARMShift {
+ ARMSH_LSL, ARMSH_LSR, ARMSH_ASR, ARMSH_ROR
+} ARMShift;
+
+/* ARM condition codes. */
+typedef enum ARMCC {
+ CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC,
+ CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL,
+ CC_HS = CC_CS, CC_LO = CC_CC
+} ARMCC;
+
+#endif
diff --git a/src/LuaJIT/src/lj_target_mips.h b/src/LuaJIT/src/lj_target_mips.h
new file mode 100644
index 000000000..1b7727d01
--- /dev/null
+++ b/src/LuaJIT/src/lj_target_mips.h
@@ -0,0 +1,256 @@
+/*
+** Definitions for MIPS CPUs.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_MIPS_H
+#define _LJ_TARGET_MIPS_H
+
+/* -- Registers IDs ------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
+ _(R8) _(R9) _(R10) _(R11) _(R12) _(R13) _(R14) _(R15) \
+ _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
+ _(R24) _(R25) _(SYS1) _(SYS2) _(R28) _(SP) _(R30) _(RA)
+#define FPRDEF(_) \
+ _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
+ _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
+ _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
+ _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_ZERO = RID_R0,
+ RID_TMP = RID_RA,
+
+ /* Calling conventions. */
+ RID_RET = RID_R2,
+#if LJ_LE
+ RID_RETHI = RID_R3,
+ RID_RETLO = RID_R2,
+#else
+ RID_RETHI = RID_R2,
+ RID_RETLO = RID_R3,
+#endif
+ RID_FPRET = RID_F0,
+ RID_CFUNCADDR = RID_R25,
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_R16, /* Interpreter BASE. */
+ RID_LPC = RID_R18, /* Interpreter PC. */
+ RID_DISPATCH = RID_R19, /* Interpreter DISPATCH table. */
+ RID_LREG = RID_R20, /* Interpreter L. */
+ RID_JGL = RID_R30, /* On-trace: global_State + 32768. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_R0,
+ RID_MAX_GPR = RID_RA+1,
+ RID_MIN_FPR = RID_F0,
+ RID_MAX_FPR = RID_F31+1,
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR /* Only even regs are used. */
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_R0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except ZERO, TMP, SP, SYS1, SYS2 and JGL. */
+#define RSET_FIXED \
+ (RID2RSET(RID_ZERO)|RID2RSET(RID_TMP)|RID2RSET(RID_SP)|\
+ RID2RSET(RID_SYS1)|RID2RSET(RID_SYS2)|RID2RSET(RID_JGL))
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
+#define RSET_FPR \
+ (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
+ RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
+ RID2RSET(RID_F16)|RID2RSET(RID_F18)|RID2RSET(RID_F20)|RID2RSET(RID_F22)|\
+ RID2RSET(RID_F24)|RID2RSET(RID_F26)|RID2RSET(RID_F28)|RID2RSET(RID_F30))
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+#define RSET_SCRATCH_GPR \
+ (RSET_RANGE(RID_R1, RID_R15+1)|\
+ RID2RSET(RID_R24)|RID2RSET(RID_R25)|RID2RSET(RID_R28))
+#define RSET_SCRATCH_FPR \
+ (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
+ RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
+ RID2RSET(RID_F16)|RID2RSET(RID_F18))
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_R4
+#define REGARG_LASTGPR RID_R7
+#define REGARG_NUMGPR 4
+#define REGARG_FIRSTFPR RID_F12
+#define REGARG_LASTFPR RID_F14
+#define REGARG_NUMFPR 2
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use.
+*/
+#define SPS_FIXED 5
+#define SPS_FIRST 4
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+ int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+/* Return the address of a per-trace exit stub. */
+static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p)
+{
+ while (*p == 0x00000000) p++; /* Skip MIPSI_NOP. */
+ return p;
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_trace_addr(T, exitno) \
+ exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode))
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* Instruction fields. */
+#define MIPSF_S(r) ((r) << 21)
+#define MIPSF_T(r) ((r) << 16)
+#define MIPSF_D(r) ((r) << 11)
+#define MIPSF_R(r) ((r) << 21)
+#define MIPSF_H(r) ((r) << 16)
+#define MIPSF_G(r) ((r) << 11)
+#define MIPSF_F(r) ((r) << 6)
+#define MIPSF_A(n) ((n) << 6)
+#define MIPSF_M(n) ((n) << 11)
+
+typedef enum MIPSIns {
+ /* Integer instructions. */
+ MIPSI_MOVE = 0x00000021,
+ MIPSI_NOP = 0x00000000,
+
+ MIPSI_LI = 0x24000000,
+ MIPSI_LU = 0x34000000,
+ MIPSI_LUI = 0x3c000000,
+
+ MIPSI_ADDIU = 0x24000000,
+ MIPSI_ANDI = 0x30000000,
+ MIPSI_ORI = 0x34000000,
+ MIPSI_XORI = 0x38000000,
+ MIPSI_SLTI = 0x28000000,
+ MIPSI_SLTIU = 0x2c000000,
+
+ MIPSI_ADDU = 0x00000021,
+ MIPSI_SUBU = 0x00000023,
+ MIPSI_MUL = 0x70000002,
+ MIPSI_AND = 0x00000024,
+ MIPSI_OR = 0x00000025,
+ MIPSI_XOR = 0x00000026,
+ MIPSI_NOR = 0x00000027,
+ MIPSI_SLT = 0x0000002a,
+ MIPSI_SLTU = 0x0000002b,
+ MIPSI_MOVZ = 0x0000000a,
+ MIPSI_MOVN = 0x0000000b,
+
+ MIPSI_SLL = 0x00000000,
+ MIPSI_SRL = 0x00000002,
+ MIPSI_SRA = 0x00000003,
+ MIPSI_ROTR = 0x00200002, /* MIPS32R2 */
+ MIPSI_SLLV = 0x00000004,
+ MIPSI_SRLV = 0x00000006,
+ MIPSI_SRAV = 0x00000007,
+ MIPSI_ROTRV = 0x00000046, /* MIPS32R2 */
+
+ MIPSI_SEB = 0x7c000420, /* MIPS32R2 */
+ MIPSI_SEH = 0x7c000620, /* MIPS32R2 */
+ MIPSI_WSBH = 0x7c0000a0, /* MIPS32R2 */
+
+ MIPSI_B = 0x10000000,
+ MIPSI_J = 0x08000000,
+ MIPSI_JAL = 0x0c000000,
+ MIPSI_JR = 0x00000008,
+ MIPSI_JALR = 0x0000f809,
+
+ MIPSI_BEQ = 0x10000000,
+ MIPSI_BNE = 0x14000000,
+ MIPSI_BLEZ = 0x18000000,
+ MIPSI_BGTZ = 0x1c000000,
+ MIPSI_BLTZ = 0x04000000,
+ MIPSI_BGEZ = 0x04010000,
+
+ /* Load/store instructions. */
+ MIPSI_LW = 0x8c000000,
+ MIPSI_SW = 0xac000000,
+ MIPSI_LB = 0x80000000,
+ MIPSI_SB = 0xa0000000,
+ MIPSI_LH = 0x84000000,
+ MIPSI_SH = 0xa4000000,
+ MIPSI_LBU = 0x90000000,
+ MIPSI_LHU = 0x94000000,
+ MIPSI_LWC1 = 0xc4000000,
+ MIPSI_SWC1 = 0xe4000000,
+ MIPSI_LDC1 = 0xd4000000,
+ MIPSI_SDC1 = 0xf4000000,
+
+ /* FP instructions. */
+ MIPSI_MOV_S = 0x46000006,
+ MIPSI_MOV_D = 0x46200006,
+ MIPSI_MOVT_D = 0x46210011,
+ MIPSI_MOVF_D = 0x46200011,
+
+ MIPSI_ABS_D = 0x46200005,
+ MIPSI_NEG_D = 0x46200007,
+
+ MIPSI_ADD_D = 0x46200000,
+ MIPSI_SUB_D = 0x46200001,
+ MIPSI_MUL_D = 0x46200002,
+ MIPSI_DIV_D = 0x46200003,
+
+ MIPSI_ADD_S = 0x46000000,
+ MIPSI_SUB_S = 0x46000001,
+
+ MIPSI_CVT_D_S = 0x46000021,
+ MIPSI_CVT_W_S = 0x46000024,
+ MIPSI_CVT_S_D = 0x46200020,
+ MIPSI_CVT_W_D = 0x46200024,
+ MIPSI_CVT_S_W = 0x46800020,
+ MIPSI_CVT_D_W = 0x46800021,
+
+ MIPSI_TRUNC_W_S = 0x4600000d,
+ MIPSI_TRUNC_W_D = 0x4620000d,
+ MIPSI_FLOOR_W_S = 0x4600000f,
+ MIPSI_FLOOR_W_D = 0x4620000f,
+
+ MIPSI_MFC1 = 0x44000000,
+ MIPSI_MTC1 = 0x44800000,
+
+ MIPSI_BC1F = 0x45000000,
+ MIPSI_BC1T = 0x45010000,
+
+ MIPSI_C_EQ_D = 0x46200032,
+ MIPSI_C_OLT_D = 0x46200034,
+ MIPSI_C_ULT_D = 0x46200035,
+ MIPSI_C_OLE_D = 0x46200036,
+ MIPSI_C_ULE_D = 0x46200037,
+
+} MIPSIns;
+
+#endif
diff --git a/src/LuaJIT/src/lj_target_ppc.h b/src/LuaJIT/src/lj_target_ppc.h
new file mode 100644
index 000000000..f5e893257
--- /dev/null
+++ b/src/LuaJIT/src/lj_target_ppc.h
@@ -0,0 +1,279 @@
+/*
+** Definitions for PPC CPUs.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_PPC_H
+#define _LJ_TARGET_PPC_H
+
+/* -- Registers IDs ------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(R0) _(SP) _(SYS1) _(R3) _(R4) _(R5) _(R6) _(R7) \
+ _(R8) _(R9) _(R10) _(R11) _(R12) _(SYS2) _(R14) _(R15) \
+ _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
+ _(R24) _(R25) _(R26) _(R27) _(R28) _(R29) _(R30) _(R31)
+#define FPRDEF(_) \
+ _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
+ _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
+ _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
+ _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_TMP = RID_R0,
+
+ /* Calling conventions. */
+ RID_RET = RID_R3,
+ RID_RETHI = RID_R3,
+ RID_RETLO = RID_R4,
+ RID_FPRET = RID_F1,
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_R14, /* Interpreter BASE. */
+ RID_LPC = RID_R16, /* Interpreter PC. */
+ RID_DISPATCH = RID_R17, /* Interpreter DISPATCH table. */
+ RID_LREG = RID_R18, /* Interpreter L. */
+ RID_JGL = RID_R31, /* On-trace: global_State + 32768. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_R0,
+ RID_MAX_GPR = RID_R31+1,
+ RID_MIN_FPR = RID_F0,
+ RID_MAX_FPR = RID_F31+1,
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_R0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except TMP, SP, SYS1, SYS2 and JGL. */
+#define RSET_FIXED \
+ (RID2RSET(RID_TMP)|RID2RSET(RID_SP)|RID2RSET(RID_SYS1)|\
+ RID2RSET(RID_SYS2)|RID2RSET(RID_JGL))
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
+#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+#define RSET_SCRATCH_GPR (RSET_RANGE(RID_R3, RID_R12+1))
+#define RSET_SCRATCH_FPR (RSET_RANGE(RID_F0, RID_F13+1))
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_R3
+#define REGARG_LASTGPR RID_R10
+#define REGARG_NUMGPR 8
+#define REGARG_FIRSTFPR RID_F1
+#define REGARG_LASTFPR RID_F8
+#define REGARG_NUMFPR 8
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use.
+** [sp+12] tmplo word \
+** [sp+ 8] tmphi word / tmp dword, parameter area for callee
+** [sp+ 4] tmpw, LR of callee
+** [sp+ 0] stack chain
+*/
+#define SPS_FIXED 7
+#define SPS_FIRST 4
+
+/* Stack offsets for temporary slots. Used for FP<->int conversions etc. */
+#define SPOFS_TMPW 4
+#define SPOFS_TMP 8
+#define SPOFS_TMPHI 8
+#define SPOFS_TMPLO 12
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+ int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+/* Return the address of a per-trace exit stub. */
+static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p, uint32_t exitno)
+{
+ while (*p == 0x60000000) p++; /* Skip PPCI_NOP. */
+ return p + 3 + exitno;
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_trace_addr(T, exitno) \
+ exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode), (exitno))
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* Instruction fields. */
+#define PPCF_CC(cc) ((((cc) & 3) << 16) | (((cc) & 4) << 22))
+#define PPCF_T(r) ((r) << 21)
+#define PPCF_A(r) ((r) << 16)
+#define PPCF_B(r) ((r) << 11)
+#define PPCF_C(r) ((r) << 6)
+#define PPCF_MB(n) ((n) << 6)
+#define PPCF_ME(n) ((n) << 1)
+#define PPCF_Y 0x00200000
+#define PPCF_DOT 0x00000001
+
+typedef enum PPCIns {
+ /* Integer instructions. */
+ PPCI_MR = 0x7c000378,
+ PPCI_NOP = 0x60000000,
+
+ PPCI_LI = 0x38000000,
+ PPCI_LIS = 0x3c000000,
+
+ PPCI_ADD = 0x7c000214,
+ PPCI_ADDC = 0x7c000014,
+ PPCI_ADDO = 0x7c000614,
+ PPCI_ADDE = 0x7c000114,
+ PPCI_ADDZE = 0x7c000194,
+ PPCI_ADDME = 0x7c0001d4,
+ PPCI_ADDI = 0x38000000,
+ PPCI_ADDIS = 0x3c000000,
+ PPCI_ADDIC = 0x30000000,
+ PPCI_ADDICDOT = 0x34000000,
+
+ PPCI_SUBF = 0x7c000050,
+ PPCI_SUBFC = 0x7c000010,
+ PPCI_SUBFO = 0x7c000450,
+ PPCI_SUBFE = 0x7c000110,
+ PPCI_SUBFZE = 0x7c000190,
+ PPCI_SUBFME = 0x7c0001d0,
+ PPCI_SUBFIC = 0x20000000,
+
+ PPCI_NEG = 0x7c0000d0,
+
+ PPCI_AND = 0x7c000038,
+ PPCI_ANDC = 0x7c000078,
+ PPCI_NAND = 0x7c0003b8,
+ PPCI_ANDIDOT = 0x70000000,
+ PPCI_ANDISDOT = 0x74000000,
+
+ PPCI_OR = 0x7c000378,
+ PPCI_NOR = 0x7c0000f8,
+ PPCI_ORI = 0x60000000,
+ PPCI_ORIS = 0x64000000,
+
+ PPCI_XOR = 0x7c000278,
+ PPCI_EQV = 0x7c000238,
+ PPCI_XORI = 0x68000000,
+ PPCI_XORIS = 0x6c000000,
+
+ PPCI_CMPW = 0x7c000000,
+ PPCI_CMPLW = 0x7c000040,
+ PPCI_CMPWI = 0x2c000000,
+ PPCI_CMPLWI = 0x28000000,
+
+ PPCI_MULLW = 0x7c0001d6,
+ PPCI_MULLI = 0x1c000000,
+ PPCI_MULLWO = 0x7c0005d6,
+
+ PPCI_EXTSB = 0x7c000774,
+ PPCI_EXTSH = 0x7c000734,
+
+ PPCI_SLW = 0x7c000030,
+ PPCI_SRW = 0x7c000430,
+ PPCI_SRAW = 0x7c000630,
+ PPCI_SRAWI = 0x7c000670,
+
+ PPCI_RLWNM = 0x5c000000,
+ PPCI_RLWINM = 0x54000000,
+ PPCI_RLWIMI = 0x50000000,
+
+ PPCI_B = 0x48000000,
+ PPCI_BL = 0x48000001,
+ PPCI_BC = 0x40800000,
+ PPCI_BCL = 0x40800001,
+ PPCI_BCTR = 0x4e800420,
+ PPCI_BCTRL = 0x4e800421,
+
+ PPCI_CRANDC = 0x4c000102,
+ PPCI_CRXOR = 0x4c000182,
+ PPCI_CRAND = 0x4c000202,
+ PPCI_CREQV = 0x4c000242,
+ PPCI_CRORC = 0x4c000342,
+ PPCI_CROR = 0x4c000382,
+
+ PPCI_MFLR = 0x7c0802a6,
+ PPCI_MTCTR = 0x7c0903a6,
+
+ PPCI_MCRXR = 0x7c000400,
+
+ /* Load/store instructions. */
+ PPCI_LWZ = 0x80000000,
+ PPCI_LBZ = 0x88000000,
+ PPCI_STW = 0x90000000,
+ PPCI_STB = 0x98000000,
+ PPCI_LHZ = 0xa0000000,
+ PPCI_LHA = 0xa8000000,
+ PPCI_STH = 0xb0000000,
+
+ PPCI_STWU = 0x94000000,
+
+ PPCI_LFS = 0xc0000000,
+ PPCI_LFD = 0xc8000000,
+ PPCI_STFS = 0xd0000000,
+ PPCI_STFD = 0xd8000000,
+
+ PPCI_LWZX = 0x7c00002e,
+ PPCI_LBZX = 0x7c0000ae,
+ PPCI_STWX = 0x7c00012e,
+ PPCI_STBX = 0x7c0001ae,
+ PPCI_LHZX = 0x7c00022e,
+ PPCI_LHAX = 0x7c0002ae,
+ PPCI_STHX = 0x7c00032e,
+
+ PPCI_LWBRX = 0x7c00042c,
+ PPCI_STWBRX = 0x7c00052c,
+
+ PPCI_LFSX = 0x7c00042e,
+ PPCI_LFDX = 0x7c0004ae,
+ PPCI_STFSX = 0x7c00052e,
+ PPCI_STFDX = 0x7c0005ae,
+
+ /* FP instructions. */
+ PPCI_FMR = 0xfc000090,
+ PPCI_FNEG = 0xfc000050,
+ PPCI_FABS = 0xfc000210,
+
+ PPCI_FRSP = 0xfc000018,
+ PPCI_FCTIWZ = 0xfc00001e,
+
+ PPCI_FADD = 0xfc00002a,
+ PPCI_FSUB = 0xfc000028,
+ PPCI_FMUL = 0xfc000032,
+ PPCI_FDIV = 0xfc000024,
+
+ PPCI_FMADD = 0xfc00003a,
+ PPCI_FMSUB = 0xfc000038,
+ PPCI_FNMSUB = 0xfc00003c,
+
+ PPCI_FCMPU = 0xfc000000,
+ PPCI_FSEL = 0xfc00002e,
+} PPCIns;
+
+typedef enum PPCCC {
+ CC_GE, CC_LE, CC_NE, CC_NS, CC_LT, CC_GT, CC_EQ, CC_SO
+} PPCCC;
+
+#endif
diff --git a/src/LuaJIT/src/lj_target_x86.h b/src/LuaJIT/src/lj_target_x86.h
new file mode 100644
index 000000000..cc15490b6
--- /dev/null
+++ b/src/LuaJIT/src/lj_target_x86.h
@@ -0,0 +1,339 @@
+/*
+** Definitions for x86 and x64 CPUs.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_X86_H
+#define _LJ_TARGET_X86_H
+
+/* -- Registers IDs ------------------------------------------------------- */
+
+#if LJ_64
+#define GPRDEF(_) \
+ _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) \
+ _(R8D) _(R9D) _(R10D) _(R11D) _(R12D) _(R13D) _(R14D) _(R15D)
+#define FPRDEF(_) \
+ _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) \
+ _(XMM8) _(XMM9) _(XMM10) _(XMM11) _(XMM12) _(XMM13) _(XMM14) _(XMM15)
+#else
+#define GPRDEF(_) \
+ _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI)
+#define FPRDEF(_) \
+ _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7)
+#endif
+#define VRIDDEF(_) \
+ _(MRM)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */
+
+ /* Calling conventions. */
+ RID_RET = RID_EAX,
+#if LJ_64
+ RID_FPRET = RID_XMM0,
+#else
+ RID_RETLO = RID_EAX,
+ RID_RETHI = RID_EDX,
+#endif
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_EDX, /* Interpreter BASE. */
+#if LJ_64 && !LJ_ABI_WIN
+ RID_LPC = RID_EBX, /* Interpreter PC. */
+ RID_DISPATCH = RID_R14D, /* Interpreter DISPATCH table. */
+#else
+ RID_LPC = RID_ESI, /* Interpreter PC. */
+ RID_DISPATCH = RID_EBX, /* Interpreter DISPATCH table. */
+#endif
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_EAX,
+ RID_MIN_FPR = RID_XMM0,
+ RID_MAX_GPR = RID_MIN_FPR,
+ RID_MAX_FPR = RID_MAX,
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR,
+};
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except the stack pointer. */
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR)-RID2RSET(RID_ESP))
+#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+#if LJ_64
+/* Note: this requires the use of FORCE_REX! */
+#define RSET_GPR8 RSET_GPR
+#else
+#define RSET_GPR8 (RSET_RANGE(RID_EAX, RID_EBX+1))
+#endif
+
+/* ABI-specific register sets. */
+#define RSET_ACD (RID2RSET(RID_EAX)|RID2RSET(RID_ECX)|RID2RSET(RID_EDX))
+#if LJ_64
+#if LJ_ABI_WIN
+/* Windows x64 ABI. */
+#define RSET_SCRATCH \
+ (RSET_ACD|RSET_RANGE(RID_R8D, RID_R11D+1)|RSET_RANGE(RID_XMM0, RID_XMM5+1))
+#define REGARG_GPRS \
+ (RID_ECX|((RID_EDX|((RID_R8D|(RID_R9D<<5))<<5))<<5))
+#define REGARG_NUMGPR 4
+#define REGARG_NUMFPR 4
+#define REGARG_FIRSTFPR RID_XMM0
+#define REGARG_LASTFPR RID_XMM3
+#define STACKARG_OFS (4*8)
+#else
+/* The rest of the civilized x64 world has a common ABI. */
+#define RSET_SCRATCH \
+ (RSET_ACD|RSET_RANGE(RID_ESI, RID_R11D+1)|RSET_FPR)
+#define REGARG_GPRS \
+ (RID_EDI|((RID_ESI|((RID_EDX|((RID_ECX|((RID_R8D|(RID_R9D \
+ <<5))<<5))<<5))<<5))<<5))
+#define REGARG_NUMGPR 6
+#define REGARG_NUMFPR 8
+#define REGARG_FIRSTFPR RID_XMM0
+#define REGARG_LASTFPR RID_XMM7
+#define STACKARG_OFS 0
+#endif
+#else
+/* Common x86 ABI. */
+#define RSET_SCRATCH (RSET_ACD|RSET_FPR)
+#define REGARG_GPRS (RID_ECX|(RID_EDX<<5)) /* Fastcall only. */
+#define REGARG_NUMGPR 2 /* Fastcall only. */
+#define REGARG_NUMFPR 0
+#define STACKARG_OFS 0
+#endif
+
+#if LJ_64
+/* Prefer the low 8 regs of each type to reduce REX prefixes. */
+#undef rset_picktop
+#define rset_picktop(rs) (lj_fls(lj_bswap(rs)) ^ 0x18)
+#endif
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
+*/
+#if LJ_64
+#if LJ_ABI_WIN
+#define SPS_FIXED (4*2)
+#define SPS_FIRST (4*2) /* Don't use callee register save area. */
+#else
+#define SPS_FIXED 4
+#define SPS_FIRST 2
+#endif
+#else
+#define SPS_FIXED 6
+#define SPS_FIRST 2
+#endif
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+ intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
+#define EXITSTUB_SPACING (2+2)
+#define EXITSTUBS_PER_GROUP 32
+
+/* -- x86 ModRM operand encoding ------------------------------------------ */
+
+typedef enum {
+ XM_OFS0 = 0x00, XM_OFS8 = 0x40, XM_OFS32 = 0x80, XM_REG = 0xc0,
+ XM_SCALE1 = 0x00, XM_SCALE2 = 0x40, XM_SCALE4 = 0x80, XM_SCALE8 = 0xc0,
+ XM_MASK = 0xc0
+} x86Mode;
+
+/* Structure to hold variable ModRM operand. */
+typedef struct {
+ int32_t ofs; /* Offset. */
+ uint8_t base; /* Base register or RID_NONE. */
+ uint8_t idx; /* Index register or RID_NONE. */
+ uint8_t scale; /* Index scale (XM_SCALE1 .. XM_SCALE8). */
+} x86ModRM;
+
+/* -- Opcodes ------------------------------------------------------------- */
+
+/* Macros to construct variable-length x86 opcodes. -(len+1) is in LSB. */
+#define XO_(o) ((uint32_t)(0x0000fe + (0x##o<<24)))
+#define XO_FPU(a,b) ((uint32_t)(0x00fd + (0x##a<<16)+(0x##b<<24)))
+#define XO_0f(o) ((uint32_t)(0x0f00fd + (0x##o<<24)))
+#define XO_66(o) ((uint32_t)(0x6600fd + (0x##o<<24)))
+#define XO_660f(o) ((uint32_t)(0x0f66fc + (0x##o<<24)))
+#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24)))
+#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24)))
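The "-(len+1) is in LSB" convention above lets the emitter recover the number of prefix/opcode bytes directly from the low byte of an XO_* constant: 0xfe encodes one byte, 0xfd two, 0xfc three. A decoding sketch (the helper is hypothetical and only illustrates the arithmetic):

    /* Sketch: opcode/prefix byte count encoded in a variable-length x86Op. */
    static uint32_t xo_len(x86Op xo)
    {
      return (uint32_t)(-(int8_t)(xo & 0xff)) - 1;  /* XO_MOV -> 1, XO_IMUL -> 2, XO_MOVSD -> 3. */
    }
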
+
+/* This list of x86 opcodes is not intended to be complete. Opcodes are only
+** included when needed. Take a look at DynASM or jit.dis_x86 to see the
+** whole mess.
+*/
+typedef enum {
+ /* Fixed length opcodes. XI_* prefix. */
+ XI_NOP = 0x90,
+ XI_CALL = 0xe8,
+ XI_JMP = 0xe9,
+ XI_JMPs = 0xeb,
+ XI_PUSH = 0x50, /* Really 50+r. */
+ XI_JCCs = 0x70, /* Really 7x. */
+ XI_JCCn = 0x80, /* Really 0f8x. */
+ XI_LEA = 0x8d,
+ XI_MOVrib = 0xb0, /* Really b0+r. */
+ XI_MOVri = 0xb8, /* Really b8+r. */
+ XI_ARITHib = 0x80,
+ XI_ARITHi = 0x81,
+ XI_ARITHi8 = 0x83,
+ XI_PUSHi8 = 0x6a,
+ XI_TEST = 0x85,
+ XI_MOVmi = 0xc7,
+ XI_GROUP5 = 0xff,
+
+ /* Note: little-endian byte-order! */
+ XI_FLDZ = 0xeed9,
+ XI_FLD1 = 0xe8d9,
+ XI_FLDLG2 = 0xecd9,
+ XI_FLDLN2 = 0xedd9,
+ XI_FDUP = 0xc0d9, /* Really fld st0. */
+ XI_FPOP = 0xd8dd, /* Really fstp st0. */
+ XI_FPOP1 = 0xd9dd, /* Really fstp st1. */
+ XI_FRNDINT = 0xfcd9,
+ XI_FSIN = 0xfed9,
+ XI_FCOS = 0xffd9,
+ XI_FPTAN = 0xf2d9,
+ XI_FPATAN = 0xf3d9,
+ XI_FSCALE = 0xfdd9,
+ XI_FYL2X = 0xf1d9,
+
+ /* Variable-length opcodes. XO_* prefix. */
+ XO_MOV = XO_(8b),
+ XO_MOVto = XO_(89),
+ XO_MOVtow = XO_66(89),
+ XO_MOVtob = XO_(88),
+ XO_MOVmi = XO_(c7),
+ XO_MOVmib = XO_(c6),
+ XO_LEA = XO_(8d),
+ XO_ARITHib = XO_(80),
+ XO_ARITHi = XO_(81),
+ XO_ARITHi8 = XO_(83),
+ XO_ARITHiw8 = XO_66(83),
+ XO_SHIFTi = XO_(c1),
+ XO_SHIFT1 = XO_(d1),
+ XO_SHIFTcl = XO_(d3),
+ XO_IMUL = XO_0f(af),
+ XO_IMULi = XO_(69),
+ XO_IMULi8 = XO_(6b),
+ XO_CMP = XO_(3b),
+ XO_TEST = XO_(85),
+ XO_GROUP3b = XO_(f6),
+ XO_GROUP3 = XO_(f7),
+ XO_GROUP5b = XO_(fe),
+ XO_GROUP5 = XO_(ff),
+ XO_MOVZXb = XO_0f(b6),
+ XO_MOVZXw = XO_0f(b7),
+ XO_MOVSXb = XO_0f(be),
+ XO_MOVSXw = XO_0f(bf),
+ XO_MOVSXd = XO_(63),
+ XO_BSWAP = XO_0f(c8),
+ XO_CMOV = XO_0f(40),
+
+ XO_MOVSD = XO_f20f(10),
+ XO_MOVSDto = XO_f20f(11),
+ XO_MOVSS = XO_f30f(10),
+ XO_MOVSSto = XO_f30f(11),
+ XO_MOVLPD = XO_660f(12),
+ XO_MOVAPS = XO_0f(28),
+ XO_XORPS = XO_0f(57),
+ XO_ANDPS = XO_0f(54),
+ XO_ADDSD = XO_f20f(58),
+ XO_SUBSD = XO_f20f(5c),
+ XO_MULSD = XO_f20f(59),
+ XO_DIVSD = XO_f20f(5e),
+ XO_SQRTSD = XO_f20f(51),
+ XO_MINSD = XO_f20f(5d),
+ XO_MAXSD = XO_f20f(5f),
+ XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */
+ XO_UCOMISD = XO_660f(2e),
+ XO_CVTSI2SD = XO_f20f(2a),
+ XO_CVTSD2SI = XO_f20f(2d),
+ XO_CVTTSD2SI= XO_f20f(2c),
+ XO_CVTSI2SS = XO_f30f(2a),
+ XO_CVTSS2SI = XO_f30f(2d),
+ XO_CVTTSS2SI= XO_f30f(2c),
+ XO_CVTSS2SD = XO_f30f(5a),
+ XO_CVTSD2SS = XO_f20f(5a),
+ XO_ADDSS = XO_f30f(58),
+ XO_MOVD = XO_660f(6e),
+ XO_MOVDto = XO_660f(7e),
+
+ XO_FLDd = XO_(d9), XOg_FLDd = 0,
+ XO_FLDq = XO_(dd), XOg_FLDq = 0,
+ XO_FILDd = XO_(db), XOg_FILDd = 0,
+ XO_FILDq = XO_(df), XOg_FILDq = 5,
+ XO_FSTPd = XO_(d9), XOg_FSTPd = 3,
+ XO_FSTPq = XO_(dd), XOg_FSTPq = 3,
+ XO_FISTPq = XO_(df), XOg_FISTPq = 7,
+ XO_FISTTPq = XO_(dd), XOg_FISTTPq = 1,
+ XO_FADDq = XO_(dc), XOg_FADDq = 0,
+ XO_FLDCW = XO_(d9), XOg_FLDCW = 5,
+ XO_FNSTCW = XO_(d9), XOg_FNSTCW = 7
+} x86Op;
+
+/* x86 opcode groups. */
+typedef uint32_t x86Group;
+
+#define XG_(i8, i, g) ((x86Group)(((i8) << 16) + ((i) << 8) + (g)))
+#define XG_ARITHi(g) XG_(XI_ARITHi8, XI_ARITHi, g)
+#define XG_TOXOi(xg) ((x86Op)(0x000000fe + (((xg)<<16) & 0xff000000)))
+#define XG_TOXOi8(xg) ((x86Op)(0x000000fe + (((xg)<<8) & 0xff000000)))
+
+#define XO_ARITH(a) ((x86Op)(0x030000fe + ((a)<<27)))
+#define XO_ARITHw(a) ((x86Op)(0x036600fd + ((a)<<27)))
+
+typedef enum {
+ XOg_ADD, XOg_OR, XOg_ADC, XOg_SBB, XOg_AND, XOg_SUB, XOg_XOR, XOg_CMP,
+ XOg_X_IMUL
+} x86Arith;
+
+typedef enum {
+ XOg_ROL, XOg_ROR, XOg_RCL, XOg_RCR, XOg_SHL, XOg_SHR, XOg_SAL, XOg_SAR
+} x86Shift;
+
+typedef enum {
+ XOg_TEST, XOg_TEST_, XOg_NOT, XOg_NEG, XOg_MUL, XOg_IMUL, XOg_DIV, XOg_IDIV
+} x86Group3;
+
+typedef enum {
+ XOg_INC, XOg_DEC, XOg_CALL, XOg_CALLfar, XOg_JMP, XOg_JMPfar, XOg_PUSH
+} x86Group5;
+
+/* x86 condition codes. */
+typedef enum {
+ CC_O, CC_NO, CC_B, CC_NB, CC_E, CC_NE, CC_BE, CC_NBE,
+ CC_S, CC_NS, CC_P, CC_NP, CC_L, CC_NL, CC_LE, CC_NLE,
+ CC_C = CC_B, CC_NAE = CC_C, CC_NC = CC_NB, CC_AE = CC_NB,
+ CC_Z = CC_E, CC_NZ = CC_NE, CC_NA = CC_BE, CC_A = CC_NBE,
+ CC_PE = CC_P, CC_PO = CC_NP, CC_NGE = CC_L, CC_GE = CC_NL,
+ CC_NG = CC_LE, CC_G = CC_NLE
+} x86CC;
+
+#endif
diff --git a/src/LuaJIT/src/lj_trace.c b/src/LuaJIT/src/lj_trace.c
new file mode 100644
index 000000000..ad00dc67a
--- /dev/null
+++ b/src/LuaJIT/src/lj_trace.c
@@ -0,0 +1,814 @@
+/*
+** Trace management.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_trace_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_frame.h"
+#include "lj_state.h"
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_gdbjit.h"
+#include "lj_record.h"
+#include "lj_asm.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+#include "lj_target.h"
+
+/* -- Error handling ------------------------------------------------------ */
+
+/* Synchronous abort with error message. */
+void lj_trace_err(jit_State *J, TraceError e)
+{
+ setnilV(&J->errinfo); /* No error info. */
+ setintV(J->L->top++, (int32_t)e);
+ lj_err_throw(J->L, LUA_ERRRUN);
+}
+
+/* Synchronous abort with error message and error info. */
+void lj_trace_err_info(jit_State *J, TraceError e)
+{
+ setintV(J->L->top++, (int32_t)e);
+ lj_err_throw(J->L, LUA_ERRRUN);
+}
+
+/* -- Trace management ---------------------------------------------------- */
+
+/* The current trace is first assembled in J->cur. The variable length
+** arrays point to shared, growable buffers (J->irbuf etc.). When trace
+** recording ends successfully, the current trace and its data structures
+** are copied to a new (compact) GCtrace object.
+*/
+
+/* Find a free trace number. */
+static TraceNo trace_findfree(jit_State *J)
+{
+ MSize osz, lim;
+ if (J->freetrace == 0)
+ J->freetrace = 1;
+ for (; J->freetrace < J->sizetrace; J->freetrace++)
+ if (traceref(J, J->freetrace) == NULL)
+ return J->freetrace++;
+ /* Need to grow trace array. */
+ lim = (MSize)J->param[JIT_P_maxtrace] + 1;
+ if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;
+ osz = J->sizetrace;
+ if (osz >= lim)
+ return 0; /* Too many traces. */
+ lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef);
+ for (; osz < J->sizetrace; osz++)
+ setgcrefnull(J->trace[osz]);
+ return J->freetrace;
+}
+
+#define TRACE_APPENDVEC(field, szfield, tp) \
+ T->field = (tp *)p; \
+ memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \
+ p += J->cur.szfield*sizeof(tp);
+
+#ifdef LUAJIT_USE_PERFTOOLS
+/*
+** Create symbol table of JIT-compiled code. For use with Linux perf tools.
+** Example usage:
+** perf record -f -e cycles luajit test.lua
+** perf report -s symbol
+** rm perf.data /tmp/perf-*.map
+*/
+#include <stdio.h>
+#include <unistd.h>
+
+static void perftools_addtrace(GCtrace *T)
+{
+ static FILE *fp;
+ GCproto *pt = &gcref(T->startpt)->pt;
+ const BCIns *startpc = mref(T->startpc, const BCIns);
+ const char *name = proto_chunknamestr(pt);
+ BCLine lineno;
+ if (name[0] == '@' || name[0] == '=')
+ name++;
+ else
+ name = "(string)";
+ lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc);
+ lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
+ if (!fp) {
+ char fname[40];
+ sprintf(fname, "/tmp/perf-%d.map", getpid());
+ if (!(fp = fopen(fname, "w"))) return;
+ setlinebuf(fp);
+ }
+ fprintf(fp, "%lx %x TRACE_%d::%s:%u\n",
+ (long)T->mcode, T->szmcode, T->traceno, name, lineno);
+}
+#endif
+
+/* Save current trace by copying and compacting it. */
+static void trace_save(jit_State *J)
+{
+ size_t sztr = ((sizeof(GCtrace)+7)&~7);
+ size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
+ size_t sz = sztr + szins +
+ J->cur.nsnap*sizeof(SnapShot) +
+ J->cur.nsnapmap*sizeof(SnapEntry);
+ GCtrace *T = lj_mem_newt(J->L, (MSize)sz, GCtrace);
+ char *p = (char *)T + sztr;
+ memcpy(T, &J->cur, sizeof(GCtrace));
+ setgcrefr(T->nextgc, J2G(J)->gc.root);
+ setgcrefp(J2G(J)->gc.root, T);
+ newwhite(J2G(J), T);
+ T->gct = ~LJ_TTRACE;
+ T->ir = (IRIns *)p - J->cur.nk;
+ memcpy(p, J->cur.ir+J->cur.nk, szins);
+ p += szins;
+ TRACE_APPENDVEC(snap, nsnap, SnapShot)
+ TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
+ J->cur.traceno = 0;
+ setgcrefp(J->trace[T->traceno], T);
+ lj_gc_barriertrace(J2G(J), T->traceno);
+ lj_gdbjit_addtrace(J, T);
+#ifdef LUAJIT_USE_PERFTOOLS
+ perftools_addtrace(T);
+#endif
+}
+
+void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T)
+{
+ jit_State *J = G2J(g);
+ if (T->traceno) {
+ lj_gdbjit_deltrace(J, T);
+ if (T->traceno < J->freetrace)
+ J->freetrace = T->traceno;
+ setgcrefnull(J->trace[T->traceno]);
+ }
+ lj_mem_free(g, T,
+ ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
+ T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
+}
+
+/* Re-enable compiling a prototype by unpatching any modified bytecode. */
+void lj_trace_reenableproto(GCproto *pt)
+{
+ if ((pt->flags & PROTO_ILOOP)) {
+ BCIns *bc = proto_bc(pt);
+ BCPos i, sizebc = pt->sizebc;;
+ pt->flags &= ~PROTO_ILOOP;
+ if (bc_op(bc[0]) == BC_IFUNCF)
+ setbc_op(&bc[0], BC_FUNCF);
+ for (i = 1; i < sizebc; i++) {
+ BCOp op = bc_op(bc[i]);
+ if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP)
+ setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP);
+ }
+ }
+}
+
+/* Unpatch the bytecode modified by a root trace. */
+static void trace_unpatch(jit_State *J, GCtrace *T)
+{
+ BCOp op = bc_op(T->startins);
+ BCIns *pc = mref(T->startpc, BCIns);
+ UNUSED(J);
+ if (op == BC_JMP)
+ return; /* No need to unpatch branches in parent traces (yet). */
+ switch (bc_op(*pc)) {
+ case BC_JFORL:
+ lua_assert(traceref(J, bc_d(*pc)) == T);
+ *pc = T->startins;
+ pc += bc_j(T->startins);
+ lua_assert(bc_op(*pc) == BC_JFORI);
+ setbc_op(pc, BC_FORI);
+ break;
+ case BC_JITERL:
+ case BC_JLOOP:
+ lua_assert(op == BC_ITERL || op == BC_LOOP || bc_isret(op));
+ *pc = T->startins;
+ break;
+ case BC_JMP:
+ lua_assert(op == BC_ITERL);
+ pc += bc_j(*pc)+2;
+ if (bc_op(*pc) == BC_JITERL) {
+ lua_assert(traceref(J, bc_d(*pc)) == T);
+ *pc = T->startins;
+ }
+ break;
+ case BC_JFUNCF:
+ lua_assert(op == BC_FUNCF);
+ *pc = T->startins;
+ break;
+ default: /* Already unpatched. */
+ break;
+ }
+}
+
+/* Flush a root trace. */
+static void trace_flushroot(jit_State *J, GCtrace *T)
+{
+ GCproto *pt = &gcref(T->startpt)->pt;
+ lua_assert(T->root == 0 && pt != NULL);
+ /* First unpatch any modified bytecode. */
+ trace_unpatch(J, T);
+ /* Unlink root trace from chain anchored in prototype. */
+ if (pt->trace == T->traceno) { /* Trace is first in chain. Easy. */
+ pt->trace = T->nextroot;
+ } else if (pt->trace) { /* Otherwise search in chain of root traces. */
+ GCtrace *T2 = traceref(J, pt->trace);
+ if (T2) {
+ for (; T2->nextroot; T2 = traceref(J, T2->nextroot))
+ if (T2->nextroot == T->traceno) {
+ T2->nextroot = T->nextroot; /* Unlink from chain. */
+ break;
+ }
+ }
+ }
+}
+
+/* Flush a trace. Only root traces are considered. */
+void lj_trace_flush(jit_State *J, TraceNo traceno)
+{
+ if (traceno > 0 && traceno < J->sizetrace) {
+ GCtrace *T = traceref(J, traceno);
+ if (T && T->root == 0)
+ trace_flushroot(J, T);
+ }
+}
+
+/* Flush all traces associated with a prototype. */
+void lj_trace_flushproto(global_State *g, GCproto *pt)
+{
+ while (pt->trace != 0)
+ trace_flushroot(G2J(g), traceref(G2J(g), pt->trace));
+}
+
+/* Flush all traces. */
+int lj_trace_flushall(lua_State *L)
+{
+ jit_State *J = L2J(L);
+ ptrdiff_t i;
+ if ((J2G(J)->hookmask & HOOK_GC))
+ return 1;
+ for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) {
+ GCtrace *T = traceref(J, i);
+ if (T) {
+ if (T->root == 0)
+ trace_flushroot(J, T);
+ lj_gdbjit_deltrace(J, T);
+ T->traceno = 0;
+ setgcrefnull(J->trace[i]);
+ }
+ }
+ J->cur.traceno = 0;
+ J->freetrace = 0;
+ /* Clear penalty cache. */
+ memset(J->penalty, 0, sizeof(J->penalty));
+ /* Free the whole machine code and invalidate all exit stub groups. */
+ lj_mcode_free(J);
+ memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
+ lj_vmevent_send(L, TRACE,
+ setstrV(L, L->top++, lj_str_newlit(L, "flush"));
+ );
+ return 0;
+}
+
+/* Initialize JIT compiler state. */
+void lj_trace_initstate(global_State *g)
+{
+ jit_State *J = G2J(g);
+ TValue *tv;
+ /* Initialize SIMD constants. */
+ tv = LJ_KSIMD(J, LJ_KSIMD_ABS);
+ tv[0].u64 = U64x(7fffffff,ffffffff);
+ tv[1].u64 = U64x(7fffffff,ffffffff);
+ tv = LJ_KSIMD(J, LJ_KSIMD_NEG);
+ tv[0].u64 = U64x(80000000,00000000);
+ tv[1].u64 = U64x(80000000,00000000);
+}
+
+/* Free everything associated with the JIT compiler state. */
+void lj_trace_freestate(global_State *g)
+{
+ jit_State *J = G2J(g);
+#ifdef LUA_USE_ASSERT
+ { /* This assumes all traces have already been freed. */
+ ptrdiff_t i;
+ for (i = 1; i < (ptrdiff_t)J->sizetrace; i++)
+ lua_assert(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL);
+ }
+#endif
+ lj_mcode_free(J);
+ lj_ir_k64_freeall(J);
+ lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
+ lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
+ lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
+ lj_mem_freevec(g, J->trace, J->sizetrace, GCRef);
+}
+
+/* -- Penalties and blacklisting ------------------------------------------ */
+
+/* Blacklist a bytecode instruction. */
+static void blacklist_pc(GCproto *pt, BCIns *pc)
+{
+ setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP);
+ pt->flags |= PROTO_ILOOP;
+}
+
+/* Penalize a bytecode instruction. */
+static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
+{
+ uint32_t i, val = PENALTY_MIN;
+ for (i = 0; i < PENALTY_SLOTS; i++)
+ if (mref(J->penalty[i].pc, const BCIns) == pc) { /* Cache slot found? */
+ /* First try to bump its hotcount several times. */
+ val = ((uint32_t)J->penalty[i].val << 1) +
+ LJ_PRNG_BITS(J, PENALTY_RNDBITS);
+ if (val > PENALTY_MAX) {
+ blacklist_pc(pt, pc); /* Blacklist it, if that didn't help. */
+ return;
+ }
+ goto setpenalty;
+ }
+ /* Assign a new penalty cache slot. */
+ i = J->penaltyslot;
+ J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
+ setmref(J->penalty[i].pc, pc);
+setpenalty:
+ J->penalty[i].val = (uint16_t)val;
+ J->penalty[i].reason = e;
+ hotcount_set(J2GG(J), pc+1, val);
+}
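+/* Note: each abort roughly doubles the cached penalty value (plus a few
+** random bits) and writes it back as the hotcount, delaying the next
+** recording attempt. Once the value exceeds PENALTY_MAX, blacklist_pc()
+** patches the instruction to its ILOOP variant so it stops triggering
+** hotcount events altogether.
+*/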
+
+/* -- Trace compiler state machine ---------------------------------------- */
+
+/* Start tracing. */
+static void trace_start(jit_State *J)
+{
+ lua_State *L;
+ TraceNo traceno;
+
+ if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? */
+ if (J->parent == 0) {
+ /* Lazy bytecode patching to disable hotcount events. */
+ lua_assert(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
+ bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF);
+ setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
+ J->pt->flags |= PROTO_ILOOP;
+ }
+ J->state = LJ_TRACE_IDLE; /* Silently ignored. */
+ return;
+ }
+
+ /* Get a new trace number. */
+ traceno = trace_findfree(J);
+ if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */
+ lua_assert((J2G(J)->hookmask & HOOK_GC) == 0);
+ lj_trace_flushall(J->L);
+ J->state = LJ_TRACE_IDLE; /* Silently ignored. */
+ return;
+ }
+ setgcrefp(J->trace[traceno], &J->cur);
+
+ /* Setup enough of the current trace to be able to send the vmevent. */
+ memset(&J->cur, 0, sizeof(GCtrace));
+ J->cur.traceno = traceno;
+ J->cur.nins = J->cur.nk = REF_BASE;
+ J->cur.ir = J->irbuf;
+ J->cur.snap = J->snapbuf;
+ J->cur.snapmap = J->snapmapbuf;
+ J->mergesnap = 0;
+ J->needsnap = 0;
+ J->bcskip = 0;
+ J->guardemit.irt = 0;
+ J->postproc = LJ_POST_NONE;
+ lj_resetsplit(J);
+ setgcref(J->cur.startpt, obj2gco(J->pt));
+
+ L = J->L;
+ lj_vmevent_send(L, TRACE,
+ setstrV(L, L->top++, lj_str_newlit(L, "start"));
+ setintV(L->top++, traceno);
+ setfuncV(L, L->top++, J->fn);
+ setintV(L->top++, proto_bcpos(J->pt, J->pc));
+ if (J->parent) {
+ setintV(L->top++, J->parent);
+ setintV(L->top++, J->exitno);
+ }
+ );
+ lj_record_setup(J);
+}
+
+/* Stop tracing. */
+static void trace_stop(jit_State *J)
+{
+ BCIns *pc = mref(J->cur.startpc, BCIns);
+ BCOp op = bc_op(J->cur.startins);
+ GCproto *pt = &gcref(J->cur.startpt)->pt;
+ TraceNo traceno = J->cur.traceno;
+ lua_State *L;
+
+ switch (op) {
+ case BC_FORL:
+ setbc_op(pc+bc_j(J->cur.startins), BC_JFORI); /* Patch FORI, too. */
+ /* fallthrough */
+ case BC_LOOP:
+ case BC_ITERL:
+ case BC_FUNCF:
+ /* Patch bytecode of starting instruction in root trace. */
+ setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
+ setbc_d(pc, traceno);
+ addroot:
+ /* Add to root trace chain in prototype. */
+ J->cur.nextroot = pt->trace;
+ pt->trace = (TraceNo1)traceno;
+ break;
+ case BC_RET:
+ case BC_RET0:
+ case BC_RET1:
+ *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno);
+ goto addroot;
+ case BC_JMP:
+ /* Patch exit branch in parent to side trace entry. */
+ lua_assert(J->parent != 0 && J->cur.root != 0);
+ lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode);
+ /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
+ traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE;
+ /* Add to side trace chain in root trace. */
+ {
+ GCtrace *root = traceref(J, J->cur.root);
+ root->nchild++;
+ J->cur.nextside = root->nextside;
+ root->nextside = (TraceNo1)traceno;
+ }
+ break;
+ default:
+ lua_assert(0);
+ break;
+ }
+
+ /* Commit new mcode only after all patching is done. */
+ lj_mcode_commit(J, J->cur.mcode);
+ J->postproc = LJ_POST_NONE;
+ trace_save(J);
+
+ L = J->L;
+ lj_vmevent_send(L, TRACE,
+ setstrV(L, L->top++, lj_str_newlit(L, "stop"));
+ setintV(L->top++, traceno);
+ );
+}
+
+/* Start a new root trace for down-recursion. */
+static int trace_downrec(jit_State *J)
+{
+ /* Restart recording at the return instruction. */
+ lua_assert(J->pt != NULL);
+ lua_assert(bc_isret(bc_op(*J->pc)));
+ if (bc_op(*J->pc) == BC_RETM)
+ return 0; /* NYI: down-recursion with RETM. */
+ J->parent = 0;
+ J->exitno = 0;
+ J->state = LJ_TRACE_RECORD;
+ trace_start(J);
+ return 1;
+}
+
+/* Abort tracing. */
+static int trace_abort(jit_State *J)
+{
+ lua_State *L = J->L;
+ TraceError e = LJ_TRERR_RECERR;
+ TraceNo traceno;
+
+ J->postproc = LJ_POST_NONE;
+ lj_mcode_abort(J);
+ if (tvisnumber(L->top-1))
+ e = (TraceError)numberVint(L->top-1);
+ if (e == LJ_TRERR_MCODELM) {
+ L->top--; /* Remove error object */
+ J->state = LJ_TRACE_ASM;
+ return 1; /* Retry ASM with new MCode area. */
+ }
+ /* Penalize or blacklist starting bytecode instruction. */
+ if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins)))
+ penalty_pc(J, &gcref(J->cur.startpt)->pt, mref(J->cur.startpc, BCIns), e);
+
+ /* Is there anything to abort? */
+ traceno = J->cur.traceno;
+ if (traceno) {
+ ptrdiff_t errobj = savestack(L, L->top-1); /* Stack may be resized. */
+ J->cur.link = 0;
+ J->cur.linktype = LJ_TRLINK_NONE;
+ lj_vmevent_send(L, TRACE,
+ TValue *frame;
+ const BCIns *pc;
+ GCfunc *fn;
+ setstrV(L, L->top++, lj_str_newlit(L, "abort"));
+ setintV(L->top++, traceno);
+ /* Find original Lua function call to generate a better error message. */
+ frame = J->L->base-1;
+ pc = J->pc;
+ while (!isluafunc(frame_func(frame))) {
+ pc = (frame_iscont(frame) ? frame_contpc(frame) : frame_pc(frame)) - 1;
+ frame = frame_prev(frame);
+ }
+ fn = frame_func(frame);
+ setfuncV(L, L->top++, fn);
+ setintV(L->top++, proto_bcpos(funcproto(fn), pc));
+ copyTV(L, L->top++, restorestack(L, errobj));
+ copyTV(L, L->top++, &J->errinfo);
+ );
+ /* Drop aborted trace after the vmevent (which may still access it). */
+ setgcrefnull(J->trace[traceno]);
+ if (traceno < J->freetrace)
+ J->freetrace = traceno;
+ J->cur.traceno = 0;
+ }
+ L->top--; /* Remove error object */
+ if (e == LJ_TRERR_DOWNREC)
+ return trace_downrec(J);
+ else if (e == LJ_TRERR_MCODEAL)
+ lj_trace_flushall(L);
+ return 0;
+}
+
+/* Perform pending re-patch of a bytecode instruction. */
+static LJ_AINLINE void trace_pendpatch(jit_State *J, int force)
+{
+ if (LJ_UNLIKELY(J->patchpc)) {
+ if (force || J->bcskip == 0) {
+ *J->patchpc = J->patchins;
+ J->patchpc = NULL;
+ } else {
+ J->bcskip = 0;
+ }
+ }
+}
+
+/* State machine for the trace compiler. Protected callback. */
+static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ jit_State *J = (jit_State *)ud;
+ UNUSED(dummy);
+ do {
+ retry:
+ switch (J->state) {
+ case LJ_TRACE_START:
+ J->state = LJ_TRACE_RECORD; /* trace_start() may change state. */
+ trace_start(J);
+ lj_dispatch_update(J2G(J));
+ break;
+
+ case LJ_TRACE_RECORD:
+ trace_pendpatch(J, 0);
+ setvmstate(J2G(J), RECORD);
+ lj_vmevent_send_(L, RECORD,
+ /* Save/restore tmptv state for trace recorder. */
+ TValue savetv = J2G(J)->tmptv;
+ TValue savetv2 = J2G(J)->tmptv2;
+ setintV(L->top++, J->cur.traceno);
+ setfuncV(L, L->top++, J->fn);
+ setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1);
+ setintV(L->top++, J->framedepth);
+ ,
+ J2G(J)->tmptv = savetv;
+ J2G(J)->tmptv2 = savetv2;
+ );
+ lj_record_ins(J);
+ break;
+
+ case LJ_TRACE_END:
+ trace_pendpatch(J, 1);
+ J->loopref = 0;
+ if ((J->flags & JIT_F_OPT_LOOP) &&
+ J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) {
+ setvmstate(J2G(J), OPT);
+ lj_opt_dce(J);
+ if (lj_opt_loop(J)) { /* Loop optimization failed? */
+ J->cur.link = 0;
+ J->cur.linktype = LJ_TRLINK_NONE;
+ J->loopref = J->cur.nins;
+ J->state = LJ_TRACE_RECORD; /* Try to continue recording. */
+ break;
+ }
+ J->loopref = J->chain[IR_LOOP]; /* Needed by assembler. */
+ }
+ lj_opt_split(J);
+ J->state = LJ_TRACE_ASM;
+ break;
+
+ case LJ_TRACE_ASM:
+ setvmstate(J2G(J), ASM);
+ lj_asm_trace(J, &J->cur);
+ trace_stop(J);
+ setvmstate(J2G(J), INTERP);
+ J->state = LJ_TRACE_IDLE;
+ lj_dispatch_update(J2G(J));
+ return NULL;
+
+ default: /* Trace aborted asynchronously. */
+ setintV(L->top++, (int32_t)LJ_TRERR_RECERR);
+ /* fallthrough */
+ case LJ_TRACE_ERR:
+ trace_pendpatch(J, 1);
+ if (trace_abort(J))
+ goto retry;
+ setvmstate(J2G(J), INTERP);
+ J->state = LJ_TRACE_IDLE;
+ lj_dispatch_update(J2G(J));
+ return NULL;
+ }
+ } while (J->state > LJ_TRACE_RECORD);
+ return NULL;
+}
+
+/* -- Event handling ------------------------------------------------------ */
+
+/* A bytecode instruction is about to be executed. Record it. */
+void lj_trace_ins(jit_State *J, const BCIns *pc)
+{
+ /* Note: J->L must already be set. pc is the true bytecode PC here. */
+ J->pc = pc;
+ J->fn = curr_func(J->L);
+ J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL;
+ while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0)
+ J->state = LJ_TRACE_ERR;
+}
+
+/* A hotcount triggered. Start recording a root trace. */
+void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
+{
+ /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
+ ERRNO_SAVE
+ /* Reset hotcount. */
+ hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP);
+ /* Only start a new trace if not recording or inside __gc call or vmevent. */
+ if (J->state == LJ_TRACE_IDLE &&
+ !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
+ J->parent = 0; /* Root trace. */
+ J->exitno = 0;
+ J->state = LJ_TRACE_START;
+ lj_trace_ins(J, pc-1);
+ }
+ ERRNO_RESTORE
+}
+
+/* Check for a hot side exit. If yes, start recording a side trace. */
+static void trace_hotside(jit_State *J, const BCIns *pc)
+{
+ SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
+ if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
+ snap->count != SNAPCOUNT_DONE &&
+ ++snap->count >= J->param[JIT_P_hotexit]) {
+ lua_assert(J->state == LJ_TRACE_IDLE);
+ /* J->parent is non-zero for a side trace. */
+ J->state = LJ_TRACE_START;
+ lj_trace_ins(J, pc);
+ }
+}
+
+/* Tiny struct to pass data to protected call. */
+typedef struct ExitDataCP {
+ jit_State *J;
+ void *exptr; /* Pointer to exit state. */
+ const BCIns *pc; /* Restart interpreter at this PC. */
+} ExitDataCP;
+
+/* Need to protect lj_snap_restore because it may throw. */
+static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ ExitDataCP *exd = (ExitDataCP *)ud;
+ cframe_errfunc(L->cframe) = -1; /* Inherit error function. */
+ exd->pc = lj_snap_restore(exd->J, exd->exptr);
+ UNUSED(dummy);
+ return NULL;
+}
+
+#ifndef LUAJIT_DISABLE_VMEVENT
+/* Push all registers from exit state. */
+static void trace_exit_regs(lua_State *L, ExitState *ex)
+{
+ int32_t i;
+ setintV(L->top++, RID_NUM_GPR);
+ setintV(L->top++, RID_NUM_FPR);
+ for (i = 0; i < RID_NUM_GPR; i++) {
+ if (sizeof(ex->gpr[i]) == sizeof(int32_t))
+ setintV(L->top++, (int32_t)ex->gpr[i]);
+ else
+ setnumV(L->top++, (lua_Number)ex->gpr[i]);
+ }
+#if !LJ_SOFTFP
+ for (i = 0; i < RID_NUM_FPR; i++) {
+ setnumV(L->top, ex->fpr[i]);
+ if (LJ_UNLIKELY(tvisnan(L->top)))
+ setnanV(L->top);
+ L->top++;
+ }
+#endif
+}
+#endif
+
+#ifdef EXITSTATE_PCREG
+/* Determine trace number from pc of exit instruction. */
+static TraceNo trace_exit_find(jit_State *J, MCode *pc)
+{
+ TraceNo traceno;
+ for (traceno = 1; traceno < J->sizetrace; traceno++) {
+ GCtrace *T = traceref(J, traceno);
+ if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode))
+ return traceno;
+ }
+ lua_assert(0);
+ return 0;
+}
+#endif
+
+/* A trace exited. Restore interpreter state. */
+int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
+{
+ ERRNO_SAVE
+ lua_State *L = J->L;
+ ExitState *ex = (ExitState *)exptr;
+ ExitDataCP exd;
+ int errcode;
+ const BCIns *pc;
+ void *cf;
+ GCtrace *T;
+#ifdef EXITSTATE_PCREG
+ J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]);
+#endif
+ T = traceref(J, J->parent); UNUSED(T);
+#ifdef EXITSTATE_CHECKEXIT
+ if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. */
+ lua_assert(T->root != 0);
+ J->exitno = T->ir[REF_BASE].op2;
+ J->parent = T->ir[REF_BASE].op1;
+ T = traceref(J, J->parent);
+ }
+#endif
+ lua_assert(T != NULL && J->exitno < T->nsnap);
+ exd.J = J;
+ exd.exptr = exptr;
+ errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
+ if (errcode)
+ return -errcode; /* Return negated error code. */
+
+ lj_vmevent_send(L, TEXIT,
+ lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
+ setintV(L->top++, J->parent);
+ setintV(L->top++, J->exitno);
+ trace_exit_regs(L, ex);
+ );
+
+ pc = exd.pc;
+ cf = cframe_raw(L->cframe);
+ setcframe_pc(cf, pc);
+ if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
+ if (!(G(L)->hookmask & HOOK_GC))
+ lj_gc_step(L); /* Exited because of GC: drive GC forward. */
+ } else {
+ trace_hotside(J, pc);
+ }
+ if (bc_op(*pc) == BC_JLOOP) {
+ BCIns *retpc = &traceref(J, bc_d(*pc))->startins;
+ if (bc_isret(bc_op(*retpc))) {
+ if (J->state == LJ_TRACE_RECORD) {
+ J->patchins = *pc;
+ J->patchpc = (BCIns *)pc;
+ *J->patchpc = *retpc;
+ J->bcskip = 1;
+ } else {
+ pc = retpc;
+ setcframe_pc(cf, pc);
+ }
+ }
+ }
+ /* Return MULTRES or 0. */
+ ERRNO_RESTORE
+ switch (bc_op(*pc)) {
+ case BC_CALLM: case BC_CALLMT:
+ return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc));
+ case BC_RETM:
+ return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
+ case BC_TSETM:
+ return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc));
+ default:
+ if (bc_op(*pc) >= BC_FUNCF)
+ return (int)((BCReg)(L->top - L->base) + 1);
+ return 0;
+ }
+}
+
+#endif
diff --git a/src/LuaJIT/src/lj_trace.h b/src/LuaJIT/src/lj_trace.h
new file mode 100644
index 000000000..6a9043914
--- /dev/null
+++ b/src/LuaJIT/src/lj_trace.h
@@ -0,0 +1,53 @@
+/*
+** Trace management.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TRACE_H
+#define _LJ_TRACE_H
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+#include "lj_jit.h"
+#include "lj_dispatch.h"
+
+/* Trace errors. */
+typedef enum {
+#define TREDEF(name, msg) LJ_TRERR_##name,
+#include "lj_traceerr.h"
+ LJ_TRERR__MAX
+} TraceError;
+
+LJ_FUNC_NORET void lj_trace_err(jit_State *J, TraceError e);
+LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e);
+
+/* Trace management. */
+LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T);
+LJ_FUNC void lj_trace_reenableproto(GCproto *pt);
+LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt);
+LJ_FUNC void lj_trace_flush(jit_State *J, TraceNo traceno);
+LJ_FUNC int lj_trace_flushall(lua_State *L);
+LJ_FUNC void lj_trace_initstate(global_State *g);
+LJ_FUNC void lj_trace_freestate(global_State *g);
+
+/* Event handling. */
+LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc);
+LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc);
+LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr);
+
+/* Signal asynchronous abort of trace or end of trace. */
+#define lj_trace_abort(g) (G2J(g)->state &= ~LJ_TRACE_ACTIVE)
+#define lj_trace_end(J) (J->state = LJ_TRACE_END)
+
+#else
+
+#define lj_trace_flushall(L) (UNUSED(L), 0)
+#define lj_trace_initstate(g) UNUSED(g)
+#define lj_trace_freestate(g) UNUSED(g)
+#define lj_trace_abort(g) UNUSED(g)
+#define lj_trace_end(J) UNUSED(J)
+
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_traceerr.h b/src/LuaJIT/src/lj_traceerr.h
new file mode 100644
index 000000000..015a7fde7
--- /dev/null
+++ b/src/LuaJIT/src/lj_traceerr.h
@@ -0,0 +1,61 @@
+/*
+** Trace compiler error messages.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* This file may be included multiple times with different TREDEF macros. */
+
+/* Recording. */
+TREDEF(RECERR, "error thrown or hook called during recording")
+TREDEF(TRACEOV, "trace too long")
+TREDEF(STACKOV, "trace too deep")
+TREDEF(SNAPOV, "too many snapshots")
+TREDEF(BLACKL, "blacklisted")
+TREDEF(NYIBC, "NYI: bytecode %d")
+
+/* Recording loop ops. */
+TREDEF(LLEAVE, "leaving loop in root trace")
+TREDEF(LINNER, "inner loop in root trace")
+TREDEF(LUNROLL, "loop unroll limit reached")
+
+/* Recording calls/returns. */
+TREDEF(BADTYPE, "bad argument type")
+TREDEF(CJITOFF, "call to JIT-disabled function")
+TREDEF(CUNROLL, "call unroll limit reached")
+TREDEF(DOWNREC, "down-recursion, restarting")
+TREDEF(NYICF, "NYI: C function %p")
+TREDEF(NYIFF, "NYI: FastFunc %s")
+TREDEF(NYIFFU, "NYI: unsupported variant of FastFunc %s")
+TREDEF(NYIRETL, "NYI: return to lower frame")
+
+/* Recording indexed load/store. */
+TREDEF(STORENN, "store with nil or NaN key")
+TREDEF(NOMM, "missing metamethod")
+TREDEF(IDXLOOP, "looping index lookup")
+TREDEF(NYITMIX, "NYI: mixed sparse/dense table")
+
+/* Recording C data operations. */
+TREDEF(NOCACHE, "symbol not in cache")
+TREDEF(NYICONV, "NYI: unsupported C type conversion")
+TREDEF(NYICALL, "NYI: unsupported C function type")
+
+/* Optimizations. */
+TREDEF(GFAIL, "guard would always fail")
+TREDEF(PHIOV, "too many PHIs")
+TREDEF(TYPEINS, "persistent type instability")
+
+/* Assembler. */
+TREDEF(MCODEAL, "failed to allocate mcode memory")
+TREDEF(MCODEOV, "machine code too long")
+TREDEF(MCODELM, "hit mcode limit (retrying)")
+TREDEF(SPILLOV, "too many spill slots")
+TREDEF(BADRA, "inconsistent register allocation")
+TREDEF(NYIIR, "NYI: cannot assemble IR instruction %d")
+TREDEF(NYIPHI, "NYI: PHI shuffling too complex")
+TREDEF(NYICOAL, "NYI: register coalescing too complex")
+
+#undef TREDEF
+
+/* Detecting unused error messages:
+ awk -F, '/^TREDEF/ { gsub(/TREDEF./, ""); printf "grep -q LJ_TRERR_%s *.[ch] || echo %s\n", $1, $1}' lj_traceerr.h | sh
+*/
diff --git a/src/LuaJIT/src/lj_udata.c b/src/LuaJIT/src/lj_udata.c
new file mode 100644
index 000000000..5e11afbe6
--- /dev/null
+++ b/src/LuaJIT/src/lj_udata.c
@@ -0,0 +1,34 @@
+/*
+** Userdata handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_udata_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_udata.h"
+
+GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env)
+{
+ GCudata *ud = lj_mem_newt(L, sizeof(GCudata) + sz, GCudata);
+ global_State *g = G(L);
+ newwhite(g, ud); /* Not finalized. */
+ ud->gct = ~LJ_TUDATA;
+ ud->udtype = UDTYPE_USERDATA;
+ ud->len = sz;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcrefnull(ud->metatable);
+ setgcref(ud->env, obj2gco(env));
+ /* Chain to userdata list (after main thread). */
+ setgcrefr(ud->nextgc, mainthread(g)->nextgc);
+ setgcref(mainthread(g)->nextgc, obj2gco(ud));
+ return ud;
+}
+
+void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud)
+{
+ lj_mem_free(g, ud, sizeudata(ud));
+}
+
diff --git a/src/LuaJIT/src/lj_udata.h b/src/LuaJIT/src/lj_udata.h
new file mode 100644
index 000000000..419da84f4
--- /dev/null
+++ b/src/LuaJIT/src/lj_udata.h
@@ -0,0 +1,14 @@
+/*
+** Userdata handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_UDATA_H
+#define _LJ_UDATA_H
+
+#include "lj_obj.h"
+
+LJ_FUNC GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env);
+LJ_FUNC void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud);
+
+#endif
diff --git a/src/LuaJIT/src/lj_vm.h b/src/LuaJIT/src/lj_vm.h
new file mode 100644
index 000000000..2cf041848
--- /dev/null
+++ b/src/LuaJIT/src/lj_vm.h
@@ -0,0 +1,105 @@
+/*
+** Assembler VM interface definitions.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_VM_H
+#define _LJ_VM_H
+
+#include "lj_obj.h"
+
+/* Entry points for ASM parts of VM. */
+LJ_ASMF void lj_vm_call(lua_State *L, TValue *base, int nres1);
+LJ_ASMF int lj_vm_pcall(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
+typedef TValue *(*lua_CPFunction)(lua_State *L, lua_CFunction func, void *ud);
+LJ_ASMF int lj_vm_cpcall(lua_State *L, lua_CFunction func, void *ud,
+ lua_CPFunction cp);
+LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
+LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode);
+LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe);
+LJ_ASMF void lj_vm_unwind_c_eh(void);
+LJ_ASMF void lj_vm_unwind_ff_eh(void);
+#if LJ_TARGET_X86ORX64
+LJ_ASMF void lj_vm_unwind_rethrow(void);
+#endif
+
+/* Miscellaneous functions. */
+#if LJ_TARGET_X86ORX64
+LJ_ASMF int lj_vm_cpuid(uint32_t f, uint32_t res[4]);
+#endif
+#if LJ_TARGET_PPC
+void lj_vm_cachesync(void *start, void *end);
+#endif
+LJ_ASMF double lj_vm_foldarith(double x, double y, int op);
+#if LJ_HASJIT
+LJ_ASMF double lj_vm_foldfpm(double x, int op);
+#endif
+#if !LJ_ARCH_HASFPU
+/* Declared in lj_obj.h: LJ_ASMF int32_t lj_vm_tobit(double x); */
+#endif
+
+/* Dispatch targets for recording and hooks. */
+LJ_ASMF void lj_vm_record(void);
+LJ_ASMF void lj_vm_inshook(void);
+LJ_ASMF void lj_vm_rethook(void);
+LJ_ASMF void lj_vm_callhook(void);
+
+/* Trace exit handling. */
+LJ_ASMF void lj_vm_exit_handler(void);
+LJ_ASMF void lj_vm_exit_interp(void);
+
+/* Internal math helper functions. */
+#if LJ_TARGET_X86ORX64
+#define lj_vm_floor(x) floor(x)
+#define lj_vm_ceil(x) ceil(x)
+#else
+LJ_ASMF double lj_vm_floor(double);
+LJ_ASMF double lj_vm_ceil(double);
+#endif
+
+#if LJ_HASJIT
+#if LJ_TARGET_X86ORX64
+LJ_ASMF void lj_vm_floor_sse(void);
+LJ_ASMF void lj_vm_ceil_sse(void);
+LJ_ASMF void lj_vm_trunc_sse(void);
+LJ_ASMF void lj_vm_exp_x87(void);
+LJ_ASMF void lj_vm_exp2_x87(void);
+LJ_ASMF void lj_vm_pow_sse(void);
+LJ_ASMF void lj_vm_powi_sse(void);
+#else
+LJ_ASMF double lj_vm_trunc(double);
+LJ_ASMF double lj_vm_powi(double, int32_t);
+#ifdef LUAJIT_NO_LOG2
+LJ_ASMF double lj_vm_log2(double);
+#else
+#define lj_vm_log2 log2
+#endif
+#ifdef LUAJIT_NO_EXP2
+LJ_ASMF double lj_vm_exp2(double);
+#else
+#define lj_vm_exp2 exp2
+#endif
+#endif
+LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t);
+#if LJ_HASFFI
+LJ_ASMF int lj_vm_errno(void);
+#endif
+#endif
+
+/* Continuations for metamethods. */
+LJ_ASMF void lj_cont_cat(void); /* Continue with concatenation. */
+LJ_ASMF void lj_cont_ra(void); /* Store result in RA from instruction. */
+LJ_ASMF void lj_cont_nop(void); /* Do nothing, just continue execution. */
+LJ_ASMF void lj_cont_condt(void); /* Branch if result is true. */
+LJ_ASMF void lj_cont_condf(void); /* Branch if result is false. */
+LJ_ASMF void lj_cont_hook(void); /* Continue from hook yield. */
+
+enum { LJ_CONT_TAILCALL, LJ_CONT_FFI_CALLBACK }; /* Special continuations. */
+
+/* Start of the ASM code. */
+LJ_ASMF char lj_vm_asm_begin[];
+
+/* Bytecode offsets are relative to lj_vm_asm_begin. */
+#define makeasmfunc(ofs) ((ASMFunction)(lj_vm_asm_begin + (ofs)))
+
+#endif
diff --git a/src/LuaJIT/src/lj_vmevent.c b/src/LuaJIT/src/lj_vmevent.c
new file mode 100644
index 000000000..5a74a5b02
--- /dev/null
+++ b/src/LuaJIT/src/lj_vmevent.c
@@ -0,0 +1,56 @@
+/*
+** VM event handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <stdio.h>
+
+#define lj_vmevent_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+
+ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev)
+{
+ global_State *g = G(L);
+ GCstr *s = lj_str_newlit(L, LJ_VMEVENTS_REGKEY);
+ cTValue *tv = lj_tab_getstr(tabV(registry(L)), s);
+ if (tvistab(tv)) {
+ int hash = VMEVENT_HASH(ev);
+ tv = lj_tab_getint(tabV(tv), hash);
+ if (tv && tvisfunc(tv)) {
+ lj_state_checkstack(L, LUA_MINSTACK);
+ setfuncV(L, L->top++, funcV(tv));
+ return savestack(L, L->top);
+ }
+ }
+ g->vmevmask &= ~VMEVENT_MASK(ev); /* No handler: cache this fact. */
+ return 0;
+}
+
+void lj_vmevent_call(lua_State *L, ptrdiff_t argbase)
+{
+ global_State *g = G(L);
+ uint8_t oldmask = g->vmevmask;
+ uint8_t oldh = hook_save(g);
+ int status;
+ g->vmevmask = 0; /* Disable all events. */
+ hook_vmevent(g);
+ status = lj_vm_pcall(L, restorestack(L, argbase), 0+1, 0);
+ if (LJ_UNLIKELY(status)) {
+ /* Really shouldn't use stderr here, but where else to complain? */
+ L->top--;
+ fprintf(stderr, "VM handler failed: %s\n",
+ tvisstr(L->top) ? strVdata(L->top) : "?");
+ }
+ hook_restore(g, oldh);
+ if (g->vmevmask != VMEVENT_NOCACHE)
+ g->vmevmask = oldmask; /* Restore event mask, but not if not modified. */
+}
+
diff --git a/src/LuaJIT/src/lj_vmevent.h b/src/LuaJIT/src/lj_vmevent.h
new file mode 100644
index 000000000..4aeacfa0f
--- /dev/null
+++ b/src/LuaJIT/src/lj_vmevent.h
@@ -0,0 +1,59 @@
+/*
+** VM event handling.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_VMEVENT_H
+#define _LJ_VMEVENT_H
+
+#include "lj_obj.h"
+
+/* Registry key for VM event handler table. */
+#define LJ_VMEVENTS_REGKEY "_VMEVENTS"
+#define LJ_VMEVENTS_HSIZE 4
+
+#define VMEVENT_MASK(ev) ((uint8_t)1 << ((int)(ev) & 7))
+#define VMEVENT_HASH(ev) ((int)(ev) & ~7)
+#define VMEVENT_HASHIDX(h) ((int)(h) << 3)
+#define VMEVENT_NOCACHE 255
+
+#define VMEVENT_DEF(name, hash) \
+ LJ_VMEVENT_##name##_, \
+ LJ_VMEVENT_##name = ((LJ_VMEVENT_##name##_) & 7)|((hash) << 3)
+
+/* VM event IDs. */
+typedef enum {
+ VMEVENT_DEF(BC, 0x00003883),
+ VMEVENT_DEF(TRACE, 0xb2d91467),
+ VMEVENT_DEF(RECORD, 0x9284bf4f),
+ VMEVENT_DEF(TEXIT, 0xb29df2b0),
+ LJ_VMEVENT__MAX
+} VMEvent;
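+/* The low 3 bits of an event ID hold its enumeration index (selecting a
+** bit in g->vmevmask via VMEVENT_MASK), while the upper bits hold the name
+** hash shifted by 3, which VMEVENT_HASH yields as the integer key for the
+** handler lookup in lj_vmevent_prepare().
+*/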
+
+#ifdef LUAJIT_DISABLE_VMEVENT
+#define lj_vmevent_send(L, ev, args) UNUSED(L)
+#define lj_vmevent_send_(L, ev, args, post) UNUSED(L)
+#else
+#define lj_vmevent_send(L, ev, args) \
+ if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
+ ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
+ if (argbase) { \
+ args \
+ lj_vmevent_call(L, argbase); \
+ } \
+ }
+#define lj_vmevent_send_(L, ev, args, post) \
+ if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
+ ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
+ if (argbase) { \
+ args \
+ lj_vmevent_call(L, argbase); \
+ post \
+ } \
+ }
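+/* Usage sketch: the args block only runs when a handler is registered and
+** pushes the handler's arguments onto the stack, e.g. (from lj_trace.c):
+**
+**   lj_vmevent_send(L, TRACE,
+**     setstrV(L, L->top++, lj_str_newlit(L, "flush"));
+**   );
+*/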
+
+LJ_FUNC ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev);
+LJ_FUNC void lj_vmevent_call(lua_State *L, ptrdiff_t argbase);
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/lj_vmmath.c b/src/LuaJIT/src/lj_vmmath.c
new file mode 100644
index 000000000..1416b600f
--- /dev/null
+++ b/src/LuaJIT/src/lj_vmmath.c
@@ -0,0 +1,142 @@
+/*
+** Math helper functions for assembler VM.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_vmmath_c
+#define LUA_CORE
+
+#include <errno.h>
+#include <math.h>
+
+#include "lj_obj.h"
+#if LJ_HASJIT || LJ_TARGET_MIPS
+#include "lj_ir.h"
+#endif
+#include "lj_vm.h"
+
+/* -- Helper functions for generated machine code ------------------------- */
+
+#if LJ_TARGET_X86ORX64
+/* Wrapper functions to avoid linker issues on OSX. */
+LJ_FUNCA double lj_vm_sinh(double x) { return sinh(x); }
+LJ_FUNCA double lj_vm_cosh(double x) { return cosh(x); }
+LJ_FUNCA double lj_vm_tanh(double x) { return tanh(x); }
+#endif
+
+#if LJ_TARGET_MIPS
+double lj_vm_foldarith(double x, double y, int op)
+{
+ switch (op) {
+ case IR_ADD - IR_ADD: return x+y; break;
+ case IR_SUB - IR_ADD: return x-y; break;
+ case IR_MUL - IR_ADD: return x*y; break;
+ case IR_DIV - IR_ADD: return x/y; break;
+ case IR_MOD - IR_ADD: return x-lj_vm_floor(x/y)*y; break;
+ case IR_POW - IR_ADD: return pow(x, y); break;
+ case IR_NEG - IR_ADD: return -x; break;
+ case IR_ABS - IR_ADD: return fabs(x); break;
+#if LJ_HASJIT
+ case IR_ATAN2 - IR_ADD: return atan2(x, y); break;
+ case IR_LDEXP - IR_ADD: return ldexp(x, (int)y); break;
+ case IR_MIN - IR_ADD: return x > y ? y : x; break;
+ case IR_MAX - IR_ADD: return x < y ? y : x; break;
+#endif
+ default: return x;
+ }
+}
+#endif
+
+#if LJ_HASJIT
+
+#ifdef LUAJIT_NO_LOG2
+double lj_vm_log2(double a)
+{
+ return log(a) * 1.4426950408889634074;
+}
+#endif
+
+#ifdef LUAJIT_NO_EXP2
+double lj_vm_exp2(double a)
+{
+ return exp(a * 0.6931471805599453);
+}
+#endif
+
+#if !(LJ_TARGET_ARM || LJ_TARGET_PPC)
+int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b)
+{
+ uint32_t y, ua, ub;
+ lua_assert(b != 0); /* This must be checked before using this function. */
+ ua = a < 0 ? (uint32_t)-a : (uint32_t)a;
+ ub = b < 0 ? (uint32_t)-b : (uint32_t)b;
+ y = ua % ub;
+ if (y != 0 && (a^b) < 0) y = y - ub;
+ if (((int32_t)y^b) < 0) y = (uint32_t)-(int32_t)y;
+ return (int32_t)y;
+}
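+/* Example: lj_vm_modi(-5, 3) == 1 and lj_vm_modi(5, -3) == -1; the result
+** takes the sign of the divisor, matching Lua's floored % semantics. */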
+#endif
+
+#if !LJ_TARGET_X86ORX64
+/* Unsigned x^k. */
+static double lj_vm_powui(double x, uint32_t k)
+{
+ double y;
+ lua_assert(k != 0);
+ for (; (k & 1) == 0; k >>= 1) x *= x;
+ y = x;
+ if ((k >>= 1) != 0) {
+ for (;;) {
+ x *= x;
+ if (k == 1) break;
+ if (k & 1) y *= x;
+ k >>= 1;
+ }
+ y *= x;
+ }
+ return y;
+}
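+/* Example: k = 5 (binary 101) takes y = x, squares x to x^2 and x^4, and
+** returns y * x^4 = x^5, i.e. square-and-multiply over the bits of k. */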
+
+/* Signed x^k. */
+double lj_vm_powi(double x, int32_t k)
+{
+ if (k > 1)
+ return lj_vm_powui(x, (uint32_t)k);
+ else if (k == 1)
+ return x;
+ else if (k == 0)
+ return 1.0;
+ else
+ return 1.0 / lj_vm_powui(x, (uint32_t)-k);
+}
+
+/* Computes fpm(x) for extended math functions. */
+double lj_vm_foldfpm(double x, int fpm)
+{
+ switch (fpm) {
+ case IRFPM_FLOOR: return lj_vm_floor(x);
+ case IRFPM_CEIL: return lj_vm_ceil(x);
+ case IRFPM_TRUNC: return lj_vm_trunc(x);
+ case IRFPM_SQRT: return sqrt(x);
+ case IRFPM_EXP: return exp(x);
+ case IRFPM_EXP2: return lj_vm_exp2(x);
+ case IRFPM_LOG: return log(x);
+ case IRFPM_LOG2: return lj_vm_log2(x);
+ case IRFPM_LOG10: return log10(x);
+ case IRFPM_SIN: return sin(x);
+ case IRFPM_COS: return cos(x);
+ case IRFPM_TAN: return tan(x);
+ default: lua_assert(0);
+ }
+ return 0;
+}
+#endif
+
+#if LJ_HASFFI
+int lj_vm_errno(void)
+{
+ return errno;
+}
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/ljamalg.c b/src/LuaJIT/src/ljamalg.c
new file mode 100644
index 000000000..1b58ceb4c
--- /dev/null
+++ b/src/LuaJIT/src/ljamalg.c
@@ -0,0 +1,90 @@
+/*
+** LuaJIT core and libraries amalgamation.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/*
++--------------------------------------------------------------------------+
+| WARNING: Compiling the amalgamation needs a lot of virtual memory |
+| (around 200 MB with GCC 4.x)! If you don't have enough physical memory |
+| your machine will start swapping to disk and the compile will not finish |
+| within a reasonable amount of time. |
+| So either compile on a bigger machine or use the non-amalgamated build. |
++--------------------------------------------------------------------------+
+*/
+
+#define ljamalg_c
+#define LUA_CORE
+
+/* To get the mremap prototype. Must be defined before any system includes. */
+#if defined(__linux__) && !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+
+#ifndef WINVER
+#define WINVER 0x0500
+#endif
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "lj_gc.c"
+#include "lj_err.c"
+#include "lj_char.c"
+#include "lj_bc.c"
+#include "lj_obj.c"
+#include "lj_str.c"
+#include "lj_tab.c"
+#include "lj_func.c"
+#include "lj_udata.c"
+#include "lj_meta.c"
+#include "lj_debug.c"
+#include "lj_state.c"
+#include "lj_dispatch.c"
+#include "lj_vmevent.c"
+#include "lj_vmmath.c"
+#include "lj_api.c"
+#include "lj_lex.c"
+#include "lj_parse.c"
+#include "lj_bcread.c"
+#include "lj_bcwrite.c"
+#include "lj_ctype.c"
+#include "lj_cdata.c"
+#include "lj_cconv.c"
+#include "lj_ccall.c"
+#include "lj_ccallback.c"
+#include "lj_carith.c"
+#include "lj_clib.c"
+#include "lj_cparse.c"
+#include "lj_lib.c"
+#include "lj_ir.c"
+#include "lj_opt_mem.c"
+#include "lj_opt_fold.c"
+#include "lj_opt_narrow.c"
+#include "lj_opt_dce.c"
+#include "lj_opt_loop.c"
+#include "lj_opt_split.c"
+#include "lj_mcode.c"
+#include "lj_snap.c"
+#include "lj_record.c"
+#include "lj_crecord.c"
+#include "lj_ffrecord.c"
+#include "lj_asm.c"
+#include "lj_trace.c"
+#include "lj_gdbjit.c"
+#include "lj_alloc.c"
+
+#include "lib_aux.c"
+#include "lib_base.c"
+#include "lib_math.c"
+#include "lib_string.c"
+#include "lib_table.c"
+#include "lib_io.c"
+#include "lib_os.c"
+#include "lib_package.c"
+#include "lib_debug.c"
+#include "lib_bit.c"
+#include "lib_jit.c"
+#include "lib_ffi.c"
+#include "lib_init.c"
+
diff --git a/src/lua/src/lua.h b/src/LuaJIT/src/lua.h
similarity index 98%
rename from src/lua/src/lua.h
rename to src/LuaJIT/src/lua.h
index 5bc97b746..0e98b3744 100644
--- a/src/lua/src/lua.h
+++ b/src/LuaJIT/src/lua.h
@@ -1,5 +1,5 @@
/*
-** $Id: lua.h,v 1.218.1.4 2008/01/03 15:41:15 roberto Exp $
+** $Id: lua.h,v 1.218.1.5 2008/08/06 13:30:12 roberto Exp $
** Lua - An Extensible Extension Language
** Lua.org, PUC-Rio, Brazil (http://www.lua.org)
** See Copyright Notice at the end of this file
@@ -17,10 +17,10 @@
#define LUA_VERSION "Lua 5.1"
-#define LUA_RELEASE "Lua 5.1.3"
+#define LUA_RELEASE "Lua 5.1.4"
#define LUA_VERSION_NUM 501
#define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio"
-#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes"
+#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes"
/* mark for precompiled code (`Lua') */
@@ -245,7 +245,7 @@ LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud);
-/*
+/*
** ===============================================================
** some useful macros
** ===============================================================
diff --git a/src/LuaJIT/src/lua.hpp b/src/LuaJIT/src/lua.hpp
new file mode 100644
index 000000000..07e9002dc
--- /dev/null
+++ b/src/LuaJIT/src/lua.hpp
@@ -0,0 +1,9 @@
+// C++ wrapper for LuaJIT header files.
+
+extern "C" {
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "luajit.h"
+}
+
diff --git a/src/LuaJIT/src/luaconf.h b/src/LuaJIT/src/luaconf.h
new file mode 100644
index 000000000..dfb79e19b
--- /dev/null
+++ b/src/LuaJIT/src/luaconf.h
@@ -0,0 +1,131 @@
+/*
+** Configuration header.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef luaconf_h
+#define luaconf_h
+
+#include <limits.h>
+#include <stdio.h>
+
+/* Default path for loading Lua and C modules with require(). */
+#if defined(_WIN32)
+/*
+** In Windows, any exclamation mark ('!') in the path is replaced by the
+** path of the directory of the executable file of the current process.
+*/
+#define LUA_LDIR "!\\lua\\"
+#define LUA_CDIR "!\\"
+#define LUA_PATH_DEFAULT \
+ ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;"
+#define LUA_CPATH_DEFAULT \
+ ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll"
+#else
+#define LUA_ROOT "/usr/local/"
+#define LUA_LDIR LUA_ROOT "share/lua/5.1/"
+#define LUA_CDIR LUA_ROOT "lib/lua/5.1/"
+#ifdef LUA_XROOT
+#define LUA_JDIR LUA_XROOT "share/luajit-2.0.0-beta10/"
+#define LUA_XPATH \
+ ";" LUA_XROOT "share/lua/5.1/?.lua;" LUA_XROOT "share/lua/5.1/?/init.lua"
+#define LUA_XCPATH LUA_XROOT "lib/lua/5.1/?.so;"
+#else
+#define LUA_JDIR LUA_ROOT "share/luajit-2.0.0-beta10/"
+#define LUA_XPATH
+#define LUA_XCPATH
+#endif
+#define LUA_PATH_DEFAULT \
+ "./?.lua;" LUA_JDIR"?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?/init.lua" LUA_XPATH
+#define LUA_CPATH_DEFAULT \
+ "./?.so;" LUA_CDIR"?.so;" LUA_XCPATH LUA_CDIR"loadall.so"
+#endif
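+/* Example: require("foo.bar") substitutes "foo/bar" ("foo\bar" on Windows)
+** for each "?" above, so "./?.lua" is searched as "./foo/bar.lua". */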
+
+/* Environment variable names for path overrides and initialization code. */
+#define LUA_PATH "LUA_PATH"
+#define LUA_CPATH "LUA_CPATH"
+#define LUA_INIT "LUA_INIT"
+
+/* Special file system characters. */
+#if defined(_WIN32)
+#define LUA_DIRSEP "\\"
+#else
+#define LUA_DIRSEP "/"
+#endif
+#define LUA_PATHSEP ";"
+#define LUA_PATH_MARK "?"
+#define LUA_EXECDIR "!"
+#define LUA_IGMARK "-"
+#define LUA_PATH_CONFIG \
+ LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \
+ LUA_EXECDIR "\n" LUA_IGMARK
+
+/* Quoting in error messages. */
+#define LUA_QL(x) "'" x "'"
+#define LUA_QS LUA_QL("%s")
+
+/* Various tunables. */
+#define LUAI_MAXSTACK 65500 /* Max. # of stack slots for a thread (<64K). */
+#define LUAI_MAXCSTACK 8000 /* Max. # of stack slots for a C func (<10K). */
+#define LUAI_GCPAUSE 200 /* Pause GC until memory is at 200%. */
+#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */
+#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */
+
+/* Compatibility with older library function names. */
+#define LUA_COMPAT_MOD /* OLD: math.mod, NEW: math.fmod */
+#define LUA_COMPAT_GFIND /* OLD: string.gfind, NEW: string.gmatch */
+
+/* Configuration for the frontend (the luajit executable). */
+#if defined(luajit_c)
+#define LUA_PROGNAME "luajit" /* Fallback frontend name. */
+#define LUA_PROMPT "> " /* Interactive prompt. */
+#define LUA_PROMPT2 ">> " /* Continuation prompt. */
+#define LUA_MAXINPUT 512 /* Max. input line length. */
+#endif
+
+/* Note: changing the following defines breaks the Lua 5.1 ABI. */
+#define LUA_INTEGER ptrdiff_t
+#define LUA_IDSIZE 60 /* Size of lua_Debug.short_src. */
+#define LUAL_BUFFERSIZE BUFSIZ /* Size of lauxlib and io.* buffers. */
+
+/* The following defines are here only for compatibility with luaconf.h
+** from the standard Lua distribution. They must not be changed for LuaJIT.
+*/
+#define LUA_NUMBER_DOUBLE
+#define LUA_NUMBER double
+#define LUAI_UACNUMBER double
+#define LUA_NUMBER_SCAN "%lf"
+#define LUA_NUMBER_FMT "%.14g"
+#define lua_number2str(s, n) sprintf((s), LUA_NUMBER_FMT, (n))
+#define LUAI_MAXNUMBER2STR 32
+#define lua_str2number(s, p) strtod((s), (p))
+#define LUA_INTFRMLEN "l"
+#define LUA_INTFRM_T long
+
+/* Linkage of public API functions. */
+#if defined(LUA_BUILD_AS_DLL)
+#if defined(LUA_CORE) || defined(LUA_LIB)
+#define LUA_API __declspec(dllexport)
+#else
+#define LUA_API __declspec(dllimport)
+#endif
+#else
+#define LUA_API extern
+#endif
+
+#define LUALIB_API LUA_API
+
+/* Support for internal assertions. */
+#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
+#include <assert.h>
+#endif
+#ifdef LUA_USE_ASSERT
+#define lua_assert(x) assert(x)
+#endif
+#ifdef LUA_USE_APICHECK
+#define luai_apicheck(L, o) { (void)L; assert(o); }
+#else
+#define luai_apicheck(L, o) { (void)L; }
+#endif
+
+#endif
diff --git a/src/LuaJIT/src/luajit.c b/src/LuaJIT/src/luajit.c
new file mode 100644
index 000000000..ecf4ef26d
--- /dev/null
+++ b/src/LuaJIT/src/luajit.c
@@ -0,0 +1,558 @@
+/*
+** LuaJIT frontend. Runs commands, scripts, read-eval-print (REPL) etc.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+
+#define luajit_c
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "luajit.h"
+
+#include "lj_arch.h"
+
+#if LJ_TARGET_POSIX
+#include <unistd.h>
+#define lua_stdin_is_tty() isatty(0)
+#elif LJ_TARGET_WINDOWS
+#include <io.h>
+#ifdef __BORLANDC__
+#define lua_stdin_is_tty() isatty(_fileno(stdin))
+#else
+#define lua_stdin_is_tty() _isatty(_fileno(stdin))
+#endif
+#else
+#define lua_stdin_is_tty() 1
+#endif
+
+static lua_State *globalL = NULL;
+static const char *progname = LUA_PROGNAME;
+
+static void lstop(lua_State *L, lua_Debug *ar)
+{
+ (void)ar; /* unused arg. */
+ lua_sethook(L, NULL, 0, 0);
+ /* Avoid luaL_error -- a C hook doesn't add an extra frame. */
+ luaL_where(L, 0);
+ lua_pushfstring(L, "%sinterrupted!", lua_tostring(L, -1));
+ lua_error(L);
+}
+
+static void laction(int i)
+{
+ signal(i, SIG_DFL); /* if another SIGINT happens before lstop,
+ terminate process (default action) */
+ lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1);
+}
+
+static void print_usage(void)
+{
+ fprintf(stderr,
+ "usage: %s [options]... [script [args]...].\n"
+ "Available options are:\n"
+ " -e chunk Execute string " LUA_QL("chunk") ".\n"
+ " -l name Require library " LUA_QL("name") ".\n"
+ " -b ... Save or list bytecode.\n"
+ " -j cmd Perform LuaJIT control command.\n"
+ " -O[opt] Control LuaJIT optimizations.\n"
+ " -i Enter interactive mode after executing " LUA_QL("script") ".\n"
+ " -v Show version information.\n"
+ " -- Stop handling options.\n"
+ " - Execute stdin and stop handling options.\n"
+ ,
+ progname);
+ fflush(stderr);
+}
+
+static void l_message(const char *pname, const char *msg)
+{
+ if (pname) fprintf(stderr, "%s: ", pname);
+ fprintf(stderr, "%s\n", msg);
+ fflush(stderr);
+}
+
+static int report(lua_State *L, int status)
+{
+ if (status && !lua_isnil(L, -1)) {
+ const char *msg = lua_tostring(L, -1);
+ if (msg == NULL) msg = "(error object is not a string)";
+ l_message(progname, msg);
+ lua_pop(L, 1);
+ }
+ return status;
+}
+
+static int traceback(lua_State *L)
+{
+ if (!lua_isstring(L, 1)) { /* Non-string error object? Try metamethod. */
+ if (lua_isnoneornil(L, 1) ||
+ !luaL_callmeta(L, 1, "__tostring") ||
+ !lua_isstring(L, -1))
+ return 1; /* Return non-string error object. */
+ lua_remove(L, 1); /* Replace object by result of __tostring metamethod. */
+ }
+ lua_getfield(L, LUA_GLOBALSINDEX, "debug");
+ if (!lua_istable(L, -1)) {
+ lua_pop(L, 1);
+ return 1;
+ }
+ lua_getfield(L, -1, "traceback");
+ if (!lua_isfunction(L, -1)) {
+ lua_pop(L, 2);
+ return 1;
+ }
+ lua_pushvalue(L, 1); /* Push error message. */
+ lua_pushinteger(L, 2); /* Skip this function and debug.traceback(). */
+ lua_call(L, 2, 1); /* Call debug.traceback(). */
+ return 1;
+}
+
+static int docall(lua_State *L, int narg, int clear)
+{
+ int status;
+ int base = lua_gettop(L) - narg; /* function index */
+ lua_pushcfunction(L, traceback); /* push traceback function */
+ lua_insert(L, base); /* put it under chunk and args */
+ signal(SIGINT, laction);
+ status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base);
+ signal(SIGINT, SIG_DFL);
+ lua_remove(L, base); /* remove traceback function */
+ /* force a complete garbage collection in case of errors */
+ if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0);
+ return status;
+}
+
+static void print_version(void)
+{
+ fprintf(stderr,
+ LUAJIT_VERSION " -- " LUAJIT_COPYRIGHT ". " LUAJIT_URL "\n");
+}
+
+static void print_jit_status(lua_State *L)
+{
+ int n;
+ const char *s;
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, -1, "jit"); /* Get jit.* module table. */
+ lua_remove(L, -2);
+ lua_getfield(L, -1, "status");
+ lua_remove(L, -2);
+ n = lua_gettop(L);
+ lua_call(L, 0, LUA_MULTRET);
+ fputs(lua_toboolean(L, n) ? "JIT: ON" : "JIT: OFF", stderr);
+ for (n++; (s = lua_tostring(L, n)); n++)
+ fprintf(stderr, " %s", s);
+ fputs("\n", stderr);
+}
+
+static int getargs(lua_State *L, char **argv, int n)
+{
+ int narg;
+ int i;
+ int argc = 0;
+ while (argv[argc]) argc++; /* count total number of arguments */
+ narg = argc - (n + 1); /* number of arguments to the script */
+ luaL_checkstack(L, narg + 3, "too many arguments to script");
+ for (i = n+1; i < argc; i++)
+ lua_pushstring(L, argv[i]);
+ lua_createtable(L, narg, n + 1);
+ for (i = 0; i < argc; i++) {
+ lua_pushstring(L, argv[i]);
+ lua_rawseti(L, -2, i - n);
+ }
+ return narg;
+}
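+/* The resulting "arg" table follows the standalone Lua convention: arg[0]
+** is the script name, arg[1..narg] are the script arguments and negative
+** indices hold the interpreter name and any options given before the
+** script. The narg script arguments are also left on the stack. */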
+
+static int dofile(lua_State *L, const char *name)
+{
+ int status = luaL_loadfile(L, name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+static int dostring(lua_State *L, const char *s, const char *name)
+{
+ int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+static int dolibrary(lua_State *L, const char *name)
+{
+ lua_getglobal(L, "require");
+ lua_pushstring(L, name);
+ return report(L, docall(L, 1, 1));
+}
+
+static void write_prompt(lua_State *L, int firstline)
+{
+ const char *p;
+ lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2");
+ p = lua_tostring(L, -1);
+ if (p == NULL) p = firstline ? LUA_PROMPT : LUA_PROMPT2;
+ fputs(p, stdout);
+ fflush(stdout);
+ lua_pop(L, 1); /* remove global */
+}
+
+static int incomplete(lua_State *L, int status)
+{
+ if (status == LUA_ERRSYNTAX) {
+ size_t lmsg;
+ const char *msg = lua_tolstring(L, -1, &lmsg);
+ const char *tp = msg + lmsg - (sizeof(LUA_QL("<eof>")) - 1);
+ if (strstr(msg, LUA_QL("<eof>")) == tp) {
+ lua_pop(L, 1);
+ return 1;
+ }
+ }
+ return 0; /* else... */
+}
+
+static int pushline(lua_State *L, int firstline)
+{
+ char buf[LUA_MAXINPUT];
+ write_prompt(L, firstline);
+ if (fgets(buf, LUA_MAXINPUT, stdin)) {
+ size_t len = strlen(buf);
+ if (len > 0 && buf[len-1] == '\n')
+ buf[len-1] = '\0';
+ if (firstline && buf[0] == '=')
+ lua_pushfstring(L, "return %s", buf+1);
+ else
+ lua_pushstring(L, buf);
+ return 1;
+ }
+ return 0;
+}
+
+static int loadline(lua_State *L)
+{
+ int status;
+ lua_settop(L, 0);
+ if (!pushline(L, 1))
+ return -1; /* no input */
+ for (;;) { /* repeat until gets a complete line */
+ status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin");
+ if (!incomplete(L, status)) break; /* cannot try to add lines? */
+ if (!pushline(L, 0)) /* no more input? */
+ return -1;
+ lua_pushliteral(L, "\n"); /* add a new line... */
+ lua_insert(L, -2); /* ...between the two lines */
+ lua_concat(L, 3); /* join them */
+ }
+ lua_remove(L, 1); /* remove line */
+ return status;
+}
+
+static void dotty(lua_State *L)
+{
+ int status;
+ const char *oldprogname = progname;
+ progname = NULL;
+ while ((status = loadline(L)) != -1) {
+ if (status == 0) status = docall(L, 0, 0);
+ report(L, status);
+ if (status == 0 && lua_gettop(L) > 0) { /* any result to print? */
+ lua_getglobal(L, "print");
+ lua_insert(L, 1);
+ if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0)
+ l_message(progname,
+ lua_pushfstring(L, "error calling " LUA_QL("print") " (%s)",
+ lua_tostring(L, -1)));
+ }
+ }
+ lua_settop(L, 0); /* clear stack */
+ fputs("\n", stdout);
+ fflush(stdout);
+ progname = oldprogname;
+}
+
+static int handle_script(lua_State *L, char **argv, int n)
+{
+ int status;
+ const char *fname;
+ int narg = getargs(L, argv, n); /* collect arguments */
+ lua_setglobal(L, "arg");
+ fname = argv[n];
+ if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0)
+ fname = NULL; /* stdin */
+ status = luaL_loadfile(L, fname);
+ lua_insert(L, -(narg+1));
+ if (status == 0)
+ status = docall(L, narg, 0);
+ else
+ lua_pop(L, narg);
+ return report(L, status);
+}
+
+/* Load add-on module. */
+static int loadjitmodule(lua_State *L)
+{
+ lua_getglobal(L, "require");
+ lua_pushliteral(L, "jit.");
+ lua_pushvalue(L, -3);
+ lua_concat(L, 2);
+ if (lua_pcall(L, 1, 1, 0)) {
+ const char *msg = lua_tostring(L, -1);
+ if (msg && !strncmp(msg, "module ", 7)) {
+ err:
+ l_message(progname,
+ "unknown luaJIT command or jit.* modules not installed");
+ return 1;
+ } else {
+ return report(L, 1);
+ }
+ }
+ lua_getfield(L, -1, "start");
+ if (lua_isnil(L, -1)) goto err;
+ lua_remove(L, -2); /* Drop module table. */
+ return 0;
+}
+
+/* Run command with options. */
+static int runcmdopt(lua_State *L, const char *opt)
+{
+ int narg = 0;
+ if (opt && *opt) {
+ for (;;) { /* Split arguments. */
+ const char *p = strchr(opt, ',');
+ narg++;
+ if (!p) break;
+ if (p == opt)
+ lua_pushnil(L);
+ else
+ lua_pushlstring(L, opt, (size_t)(p - opt));
+ opt = p + 1;
+ }
+ if (*opt)
+ lua_pushstring(L, opt);
+ else
+ lua_pushnil(L);
+ }
+ return report(L, lua_pcall(L, narg, 0, 0));
+}
+
+/* JIT engine control command: try jit library first or load add-on module. */
+static int dojitcmd(lua_State *L, const char *cmd)
+{
+ const char *opt = strchr(cmd, '=');
+ lua_pushlstring(L, cmd, opt ? (size_t)(opt - cmd) : strlen(cmd));
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, -1, "jit"); /* Get jit.* module table. */
+ lua_remove(L, -2);
+ lua_pushvalue(L, -2);
+ lua_gettable(L, -2); /* Lookup library function. */
+ if (!lua_isfunction(L, -1)) {
+ lua_pop(L, 2); /* Drop non-function and jit.* table, keep module name. */
+ if (loadjitmodule(L))
+ return 1;
+ } else {
+ lua_remove(L, -2); /* Drop jit.* table. */
+ }
+ lua_remove(L, -2); /* Drop module name. */
+ return runcmdopt(L, opt ? opt+1 : opt);
+}
+
+/* Optimization flags. */
+static int dojitopt(lua_State *L, const char *opt)
+{
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, -1, "jit.opt"); /* Get jit.opt.* module table. */
+ lua_remove(L, -2);
+ lua_getfield(L, -1, "start");
+ lua_remove(L, -2);
+ return runcmdopt(L, opt);
+}
+
+/* Save or list bytecode. */
+static int dobytecode(lua_State *L, char **argv)
+{
+ int narg = 0;
+ lua_pushliteral(L, "bcsave");
+ if (loadjitmodule(L))
+ return 1;
+ if (argv[0][2]) {
+ narg++;
+ argv[0][1] = '-';
+ lua_pushstring(L, argv[0]+1);
+ }
+ for (argv++; *argv != NULL; narg++, argv++)
+ lua_pushstring(L, *argv);
+ return report(L, lua_pcall(L, narg, 0, 0));
+}
+
+/* check that argument has no extra characters at the end */
+#define notail(x) {if ((x)[2] != '\0') return -1;}
+
+#define FLAGS_INTERACTIVE 1
+#define FLAGS_VERSION 2
+#define FLAGS_EXEC 4
+#define FLAGS_OPTION 8
+
+static int collectargs(char **argv, int *flags)
+{
+ int i;
+ for (i = 1; argv[i] != NULL; i++) {
+ if (argv[i][0] != '-') /* Not an option? */
+ return i;
+ switch (argv[i][1]) { /* Check option. */
+ case '-':
+ notail(argv[i]);
+ return (argv[i+1] != NULL ? i+1 : 0);
+ case '\0':
+ return i;
+ case 'i':
+ notail(argv[i]);
+ *flags |= FLAGS_INTERACTIVE;
+ /* fallthrough */
+ case 'v':
+ notail(argv[i]);
+ *flags |= FLAGS_VERSION;
+ break;
+ case 'e':
+ *flags |= FLAGS_EXEC;
+ case 'j': /* LuaJIT extension */
+ case 'l':
+ *flags |= FLAGS_OPTION;
+ if (argv[i][2] == '\0') {
+ i++;
+ if (argv[i] == NULL) return -1;
+ }
+ break;
+ case 'O': break; /* LuaJIT extension */
+ case 'b': /* LuaJIT extension */
+ if (*flags) return -1;
+ *flags |= FLAGS_EXEC;
+ return 0;
+ default: return -1; /* invalid option */
+ }
+ }
+ return 0;
+}
+
+static int runargs(lua_State *L, char **argv, int n)
+{
+ int i;
+ for (i = 1; i < n; i++) {
+ if (argv[i] == NULL) continue;
+ lua_assert(argv[i][0] == '-');
+ switch (argv[i][1]) { /* option */
+ case 'e': {
+ const char *chunk = argv[i] + 2;
+ if (*chunk == '\0') chunk = argv[++i];
+ lua_assert(chunk != NULL);
+ if (dostring(L, chunk, "=(command line)") != 0)
+ return 1;
+ break;
+ }
+ case 'l': {
+ const char *filename = argv[i] + 2;
+ if (*filename == '\0') filename = argv[++i];
+ lua_assert(filename != NULL);
+ if (dolibrary(L, filename))
+ return 1; /* stop if file fails */
+ break;
+ }
+ case 'j': { /* LuaJIT extension */
+ const char *cmd = argv[i] + 2;
+ if (*cmd == '\0') cmd = argv[++i];
+ lua_assert(cmd != NULL);
+ if (dojitcmd(L, cmd))
+ return 1;
+ break;
+ }
+ case 'O': /* LuaJIT extension */
+ if (dojitopt(L, argv[i] + 2))
+ return 1;
+ break;
+ case 'b': /* LuaJIT extension */
+ return dobytecode(L, argv+i);
+ default: break;
+ }
+ }
+ return 0;
+}
+
+static int handle_luainit(lua_State *L)
+{
+ const char *init = getenv(LUA_INIT);
+ if (init == NULL)
+ return 0; /* status OK */
+ else if (init[0] == '@')
+ return dofile(L, init+1);
+ else
+ return dostring(L, init, "=" LUA_INIT);
+}
+
+struct Smain {
+ char **argv;
+ int argc;
+ int status;
+};
+
+static int pmain(lua_State *L)
+{
+ struct Smain *s = (struct Smain *)lua_touserdata(L, 1);
+ char **argv = s->argv;
+ int script;
+ int flags = 0;
+ globalL = L;
+ if (argv[0] && argv[0][0]) progname = argv[0];
+ LUAJIT_VERSION_SYM(); /* linker-enforced version check */
+ lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */
+ luaL_openlibs(L); /* open libraries */
+ lua_gc(L, LUA_GCRESTART, -1);
+ s->status = handle_luainit(L);
+ if (s->status != 0) return 0;
+ script = collectargs(argv, &flags);
+ if (script < 0) { /* invalid args? */
+ print_usage();
+ s->status = 1;
+ return 0;
+ }
+ if ((flags & FLAGS_VERSION)) print_version();
+ s->status = runargs(L, argv, (script > 0) ? script : s->argc);
+ if (s->status != 0) return 0;
+ if (script)
+ s->status = handle_script(L, argv, script);
+ if (s->status != 0) return 0;
+ if ((flags & FLAGS_INTERACTIVE)) {
+ print_jit_status(L);
+ dotty(L);
+ } else if (script == 0 && !(flags & (FLAGS_EXEC|FLAGS_VERSION))) {
+ if (lua_stdin_is_tty()) {
+ print_version();
+ print_jit_status(L);
+ dotty(L);
+ } else {
+ dofile(L, NULL); /* executes stdin as a file */
+ }
+ }
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int status;
+ struct Smain s;
+ lua_State *L = lua_open(); /* create state */
+ if (L == NULL) {
+ l_message(argv[0], "cannot create state: not enough memory");
+ return EXIT_FAILURE;
+ }
+ s.argc = argc;
+ s.argv = argv;
+ status = lua_cpcall(L, pmain, &s);
+ report(L, status);
+ lua_close(L);
+ return (status || s.status) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
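The luajit.c driver above runs its real startup body (pmain) through lua_cpcall so that errors raised while opening libraries or handling arguments are reported via report() instead of aborting the process. Below is a minimal sketch of that protected-main pattern, assuming a Lua 5.1/LuaJIT build environment; pmain_sketch and struct smain are illustrative names, not part of the patch.

/* Sketch of the lua_cpcall protected-main pattern used by luajit.c. */
#include <stdio.h>
#include <stdlib.h>
#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"

struct smain { int argc; char **argv; int status; };

static int pmain_sketch(lua_State *L) {
  struct smain *s = (struct smain *)lua_touserdata(L, 1);  /* passed by lua_cpcall */
  luaL_openlibs(L);                       /* errors here are caught by lua_cpcall */
  s->status = luaL_dostring(L, "print('hello from protected main')");
  return 0;
}

int main(int argc, char **argv) {
  struct smain s = { argc, argv, 0 };
  lua_State *L = luaL_newstate();         /* luajit.c uses the lua_open() compat macro */
  if (L == NULL) return EXIT_FAILURE;
  int rc = lua_cpcall(L, pmain_sketch, &s);  /* run pmain_sketch in protected mode */
  if (rc) fprintf(stderr, "%s\n", lua_tostring(L, -1));
  lua_close(L);
  return (rc || s.status) ? EXIT_FAILURE : EXIT_SUCCESS;
}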
diff --git a/src/LuaJIT/src/luajit.h b/src/LuaJIT/src/luajit.h
new file mode 100644
index 000000000..10e926e15
--- /dev/null
+++ b/src/LuaJIT/src/luajit.h
@@ -0,0 +1,70 @@
+/*
+** LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/
+**
+** Copyright (C) 2005-2012 Mike Pall. All rights reserved.
+**
+** Permission is hereby granted, free of charge, to any person obtaining
+** a copy of this software and associated documentation files (the
+** "Software"), to deal in the Software without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Software, and to
+** permit persons to whom the Software is furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be
+** included in all copies or substantial portions of the Software.
+**
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+**
+** [ MIT license: http://www.opensource.org/licenses/mit-license.php ]
+*/
+
+#ifndef _LUAJIT_H
+#define _LUAJIT_H
+
+#include "lua.h"
+
+#define LUAJIT_VERSION "LuaJIT 2.0.0-beta10"
+#define LUAJIT_VERSION_NUM 20000 /* Version 2.0.0 = 02.00.00. */
+#define LUAJIT_VERSION_SYM luaJIT_version_2_0_0_beta10
+#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2012 Mike Pall"
+#define LUAJIT_URL "http://luajit.org/"
+
+/* Modes for luaJIT_setmode. */
+#define LUAJIT_MODE_MASK 0x00ff
+
+enum {
+ LUAJIT_MODE_ENGINE, /* Set mode for whole JIT engine. */
+ LUAJIT_MODE_DEBUG, /* Set debug mode (idx = level). */
+
+ LUAJIT_MODE_FUNC, /* Change mode for a function. */
+ LUAJIT_MODE_ALLFUNC, /* Recurse into subroutine protos. */
+ LUAJIT_MODE_ALLSUBFUNC, /* Change only the subroutines. */
+
+ LUAJIT_MODE_TRACE, /* Flush a compiled trace. */
+
+ LUAJIT_MODE_WRAPCFUNC = 0x10, /* Set wrapper mode for C function calls. */
+
+ LUAJIT_MODE_MAX
+};
+
+/* Flags or'ed in to the mode. */
+#define LUAJIT_MODE_OFF 0x0000 /* Turn feature off. */
+#define LUAJIT_MODE_ON 0x0100 /* Turn feature on. */
+#define LUAJIT_MODE_FLUSH 0x0200 /* Flush JIT-compiled code. */
+
+/* LuaJIT public C API. */
+
+/* Control the JIT engine. */
+LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
+
+/* Enforce (dynamic) linker error for version mismatches. Call from main. */
+LUA_API void LUAJIT_VERSION_SYM(void);
+
+#endif
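The luajit.h header above exposes luaJIT_setmode() together with the LUAJIT_MODE_* constants that get or'ed into the mode argument. A hedged sketch of how an embedder might drive that API, assuming the program links against libluajit; disable_then_enable_jit is an illustrative helper, not part of the patch.

#include "lua.h"
#include "luajit.h"

/* Toggle the whole JIT engine and flush compiled traces in between. */
int disable_then_enable_jit(lua_State *L) {
  if (!luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE | LUAJIT_MODE_OFF))
    return 0;                                   /* mode change not supported */
  luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE | LUAJIT_MODE_FLUSH);
  return luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE | LUAJIT_MODE_ON);
}

For LUAJIT_MODE_ENGINE the idx argument is not used, so 0 is passed; the call returns 1 on success and 0 if the requested mode change is unsupported.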
diff --git a/src/LuaJIT/src/lualib.h b/src/LuaJIT/src/lualib.h
new file mode 100644
index 000000000..f02375a00
--- /dev/null
+++ b/src/LuaJIT/src/lualib.h
@@ -0,0 +1,43 @@
+/*
+** Standard library header.
+** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LUALIB_H
+#define _LUALIB_H
+
+#include "lua.h"
+
+#define LUA_FILEHANDLE "FILE*"
+
+#define LUA_COLIBNAME "coroutine"
+#define LUA_MATHLIBNAME "math"
+#define LUA_STRLIBNAME "string"
+#define LUA_TABLIBNAME "table"
+#define LUA_IOLIBNAME "io"
+#define LUA_OSLIBNAME "os"
+#define LUA_LOADLIBNAME "package"
+#define LUA_DBLIBNAME "debug"
+#define LUA_BITLIBNAME "bit"
+#define LUA_JITLIBNAME "jit"
+#define LUA_FFILIBNAME "ffi"
+
+LUALIB_API int luaopen_base(lua_State *L);
+LUALIB_API int luaopen_math(lua_State *L);
+LUALIB_API int luaopen_string(lua_State *L);
+LUALIB_API int luaopen_table(lua_State *L);
+LUALIB_API int luaopen_io(lua_State *L);
+LUALIB_API int luaopen_os(lua_State *L);
+LUALIB_API int luaopen_package(lua_State *L);
+LUALIB_API int luaopen_debug(lua_State *L);
+LUALIB_API int luaopen_bit(lua_State *L);
+LUALIB_API int luaopen_jit(lua_State *L);
+LUALIB_API int luaopen_ffi(lua_State *L);
+
+LUALIB_API void luaL_openlibs(lua_State *L);
+
+#ifndef lua_assert
+#define lua_assert(x) ((void)0)
+#endif
+
+#endif
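lualib.h above declares one luaopen_* entry point per standard library plus luaL_openlibs(), which opens them all. When only a subset is wanted, the usual Lua 5.1 convention is to call each opener through Lua rather than invoking it directly; a small sketch under that assumption follows (open_selected_libs is an illustrative name, not part of the patch).

#include "lua.h"
#include "lualib.h"

/* Open only the base, string and bit libraries, linit.c-style. */
static void open_selected_libs(lua_State *L) {
  lua_pushcfunction(L, luaopen_base);
  lua_pushstring(L, "");                  /* the base library registers under "" */
  lua_call(L, 1, 0);
  lua_pushcfunction(L, luaopen_string);
  lua_pushstring(L, LUA_STRLIBNAME);
  lua_call(L, 1, 0);
  lua_pushcfunction(L, luaopen_bit);
  lua_pushstring(L, LUA_BITLIBNAME);
  lua_call(L, 1, 0);
}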
diff --git a/src/LuaJIT/src/msvcbuild.bat b/src/LuaJIT/src/msvcbuild.bat
new file mode 100644
index 000000000..09782db42
--- /dev/null
+++ b/src/LuaJIT/src/msvcbuild.bat
@@ -0,0 +1,101 @@
+@rem Script to build LuaJIT with MSVC.
+@rem Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
+@rem
+@rem Either open a "Visual Studio .NET Command Prompt"
+@rem (Note that the Express Edition does not contain an x64 compiler)
+@rem -or-
+@rem Open a "Windows SDK Command Shell" and set the compiler environment:
+@rem setenv /release /x86
+@rem -or-
+@rem setenv /release /x64
+@rem
+@rem Then cd to this directory and run this script.
+
+@if not defined INCLUDE goto :FAIL
+
+@setlocal
+@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE
+@set LJLINK=link /nologo
+@set LJMT=mt /nologo
+@set LJLIB=lib /nologo
+@set DASMDIR=..\dynasm
+@set DASM=lua %DASMDIR%\dynasm.lua
+@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c
+
+if not exist buildvm_x86.h^
+ %DASM% -LN -o buildvm_x86.h buildvm_x86.dasc
+@if errorlevel 1 goto :BAD
+if not exist buildvm_x64win.h^
+ %DASM% -LN -D X64 -D X64WIN -o buildvm_x64win.h buildvm_x86.dasc
+@if errorlevel 1 goto :BAD
+
+%LJCOMPILE% /I "." /I %DASMDIR% buildvm*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:buildvm.exe buildvm*.obj
+@if errorlevel 1 goto :BAD
+if exist buildvm.exe.manifest^
+ %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe
+
+buildvm -m peobj -o lj_vm.obj
+@if errorlevel 1 goto :BAD
+buildvm -m bcdef -o lj_bcdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m ffdef -o lj_ffdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m libdef -o lj_libdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m recdef -o lj_recdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m vmdef -o ..\lib\vmdef.lua %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
+@if errorlevel 1 goto :BAD
+
+@if "%1" neq "debug" goto :NODEBUG
+@shift
+@set LJCOMPILE=%LJCOMPILE% /Zi
+@set LJLINK=%LJLINK% /debug
+:NODEBUG
+@if "%1"=="amalg" goto :AMALGDLL
+@if "%1"=="static" goto :STATIC
+%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /DLL /out:lua51.dll lj_*.obj lib_*.obj
+@if errorlevel 1 goto :BAD
+@goto :MTDLL
+:STATIC
+%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
+@if errorlevel 1 goto :BAD
+%LJLIB% /OUT:lua51.lib lj_*.obj lib_*.obj
+@if errorlevel 1 goto :BAD
+@goto :MTDLL
+:AMALGDLL
+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /DLL /out:lua51.dll ljamalg.obj lj_vm.obj
+@if errorlevel 1 goto :BAD
+:MTDLL
+if exist lua51.dll.manifest^
+ %LJMT% -manifest lua51.dll.manifest -outputresource:lua51.dll;2
+
+%LJCOMPILE% luajit.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:luajit.exe luajit.obj lua51.lib
+@if errorlevel 1 goto :BAD
+if exist luajit.exe.manifest^
+ %LJMT% -manifest luajit.exe.manifest -outputresource:luajit.exe
+
+@del *.obj *.manifest buildvm.exe
+@echo.
+@echo === Successfully built LuaJIT ===
+
+@goto :END
+:BAD
+@echo.
+@echo *******************************************************
+@echo *** Build FAILED -- Please check the error messages ***
+@echo *******************************************************
+@goto :END
+:FAIL
+@echo You must open a "Visual Studio .NET Command Prompt" to run this script
+:END
diff --git a/src/Makefile.in b/src/Makefile.in
index 12825121e..c6ff03023 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -3,6 +3,7 @@
CC=@CC@
CPPFLAGS=@CPPFLAGS@
CFLAGS=@CFLAGS@
+LUACFLAGS=@LUACFLAGS@
PGCFLAGS=@PGCFLAGS@
LDFLAGS=@LDFLAGS@
DEPFLAGS=@DEPFLAGS@
@@ -103,11 +104,11 @@ all: reversion $(NOIT_LIBS) $(STRATCON_LIBS) $(TARGETS) java-bits make-modules m
make-man:
@(cd man && $(MAKE) -s)
-lua/liblua.lo:
+LuaJIT/src/libluajit.a:
@echo "- building lua bits"
- @(cd lua && $(MAKE) -s liblua.lo)
+ @(cd LuaJIT && $(MAKE) HOST_CC="$(CC) $(LUACFLAGS)")
-make-modules: lua/liblua.lo
+make-modules: LuaJIT/src/libluajit.a
@for dir in $(MODDIR) ; do \
(cd $$dir && $(MAKE) -s) ; \
done
@@ -322,7 +323,7 @@ clean:
(cd utils && $(MAKE) clean)
(cd json-lib && $(MAKE) clean)
(cd yajl-lib && $(MAKE) clean)
- (cd lua && $(MAKE) clean)
+ (cd LuaJIT && $(MAKE) clean)
(cd java && $(MAKE) clean)
Makefile.dep:
@@ -332,7 +333,7 @@ Makefile.dep:
include Makefile.dep
distclean-subdirs:
- for dir in jlog eventer udns man modules modules-lua noitedit utils json-lib yajl-lib lua java ; do \
+ for dir in jlog eventer udns man modules modules-lua noitedit utils json-lib yajl-lib LuaJIT java ; do \
(cd $$dir && $(MAKE) distclean) ; \
done
diff --git a/src/lua/COPYRIGHT b/src/lua/COPYRIGHT
deleted file mode 100644
index 3a53e741e..000000000
--- a/src/lua/COPYRIGHT
+++ /dev/null
@@ -1,34 +0,0 @@
-Lua License
------------
-
-Lua is licensed under the terms of the MIT license reproduced below.
-This means that Lua is free software and can be used for both academic
-and commercial purposes at absolutely no cost.
-
-For details and rationale, see http://www.lua.org/license.html .
-
-===============================================================================
-
-Copyright (C) 1994-2008 Lua.org, PUC-Rio.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-===============================================================================
-
-(end of COPYRIGHT)
diff --git a/src/lua/HISTORY b/src/lua/HISTORY
deleted file mode 100644
index ce0c95bc6..000000000
--- a/src/lua/HISTORY
+++ /dev/null
@@ -1,183 +0,0 @@
-HISTORY for Lua 5.1
-
-* Changes from version 5.0 to 5.1
- -------------------------------
- Language:
- + new module system.
- + new semantics for control variables of fors.
- + new semantics for setn/getn.
- + new syntax/semantics for varargs.
- + new long strings and comments.
- + new `mod' operator (`%')
- + new length operator #t
- + metatables for all types
- API:
- + new functions: lua_createtable, lua_get(set)field, lua_push(to)integer.
- + user supplies memory allocator (lua_open becomes lua_newstate).
- + luaopen_* functions must be called through Lua.
- Implementation:
- + new configuration scheme via luaconf.h.
- + incremental garbage collection.
- + better handling of end-of-line in the lexer.
- + fully reentrant parser (new Lua function `load')
- + better support for 64-bit machines.
- + native loadlib support for Mac OS X.
- + standard distribution in only one library (lualib.a merged into lua.a)
-
-* Changes from version 4.0 to 5.0
- -------------------------------
- Language:
- + lexical scoping.
- + Lua coroutines.
- + standard libraries now packaged in tables.
- + tags replaced by metatables and tag methods replaced by metamethods,
- stored in metatables.
- + proper tail calls.
- + each function can have its own global table, which can be shared.
- + new __newindex metamethod, called when we insert a new key into a table.
- + new block comments: --[[ ... ]].
- + new generic for.
- + new weak tables.
- + new boolean type.
- + new syntax "local function".
- + (f()) returns the first value returned by f.
- + {f()} fills a table with all values returned by f.
- + \n ignored in [[\n .
- + fixed and-or priorities.
- + more general syntax for function definition (e.g. function a.x.y:f()...end).
- + more general syntax for function calls (e.g. (print or write)(9)).
- + new functions (time/date, tmpfile, unpack, require, load*, etc.).
- API:
- + chunks are loaded by using lua_load; new luaL_loadfile and luaL_loadbuffer.
- + introduced lightweight userdata, a simple "void*" without a metatable.
- + new error handling protocol: the core no longer prints error messages;
- all errors are reported to the caller on the stack.
- + new lua_atpanic for host cleanup.
- + new, signal-safe, hook scheme.
- Implementation:
- + new license: MIT.
- + new, faster, register-based virtual machine.
- + support for external multithreading and coroutines.
- + new and consistent error message format.
- + the core no longer needs "stdio.h" for anything (except for a single
- use of sprintf to convert numbers to strings).
- + lua.c now runs the environment variable LUA_INIT, if present. It can
- be "@filename", to run a file, or the chunk itself.
- + support for user extensions in lua.c.
- sample implementation given for command line editing.
- + new dynamic loading library, active by default on several platforms.
- + safe garbage-collector metamethods.
- + precompiled bytecodes checked for integrity (secure binary dostring).
- + strings are fully aligned.
- + position capture in string.find.
- + read('*l') can read lines with embedded zeros.
-
-* Changes from version 3.2 to 4.0
- -------------------------------
- Language:
- + new "break" and "for" statements (both numerical and for tables).
- + uniform treatment of globals: globals are now stored in a Lua table.
- + improved error messages.
- + no more '$debug': full speed *and* full debug information.
- + new read form: read(N) for next N bytes.
- + general read patterns now deprecated.
- (still available with -DCOMPAT_READPATTERNS.)
- + all return values are passed as arguments for the last function
- (old semantics still available with -DLUA_COMPAT_ARGRET)
- + garbage collection tag methods for tables now deprecated.
- + there is now only one tag method for order.
- API:
- + New API: fully re-entrant, simpler, and more efficient.
- + New debug API.
- Implementation:
- + faster than ever: cleaner virtual machine and new hashing algorithm.
- + non-recursive garbage-collector algorithm.
- + reduced memory usage for programs with many strings.
- + improved treatment for memory allocation errors.
- + improved support for 16-bit machines (we hope).
- + code now compiles unmodified as both ANSI C and C++.
- + numbers in bases other than 10 are converted using strtoul.
- + new -f option in Lua to support #! scripts.
- + luac can now combine text and binaries.
-
-* Changes from version 3.1 to 3.2
- -------------------------------
- + redirected all output in Lua's core to _ERRORMESSAGE and _ALERT.
- + increased limit on the number of constants and globals per function
- (from 2^16 to 2^24).
- + debugging info (lua_debug and hooks) moved into lua_state and new API
- functions provided to get and set this info.
- + new debug lib gives full debugging access within Lua.
- + new table functions "foreachi", "sort", "tinsert", "tremove", "getn".
- + new io functions "flush", "seek".
-
-* Changes from version 3.0 to 3.1
- -------------------------------
- + NEW FEATURE: anonymous functions with closures (via "upvalues").
- + new syntax:
- - local variables in chunks.
- - better scope control with DO block END.
- - constructors can now be also written: { record-part; list-part }.
- - more general syntax for function calls and lvalues, e.g.:
- f(x).y=1
- o:f(x,y):g(z)
- f"string" is sugar for f("string")
- + strings may now contain arbitrary binary data (e.g., embedded zeros).
- + major code re-organization and clean-up; reduced module interdependecies.
- + no arbitrary limits on the total number of constants and globals.
- + support for multiple global contexts.
- + better syntax error messages.
- + new traversal functions "foreach" and "foreachvar".
- + the default for numbers is now double.
- changing it to use floats or longs is easy.
- + complete debug information stored in pre-compiled chunks.
- + sample interpreter now prompts user when run interactively, and also
- handles control-C interruptions gracefully.
-
-* Changes from version 2.5 to 3.0
- -------------------------------
- + NEW CONCEPT: "tag methods".
- Tag methods replace fallbacks as the meta-mechanism for extending the
- semantics of Lua. Whereas fallbacks had a global nature, tag methods
- work on objects having the same tag (e.g., groups of tables).
- Existing code that uses fallbacks should work without change.
- + new, general syntax for constructors {[exp] = exp, ... }.
- + support for handling variable number of arguments in functions (varargs).
- + support for conditional compilation ($if ... $else ... $end).
- + cleaner semantics in API simplifies host code.
- + better support for writing libraries (auxlib.h).
- + better type checking and error messages in the standard library.
- + luac can now also undump.
-
-* Changes from version 2.4 to 2.5
- -------------------------------
- + io and string libraries are now based on pattern matching;
- the old libraries are still available for compatibility
- + dofile and dostring can now return values (via return statement)
- + better support for 16- and 64-bit machines
- + expanded documentation, with more examples
-
-* Changes from version 2.2 to 2.4
- -------------------------------
- + external compiler creates portable binary files that can be loaded faster
- + interface for debugging and profiling
- + new "getglobal" fallback
- + new functions for handling references to Lua objects
- + new functions in standard lib
- + only one copy of each string is stored
- + expanded documentation, with more examples
-
-* Changes from version 2.1 to 2.2
- -------------------------------
- + functions now may be declared with any "lvalue" as a name
- + garbage collection of functions
- + support for pipes
-
-* Changes from version 1.1 to 2.1
- -------------------------------
- + object-oriented support
- + fallbacks
- + simplified syntax for tables
- + many internal improvements
-
-(end of HISTORY)
diff --git a/src/lua/INSTALL b/src/lua/INSTALL
deleted file mode 100644
index 17eb8aee8..000000000
--- a/src/lua/INSTALL
+++ /dev/null
@@ -1,99 +0,0 @@
-INSTALL for Lua 5.1
-
-* Building Lua
- ------------
- Lua is built in the src directory, but the build process can be
- controlled from the top-level Makefile.
-
- Building Lua on Unix systems should be very easy. First do "make" and
- see if your platform is listed. If so, just do "make xxx", where xxx
- is your platform name. The platforms currently supported are:
- aix ansi bsd freebsd generic linux macosx mingw posix solaris
-
- If your platform is not listed, try the closest one or posix, generic,
- ansi, in this order.
-
- See below for customization instructions and for instructions on how
- to build with other Windows compilers.
-
- If you want to check that Lua has been built correctly, do "make test"
- after building Lua. Also, have a look at the example programs in test.
-
-* Installing Lua
- --------------
- Once you have built Lua, you may want to install it in an official
- place in your system. In this case, do "make install". The official
- place and the way to install files are defined in Makefile. You must
- have the right permissions to install files.
-
- If you want to build and install Lua in one step, do "make xxx install",
- where xxx is your platform name.
-
- If you want to install Lua locally, then do "make local". This will
- create directories bin, include, lib, man, and install Lua there as
- follows:
-
- bin: lua luac
- include: lua.h luaconf.h lualib.h lauxlib.h lua.hpp
- lib: liblua.a
- man/man1: lua.1 luac.1
-
- These are the only directories you need for development.
-
- There are man pages for lua and luac, in both nroff and html, and a
- reference manual in html in doc, some sample code in test, and some
- useful stuff in etc. You don't need these directories for development.
-
- If you want to install Lua locally, but in some other directory, do
- "make install INSTALL_TOP=xxx", where xxx is your chosen directory.
-
- See below for instructions for Windows and other systems.
-
-* Customization
- -------------
- Three things can be customized by editing a file:
- - Where and how to install Lua -- edit Makefile.
- - How to build Lua -- edit src/Makefile.
- - Lua features -- edit src/luaconf.h.
-
- You don't actually need to edit the Makefiles because you may set the
- relevant variables when invoking make.
-
- On the other hand, if you need to select some Lua features, you'll need
- to edit src/luaconf.h. The edited file will be the one installed, and
- it will be used by any Lua clients that you build, to ensure consistency.
-
- We strongly recommend that you enable dynamic loading. This is done
- automatically for all platforms listed above that have this feature
- (and also Windows). See src/luaconf.h and also src/Makefile.
-
-* Building Lua on Windows and other systems
- -----------------------------------------
- If you're not using the usual Unix tools, then the instructions for
- building Lua depend on the compiler you use. You'll need to create
- projects (or whatever your compiler uses) for building the library,
- the interpreter, and the compiler, as follows:
-
- library: lapi.c lcode.c ldebug.c ldo.c ldump.c lfunc.c lgc.c llex.c
- lmem.c lobject.c lopcodes.c lparser.c lstate.c lstring.c
- ltable.c ltm.c lundump.c lvm.c lzio.c
- lauxlib.c lbaselib.c ldblib.c liolib.c lmathlib.c loslib.c
- ltablib.c lstrlib.c loadlib.c linit.c
-
- interpreter: library, lua.c
-
- compiler: library, luac.c print.c
-
- If you use Visual Studio .NET, you can use etc/luavs.bat in its
- "Command Prompt".
-
- If all you want is to build the Lua interpreter, you may put all .c files
- in a single project, except for luac.c and print.c. Or just use etc/all.c.
-
- To use Lua as a library in your own programs, you'll need to know how to
- create and use libraries with your compiler.
-
- As mentioned above, you may edit luaconf.h to select some features before
- building Lua.
-
-(end of INSTALL)
diff --git a/src/lua/Makefile.in b/src/lua/Makefile.in
deleted file mode 100644
index ce1951f00..000000000
--- a/src/lua/Makefile.in
+++ /dev/null
@@ -1,27 +0,0 @@
-.SUFFIXES: .lo
-
-CC=@CC@
-LD=@LD@
-CPPFLAGS=@CPPFLAGS@
-CFLAGS=@CFLAGS@
-SHCFLAGS=@SHCFLAGS@
-MODULELD=@MODULELD@
-MODULEEXT=@MODULEEXT@
-LDFLAGS=@LDFLAGS@
-AR=@AR@
-RANLIB=@RANLIB@
-LIBS=@LIBS@
-PGLIBS=@PGLIBS@
-INSTALL=@INSTALL@
-
-top_srcdir=@top_srcdir@
-
-liblua.lo: liblua.c
- $(CC) $(SHCFLAGS) -I./src -o $@ -c liblua.c
-
-clean:
- rm -f liblua.lo
-
-distclean: clean
- rm -f Makefile
-
diff --git a/src/lua/Makefile.stock b/src/lua/Makefile.stock
deleted file mode 100644
index fec201156..000000000
--- a/src/lua/Makefile.stock
+++ /dev/null
@@ -1,120 +0,0 @@
-# makefile for installing Lua
-# see INSTALL for installation instructions
-# see src/Makefile and src/luaconf.h for further customization
-
-# == CHANGE THE SETTINGS BELOW TO SUIT YOUR ENVIRONMENT =======================
-
-# Your platform. See PLATS for possible values.
-PLAT= none
-
-# Where to install. The installation starts in the src directory, so take care
-# if INSTALL_TOP is not an absolute path. (Man pages are installed from the
-# doc directory.) You may want to make these paths consistent with LUA_ROOT,
-# LUA_LDIR, and LUA_CDIR in luaconf.h (and also with etc/lua.pc).
-#
-INSTALL_TOP= /usr/local
-INSTALL_BIN= $(INSTALL_TOP)/bin
-INSTALL_INC= $(INSTALL_TOP)/include
-INSTALL_LIB= $(INSTALL_TOP)/lib
-INSTALL_MAN= $(INSTALL_TOP)/man/man1
-INSTALL_LMOD= $(INSTALL_TOP)/share/lua/$V
-INSTALL_CMOD= $(INSTALL_TOP)/lib/lua/$V
-
-# How to install. If you don't have "install" (unlikely) then get install-sh at
-# http://dev.w3.org/cvsweb/libwww/config/install-sh
-# or use cp instead.
-INSTALL_EXEC= $(INSTALL) -p -m 0755
-INSTALL_DATA= $(INSTALL) -p -m 0644
-
-# Utilities.
-INSTALL= install
-MKDIR= mkdir
-
-# == END OF USER SETTINGS. NO NEED TO CHANGE ANYTHING BELOW THIS LINE =========
-
-# Convenience platforms targets.
-PLATS= aix ansi bsd freebsd generic linux macosx mingw posix solaris
-
-# What to install.
-TO_BIN= lua luac
-TO_INC= lua.h luaconf.h lualib.h lauxlib.h ../etc/lua.hpp
-TO_LIB= liblua.a
-TO_MAN= lua.1 luac.1
-
-# Lua version and release.
-V= 5.1
-R= 5.1.3
-
-all: $(PLAT)
-
-$(PLATS) clean:
- cd src && $(MAKE) $@
-
-test: dummy
- src/lua test/hello.lua
-
-install: dummy
- cd src && $(MKDIR) -p $(INSTALL_BIN) $(INSTALL_INC) $(INSTALL_LIB) $(INSTALL_MAN) $(INSTALL_LMOD) $(INSTALL_CMOD)
- cd src && $(INSTALL_EXEC) $(TO_BIN) $(INSTALL_BIN)
- cd src && $(INSTALL_DATA) $(TO_INC) $(INSTALL_INC)
- cd src && $(INSTALL_DATA) $(TO_LIB) $(INSTALL_LIB)
- cd doc && $(INSTALL_DATA) $(TO_MAN) $(INSTALL_MAN)
-
-local:
- $(MAKE) install INSTALL_TOP=..
-
-none:
- @echo "Please do"
- @echo " make PLATFORM"
- @echo "where PLATFORM is one of these:"
- @echo " $(PLATS)"
- @echo "See INSTALL for complete instructions."
-
-# make may get confused with test/ and INSTALL in a case-insensitive OS
-dummy:
-
-# echo config parameters
-echo:
- @echo ""
- @echo "These are the parameters currently set in src/Makefile to build Lua $R:"
- @echo ""
- @cd src && $(MAKE) -s echo
- @echo ""
- @echo "These are the parameters currently set in Makefile to install Lua $R:"
- @echo ""
- @echo "PLAT = $(PLAT)"
- @echo "INSTALL_TOP = $(INSTALL_TOP)"
- @echo "INSTALL_BIN = $(INSTALL_BIN)"
- @echo "INSTALL_INC = $(INSTALL_INC)"
- @echo "INSTALL_LIB = $(INSTALL_LIB)"
- @echo "INSTALL_MAN = $(INSTALL_MAN)"
- @echo "INSTALL_LMOD = $(INSTALL_LMOD)"
- @echo "INSTALL_CMOD = $(INSTALL_CMOD)"
- @echo "INSTALL_EXEC = $(INSTALL_EXEC)"
- @echo "INSTALL_DATA = $(INSTALL_DATA)"
- @echo ""
- @echo "See also src/luaconf.h ."
- @echo ""
-
-# echo private config parameters
-pecho:
- @echo "V = $(V)"
- @echo "R = $(R)"
- @echo "TO_BIN = $(TO_BIN)"
- @echo "TO_INC = $(TO_INC)"
- @echo "TO_LIB = $(TO_LIB)"
- @echo "TO_MAN = $(TO_MAN)"
-
-# echo config parameters as Lua code
-# uncomment the last sed expression if you want nil instead of empty strings
-lecho:
- @echo "-- installation parameters for Lua $R"
- @echo "VERSION = '$V'"
- @echo "RELEASE = '$R'"
- @$(MAKE) echo | grep = | sed -e 's/= /= "/' -e 's/$$/"/' #-e 's/""/nil/'
- @echo "-- EOF"
-
-# list targets that do not create files (but not all makes understand .PHONY)
-.PHONY: all $(PLATS) clean test install local none dummy echo pecho lecho
-
-# (end of Makefile)
diff --git a/src/lua/README b/src/lua/README
deleted file mode 100644
index 11b4dff70..000000000
--- a/src/lua/README
+++ /dev/null
@@ -1,37 +0,0 @@
-README for Lua 5.1
-
-See INSTALL for installation instructions.
-See HISTORY for a summary of changes since the last released version.
-
-* What is Lua?
- ------------
- Lua is a powerful, light-weight programming language designed for extending
- applications. Lua is also frequently used as a general-purpose, stand-alone
- language. Lua is free software.
-
- For complete information, visit Lua's web site at http://www.lua.org/ .
- For an executive summary, see http://www.lua.org/about.html .
-
- Lua has been used in many different projects around the world.
- For a short list, see http://www.lua.org/uses.html .
-
-* Availability
- ------------
- Lua is freely available for both academic and commercial purposes.
- See COPYRIGHT and http://www.lua.org/license.html for details.
- Lua can be downloaded at http://www.lua.org/download.html .
-
-* Installation
- ------------
- Lua is implemented in pure ANSI C, and compiles unmodified in all known
- platforms that have an ANSI C compiler. In most Unix-like platforms, simply
- do "make" with a suitable target. See INSTALL for detailed instructions.
-
-* Origin
- ------
- Lua is developed at Lua.org, a laboratory of the Department of Computer
- Science of PUC-Rio (the Pontifical Catholic University of Rio de Janeiro
- in Brazil).
- For more information about the authors, see http://www.lua.org/authors.html .
-
-(end of README)
diff --git a/src/lua/etc/Makefile b/src/lua/etc/Makefile
deleted file mode 100644
index 3e54cac1d..000000000
--- a/src/lua/etc/Makefile
+++ /dev/null
@@ -1,46 +0,0 @@
-# makefile for Lua etc
-
-TOP= ..
-LIB= $(TOP)/src
-INC= $(TOP)/src
-BIN= $(TOP)/src
-SRC= $(TOP)/src
-TST= $(TOP)/test
-
-CC= gcc
-CFLAGS= -O2 -Wall -I$(INC) $(MYCFLAGS)
-MYCFLAGS=
-MYLDFLAGS= -Wl,-E
-MYLIBS= -lm
-#MYLIBS= -lm -Wl,-E -ldl -lreadline -lhistory -lncurses
-RM= rm -f
-
-default:
- @echo 'Please choose a target: min noparser one strict clean'
-
-min: min.c
- $(CC) $(CFLAGS) $@.c -L$(LIB) -llua $(MYLIBS)
- echo 'print"Hello there!"' | ./a.out
-
-noparser: noparser.o
- $(CC) noparser.o $(SRC)/lua.o -L$(LIB) -llua $(MYLIBS)
- $(BIN)/luac $(TST)/hello.lua
- -./a.out luac.out
- -./a.out -e'a=1'
-
-one:
- $(CC) $(CFLAGS) all.c $(MYLIBS)
- ./a.out $(TST)/hello.lua
-
-strict:
- -$(BIN)/lua -e 'print(a);b=2'
- -$(BIN)/lua -lstrict -e 'print(a)'
- -$(BIN)/lua -e 'function f() b=2 end f()'
- -$(BIN)/lua -lstrict -e 'function f() b=2 end f()'
-
-clean:
- $(RM) a.out core core.* *.o luac.out
-
-distclean: clean
-
-.PHONY: default min noparser one strict clean
diff --git a/src/lua/etc/README b/src/lua/etc/README
deleted file mode 100644
index 5149fc91d..000000000
--- a/src/lua/etc/README
+++ /dev/null
@@ -1,37 +0,0 @@
-This directory contains some useful files and code.
-Unlike the code in ../src, everything here is in the public domain.
-
-If any of the makes fail, you're probably not using the same libraries
-used to build Lua. Set MYLIBS in Makefile accordingly.
-
-all.c
- Full Lua interpreter in a single file.
- Do "make one" for a demo.
-
-lua.hpp
- Lua header files for C++ using 'extern "C"'.
-
-lua.ico
- A Lua icon for Windows (and web sites: save as favicon.ico).
- Drawn by hand by Markus Gritsch .
-
-lua.pc
- pkg-config data for Lua
-
-luavs.bat
- Script to build Lua under "Visual Studio .NET Command Prompt".
- Run it from the toplevel as etc\luavs.bat.
-
-min.c
- A minimal Lua interpreter.
- Good for learning and for starting your own.
- Do "make min" for a demo.
-
-noparser.c
- Linking with noparser.o avoids loading the parsing modules in lualib.a.
- Do "make noparser" for a demo.
-
-strict.lua
- Traps uses of undeclared global variables.
- Do "make strict" for a demo.
-
diff --git a/src/lua/etc/all.c b/src/lua/etc/all.c
deleted file mode 100644
index 4958d0a57..000000000
--- a/src/lua/etc/all.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-* all.c -- Lua core, libraries and interpreter in a single file
-** See Copyright Notice in lua.h
-*/
-
-#define luaall_c
-
-#include "lapi.c"
-#include "lcode.c"
-#include "ldebug.c"
-#include "ldo.c"
-#include "ldump.c"
-#include "lfunc.c"
-#include "lgc.c"
-#include "llex.c"
-#include "lmem.c"
-#include "lobject.c"
-#include "lopcodes.c"
-#include "lparser.c"
-#include "lstate.c"
-#include "lstring.c"
-#include "ltable.c"
-#include "ltm.c"
-#include "lundump.c"
-#include "lvm.c"
-#include "lzio.c"
-
-#include "lauxlib.c"
-#include "lbaselib.c"
-#include "ldblib.c"
-#include "liolib.c"
-#include "linit.c"
-#include "lmathlib.c"
-#include "loadlib.c"
-#include "loslib.c"
-#include "lstrlib.c"
-#include "ltablib.c"
-
-#include "lua.c"
diff --git a/src/lua/etc/lua.hpp b/src/lua/etc/lua.hpp
deleted file mode 100644
index 0f3b3d477..000000000
--- a/src/lua/etc/lua.hpp
+++ /dev/null
@@ -1,10 +0,0 @@
-// lua.hpp
-// Lua header files for C++
-// See Copyright Notice in lua.h
-// <<extern "C">> not supplied automatically because Lua also compiles as C++
-
-extern "C" {
-#include "lua.h"
-#include "lualib.h"
-#include "lauxlib.h"
-}
diff --git a/src/lua/etc/lua.ico b/src/lua/etc/lua.ico
deleted file mode 100644
index ccbabc4e2..000000000
Binary files a/src/lua/etc/lua.ico and /dev/null differ
diff --git a/src/lua/etc/lua.pc b/src/lua/etc/lua.pc
deleted file mode 100644
index 19a5c9153..000000000
--- a/src/lua/etc/lua.pc
+++ /dev/null
@@ -1,31 +0,0 @@
-# lua.pc -- pkg-config data for Lua
-
-# vars from install Makefile
-
-# grep '^V=' ../Makefile
-V= 5.1
-# grep '^R=' ../Makefile
-R= 5.1.3
-
-# grep '^INSTALL_.*=' ../Makefile | sed 's/INSTALL_TOP/prefix/'
-prefix= /usr/local
-INSTALL_BIN= ${prefix}/bin
-INSTALL_INC= ${prefix}/include
-INSTALL_LIB= ${prefix}/lib
-INSTALL_MAN= ${prefix}/man/man1
-INSTALL_LMOD= ${prefix}/share/lua/${V}
-INSTALL_CMOD= ${prefix}/lib/lua/${V}
-
-# canonical vars
-exec_prefix=${prefix}
-libdir=${exec_prefix}/lib
-includedir=${prefix}/include
-
-Name: Lua
-Description: An Extensible Extension Language
-Version: ${R}
-Requires:
-Libs: -L${libdir} -llua -lm
-Cflags: -I${includedir}
-
-# (end of lua.pc)
diff --git a/src/lua/etc/luavs.bat b/src/lua/etc/luavs.bat
deleted file mode 100644
index 08c2beddf..000000000
--- a/src/lua/etc/luavs.bat
+++ /dev/null
@@ -1,28 +0,0 @@
-@rem Script to build Lua under "Visual Studio .NET Command Prompt".
-@rem Do not run from this directory; run it from the toplevel: etc\luavs.bat .
-@rem It creates lua51.dll, lua51.lib, lua.exe, and luac.exe in src.
-@rem (contributed by David Manura and Mike Pall)
-
-@setlocal
-@set MYCOMPILE=cl /nologo /MD /O2 /W3 /c /D_CRT_SECURE_NO_DEPRECATE
-@set MYLINK=link /nologo
-@set MYMT=mt /nologo
-
-cd src
-%MYCOMPILE% /DLUA_BUILD_AS_DLL l*.c
-del lua.obj luac.obj
-%MYLINK% /DLL /out:lua51.dll l*.obj
-if exist lua51.dll.manifest^
- %MYMT% -manifest lua51.dll.manifest -outputresource:lua51.dll;2
-%MYCOMPILE% /DLUA_BUILD_AS_DLL lua.c
-%MYLINK% /out:lua.exe lua.obj lua51.lib
-if exist lua.exe.manifest^
- %MYMT% -manifest lua.exe.manifest -outputresource:lua.exe
-%MYCOMPILE% l*.c print.c
-del lua.obj linit.obj lbaselib.obj ldblib.obj liolib.obj lmathlib.obj^
- loslib.obj ltablib.obj lstrlib.obj loadlib.obj
-%MYLINK% /out:luac.exe *.obj
-if exist luac.exe.manifest^
- %MYMT% -manifest luac.exe.manifest -outputresource:luac.exe
-del *.obj *.manifest
-cd ..
diff --git a/src/lua/etc/min.c b/src/lua/etc/min.c
deleted file mode 100644
index bd7f3df6a..000000000
--- a/src/lua/etc/min.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
-* min.c -- a minimal Lua interpreter
-* loads stdin only with minimal error handling.
-* no interaction, and no standard library, only a "print" function.
-** See Copyright Notice in lua.h
-*/
-
-#include <stdio.h>
-
-#include "lua.h"
-#include "lauxlib.h"
-
-static int print(lua_State *L)
-{
- int n=lua_gettop(L);
- int i;
- for (i=1; i<=n; i++)
- {
- if (i>1) printf("\t");
- if (lua_isstring(L,i))
- printf("%s",lua_tostring(L,i));
- else if (lua_isnil(L,i))
- printf("%s","nil");
- else if (lua_isboolean(L,i))
- printf("%s",lua_toboolean(L,i) ? "true" : "false");
- else
- printf("%s:%p",luaL_typename(L,i),lua_topointer(L,i));
- }
- printf("\n");
- return 0;
-}
-
-int main(void)
-{
- lua_State *L=lua_open();
- lua_register(L,"print",print);
- if (luaL_dofile(L,NULL)!=0) fprintf(stderr,"%s\n",lua_tostring(L,-1));
- lua_close(L);
- return 0;
-}
diff --git a/src/lua/etc/noparser.c b/src/lua/etc/noparser.c
deleted file mode 100644
index 03103f7a6..000000000
--- a/src/lua/etc/noparser.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-* The code below can be used to make a Lua core that does not contain the
-* parsing modules (lcode, llex, lparser), which represent 35% of the total core.
-* You'll only be able to load binary files and strings, precompiled with luac.
-* (Of course, you'll have to build luac with the original parsing modules!)
-*
-* To use this module, simply compile it ("make noparser" does that) and list
-* its object file before the Lua libraries. The linker should then not load
-* the parsing modules. To try it, do "make luab".
-*
-* If you also want to avoid the dump module (ldump.o), define NODUMP.
-* #define NODUMP
-** See Copyright Notice in lua.h
-*/
-
-#define LUA_CORE
-
-#include "llex.h"
-#include "lparser.h"
-#include "lzio.h"
-
-LUAI_FUNC void luaX_init (lua_State *L) {
- UNUSED(L);
-}
-
-LUAI_FUNC Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff, const char *name) {
- UNUSED(z);
- UNUSED(buff);
- UNUSED(name);
- lua_pushliteral(L,"parser not loaded");
- lua_error(L);
- return NULL;
-}
-
-#ifdef NODUMP
-#include "lundump.h"
-
-LUAI_FUNC int luaU_dump (lua_State* L, const Proto* f, lua_Writer w, void* data, int strip) {
- UNUSED(f);
- UNUSED(w);
- UNUSED(data);
- UNUSED(strip);
-#if 1
- UNUSED(L);
- return 0;
-#else
- lua_pushliteral(L,"dumper not loaded");
- lua_error(L);
-#endif
-}
-#endif
diff --git a/src/lua/liblua.c b/src/lua/liblua.c
deleted file mode 100644
index 84a9ed198..000000000
--- a/src/lua/liblua.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-* all.c -- Lua core, libraries and interpreter in a single file
-** See Copyright Notice in lua.h
-*/
-
-#define luaall_c
-
-#include "lapi.c"
-#include "lcode.c"
-#include "ldebug.c"
-#include "ldo.c"
-#include "ldump.c"
-#include "lfunc.c"
-#include "lgc.c"
-#include "llex.c"
-#include "lmem.c"
-#include "lobject.c"
-#include "lopcodes.c"
-#include "lparser.c"
-#include "lstate.c"
-#include "lstring.c"
-#include "ltable.c"
-#include "ltm.c"
-#include "lundump.c"
-#include "lvm.c"
-#include "lzio.c"
-
-#include "lauxlib.c"
-#include "lbaselib.c"
-#include "ldblib.c"
-#include "liolib.c"
-#include "linit.c"
-#include "lmathlib.c"
-#include "loadlib.c"
-#include "loslib.c"
-#include "lstrlib.c"
-#include "ltablib.c"
-
diff --git a/src/lua/src/Makefile b/src/lua/src/Makefile
deleted file mode 100644
index 2ec0658ea..000000000
--- a/src/lua/src/Makefile
+++ /dev/null
@@ -1,184 +0,0 @@
-# makefile for building Lua
-# see ../INSTALL for installation instructions
-# see ../Makefile and luaconf.h for further customization
-
-# == CHANGE THE SETTINGS BELOW TO SUIT YOUR ENVIRONMENT =======================
-
-# Your platform. See PLATS for possible values.
-PLAT= none
-
-CC= gcc
-CFLAGS= -O2 -Wall $(MYCFLAGS)
-AR= ar rcu
-RANLIB= ranlib
-RM= rm -f
-LIBS= -lm $(MYLIBS)
-
-MYCFLAGS=
-MYLDFLAGS=
-MYLIBS=
-
-# == END OF USER SETTINGS. NO NEED TO CHANGE ANYTHING BELOW THIS LINE =========
-
-PLATS= aix ansi bsd freebsd generic linux macosx mingw posix solaris
-
-LUA_A= liblua.a
-CORE_O= lapi.o lcode.o ldebug.o ldo.o ldump.o lfunc.o lgc.o llex.o lmem.o \
- lobject.o lopcodes.o lparser.o lstate.o lstring.o ltable.o ltm.o \
- lundump.o lvm.o lzio.o
-LIB_O= lauxlib.o lbaselib.o ldblib.o liolib.o lmathlib.o loslib.o ltablib.o \
- lstrlib.o loadlib.o linit.o
-
-LUA_T= lua
-LUA_O= lua.o
-
-LUAC_T= luac
-LUAC_O= luac.o print.o
-
-ALL_O= $(CORE_O) $(LIB_O) $(LUA_O) $(LUAC_O)
-ALL_T= $(LUA_A) $(LUA_T) $(LUAC_T)
-ALL_A= $(LUA_A)
-
-default: $(PLAT)
-
-all: $(ALL_T)
-
-o: $(ALL_O)
-
-a: $(ALL_A)
-
-$(LUA_A): $(CORE_O) $(LIB_O)
- $(AR) $@ $?
- $(RANLIB) $@
-
-$(LUA_T): $(LUA_O) $(LUA_A)
- $(CC) -o $@ $(MYLDFLAGS) $(LUA_O) $(LUA_A) $(LIBS)
-
-$(LUAC_T): $(LUAC_O) $(LUA_A)
- $(CC) -o $@ $(MYLDFLAGS) $(LUAC_O) $(LUA_A) $(LIBS)
-
-clean:
- $(RM) $(ALL_T) $(ALL_O)
-
-distclean: clean
-
-depend:
- @$(CC) $(CFLAGS) -MM l*.c print.c
-
-echo:
- @echo "PLAT = $(PLAT)"
- @echo "CC = $(CC)"
- @echo "CFLAGS = $(CFLAGS)"
- @echo "AR = $(AR)"
- @echo "RANLIB = $(RANLIB)"
- @echo "RM = $(RM)"
- @echo "MYCFLAGS = $(MYCFLAGS)"
- @echo "MYLDFLAGS = $(MYLDFLAGS)"
- @echo "MYLIBS = $(MYLIBS)"
-
-# convenience targets for popular platforms
-
-none:
- @echo "Please choose a platform:"
- @echo " $(PLATS)"
-
-aix:
- $(MAKE) all CC="xlc" CFLAGS="-O2 -DLUA_USE_POSIX -DLUA_USE_DLOPEN" MYLIBS="-ldl" MYLDFLAGS="-brtl -bexpall"
-
-ansi:
- $(MAKE) all MYCFLAGS=-DLUA_ANSI
-
-bsd:
- $(MAKE) all MYCFLAGS="-DLUA_USE_POSIX -DLUA_USE_DLOPEN" MYLIBS="-Wl,-E"
-
-freebsd:
- $(MAKE) all MYCFLAGS="-DLUA_USE_LINUX" MYLIBS="-Wl,-E -lreadline"
-
-generic:
- $(MAKE) all MYCFLAGS=
-
-linux:
- $(MAKE) all MYCFLAGS=-DLUA_USE_LINUX MYLIBS="-Wl,-E -ldl -lreadline -lhistory -lncurses"
-
-macosx:
- $(MAKE) all MYCFLAGS=-DLUA_USE_LINUX MYLIBS="-lreadline"
-# use this on Mac OS X 10.3-
-# $(MAKE) all MYCFLAGS=-DLUA_USE_MACOSX
-
-mingw:
- $(MAKE) "LUA_A=lua51.dll" "LUA_T=lua.exe" \
- "AR=$(CC) -shared -o" "RANLIB=strip --strip-unneeded" \
- "MYCFLAGS=-DLUA_BUILD_AS_DLL" "MYLIBS=" "MYLDFLAGS=-s" lua.exe
- $(MAKE) "LUAC_T=luac.exe" luac.exe
-
-posix:
- $(MAKE) all MYCFLAGS=-DLUA_USE_POSIX
-
-solaris:
- $(MAKE) all MYCFLAGS="-DLUA_USE_POSIX -DLUA_USE_DLOPEN" MYLIBS="-ldl"
-
-# list targets that do not create files (but not all makes understand .PHONY)
-.PHONY: all $(PLATS) default o a clean depend echo none
-
-# DO NOT DELETE
-
-lapi.o: lapi.c lua.h luaconf.h lapi.h lobject.h llimits.h ldebug.h \
- lstate.h ltm.h lzio.h lmem.h ldo.h lfunc.h lgc.h lstring.h ltable.h \
- lundump.h lvm.h
-lauxlib.o: lauxlib.c lua.h luaconf.h lauxlib.h
-lbaselib.o: lbaselib.c lua.h luaconf.h lauxlib.h lualib.h
-lcode.o: lcode.c lua.h luaconf.h lcode.h llex.h lobject.h llimits.h \
- lzio.h lmem.h lopcodes.h lparser.h ldebug.h lstate.h ltm.h ldo.h lgc.h \
- ltable.h
-ldblib.o: ldblib.c lua.h luaconf.h lauxlib.h lualib.h
-ldebug.o: ldebug.c lua.h luaconf.h lapi.h lobject.h llimits.h lcode.h \
- llex.h lzio.h lmem.h lopcodes.h lparser.h ldebug.h lstate.h ltm.h ldo.h \
- lfunc.h lstring.h lgc.h ltable.h lvm.h
-ldo.o: ldo.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h ltm.h \
- lzio.h lmem.h ldo.h lfunc.h lgc.h lopcodes.h lparser.h lstring.h \
- ltable.h lundump.h lvm.h
-ldump.o: ldump.c lua.h luaconf.h lobject.h llimits.h lstate.h ltm.h \
- lzio.h lmem.h lundump.h
-lfunc.o: lfunc.c lua.h luaconf.h lfunc.h lobject.h llimits.h lgc.h lmem.h \
- lstate.h ltm.h lzio.h
-lgc.o: lgc.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h ltm.h \
- lzio.h lmem.h ldo.h lfunc.h lgc.h lstring.h ltable.h
-linit.o: linit.c lua.h luaconf.h lualib.h lauxlib.h
-liolib.o: liolib.c lua.h luaconf.h lauxlib.h lualib.h
-llex.o: llex.c lua.h luaconf.h ldo.h lobject.h llimits.h lstate.h ltm.h \
- lzio.h lmem.h llex.h lparser.h lstring.h lgc.h ltable.h
-lmathlib.o: lmathlib.c lua.h luaconf.h lauxlib.h lualib.h
-lmem.o: lmem.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h \
- ltm.h lzio.h lmem.h ldo.h
-loadlib.o: loadlib.c lua.h luaconf.h lauxlib.h lualib.h
-lobject.o: lobject.c lua.h luaconf.h ldo.h lobject.h llimits.h lstate.h \
- ltm.h lzio.h lmem.h lstring.h lgc.h lvm.h
-lopcodes.o: lopcodes.c lopcodes.h llimits.h lua.h luaconf.h
-loslib.o: loslib.c lua.h luaconf.h lauxlib.h lualib.h
-lparser.o: lparser.c lua.h luaconf.h lcode.h llex.h lobject.h llimits.h \
- lzio.h lmem.h lopcodes.h lparser.h ldebug.h lstate.h ltm.h ldo.h \
- lfunc.h lstring.h lgc.h ltable.h
-lstate.o: lstate.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h \
- ltm.h lzio.h lmem.h ldo.h lfunc.h lgc.h llex.h lstring.h ltable.h
-lstring.o: lstring.c lua.h luaconf.h lmem.h llimits.h lobject.h lstate.h \
- ltm.h lzio.h lstring.h lgc.h
-lstrlib.o: lstrlib.c lua.h luaconf.h lauxlib.h lualib.h
-ltable.o: ltable.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h \
- ltm.h lzio.h lmem.h ldo.h lgc.h ltable.h
-ltablib.o: ltablib.c lua.h luaconf.h lauxlib.h lualib.h
-ltm.o: ltm.c lua.h luaconf.h lobject.h llimits.h lstate.h ltm.h lzio.h \
- lmem.h lstring.h lgc.h ltable.h
-lua.o: lua.c lua.h luaconf.h lauxlib.h lualib.h
-luac.o: luac.c lua.h luaconf.h lauxlib.h ldo.h lobject.h llimits.h \
- lstate.h ltm.h lzio.h lmem.h lfunc.h lopcodes.h lstring.h lgc.h \
- lundump.h
-lundump.o: lundump.c lua.h luaconf.h ldebug.h lstate.h lobject.h \
- llimits.h ltm.h lzio.h lmem.h ldo.h lfunc.h lstring.h lgc.h lundump.h
-lvm.o: lvm.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h ltm.h \
- lzio.h lmem.h ldo.h lfunc.h lgc.h lopcodes.h lstring.h ltable.h lvm.h
-lzio.o: lzio.c lua.h luaconf.h llimits.h lmem.h lstate.h lobject.h ltm.h \
- lzio.h
-print.o: print.c ldebug.h lstate.h lua.h luaconf.h lobject.h llimits.h \
- ltm.h lzio.h lmem.h lopcodes.h lundump.h
-
-# (end of Makefile)
diff --git a/src/lua/src/lapi.c b/src/lua/src/lapi.c
deleted file mode 100644
index d7e8931e4..000000000
--- a/src/lua/src/lapi.c
+++ /dev/null
@@ -1,1085 +0,0 @@
-/*
-** $Id: lapi.c,v 2.55.1.3 2008/01/03 15:20:39 roberto Exp $
-** Lua API
-** See Copyright Notice in lua.h
-*/
-
-
-#include <assert.h>
-#include <math.h>
-#include <stdarg.h>
-#include <string.h>
-
-#define lapi_c
-#define LUA_CORE
-
-#include "lua.h"
-
-#include "lapi.h"
-#include "ldebug.h"
-#include "ldo.h"
-#include "lfunc.h"
-#include "lgc.h"
-#include "lmem.h"
-#include "lobject.h"
-#include "lstate.h"
-#include "lstring.h"
-#include "ltable.h"
-#include "ltm.h"
-#include "lundump.h"
-#include "lvm.h"
-
-
-
-const char lua_ident[] =
- "$Lua: " LUA_RELEASE " " LUA_COPYRIGHT " $\n"
- "$Authors: " LUA_AUTHORS " $\n"
- "$URL: www.lua.org $\n";
-
-
-
-#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base))
-
-#define api_checkvalidindex(L, i) api_check(L, (i) != luaO_nilobject)
-
-#define api_incr_top(L) {api_check(L, L->top < L->ci->top); L->top++;}
-
-
-
-static TValue *index2adr (lua_State *L, int idx) {
- if (idx > 0) {
- TValue *o = L->base + (idx - 1);
- api_check(L, idx <= L->ci->top - L->base);
- if (o >= L->top) return cast(TValue *, luaO_nilobject);
- else return o;
- }
- else if (idx > LUA_REGISTRYINDEX) {
- api_check(L, idx != 0 && -idx <= L->top - L->base);
- return L->top + idx;
- }
- else switch (idx) { /* pseudo-indices */
- case LUA_REGISTRYINDEX: return registry(L);
- case LUA_ENVIRONINDEX: {
- Closure *func = curr_func(L);
- sethvalue(L, &L->env, func->c.env);
- return &L->env;
- }
- case LUA_GLOBALSINDEX: return gt(L);
- default: {
- Closure *func = curr_func(L);
- idx = LUA_GLOBALSINDEX - idx;
- return (idx <= func->c.nupvalues)
- ? &func->c.upvalue[idx-1]
- : cast(TValue *, luaO_nilobject);
- }
- }
-}
-
-
-static Table *getcurrenv (lua_State *L) {
- if (L->ci == L->base_ci) /* no enclosing function? */
- return hvalue(gt(L)); /* use global table as environment */
- else {
- Closure *func = curr_func(L);
- return func->c.env;
- }
-}
-
-
-void luaA_pushobject (lua_State *L, const TValue *o) {
- setobj2s(L, L->top, o);
- api_incr_top(L);
-}
-
-
-LUA_API int lua_checkstack (lua_State *L, int size) {
- int res;
- lua_lock(L);
- if ((L->top - L->base + size) > LUAI_MAXCSTACK)
- res = 0; /* stack overflow */
- else {
- luaD_checkstack(L, size);
- if (L->ci->top < L->top + size)
- L->ci->top = L->top + size;
- res = 1;
- }
- lua_unlock(L);
- return res;
-}
-
-
-LUA_API void lua_xmove (lua_State *from, lua_State *to, int n) {
- int i;
- if (from == to) return;
- lua_lock(to);
- api_checknelems(from, n);
- api_check(from, G(from) == G(to));
- api_check(from, to->ci->top - to->top >= n);
- from->top -= n;
- for (i = 0; i < n; i++) {
- setobj2s(to, to->top++, from->top + i);
- }
- lua_unlock(to);
-}
-
-
-LUA_API void lua_setlevel (lua_State *from, lua_State *to) {
- to->nCcalls = from->nCcalls;
-}
-
-
-LUA_API lua_CFunction lua_atpanic (lua_State *L, lua_CFunction panicf) {
- lua_CFunction old;
- lua_lock(L);
- old = G(L)->panic;
- G(L)->panic = panicf;
- lua_unlock(L);
- return old;
-}
-
-
-LUA_API lua_State *lua_newthread (lua_State *L) {
- lua_State *L1;
- lua_lock(L);
- luaC_checkGC(L);
- L1 = luaE_newthread(L);
- setthvalue(L, L->top, L1);
- api_incr_top(L);
- lua_unlock(L);
- luai_userstatethread(L, L1);
- return L1;
-}
-
-
-
-/*
-** basic stack manipulation
-*/
-
-
-LUA_API int lua_gettop (lua_State *L) {
- return cast_int(L->top - L->base);
-}
-
-
-LUA_API void lua_settop (lua_State *L, int idx) {
- lua_lock(L);
- if (idx >= 0) {
- api_check(L, idx <= L->stack_last - L->base);
- while (L->top < L->base + idx)
- setnilvalue(L->top++);
- L->top = L->base + idx;
- }
- else {
- api_check(L, -(idx+1) <= (L->top - L->base));
- L->top += idx+1; /* `subtract' index (index is negative) */
- }
- lua_unlock(L);
-}
-
-
-LUA_API void lua_remove (lua_State *L, int idx) {
- StkId p;
- lua_lock(L);
- p = index2adr(L, idx);
- api_checkvalidindex(L, p);
- while (++p < L->top) setobjs2s(L, p-1, p);
- L->top--;
- lua_unlock(L);
-}
-
-
-LUA_API void lua_insert (lua_State *L, int idx) {
- StkId p;
- StkId q;
- lua_lock(L);
- p = index2adr(L, idx);
- api_checkvalidindex(L, p);
- for (q = L->top; q>p; q--) setobjs2s(L, q, q-1);
- setobjs2s(L, p, L->top);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_replace (lua_State *L, int idx) {
- StkId o;
- lua_lock(L);
- /* explicit test for incompatible code */
- if (idx == LUA_ENVIRONINDEX && L->ci == L->base_ci)
- luaG_runerror(L, "no calling environment");
- api_checknelems(L, 1);
- o = index2adr(L, idx);
- api_checkvalidindex(L, o);
- if (idx == LUA_ENVIRONINDEX) {
- Closure *func = curr_func(L);
- api_check(L, ttistable(L->top - 1));
- func->c.env = hvalue(L->top - 1);
- luaC_barrier(L, func, L->top - 1);
- }
- else {
- setobj(L, o, L->top - 1);
- if (idx < LUA_GLOBALSINDEX) /* function upvalue? */
- luaC_barrier(L, curr_func(L), L->top - 1);
- }
- L->top--;
- lua_unlock(L);
-}
-
-
-LUA_API void lua_pushvalue (lua_State *L, int idx) {
- lua_lock(L);
- setobj2s(L, L->top, index2adr(L, idx));
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-
-/*
-** access functions (stack -> C)
-*/
-
-
-LUA_API int lua_type (lua_State *L, int idx) {
- StkId o = index2adr(L, idx);
- return (o == luaO_nilobject) ? LUA_TNONE : ttype(o);
-}
-
-
-LUA_API const char *lua_typename (lua_State *L, int t) {
- UNUSED(L);
- return (t == LUA_TNONE) ? "no value" : luaT_typenames[t];
-}
-
-
-LUA_API int lua_iscfunction (lua_State *L, int idx) {
- StkId o = index2adr(L, idx);
- return iscfunction(o);
-}
-
-
-LUA_API int lua_isnumber (lua_State *L, int idx) {
- TValue n;
- const TValue *o = index2adr(L, idx);
- return tonumber(o, &n);
-}
-
-
-LUA_API int lua_isstring (lua_State *L, int idx) {
- int t = lua_type(L, idx);
- return (t == LUA_TSTRING || t == LUA_TNUMBER);
-}
-
-
-LUA_API int lua_isuserdata (lua_State *L, int idx) {
- const TValue *o = index2adr(L, idx);
- return (ttisuserdata(o) || ttislightuserdata(o));
-}
-
-
-LUA_API int lua_rawequal (lua_State *L, int index1, int index2) {
- StkId o1 = index2adr(L, index1);
- StkId o2 = index2adr(L, index2);
- return (o1 == luaO_nilobject || o2 == luaO_nilobject) ? 0
- : luaO_rawequalObj(o1, o2);
-}
-
-
-LUA_API int lua_equal (lua_State *L, int index1, int index2) {
- StkId o1, o2;
- int i;
- lua_lock(L); /* may call tag method */
- o1 = index2adr(L, index1);
- o2 = index2adr(L, index2);
- i = (o1 == luaO_nilobject || o2 == luaO_nilobject) ? 0 : equalobj(L, o1, o2);
- lua_unlock(L);
- return i;
-}
-
-
-LUA_API int lua_lessthan (lua_State *L, int index1, int index2) {
- StkId o1, o2;
- int i;
- lua_lock(L); /* may call tag method */
- o1 = index2adr(L, index1);
- o2 = index2adr(L, index2);
- i = (o1 == luaO_nilobject || o2 == luaO_nilobject) ? 0
- : luaV_lessthan(L, o1, o2);
- lua_unlock(L);
- return i;
-}
-
-
-
-LUA_API lua_Number lua_tonumber (lua_State *L, int idx) {
- TValue n;
- const TValue *o = index2adr(L, idx);
- if (tonumber(o, &n))
- return nvalue(o);
- else
- return 0;
-}
-
-
-LUA_API lua_Integer lua_tointeger (lua_State *L, int idx) {
- TValue n;
- const TValue *o = index2adr(L, idx);
- if (tonumber(o, &n)) {
- lua_Integer res;
- lua_Number num = nvalue(o);
- lua_number2integer(res, num);
- return res;
- }
- else
- return 0;
-}
-
-
-LUA_API int lua_toboolean (lua_State *L, int idx) {
- const TValue *o = index2adr(L, idx);
- return !l_isfalse(o);
-}
-
-
-LUA_API const char *lua_tolstring (lua_State *L, int idx, size_t *len) {
- StkId o = index2adr(L, idx);
- if (!ttisstring(o)) {
- lua_lock(L); /* `luaV_tostring' may create a new string */
- if (!luaV_tostring(L, o)) { /* conversion failed? */
- if (len != NULL) *len = 0;
- lua_unlock(L);
- return NULL;
- }
- luaC_checkGC(L);
- o = index2adr(L, idx); /* previous call may reallocate the stack */
- lua_unlock(L);
- }
- if (len != NULL) *len = tsvalue(o)->len;
- return svalue(o);
-}
-
-
-LUA_API size_t lua_objlen (lua_State *L, int idx) {
- StkId o = index2adr(L, idx);
- switch (ttype(o)) {
- case LUA_TSTRING: return tsvalue(o)->len;
- case LUA_TUSERDATA: return uvalue(o)->len;
- case LUA_TTABLE: return luaH_getn(hvalue(o));
- case LUA_TNUMBER: {
- size_t l;
- lua_lock(L); /* `luaV_tostring' may create a new string */
- l = (luaV_tostring(L, o) ? tsvalue(o)->len : 0);
- lua_unlock(L);
- return l;
- }
- default: return 0;
- }
-}
-
-
-LUA_API lua_CFunction lua_tocfunction (lua_State *L, int idx) {
- StkId o = index2adr(L, idx);
- return (!iscfunction(o)) ? NULL : clvalue(o)->c.f;
-}
-
-
-LUA_API void *lua_touserdata (lua_State *L, int idx) {
- StkId o = index2adr(L, idx);
- switch (ttype(o)) {
- case LUA_TUSERDATA: return (rawuvalue(o) + 1);
- case LUA_TLIGHTUSERDATA: return pvalue(o);
- default: return NULL;
- }
-}
-
-
-LUA_API lua_State *lua_tothread (lua_State *L, int idx) {
- StkId o = index2adr(L, idx);
- return (!ttisthread(o)) ? NULL : thvalue(o);
-}
-
-
-LUA_API const void *lua_topointer (lua_State *L, int idx) {
- StkId o = index2adr(L, idx);
- switch (ttype(o)) {
- case LUA_TTABLE: return hvalue(o);
- case LUA_TFUNCTION: return clvalue(o);
- case LUA_TTHREAD: return thvalue(o);
- case LUA_TUSERDATA:
- case LUA_TLIGHTUSERDATA:
- return lua_touserdata(L, idx);
- default: return NULL;
- }
-}
-
-
-
-/*
-** push functions (C -> stack)
-*/
-
-
-LUA_API void lua_pushnil (lua_State *L) {
- lua_lock(L);
- setnilvalue(L->top);
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_pushnumber (lua_State *L, lua_Number n) {
- lua_lock(L);
- setnvalue(L->top, n);
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_pushinteger (lua_State *L, lua_Integer n) {
- lua_lock(L);
- setnvalue(L->top, cast_num(n));
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_pushlstring (lua_State *L, const char *s, size_t len) {
- lua_lock(L);
- luaC_checkGC(L);
- setsvalue2s(L, L->top, luaS_newlstr(L, s, len));
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_pushstring (lua_State *L, const char *s) {
- if (s == NULL)
- lua_pushnil(L);
- else
- lua_pushlstring(L, s, strlen(s));
-}
-
-
-LUA_API const char *lua_pushvfstring (lua_State *L, const char *fmt,
- va_list argp) {
- const char *ret;
- lua_lock(L);
- luaC_checkGC(L);
- ret = luaO_pushvfstring(L, fmt, argp);
- lua_unlock(L);
- return ret;
-}
-
-
-LUA_API const char *lua_pushfstring (lua_State *L, const char *fmt, ...) {
- const char *ret;
- va_list argp;
- lua_lock(L);
- luaC_checkGC(L);
- va_start(argp, fmt);
- ret = luaO_pushvfstring(L, fmt, argp);
- va_end(argp);
- lua_unlock(L);
- return ret;
-}
-
-
-LUA_API void lua_pushcclosure (lua_State *L, lua_CFunction fn, int n) {
- Closure *cl;
- lua_lock(L);
- luaC_checkGC(L);
- api_checknelems(L, n);
- cl = luaF_newCclosure(L, n, getcurrenv(L));
- cl->c.f = fn;
- L->top -= n;
- while (n--)
- setobj2n(L, &cl->c.upvalue[n], L->top+n);
- setclvalue(L, L->top, cl);
- lua_assert(iswhite(obj2gco(cl)));
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_pushboolean (lua_State *L, int b) {
- lua_lock(L);
- setbvalue(L->top, (b != 0)); /* ensure that true is 1 */
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_pushlightuserdata (lua_State *L, void *p) {
- lua_lock(L);
- setpvalue(L->top, p);
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API int lua_pushthread (lua_State *L) {
- lua_lock(L);
- setthvalue(L, L->top, L);
- api_incr_top(L);
- lua_unlock(L);
- return (G(L)->mainthread == L);
-}
-
-
-
-/*
-** get functions (Lua -> stack)
-*/
-
-
-LUA_API void lua_gettable (lua_State *L, int idx) {
- StkId t;
- lua_lock(L);
- t = index2adr(L, idx);
- api_checkvalidindex(L, t);
- luaV_gettable(L, t, L->top - 1, L->top - 1);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_getfield (lua_State *L, int idx, const char *k) {
- StkId t;
- TValue key;
- lua_lock(L);
- t = index2adr(L, idx);
- api_checkvalidindex(L, t);
- setsvalue(L, &key, luaS_new(L, k));
- luaV_gettable(L, t, &key, L->top);
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_rawget (lua_State *L, int idx) {
- StkId t;
- lua_lock(L);
- t = index2adr(L, idx);
- api_check(L, ttistable(t));
- setobj2s(L, L->top - 1, luaH_get(hvalue(t), L->top - 1));
- lua_unlock(L);
-}
-
-
-LUA_API void lua_rawgeti (lua_State *L, int idx, int n) {
- StkId o;
- lua_lock(L);
- o = index2adr(L, idx);
- api_check(L, ttistable(o));
- setobj2s(L, L->top, luaH_getnum(hvalue(o), n));
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API void lua_createtable (lua_State *L, int narray, int nrec) {
- lua_lock(L);
- luaC_checkGC(L);
- sethvalue(L, L->top, luaH_new(L, narray, nrec));
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-LUA_API int lua_getmetatable (lua_State *L, int objindex) {
- const TValue *obj;
- Table *mt = NULL;
- int res;
- lua_lock(L);
- obj = index2adr(L, objindex);
- switch (ttype(obj)) {
- case LUA_TTABLE:
- mt = hvalue(obj)->metatable;
- break;
- case LUA_TUSERDATA:
- mt = uvalue(obj)->metatable;
- break;
- default:
- mt = G(L)->mt[ttype(obj)];
- break;
- }
- if (mt == NULL)
- res = 0;
- else {
- sethvalue(L, L->top, mt);
- api_incr_top(L);
- res = 1;
- }
- lua_unlock(L);
- return res;
-}
-
-
-LUA_API void lua_getfenv (lua_State *L, int idx) {
- StkId o;
- lua_lock(L);
- o = index2adr(L, idx);
- api_checkvalidindex(L, o);
- switch (ttype(o)) {
- case LUA_TFUNCTION:
- sethvalue(L, L->top, clvalue(o)->c.env);
- break;
- case LUA_TUSERDATA:
- sethvalue(L, L->top, uvalue(o)->env);
- break;
- case LUA_TTHREAD:
- setobj2s(L, L->top, gt(thvalue(o)));
- break;
- default:
- setnilvalue(L->top);
- break;
- }
- api_incr_top(L);
- lua_unlock(L);
-}
-
-
-/*
-** set functions (stack -> Lua)
-*/
-
-
-LUA_API void lua_settable (lua_State *L, int idx) {
- StkId t;
- lua_lock(L);
- api_checknelems(L, 2);
- t = index2adr(L, idx);
- api_checkvalidindex(L, t);
- luaV_settable(L, t, L->top - 2, L->top - 1);
- L->top -= 2; /* pop index and value */
- lua_unlock(L);
-}
-
-
-LUA_API void lua_setfield (lua_State *L, int idx, const char *k) {
- StkId t;
- TValue key;
- lua_lock(L);
- api_checknelems(L, 1);
- t = index2adr(L, idx);
- api_checkvalidindex(L, t);
- setsvalue(L, &key, luaS_new(L, k));
- luaV_settable(L, t, &key, L->top - 1);
- L->top--; /* pop value */
- lua_unlock(L);
-}
-
-
-LUA_API void lua_rawset (lua_State *L, int idx) {
- StkId t;
- lua_lock(L);
- api_checknelems(L, 2);
- t = index2adr(L, idx);
- api_check(L, ttistable(t));
- setobj2t(L, luaH_set(L, hvalue(t), L->top-2), L->top-1);
- luaC_barriert(L, hvalue(t), L->top-1);
- L->top -= 2;
- lua_unlock(L);
-}
-
-
-LUA_API void lua_rawseti (lua_State *L, int idx, int n) {
- StkId o;
- lua_lock(L);
- api_checknelems(L, 1);
- o = index2adr(L, idx);
- api_check(L, ttistable(o));
- setobj2t(L, luaH_setnum(L, hvalue(o), n), L->top-1);
- luaC_barriert(L, hvalue(o), L->top-1);
- L->top--;
- lua_unlock(L);
-}
-
-
-LUA_API int lua_setmetatable (lua_State *L, int objindex) {
- TValue *obj;
- Table *mt;
- lua_lock(L);
- api_checknelems(L, 1);
- obj = index2adr(L, objindex);
- api_checkvalidindex(L, obj);
- if (ttisnil(L->top - 1))
- mt = NULL;
- else {
- api_check(L, ttistable(L->top - 1));
- mt = hvalue(L->top - 1);
- }
- switch (ttype(obj)) {
- case LUA_TTABLE: {
- hvalue(obj)->metatable = mt;
- if (mt)
- luaC_objbarriert(L, hvalue(obj), mt);
- break;
- }
- case LUA_TUSERDATA: {
- uvalue(obj)->metatable = mt;
- if (mt)
- luaC_objbarrier(L, rawuvalue(obj), mt);
- break;
- }
- default: {
- G(L)->mt[ttype(obj)] = mt;
- break;
- }
- }
- L->top--;
- lua_unlock(L);
- return 1;
-}
-
-
-LUA_API int lua_setfenv (lua_State *L, int idx) {
- StkId o;
- int res = 1;
- lua_lock(L);
- api_checknelems(L, 1);
- o = index2adr(L, idx);
- api_checkvalidindex(L, o);
- api_check(L, ttistable(L->top - 1));
- switch (ttype(o)) {
- case LUA_TFUNCTION:
- clvalue(o)->c.env = hvalue(L->top - 1);
- break;
- case LUA_TUSERDATA:
- uvalue(o)->env = hvalue(L->top - 1);
- break;
- case LUA_TTHREAD:
- sethvalue(L, gt(thvalue(o)), hvalue(L->top - 1));
- break;
- default:
- res = 0;
- break;
- }
- if (res) luaC_objbarrier(L, gcvalue(o), hvalue(L->top - 1));
- L->top--;
- lua_unlock(L);
- return res;
-}
-
-
-/*
-** `load' and `call' functions (run Lua code)
-*/
-
-
-#define adjustresults(L,nres) \
- { if (nres == LUA_MULTRET && L->top >= L->ci->top) L->ci->top = L->top; }
-
-
-#define checkresults(L,na,nr) \
- api_check(L, (nr) == LUA_MULTRET || (L->ci->top - L->top >= (nr) - (na)))
-
-
-LUA_API void lua_call (lua_State *L, int nargs, int nresults) {
- StkId func;
- lua_lock(L);
- api_checknelems(L, nargs+1);
- checkresults(L, nargs, nresults);
- func = L->top - (nargs+1);
- luaD_call(L, func, nresults);
- adjustresults(L, nresults);
- lua_unlock(L);
-}
-
-
-
-/*
-** Execute a protected call.
-*/
-struct CallS { /* data to `f_call' */
- StkId func;
- int nresults;
-};
-
-
-static void f_call (lua_State *L, void *ud) {
- struct CallS *c = cast(struct CallS *, ud);
- luaD_call(L, c->func, c->nresults);
-}
-
-
-
-LUA_API int lua_pcall (lua_State *L, int nargs, int nresults, int errfunc) {
- struct CallS c;
- int status;
- ptrdiff_t func;
- lua_lock(L);
- api_checknelems(L, nargs+1);
- checkresults(L, nargs, nresults);
- if (errfunc == 0)
- func = 0;
- else {
- StkId o = index2adr(L, errfunc);
- api_checkvalidindex(L, o);
- func = savestack(L, o);
- }
- c.func = L->top - (nargs+1); /* function to be called */
- c.nresults = nresults;
- status = luaD_pcall(L, f_call, &c, savestack(L, c.func), func);
- adjustresults(L, nresults);
- lua_unlock(L);
- return status;
-}
-
-
-/*
-** Execute a protected C call.
-*/
-struct CCallS { /* data to `f_Ccall' */
- lua_CFunction func;
- void *ud;
-};
-
-
-static void f_Ccall (lua_State *L, void *ud) {
- struct CCallS *c = cast(struct CCallS *, ud);
- Closure *cl;
- cl = luaF_newCclosure(L, 0, getcurrenv(L));
- cl->c.f = c->func;
- setclvalue(L, L->top, cl); /* push function */
- api_incr_top(L);
- setpvalue(L->top, c->ud); /* push only argument */
- api_incr_top(L);
- luaD_call(L, L->top - 2, 0);
-}
-
-
-LUA_API int lua_cpcall (lua_State *L, lua_CFunction func, void *ud) {
- struct CCallS c;
- int status;
- lua_lock(L);
- c.func = func;
- c.ud = ud;
- status = luaD_pcall(L, f_Ccall, &c, savestack(L, L->top), 0);
- lua_unlock(L);
- return status;
-}
-
-
-LUA_API int lua_load (lua_State *L, lua_Reader reader, void *data,
- const char *chunkname) {
- ZIO z;
- int status;
- lua_lock(L);
- if (!chunkname) chunkname = "?";
- luaZ_init(L, &z, reader, data);
- status = luaD_protectedparser(L, &z, chunkname);
- lua_unlock(L);
- return status;
-}
-
-
-LUA_API int lua_dump (lua_State *L, lua_Writer writer, void *data) {
- int status;
- TValue *o;
- lua_lock(L);
- api_checknelems(L, 1);
- o = L->top - 1;
- if (isLfunction(o))
- status = luaU_dump(L, clvalue(o)->l.p, writer, data, 0);
- else
- status = 1;
- lua_unlock(L);
- return status;
-}
-
-
-LUA_API int lua_status (lua_State *L) {
- return L->status;
-}
-
-
-/*
-** Garbage-collection function
-*/
-
-LUA_API int lua_gc (lua_State *L, int what, int data) {
- int res = 0;
- global_State *g;
- lua_lock(L);
- g = G(L);
- switch (what) {
- case LUA_GCSTOP: {
- g->GCthreshold = MAX_LUMEM;
- break;
- }
- case LUA_GCRESTART: {
- g->GCthreshold = g->totalbytes;
- break;
- }
- case LUA_GCCOLLECT: {
- luaC_fullgc(L);
- break;
- }
- case LUA_GCCOUNT: {
- /* GC values are expressed in Kbytes: #bytes/2^10 */
- res = cast_int(g->totalbytes >> 10);
- break;
- }
- case LUA_GCCOUNTB: {
- res = cast_int(g->totalbytes & 0x3ff);
- break;
- }
- case LUA_GCSTEP: {
- lu_mem a = (cast(lu_mem, data) << 10);
- if (a <= g->totalbytes)
- g->GCthreshold = g->totalbytes - a;
- else
- g->GCthreshold = 0;
- while (g->GCthreshold <= g->totalbytes)
- luaC_step(L);
- if (g->gcstate == GCSpause) /* end of cycle? */
- res = 1; /* signal it */
- break;
- }
- case LUA_GCSETPAUSE: {
- res = g->gcpause;
- g->gcpause = data;
- break;
- }
- case LUA_GCSETSTEPMUL: {
- res = g->gcstepmul;
- g->gcstepmul = data;
- break;
- }
- default: res = -1; /* invalid option */
- }
- lua_unlock(L);
- return res;
-}
-
-
-
-/*
-** miscellaneous functions
-*/
-
-
-LUA_API int lua_error (lua_State *L) {
- lua_lock(L);
- api_checknelems(L, 1);
- luaG_errormsg(L);
- lua_unlock(L);
- return 0; /* to avoid warnings */
-}
-
-
-LUA_API int lua_next (lua_State *L, int idx) {
- StkId t;
- int more;
- lua_lock(L);
- t = index2adr(L, idx);
- api_check(L, ttistable(t));
- more = luaH_next(L, hvalue(t), L->top - 1);
- if (more) {
- api_incr_top(L);
- }
- else /* no more elements */
- L->top -= 1; /* remove key */
- lua_unlock(L);
- return more;
-}
-
-
-LUA_API void lua_concat (lua_State *L, int n) {
- lua_lock(L);
- api_checknelems(L, n);
- if (n >= 2) {
- luaC_checkGC(L);
- luaV_concat(L, n, cast_int(L->top - L->base) - 1);
- L->top -= (n-1);
- }
- else if (n == 0) { /* push empty string */
- setsvalue2s(L, L->top, luaS_newlstr(L, "", 0));
- api_incr_top(L);
- }
- /* else n == 1; nothing to do */
- lua_unlock(L);
-}
-
-
-LUA_API lua_Alloc lua_getallocf (lua_State *L, void **ud) {
- lua_Alloc f;
- lua_lock(L);
- if (ud) *ud = G(L)->ud;
- f = G(L)->frealloc;
- lua_unlock(L);
- return f;
-}
-
-
-LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud) {
- lua_lock(L);
- G(L)->ud = ud;
- G(L)->frealloc = f;
- lua_unlock(L);
-}
-
-
-LUA_API void *lua_newuserdata (lua_State *L, size_t size) {
- Udata *u;
- lua_lock(L);
- luaC_checkGC(L);
- u = luaS_newudata(L, size, getcurrenv(L));
- setuvalue(L, L->top, u);
- api_incr_top(L);
- lua_unlock(L);
- return u + 1;
-}
-
-
-
-
-static const char *aux_upvalue (StkId fi, int n, TValue **val) {
- Closure *f;
- if (!ttisfunction(fi)) return NULL;
- f = clvalue(fi);
- if (f->c.isC) {
- if (!(1 <= n && n <= f->c.nupvalues)) return NULL;
- *val = &f->c.upvalue[n-1];
- return "";
- }
- else {
- Proto *p = f->l.p;
- if (!(1 <= n && n <= p->sizeupvalues)) return NULL;
- *val = f->l.upvals[n-1]->v;
- return getstr(p->upvalues[n-1]);
- }
-}
-
-
-LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n) {
- const char *name;
- TValue *val;
- lua_lock(L);
- name = aux_upvalue(index2adr(L, funcindex), n, &val);
- if (name) {
- setobj2s(L, L->top, val);
- api_incr_top(L);
- }
- lua_unlock(L);
- return name;
-}
-
-
-LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n) {
- const char *name;
- TValue *val;
- StkId fi;
- lua_lock(L);
- fi = index2adr(L, funcindex);
- api_checknelems(L, 1);
- name = aux_upvalue(fi, n, &val);
- if (name) {
- L->top--;
- setobj(L, val, L->top);
- luaC_barrier(L, clvalue(fi), L->top);
- }
- lua_unlock(L);
- return name;
-}
-
diff --git a/src/lua/src/lapi.h b/src/lua/src/lapi.h
deleted file mode 100644
index 2c3fab244..000000000
--- a/src/lua/src/lapi.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
-** $Id: lapi.h,v 2.2.1.1 2007/12/27 13:02:25 roberto Exp $
-** Auxiliary functions from Lua API
-** See Copyright Notice in lua.h
-*/
-
-#ifndef lapi_h
-#define lapi_h
-
-
-#include "lobject.h"
-
-
-LUAI_FUNC void luaA_pushobject (lua_State *L, const TValue *o);
-
-#endif
diff --git a/src/lua/src/lauxlib.c b/src/lua/src/lauxlib.c
deleted file mode 100644
index 10f14e2c0..000000000
--- a/src/lua/src/lauxlib.c
+++ /dev/null
@@ -1,652 +0,0 @@
-/*
-** $Id: lauxlib.c,v 1.159.1.3 2008/01/21 13:20:51 roberto Exp $
-** Auxiliary functions for building Lua libraries
-** See Copyright Notice in lua.h
-*/
-
-
-#include <ctype.h>
-#include <errno.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-
-/* This file uses only the official API of Lua.
-** Any function declared here could be written as an application function.
-*/
-
-#define lauxlib_c
-#define LUA_LIB
-
-#include "lua.h"
-
-#include "lauxlib.h"
-
-
-#define FREELIST_REF 0 /* free list of references */
-
-
-/* convert a stack index to positive */
-#define abs_index(L, i) ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : \
- lua_gettop(L) + (i) + 1)
-
-
-/*
-** {======================================================
-** Error-report functions
-** =======================================================
-*/
-
-
-LUALIB_API int luaL_argerror (lua_State *L, int narg, const char *extramsg) {
- lua_Debug ar;
- if (!lua_getstack(L, 0, &ar)) /* no stack frame? */
- return luaL_error(L, "bad argument #%d (%s)", narg, extramsg);
- lua_getinfo(L, "n", &ar);
- if (strcmp(ar.namewhat, "method") == 0) {
- narg--; /* do not count `self' */
- if (narg == 0) /* error is in the self argument itself? */
- return luaL_error(L, "calling " LUA_QS " on bad self (%s)",
- ar.name, extramsg);
- }
- if (ar.name == NULL)
- ar.name = "?";
- return luaL_error(L, "bad argument #%d to " LUA_QS " (%s)",
- narg, ar.name, extramsg);
-}
-
-
-LUALIB_API int luaL_typerror (lua_State *L, int narg, const char *tname) {
- const char *msg = lua_pushfstring(L, "%s expected, got %s",
- tname, luaL_typename(L, narg));
- return luaL_argerror(L, narg, msg);
-}
-
-
-static void tag_error (lua_State *L, int narg, int tag) {
- luaL_typerror(L, narg, lua_typename(L, tag));
-}
-
-
-LUALIB_API void luaL_where (lua_State *L, int level) {
- lua_Debug ar;
- if (lua_getstack(L, level, &ar)) { /* check function at level */
- lua_getinfo(L, "Sl", &ar); /* get info about it */
- if (ar.currentline > 0) { /* is there info? */
- lua_pushfstring(L, "%s:%d: ", ar.short_src, ar.currentline);
- return;
- }
- }
- lua_pushliteral(L, ""); /* else, no information available... */
-}
-
-
-LUALIB_API int luaL_error (lua_State *L, const char *fmt, ...) {
- va_list argp;
- va_start(argp, fmt);
- luaL_where(L, 1);
- lua_pushvfstring(L, fmt, argp);
- va_end(argp);
- lua_concat(L, 2);
- return lua_error(L);
-}
-
-/* }====================================================== */
-
-
-LUALIB_API int luaL_checkoption (lua_State *L, int narg, const char *def,
- const char *const lst[]) {
- const char *name = (def) ? luaL_optstring(L, narg, def) :
- luaL_checkstring(L, narg);
- int i;
- for (i=0; lst[i]; i++)
- if (strcmp(lst[i], name) == 0)
- return i;
- return luaL_argerror(L, narg,
- lua_pushfstring(L, "invalid option " LUA_QS, name));
-}
-
-
-LUALIB_API int luaL_newmetatable (lua_State *L, const char *tname) {
- lua_getfield(L, LUA_REGISTRYINDEX, tname); /* get registry.name */
- if (!lua_isnil(L, -1)) /* name already in use? */
- return 0; /* leave previous value on top, but return 0 */
- lua_pop(L, 1);
- lua_newtable(L); /* create metatable */
- lua_pushvalue(L, -1);
- lua_setfield(L, LUA_REGISTRYINDEX, tname); /* registry.name = metatable */
- return 1;
-}
-
-
-LUALIB_API void *luaL_checkudata (lua_State *L, int ud, const char *tname) {
- void *p = lua_touserdata(L, ud);
- if (p != NULL) { /* value is a userdata? */
- if (lua_getmetatable(L, ud)) { /* does it have a metatable? */
- lua_getfield(L, LUA_REGISTRYINDEX, tname); /* get correct metatable */
- if (lua_rawequal(L, -1, -2)) { /* does it have the correct mt? */
- lua_pop(L, 2); /* remove both metatables */
- return p;
- }
- }
- }
- luaL_typerror(L, ud, tname); /* else error */
- return NULL; /* to avoid warnings */
-}
-
-
-LUALIB_API void luaL_checkstack (lua_State *L, int space, const char *mes) {
- if (!lua_checkstack(L, space))
- luaL_error(L, "stack overflow (%s)", mes);
-}
-
-
-LUALIB_API void luaL_checktype (lua_State *L, int narg, int t) {
- if (lua_type(L, narg) != t)
- tag_error(L, narg, t);
-}
-
-
-LUALIB_API void luaL_checkany (lua_State *L, int narg) {
- if (lua_type(L, narg) == LUA_TNONE)
- luaL_argerror(L, narg, "value expected");
-}
-
-
-LUALIB_API const char *luaL_checklstring (lua_State *L, int narg, size_t *len) {
- const char *s = lua_tolstring(L, narg, len);
- if (!s) tag_error(L, narg, LUA_TSTRING);
- return s;
-}
-
-
-LUALIB_API const char *luaL_optlstring (lua_State *L, int narg,
- const char *def, size_t *len) {
- if (lua_isnoneornil(L, narg)) {
- if (len)
- *len = (def ? strlen(def) : 0);
- return def;
- }
- else return luaL_checklstring(L, narg, len);
-}
-
-
-LUALIB_API lua_Number luaL_checknumber (lua_State *L, int narg) {
- lua_Number d = lua_tonumber(L, narg);
- if (d == 0 && !lua_isnumber(L, narg)) /* avoid extra test when d is not 0 */
- tag_error(L, narg, LUA_TNUMBER);
- return d;
-}
-
-
-LUALIB_API lua_Number luaL_optnumber (lua_State *L, int narg, lua_Number def) {
- return luaL_opt(L, luaL_checknumber, narg, def);
-}
-
-
-LUALIB_API lua_Integer luaL_checkinteger (lua_State *L, int narg) {
- lua_Integer d = lua_tointeger(L, narg);
- if (d == 0 && !lua_isnumber(L, narg)) /* avoid extra test when d is not 0 */
- tag_error(L, narg, LUA_TNUMBER);
- return d;
-}
-
-
-LUALIB_API lua_Integer luaL_optinteger (lua_State *L, int narg,
- lua_Integer def) {
- return luaL_opt(L, luaL_checkinteger, narg, def);
-}
-
-
-LUALIB_API int luaL_getmetafield (lua_State *L, int obj, const char *event) {
- if (!lua_getmetatable(L, obj)) /* no metatable? */
- return 0;
- lua_pushstring(L, event);
- lua_rawget(L, -2);
- if (lua_isnil(L, -1)) {
- lua_pop(L, 2); /* remove metatable and metafield */
- return 0;
- }
- else {
- lua_remove(L, -2); /* remove only metatable */
- return 1;
- }
-}
-
-
-LUALIB_API int luaL_callmeta (lua_State *L, int obj, const char *event) {
- obj = abs_index(L, obj);
- if (!luaL_getmetafield(L, obj, event)) /* no metafield? */
- return 0;
- lua_pushvalue(L, obj);
- lua_call(L, 1, 1);
- return 1;
-}
-
-
-LUALIB_API void (luaL_register) (lua_State *L, const char *libname,
- const luaL_Reg *l) {
- luaI_openlib(L, libname, l, 0);
-}
-
-
-static int libsize (const luaL_Reg *l) {
- int size = 0;
- for (; l->name; l++) size++;
- return size;
-}
-
-
-LUALIB_API void luaI_openlib (lua_State *L, const char *libname,
- const luaL_Reg *l, int nup) {
- if (libname) {
- int size = libsize(l);
- /* check whether lib already exists */
- luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 1);
- lua_getfield(L, -1, libname); /* get _LOADED[libname] */
- if (!lua_istable(L, -1)) { /* not found? */
- lua_pop(L, 1); /* remove previous result */
- /* try global variable (and create one if it does not exist) */
- if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL)
- luaL_error(L, "name conflict for module " LUA_QS, libname);
- lua_pushvalue(L, -1);
- lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
- }
- lua_remove(L, -2); /* remove _LOADED table */
- lua_insert(L, -(nup+1)); /* move library table to below upvalues */
- }
- for (; l->name; l++) {
- int i;
-    for (i=0; i<nup; i++)  /* copy upvalues to the top */
-      lua_pushvalue(L, -nup);
-    lua_pushcclosure(L, l->func, nup);
- lua_setfield(L, -(nup+2), l->name);
- }
- lua_pop(L, nup); /* remove upvalues */
-}
-
-
-
-/*
-** {======================================================
-** getn-setn: size for arrays
-** =======================================================
-*/
-
-#if defined(LUA_COMPAT_GETN)
-
-static int checkint (lua_State *L, int topop) {
- int n = (lua_type(L, -1) == LUA_TNUMBER) ? lua_tointeger(L, -1) : -1;
- lua_pop(L, topop);
- return n;
-}
-
-
-static void getsizes (lua_State *L) {
- lua_getfield(L, LUA_REGISTRYINDEX, "LUA_SIZES");
- if (lua_isnil(L, -1)) { /* no `size' table? */
- lua_pop(L, 1); /* remove nil */
- lua_newtable(L); /* create it */
- lua_pushvalue(L, -1); /* `size' will be its own metatable */
- lua_setmetatable(L, -2);
- lua_pushliteral(L, "kv");
- lua_setfield(L, -2, "__mode"); /* metatable(N).__mode = "kv" */
- lua_pushvalue(L, -1);
- lua_setfield(L, LUA_REGISTRYINDEX, "LUA_SIZES"); /* store in register */
- }
-}
-
-
-LUALIB_API void luaL_setn (lua_State *L, int t, int n) {
- t = abs_index(L, t);
- lua_pushliteral(L, "n");
- lua_rawget(L, t);
- if (checkint(L, 1) >= 0) { /* is there a numeric field `n'? */
- lua_pushliteral(L, "n"); /* use it */
- lua_pushinteger(L, n);
- lua_rawset(L, t);
- }
- else { /* use `sizes' */
- getsizes(L);
- lua_pushvalue(L, t);
- lua_pushinteger(L, n);
- lua_rawset(L, -3); /* sizes[t] = n */
- lua_pop(L, 1); /* remove `sizes' */
- }
-}
-
-
-LUALIB_API int luaL_getn (lua_State *L, int t) {
- int n;
- t = abs_index(L, t);
- lua_pushliteral(L, "n"); /* try t.n */
- lua_rawget(L, t);
- if ((n = checkint(L, 1)) >= 0) return n;
- getsizes(L); /* else try sizes[t] */
- lua_pushvalue(L, t);
- lua_rawget(L, -2);
- if ((n = checkint(L, 2)) >= 0) return n;
- return (int)lua_objlen(L, t);
-}
-
-#endif
-
-/* }====================================================== */
-
-
-
-LUALIB_API const char *luaL_gsub (lua_State *L, const char *s, const char *p,
- const char *r) {
- const char *wild;
- size_t l = strlen(p);
- luaL_Buffer b;
- luaL_buffinit(L, &b);
- while ((wild = strstr(s, p)) != NULL) {
- luaL_addlstring(&b, s, wild - s); /* push prefix */
- luaL_addstring(&b, r); /* push replacement in place of pattern */
- s = wild + l; /* continue after `p' */
- }
- luaL_addstring(&b, s); /* push last suffix */
- luaL_pushresult(&b);
- return lua_tostring(L, -1);
-}
-
-
-LUALIB_API const char *luaL_findtable (lua_State *L, int idx,
- const char *fname, int szhint) {
- const char *e;
- lua_pushvalue(L, idx);
- do {
- e = strchr(fname, '.');
- if (e == NULL) e = fname + strlen(fname);
- lua_pushlstring(L, fname, e - fname);
- lua_rawget(L, -2);
- if (lua_isnil(L, -1)) { /* no such field? */
- lua_pop(L, 1); /* remove this nil */
- lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */
- lua_pushlstring(L, fname, e - fname);
- lua_pushvalue(L, -2);
- lua_settable(L, -4); /* set new table into field */
- }
- else if (!lua_istable(L, -1)) { /* field has a non-table value? */
- lua_pop(L, 2); /* remove table and value */
- return fname; /* return problematic part of the name */
- }
- lua_remove(L, -2); /* remove previous table */
- fname = e + 1;
- } while (*e == '.');
- return NULL;
-}
-
-
-
-/*
-** {======================================================
-** Generic Buffer manipulation
-** =======================================================
-*/
-
-
-#define bufflen(B) ((B)->p - (B)->buffer)
-#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B)))
-
-#define LIMIT (LUA_MINSTACK/2)
-
-
-static int emptybuffer (luaL_Buffer *B) {
- size_t l = bufflen(B);
- if (l == 0) return 0; /* put nothing on stack */
- else {
- lua_pushlstring(B->L, B->buffer, l);
- B->p = B->buffer;
- B->lvl++;
- return 1;
- }
-}
-
-
-static void adjuststack (luaL_Buffer *B) {
- if (B->lvl > 1) {
- lua_State *L = B->L;
- int toget = 1; /* number of levels to concat */
- size_t toplen = lua_strlen(L, -1);
- do {
- size_t l = lua_strlen(L, -(toget+1));
- if (B->lvl - toget + 1 >= LIMIT || toplen > l) {
- toplen += l;
- toget++;
- }
- else break;
- } while (toget < B->lvl);
- lua_concat(L, toget);
- B->lvl = B->lvl - toget + 1;
- }
-}
-
-
-LUALIB_API char *luaL_prepbuffer (luaL_Buffer *B) {
- if (emptybuffer(B))
- adjuststack(B);
- return B->buffer;
-}
-
-
-LUALIB_API void luaL_addlstring (luaL_Buffer *B, const char *s, size_t l) {
- while (l--)
- luaL_addchar(B, *s++);
-}
-
-
-LUALIB_API void luaL_addstring (luaL_Buffer *B, const char *s) {
- luaL_addlstring(B, s, strlen(s));
-}
-
-
-LUALIB_API void luaL_pushresult (luaL_Buffer *B) {
- emptybuffer(B);
- lua_concat(B->L, B->lvl);
- B->lvl = 1;
-}
-
-
-LUALIB_API void luaL_addvalue (luaL_Buffer *B) {
- lua_State *L = B->L;
- size_t vl;
- const char *s = lua_tolstring(L, -1, &vl);
- if (vl <= bufffree(B)) { /* fit into buffer? */
- memcpy(B->p, s, vl); /* put it there */
- B->p += vl;
- lua_pop(L, 1); /* remove from stack */
- }
- else {
- if (emptybuffer(B))
- lua_insert(L, -2); /* put buffer before new value */
- B->lvl++; /* add new value into B stack */
- adjuststack(B);
- }
-}
-
-
-LUALIB_API void luaL_buffinit (lua_State *L, luaL_Buffer *B) {
- B->L = L;
- B->p = B->buffer;
- B->lvl = 0;
-}
-
-/* }====================================================== */
-
-
-LUALIB_API int luaL_ref (lua_State *L, int t) {
- int ref;
- t = abs_index(L, t);
- if (lua_isnil(L, -1)) {
- lua_pop(L, 1); /* remove from stack */
- return LUA_REFNIL; /* `nil' has a unique fixed reference */
- }
- lua_rawgeti(L, t, FREELIST_REF); /* get first free element */
- ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */
- lua_pop(L, 1); /* remove it from stack */
- if (ref != 0) { /* any free element? */
- lua_rawgeti(L, t, ref); /* remove it from list */
- lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */
- }
- else { /* no free elements */
- ref = (int)lua_objlen(L, t);
- ref++; /* create new reference */
- }
- lua_rawseti(L, t, ref);
- return ref;
-}
-
-
-LUALIB_API void luaL_unref (lua_State *L, int t, int ref) {
- if (ref >= 0) {
- t = abs_index(L, t);
- lua_rawgeti(L, t, FREELIST_REF);
- lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */
- lua_pushinteger(L, ref);
- lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */
- }
-}
-
-
-
-/*
-** {======================================================
-** Load functions
-** =======================================================
-*/
-
-typedef struct LoadF {
- int extraline;
- FILE *f;
- char buff[LUAL_BUFFERSIZE];
-} LoadF;
-
-
-static const char *getF (lua_State *L, void *ud, size_t *size) {
- LoadF *lf = (LoadF *)ud;
- (void)L;
- if (lf->extraline) {
- lf->extraline = 0;
- *size = 1;
- return "\n";
- }
- if (feof(lf->f)) return NULL;
- *size = fread(lf->buff, 1, sizeof(lf->buff), lf->f);
- return (*size > 0) ? lf->buff : NULL;
-}
-
-
-static int errfile (lua_State *L, const char *what, int fnameindex) {
- const char *serr = strerror(errno);
- const char *filename = lua_tostring(L, fnameindex) + 1;
- lua_pushfstring(L, "cannot %s %s: %s", what, filename, serr);
- lua_remove(L, fnameindex);
- return LUA_ERRFILE;
-}
-
-
-LUALIB_API int luaL_loadfile (lua_State *L, const char *filename) {
- LoadF lf;
- int status, readstatus;
- int c;
- int fnameindex = lua_gettop(L) + 1; /* index of filename on the stack */
- lf.extraline = 0;
- if (filename == NULL) {
- lua_pushliteral(L, "=stdin");
- lf.f = stdin;
- }
- else {
- lua_pushfstring(L, "@%s", filename);
- lf.f = fopen(filename, "r");
- if (lf.f == NULL) return errfile(L, "open", fnameindex);
- }
- c = getc(lf.f);
- if (c == '#') { /* Unix exec. file? */
- lf.extraline = 1;
- while ((c = getc(lf.f)) != EOF && c != '\n') ; /* skip first line */
- if (c == '\n') c = getc(lf.f);
- }
- if (c == LUA_SIGNATURE[0] && filename) { /* binary file? */
- lf.f = freopen(filename, "rb", lf.f); /* reopen in binary mode */
- if (lf.f == NULL) return errfile(L, "reopen", fnameindex);
- /* skip eventual `#!...' */
- while ((c = getc(lf.f)) != EOF && c != LUA_SIGNATURE[0]) ;
- lf.extraline = 0;
- }
- ungetc(c, lf.f);
- status = lua_load(L, getF, &lf, lua_tostring(L, -1));
- readstatus = ferror(lf.f);
- if (filename) fclose(lf.f); /* close file (even in case of errors) */
- if (readstatus) {
- lua_settop(L, fnameindex); /* ignore results from `lua_load' */
- return errfile(L, "read", fnameindex);
- }
- lua_remove(L, fnameindex);
- return status;
-}
-
-
-typedef struct LoadS {
- const char *s;
- size_t size;
-} LoadS;
-
-
-static const char *getS (lua_State *L, void *ud, size_t *size) {
- LoadS *ls = (LoadS *)ud;
- (void)L;
- if (ls->size == 0) return NULL;
- *size = ls->size;
- ls->size = 0;
- return ls->s;
-}
-
-
-LUALIB_API int luaL_loadbuffer (lua_State *L, const char *buff, size_t size,
- const char *name) {
- LoadS ls;
- ls.s = buff;
- ls.size = size;
- return lua_load(L, getS, &ls, name);
-}
-
-
-LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s) {
- return luaL_loadbuffer(L, s, strlen(s), s);
-}
-
-
-
-/* }====================================================== */
-
-
-static void *l_alloc (void *ud, void *ptr, size_t osize, size_t nsize) {
- (void)ud;
- (void)osize;
- if (nsize == 0) {
- free(ptr);
- return NULL;
- }
- else
- return realloc(ptr, nsize);
-}
-
-
-static int panic (lua_State *L) {
- (void)L; /* to avoid warnings */
- fprintf(stderr, "PANIC: unprotected error in call to Lua API (%s)\n",
- lua_tostring(L, -1));
- return 0;
-}
-
-
-LUALIB_API lua_State *luaL_newstate (void) {
- lua_State *L = lua_newstate(l_alloc, NULL);
- if (L) lua_atpanic(L, &panic);
- return L;
-}
-
diff --git a/src/lua/src/lbaselib.c b/src/lua/src/lbaselib.c
deleted file mode 100644
index eb06bcef8..000000000
--- a/src/lua/src/lbaselib.c
+++ /dev/null
@@ -1,651 +0,0 @@
-/*
-** $Id: lbaselib.c,v 1.191.1.4 2008/01/20 13:53:22 roberto Exp $
-** Basic library
-** See Copyright Notice in lua.h
-*/
-
-
-
-#include <ctype.h>
-#include <stdio.h>
-#include <stdlib.h>