aboutsummaryrefslogtreecommitdiff
path: root/APEX_1.4/shared/general/meshimport/include/utils
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /APEX_1.4/shared/general/meshimport/include/utils
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'APEX_1.4/shared/general/meshimport/include/utils')
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiAsc2Bin.h39
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiAsciiConversion.h80
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiAsciiConversion.inl446
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiFastXml.h85
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiFileBuf.h305
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiFileBuffer.h224
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiFileInterface.h44
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiFloatMath.h562
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiIOStream.h108
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiIOStream.inl342
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiInparser.h215
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiMapSet.h1009
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiMapSetInternal.h5915
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiMath.h2128
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiMemoryBuffer.h340
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiPlatformConfig.h269
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiStringDictionary.h255
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiStringTable.h145
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiSutil.h56
-rw-r--r--APEX_1.4/shared/general/meshimport/include/utils/MiVector.h569
20 files changed, 13136 insertions, 0 deletions
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiAsc2Bin.h b/APEX_1.4/shared/general/meshimport/include/utils/MiAsc2Bin.h
new file mode 100644
index 00000000..b6d78d8e
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiAsc2Bin.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef ASC2BIN_H
+#define ASC2BIN_H
+
+// types:
+//
+// f : 4 byte MiF32
+// d : 4 byte integer
+// c : 1 byte character
+// b : 1 byte integer
+// h : 2 byte integer
+// p : 4 byte const char *
+// x1 : 1 byte hex
+// x2 : 2 byte hex
+// x4 : 4 byte hex (etc)
+// example usage:
+//
+// Asc2Bin("1 2 3 4 5 6",1,"fffff",0);
+
+#include "MiPlatformConfig.h"
+
+namespace mimp
+{
+
+void * Asc2Bin(const char *source,const MiI32 count,const char *ctype,void *dest=0);
+
+};
+
+#endif // ASC2BIN_H
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiAsciiConversion.h b/APEX_1.4/shared/general/meshimport/include/utils/MiAsciiConversion.h
new file mode 100644
index 00000000..b38ec542
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiAsciiConversion.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef MI_ASCII_CONVERSION_H
+#define MI_ASCII_CONVERSION_H
+
+/*!
+\file
+\brief MiAsciiConversion namespace contains string/value helper functions
+*/
+
+
+#include "MiPlatformConfig.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <float.h>
+
+namespace mimp
+{
+
+ namespace MiAsc
+ {
+
+const MiU32 MiF32StrLen = 24;
+const MiU32 MiF64StrLen = 32;
+const MiU32 IntStrLen = 32;
+
+MI_INLINE bool isWhiteSpace(char c);
+MI_INLINE const char * skipNonWhiteSpace(const char *scan);
+MI_INLINE const char * skipWhiteSpace(const char *scan);
+
+//////////////////////////
+// str to value functions
+//////////////////////////
+MI_INLINE bool strToBool(const char *str, const char **endptr);
+MI_INLINE MiI8 strToI8(const char *str, const char **endptr);
+MI_INLINE MiI16 strToI16(const char *str, const char **endptr);
+MI_INLINE MiI32 strToI32(const char *str, const char **endptr);
+MI_INLINE MiI64 strToI64(const char *str, const char **endptr);
+MI_INLINE MiU8 strToU8(const char *str, const char **endptr);
+MI_INLINE MiU16 strToU16(const char *str, const char **endptr);
+MI_INLINE MiU32 strToU32(const char *str, const char **endptr);
+MI_INLINE MiU64 strToU64(const char *str, const char **endptr);
+MI_INLINE MiF32 strToF32(const char *str, const char **endptr);
+MI_INLINE MiF64 strToF64(const char *str, const char **endptr);
+MI_INLINE void strToF32s(MiF32 *v,MiU32 count,const char *str, const char**endptr);
+
+
+//////////////////////////
+// value to str functions
+//////////////////////////
+MI_INLINE const char * valueToStr( bool val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiI8 val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiI16 val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiI32 val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiI64 val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiU8 val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiU16 val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiU32 val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiU64 val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiF32 val, char *buf, MiU32 n );
+MI_INLINE const char * valueToStr( MiF64 val, char *buf, MiU32 n );
+
+#include "MiAsciiConversion.inl"
+
+ }; // end of MiAsc namespace
+
+}; // end of namespace
+
+
+#endif // MI_ASCII_CONVERSION_H
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiAsciiConversion.inl b/APEX_1.4/shared/general/meshimport/include/utils/MiAsciiConversion.inl
new file mode 100644
index 00000000..93f0fb8e
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiAsciiConversion.inl
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+/*!
+\file
+\brief MiAsciiConversion namespace contains string/value helper functions
+*/
+
+MI_INLINE bool isWhiteSpace(char c)
+{
+ bool ret = false;
+ if ( c == 32 || c == 9 || c == 13 || c == 10 || c == ',' ) ret = true;
+ return ret;
+}
+
+MI_INLINE const char * skipNonWhiteSpace(const char *scan)
+{
+ while ( !isWhiteSpace(*scan) && *scan) scan++;
+ if ( *scan == 0 ) scan = NULL;
+ return scan;
+}
+MI_INLINE const char * skipWhiteSpace(const char *scan)
+{
+ while ( isWhiteSpace(*scan) && *scan ) scan++;
+ if ( *scan == 0 ) scan = NULL;
+ return scan;
+}
+
+//////////////////////////
+// str to value functions
+//////////////////////////
+MI_INLINE bool strToBool(const char *str, const char **endptr)
+{
+ bool ret = false;
+ const char *begin = skipWhiteSpace(str);
+ const char *end = skipNonWhiteSpace(begin);
+
+ if( !end )
+ end = begin + strlen(str);
+
+ MiI32 len = (MiI32)(end - begin);
+ if ( MESH_IMPORT_STRING::strnicmp(begin,"true", (size_t)len) == 0 || MESH_IMPORT_STRING::strnicmp(begin,"1", (size_t)len) == 0 )
+ ret = true;
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE MiI8 strToI8(const char *str, const char **endptr)
+{
+ MiI8 ret;
+ const char *begin = skipWhiteSpace(str);
+ const char *end = skipNonWhiteSpace(begin);
+
+ if( !end )
+ end = begin + strlen(str);
+
+ if( strncmp(begin, "PX_MIN_I8", size_t(end-begin)) == 0)
+ ret = MI_MIN_I8;
+ else if( strncmp(begin, "PX_MAX_I8", size_t(end-begin)) == 0)
+ ret = MI_MAX_I8;
+ else
+ ret = (MiI8)strtol(begin, 0, 0); //FIXME
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE MiI16 strToI16(const char *str, const char **endptr)
+{
+ MiI16 ret;
+ const char *begin = skipWhiteSpace(str);
+ const char *end = skipNonWhiteSpace(begin);
+
+ if( !end )
+ end = begin + strlen(str);
+
+ if( strncmp(begin, "PX_MIN_I16", size_t(end-begin)) == 0)
+ ret = MI_MIN_I16;
+ else if( strncmp(begin, "PX_MAX_I16", size_t(end-begin)) == 0)
+ ret = MI_MAX_I16;
+ else
+ ret = (MiI16)strtol(begin, 0, 0); //FIXME
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE MiI32 strToI32(const char *str, const char **endptr)
+{
+ MiI32 ret;
+ const char *begin = skipWhiteSpace(str);
+ const char *end = skipNonWhiteSpace(begin);
+
+ if( !end )
+ end = begin + strlen(str);
+
+ if( strncmp(begin, "PX_MIN_I32", size_t(end-begin)) == 0)
+ ret = MI_MIN_I32;
+ else if( strncmp(begin, "PX_MAX_I32", size_t(end-begin)) == 0)
+ ret = MI_MAX_I32;
+ else
+ ret = (MiI32)strtol(begin, 0, 0); //FIXME
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE MiI64 strToI64(const char *str, const char **endptr)
+{
+ MiI64 ret;
+ const char *begin = skipWhiteSpace(str);
+
+ //FIXME
+#ifdef _WIN32 //MI_WINDOWS, MI_XBOX
+ ret = (MiI64)(MiU64)_strtoi64(begin,0,10);
+#else
+ ret = (MiI64)(MiU64)strtoll(begin,0,10);
+#endif
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE MiU8 strToU8(const char *str, const char **endptr)
+{
+ MiU8 ret;
+ const char *begin = skipWhiteSpace(str);
+
+ ret = (MiU8)strtoul(begin, 0, 0);
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE MiU16 strToU16(const char *str, const char **endptr)
+{
+ MiU16 ret;
+ const char *end;
+ const char *begin = skipWhiteSpace(str);
+
+ end = skipNonWhiteSpace(begin);
+ if( !end )
+ end = begin + strlen(str);
+
+ if( strncmp(begin, "PX_MAX_U16", size_t(end-begin)) == 0)
+ ret = MI_MAX_U16;
+ else
+ ret = (MiU16)strtoul(begin,0,0);
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE MiU32 strToU32(const char *str, const char **endptr)
+{
+ MiU32 ret;
+ const char *begin = skipWhiteSpace(str);
+ const char *end = skipNonWhiteSpace(begin);
+
+ if( !end )
+ end = begin + strlen(str);
+
+ if( strncmp(begin, "UINT32_MAX", size_t(end-begin)) == 0)
+ ret = MI_MAX_U32;
+ else
+ ret = (MiU32)strtoul(begin,0,0);
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE MiU64 strToU64(const char *str, const char **endptr)
+{
+ MiU64 ret;
+ const char *begin;
+ begin = skipWhiteSpace(str);
+
+ //FIXME
+#ifdef _WIN32 //MI_WINDOWS, MI_XBOX
+ ret = (MiU64)_strtoui64(begin,0,10);
+#else
+ ret = (MiU64)strtoull(begin,0,10);
+#endif
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE MiF32 strToF32(const char *str, const char **endptr)
+{
+ MiF32 ret;
+ const char *begin = skipWhiteSpace(str);
+ const char *end = skipNonWhiteSpace(begin);
+ char tmpStr[MiF32StrLen];
+
+ if( !end )
+ end = begin + strlen(str);
+
+ // str can be very very long, so we should copy the value to a tmp buffer
+// MESH_IMPORT_STRING::strncpy_s(tmpStr, MiF32StrLen, begin, size_t(end-begin));
+ MESH_IMPORT_STRING::strlcpy(tmpStr, MiF32StrLen, begin); // BRG1.4 - we can drop the last argument of the (former) strncpy_s, since we set a NUL terminator on the next line
+ tmpStr[end-begin] = 0;
+
+ if( strncmp(tmpStr, "PX_MIN_F32", size_t(end-begin)) == 0)
+ ret = -MI_MAX_F32;
+ else if( strncmp(tmpStr, "PX_MAX_F32", size_t(end-begin)) == 0)
+ ret = MI_MAX_F32;
+ else
+ ret = (MiF32)atof(tmpStr);
+
+#if DEBUGGING_MISMATCHES
+ MiF32 testRet = (MiF32)atof(begin);
+ if( ret != testRet )
+ {
+ MI_ASSERT(0 && "Inaccurate float string");
+ }
+#endif
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+
+MI_INLINE MiF64 strToF64(const char *str, const char **endptr)
+{
+ MiF64 ret;
+ const char *begin = skipWhiteSpace(str);
+ const char *end = skipNonWhiteSpace(begin);
+ char tmpStr[MiF64StrLen];
+
+ end = skipNonWhiteSpace(begin);
+
+ if( !end )
+ end = begin + strlen(str);
+
+ // str can be very very long, so we should copy the value to a tmp buffer
+// MESH_IMPORT_STRING::strncpy_s(tmpStr, MiF64StrLen, begin, size_t(end-begin));
+ MESH_IMPORT_STRING::strlcpy(tmpStr, MiF64StrLen, begin); // BRG1.4 - we can drop the last argument of the (former) strncpy_s, since we set a NUL terminator on the next line
+ tmpStr[end-begin] = 0;
+
+ if( strncmp(tmpStr, "PX_MIN_F64", size_t(end-begin)) == 0)
+ ret = -MI_MAX_F64;
+ else if( strncmp(tmpStr, "PX_MAX_F64", size_t(end-begin)) == 0)
+ ret = MI_MAX_F64;
+ else
+ ret = (MiF64)atof(tmpStr);
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+
+ return ret;
+}
+
+MI_INLINE void strToF32s(MiF32 *v,MiU32 count,const char *str, const char**endptr)
+{
+ const char *begin = skipWhiteSpace(str);
+
+ if ( *begin == '(' ) begin++;
+ for (MiU32 i=0; i<count && *begin; i++)
+ {
+ v[i] = (MiF32)strToF32(begin, &begin);
+ }
+
+ if( endptr )
+ *endptr = skipNonWhiteSpace(begin);
+}
+
+
+//////////////////////////
+// value to str functions
+//////////////////////////
+MI_INLINE const char * valueToStr( bool val, char *buf, MiU32 n )
+{
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s",val ? "true" : "false");
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiI8 val, char *buf, MiU32 n )
+{
+ if( val == MI_MIN_I8 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MIN_I8" );
+ else if( val == MI_MAX_I8 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MAX_I8" );
+ else
+ MESH_IMPORT_STRING::snprintf(buf, n, "%d", val);
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiI16 val, char *buf, MiU32 n )
+{
+ if( val == MI_MIN_I16 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MIN_I16" );
+ else if( val == MI_MAX_I16 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MAX_I16" );
+ else
+ MESH_IMPORT_STRING::snprintf(buf, n,"%d",val );
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiI32 val, char *buf, MiU32 n )
+{
+ if( val == MI_MIN_I32 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MIN_I32" );
+ else if( val == MI_MAX_I32 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MAX_I32" );
+ else
+ MESH_IMPORT_STRING::snprintf(buf, n,"%d",val );
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiI64 val, char *buf, MiU32 n )
+{
+ MESH_IMPORT_STRING::snprintf(buf, n,"%lld",val );
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiU8 val, char *buf, MiU32 n )
+{
+ MESH_IMPORT_STRING::snprintf(buf, n, "%u", val);
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiU16 val, char *buf, MiU32 n )
+{
+ if( val == MI_MAX_U16 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MAX_U16" );
+ else
+ MESH_IMPORT_STRING::snprintf(buf, n,"%u",val );
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiU32 val, char *buf, MiU32 n )
+{
+ if( val == MI_MAX_U32 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","UINT32_MAX" );
+ else
+ MESH_IMPORT_STRING::snprintf(buf, n,"%u",val );
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiU64 val, char *buf, MiU32 n )
+{
+ MESH_IMPORT_STRING::snprintf(buf, n,"%llu",val );
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiF32 val, char *buf, MiU32 n )
+{
+ if( !MESH_IMPORT_INTRINSICS::isFinite(val) )
+ {
+ MI_ASSERT( 0 && "invalid floating point" );
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","0" );
+ }
+ else if( val == -MI_MAX_F32 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MIN_F32" );
+ else if( val == MI_MAX_F32 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MAX_F32" );
+ else if ( val == 1 )
+ MESH_IMPORT_STRING::strlcpy(buf, n, "1");
+ else if ( val == 0 )
+ MESH_IMPORT_STRING::strlcpy(buf, n, "0");
+ else if ( val == - 1 )
+ MESH_IMPORT_STRING::strlcpy(buf, n, "-1");
+ else
+ {
+ MESH_IMPORT_STRING::snprintf(buf,n,"%.9g", (double)val ); // %g expects double
+ const char *dot = strchr(buf,'.');
+ const char *e = strchr(buf,'e');
+ if ( dot && !e )
+ {
+ MiI32 len = (MiI32)strlen(buf);
+ char *foo = &buf[len-1];
+ while ( *foo == '0' ) foo--;
+ if ( *foo == '.' )
+ *foo = 0;
+ else
+ foo[1] = 0;
+ }
+ }
+ return buf;
+}
+
+MI_INLINE const char * valueToStr( MiF64 val, char *buf, MiU32 n )
+{
+ if( !MESH_IMPORT_INTRINSICS::isFinite(val) )
+ {
+ MI_ASSERT( 0 && "invalid floating point" );
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","0" );
+ }
+ else if( val == -MI_MAX_F64 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MIN_F64" );
+ else if( val == MI_MAX_F64 )
+ MESH_IMPORT_STRING::snprintf(buf, n,"%s","PX_MAX_F64" );
+ else if ( val == 1 )
+ MESH_IMPORT_STRING::strlcpy(buf, n, "1");
+ else if ( val == 0 )
+ MESH_IMPORT_STRING::strlcpy(buf, n, "0");
+ else if ( val == - 1 )
+ MESH_IMPORT_STRING::strlcpy(buf, n, "-1");
+ else
+ {
+ MESH_IMPORT_STRING::snprintf(buf,n,"%.18g", val );
+ const char *dot = strchr(buf,'.');
+ const char *e = strchr(buf,'e');
+ if ( dot && !e )
+ {
+ MiI32 len = (MiI32)strlen(buf);
+ char *foo = &buf[len-1];
+ while ( *foo == '0' ) foo--;
+ if ( *foo == '.' )
+ *foo = 0;
+ else
+ foo[1] = 0;
+ }
+ }
+ return buf;
+}
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiFastXml.h b/APEX_1.4/shared/general/meshimport/include/utils/MiFastXml.h
new file mode 100644
index 00000000..de5ea32b
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiFastXml.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_FAST_XML_H
+#define MI_FAST_XML_H
+
+#include "MiPlatformConfig.h"
+#include "MiFileBuf.h" // defines the basic file stream interface.
+
+namespace mimp
+{
+
+class FastXml
+{
+public:
+ /***
+ * Callbacks to the user with the contents of the XML file properly digested.
+ */
+ class Callback
+ {
+ public:
+
+ virtual bool processComment(const char *comment) = 0; // encountered a comment in the XML
+
+ // 'element' is the name of the element that is being closed.
+ // depth is the recursion depth of this element.
+ // Return true to continue processing the XML file.
+ // Return false to stop processing the XML file; leaves the read pointer of the stream right after this close tag.
+ // The bool 'isError' indicates whether processing was stopped due to an error, or intentionally canceled early.
+ virtual bool processClose(const char *element,MiU32 depth,bool &isError) = 0; // process the 'close' indicator for a previously encountered element
+
+ // return true to continue processing the XML document, false to skip.
+ virtual bool processElement(
+ const char *elementName, // name of the element
+ MiI32 argc, // number of attributes pairs
+ const char **argv, // list of attributes.
+ const char *elementData, // element data, null if none
+ MiI32 lineno) = 0; // line number in the source XML file
+
+ // process the XML declaration header
+ virtual bool processXmlDeclaration(
+ MiI32 /*argc*/,
+ const char ** /*argv*/,
+ const char * /*elementData*/,
+ MiI32 /*lineno*/)
+ {
+ return true;
+ }
+
+ virtual bool processDoctype(
+ const char * /*rootElement*/, //Root element tag
+ const char * /*type*/, //SYSTEM or PUBLIC
+ const char * /*fpi*/, //Formal Public Identifier
+ const char * /*uri*/) //Path to schema file
+ {
+ return true;
+ }
+
+ virtual void * fastxml_malloc(MiU32 size) = 0;
+ virtual void fastxml_free(void *mem) = 0;
+
+ };
+
+ virtual bool processXml(MiFileBuf &buff,bool streamFromMemory=false) = 0;
+
+ virtual const char *getError(MiI32 &lineno) = 0; // report the reason for a parsing error, and the line number where it occurred.
+
+ virtual void release(void) = 0;
+};
+
+const char *getAttribute(const char *attr, MiI32 argc, const char **argv);
+
+FastXml * createFastXml(FastXml::Callback *iface);
+
+}; // end of namespace mimp
+
+#endif // MI_FAST_XML_H
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiFileBuf.h b/APEX_1.4/shared/general/meshimport/include/utils/MiFileBuf.h
new file mode 100644
index 00000000..5cda4608
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiFileBuf.h
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_FILE_BUF_H
+#define MI_FILE_BUF_H
+
+
+/** \addtogroup foundation
+ @{
+*/
+
+namespace mimp
+{
+
+MI_PUSH_PACK_DEFAULT
+
+/**
+\brief Callback class for data serialization.
+
+The user needs to supply an MiFileBuf implementation to a number of methods to allow the SDK to read or write
+chunks of binary data. This allows flexibility for the source/destination of the data. For example the MiFileBuf
+could store data in a file, memory buffer or custom file format.
+
+\note It is the user's responsibility to ensure that the data is written to the appropriate offset.
+
+*/
+class MiFileBuf
+{
+public:
+
+ enum EndianMode
+ {
+ ENDIAN_NONE = 0, // do no conversion for endian mode
+ ENDIAN_BIG = 1, // always read/write data as natively big endian (Power PC, etc.)
+ ENDIAN_LITTLE = 2, // always read/write data as natively little endian (Intel, etc.) Default Behavior!
+ };
+
+ MiFileBuf(EndianMode mode=ENDIAN_LITTLE)
+ {
+ setEndianMode(mode);
+ }
+
+ virtual ~MiFileBuf(void)
+ {
+
+ }
+
+ /**
+ \brief Declares a constant to seek to the end of the stream.
+ *
+ * Does not support streams longer than 32 bits
+ */
+ static const MiU32 STREAM_SEEK_END=0xFFFFFFFF;
+
+ enum OpenMode
+ {
+ OPEN_FILE_NOT_FOUND,
+ OPEN_READ_ONLY, // open file buffer stream for read only access
+ OPEN_WRITE_ONLY, // open file buffer stream for write only access
+ OPEN_READ_WRITE_NEW, // open a new file for both read/write access
+ OPEN_READ_WRITE_EXISTING // open an existing file for both read/write access
+ };
+
+ virtual OpenMode getOpenMode(void) const = 0;
+
+ bool isOpen(void) const
+ {
+ return getOpenMode()!=OPEN_FILE_NOT_FOUND;
+ }
+
+ enum SeekType
+ {
+ SEEKABLE_NO = 0,
+ SEEKABLE_READ = 0x1,
+ SEEKABLE_WRITE = 0x2,
+ SEEKABLE_READWRITE = 0x3
+ };
+
+ virtual SeekType isSeekable(void) const = 0;
+
+ void setEndianMode(EndianMode e)
+ {
+ mEndianMode = e;
+ if ( (e==ENDIAN_BIG && !isBigEndian() ) ||
+ (e==ENDIAN_LITTLE && isBigEndian() ) )
+ {
+ mEndianSwap = true;
+ }
+ else
+ {
+ mEndianSwap = false;
+ }
+ }
+
+ EndianMode getEndianMode(void) const
+ {
+ return mEndianMode;
+ }
+
+ virtual MiU32 getFileLength(void) const = 0;
+
+ /**
+ \brief Seeks the stream to a particular location for reading
+ *
+ * If the location passed exceeds the length of the stream, then it will seek to the end.
+ * Returns the location it ended up at (useful if you seek to the end) to get the file position
+ */
+ virtual MiU32 seekRead(MiU32 loc) = 0;
+
+ /**
+ \brief Seeks the stream to a particular location for writing
+ *
+ * If the location passed exceeds the length of the stream, then it will seek to the end.
+ * Returns the location it ended up at (useful if you seek to the end) to get the file position
+ */
+ virtual MiU32 seekWrite(MiU32 loc) = 0;
+
+ /**
+ \brief Reads from the stream into a buffer.
+
+ \param[out] mem The buffer to read the stream into.
+ \param[in] len The number of bytes to stream into the buffer
+
+ \return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream.
+ */
+ virtual MiU32 read(void *mem,MiU32 len) = 0;
+
+
+ /**
+ \brief Reads from the stream into a buffer but does not advance the read location.
+
+ \param[out] mem The buffer to read the stream into.
+ \param[in] len The number of bytes to stream into the buffer
+
+ \return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream.
+ */
+ virtual MiU32 peek(void *mem,MiU32 len) = 0;
+
+ /**
+ \brief Writes a buffer of memory to the stream
+
+ \param[in] mem The address of a buffer of memory to send to the stream.
+ \param[in] len The number of bytes to send to the stream.
+
+	\return Returns the actual number of bytes sent to the stream. If not equal to the length specified, then the stream is full or unable to write for some reason.
+ */
+ virtual MiU32 write(const void *mem,MiU32 len) = 0;
+
+ /**
+	\brief Reports the current stream location for read access.
+
+ \return Returns the current stream read location.
+ */
+ virtual MiU32 tellRead(void) const = 0;
+
+ /**
+ \brief Reports the current stream location for write access.
+
+ \return Returns the current stream write location.
+ */
+ virtual MiU32 tellWrite(void) const = 0;
+
+ /**
+ \brief Causes any temporarily cached data to be flushed to the stream.
+ */
+ virtual void flush(void) = 0;
+
+ /**
+ \brief Close the stream.
+ */
+ virtual void close(void) {}
+
+ void release(void)
+ {
+ delete this;
+ }
+
+ static MI_INLINE bool isBigEndian()
+ {
+ MiI32 i = 1;
+ return *((char*)&i)==0;
+ }
+
+ MI_INLINE void swap2Bytes(void* _data) const
+ {
+ char *data = (char *)_data;
+ char one_byte;
+ one_byte = data[0]; data[0] = data[1]; data[1] = one_byte;
+ }
+
+ MI_INLINE void swap4Bytes(void* _data) const
+ {
+ char *data = (char *)_data;
+ char one_byte;
+ one_byte = data[0]; data[0] = data[3]; data[3] = one_byte;
+ one_byte = data[1]; data[1] = data[2]; data[2] = one_byte;
+ }
+
+ MI_INLINE void swap8Bytes(void *_data) const
+ {
+ char *data = (char *)_data;
+ char one_byte;
+ one_byte = data[0]; data[0] = data[7]; data[7] = one_byte;
+ one_byte = data[1]; data[1] = data[6]; data[6] = one_byte;
+ one_byte = data[2]; data[2] = data[5]; data[5] = one_byte;
+ one_byte = data[3]; data[3] = data[4]; data[4] = one_byte;
+ }
+
+
+ MI_INLINE void storeDword(MiU32 v)
+ {
+ if ( mEndianSwap )
+ swap4Bytes(&v);
+
+ write(&v,sizeof(v));
+ }
+
+ MI_INLINE void storeFloat(MiF32 v)
+ {
+ if ( mEndianSwap )
+ swap4Bytes(&v);
+ write(&v,sizeof(v));
+ }
+
+ MI_INLINE void storeDouble(MiF64 v)
+ {
+ if ( mEndianSwap )
+ swap8Bytes(&v);
+ write(&v,sizeof(v));
+ }
+
+ MI_INLINE void storeByte(MiU8 b)
+ {
+ write(&b,sizeof(b));
+ }
+
+ MI_INLINE void storeWord(MiU16 w)
+ {
+ if ( mEndianSwap )
+ swap2Bytes(&w);
+ write(&w,sizeof(w));
+ }
+
+ MiU8 readByte(void)
+ {
+ MiU8 v=0;
+ read(&v,sizeof(v));
+ return v;
+ }
+
+ MiU16 readWord(void)
+ {
+ MiU16 v=0;
+ read(&v,sizeof(v));
+ if ( mEndianSwap )
+ swap2Bytes(&v);
+ return v;
+ }
+
+ MiU32 readDword(void)
+ {
+ MiU32 v=0;
+ read(&v,sizeof(v));
+ if ( mEndianSwap )
+ swap4Bytes(&v);
+ return v;
+ }
+
+ MiF32 readFloat(void)
+ {
+ MiF32 v=0;
+ read(&v,sizeof(v));
+ if ( mEndianSwap )
+ swap4Bytes(&v);
+ return v;
+ }
+
+ MiF64 readDouble(void)
+ {
+ MiF64 v=0;
+ read(&v,sizeof(v));
+ if ( mEndianSwap )
+ swap8Bytes(&v);
+ return v;
+ }
+
+private:
+ bool mEndianSwap; // whether or not the endian should be swapped on the current platform
+ EndianMode mEndianMode; // the current endian mode behavior for the stream
+};
+
+MI_POP_PACK
+
+
+}; // end of namespace
+
+#endif // MI_FILE_BUF_H
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiFileBuffer.h b/APEX_1.4/shared/general/meshimport/include/utils/MiFileBuffer.h
new file mode 100644
index 00000000..a12160a6
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiFileBuffer.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_FILE_BUFFER_H
+#define MI_FILE_BUFFER_H
+
+#include "Ps.h"
+#include "MiFileBuf.h"
+#include "PsUserAllocated.h"
+
+namespace mimp
+{
+//Use this class if you want to use your own allocator
+class MiFileBufferBase : public MiFileBuf
+{
+public:
+ MiFileBufferBase(const char *fileName,OpenMode mode)
+ {
+ mOpenMode = mode;
+ mFph = NULL;
+ mFileLength = 0;
+ mSeekRead = 0;
+ mSeekWrite = 0;
+ mSeekCurrent = 0;
+ switch ( mode )
+ {
+ case OPEN_READ_ONLY:
+ mimp::shdfnd::fopen_s(&mFph,fileName,"rb");
+ break;
+ case OPEN_WRITE_ONLY:
+ mimp::shdfnd::fopen_s(&mFph,fileName,"wb");
+ break;
+ case OPEN_READ_WRITE_NEW:
+ mimp::shdfnd::fopen_s(&mFph,fileName,"wb+");
+ break;
+ case OPEN_READ_WRITE_EXISTING:
+ mimp::shdfnd::fopen_s(&mFph,fileName,"rb+");
+ break;
+ }
+ if ( mFph )
+ {
+ fseek(mFph,0L,SEEK_END);
+ mFileLength = ftell(mFph);
+ fseek(mFph,0L,SEEK_SET);
+ }
+ else
+ {
+ mOpenMode = OPEN_FILE_NOT_FOUND;
+ }
+ }
+
+ virtual ~MiFileBufferBase()
+ {
+ close();
+ }
+
+ virtual void close()
+ {
+ if( mFph )
+ {
+ fclose(mFph);
+ mFph = 0;
+ }
+ }
+
+ virtual SeekType isSeekable(void) const
+ {
+ return mSeekType;
+ }
+
+ virtual MiU32 read(void* buffer, MiU32 size)
+ {
+ MiU32 ret = 0;
+ if ( mFph )
+ {
+ setSeekRead();
+ ret = (MiU32)::fread(buffer,1,size,mFph);
+ mSeekRead+=ret;
+ mSeekCurrent+=ret;
+ }
+ return ret;
+ }
+
+ virtual MiU32 peek(void* buffer, MiU32 size)
+ {
+ MiU32 ret = 0;
+ if ( mFph )
+ {
+ MiU32 loc = tellRead();
+ setSeekRead();
+ ret = (MiU32)::fread(buffer,1,size,mFph);
+ mSeekCurrent+=ret;
+ seekRead(loc);
+ }
+ return ret;
+ }
+
+ virtual MiU32 write(const void* buffer, MiU32 size)
+ {
+ MiU32 ret = 0;
+ if ( mFph )
+ {
+ setSeekWrite();
+ ret = (MiU32)::fwrite(buffer,1,size,mFph);
+ mSeekWrite+=ret;
+ mSeekCurrent+=ret;
+ if ( mSeekWrite > mFileLength )
+ {
+ mFileLength = mSeekWrite;
+ }
+ }
+ return ret;
+ }
+
+ virtual MiU32 tellRead(void) const
+ {
+ return mSeekRead;
+ }
+
+ virtual MiU32 tellWrite(void) const
+ {
+ return mSeekWrite;
+ }
+
+ virtual MiU32 seekRead(MiU32 loc)
+ {
+ mSeekRead = loc;
+ if ( mSeekRead > mFileLength )
+ {
+ mSeekRead = mFileLength;
+ }
+ return mSeekRead;
+ }
+
+ virtual MiU32 seekWrite(MiU32 loc)
+ {
+ mSeekWrite = loc;
+ if ( mSeekWrite > mFileLength )
+ {
+ mSeekWrite = mFileLength;
+ }
+ return mSeekWrite;
+ }
+
+ virtual void flush(void)
+ {
+ if ( mFph )
+ {
+ ::fflush(mFph);
+ }
+ }
+
+ virtual OpenMode getOpenMode(void) const
+ {
+ return mOpenMode;
+ }
+
+ virtual MiU32 getFileLength(void) const
+ {
+ return mFileLength;
+ }
+
+private:
+ // Moves the actual file pointer to the current read location
+ void setSeekRead(void)
+ {
+ if ( mSeekRead != mSeekCurrent && mFph )
+ {
+ if ( mSeekRead >= mFileLength )
+ {
+ ::fseek(mFph,0L,SEEK_END);
+ }
+ else
+ {
+ ::fseek(mFph,mSeekRead,SEEK_SET);
+ }
+ mSeekCurrent = mSeekRead = ::ftell(mFph);
+ }
+ }
+ // Moves the actual file pointer to the current write location
+ void setSeekWrite(void)
+ {
+ if ( mSeekWrite != mSeekCurrent && mFph )
+ {
+ if ( mSeekWrite >= mFileLength )
+ {
+ ::fseek(mFph,0L,SEEK_END);
+ }
+ else
+ {
+ ::fseek(mFph,mSeekWrite,SEEK_SET);
+ }
+ mSeekCurrent = mSeekWrite = ::ftell(mFph);
+ }
+ }
+
+
+ FILE *mFph;
+ MiU32 mSeekRead;
+ MiU32 mSeekWrite;
+ MiU32 mSeekCurrent;
+ MiU32 mFileLength;
+ SeekType mSeekType;
+ OpenMode mOpenMode;
+};
+
+//Use this class if you want to use PhysX memory allocator
+class MiFileBuffer: public MiFileBufferBase, public MeshImportAllocated
+{
+public:
+ MiFileBuffer(const char *fileName,OpenMode mode): MiFileBufferBase(fileName, mode) {}
+};
+
+}
+
+#endif // MI_FILE_BUFFER_H
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiFileInterface.h b/APEX_1.4/shared/general/meshimport/include/utils/MiFileInterface.h
new file mode 100644
index 00000000..46b59b81
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiFileInterface.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef FILE_INTERFACE_H
+
+#define FILE_INTERFACE_H
+
+#include <stdio.h>
+
+namespace mimp
+{
+
+// Opaque handle passed to the fi_* file routines below; mInterface points at the
+// private implementation object owned by those routines.
+typedef struct
+{
+ void *mInterface;
+} FILE_INTERFACE;
+
+FILE_INTERFACE * fi_fopen(const char *fname,const char *spec,void *mem,size_t len);
+size_t fi_fclose(FILE_INTERFACE *file);
+size_t fi_fread(void *buffer,size_t size,size_t count,FILE_INTERFACE *fph);
+size_t fi_fwrite(const void *buffer,size_t size,size_t count,FILE_INTERFACE *fph);
+size_t fi_fprintf(FILE_INTERFACE *fph,const char *fmt,...);
+size_t fi_fflush(FILE_INTERFACE *fph);
+size_t fi_fseek(FILE_INTERFACE *fph,size_t loc,size_t mode);
+size_t fi_ftell(FILE_INTERFACE *fph);
+size_t fi_fputc(char c,FILE_INTERFACE *fph);
+size_t fi_fputs(const char *str,FILE_INTERFACE *fph);
+size_t fi_feof(FILE_INTERFACE *fph);
+size_t fi_ferror(FILE_INTERFACE *fph);
+void fi_clearerr(FILE_INTERFACE *fph);
+void * fi_getMemBuffer(FILE_INTERFACE *fph,size_t *outputLength); // return the buffer and length of the file.
+
+
+};
+
+#endif
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiFloatMath.h b/APEX_1.4/shared/general/meshimport/include/utils/MiFloatMath.h
new file mode 100644
index 00000000..8ace00c6
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiFloatMath.h
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef FLOAT_MATH_LIB_H
+
+#define FLOAT_MATH_LIB_H
+
+// a set of routines that let you do common 3d math
+// operations without any vector, matrix, or quaternion
+// classes or templates.
+//
+// a vector (or point) is a 'MiF32 *' to 3 floating point numbers.
+// a matrix is a 'MiF32 *' to an array of 16 floating point numbers representing a 4x4 transformation matrix compatible with D3D or OGL
+// a quaternion is a 'MiF32 *' to 4 floats representing a quaternion x,y,z,w
+//
+//
+
+
+#include <float.h>
+#include "MiPlatformConfig.h"
+
+namespace mimp
+{
+
+// Bit flags (one distinct bit per face) identifying which min/max bound of an AABB
+// a point lies outside of; combined into the clip codes returned by fm_clipTestPoint*.
+enum FM_ClipState
+{
+ FMCS_XMIN = (1<<0),
+ FMCS_XMAX = (1<<1),
+ FMCS_YMIN = (1<<2),
+ FMCS_YMAX = (1<<3),
+ FMCS_ZMIN = (1<<4),
+ FMCS_ZMAX = (1<<5),
+};
+
+// Bit flags naming the three cardinal axes; fm_getDominantAxis returns the axis a
+// normal vector is most closely aligned with.
+enum FM_Axis
+{
+ FM_XAXIS = (1<<0),
+ FM_YAXIS = (1<<1),
+ FM_ZAXIS = (1<<2)
+};
+
+// Classifies a location relative to a line segment: at the start point, somewhere
+// along the interior, or at the end point (used by fm_distancePointLineSegment).
+enum LineSegmentType
+{
+ LS_START,
+ LS_MIDDLE,
+ LS_END
+};
+
+
+const MiF32 FM_PI = 3.1415926535897932384626433832795028841971693993751f;
+const MiF32 FM_DEG_TO_RAD = ((2.0f * FM_PI) / 360.0f);
+const MiF32 FM_RAD_TO_DEG = (360.0f / (2.0f * FM_PI));
+
+//***************** Float versions
+//***
+//*** vectors are assumed to be 3 floats or 3 doubles representing X, Y, Z
+//*** quaternions are assumed to be 4 floats or 4 doubles representing X,Y,Z,W
+//*** matrices are assumed to be 16 floats or 16 doubles representing a standard D3D or OpenGL style 4x4 matrix
+//*** bounding volumes are expressed as two sets of 3 floats/MiF64 representing bmin(x,y,z) and bmax(x,y,z)
+//*** Plane equations are assumed to be 4 floats or 4 doubles representing Ax,By,Cz,D
+
+FM_Axis fm_getDominantAxis(const MiF32 normal[3]);
+FM_Axis fm_getDominantAxis(const MiF64 normal[3]);
+
+void fm_decomposeTransform(const MiF32 local_transform[16],MiF32 trans[3],MiF32 rot[4],MiF32 scale[3]);
+void fm_decomposeTransform(const MiF64 local_transform[16],MiF64 trans[3],MiF64 rot[4],MiF64 scale[3]);
+
+void fm_multiplyTransform(const MiF32 *pA,const MiF32 *pB,MiF32 *pM);
+void fm_multiplyTransform(const MiF64 *pA,const MiF64 *pB,MiF64 *pM);
+
+void fm_inverseTransform(const MiF32 matrix[16],MiF32 inverse_matrix[16]);
+void fm_inverseTransform(const MiF64 matrix[16],MiF64 inverse_matrix[16]);
+
+void fm_identity(MiF32 matrix[16]); // set 4x4 matrix to identity.
+void fm_identity(MiF64 matrix[16]); // set 4x4 matrix to identity.
+
+void fm_inverseRT(const MiF32 matrix[16], const MiF32 pos[3], MiF32 t[3]); // inverse rotate translate the point.
+void fm_inverseRT(const MiF64 matrix[16],const MiF64 pos[3],MiF64 t[3]); // inverse rotate translate the point.
+
+void fm_transform(const MiF32 matrix[16], const MiF32 pos[3], MiF32 t[3]); // rotate and translate this point.
+void fm_transform(const MiF64 matrix[16],const MiF64 pos[3],MiF64 t[3]); // rotate and translate this point.
+
+MiF32 fm_getDeterminant(const MiF32 matrix[16]);
+MiF64 fm_getDeterminant(const MiF64 matrix[16]);
+
+// Presumably extracts the submatrix formed by deleting row 'ki' and column 'kj' — confirm in the implementation.
+void fm_getSubMatrix(MiI32 ki,MiI32 kj,MiF32 pDst[16],const MiF32 matrix[16]);
+// NOTE(review): this overload writes MiF64 but reads 'const MiF32 matrix[16]' — looks like a
+// copy/paste slip (every other F32/F64 pair in this header is type-consistent). Changing the
+// declaration alone would break the out-of-line definition, so verify against the .cpp first.
+void fm_getSubMatrix(MiI32 ki,MiI32 kj,MiF64 pDst[16],const MiF32 matrix[16]);
+
+void fm_rotate(const MiF32 matrix[16],const MiF32 pos[3],MiF32 t[3]); // only rotate the point by a 4x4 matrix, don't translate.
+void fm_rotate(const MiF64 matri[16],const MiF64 pos[3],MiF64 t[3]); // only rotate the point by a 4x4 matrix, don't translate.
+
+void fm_eulerToMatrix(MiF32 ax,MiF32 ay,MiF32 az,MiF32 matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero)
+void fm_eulerToMatrix(MiF64 ax,MiF64 ay,MiF64 az,MiF64 matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero)
+
+void fm_getAABB(MiU32 vcount,const MiF32 *points,MiU32 pstride,MiF32 bmin[3],MiF32 bmax[3]);
+void fm_getAABB(MiU32 vcount,const MiF64 *points,MiU32 pstride,MiF64 bmin[3],MiF64 bmax[3]);
+
+void fm_getAABBCenter(const MiF32 bmin[3],const MiF32 bmax[3],MiF32 center[3]);
+void fm_getAABBCenter(const MiF64 bmin[3],const MiF64 bmax[3],MiF64 center[3]);
+
+void fm_transformAABB(const MiF32 bmin[3],const MiF32 bmax[3],const MiF32 matrix[16],MiF32 tbmin[3],MiF32 tbmax[3]);
+void fm_transformAABB(const MiF64 bmin[3],const MiF64 bmax[3],const MiF64 matrix[16],MiF64 tbmin[3],MiF64 tbmax[3]);
+
+void fm_eulerToQuat(MiF32 x,MiF32 y,MiF32 z,MiF32 quat[4]); // convert euler angles to quaternion.
+void fm_eulerToQuat(MiF64 x,MiF64 y,MiF64 z,MiF64 quat[4]); // convert euler angles to quaternion.
+
+void fm_quatToEuler(const MiF32 quat[4],MiF32 &ax,MiF32 &ay,MiF32 &az);
+void fm_quatToEuler(const MiF64 quat[4],MiF64 &ax,MiF64 &ay,MiF64 &az);
+
+void fm_eulerToQuat(const MiF32 euler[3],MiF32 quat[4]); // convert euler angles to quaternion. Angles must be radians not degrees!
+void fm_eulerToQuat(const MiF64 euler[3],MiF64 quat[4]); // convert euler angles to quaternion.
+
+void fm_scale(MiF32 x,MiF32 y,MiF32 z,MiF32 matrix[16]); // apply scale to the matrix.
+void fm_scale(MiF64 x,MiF64 y,MiF64 z,MiF64 matrix[16]); // apply scale to the matrix.
+
+void fm_eulerToQuatDX(MiF32 x,MiF32 y,MiF32 z,MiF32 quat[4]); // convert euler angles to quaternion using the DirectX rotation-order convention
+void fm_eulerToQuatDX(MiF64 x,MiF64 y,MiF64 z,MiF64 quat[4]); // convert euler angles to quaternion using the DirectX rotation-order convention
+
+void fm_eulerToMatrixDX(MiF32 x,MiF32 y,MiF32 z,MiF32 matrix[16]); // convert euler angles to a matrix using the DirectX rotation-order convention.
+void fm_eulerToMatrixDX(MiF64 x,MiF64 y,MiF64 z,MiF64 matrix[16]); // convert euler angles to a matrix using the DirectX rotation-order convention.
+
+void fm_quatToMatrix(const MiF32 quat[4],MiF32 matrix[16]); // convert quaternion rotation to matrix, translation set to zero.
+void fm_quatToMatrix(const MiF64 quat[4],MiF64 matrix[16]); // convert quaternion rotation to matrix, translation set to zero.
+
+void fm_quatRotate(const MiF32 quat[4],const MiF32 v[3],MiF32 r[3]); // rotate a vector directly by a quaternion.
+void fm_quatRotate(const MiF64 quat[4],const MiF64 v[3],MiF64 r[3]); // rotate a vector directly by a quaternion.
+
+void fm_getTranslation(const MiF32 matrix[16],MiF32 t[3]);
+void fm_getTranslation(const MiF64 matrix[16],MiF64 t[3]);
+
+void fm_setTranslation(const MiF32 *translation,MiF32 matrix[16]);
+void fm_setTranslation(const MiF64 *translation,MiF64 matrix[16]);
+
+void fm_multiplyQuat(const MiF32 *qa,const MiF32 *qb,MiF32 *quat);
+void fm_multiplyQuat(const MiF64 *qa,const MiF64 *qb,MiF64 *quat);
+
+void fm_matrixToQuat(const MiF32 matrix[16],MiF32 quat[4]); // convert the 3x3 portion of a 4x4 matrix into a quaternion as x,y,z,w
+void fm_matrixToQuat(const MiF64 matrix[16],MiF64 quat[4]); // convert the 3x3 portion of a 4x4 matrix into a quaternion as x,y,z,w
+
+MiF32 fm_sphereVolume(MiF32 radius); // returns the volume of a sphere of this radius (4/3 PI * R cubed )
+MiF64 fm_sphereVolume(MiF64 radius); // returns the volume of a sphere of this radius (4/3 PI * R cubed )
+
+MiF32 fm_cylinderVolume(MiF32 radius,MiF32 h);
+MiF64 fm_cylinderVolume(MiF64 radius,MiF64 h);
+
+MiF32 fm_capsuleVolume(MiF32 radius,MiF32 h);
+MiF64 fm_capsuleVolume(MiF64 radius,MiF64 h);
+
+MiF32 fm_distance(const MiF32 p1[3],const MiF32 p2[3]);
+MiF64 fm_distance(const MiF64 p1[3],const MiF64 p2[3]);
+
+MiF32 fm_distanceSquared(const MiF32 p1[3],const MiF32 p2[3]);
+MiF64 fm_distanceSquared(const MiF64 p1[3],const MiF64 p2[3]);
+
+MiF32 fm_distanceSquaredXZ(const MiF32 p1[3],const MiF32 p2[3]);
+MiF64 fm_distanceSquaredXZ(const MiF64 p1[3],const MiF64 p2[3]);
+
+MiF32 fm_computePlane(const MiF32 p1[3],const MiF32 p2[3],const MiF32 p3[3],MiF32 *n); // return D
+MiF64 fm_computePlane(const MiF64 p1[3],const MiF64 p2[3],const MiF64 p3[3],MiF64 *n); // return D
+
+MiF32 fm_distToPlane(const MiF32 plane[4],const MiF32 pos[3]); // computes the distance of this point from the plane.
+MiF64 fm_distToPlane(const MiF64 plane[4],const MiF64 pos[3]); // computes the distance of this point from the plane.
+
+MiF32 fm_dot(const MiF32 p1[3],const MiF32 p2[3]);
+MiF64 fm_dot(const MiF64 p1[3],const MiF64 p2[3]);
+
+void fm_cross(MiF32 cross[3],const MiF32 a[3],const MiF32 b[3]);
+void fm_cross(MiF64 cross[3],const MiF64 a[3],const MiF64 b[3]);
+
+void fm_computeNormalVector(MiF32 n[3],const MiF32 p1[3],const MiF32 p2[3]); // as P2-P1 normalized.
+void fm_computeNormalVector(MiF64 n[3],const MiF64 p1[3],const MiF64 p2[3]); // as P2-P1 normalized.
+
+bool fm_computeWindingOrder(const MiF32 p1[3],const MiF32 p2[3],const MiF32 p3[3]); // returns true if the triangle is clockwise.
+bool fm_computeWindingOrder(const MiF64 p1[3],const MiF64 p2[3],const MiF64 p3[3]); // returns true if the triangle is clockwise.
+
+MiF32 fm_normalize(MiF32 n[3]); // normalize this vector and return the distance
+MiF64 fm_normalize(MiF64 n[3]); // normalize this vector and return the distance
+
+void fm_matrixMultiply(const MiF32 A[16],const MiF32 B[16],MiF32 dest[16]);
+void fm_matrixMultiply(const MiF64 A[16],const MiF64 B[16],MiF64 dest[16]);
+
+void fm_composeTransform(const MiF32 position[3],const MiF32 quat[4],const MiF32 scale[3],MiF32 matrix[16]);
+void fm_composeTransform(const MiF64 position[3],const MiF64 quat[4],const MiF64 scale[3],MiF64 matrix[16]);
+
+MiF32 fm_computeArea(const MiF32 p1[3],const MiF32 p2[3],const MiF32 p3[3]);
+MiF64 fm_computeArea(const MiF64 p1[3],const MiF64 p2[3],const MiF64 p3[3]);
+
+void fm_lerp(const MiF32 p1[3],const MiF32 p2[3],MiF32 dest[3],MiF32 lerpValue);
+void fm_lerp(const MiF64 p1[3],const MiF64 p2[3],MiF64 dest[3],MiF64 lerpValue);
+
+bool fm_insideTriangleXZ(const MiF32 test[3],const MiF32 p1[3],const MiF32 p2[3],const MiF32 p3[3]);
+bool fm_insideTriangleXZ(const MiF64 test[3],const MiF64 p1[3],const MiF64 p2[3],const MiF64 p3[3]);
+
+bool fm_insideAABB(const MiF32 pos[3],const MiF32 bmin[3],const MiF32 bmax[3]);
+bool fm_insideAABB(const MiF64 pos[3],const MiF64 bmin[3],const MiF64 bmax[3]);
+
+bool fm_insideAABB(const MiF32 obmin[3],const MiF32 obmax[3],const MiF32 tbmin[3],const MiF32 tbmax[3]); // test if bounding box tbmin/tmbax is fully inside obmin/obmax
+bool fm_insideAABB(const MiF64 obmin[3],const MiF64 obmax[3],const MiF64 tbmin[3],const MiF64 tbmax[3]); // test if bounding box tbmin/tmbax is fully inside obmin/obmax
+
+MiU32 fm_clipTestPoint(const MiF32 bmin[3],const MiF32 bmax[3],const MiF32 pos[3]);
+MiU32 fm_clipTestPoint(const MiF64 bmin[3],const MiF64 bmax[3],const MiF64 pos[3]);
+
+MiU32 fm_clipTestPointXZ(const MiF32 bmin[3],const MiF32 bmax[3],const MiF32 pos[3]); // only tests X and Z, not Y
+MiU32 fm_clipTestPointXZ(const MiF64 bmin[3],const MiF64 bmax[3],const MiF64 pos[3]); // only tests X and Z, not Y
+
+
+MiU32 fm_clipTestAABB(const MiF32 bmin[3],const MiF32 bmax[3],const MiF32 p1[3],const MiF32 p2[3],const MiF32 p3[3],MiU32 &andCode);
+MiU32 fm_clipTestAABB(const MiF64 bmin[3],const MiF64 bmax[3],const MiF64 p1[3],const MiF64 p2[3],const MiF64 p3[3],MiU32 &andCode);
+
+
+bool fm_lineTestAABBXZ(const MiF32 p1[3],const MiF32 p2[3],const MiF32 bmin[3],const MiF32 bmax[3],MiF32 &time);
+bool fm_lineTestAABBXZ(const MiF64 p1[3],const MiF64 p2[3],const MiF64 bmin[3],const MiF64 bmax[3],MiF64 &time);
+
+bool fm_lineTestAABB(const MiF32 p1[3],const MiF32 p2[3],const MiF32 bmin[3],const MiF32 bmax[3],MiF32 &time);
+bool fm_lineTestAABB(const MiF64 p1[3],const MiF64 p2[3],const MiF64 bmin[3],const MiF64 bmax[3],MiF64 &time);
+
+
+void fm_initMinMax(const MiF32 p[3],MiF32 bmin[3],MiF32 bmax[3]);
+void fm_initMinMax(const MiF64 p[3],MiF64 bmin[3],MiF64 bmax[3]);
+
+void fm_initMinMax(MiF32 bmin[3],MiF32 bmax[3]);
+void fm_initMinMax(MiF64 bmin[3],MiF64 bmax[3]);
+
+void fm_minmax(const MiF32 p[3],MiF32 bmin[3],MiF32 bmax[3]); // accumulate into a min-max value
+void fm_minmax(const MiF64 p[3],MiF64 bmin[3],MiF64 bmax[3]); // accumulate into a min-max value
+
+
+MiF32 fm_solveX(const MiF32 plane[4],MiF32 y,MiF32 z); // solve for X given this plane equation and the other two components.
+MiF64 fm_solveX(const MiF64 plane[4],MiF64 y,MiF64 z); // solve for X given this plane equation and the other two components.
+
+MiF32 fm_solveY(const MiF32 plane[4],MiF32 x,MiF32 z); // solve for Y given this plane equation and the other two components.
+MiF64 fm_solveY(const MiF64 plane[4],MiF64 x,MiF64 z); // solve for Y given this plane equation and the other two components.
+
+MiF32 fm_solveZ(const MiF32 plane[4],MiF32 x,MiF32 y); // solve for Z given this plane equation and the other two components.
+MiF64 fm_solveZ(const MiF64 plane[4],MiF64 x,MiF64 y); // solve for Z given this plane equation and the other two components.
+
+bool fm_computeBestFitPlane(MiU32 vcount, // number of input data points
+ const MiF32 *points, // starting address of points array.
+ MiU32 vstride, // stride between input points.
+ const MiF32 *weights, // *optional point weighting values.
+ MiU32 wstride, // weight stride for each vertex.
+ MiF32 plane[4]);
+
+bool fm_computeBestFitPlane(MiU32 vcount, // number of input data points
+ const MiF64 *points, // starting address of points array.
+ MiU32 vstride, // stride between input points.
+ const MiF64 *weights, // *optional point weighting values.
+ MiU32 wstride, // weight stride for each vertex.
+ MiF64 plane[4]);
+
+bool fm_computeCentroid(MiU32 vcount, // number of input data points
+ const MiF32 *points, // starting address of points array.
+ MiU32 vstride, // stride between input points.
+ MiF32 *center);
+
+bool fm_computeCentroid(MiU32 vcount, // number of input data points
+ const MiF64 *points, // starting address of points array.
+ MiU32 vstride, // stride between input points.
+ MiF64 *center);
+
+
+MiF32 fm_computeBestFitAABB(MiU32 vcount,const MiF32 *points,MiU32 pstride,MiF32 bmin[3],MiF32 bmax[3]); // returns the diagonal distance
+MiF64 fm_computeBestFitAABB(MiU32 vcount,const MiF64 *points,MiU32 pstride,MiF64 bmin[3],MiF64 bmax[3]); // returns the diagonal distance
+
+MiF32 fm_computeBestFitSphere(MiU32 vcount,const MiF32 *points,MiU32 pstride,MiF32 center[3]);
+MiF64 fm_computeBestFitSphere(MiU32 vcount,const MiF64 *points,MiU32 pstride,MiF64 center[3]);
+
+bool fm_lineSphereIntersect(const MiF32 center[3],MiF32 radius,const MiF32 p1[3],const MiF32 p2[3],MiF32 intersect[3]);
+bool fm_lineSphereIntersect(const MiF64 center[3],MiF64 radius,const MiF64 p1[3],const MiF64 p2[3],MiF64 intersect[3]);
+
+bool fm_intersectRayAABB(const MiF32 bmin[3],const MiF32 bmax[3],const MiF32 pos[3],const MiF32 dir[3],MiF32 intersect[3]);
+bool fm_intersectLineSegmentAABB(const MiF32 bmin[3],const MiF32 bmax[3],const MiF32 p1[3],const MiF32 p2[3],MiF32 intersect[3]);
+
+bool fm_lineIntersectsTriangle(const MiF32 rayStart[3],const MiF32 rayEnd[3],const MiF32 p1[3],const MiF32 p2[3],const MiF32 p3[3],MiF32 sect[3]);
+bool fm_lineIntersectsTriangle(const MiF64 rayStart[3],const MiF64 rayEnd[3],const MiF64 p1[3],const MiF64 p2[3],const MiF64 p3[3],MiF64 sect[3]);
+
+bool fm_rayIntersectsTriangle(const MiF32 origin[3],const MiF32 dir[3],const MiF32 v0[3],const MiF32 v1[3],const MiF32 v2[3],MiF32 &t);
+bool fm_rayIntersectsTriangle(const MiF64 origin[3],const MiF64 dir[3],const MiF64 v0[3],const MiF64 v1[3],const MiF64 v2[3],MiF64 &t);
+
+bool fm_raySphereIntersect(const MiF32 center[3],MiF32 radius,const MiF32 pos[3],const MiF32 dir[3],MiF32 distance,MiF32 intersect[3]);
+bool fm_raySphereIntersect(const MiF64 center[3],MiF64 radius,const MiF64 pos[3],const MiF64 dir[3],MiF64 distance,MiF64 intersect[3]);
+
+void fm_catmullRom(MiF32 out_vector[3],const MiF32 p1[3],const MiF32 p2[3],const MiF32 p3[3],const MiF32 *p4, const MiF32 s);
+void fm_catmullRom(MiF64 out_vector[3],const MiF64 p1[3],const MiF64 p2[3],const MiF64 p3[3],const MiF64 *p4, const MiF64 s);
+
+bool fm_intersectAABB(const MiF32 bmin1[3],const MiF32 bmax1[3],const MiF32 bmin2[3],const MiF32 bmax2[3]);
+bool fm_intersectAABB(const MiF64 bmin1[3],const MiF64 bmax1[3],const MiF64 bmin2[3],const MiF64 bmax2[3]);
+
+
+// computes the rotation quaternion to go from unit-vector v0 to unit-vector v1
+void fm_rotationArc(const MiF32 v0[3],const MiF32 v1[3],MiF32 quat[4]);
+void fm_rotationArc(const MiF64 v0[3],const MiF64 v1[3],MiF64 quat[4]);
+
+MiF32 fm_distancePointLineSegment(const MiF32 Point[3],const MiF32 LineStart[3],const MiF32 LineEnd[3],MiF32 intersection[3],LineSegmentType &type,MiF32 epsilon);
+MiF64 fm_distancePointLineSegment(const MiF64 Point[3],const MiF64 LineStart[3],const MiF64 LineEnd[3],MiF64 intersection[3],LineSegmentType &type,MiF64 epsilon);
+
+
+bool fm_colinear(const MiF64 p1[3],const MiF64 p2[3],const MiF64 p3[3],MiF64 epsilon=0.999); // true if these three points in a row are co-linear
+bool fm_colinear(const MiF32 p1[3],const MiF32 p2[3],const MiF32 p3[3],MiF32 epsilon=0.999f);
+
+bool fm_colinear(const MiF32 a1[3],const MiF32 a2[3],const MiF32 b1[3],const MiF32 b2[3],MiF32 epsilon=0.999f); // true if these two line segments are co-linear.
+bool fm_colinear(const MiF64 a1[3],const MiF64 a2[3],const MiF64 b1[3],const MiF64 b2[3],MiF64 epsilon=0.999); // true if these two line segments are co-linear.
+
+// Outcome of the 2d line-segment intersection tests below: segments miss, cross,
+// lie on the same line (coincident), or are parallel without touching.
+enum IntersectResult
+{
+ IR_DONT_INTERSECT,
+ IR_DO_INTERSECT,
+ IR_COINCIDENT,
+ IR_PARALLEL,
+};
+
+IntersectResult fm_intersectLineSegments2d(const MiF32 a1[3], const MiF32 a2[3], const MiF32 b1[3], const MiF32 b2[3], MiF32 intersectionPoint[3]);
+IntersectResult fm_intersectLineSegments2d(const MiF64 a1[3],const MiF64 a2[3],const MiF64 b1[3],const MiF64 b2[3],MiF64 intersectionPoint[3]);
+
+IntersectResult fm_intersectLineSegments2dTime(const MiF32 a1[3], const MiF32 a2[3], const MiF32 b1[3], const MiF32 b2[3],MiF32 &t1,MiF32 &t2);
+IntersectResult fm_intersectLineSegments2dTime(const MiF64 a1[3],const MiF64 a2[3],const MiF64 b1[3],const MiF64 b2[3],MiF64 &t1,MiF64 &t2);
+
+// Plane-Triangle splitting
+
+// Result of classifying/splitting a triangle against a plane: coplanar, entirely in
+// front, entirely behind, or straddling the plane (split into front and back pieces).
+enum PlaneTriResult
+{
+ PTR_ON_PLANE,
+ PTR_FRONT,
+ PTR_BACK,
+ PTR_SPLIT,
+};
+
+PlaneTriResult fm_planeTriIntersection(const MiF32 plane[4], // the plane equation in Ax+By+Cz+D format
+ const MiF32 *triangle, // the source triangle.
+ MiU32 tstride, // stride in bytes of the input and output *vertices*
+ MiF32 epsilon, // the co-planer epsilon value.
+ MiF32 *front, // the triangle in front of the
+ MiU32 &fcount, // number of vertices in the 'front' triangle
+ MiF32 *back, // the triangle in back of the plane
+ MiU32 &bcount); // the number of vertices in the 'back' triangle.
+
+
+PlaneTriResult fm_planeTriIntersection(const MiF64 plane[4], // the plane equation in Ax+By+Cz+D format
+ const MiF64 *triangle, // the source triangle.
+ MiU32 tstride, // stride in bytes of the input and output *vertices*
+ MiF64 epsilon, // the co-planer epsilon value.
+ MiF64 *front, // the triangle in front of the
+ MiU32 &fcount, // number of vertices in the 'front' triangle
+ MiF64 *back, // the triangle in back of the plane
+ MiU32 &bcount); // the number of vertices in the 'back' triangle.
+
+
+void fm_intersectPointPlane(const MiF32 p1[3],const MiF32 p2[3],MiF32 *split,const MiF32 plane[4]);
+void fm_intersectPointPlane(const MiF64 p1[3],const MiF64 p2[3],MiF64 *split,const MiF64 plane[4]);
+
+PlaneTriResult fm_getSidePlane(const MiF32 p[3],const MiF32 plane[4],MiF32 epsilon);
+PlaneTriResult fm_getSidePlane(const MiF64 p[3],const MiF64 plane[4],MiF64 epsilon);
+
+
+void fm_computeBestFitOBB(MiU32 vcount,const MiF32 *points,MiU32 pstride,MiF32 *sides,MiF32 matrix[16],bool bruteForce=true);
+void fm_computeBestFitOBB(MiU32 vcount,const MiF64 *points,MiU32 pstride,MiF64 *sides,MiF64 matrix[16],bool bruteForce=true);
+
+void fm_computeBestFitOBB(MiU32 vcount,const MiF32 *points,MiU32 pstride,MiF32 *sides,MiF32 pos[3],MiF32 quat[4],bool bruteForce=true);
+void fm_computeBestFitOBB(MiU32 vcount,const MiF64 *points,MiU32 pstride,MiF64 *sides,MiF64 pos[3],MiF64 quat[4],bool bruteForce=true);
+
+void fm_computeBestFitABB(MiU32 vcount,const MiF32 *points,MiU32 pstride,MiF32 *sides,MiF32 pos[3]);
+void fm_computeBestFitABB(MiU32 vcount,const MiF64 *points,MiU32 pstride,MiF64 *sides,MiF64 pos[3]);
+
+
+//** Note, if the returned capsule height is less than zero, then you must represent it is a sphere of size radius.
+void fm_computeBestFitCapsule(MiU32 vcount,const MiF32 *points,MiU32 pstride,MiF32 &radius,MiF32 &height,MiF32 matrix[16],bool bruteForce=true);
+// NOTE(review): the MiF64 overload still returns radius/height as MiF32& — possibly intentional
+// (precision of the outputs), possibly a copy/paste slip; confirm against the implementation.
+void fm_computeBestFitCapsule(MiU32 vcount,const MiF64 *points,MiU32 pstride,MiF32 &radius,MiF32 &height,MiF64 matrix[16],bool bruteForce=true);
+
+
+void fm_planeToMatrix(const MiF32 plane[4],MiF32 matrix[16]); // convert a plane equation to a 4x4 rotation matrix. Reference vector is 0,1,0
+void fm_planeToQuat(const MiF32 plane[4],MiF32 quat[4],MiF32 pos[3]); // convert a plane equation to a quaternion and translation
+
+void fm_planeToMatrix(const MiF64 plane[4],MiF64 matrix[16]); // convert a plane equation to a 4x4 rotation matrix
+void fm_planeToQuat(const MiF64 plane[4],MiF64 quat[4],MiF64 pos[3]); // convert a plane equation to a quaternion and translation
+
+// Narrowing element-wise copy of a 3-vector: MiF64[3] -> MiF32[3].
+inline void fm_doubleToFloat3(const MiF64 p[3],MiF32 t[3]) { t[0] = (MiF32) p[0]; t[1] = (MiF32)p[1]; t[2] = (MiF32)p[2]; };
+// Widening element-wise copy of a 3-vector: MiF32[3] -> MiF64[3].
+inline void fm_floatToDouble3(const MiF32 p[3],MiF64 t[3]) { t[0] = (MiF64)p[0]; t[1] = (MiF64)p[1]; t[2] = (MiF64)p[2]; };
+
+
+void fm_eulerMatrix(MiF32 ax,MiF32 ay,MiF32 az,MiF32 matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero)
+void fm_eulerMatrix(MiF64 ax,MiF64 ay,MiF64 az,MiF64 matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero)
+
+
+MiF32 fm_computeMeshVolume(const MiF32 *vertices,MiU32 tcount,const MiU32 *indices);
+MiF64 fm_computeMeshVolume(const MiF64 *vertices,MiU32 tcount,const MiU32 *indices);
+
+
+#define FM_DEFAULT_GRANULARITY 0.001f // 1 millimeter is the default granularity
+
+// Abstract vertex-welding interface: getIndex() maps a position to a stable index,
+// reporting via 'newPos' whether the position was newly added; accessors expose the
+// de-duplicated vertex pool in either float or double precision.
+class fm_VertexIndex
+{
+public:
+ virtual MiU32 getIndex(const MiF32 pos[3],bool &newPos) = 0; // get welded index for this MiF32 vector[3]
+ virtual MiU32 getIndex(const MiF64 pos[3],bool &newPos) = 0; // get welded index for this MiF64 vector[3]
+ virtual const MiF32 * getVerticesFloat(void) const = 0;
+ virtual const MiF64 * getVerticesDouble(void) const = 0;
+ virtual const MiF32 * getVertexFloat(MiU32 index) const = 0;
+ virtual const MiF64 * getVertexDouble(MiU32 index) const = 0;
+ virtual MiU32 getVcount(void) const = 0; // number of unique vertices stored so far
+ virtual bool isDouble(void) const = 0; // true when the pool stores MiF64 vertices
+ virtual bool saveAsObj(const char *fname,MiU32 tcount,MiU32 *indices) = 0;
+};
+
+fm_VertexIndex * fm_createVertexIndex(MiF64 granularity,bool snapToGrid); // create an indexed vertex system for doubles
+fm_VertexIndex * fm_createVertexIndex(MiF32 granularity,bool snapToGrid); // create an indexed vertex system for floats
+void fm_releaseVertexIndex(fm_VertexIndex *vindex);
+
+
+
+#if 0 // currently disabled
+
+// A line segment expressed as two vertex indices (into an fm_VertexIndex pool).
+// Currently compiled out along with fm_LineSweep (#if 0 above).
+class fm_LineSegment
+{
+public:
+ // Default: both endpoints index 0.
+ fm_LineSegment(void)
+ {
+ mE1 = mE2 = 0;
+ }
+
+ // Construct from explicit endpoint indices.
+ fm_LineSegment(MiU32 e1,MiU32 e2)
+ {
+ mE1 = e1;
+ mE2 = e2;
+ }
+
+ MiU32 mE1; // index of the first endpoint
+ MiU32 mE2; // index of the second endpoint
+};
+
+
+// LineSweep *only* supports doubles. As a geometric operation it needs as much precision as possible.
+// LineSweep *only* supports doubles. As a geometric operation it needs as much precision as possible.
+// Abstract interface (currently compiled out, #if 0 above): sweeps 'icount' segments
+// against the given plane, welding endpoints through 'pool', and returns the resulting
+// segment array with its size in 'scount'.
+class fm_LineSweep
+{
+public:
+
+ virtual fm_LineSegment * performLineSweep(const fm_LineSegment *segments,
+ MiU32 icount,
+ const MiF64 *planeEquation,
+ fm_VertexIndex *pool,
+ MiU32 &scount) = 0;
+
+
+};
+
+fm_LineSweep * fm_createLineSweep(void);
+void fm_releaseLineSweep(fm_LineSweep *sweep);
+
+#endif
+
+// Abstract polygon-triangulation interface (float and double variants). Takes 'pcount'
+// points with byte stride 'vstride' and returns the triangulated vertex data, writing
+// the triangle count to 'tcount'; 'consolidate' and 'epsilon' are presumably the
+// co-linear-edge collapse toggle and tolerance — confirm in the implementation.
+class fm_Triangulate
+{
+public:
+ virtual const MiF64 * triangulate3d(MiU32 pcount,
+ const MiF64 *points,
+ MiU32 vstride,
+ MiU32 &tcount,
+ bool consolidate,
+ MiF64 epsilon) = 0;
+
+ virtual const MiF32 * triangulate3d(MiU32 pcount,
+ const MiF32 *points,
+ MiU32 vstride,
+ MiU32 &tcount,
+ bool consolidate,
+ MiF32 epsilon) = 0;
+};
+
+fm_Triangulate * fm_createTriangulate(void);
+void fm_releaseTriangulate(fm_Triangulate *t);
+
+
+const MiF32 * fm_getPoint(const MiF32 *points,MiU32 pstride,MiU32 index);
+const MiF64 * fm_getPoint(const MiF64 *points,MiU32 pstride,MiU32 index);
+
+bool fm_insideTriangle(MiF32 Ax, MiF32 Ay,MiF32 Bx, MiF32 By,MiF32 Cx, MiF32 Cy,MiF32 Mi, MiF32 Py);
+bool fm_insideTriangle(MiF64 Ax, MiF64 Ay,MiF64 Bx, MiF64 By,MiF64 Cx, MiF64 Cy,MiF64 Mi, MiF64 Py);
+MiF32 fm_areaPolygon2d(MiU32 pcount,const MiF32 *points,MiU32 pstride);
+MiF64 fm_areaPolygon2d(MiU32 pcount,const MiF64 *points,MiU32 pstride);
+
+bool fm_pointInsidePolygon2d(MiU32 pcount,const MiF32 *points,MiU32 pstride,const MiF32 *point,MiU32 xindex=0,MiU32 yindex=1);
+bool fm_pointInsidePolygon2d(MiU32 pcount,const MiF64 *points,MiU32 pstride,const MiF64 *point,MiU32 xindex=0,MiU32 yindex=1);
+
+MiU32 fm_consolidatePolygon(MiU32 pcount,const MiF32 *points,MiU32 pstride,MiF32 *dest,MiF32 epsilon=0.999999f); // collapses co-linear edges.
+MiU32 fm_consolidatePolygon(MiU32 pcount,const MiF64 *points,MiU32 pstride,MiF64 *dest,MiF64 epsilon=0.999999); // collapses co-linear edges.
+
+
+bool fm_computeSplitPlane(MiU32 vcount,const MiF64 *vertices,MiU32 tcount,const MiU32 *indices,MiF64 *plane);
+bool fm_computeSplitPlane(MiU32 vcount,const MiF32 *vertices,MiU32 tcount,const MiU32 *indices,MiF32 *plane);
+
+void fm_nearestPointInTriangle(const MiF32 *pos,const MiF32 *p1,const MiF32 *p2,const MiF32 *p3,MiF32 *nearest);
+void fm_nearestPointInTriangle(const MiF64 *pos,const MiF64 *p1,const MiF64 *p2,const MiF64 *p3,MiF64 *nearest);
+
+MiF32 fm_areaTriangle(const MiF32 *p1,const MiF32 *p2,const MiF32 *p3);
+MiF64 fm_areaTriangle(const MiF64 *p1,const MiF64 *p2,const MiF64 *p3);
+
+void fm_subtract(const MiF32 *A,const MiF32 *B,MiF32 *diff); // compute A-B and store the result in 'diff'
+void fm_subtract(const MiF64 *A,const MiF64 *B,MiF64 *diff); // compute A-B and store the result in 'diff'
+
+void fm_multiply(MiF32 *A,MiF32 scaler);
+void fm_multiply(MiF64 *A,MiF64 scaler);
+
+void fm_add(const MiF32 *A,const MiF32 *B,MiF32 *sum);
+void fm_add(const MiF64 *A,const MiF64 *B,MiF64 *sum);
+
+void fm_copy3(const MiF32 *source,MiF32 *dest);
+void fm_copy3(const MiF64 *source,MiF64 *dest);
+
+// re-indexes an indexed triangle mesh but drops unused vertices. The output_indices can be the same pointer as the input indices.
+// the output_vertices can point to the input vertices if you desire. The output_vertices buffer should be at least the same size
+// is the input buffer. The routine returns the new vertex count after re-indexing.
+MiU32 fm_copyUniqueVertices(MiU32 vcount,const MiF32 *input_vertices,MiF32 *output_vertices,MiU32 tcount,const MiU32 *input_indices,MiU32 *output_indices);
+MiU32 fm_copyUniqueVertices(MiU32 vcount,const MiF64 *input_vertices,MiF64 *output_vertices,MiU32 tcount,const MiU32 *input_indices,MiU32 *output_indices);
+
+bool fm_isMeshCoplanar(MiU32 tcount,const MiU32 *indices,const MiF32 *vertices,bool doubleSided); // returns true if this collection of indexed triangles are co-planar!
+bool fm_isMeshCoplanar(MiU32 tcount,const MiU32 *indices,const MiF64 *vertices,bool doubleSided); // returns true if this collection of indexed triangles are co-planar!
+
+bool fm_samePlane(const MiF32 p1[4],const MiF32 p2[4],MiF32 normalEpsilon=0.01f,MiF32 dEpsilon=0.001f,bool doubleSided=false); // returns true if these two plane equations are identical within an epsilon
+bool fm_samePlane(const MiF64 p1[4],const MiF64 p2[4],MiF64 normalEpsilon=0.01,MiF64 dEpsilon=0.001,bool doubleSided=false);
+
+void fm_OBBtoAABB(const MiF32 obmin[3],const MiF32 obmax[3],const MiF32 matrix[16],MiF32 abmin[3],MiF32 abmax[3]);
+
+// a utility class that will tessellate a mesh.
+// Abstract interface: subdivides triangles whose edges exceed 'longEdge', recursing at
+// most 'maxDepth' levels; new vertices are welded through 'vindex' and the resulting
+// index list (size written to 'outcount') is returned.
+class fm_Tesselate
+{
+public:
+ virtual const MiU32 * tesselate(fm_VertexIndex *vindex,MiU32 tcount,const MiU32 *indices,MiF32 longEdge,MiU32 maxDepth,MiU32 &outcount) = 0;
+};
+
+fm_Tesselate * fm_createTesselate(void);
+void fm_releaseTesselate(fm_Tesselate *t);
+
+void fm_computeMeanNormals(MiU32 vcount, // the number of vertices
+ const MiF32 *vertices, // the base address of the vertex position data.
+ MiU32 vstride, // the stride between position data.
+ MiF32 *normals, // the base address of the destination for mean vector normals
+ MiU32 nstride, // the stride between normals
+ MiU32 tcount, // the number of triangles
+ const MiU32 *indices); // the triangle indices
+
+void fm_computeMeanNormals(MiU32 vcount, // the number of vertices
+ const MiF64 *vertices, // the base address of the vertex position data.
+ MiU32 vstride, // the stride between position data.
+ MiF64 *normals, // the base address of the destination for mean vector normals
+ MiU32 nstride, // the stride between normals
+ MiU32 tcount, // the number of triangles
+ const MiU32 *indices); // the triangle indices
+
+
+bool fm_isValidTriangle(const MiF32 *p1,const MiF32 *p2,const MiF32 *p3,MiF32 epsilon=0.00001f);
+bool fm_isValidTriangle(const MiF64 *p1,const MiF64 *p2,const MiF64 *p3,MiF64 epsilon=0.00001f);
+
+};
+
+#endif
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiIOStream.h b/APEX_1.4/shared/general/meshimport/include/utils/MiIOStream.h
new file mode 100644
index 00000000..d087b690
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiIOStream.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_IOSTREAM_H
+#define MI_IOSTREAM_H
+
+/*!
+\file
+\brief MiIOStream class
+*/
+#include "MiFileBuf.h"
+#include <string.h>
+#include <stdlib.h>
+#include "MiAsciiConversion.h"
+
+#define safePrintf string::sprintf_s
+
+MI_PUSH_PACK_DEFAULT
+
+namespace mimp
+{
+
+ /**
+\brief A wrapper class for MiFileBuf that provides both binary and ASCII streaming capabilities
+*/
class MiIOStream
{
	// Size of the temporary de-serialization buffer. The binary string reader
	// only accepts lengths < MAX_STREAM_STRING-1, so longer strings cannot be
	// round-tripped.
	static const MiU32 MAX_STREAM_STRING = 1024;
public:
	/**
	\param [in] stream the MiFileBuf through which all reads and writes will be performed
	\param [in] streamLen the length of the input data stream when de-serializing
	*/
	MiIOStream(MiFileBuf &stream,MiU32 streamLen) : mBinary(true), mStreamLen(streamLen), mStream(stream) { };
	~MiIOStream(void) { };

	/**
	\brief Set the stream to binary or ASCII

	\param [in] state if true, stream is binary, if false, stream is ASCII

	If the stream is binary, stream access is passed straight through to the respective
	MiFileBuf methods.  If the stream is ASCII, all stream reads and writes are converted to
	human readable ASCII.
	*/
	MI_INLINE void setBinary(bool state) { mBinary = state; }
	MI_INLINE bool getBinary() { return mBinary; }

	// Insertion (serialization) operators; implemented in MiIOStream.inl.
	MI_INLINE MiIOStream& operator<<(bool v);
	MI_INLINE MiIOStream& operator<<(char c);
	MI_INLINE MiIOStream& operator<<(MiU8 v);
	MI_INLINE MiIOStream& operator<<(MiI8 v);

	MI_INLINE MiIOStream& operator<<(const char *c);
	MI_INLINE MiIOStream& operator<<(MiI64 v);
	MI_INLINE MiIOStream& operator<<(MiU64 v);
	MI_INLINE MiIOStream& operator<<(MiF64 v);
	MI_INLINE MiIOStream& operator<<(MiF32 v);
	MI_INLINE MiIOStream& operator<<(MiU32 v);
	MI_INLINE MiIOStream& operator<<(MiI32 v);
	MI_INLINE MiIOStream& operator<<(MiU16 v);
	MI_INLINE MiIOStream& operator<<(MiI16 v);

	// Extraction (de-serialization) operators. NOTE: only binary extraction is
	// implemented; in ASCII mode these are no-ops (see MiIOStream.inl).
	MI_INLINE MiIOStream& operator>>(const char *&c);
	MI_INLINE MiIOStream& operator>>(bool &v);
	MI_INLINE MiIOStream& operator>>(char &c);
	MI_INLINE MiIOStream& operator>>(MiU8 &v);
	MI_INLINE MiIOStream& operator>>(MiI8 &v);
	MI_INLINE MiIOStream& operator>>(MiI64 &v);
	MI_INLINE MiIOStream& operator>>(MiU64 &v);
	MI_INLINE MiIOStream& operator>>(MiF64 &v);
	MI_INLINE MiIOStream& operator>>(MiF32 &v);
	MI_INLINE MiIOStream& operator>>(MiU32 &v);
	MI_INLINE MiIOStream& operator>>(MiI32 &v);
	MI_INLINE MiIOStream& operator>>(MiU16 &v);
	MI_INLINE MiIOStream& operator>>(MiI16 &v);

	MiU32 getStreamLen(void) const { return mStreamLen; }

	MiFileBuf& getStream(void) { return mStream; }

	// Writes the characters of 'c' to the stream (no length prefix),
	// optionally followed by a single zero byte.
	MI_INLINE void storeString(const char *c,bool zeroTerminate=false);

private:
	MiIOStream& operator=( const MiIOStream& ); // non-assignable (holds a reference member)


	bool      mBinary; // true if we are serializing binary data.  Otherwise, everything is assumed converted to ASCII
	MiU32     mStreamLen; // the length of the input data stream when de-serializing.
	MiFileBuf &mStream;
	char			mReadString[MAX_STREAM_STRING]; // a temp buffer for streaming strings on input.
};
+
+#include "MiIOStream.inl" // inline methods...
+
+}; // end of physx namespace
+
+MI_POP_PACK
+
+#endif // MI_IOSTREAM_H
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiIOStream.inl b/APEX_1.4/shared/general/meshimport/include/utils/MiIOStream.inl
new file mode 100644
index 00000000..442eb621
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiIOStream.inl
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+/*!
+\file
+\brief MiIOStream inline implementation
+*/
+
+MI_INLINE MiIOStream& MiIOStream::operator<<(bool v)
+{
+ if ( mBinary )
+ {
+ mStream.storeByte((MiU8)v);
+ }
+ else
+ {
+ char scratch[6];
+ storeString( MiAsc::valueToStr(v, scratch, 6) );
+ }
+ return *this;
+};
+
+
MI_INLINE MiIOStream& MiIOStream::operator<<(char c)
{
	// Deliberately unguarded by mBinary: a raw character is written in both
	// modes (used e.g. by IOSTREAM_COMMA_SEPARATOR to emit ASCII separators).
	mStream.storeByte((MiU8)c);
	return *this;
};
+
+MI_INLINE MiIOStream& MiIOStream::operator<<(MiU8 c)
+{
+ if ( mBinary )
+ {
+ mStream.storeByte((MiU8)c);
+ }
+ else
+ {
+ char scratch[MiAsc::IntStrLen];
+ storeString( MiAsc::valueToStr(c, scratch, MiAsc::IntStrLen) );
+ }
+
+ return *this;
+};
+
// Signed byte: stored as a raw byte in binary mode, decimal text in ASCII mode.
MI_INLINE MiIOStream& MiIOStream::operator<<(MiI8 c)
{
	if ( mBinary )
	{
		mStream.storeByte((MiU8)c);
	}
	else
	{
		char scratch[MiAsc::IntStrLen];
		storeString( MiAsc::valueToStr(c, scratch, MiAsc::IntStrLen) );
	}

	return *this;
};
+
+MI_INLINE MiIOStream& MiIOStream::operator<<(const char *c)
+{
+ if ( mBinary )
+ {
+ c = c ? c : ""; // it it is a null pointer, assign it to an empty string.
+ MiU32 len = (MiU32)strlen(c);
+ MI_ASSERT( len < (MAX_STREAM_STRING-1));
+ if ( len > (MAX_STREAM_STRING-1) )
+ {
+ len = MAX_STREAM_STRING-1;
+ }
+ mStream.storeDword(len);
+ if ( len )
+ mStream.write(c,len);
+ }
+ else
+ {
+ storeString(c);
+ }
+ return *this;
+};
+
MI_INLINE MiIOStream& MiIOStream::operator<<(MiU64 v)
{
	if ( mBinary )
	{
		// NOTE(review): 64-bit integers are encoded as an 8-byte double;
		// values above 2^53 lose precision. Presumably MiFileBuf lacks a
		// 64-bit integer store -- confirm before relying on the full range.
		mStream.storeDouble( (MiF64) v );
	}
	else
	{
		char scratch[MiAsc::IntStrLen];
		storeString( MiAsc::valueToStr(v, scratch, MiAsc::IntStrLen) );
	}
	return *this;
};
+
MI_INLINE MiIOStream& MiIOStream::operator<<(MiI64 v)
{
	if ( mBinary )
	{
		// NOTE(review): stored as an 8-byte double (see MiU64 overload);
		// magnitudes above 2^53 lose precision. The matching reader must
		// consume 8 bytes (a double) to stay in sync.
		mStream.storeDouble( (MiF64) v );
	}
	else
	{
		char scratch[MiAsc::IntStrLen];
		storeString( MiAsc::valueToStr(v, scratch, MiAsc::IntStrLen) );
	}
	return *this;
};
+
+MI_INLINE MiIOStream& MiIOStream::operator<<(MiF64 v)
+{
+ if ( mBinary )
+ {
+ mStream.storeDouble( (MiF64) v );
+ }
+ else
+ {
+ char scratch[MiAsc::MiF64StrLen];
+ storeString( MiAsc::valueToStr(v, scratch, MiAsc::MiF64StrLen) );
+ }
+ return *this;
+};
+
+MI_INLINE MiIOStream& MiIOStream::operator<<(MiF32 v)
+{
+ if ( mBinary )
+ {
+ mStream.storeFloat(v);
+ }
+ else
+ {
+ char scratch[MiAsc::MiF32StrLen];
+ storeString( MiAsc::valueToStr(v, scratch, MiAsc::MiF32StrLen) );
+
+ }
+ return *this;
+};
+
+MI_INLINE MiIOStream& MiIOStream::operator<<(MiU32 v)
+{
+ if ( mBinary )
+ {
+ mStream.storeDword(v);
+ }
+ else
+ {
+ char scratch[MiAsc::IntStrLen];
+ storeString( MiAsc::valueToStr(v, scratch, MiAsc::IntStrLen) );
+ }
+ return *this;
+};
+
// Signed 32-bit: stored as a dword in binary mode, decimal text in ASCII mode.
MI_INLINE MiIOStream& MiIOStream::operator<<(MiI32 v)
{
	if ( mBinary )
	{
		mStream.storeDword( (MiU32) v );
	}
	else
	{
		char scratch[MiAsc::IntStrLen];
		storeString( MiAsc::valueToStr(v, scratch, MiAsc::IntStrLen) );
	}
	return *this;
};
+
// Unsigned 16-bit: stored as a word in binary mode, decimal text in ASCII mode.
MI_INLINE MiIOStream& MiIOStream::operator<<(MiU16 v)
{
	if ( mBinary )
	{
		mStream.storeWord(v);
	}
	else
	{
		char scratch[MiAsc::IntStrLen];
		storeString( MiAsc::valueToStr(v, scratch, MiAsc::IntStrLen) );
	}
	return *this;
};
+
// Signed 16-bit: stored as a word in binary mode, decimal text in ASCII mode.
MI_INLINE MiIOStream& MiIOStream::operator<<(MiI16 v)
{
	if ( mBinary )
	{
		mStream.storeWord( (MiU16) v );
	}
	else
	{
		char scratch[MiAsc::IntStrLen];
		storeString( MiAsc::valueToStr(v, scratch, MiAsc::IntStrLen) );
	}
	return *this;
};
+
+
// Reads an unsigned 32-bit value. Binary mode only; in ASCII mode 'v' is left
// untouched (ASCII de-serialization is not implemented for this class).
MI_INLINE MiIOStream& MiIOStream::operator>>(MiU32 &v)
{
	if ( mBinary )
	{
		v = mStream.readDword();
	}
	return *this;
}
+
// Reads a single character (binary mode only; no-op in ASCII mode).
MI_INLINE MiIOStream& MiIOStream::operator>>(char &v)
{
	if ( mBinary )
	{
		v = (char)mStream.readByte();
	}
	return *this;
}
+
// Reads an unsigned byte (binary mode only; no-op in ASCII mode).
MI_INLINE MiIOStream& MiIOStream::operator>>(MiU8 &v)
{
	if ( mBinary )
	{
		v = mStream.readByte();
	}
	return *this;
}
+
// Reads a signed byte (binary mode only; no-op in ASCII mode).
MI_INLINE MiIOStream& MiIOStream::operator>>(MiI8 &v)
{
	if ( mBinary )
	{
		v = (MiI8)mStream.readByte();
	}
	return *this;
}
+
+MI_INLINE MiIOStream& MiIOStream::operator>>(MiI64 &v)
+{
+ if ( mBinary )
+ {
+ v = mStream.readDword();
+ }
+ return *this;
+}
+
// Reads an unsigned 64-bit value, which operator<<(MiU64) encoded as an
// 8-byte double (binary mode only; no-op in ASCII mode).
MI_INLINE MiIOStream& MiIOStream::operator>>(MiU64 &v)
{
	if ( mBinary )
	{
		v = (MiU64)mStream.readDouble();
	}
	return *this;
}
+
// Reads a double (binary mode only; no-op in ASCII mode).
MI_INLINE MiIOStream& MiIOStream::operator>>(MiF64 &v)
{
	if ( mBinary )
	{
		v = mStream.readDouble();
	}
	return *this;
}
+
// Reads a float (binary mode only; no-op in ASCII mode).
MI_INLINE MiIOStream& MiIOStream::operator>>(MiF32 &v)
{
	if ( mBinary )
	{
		v = mStream.readFloat();
	}
	return *this;
}
+
// Reads a signed 32-bit value (binary mode only; no-op in ASCII mode).
MI_INLINE MiIOStream& MiIOStream::operator>>(MiI32 &v)
{
	if ( mBinary )
	{
		v = (MiI32)mStream.readDword();
	}
	return *this;
}
+
// Reads an unsigned 16-bit value (binary mode only; no-op in ASCII mode).
MI_INLINE MiIOStream& MiIOStream::operator>>(MiU16 &v)
{
	if ( mBinary )
	{
		v = mStream.readWord();
	}
	return *this;
}
+
// Reads a signed 16-bit value (binary mode only; no-op in ASCII mode).
MI_INLINE MiIOStream& MiIOStream::operator>>(MiI16 &v)
{
	if ( mBinary )
	{
		v = (MiI16)mStream.readWord();
	}
	return *this;
}
+
+MI_INLINE MiIOStream& MiIOStream::operator>>(bool &v)
+{
+ MiU8 iv;
+ iv = mStream.readByte();
+ v = iv ? true : false;
+ return *this;
+};
+
+#define IOSTREAM_COMMA_SEPARATOR if(!mBinary) *this << ' ';
+
// Reads a length-prefixed string written by operator<<(const char *).
// On success 'str' points at the member scratch buffer mReadString, so the
// result is only valid until the next string is read from this stream.
MI_INLINE MiIOStream& MiIOStream::operator>>(const char *&str)
{
	str = NULL; // by default no string streamed...
	if ( mBinary )
	{
		MiU32 len=0;
		*this >> len; // dword length prefix, then 'len' unterminated characters
		MI_ASSERT( len < (MAX_STREAM_STRING-1) );
		if ( len < (MAX_STREAM_STRING-1) )
		{
			mStream.read(mReadString,len);
			mReadString[len] = 0; // zero-terminate; the writer stores no terminator
			str = mReadString;
		}
		// NOTE(review): if 'len' is out of range the payload bytes are NOT
		// consumed, leaving the stream desynchronized for subsequent reads.
	}
	return *this;
}
+
+
+MI_INLINE void MiIOStream::storeString(const char *c,bool zeroTerminate)
+{
+ while ( *c )
+ {
+ mStream.storeByte((MiU8)*c);
+ c++;
+ }
+ if ( zeroTerminate )
+ {
+ mStream.storeByte(0);
+ }
+}
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiInparser.h b/APEX_1.4/shared/general/meshimport/include/utils/MiInparser.h
new file mode 100644
index 00000000..c94b8c63
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiInparser.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef INPARSER_H
+#define INPARSER_H
+
+#include <assert.h>
+
+#define MAXARGS 512
+
+
+/** @file inparser.h
+ * @brief Parse ASCII text, in place, very quickly.
+ *
+ * This class provides for high speed in-place (destructive) parsing of an ASCII text file.
+ * This class will either load an ASCII text file from disk, or can be constructed with a pointer to
+ * a piece of ASCII text in memory. It can only be called once, and the contents are destroyed.
+ * To speed the process of parsing, it simply builds pointers to the original ascii data and replaces the
+ * separators with a zero byte to indicate end of string.  It performs callbacks to parse each line, in argc/argv format,
+ * offering the option to cancel the parsing process at any time.
+ *
+ *
+ * By default the only valid separator is whitespace.  It will not treat commas or any other symbol as a separator.
+ * You can specify any character to be a 'hard' separator, such as an '=' for example and that will come back as a
+ * distinct argument string.
+ *
+ * To use the parser simply inherit the pure virtual base class 'InPlaceParserInterface'. Define the method 'ParseLine'.
+ * When you invoke the Parse method on the InPlaceParser class, you will get an ARGC - ARGV style callback for each line
+ * in the source file. If you return 'false' at any time, it will abort parsing. The entire thing is stack based, so you
+ * can recursively call multiple parser instances.
+ *
+ * It is important to note. Since this parser is 'in place' it writes 'zero bytes' (EOS marker) on top of the whitespace.
+ * While it can handle text in quotes, it does not handle escape sequences. This is a limitation which could be resolved.
+ * There is a hard coded maximum limit of 512 arguments per line.
+ *
+ * Here is the full example usage:
+ *
+ * InPlaceParser ipp("parse_me.txt");
+ *
+ * ipp.Parse(this);
+ *
+ * That's it, and you will receive an ARGC - ARGV callback for every line in the file.
+ *
+ * If you want to parse some text in memory of your own, it *MUST* be terminated by a zero byte, and lines separated by carriage return
+ * or line-feed.  You will receive an assertion if it is not.  If you specify the source data then *you* are responsible for that memory
+ * and must de-allocate it yourself. If the data was loaded from a file on disk, then it is automatically de-allocated by the InPlaceParser.
+ *
+ * You can also construct the InPlaceParser without passing any data, so you can simply pass it a line of data at a time yourself. The
+ * line of data should be zero-byte terminated.
+*/
+
+#include "MiPlatformConfig.h"
+
+namespace mimp
+{
// Callback interface implemented by clients of InPlaceParser; one callback is
// issued per parsed line.
class InPlaceParserInterface
{
public:
	virtual MiI32 ParseLine(MiI32 lineno,MiI32 argc,const char **argv) =0;  // return TRUE to continue parsing, return FALSE to abort parsing process
	virtual bool preParseLine(MiI32 /* lineno */,const char * /* line */)  { return false; }; // optional chance to pre-parse the line as raw data.  If you return 'true' the line will be skipped assuming you snarfed it.
};
+
// Classification assigned to each of the 256 possible byte values while
// parsing (see InPlaceParser::mHard).
enum SeparatorType
{
	ST_DATA,        // is data
	ST_HARD,        // is a hard separator (comes back as a distinct argument string)
	ST_SOFT,        // is a soft separator (whitespace; splits arguments, never returned)
	ST_EOS,         // is a comment symbol, and everything past this character should be ignored
	ST_LINE_FEED    // ends the current line
};
+
// High-speed in-place (destructive) ASCII parser; see the file comment above
// for full usage notes. Byte classification is driven by the mHard[256] table.
class InPlaceParser
{
public:
	InPlaceParser(void)
	{
		Init();
	}

	// Parse caller-owned memory; the caller retains ownership of 'data'.
	InPlaceParser(char *data,MiI32 len)
	{
		Init();
		SetSourceData(data,len);
	}

	// Load a file from disk; the buffer is then owned (and freed) by us.
	InPlaceParser(const char *fname)
	{
		Init();
		SetFile(fname);
	}

	~InPlaceParser(void);

	// Resets the separator table to its defaults: everything is data except
	// NUL (end of string), space/tab (soft), and CR/LF (line feeds).
	void Init(void)
	{
		mQuoteChar = 34; // 34 == '"', the default quote character
		mData = 0;
		mLen  = 0;
		mMyAlloc = false;
		for (MiI32 i=0; i<256; i++)
		{
			mHard[i] = ST_DATA;
			// mHardString is a table of 256 zero-terminated one-character
			// strings, presumably emitted as argv entries for hard
			// separators -- confirm in AddHard.
			mHardString[i*2] = (char)i;
			mHardString[i*2+1] = 0;
		}
		mHard[0]  = ST_EOS;
		mHard[32] = ST_SOFT;
		mHard[9]  = ST_SOFT;
		mHard[13] = ST_LINE_FEED;
		mHard[10] = ST_LINE_FEED;
	}

	void SetFile(const char *fname);

	void SetSourceData(char *data,MiI32 len)
	{
		mData = data;
		mLen  = len;
		mMyAlloc = false;
	};

	MiI32  Parse(const char *str,InPlaceParserInterface *callback); // returns true if entire file was parsed, false if it aborted for some reason
	MiI32  Parse(InPlaceParserInterface *callback); // returns true if entire file was parsed, false if it aborted for some reason

	MiI32 ProcessLine(MiI32 lineno,char *line,InPlaceParserInterface *callback);

	const char ** GetArglist(char *source,MiI32 &count); // convert source string into an arg list, this is a destructive parse.

	void SetHardSeparator(char c) // add a hard separator
	{
		mHard[(unsigned char)c] = ST_HARD;
	}

	void SetHard(char c) // add a hard separator (synonym for SetHardSeparator)
	{
		mHard[(unsigned char)c] = ST_HARD;
	}

	void SetSoft(char c) // add a soft separator
	{
		mHard[(unsigned char)c] = ST_SOFT;
	}


	void SetCommentSymbol(char c) // comment character, treated as 'end of string'
	{
		mHard[(unsigned char)c] = ST_EOS;
	}

	void ClearHardSeparator(char c) // treat this character as plain data again
	{
		mHard[(unsigned char)c] = ST_DATA;
	}


	void DefaultSymbols(void); // set up default symbols for hard separator and comment symbol of the '#' character.

	bool EOS(char c) // true if 'c' is classified as end-of-string / comment
	{
		if ( mHard[(unsigned char)c] == ST_EOS )
		{
			return true;
		}
		return false;
	}

	void SetQuoteChar(char c)
	{
		mQuoteChar = c;
	}

	bool HasData( void ) const
	{
		return ( mData != 0 );
	}

	void setLineFeed(char c)
	{
		mHard[(unsigned char)c] = ST_LINE_FEED;
	}

	bool isLineFeed(char c)
	{
		if ( mHard[(unsigned char)c] == ST_LINE_FEED ) return true;
		return false;
	}

private:

	inline char * AddHard(MiI32 &argc,const char **argv,char *foo);
	inline bool   IsHard(char c);
	inline char * SkipSpaces(char *foo);
	inline bool   IsWhiteSpace(char c);
	inline bool   IsNonSeparator(char c); // non separator,neither hard nor soft

	bool            mMyAlloc; // whether or not *I* allocated the buffer and am responsible for deleting it.
	char           *mData;    // ascii data to parse.
	MiI32           mLen;     // length of data
	SeparatorType   mHard[256];        // per-byte classification table
	char            mHardString[256*2]; // 256 one-character zero-terminated strings
	char            mQuoteChar;
	const char     *argv[MAXARGS];      // scratch argument list handed to callbacks
};
+
+};
+
+#endif
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiMapSet.h b/APEX_1.4/shared/general/meshimport/include/utils/MiMapSet.h
new file mode 100644
index 00000000..e90fef01
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiMapSet.h
@@ -0,0 +1,1009 @@
+/*
+ * Copyright (c) 2011-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// Written by Paul Pedriana.
+//
+// Refactored to be compliant with the PhysX data types by John W. Ratcliff
+// on March 23, 2011
+//////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef MI_MAP_SET_H
+#define MI_MAP_SET_H
+
+#include "MiPlatformConfig.h"
+#include "MiMapSetInternal.h"
+
+
+namespace mimp
+{
+
+ /// EASTL_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_MAP_DEFAULT_NAME
+ #define EASTL_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " map" // Unless the user overrides something, this is "EASTL map".
+ #endif
+
+
+ /// EASTL_MULTIMAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_MULTIMAP_DEFAULT_NAME
+ #define EASTL_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " multimap" // Unless the user overrides something, this is "EASTL multimap".
+ #endif
+
+
+ /// EASTL_MAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_MAP_DEFAULT_NAME)
+ #endif
+
+ /// EASTL_MULTIMAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_MULTIMAP_DEFAULT_ALLOCATOR
+ #define EASTL_MULTIMAP_DEFAULT_ALLOCATOR allocator_type(EASTL_MULTIMAP_DEFAULT_NAME)
+ #endif
+
+
+
+ /// map
+ ///
+ /// Implements a canonical map.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a map container, your pool
+ /// needs to contain items of type map::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+    ///     typedef map<Widget, int, less<Widget>, MemoryPool> WidgetMap; // Declare your WidgetMap type.
+ /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetMap myMap(&myPool); // Create a map that uses the pool.
+ ///
    // NOTE(review): the trailing rbtree template arguments here are (true, true)
    // versus multimap's (true, false); presumably the final one selects
    // unique-key behaviour -- confirm against rbtree in MiMapSetInternal.h.
    template <typename Key, typename T, typename Compare = mimp::less<Key>, typename Allocator = EASTLAllocatorType>
    class map
        : public rbtree<Key, mimp::pair<const Key, T>, Compare, Allocator, mimp::use_first<mimp::pair<const Key, T> >, true, true>
    {
    public:
        typedef rbtree<Key, mimp::pair<const Key, T>, Compare, Allocator,
                        mimp::use_first<mimp::pair<const Key, T> >, true, true>   base_type;
        typedef map<Key, T, Compare, Allocator>                                   this_type;
        typedef typename base_type::size_type                                     size_type;
        typedef typename base_type::key_type                                      key_type;
        typedef T                                                                 mapped_type;
        typedef typename base_type::value_type                                    value_type;
        typedef typename base_type::node_type                                     node_type;
        typedef typename base_type::iterator                                      iterator;
        typedef typename base_type::const_iterator                                const_iterator;
        typedef typename base_type::allocator_type                                allocator_type;
        typedef typename base_type::insert_return_type                            insert_return_type;
        typedef typename base_type::extract_key                                   extract_key;
        // Other types are inherited from the base class.

        using base_type::begin;
        using base_type::end;
        using base_type::find;
        using base_type::lower_bound;
        using base_type::upper_bound;
        using base_type::mCompare;

        #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x has a bug which we work around.
            using base_type::insert;
            using base_type::erase;
        #endif

    public:
        map(const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
        map(const Compare& compare, const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
        map(const this_type& x);

        template <typename Iterator>
        map(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To consider: Make a second version of this function without a default arg.

    public:
        /// This is an extension to the C++ standard. We insert a default-constructed
        /// element with the given key. The reason for this is that we can avoid the
        /// potentially expensive operation of creating and/or copying a mapped_type
        /// object on the stack.
        insert_return_type insert(const Key& key);

        #if defined(__GNUC__) && (__GNUC__ < 3) // If using old GCC (GCC 2.x has a bug which we work around)
            template <typename InputIterator>
            void insert(InputIterator first, InputIterator last)                        { return base_type::insert(first, last); }
            insert_return_type insert(const value_type& value)                          { return base_type::insert(value); }
            iterator insert(iterator position, const value_type& value)                 { return base_type::insert(position, value); }
            iterator erase(iterator position)                                           { return base_type::erase(position); }
            iterator erase(iterator first, iterator last)                               { return base_type::erase(first, last); }
        #endif

        size_type erase(const Key& key);       // erase by key; returns 0 or 1 (keys are unique)
        size_type count(const Key& key) const; // returns 0 or 1 (keys are unique)

        mimp::pair<iterator, iterator>             equal_range(const Key& key);
        mimp::pair<const_iterator, const_iterator> equal_range(const Key& key) const;

        T& operator[](const Key& key); // Of map, multimap, set, and multimap, only map has operator[].

    }; // map
+
+
+
+
+
+
+ /// multimap
+ ///
+ /// Implements a canonical multimap.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a multimap container, your pool
+ /// needs to contain items of type multimap::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+    ///     typedef multimap<Widget, int, less<Widget>, MemoryPool> WidgetMap; // Declare your WidgetMap type.
+ /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetMap myMap(&myPool); // Create a map that uses the pool.
+ ///
    // NOTE(review): the final rbtree template argument is 'false' here versus
    // map's 'true'; presumably it selects duplicate-key behaviour -- confirm
    // against rbtree in MiMapSetInternal.h.
    template <typename Key, typename T, typename Compare = mimp::less<Key>, typename Allocator = EASTLAllocatorType>
    class multimap
        : public rbtree<Key, mimp::pair<const Key, T>, Compare, Allocator, mimp::use_first<mimp::pair<const Key, T> >, true, false>
    {
    public:
        typedef rbtree<Key, mimp::pair<const Key, T>, Compare, Allocator,
                        mimp::use_first<mimp::pair<const Key, T> >, true, false>  base_type;
        typedef multimap<Key, T, Compare, Allocator>                              this_type;
        typedef typename base_type::size_type                                     size_type;
        typedef typename base_type::key_type                                      key_type;
        typedef T                                                                 mapped_type;
        typedef typename base_type::value_type                                    value_type;
        typedef typename base_type::node_type                                     node_type;
        typedef typename base_type::iterator                                      iterator;
        typedef typename base_type::const_iterator                                const_iterator;
        typedef typename base_type::allocator_type                                allocator_type;
        typedef typename base_type::insert_return_type                            insert_return_type;
        typedef typename base_type::extract_key                                   extract_key;
        // Other types are inherited from the base class.

        using base_type::begin;
        using base_type::end;
        using base_type::find;
        using base_type::lower_bound;
        using base_type::upper_bound;
        using base_type::mCompare;

        #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x has a bug which we work around.
            using base_type::insert;
            using base_type::erase;
        #endif

    public:
        multimap(const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR);
        multimap(const Compare& compare, const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR);
        multimap(const this_type& x);

        template <typename Iterator>
        multimap(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To consider: Make a second version of this function without a default arg.

    public:
        /// This is an extension to the C++ standard. We insert a default-constructed
        /// element with the given key. The reason for this is that we can avoid the
        /// potentially expensive operation of creating and/or copying a mapped_type
        /// object on the stack.
        insert_return_type insert(const Key& key);

        #if defined(__GNUC__) && (__GNUC__ < 3) // If using old GCC (GCC 2.x has a bug which we work around)
            template <typename InputIterator>
            void insert(InputIterator first, InputIterator last)                        { return base_type::insert(first, last); }
            insert_return_type insert(const value_type& value)                          { return base_type::insert(value); }
            iterator insert(iterator position, const value_type& value)                 { return base_type::insert(position, value); }
            iterator erase(iterator position)                                           { return base_type::erase(position); }
            iterator erase(iterator first, iterator last)                               { return base_type::erase(first, last); }
        #endif

        size_type erase(const Key& key);       // erases ALL elements matching 'key'; returns the count removed
        size_type count(const Key& key) const; // may be greater than 1 (duplicate keys allowed)

        mimp::pair<iterator, iterator>             equal_range(const Key& key);
        mimp::pair<const_iterator, const_iterator> equal_range(const Key& key) const;

        /// equal_range_small
        /// This is a special version of equal_range which is optimized for the
        /// case of there being few or no duplicated keys in the tree.
        mimp::pair<iterator, iterator>             equal_range_small(const Key& key);
        mimp::pair<const_iterator, const_iterator> equal_range_small(const Key& key) const;

    }; // multimap
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // map
+ ///////////////////////////////////////////////////////////////////////
+
    /// Constructs an empty map using the given allocator.
    template <typename Key, typename T, typename Compare, typename Allocator>
    inline map<Key, T, Compare, Allocator>::map(const allocator_type& allocator)
        : base_type(allocator) { }
+
+
    /// Constructs an empty map with an explicit comparison functor and allocator.
    template <typename Key, typename T, typename Compare, typename Allocator>
    inline map<Key, T, Compare, Allocator>::map(const Compare& compare, const allocator_type& allocator)
        : base_type(compare, allocator) { }
+
+
    /// Copy constructor; delegates entirely to the rbtree base.
    template <typename Key, typename T, typename Compare, typename Allocator>
    inline map<Key, T, Compare, Allocator>::map(const this_type& x)
        : base_type(x) { }
+
+
    /// Range constructor: inserts [itBegin, itEnd) using a default-constructed
    /// comparator and the default allocator (see the declaration for why no
    /// allocator argument is accepted here).
    template <typename Key, typename T, typename Compare, typename Allocator>
    template <typename Iterator>
    inline map<Key, T, Compare, Allocator>::map(Iterator itBegin, Iterator itEnd)
        : base_type(itBegin, itEnd, Compare(), EASTL_MAP_DEFAULT_ALLOCATOR) { }
+
+
    /// Inserts a default-constructed mapped value for 'key' (EASTL extension).
    /// true_type() selects the unique-key insertion path in the rbtree base.
    template <typename Key, typename T, typename Compare, typename Allocator>
    inline typename map<Key, T, Compare, Allocator>::insert_return_type
    map<Key, T, Compare, Allocator>::insert(const Key& key)
    {
        return base_type::DoInsertKey(key, true_type());
    }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename map<Key, T, Compare, Allocator>::size_type
+ map<Key, T, Compare, Allocator>::erase(const Key& key)
+ {
+ const iterator it(find(key));
+
+ if(it != end()) // If it exists...
+ {
+ base_type::erase(it);
+ return 1;
+ }
+ return 0;
+ }
+
+
+ template <typename Key, typename T, typename Compare, typename Allocator>
+ inline typename map<Key, T, Compare, Allocator>::size_type
+ map<Key, T, Compare, Allocator>::count(const Key& key) const
+ {
+ const const_iterator it(find(key));
+ return (it != end()) ? 1 : 0;
+ }
+
+
    /// Returns the (at most one-element) range of elements matching 'key'.
    template <typename Key, typename T, typename Compare, typename Allocator>
    inline mimp::pair<typename map<Key, T, Compare, Allocator>::iterator,
                       typename map<Key, T, Compare, Allocator>::iterator>
    map<Key, T, Compare, Allocator>::equal_range(const Key& key)
    {
        // The resulting range will either be empty or have one element,
        // so instead of doing two tree searches (one for lower_bound and
        // one for upper_bound), we do just lower_bound and see if the
        // result is a range of size zero or one.
        const iterator itLower(lower_bound(key));

        if((itLower == end()) || mCompare(key, itLower.mpNode->mValue.first)) // If at the end or if (key is < itLower)...
            return mimp::pair<iterator, iterator>(itLower, itLower);

        iterator itUpper(itLower);
        return mimp::pair<iterator, iterator>(itLower, ++itUpper);
    }
+
+
    /// Const overload of equal_range; same single-lookup strategy.
    template <typename Key, typename T, typename Compare, typename Allocator>
    inline mimp::pair<typename map<Key, T, Compare, Allocator>::const_iterator,
                       typename map<Key, T, Compare, Allocator>::const_iterator>
    map<Key, T, Compare, Allocator>::equal_range(const Key& key) const
    {
        // See equal_range above for comments.
        const const_iterator itLower(lower_bound(key));

        if((itLower == end()) || mCompare(key, itLower.mpNode->mValue.first)) // If at the end or if (key is < itLower)...
            return mimp::pair<const_iterator, const_iterator>(itLower, itLower);

        const_iterator itUpper(itLower);
        return mimp::pair<const_iterator, const_iterator>(itLower, ++itUpper);
    }
+
+
    /// Returns a reference to the mapped value for 'key', inserting a
    /// default-constructed value first if the key is not present
    /// (standard std::map-style semantics).
    template <typename Key, typename T, typename Compare, typename Allocator>
    inline T& map<Key, T, Compare, Allocator>::operator[](const Key& key)
    {
        iterator itLower(lower_bound(key)); // itLower->first is >= key.

        if((itLower == end()) || mCompare(key, (*itLower).first))
        {
            // Not found: insert at the hint position returned by lower_bound.
            itLower = base_type::insert(itLower, value_type(key, T()));

            // To do: Convert this to use the more efficient:
            // itLower = DoInsertKey(itLower, key, true_type());
            // when we gain confidence in that function.
        }

        return (*itLower).second;

        // Reference implementation of this function, which may not be as fast:
        //iterator it(base_type::insert(mimp::pair<iterator, iterator>(key, T())).first);
        //return it->second;
    }
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // multimap
+ ///////////////////////////////////////////////////////////////////////
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline multimap<Key, T, Compare, Allocator>::multimap(const allocator_type& allocator)
+    : base_type(allocator)
+    {
+        // Empty. All construction work is forwarded to the rbtree base class.
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline multimap<Key, T, Compare, Allocator>::multimap(const Compare& compare, const allocator_type& allocator)
+    : base_type(compare, allocator)
+    {
+        // Empty. The comparison object and allocator are stored by the rbtree base.
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline multimap<Key, T, Compare, Allocator>::multimap(const this_type& x)
+    : base_type(x)
+    {
+        // Empty. Copy construction is implemented entirely by the rbtree base.
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    template <typename Iterator>
+    inline multimap<Key, T, Compare, Allocator>::multimap(Iterator itBegin, Iterator itEnd)
+    : base_type(itBegin, itEnd, Compare(), EASTL_MULTIMAP_DEFAULT_ALLOCATOR)
+    {
+        // Empty. Inserts [itBegin, itEnd) using a default-constructed
+        // comparator and the default multimap allocator (no allocator
+        // parameter here; see the note on the declaration about VC7.1).
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline typename multimap<Key, T, Compare, Allocator>::insert_return_type
+    multimap<Key, T, Compare, Allocator>::insert(const Key& key)
+    {
+        // Inserts an element keyed by 'key'. The false_type tag presumably
+        // selects the rbtree's duplicate-allowing insertion path, consistent
+        // with multimap semantics — verify against rbtree::DoInsertKey.
+        return base_type::DoInsertKey(key, false_type());
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline typename multimap<Key, T, Compare, Allocator>::size_type
+    multimap<Key, T, Compare, Allocator>::erase(const Key& key)
+    {
+        // Remove every element whose key is equivalent to 'key' and
+        // report how many elements were removed.
+        const mimp::pair<iterator, iterator> matches(equal_range(key));
+        const size_type removedCount = (size_type)mimp::distance(matches.first, matches.second);
+        base_type::erase(matches.first, matches.second);
+        return removedCount;
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline typename multimap<Key, T, Compare, Allocator>::size_type
+    multimap<Key, T, Compare, Allocator>::count(const Key& key) const
+    {
+        // The number of matches is the width of the equal range.
+        const mimp::pair<const_iterator, const_iterator> matches(equal_range(key));
+        const size_type matchCount = (size_type)mimp::distance(matches.first, matches.second);
+        return matchCount;
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline mimp::pair<typename multimap<Key, T, Compare, Allocator>::iterator,
+                      typename multimap<Key, T, Compare, Allocator>::iterator>
+    multimap<Key, T, Compare, Allocator>::equal_range(const Key& key)
+    {
+        // This is the canonical implementation used by the C++ standard and
+        // most (all?) commercial STL implementations: two tree searches, one
+        // for each bound. It performs well for large containers with many
+        // duplicated values. When the caller knows there are only a few
+        // duplicates, equal_range_small below is the cheaper alternative.
+        const iterator itFirst(lower_bound(key));
+        const iterator itSecond(upper_bound(key));
+        return mimp::pair<iterator, iterator>(itFirst, itSecond);
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline mimp::pair<typename multimap<Key, T, Compare, Allocator>::const_iterator,
+                      typename multimap<Key, T, Compare, Allocator>::const_iterator>
+    multimap<Key, T, Compare, Allocator>::equal_range(const Key& key) const
+    {
+        // See comments in the non-const equal_range; same two-search approach.
+        const const_iterator itFirst(lower_bound(key));
+        const const_iterator itSecond(upper_bound(key));
+        return mimp::pair<const_iterator, const_iterator>(itFirst, itSecond);
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline mimp::pair<typename multimap<Key, T, Compare, Allocator>::iterator,
+                      typename multimap<Key, T, Compare, Allocator>::iterator>
+    multimap<Key, T, Compare, Allocator>::equal_range_small(const Key& key)
+    {
+        // Alternative to equal_range that is faster when at most a small
+        // number of duplicate keys exist: walk forward from lower_bound
+        // instead of paying for a second tree search (upper_bound).
+        const iterator itFirst(lower_bound(key));
+        iterator itScan(itFirst);
+
+        // Advance while the element's key is still equivalent to 'key'
+        // (i.e. 'key' is not strictly less than the element's key).
+        for(; (itScan != end()) && !mCompare(key, itScan.mpNode->mValue.first); ++itScan)
+        {
+            // Intentionally empty.
+        }
+
+        return mimp::pair<iterator, iterator>(itFirst, itScan);
+    }
+
+
+    template <typename Key, typename T, typename Compare, typename Allocator>
+    inline mimp::pair<typename multimap<Key, T, Compare, Allocator>::const_iterator,
+                      typename multimap<Key, T, Compare, Allocator>::const_iterator>
+    multimap<Key, T, Compare, Allocator>::equal_range_small(const Key& key) const
+    {
+        // Const counterpart of equal_range_small above: linear walk from
+        // lower_bound, cheaper than a second tree search when duplicates
+        // are known to be few.
+        const const_iterator itFirst(lower_bound(key));
+        const_iterator itScan(itFirst);
+
+        for(; (itScan != end()) && !mCompare(key, itScan.mpNode->mValue.first); ++itScan)
+        {
+            // Intentionally empty.
+        }
+
+        return mimp::pair<const_iterator, const_iterator>(itFirst, itScan);
+    }
+
+
+ /// EASTL_SET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_SET_DEFAULT_NAME
+ #define EASTL_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " set" // Unless the user overrides something, this is "EASTL set".
+ #endif
+
+
+ /// EASTL_MULTISET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_MULTISET_DEFAULT_NAME
+ #define EASTL_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " multiset" // Unless the user overrides something, this is "EASTL multiset".
+ #endif
+
+
+ /// EASTL_SET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_SET_DEFAULT_ALLOCATOR
+ #define EASTL_SET_DEFAULT_ALLOCATOR allocator_type(EASTL_SET_DEFAULT_NAME)
+ #endif
+
+ /// EASTL_MULTISET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_MULTISET_DEFAULT_ALLOCATOR
+ #define EASTL_MULTISET_DEFAULT_ALLOCATOR allocator_type(EASTL_MULTISET_DEFAULT_NAME)
+ #endif
+
+
+
+ /// set
+ ///
+ /// Implements a canonical set.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+ /// Note that the 'bMutableIterators' template parameter to rbtree is set to false.
+ /// This means that set::iterator is const and the same as set::const_iterator.
+ /// This is by design and it follows the C++ standard defect report recommendation.
+ /// If the user wants to modify a container element, the user needs to either use
+ /// mutable data members or use const_cast on the iterator's data member. Both of
+ /// these solutions are recommended by the C++ standard defect report.
+ /// To consider: Expose the bMutableIterators template policy here at the set level
+ /// so the user can have non-const set iterators via a template parameter.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a set container, your pool
+ /// needs to contain items of type set::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+    ///        typedef set<Widget, less<Widget>, MemoryPool> WidgetSet;  // Declare your WidgetSet type.
+    ///        MemoryPool myPool(sizeof(WidgetSet::node_type), 100);     // Make a pool of 100 Widget nodes.
+    ///        WidgetSet mySet(&myPool);                                 // Create a set that uses the pool.
+ ///
+    template <typename Key, typename Compare = mimp::less<Key>, typename Allocator = EASTLAllocatorType>
+    class set
+        : public rbtree<Key, Key, Compare, Allocator, mimp::use_self<Key>, false, true>
+    {
+    public:
+        // Note: the 'false' rbtree argument is bMutableIterators (see the
+        // class comment above); the trailing 'true' presumably selects
+        // unique keys — verify against the rbtree declaration.
+        typedef rbtree<Key, Key, Compare, Allocator, mimp::use_self<Key>, false, true> base_type;
+        typedef set<Key, Compare, Allocator>               this_type;
+        typedef typename base_type::size_type              size_type;
+        typedef typename base_type::value_type             value_type;
+        typedef typename base_type::iterator               iterator;
+        typedef typename base_type::const_iterator         const_iterator;
+        typedef typename base_type::reverse_iterator       reverse_iterator;
+        typedef typename base_type::const_reverse_iterator const_reverse_iterator;
+        typedef typename base_type::allocator_type         allocator_type;
+        // Other types are inherited from the base class.
+
+        // Re-export base-class members so the out-of-line method
+        // definitions below can use them unqualified.
+        using base_type::begin;
+        using base_type::end;
+        using base_type::find;
+        using base_type::lower_bound;
+        using base_type::upper_bound;
+        using base_type::mCompare;
+
+    public:
+        set(const allocator_type& allocator = EASTL_SET_DEFAULT_ALLOCATOR);
+        set(const Compare& compare, const allocator_type& allocator = EASTL_SET_DEFAULT_ALLOCATOR);
+        set(const this_type& x);
+
+        template <typename Iterator>
+        set(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+    public:
+        // The erase overloads below hide the base-class versions, so the
+        // iterator forms are re-declared here and forwarded (see definitions).
+        size_type erase(const Key& k);
+        iterator erase(iterator position);
+        iterator erase(iterator first, iterator last);
+
+        reverse_iterator erase(reverse_iterator position);
+        reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+
+        size_type count(const Key& k) const;
+
+        mimp::pair<iterator, iterator> equal_range(const Key& k);
+        mimp::pair<const_iterator, const_iterator> equal_range(const Key& k) const;
+
+    }; // set
+
+
+
+
+
+ /// multiset
+ ///
+ /// Implements a canonical multiset.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+    /// See notes above in 'set' regarding mutable iterators.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a multiset container, your pool
+ /// needs to contain items of type multiset::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+    ///        typedef multiset<Widget, less<Widget>, MemoryPool> WidgetSet;  // Declare your WidgetSet type.
+    ///        MemoryPool myPool(sizeof(WidgetSet::node_type), 100);          // Make a pool of 100 Widget nodes.
+    ///        WidgetSet mySet(&myPool);                                      // Create a multiset that uses the pool.
+ ///
+    template <typename Key, typename Compare = mimp::less<Key>, typename Allocator = EASTLAllocatorType>
+    class multiset
+        : public rbtree<Key, Key, Compare, Allocator, mimp::use_self<Key>, false, false>
+    {
+    public:
+        // Note: the first 'false' rbtree argument is bMutableIterators (see
+        // the 'set' class comment); the trailing 'false' presumably allows
+        // duplicate keys — verify against the rbtree declaration.
+        typedef rbtree<Key, Key, Compare, Allocator, mimp::use_self<Key>, false, false> base_type;
+        typedef multiset<Key, Compare, Allocator>          this_type;
+        typedef typename base_type::size_type              size_type;
+        typedef typename base_type::value_type             value_type;
+        typedef typename base_type::iterator               iterator;
+        typedef typename base_type::const_iterator         const_iterator;
+        typedef typename base_type::reverse_iterator       reverse_iterator;
+        typedef typename base_type::const_reverse_iterator const_reverse_iterator;
+        typedef typename base_type::allocator_type         allocator_type;
+        // Other types are inherited from the base class.
+
+        // Re-export base-class members so the out-of-line method
+        // definitions below can use them unqualified.
+        using base_type::begin;
+        using base_type::end;
+        using base_type::find;
+        using base_type::lower_bound;
+        using base_type::upper_bound;
+        using base_type::mCompare;
+
+    public:
+        multiset(const allocator_type& allocator = EASTL_MULTISET_DEFAULT_ALLOCATOR);
+        multiset(const Compare& compare, const allocator_type& allocator = EASTL_MULTISET_DEFAULT_ALLOCATOR);
+        multiset(const this_type& x);
+
+        template <typename Iterator>
+        multiset(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+    public:
+        // The erase overloads below hide the base-class versions, so the
+        // iterator forms are re-declared here and forwarded (see definitions).
+        size_type erase(const Key& k);
+        iterator erase(iterator position);
+        iterator erase(iterator first, iterator last);
+
+        reverse_iterator erase(reverse_iterator position);
+        reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+
+        size_type count(const Key& k) const;
+
+        mimp::pair<iterator, iterator> equal_range(const Key& k);
+        mimp::pair<const_iterator, const_iterator> equal_range(const Key& k) const;
+
+        /// equal_range_small
+        /// This is a special version of equal_range which is optimized for the
+        /// case of there being few or no duplicated keys in the tree.
+        mimp::pair<iterator, iterator> equal_range_small(const Key& k);
+        mimp::pair<const_iterator, const_iterator> equal_range_small(const Key& k) const;
+
+    }; // multiset
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // set
+ ///////////////////////////////////////////////////////////////////////
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline set<Key, Compare, Allocator>::set(const allocator_type& allocator)
+    : base_type(allocator)
+    {
+        // Empty. All construction work is forwarded to the rbtree base class.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline set<Key, Compare, Allocator>::set(const Compare& compare, const allocator_type& allocator)
+    : base_type(compare, allocator)
+    {
+        // Empty. The comparison object and allocator are stored by the rbtree base.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline set<Key, Compare, Allocator>::set(const this_type& x)
+    : base_type(x)
+    {
+        // Empty. Copy construction is implemented entirely by the rbtree base.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    template <typename Iterator>
+    inline set<Key, Compare, Allocator>::set(Iterator itBegin, Iterator itEnd)
+    : base_type(itBegin, itEnd, Compare(), EASTL_SET_DEFAULT_ALLOCATOR)
+    {
+        // Empty. Inserts [itBegin, itEnd) using a default-constructed
+        // comparator and the default set allocator (no allocator parameter
+        // here; see the note on the declaration about VC7.1).
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::size_type
+    set<Key, Compare, Allocator>::erase(const Key& k)
+    {
+        // Set keys are unique, so at most one element can be removed.
+        const iterator itFound(find(k));
+
+        if(itFound == end())
+            return 0; // Nothing matched; nothing removed.
+
+        base_type::erase(itFound);
+        return 1;
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::iterator
+    set<Key, Compare, Allocator>::erase(iterator position)
+    {
+        // We need to provide this version because we override another version
+        // and C++ hiding rules would make the base version of this hidden.
+        // Pure forwarder: delegates directly to rbtree::erase.
+        return base_type::erase(position);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::iterator
+    set<Key, Compare, Allocator>::erase(iterator first, iterator last)
+    {
+        // We need to provide this version because we override another version
+        // and C++ hiding rules would make the base version of this hidden.
+        // Pure forwarder: delegates directly to rbtree::erase.
+        return base_type::erase(first, last);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::size_type
+    set<Key, Compare, Allocator>::count(const Key& k) const
+    {
+        // A set never stores duplicate keys, so the count is 0 or 1.
+        if(find(k) != end())
+            return (size_type)1;
+        return (size_type)0;
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::reverse_iterator
+    set<Key, Compare, Allocator>::erase(reverse_iterator position)
+    {
+        // ++position followed by .base() yields the forward iterator that
+        // designates the same element as 'position' (reverse_iterator::base
+        // refers to the element one past the one the reverse iterator
+        // designates); erase it and wrap the successor back up.
+        return reverse_iterator(erase((++position).base()));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::reverse_iterator
+    set<Key, Compare, Allocator>::erase(reverse_iterator first, reverse_iterator last)
+    {
+        // Version which erases in order from first to last.
+        // difference_type i(first.base() - last.base());
+        // while(i--)
+        //     first = erase(first);
+        // return first;
+
+        // Version which erases in order from last to first, but is slightly more efficient:
+        // The ++/base() shuffle converts the reverse range [first, last)
+        // into the equivalent forward range before forwarding to the
+        // forward-iterator erase overload above.
+        return reverse_iterator(erase((++last).base(), (++first).base()));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline mimp::pair<typename set<Key, Compare, Allocator>::iterator,
+                      typename set<Key, Compare, Allocator>::iterator>
+    set<Key, Compare, Allocator>::equal_range(const Key& k)
+    {
+        // Because set keys are unique the resulting range holds at most one
+        // element, so a single lower_bound search suffices; we avoid the
+        // second tree walk that a separate upper_bound call would cost.
+        const iterator itFirst(lower_bound(k));
+
+        if((itFirst != end()) && !mCompare(k, *itFirst))
+        {
+            // k is present: the range is exactly one element wide.
+            iterator itSecond(itFirst);
+            ++itSecond;
+            return mimp::pair<iterator, iterator>(itFirst, itSecond);
+        }
+
+        // k is absent (or itFirst is end()): return an empty range.
+        return mimp::pair<iterator, iterator>(itFirst, itFirst);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline mimp::pair<typename set<Key, Compare, Allocator>::const_iterator,
+                      typename set<Key, Compare, Allocator>::const_iterator>
+    set<Key, Compare, Allocator>::equal_range(const Key& k) const
+    {
+        // Same single-search strategy as the non-const equal_range above.
+        const const_iterator itFirst(lower_bound(k));
+
+        if((itFirst != end()) && !mCompare(k, *itFirst))
+        {
+            // k is present: range of size one.
+            const_iterator itSecond(itFirst);
+            ++itSecond;
+            return mimp::pair<const_iterator, const_iterator>(itFirst, itSecond);
+        }
+
+        // k is absent: empty range.
+        return mimp::pair<const_iterator, const_iterator>(itFirst, itFirst);
+    }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // multiset
+ ///////////////////////////////////////////////////////////////////////
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline multiset<Key, Compare, Allocator>::multiset(const allocator_type& allocator)
+    : base_type(allocator)
+    {
+        // Empty. All construction work is forwarded to the rbtree base class.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline multiset<Key, Compare, Allocator>::multiset(const Compare& compare, const allocator_type& allocator)
+    : base_type(compare, allocator)
+    {
+        // Empty. The comparison object and allocator are stored by the rbtree base.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline multiset<Key, Compare, Allocator>::multiset(const this_type& x)
+    : base_type(x)
+    {
+        // Empty. Copy construction is implemented entirely by the rbtree base.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    template <typename Iterator>
+    inline multiset<Key, Compare, Allocator>::multiset(Iterator itBegin, Iterator itEnd)
+    : base_type(itBegin, itEnd, Compare(), EASTL_MULTISET_DEFAULT_ALLOCATOR)
+    {
+        // Empty. Inserts [itBegin, itEnd) using a default-constructed
+        // comparator and the default multiset allocator (no allocator
+        // parameter here; see the note on the declaration about VC7.1).
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::size_type
+    multiset<Key, Compare, Allocator>::erase(const Key& k)
+    {
+        // Remove every element equivalent to 'k' and report how many
+        // elements were removed.
+        const mimp::pair<iterator, iterator> matches(equal_range(k));
+        const size_type removedCount = (size_type)mimp::distance(matches.first, matches.second);
+        base_type::erase(matches.first, matches.second);
+        return removedCount;
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::iterator
+    multiset<Key, Compare, Allocator>::erase(iterator position)
+    {
+        // We need to provide this version because we override another version
+        // and C++ hiding rules would make the base version of this hidden.
+        // Pure forwarder: delegates directly to rbtree::erase.
+        return base_type::erase(position);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::iterator
+    multiset<Key, Compare, Allocator>::erase(iterator first, iterator last)
+    {
+        // We need to provide this version because we override another version
+        // and C++ hiding rules would make the base version of this hidden.
+        // Pure forwarder: delegates directly to rbtree::erase.
+        return base_type::erase(first, last);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::size_type
+    multiset<Key, Compare, Allocator>::count(const Key& k) const
+    {
+        // The number of matches is the width of the equal range.
+        const mimp::pair<const_iterator, const_iterator> matches(equal_range(k));
+        const size_type matchCount = (size_type)mimp::distance(matches.first, matches.second);
+        return matchCount;
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::reverse_iterator
+    multiset<Key, Compare, Allocator>::erase(reverse_iterator position)
+    {
+        // ++position followed by .base() yields the forward iterator that
+        // designates the same element as 'position'; erase it and wrap the
+        // successor back into a reverse_iterator.
+        return reverse_iterator(erase((++position).base()));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::reverse_iterator
+    multiset<Key, Compare, Allocator>::erase(reverse_iterator first, reverse_iterator last)
+    {
+        // Version which erases in order from first to last.
+        // difference_type i(first.base() - last.base());
+        // while(i--)
+        //     first = erase(first);
+        // return first;
+
+        // Version which erases in order from last to first, but is slightly more efficient:
+        // The ++/base() shuffle converts the reverse range [first, last)
+        // into the equivalent forward range before forwarding to the
+        // forward-iterator erase overload above.
+        return reverse_iterator(erase((++last).base(), (++first).base()));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline mimp::pair<typename multiset<Key, Compare, Allocator>::iterator,
+                      typename multiset<Key, Compare, Allocator>::iterator>
+    multiset<Key, Compare, Allocator>::equal_range(const Key& k)
+    {
+        // This is the canonical implementation used by the C++ standard and
+        // most (all?) commercial STL implementations: two tree searches, one
+        // for each bound. It performs well for large containers with many
+        // duplicated values. When the caller knows there are only a few
+        // duplicates, equal_range_small below is the cheaper alternative.
+        const iterator itFirst(lower_bound(k));
+        const iterator itSecond(upper_bound(k));
+        return mimp::pair<iterator, iterator>(itFirst, itSecond);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline mimp::pair<typename multiset<Key, Compare, Allocator>::const_iterator,
+                      typename multiset<Key, Compare, Allocator>::const_iterator>
+    multiset<Key, Compare, Allocator>::equal_range(const Key& k) const
+    {
+        // See comments above in the non-const version of equal_range.
+        // Fix: construct a pair of const_iterators, matching the declared
+        // return type. The original built mimp::pair<iterator, iterator>,
+        // which only compiled because this rbtree instantiation sets
+        // bMutableIterators to false, making iterator and const_iterator
+        // the same type. The const-qualified spelling is the correct one
+        // and matches the multimap const overload.
+        return mimp::pair<const_iterator, const_iterator>(lower_bound(k), upper_bound(k));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline mimp::pair<typename multiset<Key, Compare, Allocator>::iterator,
+                      typename multiset<Key, Compare, Allocator>::iterator>
+    multiset<Key, Compare, Allocator>::equal_range_small(const Key& k)
+    {
+        // Alternative to equal_range that is faster when at most a small
+        // number of duplicate keys exist: walk forward from lower_bound
+        // instead of paying for a second tree search (upper_bound).
+        const iterator itFirst(lower_bound(k));
+        iterator itScan(itFirst);
+
+        // Advance while the element is still equivalent to 'k'
+        // (i.e. 'k' is not strictly less than the element).
+        for(; (itScan != end()) && !mCompare(k, itScan.mpNode->mValue); ++itScan)
+        {
+            // Intentionally empty.
+        }
+
+        return mimp::pair<iterator, iterator>(itFirst, itScan);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline mimp::pair<typename multiset<Key, Compare, Allocator>::const_iterator,
+                      typename multiset<Key, Compare, Allocator>::const_iterator>
+    multiset<Key, Compare, Allocator>::equal_range_small(const Key& k) const
+    {
+        // Const counterpart of equal_range_small above: linear walk from
+        // lower_bound, cheaper than a second tree search when duplicates
+        // are known to be few.
+        const const_iterator itFirst(lower_bound(k));
+        const_iterator itScan(itFirst);
+
+        for(; (itScan != end()) && !mCompare(k, *itScan); ++itScan)
+        {
+            // Intentionally empty.
+        }
+
+        return mimp::pair<const_iterator, const_iterator>(itFirst, itScan);
+    }
+
+
+} // namespace mimp
+
+
+#endif // Header include guard
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiMapSetInternal.h b/APEX_1.4/shared/general/meshimport/include/utils/MiMapSetInternal.h
new file mode 100644
index 00000000..02117cc4
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiMapSetInternal.h
@@ -0,0 +1,5915 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_MAP_SET_INTERNAL_H
+
+#define MI_MAP_SET_INTERNAL_H
+
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// Written by Paul Pedriana.
+//
+// Refactored to be compliant with the PhysX data types by John W. Ratcliff
+// on March 23, 2011
+//////////////////////////////////////////////////////////////////////////////
+
+
+
+#include "MiPlatformConfig.h"
+#include <limits.h>
+#include <stddef.h>
+#include <new>
+
+#pragma warning(push)
+#pragma warning(disable:4512)
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL namespace
+//
+// We define this so that users that #include this config file can reference
+// these namespaces without seeing any other files that happen to use them.
+///////////////////////////////////////////////////////////////////////////////
+
+/// EA Standard Template Library
+namespace mimp
+{
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALIGN_OF
+//
+// Determines the alignment of a type.
+//
+// Example usage:
+// size_t alignment = EASTL_ALIGN_OF(int);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALIGN_OF
+#if defined(__MWERKS__)
+#define EASTL_ALIGN_OF(type) ((size_t)__alignof__(type))
+#elif !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x doesn't do __alignof correctly all the time.
+#define EASTL_ALIGN_OF __alignof
+#else
+#define EASTL_ALIGN_OF(type) ((size_t)offsetof(struct{ char c; type m; }, m))
+#endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUG
+//
+// Defined as an integer >= 0. Default is 1 for debug builds and 0 for
+// release builds. This define is also a master switch for the default value
+// of some other settings.
+//
+// Example usage:
+// #if EASTL_DEBUG
+// ...
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(_DEBUG)
+ #define EASTL_DEBUG 1
+#else
+ #define EASTL_DEBUG 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUGPARAMS_LEVEL
+//
+// EASTL_DEBUGPARAMS_LEVEL controls what debug information is passed through to
+// the allocator by default.
+// This value may be defined by the user ... if not it will default to 1 for
+// EA_DEBUG builds, otherwise 0.
+//
+// 0 - no debug information is passed through to allocator calls.
+// 1 - 'name' is passed through to allocator calls.
+// 2 - 'name', __FILE__, and __LINE__ are passed through to allocator calls.
+//
+// This parameter mirrors the equivalent parameter in the CoreAllocator package.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUGPARAMS_LEVEL
+ #if EASTL_DEBUG
+ #define EASTL_DEBUGPARAMS_LEVEL 2
+ #else
+ #define EASTL_DEBUGPARAMS_LEVEL 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NAME_ENABLED / EASTL_NAME / EASTL_NAME_VAL
+//
+// Used to wrap debug string names. In a release build, the definition
+// goes away. These are present to avoid release build compiler warnings
+// and to make code simpler.
+//
+// Example usage of EASTL_NAME:
+// // pName will defined away in a release build and thus prevent compiler warnings.
+// void allocator::set_name(const char* EASTL_NAME(pName))
+// {
+// #if EASTL_NAME_ENABLED
+// mpName = pName;
+// #endif
+// }
+//
+// Example usage of EASTL_NAME_VAL:
+// // "xxx" is defined to NULL in a release build.
+// vector<T, Allocator>::vector(const allocator_type& allocator = allocator_type(EASTL_NAME_VAL("xxx")));
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NAME_ENABLED
+ #define EASTL_NAME_ENABLED EASTL_DEBUG
+#endif
+
+#ifndef EASTL_NAME
+ #if EASTL_NAME_ENABLED
+ #define EASTL_NAME(x) x
+ #define EASTL_NAME_VAL(x) x
+ #else
+ #define EASTL_NAME(x)
+ #define EASTL_NAME_VAL(x) ((const char*)NULL)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEFAULT_NAME_PREFIX
+//
+// Defined as a string literal. Defaults to "EASTL".
+// This define is used as the default name for EASTL where such a thing is
+// referenced in EASTL. For example, if the user doesn't specify an allocator
+// name for their deque, it is named "EASTL deque". However, you can override
+// this to say "SuperBaseball deque" by changing EASTL_DEFAULT_NAME_PREFIX.
+//
+// Example usage (which is simply taken from how deque.h uses this define):
+// #ifndef EASTL_DEQUE_DEFAULT_NAME
+// #define EASTL_DEQUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " deque"
+// #endif
+//
+#ifndef EASTL_DEFAULT_NAME_PREFIX
+ #define EASTL_DEFAULT_NAME_PREFIX "EASTL"
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_DEBUG.
+// If EASTL_ASSERT_ENABLED is non-zero, then asserts will be executed via
+// the assertion mechanism.
+//
+// Example usage:
+// #if EASTL_ASSERT_ENABLED
+// EASTL_ASSERT(v.size() > 17);
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERT_ENABLED
+ #define EASTL_ASSERT_ENABLED EASTL_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_ASSERT_ENABLED.
+// This is like EASTL_ASSERT_ENABLED, except it is for empty container
+// references. Sometime people like to be able to take a reference to
+// the front of the container, but not use it if the container is empty.
+// In practice it's often easier and more efficient to do this than to write
+// extra code to check if the container is empty.
+//
+// Example usage:
+// template <typename T, typename Allocator>
+// inline typename vector<T, Allocator>::reference
+// vector<T, Allocator>::front()
+// {
+// #if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+// EASTL_ASSERT(mpEnd > mpBegin);
+// #endif
+//
+// return *mpBegin;
+// }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ #define EASTL_EMPTY_REFERENCE_ASSERT_ENABLED EASTL_ASSERT_ENABLED
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// SetAssertionFailureFunction
+//
+// Allows the user to set a custom assertion failure mechanism.
+//
+// Example usage:
+// void Assert(const char* pExpression, void* pContext);
+// SetAssertionFailureFunction(Assert, this);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERTION_FAILURE_DEFINED
+ #define EASTL_ASSERTION_FAILURE_DEFINED
+
+ typedef void (*EASTL_AssertionFailureFunction)(const char* pExpression, void* pContext);
+ void SetAssertionFailureFunction(EASTL_AssertionFailureFunction pFunction, void* pContext);
+
+ // These are the internal default functions that implement asserts.
+ void AssertionFailure(const char* pExpression);
+ void AssertionFailureFunctionDefault(const char* pExpression, void* pContext);
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT
+//
+// Assertion macro. Can be overridden by user with a different value.
+//
+// Example usage:
+// EASTL_ASSERT(intVector.size() < 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// Guarded so that a user-supplied EASTL_ASSERT (defined before this point) is
+// honored instead of being silently redefined -- the comment above promises it
+// can be overridden. The default still expands to nothing, preserving the
+// existing (assertion-free) behavior of this header.
+#ifndef EASTL_ASSERT
+#define EASTL_ASSERT(expression)
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FAIL_MSG
+//
+// Failure macro. Can be overridden by user with a different value.
+//
+// Example usage:
+// EASTL_FAIL_MSG("detected error condition!");
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FAIL_MSG
+ #if EASTL_ASSERT_ENABLED
+ #define EASTL_FAIL_MSG(message) (mimp::AssertionFailure(message))
+ #else
+ #define EASTL_FAIL_MSG(message)
+ #endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CT_ASSERT / EASTL_CT_ASSERT_NAMED
+//
+// EASTL_CT_ASSERT is a macro for compile time assertion checks, useful for
+// validating *constant* expressions. The advantage over using EASTL_ASSERT
+// is that errors are caught at compile time instead of runtime.
+//
+// Example usage:
+// EASTL_CT_ASSERT(sizeof(mimp::MiU32) == 4);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(EASTL_DEBUG) && !defined(EASTL_CT_ASSERT)
+ // Compile-time assert machinery: EASTL_CT_ASSERTION_FAILURE<b> is complete
+ // only for b == true, so sizeof() on it fails to compile when the asserted
+ // expression is false.
+ template <bool> struct EASTL_CT_ASSERTION_FAILURE;
+ template <> struct EASTL_CT_ASSERTION_FAILURE<true>{ enum { value = 1 }; }; // We create a specialization for true, but not for false.
+ template <int x> struct EASTL_CT_ASSERTION_TEST{};
+
+ // Multi-step join so that macro arguments (notably __LINE__) are expanded
+ // before the ## token paste in EASTL_PREPROCESSOR_JOIN2.
+ #define EASTL_PREPROCESSOR_JOIN(a, b) EASTL_PREPROCESSOR_JOIN1(a, b)
+ #define EASTL_PREPROCESSOR_JOIN1(a, b) EASTL_PREPROCESSOR_JOIN2(a, b)
+ #define EASTL_PREPROCESSOR_JOIN2(a, b) a##b
+
+ #if defined(_MSC_VER)
+ // The MSVC branch reuses one typedef name (no __LINE__ suffix); every
+ // passing assert yields the identical typedef'd type, so redeclaration
+ // within a scope is benign.
+ #define EASTL_CT_ASSERT(expression) typedef EASTL_CT_ASSERTION_TEST< sizeof(EASTL_CT_ASSERTION_FAILURE< (bool)(expression) >)> EASTL_CT_ASSERT_FAILURE
+ #elif defined(__ICL) || defined(__ICC)
+ #define EASTL_CT_ASSERT(expression) typedef char EASTL_PREPROCESSOR_JOIN(EASTL_CT_ASSERT_FAILURE_, __LINE__) [EASTL_CT_ASSERTION_FAILURE< (bool)(expression) >::value]
+ #elif defined(__MWERKS__)
+ #define EASTL_CT_ASSERT(expression) enum { EASTL_PREPROCESSOR_JOIN(EASTL_CT_ASSERT_FAILURE_, __LINE__) = sizeof(EASTL_CT_ASSERTION_FAILURE< (bool)(expression) >) }
+ #else // GCC, etc.
+ #define EASTL_CT_ASSERT(expression) typedef EASTL_CT_ASSERTION_TEST< sizeof(EASTL_CT_ASSERTION_FAILURE< (bool)(expression) >)> EASTL_PREPROCESSOR_JOIN1(EASTL_CT_ASSERT_FAILURE_, __LINE__)
+ #endif
+#else
+ // NOTE(review): elsewhere in this header EASTL_DEBUG is used as a 0/1 value
+ // (e.g. EASTL_NAME_ENABLED at the top of this section), but here it is tested
+ // with defined(). If EASTL_DEBUG is always defined (as 0 or 1), this #else
+ // branch is never taken -- confirm which convention is intended.
+ #define EASTL_CT_ASSERT(expression)
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_COPY_ENABLED
+//
+// Defined as 0 or 1. Default is 0 (disabled) until some future date.
+// If enabled (1) then container operator= copies the allocator from the
+// source container. It ideally should be set to enabled but for backwards
+// compatibility with older versions of EASTL it is currently set to 0.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALLOCATOR_COPY_ENABLED
+ #define EASTL_ALLOCATOR_COPY_ENABLED 0
+#endif
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // EASTL_FIXED_SIZE_TRACKING_ENABLED
+ //
+ // Defined as an integer >= 0. Default is same as EASTL_DEBUG.
+ // If EASTL_FIXED_SIZE_TRACKING_ENABLED is enabled, then fixed
+ // containers in debug builds track the max count of objects
+ // that have been in the container. This allows for the tuning
+ // of fixed container sizes to their minimum required size.
+ //
+ ///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXED_SIZE_TRACKING_ENABLED
+#define EASTL_FIXED_SIZE_TRACKING_ENABLED EASTL_DEBUG
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STRING_OPT_XXXX
+//
+// Enables some options / optimizations options that cause the string class
+// to behave slightly different from the C++ standard basic_string. These are
+// options whereby you can improve performance by avoiding operations that
+// in practice may never occur for you.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STRING_OPT_CHAR_INIT
+// Defined as 0 or 1. Default is 1.
+// Defines if newly created characters are initialized to 0 or left
+// as random values.
+// The C++ string standard is to initialize chars to 0.
+#define EASTL_STRING_OPT_CHAR_INIT 1
+#endif
+
+#ifndef EASTL_STRING_OPT_EXPLICIT_CTORS
+// Defined as 0 or 1. Default is 0.
+// Defines if we should implement explicitness in constructors where the C++
+// standard string does not. The advantage of not enabling explicit constructors
+// is that you can do this: string s = "hello"; in addition to string s("hello");
+// The disadvantage of not enabling explicit constructors is that there can be
+// silent conversions done which impede performance if the user isn't paying
+// attention.
+// C++ standard string ctors are not explicit.
+#define EASTL_STRING_OPT_EXPLICIT_CTORS 0
+#endif
+
+#ifndef EASTL_STRING_OPT_LENGTH_ERRORS
+// Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+// Defines if we check for string values going beyond kMaxSize
+// (a very large value) and throw exceptions if so.
+// C++ standard strings are expected to do such checks.
+#define EASTL_STRING_OPT_LENGTH_ERRORS 0
+#endif
+
+#ifndef EASTL_STRING_OPT_RANGE_ERRORS
+// Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+// Defines if we check for out-of-bounds references to string
+// positions and throw exceptions if so. Well-behaved code shouldn't
+// reference out-of-bounds positions and so shouldn't need these checks.
+// C++ standard strings are expected to do such range checks.
+#define EASTL_STRING_OPT_RANGE_ERRORS 0
+#endif
+
+#ifndef EASTL_STRING_OPT_ARGUMENT_ERRORS
+// Defined as 0 or 1. Default is 0.
+// Defines if we check for NULL ptr arguments passed to string
+// functions by the user and throw exceptions if so. Well-behaved code
+// shouldn't pass bad arguments and so shouldn't need these checks.
+// Also, some users believe that strings should check for NULL pointers
+// in all their arguments and do no-ops if so. This is very debatable.
+// C++ standard strings are not required to check for such argument errors.
+#define EASTL_STRING_OPT_ARGUMENT_ERRORS 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ABSTRACT_STRING_ENABLED
+//
+// Defined as 0 or 1. Default is 0 until abstract string is fully tested.
+// Defines whether the proposed replacement for the string module is enabled.
+// See bonus/abstract_string.h for more information.
+//
+#ifndef EASTL_ABSTRACT_STRING_ENABLED
+#define EASTL_ABSTRACT_STRING_ENABLED 0
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_BITSET_SIZE_T
+//
+// Defined as 0 or 1. Default is 1.
+// Controls whether bitset uses size_t or a smaller unsigned type for its size and position values.
+//
+#ifndef EASTL_BITSET_SIZE_T
+#define EASTL_BITSET_SIZE_T 1
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIST_SIZE_CACHE
+//
+// Defined as 0 or 1. Default is 0.
+// If defined as 1, the list and slist containers (and possibly any additional
+// containers as well) keep a member mSize (or similar) variable which allows
+// the size() member function to execute in constant time (a.k.a. O(1)).
+// There are debates on both sides as to whether it is better to have this
+// cached value or not, as having it entails some cost (memory and code).
+// To consider: Make list size caching an optional template parameter.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LIST_SIZE_CACHE
+#define EASTL_LIST_SIZE_CACHE 0
+#endif
+
+#ifndef EASTL_SLIST_SIZE_CACHE
+#define EASTL_SLIST_SIZE_CACHE 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MAX_STACK_USAGE
+//
+// Defined as an integer greater than zero. Default is 4000.
+// There are some places in EASTL where temporary objects are put on the
+// stack. A common example of this is in the implementation of container
+// swap functions whereby a temporary copy of the container is made.
+// There is a problem, however, if the size of the item created on the stack
+// is very large. This can happen with fixed-size containers, for example.
+// The EASTL_MAX_STACK_USAGE define specifies the maximum amount of memory
+// (in bytes) that the given platform/compiler will safely allow on the stack.
+// Platforms such as Windows will generally allow larger values than embedded
+// systems or console machines, but it is usually a good idea to stick with
+// a max usage value that is portable across all platforms, lest the user be
+// surprised when something breaks as it is ported to another platform.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_MAX_STACK_USAGE
+#define EASTL_MAX_STACK_USAGE 4000
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VA_COPY_ENABLED
+//
+// Defined as 0 or 1. Default is 1 for compilers that need it, 0 for others.
+// Some compilers on some platforms implement va_list whereby its contents
+// are destroyed upon usage, even if passed by value to another function.
+// With these compilers you can use va_copy to restore a va_list.
+// Known compiler/platforms that destroy va_list contents upon usage include:
+// CodeWarrior on PowerPC
+// GCC on x86-64
+// However, va_copy is part of the C99 standard and not part of earlier C and
+// C++ standards. So not all compilers support it. VC++ doesn't support va_copy,
+// but it turns out that VC++ doesn't need it on the platforms it supports.
+// For example usage, see the EASTL string.h file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VA_COPY_ENABLED
+#if defined(__MWERKS__) || (defined(__GNUC__) && (__GNUC__ >= 3) && (!defined(__i386__) || defined(__x86_64__)) && !defined(__ppc__) && !defined(__PPC__) && !defined(__PPC64__))
+#define EASTL_VA_COPY_ENABLED 1
+#else
+#define EASTL_VA_COPY_ENABLED 0
+#endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIST_PROXY_ENABLED
+//
+#if !defined(EASTL_LIST_PROXY_ENABLED)
+// GCC with -fstrict-aliasing has bugs (or undocumented functionality) in its
+// __may_alias__ implementation; the compiler gets confused about function signatures.
+// VC8 (1400) doesn't need the proxy because it has built-in smart debugging capabilities.
+#if defined(EASTL_DEBUG) && (!defined(__GNUC__) || defined(__SNC__)) && (!defined(_MSC_VER) || (_MSC_VER < 1400))
+#define EASTL_LIST_PROXY_ENABLED 1
+#define EASTL_LIST_PROXY_MAY_ALIAS EASTL_MAY_ALIAS
+#else
+#define EASTL_LIST_PROXY_ENABLED 0
+#define EASTL_LIST_PROXY_MAY_ALIAS
+#endif
+#endif
+
+#define EASTL_ITC_NS mimp
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATION_ENABLED
+//
+// Defined as an integer >= 0. Default is to be equal to EASTL_DEBUG.
+// If nonzero, then a certain amount of automatic runtime validation is done.
+// Runtime validation is not considered the same thing as asserting that user
+// input values are valid. Validation refers to internal consistency checking
+// of the validity of containers and their iterators. Validation checking is
+// something that often involves significantly more than basic assertion
+// checking, and it may sometimes be desirable to disable it.
+// This macro would generally be used internally by EASTL.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATION_ENABLED
+#define EASTL_VALIDATION_ENABLED EASTL_DEBUG
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_COMPARE
+//
+// Defined as EASTL_ASSERT or defined away. Default is EASTL_ASSERT if EASTL_VALIDATION_ENABLED is enabled.
+// This is used to validate user-supplied comparison functions, particularly for sorting purposes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_COMPARE_ENABLED
+#define EASTL_VALIDATE_COMPARE_ENABLED EASTL_VALIDATION_ENABLED
+#endif
+
+#if EASTL_VALIDATE_COMPARE_ENABLED
+#define EASTL_VALIDATE_COMPARE EASTL_ASSERT
+#else
+#define EASTL_VALIDATE_COMPARE(expression)
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_INTRUSIVE_LIST
+//
+// Defined as an integral value >= 0. Controls the amount of automatic validation
+// done by intrusive_list. A value of 0 means no automatic validation is done.
+// As of this writing, EASTL_VALIDATE_INTRUSIVE_LIST defaults to 0, as it makes
+// the intrusive_list_node become a non-POD, which may be an issue for some code.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_INTRUSIVE_LIST
+#define EASTL_VALIDATE_INTRUSIVE_LIST 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FORCE_INLINE
+//
+// Defined as a "force inline" expression or defined away.
+// You generally don't need to use forced inlining with the Microsoft and
+// Metrowerks compilers, but you may need it with the GCC compiler (any version).
+//
+// Example usage:
+// template <typename T, typename Allocator>
+// EASTL_FORCE_INLINE typename vector<T, Allocator>::size_type
+// vector<T, Allocator>::size() const
+// { return mpEnd - mpBegin; }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FORCE_INLINE
+#define EASTL_FORCE_INLINE MI_INLINE
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MAY_ALIAS
+//
+// Defined as a macro that wraps the GCC may_alias attribute. This attribute
+// has no significance for VC++ because VC++ doesn't support the concept of
+// strict aliasing. Users should avoid writing code that breaks strict
+// aliasing rules; EASTL_MAY_ALIAS is for cases with no alternative.
+//
+// Example usage:
+// mimp::MiU32 value EASTL_MAY_ALIAS;
+//
+// Example usage:
+// typedef mimp::MiU32 EASTL_MAY_ALIAS value_type;
+// value_type value;
+//
+#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303)
+#define EASTL_MAY_ALIAS __attribute__((__may_alias__))
+#else
+#define EASTL_MAY_ALIAS
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIKELY / EASTL_UNLIKELY
+//
+// Defined as a macro which gives a hint to the compiler for branch
+// prediction. GCC gives you the ability to manually give a hint to
+// the compiler about the result of a comparison, though it's often
+// best to compile shipping code with profiling feedback under both
+// GCC (-fprofile-arcs) and VC++ (/LTCG:PGO, etc.). However, there
+// are times when you feel very sure that a boolean expression will
+// usually evaluate to either true or false and can help the compiler
+// by using an explicit directive...
+//
+// Example usage:
+// if(EASTL_LIKELY(a == 0)) // Tell the compiler that a will usually equal 0.
+// { ... }
+//
+// Example usage:
+// if(EASTL_UNLIKELY(a == 0)) // Tell the compiler that a will usually not equal 0.
+// { ... }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LIKELY
+#if defined(__GNUC__) && (__GNUC__ >= 3)
+#define EASTL_LIKELY(x) __builtin_expect(!!(x), true)
+#define EASTL_UNLIKELY(x) __builtin_expect(!!(x), false)
+#else
+#define EASTL_LIKELY(x) (x)
+#define EASTL_UNLIKELY(x) (x)
+#endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MINMAX_ENABLED
+//
+// Defined as 0 or 1; default is 1.
+// Specifies whether the min and max algorithms are available.
+// It may be useful to disable the min and max algorithms because sometimes
+// #defines for min and max exist which would collide with EASTL min and max.
+// Note that there are already alternative versions of min and max in EASTL
+// with the min_alt and max_alt functions. You can use these without colliding
+// with min/max macros that may exist.
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_MINMAX_ENABLED
+#define EASTL_MINMAX_ENABLED 1
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NOMINMAX
+//
+// Defined as 0 or 1; default is 1.
+// MSVC++ has #defines for min/max which collide with the min/max algorithm
+// declarations. If EASTL_NOMINMAX is defined as 1, then we undefine min and
+// max if they are #defined by an external library. This allows our min and
+// max definitions in algorithm.h to work as expected. An alternative to
+// the enabling of EASTL_NOMINMAX is to #define NOMINMAX in your project
+// settings if you are compiling for Windows.
+// Note that this does not control the availability of the EASTL min and max
+// algorithms; the EASTL_MINMAX_ENABLED configuration parameter does that.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NOMINMAX
+#define EASTL_NOMINMAX 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// AddRef / Release
+//
+// AddRef and Release are used for "intrusive" reference counting. By the term
+// "intrusive", we mean that the reference count is maintained by the object
+// and not by the user of the object. Given that an object implements reference
+// counting, the user of the object needs to be able to increment and decrement
+// that reference count. We do that via the venerable AddRef and Release functions
+// which the object must supply. These defines here allow us to specify the name
+// of the functions. They could just as well be defined to addref and delref or
+// IncRef and DecRef.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTLAddRef
+#define EASTLAddRef AddRef
+#endif
+
+#ifndef EASTLRelease
+#define EASTLRelease Release
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_EXPLICIT_ENABLED
+//
+// Defined as 0 or 1. Default is 0 for now but ideally would be changed to
+// 1 some day. It's 0 because setting it to 1 breaks some existing code.
+// This option enables the allocator ctor to be explicit, which avoids
+// some undesirable silent conversions, especially with the string class.
+//
+// Example usage:
+// class allocator
+// {
+// public:
+// EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName);
+// };
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALLOCATOR_EXPLICIT_ENABLED
+#define EASTL_ALLOCATOR_EXPLICIT_ENABLED 0
+#endif
+
+#if EASTL_ALLOCATOR_EXPLICIT_ENABLED
+#define EASTL_ALLOCATOR_EXPLICIT explicit
+#else
+#define EASTL_ALLOCATOR_EXPLICIT
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL allocator
+//
+// The EASTL allocator system allows you to redefine how memory is allocated
+// via some defines that are set up here. In the container code, memory is
+// allocated via macros which expand to whatever the user has them set to
+// expand to. Given that there are multiple allocator systems available,
+// this system allows you to configure it to use whatever system you want,
+// provided your system meets the requirements of this library.
+// The requirements are:
+//
+// - Must be constructable via a const char* (name) parameter.
+// Some uses of allocators won't require this, however.
+// - Allocate a block of memory of size n and debug name string.
+// - Allocate a block of memory of size n, debug name string,
+// alignment a, and offset o.
+// - Free memory allocated via either of the allocation functions above.
+// - Provide a default allocator instance which can be used if the user
+// doesn't provide a specific one.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// namespace mimp
+// {
+// class allocator
+// {
+// allocator(const char* pName = NULL);
+//
+// void* allocate(size_t n, int flags = 0);
+// void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+// void deallocate(void* p, size_t n);
+//
+// const char* get_name() const;
+// void set_name(const char* pName);
+// };
+//
+// allocator* GetDefaultAllocator(); // This is used for anonymous allocations.
+// }
+
+#ifndef EASTLAlloc // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does.
+// NOTE(review): the expansion ends with a trailing ';', so this must be used
+// as a complete statement (with no following semicolon) and cannot appear
+// inside an expression or an un-braced if/else. This is inconsistent with
+// EASTLAllocAligned/EASTLFree below, which omit the semicolon -- confirm
+// against call sites before changing.
+#define EASTLAlloc(allocator, n) (allocator).allocate(n);
+#endif
+
+#ifndef EASTLAllocFlags // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does.
+// NOTE(review): trailing ';' -- same statement-only caveat as EASTLAlloc above.
+#define EASTLAllocFlags(allocator, n, flags) (allocator).allocate(n, flags);
+#endif
+
+#ifndef EASTLAllocAligned
+#define EASTLAllocAligned(allocator, n, alignment, offset) (allocator).allocate((n), (alignment), (offset))
+#endif
+
+#ifndef EASTLFree
+#define EASTLFree(allocator, p, size) (allocator).deallocate((p), (size))
+#endif
+
+#ifndef EASTLAllocatorType
+#define EASTLAllocatorType mimp::allocator
+#endif
+
+#ifndef EASTLAllocatorDefault
+// EASTLAllocatorDefault returns the default allocator instance. This is not a global
+// allocator which implements all container allocations but is the allocator that is
+// used when EASTL needs to allocate memory internally. There are very few cases where
+// EASTL allocates memory internally, and in each of these it is for a sensible reason
+// that is documented to behave as such.
+#define EASTLAllocatorDefault mimp::GetDefaultAllocator
+#endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // integral_constant
+ //
+ // Wraps a compile-time constant of type T so that it can be manipulated
+ // as a type. This is the common base for the type-trait predicates below,
+ // exposing the constant both as ::value and (via ::type) as itself.
+ //
+ template <typename T, T v>
+ struct integral_constant
+ {
+     typedef T                       value_type;
+     typedef integral_constant<T, v> type;
+
+     static const value_type value = v;
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // true_type / false_type
+ //
+ // These are commonly used types in the implementation of type_traits.
+ // Other integral constant types can be defined, such as those based on int.
+ //
+ typedef integral_constant<bool, true> true_type;
+ typedef integral_constant<bool, false> false_type;
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // yes_type / no_type
+ //
+ // These are used as a utility to differentiate between two things.
+ // Typically returned from overload-resolution helper functions and
+ // distinguished via sizeof(), per the comments below.
+ //
+ typedef char yes_type; // sizeof(yes_type) == 1
+ struct no_type { char padding[8]; }; // sizeof(no_type) != 1
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_select
+ //
+ // Compile-time selection of one of two types based on a boolean
+ // condition (the C++98-era equivalent of std::conditional). Useful in
+ // template metaprogramming.
+ //
+ // Example usage:
+ //    typedef type_select<is_integral<SomeType>::value, ChoiceAType, ChoiceBType>::type ChosenType;
+ //
+ template <bool Condition, class WhenTrue, class WhenFalse>
+ struct type_select { typedef WhenTrue type; };
+
+ template <class WhenTrue, class WhenFalse>
+ struct type_select<false, WhenTrue, WhenFalse> { typedef WhenFalse type; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_or
+ //
+ // Compile-time logical OR of up to five boolean constants; ::value is
+ // true when any argument is true. Unused arguments default to false.
+ // A utility for composing type traits.
+ //
+ template <bool v1, bool v2, bool v3 = false, bool v4 = false, bool v5 = false>
+ struct type_or { static const bool value = true; };
+
+ template <>
+ struct type_or<false, false, false, false, false> { static const bool value = false; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_and
+ //
+ // Compile-time logical AND of up to five boolean constants; ::value is
+ // true only when every argument is true. Unused arguments default to
+ // true. A utility for composing type traits.
+ //
+ template <bool v1, bool v2, bool v3 = true, bool v4 = true, bool v5 = true>
+ struct type_and { static const bool value = false; };
+
+ template <>
+ struct type_and<true, true, true, true, true> { static const bool value = true; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_equal
+ //
+ // This is a utility class for creating composite type traits.
+ // ::value is true when the two int constants compare equal.
+ //
+ template <int b1, int b2>
+ struct type_equal{ static const bool value = (b1 == b2); };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_not_equal
+ //
+ // This is a utility class for creating composite type traits.
+ // ::value is true when the two int constants compare unequal.
+ //
+ template <int b1, int b2>
+ struct type_not_equal{ static const bool value = (b1 != b2); };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_not
+ //
+ // This is a utility class for creating composite type traits.
+ // Compile-time boolean negation: the primary template (reached only for
+ // b == false) yields true; the <true> specialization yields false.
+ //
+ template <bool b>
+ struct type_not{ static const bool value = true; };
+
+ template <>
+ struct type_not<true>{ static const bool value = false; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // empty
+ //
+ // A deliberately empty template, usable as a placeholder/default.
+ template <typename T>
+ struct empty{ };
+
+
+
+// The following files implement the type traits themselves.
+#if defined(__GNUC__) && (__GNUC__ <= 2)
+#else
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/type_fundamental.h
+//
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+ // The following properties or relations are defined here. If the given
+ // item is missing then it simply hasn't been implemented, at least not yet.
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_void
+ //
+ // is_void<T>::value == true if and only if T is one of the following types:
+ // [const][volatile] void
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_void : public false_type{};
+
+ // All four cv-qualified forms of void are specialized here.
+ template <> struct is_void<void> : public true_type{};
+ template <> struct is_void<void const> : public true_type{};
+ template <> struct is_void<void volatile> : public true_type{};
+ template <> struct is_void<void const volatile> : public true_type{};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_integral
+ //
+ // is_integral<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] bool
+ // [const] [volatile] char
+ // [const] [volatile] signed char
+ // [const] [volatile] unsigned char
+ // [const] [volatile] wchar_t
+ // [const] [volatile] short
+ // [const] [volatile] int
+ // [const] [volatile] long
+ // [const] [volatile] long long
+ // [const] [volatile] unsigned short
+ // [const] [volatile] unsigned int
+ // [const] [volatile] unsigned long
+ // [const] [volatile] unsigned long long
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_integral : public false_type{};
+
+ // To do: Need to define volatile and const volatile versions of these.
+ // (Only the unqualified and const-qualified forms are specialized below,
+ // so volatile-qualified integral types currently report false.)
+ template <> struct is_integral<unsigned char> : public true_type{};
+ template <> struct is_integral<const unsigned char> : public true_type{};
+ template <> struct is_integral<unsigned short> : public true_type{};
+ template <> struct is_integral<const unsigned short> : public true_type{};
+ template <> struct is_integral<unsigned int> : public true_type{};
+ template <> struct is_integral<const unsigned int> : public true_type{};
+ template <> struct is_integral<unsigned long> : public true_type{};
+ template <> struct is_integral<const unsigned long> : public true_type{};
+ template <> struct is_integral<unsigned long long> : public true_type{};
+ template <> struct is_integral<const unsigned long long> : public true_type{};
+
+ template <> struct is_integral<signed char> : public true_type{};
+ template <> struct is_integral<const signed char> : public true_type{};
+ template <> struct is_integral<signed short> : public true_type{};
+ template <> struct is_integral<const signed short> : public true_type{};
+ template <> struct is_integral<signed int> : public true_type{};
+ template <> struct is_integral<const signed int> : public true_type{};
+ template <> struct is_integral<signed long> : public true_type{};
+ template <> struct is_integral<const signed long> : public true_type{};
+ template <> struct is_integral<signed long long> : public true_type{};
+ template <> struct is_integral<const signed long long> : public true_type{};
+
+ // Plain char is a distinct type from signed char and unsigned char in C++,
+ // hence the separate specializations.
+ template <> struct is_integral<bool> : public true_type{};
+ template <> struct is_integral<const bool> : public true_type{};
+ template <> struct is_integral<char> : public true_type{};
+ template <> struct is_integral<const char> : public true_type{};
+#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+ template <> struct is_integral<wchar_t> : public true_type{};
+ template <> struct is_integral<const wchar_t> : public true_type{};
+#endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_floating_point
+ //
+ // is_floating_point<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] float
+ // [const] [volatile] double
+ // [const] [volatile] long double
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_floating_point : public false_type{};
+
+ // To do: Need to define volatile and const volatile versions of these.
+ // (As with is_integral above, volatile-qualified types currently report false.)
+ template <> struct is_floating_point<float> : public true_type{};
+ template <> struct is_floating_point<const float> : public true_type{};
+ template <> struct is_floating_point<double> : public true_type{};
+ template <> struct is_floating_point<const double> : public true_type{};
+ template <> struct is_floating_point<long double> : public true_type{};
+ template <> struct is_floating_point<const long double> : public true_type{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_arithmetic
+ //
+ // is_arithmetic<T>::value == true if and only if:
+ // is_floating_point<T>::value == true, or
+ // is_integral<T>::value == true
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T>
+ struct is_arithmetic : public integral_constant<bool,
+ is_integral<T>::value || is_floating_point<T>::value
+ >{};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_fundamental
+ //
+ // is_fundamental<T>::value == true if and only if:
+ // is_floating_point<T>::value == true, or
+ // is_integral<T>::value == true, or
+ // is_void<T>::value == true
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T>
+ struct is_fundamental : public integral_constant<bool,
+ is_void<T>::value || is_integral<T>::value || is_floating_point<T>::value
+ >{};
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/type_transformations.h
+// Written and maintained by Paul Pedriana - 2005
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+ // The following transformations are defined here. If the given item
+ // is missing then it simply hasn't been implemented, at least not yet.
+ // add_unsigned
+ // add_signed
+ // remove_const
+ // remove_volatile
+ // remove_cv
+ // add_const
+ // add_volatile
+ // add_cv
+ // remove_reference
+ // add_reference
+ // remove_extent
+ // remove_all_extents
+ // remove_pointer
+ // add_pointer
+ // aligned_storage
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_signed
+ //
+ // Adds signed-ness to the given type.
+ // Modifies only integral values; has no effect on others.
+ // add_signed<int>::type is int
+ // add_signed<unsigned int>::type is int
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Primary template: non-integral (and already-signed) types map to themselves.
+ template<class T>
+ struct add_signed
+ { typedef T type; };
+
+ template<>
+ struct add_signed<unsigned char>
+ { typedef signed char type; };
+
+#if (defined(CHAR_MAX) && defined(UCHAR_MAX) && (CHAR_MAX == UCHAR_MAX)) // If char is unsigned (which is usually not the case)...
+ template<>
+ struct add_signed<char>
+ { typedef signed char type; };
+#endif
+
+ template<>
+ struct add_signed<unsigned short>
+ { typedef short type; };
+
+ template<>
+ struct add_signed<unsigned int>
+ { typedef int type; };
+
+ template<>
+ struct add_signed<unsigned long>
+ { typedef long type; };
+
+ template<>
+ struct add_signed<unsigned long long>
+ { typedef long long type; };
+
+#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+#if (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 4294967295U)) // If wchar_t is a 32 bit unsigned value...
+ // NOTE(review): the signed counterparts use fixed-width int32_t/int16_t here, while
+ // add_unsigned below uses the MiU32/MiU16 typedefs — presumably equivalent widths; confirm.
+ template<>
+ struct add_signed<wchar_t>
+ { typedef int32_t type; };
+#elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 65535)) // If wchar_t is a 16 bit unsigned value...
+ template<>
+ struct add_signed<wchar_t>
+ { typedef int16_t type; };
+#endif
+#endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_unsigned
+ //
+ // Adds unsigned-ness to the given type.
+ // Modifies only integral values; has no effect on others.
+ // add_unsigned<int>::type is unsigned int
+ // add_unsigned<unsigned int>::type is unsigned int
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Primary template: non-integral (and already-unsigned) types map to themselves.
+ template<class T>
+ struct add_unsigned
+ { typedef T type; };
+
+ template<>
+ struct add_unsigned<signed char>
+ { typedef unsigned char type; };
+
+#if (defined(CHAR_MAX) && defined(SCHAR_MAX) && (CHAR_MAX == SCHAR_MAX)) // If char is signed (which is usually so)...
+ template<>
+ struct add_unsigned<char>
+ { typedef unsigned char type; };
+#endif
+
+ template<>
+ struct add_unsigned<short>
+ { typedef unsigned short type; };
+
+ template<>
+ struct add_unsigned<int>
+ { typedef unsigned int type; };
+
+ template<>
+ struct add_unsigned<long>
+ { typedef unsigned long type; };
+
+ template<>
+ struct add_unsigned<long long>
+ { typedef unsigned long long type; };
+
+#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+#if (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 2147483647)) // If wchar_t is a 32 bit signed value...
+ template<>
+ struct add_unsigned<wchar_t>
+ { typedef mimp::MiU32 type; };
+#elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 32767)) // If wchar_t is a 16 bit signed value...
+ template<>
+ struct add_unsigned<wchar_t>
+ { typedef mimp::MiU16 type; }; // was unqualified MiU16; qualified for consistency with the 32-bit branch above
+#endif
+#endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_cv
+ //
+ // Remove const and volatile from a type.
+ //
+ // The remove_cv transformation trait removes top-level const and/or volatile
+ // qualification (if any) from the type to which it is applied. For a given type T,
+ // remove_cv<T const volatile>::type is equivalent to T. For example,
+ // remove_cv<char* volatile>::type is equivalent to char*, while remove_cv<const char*>::type
+ // is equivalent to const char*. In the latter case, the const qualifier modifies
+ // char, not *, and is therefore not at the top level.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // Implementation detail: the qualifier is stripped through a pointer (T*), because a
+ // partial specialization on "const T*" can see a top-level const that a specialization
+ // on plain "const T" could not distinguish portably on older compilers.
+ template <typename T> struct remove_cv_imp{};
+ template <typename T> struct remove_cv_imp<T*> { typedef T unqualified_type; };
+ template <typename T> struct remove_cv_imp<const T*> { typedef T unqualified_type; };
+ template <typename T> struct remove_cv_imp<volatile T*> { typedef T unqualified_type; };
+ template <typename T> struct remove_cv_imp<const volatile T*> { typedef T unqualified_type; };
+
+ template <typename T> struct remove_cv{ typedef typename remove_cv_imp<T*>::unqualified_type type; };
+ template <typename T> struct remove_cv<T&>{ typedef T& type; }; // References are automatically not const nor volatile. See section 8.3.2p1 of the C++ standard.
+
+ // Arrays carry cv-qualification on their element type, so they need dedicated specializations.
+ template <typename T, size_t N> struct remove_cv<T const[N]> { typedef T type[N]; };
+ template <typename T, size_t N> struct remove_cv<T volatile[N]> { typedef T type[N]; };
+ template <typename T, size_t N> struct remove_cv<T const volatile[N]>{ typedef T type[N]; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_reference
+ //
+ // Add reference to a type.
+ //
+ // The add_reference transformation trait adds a level of indirection
+ // by reference to the type to which it is applied. For a given type T,
+ // add_reference<T>::type is equivalent to T& if is_reference<T>::value == false,
+ // and T otherwise.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T>
+ struct add_reference_impl{ typedef T& type; };
+
+ // Already a reference: leave unchanged (no reference-to-reference in C++03).
+ template <typename T>
+ struct add_reference_impl<T&>{ typedef T& type; };
+
+ // void cannot be referenced; map it to itself.
+ template <>
+ struct add_reference_impl<void>{ typedef void type; };
+
+ template <typename T>
+ struct add_reference { typedef typename add_reference_impl<T>::type type; };
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/type_properties.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+
+
+ // The following properties or relations are defined here. If the given
+ // item is missing then it simply hasn't been implemented, at least not yet.
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_const
+ //
+ // is_const<T>::value == true if and only if T has const-qualification.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // Detection is performed through a pointer (T*) so a partial specialization
+ // can observe the top-level qualifier.
+ template <typename T> struct is_const_value : public false_type{};
+ template <typename T> struct is_const_value<const T*> : public true_type{};
+ template <typename T> struct is_const_value<const volatile T*> : public true_type{};
+
+ template <typename T> struct is_const : public is_const_value<T*>{};
+ template <typename T> struct is_const<T&> : public false_type{}; // Note here that T is const, not the reference to T. So is_const is false. See section 8.3.2p1 of the C++ standard.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_volatile
+ //
+ // is_volatile<T>::value == true if and only if T has volatile-qualification.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T> struct is_volatile_value : public false_type{};
+ template <typename T> struct is_volatile_value<volatile T*> : public true_type{};
+ template <typename T> struct is_volatile_value<const volatile T*> : public true_type{};
+
+ template <typename T> struct is_volatile : public is_volatile_value<T*>{};
+ template <typename T> struct is_volatile<T&> : public false_type{}; // Note here that T is volatile, not the reference to T. So is_volatile is false. See section 8.3.2p1 of the C++ standard.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_abstract
+ //
+ // is_abstract<T>::value == true if and only if T is a class or struct
+ // that has at least one pure virtual function. is_abstract may only
+ // be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Not implemented yet.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_signed
+ //
+ // is_signed<T>::value == true if and only if T is one of the following types:
+ //    [const] [volatile] char (maybe)
+ //    [const] [volatile] signed char
+ //    [const] [volatile] short
+ //    [const] [volatile] int
+ //    [const] [volatile] long
+ //    [const] [volatile] long long
+ //
+ // Used to determine if an integral type is signed or unsigned.
+ // Given that there are some user-made classes which emulate integral
+ // types, we provide the EASTL_DECLARE_SIGNED macro to allow you to
+ // set a given class to be identified as a signed type.
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_signed : public false_type{};
+
+ template <> struct is_signed<signed char> : public true_type{};
+ template <> struct is_signed<const signed char> : public true_type{};
+ template <> struct is_signed<signed short> : public true_type{};
+ template <> struct is_signed<const signed short> : public true_type{};
+ template <> struct is_signed<signed int> : public true_type{};
+ template <> struct is_signed<const signed int> : public true_type{};
+ template <> struct is_signed<signed long> : public true_type{};
+ template <> struct is_signed<const signed long> : public true_type{};
+ template <> struct is_signed<signed long long> : public true_type{};
+ template <> struct is_signed<const signed long long> : public true_type{};
+
+// Guard with defined() like the add_signed/add_unsigned checks above: without
+// <limits.h>, undefined macros both evaluate to 0 and the unguarded comparison
+// would wrongly mark char as signed (and, via the matching is_unsigned check,
+// as unsigned at the same time).
+#if (defined(CHAR_MAX) && defined(SCHAR_MAX) && (CHAR_MAX == SCHAR_MAX))
+ template <> struct is_signed<char> : public true_type{};
+ template <> struct is_signed<const char> : public true_type{};
+#endif
+#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+#if defined(__WCHAR_MAX__) && ((__WCHAR_MAX__ == 2147483647) || (__WCHAR_MAX__ == 32767)) // GCC defines __WCHAR_MAX__ for most platforms.
+ template <> struct is_signed<wchar_t> : public true_type{};
+ template <> struct is_signed<const wchar_t> : public true_type{};
+#endif
+#endif
+
+#define EASTL_DECLARE_SIGNED(T) namespace mimp{ template <> struct is_signed<T> : public true_type{}; template <> struct is_signed<const T> : public true_type{}; }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_unsigned
+ //
+ // is_unsigned<T>::value == true if and only if T is one of the following types:
+ //    [const] [volatile] char (maybe)
+ //    [const] [volatile] unsigned char
+ //    [const] [volatile] unsigned short
+ //    [const] [volatile] unsigned int
+ //    [const] [volatile] unsigned long
+ //    [const] [volatile] unsigned long long
+ //
+ // Used to determine if an integral type is signed or unsigned.
+ // Given that there are some user-made classes which emulate integral
+ // types, we provide the EASTL_DECLARE_UNSIGNED macro to allow you to
+ // set a given class to be identified as an unsigned type.
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_unsigned : public false_type{};
+
+ template <> struct is_unsigned<unsigned char> : public true_type{};
+ template <> struct is_unsigned<const unsigned char> : public true_type{};
+ template <> struct is_unsigned<unsigned short> : public true_type{};
+ template <> struct is_unsigned<const unsigned short> : public true_type{};
+ template <> struct is_unsigned<unsigned int> : public true_type{};
+ template <> struct is_unsigned<const unsigned int> : public true_type{};
+ template <> struct is_unsigned<unsigned long> : public true_type{};
+ template <> struct is_unsigned<const unsigned long> : public true_type{};
+ template <> struct is_unsigned<unsigned long long> : public true_type{};
+ template <> struct is_unsigned<const unsigned long long> : public true_type{};
+
+// Guard with defined() like the add_signed/add_unsigned checks above: without
+// <limits.h>, undefined macros both evaluate to 0 and the unguarded comparison
+// would wrongly mark char as unsigned.
+#if (defined(CHAR_MAX) && defined(UCHAR_MAX) && (CHAR_MAX == UCHAR_MAX))
+ template <> struct is_unsigned<char> : public true_type{};
+ template <> struct is_unsigned<const char> : public true_type{};
+#endif
+#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+#if defined(_MSC_VER) || (defined(__WCHAR_MAX__) && ((__WCHAR_MAX__ == 4294967295U) || (__WCHAR_MAX__ == 65535))) // GCC defines __WCHAR_MAX__ for most platforms.
+ template <> struct is_unsigned<wchar_t> : public true_type{};
+ template <> struct is_unsigned<const wchar_t> : public true_type{};
+#endif
+#endif
+
+#define EASTL_DECLARE_UNSIGNED(T) namespace mimp{ template <> struct is_unsigned<T> : public true_type{}; template <> struct is_unsigned<const T> : public true_type{}; }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // alignment_of
+ //
+ // alignment_of<T>::value is an integral value representing, in bytes,
+ // the memory alignment of objects of type T.
+ //
+ // alignment_of may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // EASTL_ALIGN_OF is a compiler-specific macro defined elsewhere in this file.
+ template <typename T>
+ struct alignment_of_value{ static const size_t value = EASTL_ALIGN_OF(T); };
+
+ template <typename T>
+ struct alignment_of : public integral_constant<size_t, alignment_of_value<T>::value>{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_aligned
+ //
+ // Defined as true if the type has alignment requirements greater
+ // than default alignment, which is taken to be 8. This allows for
+ // doing specialized object allocation and placement for such types.
+ ///////////////////////////////////////////////////////////////////////
+ // NOTE: the default alignment of 8 is hard-coded here rather than derived
+ // from the platform; types aligned to exactly 8 are reported as NOT "aligned".
+ template <typename T>
+ struct is_aligned_value{ static const bool value = (EASTL_ALIGN_OF(T) > 8); };
+
+ template <typename T>
+ struct is_aligned : public integral_constant<bool, is_aligned_value<T>::value>{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rank
+ //
+ // rank<T>::value is an integral value representing the number of
+ // dimensions possessed by an array type. For example, given a
+ // multi-dimensional array type T[M][N], std::tr1::rank<T[M][N]>::value == 2.
+ // For a given non-array type T, std::tr1::rank<T>::value == 0.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Not implemented yet.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // extent
+ //
+ // extent<T, I>::value is an integral type representing the number of
+ // elements in the Ith dimension of array type T.
+ //
+ // For a given array type T[N], std::tr1::extent<T[N]>::value == N.
+ // For a given multi-dimensional array type T[M][N], std::tr1::extent<T[M][N], 0>::value == M.
+ // For a given multi-dimensional array type T[M][N], std::tr1::extent<T[M][N], 1>::value == N.
+ // For a given array type T and a given dimension I where I >= rank<T>::value, std::tr1::extent<T, I>::value == 0.
+ // For a given array type of unknown extent T[], std::tr1::extent<T[], 0>::value == 0.
+ // For a given non-array type T and an arbitrary dimension I, std::tr1::extent<T, I>::value == 0.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Not implemented yet.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_base_of
+ //
+ // Given two (possibly identical) types Base and Derived, is_base_of<Base, Derived>::value == true
+ // if and only if Base is a direct or indirect base class of Derived,
+ // or Base and Derived are the same type.
+ //
+ // is_base_of may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Not implemented yet.
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/type_compound.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+ // The following properties or relations are defined here. If the given
+ // item is missing then it simply hasn't been implemented, at least not yet.
+ // is_array
+ // is_pointer
+ // is_reference
+ // is_member_object_pointer
+ // is_member_function_pointer
+ // is_member_pointer
+ // is_enum
+ // is_union
+ // is_class
+ // is_polymorphic
+ // is_function
+ // is_object
+ // is_scalar
+ // is_compound
+ // is_same
+ // is_convertible
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_array
+ //
+ // is_array<T>::value == true if and only if T is an array type.
+ // As of this writing, the SNC compiler (EDG-based) doesn't compile
+ // the code below and says that returning an array is illegal.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // How this works: tester1's template overload declares a function whose return
+ // involves T by value. Since functions cannot return arrays, substitution fails
+ // for array T and the char overload is picked instead; tester2 then distinguishes
+ // the two outcomes by overload resolution, and sizeof reads off the answer
+ // without ever calling anything.
+ template <typename T>
+ T (*is_array_tester1(empty<T>))(empty<T>);
+ char is_array_tester1(...); // May need to use __cdecl under VC++.
+
+ template <typename T>
+ no_type is_array_tester2(T(*)(empty<T>));
+ yes_type is_array_tester2(...); // May need to use __cdecl under VC++.
+
+ template <typename T>
+ struct is_array_helper {
+ static empty<T> emptyInstance;
+ };
+
+ template <typename T>
+ struct is_array : public integral_constant<bool,
+ sizeof(is_array_tester2(is_array_tester1(is_array_helper<T>::emptyInstance))) == 1
+ >{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_reference
+ //
+ // is_reference<T>::value == true if and only if T is a reference type.
+ // This category includes reference to function types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_reference : public false_type{};
+ template <typename T> struct is_reference<T&> : public true_type{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_member_function_pointer
+ //
+ // is_member_function_pointer<T>::value == true if and only if T is a
+ // pointer to member function type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // We detect member functions with 0 to N arguments. We can extend this
+ // for additional arguments if necessary.
+ // To do: Make volatile and const volatile versions of these in addition to non-const and const.
+ ///////////////////////////////////////////////////////////////////////
+ // One specialization per arity (0..8 arguments), in non-const and const flavors.
+ template <typename T> struct is_mem_fun_pointer_value : public false_type{};
+ template <typename R, typename T> struct is_mem_fun_pointer_value<R (T::*)()> : public true_type{};
+ template <typename R, typename T> struct is_mem_fun_pointer_value<R (T::*)() const> : public true_type{};
+ template <typename R, typename T, typename Arg0> struct is_mem_fun_pointer_value<R (T::*)(Arg0)> : public true_type{};
+ template <typename R, typename T, typename Arg0> struct is_mem_fun_pointer_value<R (T::*)(Arg0) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7) const> : public true_type{};
+
+ template <typename T>
+ struct is_member_function_pointer : public integral_constant<bool, is_mem_fun_pointer_value<T>::value>{};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_member_pointer
+ //
+ // is_member_pointer<T>::value == true if and only if:
+ //    is_member_object_pointer<T>::value == true, or
+ //    is_member_function_pointer<T>::value == true
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // The U T::* partial specialization below covers member-object pointers.
+ template <typename T>
+ struct is_member_pointer : public integral_constant<bool, is_member_function_pointer<T>::value>{};
+
+ template <typename T, typename U> struct is_member_pointer<U T::*> : public true_type{};
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_pointer
+ //
+ // is_pointer<T>::value == true if and only if T is a pointer type.
+ // This category includes function pointer types, but not pointer to
+ // member types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_pointer_helper : public false_type{};
+
+ // All cv-qualified pointer forms count as pointers.
+ template <typename T> struct is_pointer_helper<T*> : public true_type{};
+ template <typename T> struct is_pointer_helper<T* const> : public true_type{};
+ template <typename T> struct is_pointer_helper<T* volatile> : public true_type{};
+ template <typename T> struct is_pointer_helper<T* const volatile> : public true_type{};
+
+ // Member pointers are explicitly excluded, per the contract above.
+ // type_and / type_not are compile-time boolean combinators defined elsewhere in this file.
+ template <typename T>
+ struct is_pointer_value : public type_and<is_pointer_helper<T>::value, type_not<is_member_pointer<T>::value>::value> {};
+
+ template <typename T>
+ struct is_pointer : public integral_constant<bool, is_pointer_value<T>::value>{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_same
+ //
+ // Given two (possibly identical) types T and U, is_same<T, U>::value == true
+ // if and only if T and U are the same type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template<typename T, typename U>
+ struct is_same : public false_type { };
+
+ // Partial specialization selected only when both arguments deduce to one type.
+ template<typename T>
+ struct is_same<T, T> : public true_type { };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_convertible
+ //
+ // Given two (possibly identical) types From and To, is_convertible<From, To>::value == true
+ // if and only if an lvalue of type From can be implicitly converted to type To,
+ // or is_void<To>::value == true
+ //
+ // is_convertible may only be applied to complete types.
+ // Type To may not be an abstract type.
+ // If the conversion is ambiguous, the program is ill-formed.
+ // If either or both of From and To are class types, and the conversion would invoke
+ // non-public member functions of either From or To (such as a private constructor of To,
+ // or a private conversion operator of From), the program is ill-formed.
+ //
+ // Note that without compiler help, both is_convertible and is_base
+ // can produce compiler errors if the conversion is ambiguous.
+ //     Example:
+ //         struct A {};
+ //         struct B : A {};
+ //         struct C : A {};
+ //         struct D : B, C {};
+ //         is_convertible<D*, A*>::value; // Generates compiler error.
+ ///////////////////////////////////////////////////////////////////////
+#if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x doesn't like the code below.
+ // Overload-resolution probe: if 'from' implicitly converts to To, the Test(To)
+ // overload wins and sizeof reads yes_type; otherwise the ellipsis overload wins.
+ // Nothing is ever called or instantiated at runtime.
+ template <typename From, typename To, bool is_from_void = false, bool is_to_void = false>
+ struct is_convertible_helper {
+ static yes_type Test(To); // May need to use __cdecl under VC++.
+ static no_type Test(...); // May need to use __cdecl under VC++.
+ static From from;
+ typedef integral_constant<bool, sizeof(Test(from)) == sizeof(yes_type)> result;
+ };
+
+ // void is not convertible to non-void
+ template <typename From, typename To>
+ struct is_convertible_helper<From, To, true, false> { typedef false_type result; };
+
+ // Anything is convertible to void
+ template <typename From, typename To, bool is_from_void>
+ struct is_convertible_helper<From, To, is_from_void, true> { typedef true_type result; };
+
+ template <typename From, typename To>
+ struct is_convertible : public is_convertible_helper<From, To, is_void<From>::value, is_void<To>::value>::result {};
+
+#else
+ // GCC 2.x fallback: conservatively report nothing as convertible.
+ template <typename From, typename To>
+ struct is_convertible : public false_type{};
+#endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_union
+ //
+ // is_union<T>::value == true if and only if T is a union type.
+ //
+ // There is no way to tell if a type is a union without compiler help.
+ // As of this writing, only Metrowerks v8+ supports such functionality
+ // via 'msl::is_union<T>::value'. The user can force something to be
+ // evaluated as a union via EASTL_DECLARE_UNION.
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_union : public false_type{};
+
+#define EASTL_DECLARE_UNION(T) namespace mimp{ template <> struct is_union<T> : public true_type{}; template <> struct is_union<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_class
+ //
+ // is_class<T>::value == true if and only if T is a class or struct
+ // type (and not a union type).
+ //
+ // Without specific compiler help, it is not possible to
+ // distinguish between unions and classes. As a result, is_class
+ // will erroneously evaluate to true for union types.
+ ///////////////////////////////////////////////////////////////////////
+#if defined(__MWERKS__)
+ // To do: Switch this to use msl_utility type traits.
+ template <typename T>
+ struct is_class : public false_type{};
+#elif !defined(__GNUC__) || (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) // Not GCC or GCC 3.4+
+ // Only class types can form a pointer-to-member (void (U::*)()), so the first
+ // overload is viable only for classes; sizeof picks the winner without any call.
+ template <typename U> static yes_type is_class_helper(void (U::*)());
+ template <typename U> static no_type is_class_helper(...);
+
+ template <typename T>
+ struct is_class : public integral_constant<bool,
+ sizeof(is_class_helper<T>(0)) == sizeof(yes_type) && !is_union<T>::value
+ >{};
+#else
+ // GCC 2.x version, due to GCC being broken.
+ template <typename T>
+ struct is_class : public false_type{};
+#endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_enum
+ //
+ // is_enum<T>::value == true if and only if T is an enumeration type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // Strategy: enums (and arithmetic types) implicitly convert to int; so first
+ // rule out arithmetic, reference, and class types, then test convertibility
+ // to this int-accepting probe type.
+ struct int_convertible{ int_convertible(int); };
+
+ template <bool is_arithmetic_or_reference>
+ struct is_enum_helper { template <typename T> struct nest : public is_convertible<T, int_convertible>{}; };
+
+ // Arithmetic/reference/class types were already excluded by the selector: report false.
+ template <>
+ struct is_enum_helper<true> { template <typename T> struct nest : public false_type {}; };
+
+ template <typename T>
+ struct is_enum_helper2
+ {
+ typedef type_or<is_arithmetic<T>::value, is_reference<T>::value, is_class<T>::value> selector;
+ typedef is_enum_helper<selector::value> helper_t;
+ typedef typename add_reference<T>::type ref_t; // tested by reference to avoid copying T
+ typedef typename helper_t::template nest<ref_t> result;
+ };
+
+ template <typename T>
+ struct is_enum : public integral_constant<bool, is_enum_helper2<T>::result::value>{};
+
+ // void (in all cv forms) is neither arithmetic, reference, nor class; handle it explicitly.
+ template <> struct is_enum<void> : public false_type {};
+ template <> struct is_enum<void const> : public false_type {};
+ template <> struct is_enum<void volatile> : public false_type {};
+ template <> struct is_enum<void const volatile> : public false_type {};
+
+#define EASTL_DECLARE_ENUM(T) namespace mimp{ template <> struct is_enum<T> : public true_type{}; template <> struct is_enum<const T> : public true_type{}; }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_polymorphic
+ //
+ // is_polymorphic<T>::value == true if and only if T is a class or struct
+ // that declares or inherits a virtual function. is_polymorphic may only
+ // be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // Strategy: derive two helpers from T, identical except that helper_2 adds
+ // virtual functions. If T is already polymorphic, the vtable pointer is shared
+ // and the two helpers have the same size; if not, helper_2 gains a vtable
+ // pointer and grows. (The pad arrays keep alignment effects from masking the
+ // difference.) Nothing is ever instantiated — only sizeof is evaluated.
+ template <typename T>
+ struct is_polymorphic_imp1
+ {
+ typedef typename remove_cv<T>::type t;
+
+ struct helper_1 : public t
+ {
+ helper_1();
+ ~helper_1() throw();
+ char pad[64];
+ };
+
+ struct helper_2 : public t
+ {
+ helper_2();
+ virtual ~helper_2() throw();
+#ifndef _MSC_VER
+ virtual void foo();
+#endif
+ char pad[64];
+ };
+
+ static const bool value = (sizeof(helper_1) == sizeof(helper_2));
+ };
+
+ // Non-class types are never polymorphic.
+ template <typename T>
+ struct is_polymorphic_imp2{ static const bool value = false; };
+
+ // Dispatch to imp1 only for class types — deriving from a non-class is ill-formed.
+ template <bool is_class>
+ struct is_polymorphic_selector{ template <typename T> struct rebind{ typedef is_polymorphic_imp2<T> type; }; };
+
+ template <>
+ struct is_polymorphic_selector<true>{ template <typename T> struct rebind{ typedef is_polymorphic_imp1<T> type; }; };
+
+ template <typename T>
+ struct is_polymorphic_value{
+ typedef is_polymorphic_selector<is_class<T>::value> selector;
+ typedef typename selector::template rebind<T> binder;
+ typedef typename binder::type imp_type;
+ static const bool value = imp_type::value;
+ };
+
+ template <typename T>
+ struct is_polymorphic : public integral_constant<bool, is_polymorphic_value<T>::value>{};
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_function
+ //
+ // is_function<T>::value == true if and only if T is a function type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+	// Matches pointers-to-function of 0..8 parameters. NOTE(review): function
+	// types with more than 8 parameters, C-style variadic functions, and
+	// alternate calling conventions are not matched by these specializations
+	// and will report false.
+	template <typename R> struct is_function_ptr_helper : public false_type{};
+	template <typename R> struct is_function_ptr_helper<R (*)()> : public true_type{};
+	template <typename R, typename Arg0> struct is_function_ptr_helper<R (*)(Arg0)> : public true_type{};
+	template <typename R, typename Arg0, typename Arg1> struct is_function_ptr_helper<R (*)(Arg0, Arg1)> : public true_type{};
+	template <typename R, typename Arg0, typename Arg1, typename Arg2> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2)> : public true_type{};
+	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3)> : public true_type{};
+	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3, Arg4)> : public true_type{};
+	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5)> : public true_type{};
+	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6)> : public true_type{};
+	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7)> : public true_type{};
+
+	// References are never function types; for non-references we test whether
+	// T* forms a pointer-to-function (forming T* from a reference would be
+	// ill-formed, hence the chooser).
+	template <bool is_ref = true>
+	struct is_function_chooser{ template <typename T> struct result_ : public false_type{}; };
+
+	template <>
+	struct is_function_chooser<false>{ template <typename T> struct result_ : public is_function_ptr_helper<T*>{}; };
+
+	template <typename T>
+	struct is_function_value : public is_function_chooser<is_reference<T>::value>::template result_<T>{};
+
+	template <typename T>
+	struct is_function : public integral_constant<bool, is_function_value<T>::value>{};
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_object
+ //
+ // is_object<T>::value == true if and only if:
+ // is_reference<T>::value == false, and
+ // is_function<T>::value == false, and
+ // is_void<T>::value == false
+ //
+ // The C++ standard, section 3.9p9, states: "An object type is a
+ // (possibly cv-qualified) type that is not a function type, not a
+ // reference type, and not incomplete (except for an incompletely
+	// defined object type)."
+ ///////////////////////////////////////////////////////////////////////
+
+	// An object type is any type that is not a reference, not void, and not
+	// a function type (C++ standard 3.9p9). Expressed here via De Morgan:
+	// "none of the three" rather than a conjunction of negations.
+	template <typename T>
+	struct is_object
+		: public integral_constant<bool, !(is_reference<T>::value || is_void<T>::value || is_function<T>::value)>{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_scalar
+ //
+ // is_scalar<T>::value == true if and only if:
+ // is_arithmetic<T>::value == true, or
+ // is_enum<T>::value == true, or
+ // is_pointer<T>::value == true, or
+ // is_member_pointer<T>::value
+ //
+ ///////////////////////////////////////////////////////////////////////
+	// NOTE(review): the documentation above also lists pointer-to-member types
+	// as scalars, but no is_member_pointer term appears here -- confirm whether
+	// member-pointer support exists elsewhere or is intentionally omitted.
+	template <typename T>
+	struct is_scalar : public integral_constant<bool, is_arithmetic<T>::value || is_enum<T>::value>{};
+
+	// All cv-qualified pointer forms are scalars.
+	template <typename T> struct is_scalar<T*> : public true_type {};
+	template <typename T> struct is_scalar<T* const> : public true_type {};
+	template <typename T> struct is_scalar<T* volatile> : public true_type {};
+	template <typename T> struct is_scalar<T* const volatile> : public true_type {};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_compound
+ //
+ // Compound means anything but fundamental. See C++ standard, section 3.9.2.
+ //
+ // is_compound<T>::value == true if and only if:
+ // is_fundamental<T>::value == false
+ //
+ // Thus, is_compound<T>::value == true if and only if:
+ // is_floating_point<T>::value == false, and
+ // is_integral<T>::value == false, and
+ // is_void<T>::value == false
+ //
+ ///////////////////////////////////////////////////////////////////////
+	// A compound type is simply any type that is not fundamental
+	// (C++ standard, section 3.9.2).
+	template <typename T>
+	struct is_compound
+		: public integral_constant<bool, is_fundamental<T>::value == false>{};
+
+
+
+ // The following properties or relations are defined here. If the given
+ // item is missing then it simply hasn't been implemented, at least not yet.
+ // is_empty
+ // is_pod
+ // has_trivial_constructor
+ // has_trivial_copy
+ // has_trivial_assign
+ // has_trivial_destructor
+ // has_trivial_relocate -- EA extension to the C++ standard proposal.
+ // has_nothrow_constructor
+ // has_nothrow_copy
+ // has_nothrow_assign
+ // has_virtual_destructor
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_empty
+ //
+ // is_empty<T>::value == true if and only if T is an empty class or struct.
+ // is_empty may only be applied to complete types.
+ //
+ // is_empty cannot be used with union types until is_union can be made to work.
+ ///////////////////////////////////////////////////////////////////////
+	// Detection trick: an empty base class occupies no storage in a derived
+	// class (empty base optimization), so deriving from T and adding a char
+	// array yields the same size as the char array alone exactly when T is empty.
+	template <typename T>
+	struct is_empty_helper_t1 : public T { char m[64]; };
+	struct is_empty_helper_t2 { char m[64]; };
+
+	// The inheritance in empty_helper_t1 will not work with non-class types
+	template <typename T, bool is_a_class = false>
+	struct is_empty_helper : public false_type{};
+
+	template <typename T>
+	struct is_empty_helper<T, true> : public integral_constant<bool,
+		sizeof(is_empty_helper_t1<T>) == sizeof(is_empty_helper_t2)
+	>{};
+
+	template <typename T>
+	struct is_empty_helper2
+	{
+		// This typedef was previously named '_T', which is a reserved
+		// identifier (underscore followed by an uppercase letter) and also
+		// collides with the _T() text macro from <tchar.h> on Windows,
+		// breaking compilation if that header is included first.
+		typedef typename remove_cv<T>::type UnqualifiedT;
+		typedef is_empty_helper<UnqualifiedT, is_class<UnqualifiedT>::value> type;
+	};
+
+	template <typename T>
+	struct is_empty : public is_empty_helper2<T>::type {};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_pod
+ //
+ // is_pod<T>::value == true if and only if, for a given type T:
+ // - is_scalar<T>::value == true, or
+	//    - T is a class or struct that has no user-defined copy
+	//      assignment operator or destructor, and T has no non-static
+	//      data members M for which is_pod<M>::value == false, and no
+	//      members of reference type, or
+ // - T is the type of an array of objects E for which is_pod<E>::value == true
+ //
+ // is_pod may only be applied to complete types.
+ //
+ // Without some help from the compiler or user, is_pod will not report
+ // that a struct or class is a POD, but will correctly report that
+ // built-in types such as int are PODs. The user can help the compiler
+ // by using the EASTL_DECLARE_POD macro on a class.
+ ///////////////////////////////////////////////////////////////////////
+	// Conservative approximation: without compiler intrinsics only void and
+	// scalar types are reported as POD; user classes must opt in via
+	// EASTL_DECLARE_POD below.
+	template <typename T> // There's not much we can do here without some compiler extension.
+	struct is_pod : public integral_constant<bool, is_void<T>::value || is_scalar<T>::value>{};
+
+	// An array of PODs is itself a POD.
+	template <typename T, size_t N>
+	struct is_pod<T[N]> : public is_pod<T>{};
+
+	// Legacy alias spelling.
+	template <typename T>
+	struct is_POD : public is_pod<T>{};
+
+#define EASTL_DECLARE_POD(T) namespace mimp{ template <> struct is_pod<T> : public true_type{}; template <> struct is_pod<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_constructor
+ //
+ // has_trivial_constructor<T>::value == true if and only if T is a class
+ // or struct that has a trivial constructor. A constructor is trivial if
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_constructor<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_constructor<M>::value == true,
+ // where M is the type of the data member
+ //
+ // has_trivial_constructor may only be applied to complete types.
+ //
+	// Without help from the compiler or user, has_trivial_constructor will not
+ // report that a class or struct has a trivial constructor.
+ // The user can use EASTL_DECLARE_TRIVIAL_CONSTRUCTOR to help the compiler.
+ //
+ // A default constructor for a class X is a constructor of class X that
+ // can be called without an argument.
+ ///////////////////////////////////////////////////////////////////////
+
+ // With current compilers, this is all we can do.
+ template <typename T>
+ struct has_trivial_constructor : public is_pod<T> {};
+
+#define EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(T) namespace mimp{ template <> struct has_trivial_constructor<T> : public true_type{}; template <> struct has_trivial_constructor<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_copy
+ //
+ // has_trivial_copy<T>::value == true if and only if T is a class or
+ // struct that has a trivial copy constructor. A copy constructor is
+ // trivial if
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_copy<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_copy<M>::value == true, where M is the
+ // type of the data member
+ //
+ // has_trivial_copy may only be applied to complete types.
+ //
+ // Another way of looking at this is:
+ // A copy constructor for class X is trivial if it is implicitly
+ // declared and if all the following are true:
+ // - Class X has no virtual functions (10.3) and no virtual base classes (10.1).
+ // - Each direct base class of X has a trivial copy constructor.
+ // - For all the nonstatic data members of X that are of class type
+ // (or array thereof), each such class type has a trivial copy constructor;
+ // otherwise the copy constructor is nontrivial.
+ //
+	// Without help from the compiler or user, has_trivial_copy will not report
+ // that a class or struct has a trivial copy constructor. The user can
+ // use EASTL_DECLARE_TRIVIAL_COPY to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+	// Conservative: PODs are trivially copyable; volatile-qualified types are
+	// excluded because copying them may carry observable semantics.
+	template <typename T>
+	struct has_trivial_copy : public integral_constant<bool, is_pod<T>::value && !is_volatile<T>::value>{};
+
+#define EASTL_DECLARE_TRIVIAL_COPY(T) namespace mimp{ template <> struct has_trivial_copy<T> : public true_type{}; template <> struct has_trivial_copy<const T> : public true_type{}; }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_assign
+ //
+ // has_trivial_assign<T>::value == true if and only if T is a class or
+ // struct that has a trivial copy assignment operator. A copy assignment
+ // operator is trivial if:
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_assign<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_assign<M>::value == true, where M is
+ // the type of the data member.
+ //
+ // has_trivial_assign may only be applied to complete types.
+ //
+	// Without help from the compiler or user, has_trivial_assign will not
+ // report that a class or struct has trivial assignment. The user
+ // can use EASTL_DECLARE_TRIVIAL_ASSIGN to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+	// Conservative: PODs are trivially assignable; const types cannot be
+	// assigned at all and volatile types are excluded as for has_trivial_copy.
+	template <typename T>
+	struct has_trivial_assign : public integral_constant<bool,
+		is_pod<T>::value && !is_const<T>::value && !is_volatile<T>::value
+	>{};
+
+#define EASTL_DECLARE_TRIVIAL_ASSIGN(T) namespace mimp{ template <> struct has_trivial_assign<T> : public true_type{}; template <> struct has_trivial_assign<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_destructor
+ //
+ // has_trivial_destructor<T>::value == true if and only if T is a class
+ // or struct that has a trivial destructor. A destructor is trivial if
+ // - it is implicitly defined by the compiler, and
+ // - for every direct base class of T, has_trivial_destructor<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or
+ // array of class type, has_trivial_destructor<M>::value == true,
+ // where M is the type of the data member
+ //
+ // has_trivial_destructor may only be applied to complete types.
+ //
+	// Without help from the compiler or user, has_trivial_destructor will not
+ // report that a class or struct has a trivial destructor.
+ // The user can use EASTL_DECLARE_TRIVIAL_DESTRUCTOR to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ // With current compilers, this is all we can do.
+ template <typename T>
+ struct has_trivial_destructor : public is_pod<T>{};
+
+#define EASTL_DECLARE_TRIVIAL_DESTRUCTOR(T) namespace mimp{ template <> struct has_trivial_destructor<T> : public true_type{}; template <> struct has_trivial_destructor<const T> : public true_type{}; }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_relocate
+ //
+ // This is an EA extension to the type traits standard.
+ //
+ // A trivially relocatable object is one that can be safely memmove'd
+ // to uninitialized memory. construction, assignment, and destruction
+ // properties are not addressed by this trait. A type that has the
+ // is_fundamental trait would always have the has_trivial_relocate trait.
+ // A type that has the has_trivial_constructor, has_trivial_copy or
+	// has_trivial_assign traits would usually have the has_trivial_relocate
+ // trait, but this is not strictly guaranteed.
+ //
+ // The user can use EASTL_DECLARE_TRIVIAL_RELOCATE to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ // With current compilers, this is all we can do.
+ template <typename T>
+ struct has_trivial_relocate : public integral_constant<bool, is_pod<T>::value && !is_volatile<T>::value>{};
+
+#define EASTL_DECLARE_TRIVIAL_RELOCATE(T) namespace mimp{ template <> struct has_trivial_relocate<T> : public true_type{}; template <> struct has_trivial_relocate<const T> : public true_type{}; }
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/allocator.h
+//
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+ /// EASTL_ALLOCATOR_DEFAULT_NAME
+ ///
+ /// Defines a default allocator name in the absence of a user-provided name.
+ ///
+#ifndef EASTL_ALLOCATOR_DEFAULT_NAME
+#define EASTL_ALLOCATOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX // Unless the user overrides something, this is "EASTL".
+#endif
+
+
+ /// alloc_flags
+ ///
+ /// Defines allocation flags.
+ ///
+	// Note: the default mimp::allocator defined later in this file ignores its
+	// 'flags' argument, so these values only matter to custom allocators.
+	enum alloc_flags
+	{
+		MEM_TEMP = 0, // Low memory, not necessarily actually temporary.
+		MEM_PERM = 1  // High memory, for things that won't be unloaded.
+	};
+
+
+ /// allocator
+ ///
+ /// In this allocator class, note that it is not templated on any type and
+ /// instead it simply allocates blocks of memory much like the C malloc and
+ /// free functions. It can be thought of as similar to C++ std::allocator<char>.
+ /// The flags parameter has meaning that is specific to the allocation
+ ///
+	class allocator
+	{
+	public:
+		// pName is a debug tracking name; it is only stored when
+		// EASTL_NAME_ENABLED is set.
+		EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME));
+		allocator(const allocator& x);
+		allocator(const allocator& x, const char* pName);
+
+		allocator& operator=(const allocator& x);
+
+		// Allocate n bytes. The aligned overload additionally takes a required
+		// alignment and an offset at which that alignment must hold.
+		void* allocate(size_t n, int flags = 0);
+		void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+		// n is the size originally requested for p.
+		void deallocate(void* p, size_t n);
+
+		const char* get_name() const;
+		void set_name(const char* pName);
+
+	protected:
+#if EASTL_NAME_ENABLED
+		const char* mpName; // Debug name, used to track memory.
+#endif
+	};
+
+	// All instances of this allocator compare equal (see definitions below).
+	bool operator==(const allocator& a, const allocator& b);
+	bool operator!=(const allocator& a, const allocator& b);
+
+	// Process-wide default allocator accessors; SetDefaultAllocator returns
+	// the previously installed allocator.
+	allocator* GetDefaultAllocator();
+	allocator* SetDefaultAllocator(allocator* pAllocator);
+
+
+
+ /// get_default_allocator
+ ///
+ /// This templated function allows the user to implement a default allocator
+ /// retrieval function that any part of EASTL can use. EASTL containers take
+ /// an Allocator parameter which identifies an Allocator class to use. But
+ /// different kinds of allocators have different mechanisms for retrieving
+ /// a default allocator instance, and some don't even intrinsically support
+ /// such functionality. The user can override this get_default_allocator
+ /// function in order to provide the glue between EASTL and whatever their
+ /// system's default allocator happens to be.
+ ///
+ /// Example usage:
+ /// MyAllocatorType* gpSystemAllocator;
+ ///
+ /// MyAllocatorType* get_default_allocator(const MyAllocatorType*)
+ /// { return gpSystemAllocator; }
+ ///
+	template <typename Allocator>
+	inline Allocator* get_default_allocator(const Allocator*)
+	{
+		// NULL signals "no default known"; users provide a specialization
+		// (overload resolution on the pointer argument selects it) to supply one.
+		return NULL; // By default we return NULL; the user must make specialization of this function in order to provide their own implementation.
+	}
+
+	// Overload for the built-in allocator type: forward to its existing
+	// default-instance accessor.
+	inline EASTLAllocatorType* get_default_allocator(const EASTLAllocatorType*)
+	{
+		return EASTLAllocatorDefault(); // For the built-in allocator EASTLAllocatorType, we happen to already have a function for returning the default allocator instance, so we provide it.
+	}
+
+
+ /// default_allocfreemethod
+ ///
+ /// Implements a default allocfreemethod which uses the default global allocator.
+ /// This version supports only default alignment.
+ ///
+	/// default_allocfreemethod
+	///
+	/// Default allocfreemethod built on the global default allocator. A NULL
+	/// pBuffer requests an allocation of n bytes; a non-NULL pBuffer requests
+	/// that buffer be freed (n being its size). Default alignment only.
+	///
+	inline void* default_allocfreemethod(size_t n, void* pBuffer, void* /*pContext*/)
+	{
+		EASTLAllocatorType* const pDefaultAllocator = EASTLAllocatorDefault();
+
+		if(!pBuffer) // No buffer supplied, so this is an allocation request.
+			return EASTLAlloc(*pDefaultAllocator, n);
+
+		// Otherwise free pBuffer; the return value is meaningless for a free.
+		EASTLFree(*pDefaultAllocator, pBuffer, n);
+		return NULL;
+	}
+
+
+ /// allocate_memory
+ ///
+ /// This is a memory allocation dispatching function.
+ /// To do: Make aligned and unaligned specializations.
+ /// Note that to do this we will need to use a class with a static
+ /// function instead of a standalone function like below.
+ ///
+	template <typename Allocator>
+	void* allocate_memory(Allocator& a, size_t n, size_t alignment, size_t alignmentOffset)
+	{
+		// Alignments of 8 or less are assumed to be satisfied by the plain
+		// allocation path -- presumably the default allocator guarantees
+		// 8-byte alignment on all targets; TODO confirm.
+		if(alignment <= 8)
+			return EASTLAlloc(a, n);
+		return EASTLAllocAligned(a, n, alignment, alignmentOffset);
+	}
+
+
+#ifndef EASTL_USER_DEFINED_ALLOCATOR // If the user hasn't declared that he has defined a different allocator implementation elsewhere...
+
+	// Store the debug name, falling back to the default name when none is given.
+	// All name handling compiles away when EASTL_NAME_ENABLED is off.
+	inline allocator::allocator(const char* EASTL_NAME(pName))
+	{
+#if EASTL_NAME_ENABLED
+		mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+#endif
+	}
+
+
+	// Copy constructor: copies only the debug name (there is no other state).
+	inline allocator::allocator(const allocator& EASTL_NAME(alloc))
+	{
+#if EASTL_NAME_ENABLED
+		mpName = alloc.mpName;
+#endif
+	}
+
+
+	// Copy-with-rename constructor: the source allocator contributes no state,
+	// only the explicitly supplied name is kept.
+	inline allocator::allocator(const allocator&, const char* EASTL_NAME(pName))
+	{
+#if EASTL_NAME_ENABLED
+		mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+#endif
+	}
+
+
+	inline allocator& allocator::operator=(const allocator& EASTL_NAME(alloc))
+	{
+#if EASTL_NAME_ENABLED
+		mpName = alloc.mpName;
+#endif
+		return *this;
+	}
+
+
+	// Returns the stored debug name, or the compile-time default when naming
+	// is disabled.
+	inline const char* allocator::get_name() const
+	{
+#if EASTL_NAME_ENABLED
+		return mpName;
+#else
+		return EASTL_ALLOCATOR_DEFAULT_NAME;
+#endif
+	}
+
+
+	// No-op when EASTL_NAME_ENABLED is off. Note: unlike the constructors, a
+	// NULL pName is stored as-is here.
+	inline void allocator::set_name(const char* EASTL_NAME(pName))
+	{
+#if EASTL_NAME_ENABLED
+		mpName = pName;
+#endif
+	}
+
+
+	// Allocate n bytes via the project allocation macro. The flags argument is
+	// ignored by this default implementation. The previous code had an
+	// #if EASTL_NAME_ENABLED conditional whose branches were byte-identical,
+	// so the dead conditional has been removed.
+	inline void* allocator::allocate(size_t n, int /*flags*/)
+	{
+		return MI_ALLOC(n);
+	}
+
+
+	// Aligned allocation overload.
+	// NOTE(review): the alignment and offset arguments are ignored -- MI_ALLOC
+	// provides only its default alignment, so callers requesting stricter
+	// alignment are not honored here. Confirm this is acceptable, or route
+	// through an aligned allocation primitive if one exists.
+	inline void* allocator::allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int /*flags*/)
+	{
+		return MI_ALLOC(n);
+	}
+
+
+	// Release memory previously returned by allocate. The size argument is
+	// unused because MI_FREE needs only the pointer.
+	inline void allocator::deallocate(void* p, size_t)
+	{
+		MI_FREE(p);
+	}
+
+
+	inline bool operator==(const allocator&, const allocator&)
+	{
+		return true; // All allocators are considered equal, as they merely use global new/delete.
+	}
+
+
+	inline bool operator!=(const allocator&, const allocator&)
+	{
+		return false; // All allocators are considered equal, as they merely use global new/delete.
+	}
+
+
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/iterator.h
+//
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+ /// iterator_status_flag
+ ///
+ /// Defines the validity status of an iterator. This is primarily used for
+ /// iterator validation in debug builds. These are implemented as OR-able
+ /// flags (as opposed to mutually exclusive values) in order to deal with
+ /// the nature of iterator status. In particular, an iterator may be valid
+ /// but not dereferencable, as in the case with an iterator to container end().
+ /// An iterator may be valid but also dereferencable, as in the case with an
+ /// iterator to container begin().
+ ///
+	// These are OR-able bit flags rather than mutually exclusive values, so an
+	// iterator's status can combine e.g. isf_valid | isf_can_dereference.
+	enum iterator_status_flag
+	{
+		isf_none            = 0x00, /// This is called none and not called invalid because it is not strictly the opposite of invalid.
+		isf_valid           = 0x01, /// The iterator is valid, which means it is in the range of [begin, end].
+		isf_current         = 0x02, /// The iterator is valid and points to the same element it did when created. For example, if an iterator points to vector::begin() but an element is inserted at the front, the iterator is valid but not current. Modification of elements in place do not make iterators non-current.
+		isf_can_dereference = 0x04  /// The iterator is dereferencable, which means it is in the range of [begin, end). It may or may not be current.
+	};
+
+
+
+ // The following declarations are taken directly from the C++ standard document.
+ // input_iterator_tag, etc.
+ // iterator
+ // iterator_traits
+ // reverse_iterator
+
+ // Iterator categories
+ // Every iterator is defined as belonging to one of the iterator categories that
+ // we define here. These categories come directly from the C++ standard.
+	// The tag hierarchy mirrors the C++ standard: each stronger category
+	// inherits from the weaker one, so tag-dispatched algorithms written for a
+	// weaker category automatically accept iterators of a stronger one.
+	struct input_iterator_tag { };
+	struct output_iterator_tag { };
+	struct forward_iterator_tag : public input_iterator_tag { };
+	struct bidirectional_iterator_tag : public forward_iterator_tag { };
+	struct random_access_iterator_tag : public bidirectional_iterator_tag { };
+	struct contiguous_iterator_tag : public random_access_iterator_tag { };  // Extension to the C++ standard.  Contiguous ranges are more than random access, they are physically contiguous.
+
+ // struct iterator
+	// Convenience base class: deriving from it gives an iterator the five
+	// nested typedefs that iterator_traits expects.
+	template <typename Category, typename T, typename Distance = ptrdiff_t,
+			  typename Pointer = T*, typename Reference = T&>
+	struct iterator
+	{
+		typedef Category  iterator_category;
+		typedef T         value_type;
+		typedef Distance  difference_type;
+		typedef Pointer   pointer;
+		typedef Reference reference;
+	};
+
+
+ // struct iterator_traits
+	// Primary template: forward the iterator's own nested typedefs.
+	template <typename Iterator>
+	struct iterator_traits
+	{
+		typedef typename Iterator::iterator_category iterator_category;
+		typedef typename Iterator::value_type        value_type;
+		typedef typename Iterator::difference_type   difference_type;
+		typedef typename Iterator::pointer           pointer;
+		typedef typename Iterator::reference         reference;
+	};
+
+	// Partial specialization so that raw pointers work as random access
+	// iterators even though they have no nested typedefs.
+	template <typename T>
+	struct iterator_traits<T*>
+	{
+		typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;     // To consider: Change this to contiguous_iterator_tag for the case that
+		typedef T                                        value_type;            // EASTL_ITC_NS is "mimp" instead of "std".
+		typedef ptrdiff_t                                difference_type;
+		typedef T*                                       pointer;
+		typedef T&                                       reference;
+	};
+
+	// Pointer-to-const variant: value_type stays unqualified, only pointer and
+	// reference carry the const.
+	template <typename T>
+	struct iterator_traits<const T*>
+	{
+		typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;
+		typedef T                                        value_type;
+		typedef ptrdiff_t                                difference_type;
+		typedef const T*                                 pointer;
+		typedef const T&                                 reference;
+	};
+
+
+
+
+
+ /// reverse_iterator
+ ///
+ /// From the C++ standard:
+ /// Bidirectional and random access iterators have corresponding reverse
+ /// iterator adaptors that iterate through the data structure in the
+ /// opposite direction. They have the same signatures as the corresponding
+ /// iterators. The fundamental relation between a reverse iterator and its
+ /// corresponding iterator i is established by the identity:
+ /// &*(reverse_iterator(i)) == &*(i - 1).
+ /// This mapping is dictated by the fact that while there is always a pointer
+ /// past the end of an array, there might not be a valid pointer before the
+ /// beginning of an array.
+ ///
+	template <typename Iterator>
+	class reverse_iterator : public iterator<typename mimp::iterator_traits<Iterator>::iterator_category,
+											 typename mimp::iterator_traits<Iterator>::value_type,
+											 typename mimp::iterator_traits<Iterator>::difference_type,
+											 typename mimp::iterator_traits<Iterator>::pointer,
+											 typename mimp::iterator_traits<Iterator>::reference>
+	{
+	public:
+		typedef Iterator                                                   iterator_type;
+		typedef typename mimp::iterator_traits<Iterator>::pointer          pointer;
+		typedef typename mimp::iterator_traits<Iterator>::reference        reference;
+		typedef typename mimp::iterator_traits<Iterator>::difference_type  difference_type;
+
+	protected:
+		Iterator mIterator;  // The wrapped forward-direction iterator.
+
+	public:
+		reverse_iterator()      // It's important that we construct mIterator, because if Iterator
+			: mIterator() { }   // is a pointer, there's a difference between doing it and not.
+
+		explicit reverse_iterator(iterator_type i)
+			: mIterator(i) { }
+
+		reverse_iterator(const reverse_iterator& ri)
+			: mIterator(ri.mIterator) { }
+
+		// Converting constructor, e.g. iterator -> const_iterator.
+		template <typename U>
+		reverse_iterator(const reverse_iterator<U>& ri)
+			: mIterator(ri.base()) { }
+
+		// This operator= isn't in the standard, but the C++
+		// library working group has tentatively approved it, as it
+		// allows const and non-const reverse_iterators to interoperate.
+		template <typename U>
+		reverse_iterator<Iterator>& operator=(const reverse_iterator<U>& ri)
+			{ mIterator = ri.base(); return *this; }
+
+		iterator_type base() const
+			{ return mIterator; }
+
+		// Implements the defining identity &*(reverse_iterator(i)) == &*(i - 1):
+		// dereferencing yields the element just before the wrapped iterator.
+		reference operator*() const
+		{
+			iterator_type i(mIterator);
+			return *--i;
+		}
+
+		pointer operator->() const
+			{ return &(operator*()); }
+
+		// All motion operators are mirrored relative to the wrapped iterator.
+		reverse_iterator& operator++()
+			{ --mIterator; return *this; }
+
+		reverse_iterator operator++(int)
+		{
+			reverse_iterator ri(*this);
+			--mIterator;
+			return ri;
+		}
+
+		reverse_iterator& operator--()
+			{ ++mIterator; return *this; }
+
+		reverse_iterator operator--(int)
+		{
+			reverse_iterator ri(*this);
+			++mIterator;
+			return ri;
+		}
+
+		reverse_iterator operator+(difference_type n) const
+			{ return reverse_iterator(mIterator - n); }
+
+		reverse_iterator& operator+=(difference_type n)
+			{ mIterator -= n; return *this; }
+
+		reverse_iterator operator-(difference_type n) const
+			{ return reverse_iterator(mIterator + n); }
+
+		reverse_iterator& operator-=(difference_type n)
+			{ mIterator += n; return *this; }
+
+		// The -1 accounts for the off-by-one in the identity above.
+		reference operator[](difference_type n) const
+			{ return mIterator[-n - 1]; }
+	};
+
+
+ // The C++ library working group has tentatively approved the usage of two
+ // template parameters (Iterator1 and Iterator2) in order to allow reverse_iterators
+ // and const_reverse iterators to be comparable. This is a similar issue to the
+ // C++ defect report #179 regarding comparison of container iterators and const_iterators.
+	// The C++ library working group has tentatively approved the usage of two
+	// template parameters (Iterator1 and Iterator2) in order to allow reverse_iterators
+	// and const_reverse iterators to be comparable. This is a similar issue to the
+	// C++ defect report #179 regarding comparison of container iterators and const_iterators.
+	//
+	// Note that the ordering comparisons are inverted relative to base():
+	// a reverse_iterator is "less" when its underlying iterator is greater.
+	template <typename Iterator1, typename Iterator2>
+	inline bool
+	operator==(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+		{ return a.base() == b.base(); }
+
+
+	template <typename Iterator1, typename Iterator2>
+	inline bool
+	operator<(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+		{ return a.base() > b.base(); }
+
+
+	template <typename Iterator1, typename Iterator2>
+	inline bool
+	operator!=(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+		{ return a.base() != b.base(); }
+
+
+	template <typename Iterator1, typename Iterator2>
+	inline bool
+	operator>(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+		{ return a.base() < b.base(); }
+
+
+	template <typename Iterator1, typename Iterator2>
+	inline bool
+	operator<=(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+		{ return a.base() >= b.base(); }
+
+
+	template <typename Iterator1, typename Iterator2>
+	inline bool
+	operator>=(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+		{ return a.base() <= b.base(); }
+
+
+	// Distance is likewise reversed: b.base() - a.base(), not a - b.
+	template <typename Iterator1, typename Iterator2>
+	inline typename reverse_iterator<Iterator1>::difference_type
+	operator-(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+		{ return b.base() - a.base(); }
+
+
+	// n + ri, mirroring ri + n above.
+	template <typename Iterator>
+	inline reverse_iterator<Iterator>
+	operator+(typename reverse_iterator<Iterator>::difference_type n, const reverse_iterator<Iterator>& a)
+		{ return reverse_iterator<Iterator>(a.base() - n); }
+
+
+
+
+
+
+
+ /// back_insert_iterator
+ ///
+ /// A back_insert_iterator is simply a class that acts like an iterator but when you
+ /// assign a value to it, it calls push_back on the container with the value.
+ ///
+	/// back_insert_iterator
+	///
+	/// An output iterator adaptor: assigning a value through it appends that
+	/// value to the wrapped container via push_back.
+	///
+	template <typename Container>
+	class back_insert_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+	{
+	public:
+		typedef Container                           container_type;
+		typedef typename Container::const_reference const_reference;
+
+	protected:
+		Container& container;
+
+	public:
+		explicit back_insert_iterator(Container& c)
+			: container(c)
+		{
+		}
+
+		back_insert_iterator& operator=(const_reference value)
+		{
+			container.push_back(value);
+			return *this;
+		}
+
+		// Dereference and increment are deliberate no-ops that yield the
+		// adaptor itself; only assignment has an observable effect.
+		back_insert_iterator& operator*()
+		{
+			return *this;
+		}
+
+		back_insert_iterator& operator++()
+		{
+			return *this;
+		}
+
+		back_insert_iterator operator++(int)
+		{
+			return *this;
+		}
+	};
+
+
+	/// back_inserter
+	///
+	/// Convenience function constructing a back_insert_iterator for the
+	/// given container.
+	///
+	template <typename Container>
+	inline back_insert_iterator<Container>
+	back_inserter(Container& c)
+		{ return back_insert_iterator<Container>(c); }
+
+
+
+
+ /// front_insert_iterator
+ ///
+ /// A front_insert_iterator is simply a class that acts like an iterator but when you
+ /// assign a value to it, it calls push_front on the container with the value.
+ ///
+ template <typename Container>
+ class front_insert_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+ {
+ public:
+ typedef Container container_type;
+ typedef typename Container::const_reference const_reference;
+
+ protected:
+ Container& container;
+
+ public:
+ explicit front_insert_iterator(Container& x)
+ : container(x) { }
+
+ front_insert_iterator& operator=(const_reference value)
+ { container.push_front(value); return *this; }
+
+ front_insert_iterator& operator*()
+ { return *this; }
+
+ front_insert_iterator& operator++()
+ { return *this; } // This is by design.
+
+ front_insert_iterator operator++(int)
+ { return *this; } // This is by design.
+ };
+
+
+ /// front_inserter
+ ///
+ /// Creates an instance of a front_insert_iterator.
+ ///
+ template <typename Container>
+ inline front_insert_iterator<Container>
+ front_inserter(Container& x)
+ { return front_insert_iterator<Container>(x); }
+
+
+
+
+ /// insert_iterator
+ ///
+ /// An insert_iterator is like an iterator except that when you assign a value to it,
+ /// the insert_iterator inserts the value into the container and increments the iterator.
+ ///
+ /// insert_iterator is an iterator adaptor that functions as an OutputIterator:
+ /// assignment through an insert_iterator inserts an object into a container.
+ /// Specifically, if ii is an insert_iterator, then ii keeps track of a container c and
+ /// an insertion point p; the expression *ii = x performs the insertion c.insert(p, x).
+ ///
+ /// If you assign through an insert_iterator several times, then you will be inserting
+ /// several elements into the underlying container. In the case of a sequence, they will
+ /// appear at a particular location in the underlying sequence, in the order in which
+ /// they were inserted: one of the arguments to insert_iterator's constructor is an
+ /// iterator p, and the new range will be inserted immediately before p.
+ ///
+ template <typename Container>
+ class insert_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+ {
+ public:
+ typedef Container container_type;
+ typedef typename Container::iterator iterator_type;
+ typedef typename Container::const_reference const_reference;
+
+ protected:
+ Container& container;
+ iterator_type it;
+
+ public:
+ // This assignment operator is defined more to stop compiler warnings (e.g. VC++ C4512)
+ // than to be useful. However, it does an insert_iterator to be assigned to another
+ // insert iterator provided that they point to the same container.
+ insert_iterator& operator=(const insert_iterator& x)
+ {
+ EASTL_ASSERT(&x.container == &container);
+ it = x.it;
+ return *this;
+ }
+
+ insert_iterator(Container& x, iterator_type itNew)
+ : container(x), it(itNew) {}
+
+ insert_iterator& operator=(const_reference value)
+ {
+ it = container.insert(it, value);
+ ++it;
+ return *this;
+ }
+
+ insert_iterator& operator*()
+ { return *this; }
+
+ insert_iterator& operator++()
+ { return *this; } // This is by design.
+
+ insert_iterator& operator++(int)
+ { return *this; } // This is by design.
+
+ }; // insert_iterator
+
+
+ /// inserter
+ ///
+ /// Creates an instance of an insert_iterator.
+ ///
+ template <typename Container, typename Iterator>
+ inline mimp::insert_iterator<Container>
+ inserter(Container& x, Iterator i)
+ {
+ typedef typename Container::iterator iterator;
+ return mimp::insert_iterator<Container>(x, iterator(i));
+ }
+
+
+
+
+ //////////////////////////////////////////////////////////////////////////////////
+ /// distance
+ ///
+ /// Implements the distance() function. There are two versions, one for
+ /// random access iterators (e.g. with vector) and one for regular input
+ /// iterators (e.g. with list). The former is more efficient.
+ ///
+ template <typename InputIterator>
+ inline typename mimp::iterator_traits<InputIterator>::difference_type
+ distance_impl(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
+ {
+ typename mimp::iterator_traits<InputIterator>::difference_type n = 0;
+
+ while(first != last)
+ {
+ ++first;
+ ++n;
+ }
+ return n;
+ }
+
+ template <typename RandomAccessIterator>
+ inline typename mimp::iterator_traits<RandomAccessIterator>::difference_type
+ distance_impl(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ return last - first;
+ }
+
+ // Special version defined so that std C++ iterators can be recognized by
+ // this function. Unfortunately, this function treats all foreign iterators
+ // as InputIterators and thus can seriously hamper performance in the case
+ // of large ranges of bidirectional_iterator_tag iterators.
+ //template <typename InputIterator>
+ //inline typename mimp::iterator_traits<InputIterator>::difference_type
+ //distance_impl(InputIterator first, InputIterator last, ...)
+ //{
+ // typename mimp::iterator_traits<InputIterator>::difference_type n = 0;
+ //
+ // while(first != last)
+ // {
+ // ++first;
+ // ++n;
+ // }
+ // return n;
+ //}
+
+ template <typename InputIterator>
+ inline typename mimp::iterator_traits<InputIterator>::difference_type
+ distance(InputIterator first, InputIterator last)
+ {
+ typedef typename mimp::iterator_traits<InputIterator>::iterator_category IC;
+
+ return mimp::distance_impl(first, last, IC());
+ }
+
+
+
+
+ //////////////////////////////////////////////////////////////////////////////////
+ /// advance
+ ///
+ /// Implements the advance() function. There are three versions, one for
+ /// random access iterators (e.g. with vector), one for bidirectional
+ /// iterators (list) and one for regular input iterators (e.g. with slist).
+ ///
+ template <typename InputIterator, typename Distance>
+ inline void
+ advance_impl(InputIterator& i, Distance n, EASTL_ITC_NS::input_iterator_tag)
+ {
+ while(n--)
+ ++i;
+ }
+
+ template <typename BidirectionalIterator, typename Distance>
+ inline void
+ advance_impl(BidirectionalIterator& i, Distance n, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+ if(n > 0)
+ {
+ while(n--)
+ ++i;
+ }
+ else
+ {
+ while(n++)
+ --i;
+ }
+ }
+
+ template <typename RandomAccessIterator, typename Distance>
+ inline void
+ advance_impl(RandomAccessIterator& i, Distance n, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ i += n;
+ }
+
+ // Special version defined so that std C++ iterators can be recognized by
+ // this function. Unfortunately, this function treats all foreign iterators
+ // as InputIterators and thus can seriously hamper performance in the case
+ // of large ranges of bidirectional_iterator_tag iterators.
+ //template <typename InputIterator, typename Distance>
+ //inline void
+ //advance_impl(InputIterator& i, Distance n, ...)
+ //{
+ // while(n--)
+ // ++i;
+ //}
+
+ template <typename InputIterator, typename Distance>
+ inline void
+ advance(InputIterator& i, Distance n)
+ {
+ typedef typename mimp::iterator_traits<InputIterator>::iterator_category IC;
+
+ mimp::advance_impl(i, n, IC());
+ }
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/utility.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+ ///////////////////////////////////////////////////////////////////////
+ /// rel_ops
+ ///
+ /// rel_ops allow the automatic generation of operators !=, >, <=, >= from
+ /// just operators == and <. These are intentionally in the rel_ops namespace
+ /// so that they don't conflict with other similar operators. To use these
+ /// operators, add "using namespace std::rel_ops;" to an appropriate place in
+ /// your code, usually right in the function that you need them to work.
+ /// In fact, you will very likely have collision problems if you put such
+ /// using statements anywhere other than in the .cpp file like so and may
+ /// also have collisions when you do, as the using statement will affect all
+ /// code in the module. You need to be careful about use of rel_ops.
+ ///
+ namespace rel_ops
+ {
+ template <typename T>
+ inline bool operator!=(const T& x, const T& y)
+ { return !(x == y); }
+
+ template <typename T>
+ inline bool operator>(const T& x, const T& y)
+ { return (y < x); }
+
+ template <typename T>
+ inline bool operator<=(const T& x, const T& y)
+ { return !(y < x); }
+
+ template <typename T>
+ inline bool operator>=(const T& x, const T& y)
+ { return !(x < y); }
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ /// pair
+ ///
+ /// Implements a simple pair, just like the C++ std::pair.
+ ///
    template <typename T1, typename T2>
    struct pair
    {
        typedef T1 first_type;
        typedef T2 second_type;

        T1 first;   // the first member of the pair (the key, for map-like containers)
        T2 second;  // the second member of the pair

        pair();                          // default-constructs (value-initializes) both members
        pair(const T1& x);               // sets first; second is default-constructed. Intentionally non-explicit (EASTL extension vs std::pair).
        pair(const T1& x, const T2& y);  // sets both members

        // Converting constructor from a pair of compatible member types.
        template <typename U, typename V>
        pair(const pair<U, V>& p);

        // pair(const pair& p); // Not necessary, as default version is OK.
        // pair& operator=(const pair& p); // Not necessary, as default version is OK.
    };
+
+
+
+
+ /// use_self
+ ///
+ /// operator()(x) simply returns x. Used in sets, as opposed to maps.
+ /// This is a template policy implementation; it is an alternative to
+ /// the use_first template implementation.
+ ///
+ /// The existance of use_self may seem odd, given that it does nothing,
+ /// but these kinds of things are useful, virtually required, for optimal
+ /// generic programming.
+ ///
+ template <typename T>
+ struct use_self // : public unary_function<T, T> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef T result_type;
+
+ const T& operator()(const T& x) const
+ { return x; }
+ };
+
+ /// use_first
+ ///
+ /// operator()(x) simply returns x.first. Used in maps, as opposed to sets.
+ /// This is a template policy implementation; it is an alternative to
+ /// the use_self template implementation. This is the same thing as the
+ /// SGI SGL select1st utility.
+ ///
+ template <typename Pair>
+ struct use_first // : public unary_function<Pair, typename Pair::first_type> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef typename Pair::first_type result_type;
+
+ const result_type& operator()(const Pair& x) const
+ { return x.first; }
+ };
+
+ /// use_second
+ ///
+ /// operator()(x) simply returns x.second.
+ /// This is the same thing as the SGI SGL select2nd utility
+ ///
+ template <typename Pair>
+ struct use_second // : public unary_function<Pair, typename Pair::second_type> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef typename Pair::second_type result_type;
+
+ const result_type& operator()(const Pair& x) const
+ { return x.second; }
+ };
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pair
+ ///////////////////////////////////////////////////////////////////////
+
    // Default constructor: value-initializes both members.
    template <typename T1, typename T2>
    inline pair<T1, T2>::pair()
        : first(), second()
    {
        // Empty
    }


    // Single-argument constructor: second is value-initialized.
    template <typename T1, typename T2>
    inline pair<T1, T2>::pair(const T1& x)
        : first(x), second()
    {
        // Empty
    }


    template <typename T1, typename T2>
    inline pair<T1, T2>::pair(const T1& x, const T2& y)
        : first(x), second(y)
    {
        // Empty
    }


    // Converting copy constructor: builds pair<T1,T2> from a pair<U,V> whose
    // members are convertible to T1/T2.
    template <typename T1, typename T2>
    template <typename U, typename V>
    inline pair<T1, T2>::pair(const pair<U, V>& p)
        : first(p.first), second(p.second)
    {
        // Empty
    }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T1, typename T2>
+ inline bool operator==(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return ((a.first == b.first) && (a.second == b.second));
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator<(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ // Note that we use only operator < in this expression. Otherwise we could
+ // use the simpler: return (a.m1 == b.m1) ? (a.m2 < b.m2) : (a.m1 < b.m1);
+ // The user can write a specialization for this operator to get around this
+ // in cases where the highest performance is required.
+ return ((a.first < b.first) || (!(b.first < a.first) && (a.second < b.second)));
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator!=(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator>(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator>=(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator<=(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return !(b < a);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ /// make_pair / make_pair_ref
+ ///
+ /// make_pair is the same as std::make_pair specified by the C++ standard.
+ /// If you look at the C++ standard, you'll see that it specifies T& instead of T.
+ /// However, it has been determined that the C++ standard is incorrect and has
+ /// flagged it as a defect (http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#181).
+ /// In case you feel that you want a more efficient version that uses references,
+ /// we provide the make_pair_ref function below.
+ ///
+ /// Note: You don't need to use make_pair in order to make a pair. The following
+ /// code is equivalent, and the latter avoids one more level of inlining:
+ /// return make_pair(charPtr, charPtr);
+ /// return pair<char*, char*>(charPtr, charPtr);
+ ///
+ template <typename T1, typename T2>
+ inline pair<T1, T2> make_pair(T1 a, T2 b)
+ {
+ return pair<T1, T2>(a, b);
+ }
+
+
+ template <typename T1, typename T2>
+ inline pair<T1, T2> make_pair_ref(const T1& a, const T2& b)
+ {
+ return pair<T1, T2>(a, b);
+ }
+
+
+
+ /// swap
+ ///
+ /// Assigns the contents of a to b and the contents of b to a.
+ /// A temporary instance of type T is created and destroyed
+ /// in the process.
+ ///
+ /// This function is used by numerous other algorithms, and as
+ /// such it may in some cases be feasible and useful for the user
+ /// to implement an override version of this function which is
+ /// more efficient in some way.
+ ///
+ template <typename T>
+ inline void swap(T& a, T& b)
+ {
+ T temp(a);
+ a = b;
+ b = temp;
+ }
+
+
+
+ // iter_swap helper functions
+ //
+ template <bool bTypesAreEqual>
+ struct iter_swap_impl
+ {
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ static void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+ {
+ typedef typename mimp::iterator_traits<ForwardIterator1>::value_type value_type_a;
+
+ value_type_a temp(*a);
+ *a = *b;
+ *b = temp;
+ }
+ };
+
+ template <>
+ struct iter_swap_impl<true>
+ {
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ static void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+ {
+ mimp::swap(*a, *b);
+ }
+ };
+
+ /// iter_swap
+ ///
+ /// Equivalent to swap(*a, *b), though the user can provide an override to
+ /// iter_swap that is independent of an override which may exist for swap.
+ ///
+ /// We provide a version of iter_swap which uses swap when the swapped types
+ /// are equal but a manual implementation otherwise. We do this because the
+ /// C++ standard defect report says that iter_swap(a, b) must be implemented
+ /// as swap(*a, *b) when possible.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ inline void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+ {
+ typedef typename mimp::iterator_traits<ForwardIterator1>::value_type value_type_a;
+ typedef typename mimp::iterator_traits<ForwardIterator2>::value_type value_type_b;
+ typedef typename mimp::iterator_traits<ForwardIterator1>::reference reference_a;
+ typedef typename mimp::iterator_traits<ForwardIterator2>::reference reference_b;
+
+ iter_swap_impl<type_and<is_same<value_type_a, value_type_b>::value, is_same<value_type_a&, reference_a>::value, is_same<value_type_b&, reference_b>::value >::value >::iter_swap(a, b);
+ }
+
+
+
+
    /// EASTL_RBTREE_DEFAULT_NAME
    ///
    /// Defines a default container name in the absence of a user-provided name.
    ///
#ifndef EASTL_RBTREE_DEFAULT_NAME
#define EASTL_RBTREE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " rbtree" // Unless the user overrides something, this is "EASTL rbtree".
#endif


    /// EASTL_RBTREE_DEFAULT_ALLOCATOR
    ///
    /// Constructs the default allocator, tagged with the default rbtree name above.
    ///
#ifndef EASTL_RBTREE_DEFAULT_ALLOCATOR
#define EASTL_RBTREE_DEFAULT_ALLOCATOR allocator_type(EASTL_RBTREE_DEFAULT_NAME)
#endif
+
+
+
    /// RBTreeColor
    /// Node color used by the red-black tree balancing algorithm
    /// (stored in rbtree_node_base::mColor).
    enum RBTreeColor
    {
        kRBTreeColorRed,
        kRBTreeColorBlack
    };
+
+
+
    /// RBTreeSide
    /// Identifies which child side of a parent node an operation applies to.
    /// (The original comment said "RBTreeColor"; that was a copy/paste error.)
    enum RBTreeSide
    {
        kRBTreeSideLeft,
        kRBTreeSideRight
    };
+
+
+
+ /// rbtree_node_base
+ ///
+ /// We define a rbtree_node_base separately from rbtree_node (below), because it
+ /// allows us to have non-templated operations, and it makes it so that the
+ /// rbtree anchor node doesn't carry a T with it, which would waste space and
+ /// possibly lead to surprising the user due to extra Ts existing that the user
+ /// didn't explicitly create. The downside to all of this is that it makes debug
+ /// viewing of an rbtree harder, given that the node pointers are of type
+ /// rbtree_node_base and not rbtree_node.
+ ///
    struct rbtree_node_base
    {
        typedef rbtree_node_base this_type;

    public:
        this_type* mpNodeRight;  // Declared first because it is used most often.
        this_type* mpNodeLeft;
        this_type* mpNodeParent; // For the anchor node this holds the tree root (see rbtree docs below).
        char       mColor;       // Holds an RBTreeColor value. We only need one bit here, would be nice if we could stuff that bit somewhere else.
    };
+
+
    /// rbtree_node
    /// A tree node carrying the user's payload. All link and color bookkeeping
    /// lives in the non-templated rbtree_node_base base class.
    template <typename Value>
    struct rbtree_node : public rbtree_node_base
    {
        Value mValue; // For set and multiset, this is the user's value, for map and multimap, this is a pair of key/value.
    };
+
+
+
+
    // rbtree_node_base functions
    //
    // These are the fundamental functions that we use to maintain the
    // tree. The bulk of the work of the tree maintenance is done in
    // these functions. They operate on the non-templated node base type, so
    // all rbtree instantiations share one copy of this code; the definitions
    // are not in this file view.
    //
    rbtree_node_base* RBTreeIncrement    (const rbtree_node_base* pNode); // next node in iteration order (in-order successor, presumably)
    rbtree_node_base* RBTreeDecrement    (const rbtree_node_base* pNode); // previous node in iteration order
    rbtree_node_base* RBTreeGetMinChild  (const rbtree_node_base* pNode); // leftmost descendant of pNode (see inline definition below)
    rbtree_node_base* RBTreeGetMaxChild  (const rbtree_node_base* pNode); // rightmost descendant of pNode
    size_t            RBTreeGetBlackCount(const rbtree_node_base* pNodeTop,
                                          const rbtree_node_base* pNodeBottom); // black-node count on the pNodeBottom..pNodeTop path — TODO confirm against implementation
    void              RBTreeInsert       (      rbtree_node_base* pNode,
                                                rbtree_node_base* pNodeParent, 
                                                rbtree_node_base* pNodeAnchor,
                                                RBTreeSide        insertionSide); // links pNode under pNodeParent on insertionSide (rebalancing happens in the implementation, presumably)
    void              RBTreeErase        (      rbtree_node_base* pNode, 
                                                rbtree_node_base* pNodeAnchor);   // unlinks pNode from the tree whose anchor is pNodeAnchor
+
+
+
+
+
+
+
    /// rbtree_iterator
    ///
    /// Bidirectional iterator over an rbtree. Only declarations appear here;
    /// the member definitions are not in this file view.
    template <typename T, typename Pointer, typename Reference>
    struct rbtree_iterator
    {
        typedef rbtree_iterator<T, Pointer, Reference>      this_type;
        typedef rbtree_iterator<T, T*, T&>                  iterator;       // the mutable flavor of this iterator
        typedef rbtree_iterator<T, const T*, const T&>      const_iterator; // the const flavor
        typedef size_t                                   size_type;     // See config.h for the definition of size_t, which defaults to mimp::MiU32.
        typedef ptrdiff_t                                   difference_type;
        typedef T                                           value_type;
        typedef rbtree_node_base                            base_node_type;
        typedef rbtree_node<T>                              node_type;
        typedef Pointer                                     pointer;
        typedef Reference                                   reference;
        typedef EASTL_ITC_NS::bidirectional_iterator_tag    iterator_category;

    public:
        node_type* mpNode; // current node; for end() this is the tree's anchor node (per the rbtree docs below)

    public:
        rbtree_iterator();
        // NOTE(review): takes const node_type* while mpNode is non-const —
        // the out-of-view definition presumably const_casts; confirm there.
        explicit rbtree_iterator(const node_type* pNode);
        rbtree_iterator(const iterator& x); // converting ctor: enables iterator -> const_iterator

        reference operator*() const;
        pointer operator->() const;

        rbtree_iterator& operator++();
        rbtree_iterator operator++(int);

        rbtree_iterator& operator--();
        rbtree_iterator operator--(int);

    }; // rbtree_iterator
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // rb_base
+ //
+ // This class allows us to use a generic rbtree as the basis of map, multimap,
+ // set, and multiset transparently. The vital template parameters for this are
+ // the ExtractKey and the bUniqueKeys parameters.
+ //
+ // If the rbtree has a value type of the form pair<T1, T2> (i.e. it is a map or
+ // multimap and not a set or multiset) and a key extraction policy that returns
+ // the first part of the pair, the rbtree gets a mapped_type typedef.
+ // If it satisfies those criteria and also has unique keys, then it also gets an
+ // operator[] (which only map and set have and multimap and multiset don't have).
+ //
+ ///////////////////////////////////////////////////////////////////////////////
+
+
+
    /// rb_base
    /// This specialization is used for 'set'. In this case, Key and Value
    /// will be the same as each other and ExtractKey will be mimp::use_self.
    /// (This is the primary template; it is selected whenever none of the
    /// partial specializations below match.)
    ///
    template <typename Key, typename Value, typename Compare, typename ExtractKey, bool bUniqueKeys, typename RBTree>
    struct rb_base
    {
        typedef ExtractKey extract_key;

    public:
        Compare mCompare; // To do: Make sure that empty Compare classes go away via empty base optimizations.

    public:
        rb_base() : mCompare() {}
        rb_base(const Compare& compare) : mCompare(compare) {}
    };
+
+
    /// rb_base
    /// This class is used for 'multiset'.
    /// In this case, Key and Value will be the same as each
    /// other and ExtractKey will be mimp::use_self.
    /// (Partial specialization for bUniqueKeys == false.)
    ///
    template <typename Key, typename Value, typename Compare, typename ExtractKey, typename RBTree>
    struct rb_base<Key, Value, Compare, ExtractKey, false, RBTree>
    {
        typedef ExtractKey extract_key;

    public:
        Compare mCompare; // To do: Make sure that empty Compare classes go away via empty base optimizations.

    public:
        rb_base() : mCompare() {}
        rb_base(const Compare& compare) : mCompare(compare) {}
    };
+
+
    /// rb_base
    /// This specialization is used for 'map': the stored value is a pair whose
    /// .first is the key (ExtractKey == use_first) and keys are unique.
    ///
    template <typename Key, typename Pair, typename Compare, typename RBTree>
    struct rb_base<Key, Pair, Compare, mimp::use_first<Pair>, true, RBTree>
    {
        typedef mimp::use_first<Pair> extract_key;

    public:
        Compare mCompare; // To do: Make sure that empty Compare classes go away via empty base optimizations.

    public:
        rb_base() : mCompare() {}
        rb_base(const Compare& compare) : mCompare(compare) {}
    };
+
+
    /// rb_base
    /// This specialization is used for 'multimap': pair values keyed on .first
    /// (ExtractKey == use_first) with duplicate keys allowed.
    ///
    template <typename Key, typename Pair, typename Compare, typename RBTree>
    struct rb_base<Key, Pair, Compare, mimp::use_first<Pair>, false, RBTree>
    {
        typedef mimp::use_first<Pair> extract_key;

    public:
        Compare mCompare; // To do: Make sure that empty Compare classes go away via empty base optimizations.

    public:
        rb_base() : mCompare() {}
        rb_base(const Compare& compare) : mCompare(compare) {}
    };
+
+
+
+
+
+ /// rbtree
+ ///
+ /// rbtree is the red-black tree basis for the map, multimap, set, and multiset
+ /// containers. Just about all the work of those containers is done here, and
+ /// they are merely a shell which sets template policies that govern the code
+ /// generation for this rbtree.
+ ///
+ /// This rbtree implementation is pretty much the same as all other modern
+ /// rbtree implementations, as the topic is well known and researched. We may
+ /// choose to implement a "relaxed balancing" option at some point in the
+ /// future if it is deemed worthwhile. Most rbtree implementations don't do this.
+ ///
+ /// The primary rbtree member variable is mAnchor, which is a node_type and
+ /// acts as the end node. However, like any other node, it has mpNodeLeft,
+ /// mpNodeRight, and mpNodeParent members. We do the conventional trick of
+ /// assigning begin() (left-most rbtree node) to mpNodeLeft, assigning
+ /// 'end() - 1' (a.k.a. rbegin()) to mpNodeRight, and assigning the tree root
+ /// node to mpNodeParent.
+ ///
+ /// Compare (functor): This is a comparison class which defaults to 'less'.
+ /// It is a common STL thing which takes two arguments and returns true if
+ /// the first is less than the second.
+ ///
+ /// ExtractKey (functor): This is a class which gets the key from a stored
+ /// node. With map and set, the node is a pair, whereas with set and multiset
+ /// the node is just the value. ExtractKey will be either mimp::use_first (map and multimap)
+ /// or mimp::use_self (set and multiset).
+ ///
+ /// bMutableIterators (bool): true if rbtree::iterator is a mutable
+ /// iterator, false if iterator and const_iterator are both const iterators.
+ /// It will be true for map and multimap and false for set and multiset.
+ ///
+ /// bUniqueKeys (bool): true if the keys are to be unique, and false if there
+ /// can be multiple instances of a given key. It will be true for set and map
+ /// and false for multiset and multimap.
+ ///
+ /// To consider: Add an option for relaxed tree balancing. This could result
+ /// in performance improvements but would require a more complicated implementation.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// find_as
+ /// In order to support the ability to have a tree of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the tree's key type. See the find_as function
+ /// for more documentation on this.
+ ///
    template <typename Key, typename Value, typename Compare, typename Allocator, 
              typename ExtractKey, bool bMutableIterators, bool bUniqueKeys>
    class rbtree
        : public rb_base<Key, Value, Compare, ExtractKey, bUniqueKeys, 
                            rbtree<Key, Value, Compare, Allocator, ExtractKey, bMutableIterators, bUniqueKeys> >
    {
    public:
        typedef ptrdiff_t difference_type;
        typedef size_t size_type;     // See config.h for the definition of size_t, which defaults to mimp::MiU32.
        typedef Key key_type;
        typedef Value value_type;
        typedef rbtree_node<value_type> node_type;
        typedef value_type& reference;
        typedef const value_type& const_reference;
        // iterator is mutable for map/multimap (bMutableIterators == true) and
        // const for set/multiset (bMutableIterators == false).
        typedef typename type_select<bMutableIterators, 
                                        rbtree_iterator<value_type, value_type*, value_type&>, 
                                        rbtree_iterator<value_type, const value_type*, const value_type&> >::type iterator;
        typedef rbtree_iterator<value_type, const value_type*, const value_type&> const_iterator;
        typedef mimp::reverse_iterator<iterator> reverse_iterator;
        typedef mimp::reverse_iterator<const_iterator> const_reverse_iterator;

        typedef Allocator allocator_type;
        typedef Compare key_compare;
        typedef typename type_select<bUniqueKeys, mimp::pair<iterator, bool>, iterator>::type insert_return_type;  // map/set::insert return a pair, multimap/multiset::iterator return an iterator.
        typedef rbtree<Key, Value, Compare, Allocator, 
                        ExtractKey, bMutableIterators, bUniqueKeys> this_type;
        typedef rb_base<Key, Value, Compare, ExtractKey, bUniqueKeys, this_type> base_type;
        typedef integral_constant<bool, bUniqueKeys> has_unique_keys_type;  // used to tag-dispatch DoInsertValue/DoInsertKey below
        typedef typename base_type::extract_key extract_key;

        using base_type::mCompare;

        enum
        {
            kKeyAlignment         = EASTL_ALIGN_OF(key_type),
            kKeyAlignmentOffset   = 0,                          // To do: Make sure this really is zero for all uses of this template.
            kValueAlignment       = EASTL_ALIGN_OF(value_type),
            kValueAlignmentOffset = 0                           // To fix: This offset is zero for sets and >0 for maps. Need to fix this.
        };

    public:
        rbtree_node_base  mAnchor;      /// This node acts as end() and its mpLeft points to begin(), and mpRight points to rbegin() (the last node on the right).
        size_type         mnSize;       /// Stores the count of nodes in the tree (not counting the anchor node).
        allocator_type    mAllocator;   // To do: Use base class optimization to make this go away.

    public:
        // ctor/dtor
        rbtree();
        rbtree(const allocator_type& allocator);
        rbtree(const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR);
        rbtree(const this_type& x);

        // Range constructor: inserts [first, last) into the new tree.
        template <typename InputIterator>
        rbtree(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR);

        ~rbtree();

    public:
        // properties
        allocator_type& get_allocator();
        void            set_allocator(const allocator_type& allocator);

        const key_compare& key_comp() const { return mCompare; }
        key_compare&       key_comp()       { return mCompare; }

        this_type& operator=(const this_type& x);

        void swap(this_type& x);

    public: 
        // iterators
        iterator        begin();
        const_iterator  begin() const;
        iterator        end();
        const_iterator  end() const;

        reverse_iterator        rbegin();
        const_reverse_iterator  rbegin() const;
        reverse_iterator        rend();
        const_reverse_iterator  rend() const;

    public:
        bool      empty() const;
        size_type size() const;

        /// map::insert and set::insert return a pair, while multimap::insert and
        /// multiset::insert return an iterator.
        insert_return_type insert(const value_type& value);

        // C++ standard: inserts value if and only if there is no element with 
        // key equivalent to the key of t in containers with unique keys; always 
        // inserts value in containers with equivalent keys. Always returns the 
        // iterator pointing to the element with key equivalent to the key of value. 
        // iterator position is a hint pointing to where the insert should start
        // to search. However, there is a potential defect/improvement report on this behaviour:
        // LWG issue #233 (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1780.html)
        // We follow the same approach as SGI STL/STLPort and use the position as
        // a forced insertion position for the value when possible.
        iterator insert(iterator position, const value_type& value);

        // Range insert: inserts each element of [first, last).
        template <typename InputIterator>
        void insert(InputIterator first, InputIterator last);

        iterator erase(iterator position);
        iterator erase(iterator first, iterator last);

        reverse_iterator erase(reverse_iterator position);
        reverse_iterator erase(reverse_iterator first, reverse_iterator last);

        // For some reason, multiple STL versions make a specialization 
        // for erasing an array of key_types. I'm pretty sure we don't
        // need this, but just to be safe we will follow suit. 
        // The implementation is trivial. Returns void because the values
        // could well be randomly distributed throughout the tree and thus
        // a return value would be nearly meaningless.
        void erase(const key_type* first, const key_type* last);

        void clear();
        void reset(); // NOTE(review): distinct from clear(); semantics depend on the out-of-view implementation — confirm before use.

        iterator       find(const key_type& key);
        const_iterator find(const key_type& key) const;

        /// Implements a find whereby the user supplies a comparison of a different type
        /// than the tree's value_type. A useful case of this is one whereby you have
        /// a container of string objects but want to do searches via passing in char pointers.
        /// The problem is that without this kind of find, you need to do the expensive operation
        /// of converting the char pointer to a string so it can be used as the argument to the 
        /// find function.
        ///
        /// Example usage (note that the compare uses string as first type and char* as second):
        ///     set<string> strings;
        ///     strings.find_as("hello", less_2<string, const char*>());
        ///
        template <typename U, typename Compare2>
        iterator       find_as(const U& u, Compare2 compare2);

        template <typename U, typename Compare2>
        const_iterator find_as(const U& u, Compare2 compare2) const;

        iterator       lower_bound(const key_type& key);
        const_iterator lower_bound(const key_type& key) const;

        iterator       upper_bound(const key_type& key);
        const_iterator upper_bound(const key_type& key) const;

        bool validate() const;                        // debug aid; presumably verifies tree invariants — see implementation
        int  validate_iterator(const_iterator i) const;

    protected:
        // Node allocation/construction helpers (definitions out of view).
        node_type* DoAllocateNode();
        void       DoFreeNode(node_type* pNode);

        node_type* DoCreateNodeFromKey(const key_type& key);
        node_type* DoCreateNode(const value_type& value);
        node_type* DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent);

        node_type* DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest);
        void       DoNukeSubtree(node_type* pNode);

        // The true_type/false_type overloads below are selected via
        // has_unique_keys_type (tag dispatch on bUniqueKeys).
        // Intentionally return a pair and not an iterator for DoInsertValue(..., true_type)
        // This is because the C++ standard for map and set is to return a pair and not just an iterator.
        mimp::pair<iterator, bool> DoInsertValue(const value_type& value, true_type); // true_type means keys are unique.
        iterator DoInsertValue(const value_type& value, false_type); // false_type means keys are not unique.

        mimp::pair<iterator, bool> DoInsertKey(const key_type& key, true_type);
        iterator DoInsertKey(const key_type& key, false_type);

        iterator DoInsertValue(iterator position, const value_type& value, true_type);
        iterator DoInsertValue(iterator position, const value_type& value, false_type);

        iterator DoInsertKey(iterator position, const key_type& key, true_type);
        iterator DoInsertKey(iterator position, const key_type& key, false_type);

        iterator DoInsertValueImpl(node_type* pNodeParent, const value_type& value, bool bForceToLeft);
        iterator DoInsertKeyImpl(node_type* pNodeParent, const key_type& key, bool bForceToLeft);

    }; // rbtree
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree_node_base functions
+ ///////////////////////////////////////////////////////////////////////
+
+ /// Returns the left-most (minimum) node of the subtree rooted at pNodeBase.
+ /// The argument may come from a const tree; the result is handed back
+ /// non-const so a single helper serves both const and mutable code paths.
+ inline rbtree_node_base* RBTreeGetMinChild(const rbtree_node_base* pNodeBase)
+ {
+     const rbtree_node_base* pWalk = pNodeBase;
+     for(; pWalk->mpNodeLeft; pWalk = pWalk->mpNodeLeft)
+         { } // Keep descending left; the loop header does all the work.
+     return const_cast<rbtree_node_base*>(pWalk);
+ }
+
+ /// Returns the right-most (maximum) node of the subtree rooted at pNodeBase.
+ /// Mirror image of RBTreeGetMinChild.
+ inline rbtree_node_base* RBTreeGetMaxChild(const rbtree_node_base* pNodeBase)
+ {
+     const rbtree_node_base* pWalk = pNodeBase;
+     for(; pWalk->mpNodeRight; pWalk = pWalk->mpNodeRight)
+         { } // Keep descending right; the loop header does all the work.
+     return const_cast<rbtree_node_base*>(pWalk);
+ }
+
+ // The rest of the functions are non-trivial and are found in
+ // the corresponding .cpp file to this file.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree_iterator functions
+ ///////////////////////////////////////////////////////////////////////
+
+ // Default constructor: the iterator holds no node and must not be
+ // dereferenced or advanced until it is assigned a valid position.
+ template <typename T, typename Pointer, typename Reference>
+ rbtree_iterator<T, Pointer, Reference>::rbtree_iterator()
+ : mpNode(NULL) { }
+
+
+ // Wraps an existing tree node. const is cast away because the same
+ // implementation backs both iterator and const_iterator instantiations.
+ template <typename T, typename Pointer, typename Reference>
+ rbtree_iterator<T, Pointer, Reference>::rbtree_iterator(const node_type* pNode)
+ : mpNode(static_cast<node_type*>(const_cast<node_type*>(pNode))) { }
+
+
+ // Copy constructor; also serves as the iterator -> const_iterator conversion.
+ template <typename T, typename Pointer, typename Reference>
+ rbtree_iterator<T, Pointer, Reference>::rbtree_iterator(const iterator& x)
+ : mpNode(x.mpNode) { }
+
+
+ // Dereference: yields the value stored in the current node.
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::reference
+ rbtree_iterator<T, Pointer, Reference>::operator*() const
+ { return mpNode->mValue; }
+
+
+ // Member access: yields a pointer to the value stored in the current node.
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::pointer
+ rbtree_iterator<T, Pointer, Reference>::operator->() const
+ { return &mpNode->mValue; }
+
+
+ // Pre-increment: steps to the in-order successor via RBTreeIncrement.
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type&
+ rbtree_iterator<T, Pointer, Reference>::operator++()
+ {
+ mpNode = static_cast<node_type*>(RBTreeIncrement(mpNode));
+ return *this;
+ }
+
+
+ // Post-increment: returns the pre-step position by value.
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type
+ rbtree_iterator<T, Pointer, Reference>::operator++(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(RBTreeIncrement(mpNode));
+ return temp;
+ }
+
+
+ // Pre-decrement: steps to the in-order predecessor via RBTreeDecrement.
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type&
+ rbtree_iterator<T, Pointer, Reference>::operator--()
+ {
+ mpNode = static_cast<node_type*>(RBTreeDecrement(mpNode));
+ return *this;
+ }
+
+
+ // Post-decrement: returns the pre-step position by value.
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type
+ rbtree_iterator<T, Pointer, Reference>::operator--(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(RBTreeDecrement(mpNode));
+ return temp;
+ }
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template parameters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const rbtree_iterator<T, PointerA, ReferenceA>& a,
+ const rbtree_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const rbtree_iterator<T, PointerA, ReferenceA>& a,
+ const rbtree_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const rbtree_iterator<T, Pointer, Reference>& a,
+ const rbtree_iterator<T, Pointer, Reference>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree functions
+ ///////////////////////////////////////////////////////////////////////
+
+ // Default constructor: builds an empty tree. reset() establishes the
+ // anchor's self-referential links so that begin() == end() when empty.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree()
+ : mAnchor(),
+ mnSize(0),
+ mAllocator(EASTL_RBTREE_DEFAULT_NAME)
+ {
+ reset();
+ }
+
+
+ // Constructs an empty tree whose nodes will come from the given allocator.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const allocator_type& allocator)
+ : mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset();
+ }
+
+
+ // Constructs an empty tree with an explicit comparison functor (stored in
+ // the compressed base) and allocator.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const C& compare, const allocator_type& allocator)
+ : base_type(compare),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset();
+ }
+
+
+ // Copy constructor: deep-copies x's node tree via DoCopySubtree, then
+ // re-derives the anchor's cached min/max links from the cloned root.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const this_type& x)
+ : base_type(x.mCompare),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(x.mAllocator)
+ {
+ reset();
+
+ if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node.
+ {
+ mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor);
+ mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent);
+ mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent);
+ mnSize = x.mnSize;
+ }
+ }
+
+
+ // Range constructor: starts empty and inserts [first, last) one element at
+ // a time, so duplicates follow this tree's unique/multi key policy.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename InputIterator>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(InputIterator first, InputIterator last, const C& compare, const allocator_type& allocator)
+ : base_type(compare),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset();
+
+ for(; first != last; ++first)
+ insert(*first);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::~rbtree()
+ {
+ // Erase the entire tree. DoNukeSubtree is not a
+ // conventional erase function, as it does no rebalancing.
+ DoNukeSubtree((node_type*)mAnchor.mpNodeParent);
+ }
+
+
+ // Returns a mutable reference to the node allocator.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::allocator_type&
+ rbtree<K, V, C, A, E, bM, bU>::get_allocator()
+ { return mAllocator; }
+
+
+ // Replaces the node allocator. Existing nodes were obtained from the
+ // previous allocator, so callers must ensure the two are compatible.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::set_allocator(const allocator_type& allocator)
+ { mAllocator = allocator; }
+
+
+ // Number of stored elements.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::size_type
+ rbtree<K, V, C, A, E, bM, bU>::size() const
+ {
+ return mnSize;
+ }
+
+
+ // True when the tree holds no elements.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline bool rbtree<K, V, C, A, E, bM, bU>::empty() const
+ {
+ return mnSize == 0;
+ }
+
+
+ // begin() is the left-most node, which the anchor caches in its left link.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::begin()
+ {
+ node_type* const pLeftmost = static_cast<node_type*>(mAnchor.mpNodeLeft);
+ return iterator(pLeftmost);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::begin() const
+ {
+ rbtree_node_base* const pLeftmost = const_cast<rbtree_node_base*>(mAnchor.mpNodeLeft);
+ return const_iterator(static_cast<node_type*>(pLeftmost));
+ }
+
+
+ // end() is represented by the anchor node itself.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::end()
+ {
+ return iterator(static_cast<node_type*>(&mAnchor));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::end() const
+ {
+ rbtree_node_base* const pAnchor = const_cast<rbtree_node_base*>(&mAnchor);
+ return const_iterator(static_cast<node_type*>(pAnchor));
+ }
+
+
+ // Reverse iteration is layered on top of the forward iterators.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rbegin()
+ {
+ return reverse_iterator(end());
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rbegin() const
+ {
+ return const_reverse_iterator(end());
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rend()
+ {
+ return reverse_iterator(begin());
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rend() const
+ {
+ return const_reverse_iterator(begin());
+ }
+
+
+ // Copy-assignment: clears current contents, copies the comparison functor
+ // (and optionally the allocator), then deep-copies x's node tree.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::this_type&
+ rbtree<K, V, C, A, E, bM, bU>::operator=(const this_type& x)
+ {
+ if(this != &x) // Self-assignment is a no-op.
+ {
+ clear();
+
+#if EASTL_ALLOCATOR_COPY_ENABLED
+ mAllocator = x.mAllocator;
+#endif
+
+ base_type::mCompare = x.mCompare;
+
+ if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node.
+ {
+ // Clone the whole subtree, then re-derive the cached min/max links.
+ mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor);
+ mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent);
+ mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent);
+ mnSize = x.mnSize;
+ }
+ }
+ return *this;
+ }
+
+
+ // Exchanges contents with x. When allocators compare equal this is O(1)
+ // pointer surgery on the two anchor nodes; otherwise it falls back to a
+ // copy-based three-way exchange, which allocates and is O(n).
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ void rbtree<K, V, C, A, E, bM, bU>::swap(this_type& x)
+ {
+ if(mAllocator == x.mAllocator) // If allocators are equivalent...
+ {
+ // Most of our members can be exchanged by a basic swap:
+ // We leave mAllocator as-is.
+ mimp::swap(mnSize, x.mnSize);
+ mimp::swap(base_type::mCompare, x.mCompare);
+
+ // However, because our anchor node is a part of our class instance and not
+ // dynamically allocated, we can't do a swap of it but must do a more elaborate
+ // procedure. This is the downside to having the mAnchor be like this, but
+ // otherwise we consider it a good idea to avoid allocating memory for a
+ // nominal container instance.
+
+ // We optimize for the expected most common case: both pointers being non-null.
+ if(mAnchor.mpNodeParent && x.mAnchor.mpNodeParent) // If both pointers are non-null...
+ {
+ mimp::swap(mAnchor.mpNodeRight, x.mAnchor.mpNodeRight);
+ mimp::swap(mAnchor.mpNodeLeft, x.mAnchor.mpNodeLeft);
+ mimp::swap(mAnchor.mpNodeParent, x.mAnchor.mpNodeParent);
+
+ // We need to fix up the anchors to point to themselves (we can't just swap them).
+ mAnchor.mpNodeParent->mpNodeParent = &mAnchor;
+ x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor;
+ }
+ else if(mAnchor.mpNodeParent) // Only we are non-empty: move our tree into x.
+ {
+ x.mAnchor.mpNodeRight = mAnchor.mpNodeRight;
+ x.mAnchor.mpNodeLeft = mAnchor.mpNodeLeft;
+ x.mAnchor.mpNodeParent = mAnchor.mpNodeParent;
+ x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor;
+
+ // We need to fix up our anchor to point to itself (we can't have it swap with x).
+ mAnchor.mpNodeRight = &mAnchor;
+ mAnchor.mpNodeLeft = &mAnchor;
+ mAnchor.mpNodeParent = NULL;
+ }
+ else if(x.mAnchor.mpNodeParent) // Only x is non-empty: move x's tree into us.
+ {
+ mAnchor.mpNodeRight = x.mAnchor.mpNodeRight;
+ mAnchor.mpNodeLeft = x.mAnchor.mpNodeLeft;
+ mAnchor.mpNodeParent = x.mAnchor.mpNodeParent;
+ mAnchor.mpNodeParent->mpNodeParent = &mAnchor;
+
+ // We need to fix up x's anchor to point to itself (we can't have it swap with us).
+ x.mAnchor.mpNodeRight = &x.mAnchor;
+ x.mAnchor.mpNodeLeft = &x.mAnchor;
+ x.mAnchor.mpNodeParent = NULL;
+ } // Else both are NULL and there is nothing to do.
+ }
+ else
+ {
+ const this_type temp(*this); // Can't call mimp::swap because that would
+ *this = x; // itself call this member swap function.
+ x = temp;
+ }
+ }
+
+
+ // Inserts value, dispatching at compile time on whether this tree requires
+ // unique keys (map/set return pair<iterator,bool>; multi variants return
+ // a plain iterator — see insert_return_type).
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator.
+ rbtree<K, V, C, A, E, bM, bU>::insert(const value_type& value)
+ { return DoInsertValue(value, has_unique_keys_type()); }
+
+
+ // Hinted insert: 'position' suggests where value belongs; the hinted
+ // DoInsertValue overloads fall back to a normal insert if the hint is bad.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::insert(iterator position, const value_type& value)
+ { return DoInsertValue(position, value, has_unique_keys_type()); }
+
+
+ // Unique-key insertion: walks from the root to the attachment leaf, then
+ // compares against the in-order predecessor to detect a duplicate.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ mimp::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(const value_type& value, true_type) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ // Note that we return a pair and not an iterator. This is because the C++ standard for map
+ // and set is to return a pair and not just an iterator.
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pLowerBound = (node_type*)&mAnchor; // Set it to the container end for now.
+ node_type* pParent; // This will be where we insert the new node.
+
+ bool bValueLessThanNode = true; // If the tree is empty, this will result in an insertion at the front.
+
+ // Find insertion position of the value. This will either be a position which
+ // already contains the value, a position which is greater than the value or
+ // end(), which we treat like a position which is greater than the value.
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ bValueLessThanNode = mCompare(extractKey(value), extractKey(pCurrent->mValue));
+ pLowerBound = pCurrent;
+
+ if(bValueLessThanNode)
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), extractKey(value))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ pParent = pLowerBound; // pLowerBound is actually upper bound right now (i.e. it is > value instead of <=), but we will make it the lower bound below.
+
+ if(bValueLessThanNode) // If we ended up on the left side of the last parent node...
+ {
+ if(EASTL_LIKELY(pLowerBound != (node_type*)mAnchor.mpNodeLeft)) // If the landing node is not the left-most node (the usual case)...
+ {
+ // At this point, pLowerBound points to a node which is > than value.
+ // Move it back by one, so that it points to a node which is <= value.
+ pLowerBound = (node_type*)RBTreeDecrement(pLowerBound);
+ }
+ else
+ {
+ // The tree is empty or the value sorts before every existing node;
+ // there is no predecessor to test against, so insert directly.
+ const iterator itResult(DoInsertValueImpl(pLowerBound, value, false));
+ return pair<iterator, bool>(itResult, true);
+ }
+ }
+
+ // Since here we require values to be unique, we will do nothing if the value already exists.
+ if(mCompare(extractKey(pLowerBound->mValue), extractKey(value))) // If the node is < the value (i.e. if value is >= the node)...
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(extractKey(value), extractKey(pLowerBound->mValue))); // Validate that the compare function is sane.
+ const iterator itResult(DoInsertValueImpl(pParent, value, false));
+ return pair<iterator, bool>(itResult, true);
+ }
+
+ // The item already exists (as found by the compare directly above), so return false.
+ return pair<iterator, bool>(iterator(pLowerBound), false);
+ }
+
+
+ // Non-unique insertion: equivalent keys are not "less than" the node, so
+ // the walk goes right and a new duplicate lands after existing equivalents.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(const value_type& value, false_type) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+ extract_key extractKey;
+
+ while(pCurrent) // Walk down to the leaf that will parent the new node.
+ {
+ pRangeEnd = pCurrent;
+
+ if(mCompare(extractKey(value), extractKey(pCurrent->mValue)))
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), extractKey(value))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ return DoInsertValueImpl(pRangeEnd, value, false);
+ }
+
+
+ // Key-only variant of the unique insert: the node's value is constructed
+ // from the key alone (used by map's operator[]-style paths).
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ mimp::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(const key_type& key, true_type) // true_type means keys are unique.
+ {
+ // This code is essentially a slightly modified copy of the rbtree::insert
+ // function whereby this version takes a key and not a full value_type.
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pLowerBound = (node_type*)&mAnchor; // Set it to the container end for now.
+ node_type* pParent; // This will be where we insert the new node.
+
+ bool bValueLessThanNode = true; // If the tree is empty, this will result in an insertion at the front.
+
+ // Find insertion position of the value. This will either be a position which
+ // already contains the value, a position which is greater than the value or
+ // end(), which we treat like a position which is greater than the value.
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ bValueLessThanNode = mCompare(key, extractKey(pCurrent->mValue));
+ pLowerBound = pCurrent;
+
+ if(bValueLessThanNode)
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ pParent = pLowerBound; // pLowerBound is actually upper bound right now (i.e. it is > value instead of <=), but we will make it the lower bound below.
+
+ if(bValueLessThanNode) // If we ended up on the left side of the last parent node...
+ {
+ if(EASTL_LIKELY(pLowerBound != (node_type*)mAnchor.mpNodeLeft)) // If the landing node is not the left-most node (the usual case)...
+ {
+ // At this point, pLowerBound points to a node which is > than value.
+ // Move it back by one, so that it points to a node which is <= value.
+ pLowerBound = (node_type*)RBTreeDecrement(pLowerBound);
+ }
+ else
+ {
+ // The tree is empty or the key sorts before every existing node;
+ // there is no predecessor to test against, so insert directly.
+ const iterator itResult(DoInsertKeyImpl(pLowerBound, key, false));
+ return pair<iterator, bool>(itResult, true);
+ }
+ }
+
+ // Since here we require values to be unique, we will do nothing if the value already exists.
+ if(mCompare(extractKey(pLowerBound->mValue), key)) // If the node is < the value (i.e. if value is >= the node)...
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(pLowerBound->mValue))); // Validate that the compare function is sane.
+ const iterator itResult(DoInsertKeyImpl(pParent, key, false));
+ return pair<iterator, bool>(itResult, true);
+ }
+
+ // The item already exists (as found by the compare directly above), so return false.
+ return pair<iterator, bool>(iterator(pLowerBound), false);
+ }
+
+
+ // Key-only variant of the non-unique insert; equivalent keys walk right so
+ // a new duplicate lands after existing equivalents.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(const key_type& key, false_type) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+ extract_key extractKey;
+
+ while(pCurrent) // Walk down to the leaf that will parent the new node.
+ {
+ pRangeEnd = pCurrent;
+
+ if(mCompare(key, extractKey(pCurrent->mValue)))
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ return DoInsertKeyImpl(pRangeEnd, key, false);
+ }
+
+
+ // Hinted unique insert: the hint is usable when *position < value < *itNext,
+ // giving an O(1) attachment; otherwise we fall back to the normal O(log n)
+ // insert. A hint of end()/rightmost is handled by the tail path below.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(iterator position, const value_type& value, true_type) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ extract_key extractKey;
+
+ if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+ {
+ iterator itNext(position);
+ ++itNext;
+
+ // To consider: Change this so that 'position' specifies the position after
+ // where the insertion goes and not the position before where the insertion goes.
+ // Doing so would make this more in line with user expectations and with LWG #233.
+ const bool bPositionLessThanValue = mCompare(extractKey(position.mpNode->mValue), extractKey(value));
+
+ if(bPositionLessThanValue) // If (value > *position)...
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(extractKey(value), extractKey(position.mpNode->mValue))); // Validate that the compare function is sane.
+
+ const bool bValueLessThanNext = mCompare(extractKey(value), extractKey(itNext.mpNode->mValue));
+
+ if(bValueLessThanNext) // if (value < *itNext)...
+ {
+ // The hint brackets the value; attach it adjacent to the hint.
+ if(position.mpNode->mpNodeRight)
+ return DoInsertValueImpl(itNext.mpNode, value, true);
+ return DoInsertValueImpl(position.mpNode, value, false);
+ }
+ }
+
+ // The hint was not useful; do a regular insertion and drop the bool.
+ return DoInsertValue(value, has_unique_keys_type()).first;
+ }
+
+ // Hint was end()/rightmost: if the value is greater than the current
+ // maximum, append after the rightmost node in O(1).
+ if(mnSize && mCompare(extractKey(((node_type*)mAnchor.mpNodeRight)->mValue), extractKey(value)))
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(extractKey(value), extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))); // Validate that the compare function is sane.
+ return DoInsertValueImpl((node_type*)mAnchor.mpNodeRight, value, false);
+ }
+
+ return DoInsertValue(value, has_unique_keys_type()).first;
+ }
+
+
+ // Hinted non-unique insert: the hint is usable when
+ // *position <= value <= *itNext; otherwise a regular insert is performed.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(iterator position, const value_type& value, false_type) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ extract_key extractKey;
+
+ if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+ {
+ iterator itNext(position);
+ ++itNext;
+
+ // To consider: Change this so that 'position' specifies the position after
+ // where the insertion goes and not the position before where the insertion goes.
+ // Doing so would make this more in line with user expectations and with LWG #233.
+
+ if(!mCompare(extractKey(value), extractKey(position.mpNode->mValue)) && // If value >= *position &&
+ !mCompare(extractKey(itNext.mpNode->mValue), extractKey(value))) // if value <= *itNext...
+ {
+ if(position.mpNode->mpNodeRight) // If there are any nodes to the right... [this expression will always be true as long as we aren't at the end()]
+ return DoInsertValueImpl(itNext.mpNode, value, true); // Specifically insert in front of (to the left of) itNext (and thus after 'position').
+ return DoInsertValueImpl(position.mpNode, value, false);
+ }
+
+ return DoInsertValue(value, has_unique_keys_type()); // If the above specified hint was not useful, then we do a regular insertion.
+ }
+
+ // This pathway shouldn't be commonly executed, as the user shouldn't be calling
+ // this hinted version of insert if the user isn't providing a useful hint.
+
+ if(mnSize && !mCompare(extractKey(value), extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))) // If we are non-empty and the value is >= the last node...
+ return DoInsertValueImpl((node_type*)mAnchor.mpNodeRight, value, false); // Insert after the last node (doesn't matter if we force left or not).
+
+ return DoInsertValue(value, has_unique_keys_type()); // We are empty or we are inserting at the end.
+ }
+
+
+ // Hinted key-only unique insert: mirrors the hinted DoInsertValue but the
+ // node's value is constructed from the key alone.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(iterator position, const key_type& key, true_type) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ extract_key extractKey;
+
+ if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+ {
+ iterator itNext(position);
+ ++itNext;
+
+ // To consider: Change this so that 'position' specifies the position after
+ // where the insertion goes and not the position before where the insertion goes.
+ // Doing so would make this more in line with user expectations and with LWG #233.
+ const bool bPositionLessThanValue = mCompare(extractKey(position.mpNode->mValue), key);
+
+ if(bPositionLessThanValue) // If (value > *position)...
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(position.mpNode->mValue))); // Validate that the compare function is sane.
+
+ const bool bValueLessThanNext = mCompare(key, extractKey(itNext.mpNode->mValue));
+
+ if(bValueLessThanNext) // If value < *itNext...
+ {
+ // The hint brackets the key; attach it adjacent to the hint.
+ if(position.mpNode->mpNodeRight)
+ return DoInsertKeyImpl(itNext.mpNode, key, true);
+ return DoInsertKeyImpl(position.mpNode, key, false);
+ }
+ }
+
+ // The hint was not useful; do a regular insertion and drop the bool.
+ return DoInsertKey(key, has_unique_keys_type()).first;
+ }
+
+ // Hint was end()/rightmost: if the key is greater than the current
+ // maximum, append after the rightmost node in O(1).
+ if(mnSize && mCompare(extractKey(((node_type*)mAnchor.mpNodeRight)->mValue), key))
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))); // Validate that the compare function is sane.
+ return DoInsertKeyImpl((node_type*)mAnchor.mpNodeRight, key, false);
+ }
+
+ return DoInsertKey(key, has_unique_keys_type()).first;
+ }
+
+
+ // Hinted key-only non-unique insert: usable when *position <= key <= *itNext;
+ // otherwise a regular insert is performed.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(iterator position, const key_type& key, false_type) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ extract_key extractKey;
+
+ if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+ {
+ iterator itNext(position);
+ ++itNext;
+
+ // To consider: Change this so that 'position' specifies the position after
+ // where the insertion goes and not the position before where the insertion goes.
+ // Doing so would make this more in line with user expectations and with LWG #233.
+ if(!mCompare(key, extractKey(position.mpNode->mValue)) && // If value >= *position &&
+ !mCompare(extractKey(itNext.mpNode->mValue), key)) // if value <= *itNext...
+ {
+ if(position.mpNode->mpNodeRight) // If there are any nodes to the right... [this expression will always be true as long as we aren't at the end()]
+ return DoInsertKeyImpl(itNext.mpNode, key, true); // Specifically insert in front of (to the left of) itNext (and thus after 'position').
+ return DoInsertKeyImpl(position.mpNode, key, false);
+ }
+
+ return DoInsertKey(key, has_unique_keys_type()); // If the above specified hint was not useful, then we do a regular insertion.
+ }
+
+ // This pathway shouldn't be commonly executed, as the user shouldn't be calling
+ // this hinted version of insert if the user isn't providing a useful hint.
+ if(mnSize && !mCompare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))) // If we are non-empty and the value is >= the last node...
+ return DoInsertKeyImpl((node_type*)mAnchor.mpNodeRight, key, false); // Insert after the last node (doesn't matter if we force left or not).
+
+ return DoInsertKey(key, has_unique_keys_type()); // We are empty or we are inserting at the end.
+ }
+
+
+ // Lowest-level insertion: decides which side of pNodeParent the new node
+ // attaches to, creates the node, links and rebalances it via RBTreeInsert,
+ // and bumps the element count. Returns an iterator to the new node.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueImpl(node_type* pNodeParent, const value_type& value, bool bForceToLeft)
+ {
+ RBTreeSide side;
+ extract_key extractKey;
+
+ // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal.
+ // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report
+ // suggests that we should use the insert hint position to force an ordering. So that's what we do.
+ if(bForceToLeft || (pNodeParent == &mAnchor) || mCompare(extractKey(value), extractKey(pNodeParent->mValue)))
+ side = kRBTreeSideLeft;
+ else
+ side = kRBTreeSideRight;
+
+ node_type* const pNodeNew = DoCreateNode(value); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side);
+ mnSize++;
+
+ return iterator(pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKeyImpl(node_type* pNodeParent, const key_type& key, bool bForceToLeft)
+ {
+ RBTreeSide side;
+ extract_key extractKey;
+
+ // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal.
+ // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report
+ // suggests that we should use the insert hint position to force an ordering. So that's what we do.
+ if(bForceToLeft || (pNodeParent == &mAnchor) || mCompare(key, extractKey(pNodeParent->mValue)))
+ side = kRBTreeSideLeft;
+ else
+ side = kRBTreeSideRight;
+
+ node_type* const pNodeNew = DoCreateNodeFromKey(key); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side);
+ mnSize++;
+
+ return iterator(pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename InputIterator>
+ void rbtree<K, V, C, A, E, bM, bU>::insert(InputIterator first, InputIterator last)
+ {
+ for( ; first != last; ++first)
+ DoInsertValue(*first, has_unique_keys_type()); // Or maybe we should call 'insert(end(), *first)' instead. If the first-last range was sorted then this might make some sense.
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::clear()
+ {
+ // Erase the entire tree. DoNukeSubtree is not a
+ // conventional erase function, as it does no rebalancing.
+ DoNukeSubtree((node_type*)mAnchor.mpNodeParent);
+ reset();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::reset()
+ {
+ // The reset function is a special extension function which unilaterally
+ // resets the container to an empty state without freeing the memory of
+ // the contained objects. This is useful for very quickly tearing down a
+ // container built into scratch memory.
+ mAnchor.mpNodeRight = &mAnchor;
+ mAnchor.mpNodeLeft = &mAnchor;
+ mAnchor.mpNodeParent = NULL;
+ mAnchor.mColor = kRBTreeColorRed;
+ mnSize = 0;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(iterator position)
+ {
+ const iterator iErase(position);
+ --mnSize; // Interleave this between the two references to itNext. We expect no exceptions to occur during the code below.
+ ++position;
+ RBTreeErase(iErase.mpNode, &mAnchor);
+ DoFreeNode(iErase.mpNode);
+ return position;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(iterator first, iterator last)
+ {
+ // We expect that if the user means to clear the container, they will call clear.
+ if(EASTL_LIKELY((first.mpNode != mAnchor.mpNodeLeft) || (last.mpNode != &mAnchor))) // If (first != begin or last != end) ...
+ {
+ // Basic implementation:
+ while(first != last)
+ first = erase(first);
+ return first;
+
+ // Inlined implementation:
+ //size_type n = 0;
+ //while(first != last)
+ //{
+ // const iterator itErase(first);
+ // ++n;
+ // ++first;
+ // RBTreeErase(itErase.mpNode, &mAnchor);
+ // DoFreeNode(itErase.mpNode);
+ //}
+ //mnSize -= n;
+ //return first;
+ }
+
+ clear();
+ return iterator((node_type*)&mAnchor); // Same as: return end();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(reverse_iterator first, reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::erase(const key_type* first, const key_type* last)
+ {
+ // We have no choice but to run a loop like this, as the first/last range could
+ // have values that are discontiguously located in the tree. And some may not
+ // even be in the tree.
+ while(first != last)
+ erase(*first++);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key)
+ {
+ // To consider: Implement this instead via calling lower_bound and
+ // inspecting the result. The following is an implementation of this:
+ // const iterator it(lower_bound(key));
+ // return ((it.mpNode == &mAnchor) || mCompare(key, extractKey(it.mpNode->mValue))) ? iterator(&mAnchor) : it;
+ // We don't currently implement the above because in practice people tend to call
+ // find a lot with trees, but very uncommonly call lower_bound.
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(!mCompare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key...
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !mCompare(key, extractKey(pRangeEnd->mValue))))
+ return iterator(pRangeEnd);
+ return iterator((node_type*)&mAnchor);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->find(key));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename U, typename Compare2>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(!compare2(extractKey(pCurrent->mValue), u))) // If pCurrent is >= u...
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!compare2(u, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare2(u, extractKey(pRangeEnd->mValue))))
+ return iterator(pRangeEnd);
+ return iterator((node_type*)&mAnchor);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename U, typename Compare2>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->find_as(u, compare2));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(!mCompare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key...
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ return iterator(pRangeEnd);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->lower_bound(key));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(mCompare(key, extractKey(pCurrent->mValue)))) // If key is < pCurrent...
+ {
+ EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ return iterator(pRangeEnd);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->upper_bound(key));
+ }
+
+
// To do: Move this validate function entirely to a template-less implementation.
template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
bool rbtree<K, V, C, A, E, bM, bU>::validate() const
{
    // Returns true if the tree is internally consistent.
    // Red-black trees have the following canonical properties which we validate here:
    //   1 Every node is either red or black.
    //   2 Every leaf (NULL) is black by definition. Any number of black nodes may appear in a sequence.
    //   3 If a node is red, then both its children are black. Thus, on any path from
    //     the root to a leaf, red nodes must not be adjacent.
    //   4 Every simple path from a node to a descendant leaf contains the same number of black nodes.
    //   5 The mnSize member of the tree must equal the number of nodes in the tree.
    //   6 The tree is sorted as per a conventional binary tree.
    //   7 The comparison function is sane; it obeys strict weak ordering. If mCompare(a,b) is true, then mCompare(b,a) must be false. Both cannot be true.

    extract_key extractKey;

    if(mnSize)
    {
        // Verify basic integrity.
        //if(!mAnchor.mpNodeParent || (mAnchor.mpNodeLeft == mAnchor.mpNodeRight))
        //    return false;             // Fix this for case of empty tree.

        // The anchor's left/right links cache the minimum/maximum nodes and must agree with the tree.
        if(mAnchor.mpNodeLeft != RBTreeGetMinChild(mAnchor.mpNodeParent))
            return false;

        if(mAnchor.mpNodeRight != RBTreeGetMaxChild(mAnchor.mpNodeParent))
            return false;

        // Reference black-node count, measured along the path to the leftmost node;
        // rule #4 requires every leaf path to match this count.
        const size_t nBlackCount   = RBTreeGetBlackCount(mAnchor.mpNodeParent, mAnchor.mpNodeLeft);
        size_type    nIteratedSize = 0;

        for(const_iterator it = begin(); it != end(); ++it, ++nIteratedSize)
        {
            const node_type* const pNode      = (const node_type*)it.mpNode;
            const node_type* const pNodeRight = (const node_type*)pNode->mpNodeRight;
            const node_type* const pNodeLeft  = (const node_type*)pNode->mpNodeLeft;

            // Verify #7 above: the comparator must never report a < b and b < a simultaneously.
            if(pNodeRight && mCompare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)) && mCompare(extractKey(pNode->mValue), extractKey(pNodeRight->mValue))) // Validate that the compare function is sane.
                return false;

            // Verify #7 above.
            if(pNodeLeft && mCompare(extractKey(pNodeLeft->mValue), extractKey(pNode->mValue)) && mCompare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue))) // Validate that the compare function is sane.
                return false;

            // Verify item #1 above.
            if((pNode->mColor != kRBTreeColorRed) && (pNode->mColor != kRBTreeColorBlack))
                return false;

            // Verify item #3 above: a red node may not have a red child.
            if(pNode->mColor == kRBTreeColorRed)
            {
                if((pNodeRight && (pNodeRight->mColor == kRBTreeColorRed)) ||
                   (pNodeLeft  && (pNodeLeft->mColor  == kRBTreeColorRed)))
                    return false;
            }

            // Verify item #6 above: a right child must not sort before its parent...
            if(pNodeRight && mCompare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)))
                return false;

            // ...and a parent must not sort after its left child.
            if(pNodeLeft && mCompare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue)))
                return false;

            if(!pNodeRight && !pNodeLeft) // If we are at a bottom node of the tree...
            {
                // Verify item #4 above.
                if(RBTreeGetBlackCount(mAnchor.mpNodeParent, pNode) != nBlackCount)
                    return false;
            }
        }

        // Verify item #5 above.
        if(nIteratedSize != mnSize)
            return false;

        return true;
    }
    else
    {
        // An empty tree must have its anchor pointing at itself on both sides.
        if((mAnchor.mpNodeLeft != &mAnchor) || (mAnchor.mpNodeRight != &mAnchor))
            return false;
    }

    return true;
}
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline int rbtree<K, V, C, A, E, bM, bU>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
inline typename rbtree<K, V, C, A, E, bM, bU>::node_type*
rbtree<K, V, C, A, E, bM, bU>::DoAllocateNode()
{
    // Allocates raw storage for one node from mAllocator, honoring the value
    // type's alignment requirements. No constructor is run on the result.
    return (node_type*)allocate_memory(mAllocator, sizeof(node_type), kValueAlignment, kValueAlignmentOffset);
}
+
+
template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
inline void rbtree<K, V, C, A, E, bM, bU>::DoFreeNode(node_type* pNode)
{
    // Destroys the node (running ~node_type) and returns its storage to mAllocator.
    pNode->~node_type();
    EASTLFree(mAllocator, pNode, sizeof(node_type));
}
+
+
template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
typename rbtree<K, V, C, A, E, bM, bU>::node_type*
rbtree<K, V, C, A, E, bM, bU>::DoCreateNodeFromKey(const key_type& key)
{
    // Allocates a node and placement-constructs its value from 'key' alone.
    // Note that this function intentionally leaves the node pointers uninitialized.
    // The caller would otherwise just turn right around and modify them, so there's
    // no point in us initializing them to anything (except in a debug build).
    node_type* const pNode = DoAllocateNode();

    ::new(&pNode->mValue) value_type(key);

#if EASTL_DEBUG
    // Debug builds zero the links and set a known color so stale values are recognizable.
    pNode->mpNodeRight  = NULL;
    pNode->mpNodeLeft   = NULL;
    pNode->mpNodeParent = NULL;
    pNode->mColor       = kRBTreeColorBlack;
#endif

    return pNode;
}
+
+
template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
typename rbtree<K, V, C, A, E, bM, bU>::node_type*
rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const value_type& value)
{
    // Allocates a node and placement-copy-constructs its value from 'value'.
    // Note that this function intentionally leaves the node pointers uninitialized.
    // The caller would otherwise just turn right around and modify them, so there's
    // no point in us initializing them to anything (except in a debug build).
    node_type* const pNode = DoAllocateNode();

    ::new(&pNode->mValue) value_type(value);
#if EASTL_DEBUG
    // Debug builds zero the links and set a known color so stale values are recognizable.
    pNode->mpNodeRight  = NULL;
    pNode->mpNodeLeft   = NULL;
    pNode->mpNodeParent = NULL;
    pNode->mColor       = kRBTreeColorBlack;
#endif

    return pNode;
}
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent)
+ {
+ node_type* const pNode = DoCreateNode(pNodeSource->mValue);
+
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = pNodeParent;
+ pNode->mColor = pNodeSource->mColor;
+
+ return pNode;
+ }
+
+
template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
typename rbtree<K, V, C, A, E, bM, bU>::node_type*
rbtree<K, V, C, A, E, bM, bU>::DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest)
{
    // Clones the subtree rooted at pNodeSource, parenting the copy under
    // pNodeDest, and returns the root of the new subtree. Right subtrees are
    // copied recursively while the left spine is copied iteratively.
    node_type* const pNewNodeRoot = DoCreateNode(pNodeSource, pNodeDest);

    // Copy the right side of the tree recursively.
    if(pNodeSource->mpNodeRight)
        pNewNodeRoot->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeRoot);

    node_type* pNewNodeLeft;

    // Walk down the source's left spine, cloning each node (and, recursively,
    // its right subtree) and linking each clone as the previous clone's left child.
    for(pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeRoot;
        pNodeSource;
        pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeLeft)
    {
        pNewNodeLeft = DoCreateNode(pNodeSource, pNodeDest);

        pNodeDest->mpNodeLeft = pNewNodeLeft;

        // Copy the right side of the tree recursively.
        if(pNodeSource->mpNodeRight)
            pNewNodeLeft->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeLeft);
    }
    return pNewNodeRoot;
}
+
+
template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
void rbtree<K, V, C, A, E, bM, bU>::DoNukeSubtree(node_type* pNode)
{
    // Destroys and frees every node of the subtree rooted at pNode without any
    // rebalancing. Right subtrees are destroyed recursively while the left
    // spine is walked iteratively.
    while(pNode) // Traverse the tree and destroy items as we go.
    {
        DoNukeSubtree((node_type*)pNode->mpNodeRight);

        node_type* const pNodeLeft = (node_type*)pNode->mpNodeLeft; // Save before freeing pNode.
        DoFreeNode(pNode);
        pNode = pNodeLeft;
    }
}
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator==(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return (a.size() == b.size()) && equal(a.begin(), a.end(), b.begin());
+ }
+
+
// Note that in operator< we do comparisons based on the tree value_type with operator<() of the
// value_type instead of the tree's Compare function. For set/multiset, the value_type is T, while
// for map/multimap the value_type is a pair<Key, T>. operator< for pair can be seen by looking at
// utility.h, but it basically uses operator< on pair.first and then pair.second. The C++ standard
// appears to require this behaviour, whether intentionally or not. If anything, a good reason to do
// this is for consistency. A map and a vector that contain the same items should compare the same.
template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
inline bool operator<(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
{
    // Lexicographical comparison over the two trees' in-order sequences.
    return lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
}
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator!=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return !(a == b);
+ }
+
+
template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
inline bool operator>(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
{
    // a > b is defined as b < a.
    return b < a;
}
+
+
template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
inline bool operator<=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
{
    // a <= b is defined as !(b < a).
    return !(b < a);
}
+
+
template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
inline bool operator>=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
{
    // a >= b is defined as !(a < b).
    return !(a < b);
}
+
+
template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
inline void swap(rbtree<K, V, C, A, E, bM, bU>& a, rbtree<K, V, C, A, E, bM, bU>& b)
{
    // Non-member swap: defers to the member function.
    a.swap(b);
}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/functional.h
+// Written and maintained by Paul Pedriana - 2005
+///////////////////////////////////////////////////////////////////////////////
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // Primary C++ functions
+ ///////////////////////////////////////////////////////////////////////
+
/// unary_function
/// Base providing the standard typedefs for one-argument function objects.
template <typename Argument, typename Result>
struct unary_function
{
    typedef Argument argument_type;
    typedef Result   result_type;
};


/// binary_function
/// Base providing the standard typedefs for two-argument function objects.
template <typename Argument1, typename Argument2, typename Result>
struct binary_function
{
    typedef Argument1 first_argument_type;
    typedef Argument2 second_argument_type;
    typedef Result    result_type;
};


/// plus — function object returning (a + b).
template <typename T>
struct plus : public binary_function<T, T, T>
{
    T operator()(const T& a, const T& b) const { return a + b; }
};

/// minus — function object returning (a - b).
template <typename T>
struct minus : public binary_function<T, T, T>
{
    T operator()(const T& a, const T& b) const { return a - b; }
};

/// multiplies — function object returning (a * b).
template <typename T>
struct multiplies : public binary_function<T, T, T>
{
    T operator()(const T& a, const T& b) const { return a * b; }
};

/// divides — function object returning (a / b).
template <typename T>
struct divides : public binary_function<T, T, T>
{
    T operator()(const T& a, const T& b) const { return a / b; }
};

/// modulus — function object returning (a % b).
template <typename T>
struct modulus : public binary_function<T, T, T>
{
    T operator()(const T& a, const T& b) const { return a % b; }
};

/// negate — function object returning (-a).
template <typename T>
struct negate : public unary_function<T, T>
{
    T operator()(const T& a) const { return -a; }
};

/// equal_to — function object returning (a == b).
template <typename T>
struct equal_to : public binary_function<T, T, bool>
{
    bool operator()(const T& a, const T& b) const { return a == b; }
};

/// validate_equal_to
/// Sanity-checks that 'compare' is symmetric for (a, b): both argument orders
/// of an equality comparison must yield the same answer.
template <typename T, typename Compare>
bool validate_equal_to(const T& a, const T& b, Compare compare)
{
    const bool bAB = compare(a, b);
    const bool bBA = compare(b, a);
    return bAB == bBA;
}
+
+ template <typename T>
+ struct not_equal_to : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a != b; }
+ };
+
/// validate_not_equal_to
/// Sanity-checks that 'compare' is symmetric for (a, b): we want both argument
/// orders of a not-equal comparison to produce equal results.
template <typename T, typename Compare>
bool validate_not_equal_to(const T& a, const T& b, Compare compare)
{
    const bool bAB = compare(a, b);
    const bool bBA = compare(b, a);
    return bAB == bBA;
}
+
+ /// str_equal_to
+ ///
+ /// Compares two 0-terminated string types.
+ /// The T types are expected to be iterators or act like iterators.
+ ///
+ /// Example usage:
+ /// hash_set<const char*, eastl_hash<const char*>, str_equal_to<const char*> > stringHashSet;
+ ///
+ /// Note:
+ /// You couldn't use str_equal_to like this:
+ /// bool result = equal("hi", "hi" + 2, "ho", str_equal_to<const char*>());
+ /// This is because equal tests an array of something, with each element by
+ /// the comparison function. But str_equal_to tests an array of something itself.
+ ///
+ template <typename T>
+ struct str_equal_to : public binary_function<T, T, bool>
+ {
+ bool operator()(T a, T b) const
+ {
+ while(*a && (*a == *b))
+ {
+ ++a;
+ ++b;
+ }
+ return (*a == *b);
+ }
+ };
+
+ template <typename T>
+ struct greater : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a > b; }
+ };
+
/// validate_greater
/// Sanity-checks 'compare' as a strict ordering over (a, b): compare(a, b) and
/// compare(b, a) must not both be true. If (a > b), then !(b > a).
template <typename T, typename Compare>
bool validate_greater(const T& a, const T& b, Compare compare)
{
    return !(compare(a, b) && compare(b, a));
}
+
+ template <typename T>
+ struct less : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a < b; }
+ };
+
/// validate_less
/// Sanity-checks 'compare' as a strict ordering over (a, b): compare(a, b) and
/// compare(b, a) must not both be true. If (a < b), then !(b < a).
template <typename T, typename Compare>
bool validate_less(const T& a, const T& b, Compare compare)
{
    return !(compare(a, b) && compare(b, a));
}
+
+ template <typename T>
+ struct greater_equal : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a >= b; }
+ };
+
/// validate_greater_equal
/// Sanity-checks 'compare' over (a, b): compare(a, b) and compare(b, a) must
/// not both be true. Note this is the strictness test used throughout these
/// validators; a true >= comparator fails it for equivalent arguments.
template <typename T, typename Compare>
bool validate_greater_equal(const T& a, const T& b, Compare compare)
{
    return !(compare(a, b) && compare(b, a));
}
+
+ template <typename T>
+ struct less_equal : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a <= b; }
+ };
+
/// validate_less_equal
/// Sanity-checks 'compare' over (a, b): compare(a, b) and compare(b, a) must
/// not both be true. Note this is the strictness test used throughout these
/// validators; a true <= comparator fails it for equivalent arguments.
template <typename T, typename Compare>
bool validate_less_equal(const T& a, const T& b, Compare compare)
{
    return !(compare(a, b) && compare(b, a));
}
+
+ template <typename T>
+ struct logical_and : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a && b; }
+ };
+
+ template <typename T>
+ struct logical_or : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a || b; }
+ };
+
+ template <typename T>
+ struct logical_not : public unary_function<T, bool>
+ {
+ bool operator()(const T& a) const
+ { return !a; }
+ };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // Dual type functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename U>
+ struct equal_to_2 : public binary_function<T, U, bool>
+ {
+ bool operator()(const T& a, const U& b) const
+ { return a == b; }
+ };
+
+ template <typename T, typename U>
+ struct not_equal_to_2 : public binary_function<T, U, bool>
+ {
+ bool operator()(const T& a, const U& b) const
+ { return a != b; }
+ };
+
+ template <typename T, typename U>
+ struct less_2 : public binary_function<T, U, bool>
+ {
+ bool operator()(const T& a, const U& b) const
+ { return a < b; }
+ };
+
+
+
+
+ /// unary_negate
+ ///
+ template <typename Predicate>
+ class unary_negate : public unary_function<typename Predicate::argument_type, bool>
+ {
+ protected:
+ Predicate mPredicate;
+ public:
+ explicit unary_negate(const Predicate& a)
+ : mPredicate(a) {}
+ bool operator()(const typename Predicate::argument_type& a) const
+ { return !mPredicate(a); }
+ };
+
+ template <typename Predicate>
+ inline unary_negate<Predicate> not1(const Predicate& predicate)
+ { return unary_negate<Predicate>(predicate); }
+
+
+
+ /// binary_negate
+ ///
+ template <typename Predicate>
+ class binary_negate : public binary_function<typename Predicate::first_argument_type, typename Predicate::second_argument_type, bool>
+ {
+ protected:
+ Predicate mPredicate;
+ public:
+ explicit binary_negate(const Predicate& a)
+ : mPredicate(a) { }
+ bool operator()(const typename Predicate::first_argument_type& a, const typename Predicate::second_argument_type& b) const
+ { return !mPredicate(a, b); }
+ };
+
+ template <typename Predicate>
+ inline binary_negate<Predicate> not2(const Predicate& predicate)
+ { return binary_negate<Predicate>(predicate); }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // bind
+ ///////////////////////////////////////////////////////////////////////
+
/// bind1st
///
/// binder1st: adapter which binds a fixed value as the FIRST argument of a
/// binary operation, yielding a unary function object over the second argument.
template <typename Operation>
class binder1st : public unary_function<typename Operation::second_argument_type, typename Operation::result_type>
{
    protected:
        typename Operation::first_argument_type value; // The bound first argument.
        Operation op;                                  // The wrapped binary operation.

    public:
        // Stores the operation x and the value y to bind as its first argument.
        binder1st(const Operation& x, const typename Operation::first_argument_type& y)
            : value(y), op(x) { }

        // Invokes op(value, x). Const and non-const second-argument overloads.
        typename Operation::result_type operator()(const typename Operation::second_argument_type& x) const
            { return op(value, x); }

        typename Operation::result_type operator()(typename Operation::second_argument_type& x) const
            { return op(value, x); }
};


// Convenience maker: binds x (converted to op's first argument type) as the
// first argument of op.
template <typename Operation, typename T>
inline binder1st<Operation> bind1st(const Operation& op, const T& x)
{
    typedef typename Operation::first_argument_type value;
    return binder1st<Operation>(op, value(x));
}
+
+
/// bind2nd
///
/// binder2nd: adapter which binds a fixed value as the SECOND argument of a
/// binary operation, yielding a unary function object over the first argument.
template <typename Operation>
class binder2nd : public unary_function<typename Operation::first_argument_type, typename Operation::result_type>
{
    protected:
        Operation op;                                   // The wrapped binary operation.
        typename Operation::second_argument_type value; // The bound second argument.

    public:
        // Stores the operation x and the value y to bind as its second argument.
        binder2nd(const Operation& x, const typename Operation::second_argument_type& y)
            : op(x), value(y) { }

        // Invokes op(x, value). Const and non-const first-argument overloads.
        typename Operation::result_type operator()(const typename Operation::first_argument_type& x) const
            { return op(x, value); }

        typename Operation::result_type operator()(typename Operation::first_argument_type& x) const
            { return op(x, value); }
};


// Convenience maker: binds x (converted to op's second argument type) as the
// second argument of op.
template <typename Operation, typename T>
inline binder2nd<Operation> bind2nd(const Operation& op, const T& x)
{
    typedef typename Operation::second_argument_type value;
    return binder2nd<Operation>(op, value(x));
}
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pointer_to_unary_function
+ ///////////////////////////////////////////////////////////////////////
+
+ /// pointer_to_unary_function
+ ///
+ /// This is an adapter template which converts a pointer to a standalone
+ /// function to a function object. This allows standalone functions to
+ /// work in many cases where the system requires a function object.
+ ///
+ /// Example usage:
+ /// ptrdiff_t Rand(ptrdiff_t n) { return rand() % n; } // Note: The C rand function is poor and slow.
+ /// pointer_to_unary_function<ptrdiff_t, ptrdiff_t> randInstance(Rand);
+ /// random_shuffle(pArrayBegin, pArrayEnd, randInstance);
+ ///
+ template <typename Arg, typename Result>
+ class pointer_to_unary_function : public unary_function<Arg, Result>
+ {
+ protected:
+ Result (*mpFunction)(Arg);
+
+ public:
+ pointer_to_unary_function()
+ { }
+
+ explicit pointer_to_unary_function(Result (*pFunction)(Arg))
+ : mpFunction(pFunction) { }
+
+ Result operator()(Arg x) const
+ { return mpFunction(x); }
+ };
+
+
+ /// ptr_fun
+ ///
+ /// This ptr_fun is simply shorthand for usage of pointer_to_unary_function.
+ ///
+ /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway):
+ /// int factorial(int x) { return (x > 1) ? (x * factorial(x - 1)) : x; }
+ /// transform(pIntArrayBegin, pIntArrayEnd, pIntArrayBegin, ptr_fun(factorial));
+ ///
+ template <typename Arg, typename Result>
+ inline pointer_to_unary_function<Arg, Result> ptr_fun(Result (*pFunction)(Arg))
+ { return pointer_to_unary_function<Arg, Result>(pFunction); }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pointer_to_binary_function
+ ///////////////////////////////////////////////////////////////////////
+
+ /// pointer_to_binary_function
+ ///
+ /// This is an adapter template which converts a pointer to a standalone
+ /// function to a function object. This allows standalone functions to
+ /// work in many cases where the system requires a function object.
+ ///
+ template <typename Arg1, typename Arg2, typename Result>
+ class pointer_to_binary_function : public binary_function<Arg1, Arg2, Result>
+ {
+ protected:
+ Result (*mpFunction)(Arg1, Arg2);
+
+ public:
+ pointer_to_binary_function()
+ { }
+
+ explicit pointer_to_binary_function(Result (*pFunction)(Arg1, Arg2))
+ : mpFunction(pFunction) {}
+
+ Result operator()(Arg1 x, Arg2 y) const
+ { return mpFunction(x, y); }
+ };
+
+
+ /// This ptr_fun is simply shorthand for usage of pointer_to_binary_function.
+ ///
+ /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway):
+ /// int multiply(int x, int y) { return x * y; }
+ /// transform(pIntArray1Begin, pIntArray1End, pIntArray2Begin, pIntArray1Begin, ptr_fun(multiply));
+ ///
+ template <typename Arg1, typename Arg2, typename Result>
+ inline pointer_to_binary_function<Arg1, Arg2, Result> ptr_fun(Result (*pFunction)(Arg1, Arg2))
+ { return pointer_to_binary_function<Arg1, Arg2, Result>(pFunction); }
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // mem_fun
+ // mem_fun1
+ //
+ // Note that mem_fun calls member functions via *pointers* to classes
+ // and not instances of classes. mem_fun_ref is for calling functions
+ // via instances of classes or references to classes.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ /// mem_fun_t
+ ///
+ /// Member function with no arguments.
+ ///
+ template <typename Result, typename T>
+ class mem_fun_t : public unary_function<T*, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)();
+
+ MI_INLINE explicit mem_fun_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ MI_INLINE Result operator()(T* pT) const
+ {
+ return (pT->*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun1_t
+ ///
+ /// Member function with one argument.
+ ///
+ template <typename Result, typename T, typename Argument>
+ class mem_fun1_t : public binary_function<T*, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument);
+
+ MI_INLINE explicit mem_fun1_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ MI_INLINE Result operator()(T* pT, Argument arg) const
+ {
+ return (pT->*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun_t
+ ///
+ /// Const member function with no arguments.
+ /// Note that we inherit from unary_function<const T*, Result>
+ /// instead of what the C++ standard specifies: unary_function<T*, Result>.
+ /// The C++ standard is in error and this has been recognized by the defect group.
+ ///
+ template <typename Result, typename T>
+ class const_mem_fun_t : public unary_function<const T*, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)() const;
+
+ MI_INLINE explicit const_mem_fun_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ MI_INLINE Result operator()(const T* pT) const
+ {
+ return (pT->*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun1_t
+ ///
+ /// Const member function with one argument.
+	/// Note that we inherit from binary_function<const T*, Argument, Result>
+	/// instead of what the C++ standard specifies: binary_function<T*, Argument, Result>.
+ /// The C++ standard is in error and this has been recognized by the defect group.
+ ///
+ template <typename Result, typename T, typename Argument>
+ class const_mem_fun1_t : public binary_function<const T*, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument) const;
+
+ MI_INLINE explicit const_mem_fun1_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ MI_INLINE Result operator()(const T* pT, Argument arg) const
+ {
+ return (pT->*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun
+ ///
+ /// This is the high level interface to the mem_fun_t family.
+ ///
+ /// Example usage:
+	///     struct TestClass { void print() { puts("hello"); } };
+	///     TestClass* pTestClassArray[3] = { ... };
+	///     for_each(pTestClassArray, pTestClassArray + 3, mem_fun(&TestClass::print));
+ ///
+ template <typename Result, typename T>
+ MI_INLINE mem_fun_t<Result, T>
+ mem_fun(Result (T::*MemberFunction)())
+ {
+ return mimp::mem_fun_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ MI_INLINE mem_fun1_t<Result, T, Argument>
+ mem_fun(Result (T::*MemberFunction)(Argument))
+ {
+ return mimp::mem_fun1_t<Result, T, Argument>(MemberFunction);
+ }
+
+ template <typename Result, typename T>
+ MI_INLINE const_mem_fun_t<Result, T>
+ mem_fun(Result (T::*MemberFunction)() const)
+ {
+ return mimp::const_mem_fun_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ MI_INLINE const_mem_fun1_t<Result, T, Argument>
+ mem_fun(Result (T::*MemberFunction)(Argument) const)
+ {
+ return mimp::const_mem_fun1_t<Result, T, Argument>(MemberFunction);
+ }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // mem_fun_ref
+ // mem_fun1_ref
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ /// mem_fun_ref_t
+ ///
+ template <typename Result, typename T>
+ class mem_fun_ref_t : public unary_function<T, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)();
+
+ MI_INLINE explicit mem_fun_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ MI_INLINE Result operator()(T& t) const
+ {
+ return (t.*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun1_ref_t
+ ///
+ template <typename Result, typename T, typename Argument>
+ class mem_fun1_ref_t : public binary_function<T, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument);
+
+ MI_INLINE explicit mem_fun1_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ MI_INLINE Result operator()(T& t, Argument arg) const
+ {
+ return (t.*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun_ref_t
+ ///
+ template <typename Result, typename T>
+ class const_mem_fun_ref_t : public unary_function<T, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)() const;
+
+ MI_INLINE explicit const_mem_fun_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ MI_INLINE Result operator()(const T& t) const
+ {
+ return (t.*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun1_ref_t
+ ///
+ template <typename Result, typename T, typename Argument>
+ class const_mem_fun1_ref_t : public binary_function<T, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument) const;
+
+ MI_INLINE explicit const_mem_fun1_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ MI_INLINE Result operator()(const T& t, Argument arg) const
+ {
+ return (t.*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun_ref
+ /// Example usage:
+	///     struct TestClass { void print() { puts("hello"); } };
+	///     TestClass testClassArray[3];
+	///     for_each(testClassArray, testClassArray + 3, mem_fun_ref(&TestClass::print));
+ ///
+ template <typename Result, typename T>
+ MI_INLINE mem_fun_ref_t<Result, T>
+ mem_fun_ref(Result (T::*MemberFunction)())
+ {
+ return mimp::mem_fun_ref_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ MI_INLINE mem_fun1_ref_t<Result, T, Argument>
+ mem_fun_ref(Result (T::*MemberFunction)(Argument))
+ {
+ return mimp::mem_fun1_ref_t<Result, T, Argument>(MemberFunction);
+ }
+
+ template <typename Result, typename T>
+ MI_INLINE const_mem_fun_ref_t<Result, T>
+ mem_fun_ref(Result (T::*MemberFunction)() const)
+ {
+ return mimp::const_mem_fun_ref_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ MI_INLINE const_mem_fun1_ref_t<Result, T, Argument>
+ mem_fun_ref(Result (T::*MemberFunction)(Argument) const)
+ {
+ return mimp::const_mem_fun1_ref_t<Result, T, Argument>(MemberFunction);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // eastl_hash
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T> struct eastl_hash;
+
+ template <typename T> struct eastl_hash<T*> // Note that we use the pointer as-is and don't divide by sizeof(T*). This is because the table is of a prime size and this division doesn't benefit distribution.
+ { size_t operator()(T* p) const { return size_t(uintptr_t(p)); } };
+
+ template <> struct eastl_hash<bool>
+ { size_t operator()(bool val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<char>
+ { size_t operator()(char val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<signed char>
+ { size_t operator()(signed char val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<unsigned char>
+ { size_t operator()(unsigned char val) const { return static_cast<size_t>(val); } };
+
+#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+ template <> struct eastl_hash<wchar_t>
+ { size_t operator()(wchar_t val) const { return static_cast<size_t>(val); } };
+#endif
+
+ template <> struct eastl_hash<signed short>
+ { size_t operator()(short val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<unsigned short>
+ { size_t operator()(unsigned short val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<signed int>
+ { size_t operator()(signed int val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<unsigned int>
+ { size_t operator()(unsigned int val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<signed long>
+ { size_t operator()(signed long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<unsigned long>
+ { size_t operator()(unsigned long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<signed long long>
+ { size_t operator()(signed long long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<unsigned long long>
+ { size_t operator()(unsigned long long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<float>
+ { size_t operator()(float val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<double>
+ { size_t operator()(double val) const { return static_cast<size_t>(val); } };
+
+ template <> struct eastl_hash<long double>
+ { size_t operator()(long double val) const { return static_cast<size_t>(val); } };
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // string hashes
+ //
+ // Note that our string hashes here intentionally are slow for long strings.
+ // The reasoning for this is so:
+ // - The large majority of hashed strings are only a few bytes long.
+ // - The eastl_hash function is significantly more efficient if it can make this assumption.
+ // - The user is welcome to make a custom eastl_hash for those uncommon cases where
+ // long strings need to be hashed. Indeed, the user can probably make a
+ // special eastl_hash customized for such strings that's better than what we provide.
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <> struct eastl_hash<mimp::MiI8*>
+ {
+ size_t operator()(const mimp::MiI8* p) const
+ {
+ size_t c, result = 2166136261U; // FNV1 eastl_hash. Perhaps the best string eastl_hash.
+ while((c = (mimp::MiU8)*p++) != 0) // Using '!=' disables compiler warnings.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct eastl_hash<const mimp::MiI8*>
+ {
+ size_t operator()(const mimp::MiI8* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (mimp::MiU8)*p++) != 0) // cast to unsigned 8 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct eastl_hash<mimp::MiI16*>
+ {
+ size_t operator()(const mimp::MiI16* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (MiU16)*p++) != 0) // cast to unsigned 16 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct eastl_hash<const mimp::MiI16*>
+ {
+ size_t operator()(const mimp::MiI16* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (MiU16)*p++) != 0) // cast to unsigned 16 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct eastl_hash<mimp::MiU32*>
+ {
+ size_t operator()(const mimp::MiU32* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (mimp::MiU32)*p++) != 0) // cast to unsigned 32 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct eastl_hash<const mimp::MiU32*>
+ {
+ size_t operator()(const mimp::MiU32* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (mimp::MiU32)*p++) != 0) // cast to unsigned 32 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ /// string_hash
+ ///
+ /// Defines a generic string eastl_hash for an arbitrary EASTL basic_string container.
+ ///
+ /// Example usage:
+ /// mimp::hash_set<MyString, mimp::string_hash<MyString> > hashSet;
+ ///
+ template <typename String>
+ struct string_hash
+ {
+ typedef String string_type;
+ typedef typename String::value_type value_type;
+ typedef typename mimp::add_unsigned<value_type>::type unsigned_value_type;
+
+ size_t operator()(const string_type& s) const
+ {
+ const unsigned_value_type* p = (const unsigned_value_type*)s.c_str();
+ size_t c, result = 2166136261U;
+ while((c = *p++) != 0)
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+
+} // namespace mimp
+
+#pragma warning(pop)
+
+#endif // include guard
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiMath.h b/APEX_1.4/shared/general/meshimport/include/utils/MiMath.h
new file mode 100644
index 00000000..93366485
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiMath.h
@@ -0,0 +1,2128 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_MATH_H
+
+#define MI_MATH_H
+
+#include <math.h>
+#include <float.h>
+#include "MeshImportTypes.h"
+#include "MiPlatformConfig.h"
+
+using namespace mimp;
+
+#define MiSqrt sqrtf
+#define MiIsFinite(x) _finite(x)
+#define MiAbs(x) fabsf(x)
+#define MiRecipSqrt(x) (1.0f/MiSqrt(x))
+#define MiSin(x) sinf(x)
+#define MiCos(x) cosf(x)
+#define MiAtan2(x,y) atan2f(x,y)
+#define MiAcos(x) acosf(x)
+
+
+namespace mimp
+{
+ // constants
+ static const MiF32 MiPi = 3.141592653589793f;
+ static const MiF32 MiHalfPi = 1.57079632679489661923f;
+ static const MiF32 MiTwoPi = 6.28318530717958647692f;
+ static const MiF32 MiInvPi = 0.31830988618379067154f;
+
+
+
+
+ MI_FORCE_INLINE MiF32 MiMax(MiF32 a,MiF32 b)
+ {
+ return a < b ? b : a;
+ }
+
+ MI_FORCE_INLINE MiF32 MiMin(MiF32 a,MiF32 b)
+ {
+ return a < b ? a : b;
+ }
+/**
+\brief 2 Element vector class.
+
+This is a vector class with public data members.
+This is not nice but it has become such a standard that hiding the xy data members
+makes it difficult to reuse external code that assumes that these are public in the library.
+The vector class can be made to use float or double precision by appropriately defining MiF32.
+This has been chosen as a cleaner alternative to a template class.
+*/
+class MiVec2
+{
+public:
+
+ /**
+ \brief default constructor leaves data uninitialized.
+ */
+ MI_FORCE_INLINE MiVec2() {}
+
+ /**
+ \brief Assigns scalar parameter to all elements.
+
+ Useful to initialize to zero or one.
+
+ \param[in] a Value to assign to elements.
+ */
+ explicit MI_FORCE_INLINE MiVec2(MiF32 a): x(a), y(a) {}
+
+ /**
+ \brief Initializes from 2 scalar parameters.
+
+ \param[in] nx Value to initialize X component.
+ \param[in] ny Value to initialize Y component.
+ */
+ MI_FORCE_INLINE MiVec2(MiF32 nx, MiF32 ny): x(nx), y(ny){}
+
+ /**
+ \brief Copy ctor.
+ */
+ MI_FORCE_INLINE MiVec2(const MiVec2& v): x(v.x), y(v.y) {}
+
+ //Operators
+
+ /**
+ \brief Assignment operator
+ */
+ MI_FORCE_INLINE MiVec2& operator=(const MiVec2& p) { x = p.x; y = p.y; return *this; }
+
+ /**
+ \brief element access
+ */
+ MI_FORCE_INLINE MiF32& operator[](int index) { MI_ASSERT(index>=0 && index<=1); return (&x)[index]; }
+
+ /**
+ \brief element access
+ */
+ MI_FORCE_INLINE const MiF32& operator[](int index) const { MI_ASSERT(index>=0 && index<=1); return (&x)[index]; }
+
+ /**
+ \brief returns true if the two vectors are exactly equal.
+ */
+ MI_FORCE_INLINE bool operator==(const MiVec2&v) const { return x == v.x && y == v.y; }
+
+ /**
+ \brief returns true if the two vectors are not exactly equal.
+ */
+ MI_FORCE_INLINE bool operator!=(const MiVec2&v) const { return x != v.x || y != v.y; }
+
+ /**
+ \brief tests for exact zero vector
+ */
+ MI_FORCE_INLINE bool isZero() const { return x==0.0f && y==0.0f; }
+
+ /**
+ \brief returns true if all 2 elems of the vector are finite (not NAN or INF, etc.)
+ */
+ MI_INLINE bool isFinite() const
+ {
+ return MiIsFinite(x) && MiIsFinite(y);
+ }
+
+ /**
+ \brief is normalized - used by API parameter validation
+ */
+ MI_FORCE_INLINE bool isNormalized() const
+ {
+ const float unitTolerance = MiF32(1e-4);
+ return isFinite() && MiAbs(magnitude()-1)<unitTolerance;
+ }
+
+ /**
+ \brief returns the squared magnitude
+
+ Avoids calling MiSqrt()!
+ */
+ MI_FORCE_INLINE MiF32 magnitudeSquared() const { return x * x + y * y; }
+
+ /**
+ \brief returns the magnitude
+ */
+ MI_FORCE_INLINE MiF32 magnitude() const { return MiSqrt(magnitudeSquared()); }
+
+ /**
+ \brief negation
+ */
+ MI_FORCE_INLINE MiVec2 operator -() const
+ {
+ return MiVec2(-x, -y);
+ }
+
+ /**
+ \brief vector addition
+ */
+ MI_FORCE_INLINE MiVec2 operator +(const MiVec2& v) const { return MiVec2(x + v.x, y + v.y); }
+
+ /**
+ \brief vector difference
+ */
+ MI_FORCE_INLINE MiVec2 operator -(const MiVec2& v) const { return MiVec2(x - v.x, y - v.y); }
+
+ /**
+ \brief scalar post-multiplication
+ */
+ MI_FORCE_INLINE MiVec2 operator *(MiF32 f) const { return MiVec2(x * f, y * f); }
+
+ /**
+ \brief scalar division
+ */
+ MI_FORCE_INLINE MiVec2 operator /(MiF32 f) const
+ {
+ f = MiF32(1) / f; // PT: inconsistent notation with operator /=
+ return MiVec2(x * f, y * f);
+ }
+
+ /**
+ \brief vector addition
+ */
+ MI_FORCE_INLINE MiVec2& operator +=(const MiVec2& v)
+ {
+ x += v.x;
+ y += v.y;
+ return *this;
+ }
+
+ /**
+ \brief vector difference
+ */
+ MI_FORCE_INLINE MiVec2& operator -=(const MiVec2& v)
+ {
+ x -= v.x;
+ y -= v.y;
+ return *this;
+ }
+
+ /**
+ \brief scalar multiplication
+ */
+ MI_FORCE_INLINE MiVec2& operator *=(MiF32 f)
+ {
+ x *= f;
+ y *= f;
+ return *this;
+ }
+ /**
+ \brief scalar division
+ */
+ MI_FORCE_INLINE MiVec2& operator /=(MiF32 f)
+ {
+ f = 1.0f/f; // PT: inconsistent notation with operator /
+ x *= f;
+ y *= f;
+ return *this;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ MI_FORCE_INLINE MiF32 dot(const MiVec2& v) const
+ {
+ return x * v.x + y * v.y;
+ }
+
+ /** return a unit vector */
+
+ MI_FORCE_INLINE MiVec2 getNormalized() const
+ {
+ const MiF32 m = magnitudeSquared();
+ return m>0 ? *this * MiRecipSqrt(m) : MiVec2(0,0);
+ }
+
+ /**
+ \brief normalizes the vector in place
+ */
+ MI_FORCE_INLINE MiF32 normalize()
+ {
+ const MiF32 m = magnitude();
+ if (m>0)
+ *this /= m;
+ return m;
+ }
+
+ /**
+ \brief a[i] * b[i], for all i.
+ */
+ MI_FORCE_INLINE MiVec2 multiply(const MiVec2& a) const
+ {
+ return MiVec2(x*a.x, y*a.y);
+ }
+
+ /**
+ \brief element-wise minimum
+ */
+ MI_FORCE_INLINE MiVec2 minimum(const MiVec2& v) const
+ {
+ return MiVec2(MiMin(x, v.x), MiMin(y,v.y));
+ }
+
+ /**
+ \brief returns MIN(x, y);
+ */
+ MI_FORCE_INLINE float minElement() const
+ {
+ return MiMin(x, y);
+ }
+
+ /**
+ \brief element-wise maximum
+ */
+ MI_FORCE_INLINE MiVec2 maximum(const MiVec2& v) const
+ {
+ return MiVec2(MiMax(x, v.x), MiMax(y,v.y));
+ }
+
+ /**
+ \brief returns MAX(x, y);
+ */
+ MI_FORCE_INLINE float maxElement() const
+ {
+ return MiMax(x, y);
+ }
+
+ MiF32 x,y;
+};
+
+ static MI_FORCE_INLINE MiVec2 operator *(MiF32 f, const MiVec2& v)
+{
+ return MiVec2(f * v.x, f * v.y);
+}
+
+/**
+\brief 3 Element vector class.
+
+This is a vector class with public data members.
+This is not nice but it has become such a standard that hiding the xyz data members
+makes it difficult to reuse external code that assumes that these are public in the library.
+The vector class can be made to use float or double precision by appropriately defining MiF32.
+This has been chosen as a cleaner alternative to a template class.
+*/
+class MiVec3
+{
+public:
+
+ /**
+ \brief default constructor leaves data uninitialized.
+ */
+ MI_FORCE_INLINE MiVec3() {}
+
+ /**
+ \brief Assigns scalar parameter to all elements.
+
+ Useful to initialize to zero or one.
+
+ \param[in] a Value to assign to elements.
+ */
+ explicit MI_FORCE_INLINE MiVec3(MiF32 a): x(a), y(a), z(a) {}
+
+ /**
+ \brief Initializes from 3 scalar parameters.
+
+ \param[in] nx Value to initialize X component.
+ \param[in] ny Value to initialize Y component.
+ \param[in] nz Value to initialize Z component.
+ */
+ MI_FORCE_INLINE MiVec3(MiF32 nx, MiF32 ny, MiF32 nz): x(nx), y(ny), z(nz) {}
+
+ /**
+ \brief Copy ctor.
+ */
+ MI_FORCE_INLINE MiVec3(const MiVec3& v): x(v.x), y(v.y), z(v.z) {}
+
+ //Operators
+
+ /**
+ \brief Assignment operator
+ */
+ MI_FORCE_INLINE MiVec3& operator=(const MiVec3& p) { x = p.x; y = p.y; z = p.z; return *this; }
+
+ /**
+ \brief element access
+ */
+ MI_FORCE_INLINE MiF32& operator[](int index) { MI_ASSERT(index>=0 && index<=2); return (&x)[index]; }
+
+ /**
+ \brief element access
+ */
+ MI_FORCE_INLINE const MiF32& operator[](int index) const { MI_ASSERT(index>=0 && index<=2); return (&x)[index]; }
+
+ /**
+ \brief returns true if the two vectors are exactly equal.
+ */
+ MI_FORCE_INLINE bool operator==(const MiVec3&v) const { return x == v.x && y == v.y && z == v.z; }
+
+ /**
+ \brief returns true if the two vectors are not exactly equal.
+ */
+ MI_FORCE_INLINE bool operator!=(const MiVec3&v) const { return x != v.x || y != v.y || z != v.z; }
+
+ /**
+ \brief tests for exact zero vector
+ */
+ MI_FORCE_INLINE bool isZero() const { return x==0.0f && y==0.0f && z == 0.0f; }
+
+ /**
+ \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.)
+ */
+ MI_INLINE bool isFinite() const
+ {
+ return MiIsFinite(x) && MiIsFinite(y) && MiIsFinite(z);
+ }
+
+ /**
+ \brief is normalized - used by API parameter validation
+ */
+ MI_FORCE_INLINE bool isNormalized() const
+ {
+ const float unitTolerance = MiF32(1e-4);
+ return isFinite() && MiAbs(magnitude()-1)<unitTolerance;
+ }
+
+ /**
+ \brief returns the squared magnitude
+
+ Avoids calling MiSqrt()!
+ */
+ MI_FORCE_INLINE MiF32 magnitudeSquared() const { return x * x + y * y + z * z; }
+
+ /**
+ \brief returns the magnitude
+ */
+ MI_FORCE_INLINE MiF32 magnitude() const { return MiSqrt(magnitudeSquared()); }
+
+ /**
+ \brief negation
+ */
+ MI_FORCE_INLINE MiVec3 operator -() const
+ {
+ return MiVec3(-x, -y, -z);
+ }
+
+ /**
+ \brief vector addition
+ */
+ MI_FORCE_INLINE MiVec3 operator +(const MiVec3& v) const { return MiVec3(x + v.x, y + v.y, z + v.z); }
+
+ /**
+ \brief vector difference
+ */
+ MI_FORCE_INLINE MiVec3 operator -(const MiVec3& v) const { return MiVec3(x - v.x, y - v.y, z - v.z); }
+
+ /**
+ \brief scalar post-multiplication
+ */
+ MI_FORCE_INLINE MiVec3 operator *(MiF32 f) const { return MiVec3(x * f, y * f, z * f); }
+
+ /**
+ \brief scalar division
+ */
+ MI_FORCE_INLINE MiVec3 operator /(MiF32 f) const
+ {
+ f = MiF32(1) / f; // PT: inconsistent notation with operator /=
+ return MiVec3(x * f, y * f, z * f);
+ }
+
+ /**
+ \brief vector addition
+ */
+ MI_FORCE_INLINE MiVec3& operator +=(const MiVec3& v)
+ {
+ x += v.x;
+ y += v.y;
+ z += v.z;
+ return *this;
+ }
+
+ /**
+ \brief vector difference
+ */
+ MI_FORCE_INLINE MiVec3& operator -=(const MiVec3& v)
+ {
+ x -= v.x;
+ y -= v.y;
+ z -= v.z;
+ return *this;
+ }
+
+ /**
+ \brief scalar multiplication
+ */
+ MI_FORCE_INLINE MiVec3& operator *=(MiF32 f)
+ {
+ x *= f;
+ y *= f;
+ z *= f;
+ return *this;
+ }
+ /**
+ \brief scalar division
+ */
+ MI_FORCE_INLINE MiVec3& operator /=(MiF32 f)
+ {
+ f = 1.0f/f; // PT: inconsistent notation with operator /
+ x *= f;
+ y *= f;
+ z *= f;
+ return *this;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ MI_FORCE_INLINE MiF32 dot(const MiVec3& v) const
+ {
+ return x * v.x + y * v.y + z * v.z;
+ }
+
+ /**
+ \brief cross product
+ */
+ MI_FORCE_INLINE MiVec3 cross(const MiVec3& v) const
+ {
+ return MiVec3(y * v.z - z * v.y,
+ z * v.x - x * v.z,
+ x * v.y - y * v.x);
+ }
+
+ /** return a unit vector */
+
+ MI_FORCE_INLINE MiVec3 getNormalized() const
+ {
+ const MiF32 m = magnitudeSquared();
+ return m>0 ? *this * MiRecipSqrt(m) : MiVec3(0,0,0);
+ }
+
+ /**
+ \brief normalizes the vector in place
+ */
+ MI_FORCE_INLINE MiF32 normalize()
+ {
+ const MiF32 m = magnitude();
+ if (m>0)
+ *this /= m;
+ return m;
+ }
+
+ /**
+ \brief a[i] * b[i], for all i.
+ */
+ MI_FORCE_INLINE MiVec3 multiply(const MiVec3& a) const
+ {
+ return MiVec3(x*a.x, y*a.y, z*a.z);
+ }
+
+ /**
+ \brief element-wise minimum
+ */
+ MI_FORCE_INLINE MiVec3 minimum(const MiVec3& v) const
+ {
+ return MiVec3(MiMin(x, v.x), MiMin(y,v.y), MiMin(z,v.z));
+ }
+
+ /**
+ \brief returns MIN(x, y, z);
+ */
+ MI_FORCE_INLINE float minElement() const
+ {
+ return MiMin(x, MiMin(y, z));
+ }
+
+ /**
+ \brief element-wise maximum
+ */
+ MI_FORCE_INLINE MiVec3 maximum(const MiVec3& v) const
+ {
+ return MiVec3(MiMax(x, v.x), MiMax(y,v.y), MiMax(z,v.z));
+ }
+
+ /**
+ \brief returns MAX(x, y, z);
+ */
+ MI_FORCE_INLINE float maxElement() const
+ {
+ return MiMax(x, MiMax(y, z));
+ }
+
+ MiF32 x,y,z;
+};
+
+ static MI_FORCE_INLINE MiVec3 operator *(MiF32 f, const MiVec3& v)
+{
+ return MiVec3(f * v.x, f * v.y, f * v.z);
+}
+
+
+class MiVec4
+{
+public:
+
+ /**
+ \brief default constructor leaves data uninitialized.
+ */
+ MI_INLINE MiVec4() {}
+
+ /**
+ \brief Assigns scalar parameter to all elements.
+
+ Useful to initialize to zero or one.
+
+ \param[in] a Value to assign to elements.
+ */
+ explicit MI_INLINE MiVec4(MiF32 a): x(a), y(a), z(a), w(a) {}
+
+ /**
+	\brief Initializes from 4 scalar parameters.
+
+ \param[in] nx Value to initialize X component.
+ \param[in] ny Value to initialize Y component.
+ \param[in] nz Value to initialize Z component.
+ \param[in] nw Value to initialize W component.
+ */
+ MI_INLINE MiVec4(MiF32 nx, MiF32 ny, MiF32 nz, MiF32 nw): x(nx), y(ny), z(nz), w(nw) {}
+
+
+ /**
+	\brief Initializes from a 3-component vector and a scalar W component.
+
+ \param[in] v Value to initialize the X, Y, and Z components.
+ \param[in] nw Value to initialize W component.
+ */
+ MI_INLINE MiVec4(const MiVec3& v, MiF32 nw): x(v.x), y(v.y), z(v.z), w(nw) {}
+
+
+ /**
+ \brief Initializes from an array of scalar parameters.
+
+ \param[in] v Value to initialize with.
+ */
+ explicit MI_INLINE MiVec4(const MiF32 v[]): x(v[0]), y(v[1]), z(v[2]), w(v[3]) {}
+
+ /**
+ \brief Copy ctor.
+ */
+ MI_INLINE MiVec4(const MiVec4& v): x(v.x), y(v.y), z(v.z), w(v.w) {}
+
+ //Operators
+
+ /**
+ \brief Assignment operator
+ */
+ MI_INLINE MiVec4& operator=(const MiVec4& p) { x = p.x; y = p.y; z = p.z; w = p.w; return *this; }
+
+ /**
+ \brief element access
+ */
+ MI_INLINE MiF32& operator[](int index) { MI_ASSERT(index>=0 && index<=3); return (&x)[index]; }
+
+ /**
+ \brief element access
+ */
+ MI_INLINE const MiF32& operator[](int index) const { MI_ASSERT(index>=0 && index<=3); return (&x)[index]; }
+
+ /**
+ \brief returns true if the two vectors are exactly equal.
+ */
+ MI_INLINE bool operator==(const MiVec4&v) const { return x == v.x && y == v.y && z == v.z && w == v.w; }
+
+ /**
+ \brief returns true if the two vectors are not exactly equal.
+ */
+ MI_INLINE bool operator!=(const MiVec4&v) const { return x != v.x || y != v.y || z != v.z || w!= v.w; }
+
+ /**
+ \brief tests for exact zero vector
+ */
+ MI_INLINE bool isZero() const { return x==0 && y==0 && z == 0 && w == 0; }
+
+ /**
+	\brief returns true if all 4 elems of the vector are finite (not NAN or INF, etc.)
+ */
+ MI_INLINE bool isFinite() const
+ {
+ return MiIsFinite(x) && MiIsFinite(y) && MiIsFinite(z) && MiIsFinite(w);
+ }
+
+ /**
+ \brief is normalized - used by API parameter validation
+ */
+ MI_INLINE bool isNormalized() const
+ {
+ const float unitTolerance = MiF32(1e-4);
+ return isFinite() && MiAbs(magnitude()-1)<unitTolerance;
+ }
+
+
+ /**
+ \brief returns the squared magnitude
+
+ Avoids calling MiSqrt()!
+ */
+ MI_INLINE MiF32 magnitudeSquared() const { return x * x + y * y + z * z + w * w; }
+
+ /**
+ \brief returns the magnitude
+ */
+ MI_INLINE MiF32 magnitude() const { return MiSqrt(magnitudeSquared()); }
+
+ /**
+ \brief negation
+ */
+ MI_INLINE MiVec4 operator -() const
+ {
+ return MiVec4(-x, -y, -z, -w);
+ }
+
+ /**
+ \brief vector addition
+ */
+ MI_INLINE MiVec4 operator +(const MiVec4& v) const { return MiVec4(x + v.x, y + v.y, z + v.z, w + v.w); }
+
+ /**
+ \brief vector difference
+ */
+ MI_INLINE MiVec4 operator -(const MiVec4& v) const { return MiVec4(x - v.x, y - v.y, z - v.z, w - v.w); }
+
+ /**
+ \brief scalar post-multiplication
+ */
+
+ MI_INLINE MiVec4 operator *(MiF32 f) const { return MiVec4(x * f, y * f, z * f, w * f); }
+
+ /**
+ \brief scalar division
+ */
+ MI_INLINE MiVec4 operator /(MiF32 f) const
+ {
+ f = MiF32(1) / f;
+ return MiVec4(x * f, y * f, z * f, w * f);
+ }
+
+ /**
+ \brief vector addition
+ */
+ MI_INLINE MiVec4& operator +=(const MiVec4& v)
+ {
+ x += v.x;
+ y += v.y;
+ z += v.z;
+ w += v.w;
+ return *this;
+ }
+
+ /**
+ \brief vector difference
+ */
+ MI_INLINE MiVec4& operator -=(const MiVec4& v)
+ {
+ x -= v.x;
+ y -= v.y;
+ z -= v.z;
+ w -= v.w;
+ return *this;
+ }
+
+ /**
+ \brief scalar multiplication
+ */
+ MI_INLINE MiVec4& operator *=(MiF32 f)
+ {
+ x *= f;
+ y *= f;
+ z *= f;
+ w *= f;
+ return *this;
+ }
+ /**
+ \brief scalar division
+ */
+ MI_INLINE MiVec4& operator /=(MiF32 f)
+ {
+ f = 1.0f/f;
+ x *= f;
+ y *= f;
+ z *= f;
+ w *= f;
+ return *this;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ MI_INLINE MiF32 dot(const MiVec4& v) const
+ {
+ return x * v.x + y * v.y + z * v.z + w * v.w;
+ }
+
+ /** return a unit vector */
+
+ MI_INLINE MiVec4 getNormalized() const
+ {
+ MiF32 m = magnitudeSquared();
+ return m>0 ? *this * MiRecipSqrt(m) : MiVec4(0,0,0,0);
+ }
+
+
+ /**
+ \brief normalizes the vector in place
+ */
+ MI_INLINE MiF32 normalize()
+ {
+ MiF32 m = magnitude();
+ if (m>0)
+ *this /= m;
+ return m;
+ }
+
+ /**
+ \brief a[i] * b[i], for all i.
+ */
+ MI_INLINE MiVec4 multiply(const MiVec4& a) const
+ {
+ return MiVec4(x*a.x, y*a.y, z*a.z, w*a.w);
+ }
+
+ /**
+ \brief element-wise minimum
+ */
+ MI_INLINE MiVec4 minimum(const MiVec4& v) const
+ {
+ return MiVec4(MiMin(x, v.x), MiMin(y,v.y), MiMin(z,v.z), MiMin(w,v.w));
+ }
+
+ /**
+ \brief element-wise maximum
+ */
+ MI_INLINE MiVec4 maximum(const MiVec4& v) const
+ {
+ return MiVec4(MiMax(x, v.x), MiMax(y,v.y), MiMax(z,v.z), MiMax(w,v.w));
+ }
+
+	/** \brief returns the first three components as a MiVec3 (drops w) */
+	MI_INLINE MiVec3 getXYZ() const
+	{
+		return MiVec3(x,y,z);
+	}
+
+ /**
+ \brief set vector elements to zero
+ */
+ MI_INLINE void setZero() { x = y = z = w = MiF32(0); }
+
+ MiF32 x,y,z,w;
+};
+
+
+/** \brief scalar pre-multiplication: f * v (commutative counterpart of MiVec4::operator*) */
+ static MI_INLINE MiVec4 operator *(MiF32 f, const MiVec4& v)
+{
+	return MiVec4(f * v.x, f * v.y, f * v.z, f * v.w);
+}
+
+/**
+\brief This is a quaternion class. For more information on quaternion mathematics
+consult a mathematics source on complex numbers.
+
+*/
+
+class MiMat33;
+
+class MiQuat
+{
+public:
+	/**
+	\brief Default constructor, does not do any initialization.
+	*/
+	MI_FORCE_INLINE MiQuat()	{	}
+
+
+	/**
+	\brief Constructor.  Take note of the order of the elements!
+	(imaginary x,y,z first, real part w last)
+	*/
+	MI_FORCE_INLINE MiQuat(MiF32 nx, MiF32 ny, MiF32 nz, MiF32 nw) : x(nx),y(ny),z(nz),w(nw) {}
+
+	/**
+	\brief Creates from angle-axis representation.
+
+	Axis must be normalized!
+
+	Angle is in radians!
+
+	<b>Unit:</b> Radians
+	*/
+	MI_INLINE MiQuat(MiF32 angleRadians, const MiVec3& unitAxis)
+	{
+		MI_ASSERT(MiAbs(1.0f-unitAxis.magnitude())<1e-3f);
+		const MiF32 a = angleRadians * 0.5f;
+		const MiF32 s = MiSin(a);
+		w = MiCos(a);
+		x = unitAxis.x * s;
+		y = unitAxis.y * s;
+		z = unitAxis.z * s;
+	}
+
+	/**
+	\brief Copy ctor.
+	*/
+	MI_FORCE_INLINE MiQuat(const MiQuat& v): x(v.x), y(v.y), z(v.z), w(v.w) {}
+
+	/**
+	\brief Creates from orientation matrix.
+
+	\param[in] m Rotation matrix to extract quaternion from.
+	*/
+	MI_INLINE explicit MiQuat(const MiMat33& m); /* defined in MiMat33.h */
+
+	/**
+	\brief returns true if all elements are finite (not NAN or INF, etc.)
+	*/
+	bool isFinite() const
+	{
+		return MiIsFinite(x) 
+			&& MiIsFinite(y) 
+			&& MiIsFinite(z)
+			&& MiIsFinite(w);
+	}
+
+	/**
+	\brief returns true if finite and magnitude is close to unit (tolerance 1e-4)
+	*/
+
+	bool isValid() const
+	{
+		const MiF32 unitTolerance = MiF32(1e-4);
+		return isFinite() && MiAbs(magnitude()-1)<unitTolerance;
+	}
+
+	/**
+	\brief returns true if finite and magnitude is reasonably close to unit to allow for some accumulation of error vs isValid
+	(tolerance 1e-2)
+	*/
+
+	bool isSane() const
+	{
+	      const MiF32 unitTolerance = MiF32(1e-2);
+	      return isFinite() && MiAbs(magnitude()-1)<unitTolerance;
+	}
+
+	/**
+	\brief converts this quaternion to angle-axis representation.
+	When the imaginary part is (near) zero no axis can be extracted,
+	so (angle=0, axis=(1,0,0)) is returned.
+	*/
+
+	MI_INLINE void toRadiansAndUnitAxis(MiF32& angle, MiVec3& axis) const
+	{
+		const MiF32 quatEpsilon = (MiF32(1.0e-8f));
+		const MiF32 s2 = x*x+y*y+z*z;
+		if(s2<quatEpsilon*quatEpsilon)  // can't extract a sensible axis
+		{
+			angle = 0;
+			axis = MiVec3(1,0,0);
+		}
+		else
+		{
+			const MiF32 s = MiRecipSqrt(s2);
+			axis = MiVec3(x,y,z) * s; 
+			// NOTE(review): when w is below quatEpsilon the angle is pinned to pi
+			// rather than computed via atan2 — presumably to avoid precision loss
+			// near a half-turn; confirm against the PhysX PxQuat reference.
+			angle = w<quatEpsilon ? MiPi : MiAtan2(s2*s, w) * 2;
+		}
+
+	}
+
+	/**
+	\brief Gets the angle between this quat and the identity quaternion.
+
+	\note assumes a unit quaternion: w outside [-1,1] would make MiAcos return NaN.
+
+	<b>Unit:</b> Radians
+	*/
+	MI_INLINE MiF32 getAngle() const
+	{
+		return MiAcos(w) * MiF32(2);
+	}
+
+
+	/**
+	\brief Gets the angle between this quat and the argument
+
+	\note assumes both quaternions are unit length (dot must stay within the MiAcos domain).
+
+	<b>Unit:</b> Radians
+	*/
+	MI_INLINE MiF32 getAngle(const MiQuat& q) const
+	{
+		return MiAcos(dot(q)) * MiF32(2);
+	}
+
+
+	/**
+	\brief This is the squared 4D vector length, should be 1 for unit quaternions.
+	*/
+	MI_FORCE_INLINE MiF32 magnitudeSquared() const
+	{
+		return x*x + y*y + z*z + w*w;
+	}
+
+	/**
+	\brief returns the scalar product of this and other.
+	*/
+	MI_FORCE_INLINE MiF32 dot(const MiQuat& v) const
+	{
+		return x * v.x + y * v.y + z * v.z  + w * v.w;
+	}
+
+	/**
+	\brief returns a unit-length copy of this quaternion.
+	\note no zero-magnitude guard (unlike normalize()); caller must ensure magnitude > 0.
+	*/
+	MI_INLINE MiQuat getNormalized() const
+	{
+		const MiF32 s = 1.0f/magnitude();
+		return MiQuat(x*s, y*s, z*s, w*s);
+	}
+
+
+	/** \brief returns the 4D vector length of this quaternion */
+	MI_INLINE float magnitude() const
+	{
+		return MiSqrt(magnitudeSquared());
+	}
+
+	//modifiers:
+	/**
+	\brief maps to the closest unit quaternion.
+	Leaves the quaternion unchanged when its magnitude is zero.
+	\return the magnitude before normalization
+	*/
+	MI_INLINE MiF32 normalize()											// convert this MiQuat to a unit quaternion
+	{
+		const MiF32 mag = magnitude();
+		if (mag)
+		{
+			const MiF32 imag = MiF32(1) / mag;
+
+			x *= imag;
+			y *= imag;
+			z *= imag;
+			w *= imag;
+		}
+		return mag;
+	}
+
+	/**
+	\brief returns the conjugate.
+
+	\note for unit quaternions, this is the inverse.
+	*/
+	MI_INLINE MiQuat getConjugate() const
+	{
+		return MiQuat(-x,-y,-z,w);
+	}
+
+	/**
+	\brief returns imaginary part (x,y,z) as a mutable reference.
+	\note relies on x,y,z being laid out contiguously like a MiVec3.
+	*/
+	MI_INLINE MiVec3& getImaginaryPart()
+	{
+		return reinterpret_cast<MiVec3&>(x);
+	}
+
+	/**
+	\brief returns imaginary part (x,y,z) as a const reference.
+	*/
+	MI_INLINE const MiVec3& getImaginaryPart() const
+	{
+		return reinterpret_cast<const MiVec3&>(x);
+	}
+
+
+	/** brief computes rotation of x-axis (expanded form of rotate(MiVec3(1,0,0))) */
+	MI_FORCE_INLINE MiVec3 getBasisVector0()	const
+	{	
+//		return rotate(MiVec3(1,0,0));
+		const MiF32 x2 = x*2.0f;
+		const MiF32 w2 = w*2.0f;
+		return MiVec3(	(w * w2) - 1.0f + x*x2,
+						(z * w2)        + y*x2,
+						(-y * w2)       + z*x2);
+	}
+	
+	/** brief computes rotation of y-axis (expanded form of rotate(MiVec3(0,1,0))) */
+	MI_FORCE_INLINE MiVec3 getBasisVector1()	const 
+	{	
+//		return rotate(MiVec3(0,1,0));
+		const MiF32 y2 = y*2.0f;
+		const MiF32 w2 = w*2.0f;
+		return MiVec3(	(-z * w2)       + x*y2,
+						(w * w2) - 1.0f + y*y2,
+						(x * w2)        + z*y2);
+	}
+
+
+	/** brief computes rotation of z-axis (expanded form of rotate(MiVec3(0,0,1))) */
+	MI_FORCE_INLINE MiVec3 getBasisVector2() const	
+	{	
+//		return rotate(MiVec3(0,0,1));
+		const MiF32 z2 = z*2.0f;
+		const MiF32 w2 = w*2.0f;
+		return MiVec3(	(y * w2)        + x*z2,
+						(-x * w2)       + y*z2,
+						(w * w2) - 1.0f + z*z2);
+	}
+
+	/**
+	rotates passed vec by this (assumed unitary)
+	*/
+	MI_FORCE_INLINE const MiVec3 rotate(const MiVec3& v) const
+//	MI_INLINE const MiVec3 rotate(const MiVec3& v) const
+	{
+		const MiF32 vx = 2.0f*v.x;
+		const MiF32 vy = 2.0f*v.y;
+		const MiF32 vz = 2.0f*v.z;
+		const MiF32 w2 = w*w-0.5f;
+		const MiF32 dot2 = (x*vx + y*vy +z*vz);
+		return MiVec3
+		(
+			(vx*w2 + (y * vz - z * vy)*w + x*dot2), 
+			(vy*w2 + (z * vx - x * vz)*w + y*dot2), 
+			(vz*w2 + (x * vy - y * vx)*w + z*dot2)
+		);
+		/*
+		const MiVec3 qv(x,y,z);
+		return (v*(w*w-0.5f) + (qv.cross(v))*w + qv*(qv.dot(v)))*2;
+		*/
+	}
+
+	/**
+	inverse rotates passed vec by this (assumed unitary)
+	*/
+	MI_FORCE_INLINE const MiVec3 rotateInv(const MiVec3& v) const
+//	MI_INLINE const MiVec3 rotateInv(const MiVec3& v) const
+	{
+		const MiF32 vx = 2.0f*v.x;
+		const MiF32 vy = 2.0f*v.y;
+		const MiF32 vz = 2.0f*v.z;
+		const MiF32 w2 = w*w-0.5f;
+		const MiF32 dot2 = (x*vx + y*vy +z*vz);
+		return MiVec3
+		(
+			(vx*w2 - (y * vz - z * vy)*w + x*dot2), 
+			(vy*w2 - (z * vx - x * vz)*w + y*dot2), 
+			(vz*w2 - (x * vy - y * vx)*w + z*dot2)
+		);
+//		const MiVec3 qv(x,y,z);
+//		return (v*(w*w-0.5f) - (qv.cross(v))*w + qv*(qv.dot(v)))*2;
+	}
+
+	/**
+	\brief Assignment operator
+	*/
+	MI_FORCE_INLINE	MiQuat&	operator=(const MiQuat& p)			{ x = p.x; y = p.y; z = p.z; w = p.w;	return *this;		}
+
+	/** \brief in-place Hamilton product: *this = *this * q */
+	MI_FORCE_INLINE MiQuat& operator*= (const MiQuat& q)
+	{
+		const MiF32 tx = w*q.x + q.w*x + y*q.z - q.y*z;
+		const MiF32 ty = w*q.y + q.w*y + z*q.x - q.z*x;
+		const MiF32 tz = w*q.z + q.w*z + x*q.y - q.x*y;
+
+		w = w*q.w - q.x*x - y*q.y - q.z*z;
+		x = tx;
+		y = ty;
+		z = tz;
+
+		return *this;
+	}
+
+	/** \brief component-wise in-place addition */
+	MI_FORCE_INLINE MiQuat& operator+= (const MiQuat& q)
+	{
+		x+=q.x;
+		y+=q.y;
+		z+=q.z;
+		w+=q.w;
+		return *this;
+	}
+
+	/** \brief component-wise in-place subtraction */
+	MI_FORCE_INLINE MiQuat& operator-= (const MiQuat& q)
+	{
+		x-=q.x;
+		y-=q.y;
+		z-=q.z;
+		w-=q.w;
+		return *this;
+	}
+
+	/** \brief in-place scalar multiplication */
+	MI_FORCE_INLINE MiQuat& operator*= (const MiF32 s)
+	{
+		x*=s;
+		y*=s;
+		z*=s;
+		w*=s;
+		return *this;
+	}
+
+	/** quaternion multiplication (Hamilton product) */
+	MI_INLINE MiQuat operator*(const MiQuat& q) const
+	{
+		return MiQuat(w*q.x + q.w*x + y*q.z - q.y*z,
+					  w*q.y + q.w*y + z*q.x - q.z*x,
+					  w*q.z + q.w*z + x*q.y - q.x*y,
+					  w*q.w - x*q.x - y*q.y - z*q.z);
+	}
+
+	/** quaternion addition */
+	MI_FORCE_INLINE MiQuat operator+(const MiQuat& q) const
+	{
+		return MiQuat(x+q.x,y+q.y,z+q.z,w+q.w);
+	}
+
+	/** unary negation (negates all four components) */
+	MI_FORCE_INLINE MiQuat operator-() const
+	{
+		return MiQuat(-x,-y,-z,-w);
+	}
+
+
+	/** quaternion subtraction */
+	MI_FORCE_INLINE MiQuat operator-(const MiQuat& q) const
+	{
+		return MiQuat(x-q.x,y-q.y,z-q.z,w-q.w);
+	}
+
+
+	/** scalar post-multiplication */
+	MI_FORCE_INLINE MiQuat operator*(MiF32 r) const
+	{
+		return MiQuat(x*r,y*r,z*r,w*r);
+	}
+
+	/** \brief returns the identity quaternion (0,0,0,1) */
+	static MI_INLINE MiQuat createIdentity() { return MiQuat(0,0,0,1); }
+
+	/** the quaternion elements */
+	MiF32 x,y,z,w;
+};
+
+/*!
+Basic mathematical 3x3 matrix
+
+Some clarifications, as there have been much confusion about matrix formats etc in the past.
+
+Short:
+- Matrix have base vectors in columns (vectors are column matrices, 3x1 matrices).
+- Matrix is physically stored in column major format
+- Matrices are concaternated from left
+
+Long:
+Given three base vectors a, b and c the matrix is stored as
+
+|a.x b.x c.x|
+|a.y b.y c.y|
+|a.z b.z c.z|
+
+Vectors are treated as columns, so the vector v is
+
+|x|
+|y|
+|z|
+
+And matrices are applied _before_ the vector (pre-multiplication)
+v' = M*v
+
+|x'| |a.x b.x c.x| |x| |a.x*x + b.x*y + c.x*z|
+|y'| = |a.y b.y c.y| * |y| = |a.y*x + b.y*y + c.y*z|
+|z'| |a.z b.z c.z| |z| |a.z*x + b.z*y + c.z*z|
+
+
+Physical storage and indexing:
+To be compatible with popular 3d rendering APIs (read D3d and OpenGL)
+the physical indexing is
+
+|0 3 6|
+|1 4 7|
+|2 5 8|
+
+index = column*3 + row
+
+which in C++ translates to M[column][row]
+
+The mathematical indexing is M_row,column and this is what is used for _-notation
+so _12 is 1st row, second column and operator(row, column)!
+
+*/
+class MiMat33
+{
+public:
+	//! Default constructor — leaves the matrix uninitialized for performance.
+	MI_INLINE MiMat33()
+	{}
+
+	//! Construct from three base vectors (stored as columns)
+	MiMat33(const MiVec3& col0, const MiVec3& col1, const MiVec3& col2)
+		: column0(col0), column1(col1), column2(col2)
+	{}
+
+	//! Construct from float[9] in column-major order (see class comment)
+	explicit MI_INLINE MiMat33(MiF32 values[]):
+		column0(values[0],values[1],values[2]),
+		column1(values[3],values[4],values[5]),
+		column2(values[6],values[7],values[8])
+	{
+	}
+
+	//! Construct a rotation matrix from a quaternion
+	//! NOTE(review): the standard expansion below assumes q is unit length — confirm callers.
+	explicit MI_FORCE_INLINE MiMat33(const MiQuat& q)
+	{
+		const MiF32 x = q.x;
+		const MiF32 y = q.y;
+		const MiF32 z = q.z;
+		const MiF32 w = q.w;
+
+		const MiF32 x2 = x + x;
+		const MiF32 y2 = y + y;
+		const MiF32 z2 = z + z;
+
+		const MiF32 xx = x2*x;
+		const MiF32 yy = y2*y;
+		const MiF32 zz = z2*z;
+
+		const MiF32 xy = x2*y;
+		const MiF32 xz = x2*z;
+		const MiF32 xw = x2*w;
+
+		const MiF32 yz = y2*z;
+		const MiF32 yw = y2*w;
+		const MiF32 zw = z2*w;
+
+		column0 = MiVec3(1.0f - yy - zz, xy + zw, xz - yw);
+		column1 = MiVec3(xy - zw, 1.0f - xx - zz, yz + xw);
+		column2 = MiVec3(xz + yw, yz - xw, 1.0f - xx - yy);
+	}
+
+	//! Copy constructor
+	MI_INLINE MiMat33(const MiMat33& other)
+		: column0(other.column0), column1(other.column1), column2(other.column2)
+	{}
+
+	//! Assignment operator
+	MI_FORCE_INLINE MiMat33& operator=(const MiMat33& other)
+	{
+		column0 = other.column0;
+		column1 = other.column1;
+		column2 = other.column2;
+		return *this;
+	}
+
+	//! Returns the identity matrix
+	MI_INLINE static MiMat33 createIdentity()
+	{
+		return MiMat33(MiVec3(1,0,0), MiVec3(0,1,0), MiVec3(0,0,1));
+	}
+
+	//! Returns the zero matrix
+	MI_INLINE static MiMat33 createZero()
+	{
+		return MiMat33(MiVec3(0.0f), MiVec3(0.0f), MiVec3(0.0f));
+	}
+
+	//! Construct from diagonal, off-diagonals are zero.
+	MI_INLINE static MiMat33 createDiagonal(const MiVec3& d)
+	{
+		return MiMat33(MiVec3(d.x,0.0f,0.0f), MiVec3(0.0f,d.y,0.0f), MiVec3(0.0f,0.0f,d.z));
+	}
+
+
+	//! Get transposed matrix
+	MI_FORCE_INLINE MiMat33 getTranspose() const
+	{
+		const MiVec3 v0(column0.x, column1.x, column2.x);
+		const MiVec3 v1(column0.y, column1.y, column2.y);
+		const MiVec3 v2(column0.z, column1.z, column2.z);
+
+		return MiMat33(v0,v1,v2);   
+	}
+
+	//! Get the real inverse (adjugate / determinant).
+	//! Returns the identity matrix when the determinant is exactly zero (singular matrix).
+	MI_INLINE MiMat33 getInverse() const
+	{
+		const MiF32 det = getDeterminant();
+		MiMat33 inverse;
+
+		if(det != 0)
+		{
+			const MiF32 invDet = 1.0f/det;
+
+			inverse.column0[0] = invDet * (column1[1]*column2[2] - column2[1]*column1[2]);							
+			inverse.column0[1] = invDet *-(column0[1]*column2[2] - column2[1]*column0[2]);
+			inverse.column0[2] = invDet * (column0[1]*column1[2] - column0[2]*column1[1]);
+
+			inverse.column1[0] = invDet *-(column1[0]*column2[2] - column1[2]*column2[0]);
+			inverse.column1[1] = invDet * (column0[0]*column2[2] - column0[2]*column2[0]);
+			inverse.column1[2] = invDet *-(column0[0]*column1[2] - column0[2]*column1[0]);
+
+			inverse.column2[0] = invDet * (column1[0]*column2[1] - column1[1]*column2[0]);
+			inverse.column2[1] = invDet *-(column0[0]*column2[1] - column0[1]*column2[0]);
+			inverse.column2[2] = invDet * (column0[0]*column1[1] - column1[0]*column0[1]);
+
+			return inverse;
+		}
+		else
+		{
+			return createIdentity();
+		}
+	}
+
+	//! Get determinant (scalar triple product of the columns)
+	MI_INLINE MiF32 getDeterminant() const
+	{
+		return column0.dot(column1.cross(column2));
+	}
+
+	//! Unary minus
+	MI_INLINE MiMat33 operator-() const
+	{
+		return MiMat33(-column0, -column1, -column2);
+	}
+
+	//! Add
+	MI_INLINE MiMat33 operator+(const MiMat33& other) const
+	{
+		return MiMat33( column0+other.column0,
+					  column1+other.column1,
+					  column2+other.column2);
+	}
+
+	//! Subtract
+	MI_INLINE MiMat33 operator-(const MiMat33& other) const
+	{
+		return MiMat33( column0-other.column0,
+					  column1-other.column1,
+					  column2-other.column2);
+	}
+
+	//! Scalar multiplication
+	MI_INLINE MiMat33 operator*(MiF32 scalar) const
+	{
+		return MiMat33(column0*scalar, column1*scalar, column2*scalar);
+	}
+
+	friend MiMat33 operator*(MiF32, const MiMat33&);
+
+	//! Matrix vector multiplication (returns 'this->transform(vec)')
+	MI_INLINE MiVec3 operator*(const MiVec3& vec) const
+	{
+		return transform(vec);
+	}
+
+	//! Matrix multiplication
+	MI_FORCE_INLINE MiMat33 operator*(const MiMat33& other) const
+	{
+		//Rows from this <dot> columns from other
+		//column0 = transform(other.column0) etc
+		return MiMat33(transform(other.column0), transform(other.column1), transform(other.column2));
+	}
+
+	// a <op>= b operators
+
+	//! Equals-add
+	MI_INLINE MiMat33& operator+=(const MiMat33& other)
+	{
+		column0 += other.column0;
+		column1 += other.column1;
+		column2 += other.column2;
+		return *this;
+	}
+
+	//! Equals-sub
+	MI_INLINE MiMat33& operator-=(const MiMat33& other)
+	{
+		column0 -= other.column0;
+		column1 -= other.column1;
+		column2 -= other.column2;
+		return *this;
+	}
+
+	//! Equals scalar multiplication
+	MI_INLINE MiMat33& operator*=(MiF32 scalar)
+	{
+		column0 *= scalar;
+		column1 *= scalar;
+		column2 *= scalar;
+		return *this;
+	}
+
+	//! Element access, mathematical way! (row, column) — storage is column-major, hence [col][row]
+	MI_FORCE_INLINE MiF32 operator()(unsigned int row, unsigned int col) const
+	{
+		return (*this)[col][row];
+	}
+
+	//! Element access, mathematical way! (row, column)
+	MI_FORCE_INLINE MiF32& operator()(unsigned int row, unsigned int col)
+	{
+		return (*this)[col][row];
+	}
+
+	// Transform etc
+	
+	//! Transform vector by matrix, equal to v' = M*v
+	MI_FORCE_INLINE MiVec3 transform(const MiVec3& other) const
+	{
+		return column0*other.x + column1*other.y + column2*other.z;
+	}
+
+	//! Transform vector by matrix transpose, v' = M^t*v
+	MI_INLINE MiVec3 transformTranspose(const MiVec3& other) const
+	{
+		return MiVec3(	column0.dot(other),
+						column1.dot(other),
+						column2.dot(other));
+	}
+
+	//! Raw pointer to the nine floats (column-major; columns are contiguous MiVec3s)
+	MI_FORCE_INLINE const MiF32* front() const
+	{
+		return &column0.x;
+	}
+
+	//! Column access by index (0..2); relies on columns being laid out contiguously
+	MI_FORCE_INLINE			MiVec3& operator[](int num)			{return (&column0)[num];}
+	MI_FORCE_INLINE const	MiVec3& operator[](int num) const	{return (&column0)[num];}
+
+	//Data, see above for format!
+
+	MiVec3 column0, column1, column2; //the three base vectors
+};
+
+// implementation from MiQuat.h
+// implementation from MiQuat.h
+// Matrix-to-quaternion conversion. Branches on the trace: when the trace is
+// non-negative the largest quaternion component is w; otherwise the largest
+// diagonal element selects which imaginary component to solve for first
+// (classic numerically-stable extraction, cf. Shoemake's method).
+// NOTE(review): presumably m must be a pure rotation matrix — confirm callers.
+ MI_INLINE MiQuat::MiQuat(const MiMat33& m)
+{
+	MiF32 tr = m(0,0) + m(1,1) + m(2,2), h;
+	if(tr >= 0)
+	{
+		h = MiSqrt(tr +1);
+		w = MiF32(0.5) * h;
+		h = MiF32(0.5) / h;
+
+		x = (m(2,1) - m(1,2)) * h;
+		y = (m(0,2) - m(2,0)) * h;
+		z = (m(1,0) - m(0,1)) * h;
+	}
+	else
+	{
+		// pick i as the index of the largest diagonal element
+		int i = 0; 
+		if (m(1,1) > m(0,0))
+			i = 1; 
+		if(m(2,2) > m(i,i))
+			i=2; 
+		switch (i)
+		{
+		case 0:
+			h = MiSqrt((m(0,0) - (m(1,1) + m(2,2))) + 1);
+			x = MiF32(0.5) * h;
+			h = MiF32(0.5) / h;
+
+			y = (m(0,1) + m(1,0)) * h; 
+			z = (m(2,0) + m(0,2)) * h;
+			w = (m(2,1) - m(1,2)) * h;
+			break;
+		case 1:
+			h = MiSqrt((m(1,1) - (m(2,2) + m(0,0))) + 1);
+			y = MiF32(0.5) * h;
+			h = MiF32(0.5) / h;
+
+			z = (m(1,2) + m(2,1)) * h;
+			x = (m(0,1) + m(1,0)) * h;
+			w = (m(0,2) - m(2,0)) * h;
+			break;
+		case 2:
+			h = MiSqrt((m(2,2) - (m(0,0) + m(1,1))) + 1);
+			z = MiF32(0.5) * h;
+			h = MiF32(0.5) / h;
+
+			x = (m(2,0) + m(0,2)) * h;
+			y = (m(1,2) + m(2,1)) * h;
+			w = (m(1,0) - m(0,1)) * h;
+		}
+	}
+}
+
+//! 4x4 matrix, column-major like MiMat33; column3 holds the translation for affine transforms.
+class MiMat44
+{
+public:
+	//! Default constructor — leaves the matrix uninitialized for performance.
+	MI_INLINE MiMat44()
+	{}
+
+	//! Construct from four 4-vectors (stored as columns)
+	MiMat44(const MiVec4& col0, const MiVec4& col1, const MiVec4& col2, const MiVec4 &col3)
+		: column0(col0), column1(col1), column2(col2), column3(col3)
+	{}
+
+	//! Construct an affine transform from three base vectors (w=0) and a translation (w=1)
+	MiMat44(const MiVec3& column0, const MiVec3& column1, const MiVec3& column2, const MiVec3& column3)
+		: column0(column0,0), column1(column1,0), column2(column2,0), column3(column3,1)
+	{}
+
+	//! Construct from float[16] in column-major order
+	explicit MI_INLINE MiMat44(MiF32 values[]):
+		column0(values[0],values[1],values[2], values[3]),
+		column1(values[4],values[5],values[6], values[7]),
+		column2(values[8],values[9],values[10], values[11]),
+		column3(values[12], values[13], values[14], values[15])
+	{
+	}
+
+	//! Construct a rotation matrix (zero translation) from a quaternion
+	//! NOTE(review): the standard expansion below assumes q is unit length — confirm callers.
+	explicit MI_INLINE MiMat44(const MiQuat& q)
+	{
+		const MiF32 x = q.x;
+		const MiF32 y = q.y;
+		const MiF32 z = q.z;
+		const MiF32 w = q.w;
+
+		const MiF32 x2 = x + x;
+		const MiF32 y2 = y + y;
+		const MiF32 z2 = z + z;
+
+		const MiF32 xx = x2*x;
+		const MiF32 yy = y2*y;
+		const MiF32 zz = z2*z;
+
+		const MiF32 xy = x2*y;
+		const MiF32 xz = x2*z;
+		const MiF32 xw = x2*w;
+
+		const MiF32 yz = y2*z;
+		const MiF32 yw = y2*w;
+		const MiF32 zw = z2*w;
+
+		column0 = MiVec4(1.0f - yy - zz, xy + zw, xz - yw, 0.0f);
+		column1 = MiVec4(xy - zw, 1.0f - xx - zz, yz + xw, 0.0f);
+		column2 = MiVec4(xz + yw, yz - xw, 1.0f - xx - yy, 0.0f);
+		column3 = MiVec4(0.0f, 0.0f, 0.0f, 1.0f);
+	}
+
+	//! Construct from a diagonal vector
+	explicit MI_INLINE MiMat44(const MiVec4& diagonal):
+		column0(diagonal.x,0.0f,0.0f,0.0f),
+		column1(0.0f,diagonal.y,0.0f,0.0f),
+		column2(0.0f,0.0f,diagonal.z,0.0f),
+		column3(0.0f,0.0f,0.0f,diagonal.w)
+	{
+	}
+
+	//! Construct an affine transform from a 3x3 orientation and a position
+	MiMat44(const MiMat33& orientation, const MiVec3& position):
+		column0(orientation.column0,0.0f),
+		column1(orientation.column1,0.0f),
+		column2(orientation.column2,0.0f),
+		column3(position,1)
+	{
+	}
+
+	//! Copy constructor
+	MI_INLINE MiMat44(const MiMat44& other)
+		: column0(other.column0), column1(other.column1), column2(other.column2), column3(other.column3)
+	{}
+
+	//! Assignment operator
+	MI_INLINE const MiMat44& operator=(const MiMat44& other)
+	{
+		column0 = other.column0;
+		column1 = other.column1;
+		column2 = other.column2;
+		column3 = other.column3;
+		return *this;
+	}
+
+	//! Returns the identity matrix
+	MI_INLINE static MiMat44 createIdentity()
+	{
+		return MiMat44(
+			MiVec4(1.0f,0.0f,0.0f,0.0f),
+			MiVec4(0.0f,1.0f,0.0f,0.0f),
+			MiVec4(0.0f,0.0f,1.0f,0.0f),
+			MiVec4(0.0f,0.0f,0.0f,1.0f));
+	}
+
+
+	//! Returns the zero matrix
+	MI_INLINE static MiMat44 createZero()
+	{
+		return MiMat44(MiVec4(0.0f), MiVec4(0.0f), MiVec4(0.0f), MiVec4(0.0f));
+	}
+
+	//! Get transposed matrix
+	MI_INLINE MiMat44 getTranspose() const
+	{
+		return MiMat44(MiVec4(column0.x, column1.x, column2.x, column3.x),
+					 MiVec4(column0.y, column1.y, column2.y, column3.y),
+					 MiVec4(column0.z, column1.z, column2.z, column3.z),
+				     MiVec4(column0.w, column1.w, column2.w, column3.w));
+	}
+
+	//! Unary minus
+	MI_INLINE MiMat44 operator-() const
+	{
+		return MiMat44(-column0, -column1, -column2, -column3);
+	}
+
+	//! Add
+	MI_INLINE MiMat44 operator+(const MiMat44& other) const
+	{
+		return MiMat44( column0+other.column0,
+					  column1+other.column1,
+					  column2+other.column2,
+					  column3+other.column3);
+	}
+
+	//! Subtract
+	MI_INLINE MiMat44 operator-(const MiMat44& other) const
+	{
+		return MiMat44( column0-other.column0,
+					  column1-other.column1,
+					  column2-other.column2,
+					  column3-other.column3);
+	}
+
+	//! Scalar multiplication
+	MI_INLINE MiMat44 operator*(MiF32 scalar) const
+	{
+		return MiMat44(column0*scalar, column1*scalar, column2*scalar, column3*scalar);
+	}
+
+	friend MiMat44 operator*(MiF32, const MiMat44&);
+
+	//! Matrix multiplication
+	MI_INLINE MiMat44 operator*(const MiMat44& other) const
+	{
+		//Rows from this <dot> columns from other
+		//column0 = transform(other.column0) etc
+		return MiMat44(transform(other.column0), transform(other.column1), transform(other.column2), transform(other.column3));
+	}
+
+	// a <op>= b operators
+
+	//! Equals-add
+	MI_INLINE MiMat44& operator+=(const MiMat44& other)
+	{
+		column0 += other.column0;
+		column1 += other.column1;
+		column2 += other.column2;
+		column3 += other.column3;
+		return *this;
+	}
+
+	//! Equals-sub
+	MI_INLINE MiMat44& operator-=(const MiMat44& other)
+	{
+		column0 -= other.column0;
+		column1 -= other.column1;
+		column2 -= other.column2;
+		column3 -= other.column3;
+		return *this;
+	}
+
+	//! Equals scalar multiplication
+	MI_INLINE MiMat44& operator*=(MiF32 scalar)
+	{
+		column0 *= scalar;
+		column1 *= scalar;
+		column2 *= scalar;
+		column3 *= scalar;
+		return *this;
+	}
+
+	//! Element access, mathematical way! (row, column) — storage is column-major, hence [col][row]
+	MI_FORCE_INLINE MiF32 operator()(unsigned int row, unsigned int col) const
+	{
+		return (*this)[col][row];
+	}
+
+	//! Element access, mathematical way! (row, column)
+	MI_FORCE_INLINE MiF32& operator()(unsigned int row, unsigned int col)
+	{
+		return (*this)[col][row];
+	}
+
+	//! Transform vector by matrix, equal to v' = M*v
+	MI_INLINE MiVec4 transform(const MiVec4& other) const
+	{
+		return column0*other.x + column1*other.y + column2*other.z + column3*other.w;
+	}
+
+	//! Transform point by affine matrix (w is taken as 1, so the translation applies)
+	MI_INLINE MiVec3 transform(const MiVec3& other) const
+	{
+		return transform(MiVec4(other,1)).getXYZ();
+	}
+
+	//! Rotate vector by matrix (translation column is ignored)
+	MI_INLINE MiVec4 rotate(const MiVec4& other) const
+	{
+		return column0*other.x + column1*other.y + column2*other.z;// + column3*0;
+	}
+
+	//! Rotate vector by matrix; the w=1 passed below is irrelevant since rotate() drops column3
+	MI_INLINE MiVec3 rotate(const MiVec3& other) const
+	{
+		return rotate(MiVec4(other,1)).getXYZ();
+	}
+
+
+	//! Get one of the three basis vectors (columns 0..2) as a MiVec3
+	MI_INLINE MiVec3 getBasis(int num) const
+	{
+		MI_ASSERT(num>=0 && num<3);
+		return (&column0)[num].getXYZ();
+	}
+
+	//! Get the translation part (column 3)
+	MI_INLINE MiVec3 getPosition() const
+	{
+		return column3.getXYZ();
+	}
+
+	//! Set the translation part (leaves column3.w untouched)
+	MI_INLINE void setPosition(const MiVec3& position)
+	{
+		column3.x = position.x;
+		column3.y = position.y;
+		column3.z = position.z;
+	}
+
+	//! Raw pointer to the sixteen floats (column-major)
+	MI_FORCE_INLINE const MiF32* front() const
+	{
+		return &column0.x;
+	}
+
+	//! Column access by index (0..3); relies on columns being laid out contiguously
+	MI_FORCE_INLINE			MiVec4& operator[](int num)			{return (&column0)[num];}
+	MI_FORCE_INLINE const	MiVec4& operator[](int num) const	{return (&column0)[num];}
+
+	//! Scale each column by the corresponding component of p
+	MI_INLINE void	scale(const MiVec4& p)
+	{
+		column0 *= p.x;
+		column1 *= p.y;
+		column2 *= p.z;
+		column3 *= p.w;
+	}
+
+	//! Inverse of a rotation+translation transform.
+	//! NOTE(review): the transposed 3x3 is used as the inverse rotation, which is
+	//! only valid when columns 0-2 are orthonormal (no scale/shear) — confirm callers.
+	MI_INLINE MiMat44 inverseRT(void) const
+	{
+		MiVec3 r0(column0.x, column1.x, column2.x),
+			r1(column0.y, column1.y, column2.y),
+			r2(column0.z, column1.z, column2.z);
+
+		return MiMat44(r0, r1, r2, -(r0 * column3.x + r1 * column3.y + r2 * column3.z));
+	}
+
+	//! true when all sixteen elements are finite (not NaN/INF)
+	MI_INLINE bool isFinite() const
+	{
+		return column0.isFinite() && column1.isFinite() && column2.isFinite() && column3.isFinite();
+	}
+
+
+	//Data, see above for format!
+
+	MiVec4 column0, column1, column2, column3; //the four base vectors
+};
+
+
+/**
+\brief Class representing 3D range or axis aligned bounding box.
+
+Stored as minimum and maximum extent corners. Alternate representation
+would be center and dimensions.
+May be empty or nonempty. If not empty, minimum <= maximum has to hold.
+*/
+class MiBounds3
+{
+public:
+
+	/**
+	\brief Default constructor, not performing any initialization for performance reason.
+	\remark Use empty() function below to construct empty bounds.
+	*/
+	MI_FORCE_INLINE MiBounds3()	{}
+
+	/**
+	\brief Construct from two bounding points.
+	\remark No validation is performed; the caller must ensure minimum <= maximum
+	component-wise (or intend an empty bounds).
+	*/
+	MI_FORCE_INLINE MiBounds3(const MiVec3& minimum, const MiVec3& maximum);
+
+	/**
+	\brief Return empty bounds (minimum = +MI_MAX_REAL, maximum = -MI_MAX_REAL).
+	*/
+	static MI_FORCE_INLINE MiBounds3 empty();
+
+	/**
+	\brief returns the AABB containing v0 and v1.
+	\param v0 first point included in the AABB.
+	\param v1 second point included in the AABB.
+	*/
+	static MI_FORCE_INLINE MiBounds3 boundsOfPoints(const MiVec3& v0, const MiVec3& v1);
+
+	/**
+	\brief returns the AABB from center and extents vectors.
+	\param center Center vector
+	\param extent Extents vector (half-dimensions)
+	*/
+	static MI_FORCE_INLINE MiBounds3 centerExtents(const MiVec3& center, const MiVec3& extent);
+
+	/**
+	\brief Construct from center, extent, and (not necessarily orthogonal) basis
+	*/
+	static MI_INLINE MiBounds3 basisExtent(const MiVec3& center, const MiMat33& basis, const MiVec3& extent);
+
+	/**
+	\brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
+	\param[in] matrix Transform to apply, can contain scaling as well
+	\param[in] bounds The bounds to transform.
+	*/
+	static MI_INLINE MiBounds3 transform(const MiMat33& matrix, const MiBounds3& bounds);
+
+	/**
+	\brief Sets empty to true
+	*/
+	MI_FORCE_INLINE void setEmpty();
+
+	/**
+	\brief Sets infinite bounds
+	*/
+	MI_FORCE_INLINE void setInfinite();
+
+	/**
+	\brief expands the volume to include v
+	\param v Point to expand to.
+	*/
+	MI_FORCE_INLINE void include(const MiVec3& v);
+
+	/**
+	\brief expands the volume to include b.
+	\param b Bounds to perform union with.
+	*/
+	MI_FORCE_INLINE void include(const MiBounds3& b);
+
+	/** \brief true when minimum exceeds maximum on any axis */
+	MI_FORCE_INLINE bool isEmpty() const;
+
+	/**
+	\brief indicates whether the intersection of this and b is empty or not.
+	\param b Bounds to test for intersection.
+	*/
+	MI_FORCE_INLINE bool intersects(const MiBounds3& b) const;
+
+	/**
+	 \brief computes the 1D-intersection between two AABBs, on a given axis.
+	 \param	a		the other AABB
+	 \param	axis	the axis (0, 1, 2)
+	 */
+	MI_FORCE_INLINE bool intersects1D(const MiBounds3& a, MiU32 axis)	const;
+
+	/**
+	\brief indicates if these bounds contain v.
+	\param v Point to test against bounds.
+	*/
+	MI_FORCE_INLINE bool contains(const MiVec3& v) const;
+
+	/**
+	 \brief	checks a box is inside another box.
+	 \param	box		the other AABB
+	 */
+	MI_FORCE_INLINE bool isInside(const MiBounds3& box) const;
+
+	/**
+	\brief returns the center of this axis aligned box.
+	*/
+	MI_FORCE_INLINE MiVec3 getCenter() const;
+
+	/**
+	\brief get component of the box's center along a given axis
+	*/
+	MI_FORCE_INLINE float	getCenter(MiU32 axis)	const;
+
+	/**
+	\brief get component of the box's extents along a given axis
+	*/
+	MI_FORCE_INLINE float	getExtents(MiU32 axis)	const;
+
+	/**
+	\brief returns the dimensions (width/height/depth) of this axis aligned box.
+	*/
+	MI_FORCE_INLINE MiVec3 getDimensions() const;
+
+	/**
+	\brief returns the extents, which are half of the width/height/depth.
+	*/
+	MI_FORCE_INLINE MiVec3 getExtents() const;
+
+	/**
+	\brief scales the AABB.
+	\param scale Factor to scale AABB by.
+	*/
+	MI_FORCE_INLINE void scale(MiF32 scale);
+
+	/** 
+	fattens the AABB in all 3 dimensions by the given distance. 
+	*/
+	MI_FORCE_INLINE void fatten(MiF32 distance);
+
+	/** 
+	checks that the AABB values are not NaN
+	*/
+
+	MI_FORCE_INLINE bool isFinite() const;
+
+	// Invariant for non-empty bounds: minimum <= maximum component-wise.
+	MiVec3 minimum, maximum;
+};
+
+
+// Construct from explicit corners; no min<=max validation is performed.
+ MI_FORCE_INLINE MiBounds3::MiBounds3(const MiVec3& minimum, const MiVec3& maximum)
+: minimum(minimum), maximum(maximum)
+{
+}
+
+// Empty bounds are encoded as inverted extremes so that any include() fixes them up.
+ MI_FORCE_INLINE MiBounds3 MiBounds3::empty()
+{
+	return MiBounds3(MiVec3(MI_MAX_REAL), MiVec3(-MI_MAX_REAL));
+}
+
+// True when both corners contain only finite values (no NaN/INF).
+ MI_FORCE_INLINE bool MiBounds3::isFinite() const
+ {
+	return minimum.isFinite() && maximum.isFinite();
+}
+
+// Smallest AABB containing both points (component-wise min/max of the two).
+ MI_FORCE_INLINE MiBounds3 MiBounds3::boundsOfPoints(const MiVec3& v0, const MiVec3& v1)
+{
+	return MiBounds3(v0.minimum(v1), v0.maximum(v1));
+}
+
+// Build bounds from center and half-dimensions.
+ MI_FORCE_INLINE MiBounds3 MiBounds3::centerExtents(const MiVec3& center, const MiVec3& extent)
+{
+	return MiBounds3(center - extent, center + extent);
+}
+
+// AABB of an oriented box given by center, basis and per-axis extents.
+ MI_INLINE MiBounds3 MiBounds3::basisExtent(const MiVec3& center, const MiMat33& basis, const MiVec3& extent)
+{
+	// extended basis vectors
+	MiVec3 c0 = basis.column0 * extent.x;
+	MiVec3 c1 = basis.column1 * extent.y;
+	MiVec3 c2 = basis.column2 * extent.z;
+
+	MiVec3 w;
+	// find combination of base vectors that produces max. distance for each component = sum of abs()
+	w.x = MiAbs(c0.x) + MiAbs(c1.x) + MiAbs(c2.x);
+	w.y = MiAbs(c0.y) + MiAbs(c1.y) + MiAbs(c2.y);
+	w.z = MiAbs(c0.z) + MiAbs(c1.z) + MiAbs(c2.z);
+
+	return MiBounds3(center - w, center + w);
+}
+
+// Reset to the inverted-extreme empty encoding (see empty()).
+ MI_FORCE_INLINE void MiBounds3::setEmpty()
+{
+	minimum = MiVec3(MI_MAX_REAL);
+	maximum = MiVec3(-MI_MAX_REAL);
+}
+
+// Expand to cover the entire representable space.
+ MI_FORCE_INLINE void MiBounds3::setInfinite()
+{
+	minimum = MiVec3(-MI_MAX_REAL);
+	maximum = MiVec3(MI_MAX_REAL);
+}
+
+// Grow the bounds just enough to contain point v.
+ MI_FORCE_INLINE void MiBounds3::include(const MiVec3& v)
+{
+	MI_ASSERT(isFinite());
+	minimum = minimum.minimum(v);
+	maximum = maximum.maximum(v);
+}
+
+// Grow the bounds to the union with b.
+ MI_FORCE_INLINE void MiBounds3::include(const MiBounds3& b)
+{
+	MI_ASSERT(isFinite());
+	minimum = minimum.minimum(b.minimum);
+	maximum = maximum.maximum(b.maximum);
+}
+
+ MI_FORCE_INLINE bool MiBounds3::isEmpty() const
+{
+	MI_ASSERT(isFinite());
+	// Consistency condition for (Min, Max) boxes: minimum < maximum;
+	// the inverted encoding produced by setEmpty()/empty() fails it on every axis.
+	return minimum.x > maximum.x || minimum.y > maximum.y || minimum.z > maximum.z;
+}
+
+// Overlap test: the boxes intersect unless they are separated on some axis.
+// Touching boundaries count as intersecting.
+ MI_FORCE_INLINE bool MiBounds3::intersects(const MiBounds3& b) const
+{
+	MI_ASSERT(isFinite() && b.isFinite());
+	return !(b.minimum.x > maximum.x || minimum.x > b.maximum.x ||
+			 b.minimum.y > maximum.y || minimum.y > b.maximum.y ||
+			 b.minimum.z > maximum.z || minimum.z > b.maximum.z);
+}
+
+// Interval-overlap test along a single axis (0, 1 or 2); touching counts as overlap.
+ MI_FORCE_INLINE bool MiBounds3::intersects1D(const MiBounds3& a, MiU32 axis)	const
+{
+	MI_ASSERT(isFinite() && a.isFinite());
+	return maximum[axis] >= a.minimum[axis] && a.maximum[axis] >= minimum[axis];
+}
+
+// Point containment test; points on the boundary are considered contained.
+ MI_FORCE_INLINE bool MiBounds3::contains(const MiVec3& v) const
+{
+	MI_ASSERT(isFinite());
+
+	return !(v.x < minimum.x || v.x > maximum.x ||
+		v.y < minimum.y || v.y > maximum.y ||
+		v.z < minimum.z || v.z > maximum.z);
+}
+
+// True when *this is fully contained in box (boundaries may coincide).
+ MI_FORCE_INLINE bool MiBounds3::isInside(const MiBounds3& box) const
+{
+	MI_ASSERT(isFinite() && box.isFinite());
+	if(box.minimum.x>minimum.x)	return false;
+	if(box.minimum.y>minimum.y)	return false;
+	if(box.minimum.z>minimum.z)	return false;
+	if(box.maximum.x<maximum.x)	return false;
+	if(box.maximum.y<maximum.y)	return false;
+	if(box.maximum.z<maximum.z)	return false;
+	return true;
+}
+
+// Midpoint of the two corners.
+ MI_FORCE_INLINE MiVec3 MiBounds3::getCenter() const
+{
+	MI_ASSERT(isFinite());
+	return (minimum+maximum) * MiF32(0.5);
+}
+
+// Center coordinate along one axis (0, 1 or 2).
+ MI_FORCE_INLINE float	MiBounds3::getCenter(MiU32 axis)	const
+{
+	MI_ASSERT(isFinite());
+	return (minimum[axis] + maximum[axis]) * MiF32(0.5);
+}
+
+// Half-dimension along one axis (0, 1 or 2).
+ MI_FORCE_INLINE float	MiBounds3::getExtents(MiU32 axis)	const
+{
+	MI_ASSERT(isFinite());
+	return (maximum[axis] - minimum[axis]) * MiF32(0.5);
+}
+
+// Full width/height/depth of the box.
+ MI_FORCE_INLINE MiVec3 MiBounds3::getDimensions() const
+{
+	MI_ASSERT(isFinite());
+	return maximum - minimum;
+}
+
+// Half-dimensions of the box.
+ MI_FORCE_INLINE MiVec3 MiBounds3::getExtents() const
+{
+	MI_ASSERT(isFinite());
+	return getDimensions() * MiF32(0.5);
+}
+
+// Scale the extents about the center (the center does not move).
+ MI_FORCE_INLINE void MiBounds3::scale(MiF32 scale)
+{
+	MI_ASSERT(isFinite());
+	*this = centerExtents(getCenter(), getExtents() * scale);
+}
+
+// Inflate the box by `distance` on every face (negative distance shrinks it).
+ MI_FORCE_INLINE void MiBounds3::fatten(MiF32 distance)
+{
+	MI_ASSERT(isFinite());
+	minimum.x -= distance;
+	minimum.y -= distance;
+	minimum.z -= distance;
+
+	maximum.x += distance;
+	maximum.y += distance;
+	maximum.z += distance;
+}
+
+// Conservative transform: transform the center and rebuild an axis-aligned box
+// around the rotated/scaled extents. Empty bounds pass through unchanged.
+ MI_INLINE MiBounds3 MiBounds3::transform(const MiMat33& matrix, const MiBounds3& bounds)
+{
+	MI_ASSERT(bounds.isFinite());
+	return bounds.isEmpty() ? bounds :
+		MiBounds3::basisExtent(matrix * bounds.getCenter(), matrix, bounds.getExtents());
+}
+
+
+};
+
+#endif
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiMemoryBuffer.h b/APEX_1.4/shared/general/meshimport/include/utils/MiMemoryBuffer.h
new file mode 100644
index 00000000..236d5d66
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiMemoryBuffer.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_MEMORY_BUFFER_H
+#define MI_MEMORY_BUFFER_H
+
+#include "MiFileBuf.h"
+
+namespace mimp
+{
+
+ const MiU32 BUFFER_SIZE_DEFAULT = 4096;
+
+//Use this class if you want to use your own allocator
+class MiMemoryBufferBase : public MiFileBuf
+{
+ // Set up read-only mode over caller-owned memory [readMem, readMem+readLen).
+ // The memory is borrowed: it is neither copied nor freed by this class.
+ void init(const void *readMem, MiU32 readLen)
+ {
+ mReadBuffer = mReadLoc = (const MiU8 *)readMem;
+ mReadStop = &mReadLoc[readLen];
+
+ mWriteBuffer = mWriteLoc = mWriteStop = NULL;
+ mWriteBufferSize = 0;
+ mDefaultWriteBufferSize = BUFFER_SIZE_DEFAULT;
+
+ mOpenMode = OPEN_READ_ONLY;
+ mSeekType = SEEKABLE_READ;
+ }
+
+ // Set up read/write mode; the write buffer itself is allocated lazily by
+ // initWriteBuffer()/growWriteBuffer() on the first write.
+ void init(MiU32 defaultWriteBufferSize)
+ {
+ mReadBuffer = mReadLoc = mReadStop = NULL;
+ mWriteBuffer = mWriteLoc = mWriteStop = NULL;
+ mWriteBufferSize = 0;
+ mDefaultWriteBufferSize = defaultWriteBufferSize;
+
+ mOpenMode = OPEN_READ_WRITE_NEW;
+ mSeekType = SEEKABLE_READWRITE;
+ }
+
+public:
+ // Construct a read-only buffer wrapping caller-owned memory.
+ MiMemoryBufferBase(const void *readMem,MiU32 readLen)
+ {
+ init(readMem, readLen);
+ }
+
+ // Construct an empty read/write buffer; 'defaultWriteBufferSize' is the
+ // minimum capacity allocated once writing starts.
+ MiMemoryBufferBase(MiU32 defaultWriteBufferSize = BUFFER_SIZE_DEFAULT)
+ {
+ init(defaultWriteBufferSize);
+ }
+
+ virtual ~MiMemoryBufferBase(void)
+ {
+ reset();
+ }
+
+ // Allocate the write buffer (at least 'size' bytes, never smaller than the
+ // default) and alias the read pointers onto it so written data can be read
+ // back. No-op if a write buffer already exists.
+ void initWriteBuffer(MiU32 size)
+ {
+ if ( mWriteBuffer == NULL )
+ {
+ if ( size < mDefaultWriteBufferSize ) size = mDefaultWriteBufferSize;
+ mWriteBuffer = (MiU8 *)MI_ALLOC(size);
+ MI_ASSERT( mWriteBuffer );
+ mWriteLoc = mWriteBuffer;
+ mWriteStop = &mWriteBuffer[size];
+ mWriteBufferSize = size;
+ mReadBuffer = mWriteBuffer;
+ mReadStop = mWriteBuffer;
+ mReadLoc = mWriteBuffer;
+ }
+ }
+
+ // Free the write buffer and clear every pointer. Note this also clears the
+ // read pointers, so a read-only buffer becomes detached from its source.
+ void reset(void)
+ {
+ MI_FREE(mWriteBuffer);
+ mWriteBuffer = NULL;
+ mWriteBufferSize = 0;
+ mWriteLoc = NULL;
+ mReadBuffer = NULL;
+ mReadStop = NULL;
+ mReadLoc = NULL;
+ }
+
+ virtual OpenMode getOpenMode(void) const
+ {
+ return mOpenMode;
+ }
+
+
+ SeekType isSeekable(void) const
+ {
+ return mSeekType;
+ }
+
+ // Copy up to 'size' bytes into 'buffer', clamped to the bytes remaining;
+ // advances the read cursor. Returns the number of bytes actually copied.
+ virtual MiU32 read(void* buffer, MiU32 size)
+ {
+ if ( (mReadLoc+size) > mReadStop )
+ {
+ size = (MiU32)(mReadStop - mReadLoc);
+ }
+ if ( size != 0 )
+ {
+ memmove(buffer,mReadLoc,size);
+ mReadLoc+=size;
+ }
+ return size;
+ }
+
+ // Same as read() but does not advance the read cursor.
+ virtual MiU32 peek(void* buffer, MiU32 size)
+ {
+ if ( (mReadLoc+size) > mReadStop )
+ {
+ size = (MiU32)(mReadStop - mReadLoc);
+ }
+ if ( size != 0 )
+ {
+ memmove(buffer,mReadLoc,size);
+ }
+ return size;
+ }
+
+ // Append 'size' bytes at the write cursor, growing the buffer as needed.
+ // mReadStop tracks the write cursor so written data is immediately
+ // readable. Returns bytes written; 0 when not opened for writing.
+ virtual MiU32 write(const void* buffer, MiU32 size)
+ {
+ MI_ASSERT( mOpenMode == OPEN_READ_WRITE_NEW );
+ if ( mOpenMode == OPEN_READ_WRITE_NEW )
+ {
+ if ( (mWriteLoc+size) > mWriteStop )
+ growWriteBuffer(size);
+ memmove(mWriteLoc,buffer,size);
+ mWriteLoc+=size;
+ mReadStop = mWriteLoc;
+ }
+ else
+ {
+ size = 0;
+ }
+ return size;
+ }
+
+ // Direct access to the current read cursor (no bounds information).
+ MI_INLINE const MiU8 * getReadLoc(void) const { return mReadLoc; };
+ // Advance the read cursor by 'len' bytes, clamping at end-of-data.
+ MI_INLINE void advanceReadLoc(MiU32 len)
+ {
+ MI_ASSERT(mReadBuffer);
+ if ( mReadBuffer )
+ {
+ mReadLoc+=len;
+ if ( mReadLoc >= mReadStop )
+ {
+ mReadLoc = mReadStop;
+ }
+ }
+ }
+
+ // Current read offset from the start of the buffer (0 when unbuffered).
+ virtual MiU32 tellRead(void) const
+ {
+ MiU32 ret=0;
+
+ if ( mReadBuffer )
+ {
+ ret = (MiU32) (mReadLoc-mReadBuffer);
+ }
+ return ret;
+ }
+
+ // Current write offset from the start of the write buffer.
+ virtual MiU32 tellWrite(void) const
+ {
+ return (MiU32)(mWriteLoc-mWriteBuffer);
+ }
+
+ // Move the read cursor to absolute offset 'loc', clamped to end-of-data;
+ // returns the resulting offset.
+ virtual MiU32 seekRead(MiU32 loc)
+ {
+ MiU32 ret = 0;
+ MI_ASSERT(mReadBuffer);
+ if ( mReadBuffer )
+ {
+ mReadLoc = &mReadBuffer[loc];
+ if ( mReadLoc >= mReadStop )
+ {
+ mReadLoc = mReadStop;
+ }
+ ret = (MiU32) (mReadLoc-mReadBuffer);
+ }
+ return ret;
+ }
+
+ // Move the write cursor to absolute offset 'loc', clamped to the current
+ // capacity; returns the resulting offset (0 if nothing was ever written).
+ virtual MiU32 seekWrite(MiU32 loc)
+ {
+ MiU32 ret = 0;
+ MI_ASSERT( mOpenMode == OPEN_READ_WRITE_NEW );
+ if ( mWriteBuffer )
+ {
+ mWriteLoc = &mWriteBuffer[loc];
+ if ( mWriteLoc >= mWriteStop )
+ {
+ mWriteLoc = mWriteStop;
+ }
+ ret = (MiU32)(mWriteLoc-mWriteBuffer);
+ }
+ return ret;
+ }
+
+ // Memory-backed buffer: nothing to flush.
+ virtual void flush(void)
+ {
+
+ }
+
+ // Logical length: bytes readable in read mode, bytes written in write mode.
+ virtual MiU32 getFileLength(void) const
+ {
+ MiU32 ret = 0;
+ if ( mReadBuffer )
+ {
+ ret = (MiU32) (mReadStop-mReadBuffer);
+ }
+ else if ( mWriteBuffer )
+ {
+ ret = (MiU32)(mWriteLoc-mWriteBuffer);
+ }
+ return ret;
+ }
+
+ // Returns the number of bytes written so far — NOT the allocated capacity,
+ // despite the name (capacity is mWriteBufferSize).
+ MiU32 getWriteBufferSize(void) const
+ {
+ return (MiU32)(mWriteLoc-mWriteBuffer);
+ }
+
+ // Rewind/advance the write cursor to an address inside the buffer.
+ // NOTE(review): the assert rejects writeLoc == mWriteStop, so the
+ // one-past-the-end position cannot be set under assert builds — confirm
+ // that exclusion is intended.
+ void setWriteLoc(MiU8 *writeLoc)
+ {
+ MI_ASSERT(writeLoc >= mWriteBuffer && writeLoc < mWriteStop );
+ mWriteLoc = writeLoc;
+ mReadStop = mWriteLoc;
+ }
+
+ const MiU8 * getWriteBuffer(void) const
+ {
+ return mWriteBuffer;
+ }
+
+ /**
+ * Attention: if you use aligned allocator you cannot free memory with MI_FREE macros instead use deallocate method from base
+ */
+ MiU8 * getWriteBufferOwnership(MiU32 &dataLen) // return the write buffer, and zero it out, the caller is taking ownership of the memory
+ {
+ MiU8 *ret = mWriteBuffer;
+ dataLen = (MiU32)(mWriteLoc-mWriteBuffer);
+ mWriteBuffer = NULL;
+ mWriteLoc = NULL;
+ mWriteStop = NULL;
+ mWriteBufferSize = 0;
+ return ret;
+ }
+
+
+ // Round the read cursor up to the next multiple of 'a' bytes.
+ void alignRead(MiU32 a)
+ {
+ MiU32 loc = tellRead();
+ MiU32 aloc = ((loc+(a-1))/a)*a;
+ if ( aloc != loc )
+ {
+ seekRead(aloc);
+ }
+ }
+
+ // Round the write cursor up to the next multiple of 'a' bytes.
+ void alignWrite(MiU32 a)
+ {
+ MiU32 loc = tellWrite();
+ MiU32 aloc = ((loc+(a-1))/a)*a;
+ if ( aloc != loc )
+ {
+ seekWrite(aloc);
+ }
+ }
+
+private:
+
+ // double the size of the write buffer or at least as large as the 'size' value passed in.
+ // Preserves existing contents and rebases both the write and read cursors
+ // onto the new allocation.
+ void growWriteBuffer(MiU32 size)
+ {
+ if ( mWriteBuffer == NULL )
+ {
+ if ( size < mDefaultWriteBufferSize ) size = mDefaultWriteBufferSize;
+ initWriteBuffer(size);
+ }
+ else
+ {
+ MiU32 oldWriteIndex = (MiU32) (mWriteLoc - mWriteBuffer);
+ MiU32 newSize = mWriteBufferSize*2;
+ MiU32 avail = newSize-oldWriteIndex;
+ if ( size >= avail ) newSize = newSize+size;
+ MiU8 *writeBuffer = (MiU8 *)MI_ALLOC(newSize);
+ MI_ASSERT( writeBuffer );
+ memmove(writeBuffer,mWriteBuffer,mWriteBufferSize);
+ MI_FREE(mWriteBuffer);
+ mWriteBuffer = writeBuffer;
+ mWriteBufferSize = newSize;
+ mWriteLoc = &mWriteBuffer[oldWriteIndex];
+ mWriteStop = &mWriteBuffer[mWriteBufferSize];
+ MiU32 oldReadLoc = (MiU32)(mReadLoc-mReadBuffer);
+ mReadBuffer = mWriteBuffer;
+ mReadStop = mWriteLoc;
+ mReadLoc = &mReadBuffer[oldReadLoc];
+ }
+ }
+
+ // Read window: [mReadBuffer, mReadStop), cursor mReadLoc. In write mode
+ // these alias the write buffer with mReadStop == mWriteLoc.
+ const MiU8 *mReadBuffer;
+ const MiU8 *mReadLoc;
+ const MiU8 *mReadStop;
+
+ // Write window: [mWriteBuffer, mWriteStop), cursor mWriteLoc.
+ MiU8 *mWriteBuffer;
+ MiU8 *mWriteLoc;
+ MiU8 *mWriteStop;
+
+ MiU32 mWriteBufferSize; // allocated capacity of mWriteBuffer
+ MiU32 mDefaultWriteBufferSize; // minimum first allocation
+ OpenMode mOpenMode;
+ SeekType mSeekType;
+
+};
+
+//Use this class if you want to use PhysX memory allocator
+class MiMemoryBuffer: public MiMemoryBufferBase, public MeshImportAllocated
+{
+ typedef MiMemoryBufferBase BaseClass;
+
+public:
+ // Read-only view over caller-owned memory (memory is borrowed, not freed).
+ MiMemoryBuffer(const void *readMem,MiU32 readLen): BaseClass(readMem, readLen) {}
+ // Growable read/write buffer starting at the given minimum capacity.
+ MiMemoryBuffer(MiU32 defaultWriteBufferSize=BUFFER_SIZE_DEFAULT): BaseClass(defaultWriteBufferSize) {}
+};
+
+}
+
+#endif // MI_MEMORY_BUFFER_H
+
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiPlatformConfig.h b/APEX_1.4/shared/general/meshimport/include/utils/MiPlatformConfig.h
new file mode 100644
index 00000000..89d7d7f6
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiPlatformConfig.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef PLATFORM_CONFIG_MESH_IMPORT_H
+
+#define PLATFORM_CONFIG_MESH_IMPORT_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <float.h>
+#include <new>
+
+#include "MeshImportTypes.h"
+
+// This header file provides a brief compatibility layer between the PhysX and APEX SDK foundation header files.
+// Modify this header file to your own data types and memory allocation routines and do a global find/replace if necessary
+
+namespace mimp
+{
+
+class MiEmpty;
+
+#define MI_SIGN_BITMASK 0x80000000
+
+ // avoid unreferenced parameter warning (why not just disable it?)
+ // PT: or why not just omit the parameter's name from the declaration????
+#define MI_FORCE_PARAMETER_REFERENCE(_P) (void)(_P);
+#define MI_UNUSED(_P) MI_FORCE_PARAMETER_REFERENCE(_P)
+
+#define MI_ALLOC(x) ::malloc(x)
+#define MI_FREE(x) ::free(x)
+#define MI_REALLOC(x,y) ::realloc(x,y)
+
+#define MI_ASSERT(x) assert(x)
+#define MI_ALWAYS_ASSERT() assert(0)
+
+#define MI_PLACEMENT_NEW(p, T) new(p) T
+
+// Allocation mixin used by MI_NEW / MI_PLACEMENT_NEW: every heap path routes
+// through the MI_ALLOC/MI_FREE macros; the name/file/line/size tags accepted
+// by the tagged overloads are for instrumentation and are ignored here.
+class MeshImportAllocated
+{
+public:
+ // Placement-style new: constructs into the storage of an existing object;
+ // allocates nothing.
+ MI_INLINE void* operator new(size_t size,MeshImportAllocated *t)
+ {
+ MI_FORCE_PARAMETER_REFERENCE(size);
+ return t;
+ }
+
+ // Tagged allocation used by the MI_NEW macro.
+ MI_INLINE void* operator new(size_t size,const char *className,const char* fileName, int lineno,size_t classSize)
+ {
+ MI_FORCE_PARAMETER_REFERENCE(className);
+ MI_FORCE_PARAMETER_REFERENCE(fileName);
+ MI_FORCE_PARAMETER_REFERENCE(lineno);
+ MI_FORCE_PARAMETER_REFERENCE(classSize);
+ return MI_ALLOC(size);
+ }
+
+ // Tagged array allocation (array form of the MI_NEW path).
+ inline void* operator new[](size_t size,const char *className,const char* fileName, int lineno,size_t classSize)
+ {
+ MI_FORCE_PARAMETER_REFERENCE(className);
+ MI_FORCE_PARAMETER_REFERENCE(fileName);
+ MI_FORCE_PARAMETER_REFERENCE(lineno);
+ MI_FORCE_PARAMETER_REFERENCE(classSize);
+ return MI_ALLOC(size);
+ }
+
+ // Matching delete for the placement-style new above; only invoked by the
+ // compiler when a constructor throws, hence the assert.
+ inline void operator delete(void* p,MeshImportAllocated *t)
+ {
+ MI_FORCE_PARAMETER_REFERENCE(p);
+ MI_FORCE_PARAMETER_REFERENCE(t);
+ MI_ALWAYS_ASSERT(); // should never be executed
+ }
+
+ inline void operator delete(void* p)
+ {
+ MI_FREE(p);
+ }
+
+ inline void operator delete[](void* p)
+ {
+ MI_FREE(p);
+ }
+
+ // Matching delete for the tagged new (constructor-throw path).
+ inline void operator delete(void *p,const char *className,const char* fileName, int line,size_t classSize)
+ {
+ MI_FORCE_PARAMETER_REFERENCE(className);
+ MI_FORCE_PARAMETER_REFERENCE(fileName);
+ MI_FORCE_PARAMETER_REFERENCE(line);
+ MI_FORCE_PARAMETER_REFERENCE(classSize);
+ MI_FREE(p);
+ }
+
+ // Matching array delete for the tagged array new.
+ inline void operator delete[](void *p,const char *className,const char* fileName, int line,size_t classSize)
+ {
+ MI_FORCE_PARAMETER_REFERENCE(className);
+ MI_FORCE_PARAMETER_REFERENCE(fileName);
+ MI_FORCE_PARAMETER_REFERENCE(line);
+ MI_FORCE_PARAMETER_REFERENCE(classSize);
+ MI_FREE(p);
+ }
+
+};
+}; // end of mimp namespace
+
+#define MI_NEW(T) new(#T,__FILE__,__LINE__,sizeof(T)) T
+
+#pragma warning(push)
+#pragma warning(disable:4996)
+
+#ifdef PLUGINS_EMBEDDED
+
+#include "PsString.h"
+#include "PxIntrinsics.h"
+
+#define MESH_IMPORT_STRING physx::shdfnd
+#define MESH_IMPORT_INTRINSICS nvidia::intrinsics
+
+#else
+
+namespace mimp
+{
+namespace string
+{
+
+ // Thin wrappers around the MSVC "secure" CRT string functions so call
+ // sites can stay portable behind the MESH_IMPORT_STRING macro.
+ MI_INLINE MiI32 stricmp(const char *str, const char *str1) {return(::_stricmp(str, str1));}
+ MI_INLINE MiI32 strnicmp(const char *str, const char *str1, size_t len) {return(::_strnicmp(str, str1, len));}
+ MI_INLINE MiI32 strncat_s(char* a, MiI32 b, const char* c, size_t d) {return(::strncat_s(a, b, c, d));}
+ MI_INLINE MiI32 strncpy_s( char *strDest, size_t sizeInBytes, const char *strSource, size_t count) {return(::strncpy_s( strDest,sizeInBytes,strSource, count));}
+ MI_INLINE void strcpy_s(char* dest, size_t size, const char* src) {::strcpy_s(dest, size, src);}
+ MI_INLINE void strcat_s(char* dest, size_t size, const char* src) {::strcat_s(dest, size, src);}
+ // va_list formatter; returns the CRT result (negative on truncation/error).
+ MI_INLINE MiI32 _vsnprintf(char* dest, size_t size, const char* src, va_list arg)
+ {
+ MiI32 r = ::_vsnprintf(dest, size, src, arg);
+
+ return r;
+ }
+ // Bounds-checked va_list formatter.
+ MI_INLINE MiI32 vsprintf_s(char* dest, size_t size, const char* src, va_list arg)
+ {
+ MiI32 r = ::vsprintf_s(dest, size, src, arg);
+
+ return r;
+ }
+
+ // Variadic front-end to vsprintf_s.
+ MI_INLINE MiI32 sprintf_s( char * _DstBuf, size_t _DstSize, const char * _Format, ...)
+ {
+ va_list arg;
+ va_start( arg, _Format );
+ MiI32 r = ::vsprintf_s(_DstBuf, _DstSize, _Format, arg);
+ va_end(arg);
+
+ return r;
+ }
+ // Variadic scan wrapper.
+ // BUGFIX: the original forwarded the va_list 'arg' as a single variadic
+ // argument to ::sscanf_s, which is undefined behavior and cannot deliver
+ // the caller's arguments; route through the va_list form ::vsscanf_s.
+ MI_INLINE MiI32 sscanf_s( const char *buffer, const char *format, ...)
+ {
+ va_list arg;
+ va_start( arg, format );
+ MiI32 r = ::vsscanf_s(buffer, format, arg);
+ va_end(arg);
+
+ return r;
+ }
+
+ // In-place ASCII lower-casing: each 'A'..'Z' byte is shifted into
+ // 'a'..'z'; every other byte is left untouched.
+ MI_INLINE void strlwr(char* str)
+ {
+ for (char *p = str; *p != 0; ++p)
+ {
+ if ( *p >= 'A' && *p <= 'Z' )
+ {
+ *p += 32;
+ }
+ }
+ }
+
+ // In-place ASCII upper-casing: each 'a'..'z' byte is shifted into
+ // 'A'..'Z'; every other byte is left untouched.
+ MI_INLINE void strupr(char* str)
+ {
+ for (char *p = str; *p != 0; ++p)
+ {
+ if ( *p >= 'a' && *p <= 'z' )
+ {
+ *p -= 32;
+ }
+ }
+ }
+
+
+}; // end of string namespace
+}; // end of mimp namespace
+
+
+namespace mimp
+{
+
+ namespace intrinsics
+ {
+ // True when 'a' is neither infinite nor NaN (wraps the CRT _finite test).
+ static MI_FORCE_INLINE bool isFinite(float a)
+ {
+ return _finite(a) != 0;
+ }
+ // Double-precision overload of the finiteness test above.
+ static MI_FORCE_INLINE bool isFinite(double a)
+ {
+ return _finite(a) != 0;
+ }
+
+ }
+
+};
+
+#define MESH_IMPORT_STRING mimp::string
+#define MESH_IMPORT_INTRINSICS mimp::intrinsics
+
+#endif
+
+
+#pragma warning(pop)
+
+
+#define USE_STL 0 // set to 1 to use the standard template library for all code; if off it uses high performance custom containers which trap all memory allocations.
+
+#if USE_STL
+
+#include <map>
+#include <set>
+#include <vector>
+#define STDNAME std
+
+#else
+
+#include "MiVector.h"
+#include "MiMapSet.h"
+
+#define STDNAME mimp
+
+#endif
+
+
+
+#define MI_MAX_I8 127 //maximum possible sbyte value, 0x7f
+#define MI_MIN_I8 (-128) //minimum possible sbyte value, 0x80
+#define MI_MAX_U8 255U //maximum possible ubyte value, 0xff
+#define MI_MIN_U8 0 //minimum possible ubyte value, 0x00
+#define MI_MAX_I16 32767 //maximum possible sword value, 0x7fff
+#define MI_MIN_I16 (-32768) //minimum possible sword value, 0x8000
+#define MI_MAX_U16 65535U //maximum possible uword value, 0xffff
+#define MI_MIN_U16 0 //minimum possible uword value, 0x0000
+#define MI_MAX_I32 2147483647 //maximum possible sdword value, 0x7fffffff
+#define MI_MIN_I32 (-2147483647 - 1) //minimum possible sdword value, 0x80000000
+#define MI_MAX_U32 4294967295U //maximum possible udword value, 0xffffffff
+#define MI_MIN_U32 0 //minimum possible udword value, 0x00000000
+#define MI_MAX_F32 3.4028234663852885981170418348452e+38F
+//maximum possible float value
+#define MI_MAX_F64 DBL_MAX //maximum possible double value
+
+#define MI_EPS_F32 FLT_EPSILON //maximum relative error of float rounding
+#define MI_EPS_F64 DBL_EPSILON //maximum relative error of double rounding
+
+#define MI_MAX_REAL MI_MAX_F32
+#define MI_EPS_REAL MI_EPS_F32
+
+
+
+
+#endif
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiStringDictionary.h b/APEX_1.4/shared/general/meshimport/include/utils/MiStringDictionary.h
new file mode 100644
index 00000000..e4320772
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiStringDictionary.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_STRING_DICTIONARY_H
+#define MI_STRING_DICTIONARY_H
+
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "MiPlatformConfig.h"
+#include "MiStringTable.h"
+
+
+#define __LINE ( __LINE__ )
+#define __GETLINE(x) "(" #x ")"
+#define GETLINE __GETLINE __LINE
+#define IDE_MESSAGE(y) message ( __FILE__ GETLINE " : " #y " " )
+
+#pragma warning(push)
+#pragma warning(disable:4996)
+
+namespace mimp
+{
+
+extern const char *nullstring;
+extern const char *emptystring;
+
+
+// Lightweight handle to an interned string. All non-sentinel instances are
+// produced through the global StringDict, so two equal strings always share
+// one pointer — identity comparisons on mString are therefore sufficient
+// for equality and hashing.
+class StringRef
+{
+ friend class StringRefHash;
+ friend class StringRefEqual;
+public:
+
+ static const StringRef Null;
+ static const StringRef Empty;
+ static const StringRef EmptyInitializer(); // use this for static initializers -ENS
+
+ // Default-construct as the shared 'NULL' sentinel string.
+ StringRef(void)
+ {
+ mString = nullstring;
+ }
+
+ // Reinterpret an integer key as an interned pointer (inverse of GetSizeT).
+ StringRef(size_t index)
+ {
+ mString = (const char *)index;
+ }
+
+ StringRef(const char *str);
+
+ ~StringRef(void)
+ {
+ }
+
+ inline StringRef(const StringRef &str);
+
+ operator const char *() const
+ {
+ return mString;
+ }
+
+ // Hash functor: interned strings are unique, so the pointer value itself
+ // serves as the hash.
+ size_t operator()(const StringRef& key_value) const
+ {
+ return (size_t)(key_value.mString);
+ }
+
+ // Equality functor: interned strings compare equal exactly when their
+ // pointers match.
+ // BUGFIX: this previously returned 's1.mString == s2.mString == 0',
+ // which parses as '(s1.mString == s2.mString) == 0' and therefore
+ // reported true for *unequal* refs — an inverted equality predicate.
+ bool operator()(const StringRef &s1, const StringRef &s2) const
+ {
+ return s1.mString == s2.mString;
+ }
+
+ const char * Get(void) const { return mString; };
+
+ // The interned pointer as an integer key (usable as a map key).
+ size_t GetSizeT(void) const { return (size_t)mString; };
+
+ // Adopt a pointer directly; caller must pass an interned/sentinel string.
+ void Set(const char *str)
+ {
+ mString = str;
+ }
+
+ const StringRef &operator= (const StringRef& rhs )
+ {
+ mString = rhs.Get();
+ return *this;
+ }
+
+ // Pointer-identity equality (valid because strings are interned).
+ bool operator== ( const StringRef& rhs ) const
+ {
+ return rhs.mString == mString;
+ }
+
+ // NOTE(review): the four relational operators below compare with the
+ // operands reversed ('a < b' tests rhs.mString < mString, i.e. b < a).
+ // The inversion is applied consistently, so they still form a valid
+ // (descending) pointer ordering; left unchanged because containers built
+ // on this ordering would reorder if it were flipped — confirm intent.
+ bool operator< ( const StringRef& rhs ) const
+ {
+ return rhs.mString < mString;
+ }
+
+ bool operator!= ( const StringRef& rhs ) const
+ {
+ return rhs.mString != mString;
+ }
+
+ bool operator> ( const StringRef& rhs ) const
+ {
+ return rhs.mString > mString;
+ }
+
+ bool operator<= ( const StringRef& rhs ) const
+ {
+ return rhs.mString <= mString;
+ }
+
+ bool operator>= ( const StringRef& rhs ) const
+ {
+ return rhs.mString >= mString;
+ }
+
+ // True when this string begins with the non-empty 'prefix' (case-sensitive).
+ bool SamePrefix(const char *prefix) const
+ {
+ unsigned int len = (unsigned int)strlen(prefix);
+ if ( len && strncmp(mString,prefix,len) == 0 ) return true;
+ return false;
+ }
+
+ // True when this string ends with 'suf' (case-sensitive).
+ bool SameSuffix(const StringRef &suf) const
+ {
+ const char *source = mString;
+ const char *suffix = suf.mString;
+ unsigned int len1 = (unsigned int)strlen(source);
+ unsigned int len2 = (unsigned int)strlen(suffix);
+ if ( len1 < len2 ) return false;
+ const char *compare = &source[(len1-len2)];
+ if ( strcmp(compare,suffix) == 0 ) return true;
+ return false;
+ }
+
+private:
+ const char *mString; // the actual char ptr
+};
+
+// String interning dictionary: maps equal text to a single canonical
+// pointer. The two sentinel spellings reuse the shared nullstring /
+// emptystring pointers; everything else is stored once in mStringTable.
+class StringDict : public mimp::MeshImportAllocated
+{
+public:
+ StringDict(void)
+ {
+ mLogging = false;
+ }
+
+ ~StringDict(void)
+ {
+ }
+
+ // Intern 'text' and return a ref to the canonical copy. A NULL input
+ // returns a default-constructed (nullstring) ref.
+ StringRef Get(const char *text)
+ {
+ StringRef ref;
+ if ( text )
+ {
+ if ( strcmp(text,nullstring) == 0 )
+ {
+ ref.Set(nullstring);
+ }
+ else
+ {
+ if ( strcmp(text,emptystring) == 0 )
+ {
+ ref.Set(emptystring);
+ }
+ else
+ {
+ bool first;
+ const char *foo = mStringTable.Get(text,first);
+ ref.Set(foo);
+ }
+ }
+ }
+ return ref;
+ }
+
+ // Intern 'text', bypassing the sentinel checks; 'first' reports whether
+ // this call created the canonical copy.
+ StringRef Get(const char *text,bool &first)
+ {
+ StringRef ref;
+ const char *foo = mStringTable.Get(text,first);
+ ref.Set(foo);
+ return ref;
+ }
+
+ void SetLogging(bool state)
+ {
+ mLogging = state;
+ }
+
+ bool GetLogging(void) const
+ {
+ return mLogging;
+ }
+private:
+ bool mLogging; // diagnostic flag only; not consulted in this header
+ StringTable mStringTable;
+};
+
+// Copy constructor: shares the interned pointer (no allocation, no copy).
+inline StringRef::StringRef(const StringRef &str)
+{
+ mString = str.Get();
+}
+
+
+extern StringDict *gStringDict;
+
+// Comparator ordering StringRefs by case-insensitive *content* — unlike the
+// member relational operators, which compare interned pointer values.
+class StringSortRef
+{
+public:
+
+ bool operator()(const StringRef &a,const StringRef &b) const
+ {
+ const char *str1 = a.Get();
+ const char *str2 = b.Get();
+ int r = MESH_IMPORT_STRING::stricmp(str1,str2);
+ return r < 0;
+ }
+};
+
+
+
+// Lazily create and return the process-wide string dictionary. The instance
+// is never freed — presumably an intentional process-lifetime singleton.
+// NOTE(review): the lazy init is not thread-safe; confirm callers serialize
+// the first use.
+static inline StringDict * getGlobalStringDict(void)
+{
+ if ( gStringDict == 0 )
+ {
+ gStringDict = MI_NEW(StringDict);
+ }
+ return gStringDict;
+}
+
+#define SGET(x) getGlobalStringDict()->Get(x)
+
+// Construct from raw text by interning it through the global dictionary.
+inline StringRef::StringRef(const char *str)
+{
+ StringRef ref = SGET(str);
+ mString = ref.mString;
+}
+
+
+}; // end of namespace
+
+#pragma warning(pop)
+
+#endif
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiStringTable.h b/APEX_1.4/shared/general/meshimport/include/utils/MiStringTable.h
new file mode 100644
index 00000000..26362263
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiStringTable.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_STRING_TABLE_H
+
+#define MI_STRING_TABLE_H
+
+#pragma warning( push )
+#pragma warning( disable: 4555 ) // expression has no effect; expected expression with side-effect
+#pragma warning( pop )
+
+#include <assert.h>
+#include "MiPlatformConfig.h"
+
+//********** String Table
+#pragma warning(push)
+#pragma warning(disable:4996)
+
+namespace mimp
+{
+
+// Case-insensitive content ordering for raw C strings; used as the
+// comparator of StringMap below.
+class StringSort
+{
+public:
+ bool operator()(const char *str1,const char *str2) const
+ {
+ return MESH_IMPORT_STRING::stricmp(str1,str2) < 0;
+ }
+};
+
+typedef STDNAME::map< const char *, const char *, StringSort> StringMap;
+
+// Owns the canonical heap copies of interned strings. Each entry's key and
+// value are the same allocation, so release() frees both at once.
+class StringTable : public mimp::MeshImportAllocated
+{
+public:
+
+ StringTable(void)
+ {
+ };
+
+ ~StringTable(void)
+ {
+ release();
+ }
+
+ // Free every interned copy and empty the map.
+ void release(void)
+ {
+ for (StringMap::iterator i=mStrings.begin(); i!=mStrings.end(); ++i)
+ {
+ const char *string = (*i).second;
+ MI_FREE( (void *)string );
+ }
+ mStrings.clear();
+ }
+
+ // Return the canonical copy of 'str', allocating and storing one on first
+ // sight; 'first' reports whether this call created it. NULL maps to "".
+ // Note: lookup is case-insensitive (StringSort), so the first-seen casing
+ // becomes the canonical spelling.
+ const char * Get(const char *str,bool &first)
+ {
+ str = str ? str : "";
+ const char *ret;
+ StringMap::iterator found = mStrings.find(str);
+ if ( found == mStrings.end() )
+ {
+ MiU32 slen = (MiU32)strlen(str);
+ char *string = (char *)MI_ALLOC(slen+1);
+ strcpy(string,str);
+ mStrings[string] = string;
+ ret = string;
+ first = true;
+ }
+ else
+ {
+ first = false;
+ ret = (*found).second;
+ }
+ return ret;
+ };
+
+
+private:
+ StringMap mStrings; // key and value alias one owned allocation
+// char mScratch[512];
+};
+
+// Maps interned strings to MiU32 values. Strings are first canonicalized
+// through the embedded StringTable, so the map key is the interned pointer
+// cast to size_t (pointer identity, not content).
+class StringTableInt
+{
+public:
+ typedef STDNAME::map< size_t, MiU32 > StringIntMap;
+
+ // Look up 'str'; on success stores the value in 'v' and returns true.
+ bool Get(const char *str,MiU32 &v)
+ {
+ bool ret = false;
+ bool first;
+ str = mStringTable.Get(str,first);
+ size_t index = (size_t)str;
+
+ StringIntMap::iterator found = mStringInt.find(index);
+ if ( found != mStringInt.end() )
+ {
+ v = (*found).second;
+ ret = true;
+ }
+ return ret;
+ }
+
+ // Convenience lookup returning 0 when the key is absent.
+ MiU32 Get(const char *str)
+ {
+ MiU32 ret=0;
+ Get(str,ret);
+ return ret;
+ }
+
+ // Insert a new mapping; asserts (and keeps the old value) on duplicates.
+ void Add(const char *str,MiU32 v)
+ {
+ bool first;
+ str = mStringTable.Get(str,first);
+ size_t index = (size_t)str;
+ StringIntMap::iterator found = mStringInt.find(index);
+ if ( found != mStringInt.end() )
+ {
+ assert(0);
+ }
+ else
+ {
+ mStringInt[index] = v;
+ }
+ }
+
+private:
+ StringTable mStringTable; // canonical storage for the key strings
+ StringIntMap mStringInt; // interned pointer -> value
+};
+
+}; // end of namespace
+
+#pragma warning(pop)
+
+#endif
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiSutil.h b/APEX_1.4/shared/general/meshimport/include/utils/MiSutil.h
new file mode 100644
index 00000000..a40c2a87
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiSutil.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_SUTIL_H
+#define MI_SUTIL_H
+
+namespace mimp
+{
+
+// Miscellaneous string / parsing / path utilities (implemented elsewhere).
+char * stristr(const char *str,const char *key); // case insensitive str str
+bool isstristr(const char *str,const char *key); // bool true/false based on case insenstive strstr
+// Hex parsers: when 'next' is non-NULL it receives the position just past
+// the parsed text. GetHEX1/2/4 presumably read 1/2/4 bytes of hex — confirm
+// against the implementation.
+MiU32 GetHEX(const char *foo,const char **next=0);
+MiU8 GetHEX1(const char *foo,const char **next=0);
+MiU16 GetHEX2(const char *foo,const char **next=0);
+MiU32 GetHEX4(const char *foo,const char **next=0);
+MiF32 GetFloatValue(const char *str,const char **next=0);
+MiI32 GetIntValue(const char *str,const char **next=0);
+const char * SkipWhitespace(const char *str);
+bool IsWhitespace(char c);
+const char * FloatString(MiF32 v,bool binary=false);
+const char * GetTrueFalse(MiU32 state);
+bool CharToWide(const char *source,wchar_t *dest,MiI32 maxlen);
+bool WideToChar(const wchar_t *source,char *dest,MiI32 maxlen);
+const char ** GetArgs(char *str,MiI32 &count); // destructable parser, stomps EOS markers on the input string!
+MiI32 GetUserArgs(const char *us,const char *key,const char **args);
+bool GetUserSetting(const char *us,const char *key,MiI32 &v);
+bool GetUserSetting(const char *us,const char *key,const char * &v);
+const char * GetRootName(const char *fname); // strip off everything but the 'root' file name.
+bool IsTrueFalse(const char *c);
+bool IsDirectory(const char *fname,char *path,char *basename,char *postfix);
+bool hasSpace(const char *str); // true if the string contains a space
+const char * lastDot(const char *src);
+const char * lastSlash(const char *src); // last forward or backward slash character, null if none found.
+const char * lastChar(const char *src,char c);
+const char *fstring(MiF32 v);
+const char * formatNumber(MiI32 number); // JWR format this integer into a fancy comma delimited string
+bool fqnMatch(const char *n1,const char *n2); // returns true if two fully specified file names are 'the same' but ignores case sensitivty and treats either a forward or backslash as the same character.
+bool getBool(const char *str);
+bool needsQuote(const char *str); // if this string needs quotes around it (spaces, commas, #, etc)
+
+#define MAX_FQN 512 // maximum size of an FQN string
+void normalizeFQN(const wchar_t *source,wchar_t *dest);
+void normalizeFQN(const char *source,char *dest);
+bool endsWith(const char *str,const char *ends,bool caseSensitive);
+
+};
+
+#endif // MI_SUTIL_H
diff --git a/APEX_1.4/shared/general/meshimport/include/utils/MiVector.h b/APEX_1.4/shared/general/meshimport/include/utils/MiVector.h
new file mode 100644
index 00000000..b9dfd7ad
--- /dev/null
+++ b/APEX_1.4/shared/general/meshimport/include/utils/MiVector.h
@@ -0,0 +1,569 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef MI_FOUNDATION_VECTOR_H
+#define MI_FOUNDATION_VECTOR_H
+
+#include "MiPlatformConfig.h"
+
+namespace mimp
+{
+
+ /*!
+ An array is a sequential container.
+
+ Implementation note
+ * entries between 0 and size are valid objects
+ * we use inheritance to build this because the array is included inline in a lot
+ of objects and we want the allocator to take no space if it's not stateful, which
+ aggregation doesn't allow. Also, we want the metadata at the front for the inline
+ case where the allocator contains some inline storage space
+ */
+ template<class T>
+ class vector
+ {
+
+ public:
+
+ typedef T* iterator;
+ typedef const T* const_iterator;
+
+
+ // In-place/serialization constructor: deliberately leaves the members as
+ // found in memory and only tags the capacity sign bit when data is
+ // present. NOTE(review): mData/mCapacity are read before any member of
+ // this object is initialized — this is only valid when constructing over
+ // pre-filled memory; confirm against the MiEmpty usage pattern.
+ explicit vector(const MiEmpty& /*v*/)
+ {
+ if(mData)
+ mCapacity |= MI_SIGN_BITMASK;
+ }
+
+ /*!
+ Default array constructor. Initialize an empty array
+ */
+ MI_INLINE explicit vector(void)
+ : mData(0), mSize(0), mCapacity(0)
+ {}
+
+ /*!
+ Initialize array with given capacity
+ */
+ // Creates 'size' copies of 'a' via resize(), despite the comment saying
+ // "capacity" — the elements are actually constructed.
+ MI_INLINE explicit vector(MiU32 size, const T& a = T())
+ : mData(0), mSize(0), mCapacity(0)
+ {
+ resize(size, a);
+ }
+
+ // This is necessary else the basic default copy constructor is used in the case of both arrays being of the same template instance
+ // The C++ standard clearly states that a template constructor is never a copy constructor [2]. In other words,
+ // the presence of a template constructor does not suppress the implicit declaration of the copy constructor.
+ // Also never make a copy constructor explicit, or copy-initialization* will no longer work. This is because
+ // 'binding an rvalue to a const reference requires an accessible copy constructor' (http://gcc.gnu.org/bugs/)
+ // *http://stackoverflow.com/questions/1051379/is-there-a-difference-in-c-between-copy-initialization-and-assignment-initializ
+ // Deep-copy constructor.
+ // NOTE(review): members are not zero-initialized first — relies on
+ // copy(other) to fully initialize mData/mSize/mCapacity; confirm.
+ MI_INLINE vector(const vector& other)
+ {
+ copy(other);
+ }
+
+ /*!
+ Initialize array with given length
+ */
+ // Copies the half-open element range [first, last); a reversed range is
+ // treated as empty.
+ MI_INLINE explicit vector(const T* first, const T* last)
+ : mSize(last<first?0:(MiU32)(last-first)), mCapacity(mSize)
+ {
+ mData = allocate(mSize);
+ copy(mData, mData + mSize, first);
+ }
+
+ /*!
+ Destructor
+ */
+ // Destroys the live elements, then frees the storage unless it is
+ // user-provided memory (sign bit set by the MiEmpty constructor).
+ MI_INLINE ~vector()
+ {
+ destroy(mData, mData + mSize);
+
+ if(capacity() && !isInUserMemory())
+ deallocate(mData);
+ }
+
+ /*!
+ Assignment operator. Copy content (deep-copy)
+ */
+ // Self-assignment safe; destroys current elements, then copies rhs's
+ // elements into (possibly re-reserved) storage.
+ MI_INLINE vector& operator= (const vector<T>& rhs)
+ {
+ if(&rhs == this)
+ return *this;
+
+ clear();
+ reserve(rhs.mSize);
+ copy(mData, mData + rhs.mSize, rhs.mData);
+
+ mSize = rhs.mSize;
+ return *this;
+ }
+
+ /*!
+ vector indexing operator.
+ \param i
+ The index of the element that will be returned.
+ \return
+ The element i in the array.
+ */
+ MI_FORCE_INLINE const T& operator[] (MiU32 i) const
+ {
+ MI_ASSERT(i < mSize);
+ return mData[i];
+ }
+
+ /*!
+ vector indexing operator.
+ \param i
+ The index of the element that will be returned.
+ \return
+ The element i in the array.
+ */
+ // Mutable overload of operator[].
+ MI_FORCE_INLINE T& operator[] (MiU32 i)
+ {
+ MI_ASSERT(i < mSize);
+ return mData[i];
+ }
+
+ /*!
+ Returns a pointer to the initial element of the array.
+ \return
+ a pointer to the initial element of the array.
+ */
+ MI_FORCE_INLINE const_iterator begin() const
+ {
+ return mData;
+ }
+
+ // Mutable overload of begin().
+ MI_FORCE_INLINE iterator begin()
+ {
+ return mData;
+ }
+
+ /*!
+ Returns an iterator beyond the last element of the array. Do not dereference.
+ \return
+ a pointer to the element beyond the last element of the array.
+ */
+
+ MI_FORCE_INLINE const_iterator end() const
+ {
+ return mData+mSize;
+ }
+
+ // Mutable overload of end().
+ MI_FORCE_INLINE iterator end()
+ {
+ return mData+mSize;
+ }
+
+ /*!
+ Returns a reference to the first element of the array. Undefined if the array is empty.
+ \return a reference to the first element of the array
+ */
+
+ MI_FORCE_INLINE const T& front() const
+ {
+ MI_ASSERT(mSize);
+ return mData[0];
+ }
+
+ // Mutable overload of front().
+ MI_FORCE_INLINE T& front()
+ {
+ MI_ASSERT(mSize);
+ return mData[0];
+ }
+
+ /*!
+ Returns a reference to the last element of the array. Undefined if the array is empty
+ \return a reference to the last element of the array
+ */
+
+ MI_FORCE_INLINE const T& back() const
+ {
+ MI_ASSERT(mSize);
+ return mData[mSize-1];
+ }
+
+ // Mutable overload of back().
+ MI_FORCE_INLINE T& back()
+ {
+ MI_ASSERT(mSize);
+ return mData[mSize-1];
+ }
+
+
+ /*!
+ Returns the number of entries in the array. This can, and probably will,
+ differ from the array capacity.
+ \return
+ The number of of entries in the array.
+ */
+ MI_FORCE_INLINE MiU32 size() const
+ {
+ return mSize;
+ }
+
+ /*!
+ Clears the array.
+ */
+ // Destroys all elements but keeps the allocated capacity.
+ MI_INLINE void clear()
+ {
+ destroy(mData, mData + mSize);
+ mSize = 0;
+ }
+
+ /*!
+ Returns whether the array is empty (i.e. whether its size is 0).
+ \return
+ true if the array is empty
+ */
+ MI_FORCE_INLINE bool empty() const
+ {
+ return mSize==0;
+ }
+
+ /*!
+ Finds the first occurrence of an element in the array.
+ \param a
+ The element to find.
+ */
+
+
+ // Linear scan for the first element equal to 'a'; returns end() when no
+ // match is found.
+ MI_INLINE iterator find(const T& a)
+ {
+ MiU32 index;
+ for(index=0;index<mSize && mData[index]!=a;index++)
+ ;
+ return mData+index;
+ }
+
+ // Const overload of find(); also returns end() when absent.
+ MI_INLINE const_iterator find(const T& a) const
+ {
+ MiU32 index;
+ for(index=0;index<mSize && mData[index]!=a;index++)
+ ;
+ return mData+index;
+ }
+
+
+ /////////////////////////////////////////////////////////////////////////
+ /*!
+ Adds one element to the end of the array. Operation is O(1).
+ \param a
+ The element that will be added to this array.
+ */
+ /////////////////////////////////////////////////////////////////////////
+
+ MI_FORCE_INLINE T& push_back(const T& a)
+ {
+ if(capacity()<=mSize)
+ grow(capacityIncrement());
+
+ MI_PLACEMENT_NEW((void*)(mData + mSize),T)(a);
+
+ return mData[mSize++];
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ /*!
+ Returns the element at the end of the array. Only legal if the array is non-empty.
+ */
+ /////////////////////////////////////////////////////////////////////////
+ MI_INLINE T pop_back()
+ {
+ MI_ASSERT(mSize);
+ T t = mData[mSize-1];
+ mData[--mSize].~T();
+ return t;
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ /*!
+ Construct one element at the end of the array. Operation is O(1).
+ */
+ /////////////////////////////////////////////////////////////////////////
+ MI_INLINE T& insert()
+ {
+ if(capacity()<=mSize)
+ grow(capacityIncrement());
+
+ return *(new (mData+mSize++)T);
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ /*!
+ Subtracts the element on position i from the array and replace it with
+ the last element.
+ Operation is O(1)
+ \param i
+ The position of the element that will be subtracted from this array.
+ \return
+ The element that was removed.
+ */
+ /////////////////////////////////////////////////////////////////////////
+ MI_INLINE void replaceWithLast(MiU32 i)
+ {
+ MI_ASSERT(i<mSize);
+ mData[i] = mData[--mSize];
+ mData[mSize].~T();
+ }
+
+ MI_INLINE void replaceWithLast(iterator i)
+ {
+ replaceWithLast(static_cast<MiU32>(i-mData));
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ /*!
+ Replaces the first occurrence of the element a with the last element
+ Operation is O(n)
+ \param i
+ The position of the element that will be subtracted from this array.
+ \return Returns true if the element has been removed.
+ */
+ /////////////////////////////////////////////////////////////////////////
+
+ MI_INLINE bool findAndReplaceWithLast(const T& a)
+ {
+ MiU32 index = 0;
+ while(index<mSize && mData[index]!=a)
+ ++index;
+ if(index == mSize)
+ return false;
+ replaceWithLast(index);
+ return true;
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ /*!
+ Subtracts the element on position i from the array. Shift the entire
+ array one step.
+ Operation is O(n)
+ \param i
+ The position of the element that will be subtracted from this array.
+ */
+ /////////////////////////////////////////////////////////////////////////
+ MI_INLINE void remove(MiU32 i)
+ {
+ MI_ASSERT(i<mSize);
+ for(T* it=mData+i; it->~T(), ++i<mSize; ++it)
+ new(it)T(mData[i]);
+
+ --mSize;
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ /*!
+ Removes a range from the array. Shifts the array so order is maintained.
+ Operation is O(n)
+ \param begin
+ The starting position of the element that will be subtracted from this array.
+ \param end
+ The ending position of the elment that will be subtracted from this array.
+ */
+ /////////////////////////////////////////////////////////////////////////
+ MI_INLINE void removeRange(MiU32 begin,MiU32 count)
+ {
+ MI_ASSERT(begin<mSize);
+ MI_ASSERT( (begin+count) <= mSize );
+ for (MiU32 i=0; i<count; i++)
+ {
+ mData[begin+i].~T(); // call the destructor on the ones being removed first.
+ }
+ T *dest = &mData[begin]; // location we are copying the tail end objects to
+ T *src = &mData[begin+count]; // start of tail objects
+ MiU32 move_count = mSize - (begin+count); // compute remainder that needs to be copied down
+ for (MiU32 i=0; i<move_count; i++)
+ {
+ new ( dest ) T(*src); // copy the old one to the new location
+ src->~T(); // call the destructor on the old location
+ dest++;
+ src++;
+ }
+ mSize-=count;
+ }
+

	//////////////////////////////////////////////////////////////////////////
	/*!
	Resizes the array to exactly `size` elements. New slots are
	copy-constructed from `a`; surplus elements are destroyed.
	Definition follows the class body.
	*/
	//////////////////////////////////////////////////////////////////////////
	MI_NOINLINE void resize(const MiU32 size, const T& a = T());
+
+ //////////////////////////////////////////////////////////////////////////
+ /*!
+ Resize array such that only as much memory is allocated to hold the
+ existing elements
+ */
+ //////////////////////////////////////////////////////////////////////////
+ MI_INLINE void shrink()
+ {
+ recreate(mSize);
+ }
+
+
+ //////////////////////////////////////////////////////////////////////////
+ /*!
+ Deletes all array elements and frees memory.
+ */
+ //////////////////////////////////////////////////////////////////////////
+ MI_INLINE void reset()
+ {
+ resize(0);
+ shrink();
+ }
+
+
+ //////////////////////////////////////////////////////////////////////////
+ /*!
+ Ensure that the array has at least size capacity.
+ */
+ //////////////////////////////////////////////////////////////////////////
+ MI_INLINE void reserve(const MiU32 capacity)
+ {
+ if(capacity > this->capacity())
+ grow(capacity);
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ /*!
+ Query the capacity(allocated mem) for the array.
+ */
+ //////////////////////////////////////////////////////////////////////////
+ MI_FORCE_INLINE MiU32 capacity() const
+ {
+ return mCapacity & ~MI_SIGN_BITMASK;
+ }
+
+ protected:
+
+ MI_NOINLINE void copy(const vector<T>& other)
+ {
+ if(!other.empty())
+ {
+ mData = allocate(mSize = mCapacity = other.size());
+ copy(mData, mData + mSize, other.begin());
+ }
+ else
+ {
+ mData = NULL;
+ mSize = 0;
+ mCapacity = 0;
+ }
+
+ //mData = allocate(other.mSize);
+ //mSize = other.mSize;
+ //mCapacity = other.mSize;
+ //copy(mData, mData + mSize, other.mData);
+
+ }
+
+ MI_INLINE T* allocate(MiU32 size)
+ {
+ return size ? (T*)MI_ALLOC(sizeof(T) * size) : 0;
+ }
+
+ MI_INLINE void deallocate(void* mem)
+ {
+ MI_FREE(mem);
+ }
+
+ static MI_INLINE void create(T* first, T* last, const T& a)
+ {
+ for(; first<last; ++first)
+ ::new(first)T(a);
+ }
+
+ static MI_INLINE void copy(T* first, T* last, const T* src)
+ {
+ for(; first<last; ++first, ++src)
+ ::new (first)T(*src);
+ }
+
+ static MI_INLINE void destroy(T* first, T* last)
+ {
+ for(; first<last; ++first)
+ first->~T();
+ }
+
+ /*!
+ Resizes the available memory for the array.
+
+ \param capacity
+ The number of entries that the set should be able to hold.
+ */
+ MI_INLINE void grow(MiU32 capacity)
+ {
+ MI_ASSERT(this->capacity() < capacity);
+ recreate(capacity);
+ }
+
	/*!
	Allocates a new memory block, copy-constructs all entries into it and
	destroys the old entries (the old block is freed unless it is
	user-provided memory). Definition follows the class body.

	\param capacity
		The number of entries that the set should be able to hold.
	*/
	MI_NOINLINE void recreate(MiU32 capacity);
+
+ // The idea here is to prevent accidental brain-damage with push_back or insert. Unfortunately
+ // it interacts badly with InlineArrays with smaller inline allocations.
+ // TODO(dsequeira): policy template arg, this is exactly what they're for.
+ MI_INLINE MiU32 capacityIncrement() const
+ {
+ const MiU32 capacity = this->capacity();
+ return capacity == 0 ? 1 : capacity * 2;
+ }
+
+ // We need one bit to mark arrays that have been deserialized from a user-provided memory block.
+ // For alignment & memory saving purpose we store that bit in the rarely used capacity member.
+ MI_FORCE_INLINE MiU32 isInUserMemory() const
+ {
+ return mCapacity & MI_SIGN_BITMASK;
+ }
+
	public: // need to be public for serialization

	T* mData;		// element storage; NULL when capacity is 0
	MiU32 mSize;	// number of live (constructed) elements

	protected:

	MiU32 mCapacity;	// allocated element count; top bit = user-memory flag
+ };
+
+ template<class T>
+ MI_NOINLINE void vector<T>::resize(const MiU32 size, const T& a)
+ {
+ reserve(size);
+ create(mData + mSize, mData + size, a);
+ destroy(mData + size, mData + mSize);
+ mSize = size;
+ }
+
+ template<class T>
+ MI_NOINLINE void vector<T>::recreate(MiU32 capacity)
+ {
+ T* newData = allocate(capacity);
+ MI_ASSERT(!capacity || newData && newData != mData);
+
+ copy(newData, newData + mSize, mData);
+ destroy(mData, mData + mSize);
+ if(!isInUserMemory())
+ deallocate(mData);
+
+ mData = newData;
+ mCapacity = capacity;
+ }
+
+} // namespace mimp
+
+#endif