aboutsummaryrefslogtreecommitdiff
path: root/APEX_1.4/shared/internal/include/authoring
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /APEX_1.4/shared/internal/include/authoring
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'APEX_1.4/shared/internal/include/authoring')
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSG.h404
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGDefs.h1064
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath.h794
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath2.h630
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGHull.h187
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGMath.h648
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGSerialization.h191
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexGSA.h412
-rw-r--r--APEX_1.4/shared/internal/include/authoring/Fracturing.h544
9 files changed, 4874 insertions, 0 deletions
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSG.h b/APEX_1.4/shared/internal/include/authoring/ApexCSG.h
new file mode 100644
index 00000000..711d90de
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSG.h
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_H
+#define APEX_CSG_H
+
+
+#include "ApexUsingNamespace.h"
+#include "RenderMeshAsset.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+
+// Abstract random-number source.  An implementation may be supplied through
+// BSPBuildParameters::rnd to drive randomized splitting-plane selection.
+class UserRandom
+{
+public:
+	// Next pseudo-random 32-bit unsigned integer.
+	virtual uint32_t getInt() = 0;
+	// Next pseudo-random float; presumably in [min, max] - exact contract is up to the implementation.
+	virtual float getReal(float min, float max) = 0;
+};
+
+
+// Parameters controlling construction of a BSP from a mesh (see IApexBSP::fromMesh).
+// All fields are assigned their documented defaults by setToDefault(), which the
+// default constructor calls.
+struct BSPBuildParameters
+{
+	/*
+		Used for searching splitting planes.
+		If NULL, a default random # generator will be used.
+	*/
+	UserRandom* rnd;
+
+	/*
+		Mesh pre-processing.  The mesh is initially scaled to fit in a unit cube, then (if gridSize is not
+		zero), the vertices of the scaled mesh are snapped to a grid of size 1/gridSize.
+		A power of two is recommended.
+		Default value = 65536.
+	*/
+	uint32_t snapGridSize;
+
+	/*
+		At each step in the tree building process, the surface with maximum triangle area is compared
+		to the other surface triangle areas.  If the maximum area surface is far from the "typical" set of
+		surface areas, then that surface is chosen as the next splitting plane.  Otherwise, a random
+		test set is chosen and a winner determined based upon the weightings below.
+		The value logAreaSigmaThreshold determines how "atypical" the maximum area surface must be to
+		be chosen in this manner.
+		Default value = 2.0.
+	*/
+	float logAreaSigmaThreshold;
+
+	/*
+		Larger values of testSetSize may find better BSP trees, but will take more time to create.
+		testSetSize = 0 is treated as infinity (all surfaces will be tested for each branch).
+		Default value = 10.
+	*/
+	uint32_t testSetSize;
+
+	/*
+		How much to weigh the relative number of triangle splits when searching for a BSP surface.
+		Default value = 0.5.
+	*/
+	float splitWeight;
+
+	/*
+		How much to weigh the relative triangle imbalance when searching for a BSP surface.
+		Default value = 0.
+	*/
+	float imbalanceWeight;
+
+	/*
+		The BSP representation of the mesh will be transformed from the space of the mesh input into IApexBSP::fromMesh
+		using this transform.  By default, this is the identity transformation.  If the user wishes to use a
+		different transformation, it may be set using internalTransform.  However, note that when combining
+		BSPs using the IApexBSP::combine function, the two BSPs should use the same internal transform.  If they don't,
+		the resulting behavior is not specified.  When a mesh is created using IApexBSP::toMesh, the inverse
+		of the internal transform is applied to put the mesh back into the original space.
+
+		A special value for internalTransform is the zero 4x4 matrix.  If this is used, an internal transform
+		will be calculated in the IApexBSP::fromMesh function.  This may be read using IApexBSP::getInternalTransform(),
+		and applied when creating other BSPs which are to be used in combine operations.
+	*/
+	physx::PxMat44 internalTransform;
+
+	/*
+		If false, the triangles associated with this BSP will not be kept.  The BSP may be used for CSG, but will
+		not provide any mesh data.
+
+		Default = true
+	*/
+	bool keepTriangles;
+
+	// Initializes all fields to their documented defaults.
+	BSPBuildParameters()
+	{
+		setToDefault();
+	}
+
+	// Restores every field to its default value (see the per-field comments above).
+	void setToDefault()
+	{
+		rnd = NULL;
+		snapGridSize = 65536;
+		logAreaSigmaThreshold = (float)2.0;
+		testSetSize = 10;
+		splitWeight = (float)0.5;
+		imbalanceWeight = 0;
+		internalTransform = physx::PxMat44(physx::PxIdentity);
+		keepTriangles = true;
+	}
+};
+
+// Numerical tolerances used throughout BSP building, merging, and mesh extraction
+// (see IApexBSP::setTolerances).  Defaults are applied by setToDefault(), which the
+// constructor calls; gDefaultTolerances holds a shared default instance.
+struct BSPTolerances
+{
+	/*
+		A unitless value (relative to mesh size) used to determine mesh triangle coplanarity during BSP building.
+		Default value = 1.0e-6.
+	*/
+	float linear;
+
+	/*
+		A threshold angle (in radians) used to determine mesh triangle coplanarity during BSP building.
+		Default value = 1.0e-5.
+	*/
+	float angular;
+
+	/*
+		A unitless value (relative to mesh size) used to determine triangle splitting during BSP building.
+		Default value = 1.0e-9.
+	*/
+	float base;
+
+	/*
+		A unitless value (relative to mesh size) used to determine a skin width for mesh clipping against BSP
+		nodes during mesh creation from the BSP.
+		Default value = 1.0e-13.
+	*/
+	float clip;
+
+	/*
+		Mesh postprocessing.  A unitless value (relative to mesh size) used to determine merge tolerances for
+		mesh clean-up after triangles have been clipped to BSP leaves.  A value of 0.0 disables this feature.
+		Default value = 1.0e-6.
+	*/
+	float cleaning;
+
+	// Initializes all tolerances to their documented defaults.
+	BSPTolerances()
+	{
+		setToDefault();
+	}
+
+	// Restores every tolerance to its default value (see the per-field comments above).
+	void setToDefault()
+	{
+		linear = (float)1.0e-6;
+		angular = (float)1.0e-5;
+		base = (float)1.0e-9;
+		clip = (float)1.0e-13;
+		cleaning = (float)1.0e-6;
+	}
+};
+
+extern BSPTolerances gDefaultTolerances;
+
+// The sixteen boolean set operations that may be applied to a combined BSP
+// (see IApexBSP::op).  Values 0x0-0xF are arranged in complementary pairs:
+// each even value 2k is immediately followed by its complement 2k+1
+// (e.g. Intersection/Intersection_Complement, Union/Union_Complement).
+struct Operation
+{
+	enum Enum
+	{
+		Empty_Set = 0x0,	// constant
+		All_Space = 0x1,	// constant
+		Set_A = 0x2,	// unary
+		Set_A_Complement = 0x3,	// unary
+		Set_B = 0x4,	// unary
+		Set_B_Complement = 0x5,	// unary
+		Exclusive_Or = 0x6,
+		Equivalent = 0x7,
+		Intersection = 0x8,
+		Intersection_Complement = 0x9,
+		A_Minus_B = 0xA,
+		A_Implies_B = 0xB,
+		B_Minus_A = 0xC,
+		B_Implies_A = 0xD,
+		Union = 0xE,
+		Union_Complement = 0xF,
+
+		NOP = 0x80000000	// no op - passed to queries on non-combined BSPs
+	};
+};
+
+
+// Bit flags for IApexBSP::visualize.
+struct BSPVisualizationFlags
+{
+	enum Enum
+	{
+		OutsideRegions = (1 << 0),	// render regions classified as outside
+		InsideRegions = (1 << 1),	// render regions classified as inside
+
+		// NOTE(review): presumably restricts rendering to the region selected by
+		// the 'index' argument of visualize() - confirm against the implementation.
+		SingleRegion = (1 << 16)
+	};
+};
+
+
+// Characterization of a BSP, returned by IApexBSP::getType().
+struct BSPType
+{
+	enum Enum
+	{
+		Empty_Set,	// BSP has a single node, which is an outside leaf.  Therefore the inside region is the empty set.
+		All_Space,	// BSP has a single node, which is an inside leaf.  Therefore the inside region is all of space.
+		Nontrivial,	// BSP has more than a single node.
+		Combined,	// BSP is the combination of two BSPs, ready for a CSG operation to define a single BSP.
+
+		BSPTypeCount	// Number of enum values above (not a valid type)
+	};
+};
+
+
+/*
+ Memory cache for BSP construction. Not global, so that concurrent calculations can use different pools.
+ */
+// Memory cache shared among BSPs (see the comment above and createBSPMemCache()).
+// Lifetime is managed through release(); construction/destruction are protected,
+// so instances can only be created by the factory and destroyed via release().
+class IApexBSPMemCache
+{
+public:
+
+	/*
+		Deallocate all memory buffers.
+	*/
+	virtual void clearAll() = 0;
+
+	/*
+		Deallocate only temporary data buffers.
+	*/
+	virtual void clearTemp() = 0;
+
+	/*
+		Clean up.  Destroys this cache; do not use the object afterwards.
+	*/
+	virtual void release() = 0;
+
+protected:
+
+	IApexBSPMemCache() {}
+	virtual ~IApexBSPMemCache() {}
+};
+
+
+/*
+ BSP interface.
+
+ Convert a mesh into a BSP, perform boolean operations between BSPs, and extract the resulting mesh.
+ */
+
+// BSP interface (see the comment above).  Instances are created with createBSP()
+// and destroyed with release(); construction/destruction are protected.
+class IApexBSP
+{
+public:
+	/*
+		Set the tolerances used for various aspects of BSP creation, merging, mesh creation, etc.
+		Default values are those in BSPTolerances.
+	*/
+	virtual void setTolerances(const BSPTolerances& tolerances) = 0;
+
+	/*
+		Construct a BSP from the given mesh, using the given parameters.
+		progressListener (optional) receives progress updates; cancel (optional) may be
+		set by another thread to abort the build.
+	*/
+	virtual bool fromMesh(const nvidia::ExplicitRenderTriangle* mesh, uint32_t meshSize, const BSPBuildParameters& params, nvidia::IProgressListener* progressListener = NULL, volatile bool* cancel = NULL) = 0;
+
+	/*
+		Construct a BSP from a convex polyhedron defined by a list of planes.
+		See the definition of internalTransform in BSPBuildParameters.  The same meaning applies here.
+		The mesh array is optional.  If included, the single internal leaf created will be associated with these triangles.
+	*/
+	virtual bool fromConvexPolyhedron(const physx::PxPlane* poly, uint32_t polySize, const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxIdentity), const nvidia::ExplicitRenderTriangle* mesh = NULL, uint32_t meshSize = 0) = 0;
+
+	/*
+		Build a combination of two BSPs (this and the passed-in bsp), upon which boolean operations of the two can be performed.
+	*/
+	virtual bool combine(const IApexBSP& bsp) = 0;
+
+	/*
+		Build a BSP resulting from a boolean operation upon a combination.
+		Note: you may do this "in place," i.e.
+			bsp.op( bsp, operation );
+		... in this case, bsp will no longer be a combined BSP.
+	*/
+	virtual bool op(const IApexBSP& combinedBSP, Operation::Enum operation) = 0;
+
+	/*
+		This BSP is changed to its complement (inside <-> outside)
+	*/
+	virtual bool complement() = 0;
+
+	/*
+		The transform from mesh space to BSP space.  This may be used in the BSPBuildParameters passed into fromMesh,
+		in order to match the transform used for a combining mesh.
+	*/
+	virtual physx::PxMat44 getInternalTransform() const = 0;
+
+	/*
+		Returns an enum characterizing the BSP.  See BSPType.
+	*/
+	virtual BSPType::Enum getType() const = 0;
+
+	/*
+		Returns the total surface area and volume of the regions designated to be on the given side.
+		If this is a combined BSP, then you must provide a merge operation.  In this case,
+		the BSP will not actually be merged, but the resulting area will be that of the
+		merged BSP you would get if you did perform the merge with the op() function.
+		If this is not a combined BSP and you provide a merge operation, it will be ignored.
+
+		If the volume or area of one of the leaves in consideration is infinite, then this function returns false.  Otherwise it returns true.
+	*/
+	virtual bool getSurfaceAreaAndVolume(float& area, float& volume, bool inside, Operation::Enum operation = Operation::NOP) const = 0;
+
+	/*
+		Determines if given point is in an outside or inside leaf.
+		If this is a combined BSP, then you must provide a merge operation.  In this case,
+		the BSP will not actually be merged, but the result will be that of the
+		merged BSP you would get if you did perform the merge with the op() function.
+		If this is not a combined BSP and you provide a merge operation, it will be ignored.
+	*/
+	virtual bool pointInside(const physx::PxVec3& point, Operation::Enum operation = Operation::NOP) const = 0;
+
+	/*
+		Construct a mesh from the current BSP.
+	*/
+	virtual bool toMesh(physx::Array<nvidia::ExplicitRenderTriangle>& mesh) const = 0;
+
+	/*
+		Deep copy of given bsp.
+		Input bsp may be the same as *this.
+		The transform tm will be applied.
+		If the internalTransform given is not zero, it will become the new internal transform.  The mesh will be scaled internally appropriately with the given tm.
+		A combined BSP may be copied.
+	*/
+	virtual void copy(const IApexBSP& bsp, const physx::PxMat44& tm = physx::PxMat44(physx::PxIdentity), const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxZero)) = 0;
+
+	/*
+		Decompose into disjoint islands.
+		This BSP is not affected.
+		The BSP is split into a set of BSPs, each representing one connected island.
+		The set of BSPs is returned as the first BSP in the list, with access
+		to the remainder of the list through the getNext() and getPrev() functions.
+		The BSP must not be a combined BSP (getType() != BSPType::Combined).
+		Returns this if the BSP is already an island.
+		Returns NULL if the operation fails (e.g. this is a combined BSP).
+	*/
+	virtual IApexBSP* decomposeIntoIslands() const = 0;
+
+	/**
+		Utility to replace the submesh on a set of interior triangles.
+	*/
+	virtual void replaceInteriorSubmeshes(uint32_t frameCount, uint32_t* frameIndices, uint32_t submeshIndex) = 0;
+
+	/*
+		Deletes the triangles associated with this BSP.  The BSP may be used for CSG, but will not provide any mesh data.
+	*/
+	virtual void deleteTriangles() = 0;
+
+	/*
+		If a BSP has been decomposed into islands, getNext() and getPrev() will iterate through the
+		BSPs in the decomposition.  NULL is returned if an attempt is made to iterate past
+		the beginning or end of the list.
+	*/
+	virtual IApexBSP* getNext() const = 0;
+	virtual IApexBSP* getPrev() const = 0;
+
+	/*
+		Serialization.
+	*/
+	virtual void serialize(physx::PxFileBuf& stream) const = 0;
+	virtual void deserialize(physx::PxFileBuf& stream) = 0;
+
+	/*
+		Visualization.
+		Set flags to bits from BSPVisualizationFlags::Enum.
+	*/
+	virtual void visualize(nvidia::RenderDebugInterface& debugRender, uint32_t flags, uint32_t index = 0) const = 0;
+
+	/*
+		Clean up.  Destroys this BSP; do not use the object afterwards.
+	*/
+	virtual void release() = 0;
+
+protected:
+
+	IApexBSP() {}
+	virtual ~IApexBSP() {}
+};
+
+
+// CSG Tools API
+
+// Create a BSP memory cache to share among several BSPs
+IApexBSPMemCache*
+createBSPMemCache();
+
+// Instantiate a BSP. If cache = NULL, the BSP will create and own its own cache.
+IApexBSP*
+createBSP(IApexBSPMemCache* memCache = NULL, const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxIdentity));
+
+}; // namespace ApexCSG
+
+#endif
+
+#endif // #ifndef APEX_CSG_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGDefs.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGDefs.h
new file mode 100644
index 00000000..ebfb105a
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGDefs.h
@@ -0,0 +1,1064 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_DEFS_H
+#define APEX_CSG_DEFS_H
+
+#include "ApexUsingNamespace.h"
+#include "ApexSharedUtils.h"
+#include "ApexRand.h"
+#include "Link.h"
+#include "authoring/ApexCSG.h"
+#include "authoring/ApexCSGMath.h"
+#include "authoring/ApexGSA.h"
+#include "PsUserAllocated.h"
+#include "ApexGSA.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+
+// Binary tree node
+// Intrusive binary-tree node: each node knows its parent, its two children,
+// and which slot (0 or 1) it occupies in its parent.
+class BinaryNode
+{
+public:
+	PX_INLINE BinaryNode();
+
+	// Attaches 'child' as this node's child in slot 'index' (only the low bit is used).
+	// Any existing child in that slot is detached first, and 'child' is detached from
+	// its previous parent (if any) before being re-parented here.
+	PX_INLINE void setChild(uint32_t index, BinaryNode* child);
+
+	// Unlinks this node from its parent; no-op if already detached.
+	PX_INLINE void detach();
+
+	PX_INLINE BinaryNode* getParent() const
+	{
+		return m_parent;
+	}
+
+	PX_INLINE BinaryNode* getChild(uint32_t index) const
+	{
+		PX_ASSERT((index & 1) == index);
+		return m_children[index & 1];
+	}
+
+	// Slot of this node within its parent; 0xFFFFFFFF when detached.
+	PX_INLINE uint32_t getIndex() const
+	{
+		return m_index;
+	}
+
+protected:
+	BinaryNode* m_parent;
+	BinaryNode* m_children[2];
+	uint32_t m_index;	// index of this node in parent (0xFFFFFFFF => not attached)
+};
+
+// Constructs a detached node with no parent and no children.
+PX_INLINE
+BinaryNode::BinaryNode()
+{
+	m_index = 0xFFFFFFFF;
+	m_children[1] = m_children[0] = m_parent = NULL;
+}
+
+PX_INLINE void
+BinaryNode::setChild(uint32_t index, BinaryNode* child)
+{
+	index &= 1;
+	BinaryNode*& oldChild = m_children[index];
+
+	// Evict the current occupant of this slot, if any.
+	if (oldChild != NULL)
+	{
+		oldChild->detach();
+	}
+
+	oldChild = child;
+
+	if (child != NULL)
+	{
+		// Remove the child from its previous parent before re-parenting it here.
+		child->detach();
+		child->m_parent = this;
+		child->m_index = index;
+	}
+}
+
+PX_INLINE void
+BinaryNode::detach()
+{
+	if (m_parent != NULL)
+	{
+		// Sanity check: the parent's slot must actually point back at us.
+		PX_ASSERT(m_parent->getChild(m_index) == this);
+		m_parent->m_children[m_index & 1] = NULL;
+		m_parent = NULL;
+		m_index = 0xFFFFFFFF;
+	}
+}
+
+
+// CSG mesh representation
+
+// 2D texture coordinate stored at Real precision; element 0 is u, element 1 is v.
+class UV : public Vec<Real, 2>
+{
+public:
+
+	PX_INLINE UV() {}
+	// Conversion from a float[2] / double[2] pair (as stored in render-mesh vertices).
+	PX_INLINE UV(const float* uv)
+	{
+		set((Real)uv[0], (Real)uv[1]);
+	}
+	PX_INLINE UV(const double* uv)
+	{
+		set((Real)uv[0], (Real)uv[1]);
+	}
+	// Element-wise copy assignment.
+	PX_INLINE UV& operator = (const UV& uv)
+	{
+		el[0] = uv.el[0];
+		el[1] = uv.el[1];
+		return *this;
+	}
+
+	PX_INLINE void set(Real u, Real v)
+	{
+		el[0] = u;
+		el[1] = v;
+	}
+
+	PX_INLINE const Real& u() const
+	{
+		return el[0];
+	}
+	PX_INLINE const Real& v() const
+	{
+		return el[1];
+	}
+};
+
+// RGBA color stored at Real precision (elements 0..3 = r, g, b, a).
+// Converts to/from a packed 32-bit color with r in the low byte and a in the high byte.
+class Color : public Vec<Real, 4>
+{
+public:
+
+	PX_INLINE Color() {}
+	// Unpacks a 32-bit byte-per-channel color into [0,1] Real components.
+	PX_INLINE Color(const uint32_t c);
+	// Element-wise copy assignment.
+	PX_INLINE Color& operator = (const Color& c)
+	{
+		el[0] = c.el[0];
+		el[1] = c.el[1];
+		el[2] = c.el[2];
+		el[3] = c.el[3];
+		return *this;
+	}
+
+	PX_INLINE void set(Real r, Real g, Real b, Real a)
+	{
+		el[0] = r;
+		el[1] = g;
+		el[2] = b;
+		el[3] = a;
+	}
+
+	// Packs to a 32-bit color (see below).
+	PX_INLINE uint32_t toInt() const;
+
+	PX_INLINE const Real& r() const
+	{
+		return el[0];
+	}
+	PX_INLINE const Real& g() const
+	{
+		return el[1];
+	}
+	PX_INLINE const Real& b() const
+	{
+		return el[2];
+	}
+	PX_INLINE const Real& a() const
+	{
+		return el[3];
+	}
+};
+
+PX_INLINE
+Color::Color(const uint32_t c)
+{
+	const Real recip255 = 1 / (Real)255;
+	// Byte layout: r = bits 0-7, g = 8-15, b = 16-23, a = 24-31.
+	set((Real)(c & 0xFF)*recip255, (Real)((c >> 8) & 0xFF)*recip255, (Real)((c >> 16) & 0xFF)*recip255, (Real)(c >> 24)*recip255);
+}
+
+// Packs each channel to a byte with round-to-nearest (+0.5).  Components are
+// assumed to lie in [0,1]; no clamping is performed, so out-of-range values wrap.
+PX_INLINE uint32_t
+Color::toInt() const
+{
+	return (uint32_t)((int)(255 * el[3] + (Real)0.5)) << 24 | (uint32_t)((int)(255 * el[2] + (Real)0.5)) << 16 | (uint32_t)((int)(255 * el[1] + (Real)0.5)) << 8 | (uint32_t)((int)(255 * el[0] + (Real)0.5));
+}
+
+// Per-vertex interpolated attributes associated with a CSG Triangle.
+// NOTE: Interpolator accesses these fields through byte offsets registered by
+// InterpolatorBuilder, so member layout/order matters.
+struct VertexData
+{
+	Dir normal;
+	Dir tangent;
+	Dir binormal;
+	UV uv[nvidia::VertexFormat::MAX_UV_COUNT];
+	Color color;
+};
+
+// CSG-precision triangle: positions plus derived normal/area, with render
+// attributes (submesh, smoothing mask, extra data) carried through conversions.
+// Per-vertex attributes live in a separate VertexData[3] array.
+struct Triangle
+{
+	Pos vertices[3];
+	Dir normal;	// unit normal, recomputed by calculateQuantities()
+	Real area;	// triangle area, recomputed by calculateQuantities()
+	int32_t submeshIndex;
+	uint32_t smoothingMask;
+	uint32_t extraDataIndex;
+
+	// Imports positions/attributes from a render triangle (writing the per-vertex
+	// attributes into 'vertexData') and recomputes normal and area.
+	void fromExplicitRenderTriangle(VertexData vertexData[3], const nvidia::ExplicitRenderTriangle& tri)
+	{
+		for (unsigned i = 0; i < 3; ++i)
+		{
+			vertices[i] = Pos(tri.vertices[i].position);
+			vertexData[i].normal = Dir(tri.vertices[i].normal);
+			vertexData[i].tangent = Dir(tri.vertices[i].tangent);
+			vertexData[i].binormal = Dir(tri.vertices[i].binormal);
+			for (unsigned j = 0; j < nvidia::VertexFormat::MAX_UV_COUNT; ++j)
+			{
+				vertexData[i].uv[j] = UV(&tri.vertices[i].uv[j][0]);
+			}
+			vertexData[i].color.set((Real)tri.vertices[i].color.r, (Real)tri.vertices[i].color.g, (Real)tri.vertices[i].color.b, (Real)tri.vertices[i].color.a);
+		}
+		submeshIndex = tri.submeshIndex;
+		smoothingMask = tri.smoothingMask;
+		extraDataIndex = tri.extraDataIndex;
+		calculateQuantities();
+	}
+
+	// Exports to a render triangle, narrowing Real back to float.
+	void toExplicitRenderTriangle(nvidia::ExplicitRenderTriangle& tri, const VertexData vertexData[3]) const
+	{
+		for (unsigned i = 0; i < 3; ++i)
+		{
+			tri.vertices[i].position = ApexCSG::GSA::toPxVec3(vertices[i]);
+			tri.vertices[i].normal = ApexCSG::GSA::toPxVec3(vertexData[i].normal);
+			tri.vertices[i].tangent = ApexCSG::GSA::toPxVec3(vertexData[i].tangent);
+			tri.vertices[i].binormal = ApexCSG::GSA::toPxVec3(vertexData[i].binormal);
+			for (unsigned j = 0; j < nvidia::VertexFormat::MAX_UV_COUNT; ++j)
+			{
+				tri.vertices[i].uv[j].set((float)vertexData[i].uv[j][0], (float)vertexData[i].uv[j][1]);
+			}
+			tri.vertices[i].color.set((float)vertexData[i].color.r(), (float)vertexData[i].color.g(), (float)vertexData[i].color.b(), (float)vertexData[i].color.a());
+		}
+		tri.submeshIndex = submeshIndex;
+		tri.smoothingMask = smoothingMask;
+		tri.extraDataIndex = extraDataIndex;
+	}
+
+	// Recomputes 'normal' and 'area' from the vertices.  The normal is accumulated
+	// from all three edge cross products (Newell-style, more robust for thin
+	// triangles than a single cross product).  Relies on Dir::normalize() returning
+	// the vector's pre-normalization length, of which the area is half - TODO confirm
+	// against the Dir implementation.
+	void calculateQuantities()
+	{
+		const Dir e0 = Dir(vertices[1] - vertices[0]);
+		const Dir e1 = Dir(vertices[2] - vertices[1]);
+		const Dir e2 = Dir(vertices[0] - vertices[2]);
+		normal = (e0^e1) + (e1^e2) + (e2^e0);
+		area = (Real)0.5 * normal.normalize();
+	}
+
+	// Applies 'tm' to the vertices and recomputes the derived quantities.
+	void transform(const Mat4Real& tm)
+	{
+		for (int i = 0; i < 3; ++i)
+		{
+			vertices[i] = tm*vertices[i];
+		}
+		calculateQuantities();
+	}
+};
+
+// A vertex in an intrusive doubly-linked list; getAdj() narrows the base-class
+// neighbor pointer back to LinkedVertex.
+struct LinkedVertex : public nvidia::Link
+{
+	LinkedVertex* getAdj(uint32_t which) const
+	{
+		return (LinkedVertex*)nvidia::Link::getAdj(which);
+	}
+
+	Pos vertex;
+};
+
+// A 2D edge in an intrusive doubly-linked loop.  Each edge stores its two
+// endpoints v[0] (tail) and v[1] (head); the list operations below maintain the
+// invariant that adjoining edges share a common endpoint.
+struct LinkedEdge2D : public nvidia::Link
+{
+	LinkedEdge2D() : loopID(-1) {}
+	// Unlinks on destruction so neighbors are never left pointing at a dead edge.
+	~LinkedEdge2D()
+	{
+		remove();
+	}
+
+	void setAdj(uint32_t which, LinkedEdge2D* link)
+	{
+		// Ensure neighboring links' adjoining vertices are equal:
+		// copy the shared endpoint from 'link', and patch up the endpoint between
+		// link's old neighbor and our old neighbor before relinking.
+		which &= 1;
+		const uint32_t other = which ^ 1;
+		v[which] = link->v[other];
+		((LinkedEdge2D*)link->adj[other])->v[which] = ((LinkedEdge2D*)adj[which])->v[other];
+		nvidia::Link::setAdj(which, link);
+	}
+
+	LinkedEdge2D* getAdj(uint32_t which) const
+	{
+		return (LinkedEdge2D*)nvidia::Link::getAdj(which);
+	}
+
+	void remove()
+	{
+		// Ensure neighboring links' adjoining vertices are equal: the two neighbors
+		// become adjacent, meeting at the midpoint of this edge.
+		((LinkedEdge2D*)adj[0])->v[1] = ((LinkedEdge2D*)adj[1])->v[0] = (Real)0.5 * (v[0] + v[1]);
+		nvidia::Link::remove();
+	}
+
+	Vec2Real v[2];
+	int32_t loopID;	// loop membership identifier; -1 = unassigned
+};
+
+// A candidate splitting surface: a plane plus the triangles lying on it.
+// NOTE(review): the triangle indices presumably form a half-open range
+// [triangleIndexStart, triangleIndexStop) into a triangle array - confirm with users.
+struct Surface
+{
+	uint32_t planeIndex;
+	uint32_t triangleIndexStart;
+	uint32_t triangleIndexStop;
+	float totalTriangleArea;	// Keeping it 32-bit real, since we don't need precision here
+};
+
+// A BSP leaf region; 'side' records which side (inside/outside) the region is on.
+struct Region
+{
+	uint32_t side;
+
+	// Not to be serialized, but we have this extra space since Region is used in a union with Surface
+	uint32_t tempIndex1;
+	uint32_t tempIndex2;
+	uint32_t tempIndex3;
+};
+
+
+// Interpolator - calculates interpolation data for triangle quantities
+// Calculates interpolation data for triangle quantities.  Each scalar vertex
+// attribute is represented by a plane ("frame") such that evaluating the plane
+// at a point yields the interpolated attribute value there.
+class Interpolator
+{
+public:
+
+	// One entry per scalar attribute of VertexData.  The enum order must match
+	// the byte offsets registered into s_offsets by InterpolatorBuilder.
+	enum VertexField
+	{
+		Normal_x, Normal_y, Normal_z,
+		Tangent_x, Tangent_y, Tangent_z,
+		Binormal_x, Binormal_y, Binormal_z,
+		UV0_u, UV0_v, UV1_u, UV1_v, UV2_u, UV2_v, UV3_u, UV3_v,
+		Color_r, Color_g, Color_b, Color_a,
+
+		VertexFieldCount
+	};
+
+	Interpolator() {}
+	// Builds frames that reproduce the triangle's per-vertex attributes.
+	Interpolator(const Triangle& tri, const VertexData vertexData[3])
+	{
+		setFromTriangle(tri, vertexData);
+	}
+	// Builds constant/planar frames for a flat surface with the given frame axes and UV scale.
+	Interpolator(const Dir tangents[3], const Vec<Real, 2>& uvScale)
+	{
+		setFlat(tangents, uvScale);
+	}
+
+	PX_INLINE void setFromTriangle(const Triangle& tri, const VertexData vertexData[3]);
+	PX_INLINE void setFlat(const Dir tangents[3], const Vec<Real, 2>& uvScale);
+
+	// Evaluates every frame at 'point', writing the results into 'vertexData'.
+	PX_INLINE void interpolateVertexData(VertexData& vertexData, const Pos& point) const;
+
+	// Tolerance-based comparison of all frames (see framesEqual).
+	PX_INLINE bool equals(const Interpolator& interpolator, Real frameDirTol, Real frameScaleTol, Real dirTol, Real uvTol, Real colorTol) const;
+
+	// NOTE(review): declared parameter name 'cofTM' differs from 'invTransposeTM'
+	// used in the definition below.  A cofactor matrix is proportional to the
+	// inverse transpose, so they agree up to scale - confirm intended semantics.
+	PX_INLINE void transform(Interpolator& transformedInterpolator, const Mat4Real& tm, const Mat4Real& cofTM) const;
+
+	void serialize(physx::PxFileBuf& stream) const;
+	void deserialize(physx::PxFileBuf& stream, uint32_t version);
+
+private:
+	// One plane per scalar field; plane.distance(point) = interpolated field value.
+	ApexCSG::Plane m_frames[VertexFieldCount];
+	// Byte offset of each field within VertexData; populated by InterpolatorBuilder.
+	static size_t s_offsets[VertexFieldCount];
+
+	friend class InterpolatorBuilder;
+};
+
+// Builds one plane per vertex field such that evaluating the plane at a point on
+// the triangle reproduces linear (barycentric) interpolation of that field across
+// the triangle's three vertices.  Degenerate (near-zero-area) triangles yield
+// all-zero frames.
+PX_INLINE void
+Interpolator::setFromTriangle(const Triangle& tri, const VertexData vertexData[3])
+{
+	const Pos& p0 = tri.vertices[0];
+	const Pos& p1 = tri.vertices[1];
+	const Pos& p2 = tri.vertices[2];
+	const Dir p1xp2 = Dir(p1) ^ Dir(p2);
+	const Dir p2xp0 = Dir(p2) ^ Dir(p0);
+	const Dir p0xp1 = Dir(p0) ^ Dir(p1);
+	const Dir n = p1xp2 + p2xp0 + p0xp1;
+	const Real n2 = n | n;
+	if (n2 < EPS_REAL * EPS_REAL)
+	{
+		// Degenerate triangle: no well-defined interpolation; zero out every frame.
+		for (uint32_t i = 0; i < VertexFieldCount; ++i)
+		{
+			m_frames[i].set(Dir((Real)0), 0);
+		}
+		return;
+	}
+
+	// Calculate inverse 4x4 matrix (only need first three columns):
+	const Dir nP = n / n2; // determinant is -n2
+	const Dir Q0(nP[2] * (p1[1] - p2[1]) - nP[1] * (p1[2] - p2[2]), nP[2] * (p2[1] - p0[1]) - nP[1] * (p2[2] - p0[2]), nP[2] * (p0[1] - p1[1]) - nP[1] * (p0[2] - p1[2]));
+	const Dir Q1(nP[0] * (p1[2] - p2[2]) - nP[2] * (p1[0] - p2[0]), nP[0] * (p2[2] - p0[2]) - nP[2] * (p2[0] - p0[0]), nP[0] * (p0[2] - p1[2]) - nP[2] * (p0[0] - p1[0]));
+	const Dir Q2(nP[1] * (p1[0] - p2[0]) - nP[0] * (p1[1] - p2[1]), nP[1] * (p2[0] - p0[0]) - nP[0] * (p2[1] - p0[1]), nP[1] * (p0[0] - p1[0]) - nP[0] * (p0[1] - p1[1]));
+	const Dir r(nP | p1xp2, nP | p2xp0, nP | p0xp1);
+
+	for (uint32_t i = 0; i < VertexFieldCount; ++i)
+	{
+		// Gather this field's three per-vertex values via the registered byte offset.
+		const size_t offset = s_offsets[i];
+		const Dir vi(*(Real*)(((uint8_t*)&vertexData[0]) + offset), *(Real*)(((uint8_t*)&vertexData[1]) + offset), *(Real*)(((uint8_t*)&vertexData[2]) + offset));
+		Dir n(Q0 | vi, Q1 | vi, Q2 | vi);
+		// Snap tiny gradients/offsets to exactly zero to suppress numerical noise.
+		if ((n | n) < 100 * EPS_REAL * EPS_REAL)
+		{
+			n.set((Real)0, (Real)0, (Real)0);
+		}
+		Real o = r | vi;
+		if (physx::PxAbs(o) < 100 * EPS_REAL)
+		{
+			o = (Real)0;
+		}
+		m_frames[i].set(n, o);
+	}
+}
+
+// Configures frames for a flat surface: normal/tangent/binormal are constant
+// (zero gradient), every UV channel varies linearly along the tangent/binormal
+// directions scaled by 1/uvScale (zero scale disables that axis), and color is
+// constant opaque white (all components 1).
+PX_INLINE void
+Interpolator::setFlat(const Dir tangents[3], const Vec<Real, 2>& uvScale)
+{
+	// Local z ~ normal = tangents[2], x ~ u and tangent = tangents[0], y ~ v and binormal = tangents[1]
+	m_frames[Normal_x].set(Dir((Real)0), tangents[2][0]);
+	m_frames[Normal_y].set(Dir((Real)0), tangents[2][1]);
+	m_frames[Normal_z].set(Dir((Real)0), tangents[2][2]);
+	m_frames[Tangent_x].set(Dir((Real)0), tangents[0][0]);
+	m_frames[Tangent_y].set(Dir((Real)0), tangents[0][1]);
+	m_frames[Tangent_z].set(Dir((Real)0), tangents[0][2]);
+	m_frames[Binormal_x].set(Dir((Real)0), tangents[1][0]);
+	m_frames[Binormal_y].set(Dir((Real)0), tangents[1][1]);
+	m_frames[Binormal_z].set(Dir((Real)0), tangents[1][2]);
+	// Guard against division by zero: a zero uvScale component gives a zero gradient.
+	const Dir su = (uvScale[0] ? 1 / uvScale[0] : (Real)0) * tangents[0];
+	const Dir sv = (uvScale[1] ? 1 / uvScale[1] : (Real)0) * tangents[1];
+	m_frames[UV0_u].set(su, 0);
+	m_frames[UV0_v].set(sv, 0);
+	m_frames[UV1_u].set(su, 0);
+	m_frames[UV1_v].set(sv, 0);
+	m_frames[UV2_u].set(su, 0);
+	m_frames[UV2_v].set(sv, 0);
+	m_frames[UV3_u].set(su, 0);
+	m_frames[UV3_v].set(sv, 0);
+	m_frames[Color_r].set(Dir((Real)0), (Real)1);
+	m_frames[Color_g].set(Dir((Real)0), (Real)1);
+	m_frames[Color_b].set(Dir((Real)0), (Real)1);
+	m_frames[Color_a].set(Dir((Real)0), (Real)1);
+}
+
+// Evaluates each field's frame at 'point' and writes the result directly into the
+// corresponding VertexData member via the registered byte offset.
+PX_INLINE void
+Interpolator::interpolateVertexData(VertexData& vertexData, const Pos& point) const
+{
+	for (uint32_t i = 0; i < VertexFieldCount; ++i)
+	{
+		Real& value = *(Real*)(((uint8_t*)&vertexData) + s_offsets[i]);
+		value = m_frames[i].distance(point);
+	}
+}
+
+// Compares two interpolation frames (planes) under three tolerances:
+//   twoFrameScaleTol2 - squared tolerance (x2) on the difference of gradient magnitudes,
+//   sinFrameTol2      - squared sine of the allowed angle between gradient directions,
+//   tol2              - squared relative tolerance on the plane offsets (d).
+// Returns true only if all three tests pass.
+PX_INLINE bool
+framesEqual(const Plane& f0, const Plane& f1, Real twoFrameScaleTol2, Real sinFrameTol2, Real tol2)
+{
+	const Dir n0 = f0.normal();
+	const Dir n1 = f1.normal();
+	const Real n02 = n0 | n0;
+	const Real n12 = n1 | n1;
+	const Real n2Diff = n02 - n12;
+
+	if (n2Diff * n2Diff > twoFrameScaleTol2 * (n02 + n12))
+	{
+		return false;	// Scales differ by more than frame scale tolerance
+	}
+
+	const Real n2Prod = n02 * n12;
+	// |n0 x n1|^2 = |n0|^2 |n1|^2 sin^2(theta): compare without normalizing.
+	const Real unnormalizedSinFrameTheta2 = (n0 ^ n1).lengthSquared();
+	if (unnormalizedSinFrameTheta2 > n2Prod * sinFrameTol2)
+	{
+		return false;	// Directions differ by more than frame angle tolerance
+	}
+
+	const Real unnormalizedOriginDiff = f0.d() - f1.d();
+	const Real originScale = 0.5f * (physx::PxAbs(f0.d()) + physx::PxAbs(f1.d()));
+	if (unnormalizedOriginDiff * unnormalizedOriginDiff > tol2 * originScale * originScale)
+	{
+		return false;	// Origins differ by more than tolerance
+	}
+
+	return true;
+}
+
+// Tolerance-based equality over all frames.  Direction fields (normal/tangent/
+// binormal) use dirTol, UV fields use uvTol, and color fields use colorTol as the
+// plane-offset tolerance; frameDirTol/frameScaleTol bound the gradient direction
+// and magnitude differences for every field.  Relies on the VertexField enum
+// listing each group contiguously.
+PX_INLINE bool
+Interpolator::equals(const Interpolator& interpolator, Real frameDirTol, Real frameScaleTol, Real dirTol, Real uvTol, Real colorTol) const
+{
+	// Pre-square the tolerances once; framesEqual works with squared quantities.
+	const Real twoFrameScaleTol2 = (Real)2 * frameScaleTol * frameScaleTol;
+	const Real sinFrameTol2 = frameDirTol * frameDirTol;
+	const Real dirTol2 = dirTol * dirTol;
+	const Real uvTol2 = uvTol * uvTol;
+	const Real colorTol2 = colorTol * colorTol;
+
+	// Directions
+	for (uint32_t i = Normal_x; i <= Binormal_z; ++i)
+	{
+		if (!framesEqual(m_frames[i], interpolator.m_frames[i], twoFrameScaleTol2, sinFrameTol2, dirTol2))
+		{
+			return false;
+		}
+	}
+
+	// UVs
+	for (uint32_t i = UV0_u; i <= UV3_v; ++i)
+	{
+		if (!framesEqual(m_frames[i], interpolator.m_frames[i], twoFrameScaleTol2, sinFrameTol2, uvTol2))
+		{
+			return false;
+		}
+	}
+
+	// Color
+	for (uint32_t i = Color_r; i <= Color_a; ++i)
+	{
+		if (!framesEqual(m_frames[i], interpolator.m_frames[i], twoFrameScaleTol2, sinFrameTol2, colorTol2))
+		{
+			return false;
+		}
+	}
+
+	return true;
+}
+
+// Transforms all frames into 'transformedInterpolator'.  Planes transform on the
+// left by the inverse-transpose (covariant); the interpolated values themselves
+// then transform on the right per quantity: normals by the inverse-transpose,
+// tangents/binormals by tm, and scalar quantities (UVs, colors) are unchanged.
+// NOTE(review): the declaration names this parameter 'cofTM' (cofactor matrix,
+// proportional to the inverse transpose) - confirm which is intended.
+PX_INLINE void
+Interpolator::transform(Interpolator& transformedInterpolator, const Mat4Real& tm, const Mat4Real& invTransposeTM) const
+{
+	// Apply left-hand transform.
+	for (uint32_t i = 0; i < VertexFieldCount; ++i)
+	{
+		transformedInterpolator.m_frames[i] = invTransposeTM * m_frames[i];
+	}
+	// Apply right-hand transform.  This is specific to the quantities being transformed.
+	for (int i = 0; i < 4; ++i)
+	{
+		// Normal, transform by invTransposeTM:
+		Dir normal_frame_i(transformedInterpolator.m_frames[Interpolator::Normal_x][i], transformedInterpolator.m_frames[Interpolator::Normal_y][i], transformedInterpolator.m_frames[Interpolator::Normal_z][i]);
+		normal_frame_i = invTransposeTM * normal_frame_i;
+		transformedInterpolator.m_frames[Interpolator::Normal_x][i] = normal_frame_i[0];
+		transformedInterpolator.m_frames[Interpolator::Normal_y][i] = normal_frame_i[1];
+		transformedInterpolator.m_frames[Interpolator::Normal_z][i] = normal_frame_i[2];
+		// Tangent, transform by tm:
+		Dir tangent_frame_i(transformedInterpolator.m_frames[Interpolator::Tangent_x][i], transformedInterpolator.m_frames[Interpolator::Tangent_y][i], transformedInterpolator.m_frames[Interpolator::Tangent_z][i]);
+		tangent_frame_i = tm * tangent_frame_i;
+		transformedInterpolator.m_frames[Interpolator::Tangent_x][i] = tangent_frame_i[0];
+		transformedInterpolator.m_frames[Interpolator::Tangent_y][i] = tangent_frame_i[1];
+		transformedInterpolator.m_frames[Interpolator::Tangent_z][i] = tangent_frame_i[2];
+		// Binormal, transform by tm:
+		Dir binormal_frame_i(transformedInterpolator.m_frames[Interpolator::Binormal_x][i], transformedInterpolator.m_frames[Interpolator::Binormal_y][i], transformedInterpolator.m_frames[Interpolator::Binormal_z][i]);
+		binormal_frame_i = tm * binormal_frame_i;
+		transformedInterpolator.m_frames[Interpolator::Binormal_x][i] = binormal_frame_i[0];
+		transformedInterpolator.m_frames[Interpolator::Binormal_y][i] = binormal_frame_i[1];
+		transformedInterpolator.m_frames[Interpolator::Binormal_z][i] = binormal_frame_i[2];
+		// Other quantities are scalars
+	}
+}
+
+
+// Registers the byte offset of every VertexData scalar field into
+// Interpolator::s_offsets.  An instance must be constructed before any
+// Interpolator is used - presumably done once at startup; TODO confirm.
+// The accessor-based entries (uv[i].u(), color.r(), ...) work because those
+// accessors return references to the underlying elements, so taking the address
+// of the returned reference yields the member's address.
+class InterpolatorBuilder
+{
+public:
+	InterpolatorBuilder()
+	{
+// Byte offset of 'field' within a VertexData instance.
+#define CREATE_OFFSET( field ) (size_t)((uintptr_t)&vertexData.field-(uintptr_t)&vertexData)
+
+		VertexData vertexData;
+		Interpolator::s_offsets[Interpolator::Normal_x] = CREATE_OFFSET(normal[0]);
+		Interpolator::s_offsets[Interpolator::Normal_y] = CREATE_OFFSET(normal[1]);
+		Interpolator::s_offsets[Interpolator::Normal_z] = CREATE_OFFSET(normal[2]);
+		Interpolator::s_offsets[Interpolator::Tangent_x] = CREATE_OFFSET(tangent[0]);
+		Interpolator::s_offsets[Interpolator::Tangent_y] = CREATE_OFFSET(tangent[1]);
+		Interpolator::s_offsets[Interpolator::Tangent_z] = CREATE_OFFSET(tangent[2]);
+		Interpolator::s_offsets[Interpolator::Binormal_x] = CREATE_OFFSET(binormal[0]);
+		Interpolator::s_offsets[Interpolator::Binormal_y] = CREATE_OFFSET(binormal[1]);
+		Interpolator::s_offsets[Interpolator::Binormal_z] = CREATE_OFFSET(binormal[2]);
+		Interpolator::s_offsets[Interpolator::UV0_u] = CREATE_OFFSET(uv[0].u());
+		Interpolator::s_offsets[Interpolator::UV0_v] = CREATE_OFFSET(uv[0].v());
+		Interpolator::s_offsets[Interpolator::UV1_u] = CREATE_OFFSET(uv[1].u());
+		Interpolator::s_offsets[Interpolator::UV1_v] = CREATE_OFFSET(uv[1].v());
+		Interpolator::s_offsets[Interpolator::UV2_u] = CREATE_OFFSET(uv[2].u());
+		Interpolator::s_offsets[Interpolator::UV2_v] = CREATE_OFFSET(uv[2].v());
+		Interpolator::s_offsets[Interpolator::UV3_u] = CREATE_OFFSET(uv[3].u());
+		Interpolator::s_offsets[Interpolator::UV3_v] = CREATE_OFFSET(uv[3].v());
+		Interpolator::s_offsets[Interpolator::Color_r] = CREATE_OFFSET(color.r());
+		Interpolator::s_offsets[Interpolator::Color_g] = CREATE_OFFSET(color.g());
+		Interpolator::s_offsets[Interpolator::Color_b] = CREATE_OFFSET(color.b());
+		Interpolator::s_offsets[Interpolator::Color_a] = CREATE_OFFSET(color.a());
+	}
+};
+
+
// ClippedTriangleInfo - used to map bsp output back to the original mesh.
// Sorting by cmp groups clipped triangles by splitting plane, then by the
// original triangle they came from, then by clipped-triangle index.
struct ClippedTriangleInfo
{
	uint32_t planeIndex;			// splitting plane that produced this fragment
	uint32_t originalTriangleIndex;	// source triangle in the original mesh
	uint32_t clippedTriangleIndex;	// fragment index within the clipped output
	uint32_t ccw;					// winding flag

	// qsort-style comparator; only the sign of the result is meaningful.
	// Fix: the original computed (int)lhs.field - (int)rhs.field, which overflows
	// (UB) and mis-orders whenever two uint32_t fields differ by more than INT_MAX;
	// sign-safe three-way comparisons are used instead.
	static int cmp(const void* a, const void* b)
	{
		const ClippedTriangleInfo& lhs = *(const ClippedTriangleInfo*)a;
		const ClippedTriangleInfo& rhs = *(const ClippedTriangleInfo*)b;
		if (lhs.planeIndex != rhs.planeIndex)
		{
			return lhs.planeIndex < rhs.planeIndex ? -1 : 1;
		}
		if (lhs.originalTriangleIndex != rhs.originalTriangleIndex)
		{
			return lhs.originalTriangleIndex < rhs.originalTriangleIndex ? -1 : 1;
		}
		if (lhs.clippedTriangleIndex != rhs.clippedTriangleIndex)
		{
			return lhs.clippedTriangleIndex < rhs.clippedTriangleIndex ? -1 : 1;
		}
		return 0;
	}
};
+
// BSPLink - a link with an "isBSP" method to act as a stop.
// The base implementation returns false; BSP overrides it to return true.
// This lets list traversal (getAdjBSP) stop at a non-BSP element, e.g. a
// list-head sentinel.
class BSPLink : public nvidia::Link, public nvidia::UserAllocated
{
public:
	virtual bool isBSP()
	{
		return false;
	}

	// Neighbor in direction 'which' (BSP::getNext uses 1, BSP::getPrev uses 0),
	// or NULL if this link is alone in its list or the neighbor is not a BSP.
	BSPLink* getAdjBSP(uint32_t which) const
	{
		if (isSolitary())
		{
			return NULL;
		}
		BSPLink* adjLink = static_cast<BSPLink*>(getAdj(which));
		return adjLink->isBSP() ? adjLink : NULL;
	}

	// Unlink this element from its list; if the remaining neighbor is a non-BSP
	// element that is now alone (an orphaned sentinel), delete it too.
	void removeBSPLink()
	{
		BSPLink* adjLink = static_cast<BSPLink*>(getAdj(1));
		remove();
		if (!adjLink->isBSP() && adjLink->isSolitary())
		{
			delete adjLink;
		}
	}
};
+
// Specialized progress listener implementation.
// Accumulates arbitrary "amounts" of completed work via add() and forwards the
// running total to an optional parent listener as a 0-100 percentage of
// totalAmount.  A non-positive totalAmount disables scaling (0 is reported).
class QuantityProgressListener : public nvidia::IProgressListener
{
public:
	QuantityProgressListener(Real totalAmount, IProgressListener* parent) :
		m_total((Real)0)
		, m_parent(parent)
	{
		// Percent per unit of work; guard against divide-by-zero.
		m_scale = totalAmount > (Real)0 ? (Real)100/(Real)totalAmount : (Real)0;
	}

	// IProgressListener interface - forwarded unmodified to the parent, if any.
	virtual void setProgress(int progress, const char* taskName = NULL)
	{
		if (m_parent != NULL)
		{
			m_parent->setProgress(progress, taskName);
		}
	}

	// Record 'amount' more completed work and report the rounded percentage upstream.
	virtual void add(Real amount)
	{
		m_total += amount;
		if (m_parent != NULL)
		{
			// +0.5 rounds to nearest before truncation.
			m_parent->setProgress((int)(m_total*m_scale + (Real)0.5));
		}
	}

private:
	Real m_total;					// work completed so far
	Real m_scale;					// percent per unit of work (0 if disabled)
	IProgressListener* m_parent;	// may be NULL
};
+
+
// IApexBSP implementation.
// A BSP tree built from a triangle mesh, supporting CSG boolean operations
// (combine + op), complement, area/volume queries, point containment, island
// decomposition, serialization, and conversion back to a triangle mesh.
// Also a BSPLink, so several BSPs can be chained into a list (see
// decomposeIntoIslands / getNext / getPrev).
class BSP : public IApexBSP, public BSPLink
{
public:
	BSP(IApexBSPMemCache* memCache = NULL, const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxIdentity));
	~BSP();

	// IApexBSP implementation
	void setTolerances(const BSPTolerances& tolerances);
	bool fromMesh(const nvidia::ExplicitRenderTriangle* mesh, uint32_t meshSize, const BSPBuildParameters& params, nvidia::IProgressListener* progressListener = NULL, volatile bool* cancel = NULL);
	bool fromConvexPolyhedron(const physx::PxPlane* poly, uint32_t polySize, const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxIdentity), const nvidia::ExplicitRenderTriangle* mesh = NULL, uint32_t meshSize = 0);
	bool combine(const IApexBSP& bsp);
	bool op(const IApexBSP& combinedBSP, Operation::Enum operation);
	bool complement();
	BSPType::Enum getType() const;
	bool getSurfaceAreaAndVolume(float& area, float& volume, bool inside, Operation::Enum operation = Operation::NOP) const;
	bool pointInside(const physx::PxVec3& point, Operation::Enum operation = Operation::NOP) const;
	bool toMesh(physx::Array<nvidia::ExplicitRenderTriangle>& mesh) const;
	// NOTE(review): the PxZero default for internalTransform presumably acts as a
	// "keep/derive internal transform" sentinel (a zero matrix is not a usable
	// transform) - confirm against the implementation.
	void copy(const IApexBSP& bsp, const physx::PxMat44& tm = physx::PxMat44(physx::PxIdentity), const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxZero));
	physx::PxMat44 getInternalTransform() const
	{
		return m_internalTransform;
	}

	void replaceInteriorSubmeshes(uint32_t frameCount, uint32_t* frameIndices, uint32_t submeshIndex);

	IApexBSP* decomposeIntoIslands() const;
	// List navigation; NULL at a list end / non-BSP element (see BSPLink::getAdjBSP).
	IApexBSP* getNext() const
	{
		return static_cast<BSP*>(getAdjBSP(1));
	}
	IApexBSP* getPrev() const
	{
		return static_cast<BSP*>(getAdjBSP(0));
	}

	void deleteTriangles();

	void serialize(physx::PxFileBuf& stream) const;
	void deserialize(physx::PxFileBuf& stream);
	void visualize(nvidia::RenderDebugInterface& debugRender, uint32_t flags, uint32_t index = 0) const;
	void release();

	// Debug
	void performDiagnostics() const;

	// BSPLink - marks this list element as an actual BSP (see BSPLink::isBSP).
	bool isBSP()
	{
		return true;
	}

	// Node, a binary node with geometric data.  A Leaf holds a Region, a Branch
	// holds a splitting Surface; the two share storage in a union discriminated
	// by m_type (which also requires Region and Surface to be union-compatible,
	// i.e. trivially copyable).
	class Node : public BinaryNode
	{
		Node& operator = (const Node&); // No assignment

	public:
		enum Type { Leaf, Branch };

		// Default node is a leaf with side = 1.
		Node() : m_type(Leaf)
		{
			m_leafData.side = 1;
		}

		PX_INLINE void setLeafData(const Region& leafData)
		{
			m_type = Leaf;
			m_leafData = leafData;
		}
		PX_INLINE void setBranchData(const Surface& branchData)
		{
			m_type = Branch;
			m_branchData = branchData;
		}

		PX_INLINE Type getType() const
		{
			return (Type)m_type;
		}

		// Typed accessors; asserting the discriminant matches in debug builds.
		PX_INLINE Region* getLeafData()
		{
			PX_ASSERT(getType() == Leaf);
			return &m_leafData;
		}
		PX_INLINE Surface* getBranchData()
		{
			PX_ASSERT(getType() == Branch);
			return &m_branchData;
		}
		PX_INLINE const Region* getLeafData() const
		{
			PX_ASSERT(getType() == Leaf);
			return &m_leafData;
		}
		PX_INLINE const Surface* getBranchData() const
		{
			PX_ASSERT(getType() == Branch);
			return &m_branchData;
		}

		// Downcasting convenience wrappers over BinaryNode navigation.
		PX_INLINE Node* getParent() const
		{
			return (Node*)BinaryNode::getParent();
		}
		PX_INLINE Node* getChild(uint32_t index) const
		{
			return (Node*)BinaryNode::getChild(index);
		}

		// Iterator (uses a stack, but no recursion)
		// Can handle branches with NULL children
		// Depth-first, child-0-before-child-1 traversal.  When a branch has a NULL
		// child, node() yields NULL for that slot; callers should check node().
		// NOTE: the const overload casts constness away, so a const tree can hand
		// back mutable nodes through this iterator.
		class It
		{
		public:
			PX_INLINE It(const Node* root) : m_current(const_cast<Node*>(root)), m_valid(true) {}
			PX_INLINE It(Node* root) : m_current(root), m_valid(true) {}

			PX_INLINE bool valid() const
			{
				return m_valid;
			}

			PX_INLINE Node* node() const
			{
				return m_current;
			}

			// Advance: descend into child 0 (deferring child 1 on the stack),
			// otherwise pop the next deferred subtree, otherwise finish.
			PX_INLINE void inc()
			{
				if (m_current != NULL && m_current->getType() == Branch)
				{
					m_stack.pushBack(m_current->getChild(1));
					m_current = m_current->getChild(0);
				}
				else
				if (!m_stack.empty())
				{
					m_current = m_stack.popBack();
				}
				else
				{
					m_current = NULL;
					m_valid = false;
				}
			}

		private:
			Node* m_current;				// node currently visited (may be NULL for a missing child)
			physx::Array<Node*> m_stack;	// deferred right (child-1) subtrees
			bool m_valid;					// false once traversal is exhausted
		};

	protected:
		uint32_t m_type;	// Type discriminant for the union below

		union
		{
			Region m_leafData;		// valid when m_type == Leaf
			Surface m_branchData;	// valid when m_type == Branch
		};
	};

	// A single halfspace presented as a GSA halfspace set: farthest_halfspace
	// always reports m_plane and returns its homogeneous dot with 'point'.
	class Halfspace : public GSA::VS3D_Halfspace_Set
	{
	public:
		Halfspace(const Plane plane) : m_plane(plane) {}

		virtual GSA::real farthest_halfspace(GSA::real plane[4], const GSA::real point[4])
		{
			for (int i = 0; i < 4; ++i) plane[i] = (GSA::real)m_plane[i];
			return plane[0]*point[0] + plane[1]*point[1] + plane[2]*point[2] + plane[3]*point[3];
		}

		Halfspace& operator = (const Halfspace& halfspace) { m_plane = halfspace.m_plane; return *this; }

	private:
		Plane m_plane;	// the single bounding plane
	};

	// Halfspace set describing the convex region of a BSP leaf: the planes are
	// taken from m_planes, selected by walking from m_leaf toward the root
	// (implementation of farthest_halfspace is elsewhere).  calculate() runs the
	// GSA emptiness test and caches the result for is_nonempty().
	class RegionShape : public GSA::VS3D_Halfspace_Set
	{
	public:
		RegionShape(const Plane* planes, Real skinWidth = (Real)0) : m_planes(planes), m_leaf(NULL), m_nonempty(true), m_skinWidth(skinWidth) {}

		virtual GSA::real farthest_halfspace(GSA::real plane[4], const GSA::real point[4]);

		void set_leaf(const BSP::Node* leaf)
		{
			m_leaf = leaf;
		}

		void calculate()
		{
			m_nonempty = (1 == GSA::vs3d_test(*this));
		}

		bool is_nonempty() const
		{
			return m_nonempty;
		}

#if 0
		bool intersects_halfspace(const Plane* plane)
		{
			Halfspace halfspace(plane);
			set_shapes(this, &halfspace);
			return intersect();
		}
#endif

	private:
		const Plane* m_planes;		// shared splitting-plane table (not owned)
		const BSP::Node* m_leaf;	// leaf whose region is being described
		bool m_nonempty;			// cached result of calculate()
		Real m_skinWidth;			// inflation applied to the region
	};

private:
	// Encodes a binary boolean operation as an XOR (Zhegalkin) polynomial:
	// result = (c_ba & a & b) ^ (c_b & b) ^ (c_a & a) ^ c_k, with the four
	// coefficient bits taken from the Operation enum value (bits 3..0).
	class BoolOp
	{
	public:
		BoolOp(Operation::Enum op) : c_ba(((uint32_t)op >> 3) & 1), c_b(((uint32_t)op >> 2) & 1), c_a(((uint32_t)op >> 1) & 1), c_k((uint32_t)op & 1) {}

		uint32_t operator()(uint32_t a, uint32_t b) const
		{
			return (c_ba & a & b) ^(c_b & b) ^(c_a & a) ^ c_k;
		}

	private:
		uint32_t c_ba, c_b, c_a, c_k;	// polynomial coefficients (each 0 or 1)
	};

	// Parameters held constant across a tree build.
	struct BuildConstants
	{
		BSPBuildParameters m_params;
		float m_recipMaxArea;
	};

	void clear();

	void transform(const Mat4Real& tm, bool transformFrames = true);

	// Returns the area and volume of the clipped mesh. clippedMesh and triangleInfo may be NULL, in which case nothing is done but
	// the area and volume calculation.
	void clipMeshToLeaf(Real& area, Real& volume, physx::Array<Triangle>* clippedMesh, physx::Array<ClippedTriangleInfo>* triangleInfo, const Node* leaf, float clipTolerance) const;

	// Called by buildTree - forcing no inline to ensure a small stack frame

	// Returns a new stackReadStop
	PX_INLINE uint32_t removeRedundantSurfacesFromStack(physx::Array<Surface>& surfaceStack, uint32_t stackReadStart, uint32_t stackReadStop, Node* leaf);
	PX_INLINE void assignLeafSide(Node* leaf, QuantityProgressListener* quantityListener);
	PX_INLINE void createBranchSurfaceAndSplitStack(uint32_t childReadStart[2], uint32_t childReadStop[2], Node* node, physx::Array<Surface>& surfaceStack,
	        uint32_t stackReadStart, uint32_t stackReadStop, const BuildConstants& buildConstants);

	// Recursive functions
	void complementLeaves(Node* root) const;
	void mergeLeaves(const BoolOp& op, Node* root);
	void clipMeshToLeaves(physx::Array<Triangle>& clippedMesh, physx::Array<ClippedTriangleInfo>& triangleInfo, Node* root, float clipTolerance) const;
	void clone(Node* root, const Node* originalRoot);
	void combineTrees(Node* root, const Node* combineRoot, uint32_t triangleIndexOffset, uint32_t planeIndexOffset);
	bool buildTree(Node* root, physx::Array<Surface>& surfaceStack, uint32_t stackReadStart, uint32_t stackReadStop,
	               const BuildConstants& buildConstants, QuantityProgressListener* quantityListener, volatile bool* cancel = NULL);
	void visualizeNode(nvidia::RenderDebugInterface& debugRender, uint32_t flags, const Node* root) const;
	bool addLeafAreasAndVolumes(Real& totalArea, Real& totalVolume, const Node* root, bool inside, const BoolOp& op) const;
	void serializeNode(const Node* root, physx::PxFileBuf& stream) const;
	Node* deserializeNode(uint32_t version, physx::PxFileBuf& stream);
	void releaseNode(Node* node);
	void indexInsideLeaves(uint32_t& index, Node* root) const;
	void listInsideLeaves(physx::Array<Node*>& insideLeaves, Node* root) const;
	void findInsideLeafNeighbors(physx::Array<nvidia::IntPair>& neighbors, Node* root) const;

	void clean();

	// Parameters
	// (sic: "tolerarnces" misspelling is kept - the implementation files reference
	// this member by name, so renaming it here would break them.)
	BSPTolerances m_tolerarnces;

	// Tree
	Node* m_root;

	// Internal mesh representation
	physx::Array<Triangle> m_mesh;			// triangles in internal (BSP) space
	physx::Array<Interpolator> m_frames;	// per-triangle vertex-data interpolation frames
	Real m_meshSize;
	physx::PxBounds3 m_meshBounds;
	physx::PxMat44 m_internalTransform;		// mesh space -> internal space
	Mat4Real m_internalTransformInverse;	// cached inverse of the above
	bool m_incidentalMesh;


	// Unique splitting planes
	physx::Array<Plane> m_planes;

	// Combination data (state carried between combine() and op())
	bool m_combined;
	Real m_combiningMeshSize;
	bool m_combiningIncidentalMesh;

	// Memory cache
	class BSPMemCache* m_memCache;
	bool m_ownsMemCache;	// true if the cache was created (and must be released) by this BSP
};
+
+
// Surface iterator; walks from a leaf's parent to the root of a tree, allowing inspection of surfaces along the way.
// At each step, surface() is the branch's splitting surface and side() is the
// child index the walk came up from (i.e. which side of the surface the leaf
// is on).  The iterator becomes invalid after passing the root.
class SurfaceIt
{
public:
	// Default-constructed iterator is invalid (m_current == NULL); do not call
	// inc()/surface() on it.
	PX_INLINE SurfaceIt() : m_current(NULL), m_side(0xFFFFFFFF) {}
	// Start at the given leaf's parent (constness is cast away; the iterator can
	// hand back mutable branch data from a const leaf).
	PX_INLINE SurfaceIt(const BSP::Node* leaf) : m_current((BSP::Node*)leaf)
	{
		PX_ASSERT(leaf != NULL && leaf->getType() == BSP::Node::Leaf);
		inc();
	}

	PX_INLINE bool valid() const
	{
		return m_current != NULL;
	}

	// Step one level up: remember which child we were, then move to the parent.
	// Precondition: valid().
	PX_INLINE void inc()
	{
		m_side = m_current->getIndex();
		m_current = m_current->getParent();
	}

	// Splitting surface of the current branch node.  Precondition: valid().
	PX_INLINE const Surface* surface() const
	{
		return m_current->getBranchData();
	}

	PX_INLINE uint32_t side() const
	{
		return m_side;
	}

private:
	BSP::Node* m_current;	// current branch node (NULL once past the root)
	uint32_t m_side;		// child index taken on the last step up
};
+
+
// IBSPMemCache implementation, several pools and growable arrays. Not global, so that concurrent calculations can use different pools.
// NOTE(review): clearAll presumably resets both the persistent and temporary
// sections while clearTemp resets only the temporary ones - confirm against
// the implementation.
class BSPMemCache : public IApexBSPMemCache, public nvidia::UserAllocated
{
public:

	BSPMemCache();

	void clearAll();
	void clearTemp();

	void release();

	// Persistent data
	nvidia::Pool<BSP::Node> m_nodePool;	// allocator for BSP tree nodes

	// Temporary data
	nvidia::Pool<LinkedVertex> m_linkedVertexPool;
	physx::Array<uint8_t> m_surfaceFlags;
	physx::Array<uint8_t> m_surfaceTestFlags;
};
+
+
// Mesh cleaning interface.
// Builds a cleaned output mesh (cleanedMesh) from BSP-clipped triangles, using
// triangleInfo to map clipped triangles back to originalTriangles and 'frames'
// to re-interpolate vertex data; 'planes' are the splitting planes, distanceTol
// the cleaning distance tolerance, and BSPToMeshTM maps BSP space back to mesh
// space.  NOTE(review): semantics inferred from parameter names/types - confirm
// against the implementation.
void
cleanMesh(physx::Array<nvidia::ExplicitRenderTriangle>& cleanedMesh, const physx::Array<Triangle>& mesh, physx::Array<ClippedTriangleInfo>& triangleInfo, const physx::Array<Plane>& planes, const physx::Array<Triangle>& originalTriangles, const physx::Array<Interpolator>& frames, Real distanceTol, const Mat4Real& BSPToMeshTM);
+
+}; // namespace ApexCSG
+
+#endif
+
+#endif // #define APEX_CSG_DEFS_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath.h
new file mode 100644
index 00000000..22884507
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath.h
@@ -0,0 +1,794 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_FAST_MATH_H
+#define APEX_CSG_FAST_MATH_H
+
+#include "ApexUsingNamespace.h"
+#include "PxMath.h"
+#include "PxVec3.h"
+
+#include "PsUtilities.h"
+
+#include "PxIntrinsics.h"
+#include <emmintrin.h>
+#include <fvec.h>
+
+#include <math.h>
+#include <float.h>
+
+#ifdef __SSE2__
+#define APEX_CSG_SSE
+#include <mmintrin.h>
+#include <emmintrin.h>
+#endif
+#ifdef __SSE3__
+#include <pmmintrin.h>
+#endif
+#ifdef __SSE4_1__
+#include <smmintrin.h>
+#endif
+
+namespace ApexCSG
+{
+
+/* Utilities */
+
// Squares a value; T needs only a binary operator* returning T.
template<typename T>
T square(T t)
{
	const T result = t * t;
	return result;
}
+
+
+/* Linear algebra */
+
+#define ALL_i( _D, _exp ) for( int i = 0; i < _D; ++i ) { _exp; }
+
+#ifndef APEX_CSG_LOOP_UNROLL
+#define APEX_CSG_LOOP_UNROLL 1
+#endif
+
+#ifndef APEX_CSG_SSE
+#define APEX_CSG_SSE 1
+#endif
+
+#ifndef APEX_CSG_INLINE
+#define APEX_CSG_INLINE 1
+#endif
+
+#ifndef APEX_CSG_ALIGN
+#define APEX_CSG_ALIGN 16
+#endif
+
+#if APEX_CSG_LOOP_UNROLL
+
+#define VEC_SIZE() sizeof(*this)/sizeof(Real)
+
+#define OP_VV(a,op,b,i) a[i] op b[i]
+#define OP_SV(a,op,b,i) a op b[i]
+#define OP_VS(a,op,b,i) a[i] op b
+#define OP_VVV(a,op1,b,op2,c,i) a[i] op1 b[i] op2 c[i]
+#define OP_SVV(a,op1,b,op2,c,i) a op1 b[i] op2 c[i]
+#define OP_VVS(a,op1,b,op2,c,i) a[i] op1 b[i] op2 c
+#define OP_D(_D) (_D == 0 ? 0 : (_D == 1 ? 1 : (_D == 2 ? 2 : (_D == 3 ? 3 : 3))))
+#define OP_NAME(_T) PX_CONCAT(OP_,_T)
+#define OP_2_NAME(_D) ALL_2_##_D /*PX_CONCAT(ALL_2_,OP_D(_D))*/
+#define OP_3_NAME(_D) ALL_3_##_D /*PX_CONCAT(ALL_3_,OP_D(_D))*/
+
+#define ALL_2_1( _T, a,op,b) OP_##_T (a,op,b,0)
+#define ALL_2_2( _T, a,op,b) ALL_2_1(_T,a,op,b); OP_##_T (a,op,b,1)
+#define ALL_2_3( _T, a,op,b) ALL_2_2(_T,a,op,b); OP_##_T (a,op,b,2)
+#define ALL_2_4( _T, a,op,b) ALL_2_3(_T,a,op,b); OP_##_T (a,op,b,3)
+#define ALL_VV_i( _D, a,op,b) OP_2_NAME(_D)(VV,a,op,b)
+#define ALL_SV_i( _D, a,op,b) OP_2_NAME(_D)(SV,a,op,b)
+#define ALL_VS_i( _D, a,op,b) OP_2_NAME(_D)(VS,a,op,b)
+
+#define ALL_3_1( _T, a,op1,b,op2,c) OP_NAME(_T) (a,op1,b,op2,c,0)
+#define ALL_3_2( _T, a,op1,b,op2,c) ALL_3_1(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,1)
+#define ALL_3_3( _T, a,op1,b,op2,c) ALL_3_2(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,2)
+#define ALL_3_4( _T, a,op1,b,op2,c) ALL_3_3(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,3)
+#define ALL_VVV_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(VVV,a,op1,b,op2,c)
+#define ALL_SVV_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(SVV,a,op1,b,op2,c)
+#define ALL_VVS_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(VVS,a,op1,b,op2,c)
+#define ALL_VVV( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(VVV,a,op1,b,op2,c)
+#define ALL_SVV( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(SVV,a,op1,b,op2,c)
+#define ALL_VVS( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(VVS,a,op1,b,op2,c)
+
+#else
+
+#define ALL_VV_i( _D, a,op,b) ALL_i( _D, a[i] op b[i] )
+#define ALL_SV_i( _D, a,op,b) ALL_i( _D, a op b[i] )
+#define ALL_VS_i( _D, a,op,b) ALL_i( _D, a[i] op b )
+#define ALL_VVV_i( _D, a,op1,b,op2,c) ALL_i( _D, a[i] op1 b[i] op2 c[i] )
+#define ALL_SVV_i( _D, a,op1,b,op2,c) ALL_i( _D, a op1 b[i] op2 c[i] )
+#define ALL_VVS_i( _D, a,op1,b,op2,c) ALL_i( _D, a[i] op1 b[i] op2 c )
+
+#endif
+
/* General vector */

// Empty base used solely to force APEX_CSG_ALIGN-byte alignment on all vector
// types (needed by the SSE loads/stores further below).  __declspec is
// MSVC-specific syntax.
__declspec(align(APEX_CSG_ALIGN)) class aligned { };

// Fixed-size vector of D elements of type T with component-wise arithmetic.
template<typename T, int D>
class Vec : public aligned
{
public:

	PX_INLINE Vec() {}
	// Broadcast constructor: all D components set to v.  Implicit on purpose -
	// it lets scalars participate in vector expressions (see normalize()).
	PX_INLINE Vec(const T& v)
	{
		set(v);
	}
	// From an array of at least D elements.
	PX_INLINE Vec(const T* v)
	{
		set(v);
	}

	PX_INLINE void set(const T& v)
	{
		ALL_i(D, el[i] = v);
	}
	PX_INLINE void set(const T* v)
	{
		ALL_i(D, el[i] = v[i]);
	}

	// Unchecked component access.
	PX_INLINE T& operator [](int i)
	{
		return el[i];
	}
	PX_INLINE const T& operator [](int i) const
	{
		return el[i];
	}

	PX_INLINE Vec operator - () const
	{
		Vec r;
		ALL_i(D, r[i] = -el[i]);
		return r;
	}

	// Component-wise arithmetic.
	PX_INLINE Vec operator + (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] + v[i]);
		return r;
	}
	PX_INLINE Vec operator - (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] - v[i]);
		return r;
	}
	PX_INLINE Vec operator * (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] * v[i]);
		return r;
	}
	PX_INLINE Vec operator / (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] / v[i]);
		return r;
	}

	PX_INLINE Vec operator * (T v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] * v);
		return r;
	}
	// One divide + D multiplies; assumes T is real-valued (integer T would
	// truncate the reciprocal).
	PX_INLINE Vec operator / (T v) const
	{
		return *this * ((T)1 / v);
	}

	PX_INLINE Vec& operator += (const Vec& v)
	{
		ALL_i(D, el[i] += v[i]);
		return *this;
	}
	PX_INLINE Vec& operator -= (const Vec& v)
	{
		ALL_i(D, el[i] -= v[i]);
		return *this;
	}
	PX_INLINE Vec& operator *= (const Vec& v)
	{
		ALL_i(D, el[i] *= v[i]);
		return *this;
	}
	PX_INLINE Vec& operator /= (const Vec& v)
	{
		ALL_i(D, el[i] /= v[i]);
		return *this;
	}

	// Dot product.
	PX_FORCE_INLINE T operator | (const Vec& v) const
	{
		T r = (T)0;
		ALL_i(D, r += el[i] * v[i]);
		return r;
	}

	// Scales to unit length in place; returns the original length (0 for a zero
	// vector, which is left unchanged).  Defined out-of-line below.
	PX_INLINE T normalize();

	PX_INLINE T lengthSquared() const
	{
		return *this | *this;
	}

protected:
	T el[D];	// components
};
+
// Normalizes in place and returns the original length: recipL * l2 ==
// l2 / sqrt(l2) == sqrt(l2).  A zero vector is left unchanged and 0 returned.
// Note: '*this *= recipL' works because the scalar broadcasts through the
// implicit Vec(const T&) constructor into operator*=(const Vec&).
template<typename T, int D>
PX_INLINE T
Vec<T, D>::normalize()
{
	const T l2 = *this | *this;
	if (l2 == (T)0)
	{
		return (T)0;
	}
	const T recipL = (T)1 / physx::PxSqrt(l2);
	*this *= recipL;
	return recipL * l2;
}
+
// Left scalar multiplication: s * v, component-wise.
template<typename T, int D>
PX_INLINE Vec<T, D>
operator * (T s, const Vec<T, D>& v)
{
	Vec<T, D> r;
	ALL_i(D, r[i] = s * v[i]);
	//ALL_VVS_i(D, r, =, v, *, s);
	return r;
}
+
+
+/* Popular real vectors */
+template<typename T>
+class Vec2 : public Vec<T, 2>
+{
+public:
+ PX_INLINE Vec2() {}
+ PX_INLINE Vec2(const Vec2& v)
+ {
+ ALL_VV_i(2, el, =, v);
+ }
+ PX_INLINE Vec2(const Vec<T, 2>& v)
+ {
+ ALL_VV_i(2, el, =, v);
+ }
+ PX_INLINE Vec2(T x, T y)
+ {
+ set(x, y);
+ }
+ PX_INLINE Vec2& operator = (const Vec2& v)
+ {
+ ALL_VV_i(2, el, =, v);
+ return *this;
+ }
+
+ PX_INLINE void set(const T* v)
+ {
+ ALL_VV_i(2, el, =, v);
+ }
+ PX_INLINE void set(T x, T y)
+ {
+ el[0] = x;
+ el[1] = y;
+ }
+
+ PX_INLINE T operator ^(const Vec2& v) const
+ {
+ return el[0] * v.el[1] - el[1] * v.el[0];
+ }
+};
+typedef Vec2<Real> Vec2Real;
+
+template<typename T>
+class Vec3 : public Vec<T, 3>
+{
+public:
+ PX_INLINE Vec3() {}
+ PX_INLINE Vec3(const Vec3& v)
+ {
+ ALL_VV_i(3, el, =, v);
+ }
+ PX_INLINE Vec3(const Vec<T, 3>& v)
+ {
+ ALL_VV_i(3, el, =, v);
+ }
+ PX_INLINE Vec3(T x, T y, T z)
+ {
+ set(x, y, z);
+ }
+ PX_INLINE Vec3& operator = (const Vec3& v)
+ {
+ ALL_VV_i(3, el, =, v);
+ return *this;
+ }
+
+ PX_INLINE void set(const T* v)
+ {
+ ALL_VV_i(3, el, =, v);
+ }
+ PX_INLINE void set(T x, T y, T z)
+ {
+ el[0] = x;
+ el[1] = y;
+ el[2] = z;
+ }
+};
+typedef Vec3<Real> Vec3Real;
+
+template<typename T>
+class Vec4 : public Vec<Real, 4>
+{
+public:
+ PX_INLINE Vec4() {}
+ PX_INLINE Vec4(const Vec4& v)
+ {
+ ALL_VV_i(4, el, =, v);
+ }
+ PX_INLINE Vec4(const Vec<T, 4>& v)
+ {
+ ALL_VV_i(4, el, =, v);
+ }
+ PX_INLINE Vec4(T x, T y, T z, T w)
+ {
+ set(x, y, z, w);
+ }
+ PX_INLINE Vec4& operator = (const Vec4& v)
+ {
+ ALL_VV_i(4, el, =, v);
+ return *this;
+ }
+
+ PX_INLINE void set(const T* v)
+ {
+ ALL_VV_i(4, el, =, v);
+ }
+ PX_INLINE void set(T x, T y, T z, T w)
+ {
+ el[0] = x;
+ el[1] = y;
+ el[2] = z;
+ el[3] = w;
+ }
+
+#if APEX_CSG_INLINE
+ PX_INLINE Vec operator - () const
+ {
+ Vec4Real r;
+ ALL_VV_i(4, r, =, -el);
+ return r;
+ }
+
+ PX_INLINE Vec4 operator + (const Vec4& v) const
+ {
+ Vec r;
+ ALL_VVV_i(4, r, =, el, +, v);
+ return r;
+ }
+ PX_INLINE Vec4 operator - (const Vec4& v) const
+ {
+ Vec r;
+ ALL_VVV_i(4, r, =, el, -, v);
+ return r;
+ }
+ PX_INLINE Vec4 operator * (const Vec4& v) const
+ {
+ Vec r;
+ ALL_VVV_i(4, r, =, el, *, v);
+ return r;
+ }
+ PX_INLINE Vec4 operator / (const Vec4& v) const
+ {
+ Vec4 r;
+ ALL_VVV_i(4, r, =, el, /, v);
+ return r;
+ }
+
+ PX_INLINE Vec4 operator * (Real v) const
+ {
+ Vec4 r;
+ ALL_VVS_i(4, r, = , el, *, v);
+ return r;
+ }
+ PX_INLINE Vec4 operator / (Real v) const
+ {
+ return *this * (1. / v);
+ }
+
+ PX_INLINE Vec4& operator += (const Vec4& v)
+ {
+ ALL_VV_i(4, el, +=, v);
+ return *this;
+ }
+ PX_INLINE Vec4& operator -= (const Vec4& v)
+ {
+ ALL_VV_i(4, el, -=, v);
+ return *this;
+ }
+ PX_INLINE Vec4& operator *= (const Vec4& v)
+ {
+ ALL_VV_i(4, el, *=, v);
+ return *this;
+ }
+ PX_INLINE Vec4& operator *= (Real v)
+ {
+ ALL_VS_i(4, el, *=, v);
+ return *this;
+ }
+ PX_INLINE Vec4& operator /= (Real v)
+ {
+ Real vInv = 1. / v;
+ return operator*=(vInv);
+ }
+ PX_INLINE Vec4& operator /= (const Vec4& v)
+ {
+ ALL_VV_i(4, el, /=, v);
+ return *this;
+ }
+#endif /* #if APEX_CGS_INLINE */
+
+ template<typename U> friend U dot(const Vec4<U>&, const Vec4<U>&);
+ PX_FORCE_INLINE Real operator | (const Vec4& v) const
+ {
+ return dot(*this, v);
+ }
+};
+typedef Vec4<Real> Vec4Real;
+
+template<typename T>
+PX_FORCE_INLINE T dot(const Vec4<T>& a, const Vec4<T>& b)
+{
+ Real r = 0;
+ ALL_SVV_i(sizeof(a)/sizeof(T), r, +=, a, *, b);
+ return r;
+}
+
#if APEX_CSG_SSE

// Reinterpret a Vec4<T>'s storage as SSE registers.  Only meaningful when the
// component layout matches the register type (T = double for __m128d pairs,
// T = float for a single __m128); relies on the 16-byte alignment provided by
// the 'aligned' base class.
template<typename T> PX_INLINE __m128d& xy(Vec4<T>& v) { return *reinterpret_cast< __m128d*>(&v[0]); }
template<typename T> PX_INLINE const __m128d& xy(const Vec4<T>& v) { return *reinterpret_cast<const __m128d*>(&v[0]); }
template<typename T> PX_INLINE __m128d& zw(Vec4<T>& v) { return *reinterpret_cast< __m128d*>(&v[2]); }
template<typename T> PX_INLINE const __m128d& zw(const Vec4<T>& v) { return *reinterpret_cast<const __m128d*>(&v[2]); }
template<typename T> PX_INLINE __m128& xyzw(Vec4<T>& v) { return *reinterpret_cast< __m128*>(&v[0]); }
template<typename T> PX_INLINE const __m128& xyzw(const Vec4<T>& v) { return *reinterpret_cast<const __m128*>(&v[0]); }

// SSE2 specialization of the 4-vector dot product for double:
// computes (a.x*b.x + a.z*b.z, a.y*b.y + a.w*b.w) in one packed add, then a
// final scalar horizontal add.
template<>
PX_FORCE_INLINE double dot<double>(const Vec4<double>& a, const Vec4<double>& b)
{
	__declspec(align(16)) double r[2] = { 0., 0. };	// MSVC-specific alignment for the aligned store
	__m128d mresult;
	mresult = _mm_add_pd(_mm_mul_pd( xy(a), xy(b) ),
	                     _mm_mul_pd( zw(a), zw(b) ) );
	_mm_store_pd(r, mresult);
	return r[0] + r[1];
}

#endif /* #if APEX_CSG_SSE */
+
/* Position */

// Homogeneous point: a Vec4Real whose w component is always 1.  Every
// constructor and assignment routes through set(x, y, z), which forces w = 1;
// in particular, constructing from a generic Vec<Real, 4> discards its w.
class Pos : public Vec4Real
{
public:

	PX_INLINE Pos()
	{
		el[3] = 1;	// x, y, z intentionally left uninitialized
	}
	PX_INLINE Pos(Real x, Real y, Real z)
	{
		set(x, y, z);
	}
	// Broadcast: (c, c, c, 1).
	PX_INLINE Pos(Real c)
	{
		set(c, c, c);
	}
	PX_INLINE Pos(physx::PxVec3 p)
	{
		set(p.x, p.y, p.z);
	}
	PX_INLINE Pos(const Vec<Real, 4>& v)
	{
		set(v[0], v[1], v[2]);
	}
	PX_INLINE Pos(const float* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Pos(const double* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Pos(const Pos& p)
	{
		set(p[0], p[1], p[2]);
	}
	PX_INLINE Pos& operator = (const Pos& p)
	{
		set(p[0], p[1], p[2]);
		return *this;
	}

	// Sets (x, y, z, 1).
	PX_INLINE void set(Real x, Real y, Real z)
	{
		Vec4Real::set(x, y, z, (Real)1);
	}

};
+
+
/* Direction */

// Homogeneous direction: a Vec4Real whose w component is always 0.  Every
// constructor and assignment routes through set(x, y, z), which forces w = 0;
// constructing from a generic Vec<Real, 4> discards its w.
class Dir : public Vec4Real
{
public:

	PX_INLINE Dir()
	{
		el[3] = 0;	// x, y, z intentionally left uninitialized
	}
	PX_INLINE Dir(Real x, Real y, Real z)
	{
		set(x, y, z);
	}
	// Broadcast: (c, c, c, 0).
	PX_INLINE Dir(Real c)
	{
		set(c, c, c);
	}
	PX_INLINE Dir(physx::PxVec3 p)
	{
		set(p.x, p.y, p.z);
	}
	PX_INLINE Dir(const Vec<Real, 4>& v)
	{
		set(v[0], v[1], v[2]);
	}
	PX_INLINE Dir(const float* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Dir(const double* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Dir(const Dir& d)
	{
		set(d[0], d[1], d[2]);
	}
	PX_INLINE Dir& operator = (const Dir& d)
	{
		set(d[0], d[1], d[2]);
		return *this;
	}

	// Sets (x, y, z, 0).
	PX_INLINE void set(Real x, Real y, Real z)
	{
		Vec4Real::set(x, y, z, (Real)0);
	}

	// 3D cross product; the result is a Dir (w = 0) by construction.
	PX_INLINE Dir operator ^(const Dir& d) const
	{
		return Dir(el[1] * d[2] - el[2] * d[1], el[2] * d[0] - el[0] * d[2], el[0] * d[1] - el[1] * d[0]);
	}

};
+
+
/* Plane */

// Plane stored as the homogeneous row (n.x, n.y, n.z, d), so that the signed
// distance to a point p (with w = 1) is simply the homogeneous dot p | plane
// = n.p + d.  Note the default constructor leaves all components uninitialized.
class Plane : public Vec4Real
{
public:

	PX_INLINE Plane() {}
	PX_INLINE Plane(const Dir& n, Real d)
	{
		set(n, d);
	}
	PX_INLINE Plane(const Dir& n, const Pos& p)
	{
		set(n, p);
	}
	PX_INLINE Plane(const Vec<Real, 4>& v)
	{
		Vec4Real::set(v[0], v[1], v[2], v[3]);
	}
	PX_INLINE Plane(const Plane& p)
	{
		ALL_VV_i(4, el, =, p);
	}
	PX_INLINE Plane& operator = (const Plane& p)
	{
		ALL_VV_i(4, el, =, p);
		return *this;
	}

	// From normal and plane constant d.
	PX_INLINE void set(const Dir& n, Real d)
	{
		ALL_VV_i(3, el, =, n);
		el[3] = d;
	}
	// From normal and a point on the plane: d = -(n . p).
	PX_INLINE void set(const Dir& n, const Pos& p)
	{
		ALL_VV_i(3, el, =, n);
		el[3] = -(n | p);
	}

	PX_INLINE Dir normal() const
	{
		return Dir(el[0], el[1], el[2]);
	}
	PX_INLINE Real d() const
	{
		return el[3];
	}
	// Signed distance (scaled by |normal| unless the plane is normalized);
	// works because Pos has w = 1, so p | this = n.p + d.
	PX_INLINE Real distance(const Pos& p) const
	{
		return p | *this;
	}
	// Closest point on the plane; assumes the normal is unit length.
	PX_INLINE Pos project(const Pos& p) const
	{
		return p - normal() * distance(p);
	}

	PX_INLINE Real normalize();
};
+
+PX_INLINE Real
+Plane::normalize()
+{
+ const Real oldD = el[3];
+ el[3] = 0;
+ const Real l2 = *this | *this;
+ if (l2 == 0)
+ {
+ return 0;
+ }
+ const Real recipL = 1. / physx::PxSqrt(l2);
+ el[3] = oldD;
+ *this *= recipL;
+ return recipL * l2;
+}
+
+
/* Matrix */

// Row-major 4x4 matrix of Real: el[i] is row i, stored as a Vec4Real.
// 16-byte aligned (MSVC __declspec) so rows can be used with the SSE helpers.
__declspec(align(16)) class Mat4Real : public Vec<Vec4Real, 4>
{
public:

	PX_INLINE Mat4Real() {}	// uninitialized
	// Diagonal matrix v*I (v = 1 gives identity).
	PX_INLINE Mat4Real(const Real v)
	{
		set(v);
	}
	// From 16 Reals in row-major order.
	PX_INLINE Mat4Real(const Real* v)
	{
		set(v);
	}

	PX_INLINE void set(const Real v)
	{
		el[0].set(v, 0, 0, 0);
		el[1].set(0, v, 0, 0);
		el[2].set(0, 0, v, 0);
		el[3].set(0, 0, 0, v);
	}
	PX_INLINE void set(const Real* v)
	{
		ALL_i(4, el[i].set(v + 4 * i));
	}
	PX_INLINE void setCol(int colN, const Vec4Real& col)
	{
		ALL_i(4, el[i][colN] = col[i]);
	}

	// Matrix * vector: r[i] = row_i . v.
	PX_INLINE Vec4Real operator * (const Vec4Real& v) const
	{
		Vec4Real r;
		ALL_VVS_i(4, r, =, el, |, v);
		return r;
	}
	// Matrix * matrix (straightforward triple loop).
	PX_INLINE Mat4Real operator * (const Mat4Real& m) const
	{
		Mat4Real r((Real)0);
		for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j) for (int k = 0; k < 4; ++k)
			{
				r[i][j] += el[i][k] * m[k][j];
			}
		return r;
	}
	PX_INLINE Mat4Real operator * (Real s) const
	{
		Mat4Real r;
		ALL_VVS_i(4, r, =, el, *, s);
		return r;
	}
	PX_INLINE Mat4Real operator / (Real s) const
	{
		return *this * ((Real)1 / s);
	}

	PX_INLINE Mat4Real& operator *= (Real s)
	{
		ALL_VS_i(4, el, *=, s);
		return *this;
	}
	PX_INLINE Mat4Real& operator /= (Real s)
	{
		*this *= ((Real)1 / s);
		return *this;
	}

	PX_INLINE Vec4Real getCol(int colN) const
	{
		Vec4Real col;
		ALL_i(4, col[i] = el[i][colN]);
		return col;
	}
	PX_INLINE Real det3() const
	{
		return el[0] | (Dir(el[1]) ^ Dir(el[2]));	// Determinant of upper-left 3x3 block (same as full determinant if last row = (0,0,0,1))
	}
	PX_INLINE Mat4Real cof34() const;	// Assumes last row = (0,0,0,1)
	PX_INLINE Mat4Real inverse34() const;	// Assumes last row = (0,0,0,1)
};
+
// Cofactor matrix of an affine matrix (last row assumed (0,0,0,1)).
// Rows 0-2 are the pairwise cross products of the other two upper rows (the
// 3x3 cofactors, zero-extended); row 3 carries the translation terms, and
// r[3][3] ends up equal to det3() (row 0 dotted with its own cofactor row).
// Used by inverse34(), which transposes this and divides by the determinant.
PX_INLINE Mat4Real
Mat4Real::cof34() const
{
	Mat4Real r;
	r[0].set(el[1][1]*el[2][2] - el[1][2]*el[2][1], el[1][2]*el[2][0] - el[1][0]*el[2][2], el[1][0]*el[2][1] - el[1][1]*el[2][0], 0);
	r[1].set(el[2][1]*el[0][2] - el[2][2]*el[0][1], el[2][2]*el[0][0] - el[2][0]*el[0][2], el[2][0]*el[0][1] - el[2][1]*el[0][0], 0);
	r[2].set(el[0][1]*el[1][2] - el[0][2]*el[1][1], el[0][2]*el[1][0] - el[0][0]*el[1][2], el[0][0]*el[1][1] - el[0][1]*el[1][0], 0);
	r[3] = -el[0][3] * r[0] - el[1][3] * r[1] - el[2][3] * r[2];
	r[3][3] = r[0][0] * el[0][0] + r[0][1] * el[0][1] + r[0][2] * el[0][2];
	return r;
}
+
// Inverse of an affine matrix (last row assumed (0,0,0,1)), computed as the
// transposed cofactor matrix divided by the determinant (cof[3][3] == det3).
// If |det| <= EPS_REAL^3 the reciprocal is forced to 0, so a degenerate input
// yields a matrix with zero upper 3x4 block (and last row (0,0,0,1)) rather
// than infinities/NaNs.
PX_INLINE Mat4Real
Mat4Real::inverse34() const
{
	const Mat4Real cof = cof34();
	Mat4Real inv;
	const Real recipDet = physx::PxAbs(cof[3][3]) > EPS_REAL * EPS_REAL * EPS_REAL ? 1 / cof[3][3] : (Real)0;
	for (int i = 0; i < 3; ++i)
	{
		for (int j = 0; j < 4; ++j)
		{
			// Transpose of the cofactor matrix, scaled by 1/det.
			inv[i][j] = cof[j][i] * recipDet;
		}
	}
	inv[3].set(0, 0, 0, 1);
	return inv;
}
+
// Left scalar multiplication: s * m, row by row.
PX_INLINE Mat4Real
operator * (Real s, const Mat4Real& m)
{
	Mat4Real r;
	ALL_VVS_i(4, r, =, m, *, s);
	return r;
}
+
+} // namespace ApexCSG
+
+#endif // #define APEX_CSG_FAST_MATH_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath2.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath2.h
new file mode 100644
index 00000000..6f0f9ccf
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath2.h
@@ -0,0 +1,630 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_FAST_MATH_2_H
+#define APEX_CSG_FAST_MATH_2_H
+
+#include "ApexUsingNamespace.h"
+#include "PxMath.h"
+#include "PxVec3.h"
+
+#include "PsUtilities.h"
+
+#include "PxIntrinsics.h"
+#include <emmintrin.h>
+#include <fvec.h>
+
+#include <math.h>
+#include <float.h>
+
+#ifdef __SSE2__
+#define APEX_CSG_SSE
+#include <mmintrin.h>
+#include <emmintrin.h>
+#endif
+#ifdef __SSE3__
+#include <pmmintrin.h>
+#endif
+#ifdef __SSE4_1__
+#include <smmintrin.h>
+#endif
+
+namespace ApexCSG
+{
+
+/* Utilities */
+
// Returns t squared, written as a plain product (cheaper and exact compared
// with calling a general power routine).
template<typename T>
T square(T t)
{
	const T product = t * t;
	return product;
}
+
+
+/* Linear algebra */
+
// Runtime-loop form used by the matrix code below regardless of the unroll
// setting; i is the induction variable visible inside _exp.
#define ALL_i( _D, _exp ) for( int i = 0; i < _D; ++i ) { _exp; }

// Build-configuration defaults; each may be pre-defined by the build system.
#ifndef APEX_CSG_LOOP_UNROLL
#define APEX_CSG_LOOP_UNROLL 1
#endif

// NOTE(review): this forces APEX_CSG_SSE on even when __SSE2__ was not
// detected above — presumably intentional for MSVC builds (the header also
// includes <fvec.h> and uses __declspec), but confirm for non-SSE targets.
#ifndef APEX_CSG_SSE
#define APEX_CSG_SSE 1
#endif

#ifndef APEX_CSG_INLINE
#define APEX_CSG_INLINE 1
#endif

#ifndef APEX_CSG_ALIGN
#define APEX_CSG_ALIGN 16
#endif
+
// Unrolled componentwise-operation macros.  The _D argument selects an
// ALL_{2,3}_<D> expander by token pasting, so it must be an integer literal
// (1..4).  Operand-kind key: V = vector (indexed), S = scalar; e.g.
// ALL_VVS_i(4, r, =, m, *, s) expands to r[k] = m[k] * s for k = 0..3.
#if APEX_CSG_LOOP_UNROLL

// NOTE(review): VEC_SIZE() and the ALL_VVV/ALL_SVV/ALL_VVS forms below that
// use it cannot expand to a valid macro name (## pastes the unexpanded
// 'VEC_SIZE()' tokens, yielding ALL_3_sizeof...); they appear to be unused.
#define VEC_SIZE() sizeof(*this)/sizeof(Real)

#define OP_VV(a,op,b,i) a[i] op b[i]
#define OP_SV(a,op,b,i) a op b[i]
#define OP_VS(a,op,b,i) a[i] op b
#define OP_VVV(a,op1,b,op2,c,i) a[i] op1 b[i] op2 c[i]
#define OP_SVV(a,op1,b,op2,c,i) a op1 b[i] op2 c[i]
#define OP_VVS(a,op1,b,op2,c,i) a[i] op1 b[i] op2 c
#define OP_D(_D) (_D == 0 ? 0 : (_D == 1 ? 1 : (_D == 2 ? 2 : (_D == 3 ? 3 : 3))))
#define OP_NAME(_T) PX_CONCAT(OP_,_T)
#define OP_2_NAME(_D) ALL_2_##_D /*PX_CONCAT(ALL_2_,OP_D(_D))*/
#define OP_3_NAME(_D) ALL_3_##_D /*PX_CONCAT(ALL_3_,OP_D(_D))*/

// Two-operand expanders: ALL_2_N applies OP_<_T> for indices 0..N-1.
#define ALL_2_1( _T, a,op,b) OP_##_T (a,op,b,0)
#define ALL_2_2( _T, a,op,b) ALL_2_1(_T,a,op,b); OP_##_T (a,op,b,1)
#define ALL_2_3( _T, a,op,b) ALL_2_2(_T,a,op,b); OP_##_T (a,op,b,2)
#define ALL_2_4( _T, a,op,b) ALL_2_3(_T,a,op,b); OP_##_T (a,op,b,3)
#define ALL_VV_i( _D, a,op,b) OP_2_NAME(_D)(VV,a,op,b)
#define ALL_SV_i( _D, a,op,b) OP_2_NAME(_D)(SV,a,op,b)
#define ALL_VS_i( _D, a,op,b) OP_2_NAME(_D)(VS,a,op,b)

// Three-operand expanders (destination, two sources / source and scalar).
#define ALL_3_1( _T, a,op1,b,op2,c) OP_NAME(_T) (a,op1,b,op2,c,0)
#define ALL_3_2( _T, a,op1,b,op2,c) ALL_3_1(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,1)
#define ALL_3_3( _T, a,op1,b,op2,c) ALL_3_2(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,2)
#define ALL_3_4( _T, a,op1,b,op2,c) ALL_3_3(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,3)
#define ALL_VVV_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(VVV,a,op1,b,op2,c)
#define ALL_SVV_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(SVV,a,op1,b,op2,c)
#define ALL_VVS_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(VVS,a,op1,b,op2,c)
#define ALL_VVV( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(VVV,a,op1,b,op2,c)
#define ALL_SVV( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(SVV,a,op1,b,op2,c)
#define ALL_VVS( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(VVS,a,op1,b,op2,c)

#else

// Loop fallbacks: identical semantics via a runtime loop; here _D may be any
// integral expression (unlike the unrolled path above).
#define ALL_VV_i( _D, a,op,b) ALL_i( _D, a[i] op b[i] )
#define ALL_SV_i( _D, a,op,b) ALL_i( _D, a op b[i] )
#define ALL_VS_i( _D, a,op,b) ALL_i( _D, a[i] op b )
#define ALL_VVV_i( _D, a,op1,b,op2,c) ALL_i( _D, a[i] op1 b[i] op2 c[i] )
#define ALL_SVV_i( _D, a,op1,b,op2,c) ALL_i( _D, a op1 b[i] op2 c[i] )
#define ALL_VVS_i( _D, a,op1,b,op2,c) ALL_i( _D, a[i] op1 b[i] op2 c )

#endif
+
#if 1

// DEFINE_VEC expands to the interface shared by the Vec<T,D> specializations
// below: constructors, element access, componentwise arithmetic, set(), and
// normalize().  It is a macro (rather than a base class) so each
// specialization keeps T el[_D] as its only data member.  The dot product
// (operator|) is intentionally left to each specialization — see the
// commented-out generic version after the macro.
#define DEFINE_VEC(_D) \
	public: \
	typedef Vec<T,_D> VecD; \
	PX_INLINE VecD() {} \
	PX_INLINE T& operator [](int i) { return el[i]; } \
	PX_INLINE const T& operator [](int i) const { return el[i]; } \
	PX_INLINE VecD(const VecD& v) { set(v); } \
	PX_INLINE VecD(const T& v) { set(v); } \
	PX_INLINE VecD(const T* v) { set(v); } \
	PX_INLINE VecD& operator=(const VecD& v) { ALL_VV_i(_D, el, =, v); return *this; } \
	PX_INLINE VecD operator/(T v) const { return *this * ((T)1 / v); } \
	PX_INLINE VecD operator+(const VecD& v) const { VecD r; ALL_VVV_i(_D, r, =, el, +, v); return r; } \
	PX_INLINE VecD operator-(const VecD& v) const { VecD r; ALL_VVV_i(_D, r, =, el, -, v); return r; } \
	PX_INLINE VecD operator*(const VecD& v) const { VecD r; ALL_VVV_i(_D, r, =, el, *, v); return r; } \
	PX_INLINE VecD operator-( ) const { VecD r; ALL_VV_i( _D, r, =, -el); return r;} \
	PX_INLINE VecD& operator+=(const VecD& v) {ALL_VV_i(_D, el, +=, v); return *this; } \
	PX_INLINE VecD& operator-=(const VecD& v) {ALL_VV_i(_D, el, -=, v); return *this; } \
	PX_INLINE VecD& operator*=(const VecD& v) {ALL_VV_i(_D, el, *=, v); return *this; } \
	PX_INLINE VecD& operator/=(const VecD& v) {ALL_VV_i(_D, el, /=, v); return *this; } \
	PX_INLINE void set(const VecD& v) { ALL_VV_i(_D, el, =, v); } \
	PX_INLINE void set(const T* v) { ALL_VV_i(_D, el, =, v); } \
	PX_INLINE void set(const T& v) { ALL_VS_i(_D, el, =, v); } \
	PX_INLINE T lengthSquared() const { return *this | *this; } \
	PX_INLINE T normalize() { const T l2 = *this | *this; if (l2 == (T)0) { return (T)0; } const T recipL = (T)1 / physx::PxSqrt(l2); *this *= recipL; return recipL * l2; } \
	protected:T el[_D] ; \

	//PX_FORCE_INLINE T operator|(const VecD& v) const { T r = (T)0; ALL_SVV_i(_D, r, +=, el, *, v); return r; }
#endif
+
+/* General vector */
+
// Empty base class whose only purpose is to give every Vec the SIMD
// alignment required by the SSE accessors below.
// NOTE(review): __declspec is MSVC-only; gcc/clang builds would need
// __attribute__((aligned)) — confirm this header is MSVC-specific (it also
// includes <fvec.h>).
__declspec(align(APEX_CSG_ALIGN)) class aligned { };

// Primary template is deliberately empty: only the D = 2, 3, 4
// specializations below are usable.
template<typename T, int D>
class Vec : public aligned
{

};
+
+
+template<typename T, int D>
+PX_INLINE Vec<T, D>
+operator * (T s, const Vec<T, D>& v)
+{
+ Vec<T, D> r;
+ ALL_i(D, r[i] = s * v[i]);
+ //ALL_VVS_i(D, r, =, v, *, s);
+ return r;
+}
+
+template<typename T>
+class Vec<T, 2> : public aligned {
+ DEFINE_VEC(2);
+public:
+ typedef Vec<T,2> Vec2;
+
+ PX_INLINE Vec2(T x, T y)
+ {
+ set(x, y);
+ }
+
+ PX_INLINE void set(T x, T y)
+ {
+ el[0] = x;
+ el[1] = y;
+ }
+
+ PX_INLINE Real operator ^ (const Vec2& v) const
+ {
+ return el[0] * v.el[1] - el[1] * v.el[0];
+ }
+
+ PX_FORCE_INLINE T operator | (const Vec2& v) const
+ {
+ T r = (T)0;
+ ALL_SVV_i(2, r, +=, el, *, v);
+ return r;
+ }
+
+};
+
// 3D specialization: componentwise constructor/set plus the dot product that
// DEFINE_VEC leaves to each specialization.
template<typename T>
class Vec<T, 3> : public aligned {
	DEFINE_VEC(3);
public:
	typedef Vec<T,3> Vec3;

	PX_INLINE Vec3(T x, T y, T z)
	{
		set(x, y, z);
	}

	PX_INLINE void set(T x, T y, T z)
	{
		el[0] = x;
		el[1] = y;
		el[2] = z;
	}

	// Dot product.
	PX_FORCE_INLINE T operator | (const Vec3& v) const
	{
		T r = (T)0;
		ALL_SVV_i(3, r, +=, el, *, v);
		return r;
	}
};
+
// 4D specialization.  When SSE is enabled it exposes its element storage as
// packed registers so the free dot() below can use intrinsics.
template<typename T>
class Vec<T, 4> : public aligned {
	DEFINE_VEC(4);
public:
	typedef Vec<T,4> Vec4;

	PX_INLINE Vec4(T x, T y, T z, T w)
	{
		set(x, y, z, w);
	}

	PX_INLINE void set(T x, T y, T z, T w)
	{
		el[0] = x;
		el[1] = y;
		el[2] = z;
		el[3] = w;
	}

#if APEX_CSG_SSE

	// Reinterpret the element array as SSE registers.
	// NOTE(review): relies on the 16-byte alignment from 'aligned' and on
	// type punning through reinterpret_cast (accepted by MSVC, but
	// strict-aliasing UB in ISO C++) — confirm for other toolchains.
#if APEX_CSG_DBL
	PX_INLINE __m128d& xy() { return *reinterpret_cast< __m128d*>(&el[0]); }
	PX_INLINE const __m128d& xy() const { return *reinterpret_cast<const __m128d*>(&el[0]); }
	PX_INLINE __m128d& zw() { return *reinterpret_cast< __m128d*>(&el[2]); }
	PX_INLINE const __m128d& zw() const { return *reinterpret_cast<const __m128d*>(&el[2]); }
#else
	PX_INLINE __m128& xyzw() { return *reinterpret_cast< __m128*>(&el[0]); }
	PX_INLINE const __m128& xyzw() const { return *reinterpret_cast<const __m128*>(&el[0]); }
#endif /* #if APEX_CSG_DBL */

	// SSE dot product is implemented as a free function (defined below).
	friend Real dot(const Vec4&, const Vec4&);
	PX_FORCE_INLINE Real operator | (const Vec4& v) const
	{
		return dot(*this, v);
	}

#endif /* #if APEX_CSG_SSE */
};
+
+/* Popular real vectors */
+
+typedef Vec<Real, 2> Vec2Real;
+typedef Vec<Real, 3> Vec3Real;
+typedef Vec<Real, 4> Vec4Real;
+
+#if APEX_CSG_SSE
+
+#if APEX_CSG_DBL
+
// Double-precision SSE2 dot product: multiply the (x,y) and (z,w) lanes in
// parallel, store the two packed partial sums, and finish the reduction with
// a scalar add.
PX_FORCE_INLINE Real dot(const Vec4Real& a, const Vec4Real& b)
{
	/*
	__m128d mr = _mm_add_sd ( _mm_mul_pd ( a.xy(), b.xy()),
	_mm_mul_sd ( a.zw(), b.zw()) ) ;
	mr = _mm_add_sd ( _mm_unpackhi_pd ( mr , mr ), mr );
	double r;
	_mm_store_sd(&r, mr);
	return r;*/
	__declspec(align(16)) double r[2] = { 0., 0. };
	__m128d mresult;
	mresult = _mm_add_pd(_mm_mul_pd( a.xy(), b.xy() ),
	                     _mm_mul_pd( a.zw(), b.zw() ) );
	_mm_store_pd(r, mresult);
	return r[0] + r[1];
}
+
+#else
+
+PX_FORCE_INLINE Real dot(const Vec4Real& a, const Vec4Real& b)
+{
+ float r;
+ _mm_store_ps(&s, _mm_dot_pos(a.xyzw(), b.xyzw()));
+ return r;
+}
+
+#endif /* #if APEX_CSG_DBL */
+
+#else
+
+PX_FORCE_INLINE Real dot(const Vec4Real& a, const Vec4Real& b)
+{
+ Real r = 0;
+ ALL_SVV_i(sizeof(a)/sizeof(Real), r, +=, a, *, b);
+ return r;
+}
+
+#endif /* #if APEX_CSG_SSE */
+
+/* Position */
+
// Homogeneous point: the w component (el[3]) is pinned to 1 by every
// constructor and by set(), so dotting a Plane with a Pos (operator|)
// yields the signed distance directly.
class Pos : public Vec4Real
{
public:

	PX_INLINE Pos()
	{
		el[3] = 1;
	}
	PX_INLINE Pos(Real x, Real y, Real z)
	{
		set(x, y, z);
	}
	// Splat constructor: (c, c, c, 1).
	PX_INLINE Pos(Real c)
	{
		set(c, c, c);
	}
	PX_INLINE Pos(physx::PxVec3 p)
	{
		set(p.x, p.y, p.z);
	}
	// Drops v[3]; w is forced back to 1.
	PX_INLINE Pos(const Vec<Real, 4>& v)
	{
		set(v[0], v[1], v[2]);
	}
	PX_INLINE Pos(const float* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Pos(const double* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Pos(const Pos& p)
	{
		set(p[0], p[1], p[2]);
	}
	PX_INLINE Pos& operator = (const Pos& p)
	{
		set(p[0], p[1], p[2]);
		return *this;
	}

	PX_INLINE void set(Real x, Real y, Real z)
	{
		Vec4Real::set(x, y, z, (Real)1);
	}

};
+
+
+/* Direction */
+
// Homogeneous direction: the w component (el[3]) is pinned to 0, so affine
// translation terms never contribute when a Dir is transformed or dotted.
class Dir : public Vec4Real
{
public:

	PX_INLINE Dir()
	{
		el[3] = 0;
	}
	PX_INLINE Dir(Real x, Real y, Real z)
	{
		set(x, y, z);
	}
	// Splat constructor: (c, c, c, 0).
	PX_INLINE Dir(Real c)
	{
		set(c, c, c);
	}
	PX_INLINE Dir(physx::PxVec3 p)
	{
		set(p.x, p.y, p.z);
	}
	// Drops v[3]; w is forced back to 0.
	PX_INLINE Dir(const Vec<Real, 4>& v)
	{
		set(v[0], v[1], v[2]);
	}
	PX_INLINE Dir(const float* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Dir(const double* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Dir(const Dir& d)
	{
		set(d[0], d[1], d[2]);
	}
	PX_INLINE Dir& operator = (const Dir& d)
	{
		set(d[0], d[1], d[2]);
		return *this;
	}

	PX_INLINE void set(Real x, Real y, Real z)
	{
		Vec4Real::set(x, y, z, (Real)0);
	}

	// 3D cross product (the constructor keeps w = 0).
	PX_INLINE Dir operator ^(const Dir& d) const
	{
		return Dir(el[1] * d[2] - el[2] * d[1], el[2] * d[0] - el[0] * d[2], el[0] * d[1] - el[1] * d[0]);
	}

};
+
+
+/* Plane */
+
// Plane stored as (n.x, n.y, n.z, d) with the convention n·x + d = 0.
// distance() works as a single homogeneous dot product because Pos keeps
// w = 1.
class Plane : public Vec4Real
{
public:

	PX_INLINE Plane() {}
	PX_INLINE Plane(const Dir& n, Real d)
	{
		set(n, d);
	}
	// Plane through point p with normal n.
	PX_INLINE Plane(const Dir& n, const Pos& p)
	{
		set(n, p);
	}
	PX_INLINE Plane(const Vec<Real, 4>& v)
	{
		Vec4Real::set(v[0], v[1], v[2], v[3]);
	}
	PX_INLINE Plane(const Plane& p)
	{
		//ALL_i(4, el[i] = p[i]);
		ALL_VV_i(4, el, =, p);
	}
	PX_INLINE Plane& operator = (const Plane& p)
	{
		ALL_VV_i(4, el, =, p);
		//ALL_i(4, el[i] = p[i]);
		return *this;
	}

	PX_INLINE void set(const Dir& n, Real d)
	{
		//ALL_i(3, el[i] = n[i]);
		ALL_VV_i(3, el, =, n);
		el[3] = d;
	}
	// d = -(n · p) places the plane through p.
	PX_INLINE void set(const Dir& n, const Pos& p)
	{
		//ALL_i(3, el[i] = n[i]);
		ALL_VV_i(3, el, =, n);
		el[3] = -(n | p);
	}

	PX_INLINE Dir normal() const
	{
		return Dir(el[0], el[1], el[2]);
	}
	PX_INLINE Real d() const
	{
		return el[3];
	}
	// Signed distance of p from the plane (exact distance when normalized).
	PX_INLINE Real distance(const Pos& p) const
	{
		return p | *this;
	}
	// Closest point on the plane to p (assumes a unit normal).
	PX_INLINE Pos project(const Pos& p) const
	{
		return p - normal() * distance(p);
	}

	PX_INLINE Real normalize();
};
+
// Scales the entire plane (normal and d) so that |normal| = 1, returning the
// original normal length (recipL * l2 == sqrt(l2)); returns 0 and leaves the
// plane unchanged when the normal is zero.  d is temporarily zeroed so the
// homogeneous self-dot yields |n|^2 only.
PX_INLINE Real
Plane::normalize()
{
	const Real oldD = el[3];
	el[3] = 0;
	const Real l2 = *this | *this;
	if (l2 == 0)
	{
		return 0;
	}
	// NOTE(review): '1.' is a double literal — in a float (non-APEX_CSG_DBL)
	// build this promotes to double, unlike the '(Real)1' style used
	// elsewhere; presumably harmless but confirm.
	const Real recipL = 1. / physx::PxSqrt(l2);
	el[3] = oldD;
	*this *= recipL;
	return recipL * l2;
}
+
+
+/* Matrix */
+
// Row-major 4x4 matrix of Real: el[i] is row i (inherits storage and
// element access from the Vec<Vec4Real, 4> specialization).  cof34() and
// inverse34() assume the last row is (0,0,0,1), i.e. an affine transform.
__declspec(align(16)) class Mat4Real : public Vec<Vec4Real, 4>
{
public:

	PX_INLINE Mat4Real() {}
	// Scaled identity: v on the diagonal.
	PX_INLINE Mat4Real(const Real v)
	{
		set(v);
	}
	// From 16 values in row-major order.
	PX_INLINE Mat4Real(const Real* v)
	{
		set(v);
	}

	PX_INLINE void set(const Real v)
	{
		el[0].set(v, 0, 0, 0);
		el[1].set(0, v, 0, 0);
		el[2].set(0, 0, v, 0);
		el[3].set(0, 0, 0, v);
	}
	PX_INLINE void set(const Real* v)
	{
		ALL_i(4, el[i].set(v + 4 * i));
	}
	PX_INLINE void setCol(int colN, const Vec4Real& col)
	{
		ALL_i(4, el[i][colN] = col[i]);
	}

	// Matrix * column vector: r[i] = row_i · v.
	PX_INLINE Vec4Real operator * (const Vec4Real& v) const
	{
		Vec4Real r;
		//ALL_i(4, r[i] = el[i] | v);
		ALL_VVS_i(4, r, =, el, |, v);
		return r;
	}
	PX_INLINE Mat4Real operator * (const Mat4Real& m) const
	{
		Mat4Real r((Real)0);
		for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j) for (int k = 0; k < 4; ++k)
		{
			r[i][j] += el[i][k] * m[k][j];
		}
		return r;
	}
	PX_INLINE Mat4Real operator * (Real s) const
	{
		Mat4Real r;
		//ALL_i(4, r[i] = el[i] * s);
		ALL_VVS_i(4, r, =, el, *, s);
		return r;
	}
	PX_INLINE Mat4Real operator / (Real s) const
	{
		return *this * ((Real)1 / s);
	}

	PX_INLINE Mat4Real& operator *= (Real s)
	{
		//ALL_i(4, el[i] *= s);
		ALL_VS_i(4, el, *=, s);
		return *this;
	}
	PX_INLINE Mat4Real& operator /= (Real s)
	{
		*this *= ((Real)1 / s);
		return *this;
	}

	// Extract column colN.
	PX_INLINE Vec4Real getCol(int colN) const
	{
		Vec4Real col;
		ALL_i(4, col[i] = el[i][colN]);
		return col;
	}
	PX_INLINE Real det3() const
	{
		return el[0] | (Dir(el[1]) ^ Dir(el[2])); // Determinant of upper-left 3x3 block (same as full determinant if last row = (0,0,0,1))
	}
	PX_INLINE Mat4Real cof34() const; // Assumes last row = (0,0,0,1)
	PX_INLINE Mat4Real inverse34() const; // Assumes last row = (0,0,0,1)
};
+
// Cofactor matrix, assuming the last row is (0,0,0,1) (an affine transform).
// Rows 0-2 are pairwise cross products of the linear-part rows; r[3] folds in
// the translation entries (column 3), and r[3][3] receives the determinant of
// the upper-left 3x3 block.  Used to transform planes (see Hull::transform).
PX_INLINE Mat4Real
Mat4Real::cof34() const
{
	Mat4Real r;
	r[0].set(el[1][1]*el[2][2] - el[1][2]*el[2][1], el[1][2]*el[2][0] - el[1][0]*el[2][2], el[1][0]*el[2][1] - el[1][1]*el[2][0], 0);
	r[1].set(el[2][1]*el[0][2] - el[2][2]*el[0][1], el[2][2]*el[0][0] - el[2][0]*el[0][2], el[2][0]*el[0][1] - el[2][1]*el[0][0], 0);
	r[2].set(el[0][1]*el[1][2] - el[0][2]*el[1][1], el[0][2]*el[1][0] - el[0][0]*el[1][2], el[0][0]*el[1][1] - el[0][1]*el[1][0], 0);
	r[3] = -el[0][3] * r[0] - el[1][3] * r[1] - el[2][3] * r[2];
	r[3][3] = r[0][0] * el[0][0] + r[0][1] * el[0][1] + r[0][2] * el[0][2];
	return r;
}
+
// Inverse of an affine transform (last row assumed (0,0,0,1)) via the
// adjugate: inv = cofactor^T * (1/det), with cof[3][3] holding the 3x3
// determinant.  A near-singular determinant (|det| <= EPS_REAL^3) zeroes the
// result (apart from the last row) instead of dividing by ~0.
PX_INLINE Mat4Real
Mat4Real::inverse34() const
{
	const Mat4Real cof = cof34();
	Mat4Real inv;
	const Real recipDet = physx::PxAbs(cof[3][3]) > EPS_REAL * EPS_REAL * EPS_REAL ? 1 / cof[3][3] : (Real)0;
	for (int i = 0; i < 3; ++i)
	{
		for (int j = 0; j < 4; ++j)
		{
			inv[i][j] = cof[j][i] * recipDet;
		}
	}
	inv[3].set(0, 0, 0, 1);
	return inv;
}
+
+PX_INLINE Mat4Real
+operator * (Real s, const Mat4Real& m)
+{
+ Mat4Real r;
+ //ALL_i(4, r[i] = s * m[i]);
+ ALL_VVS_i(4, r, =, m, *, s);
+ return r;
+}
+
+} // namespace ApexCSG
+
+#endif // #define APEX_CSG_FAST_MATH_2_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGHull.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGHull.h
new file mode 100644
index 00000000..f8b25457
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGHull.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_HULL_H
+#define APEX_CSG_HULL_H
+
+#include "ApexUsingNamespace.h"
+#include "authoring/ApexCSGMath.h"
+#include "PsArray.h"
+#include "PxFileBuf.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+
+/* Convex hull that handles unbounded sets. */
+
class Hull
{
public:
	// Half-open edge record.  m_indexV0/m_indexV1 index into 'vectors'; an
	// index >= vertexCount means that entry is a Dir (ray or line direction)
	// rather than a vertex.  m_indexF1/m_indexF2 are the two adjacent faces.
	struct Edge
	{
		uint32_t m_indexV0;
		uint32_t m_indexV1;
		uint32_t m_indexF1;
		uint32_t m_indexF2;
	};

	struct EdgeType
	{
		enum Enum
		{
			LineSegment,
			Ray,
			Line
		};
	};

	// Default hull represents all of space (no bounding faces).
	PX_INLINE Hull()
	{
		setToAllSpace();
	}
	// Copy via the implicitly-generated assignment operator.
	PX_INLINE Hull(const Hull& geom)
	{
		*this = geom;
	}

	PX_INLINE void setToAllSpace()
	{
		clear();
		allSpace = true;
	}
	PX_INLINE void setToEmptySet()
	{
		clear();
		emptySet = true;
	}

	void intersect(const Plane& plane, Real distanceTol);

	PX_INLINE void transform(const Mat4Real& tm, const Mat4Real& cofTM);

	PX_INLINE uint32_t getFaceCount() const
	{
		return faces.size();
	}
	PX_INLINE const Plane& getFace(uint32_t faceIndex) const
	{
		return faces[faceIndex];
	}

	PX_INLINE uint32_t getEdgeCount() const
	{
		return edges.size();
	}
	PX_INLINE const Edge& getEdge(uint32_t edgeIndex) const
	{
		return edges[edgeIndex];
	}

	PX_INLINE uint32_t getVertexCount() const
	{
		return vertexCount;
	}
	PX_INLINE const Pos& getVertex(uint32_t vertexIndex) const
	{
		return *(const Pos*)(vectors.begin() + vertexIndex);
	}

	PX_INLINE bool isEmptySet() const
	{
		return emptySet;
	}
	PX_INLINE bool isAllSpace() const
	{
		return allSpace;
	}

	Real calculateVolume() const;

	// Edge accessors
	// Counts endpoints "at infinity" (index >= vertexCount):
	// 0 -> LineSegment, 1 -> Ray, 2 -> Line.
	PX_INLINE EdgeType::Enum getType(const Edge& edge) const
	{
		return (EdgeType::Enum)((uint32_t)(edge.m_indexV0 >= vertexCount) + (uint32_t)(edge.m_indexV1 >= vertexCount));
	}
	PX_INLINE const Pos& getV0(const Edge& edge) const
	{
		return *(Pos*)(vectors.begin() + edge.m_indexV0);
	}
	PX_INLINE const Pos& getV1(const Edge& edge) const
	{
		return *(Pos*)(vectors.begin() + edge.m_indexV1);
	}
	// Direction for a ray or line edge; only valid when V1 is at infinity.
	PX_INLINE const Dir& getDir(const Edge& edge) const
	{
		PX_ASSERT(edge.m_indexV1 >= vertexCount);
		return *(Dir*)(vectors.begin() + edge.m_indexV1);
	}
	PX_INLINE uint32_t getF1(const Edge& edge) const
	{
		return edge.m_indexF1;
	}
	PX_INLINE uint32_t getF2(const Edge& edge) const
	{
		return edge.m_indexF2;
	}

	// Serialization
	void serialize(physx::PxFileBuf& stream) const;
	void deserialize(physx::PxFileBuf& stream, uint32_t version);

protected:
	PX_INLINE void clear();

	bool testConsistency(Real distanceTol, Real angleTol) const;

	// Faces
	physx::Array<Plane> faces;
	physx::Array<Edge> edges;
	physx::Array<Vec4Real> vectors;
	uint32_t vertexCount; // vectors[i], i >= vertexCount, are used to store vectors for ray and line edges
	bool allSpace;
	bool emptySet;
};
+
// Applies an affine transform to the hull.  Planes transform by the cofactor
// matrix (the correct map for plane covectors) and are renormalized
// afterward; vertices and edge directions transform by tm directly (Pos/Dir
// w components keep translation handling correct).
PX_INLINE void
Hull::transform(const Mat4Real& tm, const Mat4Real& cofTM)
{
	for (uint32_t i = 0; i < faces.size(); ++i)
	{
		Plane& face = faces[i];
		face = cofTM * face;
		face.normalize();
	}

	for (uint32_t i = 0; i < vectors.size(); ++i)
	{
		Vec4Real& vector = vectors[i];
		vector = tm * vector;
	}
}
+
// Drops all geometry and resets both state flags; callers (setToAllSpace /
// setToEmptySet) then raise the appropriate flag.
PX_INLINE void
Hull::clear()
{
	vectors.reset();
	edges.reset();
	faces.reset();
	vertexCount = 0;
	allSpace = false;
	emptySet = false;
}
+
+
+}; // namespace ApexCSG
+
+#endif
+
+#endif // #define APEX_CSG_HULL_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGMath.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGMath.h
new file mode 100644
index 00000000..bdc605ea
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGMath.h
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_MATH_H
+#define APEX_CSG_MATH_H
+
+#include "ApexUsingNamespace.h"
+#include "PxMath.h"
+#include "PxVec3.h"
+
+#include <math.h>
+#include <float.h>
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+
+// APEX_CSG_DBL may be defined externally
+#ifndef APEX_CSG_DBL
+#define APEX_CSG_DBL 1
+#endif
+
+
+namespace ApexCSG
+{
+#if !(APEX_CSG_DBL)
+typedef float Real;
+#define MAX_REAL FLT_MAX
+#define EPS_REAL FLT_EPSILON
+#else
+typedef double Real;
+#define MAX_REAL DBL_MAX
+#define EPS_REAL DBL_EPSILON
+#endif
+
+
+/* Utilities */
+
// Returns t squared, written as a plain product (cheaper and exact compared
// with calling a general power routine).
template<typename T>
T square(T t)
{
	const T product = t * t;
	return product;
}
+
+
+/* Linear algebra */
+
+#define ALL_i( _D, _exp ) for( int i = 0; i < _D; ++i ) { _exp; }
+
+
+/* General vector */
+
// Fixed-size D-dimensional vector with componentwise arithmetic; storage is
// a plain T el[D].  This is the portable counterpart of the SIMD-aware Vec
// in ApexCSGFastMath*.h.
template<typename T, int D>
class Vec
{
public:

	PX_INLINE Vec() {}
	// Splat constructor: every component set to v.
	PX_INLINE Vec(const T& v)
	{
		set(v);
	}
	PX_INLINE Vec(const T* v)
	{
		set(v);
	}

	PX_INLINE void set(const T& v)
	{
		ALL_i(D, el[i] = v);
	}
	PX_INLINE void set(const T* v)
	{
		ALL_i(D, el[i] = v[i]);
	}

	PX_INLINE T& operator [](int i)
	{
		return el[i];
	}
	PX_INLINE const T& operator [](int i) const
	{
		return el[i];
	}

	PX_INLINE T& operator [](unsigned i)
	{
		return el[i];
	}
	PX_INLINE const T& operator [](unsigned i) const
	{
		return el[i];
	}

	PX_INLINE Vec operator - () const
	{
		Vec r;
		ALL_i(D, r[i] = -el[i]);
		return r;
	}

	PX_INLINE Vec operator + (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] + v[i]);
		return r;
	}
	PX_INLINE Vec operator - (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] - v[i]);
		return r;
	}
	// Componentwise (Hadamard) product.
	PX_INLINE Vec operator * (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] * v[i]);
		return r;
	}
	PX_INLINE Vec operator / (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] / v[i]);
		return r;
	}

	PX_INLINE Vec operator * (T v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] * v);
		return r;
	}
	// Scalar division via one reciprocal.
	PX_INLINE Vec operator / (T v) const
	{
		return *this * ((T)1 / v);
	}

	PX_INLINE Vec& operator += (const Vec& v)
	{
		ALL_i(D, el[i] += v[i]);
		return *this;
	}
	PX_INLINE Vec& operator -= (const Vec& v)
	{
		ALL_i(D, el[i] -= v[i]);
		return *this;
	}
	PX_INLINE Vec& operator *= (const Vec& v)
	{
		ALL_i(D, el[i] *= v[i]);
		return *this;
	}
	PX_INLINE Vec& operator /= (const Vec& v)
	{
		ALL_i(D, el[i] /= v[i]);
		return *this;
	}

	// Dot product.
	PX_INLINE T operator | (const Vec& v) const
	{
		T r = (T)0;
		ALL_i(D, r += el[i] * v[i]);
		return r;
	}

	PX_INLINE T normalize();

	PX_INLINE T lengthSquared() const
	{
		return *this | *this;
	}

protected:
	T el[D];
};
+
// Normalizes in place and returns the original length
// (recipL * l2 == sqrt(l2)); a zero vector is left unchanged and 0 is
// returned.
template<typename T, int D>
PX_INLINE T
Vec<T, D>::normalize()
{
	const T l2 = *this | *this;
	if (l2 == (T)0)
	{
		return (T)0;
	}
	const T recipL = (T)1 / physx::PxSqrt(l2);
	*this *= recipL;
	return recipL * l2;
}
+
+template<typename T, int D>
+PX_INLINE Vec<T, D>
+operator * (T s, const Vec<T, D>& v)
+{
+ Vec<T, D> r;
+ ALL_i(D, r[i] = s * v[i]);
+ return r;
+}
+
+
+/* Popular real vectors */
+
// 2D vector of Real with the 2D cross product (operator^) and a
// perpendicular helper.
class Vec2Real : public Vec<Real, 2>
{
public:
	PX_INLINE Vec2Real() : Vec<Real, 2>()
	{
	}
	PX_INLINE Vec2Real(const Vec2Real& v) : Vec<Real, 2>()
	{
		ALL_i(2, el[i] = v[i]);
	}
	PX_INLINE Vec2Real(const Vec<Real, 2>& v) : Vec<Real, 2>()
	{
		ALL_i(2, el[i] = v[i]);
	}
	PX_INLINE Vec2Real(Real x, Real y) : Vec<Real, 2>()
	{
		set(x, y);
	}
	PX_INLINE Vec2Real& operator = (const Vec2Real& v)
	{
		ALL_i(2, el[i] = v[i]);
		return *this;
	}

	PX_INLINE void set(const Real* v)
	{
		ALL_i(2, el[i] = v[i]);
	}
	PX_INLINE void set(Real x, Real y)
	{
		el[0] = x;
		el[1] = y;
	}

	// 2D cross product (signed parallelogram area).
	PX_INLINE Real operator ^(const Vec2Real& v) const
	{
		return el[0] * v.el[1] - el[1] * v.el[0];
	}

	// Perpendicular vector (y, -x).
	PX_INLINE Vec2Real perp() const
	{
		Vec2Real result;
		result.el[0] = el[1];
		result.el[1] = -el[0];
		return result;
	}
};
+
// 4D vector of Real; base type for the homogeneous Pos/Dir/Plane below.
class Vec4Real : public Vec<Real, 4>
{
public:
	PX_INLINE Vec4Real() : Vec<Real, 4>()
	{
	}
	PX_INLINE Vec4Real(const Vec4Real& v) : Vec<Real, 4>()
	{
		ALL_i(4, el[i] = v[i]);
	}
	PX_INLINE Vec4Real(const Vec<Real, 4>& v) : Vec<Real, 4>()
	{
		ALL_i(4, el[i] = v[i]);
	}
	PX_INLINE Vec4Real(Real x, Real y, Real z, Real w) : Vec<Real, 4>()
	{
		set(x, y, z, w);
	}
	PX_INLINE Vec4Real& operator = (const Vec4Real& v)
	{
		ALL_i(4, el[i] = v[i]);
		return *this;
	}

	PX_INLINE void set(const Real* v)
	{
		ALL_i(4, el[i] = v[i]);
	}
	PX_INLINE void set(Real x, Real y, Real z, Real w)
	{
		el[0] = x;
		el[1] = y;
		el[2] = z;
		el[3] = w;
	}
};
+
+
+/* Position */
+
// Homogeneous point: the w component (el[3]) is pinned to 1 by every
// constructor and by set(), so dotting a Plane with a Pos (operator|)
// yields the signed distance directly.
class Pos : public Vec4Real
{
public:

	PX_INLINE Pos() : Vec4Real()
	{
		el[3] = 1;
	}
	PX_INLINE Pos(Real x, Real y, Real z) : Vec4Real()
	{
		set(x, y, z);
	}
	// Splat constructor: (c, c, c, 1).
	PX_INLINE Pos(Real c) : Vec4Real()
	{
		set(c, c, c);
	}
	PX_INLINE Pos(physx::PxVec3 p) : Vec4Real()
	{
		set(p.x, p.y, p.z);
	}
	// Drops v[3]; w is forced back to 1.
	PX_INLINE Pos(const Vec<Real, 4>& v) : Vec4Real()
	{
		set(v[0], v[1], v[2]);
	}
	PX_INLINE Pos(const float* v) : Vec4Real()
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Pos(const double* v) : Vec4Real()
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Pos(const Pos& p) : Vec4Real()
	{
		set(p[0], p[1], p[2]);
	}
	PX_INLINE Pos& operator = (const Pos& p)
	{
		set(p[0], p[1], p[2]);
		return *this;
	}

	PX_INLINE void set(Real x, Real y, Real z)
	{
		Vec4Real::set(x, y, z, (Real)1);
	}
};
+
+
+/* Direction */
+
// Homogeneous direction: the w component (el[3]) is pinned to 0, so affine
// translation terms never contribute when a Dir is transformed or dotted.
class Dir : public Vec4Real
{
public:

	PX_INLINE Dir() : Vec4Real()
	{
		el[3] = 0;
	}
	PX_INLINE Dir(Real x, Real y, Real z) : Vec4Real()
	{
		set(x, y, z);
	}
	// Splat constructor: (c, c, c, 0).
	PX_INLINE Dir(Real c) : Vec4Real()
	{
		set(c, c, c);
	}
	PX_INLINE Dir(physx::PxVec3 p) : Vec4Real()
	{
		set(p.x, p.y, p.z);
	}
	// Drops v[3]; w is forced back to 0.
	PX_INLINE Dir(const Vec<Real, 4>& v) : Vec4Real()
	{
		set(v[0], v[1], v[2]);
	}
	PX_INLINE Dir(const float* v) : Vec4Real()
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Dir(const double* v) : Vec4Real()
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Dir(const Dir& d) : Vec4Real()
	{
		set(d[0], d[1], d[2]);
	}
	PX_INLINE Dir& operator = (const Dir& d)
	{
		set(d[0], d[1], d[2]);
		return *this;
	}

	PX_INLINE void set(Real x, Real y, Real z)
	{
		Vec4Real::set(x, y, z, (Real)0);
	}

	PX_INLINE Dir cross(const Dir& d) const // Simple cross-product
	{
		return Dir(el[1] * d[2] - el[2] * d[1], el[2] * d[0] - el[0] * d[2], el[0] * d[1] - el[1] * d[0]);
	}

	PX_INLINE Real dot(const Dir& d) const // Simple dot-product
	{
		return el[0]*d[0] + el[1]*d[1] + el[2]*d[2];
	}

	// Cross product followed by one correction step; the added term (scaled
	// by 1/|c|^2) nudges the result toward exact orthogonality with both
	// operands.  NOTE(review): intent inferred from the original comments —
	// verify the refinement derivation before altering.
	PX_INLINE Dir operator ^(const Dir& d) const // Uses an improvement step for more accuracy
	{
		const Dir c = cross(d); // Cross-product gives perpendicular
		const Real c2 = c|c;
		if (c2 != 0.0f)
		{
			return c + ((dot(c))*(c.cross(d)) + (d|c)*(cross(c)))/c2;
		}
		return c; // Improvement to (*this d)^T(c) = (0)
	}
};
+
+
+/* Plane */
+
// Plane stored as (n.x, n.y, n.z, d) with the convention n·x + d = 0.
// distance() works as a single homogeneous dot product because Pos keeps
// w = 1.
class Plane : public Vec4Real
{
public:

	PX_INLINE Plane() : Vec4Real() {}
	PX_INLINE Plane(const Dir& n, Real d) : Vec4Real()
	{
		set(n, d);
	}
	// Plane through point p with normal n.
	PX_INLINE Plane(const Dir& n, const Pos& p) : Vec4Real()
	{
		set(n, p);
	}
	PX_INLINE Plane(const Vec<Real, 4>& v) : Vec4Real()
	{
		Vec4Real::set(v[0], v[1], v[2], v[3]);
	}
	PX_INLINE Plane(const Plane& p) : Vec4Real()
	{
		ALL_i(4, el[i] = p[i]);
	}
	PX_INLINE Plane& operator = (const Plane& p)
	{
		ALL_i(4, el[i] = p[i]);
		return *this;
	}

	PX_INLINE void set(const Dir& n, Real d)
	{
		ALL_i(3, el[i] = n[i]);
		el[3] = d;
	}
	// d = -(n · p) places the plane through p.
	PX_INLINE void set(const Dir& n, const Pos& p)
	{
		ALL_i(3, el[i] = n[i]);
		el[3] = -(n | p);
	}

	PX_INLINE Dir normal() const
	{
		return Dir(el[0], el[1], el[2]);
	}
	PX_INLINE Real d() const
	{
		return el[3];
	}
	// Signed distance of p from the plane (exact distance when normalized).
	PX_INLINE Real distance(const Pos& p) const
	{
		return p | *this;
	}
	// Closest point on the plane to p (assumes a unit normal).
	PX_INLINE Pos project(const Pos& p) const
	{
		return p - normal() * distance(p);
	}

	PX_INLINE Real normalize();
};
+
// Scales the entire plane (normal and d) so |normal| = 1, returning the
// original normal length (recipL * l2 == sqrt(l2)); returns 0 and leaves the
// plane unchanged for a zero normal.  d is temporarily zeroed so the
// homogeneous self-dot yields |n|^2 only.
PX_INLINE Real
Plane::normalize()
{
	const Real oldD = el[3];
	el[3] = 0;
	const Real l2 = *this | *this;
	if (l2 == 0)
	{
		return 0;
	}
	Real recipL = 1 / physx::PxSqrt(l2);
	// Two Newton-Raphson refinements of the reciprocal square root:
	// r' = r * (1.5 - 0.5 * l2 * r^2) converges toward 1/sqrt(l2).
	recipL *= (Real)1.5 - (Real)0.5*l2*recipL*recipL;
	recipL *= (Real)1.5 - (Real)0.5*l2*recipL*recipL;
	el[3] = oldD;
	*this *= recipL;
	return recipL * l2;
}
+
+
+/* Matrix */
+
// Row-major 4x4 matrix of Real: el[i] is row i.  cof34() and inverse34()
// assume the last row is (0,0,0,1), i.e. an affine transform.
class Mat4Real : public Vec<Vec4Real, 4>
{
public:

	PX_INLINE Mat4Real() {}
	// Scaled identity: v on the diagonal.
	PX_INLINE Mat4Real(const Real v)
	{
		set(v);
	}
	// From 16 values in row-major order.
	PX_INLINE Mat4Real(const Real* v)
	{
		set(v);
	}

	PX_INLINE void set(const Real v)
	{
		el[0].set(v, 0, 0, 0);
		el[1].set(0, v, 0, 0);
		el[2].set(0, 0, v, 0);
		el[3].set(0, 0, 0, v);
	}
	PX_INLINE void set(const Real* v)
	{
		ALL_i(4, el[i].set(v + 4 * i));
	}
	PX_INLINE void setCol(int colN, const Vec4Real& col)
	{
		ALL_i(4, el[i][colN] = col[i]);
	}

	// Matrix * column vector: r[i] = row_i · v.
	PX_INLINE Vec4Real operator * (const Vec4Real& v) const
	{
		Vec4Real r;
		ALL_i(4, r[i] = el[i] | v);
		return r;
	}
	PX_INLINE Mat4Real operator + (const Mat4Real& m) const
	{
		Mat4Real r;
		for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j)
		{
			r[i][j] = el[i][j] + m[i][j];
		}
		return r;
	}
	PX_INLINE Mat4Real operator - (const Mat4Real& m) const
	{
		Mat4Real r;
		for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j)
		{
			r[i][j] = el[i][j] - m[i][j];
		}
		return r;
	}
	PX_INLINE Mat4Real operator * (const Mat4Real& m) const
	{
		Mat4Real r((Real)0);
		for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j) for (int k = 0; k < 4; ++k)
		{
			r[i][j] += el[i][k] * m[k][j];
		}
		return r;
	}
	PX_INLINE Mat4Real operator * (Real s) const
	{
		Mat4Real r;
		ALL_i(4, r[i] = el[i] * s);
		return r;
	}
	PX_INLINE Mat4Real operator / (Real s) const
	{
		return *this * ((Real)1 / s);
	}

	PX_INLINE Mat4Real& operator *= (Real s)
	{
		ALL_i(4, el[i] *= s);
		return *this;
	}
	PX_INLINE Mat4Real& operator /= (Real s)
	{
		*this *= ((Real)1 / s);
		return *this;
	}

	// Extract column colN.
	PX_INLINE Vec4Real getCol(int colN) const
	{
		Vec4Real col;
		ALL_i(4, col[i] = el[i][colN]);
		return col;
	}
	// Scalar triple product of the first three rows' xyz parts.
	PX_INLINE Real det3() const
	{
		return el[0] | (Dir(el[1]) ^ Dir(el[2])); // Determinant of upper-left 3x3 block (same as full determinant if last row = (0,0,0,1))
	}
	PX_INLINE Mat4Real cof34() const; // Assumes last row = (0,0,0,1)
	PX_INLINE Mat4Real inverse34() const; // Assumes last row = (0,0,0,1)
	PX_INLINE Mat4Real transpose() const
	{
		Mat4Real r;
		for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j)
		{
			r[i][j] = el[j][i];
		}
		return r;
	}
};
+
// Cofactor matrix, assuming the last row is (0,0,0,1) (an affine transform).
// Rows 0-2 are pairwise cross products of the linear-part rows; r[3] folds in
// the translation entries (column 3), and r[3][3] receives the determinant of
// the upper-left 3x3 block.  Used to transform planes (see Hull::transform).
PX_INLINE Mat4Real
Mat4Real::cof34() const
{
	Mat4Real r;
	r[0].set(el[1][1]*el[2][2] - el[1][2]*el[2][1], el[1][2]*el[2][0] - el[1][0]*el[2][2], el[1][0]*el[2][1] - el[1][1]*el[2][0], 0);
	r[1].set(el[2][1]*el[0][2] - el[2][2]*el[0][1], el[2][2]*el[0][0] - el[2][0]*el[0][2], el[2][0]*el[0][1] - el[2][1]*el[0][0], 0);
	r[2].set(el[0][1]*el[1][2] - el[0][2]*el[1][1], el[0][2]*el[1][0] - el[0][0]*el[1][2], el[0][0]*el[1][1] - el[0][1]*el[1][0], 0);
	r[3] = -el[0][3] * r[0] - el[1][3] * r[1] - el[2][3] * r[2];
	r[3][3] = r[0][0] * el[0][0] + r[0][1] * el[0][1] + r[0][2] * el[0][2];
	return r;
}
+
// Inverse of an affine transform (last row assumed (0,0,0,1)) via the
// adjugate: inv = cofactor^T * (1/det), with cof[3][3] holding the 3x3
// determinant.  A near-singular determinant (|det| <= EPS_REAL^3) zeroes the
// result (apart from the last row) instead of dividing by ~0.
PX_INLINE Mat4Real
Mat4Real::inverse34() const
{
	const Mat4Real cof = cof34();
	Mat4Real inv;
	const Real recipDet = physx::PxAbs(cof[3][3]) > EPS_REAL * EPS_REAL * EPS_REAL ? 1 / cof[3][3] : (Real)0;
	for (int i = 0; i < 3; ++i)
	{
		for (int j = 0; j < 4; ++j)
		{
			inv[i][j] = cof[j][i] * recipDet;
		}
	}
	inv[3].set(0, 0, 0, 1);
	return inv;
}
+
+PX_INLINE Mat4Real
+operator * (Real s, const Mat4Real& m)
+{
+ Mat4Real r;
+ ALL_i(4, r[i] = s * m[i]);
+ return r;
+}
+
+} // namespace ApexCSG
+
+#endif // #define !WITHOUT_APEX_AUTHORING
+
+#endif // #define APEX_CSG_MATH_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGSerialization.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGSerialization.h
new file mode 100644
index 00000000..ff77d2dc
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGSerialization.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_SERIALIZATION_H
+#define APEX_CSG_SERIALIZATION_H
+
+#include "ApexUsingNamespace.h"
+#include "ApexSharedUtils.h"
+#include "ApexStream.h"
+#include "authoring/ApexCSGDefs.h"
+#include "authoring/ApexCSGHull.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace nvidia
+{
+namespace apex
+{
+
+/* Version for serialization */
+// Stream-format version history for ApexCSG serialization. These values
+// are written to (and compared against values read from) streams, so
+// existing entries must never be reordered or removed; add new versions
+// immediately before Count. Current always tracks the newest version.
+struct Version
+{
+ enum Enum
+ {
+ Initial = 0,
+ RevisedMeshTolerances,
+ UsingOnlyPositionDataInVertex,
+ SerializingTriangleFrames,
+ UsingGSA,
+ SerializingMeshBounds,
+ AddedInternalTransform,
+ IncidentalMeshDistinction,
+
+ Count,
+ Current = Count - 1
+ };
+};
+
+
+// Vec<T,D>
+// Writes the D components of v to the stream in index order.
+template<typename T, int D>
+PX_INLINE physx::PxFileBuf&
+operator << (physx::PxFileBuf& stream, const ApexCSG::Vec<T, D>& v)
+{
+ // Use a signed loop index matching the signed template parameter D;
+ // this avoids a signed/unsigned comparison and the explicit index cast.
+ for (int i = 0; i < D; ++i)
+ {
+ stream << v[i];
+ }
+ return stream;
+}
+
+// Reads the D components of v from the stream in index order
+// (the inverse of operator << above).
+template<typename T, int D>
+PX_INLINE physx::PxFileBuf&
+operator >> (physx::PxFileBuf& stream, ApexCSG::Vec<T, D>& v)
+{
+ // Signed loop index matches the signed template parameter D,
+ // avoiding a signed/unsigned comparison and the explicit index cast.
+ for (int i = 0; i < D; ++i)
+ {
+ stream >> v[i];
+ }
+
+ return stream;
+}
+
+
+// Edge
+// Writes a hull edge: its two vertex indices and the two adjacent faces.
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Hull::Edge& e)
+{
+ stream << e.m_indexV0 << e.m_indexV1 << e.m_indexF1 << e.m_indexF2;
+}
+
+// Reads a hull edge; the format has not changed since the Initial version.
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Hull::Edge& e)
+{
+ PX_UNUSED(version); // Initial
+
+ stream >> e.m_indexV0 >> e.m_indexV1 >> e.m_indexF1 >> e.m_indexF2;
+}
+
+
+// Region
+// Current format stores only the region's side value.
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Region& r)
+{
+ stream << r.side;
+}
+
+// Reads a region. Streams older than Version::UsingGSA stored a Hull per
+// region; it is read into a temporary and discarded for backward
+// compatibility (the data is no longer used).
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Region& r)
+{
+ if (version < Version::UsingGSA)
+ {
+ ApexCSG::Hull hull;
+ hull.deserialize(stream, version);
+ }
+
+ stream >> r.side;
+}
+
+
+// Surface
+// Writes a surface: its plane index, triangle index range, and total area.
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Surface& s)
+{
+ stream << s.planeIndex;
+ stream << s.triangleIndexStart;
+ stream << s.triangleIndexStop;
+ stream << s.totalTriangleArea;
+}
+
+// Reads a surface; the format has not changed since the Initial version.
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Surface& s)
+{
+ PX_UNUSED(version); // Initial
+
+ stream >> s.planeIndex;
+ stream >> s.triangleIndexStart;
+ stream >> s.triangleIndexStop;
+ stream >> s.totalTriangleArea;
+}
+
+
+// Triangle
+// Writes a CSG triangle: three vertices followed by per-triangle data.
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Triangle& t)
+{
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ stream << t.vertices[i];
+ }
+ stream << t.submeshIndex;
+ stream << t.smoothingMask;
+ stream << t.extraDataIndex;
+ stream << t.normal;
+ stream << t.area;
+}
+
+// Reads a CSG triangle. Streams older than
+// Version::UsingOnlyPositionDataInVertex stored full vertex attributes
+// (normal, tangent, binormal, UVs, color) per vertex; those fields are
+// read into temporaries and discarded for backward compatibility.
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Triangle& t)
+{
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ stream >> t.vertices[i];
+ if (version < Version::UsingOnlyPositionDataInVertex)
+ {
+ // Legacy per-vertex data: read and discard
+ ApexCSG::Dir v;
+ stream >> v; // normal
+ stream >> v; // tangent
+ stream >> v; // binormal
+ ApexCSG::UV uv;
+ for (uint32_t uvN = 0; uvN < VertexFormat::MAX_UV_COUNT; ++uvN)
+ {
+ stream >> uv; // UVs
+ }
+ ApexCSG::Color c;
+ stream >> c; // color
+ }
+ }
+ stream >> t.submeshIndex;
+ stream >> t.smoothingMask;
+ stream >> t.extraDataIndex;
+ stream >> t.normal;
+ stream >> t.area;
+}
+
+// Interpolator
+// Thin wrappers forwarding to Interpolator's own (de)serialization methods,
+// so Interpolator can be streamed with the same free-function convention
+// as the other types in this header.
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Interpolator& t)
+{
+ t.serialize(stream);
+}
+
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Interpolator& t)
+{
+ t.deserialize(stream, version);
+}
+
+}
+}; // namespace nvidia::apex
+
+#endif
+
+#endif // #define APEX_CSG_SERIALIZATION_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexGSA.h b/APEX_1.4/shared/internal/include/authoring/ApexGSA.h
new file mode 100644
index 00000000..1e66b7b4
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexGSA.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef APEX_GSA_H
+#define APEX_GSA_H
+
+
+#include "ApexCSGMath.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+namespace GSA
+{
+
+// Utility vector format translation
+// Converts the x, y, z components of a Vec4Real to a float PxVec3.
+// The w component is dropped, not divided out — callers presumably pass
+// vectors with w == 1 when a Euclidean point is intended (verify at call sites).
+inline physx::PxVec3 toPxVec3(const Vec4Real& p)
+{
+ return physx::PxVec3(static_cast<float>(p[0]), static_cast<float>(p[1]), static_cast<float>(p[2]));
+}
+
+
+/*** Compact implementation of the void simplex algorithm for D = 3 ***/
+
+typedef physx::PxF32 real;
+
+/*
+ The implementation of farthest_halfspace should return the half-space "most below" the given point. The point
+ is represented by a vector in projective coordinates, and its last element (point[3]) will not necessarily equal 1.
+ However, point[3] will be non-negative. The plane returned is the boundary of the half-space found, and is also
+ represented as a vector in projective coordinates (the coefficients of the plane equation). The plane vector
+ returned should have the greatest dot product with the input point.
+
+ plane = the returned half-space boundary
+ point = the input point
+ returns the dot product of point and plane
+*/
+struct VS3D_Halfspace_Set
+{
+ // Virtual destructor: this is a polymorphic interface (see the pure
+ // virtual below), so implementations must be safely destructible
+ // through a VS3D_Halfspace_Set pointer.
+ virtual ~VS3D_Halfspace_Set() {}
+
+ // Returns the half-space "most below" the given projective point; see
+ // the contract documented in the comment block above. The return value
+ // is the dot product of point and the returned plane coefficients.
+ virtual real farthest_halfspace(real plane[4], const real point[4]) = 0;
+};
+
+
+#define VS3D_HIGH_ACCURACY 1
+#define VS3D_UNNORMALIZED_PLANE_HANDLING 0 // 0 = planes must be normalized, 1 = planes must be near-normalized, 2 = planes may be arbitrary
+#define REAL_DOUBLE 0
+
+
+#if VS3D_UNNORMALIZED_PLANE_HANDLING == 1
+// Returns approximation to 1/sqrt(x)
+// First-order expansion of 1/sqrt(x) about x = 1; accurate only for x
+// near 1, i.e. for near-normalized plane equations (this branch is
+// compiled only when VS3D_UNNORMALIZED_PLANE_HANDLING == 1).
+inline real vs3d_recip_sqrt(real x)
+{
+ real y = (real)1.5 - (real)0.5*x;
+#if REAL_DOUBLE
+ y *= (real)1.5 - (real)0.5*x*y*y; // Perform another iteration for doubles, to handle the case where float-normalized normals are converted to double-precision
+#endif
+ return y;
+}
+#elif VS3D_UNNORMALIZED_PLANE_HANDLING == 2
+#include <cmath>
+inline real vs3d_recip_sqrt(real x) { return 1/sqrt(x); }
+#elif VS3D_UNNORMALIZED_PLANE_HANDLING != 0
+#error Unrecognized value given for VS3D_UNNORMALIZED_PLANE_HANDLING. Please set to 0, 1, or 2.
+#endif
+
+
+// Simple types and operations for internal calculations
+// Minimal aggregate vector types (no constructors, so brace-init and
+// POD semantics apply) with the operators used by the algorithm below.
+struct Vec3 { real x, y, z; }; // 3-vector
+inline Vec3 vec3(real x, real y, real z) { Vec3 r; r.x = x; r.y = y; r.z = z; return r; } // vector builder
+inline Vec3 operator + (const Vec3& a, const Vec3& b) { return vec3(a.x+b.x, a.y+b.y, a.z+b.z); } // vector addition
+inline Vec3 operator * (real s, const Vec3& v) { return vec3(s*v.x, s*v.y, s*v.z); } // scalar multiplication
+inline real operator | (const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; } // dot product
+inline Vec3 operator ^ (const Vec3& a, const Vec3& b) { return vec3(a.y*b.z - b.y*a.z, a.z*b.x - b.z*a.x, a.x*b.y - b.x*a.y); } // cross product
+
+struct Vec4 { Vec3 v; real w; }; // 4-vector split into 3-vector and scalar parts
+inline Vec4 vec4(const Vec3& v, real w) { Vec4 r; r.v = v; r.w = w; return r; } // vector builder
+inline real operator | (const Vec4& a, const Vec4& b) { return (a.v|b.v) + a.w*b.w; } // dot product
+
+// More accurate perpendicular
+// Computes a vector perpendicular to both a and b. In high-accuracy
+// builds the raw cross product is refined with one correction step that
+// reduces its residual dot products with a and b.
+inline Vec3 perp(const Vec3& a, const Vec3& b)
+{
+ Vec3 c = a^b; // Cross-product gives perpendicular
+#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
+ const real c2 = c|c;
+ if (c2 != 0) c = c + (1/c2)*((a|c)*(c^b) + (b|c)*(a^c)); // Improvement to (a b)^T(c) = (0)
+#endif
+ return c;
+}
+
+// Square
+inline real sq(real x) { return x*x; }
+
+// Returns index of the extremal element in a three-element set {e0, e1, e2} based upon comparisons c_ij. The extremal index m is such that c_mn is true, or e_m == e_n, for all n.
+// Branch-free bit arithmetic: each c_ij is 0 or 1.
+inline int ext_index(int c_10, int c_21, int c_20) { return c_10<<c_21|(c_21&c_20)<<1; }
+
+// Returns index (0, 1, or 2) of minimum argument
+inline int index_of_min(real x0, real x1, real x2) { return ext_index((int)(x1 < x0), (int)(x2 < x1), (int)(x2 < x0)); }
+
+// Compare fractions with positive denominators. Returns a_num*sqrt(a_rden2) > b_num*sqrt(b_rden2)
+// Squares are compared only when the numerator signs agree, so no sqrt is needed.
+inline bool frac_gt(real a_num, real a_rden2, real b_num, real b_rden2)
+{
+ const bool a_num_neg = a_num < 0;
+ const bool b_num_neg = b_num < 0;
+ return a_num_neg != b_num_neg ? b_num_neg : ((a_num*a_num*a_rden2 > b_num*b_num*b_rden2) != a_num_neg);
+}
+
+// Returns index (0, 1, or 2) of maximum fraction with positive denominators
+inline int index_of_max_frac(real x0_num, real x0_rden2, real x1_num, real x1_rden2, real x2_num, real x2_rden2)
+{
+ return ext_index((int)frac_gt(x1_num, x1_rden2, x0_num, x0_rden2), (int)frac_gt(x2_num, x2_rden2, x1_num, x1_rden2), (int)frac_gt(x2_num, x2_rden2, x0_num, x0_rden2));
+}
+
+// Compare values given their signs and squares. Returns a > b. a2 and b2 may have any constant offset applied to them.
+inline bool sgn_sq_gt(real sgn_a, real a2, real sgn_b, real b2) { return sgn_a*sgn_b < 0 ? (sgn_b < 0) : ((a2 > b2) != (sgn_a < 0)); }
+
+// Returns index (0, 1, or 2) of maximum value given their signs and squares. sq_x0, sq_x1, and sq_x2 may have any constant offset applied to them.
+inline int index_of_max_sgn_sq(real sgn_x0, real sq_x0, real sgn_x1, real sq_x1, real sgn_x2, real sq_x2)
+{
+ return ext_index((int)sgn_sq_gt(sgn_x1, sq_x1, sgn_x0, sq_x0), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x1, sq_x1), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x0, sq_x0));
+}
+
+// Project 2D (homogeneous) vector onto 2D half-space boundary
+// r is modified in place. delta is the precomputed signed distance (r|plane),
+// recip_n2 is 1/|normal|^2. If the projected r degenerates to (near) zero,
+// r is replaced by a finite point on the boundary line with r.z = 1.
+inline void project2D(Vec3& r, const Vec3& plane, real delta, real recip_n2, real eps2)
+{
+ r = r + (-delta*recip_n2)*vec3(plane.x, plane.y, 0);
+ r = r + (-(r|plane)*recip_n2)*vec3(plane.x, plane.y, 0); // Second projection for increased accuracy
+ if ((r|r) > eps2) return;
+ r = (-plane.z*recip_n2)*vec3(plane.x, plane.y, 0);
+ r.z = 1;
+}
+
+
+// Update function for vs3d_test
+// One step of the void simplex algorithm: given the current simplex S of
+// plane_count half-space boundary planes (the newest plane h = S[plane_count-1]),
+// recomputes the test point p (in projective coordinates) and prunes S down
+// to the planes that remain active. The problem is reduced to 2D inside the
+// h plane, solved there, then mapped back to 3D.
+// Returns false when a void simplex is found (the half-spaces have empty
+// intersection), true otherwise. q is the objective point.
+static bool vs3d_update(Vec4& p, Vec4 S[4], int& plane_count, const Vec4& q, real eps2)
+{
+ // h plane is the last plane
+ const Vec4& h = S[plane_count-1];
+
+ // Handle plane_count == 1 specially (optimization; this could be commented out)
+ if (plane_count == 1)
+ {
+ // Solution is objective projected onto h plane
+ p = q;
+ p.v = p.v + -(p|h)*h.v;
+ if ((p|p) <= eps2) p = vec4(-h.w*h.v, 1); // If p == 0 then q is a direction vector, any point in h is a support point
+ return true;
+ }
+
+ // Create basis in the h plane
+ const int min_i = index_of_min(h.v.x*h.v.x, h.v.y*h.v.y, h.v.z*h.v.z);
+ const Vec3 y = h.v^vec3((real)(min_i == 0), (real)(min_i == 1), (real)(min_i == 2));
+ const Vec3 x = y^h.v;
+
+ // Use reduced vector r instead of p
+ Vec3 r = {x|q.v, y|q.v, q.w*(y|y)}; // (x|x) = (y|y) = square of plane basis scale
+
+ // If r == 0 (within epsilon), then it is a direction vector, and we have a bounded solution
+ if ((r|r) <= eps2) r.z = 1;
+
+ // Create plane equations in the h plane. These will not be normalized in general.
+ int N = 0; // Plane count in h subspace
+ Vec3 R[3]; // Planes in h subspace
+ real recip_n2[3]; // Plane normal vector reciprocal lengths squared
+ real delta[3]; // Signed distance of objective to the planes
+ int index[3]; // Keep track of original plane indices
+ for (int i = 0; i < plane_count-1; ++i)
+ {
+ const Vec3& vi = S[i].v;
+ const real cos_theta = h.v|vi;
+ R[N] = vec3(x|vi, y|vi, S[i].w - h.w*cos_theta);
+ index[N] = i;
+ const real n2 = R[N].x*R[N].x + R[N].y*R[N].y;
+ if (n2 >= eps2)
+ {
+ const real lin_norm = (real)1.5-(real)0.5*n2; // 1st-order approximation to 1/sqrt(n2) expanded about n2 = 1
+ R[N] = lin_norm*R[N]; // We don't need normalized plane equations, but rescaling (even with an approximate normalization) gives better numerical behavior
+ recip_n2[N] = 1/(R[N].x*R[N].x + R[N].y*R[N].y);
+ delta[N] = r|R[N];
+ ++N; // Keep this plane
+ }
+ else if (cos_theta < 0) return false; // Parallel cases are redundant and rejected, anti-parallel cases are 1D voids
+ }
+
+ // Now work with the N-sized R array of half-spaces in the h plane
+ switch (N)
+ {
+ case 1: one_plane:
+ if (delta[0] < 0) N = 0; // S[0] is redundant, eliminate it
+ else project2D(r, R[0], delta[0], recip_n2[0], eps2);
+ break;
+ case 2: two_planes:
+ if (delta[0] < 0 && delta[1] < 0) N = 0; // S[0] and S[1] are redundant, eliminate them
+ else
+ {
+ const int max_d_index = (int)frac_gt(delta[1], recip_n2[1], delta[0], recip_n2[0]);
+ project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
+ const int min_d_index = max_d_index^1;
+ const real new_delta_min = r|R[min_d_index];
+ if (new_delta_min < 0)
+ {
+ index[0] = index[max_d_index];
+ N = 1; // S[min_d_index] is redundant, eliminate it
+ }
+ else
+ {
+ // Set r to the intersection of R[0] and R[1] and keep both
+ r = perp(R[0], R[1]);
+ if (r.z*r.z*recip_n2[0]*recip_n2[1] < eps2)
+ {
+ // Near-parallel boundary lines
+ if (R[0].x*R[1].x + R[0].y*R[1].y < 0) return false; // 2D void found
+ goto one_plane;
+ }
+ r = (1/r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
+ }
+ }
+ break;
+ case 3:
+ if (delta[0] < 0 && delta[1] < 0 && delta[2] < 0) N = 0; // S[0], S[1], and S[2] are redundant, eliminate them
+ else
+ {
+ const Vec3 row_x = {R[0].x, R[1].x, R[2].x};
+ const Vec3 row_y = {R[0].y, R[1].y, R[2].y};
+ const Vec3 row_w = {R[0].z, R[1].z, R[2].z};
+ const Vec3 cof_w = perp(row_x, row_y);
+ const bool detR_pos = (row_w|cof_w) > 0;
+ // Per-pair width signs: 0 = degenerate (parallel), +/-1 otherwise
+ const int nrw_sgn0 = cof_w.x*cof_w.x*recip_n2[1]*recip_n2[2] < eps2 ? 0 : (((int)((cof_w.x > 0) == detR_pos)<<1)-1);
+ const int nrw_sgn1 = cof_w.y*cof_w.y*recip_n2[2]*recip_n2[0] < eps2 ? 0 : (((int)((cof_w.y > 0) == detR_pos)<<1)-1);
+ const int nrw_sgn2 = cof_w.z*cof_w.z*recip_n2[0]*recip_n2[1] < eps2 ? 0 : (((int)((cof_w.z > 0) == detR_pos)<<1)-1);
+
+ if ((nrw_sgn0|nrw_sgn1|nrw_sgn2) >= 0) return false; // 3D void found
+
+ const int positive_width_count = ((nrw_sgn0>>1)&1) + ((nrw_sgn1>>1)&1) + ((nrw_sgn2>>1)&1);
+ if (positive_width_count == 1)
+ {
+ // A single positive width results from a redundant plane. Eliminate it and perform N = 2 calculation.
+ const int pos_width_index = ((nrw_sgn1>>1)&1)|(nrw_sgn2&2); // Calculates which index corresponds to the positive-width side
+ R[pos_width_index] = R[2];
+ recip_n2[pos_width_index] = recip_n2[2];
+ delta[pos_width_index] = delta[2];
+ index[pos_width_index] = index[2];
+ N = 2;
+ goto two_planes;
+ }
+
+ // Find the max dot product of r and R[i]/|R_normal[i]|. For numerical accuracy when the angle between r and the i^{th} plane normal is small, we take some care below:
+ const int max_d_index = r.z != 0
+ ? index_of_max_frac(delta[0], recip_n2[0], delta[1], recip_n2[1], delta[2], recip_n2[2]) // displacement term resolves small-angle ambiguity, just use dot product
+ : index_of_max_sgn_sq(delta[0], -sq(r.x*R[0].y - r.y*R[0].x)*recip_n2[0], delta[1], -sq(r.x*R[1].y - r.y*R[1].x)*recip_n2[1], delta[2], -sq(r.x*R[2].y - r.y*R[2].x)*recip_n2[2]); // No displacement term. Use wedge product to find the sine of the angle.
+
+ // Project r onto max-d plane
+ project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
+ N = 1; // Unless we use a vertex in the loop below
+ const int index_max = index[max_d_index];
+
+ // The number of finite widths should be >= 2. If not, it should be 0, but in any case it implies three parallel lines in the plane, which we should not have here.
+ // If we do have three parallel lines (# of finite widths < 2), we've picked the line corresponding to the half-plane farthest from r, which is correct.
+ const int finite_width_count = (nrw_sgn0&1) + (nrw_sgn1&1) + (nrw_sgn2&1);
+ if (finite_width_count >= 2)
+ {
+ const int i_remaining[2] = {(1<<max_d_index)&3, (3>>max_d_index)^1}; // = {(max_d_index+1)%3, (max_d_index+2)%3}
+ const int i_select = (int)frac_gt(delta[i_remaining[1]], recip_n2[i_remaining[1]], delta[i_remaining[0]], recip_n2[i_remaining[0]]); // Select the greater of the remaining dot products
+ for (int i = 0; i < 2; ++i)
+ {
+ const int j = i_remaining[i_select^i]; // i = 0 => the next-greatest, i = 1 => the least
+ if ((r|R[j]) >= 0)
+ {
+ r = perp(R[max_d_index], R[j]);
+ r = (1/r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
+ index[1] = index[j];
+ N = 2;
+ break;
+ }
+ }
+ }
+
+ index[0] = index_max;
+ }
+ break;
+ }
+
+ // Transform r back to 3D space
+ p = vec4(r.x*x + r.y*y + (-r.z*h.w)*h.v, r.z);
+
+ // Pack S array with kept planes
+ if (N < 2 || index[1] != 0) { for (int i = 0; i < N; ++i) S[i] = S[index[i]]; } // Safe to copy columns in order
+ else { const Vec4 temp = S[0]; S[0] = S[index[0]]; S[1] = temp; } // Otherwise use temp storage to avoid overwrite
+ S[N] = h;
+ plane_count = N+1;
+
+ return true;
+}
+
+
+// Performs the VS algorithm for D = 3
+// Tests whether the half-spaces provided by halfspace_set have a common
+// intersection. Returns 1 if an intersection is found, 0 if a void simplex
+// is found (empty intersection), and -1 if the iteration limit is reached.
+// If q is non-NULL it supplies the objective point (4 projective reals) on
+// input and receives the solution point on output.
+inline int vs3d_test(VS3D_Halfspace_Set& halfspace_set, real* q = NULL)
+{
+ // Objective = q if it is not NULL, otherwise it is the origin represented in homogeneous coordinates
+ // NOTE(review): *(Vec4*)q type-puns the caller's array as a Vec4; assumes q points to at least 4 reals with Vec4-compatible layout — confirm callers.
+ const Vec4 objective = q ? (q[3] != 0 ? vec4((1/q[3])*vec3(q[0], q[1], q[2]), 1) : *(Vec4*)q) : vec4(vec3(0, 0, 0), 1);
+
+ // Tolerance for 3D void simplex algorithm
+ const real eps_f = (real)1/(sizeof(real) == 4 ? (1L<<23) : (1LL<<52)); // Floating-point epsilon
+#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
+ const real eps = 8*eps_f;
+#else
+ const real eps = 80*eps_f;
+#endif
+ const real eps2 = eps*eps; // Using epsilon squared
+
+ // Maximum allowed iterations of main loop. If exceeded, error code is returned
+ const int max_iteration_count = 50;
+
+ // State
+ Vec4 S[4]; // Up to 4 planes
+ int plane_count = 0; // Number of valid planes
+ Vec4 p = objective; // Test point, initialized to objective
+
+ // Default result, changed to valid result if found in loop below
+ int result = -1;
+
+ // Iterate until a stopping condition is met or the maximum number of iterations is reached
+ for (int i = 0; result < 0 && i < max_iteration_count; ++i)
+ {
+ Vec4& plane = S[plane_count++];
+ real delta = halfspace_set.farthest_halfspace(&plane.v.x, &p.v.x);
+#if VS3D_UNNORMALIZED_PLANE_HANDLING != 0
+ const real recip_norm = vs3d_recip_sqrt(plane.v|plane.v);
+ plane = vec4(recip_norm*plane.v, recip_norm*plane.w);
+ delta *= recip_norm;
+#endif
+ if (delta <= 0 || delta*delta <= eps2*(p|p)) result = 1; // Intersection found
+ else if (!vs3d_update(p, S, plane_count, objective, eps2)) result = 0; // Void simplex found
+ }
+
+ // If q is given, fill it with the solution (normalize p.w if it is not zero)
+ if (q) *(Vec4*)q = (p.w != 0) ? vec4((1/p.w)*p.v, 1) : p;
+
+ PX_ASSERT(result >= 0);
+
+ return result;
+}
+
+
+/*
+ Utility class derived from GSA::ConvexShape, to handle common implementations
+
+ PlaneIterator must have:
+ 1) a constructor which takes an object of type IteratorInitValues (either by value or reference) in its constructor,
+ 2) a valid() method which returns a bool (true iff the plane() function can return a valid plane, see below),
+ 3) an inc() method to advance to the next plane, and
+ 4) a plane() method which returns a plane of type ApexCSG::Plane, either by value or reference (the plane will be copied).
+*/
+template<class PlaneIterator, class IteratorInitValues>
+class StaticConvexPolyhedron : public VS3D_Halfspace_Set
+{
+public:
+ // Linear scan over all planes, returning the one with the greatest dot
+ // product with the (projective) point. If the iterator yields no planes,
+ // the default plane (0,0,0,1) is returned with s = -MAX_REAL.
+ virtual GSA::real farthest_halfspace(GSA::real plane[4], const GSA::real point[4])
+ {
+ plane[0] = plane[1] = plane[2] = 0.0f;
+ plane[3] = 1.0f;
+ Real greatest_s = -MAX_REAL;
+
+ for (PlaneIterator it(m_initValues); it.valid(); it.inc())
+ {
+ const Plane test = it.plane();
+ const Real s = point[0]*test[0] + point[1]*test[1] + point[2]*test[2] + point[3]*test[3];
+ if (s > greatest_s)
+ {
+ greatest_s = s;
+ for (int i = 0; i < 4; ++i)
+ {
+ plane[i] = (GSA::real)test[i];
+ }
+ }
+ }
+
+ // Return results
+ return (GSA::real)greatest_s;
+ }
+
+protected:
+ // Not initialized here; derived classes are expected to set this before
+ // farthest_halfspace is called.
+ IteratorInitValues m_initValues;
+};
+
+}; // namespace GSA
+}; // namespace ApexCSG
+
+#endif // #ifndef WITHOUT_APEX_AUTHORING
+
+#endif // #ifndef APEX_GSA_H
diff --git a/APEX_1.4/shared/internal/include/authoring/Fracturing.h b/APEX_1.4/shared/internal/include/authoring/Fracturing.h
new file mode 100644
index 00000000..751d1ae9
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/Fracturing.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef FRACTURING_H
+
+#define FRACTURING_H
+
+#include "Apex.h"
+#include "ApexUsingNamespace.h"
+#include "PxPlane.h"
+//#include "ApexSharedSerialization.h"
+#include "FractureTools.h"
+#include "ApexString.h"
+#include "ExplicitHierarchicalMesh.h"
+#include "authoring/ApexCSG.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace nvidia
+{
+namespace apex
+{
+
+using namespace FractureTools;
+
+
+// A cutting surface used for fracturing: either an infinite plane
+// (m_pattern == None) or a triangulated grid of m_numX x m_numY cells
+// (possibly displaced by noise) positioned by the transform m_tm.
+struct IntersectMesh
+{
+ enum GridPattern
+ {
+ None, // An infinite plane
+ Equilateral,
+ Right
+ };
+
+ // Returns a signed "side" value for world-space point v relative to the
+ // cutting surface. For the None pattern this is the signed distance to
+ // m_plane. For grid patterns, v is mapped into grid space, the covering
+ // triangle is found, and the (unnormalized) signed volume of v against
+ // that triangle's plane is returned. Returns 0 when v falls outside the
+ // grid footprint — presumably treated as "on surface" by callers; verify.
+ float getSide(const physx::PxVec3& v)
+ {
+ if (m_pattern == None)
+ {
+ return m_plane.distance(v);
+ }
+ // Map into the grid's local frame, relative to the grid corner
+ physx::PxVec3 vLocal = m_tm.inverseRT().transform(v);
+ float x = vLocal.x - m_cornerX;
+ float y = vLocal.y - m_cornerY;
+ if (y < 0)
+ {
+ return 0;
+ }
+ float scaledY = y / m_ySpacing;
+ uint32_t gridY = (uint32_t)scaledY;
+ if (gridY >= m_numY)
+ {
+ return 0;
+ }
+ scaledY -= (float)gridY; // Fractional position within the row
+ uint32_t yParity = gridY & 1;
+ if (yParity != 0)
+ {
+ scaledY = 1.0f - scaledY; // Odd rows are mirrored
+ }
+ if (m_pattern == Equilateral)
+ {
+ x += 0.5f * m_xSpacing * scaledY; // Shear to account for staggered rows
+ }
+ if (x < 0)
+ {
+ return 0;
+ }
+ float scaledX = x / m_xSpacing;
+ uint32_t gridX = (uint32_t)scaledX;
+ if (gridX >= m_numX)
+ {
+ return 0;
+ }
+ scaledX -= (float)gridX;
+ // Each cell holds two triangles; pick upper or lower by the diagonal test
+ uint32_t xParity = (uint32_t)(scaledX >= scaledY);
+ uint32_t triangleNum = 2 * (gridY * m_numX + gridX) + xParity;
+ PX_ASSERT(triangleNum < m_triangles.size());
+ nvidia::ExplicitRenderTriangle& triangle = m_triangles[triangleNum];
+ physx::PxVec3& v0 = triangle.vertices[0].position;
+ physx::PxVec3& v1 = triangle.vertices[1].position;
+ physx::PxVec3& v2 = triangle.vertices[2].position;
+ // Signed (unnormalized) distance of v from the triangle's plane
+ return ((v1 - v0).cross(v2 - v0)).dot(v - v0);
+ }
+
+ // Resets to an empty infinite plane at z = 0 and frees mesh storage.
+ void clear()
+ {
+ m_pattern = None;
+ m_plane = physx::PxPlane(0, 0, 1, 0);
+ m_vertices.reset();
+ m_triangles.reset();
+ }
+
+ // Configures this surface as an infinite plane.
+ void build(const physx::PxPlane& plane)
+ {
+ clear();
+ m_plane = plane;
+ }
+
+ // Configures this surface as a (possibly noisy) triangulated grid;
+ // implemented elsewhere.
+ void build(GridPattern pattern, const physx::PxPlane& plane,
+ float cornerX, float cornerY, float xSpacing, float ySpacing, uint32_t numX, uint32_t numY,
+ const PxMat44& tm, float noiseAmplitude, float relativeFrequency, float xPeriod, float yPeriod,
+ int noiseType, int noiseDir, uint32_t submeshIndex, uint32_t frameIndex, const TriangleFrame& triangleFrame, bool forceGrid);
+
+ GridPattern m_pattern;
+
+ PxMat44 m_tm; // Grid local-to-world transform
+ physx::PxPlane m_plane; // Used when m_pattern == None
+ physx::Array<nvidia::Vertex> m_vertices;
+ physx::Array<nvidia::ExplicitRenderTriangle> m_triangles;
+
+ uint32_t m_numX; // Cell counts and local-space grid extents
+ float m_cornerX;
+ float m_xSpacing;
+ uint32_t m_numY;
+ float m_cornerY;
+ float m_ySpacing;
+};
+
+// Concrete DisplacementMapVolume: a 3D volume of displacement values used
+// for displacement-mapped fracturing. Member functions are defined elsewhere.
+struct DisplacementMapVolumeImpl : public DisplacementMapVolume
+{
+ DisplacementMapVolumeImpl();
+
+ // Configures the volume dimensions from the slice descriptor.
+ void init(const FractureSliceDesc& desc);
+
+ // Returns the volume dimensions and raw byte data (built lazily).
+ void getData(uint32_t& width, uint32_t& height, uint32_t& depth, uint32_t& size, unsigned char const** ppData) const;
+
+private:
+
+ void buildData(const physx::PxVec3 scale = physx::PxVec3(1)) const;
+
+ // Data creation is lazy, and does not affect externally visible state
+ // Note: At some point, we will want to switch to floating point displacements
+ mutable physx::Array<unsigned char> data;
+
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+
+};
+
+// CutoutSetImpl
+
+// One vertex reference within a cutout's convex loop.
+struct PolyVert
+{
+ uint16_t index; // Index into the owning Cutout's vertices array
+ uint16_t flags;
+};
+
+// A single convex polygon, as indices into the cutout's vertex list.
+struct ConvexLoop
+{
+ physx::Array<PolyVert> polyVerts;
+};
+
+// One cutout shape: its vertices plus a convex decomposition of its outline.
+struct Cutout
+{
+ physx::Array<physx::PxVec3> vertices;
+ physx::Array<ConvexLoop> convexLoops;
+};
+
+// Concrete CutoutSet: a collection of cutout shapes used for cutout
+// fracturing. Accessor methods perform no bounds checking — callers must
+// pass valid indices (typically bounded by the corresponding count getters).
+struct CutoutSetImpl : public CutoutSet
+{
+ CutoutSetImpl() : periodic(false), dimensions(0.0f)
+ {
+ }
+
+ // Stream format versions; values are serialized, so do not reorder.
+ enum Version
+ {
+ First = 0,
+ // New versions must be put here. There is no need to explicitly number them. The
+ // numbers above were put there to conform to the old DestructionToolStreamVersion enum.
+
+ Count,
+ Current = Count - 1
+ };
+
+ uint32_t getCutoutCount() const
+ {
+ return cutouts.size();
+ }
+
+ uint32_t getCutoutVertexCount(uint32_t cutoutIndex) const
+ {
+ return cutouts[cutoutIndex].vertices.size();
+ }
+ uint32_t getCutoutLoopCount(uint32_t cutoutIndex) const
+ {
+ return cutouts[cutoutIndex].convexLoops.size();
+ }
+
+ const physx::PxVec3& getCutoutVertex(uint32_t cutoutIndex, uint32_t vertexIndex) const
+ {
+ return cutouts[cutoutIndex].vertices[vertexIndex];
+ }
+
+ uint32_t getCutoutLoopSize(uint32_t cutoutIndex, uint32_t loopIndex) const
+ {
+ return cutouts[cutoutIndex].convexLoops[loopIndex].polyVerts.size();
+ }
+
+ uint32_t getCutoutLoopVertexIndex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexNum) const
+ {
+ return cutouts[cutoutIndex].convexLoops[loopIndex].polyVerts[vertexNum].index;
+ }
+ uint32_t getCutoutLoopVertexFlags(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexNum) const
+ {
+ return cutouts[cutoutIndex].convexLoops[loopIndex].polyVerts[vertexNum].flags;
+ }
+ bool isPeriodic() const
+ {
+ return periodic;
+ }
+ const physx::PxVec2& getDimensions() const
+ {
+ return dimensions;
+ }
+
+ // Implemented elsewhere.
+ void serialize(physx::PxFileBuf& stream) const;
+ void deserialize(physx::PxFileBuf& stream);
+
+ // Self-deleting release, per the CutoutSet interface.
+ void release()
+ {
+ delete this;
+ }
+
+ physx::Array<Cutout> cutouts;
+ bool periodic;
+ physx::PxVec2 dimensions;
+};
+
+// Adapter exposing a ConvexHullImpl through the
+// ExplicitHierarchicalMesh::ConvexHull interface. Out-of-range queries on
+// vertices/edges/planes return zero values rather than asserting.
+class PartConvexHullProxy : public ExplicitHierarchicalMesh::ConvexHull, public UserAllocated
+{
+public:
+ ConvexHullImpl impl;
+
+ PartConvexHullProxy()
+ {
+ impl.init();
+ }
+
+ // Copy construction delegates to the assignment operator below.
+ PartConvexHullProxy(const PartConvexHullProxy& hull)
+ {
+ *this = hull;
+ }
+
+ // Re-initializes impl, then deep-copies the source hull's parameters
+ // (if any) into it.
+ PartConvexHullProxy& operator = (const PartConvexHullProxy& hull)
+ {
+ impl.init();
+ if (hull.impl.mParams)
+ {
+ impl.mParams->copy(*hull.impl.mParams);
+ }
+ return *this;
+ }
+
+ virtual void buildFromPoints(const void* points, uint32_t numPoints, uint32_t pointStrideBytes)
+ {
+ impl.buildFromPoints(points, numPoints, pointStrideBytes);
+ }
+
+ virtual const physx::PxBounds3& getBounds() const
+ {
+ return impl.getBounds();
+ }
+
+ virtual float getVolume() const
+ {
+ return impl.getVolume();
+ }
+
+ virtual uint32_t getVertexCount() const
+ {
+ return impl.getVertexCount();
+ }
+
+ // Returns the zero vector for an out-of-range index.
+ virtual physx::PxVec3 getVertex(uint32_t vertexIndex) const
+ {
+ if (vertexIndex < impl.getVertexCount())
+ {
+ return impl.getVertex(vertexIndex);
+ }
+ return physx::PxVec3(0.0f);
+ }
+
+ virtual uint32_t getEdgeCount() const
+ {
+ return impl.getEdgeCount();
+ }
+
+ // Returns the zero vector for an out-of-range edge index.
+ virtual physx::PxVec3 getEdgeEndpoint(uint32_t edgeIndex, uint32_t whichEndpoint) const
+ {
+ if (edgeIndex < impl.getEdgeCount())
+ {
+ return impl.getVertex(impl.getEdgeEndpointIndex(edgeIndex, whichEndpoint));
+ }
+ return physx::PxVec3(0.0f);
+ }
+
+ /**
+ This is the number of planes which bound the convex hull.
+ */
+ virtual uint32_t getPlaneCount() const
+ {
+ return impl.getPlaneCount();
+ }
+
+ /**
+ This is the plane indexed by planeIndex, which must in
+ the range [0, getPlaneCount()-1].
+ Returns a degenerate (all-zero) plane for an out-of-range index.
+ */
+ virtual physx::PxPlane getPlane(uint32_t planeIndex) const
+ {
+ if (planeIndex < impl.getPlaneCount())
+ {
+ return impl.getPlane(planeIndex);
+ }
+ return physx::PxPlane(physx::PxVec3(0.0f), 0.0f);
+ }
+
+ virtual bool rayCast(float& in, float& out, const physx::PxVec3& orig, const physx::PxVec3& dir,
+ const physx::PxTransform& localToWorldRT, const physx::PxVec3& scale, physx::PxVec3* normal = NULL) const
+ {
+ return impl.rayCast(in, out, orig, dir, localToWorldRT, scale, normal);
+ }
+
+ virtual bool reduceHull(uint32_t maxVertexCount, uint32_t maxEdgeCount, uint32_t maxFaceCount, bool inflated)
+ {
+ return impl.reduceHull(maxVertexCount, maxEdgeCount, maxFaceCount, inflated);
+ }
+
+ // Self-deleting release, per the ConvexHull interface.
+ virtual void release()
+ {
+ delete this;
+ }
+};
+
+// Resizes a collision-hull array to exactly hullCount entries:
+// surplus proxies are released before the array shrinks, and newly added
+// slots are filled with freshly allocated (empty) proxies.
+PX_INLINE void resizeCollision(physx::Array<PartConvexHullProxy*>& collision, uint32_t hullCount)
+{
+ const uint32_t oldHullCount = collision.size();
+ // Release proxies that will be dropped by the resize (no-op when growing)
+ for (uint32_t i = hullCount; i < oldHullCount; ++i)
+ {
+ collision[i]->release();
+ }
+ collision.resize(hullCount);
+ // Allocate proxies for any newly added slots (no-op when shrinking)
+ for (uint32_t i = oldHullCount; i < hullCount; ++i)
+ {
+ collision[i] = PX_NEW(PartConvexHullProxy)();
+ }
+}
+
+void buildCollisionGeometry(physx::Array<PartConvexHullProxy*>& volumes, const CollisionVolumeDesc& desc,
+ const physx::PxVec3* vertices, uint32_t vertexCount, uint32_t vertexByteStride,
+ const uint32_t* indices, uint32_t indexCount);
+
+
+// ExplicitHierarchicalMeshImpl
+
+static uint64_t sNextChunkEUID = 0; // Execution-unique identifier for chunks
+
+class ExplicitHierarchicalMeshImpl : public ExplicitHierarchicalMesh, public UserAllocated
+{
+public:
+
+ // This has been copied from DestructionToolStreamVersion, at ToolStreamVersion_RemovedExplicitHMesh_mMaxDepth.
+ // Stream format versions; values are serialized, so existing entries must
+ // never be reordered or removed. The explicit numbers preserve
+ // compatibility with the old DestructionToolStreamVersion values.
+ enum Version
+ {
+ First = 0,
+ AddedMaterialFramesToHMesh_and_NoiseType_and_GridSize_to_Cleavage = 7,
+ IncludingVertexFormatInSubmeshData = 12,
+ AddedMaterialLibraryToMesh = 14,
+ AddedCacheChunkSurfaceTracesAndInteriorSubmeshIndex = 32,
+ RemovedExplicitHMesh_mMaxDepth = 38,
+ UsingExplicitPartContainers,
+ SerializingMeshBSP,
+ SerializingMeshBounds,
+ AddedFlagsFieldToPart,
+ PerPartMeshBSPs,
+ StoringRootSubmeshCount,
+ MultipleConvexHullsPerChunk,
+ InstancingData,
+ UVInstancingData,
+ DisplacementData,
+ ChangedMaterialFrameToIncludeFracturingMethodContext,
+ RemovedInteriorSubmeshIndex,
+ AddedSliceDepthToMaterialFrame,
+ RemovedNxChunkAuthoringFlag,
+ ReaddedFlagsToPart,
+ IntroducingChunkPrivateFlags,
+ // New versions must be put here. There is no need to explicitly number them. The
+ // numbers above were put there to conform to the old DestructionToolStreamVersion enum.
+
+ Count,
+ Current = Count - 1
+ };
+
+ // One geometric part: render mesh, CSG BSP, collision hulls, and bounds.
+ // The destructor owns and releases the BSP and the collision proxies.
+ struct Part : public UserAllocated
+ {
+ Part() : mMeshBSP(NULL), mSurfaceNormal(0.0f), mFlags(0)
+ {
+ mBounds.setEmpty();
+ }
+
+ ~Part()
+ {
+ if (mMeshBSP != NULL)
+ {
+ mMeshBSP->release();
+ mMeshBSP = NULL;
+ }
+ resizeCollision(mCollision, 0); // Releases all collision hull proxies
+ }
+
+ enum Flags
+ {
+ MeshOpen = (1<<0),
+ };
+
+ physx::PxBounds3 mBounds;
+ physx::Array<nvidia::ExplicitRenderTriangle> mMesh;
+ ApexCSG::IApexBSP* mMeshBSP; // Owned; released in the destructor
+ physx::Array<PartConvexHullProxy*> mCollision; // Owned; released in the destructor
+ physx::PxVec3 mSurfaceNormal; // used to kick chunk out if desired
+ uint32_t mFlags; // See Flags
+ };
+
+ struct Chunk : public UserAllocated
+ {
+ Chunk() : mParentIndex(-1), mFlags(0), mPartIndex(-1), mInstancedPositionOffset(physx::PxVec3(0.0f)), mInstancedUVOffset(physx::PxVec2(0.0f)), mPrivateFlags(0)
+ {
+ mEUID = sNextChunkEUID++;
+ }
+
+ enum Flags
+ {
+ Root = (1<<0),
+ RootLeaf = (1<<1),
+ };
+
+ bool isRootChunk() const
+ {
+ return (mPrivateFlags & Root) != 0;
+ }
+
+ bool isRootLeafChunk() const // This means that the chunk is a root chunk and has no children that are root chunks
+ {
+ return (mPrivateFlags & RootLeaf) != 0;
+ }
+
+ PX_INLINE uint64_t getEUID() const
+ {
+ return mEUID;
+ }
+
+ int32_t mParentIndex;
+ uint32_t mFlags; // See DestructibleAsset::ChunkFlags
+ int32_t mPartIndex;
+ physx::PxVec3 mInstancedPositionOffset; // if instanced, the offsetPosition
+ physx::PxVec2 mInstancedUVOffset; // if instanced, the offset UV
+ uint32_t mPrivateFlags; // Things that don't make it to the DestructibleAsset; authoring only. See ExplicitHierarchicalMeshImpl::Chunk::Flags
+
+ private:
+ uint64_t mEUID; // A unique identifier during the application execution. Not to be serialized.
+ };
+
+ physx::Array<Part*> mParts;
+ physx::Array<Chunk*> mChunks;
+ physx::Array<ExplicitSubmeshData> mSubmeshData;
+ physx::Array<nvidia::MaterialFrame> mMaterialFrames;
+ uint32_t mRootSubmeshCount; // How many submeshes came with the root mesh
+
+ ApexCSG::IApexBSPMemCache* mBSPMemCache;
+
+ DisplacementMapVolumeImpl mDisplacementMapVolume;
+
+ ExplicitHierarchicalMeshImpl();
+ ~ExplicitHierarchicalMeshImpl();
+
+ // Sorts chunks in parent-sorted order (stable)
+ void sortChunks(physx::Array<uint32_t>* indexRemap = NULL);
+
+ // Generate part surface normals, if possible
+ void createPartSurfaceNormals();
+
+ // ExplicitHierarchicalMesh implementation:
+
+ uint32_t addPart();
+ bool removePart(uint32_t index);
+ uint32_t addChunk();
+ bool removeChunk(uint32_t index);
+ void serialize(physx::PxFileBuf& stream, Embedding& embedding) const;
+ void deserialize(physx::PxFileBuf& stream, Embedding& embedding);
+ int32_t maxDepth() const;
+ uint32_t partCount() const;
+ uint32_t chunkCount() const;
+ uint32_t depth(uint32_t chunkIndex) const;
+ int32_t* parentIndex(uint32_t chunkIndex);
+ uint64_t chunkUniqueID(uint32_t chunkIndex);
+ int32_t* partIndex(uint32_t chunkIndex);
+ physx::PxVec3* instancedPositionOffset(uint32_t chunkIndex);
+ physx::PxVec2* instancedUVOffset(uint32_t chunkIndex);
+ uint32_t meshTriangleCount(uint32_t partIndex) const;
+ nvidia::ExplicitRenderTriangle* meshTriangles(uint32_t partIndex);
+ physx::PxBounds3 meshBounds(uint32_t partIndex) const;
+ physx::PxBounds3 chunkBounds(uint32_t chunkIndex) const;
+ uint32_t* chunkFlags(uint32_t chunkIndex) const;
+ uint32_t convexHullCount(uint32_t partIndex) const;
+ const ExplicitHierarchicalMesh::ConvexHull** convexHulls(uint32_t partIndex) const;
+ physx::PxVec3* surfaceNormal(uint32_t partIndex);
+ const DisplacementMapVolume& displacementMapVolume() const;
+ uint32_t submeshCount() const;
+ ExplicitSubmeshData* submeshData(uint32_t submeshIndex);
+ uint32_t addSubmesh(const ExplicitSubmeshData& submeshData);
+ uint32_t getMaterialFrameCount() const;
+ nvidia::MaterialFrame getMaterialFrame(uint32_t index) const;
+ void setMaterialFrame(uint32_t index, const nvidia::MaterialFrame& materialFrame);
+ uint32_t addMaterialFrame();
+ void clear(bool keepRoot = false);
+ void set(const ExplicitHierarchicalMesh& mesh);
+ bool calculatePartBSP(uint32_t partIndex, uint32_t randomSeed, uint32_t microgridSize, BSPOpenMode::Enum meshMode, IProgressListener* progressListener = NULL, volatile bool* cancel = NULL);
+ void calculateMeshBSP(uint32_t randomSeed, IProgressListener* progressListener = NULL, const uint32_t* microgridSize = NULL, BSPOpenMode::Enum meshMode = BSPOpenMode::Automatic);
+ void replaceInteriorSubmeshes(uint32_t partIndex, uint32_t frameCount, uint32_t* frameIndices, uint32_t submeshIndex);
+ void visualize(RenderDebugInterface& debugRender, uint32_t flags, uint32_t index = 0) const;
+ void release();
+ void buildMeshBounds(uint32_t partIndex);
+ void buildCollisionGeometryForPart(uint32_t partIndex, const CollisionVolumeDesc& desc);
+ void buildCollisionGeometryForRootChunkParts(const CollisionDesc& desc, bool aggregateRootChunkParentCollision = true);
+ void initializeDisplacementMapVolume(const nvidia::FractureSliceDesc& desc);
+ void reduceHulls(const CollisionDesc& desc, bool inflated);
+ void aggregateCollisionHullsFromRootChildren(uint32_t chunkIndex);
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif
+
+#endif