aboutsummaryrefslogtreecommitdiff
path: root/APEX_1.4/shared/internal
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /APEX_1.4/shared/internal
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'APEX_1.4/shared/internal')
-rw-r--r--APEX_1.4/shared/internal/include/ApexSharedSerialization.h809
-rw-r--r--APEX_1.4/shared/internal/include/ApexStream.h191
-rw-r--r--APEX_1.4/shared/internal/include/ApexString.h276
-rw-r--r--APEX_1.4/shared/internal/include/FractureTools.h466
-rw-r--r--APEX_1.4/shared/internal/include/Link.h75
-rw-r--r--APEX_1.4/shared/internal/include/ParamArray.h225
-rw-r--r--APEX_1.4/shared/internal/include/PvdNxParamSerializer.h41
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSG.h404
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGDefs.h1064
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath.h794
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath2.h630
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGHull.h187
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGMath.h648
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexCSGSerialization.h191
-rw-r--r--APEX_1.4/shared/internal/include/authoring/ApexGSA.h412
-rw-r--r--APEX_1.4/shared/internal/include/authoring/Fracturing.h544
-rw-r--r--APEX_1.4/shared/internal/readme.txt32
-rw-r--r--APEX_1.4/shared/internal/src/PvdNxParamSerializer.cpp607
-rw-r--r--APEX_1.4/shared/internal/src/authoring/ApexCSG.cpp3140
-rw-r--r--APEX_1.4/shared/internal/src/authoring/ApexCSGHull.cpp1224
-rw-r--r--APEX_1.4/shared/internal/src/authoring/ApexCSGMeshCleaning.cpp559
-rw-r--r--APEX_1.4/shared/internal/src/authoring/Cutout.cpp1908
-rw-r--r--APEX_1.4/shared/internal/src/authoring/Fracturing.cpp7349
-rw-r--r--APEX_1.4/shared/internal/src/authoring/Noise.h131
-rw-r--r--APEX_1.4/shared/internal/src/authoring/NoiseUtils.h156
25 files changed, 22063 insertions, 0 deletions
diff --git a/APEX_1.4/shared/internal/include/ApexSharedSerialization.h b/APEX_1.4/shared/internal/include/ApexSharedSerialization.h
new file mode 100644
index 00000000..20e8c1d4
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/ApexSharedSerialization.h
@@ -0,0 +1,809 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef __APEXSHAREDSERIALIZATION_H__
+#define __APEXSHAREDSERIALIZATION_H__
+
+#include "Apex.h"
+#include "ApexStream.h"
+#include "ApexString.h"
+#include "ApexSharedUtils.h"
+#include "ApexSDKIntl.h"
+#include "nvparameterized/NvParameterized.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
// Monotonically increasing version number for APEX binary asset streams.
// Each entry records one serialization format change; the numeric value is
// written into the stream header by serializeCurrentApexStreamVersion and
// checked on load.  NOTE: some entries deliberately share a value because
// they were added concurrently in different branches (see inline comments).
// New entries must be appended just before the marked line so that Count
// and Current remain correct.
struct ApexStreamVersion
{
	enum Enum
	{
		First = 0,
		ConsolidatedSubmeshes = 1,
		TriangleFlags = 2,
		ConvexHull_VolumeStreamed = 3,
		AddedDestrucibleDeformation = 4,
		RemovedParticleSizeFromDestructibleParameters = 5,
		TriangleFlagsChangedToExtraDataIndex = 6,
		ConvexHull_EdgesStreamed = 7,
		AddedDestrucibleDebrisParamaters = 8,
		DestrucibleDebrisParamatersHaveLODRange = 9,
		UniversalNamedAssetHeader = 10,
		ClothMeshBindPoses = 10, // PH: This is not a mistake, this enum was added in another branch while other enums were added in //trunk
		VariableVertexData = 11,
		SoftBodyNormalsStreamed = 11, // PH: This is not a mistake, this enum was added in another branch while other enums were added in //trunk
		UsingApexMaterialLibrary_and_NoFlagsInRenderTriangle = 12,
		AddingTextureTypeInformationToVertexFormat = 13,
		SoftBodyNormalsInTetraLink = 14,
		ClothingVertexFlagsStreamed = 15,
		ConvexHullModifiedForEfficientCollisionTests = 16,
		RenderMeshAssetRedesign = 17,
		FixedSerializationAfterRenderMeshAssetRedesign = 18,
		AddedRenderCullModeToRenderMeshAsset = 19,
		AddedTextureUVOriginField = 19, // BRG: merge conflict
		AddedDynamicVertexBufferField = 20,
		RenderMeshAssetBufferOverrunFix = 21,
		CleanupOfApexRenderMesh = 22,
		RemovedTextureTypeInformationFromVertexFormat = 23,
		AddedDestructibleFlag_FormExtendedStructures = 24,
		MarkingDestructibleChunksWithFlagToDenoteUnfracturableDescendant = 25,
		AddedValidBoundsToDestructibleParameters = 26,
		ReducingAssetUponDeserializationForReducedLOD = 27,
		AddedDustMeshParticleFactoryNameToDestructibleAsset = 28,
		AddedChunkSurfaceTracesToDestructibleAsset = 29,
		AddedInteriorSubmeshIndexToDestructibleAsset = 30,
		AddedMaxChunkSpeedAndMassScaleExponentToDestructibleAsset = 31,
		AddedGrbVolumeLimit = 32,
		Removing_rollSign_and_rollAxis_fromAPI = 33,
		AddedFractureImpulseScale = 34,
		AddedMissingRuntimeFlagsAndAlsoTraceSetAverageNormalToDestructibleAssetStream = 35,
		CachingChunkOverlapDataInDestructibleAsset = 36,
		AddedRotationCaps = 37,
		AddedImpactVelocityThreshold = 37, // SJB: Trust me, this should be 37
		ChangedVertexColorFormatToReal = 38,
		AddedEssentialDepthParameter = 39,
		AddedSeparateBoneBufferFlagToRenderMeshAsset = 40,
		DoNotNormalizeBoneWeightsOnLoad = 41, // PH: This is a change for deserialization only, we don't need to do additional processing when newer than this
		MovedChunkSurfaceNormalFromTraceSetToChunk = 41,
		FlattenedVertexBuffer = 42,
		RemovedSubmeshTriangleStruct = 43,
		DestructibleAssetRearrangementForParameterization = 44,
		AddedDestructibleMaterialStrength = 45,
		ExposedChunkNeighborPadding = 46,
		ChangedVertexFormat = 47,
		UnifiedCustomAndStandardSemantics = 48,
		AddedAdjacentPlanesToConvexHullStream = 49,
		// Always add a new version just before this line

		Count,
		Current = Count - 1 // Current is always the most recently added version
	};
};
+
+/*****************************************************************************/
+// Non-versioned data (e.g. basic types) may use the << and >> operators
+// defined in ApexStream.h. This file contains (de)serialization for
+// versioned data.
+//
+// Serialization version is written "upstream" by a versioned parent object.
+// To add the version number to the streamed data for an object, record this
+// change in the ApexStreamVersion enum, and add the line:
// stream << (uint32_t)ApexStreamVersion::Current;
+// to the serialize function.
+//
+// Serialization signature:
+// void serialize( physx::PxFileBuf& stream, const typename& object )
+//
+// Deserialization signature:
+// void deserialize( physx::PxFileBuf& stream, uint32_t version, typename& object )
+/*****************************************************************************/
+
+
+// Template foo
+PX_INLINE void serialize(physx::PxFileBuf& stream, const ExplicitRenderTriangle& t);
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, ExplicitRenderTriangle& t);
+PX_INLINE void serialize(physx::PxFileBuf& stream, const VertexUV& uv);
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, VertexUV& uv);
+PX_INLINE void serialize(physx::PxFileBuf& stream, const Vertex& v);
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, Vertex& v);
+PX_INLINE void serialize(physx::PxFileBuf& stream, const ApexSimpleString& s);
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, ApexSimpleString& s);
+PX_INLINE void serialize(physx::PxFileBuf& stream,
+ uint32_t headerVersion,
+ const NvParameterized::Interface* param,
+ NvParameterized::Handle& childHandle);
+PX_INLINE void serialize(physx::PxFileBuf& stream,
+ uint32_t headerVersion,
+ const NvParameterized::Interface* param,
+ const char* memberName,
+ NvParameterized::Handle* structHandle = NULL);
+PX_INLINE void deserialize(physx::PxFileBuf& stream,
+ uint32_t headerVersion,
+ NvParameterized::Interface* param,
+ NvParameterized::Handle& childHandle);
+PX_INLINE void deserialize(physx::PxFileBuf& stream,
+ uint32_t headerVersion,
+ NvParameterized::Interface* param,
+ const char* memberName,
+ NvParameterized::Handle* structHandle = NULL);
+
+// Utility to write an APEX asset stream header
+PX_INLINE void serializeCurrentApexStreamVersion(physx::PxFileBuf& stream, const ApexSimpleString& assetTypeName, uint32_t assetVersion)
+{
+ stream << (uint32_t)ApexStreamVersion::Current;
+ serialize(stream, assetTypeName);
+ stream << assetVersion;
+}
+
+// Utility to read version header from the start of an APEX asset stream
+PX_INLINE ApexStreamVersion::Enum deserializeApexStreamVersion(physx::PxFileBuf& stream, const ApexSimpleString& assetTypeName, uint32_t& assetVersion)
+{
+ uint32_t headerVersion;
+ stream >> headerVersion;
+ if (headerVersion > ApexStreamVersion::Current)
+ {
+ APEX_INTERNAL_ERROR("Stream version (%d) is newer than this library (%d)", headerVersion, ApexStreamVersion::Current);
+ PX_ALWAYS_ASSERT();
+ }
+ else if (headerVersion >= ApexStreamVersion::UniversalNamedAssetHeader)
+ {
+ ApexSimpleString streamedName;
+ deserialize(stream, headerVersion, streamedName);
+ if (streamedName == assetTypeName)
+ {
+ stream >> assetVersion;
+ }
+ else
+ {
+ APEX_INTERNAL_ERROR("Asset type name mismatch. File <%s> != asset name <%s>",
+ assetTypeName.c_str(), streamedName.c_str());
+ PX_ALWAYS_ASSERT();
+ }
+ }
+ return (ApexStreamVersion::Enum)headerVersion;
+}
+
+// Utility to read version header from the start of an APEX asset stream, when the type is unknown
+PX_INLINE ApexStreamVersion::Enum deserializeGenericApexStreamVersion(physx::PxFileBuf& stream, ApexSimpleString& outAssetTypeName, uint32_t& outAssetVersion)
+{
+ uint32_t headerVersion;
+ stream >> headerVersion;
+
+ if (headerVersion > ApexStreamVersion::Current)
+ {
+ APEX_INTERNAL_ERROR("Stream version (%d) is newer than this library (%d)", headerVersion, ApexStreamVersion::Current);
+ PX_ALWAYS_ASSERT();
+ }
+ else if (headerVersion >= ApexStreamVersion::UniversalNamedAssetHeader)
+ {
+ ApexSimpleString streamedName;
+ deserialize(stream, headerVersion, outAssetTypeName);
+ stream >> outAssetVersion;
+ }
+ else
+ {
+ APEX_INTERNAL_ERROR("Stream version (%d) does not contain a universal named asset header, " \
+ "use a specific asset creation method to load this asset", headerVersion);
+ PX_ALWAYS_ASSERT();
+ }
+ return (ApexStreamVersion::Enum)headerVersion;
+}
+
+// Versioned wrappers for non-versioned data
+template <class T> PX_INLINE void serialize(physx::PxFileBuf& stream, const T& t)
+{
+ stream << t;
+}
+
+template <class T> PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, T& t)
+{
+ PX_UNUSED(version);
+ stream >> t;
+}
+
+
+// physx::Array<T>
+template <class T> PX_INLINE void serialize(physx::PxFileBuf& stream, const physx::Array<T>& array)
+{
+ const uint32_t size = array.size();
+ stream << size;
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ serialize(stream, array[i]);
+ }
+}
+
+template <class T> PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, physx::Array<T>& array)
+{
+ uint32_t size;
+ stream >> size;
+ array.resize(size);
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ deserialize(stream, version, array[i]);
+ }
+}
+
+// Several serialized objects have multiple associated versions
+template <class T> PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version0, uint32_t version1, physx::Array<T>& array)
+{
+ uint32_t size;
+ stream >> size;
+ array.resize(size);
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ deserialize(stream, version0, version1, array[i]);
+ }
+}
+
+// VertexUV
+PX_INLINE void serialize(physx::PxFileBuf& stream, const VertexUV& uv)
+{
+ stream << uv.u << uv.v;
+}
+
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, VertexUV& uv)
+{
+ // original version
+ PX_UNUSED(version);
+ stream >> uv.u >> uv.v;
+}
+
+
+// VertexColor
+PX_INLINE void serialize(physx::PxFileBuf& stream, const VertexColor& c)
+{
+ stream << c.r << c.g << c.b << c.a;
+}
+
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, VertexColor& c)
+{
+ // original version
+ PX_UNUSED(version);
+ stream >> c.r >> c.g >> c.b >> c.a;
+}
+
+
+// Vertex
+PX_INLINE void serialize(physx::PxFileBuf& stream, const Vertex& v)
+{
+ //
+ stream << v.position << v.normal << v.tangent << v.binormal;
+ serialize(stream, v.uv[0]);
+ serialize(stream, v.uv[1]);
+ serialize(stream, v.uv[2]);
+ serialize(stream, v.uv[3]);
+ serialize(stream, v.color);
+ for (uint32_t i = 0; i < 4; i++)
+ {
+ stream << v.boneIndices[i];
+ stream << v.boneWeights[i];
+ }
+}
+
// Reads a Vertex, handling every historical layout.  Read order must match
// serialize(stream, Vertex) exactly for current streams; older streams have
// fewer fields and/or packed formats, selected by the version checks below.
PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, Vertex& v)
{
	// Pre-RenderMeshAssetRedesign streams carry a single bone index, so
	// pre-fill a one-bone weighting (all weight on slot 0) before reading.
	if (version < ApexStreamVersion::RenderMeshAssetRedesign)
	{
		// set all other indices and weights to 0
		v.boneWeights[0] = 1.0f;
		for (uint32_t i = 1; i < 4; i++)
		{
			v.boneIndices[i] = 0;
			v.boneWeights[i] = 0.0f;
		}
	}

	if (version >= ApexStreamVersion::VariableVertexData)
	{
		stream >> v.position >> v.normal >> v.tangent >> v.binormal;
		deserialize(stream, version, v.uv[0]);
		deserialize(stream, version, v.uv[1]);
		deserialize(stream, version, v.uv[2]);
		deserialize(stream, version, v.uv[3]);
		if (version >= ApexStreamVersion::ChangedVertexColorFormatToReal)
		{
			deserialize(stream, version, v.color);
		}
		else
		{
			// Older streams packed the color into one 32-bit RGBA value
			// (8 bits per channel, r in the low byte).
			uint32_t intColor;
			stream >> intColor;
			v.color = VertexColor(ColorRGBA((uint8_t)(intColor & 255), (uint8_t)((intColor >> 8) & 255), (uint8_t)((intColor >> 16) & 255), (uint8_t)(intColor >> 24)));
		}
		if (version >= ApexStreamVersion::RenderMeshAssetRedesign)
		{
			for (uint32_t i = 0; i < 4; i++)
			{
				stream >> v.boneIndices[i];
				stream >> v.boneWeights[i];
			}
			if (version < ApexStreamVersion::RenderMeshAssetBufferOverrunFix)
			{
				// Pre-fix streams contain four extra (index, weight) pairs that
				// were written by mistake; consume and discard them to keep the
				// stream position correct.
				uint16_t dummyU16;
				float dummyF32;
				for (uint32_t i = 0; i < 4; i++)
				{
					stream >> dummyU16;
					stream >> dummyF32;
				}
			}
		}
		else
		{
			// Single streamed bone index (weights were pre-filled above).
			uint32_t boneIndex;
			stream >> boneIndex;
			v.boneIndices[0] = (uint16_t)boneIndex;
		}
	}
	else
	{
		// Oldest layout: one UV set, packed 32-bit color, single bone index.
		stream >> v.position >> v.normal >> v.tangent >> v.binormal >> v.uv[0][0] >> v.uv[0][1];
		uint32_t intColor;
		stream >> intColor;
		v.color = VertexColor(ColorRGBA((uint8_t)(intColor & 255), (uint8_t)((intColor >> 8) & 255), (uint8_t)((intColor >> 16) & 255), (uint8_t)(intColor >> 24)));
		stream >> v.boneIndices[0];
	}
}
+
+
+// ExplicitRenderTriangle
+PX_INLINE void serialize(physx::PxFileBuf& stream, const ExplicitRenderTriangle& t)
+{
+ serialize(stream, t.vertices[0]);
+ serialize(stream, t.vertices[1]);
+ serialize(stream, t.vertices[2]);
+ stream << t.submeshIndex << t.smoothingMask << t.extraDataIndex;
+}
+
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, ExplicitRenderTriangle& t)
+{
+ deserialize(stream, version, t.vertices[0]);
+ deserialize(stream, version, t.vertices[1]);
+ deserialize(stream, version, t.vertices[2]);
+ stream >> t.submeshIndex >> t.smoothingMask;
+ if (version >= ApexStreamVersion::TriangleFlagsChangedToExtraDataIndex)
+ {
+ stream >> t.extraDataIndex;
+ }
+ else
+ {
+ if (version >= ApexStreamVersion::TriangleFlags)
+ {
+ uint32_t deadData;
+ stream >> deadData;
+ }
+ t.extraDataIndex = 0xFFFFFFFF;
+ }
+}
+
+// ApexSimpleString
+PX_INLINE void serialize(physx::PxFileBuf& stream, const ApexSimpleString& s)
+{
+ s.serialize(stream);
+ //stream << s.len();
+ //stream.storeBuffer( s.c_str(), s.len() );
+}
+
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, ApexSimpleString& s)
+{
+ PX_UNUSED(version);
+ s.deserialize(stream);
+}
+
+
// ConvexHullImpl
// Streams a convex hull out of its NvParameterized storage.  Field order here
// defines the wire format and must match deserialize(stream, version,
// ConvexHullImpl) below: vertices, uniquePlanes, bounds, volume, edges,
// adjacentFaces, widths, uniqueEdgeDirectionCount, planeCount.
PX_INLINE void serialize(physx::PxFileBuf& stream, const ConvexHullImpl& h)
{
	// A hull with no parameterized storage cannot be written.
	if (h.mParams == NULL)
	{
		PX_ASSERT(!"Attempting to serialize a ConvexHullImpl with NULL NvParameters.");
		return;
	}

	// Scratch copies of the parameterized arrays, in streamable form.
	Array<physx::PxVec3> vertices;
	Array<physx::PxPlane> uniquePlanes;
	Array<uint32_t> edges;
	Array<uint32_t> adjacentFaces;
	Array<float> widths;

	NvParameterized::Handle handle(*h.mParams);

	// vertices
	vertices.resize((uint32_t)h.mParams->vertices.arraySizes[0]);
	h.mParams->getParameterHandle("vertices", handle);
	h.mParams->getParamVec3Array(handle, vertices.begin(), (int32_t)vertices.size());
	serialize(stream, vertices);

	// uniquePlanes (converted from the parameterized Plane_Type to PxPlane)
	uniquePlanes.resize((uint32_t)h.mParams->uniquePlanes.arraySizes[0]);
	for (uint32_t i = 0; i < (uint32_t)h.mParams->uniquePlanes.arraySizes[0]; ++i)
	{
		ConvexHullParametersNS::Plane_Type& plane = h.mParams->uniquePlanes.buf[i];
		uniquePlanes[i] = physx::PxPlane(plane.normal, plane.d);
	}
	serialize(stream, uniquePlanes);

	// bounds
	stream << h.mParams->bounds;

	// volume
	stream << h.mParams->volume;

	// edges
	edges.resize((uint32_t)h.mParams->edges.arraySizes[0]);
	h.mParams->getParameterHandle("edges", handle);
	h.mParams->getParamU32Array(handle, edges.begin(), (int32_t)edges.size());
	serialize(stream, edges);

	// adjacentFaces
	adjacentFaces.resize((uint32_t)h.mParams->adjacentFaces.arraySizes[0]);
	h.mParams->getParameterHandle("adjacentFaces", handle);
	h.mParams->getParamU32Array(handle, adjacentFaces.begin(), (int32_t)adjacentFaces.size());
	serialize(stream, adjacentFaces);

	// widths
	widths.resize((uint32_t)h.mParams->widths.arraySizes[0]);
	h.mParams->getParameterHandle("widths", handle);
	h.mParams->getParamF32Array(handle, widths.begin(), (int32_t)widths.size());
	serialize(stream, widths);

	// unique direction count
	stream << h.mParams->uniqueEdgeDirectionCount;

	// plane count
	stream << h.mParams->planeCount;
}
+
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, ConvexHullImpl& h)
+{
+ if (h.mParams == NULL)
+ {
+ h.init();
+ }
+
+ Array<physx::PxVec3> vertices;
+ Array<physx::PxPlane> uniquePlanes;
+ Array<uint32_t> edges;
+ Array<uint32_t> adjacentFaces;
+ Array<float> widths;
+
+ NvParameterized::Handle handle(*h.mParams);
+
+ // vertices
+ deserialize(stream, version, vertices);
+ h.mParams->getParameterHandle("vertices", handle);
+ h.mParams->resizeArray(handle, (int32_t)vertices.size());
+ h.mParams->setParamVec3Array(handle, vertices.begin(), (int32_t)vertices.size());
+
+ // uniquePlanes
+ deserialize(stream, version, uniquePlanes);
+ h.mParams->getParameterHandle("uniquePlanes", handle);
+ h.mParams->resizeArray(handle, (int32_t)uniquePlanes.size());
+ for (uint32_t i = 0; i < uniquePlanes.size(); ++i)
+ {
+ physx::PxPlane& plane = uniquePlanes[i];
+ ConvexHullParametersNS::Plane_Type& paramPlane = h.mParams->uniquePlanes.buf[i];
+ paramPlane.normal = plane.n;
+ paramPlane.d = plane.d;
+ }
+
+ // bounds
+ stream >> h.mParams->bounds;
+
+ // volume
+ if (version >= ApexStreamVersion::ConvexHull_VolumeStreamed)
+ {
+ stream >> h.mParams->volume;
+ }
+
+ bool needsRebuild = false;
+
+ if (version >= ApexStreamVersion::ConvexHull_EdgesStreamed)
+ {
+ // edges
+ deserialize(stream, version, edges);
+ h.mParams->getParameterHandle("edges", handle);
+ h.mParams->resizeArray(handle, (int32_t)edges.size());
+ h.mParams->setParamU32Array(handle, edges.begin(), (int32_t)edges.size());
+
+ if (version >= ApexStreamVersion::AddedAdjacentPlanesToConvexHullStream)
+ {
+ // adjacentFaces
+ deserialize(stream, version, adjacentFaces);
+ h.mParams->getParameterHandle("adjacentFaces", handle);
+ h.mParams->resizeArray(handle, (int32_t)adjacentFaces.size());
+ h.mParams->setParamU32Array(handle, adjacentFaces.begin(), (int32_t)adjacentFaces.size());
+ }
+ else
+ {
+ needsRebuild = true;
+ }
+
+ if (version >= ApexStreamVersion::ConvexHullModifiedForEfficientCollisionTests)
+ {
+ // widths
+ deserialize(stream, version, widths);
+ h.mParams->getParameterHandle("widths", handle);
+ h.mParams->resizeArray(handle, (int32_t)widths.size());
+ h.mParams->setParamF32Array(handle, widths.begin(), (int32_t)widths.size());
+
+ // unique direction count
+ stream >> h.mParams->uniqueEdgeDirectionCount;
+
+ // plane count
+ stream >> h.mParams->planeCount;
+ }
+ else
+ {
+ // Fix up
+ // Ouch - rebuilding!
+ // \todo - issue load performance warning
+ h.buildFromPoints(vertices.begin(), vertices.size(), sizeof(physx::PxVec3));
+ }
+ }
+ else
+ {
+ needsRebuild = true;
+ }
+
+ if (needsRebuild)
+ {
+ // Ouch - rebuilding!
+ // \todo - issue load performance warning
+ h.buildFromPoints(vertices.begin(), vertices.size(), sizeof(physx::PxVec3));
+ }
+}
+
// NvParameterized F32, U32, VEC3, BOOL, string, enum, and named ref
// Streams the single parameter addressed by childHandle.  Named references are
// written as (className, name) string pairs; strings and enums as strings;
// scalar types as their raw values.  Asserts on unsupported parameter types.
PX_INLINE void serialize(physx::PxFileBuf& stream,
                         uint32_t headerVersion,
                         const NvParameterized::Interface* param,
                         NvParameterized::Handle& childHandle)
{
	PX_UNUSED(headerVersion);
	PX_UNUSED(param);
	// childHandle must address a parameter belonging to param.
	PX_ASSERT(childHandle.getConstInterface() == param);

	ApexSimpleString tmpString;

	if (childHandle.parameterDefinition()->type() == NvParameterized::TYPE_REF)
	{
		// named reference (we should check that it is not INCLUDED)

		NvParameterized::Interface* childParamPtr = 0;

		// just in case it wasn't initialized
		//if( param->initParamRef(childHandle, NULL, true) != NvParameterized::ERROR_NONE )
		//{
		//	return;
		//}

		childHandle.getParamRef(childParamPtr);

		if (!childParamPtr) //Special case hack...
		{
			// Reference was never initialized; create a default one so there is
			// something to write.
			childHandle.initParamRef(NULL, true);
			childHandle.getParamRef(childParamPtr);
		}

		if (childParamPtr)
		{
			// Wire format: className string followed by the reference's name.
			tmpString = childParamPtr->className();
			tmpString.serialize(stream);

			tmpString = childParamPtr->name();
			tmpString.serialize(stream);
		}
	}
	else if (childHandle.parameterDefinition()->type() == NvParameterized::TYPE_STRING)
	{
		const char* str;
		childHandle.getParamString(str);
		tmpString = str;
		tmpString.serialize(stream);
	}
	else if (childHandle.parameterDefinition()->type() == NvParameterized::TYPE_ENUM)
	{
		// Enums are streamed by name, not by ordinal.
		const char* str;
		childHandle.getParamEnum(str);
		tmpString = str;
		tmpString.serialize(stream);
	}
	else
	{
		switch (childHandle.parameterDefinition()->type())
		{
		case NvParameterized::TYPE_VEC3:
		{
			physx::PxVec3 d;
			childHandle.getParamVec3(d);
			stream << d;
			break;
		}
		case NvParameterized::TYPE_F32:
		{
			float d;
			childHandle.getParamF32(d);
			stream << d;
			break;
		}
		case NvParameterized::TYPE_U32:
		{
			uint32_t d;
			childHandle.getParamU32(d);
			stream << d;
			break;
		}
		case NvParameterized::TYPE_BOOL:
		{
			// Booleans are streamed as a single byte (1 or 0).
			bool d;
			childHandle.getParamBool(d);
			if (d)
			{
				stream.storeByte((uint8_t)1);
			}
			else
			{
				stream.storeByte((uint8_t)0);
			}
			break;
		}
		default:
			// Unsupported parameter type for this serializer.
			PX_ALWAYS_ASSERT();
		}
	}
}
+
+PX_INLINE void serialize(physx::PxFileBuf& stream,
+ uint32_t headerVersion,
+ const NvParameterized::Interface* param,
+ const char* memberName,
+ NvParameterized::Handle* structHandle)
+{
+ PX_UNUSED(headerVersion);
+
+ NvParameterized::Handle childHandle(*param);
+ if (structHandle)
+ {
+ structHandle->getChildHandle(param, memberName, childHandle);
+ }
+ else
+ {
+ param->getParameterHandle(memberName, childHandle);
+ }
+
+ serialize(stream, headerVersion, param, childHandle);
+}
+
+
// NvParameterized F32, U32, VEC3, BOOL, string, enum, and named ref
// Reads the single parameter addressed by childHandle, mirroring the
// handle-based serialize overload above.  Unknown enum values fall back to the
// first declared enum value with a debug warning.  Asserts on unsupported
// parameter types.
PX_INLINE void deserialize(physx::PxFileBuf& stream,
                           uint32_t headerVersion,
                           NvParameterized::Interface* param,
                           NvParameterized::Handle& childHandle)
{
	PX_UNUSED(param);
	// childHandle must address a parameter belonging to param.
	PX_ASSERT(childHandle.getConstInterface() == param);
	ApexSimpleString tmpString;

	if (childHandle.parameterDefinition()->type() == NvParameterized::TYPE_REF)
	{
		deserialize(stream, headerVersion, tmpString);

		// named reference (we should check that it is not INCLUDED
		NvParameterized::Interface* childParamPtr = 0;

		// tmpString is the className (asset authorable name)
		childHandle.initParamRef(tmpString.c_str(), true);
		childHandle.getParamRef(childParamPtr);

		PX_ASSERT(childParamPtr);
		if (childParamPtr)
		{
			// Second streamed string is the reference's name.
			deserialize(stream, headerVersion, tmpString);
			childParamPtr->setName(tmpString.c_str());
		}

	}
	else if (childHandle.parameterDefinition()->type() == NvParameterized::TYPE_STRING)
	{
		deserialize(stream, headerVersion, tmpString);
		childHandle.setParamString(tmpString.c_str());
	}
	else if (childHandle.parameterDefinition()->type() == NvParameterized::TYPE_ENUM)
	{
		deserialize(stream, headerVersion, tmpString);
		if (childHandle.setParamEnum(tmpString.c_str()) != NvParameterized::ERROR_NONE)
		{
			// Streamed name is not a valid enum value (e.g. renamed since the
			// asset was written); substitute the first declared value.
			APEX_DEBUG_WARNING("NvParameterized ENUM value not correct: %s, substituting: %s", \
			                   tmpString.c_str(), childHandle.parameterDefinition()->enumVal(0));

			childHandle.setParamEnum(childHandle.parameterDefinition()->enumVal(0));
		}

	}
	else
	{
		switch (childHandle.parameterDefinition()->type())
		{
		case NvParameterized::TYPE_VEC3:
		{
			physx::PxVec3 d;
			stream >> d;
			childHandle.setParamVec3(d);
			break;
		}
		case NvParameterized::TYPE_F32:
		{
			float d;
			stream >> d;
			childHandle.setParamF32(d);
			break;
		}
		case NvParameterized::TYPE_U32:
		{
			uint32_t d;
			stream >> d;
			childHandle.setParamU32(d);
			break;
		}
		case NvParameterized::TYPE_BOOL:
		{
			// Booleans were streamed as a single byte (1 or 0).
			bool d;
			uint8_t value = stream.readByte();
			if (value)
			{
				d = true;
			}
			else
			{
				d = false;
			}

			childHandle.setParamBool(d);
			break;
		}
		default:
			// Unsupported parameter type for this serializer.
			PX_ALWAYS_ASSERT();
		}
	}

}
+
+PX_INLINE void deserialize(physx::PxFileBuf& stream,
+ uint32_t headerVersion,
+ NvParameterized::Interface* param,
+ const char* memberName,
+ NvParameterized::Handle* structHandle)
+{
+ NvParameterized::Handle childHandle(*param);
+ if (structHandle)
+ {
+ structHandle->getChildHandle(param, memberName, childHandle);
+ }
+ else
+ {
+ param->getParameterHandle(memberName, childHandle);
+ }
+
+ deserialize(stream, headerVersion, param, childHandle);
+}
+
+
+}
+} // end namespace nvidia::apex
+
+
#endif // __APEXSHAREDSERIALIZATION_H__
diff --git a/APEX_1.4/shared/internal/include/ApexStream.h b/APEX_1.4/shared/internal/include/ApexStream.h
new file mode 100644
index 00000000..b5ec3b91
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/ApexStream.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef __APEX_STREAM_H__
+#define __APEX_STREAM_H__
+
+#include "ApexDefs.h"
+#include "PxPlane.h"
+
+
+namespace nvidia
+{
+namespace apex
+{
+
+// Public, useful operators for serializing nonversioned data follow.
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, bool& b)
+{
+ b = (0 != stream.readByte());
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, int8_t& b)
+{
+ b = (int8_t)stream.readByte();
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, int16_t& w)
+{
+ w = (int16_t)stream.readWord();
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, int32_t& d)
+{
+ d = (int32_t)stream.readDword();
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, uint8_t& b)
+{
+ b = stream.readByte();
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, uint16_t& w)
+{
+ w = stream.readWord();
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, uint32_t& d)
+{
+ d = stream.readDword();
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, float& f)
+{
+ f = stream.readFloat();
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, double& f)
+{
+ f = stream.readDouble();
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, physx::PxVec2& v)
+{
+ stream >> v.x >> v.y;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, physx::PxVec3& v)
+{
+ stream >> v.x >> v.y >> v.z;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, physx::PxVec4& v)
+{
+ stream >> v.x >> v.y >> v.z >> v.w;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, physx::PxBounds3& b)
+{
+ stream >> b.minimum >> b.maximum;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, physx::PxQuat& q)
+{
+ stream >> q.x >> q.y >> q.z >> q.w;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, physx::PxPlane& p)
+{
+ stream >> p.n.x >> p.n.y >> p.n.z >> p.d;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator >> (physx::PxFileBuf& stream, physx::PxMat44& m)
+{
+ stream >> m.column0 >> m.column1 >> m.column2 >> m.column3;
+ return stream;
+}
+
+// The opposite of the above operators--takes data and writes it to a stream.
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const bool b)
+{
+ stream.storeByte(b ? (uint8_t)1 : (uint8_t)0);
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const int8_t b)
+{
+ stream.storeByte((uint8_t)b);
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const int16_t w)
+{
+ stream.storeWord((uint16_t)w);
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const int32_t d)
+{
+ stream.storeDword((uint32_t)d);
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const uint8_t b)
+{
+ stream.storeByte(b);
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const uint16_t w)
+{
+ stream.storeWord(w);
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const uint32_t d)
+{
+ stream.storeDword(d);
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const float f)
+{
+ stream.storeFloat(f);
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const double f)
+{
+ stream.storeDouble(f);
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const physx::PxVec2& v)
+{
+ stream << v.x << v.y;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const physx::PxVec3& v)
+{
+ stream << v.x << v.y << v.z;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const physx::PxVec4& v)
+{
+ stream << v.x << v.y << v.z << v.w;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const physx::PxBounds3& b)
+{
+ stream << b.minimum << b.maximum;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const physx::PxQuat& q)
+{
+ stream << q.x << q.y << q.z << q.w;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const physx::PxPlane& p)
+{
+ stream << p.n.x << p.n.y << p.n.z << p.d;
+ return stream;
+}
+PX_INLINE physx::PxFileBuf& operator << (physx::PxFileBuf& stream, const physx::PxMat44& m)
+{
+ stream << m.column0 << m.column1 << m.column2 << m.column3;
+ return stream;
+}
+
+
+}
+} // end namespace apex
+
#endif // __APEX_STREAM_H__
diff --git a/APEX_1.4/shared/internal/include/ApexString.h b/APEX_1.4/shared/internal/include/ApexString.h
new file mode 100644
index 00000000..20d4f8c7
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/ApexString.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_STRING_H
+#define APEX_STRING_H
+
#include <string.h>		// strlen / memmove

#include "ApexUsingNamespace.h"
#include "PsArray.h"
#include "PsString.h"
#include "PsArray.h"
#include "PsUserAllocated.h"
#include <PxFileBuf.h>
+
+namespace nvidia
+{
+namespace apex
+{
+
+/**
+ * ApexSimpleString - a simple string class
+ */
+class ApexSimpleString : public physx::Array<char>, public UserAllocated
+{
+public:
+ ApexSimpleString() : physx::Array<char>(), length(0)
+ {
+ }
+
+ explicit ApexSimpleString(const char* cStr) : physx::Array<char>(), length(0)
+ {
+ if (cStr)
+ {
+ length = (uint32_t)strlen(cStr);
+ if (length > 0)
+ {
+ resize(length + 1);
+ nvidia::strlcpy(begin(), size(), cStr);
+ }
+ }
+ }
+
+ ApexSimpleString(const ApexSimpleString& other) : physx::Array<char>()
+ {
+ length = other.length;
+ if (length > 0)
+ {
+ resize(length + 1);
+ nvidia::strlcpy(begin(), capacity(), other.c_str());
+ }
+ else
+ {
+ resize(0);
+ }
+ }
+
+ ApexSimpleString(uint32_t number, uint32_t fixedLength = 0) : length(fixedLength)
+ {
+ if (fixedLength)
+ {
+ char format[5]; format[0] = '%'; format[1] = '0';
+ char buffer[10];
+ if (fixedLength > 9)
+ {
+ PX_ASSERT(fixedLength);
+ fixedLength = 9;
+ }
+ physx::shdfnd::snprintf(format + 2, 2, "%d", fixedLength);
+ format[3] = 'd'; format[4] = '\0';
+ physx::shdfnd::snprintf(buffer, 10, format, number);
+ resize(length + 1);
+ nvidia::strlcpy(begin(), size(), buffer);
+ }
+ else
+ {
+ char buffer[10];
+ physx::shdfnd::snprintf(buffer, 10, "%d", number);
+ length = 1;
+ while (number >= 10)
+ {
+ number /= 10;
+ length++;
+ }
+ resize(length + 1);
+ nvidia::strlcpy(begin(), size(), buffer);
+ }
+ }
+
+ ApexSimpleString& operator = (const ApexSimpleString& other)
+ {
+ length = other.length;
+ if (length > 0)
+ {
+ resize(length + 1);
+ nvidia::strlcpy(begin(), capacity(), other.c_str());
+ }
+ else
+ {
+ resize(0);
+ }
+ return *this;
+ }
+
+ ApexSimpleString& operator = (const char* cStr)
+ {
+ if (!cStr)
+ {
+ erase();
+ }
+ else
+ {
+ length = (uint32_t)strlen(cStr);
+ if (length > 0)
+ {
+ resize(length + 1);
+ nvidia::strlcpy(begin(), capacity(), cStr);
+ }
+ else
+ {
+ resize(0);
+ }
+ }
+ return *this;
+ }
+
+ void truncate(uint32_t newLength)
+ {
+ if (newLength < length)
+ {
+ length = newLength;
+ begin()[length] = '\0';
+ }
+ }
+
+ void serialize(physx::PxFileBuf& stream) const
+ {
+ stream.storeDword(length);
+ stream.write(begin(), length);
+ }
+
+ void deserialize(physx::PxFileBuf& stream)
+ {
+ uint32_t len = stream.readDword();
+ if (len > 0)
+ {
+ resize(len + 1);
+ stream.read(begin(), len);
+ begin()[len] = '\0';
+ length = len;
+ }
+ else
+ {
+ erase();
+ }
+ }
+
+ uint32_t len() const
+ {
+ return length;
+ }
+
+ /* PH: Cast operator not allowed by coding guidelines, and evil in general anyways
+ operator const char* () const
+ {
+ return capacity() ? begin() : "";
+ }
+ */
+ const char* c_str() const
+ {
+ return capacity() > 0 ? begin() : "";
+ }
+
+ bool operator==(const ApexSimpleString& s) const
+ {
+ return nvidia::strcmp(c_str(), s.c_str()) == 0;
+ }
+ bool operator!=(const ApexSimpleString& s) const
+ {
+ return ! this->operator==(s);
+ }
+ bool operator==(const char* s) const
+ {
+ return nvidia::strcmp(c_str(), s) == 0;
+ }
+ bool operator!=(const char* s) const
+ {
+ return ! this->operator==(s);
+ }
+ bool operator < (const ApexSimpleString& s) const
+ {
+ return nvidia::strcmp(c_str(), s.c_str()) < 0;
+ }
+
+ ApexSimpleString& operator += (const ApexSimpleString& s)
+ {
+ expandTo(length + s.length);
+ nvidia::strlcpy(begin() + length, capacity() - length, s.c_str());
+ length += s.length;
+ return *this;
+ }
+
+ ApexSimpleString& operator += (char c)
+ {
+ expandTo(length + 1);
+ begin()[length++] = c;
+ begin()[length] = '\0';
+ return *this;
+ }
+
+ ApexSimpleString operator + (const ApexSimpleString& s)
+ {
+ ApexSimpleString sum = *this;
+ sum += s;
+ return sum;
+ }
+
+ ApexSimpleString& clear()
+ {
+ if (capacity())
+ {
+ begin()[0] = '\0';
+ }
+ length = 0;
+ return *this;
+ }
+
+ ApexSimpleString& erase()
+ {
+ resize(0);
+ return clear();
+ }
+
+ static PX_INLINE void ftoa(float f, ApexSimpleString& s)
+ {
+ char buf[20];
+ physx::shdfnd::snprintf(buf, sizeof(buf), "%g", f);
+ s = buf;
+ }
+
+ static PX_INLINE void itoa(uint32_t i, ApexSimpleString& s)
+ {
+ char buf[20];
+ physx::shdfnd::snprintf(buf, sizeof(buf), "%i", i);
+ s = buf;
+ }
+
+private:
+
+ void expandTo(uint32_t stringCapacity)
+ {
+ if (stringCapacity + 1 > capacity())
+ {
+ resize(2 * stringCapacity + 1);
+ }
+ }
+
+ uint32_t length;
+};
+
+PX_INLINE ApexSimpleString operator + (const ApexSimpleString& s1, const ApexSimpleString& s2)
+{
+ ApexSimpleString result = s1;
+ result += s2;
+ return result;
+}
+
+} // namespace apex
+} // namespace nvidia
+
+#endif // APEX_STRING_H
diff --git a/APEX_1.4/shared/internal/include/FractureTools.h b/APEX_1.4/shared/internal/include/FractureTools.h
new file mode 100644
index 00000000..ec9ea1c6
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/FractureTools.h
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef FRACTURE_TOOLS_H
+#define FRACTURE_TOOLS_H
+
+#include "Px.h"
+#include "ExplicitHierarchicalMesh.h"
+#include "FractureToolsStructs.h"
+
+PX_PUSH_PACK_DEFAULT
+
+namespace nvidia
+{
+ namespace apex
+ {
+ struct IntersectMesh;
+ class DestructibleAsset;
+ }
+}
+
+namespace FractureTools
+{
+
+/**
+ Tools for fracturing meshes.
+*/
+
+
+/** Instantiates a blank CutoutSet */
+CutoutSet* createCutoutSet();
+
+/**
+ Builds a cutout set (which must have been initially created by createCutoutSet()).
+ Uses a bitmap described by pixelBuffer, bufferWidth, and bufferHeight. Each pixel is represented
+ by one byte in the buffer.
+
+ cutoutSet: the CutoutSet to build
	pixelBuffer: pointer to the beginning of the pixel buffer
+ bufferWidth: the width of the buffer in pixels
+ bufferHeight: the height of the buffer in pixels
+ snapThreshold: the pixel distance at which neighboring cutout vertices and segments may be fudged into alignment.
+ periodic: whether or not to use periodic boundary conditions when creating cutouts from the map
+*/
+void buildCutoutSet(CutoutSet& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight, float snapThreshold, bool periodic);
+
+/**
+ Calculate the mapping between a cutout fracture map and a given triangle.
+ The result is a 3 by 3 matrix M composed by an affine transformation and a rotation, we can get the 3-D projection for a texture coordinate pair (u,v) with such a formula:
+ (x,y,z) = M*PxVec3(u,v,1)
+
+ triangle: the target face's normal
+ theMapping: resulted mapping, composed by an affine transformation and a rotation
+*/
+bool calculateCutoutUVMapping(const nvidia::ExplicitRenderTriangle& triangle, physx::PxMat33& theMapping);
+
+/**
+ Uses the passed-in target direction to find the best triangle in the root mesh with normal near the given targetDirection. If triangles exist
+ with normals within one degree of the given target direction, then one with the greatest area of such triangles is used. Otherwise, the triangle
+ with normal closest to the given target direction is used. The resulting triangle is used to calculate a UV mapping as in the function
+ calculateCutoutUVMapping (above).
+
+ The assumption is that there exists a single mapping for all triangles on a specified face, for this feature to be useful.
+
+ hMesh: the explicit mesh with well rectangle-shaped faces
+ targetDirection: the target face's normal
+ theMapping: resulted mapping, composed by an affine transformation and a rotation
+*/
+bool calculateCutoutUVMapping(nvidia::ExplicitHierarchicalMesh& hMesh, const physx::PxVec3& targetDirection, physx::PxMat33& theMapping);
+
+/**
+ Splits the mesh in chunk[0], forming fractured pieces chunks[1...] using
+ Voronoi decomposition fracturing.
+
+ hMesh: the mesh to split
+ iHMeshCore: if this mesh is not empty, chunk 0 will be used as an indestructible "core" of the fractured
+ mesh. That is, it will be subtracted from hMesh, and placed at level 1 of the hierarchy. The remainder
+ of hMesh will be split as usual, creating chunks at level 1 (and possibly deeper).
+ exportCoreMesh: if true, a core mesh chunk will be created from iHMeshCore
+ coreMeshImprintSubmeshIndex: if this is < 0, use the core mesh materials (was applyCoreMeshMaterialToNeighborChunks). Otherwise, use the given submesh
+ meshProcessingParams: describes generic mesh processing directives
	desc: describes the Voronoi splitting parameters (see FractureVoronoiDesc)
+ collisionDesc: convex hulls will be generated for each chunk using the method. See CollisionDesc.
+ randomSeed: seed for the random number generator, to ensure reproducibility.
+ progressListener: The user must instantiate an IProgressListener, so that this function may report progress of this operation
+ cancel: if not NULL and *cancel is set to true, the root mesh will be restored to its original state, and the function will return at its earliest opportunity. Meant to be set from another thread.
+
+ returns true if successful.
+*/
+bool createVoronoiSplitMesh
+(
+ nvidia::ExplicitHierarchicalMesh& hMesh,
+ nvidia::ExplicitHierarchicalMesh& iHMeshCore,
+ bool exportCoreMesh,
+ int32_t coreMeshImprintSubmeshIndex,
+ const MeshProcessingParameters& meshProcessingParams,
+ const FractureVoronoiDesc& desc,
+ const nvidia::CollisionDesc& collisionDesc,
+ uint32_t randomSeed,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel = NULL
+);
+
+/**
+ Generates a set of uniformly distributed points in the interior of the root mesh.
+
+ hMesh: the mesh in which to distribute sites
+ siteBuffer: an array of physx::PxVec3, at least the size of siteCount
+ siteChunkIndices: if not NULL, then must be at least the size of siteCount. siteCount indices will be written to this buffer, associating each site with a chunk that contains it.
+ siteCount: the number of points to write into siteBuffer
+ randomSeed: pointer to a seed for the random number generator, to ensure reproducibility. If NULL, the random number generator will not be re-seeded.
+ microgridSize: pointer to a grid size used for BSP creation. If NULL, the default settings will be used.
+ progressListener: The user must instantiate an IProgressListener, so that this function may report progress of this operation
+ meshMode: Open mesh handling. Modes: Automatic, Closed, Open (see BSPOpenMode)
+ chunkIndex: If this is a valid index, the voronoi sites will only be created within the volume of the indexed chunk. Otherwise,
+ the sites will be created within each of the root-level chunks. Default value is an invalid index.
+
+ returns the number of sites actually created (written to siteBuffer and siteChunkIndices). This may be less than the number of sites requested if site placement fails.
+*/
+uint32_t createVoronoiSitesInsideMesh
+(
+ nvidia::ExplicitHierarchicalMesh& hMesh,
+ physx::PxVec3* siteBuffer,
+ uint32_t* siteChunkIndices,
+ uint32_t siteCount,
+ uint32_t* randomSeed,
+ uint32_t* microgridSize,
+ nvidia::BSPOpenMode::Enum meshMode,
+ nvidia::IProgressListener& progressListener,
+ uint32_t chunkIndex = 0xFFFFFFFF
+);
+
+/**
+ Creates scatter mesh sites randomly distributed on the mesh.
+
+ meshIndices: user-allocated array of size scatterMeshInstancesBufferSize which will be filled in by this function, giving the scatter mesh index used
+ relativeTransforms: user-allocated array of size scatterMeshInstancesBufferSize which will be filled in by this function, giving the chunk-relative transform for each chunk instance
+ chunkMeshStarts: user-allocated array which will be filled in with offsets into the meshIndices and relativeTransforms array.
+ For a chunk indexed by i, the corresponding range [chunkMeshStart[i], chunkMeshStart[i+1]-1] in meshIndices and relativeTransforms is used.
+ *NOTE*: chunkMeshStart array must be of at least size N+1, where N is the number of chunks in the base explicit hierarchical mesh.
	scatterMeshInstancesBufferSize: the size of the meshIndices and relativeTransforms arrays.
+ hMesh: the mesh in which to distribute sites
+ targetChunkCount: how many chunks are in the array targetChunkIndices
+ targetChunkIndices: an array of chunk indices which are candidates for scatter meshes. The elements in the array chunkIndices will come from this array
+ randomSeed: pointer to a seed for the random number generator, to ensure reproducibility. If NULL, the random number generator will not be re-seeded.
+ scatterMeshAssetCount: the number of different scatter meshes (not instances). Should not exceed 255. If scatterMeshAssetCount > 255, only the first 255 will be used.
+ scatterMeshAssets: an array of size scatterMeshAssetCount, of the render mesh assets which will be used for the scatter meshes
+ minCount: an array of size scatterMeshAssetCount, giving the minimum number of instances to place for each mesh
+ maxCount: an array of size scatterMeshAssetCount, giving the maximum number of instances to place for each mesh
+ minScales: an array of size scatterMeshAssetCount, giving the minimum scale to apply to each scatter mesh
+ maxScales: an array of size scatterMeshAssetCount, giving the maximum scale to apply to each scatter mesh
+ maxAngles: an array of size scatterMeshAssetCount, giving a maximum deviation angle (in degrees) from the surface normal to apply to each scatter mesh
+
+ return value: the number of instances placed in indices and relativeTransforms (will not exceed scatterMeshInstancesBufferSize)
+*/
+uint32_t createScatterMeshSites
+(
+ uint8_t* meshIndices,
+ physx::PxMat44* relativeTransforms,
+ uint32_t* chunkMeshStarts,
+ uint32_t scatterMeshInstancesBufferSize,
+ nvidia::ExplicitHierarchicalMesh& hMesh,
+ uint32_t targetChunkCount,
+ const uint16_t* targetChunkIndices,
+ uint32_t* randomSeed,
+ uint32_t scatterMeshAssetCount,
+ nvidia::RenderMeshAsset** scatterMeshAssets,
+ const uint32_t* minCount,
+ const uint32_t* maxCount,
+ const float* minScales,
+ const float* maxScales,
+ const float* maxAngles
+);
+
+/**
+ Utility to visualize Voronoi cells for a given set of sites.
+
+ debugRender: rendering object which will receive the drawing primitives associated with this cell visualization
+ sites: an array of Voronoi cell sites, of length siteCount
+ siteCount: the number of Voronoi cell sites (length of sites array)
+ cellColors: an optional array of colors (see ApexRenderDebug for format) for the cells. If NULL, the white (0xFFFFFFFF) color will be used.
+ If not NULL, this (of length cellColorCount) is used to color the cell graphics. The number cellColorCount need not match siteCount. If
+ cellColorCount is less than siteCount, the cell colors will cycle. That is, site N gets cellColor[N%cellColorCount].
+ cellColorCount: the number of cell colors (the length of cellColors array)
+ bounds: defines an axis-aligned bounding box which clips the visualization, since some cells extend to infinity
+ cellIndex: if this is a valid index (cellIndex < siteCount), then only the cell corresponding to sites[cellIndex] will be drawn. Otherwise, all cells will be drawn.
+*/
+void visualizeVoronoiCells
+(
+ nvidia::RenderDebugInterface& debugRender,
+ const physx::PxVec3* sites,
+ uint32_t siteCount,
+ const uint32_t* cellColors,
+ uint32_t cellColorCount,
+ const physx::PxBounds3& bounds,
+ uint32_t cellIndex = 0xFFFFFFFF
+);
+
+/**
+ Builds a new ExplicitHierarchicalMesh from an array of triangles.
+
+ iHMesh: the ExplicitHierarchicalMesh to build
+ meshTriangles: pointer to array of ExplicitRenderTriangles which make up the mesh
+ meshTriangleCount the size of the meshTriangles array
+ submeshData: pointer to array of ExplicitSubmeshData, describing the submeshes
+ submeshCount: the size of the submeshData array
+ meshPartition: if not NULL, an array of size meshPartitionCount, giving the end elements of contiguous subsets of meshTriangles.
+ If meshPartition is NULL, one partition is assumed.
+ When there is one partition, these triangles become the level 0 part.
+ When there is more than one partition, these triangles become level 1 parts, the behavior is determined by firstPartitionIsDepthZero (see below).
+ meshPartitionCount: if meshPartition is not NULL, this is the size of the meshPartition array.
+ parentIndices: if not NULL, the parent indices for each chunk (corresponding to a partition in the mesh partition).
+ parentIndexCount: the size of the parentIndices array. This does not need to match meshPartitionCount. If a mesh partition has an index beyond the end of parentIndices,
+ then the parentIndex is considered to be 0. Therefore, if parentIndexCount = 0, all parents are 0 and so all chunks created will be depth 1. This will cause a
+ depth 0 chunk to be created that is the aggregate of the depth 1 chunks. If parentIndexCount > 0, then the depth-0 chunk must have a parentIndex of -1.
+ To reproduce the effect of the old parameter 'firstPartitionIsDepthZero' = true, set parentIndices to the address of a int32_t containing the value -1, and set parentIndexCount = 1.
+ To reproduce the effect of the old parameter 'firstPartitionIsDepthZero' = false, set parentIndexCount = 0.
+ Note: if parent indices are given, the first one must be -1, and *only* that index may be negative. That is, there may be only one depth-0 mesh and it must be the first mesh.
+*/
+bool buildExplicitHierarchicalMesh
+(
+ nvidia::ExplicitHierarchicalMesh& iHMesh,
+ const nvidia::ExplicitRenderTriangle* meshTriangles,
+ uint32_t meshTriangleCount,
+ const nvidia::ExplicitSubmeshData* submeshData,
+ uint32_t submeshCount,
+ uint32_t* meshPartition = NULL,
+ uint32_t meshPartitionCount = 0,
+ int32_t* parentIndices = NULL,
+ uint32_t parentIndexCount = 0
+);
+
+/**
+ Set the tolerances used in CSG calculations with BSPs.
+
+ linearTolerance: relative (to mesh size) tolerance used with angularTolerance to determine coplanarity. Default = 1.0e-4.
+ angularTolerance: used with linearTolerance to determine coplanarity. Default = 1.0e-3
+ baseTolerance: relative (to mesh size) tolerance used for spatial partitioning
+ clipTolerance: relative (to mesh size) tolerance used when clipping triangles for CSG mesh building operations. Default = 1.0e-4.
	cleaningTolerance: relative (to mesh size) tolerance used when cleaning the output mesh generated from the toMesh() function. Default = 1.0e-7.
+*/
+void setBSPTolerances
+(
+ float linearTolerance,
+ float angularTolerance,
+ float baseTolerance,
+ float clipTolerance,
+ float cleaningTolerance
+);
+
+/**
+ Set the parameters used in BSP building operations.
+
+ logAreaSigmaThreshold: At each step in the tree building process, the surface with maximum triangle area is compared
+ to the other surface triangle areas. If the maximum area surface is far from the "typical" set of
+ surface areas, then that surface is chosen as the next splitting plane. Otherwise, a random
+ test set is chosen and a winner determined based upon the weightings below.
+ The value logAreaSigmaThreshold determines how "atypical" the maximum area surface must be to
+ be chosen in this manner.
+ Default value = 2.0.
+ testSetSize: Larger values of testSetSize may find better BSP trees, but will take more time to create.
+ testSetSize = 0 is treated as infinity (all surfaces will be tested for each branch).
+ Default value = 10.
+ splitWeight: How much to weigh the relative number of triangle splits when searching for a BSP surface.
+ Default value = 0.5.
+ imbalanceWeight: How much to weigh the relative triangle imbalance when searching for a BSP surface.
+ Default value = 0.0.
+*/
+void setBSPBuildParameters
+(
+ float logAreaSigmaThreshold,
+ uint32_t testSetSize,
+ float splitWeight,
+ float imbalanceWeight
+);
+
+
+/**
+ Builds the root ExplicitHierarchicalMesh from an RenderMeshAsset.
+ Since an DestructibleAsset contains no hierarchy information, the input mesh must have only one part.
+
+ iHMesh: the ExplicitHierarchicalMesh to build
+ renderMeshAsset: Input RenderMesh asset
+ maxRootDepth: cap the root depth at this value. Re-fracturing of the mesh will occur at this depth. Default = UINT32_MAX
+*/
+bool buildExplicitHierarchicalMeshFromRenderMeshAsset(nvidia::ExplicitHierarchicalMesh& iHMesh, const nvidia::RenderMeshAsset& renderMeshAsset, uint32_t maxRootDepth = UINT32_MAX);
+
+/**
+ Builds the root ExplicitHierarchicalMesh from an DestructibleAsset.
+ Since an DestructibleAsset contains hierarchy information, the explicit mesh formed
+ will have this hierarchy structure.
+
+ iHMesh: the ExplicitHierarchicalMesh to build
+ destructibleAsset: Input Destructible asset
+ maxRootDepth: cap the root depth at this value. Re-fracturing of the mesh will occur at this depth. Default = UINT32_MAX
+*/
+bool buildExplicitHierarchicalMeshFromDestructibleAsset(nvidia::ExplicitHierarchicalMesh& iHMesh, const nvidia::DestructibleAsset& destructibleAsset, uint32_t maxRootDepth = UINT32_MAX);
+
+/**
+ Partitions (and possibly re-orders) the mesh array if the triangles form disjoint islands.
+ mesh: pointer to array of ExplicitRenderTriangles which make up the mesh
+ meshTriangleCount: the size of the meshTriangles array
+ meshPartition: user-allocated array for mesh partition, will be filled with the end elements of contiguous subsets of meshTriangles.
+ meshPartitionMaxCount: size of user-allocated meshPartitionArray
+ padding: distance (as a fraction of the mesh size) to consider vertices touching
+
+ Returns the number of partitions. The value may be larger than meshPartitionMaxCount. In that case, the partitions beyond meshPartitionMaxCount are not recorded.
+*/
+uint32_t partitionMeshByIslands
+(
+ nvidia::ExplicitRenderTriangle* mesh,
+ uint32_t meshTriangleCount,
+ uint32_t* meshPartition,
+ uint32_t meshPartitionMaxCount,
+ float padding = 0.0001f
+);
+
+/**
+ Splits the mesh in chunk[0], forming a hierarchy of fractured meshes in chunks[1...]
+
+ hMesh: the mesh to split
+ iHMeshCore: if this mesh is not empty, chunk 0 will be used as an indestructible "core" of the fractured
+ mesh. That is, it will be subtracted from hMesh, and placed at level 1 of the hierarchy. The remainder
+ of hMesh will be split as usual, creating chunks at level 1 (and possibly deeper).
+ exportCoreMesh: if true, a core mesh chunk will be created from iHMeshCore
+ coreMeshImprintSubmeshIndex: if this is < 0, use the core mesh materials (was applyCoreMeshMaterialToNeighborChunks). Otherwise, use the given submesh
+ meshProcessingParams: describes generic mesh processing directives
+ desc: describes the slicing surfaces (see FractureSliceDesc)
+ collisionDesc: convex hulls will be generated for each chunk using the method. See CollisionDesc.
+ randomSeed: seed for the random number generator, to ensure reproducibility.
+ progressListener: The user must instantiate an IProgressListener, so that this function may report progress of this operation
+ cancel: if not NULL and *cancel is set to true, the root mesh will be restored to its original state, and the function will return at its earliest opportunity. Meant to be set from another thread.
+
+ returns true if successful.
+*/
+bool createHierarchicallySplitMesh
+(
+ nvidia::ExplicitHierarchicalMesh& hMesh,
+ nvidia::ExplicitHierarchicalMesh& iHMeshCore,
+ bool exportCoreMesh,
+ int32_t coreMeshImprintSubmeshIndex,
+ const MeshProcessingParameters& meshProcessingParams,
+ const FractureSliceDesc& desc,
+ const nvidia::CollisionDesc& collisionDesc,
+ uint32_t randomSeed,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel = NULL
+);
+
+/**
+ Chips the mesh in chunk[0], forming a hierarchy of fractured meshes in chunks[1...]
+
+ hMesh: the mesh to split
+ meshProcessingParams: describes generic mesh processing directives
+ desc: describes the slicing surfaces (see FractureCutoutDesc)
+ iCutoutSet: the cutout set to use for fracturing (see CutoutSet)
+ sliceDesc: used if desc.chunkFracturingMethod = SliceFractureCutoutChunks
+ voronoiDesc: used if desc.chunkFracturingMethod = VoronoiFractureCutoutChunks
+ collisionDesc: convex hulls will be generated for each chunk using the method. See CollisionDesc.
+ randomSeed: seed for the random number generator, to ensure reproducibility.
+ progressListener: The user must instantiate an IProgressListener, so that this function may report progress of this operation
+ cancel: if not NULL and *cancel is set to true, the root mesh will be restored to its original state, and the function will return at its earliest opportunity. Meant to be set from another thread.
+
+ returns true if successful.
+*/
+bool createChippedMesh
+(
+ nvidia::ExplicitHierarchicalMesh& hMesh,
+ const MeshProcessingParameters& meshProcessingParams,
+ const FractureCutoutDesc& desc,
+ const CutoutSet& iCutoutSet,
+ const FractureSliceDesc& sliceDesc,
+ const FractureVoronoiDesc& voronoiDesc,
+ const nvidia::CollisionDesc& collisionDesc,
+ uint32_t randomSeed,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel = NULL
+);
+
+/**
+ Splits the chunk in chunk[chunkIndex], forming a hierarchy of fractured chunks using
+ slice-mode fracturing. The chunks will be rearranged so that they are in breadth-first order.
+
+ hMesh: the ExplicitHierarchicalMesh to act upon
+ chunkIndex: index of chunk to be split
+ meshProcessingParams: used to create a BSP for this chunk
+ desc: describes the slicing surfaces (see FractureSliceDesc)
+ collisionDesc: convex hulls will be generated for each chunk using the method. See CollisionDesc.
+ randomSeed: pointer to a seed for the random number generator, to ensure reproducibility. If NULL, the random number generator will not be re-seeded.
+ progressListener: The user must instantiate an IProgressListener, so that this function may report progress of this operation
+ cancel: if not NULL and *cancel is set to true, the root mesh will be restored to its original state, and the function will return at its earliest opportunity. Meant to be set from another thread.
+
+ returns true if successful.
+*/
+bool hierarchicallySplitChunk
+(
+ nvidia::ExplicitHierarchicalMesh& hMesh,
+ uint32_t chunkIndex,
+ const MeshProcessingParameters& meshProcessingParams,
+ const FractureSliceDesc& desc,
+ const nvidia::CollisionDesc& collisionDesc,
+ uint32_t* randomSeed,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel = NULL
+);
+
+/**
+ Splits the chunk in chunk[chunkIndex], forming fractured chunks using
+ Voronoi decomposition fracturing. The chunks will be rearranged so that they are in breadth-first order.
+
+ hMesh: the ExplicitHierarchicalMesh to act upon
+ chunkIndex: index of chunk to be split
+ meshProcessingParams: describes generic mesh processing directives
+ desc: describes the voronoi splitting parameters surfaces (see FractureVoronoiDesc)
+ collisionDesc: convex hulls will be generated for each chunk using the method. See CollisionDesc.
+ randomSeed: pointer to a seed for the random number generator, to ensure reproducibility. If NULL, the random number generator will not be re-seeded.
+ progressListener: The user must instantiate an IProgressListener, so that this function may report progress of this operation
+ cancel: if not NULL and *cancel is set to true, the root mesh will be restored to its original state, and the function will return at its earliest opportunity. Meant to be set from another thread.
+
+ returns true if successful.
+*/
+bool voronoiSplitChunk
+(
+ nvidia::ExplicitHierarchicalMesh& hMesh,
+ uint32_t chunkIndex,
+ const MeshProcessingParameters& meshProcessingParams,
+ const FractureVoronoiDesc& desc,
+ const nvidia::CollisionDesc& collisionDesc,
+ uint32_t* randomSeed,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel = NULL
+);
+
+/**
+ Builds a mesh used for slice fracturing, given the noise parameters and random seed. This function is mostly intended
+ for visualization - to give the user a "typical" slice surface used for fracturing.
+*/
+bool buildSliceMesh
+(
+ nvidia::IntersectMesh& intersectMesh,
+ nvidia::ExplicitHierarchicalMesh& referenceMesh,
+ const physx::PxPlane& slicePlane,
+ const FractureTools::NoiseParameters& noiseParameters,
+ uint32_t randomSeed
+);
+
+/** Instantiates an ExplicitHierarchicalMesh */
+nvidia::ExplicitHierarchicalMesh* createExplicitHierarchicalMesh();
+
+/** Instantiates an ExplicitHierarchicalMesh::ConvexHull */
+nvidia::ExplicitHierarchicalMesh::ConvexHull* createExplicitHierarchicalMeshConvexHull();
+
+PX_POP_PACK
+
+} // namespace FractureTools
+
+#endif // FRACTURE_TOOLS_H
diff --git a/APEX_1.4/shared/internal/include/Link.h b/APEX_1.4/shared/internal/include/Link.h
new file mode 100644
index 00000000..d006d5b1
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/Link.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef LINK_H
+#define LINK_H
+
+#include "PxSimpleTypes.h"
+#include "PxAssert.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
/**
	Intrusive node of a circular doubly-linked list ("ring").

	A Link always belongs to exactly one ring; a solitary node forms a ring of
	one, with both adjacency pointers referencing itself.  adj[0] and adj[1]
	are the two neighbors; the same 0/1 side convention is used by setAdj()
	and getAdj().

	NOTE(review): the implicitly-generated copy constructor/assignment would
	duplicate the adjacency pointers and corrupt the ring - avoid copying
	Link objects.
*/
class Link
{
public:
	// A new node starts solitary: both neighbors point back at this node.
	Link()
	{
		adj[1] = adj[0] = this;
	}

	// Unlinks from any ring before destruction, leaving the remaining ring intact.
	virtual ~Link()
	{
		remove();
	}

	/*
	which = 0: (-A-...-B-link-) + (-this-X-...-Y-) = (-A-...-B-link-this-X-...-Y-)
	which = 1: (-X-...-Y-this-) + (-link-A-...-B-) = (-X-...-Y-this-link-A-...-B-)
	*/
	// Splices the ring containing 'link' into this ring so that 'link' becomes
	// this node's neighbor on side 'which'.  Statement order matters: the two
	// rings' boundary pointers are re-stitched before adj[which] is overwritten.
	void setAdj(uint32_t which, Link* link)
	{
		uint32_t other = (which &= 1) ^ 1;	// 'other' is the side opposite 'which'
		Link* linkAdjOther = link->adj[other];
		adj[which]->adj[other] = linkAdjOther;	// join our old neighbor to link's old neighbor
		linkAdjOther->adj[which] = adj[which];
		adj[which] = link;			// then insert 'link' next to this node
		link->adj[other] = this;
	}

	// Returns the neighbor on side 'which' (only the low bit of 'which' is used).
	Link* getAdj(uint32_t which) const
	{
		return adj[which & 1];
	}

	// Detaches this node from its ring (its two neighbors are joined to each
	// other) and leaves it solitary.  Safe to call on an already-solitary node.
	void remove()
	{
		adj[1]->adj[0] = adj[0];
		adj[0]->adj[1] = adj[1];
		adj[1] = adj[0] = this;
	}

	// True iff this node is in a ring by itself.
	bool isSolitary() const
	{
		PX_ASSERT((adj[0] == this) == (adj[1] == this));	// both sides must agree
		return adj[0] == this;
	}

protected:
	Link* adj[2];	// neighbors on side 0 and side 1 of this node
};
+
+}
+} // end namespace nvidia::apex
+
+
+#endif // #ifndef LINK_H
diff --git a/APEX_1.4/shared/internal/include/ParamArray.h b/APEX_1.4/shared/internal/include/ParamArray.h
new file mode 100644
index 00000000..9f2788e2
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/ParamArray.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef PARAM_ARRAY_H
+#define PARAM_ARRAY_H
+
+#include "nvparameterized/NvParameterized.h"
+#include "PsUserAllocated.h"
+#include "PxAssert.h"
+
+namespace nvidia
+{
+namespace apex
+{
+
// Memory-layout mirror of an NvParameterized dynamic-array member.
// ParamArray reads elements directly through this struct (via buf) instead of
// going through per-element handle access.
struct ParamDynamicArrayStruct
{
	void* buf;          // contiguous element storage
	bool isAllocated;   // presumably true when buf is heap-allocated by the NvParameterized system — confirm
	int elementSize;    // size in bytes of one element (checked against sizeof(ElemType) by ParamArray)
	int arraySizes[1];  // array size(s); declared with one entry here — TODO confirm it is variable-length in practice
};
+
+
/*
	ParamArray - typed convenience wrapper over an NvParameterized dynamic array.

	Pairs an NvParameterized::Handle (used for size queries and resizing, which
	keep the parameterized object's bookkeeping consistent) with direct access
	to the raw element buffer via ParamDynamicArrayStruct (used for fast
	element reads/writes without per-element handle traffic).

	NOTE(review): ElemType must match the array's declared element type; this
	is only verified by the PX_ASSERT on elementSize.
*/
template <class ElemType>
class ParamArray : public physx::UserAllocated
{
public:

	// Default-constructed wrapper is unbound; call init() before use.
	ParamArray() : mParams(NULL), mArrayHandle(0), mArrayStruct(NULL) {}

	// Binds to the array parameter named arrayName on params.
	ParamArray(NvParameterized::Interface* params, const char* arrayName, ParamDynamicArrayStruct* arrayStruct) :
		mParams(params),
		mArrayHandle(*params),
		mArrayStruct(arrayStruct)
	{
		PX_ASSERT(mParams);

		mParams->getParameterHandle(arrayName, mArrayHandle);

		PX_ASSERT(mArrayStruct->elementSize == sizeof(ElemType));
	}

	// Binds using an already-resolved handle.
	ParamArray(NvParameterized::Interface* params, const NvParameterized::Handle& handle, ParamDynamicArrayStruct* arrayStruct) :
		mParams(params),
		mArrayHandle(handle),
		mArrayStruct(arrayStruct)
	{
		PX_ASSERT(mArrayStruct->elementSize == sizeof(ElemType));
	}

	// Late binding for a default-constructed wrapper; returns false if already bound.
	PX_INLINE bool init(NvParameterized::Interface* params, const char* arrayName, ParamDynamicArrayStruct* arrayStruct)
	{
		if (mParams == NULL && mArrayStruct == NULL)
		{
			mParams = params;
			mArrayStruct = arrayStruct;
			mArrayHandle.setInterface(mParams);
			mParams->getParameterHandle(arrayName, mArrayHandle);

			PX_ASSERT(mArrayStruct->elementSize == sizeof(ElemType));

			return true;
		}
		return false;
	}

	// Current element count; 0 if the wrapper is unbound.
	PX_INLINE uint32_t size() const
	{
		// this only works for fixed structs
		//return (uint32_t)mArrayHandle.parameterDefinition()->arraySize(0);
		int outSize = 0;
		if (mParams != NULL)
		{
			PX_ASSERT(mArrayHandle.getConstInterface() == mParams);
			mArrayHandle.getArraySize(outSize);
		}
		return (uint32_t)outSize;
	}

	/**
	Returns a constant reference to an element in the sequence.
	*/
	PX_INLINE const ElemType& operator[](unsigned int n) const
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);
#if _DEBUG
		uint32_t NxParamArraySize = 0;
		mArrayHandle.getArraySize((int&)NxParamArraySize);
		PX_ASSERT(NxParamArraySize > n);
#endif
		return ((ElemType*)mArrayStruct->buf)[n];
	}

	/**
	Returns a reference to an element in the sequence.
	*/
	PX_INLINE ElemType& operator[](unsigned int n)
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);
		//NvParameterized::Handle indexHandle;
		//arrayHandle.getChildHandle( n, indexHandle );
#if _DEBUG
		uint32_t NxParamArraySize = 0;
		mArrayHandle.getArraySize((int&)NxParamArraySize);
		PX_ASSERT(NxParamArraySize > n);
#endif
		return ((ElemType*)mArrayStruct->buf)[n];
	}

	// resize is marginally useful because the ElemType doesn't have proper
	// copy constructors, and if strings are within ElemType that doesn't work well
	PX_INLINE void resize(unsigned int n)
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);
		PX_ASSERT(mParams == mArrayHandle.getConstInterface());
		mArrayHandle.resizeArray((int32_t)n);
	}

	// pushBack is marginally useful because the ElemType doesn't have proper
	// copy constructors, and if strings are within ElemType that doesn't work well
	PX_INLINE void pushBack(const ElemType& x)
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);

		int32_t paramArraySize = 0;

		mArrayHandle.getArraySize(paramArraySize);
		mArrayHandle.resizeArray(paramArraySize + 1);

		// Write after the resize, since resizeArray may reallocate buf.
		((ElemType*)mArrayStruct->buf)[(uint32_t)paramArraySize] = x;
	}

	// Grows the array by one and returns a reference to the new (unwritten) slot.
	PX_INLINE ElemType& pushBack()
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);

		int32_t paramArraySize = 0;

		mArrayHandle.getArraySize(paramArraySize);
		mArrayHandle.resizeArray(paramArraySize + 1);

		return ((ElemType*)mArrayStruct->buf)[(uint32_t)paramArraySize];
	}

	// Removes the element at position by swapping in the last element, then
	// shrinking by one.  O(1), but does not preserve element order.
	PX_INLINE void replaceWithLast(unsigned position)
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);

		uint32_t arraySize = size();
		PX_ASSERT(position < arraySize);
		if (position != arraySize - 1)
		{
			// TODO should we call the destructor here or not?
			//(*this)[position].~ElemType();

			ElemType elem = back();

			// put the replaced one in the back (possibly to be deleted)
			(*this)[arraySize - 1] = (*this)[position];

			(*this)[position] = elem;
		}
		popBack();
	}

	PX_INLINE bool isEmpty() const
	{
		return size() == 0;
	}

	// Raw-pointer iteration over the underlying buffer.
	PX_INLINE ElemType* begin()
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);
		return &((ElemType*)mArrayStruct->buf)[0];
	}

	PX_INLINE ElemType* end()
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);
		return &((ElemType*)mArrayStruct->buf)[size()];
	}

	PX_INLINE ElemType& front()
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);
		return ((ElemType*)mArrayStruct->buf)[0];
	}

	PX_INLINE ElemType& back()
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);
		return ((ElemType*)mArrayStruct->buf)[size() - 1];
	}

	PX_INLINE void clear()
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);
		resize(0);
	}

	// NOTE(review): calling popBack() on an empty array passes the wrapped value
	// size()-1 (0xFFFFFFFF) to resize — callers must ensure the array is non-empty.
	PX_INLINE void popBack()
	{
		PX_ASSERT(mParams != NULL && mArrayStruct != NULL);
		resize(size() - 1);
	}

private:
	NvParameterized::Interface* mParams;       // owning parameterized object
	NvParameterized::Handle mArrayHandle;      // handle to the array parameter on mParams
	ParamDynamicArrayStruct* mArrayStruct;     // raw view of the array's storage
};
+
+}
+} // end namespace nvidia::apex
+
+#endif
+
diff --git a/APEX_1.4/shared/internal/include/PvdNxParamSerializer.h b/APEX_1.4/shared/internal/include/PvdNxParamSerializer.h
new file mode 100644
index 00000000..645dd5f0
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/PvdNxParamSerializer.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#if TODO_PVD_NXPARAM_SERIALIZER
+
+#ifndef PVD_NXPARAM_SERIALIZER
+#define PVD_NXPARAM_SERIALIZER
+#include "PxSimpleTypes.h"
+#include "nvparameterized/NvParameterized.h"
+
+namespace PVD
+{
+class PvdDataStream;
+}
+
+namespace NvParameterized
+{
+class Interface;
+}
+
+namespace PvdNxParamSerializer
+{
+
+NvParameterized::ErrorType
+traverseParamDefTree(NvParameterized::Interface& obj,
+ PVD::PvdDataStream* remoteDebugger,
+ void* curPvdObj,
+ NvParameterized::Handle& handle);
+
+}; // namespace PvdNxParamSerializer
+
+#endif // #ifndef PVD_NXPARAM_SERIALIZER
+
+#endif \ No newline at end of file
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSG.h b/APEX_1.4/shared/internal/include/authoring/ApexCSG.h
new file mode 100644
index 00000000..711d90de
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSG.h
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_H
+#define APEX_CSG_H
+
+
+#include "ApexUsingNamespace.h"
+#include "RenderMeshAsset.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+
+class UserRandom
+{
+public:
+ virtual uint32_t getInt() = 0;
+ virtual float getReal(float min, float max) = 0;
+};
+
+
// Parameters controlling BSP construction in IApexBSP::fromMesh.
// Default-constructed instances carry the documented default values.
struct BSPBuildParameters
{
	/*
	Used for searching splitting planes.
	If NULL, a default random # generator will be used.
	*/
	UserRandom* rnd;

	/*
	Mesh pre-processing. The mesh is initially scaled to fit in a unit cube, then (if gridSize is not
	zero), the vertices of the scaled mesh are snapped to a grid of size 1/gridSize.
	A power of two is recommended.
	Default value = 65536.
	*/
	uint32_t snapGridSize;

	/*
	At each step in the tree building process, the surface with maximum triangle area is compared
	to the other surface triangle areas. If the maximum area surface is far from the "typical" set of
	surface areas, then that surface is chosen as the next splitting plane. Otherwise, a random
	test set is chosen and a winner determined based upon the weightings below.
	The value logAreaSigmaThreshold determines how "atypical" the maximum area surface must be to
	be chosen in this manner.
	Default value = 2.0.
	*/
	float logAreaSigmaThreshold;

	/*
	Larger values of testSetSize may find better BSP trees, but will take more time to create.
	testSetSize = 0 is treated as infinity (all surfaces will be tested for each branch).
	Default value = 10.
	*/
	uint32_t testSetSize;

	/*
	How much to weigh the relative number of triangle splits when searching for a BSP surface.
	Default value = 0.5.
	*/
	float splitWeight;

	/*
	How much to weigh the relative triangle imbalance when searching for a BSP surface.
	Default value = 0.
	*/
	float imbalanceWeight;

	/*
	The BSP representation of the mesh will be transformed from the space of the mesh input into IApexBSP::fromMesh
	using this transform. By default, this is the identity transformation. If the user wishes to use a
	different transformation, it may be set using internalTransform. However, note that when combining
	BSPs using the IApexBSP::combine function, the two BSPs should use the same internal transform. If they don't,
	the resulting behavior is not specified. When a mesh is created using IApexBSP::toMesh, the inverse
	of the internal transform is applied to put the mesh back into the original space.

	A special value for internalTransform is the zero 4x4 matrix. If this is used, an internal transform
	will be calculated in the IApexBSP::fromMesh function. This may be read using IApexBSP::getInternalTransform(),
	and applied when creating other BSPs which are to be used in combine operations.
	*/
	physx::PxMat44 internalTransform;

	/*
	If false, the triangles associated with this BSP will not be kept. The BSP may be used for CSG, but will
	not provide any mesh data.

	Default = true
	*/
	bool keepTriangles;

	// Constructor simply applies the defaults documented above.
	BSPBuildParameters()
	{
		setToDefault();
	}

	// Resets every field to its documented default value.
	void setToDefault()
	{
		rnd = NULL;
		snapGridSize = 65536;
		logAreaSigmaThreshold = (float)2.0;
		testSetSize = 10;
		splitWeight = (float)0.5;
		imbalanceWeight = 0;
		internalTransform = physx::PxMat44(physx::PxIdentity);
		keepTriangles = true;
	}
};
+
// Numerical tolerances used throughout BSP building, merging, and mesh
// extraction.  All values are unitless (relative to mesh size) except
// 'angular', which is in radians.  See IApexBSP::setTolerances.
struct BSPTolerances
{
	/*
	A unitless value (relative to mesh size) used to determine mesh triangle coplanarity during BSP building.
	Default value = 1.0e-6.
	*/
	float linear;

	/*
	A threshold angle (in radians) used to determine mesh triangle coplanarity during BSP building.
	Default value = 1.0e-5.
	*/
	float angular;

	/*
	A unitless value (relative to mesh size) used to determine triangle splitting during BSP building.
	Default value = 1.0e-9.
	*/
	float base;

	/*
	A unitless value (relative to mesh size) used to determine a skin width for mesh clipping against BSP
	nodes during mesh creation from the BSP.
	Default value = 1.0e-13.
	*/
	float clip;

	/*
	Mesh postprocessing. A unitless value (relative to mesh size) used to determine merge tolerances for
	mesh clean-up after triangles have been clipped to BSP leaves. A value of 0.0 disables this feature.
	Default value = 1.0e-6.
	*/
	float cleaning;

	// Constructor applies the defaults documented above.
	BSPTolerances()
	{
		setToDefault();
	}

	// Resets every tolerance to its documented default value.
	void setToDefault()
	{
		linear = (float)1.0e-6;
		angular = (float)1.0e-5;
		base = (float)1.0e-9;
		clip = (float)1.0e-13;
		cleaning = (float)1.0e-6;
	}
};
+
+extern BSPTolerances gDefaultTolerances;
+
/*
	Boolean set operations for IApexBSP::op(), applied to a combined BSP whose
	operands are A (first) and B (second).

	The values 0x0 - 0xF enumerate all sixteen boolean functions of two sets.
	Note the pairing convention: each even/odd pair of codes are complements of
	one another (e.g. Union/Union_Complement), so the complement of any
	operation code is (code ^ 1).
*/
struct Operation
{
	enum Enum
	{
		Empty_Set = 0x0, // constant
		All_Space = 0x1, // constant
		Set_A = 0x2, // unary
		Set_A_Complement = 0x3, // unary
		Set_B = 0x4, // unary
		Set_B_Complement = 0x5, // unary
		Exclusive_Or = 0x6,
		Equivalent = 0x7,
		Intersection = 0x8,
		Intersection_Complement = 0x9,
		A_Minus_B = 0xA,
		A_Implies_B = 0xB,
		B_Minus_A = 0xC,
		B_Implies_A = 0xD,
		Union = 0xE,
		Union_Complement = 0xF,

		NOP = 0x80000000 // no op
	};
};
+
+
// Bit flags for IApexBSP::visualize().
// SingleRegion restricts drawing to one region — presumably the one selected
// by visualize()'s index parameter; confirm against the implementation.
struct BSPVisualizationFlags
{
	enum Enum
	{
		OutsideRegions = (1 << 0),
		InsideRegions = (1 << 1),

		SingleRegion = (1 << 16)
	};
};
+
+
// Classification of a BSP's current state, returned by IApexBSP::getType().
struct BSPType
{
	enum Enum
	{
		Empty_Set, // BSP has a single node, which is an outside leaf. Therefore the inside region is the empty set.
		All_Space, // BSP has a single node, which is an inside leaf. Therefore the inside region is all of space.
		Nontrivial, // BSP has more than a single node.
		Combined, // BSP is the combination of two BSPs, ready for a CSG operation to define a single BSP.

		BSPTypeCount
	};
};
+
+
/*
	Memory cache for BSP construction. Not global, so that concurrent calculations can use different pools.

	Instances are created with createBSPMemCache() and destroyed with release();
	the constructor and destructor are protected to enforce this.
*/
class IApexBSPMemCache
{
public:

	/*
	Deallocate all memory buffers.
	*/
	virtual void clearAll() = 0;

	/*
	Deallocate only temporary data buffers.
	*/
	virtual void clearTemp() = 0;

	/*
	Clean up.
	*/
	virtual void release() = 0;

protected:

	IApexBSPMemCache() {}
	virtual ~IApexBSPMemCache() {}
};
+
+
/*
	BSP interface.

	Convert a mesh into a BSP, perform boolean operations between BSPs, and extract the resulting mesh.

	Instances are created with createBSP() and destroyed with release();
	the constructor and destructor are protected to enforce this.
	*/

class IApexBSP
{
public:
	/*
	Set the tolerances used for various aspects of BSP creation, merging, mesh creation, etc.
	Default values are those in BSPTolerances.
	*/
	virtual void setTolerances(const BSPTolerances& tolerances) = 0;

	/*
	Construct a BSP from the given mesh, using the given parameters.
	*/
	virtual bool fromMesh(const nvidia::ExplicitRenderTriangle* mesh, uint32_t meshSize, const BSPBuildParameters& params, nvidia::IProgressListener* progressListener = NULL, volatile bool* cancel = NULL) = 0;

	/*
	Construct a BSP from a convex polyhedron defined by a list of planes.
	See the definition of internalTransform in BSPBuildParameters. The same meaning applies here.
	The mesh array is optional. If included, the single internal leaf created will be associated with these triangles.
	*/
	virtual bool fromConvexPolyhedron(const physx::PxPlane* poly, uint32_t polySize, const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxIdentity), const nvidia::ExplicitRenderTriangle* mesh = NULL, uint32_t meshSize = 0) = 0;

	/*
	Build a combination of two BSPs (this and the passed-in bsp), upon which boolean operations of the two can be performed.
	*/
	virtual bool combine(const IApexBSP& bsp) = 0;

	/*
	Build a BSP resulting from a boolean operation upon a combination.
	Note: you may do this "in place," i.e.
	    bsp.op( bsp, operation );
	... in this case, bsp will no longer be a combined BSP.
	*/
	virtual bool op(const IApexBSP& combinedBSP, Operation::Enum operation) = 0;

	/*
	This BSP is changed to its complement (inside <-> outside)
	*/
	virtual bool complement() = 0;

	/*
	The transform from mesh space to BSP space. This may be used in the BSPBuildParameters passed into fromMesh,
	in order to match the transform used for a combining mesh.
	*/
	virtual physx::PxMat44 getInternalTransform() const = 0;

	/*
	Returns an enum characterizing the BSP. See BSPType.
	*/
	virtual BSPType::Enum getType() const = 0;

	/*
	Returns the total surface area and volume of the regions designated to be on the given side.
	If this is a combined BSP, then you must provide a merge operation. In this case,
	the BSP will not actually be merged, but the resulting area will be that of the
	merged BSP you would get if you did perform the merge with the op() function.
	If this is not a combined BSP and you provide a merge operation, it will be ignored.

	If the volume or area of one of the leaves in consideration is infinite, then this function returns false. Otherwise it returns true.
	*/
	virtual bool getSurfaceAreaAndVolume(float& area, float& volume, bool inside, Operation::Enum operation = Operation::NOP) const = 0;

	/*
	Determines if given point is in an outside or inside leaf.
	If this is a combined BSP, then you must provide a merge operation. In this case,
	the BSP will not actually be merged, but the result will be that of the
	merged BSP you would get if you did perform the merge with the op() function.
	If this is not a combined BSP and you provide a merge operation, it will be ignored.
	*/
	virtual bool pointInside(const physx::PxVec3& point, Operation::Enum operation = Operation::NOP) const = 0;

	/*
	Construct a mesh from the current BSP.
	*/
	virtual bool toMesh(physx::Array<nvidia::ExplicitRenderTriangle>& mesh) const = 0;

	/*
	Deep copy of given bsp.
	Input bsp may be the same as *this.
	The transform tm will be applied.
	If the internalTransform given is not zero, it will become the new internal transform. The mesh will be scaled internally appropriately with the given tm.
	A combined BSP may be copied.
	*/
	virtual void copy(const IApexBSP& bsp, const physx::PxMat44& tm = physx::PxMat44(physx::PxIdentity), const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxZero)) = 0;

	/*
	Decompose into disjoint islands.
	This BSP is not affected.
	The BSP is split into a set of BSPs, each representing one connected island.
	The set of BSPs is returned as the first BSP in the list, with access
	to the remainder of the list through the getNext() and getPrev() functions.
	The BSP must not be a combined BSP (getType() != BSPType::Combined).
	Returns this if the BSP is already an island.
	Returns NULL if the operation fails (e.g. this is a combined BSP).
	*/
	virtual IApexBSP* decomposeIntoIslands() const = 0;

	/**
	Utility to replace the submesh on a set of interior triangles.
	*/
	virtual void replaceInteriorSubmeshes(uint32_t frameCount, uint32_t* frameIndices, uint32_t submeshIndex) = 0;

	/*
	Deletes the triangles associated with this BSP. The BSP may be used for CSG, but will not provide any mesh data.
	*/
	virtual void deleteTriangles() = 0;

	/*
	If a BSP has been decomposed into islands, getNext() and getPrev() will iterate through the
	BSPs in the decomposition. NULL is returned if an attempt is made to iterate past
	the beginning or end of the list.
	*/
	virtual IApexBSP* getNext() const = 0;
	virtual IApexBSP* getPrev() const = 0;

	/*
	Serialization.
	*/
	virtual void serialize(physx::PxFileBuf& stream) const = 0;
	virtual void deserialize(physx::PxFileBuf& stream) = 0;

	/*
	Visualization.
	Set flags to bits from BSPVisualizationFlags::Enum.
	*/
	virtual void visualize(nvidia::RenderDebugInterface& debugRender, uint32_t flags, uint32_t index = 0) const = 0;

	/*
	Clean up.
	*/
	virtual void release() = 0;

protected:

	IApexBSP() {}
	virtual ~IApexBSP() {}
};
+
+
+// CSG Tools API
+
+// Create a BSP memory cache to share among several BSPs
+IApexBSPMemCache*
+createBSPMemCache();
+
+// Instantiate a BSP. If cache = NULL, the BSP will create and own its own cache.
+IApexBSP*
+createBSP(IApexBSPMemCache* memCache = NULL, const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxIdentity));
+
+}; // namespace ApexCSG
+
+#endif
+
+#endif // #ifndef APEX_CSG_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGDefs.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGDefs.h
new file mode 100644
index 00000000..ebfb105a
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGDefs.h
@@ -0,0 +1,1064 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_DEFS_H
+#define APEX_CSG_DEFS_H
+
+#include "ApexUsingNamespace.h"
+#include "ApexSharedUtils.h"
+#include "ApexRand.h"
+#include "Link.h"
+#include "authoring/ApexCSG.h"
+#include "authoring/ApexCSGMath.h"
+#include "authoring/ApexGSA.h"
+#include "PsUserAllocated.h"
+#include "ApexGSA.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+
+// Binary tree node
+class BinaryNode
+{
+public:
+ PX_INLINE BinaryNode();
+
+ PX_INLINE void setChild(uint32_t index, BinaryNode* child);
+
+ PX_INLINE void detach();
+
+ PX_INLINE BinaryNode* getParent() const
+ {
+ return m_parent;
+ }
+
+ PX_INLINE BinaryNode* getChild(uint32_t index) const
+ {
+ PX_ASSERT((index & 1) == index);
+ return m_children[index & 1];
+ }
+
+ PX_INLINE uint32_t getIndex() const
+ {
+ return m_index;
+ }
+
+protected:
+ BinaryNode* m_parent;
+ BinaryNode* m_children[2];
+ uint32_t m_index; // index of this node in parent (0xFFFFFFFF => not attached)
+};
+
+PX_INLINE
+BinaryNode::BinaryNode()
+{
+ m_index = 0xFFFFFFFF;
+ m_children[1] = m_children[0] = m_parent = NULL;
+}
+
+PX_INLINE void
+BinaryNode::setChild(uint32_t index, BinaryNode* child)
+{
+ index &= 1;
+ BinaryNode*& oldChild = m_children[index];
+
+ if (oldChild != NULL)
+ {
+ oldChild->detach();
+ }
+
+ oldChild = child;
+
+ if (child != NULL)
+ {
+ child->detach();
+ child->m_parent = this;
+ child->m_index = index;
+ }
+}
+
+PX_INLINE void
+BinaryNode::detach()
+{
+ if (m_parent != NULL)
+ {
+ PX_ASSERT(m_parent->getChild(m_index) == this);
+ m_parent->m_children[m_index & 1] = NULL;
+ m_parent = NULL;
+ m_index = 0xFFFFFFFF;
+ }
+}
+
+
+// CSG mesh representation
+
// Two-component texture coordinate built on the CSG math vector type,
// convertible from float or double pairs.
class UV : public Vec<Real, 2>
{
public:

	PX_INLINE UV() {}
	// Construct from a pointer to two floats (u, v).
	PX_INLINE UV(const float* uv)
	{
		set((Real)uv[0], (Real)uv[1]);
	}
	// Construct from a pointer to two doubles (u, v).
	PX_INLINE UV(const double* uv)
	{
		set((Real)uv[0], (Real)uv[1]);
	}
	PX_INLINE UV& operator = (const UV& uv)
	{
		el[0] = uv.el[0];
		el[1] = uv.el[1];
		return *this;
	}

	PX_INLINE void set(Real u, Real v)
	{
		el[0] = u;
		el[1] = v;
	}

	// Named accessors: el[0] = u, el[1] = v.
	PX_INLINE const Real& u() const
	{
		return el[0];
	}
	PX_INLINE const Real& v() const
	{
		return el[1];
	}
};
+
+class Color : public Vec<Real, 4>
+{
+public:
+
+ PX_INLINE Color() {}
+ PX_INLINE Color(const uint32_t c);
+ PX_INLINE Color& operator = (const Color& c)
+ {
+ el[0] = c.el[0];
+ el[1] = c.el[1];
+ el[2] = c.el[2];
+ el[3] = c.el[3];
+ return *this;
+ }
+
+ PX_INLINE void set(Real r, Real g, Real b, Real a)
+ {
+ el[0] = r;
+ el[1] = g;
+ el[2] = b;
+ el[3] = a;
+ }
+
+ PX_INLINE uint32_t toInt() const;
+
+ PX_INLINE const Real& r() const
+ {
+ return el[0];
+ }
+ PX_INLINE const Real& g() const
+ {
+ return el[1];
+ }
+ PX_INLINE const Real& b() const
+ {
+ return el[2];
+ }
+ PX_INLINE const Real& a() const
+ {
+ return el[3];
+ }
+};
+
+PX_INLINE
+Color::Color(const uint32_t c)
+{
+ const Real recip255 = 1 / (Real)255;
+ set((Real)(c & 0xFF)*recip255, (Real)((c >> 8) & 0xFF)*recip255, (Real)((c >> 16) & 0xFF)*recip255, (Real)(c >> 24)*recip255);
+}
+
+PX_INLINE uint32_t
+Color::toInt() const
+{
+ return (uint32_t)((int)(255 * el[3] + (Real)0.5)) << 24 | (uint32_t)((int)(255 * el[2] + (Real)0.5)) << 16 | (uint32_t)((int)(255 * el[1] + (Real)0.5)) << 8 | (uint32_t)((int)(255 * el[0] + (Real)0.5));
+}
+
// Per-vertex shading attributes carried alongside Triangle (which stores only
// geometry).  One VertexData per triangle corner.
struct VertexData
{
	Dir normal;
	Dir tangent;
	Dir binormal;
	UV uv[nvidia::VertexFormat::MAX_UV_COUNT];
	Color color;
};
+
// Internal CSG triangle: vertex positions plus derived quantities (normal,
// area) and the render-mesh bookkeeping carried through CSG operations.
struct Triangle
{
	Pos vertices[3];
	Dir normal;   // unit normal; valid after calculateQuantities()
	Real area;    // triangle area; valid after calculateQuantities()
	int32_t submeshIndex;
	uint32_t smoothingMask;
	uint32_t extraDataIndex;

	// Imports an authoring triangle: geometry into this struct, per-corner
	// shading attributes into vertexData[3], then recomputes normal/area.
	void fromExplicitRenderTriangle(VertexData vertexData[3], const nvidia::ExplicitRenderTriangle& tri)
	{
		for (unsigned i = 0; i < 3; ++i)
		{
			vertices[i] = Pos(tri.vertices[i].position);
			vertexData[i].normal = Dir(tri.vertices[i].normal);
			vertexData[i].tangent = Dir(tri.vertices[i].tangent);
			vertexData[i].binormal = Dir(tri.vertices[i].binormal);
			for (unsigned j = 0; j < nvidia::VertexFormat::MAX_UV_COUNT; ++j)
			{
				vertexData[i].uv[j] = UV(&tri.vertices[i].uv[j][0]);
			}
			vertexData[i].color.set((Real)tri.vertices[i].color.r, (Real)tri.vertices[i].color.g, (Real)tri.vertices[i].color.b, (Real)tri.vertices[i].color.a);
		}
		submeshIndex = tri.submeshIndex;
		smoothingMask = tri.smoothingMask;
		extraDataIndex = tri.extraDataIndex;
		calculateQuantities();
	}

	// Exports back to an authoring triangle, narrowing Real down to float.
	void toExplicitRenderTriangle(nvidia::ExplicitRenderTriangle& tri, const VertexData vertexData[3]) const
	{
		for (unsigned i = 0; i < 3; ++i)
		{
			tri.vertices[i].position = ApexCSG::GSA::toPxVec3(vertices[i]);
			tri.vertices[i].normal = ApexCSG::GSA::toPxVec3(vertexData[i].normal);
			tri.vertices[i].tangent = ApexCSG::GSA::toPxVec3(vertexData[i].tangent);
			tri.vertices[i].binormal = ApexCSG::GSA::toPxVec3(vertexData[i].binormal);
			for (unsigned j = 0; j < nvidia::VertexFormat::MAX_UV_COUNT; ++j)
			{
				tri.vertices[i].uv[j].set((float)vertexData[i].uv[j][0], (float)vertexData[i].uv[j][1]);
			}
			tri.vertices[i].color.set((float)vertexData[i].color.r(), (float)vertexData[i].color.g(), (float)vertexData[i].color.b(), (float)vertexData[i].color.a());
		}
		tri.submeshIndex = submeshIndex;
		tri.smoothingMask = smoothingMask;
		tri.extraDataIndex = extraDataIndex;
	}

	// Recomputes normal and area.  The normal is accumulated from all three
	// edge cross products (each equals twice the area vector for an exact
	// triangle); half the accumulated length is the area.
	void calculateQuantities()
	{
		const Dir e0 = Dir(vertices[1] - vertices[0]);
		const Dir e1 = Dir(vertices[2] - vertices[1]);
		const Dir e2 = Dir(vertices[0] - vertices[2]);
		normal = (e0^e1) + (e1^e2) + (e2^e0);
		area = (Real)0.5 * normal.normalize();
	}

	// Applies tm to the vertex positions and refreshes normal/area.
	void transform(const Mat4Real& tm)
	{
		for (int i = 0; i < 3; ++i)
		{
			vertices[i] = tm*vertices[i];
		}
		calculateQuantities();
	}
};
+
// A vertex position stored as a node in an intrusive circular list (used for
// ordered vertex loops).  getAdj is a covariant convenience over Link::getAdj.
struct LinkedVertex : public nvidia::Link
{
	LinkedVertex* getAdj(uint32_t which) const
	{
		return (LinkedVertex*)nvidia::Link::getAdj(which);
	}

	Pos vertex;
};
+
// A 2D edge segment (endpoints v[0], v[1]) stored as a node in an intrusive
// circular list, forming edge loops; loopID tags the loop it belongs to
// (-1 = unassigned).  setAdj/remove override the list operations to keep
// shared endpoints of neighboring edges consistent.
// NOTE(review): setAdj/remove hide the non-virtual base-class members, so
// calls made through a nvidia::Link* will skip the endpoint bookkeeping —
// confirm all call sites use LinkedEdge2D* directly.
struct LinkedEdge2D : public nvidia::Link
{
	LinkedEdge2D() : loopID(-1) {}
	~LinkedEdge2D()
	{
		remove();
	}

	void setAdj(uint32_t which, LinkedEdge2D* link)
	{
		// Ensure neighboring links' adjoining vertices are equal
		which &= 1;
		const uint32_t other = which ^ 1;
		v[which] = link->v[other];
		((LinkedEdge2D*)link->adj[other])->v[which] = ((LinkedEdge2D*)adj[which])->v[other];
		nvidia::Link::setAdj(which, link);
	}

	LinkedEdge2D* getAdj(uint32_t which) const
	{
		return (LinkedEdge2D*)nvidia::Link::getAdj(which);
	}

	void remove()
	{
		// Ensure neighboring links' adjoining vertices are equal
		// (the gap left behind is closed at the midpoint of this edge)
		((LinkedEdge2D*)adj[0])->v[1] = ((LinkedEdge2D*)adj[1])->v[0] = (Real)0.5 * (v[0] + v[1]);
		nvidia::Link::remove();
	}

	Vec2Real v[2];   // edge endpoints
	int32_t loopID;  // owning loop index, -1 if unassigned
};
+
// A coplanar set of mesh triangles: references a plane and a contiguous range
// [triangleIndexStart, triangleIndexStop) of triangle indices.
struct Surface
{
	uint32_t planeIndex;
	uint32_t triangleIndexStart;
	uint32_t triangleIndexStop;
	float totalTriangleArea; // Keeping it 32-bit real, since we don't need precision here
};
+
// A BSP leaf region.  'side' designates which side the leaf is on — presumably
// inside vs. outside; confirm against the BSP implementation.
struct Region
{
	uint32_t side;

	// Not to be serialized, but we have this extra space since Region is used in a union with Surface
	uint32_t tempIndex1;
	uint32_t tempIndex2;
	uint32_t tempIndex3;
};
+
+
// Interpolator - calculates interpolation data for triangle quantities.
// For each scalar vertex attribute (VertexField) it stores one Plane whose
// signed evaluation at a point reproduces the linearly-interpolated attribute
// value across the triangle.  This lets clipped/split triangles regenerate
// vertex data at arbitrary points on the original triangle's plane.
class Interpolator
{
public:

	// One entry per scalar field of VertexData, in its memory-layout order
	// (see s_offsets below).
	enum VertexField
	{
		Normal_x, Normal_y, Normal_z,
		Tangent_x, Tangent_y, Tangent_z,
		Binormal_x, Binormal_y, Binormal_z,
		UV0_u, UV0_v, UV1_u, UV1_v, UV2_u, UV2_v, UV3_u, UV3_v,
		Color_r, Color_g, Color_b, Color_a,

		VertexFieldCount
	};

	Interpolator() {}
	// Builds frames that reproduce tri's per-vertex data (see setFromTriangle).
	Interpolator(const Triangle& tri, const VertexData vertexData[3])
	{
		setFromTriangle(tri, vertexData);
	}
	// Builds constant frames for a flat (planar-projected) parameterization (see setFlat).
	Interpolator(const Dir tangents[3], const Vec<Real, 2>& uvScale)
	{
		setFlat(tangents, uvScale);
	}

	PX_INLINE void setFromTriangle(const Triangle& tri, const VertexData vertexData[3]);
	PX_INLINE void setFlat(const Dir tangents[3], const Vec<Real, 2>& uvScale);

	// Evaluates every field's frame at point, writing the results into vertexData.
	PX_INLINE void interpolateVertexData(VertexData& vertexData, const Pos& point) const;

	// Field-by-field tolerance comparison of two interpolators (see framesEqual).
	PX_INLINE bool equals(const Interpolator& interpolator, Real frameDirTol, Real frameScaleTol, Real dirTol, Real uvTol, Real colorTol) const;

	PX_INLINE void transform(Interpolator& transformedInterpolator, const Mat4Real& tm, const Mat4Real& cofTM) const;

	void serialize(physx::PxFileBuf& stream) const;
	void deserialize(physx::PxFileBuf& stream, uint32_t version);

private:
	ApexCSG::Plane m_frames[VertexFieldCount];  // one interpolation plane per scalar field
	static size_t s_offsets[VertexFieldCount];  // byte offset of each field within VertexData

	friend class InterpolatorBuilder;
};
+
// Solves, for each scalar vertex field, the linear function of space that takes
// the three vertex values at the triangle's corners.  Degenerate triangles
// (normal length^2 below EPS_REAL^2) get all-zero frames.  Near-zero solved
// gradients and offsets (below 100*EPS thresholds) are snapped to exactly zero.
PX_INLINE void
Interpolator::setFromTriangle(const Triangle& tri, const VertexData vertexData[3])
{
	const Pos& p0 = tri.vertices[0];
	const Pos& p1 = tri.vertices[1];
	const Pos& p2 = tri.vertices[2];
	const Dir p1xp2 = Dir(p1) ^ Dir(p2);
	const Dir p2xp0 = Dir(p2) ^ Dir(p0);
	const Dir p0xp1 = Dir(p0) ^ Dir(p1);
	const Dir n = p1xp2 + p2xp0 + p0xp1;
	const Real n2 = n | n;
	if (n2 < EPS_REAL * EPS_REAL)
	{
		// Degenerate triangle: zero every frame and bail out.
		for (uint32_t i = 0; i < VertexFieldCount; ++i)
		{
			m_frames[i].set(Dir((Real)0), 0);
		}
		return;
	}

	// Calculate inverse 4x4 matrix (only need first three columns):
	const Dir nP = n / n2; // determinant is -n2
	const Dir Q0(nP[2] * (p1[1] - p2[1]) - nP[1] * (p1[2] - p2[2]), nP[2] * (p2[1] - p0[1]) - nP[1] * (p2[2] - p0[2]), nP[2] * (p0[1] - p1[1]) - nP[1] * (p0[2] - p1[2]));
	const Dir Q1(nP[0] * (p1[2] - p2[2]) - nP[2] * (p1[0] - p2[0]), nP[0] * (p2[2] - p0[2]) - nP[2] * (p2[0] - p0[0]), nP[0] * (p0[2] - p1[2]) - nP[2] * (p0[0] - p1[0]));
	const Dir Q2(nP[1] * (p1[0] - p2[0]) - nP[0] * (p1[1] - p2[1]), nP[1] * (p2[0] - p0[0]) - nP[0] * (p2[1] - p0[1]), nP[1] * (p0[0] - p1[0]) - nP[0] * (p0[1] - p1[1]));
	const Dir r(nP | p1xp2, nP | p2xp0, nP | p0xp1);

	for (uint32_t i = 0; i < VertexFieldCount; ++i)
	{
		// Gather the field's three per-vertex values via the s_offsets table.
		const size_t offset = s_offsets[i];
		const Dir vi(*(Real*)(((uint8_t*)&vertexData[0]) + offset), *(Real*)(((uint8_t*)&vertexData[1]) + offset), *(Real*)(((uint8_t*)&vertexData[2]) + offset));
		Dir n(Q0 | vi, Q1 | vi, Q2 | vi); // gradient (shadows the outer n, which is no longer needed)
		if ((n | n) < 100 * EPS_REAL * EPS_REAL)
		{
			n.set((Real)0, (Real)0, (Real)0);
		}
		Real o = r | vi; // offset term
		if (physx::PxAbs(o) < 100 * EPS_REAL)
		{
			o = (Real)0;
		}
		m_frames[i].set(n, o);
	}
}
+
// Builds constant (position-independent) frames for normals/tangents/binormals
// and color, and planar-projection frames for all UV channels: u and v are the
// point's components along tangents[0] and tangents[1], divided by uvScale
// (a zero scale component yields a zero gradient, avoiding division by zero).
PX_INLINE void
Interpolator::setFlat(const Dir tangents[3], const Vec<Real, 2>& uvScale)
{
	// Local z ~ normal = tangents[2], x ~ u and tangent = tangents[0], y ~ v and binormal = tangents[1]
	m_frames[Normal_x].set(Dir((Real)0), tangents[2][0]);
	m_frames[Normal_y].set(Dir((Real)0), tangents[2][1]);
	m_frames[Normal_z].set(Dir((Real)0), tangents[2][2]);
	m_frames[Tangent_x].set(Dir((Real)0), tangents[0][0]);
	m_frames[Tangent_y].set(Dir((Real)0), tangents[0][1]);
	m_frames[Tangent_z].set(Dir((Real)0), tangents[0][2]);
	m_frames[Binormal_x].set(Dir((Real)0), tangents[1][0]);
	m_frames[Binormal_y].set(Dir((Real)0), tangents[1][1]);
	m_frames[Binormal_z].set(Dir((Real)0), tangents[1][2]);
	const Dir su = (uvScale[0] ? 1 / uvScale[0] : (Real)0) * tangents[0];
	const Dir sv = (uvScale[1] ? 1 / uvScale[1] : (Real)0) * tangents[1];
	// All four UV channels share the same planar projection.
	m_frames[UV0_u].set(su, 0);
	m_frames[UV0_v].set(sv, 0);
	m_frames[UV1_u].set(su, 0);
	m_frames[UV1_v].set(sv, 0);
	m_frames[UV2_u].set(su, 0);
	m_frames[UV2_v].set(sv, 0);
	m_frames[UV3_u].set(su, 0);
	m_frames[UV3_v].set(sv, 0);
	// Constant opaque white.
	m_frames[Color_r].set(Dir((Real)0), (Real)1);
	m_frames[Color_g].set(Dir((Real)0), (Real)1);
	m_frames[Color_b].set(Dir((Real)0), (Real)1);
	m_frames[Color_a].set(Dir((Real)0), (Real)1);
}
+
// Evaluates each field's frame at point and writes the result directly into
// the corresponding Real inside vertexData, addressed via the s_offsets table.
PX_INLINE void
Interpolator::interpolateVertexData(VertexData& vertexData, const Pos& point) const
{
	for (uint32_t i = 0; i < VertexFieldCount; ++i)
	{
		Real& value = *(Real*)(((uint8_t*)&vertexData) + s_offsets[i]);
		value = m_frames[i].distance(point);
	}
}
+
+PX_INLINE bool
+framesEqual(const Plane& f0, const Plane& f1, Real twoFrameScaleTol2, Real sinFrameTol2, Real tol2)
+{
+ const Dir n0 = f0.normal();
+ const Dir n1 = f1.normal();
+ const Real n02 = n0 | n0;
+ const Real n12 = n1 | n1;
+ const Real n2Diff = n02 - n12;
+
+ if (n2Diff * n2Diff > twoFrameScaleTol2 * (n02 + n12))
+ {
+ return false; // Scales differ by more than frame scale tolerance
+ }
+
+ const Real n2Prod = n02 * n12;
+ const Real unnormalizedSinFrameTheta2 = (n0 ^ n1).lengthSquared();
+ if (unnormalizedSinFrameTheta2 > n2Prod * sinFrameTol2)
+ {
+ return false; // Directions differ by more than frame angle tolerance
+ }
+
+ const Real unnormalizedOriginDiff = f0.d() - f1.d();
+ const Real originScale = 0.5f * (physx::PxAbs(f0.d()) + physx::PxAbs(f1.d()));
+ if (unnormalizedOriginDiff * unnormalizedOriginDiff > tol2 * originScale * originScale)
+ {
+ return false; // Origins differ by more than tolerance
+ }
+
+ return true;
+}
+
+PX_INLINE bool
+Interpolator::equals(const Interpolator& interpolator, Real frameDirTol, Real frameScaleTol, Real dirTol, Real uvTol, Real colorTol) const
+{
+ const Real twoFrameScaleTol2 = (Real)2 * frameScaleTol * frameScaleTol;
+ const Real sinFrameTol2 = frameDirTol * frameDirTol;
+ const Real dirTol2 = dirTol * dirTol;
+ const Real uvTol2 = uvTol * uvTol;
+ const Real colorTol2 = colorTol * colorTol;
+
+ // Directions
+ for (uint32_t i = Normal_x; i <= Binormal_z; ++i)
+ {
+ if (!framesEqual(m_frames[i], interpolator.m_frames[i], twoFrameScaleTol2, sinFrameTol2, dirTol2))
+ {
+ return false;
+ }
+ }
+
+ // UVs
+ for (uint32_t i = UV0_u; i <= UV3_v; ++i)
+ {
+ if (!framesEqual(m_frames[i], interpolator.m_frames[i], twoFrameScaleTol2, sinFrameTol2, uvTol2))
+ {
+ return false;
+ }
+ }
+
+ // Color
+ for (uint32_t i = Color_r; i <= Color_a; ++i)
+ {
+ if (!framesEqual(m_frames[i], interpolator.m_frames[i], twoFrameScaleTol2, sinFrameTol2, colorTol2))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
// Transform all interpolation frames into another space.
// Frames are planes, so each is first transformed covariantly by invTransposeTM
// (the left-hand transform).  Then, for vector-valued fields, the i-th plane
// coefficients of the x/y/z component frames are regathered into a vector and
// transformed again on the right: normals by invTransposeTM, tangents and
// binormals by tm.  Scalar fields (UVs, colors) need no right-hand transform.
PX_INLINE void
Interpolator::transform(Interpolator& transformedInterpolator, const Mat4Real& tm, const Mat4Real& invTransposeTM) const
{
	// Apply left-hand transform.
	for (uint32_t i = 0; i < VertexFieldCount; ++i)
	{
		transformedInterpolator.m_frames[i] = invTransposeTM * m_frames[i];
	}
	// Apply right-hand transform. This is specific to the quantities being transformed.
	for (int i = 0; i < 4; ++i)
	{
		// Normal, transform by invTransposeTM:
		Dir normal_frame_i(transformedInterpolator.m_frames[Interpolator::Normal_x][i], transformedInterpolator.m_frames[Interpolator::Normal_y][i], transformedInterpolator.m_frames[Interpolator::Normal_z][i]);
		normal_frame_i = invTransposeTM * normal_frame_i;
		transformedInterpolator.m_frames[Interpolator::Normal_x][i] = normal_frame_i[0];
		transformedInterpolator.m_frames[Interpolator::Normal_y][i] = normal_frame_i[1];
		transformedInterpolator.m_frames[Interpolator::Normal_z][i] = normal_frame_i[2];
		// Tangent, transform by tm:
		Dir tangent_frame_i(transformedInterpolator.m_frames[Interpolator::Tangent_x][i], transformedInterpolator.m_frames[Interpolator::Tangent_y][i], transformedInterpolator.m_frames[Interpolator::Tangent_z][i]);
		tangent_frame_i = tm * tangent_frame_i;
		transformedInterpolator.m_frames[Interpolator::Tangent_x][i] = tangent_frame_i[0];
		transformedInterpolator.m_frames[Interpolator::Tangent_y][i] = tangent_frame_i[1];
		transformedInterpolator.m_frames[Interpolator::Tangent_z][i] = tangent_frame_i[2];
		// Binormal, transform by tm:
		Dir binormal_frame_i(transformedInterpolator.m_frames[Interpolator::Binormal_x][i], transformedInterpolator.m_frames[Interpolator::Binormal_y][i], transformedInterpolator.m_frames[Interpolator::Binormal_z][i]);
		binormal_frame_i = tm * binormal_frame_i;
		transformedInterpolator.m_frames[Interpolator::Binormal_x][i] = binormal_frame_i[0];
		transformedInterpolator.m_frames[Interpolator::Binormal_y][i] = binormal_frame_i[1];
		transformedInterpolator.m_frames[Interpolator::Binormal_z][i] = binormal_frame_i[2];
		// Other quantities are scalars
	}
}
+
+
// Constructing a single InterpolatorBuilder populates the static
// Interpolator::s_offsets table with the byte offset of each scalar field
// within VertexData, so that frames can be indexed and written generically.
class InterpolatorBuilder
{
public:
	InterpolatorBuilder()
	{
// Byte offset of `field` within a VertexData instance (works for array elements
// and accessor-returned references, unlike offsetof).
#define CREATE_OFFSET( field ) (size_t)((uintptr_t)&vertexData.field-(uintptr_t)&vertexData)

		VertexData vertexData;
		Interpolator::s_offsets[Interpolator::Normal_x] = CREATE_OFFSET(normal[0]);
		Interpolator::s_offsets[Interpolator::Normal_y] = CREATE_OFFSET(normal[1]);
		Interpolator::s_offsets[Interpolator::Normal_z] = CREATE_OFFSET(normal[2]);
		Interpolator::s_offsets[Interpolator::Tangent_x] = CREATE_OFFSET(tangent[0]);
		Interpolator::s_offsets[Interpolator::Tangent_y] = CREATE_OFFSET(tangent[1]);
		Interpolator::s_offsets[Interpolator::Tangent_z] = CREATE_OFFSET(tangent[2]);
		Interpolator::s_offsets[Interpolator::Binormal_x] = CREATE_OFFSET(binormal[0]);
		Interpolator::s_offsets[Interpolator::Binormal_y] = CREATE_OFFSET(binormal[1]);
		Interpolator::s_offsets[Interpolator::Binormal_z] = CREATE_OFFSET(binormal[2]);
		Interpolator::s_offsets[Interpolator::UV0_u] = CREATE_OFFSET(uv[0].u());
		Interpolator::s_offsets[Interpolator::UV0_v] = CREATE_OFFSET(uv[0].v());
		Interpolator::s_offsets[Interpolator::UV1_u] = CREATE_OFFSET(uv[1].u());
		Interpolator::s_offsets[Interpolator::UV1_v] = CREATE_OFFSET(uv[1].v());
		Interpolator::s_offsets[Interpolator::UV2_u] = CREATE_OFFSET(uv[2].u());
		Interpolator::s_offsets[Interpolator::UV2_v] = CREATE_OFFSET(uv[2].v());
		Interpolator::s_offsets[Interpolator::UV3_u] = CREATE_OFFSET(uv[3].u());
		Interpolator::s_offsets[Interpolator::UV3_v] = CREATE_OFFSET(uv[3].v());
		Interpolator::s_offsets[Interpolator::Color_r] = CREATE_OFFSET(color.r());
		Interpolator::s_offsets[Interpolator::Color_g] = CREATE_OFFSET(color.g());
		Interpolator::s_offsets[Interpolator::Color_b] = CREATE_OFFSET(color.b());
		Interpolator::s_offsets[Interpolator::Color_a] = CREATE_OFFSET(color.a());
	}
};
+
+
// ClippedTriangleInfo - used to map bsp output back to the original mesh
struct ClippedTriangleInfo
{
	uint32_t planeIndex;
	uint32_t originalTriangleIndex;
	uint32_t clippedTriangleIndex;
	uint32_t ccw;

	// qsort-style comparator ordering by planeIndex, then originalTriangleIndex,
	// then clippedTriangleIndex.  Uses explicit three-way comparisons instead of
	// the (int)a - (int)b subtraction idiom, which overflows (undefined behavior /
	// wrong ordering) when index values exceed INT_MAX or differ by more than INT_MAX.
	static int cmp(const void* a, const void* b)
	{
		const ClippedTriangleInfo& infoA = *(const ClippedTriangleInfo*)a;
		const ClippedTriangleInfo& infoB = *(const ClippedTriangleInfo*)b;
		if (infoA.planeIndex != infoB.planeIndex)
		{
			return infoA.planeIndex < infoB.planeIndex ? -1 : 1;
		}
		if (infoA.originalTriangleIndex != infoB.originalTriangleIndex)
		{
			return infoA.originalTriangleIndex < infoB.originalTriangleIndex ? -1 : 1;
		}
		if (infoA.clippedTriangleIndex != infoB.clippedTriangleIndex)
		{
			return infoA.clippedTriangleIndex < infoB.clippedTriangleIndex ? -1 : 1;
		}
		return 0;
	}
};
+
// BSPLink - a link with an "isBSP" method to act as a stop
// A doubly-linked list node in which plain BSPLink instances (isBSP() == false)
// serve as list terminators/sentinels, while BSP objects override isBSP() to true.
class BSPLink : public nvidia::Link, public nvidia::UserAllocated
{
public:
	// Default links are sentinels; BSP overrides this to return true.
	virtual bool isBSP()
	{
		return false;
	}

	// Returns the adjacent link in the given direction (0 or 1) if it is an actual
	// BSP, or NULL if this link is unlinked or the neighbor is a sentinel.
	BSPLink* getAdjBSP(uint32_t which) const
	{
		if (isSolitary())
		{
			return NULL;
		}
		BSPLink* adjLink = static_cast<BSPLink*>(getAdj(which));
		return adjLink->isBSP() ? adjLink : NULL;
	}

	// Unlinks this node from its list; if that leaves a sentinel neighbor alone,
	// the orphaned sentinel is deleted as well.
	void removeBSPLink()
	{
		BSPLink* adjLink = static_cast<BSPLink*>(getAdj(1));
		remove();
		if (!adjLink->isBSP() && adjLink->isSolitary())
		{
			delete adjLink;
		}
	}
};
+
+// Specialized progress listener implementation
+class QuantityProgressListener : public nvidia::IProgressListener
+{
+public:
+ QuantityProgressListener(Real totalAmount, IProgressListener* parent) :
+ m_total((Real)0)
+ , m_parent(parent)
+ {
+ m_scale = totalAmount > (Real)0 ? (Real)100/(Real)totalAmount : (Real)0;
+ }
+
+ // IProgressListener interface
+ virtual void setProgress(int progress, const char* taskName = NULL)
+ {
+ if (m_parent != NULL)
+ {
+ m_parent->setProgress(progress, taskName);
+ }
+ }
+
+ virtual void add(Real amount)
+ {
+ m_total += amount;
+ if (m_parent != NULL)
+ {
+ m_parent->setProgress((int)(m_total*m_scale + (Real)0.5));
+ }
+ }
+
+private:
+ Real m_total;
+ Real m_scale;
+ IProgressListener* m_parent;
+};
+
+
// IApexBSP implementation
// Represents solid geometry as a BSP tree whose branches hold splitting surfaces and
// whose leaves hold inside/outside region data, together with the internal triangle
// mesh and interpolation frames used to reconstruct surface attributes.
class BSP : public IApexBSP, public BSPLink
{
public:
	BSP(IApexBSPMemCache* memCache = NULL, const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxIdentity));
	~BSP();

	// IApexBSP implementation
	void setTolerances(const BSPTolerances& tolerances);
	bool fromMesh(const nvidia::ExplicitRenderTriangle* mesh, uint32_t meshSize, const BSPBuildParameters& params, nvidia::IProgressListener* progressListener = NULL, volatile bool* cancel = NULL);
	bool fromConvexPolyhedron(const physx::PxPlane* poly, uint32_t polySize, const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxIdentity), const nvidia::ExplicitRenderTriangle* mesh = NULL, uint32_t meshSize = 0);
	bool combine(const IApexBSP& bsp);
	bool op(const IApexBSP& combinedBSP, Operation::Enum operation);
	bool complement();
	BSPType::Enum getType() const;
	bool getSurfaceAreaAndVolume(float& area, float& volume, bool inside, Operation::Enum operation = Operation::NOP) const;
	bool pointInside(const physx::PxVec3& point, Operation::Enum operation = Operation::NOP) const;
	bool toMesh(physx::Array<nvidia::ExplicitRenderTriangle>& mesh) const;
	void copy(const IApexBSP& bsp, const physx::PxMat44& tm = physx::PxMat44(physx::PxIdentity), const physx::PxMat44& internalTransform = physx::PxMat44(physx::PxZero));
	physx::PxMat44 getInternalTransform() const
	{
		return m_internalTransform;
	}

	void replaceInteriorSubmeshes(uint32_t frameCount, uint32_t* frameIndices, uint32_t submeshIndex);

	IApexBSP* decomposeIntoIslands() const;
	// Linked-list navigation (e.g. over islands produced by decomposeIntoIslands);
	// returns NULL at either end of the chain.
	IApexBSP* getNext() const
	{
		return static_cast<BSP*>(getAdjBSP(1));
	}
	IApexBSP* getPrev() const
	{
		return static_cast<BSP*>(getAdjBSP(0));
	}

	void deleteTriangles();

	void serialize(physx::PxFileBuf& stream) const;
	void deserialize(physx::PxFileBuf& stream);
	void visualize(nvidia::RenderDebugInterface& debugRender, uint32_t flags, uint32_t index = 0) const;
	void release();

	// Debug
	void performDiagnostics() const;

	// BSPLink
	bool isBSP()
	{
		return true;
	}

	// Node, a binary node with geometric data
	// A node is either a Leaf (carrying Region data) or a Branch (carrying its
	// splitting Surface); the payload lives in a union discriminated by m_type.
	class Node : public BinaryNode
	{
		Node& operator = (const Node&); // No assignment

	public:
		enum Type { Leaf, Branch };

		Node() : m_type(Leaf)
		{
			m_leafData.side = 1;
		}

		PX_INLINE void setLeafData(const Region& leafData)
		{
			m_type = Leaf;
			m_leafData = leafData;
		}
		PX_INLINE void setBranchData(const Surface& branchData)
		{
			m_type = Branch;
			m_branchData = branchData;
		}

		PX_INLINE Type getType() const
		{
			return (Type)m_type;
		}

		// Accessors assert that the node's discriminator matches the requested payload.
		PX_INLINE Region* getLeafData()
		{
			PX_ASSERT(getType() == Leaf);
			return &m_leafData;
		}
		PX_INLINE Surface* getBranchData()
		{
			PX_ASSERT(getType() == Branch);
			return &m_branchData;
		}
		PX_INLINE const Region* getLeafData() const
		{
			PX_ASSERT(getType() == Leaf);
			return &m_leafData;
		}
		PX_INLINE const Surface* getBranchData() const
		{
			PX_ASSERT(getType() == Branch);
			return &m_branchData;
		}

		PX_INLINE Node* getParent() const
		{
			return (Node*)BinaryNode::getParent();
		}
		PX_INLINE Node* getChild(uint32_t index) const
		{
			return (Node*)BinaryNode::getChild(index);
		}

		// Iterator (uses a stack, but no recursion)
		// Can handle branches with NULL children
		// Pre-order traversal: child 0 is descended first, child 1 is stacked.
		class It
		{
		public:
			PX_INLINE It(const Node* root) : m_current(const_cast<Node*>(root)), m_valid(true) {}
			PX_INLINE It(Node* root) : m_current(root), m_valid(true) {}

			PX_INLINE bool valid() const
			{
				return m_valid;
			}

			PX_INLINE Node* node() const
			{
				return m_current;
			}

			PX_INLINE void inc()
			{
				if (m_current != NULL && m_current->getType() == Branch)
				{
					m_stack.pushBack(m_current->getChild(1));
					m_current = m_current->getChild(0);
				}
				else
				if (!m_stack.empty())
				{
					m_current = m_stack.popBack();
				}
				else
				{
					m_current = NULL;
					m_valid = false;
				}
			}

		private:
			Node* m_current;
			physx::Array<Node*> m_stack;
			bool m_valid;
		};

	protected:
		uint32_t m_type;

		// Payload union; which member is live is determined by m_type.
		union
		{
			Region m_leafData;
			Surface m_branchData;
		};
	};

	// GSA halfspace set consisting of a single plane.
	class Halfspace : public GSA::VS3D_Halfspace_Set
	{
	public:
		Halfspace(const Plane plane) : m_plane(plane) {}

		virtual GSA::real farthest_halfspace(GSA::real plane[4], const GSA::real point[4])
		{
			for (int i = 0; i < 4; ++i) plane[i] = (GSA::real)m_plane[i];
			return plane[0]*point[0] + plane[1]*point[1] + plane[2]*point[2] + plane[3]*point[3];
		}

		Halfspace& operator = (const Halfspace& halfspace) { m_plane = halfspace.m_plane; return *this; }

	private:
		Plane m_plane;
	};

	// GSA halfspace set describing the convex region bounded by the splitting
	// planes on the path from a BSP leaf to the root.
	class RegionShape : public GSA::VS3D_Halfspace_Set
	{
	public:
		RegionShape(const Plane* planes, Real skinWidth = (Real)0) : m_planes(planes), m_leaf(NULL), m_nonempty(true), m_skinWidth(skinWidth) {}

		virtual GSA::real farthest_halfspace(GSA::real plane[4], const GSA::real point[4]);

		void set_leaf(const BSP::Node* leaf)
		{
			m_leaf = leaf;
		}

		// Run the GSA emptiness test and cache the result.
		void calculate()
		{
			m_nonempty = (1 == GSA::vs3d_test(*this));
		}

		bool is_nonempty() const
		{
			return m_nonempty;
		}

#if 0
		// NOTE(review): dead code - `plane` is a const Plane*, but Halfspace's
		// constructor takes a Plane by value; this would not compile if enabled.
		bool intersects_halfspace(const Plane* plane)
		{
			Halfspace halfspace(plane);
			set_shapes(this, &halfspace);
			return intersect();
		}
#endif

	private:
		const Plane* m_planes;
		const BSP::Node* m_leaf;
		bool m_nonempty;
		Real m_skinWidth;
	};

private:
	// Binary boolean operation evaluator.  The four low bits of the Operation enum
	// encode the function's algebraic normal form: f(a,b) = c_ba(a&b) ^ c_b*b ^ c_a*a ^ c_k.
	class BoolOp
	{
	public:
		BoolOp(Operation::Enum op) : c_ba(((uint32_t)op >> 3) & 1), c_b(((uint32_t)op >> 2) & 1), c_a(((uint32_t)op >> 1) & 1), c_k((uint32_t)op & 1) {}

		uint32_t operator()(uint32_t a, uint32_t b) const
		{
			return (c_ba & a & b) ^(c_b & b) ^(c_a & a) ^ c_k;
		}

	private:
		uint32_t c_ba, c_b, c_a, c_k;
	};

	// Parameters held constant over a tree build.
	struct BuildConstants
	{
		BSPBuildParameters m_params;
		float m_recipMaxArea;
	};

	void clear();

	void transform(const Mat4Real& tm, bool transformFrames = true);

	// Returns the area and volume of the clipped mesh. clippedMesh and triangleInfo may be NULL, in which case nothing is done but
	// the area and volume calculation.
	void clipMeshToLeaf(Real& area, Real& volume, physx::Array<Triangle>* clippedMesh, physx::Array<ClippedTriangleInfo>* triangleInfo, const Node* leaf, float clipTolerance) const;

	// Called by buildTree - forcing no inline to ensure a small stack frame

	// Returns a new stackReadStop
	PX_INLINE uint32_t removeRedundantSurfacesFromStack(physx::Array<Surface>& surfaceStack, uint32_t stackReadStart, uint32_t stackReadStop, Node* leaf);
	PX_INLINE void assignLeafSide(Node* leaf, QuantityProgressListener* quantityListener);
	PX_INLINE void createBranchSurfaceAndSplitStack(uint32_t childReadStart[2], uint32_t childReadStop[2], Node* node, physx::Array<Surface>& surfaceStack,
	        uint32_t stackReadStart, uint32_t stackReadStop, const BuildConstants& buildConstants);

	// Recursive functions
	void complementLeaves(Node* root) const;
	void mergeLeaves(const BoolOp& op, Node* root);
	void clipMeshToLeaves(physx::Array<Triangle>& clippedMesh, physx::Array<ClippedTriangleInfo>& triangleInfo, Node* root, float clipTolerance) const;
	void clone(Node* root, const Node* originalRoot);
	void combineTrees(Node* root, const Node* combineRoot, uint32_t triangleIndexOffset, uint32_t planeIndexOffset);
	bool buildTree(Node* root, physx::Array<Surface>& surfaceStack, uint32_t stackReadStart, uint32_t stackReadStop,
	               const BuildConstants& buildConstants, QuantityProgressListener* quantityListener, volatile bool* cancel = NULL);
	void visualizeNode(nvidia::RenderDebugInterface& debugRender, uint32_t flags, const Node* root) const;
	bool addLeafAreasAndVolumes(Real& totalArea, Real& totalVolume, const Node* root, bool inside, const BoolOp& op) const;
	void serializeNode(const Node* root, physx::PxFileBuf& stream) const;
	Node* deserializeNode(uint32_t version, physx::PxFileBuf& stream);
	void releaseNode(Node* node);
	void indexInsideLeaves(uint32_t& index, Node* root) const;
	void listInsideLeaves(physx::Array<Node*>& insideLeaves, Node* root) const;
	void findInsideLeafNeighbors(physx::Array<nvidia::IntPair>& neighbors, Node* root) const;

	void clean();

	// Parameters
	// NOTE(review): member name is misspelled ("tolerarnces") - kept as-is since
	// renaming would require touching every use in the implementation files.
	BSPTolerances m_tolerarnces;

	// Tree
	Node* m_root;

	// Internal mesh representation
	physx::Array<Triangle> m_mesh;
	physx::Array<Interpolator> m_frames;
	Real m_meshSize;
	physx::PxBounds3 m_meshBounds;
	physx::PxMat44 m_internalTransform;
	Mat4Real m_internalTransformInverse;
	bool m_incidentalMesh;


	// Unique splitting planes
	physx::Array<Plane> m_planes;

	// Combination data
	bool m_combined;
	Real m_combiningMeshSize;
	bool m_combiningIncidentalMesh;

	// Memory cache
	class BSPMemCache* m_memCache;
	bool m_ownsMemCache;
};
+
+
+// Surface iterator; walks from a leaf's parent to the root of a tree, allowing inspection of surfaces along the way
+class SurfaceIt
+{
+public:
+ PX_INLINE SurfaceIt() : m_current(NULL), m_side(0xFFFFFFFF) {}
+ PX_INLINE SurfaceIt(const BSP::Node* leaf) : m_current((BSP::Node*)leaf)
+ {
+ PX_ASSERT(leaf != NULL && leaf->getType() == BSP::Node::Leaf);
+ inc();
+ }
+
+ PX_INLINE bool valid() const
+ {
+ return m_current != NULL;
+ }
+
+ PX_INLINE void inc()
+ {
+ m_side = m_current->getIndex();
+ m_current = m_current->getParent();
+ }
+
+ PX_INLINE const Surface* surface() const
+ {
+ return m_current->getBranchData();
+ }
+
+ PX_INLINE uint32_t side() const
+ {
+ return m_side;
+ }
+
+private:
+ BSP::Node* m_current;
+ uint32_t m_side;
+};
+
+
// IBSPMemCache implementation, several pools and growable arrays. Not global, so that concurrent calculations can use different pools
class BSPMemCache : public IApexBSPMemCache, public nvidia::UserAllocated
{
public:

	BSPMemCache();

	// Releases both persistent and temporary caches.
	void clearAll();
	// Releases only the temporary (per-operation) caches below.
	void clearTemp();

	void release();

	// Persistent data
	nvidia::Pool<BSP::Node> m_nodePool;

	// Temporary data
	nvidia::Pool<LinkedVertex> m_linkedVertexPool;
	physx::Array<uint8_t> m_surfaceFlags;
	physx::Array<uint8_t> m_surfaceTestFlags;
};
+
+
// Mesh cleaning interface
// Produces cleanedMesh from the BSP-clipped mesh, using triangleInfo/planes/originalTriangles/frames
// to recover original-mesh attributes; distanceTol is the welding/snapping tolerance and
// BSPToMeshTM maps BSP space back to mesh space.  (NOTE(review): parameter semantics inferred
// from names and call context - confirm against ApexCSGMeshCleaning.cpp.)
void
cleanMesh(physx::Array<nvidia::ExplicitRenderTriangle>& cleanedMesh, const physx::Array<Triangle>& mesh, physx::Array<ClippedTriangleInfo>& triangleInfo, const physx::Array<Plane>& planes, const physx::Array<Triangle>& originalTriangles, const physx::Array<Interpolator>& frames, Real distanceTol, const Mat4Real& BSPToMeshTM);
+
+}; // namespace ApexCSG
+
+#endif
+
#endif // APEX_CSG_DEFS_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath.h
new file mode 100644
index 00000000..22884507
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath.h
@@ -0,0 +1,794 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_FAST_MATH_H
+#define APEX_CSG_FAST_MATH_H
+
+#include "ApexUsingNamespace.h"
+#include "PxMath.h"
+#include "PxVec3.h"
+
+#include "PsUtilities.h"
+
+#include "PxIntrinsics.h"
+#include <emmintrin.h>
+#include <fvec.h>
+
+#include <math.h>
+#include <float.h>
+
+#ifdef __SSE2__
+#define APEX_CSG_SSE
+#include <mmintrin.h>
+#include <emmintrin.h>
+#endif
+#ifdef __SSE3__
+#include <pmmintrin.h>
+#endif
+#ifdef __SSE4_1__
+#include <smmintrin.h>
+#endif
+
+namespace ApexCSG
+{
+
+/* Utilities */
+
// Returns the product of a value with itself.
template<typename T>
T square(T t)
{
	const T product = t * t;
	return product;
}
+
+
+/* Linear algebra */
+
+#define ALL_i( _D, _exp ) for( int i = 0; i < _D; ++i ) { _exp; }
+
+#ifndef APEX_CSG_LOOP_UNROLL
+#define APEX_CSG_LOOP_UNROLL 1
+#endif
+
+#ifndef APEX_CSG_SSE
+#define APEX_CSG_SSE 1
+#endif
+
+#ifndef APEX_CSG_INLINE
+#define APEX_CSG_INLINE 1
+#endif
+
+#ifndef APEX_CSG_ALIGN
+#define APEX_CSG_ALIGN 16
+#endif
+
+#if APEX_CSG_LOOP_UNROLL
+
+#define VEC_SIZE() sizeof(*this)/sizeof(Real)
+
+#define OP_VV(a,op,b,i) a[i] op b[i]
+#define OP_SV(a,op,b,i) a op b[i]
+#define OP_VS(a,op,b,i) a[i] op b
+#define OP_VVV(a,op1,b,op2,c,i) a[i] op1 b[i] op2 c[i]
+#define OP_SVV(a,op1,b,op2,c,i) a op1 b[i] op2 c[i]
+#define OP_VVS(a,op1,b,op2,c,i) a[i] op1 b[i] op2 c
+#define OP_D(_D) (_D == 0 ? 0 : (_D == 1 ? 1 : (_D == 2 ? 2 : (_D == 3 ? 3 : 3))))
+#define OP_NAME(_T) PX_CONCAT(OP_,_T)
+#define OP_2_NAME(_D) ALL_2_##_D /*PX_CONCAT(ALL_2_,OP_D(_D))*/
+#define OP_3_NAME(_D) ALL_3_##_D /*PX_CONCAT(ALL_3_,OP_D(_D))*/
+
+#define ALL_2_1( _T, a,op,b) OP_##_T (a,op,b,0)
+#define ALL_2_2( _T, a,op,b) ALL_2_1(_T,a,op,b); OP_##_T (a,op,b,1)
+#define ALL_2_3( _T, a,op,b) ALL_2_2(_T,a,op,b); OP_##_T (a,op,b,2)
+#define ALL_2_4( _T, a,op,b) ALL_2_3(_T,a,op,b); OP_##_T (a,op,b,3)
+#define ALL_VV_i( _D, a,op,b) OP_2_NAME(_D)(VV,a,op,b)
+#define ALL_SV_i( _D, a,op,b) OP_2_NAME(_D)(SV,a,op,b)
+#define ALL_VS_i( _D, a,op,b) OP_2_NAME(_D)(VS,a,op,b)
+
+#define ALL_3_1( _T, a,op1,b,op2,c) OP_NAME(_T) (a,op1,b,op2,c,0)
+#define ALL_3_2( _T, a,op1,b,op2,c) ALL_3_1(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,1)
+#define ALL_3_3( _T, a,op1,b,op2,c) ALL_3_2(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,2)
+#define ALL_3_4( _T, a,op1,b,op2,c) ALL_3_3(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,3)
+#define ALL_VVV_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(VVV,a,op1,b,op2,c)
+#define ALL_SVV_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(SVV,a,op1,b,op2,c)
+#define ALL_VVS_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(VVS,a,op1,b,op2,c)
+#define ALL_VVV( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(VVV,a,op1,b,op2,c)
+#define ALL_SVV( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(SVV,a,op1,b,op2,c)
+#define ALL_VVS( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(VVS,a,op1,b,op2,c)
+
+#else
+
+#define ALL_VV_i( _D, a,op,b) ALL_i( _D, a[i] op b[i] )
+#define ALL_SV_i( _D, a,op,b) ALL_i( _D, a op b[i] )
+#define ALL_VS_i( _D, a,op,b) ALL_i( _D, a[i] op b )
+#define ALL_VVV_i( _D, a,op1,b,op2,c) ALL_i( _D, a[i] op1 b[i] op2 c[i] )
+#define ALL_SVV_i( _D, a,op1,b,op2,c) ALL_i( _D, a op1 b[i] op2 c[i] )
+#define ALL_VVS_i( _D, a,op1,b,op2,c) ALL_i( _D, a[i] op1 b[i] op2 c )
+
+#endif
+
+/* General vector */
+
// Empty base class forcing APEX_CSG_ALIGN-byte alignment on derived vector types
// (required by the SSE load/store paths; __declspec(align) is MSVC-specific).
__declspec(align(APEX_CSG_ALIGN)) class aligned { };
+
// Fixed-size D-dimensional vector of T with element-wise arithmetic.
// Derives from `aligned` so instances are 16-byte aligned for the SSE paths.
// Element loops are expressed through the ALL_i macro (optionally unrolled).
template<typename T, int D>
class Vec : public aligned
{
public:

	// Default constructor leaves elements uninitialized (deliberate, for speed).
	PX_INLINE Vec() {}
	// Broadcast constructor: all elements set to v.
	PX_INLINE Vec(const T& v)
	{
		set(v);
	}
	// Array constructor: copies D elements from v.
	PX_INLINE Vec(const T* v)
	{
		set(v);
	}

	PX_INLINE void set(const T& v)
	{
		ALL_i(D, el[i] = v);
	}
	PX_INLINE void set(const T* v)
	{
		ALL_i(D, el[i] = v[i]);
	}

	// Unchecked element access.
	PX_INLINE T& operator [](int i)
	{
		return el[i];
	}
	PX_INLINE const T& operator [](int i) const
	{
		return el[i];
	}

	PX_INLINE Vec operator - () const
	{
		Vec r;
		ALL_i(D, r[i] = -el[i]);
		return r;
	}

	// Element-wise binary arithmetic.
	PX_INLINE Vec operator + (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] + v[i]);
		return r;
	}
	PX_INLINE Vec operator - (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] - v[i]);
		return r;
	}
	PX_INLINE Vec operator * (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] * v[i]);
		return r;
	}
	PX_INLINE Vec operator / (const Vec& v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] / v[i]);
		return r;
	}

	PX_INLINE Vec operator * (T v) const
	{
		Vec r;
		ALL_i(D, r[i] = el[i] * v);
		return r;
	}
	// Scalar division via multiplication by the reciprocal (one division total).
	PX_INLINE Vec operator / (T v) const
	{
		return *this * ((T)1 / v);
	}

	PX_INLINE Vec& operator += (const Vec& v)
	{
		ALL_i(D, el[i] += v[i]);
		return *this;
	}
	PX_INLINE Vec& operator -= (const Vec& v)
	{
		ALL_i(D, el[i] -= v[i]);
		return *this;
	}
	PX_INLINE Vec& operator *= (const Vec& v)
	{
		ALL_i(D, el[i] *= v[i]);
		return *this;
	}
	PX_INLINE Vec& operator /= (const Vec& v)
	{
		ALL_i(D, el[i] /= v[i]);
		return *this;
	}

	// Dot product.
	PX_FORCE_INLINE T operator | (const Vec& v) const
	{
		T r = (T)0;
		ALL_i(D, r += el[i] * v[i]);
		return r;
	}

	// Scales to unit length; returns the previous length (0 for a zero vector).
	PX_INLINE T normalize();

	PX_INLINE T lengthSquared() const
	{
		return *this | *this;
	}

protected:
	T el[D];
};
+
+template<typename T, int D>
+PX_INLINE T
+Vec<T, D>::normalize()
+{
+ const T l2 = *this | *this;
+ if (l2 == (T)0)
+ {
+ return (T)0;
+ }
+ const T recipL = (T)1 / physx::PxSqrt(l2);
+ *this *= recipL;
+ return recipL * l2;
+}
+
+template<typename T, int D>
+PX_INLINE Vec<T, D>
+operator * (T s, const Vec<T, D>& v)
+{
+ Vec<T, D> r;
+ ALL_i(D, r[i] = s * v[i]);
+ //ALL_VVS_i(D, r, =, v, *, s);
+ return r;
+}
+
+
+/* Popular real vectors */
// 2D vector specialization with a scalar cross product (perp-dot).
template<typename T>
class Vec2 : public Vec<T, 2>
{
public:
	PX_INLINE Vec2() {}
	PX_INLINE Vec2(const Vec2& v)
	{
		ALL_VV_i(2, el, =, v);
	}
	PX_INLINE Vec2(const Vec<T, 2>& v)
	{
		ALL_VV_i(2, el, =, v);
	}
	PX_INLINE Vec2(T x, T y)
	{
		set(x, y);
	}
	PX_INLINE Vec2& operator = (const Vec2& v)
	{
		ALL_VV_i(2, el, =, v);
		return *this;
	}

	PX_INLINE void set(const T* v)
	{
		ALL_VV_i(2, el, =, v);
	}
	PX_INLINE void set(T x, T y)
	{
		el[0] = x;
		el[1] = y;
	}

	// 2D cross product (perp-dot): the z component of the 3D cross of (x,y,0) vectors.
	PX_INLINE T operator ^(const Vec2& v) const
	{
		return el[0] * v.el[1] - el[1] * v.el[0];
	}
};
typedef Vec2<Real> Vec2Real;
+
// 3D vector specialization.
template<typename T>
class Vec3 : public Vec<T, 3>
{
public:
	PX_INLINE Vec3() {}
	PX_INLINE Vec3(const Vec3& v)
	{
		ALL_VV_i(3, el, =, v);
	}
	PX_INLINE Vec3(const Vec<T, 3>& v)
	{
		ALL_VV_i(3, el, =, v);
	}
	PX_INLINE Vec3(T x, T y, T z)
	{
		set(x, y, z);
	}
	PX_INLINE Vec3& operator = (const Vec3& v)
	{
		ALL_VV_i(3, el, =, v);
		return *this;
	}

	PX_INLINE void set(const T* v)
	{
		ALL_VV_i(3, el, =, v);
	}
	PX_INLINE void set(T x, T y, T z)
	{
		el[0] = x;
		el[1] = y;
		el[2] = z;
	}
};
typedef Vec3<Real> Vec3Real;
+
+template<typename T>
+class Vec4 : public Vec<Real, 4>
+{
+public:
+ PX_INLINE Vec4() {}
+ PX_INLINE Vec4(const Vec4& v)
+ {
+ ALL_VV_i(4, el, =, v);
+ }
+ PX_INLINE Vec4(const Vec<T, 4>& v)
+ {
+ ALL_VV_i(4, el, =, v);
+ }
+ PX_INLINE Vec4(T x, T y, T z, T w)
+ {
+ set(x, y, z, w);
+ }
+ PX_INLINE Vec4& operator = (const Vec4& v)
+ {
+ ALL_VV_i(4, el, =, v);
+ return *this;
+ }
+
+ PX_INLINE void set(const T* v)
+ {
+ ALL_VV_i(4, el, =, v);
+ }
+ PX_INLINE void set(T x, T y, T z, T w)
+ {
+ el[0] = x;
+ el[1] = y;
+ el[2] = z;
+ el[3] = w;
+ }
+
+#if APEX_CSG_INLINE
+ PX_INLINE Vec operator - () const
+ {
+ Vec4Real r;
+ ALL_VV_i(4, r, =, -el);
+ return r;
+ }
+
+ PX_INLINE Vec4 operator + (const Vec4& v) const
+ {
+ Vec r;
+ ALL_VVV_i(4, r, =, el, +, v);
+ return r;
+ }
+ PX_INLINE Vec4 operator - (const Vec4& v) const
+ {
+ Vec r;
+ ALL_VVV_i(4, r, =, el, -, v);
+ return r;
+ }
+ PX_INLINE Vec4 operator * (const Vec4& v) const
+ {
+ Vec r;
+ ALL_VVV_i(4, r, =, el, *, v);
+ return r;
+ }
+ PX_INLINE Vec4 operator / (const Vec4& v) const
+ {
+ Vec4 r;
+ ALL_VVV_i(4, r, =, el, /, v);
+ return r;
+ }
+
+ PX_INLINE Vec4 operator * (Real v) const
+ {
+ Vec4 r;
+ ALL_VVS_i(4, r, = , el, *, v);
+ return r;
+ }
+ PX_INLINE Vec4 operator / (Real v) const
+ {
+ return *this * (1. / v);
+ }
+
+ PX_INLINE Vec4& operator += (const Vec4& v)
+ {
+ ALL_VV_i(4, el, +=, v);
+ return *this;
+ }
+ PX_INLINE Vec4& operator -= (const Vec4& v)
+ {
+ ALL_VV_i(4, el, -=, v);
+ return *this;
+ }
+ PX_INLINE Vec4& operator *= (const Vec4& v)
+ {
+ ALL_VV_i(4, el, *=, v);
+ return *this;
+ }
+ PX_INLINE Vec4& operator *= (Real v)
+ {
+ ALL_VS_i(4, el, *=, v);
+ return *this;
+ }
+ PX_INLINE Vec4& operator /= (Real v)
+ {
+ Real vInv = 1. / v;
+ return operator*=(vInv);
+ }
+ PX_INLINE Vec4& operator /= (const Vec4& v)
+ {
+ ALL_VV_i(4, el, /=, v);
+ return *this;
+ }
+#endif /* #if APEX_CGS_INLINE */
+
+ template<typename U> friend U dot(const Vec4<U>&, const Vec4<U>&);
+ PX_FORCE_INLINE Real operator | (const Vec4& v) const
+ {
+ return dot(*this, v);
+ }
+};
+typedef Vec4<Real> Vec4Real;
+
+template<typename T>
+PX_FORCE_INLINE T dot(const Vec4<T>& a, const Vec4<T>& b)
+{
+ Real r = 0;
+ ALL_SVV_i(sizeof(a)/sizeof(T), r, +=, a, *, b);
+ return r;
+}
+
+#if APEX_CSG_SSE
+
// Reinterpret a Vec4's storage as SSE registers (xy = elements 0-1, zw = elements 2-3,
// xyzw = all four as packed singles).  NOTE(review): only meaningful when sizeof(T)
// matches the lane width (__m128d assumes T == double, __m128 assumes 32-bit lanes);
// the 16-byte alignment required by the loads comes from the `aligned` base class.
template<typename T> PX_INLINE __m128d& xy(Vec4<T>& v) { return *reinterpret_cast< __m128d*>(&v[0]); }
template<typename T> PX_INLINE const __m128d& xy(const Vec4<T>& v) { return *reinterpret_cast<const __m128d*>(&v[0]); }
template<typename T> PX_INLINE __m128d& zw(Vec4<T>& v) { return *reinterpret_cast< __m128d*>(&v[2]); }
template<typename T> PX_INLINE const __m128d& zw(const Vec4<T>& v) { return *reinterpret_cast<const __m128d*>(&v[2]); }
template<typename T> PX_INLINE __m128& xyzw(Vec4<T>& v) { return *reinterpret_cast< __m128*>(&v[0]); }
template<typename T> PX_INLINE const __m128& xyzw(const Vec4<T>& v) { return *reinterpret_cast<const __m128*>(&v[0]); }
+
// SSE2 specialization of the 4-component dot product for double:
// multiplies the (x,y) and (z,w) pairs, adds them pairwise, then sums the two lanes.
template<>
PX_FORCE_INLINE double dot<double>(const Vec4<double>& a, const Vec4<double>& b)
{
	__declspec(align(16)) double r[2] = { 0., 0. };
	__m128d mresult;
	mresult = _mm_add_pd(_mm_mul_pd( xy(a), xy(b) ),
	                     _mm_mul_pd( zw(a), zw(b) ) );
	_mm_store_pd(r, mresult);
	return r[0] + r[1];
}
+
+#endif /* #if APEX_CSG_SSE */
+
+/* Position */
+
// Homogeneous point: a Vec4Real whose w component (el[3]) is always 1.
// Every constructor and the assignment operator enforce w == 1.
class Pos : public Vec4Real
{
public:

	PX_INLINE Pos()
	{
		el[3] = 1;	// x,y,z deliberately left uninitialized; only w is fixed
	}
	PX_INLINE Pos(Real x, Real y, Real z)
	{
		set(x, y, z);
	}
	PX_INLINE Pos(Real c)
	{
		set(c, c, c);
	}
	PX_INLINE Pos(physx::PxVec3 p)
	{
		set(p.x, p.y, p.z);
	}
	PX_INLINE Pos(const Vec<Real, 4>& v)
	{
		// NOTE(review): v's w is discarded (not divided through) - assumes v.w == 1.
		set(v[0], v[1], v[2]);
	}
	PX_INLINE Pos(const float* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Pos(const double* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Pos(const Pos& p)
	{
		set(p[0], p[1], p[2]);
	}
	PX_INLINE Pos& operator = (const Pos& p)
	{
		set(p[0], p[1], p[2]);
		return *this;
	}

	// Sets x,y,z and re-establishes the w == 1 invariant.
	PX_INLINE void set(Real x, Real y, Real z)
	{
		Vec4Real::set(x, y, z, (Real)1);
	}

};
+
+
+/* Direction */
+
// Homogeneous direction: a Vec4Real whose w component (el[3]) is always 0,
// so translations do not affect it.  Every constructor/assignment enforces w == 0.
class Dir : public Vec4Real
{
public:

	PX_INLINE Dir()
	{
		el[3] = 0;	// x,y,z deliberately left uninitialized; only w is fixed
	}
	PX_INLINE Dir(Real x, Real y, Real z)
	{
		set(x, y, z);
	}
	PX_INLINE Dir(Real c)
	{
		set(c, c, c);
	}
	PX_INLINE Dir(physx::PxVec3 p)
	{
		set(p.x, p.y, p.z);
	}
	PX_INLINE Dir(const Vec<Real, 4>& v)
	{
		// NOTE(review): v's w is discarded - assumes the source is direction-like (w == 0).
		set(v[0], v[1], v[2]);
	}
	PX_INLINE Dir(const float* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Dir(const double* v)
	{
		set((Real)v[0], (Real)v[1], (Real)v[2]);
	}
	PX_INLINE Dir(const Dir& d)
	{
		set(d[0], d[1], d[2]);
	}
	PX_INLINE Dir& operator = (const Dir& d)
	{
		set(d[0], d[1], d[2]);
		return *this;
	}

	// Sets x,y,z and re-establishes the w == 0 invariant.
	PX_INLINE void set(Real x, Real y, Real z)
	{
		Vec4Real::set(x, y, z, (Real)0);
	}

	// 3D cross product (w of the result is 0 by construction).
	PX_INLINE Dir operator ^(const Dir& d) const
	{
		return Dir(el[1] * d[2] - el[2] * d[1], el[2] * d[0] - el[0] * d[2], el[0] * d[1] - el[1] * d[0]);
	}

};
+
+
+/* Plane */
+
// Plane stored as (n.x, n.y, n.z, d), representing n.x + d = 0.
// Because a Pos has w == 1, the signed distance to a point p is simply p | *this.
class Plane : public Vec4Real
{
public:

	PX_INLINE Plane() {}
	PX_INLINE Plane(const Dir& n, Real d)
	{
		set(n, d);
	}
	// Plane with normal n passing through point p.
	PX_INLINE Plane(const Dir& n, const Pos& p)
	{
		set(n, p);
	}
	PX_INLINE Plane(const Vec<Real, 4>& v)
	{
		Vec4Real::set(v[0], v[1], v[2], v[3]);
	}
	PX_INLINE Plane(const Plane& p)
	{
		ALL_VV_i(4, el, =, p);
	}
	PX_INLINE Plane& operator = (const Plane& p)
	{
		ALL_VV_i(4, el, =, p);
		return *this;
	}

	PX_INLINE void set(const Dir& n, Real d)
	{
		ALL_VV_i(3, el, =, n);
		el[3] = d;
	}
	// Normal n and contained point p; d = -(n.p) so that distance(p) == 0.
	PX_INLINE void set(const Dir& n, const Pos& p)
	{
		ALL_VV_i(3, el, =, n);
		el[3] = -(n | p);
	}

	PX_INLINE Dir normal() const
	{
		return Dir(el[0], el[1], el[2]);
	}
	PX_INLINE Real d() const
	{
		return el[3];
	}
	// Signed distance (scaled by |normal| if the plane is not normalized).
	PX_INLINE Real distance(const Pos& p) const
	{
		return p | *this;
	}
	// Orthogonal projection of p onto the plane (assumes a unit normal).
	PX_INLINE Pos project(const Pos& p) const
	{
		return p - normal() * distance(p);
	}

	PX_INLINE Real normalize();
};
+
// Scales the plane so its normal has unit length; returns the normal's original
// length (0 if the normal was zero, in which case the plane is left unchanged).
// The d component is zeroed while computing the length (so only the normal
// contributes to the dot product), then restored BEFORE scaling - d is therefore
// divided by the length along with the normal, preserving the plane's geometry.
PX_INLINE Real
Plane::normalize()
{
	const Real oldD = el[3];
	el[3] = 0;
	const Real l2 = *this | *this;
	if (l2 == 0)
	{
		return 0;
	}
	const Real recipL = 1. / physx::PxSqrt(l2);
	el[3] = oldD;
	*this *= recipL;
	return recipL * l2;
}
+
+
+/* Matrix */
+
// 4x4 real matrix stored as four row vectors (el[i] is row i, as shown by
// operator*(Vec4Real), which dots each row with the operand).
__declspec(align(16)) class Mat4Real : public Vec<Vec4Real, 4>
{
public:

	PX_INLINE Mat4Real() {}
	// Scalar constructor: v times the identity (diagonal matrix).
	PX_INLINE Mat4Real(const Real v)
	{
		set(v);
	}
	// Array constructor: 16 values in row-major order.
	PX_INLINE Mat4Real(const Real* v)
	{
		set(v);
	}

	PX_INLINE void set(const Real v)
	{
		el[0].set(v, 0, 0, 0);
		el[1].set(0, v, 0, 0);
		el[2].set(0, 0, v, 0);
		el[3].set(0, 0, 0, v);
	}
	PX_INLINE void set(const Real* v)
	{
		ALL_i(4, el[i].set(v + 4 * i));
	}
	PX_INLINE void setCol(int colN, const Vec4Real& col)
	{
		ALL_i(4, el[i][colN] = col[i]);
	}

	// Matrix * column-vector.
	PX_INLINE Vec4Real operator * (const Vec4Real& v) const
	{
		Vec4Real r;
		ALL_VVS_i(4, r, =, el, |, v);
		return r;
	}
	PX_INLINE Mat4Real operator * (const Mat4Real& m) const
	{
		Mat4Real r((Real)0);
		for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j) for (int k = 0; k < 4; ++k)
		{
			r[i][j] += el[i][k] * m[k][j];
		}
		return r;
	}
	PX_INLINE Mat4Real operator * (Real s) const
	{
		Mat4Real r;
		ALL_VVS_i(4, r, =, el, *, s);
		return r;
	}
	PX_INLINE Mat4Real operator / (Real s) const
	{
		return *this * ((Real)1 / s);
	}

	PX_INLINE Mat4Real& operator *= (Real s)
	{
		ALL_VS_i(4, el, *=, s);
		return *this;
	}
	PX_INLINE Mat4Real& operator /= (Real s)
	{
		*this *= ((Real)1 / s);
		return *this;
	}

	PX_INLINE Vec4Real getCol(int colN) const
	{
		Vec4Real col;
		ALL_i(4, col[i] = el[i][colN]);
		return col;
	}
	// Scalar triple product of the first three rows.
	PX_INLINE Real det3() const
	{
		return el[0] | (Dir(el[1]) ^ Dir(el[2])); // Determinant of upper-left 3x3 block (same as full determinant if last row = (0,0,0,1))
	}
	PX_INLINE Mat4Real cof34() const; // Assumes last row = (0,0,0,1)
	PX_INLINE Mat4Real inverse34() const; // Assumes last row = (0,0,0,1)
};
+
// Cofactor computation for an affine matrix (last row assumed (0,0,0,1)).
// Rows 0-2 are pairwise cross products of the upper-left 3x3 rows (its cofactor rows);
// row 3 folds in the translation column, and r[3][3] ends up holding det3()
// (r[0] dot el[0] = the scalar triple product) - inverse34 relies on this slot.
PX_INLINE Mat4Real
Mat4Real::cof34() const
{
	Mat4Real r;
	r[0].set(el[1][1]*el[2][2] - el[1][2]*el[2][1], el[1][2]*el[2][0] - el[1][0]*el[2][2], el[1][0]*el[2][1] - el[1][1]*el[2][0], 0);
	r[1].set(el[2][1]*el[0][2] - el[2][2]*el[0][1], el[2][2]*el[0][0] - el[2][0]*el[0][2], el[2][0]*el[0][1] - el[2][1]*el[0][0], 0);
	r[2].set(el[0][1]*el[1][2] - el[0][2]*el[1][1], el[0][2]*el[1][0] - el[0][0]*el[1][2], el[0][0]*el[1][1] - el[0][1]*el[1][0], 0);
	r[3] = -el[0][3] * r[0] - el[1][3] * r[1] - el[2][3] * r[2];
	r[3][3] = r[0][0] * el[0][0] + r[0][1] * el[0][1] + r[0][2] * el[0][2];
	return r;
}
+
// Inverse of an affine matrix (last row assumed (0,0,0,1)): adjugate / determinant,
// where cof34() supplies the cofactors and stores the determinant in cof[3][3].
// Near-singular matrices (|det| below EPS_REAL^3, a volume-scale threshold) yield
// a zero upper block rather than dividing by ~0; the last row is reset to (0,0,0,1).
PX_INLINE Mat4Real
Mat4Real::inverse34() const
{
	const Mat4Real cof = cof34();
	Mat4Real inv;
	const Real recipDet = physx::PxAbs(cof[3][3]) > EPS_REAL * EPS_REAL * EPS_REAL ? 1 / cof[3][3] : (Real)0;
	for (int i = 0; i < 3; ++i)
	{
		for (int j = 0; j < 4; ++j)
		{
			inv[i][j] = cof[j][i] * recipDet;	// transpose of cofactors = adjugate
		}
	}
	inv[3].set(0, 0, 0, 1);
	return inv;
}
+
+PX_INLINE Mat4Real
+operator * (Real s, const Mat4Real& m)
+{
+ Mat4Real r;
+ ALL_VVS_i(4, r, =, m, *, s);
+ return r;
+}
+
+} // namespace ApexCSG
+
#endif // APEX_CSG_FAST_MATH_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath2.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath2.h
new file mode 100644
index 00000000..6f0f9ccf
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGFastMath2.h
@@ -0,0 +1,630 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_FAST_MATH_2_H
+#define APEX_CSG_FAST_MATH_2_H
+
+#include "ApexUsingNamespace.h"
+#include "PxMath.h"
+#include "PxVec3.h"
+
+#include "PsUtilities.h"
+
+#include "PxIntrinsics.h"
+#include <emmintrin.h>
+#include <fvec.h>
+
+#include <math.h>
+#include <float.h>
+
+#ifdef __SSE2__
+#define APEX_CSG_SSE
+#include <mmintrin.h>
+#include <emmintrin.h>
+#endif
+#ifdef __SSE3__
+#include <pmmintrin.h>
+#endif
+#ifdef __SSE4_1__
+#include <smmintrin.h>
+#endif
+
+namespace ApexCSG
+{
+
+/* Utilities */
+
template<typename T>
T square(T x)
{
	// Product of x with itself; cheaper and clearer than pow(x, 2).
	return x * x;
}
+
+
+/* Linear algebra */
+
+#define ALL_i( _D, _exp ) for( int i = 0; i < _D; ++i ) { _exp; }
+
+#ifndef APEX_CSG_LOOP_UNROLL
+#define APEX_CSG_LOOP_UNROLL 1
+#endif
+
+#ifndef APEX_CSG_SSE
+#define APEX_CSG_SSE 1
+#endif
+
+#ifndef APEX_CSG_INLINE
+#define APEX_CSG_INLINE 1
+#endif
+
+#ifndef APEX_CSG_ALIGN
+#define APEX_CSG_ALIGN 16
+#endif
+
+#if APEX_CSG_LOOP_UNROLL
+
+#define VEC_SIZE() sizeof(*this)/sizeof(Real)
+
+#define OP_VV(a,op,b,i) a[i] op b[i]
+#define OP_SV(a,op,b,i) a op b[i]
+#define OP_VS(a,op,b,i) a[i] op b
+#define OP_VVV(a,op1,b,op2,c,i) a[i] op1 b[i] op2 c[i]
+#define OP_SVV(a,op1,b,op2,c,i) a op1 b[i] op2 c[i]
+#define OP_VVS(a,op1,b,op2,c,i) a[i] op1 b[i] op2 c
+#define OP_D(_D) (_D == 0 ? 0 : (_D == 1 ? 1 : (_D == 2 ? 2 : (_D == 3 ? 3 : 3))))
+#define OP_NAME(_T) PX_CONCAT(OP_,_T)
+#define OP_2_NAME(_D) ALL_2_##_D /*PX_CONCAT(ALL_2_,OP_D(_D))*/
+#define OP_3_NAME(_D) ALL_3_##_D /*PX_CONCAT(ALL_3_,OP_D(_D))*/
+
+#define ALL_2_1( _T, a,op,b) OP_##_T (a,op,b,0)
+#define ALL_2_2( _T, a,op,b) ALL_2_1(_T,a,op,b); OP_##_T (a,op,b,1)
+#define ALL_2_3( _T, a,op,b) ALL_2_2(_T,a,op,b); OP_##_T (a,op,b,2)
+#define ALL_2_4( _T, a,op,b) ALL_2_3(_T,a,op,b); OP_##_T (a,op,b,3)
+#define ALL_VV_i( _D, a,op,b) OP_2_NAME(_D)(VV,a,op,b)
+#define ALL_SV_i( _D, a,op,b) OP_2_NAME(_D)(SV,a,op,b)
+#define ALL_VS_i( _D, a,op,b) OP_2_NAME(_D)(VS,a,op,b)
+
+#define ALL_3_1( _T, a,op1,b,op2,c) OP_NAME(_T) (a,op1,b,op2,c,0)
+#define ALL_3_2( _T, a,op1,b,op2,c) ALL_3_1(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,1)
+#define ALL_3_3( _T, a,op1,b,op2,c) ALL_3_2(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,2)
+#define ALL_3_4( _T, a,op1,b,op2,c) ALL_3_3(_T,a,op1,b,op2,c); OP_NAME(_T) (a,op1,b,op2,c,3)
+#define ALL_VVV_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(VVV,a,op1,b,op2,c)
+#define ALL_SVV_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(SVV,a,op1,b,op2,c)
+#define ALL_VVS_i( _D, a,op1,b,op2,c) OP_3_NAME(_D)(VVS,a,op1,b,op2,c)
+#define ALL_VVV( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(VVV,a,op1,b,op2,c)
+#define ALL_SVV( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(SVV,a,op1,b,op2,c)
+#define ALL_VVS( a,op1,b,op2,c) OP_3_NAME(VEC_SIZE())(VVS,a,op1,b,op2,c)
+
+#else
+
+#define ALL_VV_i( _D, a,op,b) ALL_i( _D, a[i] op b[i] )
+#define ALL_SV_i( _D, a,op,b) ALL_i( _D, a op b[i] )
+#define ALL_VS_i( _D, a,op,b) ALL_i( _D, a[i] op b )
+#define ALL_VVV_i( _D, a,op1,b,op2,c) ALL_i( _D, a[i] op1 b[i] op2 c[i] )
+#define ALL_SVV_i( _D, a,op1,b,op2,c) ALL_i( _D, a op1 b[i] op2 c[i] )
+#define ALL_VVS_i( _D, a,op1,b,op2,c) ALL_i( _D, a[i] op1 b[i] op2 c )
+
+#endif
+
+#if 1
+
+#define DEFINE_VEC(_D) \
+ public: \
+ typedef Vec<T,_D> VecD; \
+ PX_INLINE VecD() {} \
+ PX_INLINE T& operator [](int i) { return el[i]; } \
+ PX_INLINE const T& operator [](int i) const { return el[i]; } \
+ PX_INLINE VecD(const VecD& v) { set(v); } \
+ PX_INLINE VecD(const T& v) { set(v); } \
+ PX_INLINE VecD(const T* v) { set(v); } \
+ PX_INLINE VecD& operator=(const VecD& v) { ALL_VV_i(_D, el, =, v); return *this; } \
+ PX_INLINE VecD operator/(T v) const { return *this * ((T)1 / v); } \
+ PX_INLINE VecD operator+(const VecD& v) const { VecD r; ALL_VVV_i(_D, r, =, el, +, v); return r; } \
+ PX_INLINE VecD operator-(const VecD& v) const { VecD r; ALL_VVV_i(_D, r, =, el, -, v); return r; } \
+ PX_INLINE VecD operator*(const VecD& v) const { VecD r; ALL_VVV_i(_D, r, =, el, *, v); return r; } \
+ PX_INLINE VecD operator-( ) const { VecD r; ALL_VV_i( _D, r, =, -el); return r;} \
+ PX_INLINE VecD& operator+=(const VecD& v) {ALL_VV_i(_D, el, +=, v); return *this; } \
+ PX_INLINE VecD& operator-=(const VecD& v) {ALL_VV_i(_D, el, -=, v); return *this; } \
+ PX_INLINE VecD& operator*=(const VecD& v) {ALL_VV_i(_D, el, *=, v); return *this; } \
+ PX_INLINE VecD& operator/=(const VecD& v) {ALL_VV_i(_D, el, /=, v); return *this; } \
+ PX_INLINE void set(const VecD& v) { ALL_VV_i(_D, el, =, v); } \
+ PX_INLINE void set(const T* v) { ALL_VV_i(_D, el, =, v); } \
+ PX_INLINE void set(const T& v) { ALL_VS_i(_D, el, =, v); } \
+ PX_INLINE T lengthSquared() const { return *this | *this; } \
+ PX_INLINE T normalize() { const T l2 = *this | *this; if (l2 == (T)0) { return (T)0; } const T recipL = (T)1 / physx::PxSqrt(l2); *this *= recipL; return recipL * l2; } \
+ protected:T el[_D]; \
+
+ //PX_FORCE_INLINE T operator|(const VecD& v) const { T r = (T)0; ALL_SVV_i(_D, r, +=, el, *, v); return r; }
+#endif
+
+/* General vector */
+
+__declspec(align(APEX_CSG_ALIGN)) class aligned { };
+
+template<typename T, int D>
+class Vec : public aligned
+{
+
+};
+
+
+template<typename T, int D>
+PX_INLINE Vec<T, D>
+operator * (T s, const Vec<T, D>& v)
+{
+ Vec<T, D> r;
+ ALL_i(D, r[i] = s * v[i]);
+ //ALL_VVS_i(D, r, =, v, *, s);
+ return r;
+}
+
+template<typename T>
+class Vec<T, 2> : public aligned {
+ DEFINE_VEC(2);
+public:
+ typedef Vec<T,2> Vec2;
+
+ PX_INLINE Vec2(T x, T y)
+ {
+ set(x, y);
+ }
+
+ PX_INLINE void set(T x, T y)
+ {
+ el[0] = x;
+ el[1] = y;
+ }
+
+ PX_INLINE Real operator ^ (const Vec2& v) const
+ {
+ return el[0] * v.el[1] - el[1] * v.el[0];
+ }
+
+ PX_FORCE_INLINE T operator | (const Vec2& v) const
+ {
+ T r = (T)0;
+ ALL_SVV_i(2, r, +=, el, *, v);
+ return r;
+ }
+
+};
+
+template<typename T>
+class Vec<T, 3> : public aligned {
+ DEFINE_VEC(3);
+public:
+ typedef Vec<T,3> Vec3;
+
+ PX_INLINE Vec3(T x, T y, T z)
+ {
+ set(x, y, z);
+ }
+
+ PX_INLINE void set(T x, T y, T z)
+ {
+ el[0] = x;
+ el[1] = y;
+ el[2] = z;
+ }
+
+ PX_FORCE_INLINE T operator | (const Vec3& v) const
+ {
+ T r = (T)0;
+ ALL_SVV_i(3, r, +=, el, *, v);
+ return r;
+ }
+};
+
// 4-component vector specialization.  When SSE is enabled the aligned
// element storage can be viewed directly as SIMD registers (see accessors
// below), and the dot product is routed through the free dot() function.
template<typename T>
class Vec<T, 4> : public aligned {
	DEFINE_VEC(4);
public:
	typedef Vec<T,4> Vec4;

	PX_INLINE Vec4(T x, T y, T z, T w)
	{
		set(x, y, z, w);
	}

	PX_INLINE void set(T x, T y, T z, T w)
	{
		el[0] = x;
		el[1] = y;
		el[2] = z;
		el[3] = w;
	}

#if APEX_CSG_SSE

#if APEX_CSG_DBL
	// View the element storage as two SSE2 double pairs (xy and zw).
	// NOTE(review): these reinterpret_casts rely on the 16-byte alignment of
	// the 'aligned' base and on Real == double, and are technically
	// strict-aliasing violations — confirm the target compilers tolerate this.
	PX_INLINE __m128d& xy() { return *reinterpret_cast< __m128d*>(&el[0]); }
	PX_INLINE const __m128d& xy() const { return *reinterpret_cast<const __m128d*>(&el[0]); }
	PX_INLINE __m128d& zw() { return *reinterpret_cast< __m128d*>(&el[2]); }
	PX_INLINE const __m128d& zw() const { return *reinterpret_cast<const __m128d*>(&el[2]); }
#else
	// Single precision: all four components fit in one __m128.
	PX_INLINE __m128& xyzw() { return *reinterpret_cast< __m128*>(&el[0]); }
	PX_INLINE const __m128& xyzw() const { return *reinterpret_cast<const __m128*>(&el[0]); }
#endif /* #if APEX_CSG_DBL */

	// Dot product delegates to the free dot() (defined after the Vec4Real
	// typedef) so the SIMD implementations can be selected there.
	friend Real dot(const Vec4&, const Vec4&);
	PX_FORCE_INLINE Real operator | (const Vec4& v) const
	{
		return dot(*this, v);
	}

#endif /* #if APEX_CSG_SSE */
};
+
+/* Popular real vectors */
+
+typedef Vec<Real, 2> Vec2Real;
+typedef Vec<Real, 3> Vec3Real;
+typedef Vec<Real, 4> Vec4Real;
+
+#if APEX_CSG_SSE
+
+#if APEX_CSG_DBL
+
// 4-component dot product, double-precision SSE2 path.  The xy/zw halves
// are multiplied pairwise and summed vertically; the final horizontal add
// is done through an aligned store (the shuffle-based variant is left
// commented out below).
PX_FORCE_INLINE Real dot(const Vec4Real& a, const Vec4Real& b)
{
	/*
	__m128d mr = _mm_add_sd ( _mm_mul_pd ( a.xy(), b.xy()),
	_mm_mul_sd ( a.zw(), b.zw()) ) ;
	mr = _mm_add_sd ( _mm_unpackhi_pd ( mr , mr ), mr );
	double r;
	_mm_store_sd(&r, mr);
	return r;*/
	// NOTE(review): __declspec(align) is MSVC-specific while the SSE guard
	// tests the gcc-style __SSE2__ macro — confirm intended target compilers.
	__declspec(align(16)) double r[2] = { 0., 0. };
	__m128d mresult;
	// mresult = (ax*bx + az*bz, ay*by + aw*bw)
	mresult = _mm_add_pd(_mm_mul_pd( a.xy(), b.xy() ),
	_mm_mul_pd( a.zw(), b.zw() ) );
	_mm_store_pd(r, mresult);
	return r[0] + r[1];
}
+
+#else
+
+PX_FORCE_INLINE Real dot(const Vec4Real& a, const Vec4Real& b)
+{
+ float r;
+ _mm_store_ps(&s, _mm_dot_pos(a.xyzw(), b.xyzw()));
+ return r;
+}
+
+#endif /* #if APEX_CSG_DBL */
+
+#else
+
+PX_FORCE_INLINE Real dot(const Vec4Real& a, const Vec4Real& b)
+{
+ Real r = 0;
+ ALL_SVV_i(sizeof(a)/sizeof(Real), r, +=, a, *, b);
+ return r;
+}
+
+#endif /* #if APEX_CSG_SSE */
+
+/* Position */
+
+class Pos : public Vec4Real
+{
+public:
+
+ PX_INLINE Pos()
+ {
+ el[3] = 1;
+ }
+ PX_INLINE Pos(Real x, Real y, Real z)
+ {
+ set(x, y, z);
+ }
+ PX_INLINE Pos(Real c)
+ {
+ set(c, c, c);
+ }
+ PX_INLINE Pos(physx::PxVec3 p)
+ {
+ set(p.x, p.y, p.z);
+ }
+ PX_INLINE Pos(const Vec<Real, 4>& v)
+ {
+ set(v[0], v[1], v[2]);
+ }
+ PX_INLINE Pos(const float* v)
+ {
+ set((Real)v[0], (Real)v[1], (Real)v[2]);
+ }
+ PX_INLINE Pos(const double* v)
+ {
+ set((Real)v[0], (Real)v[1], (Real)v[2]);
+ }
+ PX_INLINE Pos(const Pos& p)
+ {
+ set(p[0], p[1], p[2]);
+ }
+ PX_INLINE Pos& operator = (const Pos& p)
+ {
+ set(p[0], p[1], p[2]);
+ return *this;
+ }
+
+ PX_INLINE void set(Real x, Real y, Real z)
+ {
+ Vec4Real::set(x, y, z, (Real)1);
+ }
+
+};
+
+
+/* Direction */
+
+class Dir : public Vec4Real
+{
+public:
+
+ PX_INLINE Dir()
+ {
+ el[3] = 0;
+ }
+ PX_INLINE Dir(Real x, Real y, Real z)
+ {
+ set(x, y, z);
+ }
+ PX_INLINE Dir(Real c)
+ {
+ set(c, c, c);
+ }
+ PX_INLINE Dir(physx::PxVec3 p)
+ {
+ set(p.x, p.y, p.z);
+ }
+ PX_INLINE Dir(const Vec<Real, 4>& v)
+ {
+ set(v[0], v[1], v[2]);
+ }
+ PX_INLINE Dir(const float* v)
+ {
+ set((Real)v[0], (Real)v[1], (Real)v[2]);
+ }
+ PX_INLINE Dir(const double* v)
+ {
+ set((Real)v[0], (Real)v[1], (Real)v[2]);
+ }
+ PX_INLINE Dir(const Dir& d)
+ {
+ set(d[0], d[1], d[2]);
+ }
+ PX_INLINE Dir& operator = (const Dir& d)
+ {
+ set(d[0], d[1], d[2]);
+ return *this;
+ }
+
+ PX_INLINE void set(Real x, Real y, Real z)
+ {
+ Vec4Real::set(x, y, z, (Real)0);
+ }
+
+ PX_INLINE Dir operator ^(const Dir& d) const
+ {
+ return Dir(el[1] * d[2] - el[2] * d[1], el[2] * d[0] - el[0] * d[2], el[0] * d[1] - el[1] * d[0]);
+ }
+
+};
+
+
+/* Plane */
+
+class Plane : public Vec4Real
+{
+public:
+
+ PX_INLINE Plane() {}
+ PX_INLINE Plane(const Dir& n, Real d)
+ {
+ set(n, d);
+ }
+ PX_INLINE Plane(const Dir& n, const Pos& p)
+ {
+ set(n, p);
+ }
+ PX_INLINE Plane(const Vec<Real, 4>& v)
+ {
+ Vec4Real::set(v[0], v[1], v[2], v[3]);
+ }
+ PX_INLINE Plane(const Plane& p)
+ {
+ //ALL_i(4, el[i] = p[i]);
+ ALL_VV_i(4, el, =, p);
+ }
+ PX_INLINE Plane& operator = (const Plane& p)
+ {
+ ALL_VV_i(4, el, =, p);
+ //ALL_i(4, el[i] = p[i]);
+ return *this;
+ }
+
+ PX_INLINE void set(const Dir& n, Real d)
+ {
+ //ALL_i(3, el[i] = n[i]);
+ ALL_VV_i(3, el, =, n);
+ el[3] = d;
+ }
+ PX_INLINE void set(const Dir& n, const Pos& p)
+ {
+ //ALL_i(3, el[i] = n[i]);
+ ALL_VV_i(3, el, =, n);
+ el[3] = -(n | p);
+ }
+
+ PX_INLINE Dir normal() const
+ {
+ return Dir(el[0], el[1], el[2]);
+ }
+ PX_INLINE Real d() const
+ {
+ return el[3];
+ }
+ PX_INLINE Real distance(const Pos& p) const
+ {
+ return p | *this;
+ }
+ PX_INLINE Pos project(const Pos& p) const
+ {
+ return p - normal() * distance(p);
+ }
+
+ PX_INLINE Real normalize();
+};
+
+PX_INLINE Real
+Plane::normalize()
+{
+ const Real oldD = el[3];
+ el[3] = 0;
+ const Real l2 = *this | *this;
+ if (l2 == 0)
+ {
+ return 0;
+ }
+ const Real recipL = 1. / physx::PxSqrt(l2);
+ el[3] = oldD;
+ *this *= recipL;
+ return recipL * l2;
+}
+
+
+/* Matrix */
+
+__declspec(align(16)) class Mat4Real : public Vec<Vec4Real, 4>
+{
+public:
+
+ PX_INLINE Mat4Real() {}
+ PX_INLINE Mat4Real(const Real v)
+ {
+ set(v);
+ }
+ PX_INLINE Mat4Real(const Real* v)
+ {
+ set(v);
+ }
+
+ PX_INLINE void set(const Real v)
+ {
+ el[0].set(v, 0, 0, 0);
+ el[1].set(0, v, 0, 0);
+ el[2].set(0, 0, v, 0);
+ el[3].set(0, 0, 0, v);
+ }
+ PX_INLINE void set(const Real* v)
+ {
+ ALL_i(4, el[i].set(v + 4 * i));
+ }
+ PX_INLINE void setCol(int colN, const Vec4Real& col)
+ {
+ ALL_i(4, el[i][colN] = col[i]);
+ }
+
+ PX_INLINE Vec4Real operator * (const Vec4Real& v) const
+ {
+ Vec4Real r;
+ //ALL_i(4, r[i] = el[i] | v);
+ ALL_VVS_i(4, r, =, el, |, v);
+ return r;
+ }
+ PX_INLINE Mat4Real operator * (const Mat4Real& m) const
+ {
+ Mat4Real r((Real)0);
+ for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j) for (int k = 0; k < 4; ++k)
+ {
+ r[i][j] += el[i][k] * m[k][j];
+ }
+ return r;
+ }
+ PX_INLINE Mat4Real operator * (Real s) const
+ {
+ Mat4Real r;
+ //ALL_i(4, r[i] = el[i] * s);
+ ALL_VVS_i(4, r, =, el, *, s);
+ return r;
+ }
+ PX_INLINE Mat4Real operator / (Real s) const
+ {
+ return *this * ((Real)1 / s);
+ }
+
+ PX_INLINE Mat4Real& operator *= (Real s)
+ {
+ //ALL_i(4, el[i] *= s);
+ ALL_VS_i(4, el, *=, s);
+ return *this;
+ }
+ PX_INLINE Mat4Real& operator /= (Real s)
+ {
+ *this *= ((Real)1 / s);
+ return *this;
+ }
+
+ PX_INLINE Vec4Real getCol(int colN) const
+ {
+ Vec4Real col;
+ ALL_i(4, col[i] = el[i][colN]);
+ return col;
+ }
+ PX_INLINE Real det3() const
+ {
+ return el[0] | (Dir(el[1]) ^ Dir(el[2])); // Determinant of upper-left 3x3 block (same as full determinant if last row = (0,0,0,1))
+ }
+ PX_INLINE Mat4Real cof34() const; // Assumes last row = (0,0,0,1)
+ PX_INLINE Mat4Real inverse34() const; // Assumes last row = (0,0,0,1)
+};
+
// Cofactor matrix of an affine transform (last row assumed (0,0,0,1)).
// Rows 0-2 are built from pairwise products of the upper-left 3x3 rows;
// row 3 folds in the translation column, and r[3][3] ends up holding the
// determinant of the 3x3 block.  Used to transform plane equations.
PX_INLINE Mat4Real
Mat4Real::cof34() const
{
	Mat4Real r;
	r[0].set(el[1][1]*el[2][2] - el[1][2]*el[2][1], el[1][2]*el[2][0] - el[1][0]*el[2][2], el[1][0]*el[2][1] - el[1][1]*el[2][0], 0);
	r[1].set(el[2][1]*el[0][2] - el[2][2]*el[0][1], el[2][2]*el[0][0] - el[2][0]*el[0][2], el[2][0]*el[0][1] - el[2][1]*el[0][0], 0);
	r[2].set(el[0][1]*el[1][2] - el[0][2]*el[1][1], el[0][2]*el[1][0] - el[0][0]*el[1][2], el[0][0]*el[1][1] - el[0][1]*el[1][0], 0);
	r[3] = -el[0][3] * r[0] - el[1][3] * r[1] - el[2][3] * r[2];	// translation terms
	r[3][3] = r[0][0] * el[0][0] + r[0][1] * el[0][1] + r[0][2] * el[0][2];	// det of 3x3 block
	return r;
}
+
// Inverse of an affine transform (last row assumed (0,0,0,1)):
// inverse = transpose(cofactor matrix) / determinant, where cof[3][3]
// holds the determinant (see cof34()).  If |det| is below tolerance the
// reciprocal is taken as 0, producing a zero upper 3x4 block rather than
// dividing by (near-)zero.
PX_INLINE Mat4Real
Mat4Real::inverse34() const
{
	const Mat4Real cof = cof34();
	Mat4Real inv;
	const Real recipDet = physx::PxAbs(cof[3][3]) > EPS_REAL * EPS_REAL * EPS_REAL ? 1 / cof[3][3] : (Real)0;
	for (int i = 0; i < 3; ++i)
	{
		for (int j = 0; j < 4; ++j)
		{
			inv[i][j] = cof[j][i] * recipDet;	// transpose while scaling
		}
	}
	inv[3].set(0, 0, 0, 1);	// keep the affine last row
	return inv;
}
+
+PX_INLINE Mat4Real
+operator * (Real s, const Mat4Real& m)
+{
+ Mat4Real r;
+ //ALL_i(4, r[i] = s * m[i]);
+ ALL_VVS_i(4, r, =, m, *, s);
+ return r;
+}
+
+} // namespace ApexCSG
+
+#endif // #define APEX_CSG_FAST_MATH_2_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGHull.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGHull.h
new file mode 100644
index 00000000..f8b25457
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGHull.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_HULL_H
+#define APEX_CSG_HULL_H
+
+#include "ApexUsingNamespace.h"
+#include "authoring/ApexCSGMath.h"
+#include "PsArray.h"
+#include "PxFileBuf.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+
+/* Convex hull that handles unbounded sets. */
+
// Convex hull supporting unbounded regions (all-space, rays, lines) as well
// as ordinary bounded polytopes.  Geometry is stored as face planes, edges,
// and a shared 'vectors' array holding vertex positions first and edge
// direction vectors after index vertexCount.
class Hull
{
public:
	// One hull edge.  An endpoint index >= vertexCount refers to a direction
	// vector in 'vectors' instead of a vertex, making the edge a ray or line
	// (see getType()).  m_indexF1/m_indexF2 are the two incident faces.
	struct Edge
	{
		uint32_t m_indexV0;
		uint32_t m_indexV1;
		uint32_t m_indexF1;
		uint32_t m_indexF2;
	};

	struct EdgeType
	{
		enum Enum
		{
			LineSegment,	// both endpoints are vertices
			Ray,		// one endpoint is a direction vector
			Line		// both endpoints are direction vectors
		};
	};

	// Default hull represents all of space (no bounding faces).
	PX_INLINE Hull()
	{
		setToAllSpace();
	}
	PX_INLINE Hull(const Hull& geom)
	{
		*this = geom;
	}

	// Reset to the unbounded "everything" hull.
	PX_INLINE void setToAllSpace()
	{
		clear();
		allSpace = true;
	}
	// Reset to the empty hull (contains nothing).
	PX_INLINE void setToEmptySet()
	{
		clear();
		emptySet = true;
	}

	// Clip the hull against the half-space of 'plane' (defined out-of-line).
	void intersect(const Plane& plane, Real distanceTol);

	// Apply affine transform tm; cofTM is its cofactor matrix, used for planes.
	PX_INLINE void transform(const Mat4Real& tm, const Mat4Real& cofTM);

	PX_INLINE uint32_t getFaceCount() const
	{
		return faces.size();
	}
	PX_INLINE const Plane& getFace(uint32_t faceIndex) const
	{
		return faces[faceIndex];
	}

	PX_INLINE uint32_t getEdgeCount() const
	{
		return edges.size();
	}
	PX_INLINE const Edge& getEdge(uint32_t edgeIndex) const
	{
		return edges[edgeIndex];
	}

	PX_INLINE uint32_t getVertexCount() const
	{
		return vertexCount;
	}
	// vertexIndex must be < vertexCount; the stored Vec4Real is viewed as a Pos.
	PX_INLINE const Pos& getVertex(uint32_t vertexIndex) const
	{
		return *(const Pos*)(vectors.begin() + vertexIndex);
	}

	PX_INLINE bool isEmptySet() const
	{
		return emptySet;
	}
	PX_INLINE bool isAllSpace() const
	{
		return allSpace;
	}

	Real calculateVolume() const;

	// Edge accessors
	// Edge type falls out of the index encoding: 0, 1, or 2 endpoints that
	// are direction vectors map to LineSegment, Ray, Line respectively.
	PX_INLINE EdgeType::Enum getType(const Edge& edge) const
	{
		return (EdgeType::Enum)((uint32_t)(edge.m_indexV0 >= vertexCount) + (uint32_t)(edge.m_indexV1 >= vertexCount));
	}
	PX_INLINE const Pos& getV0(const Edge& edge) const
	{
		return *(Pos*)(vectors.begin() + edge.m_indexV0);
	}
	PX_INLINE const Pos& getV1(const Edge& edge) const
	{
		return *(Pos*)(vectors.begin() + edge.m_indexV1);
	}
	// Only valid for Ray/Line edges, hence the assert on the index encoding.
	PX_INLINE const Dir& getDir(const Edge& edge) const
	{
		PX_ASSERT(edge.m_indexV1 >= vertexCount);
		return *(Dir*)(vectors.begin() + edge.m_indexV1);
	}
	PX_INLINE uint32_t getF1(const Edge& edge) const
	{
		return edge.m_indexF1;
	}
	PX_INLINE uint32_t getF2(const Edge& edge) const
	{
		return edge.m_indexF2;
	}

	// Serialization
	void serialize(physx::PxFileBuf& stream) const;
	void deserialize(physx::PxFileBuf& stream, uint32_t version);

protected:
	PX_INLINE void clear();

	// Debug validation of face/edge/vertex relationships (defined out-of-line).
	bool testConsistency(Real distanceTol, Real angleTol) const;

	// Faces
	physx::Array<Plane> faces;
	physx::Array<Edge> edges;
	physx::Array<Vec4Real> vectors;
	uint32_t vertexCount; // vectors[i], i >= vertexCount, are used to store vectors for ray and line edges
	bool allSpace;
	bool emptySet;
};
+
// Apply an affine transform to the hull.  Plane equations do not transform
// with tm itself; they transform with the cofactor matrix (proportional to
// the inverse transpose), which the caller supplies as cofTM.  Vertex
// positions and edge direction vectors transform directly with tm.
PX_INLINE void
Hull::transform(const Mat4Real& tm, const Mat4Real& cofTM)
{
	for (uint32_t i = 0; i < faces.size(); ++i)
	{
		Plane& face = faces[i];
		face = cofTM * face;
		face.normalize();	// re-unitize the normal after scale/shear
	}

	for (uint32_t i = 0; i < vectors.size(); ++i)
	{
		Vec4Real& vector = vectors[i];
		vector = tm * vector;
	}
}
+
+PX_INLINE void
+Hull::clear()
+{
+ vectors.reset();
+ edges.reset();
+ faces.reset();
+ vertexCount = 0;
+ allSpace = false;
+ emptySet = false;
+}
+
+
+}; // namespace ApexCSG
+
+#endif
+
+#endif // #define APEX_CSG_HULL_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGMath.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGMath.h
new file mode 100644
index 00000000..bdc605ea
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGMath.h
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_MATH_H
+#define APEX_CSG_MATH_H
+
+#include "ApexUsingNamespace.h"
+#include "PxMath.h"
+#include "PxVec3.h"
+
+#include <math.h>
+#include <float.h>
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+
+// APEX_CSG_DBL may be defined externally
+#ifndef APEX_CSG_DBL
+#define APEX_CSG_DBL 1
+#endif
+
+
+namespace ApexCSG
+{
+#if !(APEX_CSG_DBL)
+typedef float Real;
+#define MAX_REAL FLT_MAX
+#define EPS_REAL FLT_EPSILON
+#else
+typedef double Real;
+#define MAX_REAL DBL_MAX
+#define EPS_REAL DBL_EPSILON
+#endif
+
+
+/* Utilities */
+
template<typename T>
T square(T value)
{
	// value squared, without the cost of a general pow() call.
	return value * value;
}
+
+
+/* Linear algebra */
+
+#define ALL_i( _D, _exp ) for( int i = 0; i < _D; ++i ) { _exp; }
+
+
+/* General vector */
+
+template<typename T, int D>
+class Vec
+{
+public:
+
+ PX_INLINE Vec() {}
+ PX_INLINE Vec(const T& v)
+ {
+ set(v);
+ }
+ PX_INLINE Vec(const T* v)
+ {
+ set(v);
+ }
+
+ PX_INLINE void set(const T& v)
+ {
+ ALL_i(D, el[i] = v);
+ }
+ PX_INLINE void set(const T* v)
+ {
+ ALL_i(D, el[i] = v[i]);
+ }
+
+ PX_INLINE T& operator [](int i)
+ {
+ return el[i];
+ }
+ PX_INLINE const T& operator [](int i) const
+ {
+ return el[i];
+ }
+
+ PX_INLINE T& operator [](unsigned i)
+ {
+ return el[i];
+ }
+ PX_INLINE const T& operator [](unsigned i) const
+ {
+ return el[i];
+ }
+
+ PX_INLINE Vec operator - () const
+ {
+ Vec r;
+ ALL_i(D, r[i] = -el[i]);
+ return r;
+ }
+
+ PX_INLINE Vec operator + (const Vec& v) const
+ {
+ Vec r;
+ ALL_i(D, r[i] = el[i] + v[i]);
+ return r;
+ }
+ PX_INLINE Vec operator - (const Vec& v) const
+ {
+ Vec r;
+ ALL_i(D, r[i] = el[i] - v[i]);
+ return r;
+ }
+ PX_INLINE Vec operator * (const Vec& v) const
+ {
+ Vec r;
+ ALL_i(D, r[i] = el[i] * v[i]);
+ return r;
+ }
+ PX_INLINE Vec operator / (const Vec& v) const
+ {
+ Vec r;
+ ALL_i(D, r[i] = el[i] / v[i]);
+ return r;
+ }
+
+ PX_INLINE Vec operator * (T v) const
+ {
+ Vec r;
+ ALL_i(D, r[i] = el[i] * v);
+ return r;
+ }
+ PX_INLINE Vec operator / (T v) const
+ {
+ return *this * ((T)1 / v);
+ }
+
+ PX_INLINE Vec& operator += (const Vec& v)
+ {
+ ALL_i(D, el[i] += v[i]);
+ return *this;
+ }
+ PX_INLINE Vec& operator -= (const Vec& v)
+ {
+ ALL_i(D, el[i] -= v[i]);
+ return *this;
+ }
+ PX_INLINE Vec& operator *= (const Vec& v)
+ {
+ ALL_i(D, el[i] *= v[i]);
+ return *this;
+ }
+ PX_INLINE Vec& operator /= (const Vec& v)
+ {
+ ALL_i(D, el[i] /= v[i]);
+ return *this;
+ }
+
+ PX_INLINE T operator | (const Vec& v) const
+ {
+ T r = (T)0;
+ ALL_i(D, r += el[i] * v[i]);
+ return r;
+ }
+
+ PX_INLINE T normalize();
+
+ PX_INLINE T lengthSquared() const
+ {
+ return *this | *this;
+ }
+
+protected:
+ T el[D];
+};
+
+template<typename T, int D>
+PX_INLINE T
+Vec<T, D>::normalize()
+{
+ const T l2 = *this | *this;
+ if (l2 == (T)0)
+ {
+ return (T)0;
+ }
+ const T recipL = (T)1 / physx::PxSqrt(l2);
+ *this *= recipL;
+ return recipL * l2;
+}
+
+template<typename T, int D>
+PX_INLINE Vec<T, D>
+operator * (T s, const Vec<T, D>& v)
+{
+ Vec<T, D> r;
+ ALL_i(D, r[i] = s * v[i]);
+ return r;
+}
+
+
+/* Popular real vectors */
+
+class Vec2Real : public Vec<Real, 2>
+{
+public:
+ PX_INLINE Vec2Real() : Vec<Real, 2>()
+ {
+ }
+ PX_INLINE Vec2Real(const Vec2Real& v) : Vec<Real, 2>()
+ {
+ ALL_i(2, el[i] = v[i]);
+ }
+ PX_INLINE Vec2Real(const Vec<Real, 2>& v) : Vec<Real, 2>()
+ {
+ ALL_i(2, el[i] = v[i]);
+ }
+ PX_INLINE Vec2Real(Real x, Real y) : Vec<Real, 2>()
+ {
+ set(x, y);
+ }
+ PX_INLINE Vec2Real& operator = (const Vec2Real& v)
+ {
+ ALL_i(2, el[i] = v[i]);
+ return *this;
+ }
+
+ PX_INLINE void set(const Real* v)
+ {
+ ALL_i(2, el[i] = v[i]);
+ }
+ PX_INLINE void set(Real x, Real y)
+ {
+ el[0] = x;
+ el[1] = y;
+ }
+
+ PX_INLINE Real operator ^(const Vec2Real& v) const
+ {
+ return el[0] * v.el[1] - el[1] * v.el[0];
+ }
+
+ PX_INLINE Vec2Real perp() const
+ {
+ Vec2Real result;
+ result.el[0] = el[1];
+ result.el[1] = -el[0];
+ return result;
+ }
+};
+
+class Vec4Real : public Vec<Real, 4>
+{
+public:
+ PX_INLINE Vec4Real() : Vec<Real, 4>()
+ {
+ }
+ PX_INLINE Vec4Real(const Vec4Real& v) : Vec<Real, 4>()
+ {
+ ALL_i(4, el[i] = v[i]);
+ }
+ PX_INLINE Vec4Real(const Vec<Real, 4>& v) : Vec<Real, 4>()
+ {
+ ALL_i(4, el[i] = v[i]);
+ }
+ PX_INLINE Vec4Real(Real x, Real y, Real z, Real w) : Vec<Real, 4>()
+ {
+ set(x, y, z, w);
+ }
+ PX_INLINE Vec4Real& operator = (const Vec4Real& v)
+ {
+ ALL_i(4, el[i] = v[i]);
+ return *this;
+ }
+
+ PX_INLINE void set(const Real* v)
+ {
+ ALL_i(4, el[i] = v[i]);
+ }
+ PX_INLINE void set(Real x, Real y, Real z, Real w)
+ {
+ el[0] = x;
+ el[1] = y;
+ el[2] = z;
+ el[3] = w;
+ }
+};
+
+
+/* Position */
+
+class Pos : public Vec4Real
+{
+public:
+
+ PX_INLINE Pos() : Vec4Real()
+ {
+ el[3] = 1;
+ }
+ PX_INLINE Pos(Real x, Real y, Real z) : Vec4Real()
+ {
+ set(x, y, z);
+ }
+ PX_INLINE Pos(Real c) : Vec4Real()
+ {
+ set(c, c, c);
+ }
+ PX_INLINE Pos(physx::PxVec3 p) : Vec4Real()
+ {
+ set(p.x, p.y, p.z);
+ }
+ PX_INLINE Pos(const Vec<Real, 4>& v) : Vec4Real()
+ {
+ set(v[0], v[1], v[2]);
+ }
+ PX_INLINE Pos(const float* v) : Vec4Real()
+ {
+ set((Real)v[0], (Real)v[1], (Real)v[2]);
+ }
+ PX_INLINE Pos(const double* v) : Vec4Real()
+ {
+ set((Real)v[0], (Real)v[1], (Real)v[2]);
+ }
+ PX_INLINE Pos(const Pos& p) : Vec4Real()
+ {
+ set(p[0], p[1], p[2]);
+ }
+ PX_INLINE Pos& operator = (const Pos& p)
+ {
+ set(p[0], p[1], p[2]);
+ return *this;
+ }
+
+ PX_INLINE void set(Real x, Real y, Real z)
+ {
+ Vec4Real::set(x, y, z, (Real)1);
+ }
+};
+
+
+/* Direction */
+
+class Dir : public Vec4Real
+{
+public:
+
+ PX_INLINE Dir() : Vec4Real()
+ {
+ el[3] = 0;
+ }
+ PX_INLINE Dir(Real x, Real y, Real z) : Vec4Real()
+ {
+ set(x, y, z);
+ }
+ PX_INLINE Dir(Real c) : Vec4Real()
+ {
+ set(c, c, c);
+ }
+ PX_INLINE Dir(physx::PxVec3 p) : Vec4Real()
+ {
+ set(p.x, p.y, p.z);
+ }
+ PX_INLINE Dir(const Vec<Real, 4>& v) : Vec4Real()
+ {
+ set(v[0], v[1], v[2]);
+ }
+ PX_INLINE Dir(const float* v) : Vec4Real()
+ {
+ set((Real)v[0], (Real)v[1], (Real)v[2]);
+ }
+ PX_INLINE Dir(const double* v) : Vec4Real()
+ {
+ set((Real)v[0], (Real)v[1], (Real)v[2]);
+ }
+ PX_INLINE Dir(const Dir& d) : Vec4Real()
+ {
+ set(d[0], d[1], d[2]);
+ }
+ PX_INLINE Dir& operator = (const Dir& d)
+ {
+ set(d[0], d[1], d[2]);
+ return *this;
+ }
+
+ PX_INLINE void set(Real x, Real y, Real z)
+ {
+ Vec4Real::set(x, y, z, (Real)0);
+ }
+
+ PX_INLINE Dir cross(const Dir& d) const // Simple cross-product
+ {
+ return Dir(el[1] * d[2] - el[2] * d[1], el[2] * d[0] - el[0] * d[2], el[0] * d[1] - el[1] * d[0]);
+ }
+
+ PX_INLINE Real dot(const Dir& d) const // Simple dot-product
+ {
+ return el[0]*d[0] + el[1]*d[1] + el[2]*d[2];
+ }
+
+ PX_INLINE Dir operator ^(const Dir& d) const // Uses an improvement step for more accuracy
+ {
+ const Dir c = cross(d); // Cross-product gives perpendicular
+ const Real c2 = c|c;
+ if (c2 != 0.0f)
+ {
+ return c + ((dot(c))*(c.cross(d)) + (d|c)*(cross(c)))/c2;
+ }
+ return c; // Improvement to (*this d)^T(c) = (0)
+ }
+};
+
+
+/* Plane */
+
+class Plane : public Vec4Real
+{
+public:
+
+ PX_INLINE Plane() : Vec4Real() {}
+ PX_INLINE Plane(const Dir& n, Real d) : Vec4Real()
+ {
+ set(n, d);
+ }
+ PX_INLINE Plane(const Dir& n, const Pos& p) : Vec4Real()
+ {
+ set(n, p);
+ }
+ PX_INLINE Plane(const Vec<Real, 4>& v) : Vec4Real()
+ {
+ Vec4Real::set(v[0], v[1], v[2], v[3]);
+ }
+ PX_INLINE Plane(const Plane& p) : Vec4Real()
+ {
+ ALL_i(4, el[i] = p[i]);
+ }
+ PX_INLINE Plane& operator = (const Plane& p)
+ {
+ ALL_i(4, el[i] = p[i]);
+ return *this;
+ }
+
+ PX_INLINE void set(const Dir& n, Real d)
+ {
+ ALL_i(3, el[i] = n[i]);
+ el[3] = d;
+ }
+ PX_INLINE void set(const Dir& n, const Pos& p)
+ {
+ ALL_i(3, el[i] = n[i]);
+ el[3] = -(n | p);
+ }
+
+ PX_INLINE Dir normal() const
+ {
+ return Dir(el[0], el[1], el[2]);
+ }
+ PX_INLINE Real d() const
+ {
+ return el[3];
+ }
+ PX_INLINE Real distance(const Pos& p) const
+ {
+ return p | *this;
+ }
+ PX_INLINE Pos project(const Pos& p) const
+ {
+ return p - normal() * distance(p);
+ }
+
+ PX_INLINE Real normalize();
+};
+
// Scale the plane equation so the normal (x,y,z) has unit length.
// Returns the normal's original length (recipL * l2 == sqrt(l2)), or 0
// for a zero normal.
PX_INLINE Real
Plane::normalize()
{
	const Real oldD = el[3];
	el[3] = 0;	// exclude d from the dot product below
	const Real l2 = *this | *this;
	if (l2 == 0)
	{
		return 0;	// NOTE(review): leaves el[3] zeroed on failure — confirm intended
	}
	Real recipL = 1 / physx::PxSqrt(l2);
	recipL *= (Real)1.5 - (Real)0.5*l2*recipL*recipL;	// Newton-Raphson refinement of 1/sqrt(l2)
	recipL *= (Real)1.5 - (Real)0.5*l2*recipL*recipL;	// second refinement step for extra precision
	el[3] = oldD;
	*this *= recipL;
	return recipL * l2;
}
+
+
+/* Matrix */
+
+class Mat4Real : public Vec<Vec4Real, 4>
+{
+public:
+
+ PX_INLINE Mat4Real() {}
+ PX_INLINE Mat4Real(const Real v)
+ {
+ set(v);
+ }
+ PX_INLINE Mat4Real(const Real* v)
+ {
+ set(v);
+ }
+
+ PX_INLINE void set(const Real v)
+ {
+ el[0].set(v, 0, 0, 0);
+ el[1].set(0, v, 0, 0);
+ el[2].set(0, 0, v, 0);
+ el[3].set(0, 0, 0, v);
+ }
+ PX_INLINE void set(const Real* v)
+ {
+ ALL_i(4, el[i].set(v + 4 * i));
+ }
+ PX_INLINE void setCol(int colN, const Vec4Real& col)
+ {
+ ALL_i(4, el[i][colN] = col[i]);
+ }
+
+ PX_INLINE Vec4Real operator * (const Vec4Real& v) const
+ {
+ Vec4Real r;
+ ALL_i(4, r[i] = el[i] | v);
+ return r;
+ }
+ PX_INLINE Mat4Real operator + (const Mat4Real& m) const
+ {
+ Mat4Real r;
+ for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j)
+ {
+ r[i][j] = el[i][j] + m[i][j];
+ }
+ return r;
+ }
+ PX_INLINE Mat4Real operator - (const Mat4Real& m) const
+ {
+ Mat4Real r;
+ for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j)
+ {
+ r[i][j] = el[i][j] - m[i][j];
+ }
+ return r;
+ }
+ PX_INLINE Mat4Real operator * (const Mat4Real& m) const
+ {
+ Mat4Real r((Real)0);
+ for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j) for (int k = 0; k < 4; ++k)
+ {
+ r[i][j] += el[i][k] * m[k][j];
+ }
+ return r;
+ }
+ PX_INLINE Mat4Real operator * (Real s) const
+ {
+ Mat4Real r;
+ ALL_i(4, r[i] = el[i] * s);
+ return r;
+ }
+ PX_INLINE Mat4Real operator / (Real s) const
+ {
+ return *this * ((Real)1 / s);
+ }
+
+ PX_INLINE Mat4Real& operator *= (Real s)
+ {
+ ALL_i(4, el[i] *= s);
+ return *this;
+ }
+ PX_INLINE Mat4Real& operator /= (Real s)
+ {
+ *this *= ((Real)1 / s);
+ return *this;
+ }
+
+ PX_INLINE Vec4Real getCol(int colN) const
+ {
+ Vec4Real col;
+ ALL_i(4, col[i] = el[i][colN]);
+ return col;
+ }
+ PX_INLINE Real det3() const
+ {
+ return el[0] | (Dir(el[1]) ^ Dir(el[2])); // Determinant of upper-left 3x3 block (same as full determinant if last row = (0,0,0,1))
+ }
+ PX_INLINE Mat4Real cof34() const; // Assumes last row = (0,0,0,1)
+ PX_INLINE Mat4Real inverse34() const; // Assumes last row = (0,0,0,1)
+ PX_INLINE Mat4Real transpose() const
+ {
+ Mat4Real r;
+ for (int i = 0; i < 4; ++i) for (int j = 0; j < 4; ++j)
+ {
+ r[i][j] = el[j][i];
+ }
+ return r;
+ }
+};
+
// Cofactor matrix of an affine transform (last row assumed (0,0,0,1)).
// Rows 0-2 are built from pairwise products of the upper-left 3x3 rows;
// row 3 folds in the translation column, and r[3][3] ends up holding the
// determinant of the 3x3 block.  Used to transform plane equations.
PX_INLINE Mat4Real
Mat4Real::cof34() const
{
	Mat4Real r;
	r[0].set(el[1][1]*el[2][2] - el[1][2]*el[2][1], el[1][2]*el[2][0] - el[1][0]*el[2][2], el[1][0]*el[2][1] - el[1][1]*el[2][0], 0);
	r[1].set(el[2][1]*el[0][2] - el[2][2]*el[0][1], el[2][2]*el[0][0] - el[2][0]*el[0][2], el[2][0]*el[0][1] - el[2][1]*el[0][0], 0);
	r[2].set(el[0][1]*el[1][2] - el[0][2]*el[1][1], el[0][2]*el[1][0] - el[0][0]*el[1][2], el[0][0]*el[1][1] - el[0][1]*el[1][0], 0);
	r[3] = -el[0][3] * r[0] - el[1][3] * r[1] - el[2][3] * r[2];	// translation terms
	r[3][3] = r[0][0] * el[0][0] + r[0][1] * el[0][1] + r[0][2] * el[0][2];	// det of 3x3 block
	return r;
}
+
// Inverse of an affine transform (last row assumed (0,0,0,1)):
// inverse = transpose(cofactor matrix) / determinant, where cof[3][3]
// holds the determinant (see cof34()).  If |det| is below tolerance the
// reciprocal is taken as 0, producing a zero upper 3x4 block rather than
// dividing by (near-)zero.
PX_INLINE Mat4Real
Mat4Real::inverse34() const
{
	const Mat4Real cof = cof34();
	Mat4Real inv;
	const Real recipDet = physx::PxAbs(cof[3][3]) > EPS_REAL * EPS_REAL * EPS_REAL ? 1 / cof[3][3] : (Real)0;
	for (int i = 0; i < 3; ++i)
	{
		for (int j = 0; j < 4; ++j)
		{
			inv[i][j] = cof[j][i] * recipDet;	// transpose while scaling
		}
	}
	inv[3].set(0, 0, 0, 1);	// keep the affine last row
	return inv;
}
+
+PX_INLINE Mat4Real
+operator * (Real s, const Mat4Real& m)
+{
+ Mat4Real r;
+ ALL_i(4, r[i] = s * m[i]);
+ return r;
+}
+
+} // namespace ApexCSG
+
+#endif // #define !WITHOUT_APEX_AUTHORING
+
+#endif // #define APEX_CSG_MATH_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexCSGSerialization.h b/APEX_1.4/shared/internal/include/authoring/ApexCSGSerialization.h
new file mode 100644
index 00000000..ff77d2dc
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexCSGSerialization.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef APEX_CSG_SERIALIZATION_H
+#define APEX_CSG_SERIALIZATION_H
+
+#include "ApexUsingNamespace.h"
+#include "ApexSharedUtils.h"
+#include "ApexStream.h"
+#include "authoring/ApexCSGDefs.h"
+#include "authoring/ApexCSGHull.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace nvidia
+{
+namespace apex
+{
+
+/* Version for serialization */
+struct Version
+{
+ enum Enum
+ {
+ Initial = 0,
+ RevisedMeshTolerances,
+ UsingOnlyPositionDataInVertex,
+ SerializingTriangleFrames,
+ UsingGSA,
+ SerializingMeshBounds,
+ AddedInternalTransform,
+ IncidentalMeshDistinction,
+
+ Count,
+ Current = Count - 1
+ };
+};
+
+
+// Vec<T,D>
+template<typename T, int D>
+PX_INLINE physx::PxFileBuf&
+operator << (physx::PxFileBuf& stream, const ApexCSG::Vec<T, D>& v)
+{
+ for (uint32_t i = 0; i < D; ++i)
+ {
+ stream << v[(int32_t)i];
+ }
+ return stream;
+}
+
+template<typename T, int D>
+PX_INLINE physx::PxFileBuf&
+operator >> (physx::PxFileBuf& stream, ApexCSG::Vec<T, D>& v)
+{
+ for (uint32_t i = 0; i < D; ++i)
+ {
+ stream >> v[(int32_t)i];
+ }
+
+ return stream;
+}
+
+
+// Edge
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Hull::Edge& e)
+{
+ stream << e.m_indexV0 << e.m_indexV1 << e.m_indexF1 << e.m_indexF2;
+}
+
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Hull::Edge& e)
+{
+ PX_UNUSED(version); // Initial
+
+ stream >> e.m_indexV0 >> e.m_indexV1 >> e.m_indexF1 >> e.m_indexF2;
+}
+
+
+// Region
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Region& r)
+{
+ stream << r.side;
+}
+
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Region& r)
+{
+ if (version < Version::UsingGSA)
+ {
+ ApexCSG::Hull hull;
+ hull.deserialize(stream, version);
+ }
+
+ stream >> r.side;
+}
+
+
+// Surface
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Surface& s)
+{
+ stream << s.planeIndex;
+ stream << s.triangleIndexStart;
+ stream << s.triangleIndexStop;
+ stream << s.totalTriangleArea;
+}
+
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Surface& s)
+{
+ PX_UNUSED(version); // Initial
+
+ stream >> s.planeIndex;
+ stream >> s.triangleIndexStart;
+ stream >> s.triangleIndexStop;
+ stream >> s.totalTriangleArea;
+}
+
+
+// Triangle
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Triangle& t)
+{
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ stream << t.vertices[i];
+ }
+ stream << t.submeshIndex;
+ stream << t.smoothingMask;
+ stream << t.extraDataIndex;
+ stream << t.normal;
+ stream << t.area;
+}
+
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Triangle& t)
+{
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ stream >> t.vertices[i];
+ if (version < Version::UsingOnlyPositionDataInVertex)
+ {
+ ApexCSG::Dir v;
+ stream >> v; // normal
+ stream >> v; // tangent
+ stream >> v; // binormal
+ ApexCSG::UV uv;
+ for (uint32_t uvN = 0; uvN < VertexFormat::MAX_UV_COUNT; ++uvN)
+ {
+ stream >> uv; // UVs
+ }
+ ApexCSG::Color c;
+ stream >> c; // color
+ }
+ }
+ stream >> t.submeshIndex;
+ stream >> t.smoothingMask;
+ stream >> t.extraDataIndex;
+ stream >> t.normal;
+ stream >> t.area;
+}
+
+// Interpolator
+PX_INLINE void
+serialize(physx::PxFileBuf& stream, const ApexCSG::Interpolator& t)
+{
+ t.serialize(stream);
+}
+
+PX_INLINE void
+deserialize(physx::PxFileBuf& stream, uint32_t version, ApexCSG::Interpolator& t)
+{
+ t.deserialize(stream, version);
+}
+
+}
+}; // namespace nvidia::apex
+
+#endif
+
+#endif // #define APEX_CSG_SERIALIZATION_H
diff --git a/APEX_1.4/shared/internal/include/authoring/ApexGSA.h b/APEX_1.4/shared/internal/include/authoring/ApexGSA.h
new file mode 100644
index 00000000..1e66b7b4
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/ApexGSA.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef APEX_GSA_H
+#define APEX_GSA_H
+
+
+#include "ApexCSGMath.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+namespace GSA
+{
+
+// Utility vector format translation
+inline physx::PxVec3 toPxVec3(const Vec4Real& p)
+{
+ return physx::PxVec3(static_cast<float>(p[0]), static_cast<float>(p[1]), static_cast<float>(p[2]));
+}
+
+
+/*** Compact implementation of the void simplex algorithm for D = 3 ***/
+
+typedef physx::PxF32 real;
+
+/*
+ The implementation of farthest_halfspace should return the half-space "most below" the given point. The point
+ is represented by a vector in projective coordinates, and its last element (point[3]) will not necessarily equal 1.
+ However, point[3] will be non-negative. The plane returned is the boundary of the half-space found, and is also
+ represented as a vector in projective coordinates (the coefficients of the plane equation). The plane vector
+ returned should have the greatest dot product with the input point.
+
+ plane = the returned half-space boundary
+ point = the input point
+ returns the dot product of point and plane
+*/
+struct VS3D_Halfspace_Set
+{
+ virtual real farthest_halfspace(real plane[4], const real point[4]) = 0;
+};
+
+
+#define VS3D_HIGH_ACCURACY 1
+#define VS3D_UNNORMALIZED_PLANE_HANDLING 0 // 0 = planes must be normalized, 1 = planes must be near-normalized, 2 = planes may be arbitrary
+#define REAL_DOUBLE 0
+
+
+#if VS3D_UNNORMALIZED_PLANE_HANDLING == 1
+// Returns approximation to 1/sqrt(x)
+inline real vs3d_recip_sqrt(real x)
+{
+ real y = (real)1.5 - (real)0.5*x;
+#if REAL_DOUBLE
+ y *= (real)1.5 - (real)0.5*x*y*y; // Perform another iteration for doubles, to handle the case where float-normalized normals are converted to double-precision
+#endif
+ return y;
+}
+#elif VS3D_UNNORMALIZED_PLANE_HANDLING == 2
+#include <cmath>
+inline real vs3d_recip_sqrt(real x) { return 1/sqrt(x); }
+#elif VS3D_UNNORMALIZED_PLANE_HANDLING != 0
+#error Unrecognized value given for VS3D_UNNORMALIZED_PLANE_HANDLING. Please set to 0, 1, or 2.
+#endif
+
+
+// Simple types and operations for internal calculations
+struct Vec3 { real x, y, z; }; // 3-vector
+inline Vec3 vec3(real x, real y, real z) { Vec3 r; r.x = x; r.y = y; r.z = z; return r; } // vector builder
+inline Vec3 operator + (const Vec3& a, const Vec3& b) { return vec3(a.x+b.x, a.y+b.y, a.z+b.z); } // vector addition
+inline Vec3 operator * (real s, const Vec3& v) { return vec3(s*v.x, s*v.y, s*v.z); } // scalar multiplication
+inline real operator | (const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; } // dot product
+inline Vec3 operator ^ (const Vec3& a, const Vec3& b) { return vec3(a.y*b.z - b.y*a.z, a.z*b.x - b.z*a.x, a.x*b.y - b.x*a.y); } // cross product
+
+struct Vec4 { Vec3 v; real w; }; // 4-vector split into 3-vector and scalar parts
+inline Vec4 vec4(const Vec3& v, real w) { Vec4 r; r.v = v; r.w = w; return r; } // vector builder
+inline real operator | (const Vec4& a, const Vec4& b) { return (a.v|b.v) + a.w*b.w; } // dot product
+
+// More accurate perpendicular
+inline Vec3 perp(const Vec3& a, const Vec3& b)
+{
+ Vec3 c = a^b; // Cross-product gives perpendicular
+#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
+ const real c2 = c|c;
+ if (c2 != 0) c = c + (1/c2)*((a|c)*(c^b) + (b|c)*(a^c)); // Improvement to (a b)^T(c) = (0)
+#endif
+ return c;
+}
+
+// Square
+inline real sq(real x) { return x*x; }
+
+// Returns index of the extremal element in a three-element set {e0, e1, e2} based upon comparisons c_ij. The extremal index m is such that c_mn is true, or e_m == e_n, for all n.
+inline int ext_index(int c_10, int c_21, int c_20) { return c_10<<c_21|(c_21&c_20)<<1; }
+
+// Returns index (0, 1, or 2) of minimum argument
+inline int index_of_min(real x0, real x1, real x2) { return ext_index((int)(x1 < x0), (int)(x2 < x1), (int)(x2 < x0)); }
+
+// Compare fractions with positive deominators. Returns a_num*sqrt(a_rden2) > b_num*sqrt(b_rden2)
+inline bool frac_gt(real a_num, real a_rden2, real b_num, real b_rden2)
+{
+ const bool a_num_neg = a_num < 0;
+ const bool b_num_neg = b_num < 0;
+ return a_num_neg != b_num_neg ? b_num_neg : ((a_num*a_num*a_rden2 > b_num*b_num*b_rden2) != a_num_neg);
+}
+
+// Returns index (0, 1, or 2) of maximum fraction with positive deominators
+inline int index_of_max_frac(real x0_num, real x0_rden2, real x1_num, real x1_rden2, real x2_num, real x2_rden2)
+{
+ return ext_index((int)frac_gt(x1_num, x1_rden2, x0_num, x0_rden2), (int)frac_gt(x2_num, x2_rden2, x1_num, x1_rden2), (int)frac_gt(x2_num, x2_rden2, x0_num, x0_rden2));
+}
+
+// Compare values given their signs and squares. Returns a > b. a2 and b2 may have any constant offset applied to them.
+inline bool sgn_sq_gt(real sgn_a, real a2, real sgn_b, real b2) { return sgn_a*sgn_b < 0 ? (sgn_b < 0) : ((a2 > b2) != (sgn_a < 0)); }
+
+// Returns index (0, 1, or 2) of maximum value given their signs and squares. sq_x0, sq_x1, and sq_x2 may have any constant offset applied to them.
+inline int index_of_max_sgn_sq(real sgn_x0, real sq_x0, real sgn_x1, real sq_x1, real sgn_x2, real sq_x2)
+{
+ return ext_index((int)sgn_sq_gt(sgn_x1, sq_x1, sgn_x0, sq_x0), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x1, sq_x1), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x0, sq_x0));
+}
+
+// Project 2D (homogeneous) vector onto 2D half-space boundary
+inline void project2D(Vec3& r, const Vec3& plane, real delta, real recip_n2, real eps2)
+{
+ r = r + (-delta*recip_n2)*vec3(plane.x, plane.y, 0);
+ r = r + (-(r|plane)*recip_n2)*vec3(plane.x, plane.y, 0); // Second projection for increased accuracy
+ if ((r|r) > eps2) return;
+ r = (-plane.z*recip_n2)*vec3(plane.x, plane.y, 0);
+ r.z = 1;
+}
+
+
+// Update function for vs3d_test
+static bool vs3d_update(Vec4& p, Vec4 S[4], int& plane_count, const Vec4& q, real eps2)
+{
+ // h plane is the last plane
+ const Vec4& h = S[plane_count-1];
+
+ // Handle plane_count == 1 specially (optimization; this could be commented out)
+ if (plane_count == 1)
+ {
+ // Solution is objective projected onto h plane
+ p = q;
+ p.v = p.v + -(p|h)*h.v;
+ if ((p|p) <= eps2) p = vec4(-h.w*h.v, 1); // If p == 0 then q is a direction vector, any point in h is a support point
+ return true;
+ }
+
+ // Create basis in the h plane
+ const int min_i = index_of_min(h.v.x*h.v.x, h.v.y*h.v.y, h.v.z*h.v.z);
+ const Vec3 y = h.v^vec3((real)(min_i == 0), (real)(min_i == 1), (real)(min_i == 2));
+ const Vec3 x = y^h.v;
+
+ // Use reduced vector r instead of p
+ Vec3 r = {x|q.v, y|q.v, q.w*(y|y)}; // (x|x) = (y|y) = square of plane basis scale
+
+ // If r == 0 (within epsilon), then it is a direction vector, and we have a bounded solution
+ if ((r|r) <= eps2) r.z = 1;
+
+ // Create plane equations in the h plane. These will not be normalized in general.
+ int N = 0; // Plane count in h subspace
+ Vec3 R[3]; // Planes in h subspace
+ real recip_n2[3]; // Plane normal vector reciprocal lengths squared
+ real delta[3]; // Signed distance of objective to the planes
+ int index[3]; // Keep track of original plane indices
+ for (int i = 0; i < plane_count-1; ++i)
+ {
+ const Vec3& vi = S[i].v;
+ const real cos_theta = h.v|vi;
+ R[N] = vec3(x|vi, y|vi, S[i].w - h.w*cos_theta);
+ index[N] = i;
+ const real n2 = R[N].x*R[N].x + R[N].y*R[N].y;
+ if (n2 >= eps2)
+ {
+ const real lin_norm = (real)1.5-(real)0.5*n2; // 1st-order approximation to 1/sqrt(n2) expanded about n2 = 1
+ R[N] = lin_norm*R[N]; // We don't need normalized plane equations, but rescaling (even with an approximate normalization) gives better numerical behavior
+ recip_n2[N] = 1/(R[N].x*R[N].x + R[N].y*R[N].y);
+ delta[N] = r|R[N];
+ ++N; // Keep this plane
+ }
+ else if (cos_theta < 0) return false; // Parallel cases are redundant and rejected, anti-parallel cases are 1D voids
+ }
+
+ // Now work with the N-sized R array of half-spaces in the h plane
+ switch (N)
+ {
+ case 1: one_plane:
+ if (delta[0] < 0) N = 0; // S[0] is redundant, eliminate it
+ else project2D(r, R[0], delta[0], recip_n2[0], eps2);
+ break;
+ case 2: two_planes:
+ if (delta[0] < 0 && delta[1] < 0) N = 0; // S[0] and S[1] are redundant, eliminate them
+ else
+ {
+ const int max_d_index = (int)frac_gt(delta[1], recip_n2[1], delta[0], recip_n2[0]);
+ project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
+ const int min_d_index = max_d_index^1;
+ const real new_delta_min = r|R[min_d_index];
+ if (new_delta_min < 0)
+ {
+ index[0] = index[max_d_index];
+ N = 1; // S[min_d_index] is redundant, eliminate it
+ }
+ else
+ {
+ // Set r to the intersection of R[0] and R[1] and keep both
+ r = perp(R[0], R[1]);
+ if (r.z*r.z*recip_n2[0]*recip_n2[1] < eps2)
+ {
+ if (R[0].x*R[1].x + R[0].y*R[1].y < 0) return false; // 2D void found
+ goto one_plane;
+ }
+ r = (1/r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
+ }
+ }
+ break;
+ case 3:
+ if (delta[0] < 0 && delta[1] < 0 && delta[2] < 0) N = 0; // S[0], S[1], and S[2] are redundant, eliminate them
+ else
+ {
+ const Vec3 row_x = {R[0].x, R[1].x, R[2].x};
+ const Vec3 row_y = {R[0].y, R[1].y, R[2].y};
+ const Vec3 row_w = {R[0].z, R[1].z, R[2].z};
+ const Vec3 cof_w = perp(row_x, row_y);
+ const bool detR_pos = (row_w|cof_w) > 0;
+ const int nrw_sgn0 = cof_w.x*cof_w.x*recip_n2[1]*recip_n2[2] < eps2 ? 0 : (((int)((cof_w.x > 0) == detR_pos)<<1)-1);
+ const int nrw_sgn1 = cof_w.y*cof_w.y*recip_n2[2]*recip_n2[0] < eps2 ? 0 : (((int)((cof_w.y > 0) == detR_pos)<<1)-1);
+ const int nrw_sgn2 = cof_w.z*cof_w.z*recip_n2[0]*recip_n2[1] < eps2 ? 0 : (((int)((cof_w.z > 0) == detR_pos)<<1)-1);
+
+ if ((nrw_sgn0|nrw_sgn1|nrw_sgn2) >= 0) return false; // 3D void found
+
+ const int positive_width_count = ((nrw_sgn0>>1)&1) + ((nrw_sgn1>>1)&1) + ((nrw_sgn2>>1)&1);
+ if (positive_width_count == 1)
+ {
+ // A single positive width results from a redundant plane. Eliminate it and peform N = 2 calculation.
+ const int pos_width_index = ((nrw_sgn1>>1)&1)|(nrw_sgn2&2); // Calculates which index corresponds to the positive-width side
+ R[pos_width_index] = R[2];
+ recip_n2[pos_width_index] = recip_n2[2];
+ delta[pos_width_index] = delta[2];
+ index[pos_width_index] = index[2];
+ N = 2;
+ goto two_planes;
+ }
+
+ // Find the max dot product of r and R[i]/|R_normal[i]|. For numerical accuracy when the angle between r and the i^{th} plane normal is small, we take some care below:
+ const int max_d_index = r.z != 0
+ ? index_of_max_frac(delta[0], recip_n2[0], delta[1], recip_n2[1], delta[2], recip_n2[2]) // displacement term resolves small-angle ambiguity, just use dot product
+ : index_of_max_sgn_sq(delta[0], -sq(r.x*R[0].y - r.y*R[0].x)*recip_n2[0], delta[1], -sq(r.x*R[1].y - r.y*R[1].x)*recip_n2[1], delta[2], -sq(r.x*R[2].y - r.y*R[2].x)*recip_n2[2]); // No displacement term. Use wedge product to find the sine of the angle.
+
+ // Project r onto max-d plane
+ project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
+ N = 1; // Unless we use a vertex in the loop below
+ const int index_max = index[max_d_index];
+
+ // The number of finite widths should be >= 2. If not, it should be 0, but in any case it implies three parallel lines in the plane, which we should not have here.
+ // If we do have three parallel lines (# of finite widths < 2), we've picked the line corresponding to the half-plane farthest from r, which is correct.
+ const int finite_width_count = (nrw_sgn0&1) + (nrw_sgn1&1) + (nrw_sgn2&1);
+ if (finite_width_count >= 2)
+ {
+ const int i_remaining[2] = {(1<<max_d_index)&3, (3>>max_d_index)^1}; // = {(max_d_index+1)%3, (max_d_index+2)%3}
+ const int i_select = (int)frac_gt(delta[i_remaining[1]], recip_n2[i_remaining[1]], delta[i_remaining[0]], recip_n2[i_remaining[0]]); // Select the greater of the remaining dot products
+ for (int i = 0; i < 2; ++i)
+ {
+ const int j = i_remaining[i_select^i]; // i = 0 => the next-greatest, i = 1 => the least
+ if ((r|R[j]) >= 0)
+ {
+ r = perp(R[max_d_index], R[j]);
+ r = (1/r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
+ index[1] = index[j];
+ N = 2;
+ break;
+ }
+ }
+ }
+
+ index[0] = index_max;
+ }
+ break;
+ }
+
+ // Transform r back to 3D space
+ p = vec4(r.x*x + r.y*y + (-r.z*h.w)*h.v, r.z);
+
+ // Pack S array with kept planes
+ if (N < 2 || index[1] != 0) { for (int i = 0; i < N; ++i) S[i] = S[index[i]]; } // Safe to copy columns in order
+ else { const Vec4 temp = S[0]; S[0] = S[index[0]]; S[1] = temp; } // Otherwise use temp storage to avoid overwrite
+ S[N] = h;
+ plane_count = N+1;
+
+ return true;
+}
+
+
+// Performs the VS algorithm for D = 3
+inline int vs3d_test(VS3D_Halfspace_Set& halfspace_set, real* q = NULL)
+{
+ // Objective = q if it is not NULL, otherwise it is the origin represented in homogeneous coordinates
+ const Vec4 objective = q ? (q[3] != 0 ? vec4((1/q[3])*vec3(q[0], q[1], q[2]), 1) : *(Vec4*)q) : vec4(vec3(0, 0, 0), 1);
+
+ // Tolerance for 3D void simplex algorithm
+ const real eps_f = (real)1/(sizeof(real) == 4 ? (1L<<23) : (1LL<<52)); // Floating-point epsilon
+#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
+ const real eps = 8*eps_f;
+#else
+ const real eps = 80*eps_f;
+#endif
+ const real eps2 = eps*eps; // Using epsilon squared
+
+ // Maximum allowed iterations of main loop. If exceeded, error code is returned
+ const int max_iteration_count = 50;
+
+ // State
+ Vec4 S[4]; // Up to 4 planes
+ int plane_count = 0; // Number of valid planes
+ Vec4 p = objective; // Test point, initialized to objective
+
+ // Default result, changed to valid result if found in loop below
+ int result = -1;
+
+ // Iterate until a stopping condition is met or the maximum number of iterations is reached
+ for (int i = 0; result < 0 && i < max_iteration_count; ++i)
+ {
+ Vec4& plane = S[plane_count++];
+ real delta = halfspace_set.farthest_halfspace(&plane.v.x, &p.v.x);
+#if VS3D_UNNORMALIZED_PLANE_HANDLING != 0
+ const real recip_norm = vs3d_recip_sqrt(plane.v|plane.v);
+ plane = vec4(recip_norm*plane.v, recip_norm*plane.w);
+ delta *= recip_norm;
+#endif
+ if (delta <= 0 || delta*delta <= eps2*(p|p)) result = 1; // Intersection found
+ else if (!vs3d_update(p, S, plane_count, objective, eps2)) result = 0; // Void simplex found
+ }
+
+ // If q is given, fill it with the solution (normalize p.w if it is not zero)
+ if (q) *(Vec4*)q = (p.w != 0) ? vec4((1/p.w)*p.v, 1) : p;
+
+ PX_ASSERT(result >= 0);
+
+ return result;
+}
+
+
+/*
+ Utility class derived from GSA::ConvexShape, to handle common implementations
+
+ PlaneIterator must have:
+ 1) a constructor which takes an object of type IteratorInitValues (either by value or refrence) in its constructor,
+ 2) a valid() method which returns a bool (true iff the plane() function can return a valid plane, see below),
+ 3) an inc() method to advance to the next plane, and
+ 4) a plane() method which returns a plane of type ApexCSG::Plane, either by value or reference (the plane will be copied).
+*/
+template<class PlaneIterator, class IteratorInitValues>
+class StaticConvexPolyhedron : public VS3D_Halfspace_Set
+{
+public:
+ virtual GSA::real farthest_halfspace(GSA::real plane[4], const GSA::real point[4])
+ {
+ plane[0] = plane[1] = plane[2] = 0.0f;
+ plane[3] = 1.0f;
+ Real greatest_s = -MAX_REAL;
+
+ for (PlaneIterator it(m_initValues); it.valid(); it.inc())
+ {
+ const Plane test = it.plane();
+ const Real s = point[0]*test[0] + point[1]*test[1] + point[2]*test[2] + point[3]*test[3];
+ if (s > greatest_s)
+ {
+ greatest_s = s;
+ for (int i = 0; i < 4; ++i)
+ {
+ plane[i] = (GSA::real)test[i];
+ }
+ }
+ }
+
+ // Return results
+ return (GSA::real)greatest_s;
+ }
+
+protected:
+ IteratorInitValues m_initValues;
+};
+
+}; // namespace GSA
+}; // namespace ApexCSG
+
+#endif // #ifndef WITHOUT_APEX_AUTHORING
+
+#endif // #ifndef APEX_GSA_H
diff --git a/APEX_1.4/shared/internal/include/authoring/Fracturing.h b/APEX_1.4/shared/internal/include/authoring/Fracturing.h
new file mode 100644
index 00000000..751d1ae9
--- /dev/null
+++ b/APEX_1.4/shared/internal/include/authoring/Fracturing.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifndef FRACTURING_H
+
+#define FRACTURING_H
+
+#include "Apex.h"
+#include "ApexUsingNamespace.h"
+#include "PxPlane.h"
+//#include "ApexSharedSerialization.h"
+#include "FractureTools.h"
+#include "ApexString.h"
+#include "ExplicitHierarchicalMesh.h"
+#include "authoring/ApexCSG.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace nvidia
+{
+namespace apex
+{
+
+using namespace FractureTools;
+
+
+struct IntersectMesh
+{
+ enum GridPattern
+ {
+ None, // An infinite plane
+ Equilateral,
+ Right
+ };
+
+ float getSide(const physx::PxVec3& v)
+ {
+ if (m_pattern == None)
+ {
+ return m_plane.distance(v);
+ }
+ physx::PxVec3 vLocal = m_tm.inverseRT().transform(v);
+ float x = vLocal.x - m_cornerX;
+ float y = vLocal.y - m_cornerY;
+ if (y < 0)
+ {
+ return 0;
+ }
+ float scaledY = y / m_ySpacing;
+ uint32_t gridY = (uint32_t)scaledY;
+ if (gridY >= m_numY)
+ {
+ return 0;
+ }
+ scaledY -= (float)gridY;
+ uint32_t yParity = gridY & 1;
+ if (yParity != 0)
+ {
+ scaledY = 1.0f - scaledY;
+ }
+ if (m_pattern == Equilateral)
+ {
+ x += 0.5f * m_xSpacing * scaledY;
+ }
+ if (x < 0)
+ {
+ return 0;
+ }
+ float scaledX = x / m_xSpacing;
+ uint32_t gridX = (uint32_t)scaledX;
+ if (gridX >= m_numX)
+ {
+ return 0;
+ }
+ scaledX -= (float)gridX;
+ uint32_t xParity = (uint32_t)(scaledX >= scaledY);
+ uint32_t triangleNum = 2 * (gridY * m_numX + gridX) + xParity;
+ PX_ASSERT(triangleNum < m_triangles.size());
+ nvidia::ExplicitRenderTriangle& triangle = m_triangles[triangleNum];
+ physx::PxVec3& v0 = triangle.vertices[0].position;
+ physx::PxVec3& v1 = triangle.vertices[1].position;
+ physx::PxVec3& v2 = triangle.vertices[2].position;
+ return ((v1 - v0).cross(v2 - v0)).dot(v - v0);
+ }
+
+ void clear()
+ {
+ m_pattern = None;
+ m_plane = physx::PxPlane(0, 0, 1, 0);
+ m_vertices.reset();
+ m_triangles.reset();
+ }
+
+ void build(const physx::PxPlane& plane)
+ {
+ clear();
+ m_plane = plane;
+ }
+
+ void build(GridPattern pattern, const physx::PxPlane& plane,
+ float cornerX, float cornerY, float xSpacing, float ySpacing, uint32_t numX, uint32_t numY,
+ const PxMat44& tm, float noiseAmplitude, float relativeFrequency, float xPeriod, float yPeriod,
+ int noiseType, int noiseDir, uint32_t submeshIndex, uint32_t frameIndex, const TriangleFrame& triangleFrame, bool forceGrid);
+
+ GridPattern m_pattern;
+
+ PxMat44 m_tm;
+ physx::PxPlane m_plane;
+ physx::Array<nvidia::Vertex> m_vertices;
+ physx::Array<nvidia::ExplicitRenderTriangle> m_triangles;
+
+ uint32_t m_numX;
+ float m_cornerX;
+ float m_xSpacing;
+ uint32_t m_numY;
+ float m_cornerY;
+ float m_ySpacing;
+};
+
+struct DisplacementMapVolumeImpl : public DisplacementMapVolume
+{
+ DisplacementMapVolumeImpl();
+
+ void init(const FractureSliceDesc& desc);
+
+ void getData(uint32_t& width, uint32_t& height, uint32_t& depth, uint32_t& size, unsigned char const** ppData) const;
+
+private:
+
+ void buildData(const physx::PxVec3 scale = physx::PxVec3(1)) const;
+
+ // Data creation is lazy, and does not effect externally visible state
+ // Note: At some point, we will want to switch to floating point displacements
+ mutable physx::Array<unsigned char> data;
+
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+
+};
+
+// CutoutSetImpl
+
+struct PolyVert
+{
+ uint16_t index;
+ uint16_t flags;
+};
+
+struct ConvexLoop
+{
+ physx::Array<PolyVert> polyVerts;
+};
+
+struct Cutout
+{
+ physx::Array<physx::PxVec3> vertices;
+ physx::Array<ConvexLoop> convexLoops;
+};
+
+struct CutoutSetImpl : public CutoutSet
+{
+ CutoutSetImpl() : periodic(false), dimensions(0.0f)
+ {
+ }
+
+ enum Version
+ {
+ First = 0,
+ // New versions must be put here. There is no need to explicitly number them. The
+ // numbers above were put there to conform to the old DestructionToolStreamVersion enum.
+
+ Count,
+ Current = Count - 1
+ };
+
+ uint32_t getCutoutCount() const
+ {
+ return cutouts.size();
+ }
+
+ uint32_t getCutoutVertexCount(uint32_t cutoutIndex) const
+ {
+ return cutouts[cutoutIndex].vertices.size();
+ }
+ uint32_t getCutoutLoopCount(uint32_t cutoutIndex) const
+ {
+ return cutouts[cutoutIndex].convexLoops.size();
+ }
+
+ const physx::PxVec3& getCutoutVertex(uint32_t cutoutIndex, uint32_t vertexIndex) const
+ {
+ return cutouts[cutoutIndex].vertices[vertexIndex];
+ }
+
+ uint32_t getCutoutLoopSize(uint32_t cutoutIndex, uint32_t loopIndex) const
+ {
+ return cutouts[cutoutIndex].convexLoops[loopIndex].polyVerts.size();
+ }
+
+ uint32_t getCutoutLoopVertexIndex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexNum) const
+ {
+ return cutouts[cutoutIndex].convexLoops[loopIndex].polyVerts[vertexNum].index;
+ }
+ uint32_t getCutoutLoopVertexFlags(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexNum) const
+ {
+ return cutouts[cutoutIndex].convexLoops[loopIndex].polyVerts[vertexNum].flags;
+ }
+ bool isPeriodic() const
+ {
+ return periodic;
+ }
+ const physx::PxVec2& getDimensions() const
+ {
+ return dimensions;
+ }
+
+ void serialize(physx::PxFileBuf& stream) const;
+ void deserialize(physx::PxFileBuf& stream);
+
+ void release()
+ {
+ delete this;
+ }
+
+ physx::Array<Cutout> cutouts;
+ bool periodic;
+ physx::PxVec2 dimensions;
+};
+
+class PartConvexHullProxy : public ExplicitHierarchicalMesh::ConvexHull, public UserAllocated
+{
+public:
+ ConvexHullImpl impl;
+
+ PartConvexHullProxy()
+ {
+ impl.init();
+ }
+
+ PartConvexHullProxy(const PartConvexHullProxy& hull)
+ {
+ *this = hull;
+ }
+
+ PartConvexHullProxy& operator = (const PartConvexHullProxy& hull)
+ {
+ impl.init();
+ if (hull.impl.mParams)
+ {
+ impl.mParams->copy(*hull.impl.mParams);
+ }
+ return *this;
+ }
+
+ virtual void buildFromPoints(const void* points, uint32_t numPoints, uint32_t pointStrideBytes)
+ {
+ impl.buildFromPoints(points, numPoints, pointStrideBytes);
+ }
+
+ virtual const physx::PxBounds3& getBounds() const
+ {
+ return impl.getBounds();
+ }
+
+ virtual float getVolume() const
+ {
+ return impl.getVolume();
+ }
+
+ virtual uint32_t getVertexCount() const
+ {
+ return impl.getVertexCount();
+ }
+
+ virtual physx::PxVec3 getVertex(uint32_t vertexIndex) const
+ {
+ if (vertexIndex < impl.getVertexCount())
+ {
+ return impl.getVertex(vertexIndex);
+ }
+ return physx::PxVec3(0.0f);
+ }
+
+ virtual uint32_t getEdgeCount() const
+ {
+ return impl.getEdgeCount();
+ }
+
+ virtual physx::PxVec3 getEdgeEndpoint(uint32_t edgeIndex, uint32_t whichEndpoint) const
+ {
+ if (edgeIndex < impl.getEdgeCount())
+ {
+ return impl.getVertex(impl.getEdgeEndpointIndex(edgeIndex, whichEndpoint));
+ }
+ return physx::PxVec3(0.0f);
+ }
+
+ /**
+ This is the number of planes which bound the convex hull.
+ */
+ virtual uint32_t getPlaneCount() const
+ {
+ return impl.getPlaneCount();
+ }
+
+ /**
+ This is the plane indexed by planeIndex, which must in
+ the range [0, getPlaneCount()-1].
+ */
+ virtual physx::PxPlane getPlane(uint32_t planeIndex) const
+ {
+ if (planeIndex < impl.getPlaneCount())
+ {
+ return impl.getPlane(planeIndex);
+ }
+ return physx::PxPlane(physx::PxVec3(0.0f), 0.0f);
+ }
+
+ virtual bool rayCast(float& in, float& out, const physx::PxVec3& orig, const physx::PxVec3& dir,
+ const physx::PxTransform& localToWorldRT, const physx::PxVec3& scale, physx::PxVec3* normal = NULL) const
+ {
+ return impl.rayCast(in, out, orig, dir, localToWorldRT, scale, normal);
+ }
+
+ virtual bool reduceHull(uint32_t maxVertexCount, uint32_t maxEdgeCount, uint32_t maxFaceCount, bool inflated)
+ {
+ return impl.reduceHull(maxVertexCount, maxEdgeCount, maxFaceCount, inflated);
+ }
+
+ virtual void release()
+ {
+ delete this;
+ }
+};
+
+PX_INLINE void resizeCollision(physx::Array<PartConvexHullProxy*>& collision, uint32_t hullCount)
+{
+ const uint32_t oldHullCount = collision.size();
+ for (uint32_t i = hullCount; i < oldHullCount; ++i)
+ {
+ collision[i]->release();
+ }
+ collision.resize(hullCount);
+ for (uint32_t i = oldHullCount; i < hullCount; ++i)
+ {
+ collision[i] = PX_NEW(PartConvexHullProxy)();
+ }
+}
+
+void buildCollisionGeometry(physx::Array<PartConvexHullProxy*>& volumes, const CollisionVolumeDesc& desc,
+ const physx::PxVec3* vertices, uint32_t vertexCount, uint32_t vertexByteStride,
+ const uint32_t* indices, uint32_t indexCount);
+
+
+// ExplicitHierarchicalMeshImpl
+
+static uint64_t sNextChunkEUID = 0; // Execution-unique identifier for chunks
+
+// Concrete implementation of the ExplicitHierarchicalMesh authoring interface.
+// Owns the part and chunk containers, submesh and material-frame data, a BSP
+// memory cache, and the displacement map volume used by the fracturing tools.
+class ExplicitHierarchicalMeshImpl : public ExplicitHierarchicalMesh, public UserAllocated
+{
+public:
+
+	// This has been copied from DestructionToolStreamVersion, at ToolStreamVersion_RemovedExplicitHMesh_mMaxDepth.
+	// Serialization version history: existing entries must never be reordered or
+	// renumbered, or previously saved streams will deserialize incorrectly.
+	enum Version
+	{
+		First = 0,
+		AddedMaterialFramesToHMesh_and_NoiseType_and_GridSize_to_Cleavage = 7,
+		IncludingVertexFormatInSubmeshData = 12,
+		AddedMaterialLibraryToMesh = 14,
+		AddedCacheChunkSurfaceTracesAndInteriorSubmeshIndex = 32,
+		RemovedExplicitHMesh_mMaxDepth = 38,
+		UsingExplicitPartContainers,
+		SerializingMeshBSP,
+		SerializingMeshBounds,
+		AddedFlagsFieldToPart,
+		PerPartMeshBSPs,
+		StoringRootSubmeshCount,
+		MultipleConvexHullsPerChunk,
+		InstancingData,
+		UVInstancingData,
+		DisplacementData,
+		ChangedMaterialFrameToIncludeFracturingMethodContext,
+		RemovedInteriorSubmeshIndex,
+		AddedSliceDepthToMaterialFrame,
+		RemovedNxChunkAuthoringFlag,
+		ReaddedFlagsToPart,
+		IntroducingChunkPrivateFlags,
+		// New versions must be put here. There is no need to explicitly number them. The
+		// numbers above were put there to conform to the old DestructionToolStreamVersion enum.
+
+		Count,
+		Current = Count - 1
+	};
+
+	// Geometry container referenced by one or more chunks: the triangle mesh,
+	// its bounds, its BSP representation, and its collision hulls.
+	struct Part : public UserAllocated
+	{
+		Part() : mMeshBSP(NULL), mSurfaceNormal(0.0f), mFlags(0)
+		{
+			mBounds.setEmpty();
+		}
+
+		~Part()
+		{
+			if (mMeshBSP != NULL)
+			{
+				mMeshBSP->release();
+				mMeshBSP = NULL;
+			}
+			resizeCollision(mCollision, 0);	// releases all owned PartConvexHullProxy objects
+		}
+
+		enum Flags
+		{
+			MeshOpen = (1<<0),
+		};
+
+		physx::PxBounds3 mBounds;	// AABB of mMesh (empty until built)
+		physx::Array<nvidia::ExplicitRenderTriangle> mMesh;	// the part's triangle soup
+		ApexCSG::IApexBSP* mMeshBSP;	// BSP built from mMesh; NULL until calculated
+		physx::Array<PartConvexHullProxy*> mCollision;	// owned collision hulls
+		physx::PxVec3 mSurfaceNormal; // used to kick chunk out if desired
+		uint32_t mFlags; // See Flags
+	};
+
+	// A node in the fracture hierarchy.  A chunk references a part for its
+	// geometry (possibly instanced with position/UV offsets) and records its
+	// parent within the hierarchy (-1 for a chunk with no parent).
+	struct Chunk : public UserAllocated
+	{
+		Chunk() : mParentIndex(-1), mFlags(0), mPartIndex(-1), mInstancedPositionOffset(physx::PxVec3(0.0f)), mInstancedUVOffset(physx::PxVec2(0.0f)), mPrivateFlags(0)
+		{
+			mEUID = sNextChunkEUID++;	// assign an execution-unique ID at construction
+		}
+
+		enum Flags
+		{
+			Root = (1<<0),
+			RootLeaf = (1<<1),
+		};
+
+		bool isRootChunk() const
+		{
+			return (mPrivateFlags & Root) != 0;
+		}
+
+		bool isRootLeafChunk() const // This means that the chunk is a root chunk and has no children that are root chunks
+		{
+			return (mPrivateFlags & RootLeaf) != 0;
+		}
+
+		PX_INLINE uint64_t getEUID() const
+		{
+			return mEUID;
+		}
+
+		int32_t mParentIndex;
+		uint32_t mFlags; // See DestructibleAsset::ChunkFlags
+		int32_t mPartIndex;
+		physx::PxVec3 mInstancedPositionOffset; // if instanced, the offsetPosition
+		physx::PxVec2 mInstancedUVOffset; // if instanced, the offset UV
+		uint32_t mPrivateFlags; // Things that don't make it to the DestructibleAsset; authoring only. See ExplicitHierarchicalMeshImpl::Chunk::Flags
+
+	private:
+		uint64_t mEUID; // A unique identifier during the application execution. Not to be serialized.
+	};
+
+	physx::Array<Part*> mParts;	// owned parts
+	physx::Array<Chunk*> mChunks;	// owned chunks; kept in parent-sorted order via sortChunks()
+	physx::Array<ExplicitSubmeshData> mSubmeshData;
+	physx::Array<nvidia::MaterialFrame> mMaterialFrames;
+	uint32_t mRootSubmeshCount; // How many submeshes came with the root mesh
+
+	ApexCSG::IApexBSPMemCache* mBSPMemCache;	// memory cache used by per-part BSP calculations
+
+	DisplacementMapVolumeImpl mDisplacementMapVolume;
+
+	ExplicitHierarchicalMeshImpl();
+	~ExplicitHierarchicalMeshImpl();
+
+	// Sorts chunks in parent-sorted order (stable)
+	void sortChunks(physx::Array<uint32_t>* indexRemap = NULL);
+
+	// Generate part surface normals, if possible
+	void createPartSurfaceNormals();
+
+	// ExplicitHierarchicalMesh implementation:
+
+	uint32_t addPart();
+	bool removePart(uint32_t index);
+	uint32_t addChunk();
+	bool removeChunk(uint32_t index);
+	void serialize(physx::PxFileBuf& stream, Embedding& embedding) const;
+	void deserialize(physx::PxFileBuf& stream, Embedding& embedding);
+	int32_t maxDepth() const;
+	uint32_t partCount() const;
+	uint32_t chunkCount() const;
+	uint32_t depth(uint32_t chunkIndex) const;
+	int32_t* parentIndex(uint32_t chunkIndex);
+	uint64_t chunkUniqueID(uint32_t chunkIndex);
+	int32_t* partIndex(uint32_t chunkIndex);
+	physx::PxVec3* instancedPositionOffset(uint32_t chunkIndex);
+	physx::PxVec2* instancedUVOffset(uint32_t chunkIndex);
+	uint32_t meshTriangleCount(uint32_t partIndex) const;
+	nvidia::ExplicitRenderTriangle* meshTriangles(uint32_t partIndex);
+	physx::PxBounds3 meshBounds(uint32_t partIndex) const;
+	physx::PxBounds3 chunkBounds(uint32_t chunkIndex) const;
+	uint32_t* chunkFlags(uint32_t chunkIndex) const;
+	uint32_t convexHullCount(uint32_t partIndex) const;
+	const ExplicitHierarchicalMesh::ConvexHull** convexHulls(uint32_t partIndex) const;
+	physx::PxVec3* surfaceNormal(uint32_t partIndex);
+	const DisplacementMapVolume& displacementMapVolume() const;
+	uint32_t submeshCount() const;
+	ExplicitSubmeshData* submeshData(uint32_t submeshIndex);
+	uint32_t addSubmesh(const ExplicitSubmeshData& submeshData);
+	uint32_t getMaterialFrameCount() const;
+	nvidia::MaterialFrame getMaterialFrame(uint32_t index) const;
+	void setMaterialFrame(uint32_t index, const nvidia::MaterialFrame& materialFrame);
+	uint32_t addMaterialFrame();
+	void clear(bool keepRoot = false);
+	void set(const ExplicitHierarchicalMesh& mesh);
+	bool calculatePartBSP(uint32_t partIndex, uint32_t randomSeed, uint32_t microgridSize, BSPOpenMode::Enum meshMode, IProgressListener* progressListener = NULL, volatile bool* cancel = NULL);
+	void calculateMeshBSP(uint32_t randomSeed, IProgressListener* progressListener = NULL, const uint32_t* microgridSize = NULL, BSPOpenMode::Enum meshMode = BSPOpenMode::Automatic);
+	void replaceInteriorSubmeshes(uint32_t partIndex, uint32_t frameCount, uint32_t* frameIndices, uint32_t submeshIndex);
+	void visualize(RenderDebugInterface& debugRender, uint32_t flags, uint32_t index = 0) const;
+	void release();
+	void buildMeshBounds(uint32_t partIndex);
+	void buildCollisionGeometryForPart(uint32_t partIndex, const CollisionVolumeDesc& desc);
+	void buildCollisionGeometryForRootChunkParts(const CollisionDesc& desc, bool aggregateRootChunkParentCollision = true);
+	void initializeDisplacementMapVolume(const nvidia::FractureSliceDesc& desc);
+	void reduceHulls(const CollisionDesc& desc, bool inflated);
+	void aggregateCollisionHullsFromRootChildren(uint32_t chunkIndex);
+};
+
+}
+} // end namespace nvidia::apex
+
+#endif
+
+#endif
diff --git a/APEX_1.4/shared/internal/readme.txt b/APEX_1.4/shared/internal/readme.txt
new file mode 100644
index 00000000..8e2ee4e2
--- /dev/null
+++ b/APEX_1.4/shared/internal/readme.txt
@@ -0,0 +1,32 @@
+The intent of "shared" is to provide utility code that samples and tools
+can share.
+
+*** Shared code MUST go in "internal" if it is ALSO used by
+*** the APEX SDK or by any APEX Module.
+
+*** However, code in "internal" MUST be self-contained. ***
+
+That is, it should not rely on the APEX SDK. Otherwise tools need to
+drag in all of APEX in order to compile, and we end up with a statically
+linked APEX library living alongside a dynamically linked APEX library.
+
+Description (from the APEX Architecture Document):
+
+ * Internal shared code (in APEX/shared/internal) - this is
+ used by APEX internal code, but is designed to be self-contained.
+ That is, it doesn't rely on code from the framework or modules.
+ In this way, "friend" projects like tools can share this code
+    with APEX without having to include the rest of the APEX
+ source. For example, streaming utilities are defined here, as
+ well as the APEX streaming version number. Tools need this to
+ create and stream out APEX objects using the proper versioning
+ system.
+
+ * Public shared code (in APEX/shared/external) - these are
+ utilities that tools and external applications like sample apps
+ can share. For example, an implementation of the user renderer
+ is here, used by the DestructionTool and SimpleDestruction
+ sample. Code in this category should not be used by internal
+ APEX source. This helps to decouple the code used for applications
+ from the APEX SDK version.
+
diff --git a/APEX_1.4/shared/internal/src/PvdNxParamSerializer.cpp b/APEX_1.4/shared/internal/src/PvdNxParamSerializer.cpp
new file mode 100644
index 00000000..54995948
--- /dev/null
+++ b/APEX_1.4/shared/internal/src/PvdNxParamSerializer.cpp
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#if TODO_PVD_NXPARAM_SERIALIZER
+
+#include "ApexUsingNamespace.h"
+#include "PvdNxParamSerializer.h"
+#include "nvparameterized/NvParameterized.h"
+#include "PvdConnection.h"
+#include "PVDCommLayerDebuggerStream.h"
+#include "ApexString.h"
+#include "PxMat33.h"
+#include "PxMat34Legacy.h"
+
+using namespace PVD;
+using namespace nvidia::apex;
+using namespace NvParameterized;
+
+namespace PvdNxParamSerializer
+{
+
+
+// Append the C string inAppend (which may be NULL) to inStr, one character at a time.
+inline void Append(ApexSimpleString& inStr, const char* inAppend)
+{
+	if (inAppend == NULL)
+	{
+		return;
+	}
+	for (const char* cursor = inAppend; *cursor != '\0'; ++cursor)
+	{
+		inStr += *cursor;
+	}
+}
+
+// Compose "prefix.varName" into inWorkString and return its characters; when no
+// prefix is supplied, the variable name is returned unmodified (and
+// inWorkString is left untouched).
+inline const char* GetVariableName(ApexSimpleString& inWorkString, const char* inNamePrefix, const char* inVarName)
+{
+	const bool havePrefix = (inNamePrefix != NULL && *inNamePrefix != '\0');
+	if (!havePrefix)
+	{
+		return inVarName;
+	}
+	Append(inWorkString, inNamePrefix);
+	Append(inWorkString, ".");
+	Append(inWorkString, inVarName);
+	return inWorkString.c_str();
+}
+
+
+/**
+ * The serialization architecture is complicated by dynamic arrays of information. I made a decision that a dynamic array cannot
+ * contain another dynamic array in the debugger's world thus if the real world has this limitation then there will be data
+ * loss when dealing with the debugger.
+ *
+ * In general, the TopLevel handler sends across properties as it encounters them giving property names dotted notations
+ * when they are part of a struct. The debugger UI on the other end has logic to create objects based on the dotted names
+ * and thus even though the database layer doesn't support struct objects the UI makes it appear as though it does.
+ * Static or non-resizeable-arrays are treated as a struct.
+ *
+ * Dynamic arrays require at least two passes and perhaps three. The first pass only needs to check out the first object.
+ * The next pass creates all ref objects if necessary and needs to pass over every item in the array.
+ * the third pass collects data values and sends such values over the wire using the array blocks.
+ *
+ * An initial test with the APEX integration tests sent 11 megs of data into the database; so I imagine
+ * most of the information in the APEX system, at least as far as initialization and static
+ * or asset based information is being sent to the debugger.
+ *
+ * Due to time constraints I haven't been able to upgrade the PVD UI to handle arrays well, which is unfortunate because
+ * it seems that most of the APEX information is in arrays.
+ */
+
+#define HANDLE_PARAM_TYPE( datatype, paramFuncName ) { \
+ datatype tmp; \
+ handle.paramFuncName(tmp); \
+ return HandleDataType( tmp, theVariableName ); }
+
+
+// Base traversal handler.  Walks an NvParameterized object tree and forwards
+// each parameter to the PVD data stream, building dotted names for struct
+// members and bracketed names for fixed-size array elements.  The different
+// serialization passes (property definition, ref creation, value recording)
+// are implemented by subclasses overriding the Handle* callbacks.
+class TopLevelParamTreeHandler
+{
+protected:
+	NvParameterized::Interface* mObj;		// object being traversed (not owned)
+	PvdDataStream* mRemoteDebugger;			// destination PVD stream (not owned)
+	uint64_t mCurPvdObj;					// PVD handle of the object receiving properties
+	const char* mVariablePrefix;			// dotted-name prefix for nested members
+	uint64_t mDynamicArrayHandle;			// next PVD handle to assign to a dynamic array
+
+public:
+	TopLevelParamTreeHandler(NvParameterized::Interface* obj, PvdDataStream* remoteDebugger, uint64_t inCurrentObject, const char* inVariablePrefix = NULL)
+		: mObj(obj)
+		, mRemoteDebugger(remoteDebugger)
+		, mCurPvdObj(inCurrentObject)
+		, mVariablePrefix(inVariablePrefix)
+	{
+		// Dynamic array handles are allocated sequentially after the object's own handle.
+		mDynamicArrayHandle = mCurPvdObj + 1;
+	}
+
+	// Child-scope constructor: same object/stream, new name prefix.
+	TopLevelParamTreeHandler(const TopLevelParamTreeHandler& inOther, const char* inNewPrefix)
+		: mObj(inOther.mObj)
+		, mRemoteDebugger(inOther.mRemoteDebugger)
+		, mCurPvdObj(inOther.mCurPvdObj)
+		, mVariablePrefix(inNewPrefix)
+		, mDynamicArrayHandle(inOther.mDynamicArrayHandle)
+	{}
+
+	// Recurse into a struct: each child is traversed by a handler of the same
+	// (derived) type using "<prefix>.<paramName>" as the new prefix.
+	template<typename THandlerType>
+	inline NvParameterized::ErrorType DoHandleStruct(NvParameterized::Handle& inHandle, const char* inParamName)
+	{
+		const NvParameterized::Definition* paramDef = inHandle.parameterDefinition();
+		const char* newPrefix = mVariablePrefix;
+		ApexSimpleString theWorker;
+		if (inParamName && *inParamName)
+		{
+			newPrefix = GetVariableName(theWorker, mVariablePrefix, inParamName);
+		}
+
+		THandlerType theNewHandler(*this, newPrefix);
+		for (int i = 0; i < paramDef->numChildren(); ++i)
+		{
+			inHandle.set(i);
+			theNewHandler.TraverseParamDefTree(inHandle);
+			inHandle.popIndex();
+		}
+		TransferStackInformationBack(theNewHandler);
+		return(NvParameterized::ERROR_NONE);
+	}
+
+	// Recurse into a fixed-size array: element i is traversed with the prefix
+	// "<arrayName>[<i>]" (index padded to 7 characters by "%7d").
+	template<typename THandlerType>
+	inline NvParameterized::ErrorType DoHandleArray(NvParameterized::Handle& handle, const char* paramName)
+	{
+		int arraySize = 0;
+
+		const char* arrayName = paramName;
+		ApexSimpleString theVariableNamePrefix;
+		Append(theVariableNamePrefix, arrayName);
+
+		if (handle.getArraySize(arraySize) != NvParameterized::ERROR_NONE)
+		{
+			return(ERROR_INVALID_ARRAY_SIZE);
+		}
+
+		ApexSimpleString theWorkString(theVariableNamePrefix);
+
+		for (int i = 0; i < arraySize; ++i)
+		{
+			theWorkString = theVariableNamePrefix;
+			theWorkString += '[';
+			char tempBuf[20];
+			shdfnd::snprintf(tempBuf, 20, "%7d", i);
+			Append(theWorkString, tempBuf);
+			theWorkString += ']';
+			handle.set(i);
+			THandlerType theNewHandler(*this, theWorkString.c_str());
+			theNewHandler.TraverseParamDefTree(handle);
+			handle.popIndex();
+			TransferStackInformationBack(theNewHandler);
+		}
+		return(NvParameterized::ERROR_NONE);
+	}
+	virtual ~TopLevelParamTreeHandler() {}
+
+	// Propagate state accumulated by a child handler (e.g. the next free
+	// dynamic-array handle) back into this handler after recursion.
+	virtual void TransferStackInformationBack(const TopLevelParamTreeHandler& inOther)
+	{
+		mDynamicArrayHandle = inOther.mDynamicArrayHandle;
+	}
+	virtual NvParameterized::ErrorType HandleStruct(NvParameterized::Handle& handle, const char* inParamName);
+	virtual NvParameterized::ErrorType HandleArray(NvParameterized::Handle& handle, const char* paramName);
+	virtual NvParameterized::ErrorType HandleDynamicArray(NvParameterized::Handle& handle, const char* paramName);
+	virtual NvParameterized::ErrorType HandleRef(NvParameterized::Handle& handle, const char*);
+	virtual NvParameterized::ErrorType HandleProperty(const PvdCommLayerValue& inValue, const char* inParamName);
+
+	// Wrap a typed leaf value as a PVD comm-layer value and forward it.
+	template<typename TDataType>
+	inline NvParameterized::ErrorType HandleDataType(const TDataType& inDataType, const char* inParamName)
+	{
+		return HandleProperty(CreateCommLayerValue(inDataType), inParamName);
+	}
+
+	virtual NvParameterized::ErrorType TraverseParamDefTree(NvParameterized::Handle& handle);
+};
+
+//Run through a type define all the properties ignoring dynamic arrays and
+//ref objs.
+class PropertyDefinitionTreeHandler : public TopLevelParamTreeHandler
+{
+ physx::Array<uint32_t> mProperties;
+ physx::Array<PVD::PvdCommLayerDatatype> mDatatypes;
+ uint32_t mClassKey;
+ bool hasRefs;
+public:
+ PropertyDefinitionTreeHandler(const TopLevelParamTreeHandler& inOther, uint32_t inClassKey)
+ : TopLevelParamTreeHandler(inOther, "")
+ , mClassKey(inClassKey)
+ , hasRefs(false)
+ {}
+
+ PropertyDefinitionTreeHandler(const TopLevelParamTreeHandler& inOther, const char* inParamName)
+ : TopLevelParamTreeHandler(inOther, inParamName)
+ {
+ const PropertyDefinitionTreeHandler& realOther = static_cast<const PropertyDefinitionTreeHandler&>(inOther);
+ mClassKey = realOther.mClassKey;
+ hasRefs |= realOther.hasRefs;
+ }
+
+ virtual NvParameterized::ErrorType HandleDynamicArray(NvParameterized::Handle&, const char*)
+ {
+ return NvParameterized::ERROR_NONE;
+ }
+ virtual NvParameterized::ErrorType HandleArray(NvParameterized::Handle& handle, const char* inParamName)
+ {
+ return DoHandleArray<PropertyDefinitionTreeHandler>(handle, inParamName);
+ }
+ virtual NvParameterized::ErrorType HandleRef(NvParameterized::Handle&, const char* inParamName)
+ {
+ HandleProperty(createInstanceId(0), inParamName);
+ hasRefs = true;
+ return NvParameterized::ERROR_NONE;
+ }
+ virtual NvParameterized::ErrorType HandleStruct(NvParameterized::Handle& handle, const char* inParamName)
+ {
+ return DoHandleStruct<PropertyDefinitionTreeHandler>(handle, inParamName);
+ }
+ virtual void TransferStackInformationBack(const TopLevelParamTreeHandler& inOther)
+ {
+ const PropertyDefinitionTreeHandler& realOther = static_cast<const PropertyDefinitionTreeHandler&>(inOther);
+ hasRefs |= realOther.hasRefs;
+ for (uint32_t idx = 0; idx < realOther.mProperties.size(); ++idx)
+ {
+ mProperties.pushBack(realOther.mProperties[idx]);
+ }
+ }
+
+ virtual NvParameterized::ErrorType HandleProperty(const PvdCommLayerValue& inValue, const char* inParamName)
+ {
+ uint32_t thePropertyKey = HashFunction(inParamName);
+ mRemoteDebugger->defineProperty(mClassKey, inParamName, NULL, inValue.getDatatype(), thePropertyKey);
+ mProperties.pushBack(thePropertyKey);
+ mDatatypes.pushBack(inValue.getDatatype());
+ return NvParameterized::ERROR_NONE;
+ }
+ uint32_t GetPropertyCount()
+ {
+ return mProperties.size();
+ }
+ const uint32_t* GetProperties()
+ {
+ return mProperties.begin();
+ }
+ const PVD::PvdCommLayerDatatype* getDatatypes()
+ {
+ return mDatatypes.begin();
+ }
+ bool HasRefs()
+ {
+ return hasRefs;
+ }
+};
+
+//Simply create the parameter ref objects.
+class ParamRefTreeHandler : public TopLevelParamTreeHandler
+{
+public:
+ ParamRefTreeHandler(TopLevelParamTreeHandler& inOther)
+ : TopLevelParamTreeHandler(inOther)
+ {
+ }
+
+ ParamRefTreeHandler(TopLevelParamTreeHandler& inOther, const char* inParamName)
+ : TopLevelParamTreeHandler(inOther, inParamName)
+ {
+ }
+
+ virtual NvParameterized::ErrorType HandleStruct(NvParameterized::Handle& handle, const char* inParamName)
+ {
+ return DoHandleStruct<ParamRefTreeHandler>(handle, inParamName);
+ }
+
+ virtual NvParameterized::ErrorType HandleArray(NvParameterized::Handle& handle, const char* inParamName)
+ {
+ return DoHandleArray<ParamRefTreeHandler>(handle, inParamName);
+ }
+ virtual NvParameterized::ErrorType HandleDynamicArray(NvParameterized::Handle&, const char*)
+ {
+ return NvParameterized::ERROR_NONE;
+ }
+ virtual NvParameterized::ErrorType HandleProperty(const PvdCommLayerValue& , const char*)
+ {
+ return NvParameterized::ERROR_NONE;
+ }
+};
+
+// Third pass over a dynamic-array element: records each leaf value, in
+// traversal order, into an external array so the caller can send them as one
+// PVD array-block row.
+class ValueRecorderTreeHandler : public TopLevelParamTreeHandler
+{
+	physx::Array<PvdCommLayerValue>* mValues;	// destination for recorded values; not owned
+public:
+	ValueRecorderTreeHandler(TopLevelParamTreeHandler& inHandler, physx::Array<PvdCommLayerValue>* inValues)
+		: TopLevelParamTreeHandler(inHandler, "")
+		, mValues(inValues)
+	{
+	}
+
+	// Child-scope constructor: shares the parent recorder's value array.
+	ValueRecorderTreeHandler(TopLevelParamTreeHandler& inHandler, const char* inParamName)
+		: TopLevelParamTreeHandler(inHandler, inParamName)
+	{
+		const ValueRecorderTreeHandler& realOther = static_cast< const ValueRecorderTreeHandler& >(inHandler);
+		mValues = realOther.mValues;
+	}
+
+	// Dynamic arrays may not nest inside dynamic arrays; skip them.
+	virtual NvParameterized::ErrorType HandleDynamicArray(NvParameterized::Handle&, const char*)
+	{
+		return NvParameterized::ERROR_NONE;
+	}
+
+	virtual NvParameterized::ErrorType HandleStruct(NvParameterized::Handle& handle, const char* inParamName)
+	{
+		return DoHandleStruct<ValueRecorderTreeHandler>(handle, inParamName);
+	}
+
+	virtual NvParameterized::ErrorType HandleArray(NvParameterized::Handle& handle, const char* inParamName)
+	{
+		return DoHandleArray<ValueRecorderTreeHandler>(handle, inParamName);
+	}
+
+	// References are recorded as instance ids; the referenced PVD objects were
+	// created in an earlier pass.
+	virtual NvParameterized::ErrorType HandleRef(NvParameterized::Handle& handle, const char* inParamName)
+	{
+		NvParameterized::Interface* refObj = 0;
+		if (handle.getParamRef(refObj) != NvParameterized::ERROR_NONE)
+		{
+			return(NvParameterized::ERROR_INVALID_PARAMETER_HANDLE);
+		}
+		uint64_t refObjId(PtrToPVD(refObj));
+		return HandleProperty(createInstanceId(refObjId), inParamName);
+	}
+
+	// Leaf values are appended to the shared array rather than sent directly.
+	virtual NvParameterized::ErrorType HandleProperty(const PvdCommLayerValue& inValue, const char*)
+	{
+		mValues->pushBack(inValue);
+		return NvParameterized::ERROR_NONE;
+	}
+
+};
+
+// Drives the multi-pass serialization of a single dynamic array:
+//   1) define a PVD pseudo-class from the first element's property layout,
+//   2) create PVD objects for any references held by the elements (only if
+//      pass 1 saw references),
+//   3) stream every element's values inside a PVD array block.
+class DynamicArrayParamTreeHandler : public TopLevelParamTreeHandler
+{
+	physx::Array<PvdCommLayerValue> mValues;	// per-element value scratch buffer
+	ApexSimpleString mTypeName;					// pseudo-class name: "<className>.<arrayName>"
+	uint64_t mInstanceHandle;					// PVD handle assigned to the array object
+public:
+	DynamicArrayParamTreeHandler(const TopLevelParamTreeHandler& inOther, const char* inNewPrefix, uint64_t inInstanceHandle)
+		: TopLevelParamTreeHandler(inOther, "")
+		, mInstanceHandle(inInstanceHandle)
+	{
+		Append(mTypeName, mObj->className());
+		mTypeName += '.';
+		Append(mTypeName, inNewPrefix);
+	}
+
+	virtual NvParameterized::ErrorType TraverseParamDefTree(NvParameterized::Handle& handle)
+	{
+		const NvParameterized::Definition* theDef = handle.parameterDefinition();
+		int arraySize = 0;
+		handle.getArraySize(arraySize);
+		if (arraySize > 0)
+		{
+			// NOTE(review): the class key is the definition pointer truncated to
+			// 32 bits -- assumed unique per element type; confirm on 64-bit builds.
+			uint32_t theClassKey((uint32_t)(size_t)theDef);
+			mRemoteDebugger->createClass(mTypeName.c_str(), theClassKey);
+			// Pass 1: derive the property layout from element 0.
+			handle.set(0);
+			PropertyDefinitionTreeHandler theHandler(*this, theClassKey);
+			theHandler.TraverseParamDefTree(handle);
+			handle.popIndex();
+			uint32_t thePropertyCount(theHandler.GetPropertyCount());
+			if (thePropertyCount)
+			{
+				// Pass 2 (only when references exist): create the ref objects.
+				if (theHandler.HasRefs())
+				{
+					for (int idx = 0; idx < arraySize; ++idx)
+					{
+						handle.set(idx);
+						ParamRefTreeHandler refHandler(*this);
+						refHandler.TraverseParamDefTree(handle); //Create the ref objects
+						handle.popIndex();
+					}
+				}
+				// Pass 3: stream each element's values as one array-block row.
+				mValues.reserve(thePropertyCount);
+				mRemoteDebugger->beginArrayBlock(theClassKey, mInstanceHandle, theHandler.GetProperties(), theHandler.getDatatypes(), thePropertyCount);
+				for (int idx = 0; idx < arraySize; ++idx)
+				{
+					handle.set(idx);
+					ValueRecorderTreeHandler valueRecorder(*this, &mValues);
+					valueRecorder.TraverseParamDefTree(handle); //Set the values in the array.
+					handle.popIndex();
+					uint32_t theValueSize(mValues.size());
+					// Only send complete rows; rows with fewer recorded values
+					// than defined properties are dropped.
+					if (theValueSize >= thePropertyCount)
+					{
+						mRemoteDebugger->sendArrayObject(&mValues[0]);
+					}
+					mValues.clear();
+				}
+				mRemoteDebugger->endArrayBlock();
+			}
+		}
+		return NvParameterized::ERROR_NONE;
+	}
+};
+
+// Default (top-level) struct handling: recurse with another top-level handler.
+NvParameterized::ErrorType TopLevelParamTreeHandler::HandleStruct(NvParameterized::Handle& handle, const char* inParamName)
+{
+	return DoHandleStruct<TopLevelParamTreeHandler>(handle, inParamName);
+}
+
+// Default (top-level) fixed-size array handling: recurse with another top-level handler.
+NvParameterized::ErrorType TopLevelParamTreeHandler::HandleArray(NvParameterized::Handle& handle, const char* paramName)
+{
+	return DoHandleArray<TopLevelParamTreeHandler>(handle, paramName);
+}
+
+// A dynamic array is sent as a separate PVD "array block" object; the owning
+// object just records an instance-id property pointing at it (or id 0 when
+// the array is empty).
+NvParameterized::ErrorType TopLevelParamTreeHandler::HandleDynamicArray(NvParameterized::Handle& handle, const char* paramName)
+{
+	int arraySize = 0;
+	handle.getArraySize(arraySize);
+	if (arraySize <= 0)
+	{
+		HandleProperty(PVD::createInstanceId(0), paramName);
+		return NvParameterized::ERROR_NONE;
+	}
+	const uint64_t theArrayHandle = mDynamicArrayHandle++;	// claim the next array handle
+	DynamicArrayParamTreeHandler theHandler(*this, paramName, theArrayHandle);
+	theHandler.TraverseParamDefTree(handle);
+	HandleProperty(PVD::createInstanceId(theArrayHandle), paramName);
+	return NvParameterized::ERROR_NONE;
+}
+
+// Handle a parameter reference.  "Included" references (flagged via the
+// INCLUDED hint) are owned sub-objects and are traversed in full; other
+// references are represented by a stub PVD object carrying only their type
+// and name.  In both cases an instance-id property is recorded on the owner.
+NvParameterized::ErrorType TopLevelParamTreeHandler::HandleRef(NvParameterized::Handle& handle, const char* inParamName)
+{
+	const NvParameterized::Definition* paramDef = handle.parameterDefinition();
+	bool includedRef = false;
+	for (int j = 0; j < paramDef->numHints(); j++)
+	{
+		const NvParameterized::Hint* hint = paramDef->hint(j);
+
+		if (strcmp("INCLUDED", hint->name()) == 0 && hint->type() == NvParameterized::TYPE_U64)
+		{
+			if (hint->asUInt())
+			{
+				includedRef = true;
+			}
+		}
+	}
+
+	NvParameterized::Interface* refObj = 0;
+	if (handle.getParamRef(refObj) != NvParameterized::ERROR_NONE)
+	{
+		return(NvParameterized::ERROR_INVALID_PARAMETER_HANDLE);
+	}
+	if (!refObj)
+	{
+		// BUG FIX: this null check previously lived only on the "included" path;
+		// the non-included path below would dereference the null pointer when
+		// reading className()/name().
+		return(NvParameterized::ERROR_INVALID_REFERENCE_VALUE);
+	}
+	uint64_t refObjId(PtrToPVD(refObj));
+
+	if (includedRef)
+	{
+		// Owned sub-object: create it in PVD and traverse it in full.
+		const char* refName = refObj->className();
+		PVD::CreateObject(mRemoteDebugger, refObjId, refName);
+		TopLevelParamTreeHandler theHandler(refObj, mRemoteDebugger, refObjId);
+		NvParameterized::Handle refHandle(*refObj);
+		theHandler.TraverseParamDefTree(refHandle);
+		HandleProperty(PVD::createInstanceId(refObjId), inParamName);
+		return NvParameterized::ERROR_NONE;
+	}
+	else
+	{
+		// External reference: create a stub object carrying only type and name.
+		const char* refName = paramDef->name();
+		PVD::CreateObject(mRemoteDebugger, refObjId, refName);
+		PVD::SetPropertyValue(mRemoteDebugger, refObjId, CreateCommLayerValue(refObj->className()), true, "type");
+		PVD::SetPropertyValue(mRemoteDebugger, refObjId, CreateCommLayerValue(refObj->name()), true, "name");
+	}
+
+	HandleProperty(createInstanceId(refObjId), inParamName);
+	return(NvParameterized::ERROR_NONE);
+}
+// Default leaf handling: set the value directly on the current PVD object.
+NvParameterized::ErrorType TopLevelParamTreeHandler::HandleProperty(const PvdCommLayerValue& inValue, const char* inParamName)
+{
+	PVD::SetPropertyValue(mRemoteDebugger, mCurPvdObj, inValue, true, inParamName);
+	return NvParameterized::ERROR_NONE;
+}
+
+// Dispatch on the parameter's type: containers recurse through the Handle*
+// callbacks, leaf types are read via the typed getParam* accessors (see the
+// HANDLE_PARAM_TYPE macro) and forwarded as PVD values.
+NvParameterized::ErrorType TopLevelParamTreeHandler::TraverseParamDefTree(NvParameterized::Handle& handle)
+{
+	// An unbound handle is first attached to the object's root parameter.
+	if (handle.numIndexes() < 1)
+	{
+		if (mObj->getParameterHandle("", handle) != NvParameterized::ERROR_NONE)
+		{
+			return(NvParameterized::ERROR_INVALID_PARAMETER_HANDLE);
+		}
+	}
+	const NvParameterized::Definition* paramDef = handle.parameterDefinition();
+	ApexSimpleString tmpStr;
+	const char* theVariableName(GetVariableName(tmpStr, mVariablePrefix, paramDef->name()));
+	switch (paramDef->type())
+	{
+	case TYPE_ARRAY:
+		// Fixed-size arrays are flattened like structs; resizable arrays get
+		// their own PVD array-block object.
+		if (paramDef->arraySizeIsFixed())
+		{
+			return HandleArray(handle, theVariableName);
+		}
+		return HandleDynamicArray(handle, theVariableName);
+	case TYPE_STRUCT:
+		return HandleStruct(handle, theVariableName);
+
+	case TYPE_BOOL:
+		HANDLE_PARAM_TYPE(bool, getParamBool);
+
+	case TYPE_STRING:
+		HANDLE_PARAM_TYPE(const char*, getParamString);
+
+	case TYPE_ENUM:
+		HANDLE_PARAM_TYPE(const char*, getParamEnum);
+
+	case TYPE_REF:
+		return HandleRef(handle, theVariableName);
+
+	case TYPE_I8:
+		HANDLE_PARAM_TYPE(int8_t, getParamI8);
+	case TYPE_I16:
+		HANDLE_PARAM_TYPE(int16_t, getParamI16);
+	case TYPE_I32:
+		HANDLE_PARAM_TYPE(int32_t, getParamI32);
+	case TYPE_I64:
+		HANDLE_PARAM_TYPE(int64_t, getParamI64);
+
+	case TYPE_U8:
+		HANDLE_PARAM_TYPE(uint8_t, getParamU8);
+	case TYPE_U16:
+		HANDLE_PARAM_TYPE(uint16_t, getParamU16);
+	case TYPE_U32:
+		HANDLE_PARAM_TYPE(uint32_t, getParamU32);
+	case TYPE_U64:
+		HANDLE_PARAM_TYPE(uint64_t, getParamU64);
+
+	case TYPE_F32:
+		HANDLE_PARAM_TYPE(float, getParamF32);
+	case TYPE_F64:
+		HANDLE_PARAM_TYPE(double, getParamF64);
+
+	case TYPE_VEC2:
+		HANDLE_PARAM_TYPE(physx::PxVec2, getParamVec2);
+
+	case TYPE_VEC3:
+		HANDLE_PARAM_TYPE(physx::PxVec3, getParamVec3);
+
+	case TYPE_VEC4:
+		HANDLE_PARAM_TYPE(physx::PxVec4, getParamVec4);
+
+	case TYPE_TRANSFORM:
+		HANDLE_PARAM_TYPE(physx::PxTransform, getParamTransform);
+
+	case TYPE_QUAT:
+		HANDLE_PARAM_TYPE(physx::PxQuat, getParamQuat);
+
+	case TYPE_MAT33:
+	{
+		physx::PxMat33 tmp;
+		handle.getParamMat33(tmp);
+		return HandleDataType(physx::PxMat33(tmp), theVariableName);
+	}
+
+	case TYPE_MAT34:
+	{
+		// 3x4 matrices are read as a PxMat44 and wrapped in the legacy 3x4 type.
+		physx::PxMat44 tmp;
+		handle.getParamMat34(tmp);
+		return HandleDataType(PxMat34Legacy(tmp), theVariableName);
+	}
+
+	case TYPE_BOUNDS3:
+		HANDLE_PARAM_TYPE(physx::PxBounds3, getParamBounds3);
+
+	case TYPE_POINTER:
+	case TYPE_MAT44: //mat44 unhandled for now
+	case TYPE_UNDEFINED:
+	case TYPE_LAST:
+		return NvParameterized::ERROR_TYPE_NOT_SUPPORTED;
+	}
+	return NvParameterized::ERROR_NONE;
+}
+
+
+
+// Public entry point: serialize the whole NvParameterized object graph rooted
+// at 'obj' to the PVD stream, attaching properties to the given PVD object.
+NvParameterized::ErrorType
+traverseParamDefTree(NvParameterized::Interface& obj,
+                     PVD::PvdDataStream* remoteDebugger,
+                     void* curPvdObj,
+                     NvParameterized::Handle& handle)
+{
+	TopLevelParamTreeHandler rootHandler(&obj, remoteDebugger, PtrToPVD(curPvdObj));
+	rootHandler.TraverseParamDefTree(handle);
+	return NvParameterized::ERROR_NONE;
+}
+}
+
+#endif \ No newline at end of file
diff --git a/APEX_1.4/shared/internal/src/authoring/ApexCSG.cpp b/APEX_1.4/shared/internal/src/authoring/ApexCSG.cpp
new file mode 100644
index 00000000..50a7b046
--- /dev/null
+++ b/APEX_1.4/shared/internal/src/authoring/ApexCSG.cpp
@@ -0,0 +1,3140 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#include "ApexUsingNamespace.h"
+#include "PxSimpleTypes.h"
+#include "PxFileBuf.h"
+
+#include "authoring/ApexCSGDefs.h"
+#include "authoring/ApexCSGSerialization.h"
+#include "ApexSharedSerialization.h"
+#include "RenderDebugInterface.h"
+
+#include <stdio.h>
+
+#include "PxErrorCallback.h"
+
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+using namespace nvidia;
+using namespace apex;
+using namespace nvidia;
+
+namespace ApexCSG
+{
+
+// Tolerances for geometric calculations
+
+#define CSG_EPS ((Real)1.0e-9)
+
+BSPTolerances
+gDefaultTolerances;
+
+
+// Set to 1 to Measure the stack
+#define MEASURE_STACK_USAGE 0
+
+#if MEASURE_STACK_USAGE
+static size_t
+gStackTop = (size_t)-1;
+static size_t
+gStackBottom = (size_t)-1;
+
+#define RECORD_STACK_TOP() \
+{ \
+ int x; \
+ gStackTop = (size_t)&x; \
+ gStackBottom = (size_t)-1; \
+}
+#define RECORD_STACK_BOTTOM() \
+{ \
+ int x; \
+ gStackBottom = PxMin(gStackBottom, (size_t)&x); \
+}
+#define OUTPUT_STACK_USAGE(fn_name) \
+{ \
+ char stackMsg[100]; \
+ sprintf(stackMsg, "%s stack usage: %d bytes", #fn_name, gStackTop - gStackBottom); \
+ debugWarn(stackMsg); \
+}
+#else
+#define RECORD_STACK_TOP()
+#define RECORD_STACK_BOTTOM()
+#define OUTPUT_STACK_USAGE(fn_name)
+#endif
+
+
+/* Interpolator */
+
+size_t
+Interpolator::s_offsets[Interpolator::VertexFieldCount];
+
+static InterpolatorBuilder
+sInterpolatorBuilder;
+
+// Stream out one triangle frame per interpolated vertex field, in field order.
+void
+Interpolator::serialize(physx::PxFileBuf& stream) const
+{
+	uint32_t fieldIndex = 0;
+	while (fieldIndex < VertexFieldCount)
+	{
+		stream << m_frames[fieldIndex];
+		++fieldIndex;
+	}
+}
+
+// Stream in the per-field triangle frames.  Streams older than
+// SerializingTriangleFrames contain no frame data, so defaults are kept.
+void
+Interpolator::deserialize(physx::PxFileBuf& stream, uint32_t version)
+{
+	if (version >= Version::SerializingTriangleFrames)
+	{
+		for (uint32_t fieldIndex = 0; fieldIndex < VertexFieldCount; ++fieldIndex)
+		{
+			stream >> m_frames[fieldIndex];
+		}
+	}
+}
+
+
+/* Utilities */
+
+#define debugInfo(_msg) GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_INFO, _msg, __FILE__, __LINE__)
+#define debugWarn(_msg) GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, _msg, __FILE__, __LINE__)
+
+// Approximate comparison of two 4x4 transforms.  Diagonal entries use eps^2,
+// translation entries use a tolerance scaled by the magnitudes of the two
+// positions (translations are unnormalized), and all other entries use eps.
+static bool
+transformsEqual(const physx::PxMat44& a, const physx::PxMat44& b, float eps)
+{
+	const float eps2 = eps*eps;
+	const float scaledEps = eps*PxMax(a.getPosition().abs().maxElement(), b.getPosition().abs().maxElement());
+
+	for (unsigned i = 0; i < 4; ++i)
+	{
+		for (unsigned j = 0; j < 4; ++j)
+		{
+			// BUG FIX: was (i == 4), which can never hold for i < 4 and left
+			// scaledEps unused.  The position occupies column 3 of a PxMat44,
+			// so the scaled tolerance applies when j == 3.
+			const float tol = i == j ? eps2 : (j == 3 ? scaledEps : eps);
+			if (!physx::PxEquals(a(i,j), b(i,j), tol))
+			{
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
+// Returns true iff every column of the matrix is the zero vector.
+static bool
+isZero(const physx::PxMat44& a)
+{
+	for (unsigned col = 0; col < 4; ++col)
+	{
+		if (!a[col].isZero())
+		{
+			return false;
+		}
+	}
+	return true;
+}
+
+// Convert a PhysX single-precision 4x4 matrix to the CSG Real-typed matrix,
+// element by element.
+PX_INLINE Mat4Real
+CSGFromPx(const physx::PxMat44& a)
+{
+	Mat4Real result;
+	for (unsigned row = 0; row < 4; ++row)
+	{
+		for (unsigned col = 0; col < 4; ++col)
+		{
+			result[row][col] = (Real)a(row,col);
+		}
+	}
+	return result;
+}
+
+// Convert a CSG Real-typed 4x4 matrix back to a PhysX single-precision matrix,
+// element by element.
+PX_INLINE physx::PxMat44
+PxFromCSG(const Mat4Real& a)
+{
+	physx::PxMat44 result;
+	for (unsigned row = 0; row < 4; ++row)
+	{
+		for (unsigned col = 0; col < 4; ++col)
+		{
+			result(row,col) = (float)a[row][col];
+		}
+	}
+	return result;
+}
+
+// Fallback random number generator used when no UserRandom implementation is
+// supplied; wraps a QDSRand generator.  A single shared instance (defaultRnd)
+// is defined below.
+class DefaultRandom : public UserRandom
+{
+public:
+	// Returns the next raw 32-bit value from the underlying generator.
+	uint32_t getInt()
+	{
+		return m_rnd.nextSeed();
+	}
+	// Returns a value scaled into [min, max].
+	float getReal(float min, float max)
+	{
+		return m_rnd.getScaled(min, max);
+	}
+
+	QDSRand m_rnd;
+} defaultRnd;
+
+// Classify a point against a plane: -1 below, 0 on the plane (within the
+// given tolerance), +1 above.
+PX_INLINE int
+cmpPointToPlane(const Pos& pos, const Plane& plane, Real tol)
+{
+	const Real dist = plane.distance(pos);
+	if (dist < -tol)
+	{
+		return -1;
+	}
+	return dist < tol ? 0 : 1;
+}
+
+// Pairs a value with its original index, and provides a qsort-compatible
+// comparator for sorting by increasing value.
+template<typename T>
+struct IndexedValue
+{
+	T value;
+	uint32_t index;
+
+	static int cmpIncreasing(const void* a, const void* b)
+	{
+		const T& valueA = ((const IndexedValue*)a)->value;
+		const T& valueB = ((const IndexedValue*)b)->value;
+		if (valueA == valueB)
+		{
+			return 0;
+		}
+		return valueA < valueB ? -1 : 1;
+	}
+};
+
+
+// Clip a circular doubly-linked polygon in place against a plane, keeping the
+// part on the negative (below) side.  Vertices created or discarded during
+// clipping come from / return to the supplied pool.  Returns a vertex on the
+// clipped polygon, or NULL if the polygon lies entirely above the plane.
+PX_INLINE LinkedVertex*
+clipPolygonByPlane(LinkedVertex* poly, const Plane& plane, Pool<LinkedVertex>& pool, Real tol)
+{
+	LinkedVertex* prev = poly;
+	int prevSide = cmpPointToPlane(prev->vertex, plane, tol);
+	bool outsideFound = prevSide == 1;
+	bool insideFound = prevSide == -1;
+	// clip0/clip1 mark where the boundary leaves/enters the kept half-space.
+	LinkedVertex* clip0 = NULL;
+	LinkedVertex* clip1 = NULL;
+	LinkedVertex* next = prev->getAdj(1);
+	if (next != poly)
+	{
+		// Walk the ring once, inserting intersection vertices at plane crossings.
+		do
+		{
+			int nextSide = cmpPointToPlane(next->vertex, plane, tol);
+			switch (nextSide)
+			{
+			case -1:
+				insideFound = true;
+				if (prevSide == 1)
+				{
+					// Clip
+					clip1 = pool.borrow();
+					const Dir disp = next->vertex - prev->vertex;
+					const Real dDisp = disp | plane.normal();
+					PX_ASSERT(dDisp < 0);
+					const Real dAbove = plane.distance(prev->vertex);
+					clip1->vertex = prev->vertex - (dAbove / dDisp) * disp;
+					next->setAdj(0, clip1); // Insert clip1 between prev and next
+				}
+				else if (prevSide == 0)
+				{
+					clip1 = prev;
+				}
+				break;
+			case 0:
+				// On-plane vertex: becomes a clip endpoint if we crossed sides.
+				if (prevSide == -1)
+				{
+					clip0 = next;
+				}
+				else if (prevSide == 1)
+				{
+					clip1 = next;
+				}
+				break;
+			case 1:
+				outsideFound = true;
+				if (prevSide == -1)
+				{
+					// Clip
+					clip0 = pool.borrow();
+					const Dir disp = next->vertex - prev->vertex;
+					const Real dDisp = disp | plane.normal();
+					PX_ASSERT(dDisp > 0);
+					const Real dBelow = plane.distance(prev->vertex);
+					clip0->vertex = prev->vertex - (dBelow / dDisp) * disp;
+					next->setAdj(0, clip0); // Insert clip0 between prev and next
+				}
+				else if (prevSide == 0)
+				{
+					clip0 = prev;
+				}
+				break;
+			}
+			prev = next;
+			prevSide = nextSide;
+			next = prev->getAdj(1);
+		}
+		while (prev != poly);
+	}
+
+	// Crossings come in pairs: either both clip points were found or neither.
+	PX_ASSERT((clip0 != NULL) == (clip1 != NULL));
+
+	if (clip0 != NULL && clip1 != NULL && clip0 != clip1)
+	{
+		// Get rid of vertices between clip0 and clip1
+		LinkedVertex* v = clip0->getAdj(1);
+		while (v != clip1)
+		{
+			LinkedVertex* w = v->getAdj(1);
+			v->remove();
+			pool.replace(v);
+			v = w;
+		}
+		poly = clip1;
+	}
+
+	if (outsideFound && !insideFound)
+	{
+		// Completely outside. Eliminate.
+		LinkedVertex* v;
+		do
+		{
+			v = poly->getAdj(0);
+			v->remove();
+			pool.replace(v);
+		}
+		while (v != poly);
+		poly = NULL;
+	}
+
+	return poly;
+}
+
// Clips a triangle against all bounding surfaces of a BSP leaf (optionally
// skipping one plane).  If clippedMesh is not NULL, it will be appended with
// clipped triangles from the leaf.  Outputs, via the reference parameters, the
// surviving triangle area (clippedTriangleArea) and the signed volume of the
// pyramid formed by the clipped surface and 'origin' (clippedPyramidVolume).
// 'edgeTraversalDir' selects the adjacency direction used to chain the ring.
PX_INLINE void
clipTriangleToLeaf(physx::Array<Triangle>* clippedMesh, Real& clippedTriangleArea, Real& clippedPyramidVolume, const Pos& origin,
                   const Triangle& tri, const BSP::Node* leaf, uint32_t edgeTraversalDir,
                   Pool<LinkedVertex>& vertexPool, Real distanceTol, const physx::Array<Plane>& planes, uint32_t skipPlaneIndex = 0xFFFFFFFF)
{
	clippedTriangleArea = (Real)0;
	clippedPyramidVolume = (Real)0;

	// Form a ring of vertices out of the triangle
	LinkedVertex* v0 = vertexPool.borrow();
	LinkedVertex* v1 = vertexPool.borrow();
	LinkedVertex* v2 = vertexPool.borrow();
	v0->vertex = tri.vertices[0];
	v1->vertex = tri.vertices[1];
	v2->vertex = tri.vertices[2];
	v0->setAdj(edgeTraversalDir, v1);
	v1->setAdj(edgeTraversalDir, v2);

	// Successively clip the ring by each surface bounding the leaf
	for (SurfaceIt it(leaf); it.valid(); it.inc())
	{
		if (it.surface()->planeIndex == skipPlaneIndex)
		{
			continue;
		}
		// it.side() chooses which halfspace of the plane bounds this leaf
		const Real sign = it.side() ? (Real)1 : -(Real)1;
		v0 = clipPolygonByPlane(v0, sign * planes[it.surface()->planeIndex], vertexPool, distanceTol);
		if (v0 == NULL)
		{
			break; // Completely clipped away
		}
	}

	if (v0 != NULL)
	{
		// Something remains. Add to clippedMesh if it's not NULL
		v1 = v0->getAdj(1);
		v2 = v1->getAdj(1);
		if (v1 != v0 && v2 != v0)	// Need at least three distinct vertices
		{
			if (clippedMesh != NULL)
			{
				// Triangluate (fan around v0), emitting real triangles and
				// accumulating area and pyramid volume from them
				do
				{
					Triangle& newTri = clippedMesh->insert();
					newTri.vertices[0] = v0->vertex;
					newTri.vertices[1] = v1->vertex;
					newTri.vertices[2] = v2->vertex;
					newTri.submeshIndex = tri.submeshIndex;
					newTri.smoothingMask = tri.smoothingMask;
					newTri.extraDataIndex = tri.extraDataIndex;
					newTri.calculateQuantities();
					clippedTriangleArea += newTri.area;
					clippedPyramidVolume += newTri.area*((newTri.vertices[0]-origin)|newTri.normal); // 3 * volume
					v1 = v2;
					v2 = v2->getAdj(1);
				}
				while (v2 != v0);
			}
			else
			{
				// Triangluate (fan around v0), accumulating quantities only
				do
				{
					Dir normal = Dir(v1->vertex - v0->vertex)^Dir(v2->vertex - v0->vertex);
					const Real area = (Real)0.5 * normal.normalize();
					clippedTriangleArea += area;
					clippedPyramidVolume += area*((v0->vertex-origin)|normal); // 3 * volume
					v1 = v2;
					v2 = v2->getAdj(1);
				}
				while (v2 != v0);
			}
		}
		// Return links to pool.
		LinkedVertex* v;
		do
		{
			v = v0->getAdj(0);
			v->remove();
			vertexPool.replace(v);
		}
		while (v != v0);
	}

	// The accumulated sum was 3x the pyramid volume; scale down once at the end
	clippedPyramidVolume *= (Real)0.333333333333333333;
}
+
// Computes the line of intersection of two planes.  On success 'dir' holds the
// normalized line direction (n0 x n1) and 'pos' a point lying on both planes,
// solved in the span of the two normals and then refined once with the
// residual plane distances.  Returns false if the planes are nearly parallel
// (cross product below EPS_REAL).
PX_INLINE bool
intersectPlanes(Pos& pos, Dir& dir, const Plane& plane0, const Plane& plane1)
{
	const Dir n0 = plane0.normal();
	const Dir n1 = plane1.normal();

	dir = n0^n1;
	const Real dir2 = dir.lengthSquared();
	if (dir2 < square(EPS_REAL))
	{
		return false;	// (Nearly) parallel planes: no unique intersection line
	}

	const Real recipDir2 = (Real)1/dir2; // DIVIDE

	// Normalize dir
	dir *= sqrt(recipDir2);

	// Calculate point in both planes (expressed as a combination of n0 and n1)
	const Real n0n0RecipDir2 = n0.lengthSquared()*recipDir2;
	const Real n1n1RecipDir2 = n1.lengthSquared()*recipDir2;
	const Real n0n1RecipDir2 = (n0|n1)*recipDir2;
	pos = Pos((Real)0) + (plane1.d()*n0n1RecipDir2 - plane0.d()*n1n1RecipDir2)*n0 + (plane0.d()*n0n1RecipDir2 - plane1.d()*n0n0RecipDir2)*n1;

	// Improve accuracy of solution: one correction step using the residual
	// signed distances of pos from each plane
	const Real error0 = pos|plane0;
	const Real error1 = pos|plane1;
	pos += (error1*n0n1RecipDir2 - error0*n1n1RecipDir2)*n0 + (error0*n0n1RecipDir2 - error1*n0n0RecipDir2)*n1;

	return true;
}
+
+PX_INLINE bool
+intersectLineWithHalfspace(Real& minS, Real& maxS, const Pos& pos, const Dir& dir, const Plane& plane)
+{
+ const Real num = -(pos|plane);
+ const Real den = dir|plane;
+ if (den < -CSG_EPS)
+ {
+ const Real s = num/den;
+ if (s > minS)
+ {
+ minS = s;
+ }
+ }
+ else
+ if (den > CSG_EPS)
+ {
+ const Real s = num/den;
+ if (s < maxS)
+ {
+ maxS = s;
+ }
+ }
+ else
+ if (num < -CSG_EPS)
+ {
+ minS = CSG_EPS;
+ maxS = -CSG_EPS;
+ }
+
+ return minS < maxS;
+}
+
// Computes the surface area and volume of the convex region formed by
// intersecting the halfspaces of the given planes.  Each face (plane i) is
// traversed edge-by-edge: the line (plane i) ∩ (plane j) is clipped against
// the remaining halfspaces, and surviving segments contribute cross-product
// terms to the face area; the face's height 'h' over a shared origin then
// contributes to the volume.  'cofInternalTransform' (presumably the cofactor
// matrix of the internal transform) rescales face areas via its action on the
// normals, and its [3][3] entry rescales the volume.
// Returns true if the leaf has finite area and volume, false otherwise
PX_INLINE bool
calculateLeafAreaAndVolume(Real& area, Real& volume, const Plane* planes, uint32_t planeCount, const Mat4Real& cofInternalTransform)
{
	if (planeCount <= 1)
	{
		// Zero or one halfspace can never bound a finite region
		area = MAX_REAL;
		volume = MAX_REAL;
		return false;
	}

	area = (Real)0;
	volume = (Real)0;

	bool originSet = false;
	Pos origin(0.0f);	// First region vertex found; apex of the volume pyramids
	for (uint32_t i = 0; i < planeCount; ++i)
	{
		bool p0Set = false;
		Pos p0(0.0f);	// First vertex found on face i; fan origin for the face area
		Real h = (Real)0;	// Height of face i above 'origin'
		Real faceArea = (Real)0;	// 2x face area, accumulated as cross products
		for (uint32_t j = 0; j < planeCount; ++j)
		{
			if (j == i)
			{
				continue;
			}
			Pos pos;
			Dir dir;
			if (!intersectPlanes(pos, dir, planes[i], planes[j]))
			{
				continue;	// Parallel planes share no edge line
			}
			Pos v1, v2;
			Real minS = -MAX_REAL;
			Real maxS = MAX_REAL;
			// Clip the edge line against every other halfspace
			for (uint32_t k = 0; k < planeCount; ++k)
			{
				if (k == j || k == i)
				{
					continue;
				}
				if (!intersectLineWithHalfspace(minS, maxS, pos, dir, planes[k]))
				{
					break;
				}
			}

			if (minS >= maxS)
			{
				continue;	// Edge completely clipped away
			}

			if (minS == -MAX_REAL || maxS == MAX_REAL)
			{
				// An unclipped edge direction means the region is unbounded
				area = MAX_REAL;
				volume = MAX_REAL;
				return false;
			}

			const Pos p1 = pos + minS*dir;
			if (!originSet)
			{
				origin = p1;
				originSet = true;
			}
			if (!p0Set)
			{
				p0 = p1;
				h = (p0-origin)|planes[i];
				p0Set = true;
				continue; // The edge (p1,p2) won't contribute to the area or volume
			}
			const Pos p2 = pos + maxS*dir;
			// Signed parallelogram area of triangle (p0,p1,p2) projected on the face normal
			faceArea += (Dir(p1-p0)^Dir(p2-p0))|planes[i];
		}
		// Rescale the face area by the transformed normal's length
		area += faceArea*physx::PxSqrt((cofInternalTransform*planes[i].normal()).lengthSquared());
		volume += faceArea*h;
	}

	// faceArea terms were 2x areas; pyramid volumes are (1/3)*base*height
	area *= (Real)0.5;
	volume *= (Real)0.16666666666666666667*cofInternalTransform[3][3];

	return true;
}
+
+
+// GSA for a generic plane container
+struct PlaneIteratorInit
+{
+ PlaneIteratorInit() : first(NULL), stop(NULL) {}
+
+ Plane* first;
+ Plane* stop;
+};
+
+class PlaneIterator
+{
+public:
+ PlaneIterator(const PlaneIteratorInit& listBounds) : current(listBounds.first), stop(listBounds.stop) {}
+
+ bool valid() const
+ {
+ return current != stop;
+ }
+
+ void inc()
+ {
+ ++current;
+ }
+
+ Plane plane() const
+ {
+ return *current;
+ }
+
+private:
+ Plane* current;
+ Plane* stop;
+};
+
// GSA convex polyhedron backed by a raw, contiguous array of Planes.
class HalfspaceIntersection : public ApexCSG::GSA::StaticConvexPolyhedron<PlaneIterator, PlaneIteratorInit>
{
public:
	// Points the iterator bounds at [first, first + count).  The planes are
	// not copied; the caller must keep the array alive while this is in use.
	void setPlanes(Plane* first, uint32_t count)
	{
		m_initValues.first = first;
		m_initValues.stop = first + count;
	}
};
+
+
+/* BSP */
+
// Constructs an empty BSP.  If no memory cache is supplied, a private cache is
// created here and released in the destructor (m_ownsMemCache).  The initial
// tree is a single leaf, which represents "inside" (all space).
BSP::BSP(IApexBSPMemCache* memCache, const physx::PxMat44& internalTransform) :
	m_root(NULL),
	m_meshSize(1),	// Unitless until a mesh is supplied
	m_meshBounds(physx::PxBounds3::empty()),
	m_internalTransform(internalTransform),
	m_internalTransformInverse(CSGFromPx(internalTransform).inverse34()),
	m_incidentalMesh(false),
	m_combined(false),
	m_combiningMeshSize(1),
	m_combiningIncidentalMesh(false),
	m_memCache((BSPMemCache*)memCache),
	m_ownsMemCache(false)
{
	if (m_memCache == NULL)
	{
		m_memCache = (BSPMemCache*)createBSPMemCache();
		m_ownsMemCache = true;
	}

	// Always have a node. The trivial (one-leaf) tree is considered "inside".
	m_root = m_memCache->m_nodePool.borrow();
}
+
// Releases the memory cache only if this BSP created it in its constructor.
BSP::~BSP()
{
	if (m_ownsMemCache)
	{
		m_memCache->release();
	}
}
+
// Replaces the full set of build/processing tolerances used by this BSP.
void
BSP::setTolerances(const BSPTolerances& tolerances)
{
	m_tolerarnces = tolerances;	// NOTE: member name carries a historical typo ("tolerarnces"), declared elsewhere
}
+
// Builds the BSP from a triangle mesh.  Pipeline: shuffle triangles, copy and
// normalize them into a unit-sized/zero-centered space, optionally snap
// vertices to a grid, coalesce coplanar triangles into surfaces (one plane
// each), recursively build the tree, then transform back to mesh space (or to
// the space requested by params.internalTransform).  Returns false on empty
// input or if tree building fails/was cancelled.
bool
BSP::fromMesh(const nvidia::ExplicitRenderTriangle* mesh, uint32_t triangleCount, const BSPBuildParameters& params, IProgressListener* progressListener, volatile bool* cancel)
{
	if (triangleCount == 0)
	{
		return false;
	}

	clear();

	// Shuffle triangle ordering (Fisher-Yates; the 64-bit multiply + >>32 maps
	// a 32-bit random value into [0, triangleCount - i) without modulo bias)
	physx::Array<uint32_t> triangleOrder(triangleCount);
	for (uint32_t i = 0; i < triangleCount; ++i)
	{
		triangleOrder[i] = i;
	}
	UserRandom* rnd = params.rnd != NULL ? params.rnd : &defaultRnd;
	for (uint32_t i = 0; i < triangleCount; ++i)
	{
		nvidia::swap(triangleOrder[i], triangleOrder[i + (uint32_t)(((uint64_t)rnd->getInt() * (uint64_t)(triangleCount - i)) >> 32)]);
	}

	// Collect mesh triangles and find mesh bounds
	m_mesh.resize(triangleCount);
	m_frames.resize(triangleCount);
	m_meshBounds.setEmpty();
	for (uint32_t i = 0; i < m_mesh.size(); ++i)
	{
		const ExplicitRenderTriangle& inTri = mesh[triangleOrder[i]];
		VertexData vertexData[3];
		m_mesh[i].fromExplicitRenderTriangle(vertexData, inTri);
		m_frames[i].setFromTriangle(m_mesh[i], vertexData);
		m_meshBounds.include(inTri.vertices[0].position);
		m_meshBounds.include(inTri.vertices[1].position);
		m_meshBounds.include(inTri.vertices[2].position);
	}

	// Size scales
	const Dir extents(m_meshBounds.getExtents());
	m_meshSize = PxMax(extents[0], PxMax(extents[1], extents[2]));

	// Scale to unit size and zero offset for BSP building.  Axes with extent
	// below the linear tolerance are left unscaled (factor 1) to avoid
	// dividing by (near) zero.  ('recipScale' is the reciprocal of 'scale'.)
	const Vec4Real recipScale((extents[0] > m_tolerarnces.linear ? extents[0] : (Real)1), (extents[1] > m_tolerarnces.linear ? extents[1] : (Real)1), (extents[2] > m_tolerarnces.linear ? extents[2] : (Real)1), (Real)1);
	const Vec4Real scale((Real)1/recipScale[0], (Real)1/recipScale[1], (Real)1/recipScale[2], (Real)1);
	const Pos center = m_meshBounds.getCenter();
	const Real gridSize = (Real)params.snapGridSize;
	const Real recipGridSize = params.snapGridSize > 0 ? (Real)1/gridSize : (Real)0;
	// Rescale
	for (physx::PxU32 i = 0; i < m_mesh.size(); ++i)
	{
		Triangle& tri = m_mesh[i];
		for (physx::PxU32 j = 0; j < 3; ++j)
		{
			Pos& pos = tri.vertices[j];
			pos = (pos - center) * scale;
		}
	}

	// Align vertices: round each coordinate to the nearest multiple of
	// 1/gridSize so nearly-identical vertices snap to common values
	if (params.snapGridSize > 0)
	{
		physx::Array< IndexedValue<Real> > snapValues[3]; // x, y, and z
		snapValues[0].resize(3*m_mesh.size());
		snapValues[1].resize(3*m_mesh.size());
		snapValues[2].resize(3*m_mesh.size());
		for (physx::PxU32 i = 0; i < m_mesh.size(); ++i)
		{
			Triangle& tri = m_mesh[i];
			for (physx::PxU32 j = 0; j < 3; ++j)
			{
				const Pos& pos = tri.vertices[j];
				for (int e = 0; e < 3; ++e)
				{
					const physx::PxU32 index = i*3+j;
					IndexedValue<Real>& v = snapValues[e][index];
					v.index = index;
					v.value = pos[e];
				}
			}
		}

		for (int e = 0; e < 3; ++e)
		{
			for (physx::PxU32 valueNum = 0; valueNum < snapValues[e].size(); ++valueNum)
			{
				// Recover triangle (i) and vertex (j) from the flattened index
				const physx::PxU32 index = snapValues[e][valueNum].index;
				const physx::PxU32 i = index/3;
				const physx::PxU32 j = index-3*i;
				m_mesh[i].vertices[j][e] = recipGridSize*floor(gridSize * snapValues[e][valueNum].value + (Real)0.5);
			}
		}
	}

	// Cache triangle quantities
	for (physx::PxU32 i = 0; i < m_mesh.size(); ++i)
	{
		m_mesh[i].calculateQuantities();
	}

	// Initialize surface stack with surfaces formed from mesh triangles
	physx::Array<Surface> surfaceStack;

	// Crude estimate, hopefully will reduce re-allocations
	surfaceStack.reserve(m_mesh.size() * ((int)physx::PxLog((float)m_mesh.size()) + 1));

	// Track maximum and total surface triangle area
	float maxArea = 0;
	Real totalArea = 0;

	// Add mesh triangles: group consecutive runs of coplanar triangles into a
	// single Surface sharing one plane
	uint32_t triangleIndex = 0;
	while (triangleIndex < m_mesh.size())
	{
		// Create a surface for the next triangle
		const Triangle& tri = m_mesh[triangleIndex];
		surfaceStack.pushBack(Surface());
		Surface* surface = &surfaceStack.back();
		surface->planeIndex = m_planes.size();
		surface->triangleIndexStart = triangleIndex++;
		Real surfaceTotalTriangleArea = tri.area;
		Plane& plane = m_planes.insert();
		plane.set(tri.normal, (tri.vertices[0] + tri.vertices[1] + tri.vertices[2])/(Real)3);
		plane.normalize();
		// See if any of the remaining triangles can fit on this surface.
		for (uint32_t testTriangleIndex = triangleIndex; testTriangleIndex < m_mesh.size(); ++testTriangleIndex)
		{
			Triangle& testTri = m_mesh[testTriangleIndex];
			// Same-facing normal within angular tolerance, and all three
			// vertices within linear tolerance of the surface plane
			if ((testTri.normal ^ plane.normal()).lengthSquared() < square(m_tolerarnces.angular) && (testTri.normal | plane.normal()) > 0 &&
			        0 == cmpPointToPlane(testTri.vertices[0], plane, m_tolerarnces.linear) &&
			        0 == cmpPointToPlane(testTri.vertices[1], plane, m_tolerarnces.linear) &&
			        0 == cmpPointToPlane(testTri.vertices[2], plane, m_tolerarnces.linear))
			{
				// This triangle fits. Move it next to others in the surface.
				if (testTriangleIndex != triangleIndex)
				{
					nvidia::swap(m_mesh[triangleIndex], m_mesh[testTriangleIndex]);
					nvidia::swap(m_frames[triangleIndex], m_frames[testTriangleIndex]);
				}
				Triangle& newTri = m_mesh[triangleIndex];
				// Add in the new normal, properly weighted
				Dir averageNormal = surfaceTotalTriangleArea * plane.normal() + newTri.area * m_mesh[triangleIndex].normal;
				averageNormal.normalize();
				surfaceTotalTriangleArea += newTri.area;
				++triangleIndex;
				// Calculate the average projection
				Real averageProjection = 0;
				for (uint32_t i = surface->triangleIndexStart; i < triangleIndex; ++i)
				{
					for (uint32_t j = 0; j < 3; ++j)
					{
						averageProjection += averageNormal | m_mesh[i].vertices[j];
					}
				}
				averageProjection /= 3 * (triangleIndex - surface->triangleIndexStart);
				plane.set(averageNormal, -averageProjection);
			}
		}
		surface->triangleIndexStop = triangleIndex;
		surface->totalTriangleArea = (float)surfaceTotalTriangleArea;
		maxArea = PxMax(maxArea, surface->totalTriangleArea);
		totalArea += surfaceTotalTriangleArea;
		// Ensure triangles lie on or below surface
		Real maxProjection = -MAX_REAL;
		for (uint32_t i = surface->triangleIndexStart; i < surface->triangleIndexStop; ++i)
		{
			Triangle& tri = m_mesh[i];
			for (uint32_t j = 0; j < 3; ++j)
			{
				maxProjection = PxMax(maxProjection, plane.normal() | tri.vertices[j]);
			}
		}
		plane[3] = -maxProjection;
	}

	// Set build process constants
	BuildConstants buildConstants;
	buildConstants.m_params = params;
	buildConstants.m_recipMaxArea = maxArea > 0 ? 1.0f / maxArea : (float)0;

	// Build
	m_root = m_memCache->m_nodePool.borrow();
	PX_ASSERT(m_root != NULL);

	QuantityProgressListener quantityListener(totalArea, progressListener);
	bool ok = buildTree(m_root, surfaceStack, 0, surfaceStack.size(), buildConstants, &quantityListener, cancel);
	if (!ok)
	{
		return false;
	}

	// Bring the mesh back to actual size: tm = scale-back + re-center
	Mat4Real tm;
	tm.set((Real)1);
	for (uint32_t i = 0; i < 3; ++i)
	{
		tm[i][i] = recipScale[i];
		tm[i][3] = center[i];
	}

	// Currently the BSP is in "unit space", and tm will transform the BSP back to mesh space.
	// If params.internalTransform is valid, then the user is asking that:
	//     (BSP space) = (params.internalTransform)(mesh space)
	// But (mesh space) = (tm)(unit space), so (BSP space) = (params.internalTransform)(tm)(unit space),
	// and therefore we apply (params.internalTransform)(tm).
	// If params.internalTransform is not valid, then the user is asking to keep the BSP in unit space,
	// so our effective internalTransform is the inverse of tm.
	if (!isZero(params.internalTransform))
	{
		m_internalTransform = params.internalTransform;
		const Mat4Real internalTransformCSG = CSGFromPx(m_internalTransform);
		m_internalTransformInverse = internalTransformCSG.inverse34();
		const Real meshSize = m_meshSize; // Save off mesh size. This gets garbled by scaled transforms.
		transform(internalTransformCSG*tm, false);
		// NOTE(review): maxScale is the max *squared* column length here (no sqrt) -
		// confirm whether m_meshSize is meant to scale by length or length squared
		const Real maxScale = PxMax(internalTransformCSG[0].lengthSquared(), PxMax(internalTransformCSG[1].lengthSquared(), internalTransformCSG[2].lengthSquared()));
		m_meshSize = meshSize*maxScale;
	}
	else
	{
		m_internalTransformInverse = tm;
		m_internalTransform = PxFromCSG(tm.inverse34());
		m_meshSize = (Real)1;
	}

	// Delete triangle info if requested. This is done here in case any of the processing above needs this info.
	if (!params.keepTriangles)
	{
		deleteTriangles();
	}

//	performDiagnostics();

	return true;
}
+
// Builds the BSP directly from a convex polyhedron given as a set of planes.
// The tree is a simple chain: each plane becomes one branch whose outside
// child is an "outside" leaf, with the final inside leaf at the chain's end.
// If the planes bound an empty set, the BSP collapses to the empty set.  An
// optional triangle mesh may be attached (normalized, flagged as incidental).
// Always returns true.
bool
BSP::fromConvexPolyhedron(const physx::PxPlane* poly, uint32_t polySize, const physx::PxMat44& internalTransform, const nvidia::ExplicitRenderTriangle* mesh, uint32_t triangleCount)
{
	clear();

	// Default is all space. If there are no planes, that is the result.
	m_root = m_memCache->m_nodePool.borrow();
	PX_ASSERT(m_root != NULL);

	if (polySize == 0)
	{
		return true;
	}

	// Put planes into our format (copy n and d, then normalize)
	m_planes.resize(polySize);
	for (uint32_t planeIndex = 0; planeIndex < polySize; ++planeIndex)
	{
		for (unsigned i = 0; i < 3; ++i)
		{
			m_planes[planeIndex][i] = (Real)poly[planeIndex].n[i];
		}
		m_planes[planeIndex][3] = (Real)poly[planeIndex].d;
		m_planes[planeIndex].normalize();
	}

	// Build the tree: one branch per plane, chained through the inside child
	Node* node = m_root;
	for (uint32_t planeIndex = 0; planeIndex < polySize; ++planeIndex)
	{
		ApexCSG::Region outside;
		outside.side = 0;
		Node* child0 = m_memCache->m_nodePool.borrow();
		child0->setLeafData(outside);
		Node* child1 = m_memCache->m_nodePool.borrow(); // No need to set inside leaf data, that is the default
		Surface surface;
		surface.planeIndex = planeIndex;
		surface.triangleIndexStart = 0;
		surface.triangleIndexStop = 0;
		surface.totalTriangleArea = 0.0f;
		node->setBranchData(surface);
		node->setChild(0, child0);
		node->setChild(1, child1);
		node = child1;
	}

	// See if the planes bound a non-empty set
	RegionShape regionShape(m_planes.begin());
	regionShape.set_leaf(node);
	regionShape.calculate();
	if (!regionShape.is_nonempty())
	{
		clear();
		Region leafData;
		leafData.side = 0;
		m_root = m_memCache->m_nodePool.borrow();
		m_root->setLeafData(leafData); // Planes define a null intersection. The result is the empty set.
		return true;
	}

	// Currently there is no internal transform, BSP space = poly space
	// With internalTransform is valid, then the user is asking that:
	//     (BSP space) = (params.internalTransform)(poly space)
	// so we simply transform by params.internalTransform.
	if (!isZero(internalTransform))
	{
		m_internalTransform = internalTransform;
		const Mat4Real internalTransformCSG = CSGFromPx(m_internalTransform);
		m_internalTransformInverse = internalTransformCSG.inverse34();
		const Real meshSize = m_meshSize; // Save off mesh size. This gets garbled by scaled transforms.
		transform(internalTransformCSG, false);
		// NOTE(review): maxScale is the max *squared* column length (no sqrt) - same question as in fromMesh
		const Real maxScale = PxMax(internalTransformCSG[0].lengthSquared(), PxMax(internalTransformCSG[1].lengthSquared(), internalTransformCSG[2].lengthSquared()));
		m_meshSize = meshSize*maxScale;
	}

	if (triangleCount > 0)
	{
		// Collect mesh triangles and find mesh bounds
		m_mesh.resize(triangleCount);
		m_frames.resize(triangleCount);
		m_meshBounds.setEmpty();
		for (uint32_t i = 0; i < m_mesh.size(); ++i)
		{
			const ExplicitRenderTriangle& inTri = mesh[i];
			VertexData vertexData[3];
			m_mesh[i].fromExplicitRenderTriangle(vertexData, inTri);
			m_frames[i].setFromTriangle(m_mesh[i], vertexData);
			m_meshBounds.include(inTri.vertices[0].position);
			m_meshBounds.include(inTri.vertices[1].position);
			m_meshBounds.include(inTri.vertices[2].position);
		}

		// Size scales
		const Dir extents(m_meshBounds.getExtents());
		m_meshSize = PxMax(extents[0], PxMax(extents[1], extents[2]));

		// Scale to unit size and zero offset for BSP building (axes below the
		// linear tolerance keep scale 1 to avoid division by near-zero)
		const Vec4Real recipScale((extents[0] > m_tolerarnces.linear ? extents[0] : (Real)1), (extents[1] > m_tolerarnces.linear ? extents[1] : (Real)1), (extents[2] > m_tolerarnces.linear ? extents[2] : (Real)1), (Real)1);
		const Vec4Real scale((Real)1/recipScale[0], (Real)1/recipScale[1], (Real)1/recipScale[2], (Real)1);
		const Pos center = m_meshBounds.getCenter();
		for (uint32_t i = 0; i < m_mesh.size(); ++i)
		{
			Triangle& tri = m_mesh[i];
			for (uint32_t j = 0; j < 3; ++j)
			{
				Pos& pos = tri.vertices[j];
				pos = (pos - center) * scale;
			}
			tri.calculateQuantities();
		}

		// Mesh was supplied independently of the BSP planes
		m_incidentalMesh = true;
	}

	return true;
}
+
// Merges another (uncombined) BSP into this one, producing a "combined" BSP:
// triangles and planes of the other BSP are appended (indices presumably
// rebased inside combineTrees), and the trees are merged so each leaf carries
// both BSPs' inside/outside bits until op() resolves them.  Fails if either
// BSP is already combined; warns if internal transforms differ.
bool
BSP::combine(const IApexBSP& ibsp)
{
	const BSP& bsp = *(const BSP*)&ibsp;

	if (m_combined || bsp.m_combined)
	{
		GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eINVALID_OPERATION,
			"BSP::combine: can only combine two uncombined BSPs. Use op() to merge a combined BSP.", __FILE__, __LINE__);
		return false;
	}

	if (!transformsEqual(m_internalTransform, bsp.m_internalTransform, 0.001f))
	{
		GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING,
			"BSP::combine: Nontrivial BSPs being combined have different internal transformations. Behavior is undefined.", __FILE__, __LINE__);
	}

	// Add in other bsp's triangles.
	const uint32_t thisTriangleCount = m_mesh.size();
	const uint32_t totalTriangleCount = thisTriangleCount + bsp.m_mesh.size();
	m_mesh.resize(totalTriangleCount);
	m_frames.resize(totalTriangleCount);
	for (uint32_t i = thisTriangleCount; i < totalTriangleCount; ++i)
	{
		m_mesh[i] = bsp.m_mesh[i - thisTriangleCount];
		m_frames[i] = bsp.m_frames[i - thisTriangleCount];
	}

	// Add in other bsp's planes.
	const uint32_t thisPlaneCount = m_planes.size();
	const uint32_t totalPlaneCount = thisPlaneCount + bsp.m_planes.size();
	m_planes.resize(totalPlaneCount);
	for (uint32_t i = thisPlaneCount; i < totalPlaneCount; ++i)
	{
		m_planes[i] = bsp.m_planes[i - thisPlaneCount];
	}

	// Merge trees; the old counts are the offsets for the other BSP's indices
	combineTrees(m_root, bsp.m_root, thisTriangleCount, thisPlaneCount);

	// Remember the other BSP's scale/mesh flags for the later op()
	m_combiningMeshSize = bsp.m_meshSize;
	m_combiningIncidentalMesh = bsp.m_incidentalMesh;

	m_meshBounds.include(bsp.m_meshBounds);

	m_combined = true;

	clean();

	return true;
}
+
// Resolves a combined BSP with the given boolean operation, turning it back
// into a normal (uncombined) BSP.  The mesh size scale is picked based on the
// operation's symmetry (see switch on operation >> 1), then the per-leaf
// two-bit sides are merged with the BoolOp.  Fails on an uncombined input or
// on Operation::NOP.
bool
BSP::op(const IApexBSP& icombinedBSP, Operation::Enum operation)
{
	const BSP& combinedBSP = *(const BSP*)&icombinedBSP;

	if (!combinedBSP.m_combined)
	{
		GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eINVALID_OPERATION,
			"BSP::op: can only perform an operation upon a combined BSP. Use combine() with another BSP.", __FILE__, __LINE__);
		return false;
	}

	if (operation == Operation::NOP)
	{
		GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eINVALID_OPERATION,
			"BSP::op: NOP requested. Mesh will remain combined.", __FILE__, __LINE__);
		return false;
	}

	copy(combinedBSP); // No-ops if this = combinedBSP, so this is safe

	// Combine size tolerances - look at symmetry
	switch (operation >> 1)
	{
	case 1: // From set A
	case 5:
		// Keep size scales
		break;
	case 2: // From set B
	case 6:
		// Replace with other size tolerance
		m_meshSize = m_combiningMeshSize;
		break;
	// Symmetric cases
	case 0: // Empty_Set or All_Space, set size scale to unitless value
		m_meshSize = 1;
		break;
	case 3: // Symmetric combinations of sets, use the min scale
	case 4:
	case 7:
		m_meshSize = PxMin(m_meshSize, m_combiningMeshSize);
		break;
	}

	// Collapse each leaf's two side bits into one via the boolean operation
	mergeLeaves(BoolOp(operation), m_root);

	m_incidentalMesh = m_incidentalMesh || m_combiningIncidentalMesh;

	m_combined = false;

	return true;
}
+
+bool
+BSP::complement()
+{
+ if (m_combined)
+ {
+ GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eINVALID_OPERATION,
+ "BSP::complement: can only complement an uncombined BSP. Use op() to merge a combined BSP.", __FILE__, __LINE__);
+ return false;
+ }
+
+ complementLeaves(m_root);
+
+ return true;
+}
+
+BSPType::Enum
+BSP::getType() const
+{
+ if (m_combined)
+ {
+ return BSPType::Combined;
+ }
+
+ if (m_root->getType() != Node::Leaf)
+ {
+ return BSPType::Nontrivial;
+ }
+
+ return m_root->getLeafData()->side == 1 ? BSPType::All_Space : BSPType::Empty_Set;
+}
+
// Sums surface area and volume over all leaves on the requested side.  For a
// combined BSP an operation must be given to resolve each leaf's two side
// bits; for an uncombined BSP a non-NOP operation is ignored with a warning.
// On failure (or unbounded leaves) area/volume are set to PX_MAX_F32 and
// false is returned.
bool
BSP::getSurfaceAreaAndVolume(float& area, float& volume, bool inside, Operation::Enum operation) const
{
	if (m_combined && operation == Operation::NOP)
	{
		GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eINVALID_OPERATION,
			"BSP::getSurfaceAreaAndVolume: an operation must be provided for combined BSPs.", __FILE__, __LINE__);
		return false;
	}

	if (!m_combined && operation != Operation::NOP)
	{
		GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "BSP::getSurfaceAreaAndVolume: warning, operation ignored for non-combined BSPs." , __FILE__, __LINE__);
	}

	// Accumulate in Real precision, narrow to float only on success
	Real realArea = (Real)0;
	Real realVolume = (Real)0;
	if (addLeafAreasAndVolumes(realArea, realVolume, m_root, inside, BoolOp(operation)))
	{
		area = (float)realArea;
		volume = (float)realVolume;
		return true;
	}

	area = PX_MAX_F32;
	volume = PX_MAX_F32;
	return false;
}
+
+bool
+BSP::pointInside(const PxVec3& point, Operation::Enum operation) const
+{
+ if (m_combined && operation == Operation::NOP)
+ {
+ GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eINVALID_OPERATION,
+ "BSP::pointInside: an operation must be provided for combined BSPs.", __FILE__, __LINE__);
+ return 0;
+ }
+
+ if (!m_combined && operation != Operation::NOP)
+ {
+ GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "BSP::pointInside: warning, operation ignored for non-combined BSPs." , __FILE__, __LINE__);
+ }
+
+ Node* node = m_root;
+
+ const PxVec3 BSPPoint = m_internalTransform.transform(point);
+
+ while (node->getType() == Node::Branch)
+ {
+ const Surface* surface = node->getBranchData();
+ node = node->getChild((uint32_t)((m_planes[surface->planeIndex].distance(BSPPoint)) <= 0.0f));
+ }
+
+ const Region* region = node->getLeafData();
+
+ uint32_t side = region->side;
+ if (m_combined)
+ {
+ side = BoolOp(operation)(side & 1, (side >> 1) & 1);
+ }
+
+ return side != 0;
+}
+
+bool
+BSP::toMesh(physx::Array<ExplicitRenderTriangle>& mesh) const
+{
+ if (m_combined)
+ {
+ GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eINVALID_OPERATION,
+ "BSP::toMesh: can only generate a mesh from an uncombined BSP. Use op() to merge a combined BSP.", __FILE__, __LINE__);
+ return false;
+ }
+
+ // Clip triangles collected from leaves
+ physx::Array<Triangle> clippedMesh;
+ physx::Array<ClippedTriangleInfo> triangleInfo;
+ clipMeshToLeaves(clippedMesh, triangleInfo, m_root, m_tolerarnces.clip);
+
+ // Clean
+ if (m_tolerarnces.cleaning > 0 && !m_incidentalMesh)
+ {
+ cleanMesh(mesh, clippedMesh, triangleInfo, m_planes, m_mesh, m_frames, (Real)m_tolerarnces.cleaning * m_meshSize, m_internalTransformInverse);
+ }
+ else
+ {
+ // Copy to render format
+ mesh.resize(clippedMesh.size());
+ for (uint32_t i = 0; i < clippedMesh.size(); ++i)
+ {
+ clippedMesh[i].transform(m_internalTransformInverse);
+ VertexData vertexData[3];
+ for (int v = 0; v < 3; ++v)
+ {
+ m_frames[triangleInfo[i].originalTriangleIndex].interpolateVertexData(vertexData[v], clippedMesh[i].vertices[v]);
+ if (!triangleInfo[i].ccw)
+ {
+ vertexData[v].normal *= -1.0;
+ }
+ }
+ clippedMesh[i].toExplicitRenderTriangle(mesh[i], vertexData);
+ }
+ }
+
+ return true;
+}
+
// Copies another BSP into this one, optionally applying a (mesh-space)
// transform pxTM and/or adopting a new internal transform.  Because pxTM acts
// in mesh space it must be conjugated into BSP space:
//     netTM = (new internalTransform)(pxTM)(old internalTransformInverse)
// which is applied to the tree unless it is (numerically) the identity.
// The inverse of the internal transform is recomputed last, since the old
// inverse is needed to build netTM.
void
BSP::copy(const IApexBSP& ibsp, const physx::PxMat44& pxTM, const physx::PxMat44& internalTransform)
{
	const BSP& bsp = *(const BSP*)&ibsp;

	if (this != &bsp)
	{
		// Copy other bsp
		clear();

		if (bsp.m_root)
		{
			m_root = m_memCache->m_nodePool.borrow();
			clone(m_root, bsp.m_root);
		}
		m_tolerarnces = bsp.m_tolerarnces;
		m_mesh = bsp.m_mesh;
		m_frames = bsp.m_frames;
		m_planes = bsp.m_planes;
		m_meshSize = bsp.m_meshSize;
		m_incidentalMesh = bsp.m_incidentalMesh;
		m_internalTransform = bsp.m_internalTransform;
		m_internalTransformInverse = bsp.m_internalTransformInverse;
		m_combined = bsp.m_combined;
		m_combiningMeshSize = bsp.m_combiningMeshSize;
		m_combiningIncidentalMesh = bsp.m_combiningIncidentalMesh;
	}

	// Take new internal transform if it is valid
	if (!isZero(internalTransform))
	{
		m_internalTransform = internalTransform;
	}

	// Do not calculate new m_internalTransformInverse yet. We need it to transform out of the BSP space.

	// Translate physx::PxMat44 to Mat4Real
	// We actually need to apply this transform *before* the internal transform, so we apply: m_internalTransform*pxTM*m_internalTransformInverse
	physx::PxMat44 pxTMITM = m_internalTransform*pxTM;
	Mat4Real tmITM;
	tmITM.setCol(0, Dir((physx::PxF32*)&pxTMITM[0]));
	tmITM.setCol(1, Dir((physx::PxF32*)&pxTMITM[1]));
	tmITM.setCol(2, Dir((physx::PxF32*)&pxTMITM[2]));
	tmITM.setCol(3, Pos((physx::PxF32*)&pxTMITM[3]));

	const Mat4Real netTM = tmITM*m_internalTransformInverse;

	// Do not transform if netTM is the identity (element-wise check with a
	// tolerance of ~10 float epsilons)
	bool isIdentity = true;
	for (uint32_t i = 0; i < 4 && isIdentity; ++i)
	{
		for (uint32_t j = 0; j < 4 && isIdentity; ++j)
		{
			isIdentity = physx::PxAbs(netTM[i][j] - (Real)(i == j)) < (Real)(10.0f*PX_EPS_F32);
		}
	}
	if (!isIdentity)
	{
		transform(netTM);
	}

	// Now calculate m_internalTransformInverse.
	m_internalTransformInverse = CSGFromPx(m_internalTransform).inverse34();
}
+
// Splits this BSP into connected "islands" of inside leaves.  Returns:
//   - NULL if the BSP is combined or has no inside leaves,
//   - this BSP (cast away from const) if it forms a single island,
//   - otherwise the head of a linked list of per-island BSP copies, in which
//     case this BSP is destroyed ('delete this').
// NOTE(review): both the const_cast returns and 'delete this' from a const
// method make the ownership contract unusual - callers must treat the input
// object as consumed.  Confirm against the IApexBSP interface documentation.
IApexBSP*
BSP::decomposeIntoIslands() const
{
	// Must be normal BSP
	if (m_combined)
	{
		return NULL;
	}

	// First enumerate all inside leaves
	uint32_t insideLeafCount = 0;
	indexInsideLeaves(insideLeafCount, m_root);
	if (insideLeafCount == 0)
	{
		return NULL;
	}

	// Find all leaf neighbors
	physx::Array<IntPair> neighbors;
	findInsideLeafNeighbors(neighbors, m_root);

	// Find leaf neighbor islands (connected components of the neighbor graph)
	physx::Array< physx::Array<uint32_t> > islands;
	findIslands(islands, neighbors, insideLeafCount);

	// Return this if there is only one island
	if (islands.size() == 1)
	{
		return const_cast<BSP*>(this);
	}

	// Otherwise we make a BSP list
	physx::Array<Node*> insideLeaves;
	insideLeaves.reserve(insideLeafCount);
	BSPLink* listRoot = PX_NEW(BSPLink)();
	for (uint32_t islandNum = islands.size(); islandNum--;)
	{
		// Create new island
		BSP* islandBSP = static_cast<BSP*>(createBSP(m_memCache));
		if (islandBSP != NULL)
		{
			// Copy island BSP from this and add to list
			islandBSP->copy(*this);
			listRoot->setAdj(1, islandBSP);
			// Create a list of the inside leaf pointers
			insideLeaves.clear();
			listInsideLeaves(insideLeaves, islandBSP->m_root);
			// Set all the leaves' sides to 0
			for (uint32_t leafNum = 0; leafNum < insideLeaves.size(); ++leafNum)
			{
				insideLeaves[leafNum]->getLeafData()->side = 0;
			}
			// Set island leaves' sides to 1
			const physx::Array<uint32_t>& island = islands[islandNum];
			for (uint32_t islandLeafNum = 0; islandLeafNum < island.size(); ++islandLeafNum)
			{
				insideLeaves[island[islandLeafNum]]->getLeafData()->side = 1;
			}
			// Now merge leaves to consolidate new 0-0 siblings
			islandBSP->mergeLeaves(BoolOp(Operation::Set_A), islandBSP->m_root);
		}
	}

	// Return list head
	if (!listRoot->isSolitary())
	{
		delete this;	// This object is consumed; the island list replaces it
		return static_cast<BSP*>(listRoot->getAdj(1));
	}

	// No islands were actually created (e.g. allocation failure); keep this
	delete listRoot;
	return const_cast<BSP*>(this);
}
+
+void
+BSP::replaceInteriorSubmeshes(uint32_t frameCount, uint32_t* frameIndices, uint32_t submeshIndex)
+{
+ // Replace render mesh submesh indices
+ for (uint32_t triangleIndex = 0; triangleIndex < m_mesh.size(); ++triangleIndex)
+ {
+ Triangle& triangle = m_mesh[triangleIndex];
+ for (uint32_t frameNum = 0; frameNum < frameCount; ++frameNum)
+ {
+ if (triangle.extraDataIndex == frameIndices[frameNum])
+ {
+ triangle.submeshIndex = (int32_t)submeshIndex;
+ }
+ }
+ }
+}
+
+void
+BSP::deleteTriangles()
+{
+ m_mesh.reset();
+ m_frames.reset();
+ for (Node::It it(m_root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+ if (node->getType() == BSP::Node::Branch)
+ {
+ Surface* surface = node->getBranchData();
+ surface->triangleIndexStart = 0;
+ surface->triangleIndexStop = 0;
+ surface->totalTriangleArea = 0.0f;
+ }
+ }
+}
+
void
BSP::serialize(physx::PxFileBuf& stream) const
{
	// Write this BSP to the stream.  The leading version word lets
	// deserialize() handle older formats; the field order below is a wire
	// contract and must match deserialize() exactly.
	stream << (uint32_t)Version::Current;

	// Tree
	serializeNode(m_root, stream);

	// Internal mesh representation
	nvidia::serialize(stream, m_mesh);
	nvidia::serialize(stream, m_frames);
	stream << m_meshSize;
	stream << m_incidentalMesh;
	stream << m_meshBounds;
	stream << m_internalTransform;
	stream << m_internalTransformInverse;

	// Unique splitting planes
	nvidia::serialize(stream, m_planes);

	// Combination data
	stream << m_combined;
	stream << m_combiningMeshSize;
	stream << m_combiningIncidentalMesh;
}
+
void
BSP::deserialize(physx::PxFileBuf& stream)
{
	// Rebuild this BSP from a stream written by serialize().  The stored
	// version number gates every format difference below so that older
	// assets remain loadable.  Existing state is released first.
	clear();

	uint32_t version;
	stream >> version;

	// Tree
	m_root = deserializeNode(version, stream);

	if (version < Version::RevisedMeshTolerances)
	{
		stream.readDouble(); // Swallow old m_linearTol
		stream.readDouble(); // Swallow old m_angularTol
	}

	// Internal mesh representation
	if (version >= Version::SerializingTriangleFrames)
	{
		apex::deserialize(stream, version, m_mesh);
		nvidia::deserialize(stream, version, m_frames);
	}
	else
	{
		// Legacy path: triangle frames were not stored, so reconstruct them
		// from the per-vertex data that old streams carried.
		const uint32_t triangleCount = stream.readDword();
		m_mesh.resize(triangleCount);
		m_frames.resize(triangleCount);
		for (uint32_t triN = 0; triN < triangleCount; ++triN)
		{
			Triangle& tri = m_mesh[triN];
			if (version < Version::UsingOnlyPositionDataInVertex)
			{
				VertexData vertexData[3];
				for (uint32_t v = 0; v < 3; ++v)
				{
					stream >> tri.vertices[v];
					stream >> vertexData[v].normal;
					stream >> vertexData[v].binormal;
					// NOTE(review): binormal is read twice here (normal, binormal,
					// binormal); the second read was presumably intended for a
					// tangent field.  The stream stays in sync either way (three
					// vectors consumed) -- confirm against the legacy writer
					// before changing.
					stream >> vertexData[v].binormal;
					for (uint32_t uvN = 0; uvN < VertexFormat::MAX_UV_COUNT; ++uvN)
					{
						stream >> vertexData[v].uv[uvN];
					}
					stream >> vertexData[v].color;
				}
				m_frames[triN].setFromTriangle(tri, vertexData);
			}
			else
			{
				// NOTE(review): the same Triangle is deserialized three times in
				// this loop; presumably this mirrors a legacy three-record
				// layout -- confirm before simplifying.
				for (uint32_t i = 0; i < 3; ++i)
				{
					nvidia::deserialize(stream, version, tri);
				}
			}
			stream >> tri.submeshIndex;
			stream >> tri.smoothingMask;
			stream >> tri.extraDataIndex;
			stream >> tri.normal;
			stream >> tri.area;
		}
	}
	stream >> m_meshSize;

	if (version >= Version::IncidentalMeshDistinction)
	{
		stream >> m_incidentalMesh;
	}

	if (version >= Version::SerializingMeshBounds)
	{
		stream >> m_meshBounds;
	}
	else
	{
		// Bounds were not stored; recompute them from the mesh vertices.
		m_meshBounds.setEmpty();
		for (uint32_t triangleN = 0; triangleN < m_mesh.size(); ++triangleN)
		{
			Triangle& tri = m_mesh[triangleN];
			for (uint32_t v = 0; v < 3; ++v)
			{
				Pos& vertex = tri.vertices[v];
				m_meshBounds.include(physx::PxVec3((float)vertex[0], (float)vertex[1], (float)vertex[2]));
			}
		}
	}

	if (version < Version::RevisedMeshTolerances)
	{
		stream.readDouble(); // Swallow old m_distanceTol
	}

	if (version >= Version::AddedInternalTransform)
	{
		stream >> m_internalTransform;
		stream >> m_internalTransformInverse;
	}
	else
	{
		// No stored transform: default to identity.
		m_internalTransform = physx::PxMat44(physx::PxIdentity);
		m_internalTransformInverse.set((Real)1);
	}

	// Unique splitting planes
	apex::deserialize(stream, version, m_planes);

	// Combination data
	stream >> m_combined;
	stream >> m_combiningMeshSize;
	if (version >= Version::IncidentalMeshDistinction)
	{
		stream >> m_combiningIncidentalMesh;
	}

	if (m_root == NULL)
	{
		// Set to trivial tree
		clear();
		m_root = m_memCache->m_nodePool.borrow();
	}
}
+
+void BSP::visualize(RenderDebugInterface& debugRender, uint32_t flags, uint32_t index) const
+{
+#ifdef WITHOUT_DEBUG_VISUALIZE
+ PX_UNUSED(debugRender);
+ PX_UNUSED(flags);
+ PX_UNUSED(index);
+#else
+ const Node* node = m_root;
+ if (flags & BSPVisualizationFlags::SingleRegion)
+ {
+ uint32_t count = 0;
+ for (Node::It it(m_root); it.valid(); it.inc())
+ {
+ Node* current = it.node();
+ if (current->getType() == BSP::Node::Leaf)
+ {
+ if (index == count++)
+ {
+ node = current;
+ break;
+ }
+ }
+ }
+ }
+
+ if (node != NULL)
+ {
+ visualizeNode(debugRender, flags, node);
+ }
+#endif
+}
+
void
BSP::release()
{
	// Self-destruct: free all owned tree/mesh state, then delete this
	// object (requires that it was heap-allocated).
	clear();
	delete this;
}
+
+void
+BSP::clear()
+{
+ if (m_root != NULL)
+ {
+ releaseNode(m_root);
+ m_root = NULL;
+ }
+ m_incidentalMesh = false;
+ m_combiningIncidentalMesh = false;
+ m_combiningMeshSize = (Real)1;
+ m_combined = false;
+ m_mesh.reset();
+ m_frames.reset();
+ m_planes.reset();
+ removeBSPLink();
+}
+
void
BSP::clipMeshToLeaf(Real& area, Real& volume, physx::Array<Triangle>* clippedMesh, physx::Array<ClippedTriangleInfo>* triangleInfo, const Node* leaf, float clipTolerance) const
{
	// Clip the internal mesh triangles to the convex region of 'leaf',
	// accumulating the clipped surface area and the clipped triangles'
	// pyramid volumes (apexed at the mesh-bounds center).  If clippedMesh
	// is non-NULL, the clipped triangles are appended to it; if triangleInfo
	// is also non-NULL, one ClippedTriangleInfo is appended per output
	// triangle, recording its source plane, source triangle, and winding.
	PX_ASSERT(leaf->getType() == BSP::Node::Leaf);

	area = (Real)0;
	volume = (Real)0;

	const Pos center(&m_meshBounds.getCenter()[0]);

	// Collect triangles on each surface and clip to other faces
	for (SurfaceIt it(leaf); it.valid(); it.inc())
	{
		for (uint32_t i = it.surface()->triangleIndexStart; i < it.surface()->triangleIndexStop; ++i)
		{
			// Remember where the output array ended, so the new entries can
			// be tagged below.
			const uint32_t oldClippedMeshSize = clippedMesh != NULL ? clippedMesh->size() : 0;
			Real clippedTriangleArea, clippedPyramidVolume;
			clipTriangleToLeaf(clippedMesh, clippedTriangleArea, clippedPyramidVolume, center, m_mesh[i], leaf, it.side(), m_memCache->m_linkedVertexPool,
			                   clipTolerance * m_meshSize, m_planes, it.surface()->planeIndex);
			area += clippedTriangleArea;
			volume += clippedPyramidVolume;
			if (triangleInfo != NULL && clippedMesh != NULL)
			{
				// Fill triangleInfo corresponding to new clipped triangles
				const uint32_t newClippedMeshSize = clippedMesh->size();
				for (uint32_t j = oldClippedMeshSize; j < newClippedMeshSize; ++j)
				{
					ClippedTriangleInfo& info = triangleInfo->insert();
					info.planeIndex = it.surface()->planeIndex;
					info.originalTriangleIndex = i;
					info.clippedTriangleIndex = j;
					info.ccw = it.side();
				}
			}
		}
	}

	if (m_incidentalMesh)
	{
		// Incidental meshes are not partitioned onto tree surfaces, so every
		// triangle is clipped against the leaf.  Note the area/volume sums
		// are NOT accumulated for this pass (matches existing behavior).
		for (uint32_t i = 0; i < m_mesh.size(); ++i)
		{
			const uint32_t oldClippedMeshSize = clippedMesh != NULL ? clippedMesh->size() : 0;
			Real clippedTriangleArea, clippedPyramidVolume;
			clipTriangleToLeaf(clippedMesh, clippedTriangleArea, clippedPyramidVolume, center, m_mesh[i], leaf, 1, m_memCache->m_linkedVertexPool, clipTolerance * m_meshSize, m_planes);
			if (triangleInfo != NULL && clippedMesh != NULL)
			{
				// Fill triangleInfo corresponding to new clipped triangles.
				// planeIndex 0xFFFFFFFF marks entries not tied to a tree surface.
				const uint32_t newClippedMeshSize = clippedMesh->size();
				for (uint32_t j = oldClippedMeshSize; j < newClippedMeshSize; ++j)
				{
					ClippedTriangleInfo& info = triangleInfo->insert();
					info.planeIndex = 0xFFFFFFFF;
					info.originalTriangleIndex = i;
					info.clippedTriangleIndex = j;
					info.ccw = 1;
				}
			}
		}
	}
}
+
void
BSP::transform(const Mat4Real& tm, bool transformFrames)
{
	// Apply the affine transform tm to the entire BSP: mesh vertices and
	// (optionally) triangle frames, splitting planes, per-branch surface
	// areas, and the characteristic mesh sizes.

	// Build cofactor matrix for transformation of normals
	const Mat4Real cofTM = tm.cof34();
	const Mat4Real invTransposeTM = cofTM/cofTM[3][3];

	// Transform mesh
	for (uint32_t i = 0; i < m_mesh.size(); ++i)
	{
		for (int v = 0; v < 3; ++v)
		{
			m_mesh[i].vertices[v] = tm * m_mesh[i].vertices[v];
		}
		// Recompute derived per-triangle quantities from the new vertices
		m_mesh[i].calculateQuantities();
		if (transformFrames)
		{
			m_frames[i].transform(m_frames[i], tm, invTransposeTM);
		}
	}

	// Transform planes
	for (uint32_t i = 0; i < m_planes.size(); ++i)
	{
		m_planes[i] = cofTM * m_planes[i]; // Don't normalize yet - surface areas will be calculated from plane normal lengths in "Transform tree" below
	}

	// Transform tree
	for (Node::It it(m_root); it.valid(); it.inc())
	{
		Node* node = it.node();
		if (node->getType() == BSP::Node::Branch)
		{
			// Transform surface quantities.  The length of the still-unnormalized
			// transformed plane normal is the area scale factor for that plane.
			node->getBranchData()->totalTriangleArea *= (float)physx::PxSqrt(m_planes[node->getBranchData()->planeIndex].normal().lengthSquared());
		}
	}

	// Now normalize planes
	for (uint32_t i = 0; i < m_planes.size(); ++i)
	{
		m_planes[i].normalize();
	}

	// Adjust sizes
	// Lengths scale as the cube root of the volume scale (the determinant).
	const Real scale = physx::PxPow((float) tm.det3(), (float)0.33333333333333333);
	m_meshSize *= scale;
	m_combiningMeshSize *= scale;
}
+
+void
+BSP::clean()
+{
+ /*
+ 1) Mark planes and triangles that are used in the tree
+ 2) Remove those that aren't, creating index maps, bounds, and size
+ 3) Walk tree again and re-index
+ */
+
+ physx::Array<uint32_t> planeMap(m_planes.size(), 0);
+ physx::Array<uint32_t> triangleMap(m_mesh.size()+1, 0);
+
+ // 1) Mark planes and triangles that are used in the tree
+ for (Node::It it(m_root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+ if (node->getType() == BSP::Node::Branch)
+ {
+ const Surface* surface = node->getBranchData();
+ planeMap[surface->planeIndex] = 1;
+ for (uint32_t triangleIndex = surface->triangleIndexStart; triangleIndex < surface->triangleIndexStop; ++triangleIndex)
+ {
+ triangleMap[triangleIndex] = 1;
+ }
+ }
+ }
+
+ if (m_incidentalMesh || (m_combined && m_combiningIncidentalMesh))
+ {
+ // All triangles used
+ for (uint32_t triangleIndex = 0; triangleIndex < m_mesh.size(); ++triangleIndex)
+ {
+ triangleMap[triangleIndex] = 1;
+ }
+ }
+
+ // 2) Remove those that aren't, creating index maps and bounds
+ m_meshBounds.setEmpty();
+
+ uint32_t newPlaneIndex = 0;
+ for (uint32_t oldPlaneIndex = 0; oldPlaneIndex < planeMap.size(); ++oldPlaneIndex)
+ {
+ const bool planeUsed = planeMap[oldPlaneIndex] != 0;
+ planeMap[oldPlaneIndex] = newPlaneIndex;
+ if (planeUsed)
+ {
+ m_planes[newPlaneIndex++] = m_planes[oldPlaneIndex];
+ }
+ }
+ m_planes.resize(newPlaneIndex);
+
+ uint32_t newTriangleIndex = 0;
+ for (uint32_t oldTriangleIndex = 0; oldTriangleIndex < triangleMap.size(); ++oldTriangleIndex)
+ {
+ const bool triangleUsed = triangleMap[oldTriangleIndex] != 0;
+ triangleMap[oldTriangleIndex] = newTriangleIndex;
+ if (triangleUsed)
+ {
+ Triangle& triangle = m_mesh[newTriangleIndex];
+ triangle = m_mesh[oldTriangleIndex];
+ for (int v = 0; v < 3; ++v)
+ {
+ const Pos& vertex = triangle.vertices[v];
+ m_meshBounds.include(physx::PxVec3((float)vertex[0], (float)vertex[1], (float)vertex[3]));
+ }
+ m_frames[newTriangleIndex] = m_frames[oldTriangleIndex];
+ ++newTriangleIndex;
+ }
+ }
+ m_mesh.resize(newTriangleIndex);
+ m_frames.resize(newTriangleIndex);
+
+ m_meshSize = (Real)m_meshBounds.getExtents().maxElement();
+
+ // 3) Walk tree again and re-index
+ for (Node::It it(m_root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+ if (node->getType() == BSP::Node::Branch)
+ {
+ Surface* surface = const_cast<Surface*>(node->getBranchData());
+ surface->planeIndex = planeMap[surface->planeIndex];
+ const uint32_t surfaceTriangleCount = surface->triangleIndexStop - surface->triangleIndexStart;
+ surface->triangleIndexStart = triangleMap[surface->triangleIndexStart];
+ surface->triangleIndexStop = surface->triangleIndexStart + surfaceTriangleCount; // Do it this way since the last triangleIndexStop is unmapped
+ }
+ }
+}
+
+void
+BSP::performDiagnostics() const
+{
+ debugInfo("BSP diagnostics starting.");
+
+ char msg[10240];
+
+ debugInfo("Checking for holes...");
+
+ // This is the "raw result" from toMesh(). It's in our internal (high-precision) format, not cleaned, etc.:
+ physx::Array<Triangle> clippedMesh;
+ physx::Array<ClippedTriangleInfo> triangleInfo;
+ clipMeshToLeaves(clippedMesh, triangleInfo, m_root, m_tolerarnces.clip);
+
+ for (uint32_t triangleIndex = 0; triangleIndex < m_mesh.size(); ++triangleIndex)
+ {
+ // Make sure triangle is in a branch somewhere
+ physx::Array<BSP::Node*> foundInNodes;
+ for (Node::It it(m_root); it.valid(); it.inc())
+ {
+ BSP::Node* node = static_cast<BSP::Node*>(it.node());
+ if (node->getType() == BSP::Node::Branch)
+ {
+ const Surface* surface = node->getBranchData();
+ if (triangleIndex >= surface->triangleIndexStart && triangleIndex < surface->triangleIndexStop)
+ {
+ foundInNodes.pushBack(node);
+ }
+ }
+ }
+ if (foundInNodes.empty())
+ {
+ sprintf(msg, "Triangle %d not found in any branch.", triangleIndex);
+ debugWarn(msg);
+ }
+
+ // Make sure the triangle comes back with no holes
+ const Triangle& triangle = m_mesh[triangleIndex];
+ if (triangle.area > (Real)0)
+ {
+ Real area = (Real)0;
+ for (uint32_t clippedTriangleIndex = 0; clippedTriangleIndex < clippedMesh.size(); ++clippedTriangleIndex)
+ {
+ ClippedTriangleInfo& info = triangleInfo[clippedTriangleIndex];
+ if (info.originalTriangleIndex != triangleIndex)
+ {
+ continue;
+ }
+ Triangle& clippedMeshTriangle = clippedMesh[clippedTriangleIndex];
+ area += clippedMeshTriangle.area;
+ }
+
+ const Real areaError = area/triangle.area - (Real)1;
+ if (physx::PxAbs(areaError) > (Real)0.000001)
+ {
+ sprintf(msg, "Triangle %d is reconstructed with a different area: error = %7.4g%%.", triangleIndex, (Real)100*areaError);
+ debugWarn(msg);
+
+ sprintf(msg, " Triangle %d is found in %d node(s):", triangleIndex, foundInNodes.size());
+ debugInfo(msg);
+ Real totalClippedArea = (Real)0;
+ for (uint32_t nodeN = 0; nodeN < foundInNodes.size(); ++nodeN)
+ {
+ sprintf(msg, " Node #%d:", nodeN);
+ debugInfo(msg);
+ Real nodeArea = (Real)0;
+ for (Node::It it(foundInNodes[nodeN]); it.valid(); it.inc())
+ {
+ Node* subTreeNode = it.node();
+ if (subTreeNode->getType() == BSP::Node::Leaf)
+ {
+ const uint32_t planeSide = subTreeNode->getIndex();
+ if (subTreeNode->getLeafData()->side == 0)
+ {
+ continue;
+ }
+ Real clipArea, clipVolume;
+ const Pos origin(&m_meshBounds.getCenter()[0]);
+ clipTriangleToLeaf(NULL, clipArea, clipVolume, origin, triangle, subTreeNode, planeSide, m_memCache->m_linkedVertexPool, m_tolerarnces.clip*m_meshSize, m_planes, foundInNodes[nodeN]->getBranchData()->planeIndex);
+ nodeArea += clipArea;
+// sprintf(msg, " Subtree leaf area = %15.7f.", clipArea);
+// debugInfo(msg);
+ }
+ }
+ totalClippedArea += nodeArea;
+ sprintf(msg, " Total node area = %15.7g.", nodeArea);
+ debugInfo(msg);
+ }
+ sprintf(msg, " Total clipped area = %15.7g.", totalClippedArea);
+ debugInfo(msg);
+
+ sprintf(msg, " Attempting brute-force decoposition.");
+ Real totalClippedArea2ndAttempt[2] = {(Real)0, (Real)0};
+ uint32_t leafCount[2] = {0, 0};
+ for (Node::It it(m_root); it.valid(); it.inc())
+ {
+ Node* n = it.node();
+ if (n->getType() == BSP::Node::Leaf)
+ {
+ const uint32_t planeSide = n->getIndex();
+ const uint32_t side = n->getLeafData()->side & 1;
+ Real clipArea, clipVolume;
+ const Pos origin(&m_meshBounds.getCenter()[0]);
+ clipTriangleToLeaf(NULL, clipArea, clipVolume, origin, triangle, n, planeSide, m_memCache->m_linkedVertexPool, m_tolerarnces.clip*m_meshSize, m_planes);
+ totalClippedArea2ndAttempt[side] += clipArea;
+ if (clipArea != 0)
+ {
+ ++leafCount[side];
+ sprintf(msg, " Non-zero area found in side(%d) leaf. Parent planes:", side);
+ const BSP::Node* nn = n;
+ while((nn = (const BSP::Node*)nn->getParent()) != NULL)
+ {
+ char num[32];
+ sprintf(num, " %d,", nn->getBranchData()->planeIndex);
+ strcat(msg, num);
+ }
+ debugInfo(msg);
+ }
+ }
+ }
+ sprintf(msg, " Total outside area from %d leaves = %15.7g.", leafCount[0], totalClippedArea2ndAttempt[0]);
+ sprintf(msg, " Total inside area from %d leaves = %15.7g.", leafCount[1], totalClippedArea2ndAttempt[1]);
+ debugInfo(msg);
+ }
+ }
+ else
+ {
+ sprintf(msg, "Triangle %d has non-positive area.", triangleIndex);
+ debugWarn(msg);
+ }
+ }
+
+ debugInfo("BSP diagnostics finished.");
+}
+
PX_INLINE uint32_t
BSP::removeRedundantSurfacesFromStack(physx::Array<Surface>& surfaceStack, uint32_t stackReadStart, uint32_t stackReadStop, Node* leaf)
{
	// Compact the window [stackReadStart, stackReadStop) of surfaceStack by
	// discarding surfaces none of whose triangles intersect the region
	// represented by 'leaf'.  Removal is swap-with-last within the window,
	// so the order of surviving surfaces is not preserved.  Returns the new
	// end of the window.

	// Remove surfaces that don't have triangles intersecting this region
	const Pos center(&m_meshBounds.getCenter()[0]);

	// Iterate backwards so the swap-removal never skips an element.
	for (uint32_t i = stackReadStop; i-- > stackReadStart;)
	{
		Surface* surface = surfaceStack.begin() + i;
		bool surfaceIntersectsThisRegion = false;
		for (uint32_t j = surface->triangleIndexStart; j < surface->triangleIndexStop; ++j)
		{
			Real clippedTriangleArea, clippedPyramidVolume;
			clipTriangleToLeaf(NULL, clippedTriangleArea, clippedPyramidVolume, center, m_mesh[j], leaf, 1, m_memCache->m_linkedVertexPool, m_tolerarnces.base, m_planes);
			// Any surviving clipped area means this surface touches the region.
			if (0 < clippedTriangleArea)
			{
				surfaceIntersectsThisRegion = true;
				break;
			}
		}
		if (!surfaceIntersectsThisRegion)
		{
			surfaceStack[i] = surfaceStack[--stackReadStop];
		}
	}

	return stackReadStop;
}
+
PX_INLINE void
BSP::assignLeafSide(Node* leaf, QuantityProgressListener* quantityListener)
{
	// Classify a leaf region as inside (side = 1) or outside (side = 0) by
	// clipping the triangles of the leaf's bounding surfaces to the leaf
	// and summing their clipped areas signed by each surface's side.
	const Pos center(&m_meshBounds.getCenter()[0]);

	// See if this leaf is inside or outside
	Real sumSignedArea = (Real)0;
	for (SurfaceIt it(leaf); it.valid(); it.inc())
	{
		const Real sign = it.side() ? (Real)1 : -(Real)1;
		for (uint32_t j = it.surface()->triangleIndexStart; j < it.surface()->triangleIndexStop; ++j)
		{
			Real clippedTriangleArea, clippedPyramidVolume;
			clipTriangleToLeaf(NULL, clippedTriangleArea, clippedPyramidVolume, center, m_mesh[j], leaf, it.side(), m_memCache->m_linkedVertexPool, m_tolerarnces.base, m_planes, it.surface()->planeIndex);
			sumSignedArea += sign * clippedTriangleArea;
		}
	}

	// A zero sum gives no evidence either way; the leaf's side is left
	// unchanged and no progress is reported.
	if (sumSignedArea != (Real)0)
	{
		leaf->getLeafData()->side = sumSignedArea > 0 ? 1u : 0u;
		// Report half the absolute signed area to the progress listener
		// (presumably an area-based work metric -- units not evident here).
		quantityListener->add((Real)0.5*physx::PxAbs(sumSignedArea));
	}
}
+
PX_INLINE void
BSP::createBranchSurfaceAndSplitStack(uint32_t childReadStart[2], uint32_t childReadStop[2], Node* node, physx::Array<Surface>& surfaceStack, uint32_t stackReadStart,
                                      uint32_t stackReadStop, const BuildConstants& buildConstants)
{
	// Choose a splitting surface for 'node' from the candidate surfaces in
	// surfaceStack[stackReadStart, stackReadStop), classify the remaining
	// surfaces against its plane, and append the two child surface lists to
	// the top of surfaceStack (child 0's list first, then child 1's).  On
	// return, childReadStart/childReadStop bracket each child's list and
	// 'node' carries the winning surface as its branch data.
	const uint32_t surfaceListSize = stackReadStop - stackReadStart;
	Surface* surfaceList = surfaceStack.begin() + stackReadStart;

	// Scratch flag arrays (one byte per candidate surface), grown on demand.
	if (m_memCache->m_surfaceFlags.size() < surfaceListSize)
	{
		m_memCache->m_surfaceFlags.resize(surfaceListSize);
		m_memCache->m_surfaceTestFlags.resize(surfaceListSize);
	}

	uint32_t branchSurfaceN = 0; // Will be the winning surface - default to 1st surface

	Surface* branchSurface = surfaceList + branchSurfaceN;

	// Set when the scored search below has already filled m_surfaceFlags.
	bool splittingCalculated = false;

	if (surfaceListSize > 1)
	{
		// Pass 1: pick the surface with the largest total triangle area, and
		// (if enabled) gather log-area statistics to judge whether that
		// candidate is a clear outlier.
		float maxLogArea = -PX_MAX_F32;
		float meanLogArea = 0.0f;
		float sigma2LogArea = 0.0f;
		if (buildConstants.m_params.logAreaSigmaThreshold > 0)
		{
			uint32_t positiveAreaCount = 0;
			for (uint32_t i = 0; i < surfaceListSize; ++i)
			{
				// Test surface
				Surface& testSurface = surfaceList[i];
				if (testSurface.totalTriangleArea <= 0.0f)
				{
					continue;
				}
				++positiveAreaCount;
				const float logArea = physx::PxLog(testSurface.totalTriangleArea);
				if (logArea > maxLogArea)
				{
					maxLogArea = logArea;
					branchSurfaceN = i; // Candidate
				}
				meanLogArea += logArea;
			}
			// Sample variance of log(area) (needs at least two samples).
			if (positiveAreaCount > 1)
			{
				meanLogArea /= (float)positiveAreaCount;
				for (uint32_t i = 0; i < surfaceListSize; ++i)
				{
					// Test surface
					Surface& testSurface = surfaceList[i];
					if (testSurface.totalTriangleArea <= 0.0f)
					{
						continue;
					}
					const float logArea = physx::PxLog(testSurface.totalTriangleArea);
					sigma2LogArea += square<float>(logArea - meanLogArea);
				}
				sigma2LogArea /= (float)(positiveAreaCount-1);
			}

			// Possibly new branchSurfaceN
			branchSurface = surfaceList + branchSurfaceN;
		}
		if (maxLogArea > meanLogArea && square<float>(maxLogArea - meanLogArea) < square(buildConstants.m_params.logAreaSigmaThreshold)*sigma2LogArea)
		{
			// branchSurface chosen by max area does not have an area that is outside of one standard deviation from the mean surface area. Use another method to determine branchSurface.

			// Pick buildConstants.m_testSetSize surfaces
			const uint32_t testSetSize = buildConstants.m_params.testSetSize > 0 ? PxMin(surfaceListSize, buildConstants.m_params.testSetSize) : surfaceListSize;

			// Pass 2: score each test surface by area, split count, and
			// above/below imbalance.  Low score wins
			float minScore = PX_MAX_F32;
			for (uint32_t i = 0; i < testSetSize; ++i)
			{
				// Test surface
				Surface* testSurface = surfaceList + i;
				int32_t counts[4] = {0, 0, 0, 0}; // on, above, below, split
				uint32_t triangleCount = 0;
				for (uint32_t j = 0; j < surfaceListSize; ++j)
				{
					uint8_t& flags = m_memCache->m_surfaceTestFlags[j]; // Whether this surface is above or below testSurface (or both)
					flags = 0;

					if (j == i)
					{
						continue; // Don't score testSurface itself
					}

					// Surface to contribute to score
					Surface* surface = surfaceList + j;

					// Run through all triangles.  Per triangle: bit 0 set if some
					// vertex is on/above the test plane, bit 1 if some vertex is
					// on/below; value 3 means the triangle straddles (or lies on)
					// the plane.
					for (uint32_t k = surface->triangleIndexStart; k < surface->triangleIndexStop; ++k)
					{
						const Triangle& tri = m_mesh[k];
						uint8_t triFlags = 0;
						for (uint32_t v = 0; v < 3; ++v)
						{
							const int side = cmpPointToPlane(tri.vertices[v], m_planes[testSurface->planeIndex], m_tolerarnces.base);
							// triFlags |= (side & 1) << ((1 - side) >> 1);	// 0 => 0, 1 => 1, -1 => 2
							triFlags |= (int)(side <= 0) << 1 | (int)(side >= 0); // 0 => 3, 1 => 1, -1 => 2
						}
						++counts[triFlags];
						flags |= triFlags;
					}

					triangleCount += surface->triangleIndexStop - surface->triangleIndexStart;
				}

				// Compute score = (surface area)/(max area) + (split weight)*(# splits)/(# triangles) + (imbalance weight)*|(# above) - (# below)|/(# triangles)
				const float score = testSurface->totalTriangleArea * buildConstants.m_recipMaxArea +
				                    (buildConstants.m_params.splitWeight * counts[3] + buildConstants.m_params.imbalanceWeight * physx::PxAbs(counts[1] - counts[2])) / triangleCount;

				if (score < minScore)
				{
					// We have a winner.  Keep its classification flags so they
					// need not be recomputed after the loop.
					branchSurfaceN = i;
					minScore = score;
					memcpy(m_memCache->m_surfaceFlags.begin(), m_memCache->m_surfaceTestFlags.begin(), surfaceListSize * sizeof(m_memCache->m_surfaceFlags[0]));
				}
			}

			// Possibly new branchSurfaceN
			branchSurface = surfaceList + branchSurfaceN;
			splittingCalculated = true;
		}
	}

	if (!splittingCalculated)
	{
		// The scored search didn't run (area-based choice stood, or only one
		// surface): classify all surfaces against the chosen plane now.
		for (uint32_t i = 0; i < surfaceListSize; ++i)
		{
			uint8_t& flags = m_memCache->m_surfaceFlags[i]; // Whether this surface is above or below branchSurface (or both)
			flags = 0;

			if (i == branchSurfaceN)
			{
				continue; // Don't score branchSurface itself
			}

			// Surface to contribute to score
			Surface& surface = surfaceList[i];

			// Run through all triangles
			for (uint32_t j = surface.triangleIndexStart; j < surface.triangleIndexStop; ++j)
			{
				const Triangle& tri = m_mesh[j];
				for (uint32_t v = 0; v < 3; ++v)
				{
					const int side = cmpPointToPlane(tri.vertices[v], m_planes[branchSurface->planeIndex], m_tolerarnces.base);
					// flags |= (side & 1) << ((1 - side) >> 1);	// 0 => 0, 1 => 1, -1 => 2
					flags |= (int)(side <= 0) << 1 | (int)(side >= 0); // 0 => 3, 1 => 1, -1 => 2
				}
			}
		}
	}

	// Run through the surface flags and create below/above arrays on the stack.
	// These arrays will be contiguous with child[0] surfaces first.
	childReadStart[0] = surfaceStack.size();
	childReadStop[0] = childReadStart[0];
	// Round the reserve target down to a power of two: x & (x - 1) clears the
	// lowest set bit, so the loop leaves only the highest set bit of the
	// initial target.  The initial target is twice the worst-case required
	// size (current size plus two copies of every candidate surface), so the
	// rounded-down value still covers the requirement.
	uint32_t targetStackSize = 2*(surfaceStack.size() + 2 * surfaceListSize);
	for (;;)
	{
		const uint32_t newTargetStackSize = targetStackSize&(targetStackSize-1);
		if (newTargetStackSize == 0)
		{
			break;
		}
		targetStackSize = newTargetStackSize;
	}

	surfaceStack.reserve(targetStackSize);
	for (uint32_t j = 0; j < surfaceListSize; ++j)
	{
		uint32_t surfaceJ = j + stackReadStart;
		if (j == branchSurfaceN)
		{
			continue;
		}
		switch (m_memCache->m_surfaceFlags[j])
		{
		case 0:
			// No triangles classified; the surface is dropped.
			break;
		case 1:
			// Entirely on the plane's non-negative side: belongs to child 0.
			// Grow the stack, move the first child-1 entry to the back, and
			// place this surface in the freed child-0 slot.
			surfaceStack.insert();
			surfaceStack.back() = surfaceStack[childReadStop[0]];
			surfaceStack[childReadStop[0]++] = surfaceStack[surfaceJ];
			break;
		case 2:
			// Entirely on the non-positive side: append to child 1's list.
			surfaceStack.pushBack(surfaceStack[surfaceJ]);
			break;
		case 3:
			// Straddles the plane: goes to both children's lists.
			surfaceStack.insert();
			surfaceStack.back() = surfaceStack[childReadStop[0]];
			surfaceStack[childReadStop[0]++] = surfaceStack[surfaceJ];
			surfaceStack.pushBack(surfaceStack[surfaceJ]);
			break;
		}
	}
	childReadStart[1] = childReadStop[0];
	childReadStop[1] = surfaceStack.size();

	// Set branch data to winning surface.  Index through surfaceStack (not
	// the branchSurface pointer) because the insertions above may have
	// reallocated the array.
	node->setBranchData(surfaceStack[branchSurfaceN + stackReadStart]);
}
+
+/* Recursive functions */
+
+// These can be implemented using a tree iterator or a simple node stack
+
void
BSP::releaseNode(Node* node)
{
	// Iteratively tear down the subtree rooted at 'node', returning every
	// node to the pool.  Children are freed before their parent (post-order)
	// without recursion or an explicit stack: always descend to a remaining
	// child; once a node is childless, detach it, recycle it, and step back
	// to its parent.  Terminates when we climb back to the original node's
	// parent (which may be NULL for the tree root).
	PX_ASSERT(node != NULL);

	Node* stop = node->getParent();
	do
	{
		Node* child0 = node->getChild(0);
		if (child0)
		{
			node = child0;
		}
		else
		{
			Node* child1 = node->getChild(1);
			if (child1)
			{
				node = child1;
			}
			else
			{
				// Leaf-like (no remaining children): recycle and climb.
				Node* parent = node->getParent();
				node->detach();
				m_memCache->m_nodePool.replace(node);
				node = parent;
			}
		}
	} while (node != stop);
}
+
+void
+BSP::indexInsideLeaves(uint32_t& index, Node* root) const
+{
+ for (Node::It it(root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+ if (node->getType() == BSP::Node::Leaf)
+ {
+ if (node->getLeafData()->side == 1)
+ {
+ node->getLeafData()->tempIndex1 = index++;
+ }
+ }
+ }
+}
+
+void
+BSP::listInsideLeaves(physx::Array<Node*>& insideLeaves, Node* root) const
+{
+ for (Node::It it(root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+ if (node->getType() == BSP::Node::Leaf)
+ {
+ if (node->getLeafData()->side == 1)
+ {
+ insideLeaves.pushBack(node);
+ }
+ }
+ }
+}
+
+void
+BSP::complementLeaves(Node* root) const
+{
+ for (Node::It it(root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+ if (node->getType() == BSP::Node::Leaf)
+ {
+ node->getLeafData()->side = node->getLeafData()->side ^ 1;
+ }
+ }
+}
+
+void
+BSP::clipMeshToLeaves(physx::Array<Triangle>& clippedMesh, physx::Array<ClippedTriangleInfo>& triangleInfo, Node* root, float clipTolerance) const
+{
+ for (Node::It it(root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+ if (node->getType() == BSP::Node::Leaf)
+ {
+ if (node->getLeafData()->side == 1)
+ {
+ Real area, volume;
+ clipMeshToLeaf(area, volume, &clippedMesh, &triangleInfo, node, clipTolerance);
+ }
+ }
+ }
+}
+
void BSP::visualizeNode(RenderDebugInterface& debugRender, uint32_t flags, const Node* root) const
{
#if defined(WITHOUT_DEBUG_VISUALIZE)
	PX_UNUSED(debugRender);
	PX_UNUSED(flags);
	PX_UNUSED(root);
#else
	// Draw a wireframe for each leaf region selected by 'flags'.  For every
	// pair of bounding planes (i, j) of a leaf, intersect them to get a
	// candidate edge line, then clip the line's parameter interval
	// [minS, maxS] against all the leaf's other bounding planes.  Any
	// surviving interval (clamped to +/- 10x the mesh size) is drawn as a
	// debug line, transformed back into mesh space.

	const physx::PxMat44 BSPToMeshTM = PxFromCSG(m_internalTransformInverse);

	for (Node::It it(root); it.valid(); it.inc())
	{
		Node* node = it.node();
		if (node->getType() == BSP::Node::Leaf)
		{
			// Decide whether this leaf is drawn, and in which color.
			bool showLeaf = false;
			uint32_t color = 0;
			if (node->getLeafData()->side == 0)
			{
				if (flags & (BSPVisualizationFlags::OutsideRegions | BSPVisualizationFlags::SingleRegion))
				{
					showLeaf = true;
					color = 0xFF0000; // JWR: TODO
				}
			}
			else
			{
				if (flags & (BSPVisualizationFlags::InsideRegions | BSPVisualizationFlags::SingleRegion))
				{
					showLeaf = true;
					color = 0x00FF00; // JWR: TODO
				}
			}

			if (showLeaf)
			{
				RENDER_DEBUG_IFACE(&debugRender)->setCurrentColor(color);
				const Real clampSize = m_meshSize * 10;
				for (SurfaceIt i(node); i.valid(); i.inc())
				{
					const uint32_t planeIndex_i = i.surface()->planeIndex;
					const Plane& plane_i = m_planes[planeIndex_i];
					// Only consider unordered pairs: start j just past i.
					SurfaceIt j = i;
					j.inc();
					for (; j.valid(); j.inc())
					{
						const uint32_t planeIndex_j = j.surface()->planeIndex;
						const Plane& plane_j = m_planes[planeIndex_j];
						// Find potential edge from intersection if plane_i and plane_j
						Pos orig;
						Dir edgeDir;
						if (intersectPlanes(orig, edgeDir, plane_i, plane_j))
						{
							// Clip the candidate edge's parameter range against
							// every other bounding plane of this leaf.
							Real minS = -clampSize;
							Real maxS = clampSize;
							bool intersectionFound = true;
							for (SurfaceIt k(node); k.valid(); k.inc())
							{
								const uint32_t planeIndex_k = k.surface()->planeIndex;
								if (planeIndex_k == planeIndex_i || planeIndex_k == planeIndex_j)
								{
									continue;
								}
								// Orient plane_k so its positive side faces into the leaf.
								const Plane& plane_k = (k.side() ? (Real)1 : -(Real)1)*m_planes[planeIndex_k];
								const Real num = -(orig|plane_k);
								const Real den = edgeDir|plane_k;
								if (physx::PxAbs(den) > 10*EPS_REAL)
								{
									// Line crosses plane_k: tighten the interval on
									// the appropriate end.
									const Real s = num/den;
									if (den > (Real)0)
									{
										maxS = PxMin(maxS, s);
									}
									else
									{
										minS = PxMax(minS, s);
									}
									if (maxS <= minS)
									{
										intersectionFound = false;
										break;
									}
								}
								else
								// Line parallel to plane_k: reject if the whole line
								// lies on the wrong side.
								if (num < -10*EPS_REAL)
								{
									intersectionFound = false;
									break;
								}
							}
							if (intersectionFound)
							{
								// Draw the surviving edge segment in mesh space.
								const Pos e0 = orig + minS * edgeDir;
								const Pos e1 = orig + maxS * edgeDir;
								physx::PxVec3 p0, p1;
								p0 = physx::PxVec3(static_cast<float>(e0[0]),static_cast<float>(e0[1]),static_cast<float>(e0[2]));
								p1 = physx::PxVec3(static_cast<float>(e1[0]),static_cast<float>(e1[1]),static_cast<float>(e1[2]));
								RENDER_DEBUG_IFACE(&debugRender)->debugLine(BSPToMeshTM.transform(p0), BSPToMeshTM.transform(p1));
							}
						}
					}
				}
			}
		}
	}
#endif
}
+
+void
+BSP::serializeNode(const Node* root, physx::PxFileBuf& stream) const
+{
+ for (Node::It it(root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+
+ if (node != NULL)
+ {
+ stream << (uint32_t)1;
+ stream << (uint32_t)node->getType();
+
+ if (node->getType() == Node::Branch)
+ {
+ nvidia::serialize(stream, *node->getBranchData());
+ }
+ else
+ {
+ nvidia::serialize(stream, *node->getLeafData());
+ }
+ }
+ else
+ {
+ stream << (uint32_t)0;
+ }
+ }
+}
+
void
BSP::mergeLeaves(const BoolOp& op, Node* node)
{
	// Two jobs in one stackless depth-first walk of the subtree at 'node':
	//  * Descending: each leaf's side becomes op(bit 0, bit 1) of its
	//    current side value (bit 1 presumably holds the side contributed
	//    by a second tree -- see how 'side' is combined elsewhere).
	//  * Climbing: any branch whose two children are leaves with equal
	//    sides is collapsed into a single leaf, recycling the children.
	PX_ASSERT(node != NULL);

	// Stackless walk of tree
	bool up = false;
	Node* stop = node->getParent();
	for (;;)
	{
		if (up)
		{
			// We just finished a subtree.  If we were child 0, cross over to
			// child 1; if we were child 1, keep climbing and try to merge.
			up = (node->getIndex() == 1);
			node = node->getParent();
			if (node == stop)
			{
				break;
			}
			if (!up)
			{
				node = node->getChild(1);
			}
			else
			{
				// Climbing hierarchy, at a branch
				Node* child0 = node->getChild(0);
				Node* child1 = node->getChild(1);

				// Can consolidate if the children are both leaves on the same side.
				PX_ASSERT(child0 != NULL && child1 != NULL);
				if (child0 != NULL && child1 != NULL && child0->getType() == Node::Leaf && child1->getType() == Node::Leaf)
				{
					Region* region0 = child0->getLeafData();
					Region* region1 = child1->getLeafData();

					PX_ASSERT(region0 != NULL && region1 != NULL);
					PX_ASSERT((region0->side & 1) == region0->side && (region1->side & 1) == region1->side);
					if (region0->side == region1->side)
					{
						// Consolidate

						// Delete children
						child0->detach();
						child1->detach();

						// Turn this node into a leaf
						node->setLeafData(*region0);
						m_memCache->m_nodePool.replace(child0);
						m_memCache->m_nodePool.replace(child1);
					}
				}
			}
		}
		else
		{
			up = (node->getType() == Node::Leaf);
			if (!up)
			{
				// Descend to first child
				node = node->getChild(0);
			}
			else
			{
				// Leaf found
				// Perform boolean operation
				const uint32_t side = node->getLeafData()->side;
				node->getLeafData()->side = op(side & 1, (side >> 1) & 1);
			}
		}
	}
}
+
+// The following functions take a more complex set of arguments, or call recursively from points within the function
+
BSP::Node*
BSP::deserializeNode(uint32_t version, physx::PxFileBuf& stream)
{
	// Iteratively rebuild a node tree from the record stream written by
	// serializeNode().  Each record is an existence flag; a branch record is
	// followed (in the stream) by its child-0 subtree, then its child-1
	// subtree.  The explicit stack holds branch nodes still awaiting their
	// second child.  Returns the reconstructed root.
	Node* root = NULL;

	physx::Array<Node*> stack;

	Node* parent = NULL;

	uint32_t readChildIndex = 0xFFFFFFFF;

	for (;;)
	{
		uint32_t createNode;
		stream >> createNode;

		// NOTE(review): a record with flag 0 (null node) is simply skipped
		// and the loop reads again with the same parent/child slot; there is
		// no explicit handling for a null root record -- presumably
		// well-formed streams always terminate via the leaf/stack-empty
		// path below.  Confirm before reusing on untrusted input.
		if (createNode)
		{
			Node* node = m_memCache->m_nodePool.borrow();

			if (parent == NULL)
			{
				root = node;
			}
			else
			{
				parent->setChild(readChildIndex, node);
			}

			uint32_t nodeType;
			stream >> nodeType;

			if (nodeType != Node::Leaf)
			{
				Surface surface;
				nvidia::deserialize(stream, version, surface);
				node->setBranchData(surface);

				// Push child 1
				stack.pushBack(node);

				// Process child 0
				parent = node;
				readChildIndex = 0;
			}
			else
			{
				Region region;

				// Make compiler happy
				region.tempIndex1 = region.tempIndex2 = region.tempIndex3 = 0;

				nvidia::deserialize(stream, version, region);

				node->setLeafData(region);

				// A leaf completes a subtree: resume the deepest branch still
				// waiting for its second child, or finish if none remain.
				if (stack.size() == 0)
				{
					break;
				}

				parent = stack.popBack();
				readChildIndex = 1;
			}
		}
	}

	return root;
}
+
// Explicit traversal frame for BSP::combineTrees(): pairs a node of the tree
// being modified with the corresponding node of the tree being combined in.
struct CombineTreesFrame
{
	BSP::Node* node;             // node in the destination tree
	const BSP::Node* combineNode; // matching node in the tree being merged in
};
+
+// Grafts 'combineRoot' (another tree, with its plane/triangle indices offset by
+// planeIndexOffset/triangleIndexOffset to match this BSP's concatenated arrays)
+// onto every leaf of 'root'.  After combining, each Region::side carries this
+// tree's bit in bit 0 and the combined tree's bit in bit 1.  Leaves that the
+// combining branch does not actually split (detected via a GSA emptiness test)
+// are kept as leaves and only one side of the combining branch is followed.
+// Iterative with an explicit stack to avoid deep recursion.
+void
+BSP::combineTrees(Node* root, const Node* combineRoot, uint32_t triangleIndexOffset, uint32_t planeIndexOffset)
+{
+ physx::Array<CombineTreesFrame> stack;
+ stack.reserve(m_planes.size()); // To avoid reallocations
+
+ RegionShape regionShape((const Plane*)m_planes.begin(), (Real)0.0001*m_meshSize);
+
+ CombineTreesFrame localFrame;
+ localFrame.node = root;
+ localFrame.combineNode = combineRoot;
+
+ for (;;)
+ {
+ if (localFrame.node->getType() != BSP::Node::Leaf)
+ {
+ // Push child 1
+ CombineTreesFrame& callFrame = stack.insert();
+ callFrame.node = localFrame.node->getChild(1);
+ callFrame.combineNode = localFrame.combineNode;
+
+ // Process child 0
+ localFrame.node = localFrame.node->getChild(0);
+ continue;
+ }
+ else
+ {
+ if (localFrame.combineNode->getType() != Node::Leaf)
+ {
+ // Branch node
+
+ // Copy branch data, and offset the triangle indices
+ Surface branchSurface;
+ const Surface* combineBranchSurface = localFrame.combineNode->getBranchData();
+ branchSurface.planeIndex = combineBranchSurface->planeIndex + planeIndexOffset;
+ branchSurface.triangleIndexStart = combineBranchSurface->triangleIndexStart + triangleIndexOffset;
+ branchSurface.triangleIndexStop = combineBranchSurface->triangleIndexStop + triangleIndexOffset;
+ branchSurface.totalTriangleArea = combineBranchSurface->totalTriangleArea;
+
+ // Store off old leaf data
+ Region oldRegion = *localFrame.node->getLeafData();
+
+ // Turn this leaf into a branch, see which sides are non-empty
+ localFrame.node->setBranchData(branchSurface);
+ bool intersects[2];
+ for (uint32_t index = 0; index < 2; ++index)
+ {
+ Node* child = m_memCache->m_nodePool.borrow();
+ child->setLeafData(oldRegion);
+ localFrame.node->setChild(index, child);
+ regionShape.set_leaf(child);
+ regionShape.calculate();
+ intersects[index] = regionShape.is_nonempty(); // GSA test: does the leaf's region extend to this side?
+ }
+
+ if (intersects[0] && intersects[1])
+ {
+ // We need both branches
+ // Push child 1
+ CombineTreesFrame& callFrame = stack.insert();
+ callFrame.node = localFrame.node->getChild(1);
+ callFrame.combineNode = localFrame.combineNode->getChild(1);
+
+ // Process child 0
+ localFrame.node = localFrame.node->getChild(0);
+ localFrame.combineNode = localFrame.combineNode->getChild(0);
+ continue;
+ }
+ else
+ {
+ // Leaf not split by the combining branch. Return the new branch surface.
+ for (uint32_t index = 0; index < 2; ++index)
+ {
+ Node* child = localFrame.node->getChild(index);
+ localFrame.node->setChild(index, NULL);
+ m_memCache->m_nodePool.replace(child); // Return the speculative children to the pool
+ }
+ // Turn this branch back into a leaf
+ localFrame.node->setLeafData(oldRegion);
+ // Collapse tree by following one branch.
+ if (intersects[0])
+ {
+ localFrame.combineNode = localFrame.combineNode->getChild(0);
+ continue;
+ }
+ else
+ if (intersects[1])
+ {
+ localFrame.combineNode = localFrame.combineNode->getChild(1);
+ continue;
+ }
+ // else we drop down into pop stack, below
+ }
+ }
+ else
+ {
+ // Leaf node: pack the combined tree's side bit into bit 1 of this leaf's side
+ localFrame.node->getLeafData()->side = localFrame.node->getLeafData()->side | localFrame.combineNode->getLeafData()->side << 1;
+ }
+ }
+ if (stack.size() == 0)
+ {
+ break;
+ }
+ localFrame = stack.popBack();
+ }
+}
+
+// Finds all pairs of "inside" leaves (Region::side != 0) whose regions touch,
+// appending each pair of leaf tempIndex1 values to 'neighbors'.  For each
+// inside leaf, its bounding plane set is collected (planes inflated by a small
+// tolerance so abutting regions register as intersecting) and culled via GSA;
+// the remainder of the tree is then walked stacklessly, pruning subtrees whose
+// halfspace intersection with the current leaf's plane set is empty.
+// \param neighbors  output array of (leaf tempIndex1, neighbor tempIndex1) pairs
+// \param root       tree to search; may be NULL (function is then a no-op)
+void
+BSP::findInsideLeafNeighbors(physx::Array<IntPair>& neighbors, Node* root) const
+{
+ if (root == NULL)
+ {
+ return;
+ }
+
+ const Real tol = m_meshSize*(Real)0.0001;
+
+ physx::Array<Plane> planes;
+
+ HalfspaceIntersection test;
+
+ for (Node::It it(root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+ if (node->getType() == BSP::Node::Leaf)
+ {
+ // Found a leaf.
+ if (node->getLeafData()->side == 0)
+ {
+ continue; // Only want inside leaves
+ }
+
+ // Iterate up to root and collect planes
+ // (iterator renamed from 'it' to avoid shadowing the outer tree iterator)
+ planes.resize(0);
+ for (SurfaceIt surfIt(node); surfIt.valid(); surfIt.inc())
+ {
+ const Real sign = surfIt.side() ? (Real)1 : -(Real)1;
+ planes.pushBack(sign * m_planes[surfIt.surface()->planeIndex]);
+ planes.back()[3] -= tol; // Inflate slightly so boundary contact counts as intersection
+ }
+
+#ifdef CULL_PLANES_LIST
+#undef CULL_PLANES_LIST
+#endif
+#define CULL_PLANES_LIST 1
+#if CULL_PLANES_LIST
+ // Now flip each plane to see if it's necessary
+ for (uint32_t planeIndex = planes.size(); planeIndex--;)
+ {
+ planes[planeIndex] *= -(Real)1; // Invert
+ test.setPlanes(planes.begin(), planes.size());
+ const int result = GSA::vs3d_test(test);
+ const bool necessary = 1 == result;
+ const bool testError = result < 0;
+ planes[planeIndex] *= -(Real)1; // Restore
+ if (!necessary && !testError)
+ {
+ planes.replaceWithLast(planeIndex); // Unnecessary; remove
+ }
+ }
+#endif
+
+ if (planes.size() > 0)
+ {
+ // First half of pair will always be node's index.
+ IntPair pair;
+ pair.i0 = (int32_t)node->getLeafData()->tempIndex1;
+
+ const uint32_t currentLeafPlaneCount = planes.size();
+
+ // Stackless walk of remainder of tree
+ bool up = true;
+ while (node != root)
+ {
+ if (up)
+ {
+ up = (node->getIndex() == 1);
+ node = node->getParent();
+ if (planes.size() > currentLeafPlaneCount)
+ {
+ planes.popBack();
+ }
+ if (!up)
+ {
+ planes.pushBack(m_planes[node->getBranchData()->planeIndex]);
+ planes.back()[3] -= tol;
+ test.setPlanes(planes.begin(), planes.size());
+ up = 0 == GSA::vs3d_test(test); // Skip subtree if there is no intersection at this branch
+ node = node->getChild(1);
+ }
+ }
+ else
+ {
+ up = (node->getType() == Node::Leaf);
+ if (!up)
+ {
+ planes.pushBack(-m_planes[node->getBranchData()->planeIndex]);
+ planes.back()[3] -= tol;
+ test.setPlanes(planes.begin(), planes.size()); // BUGFIX: was missing here (cf. the upward branch above); pushBack can reallocate the array and the plane count has changed, so the test state must be refreshed before vs3d_test
+ up = 0 == GSA::vs3d_test(test); // Skip subtree if there is no intersection at this branch
+ node = node->getChild(0);
+ }
+ else
+ {
+ Region& region = *node->getLeafData();
+ if (region.side == 1)
+ {
+ // We have found another inside leaf which intersects (at boundary)
+ pair.i1 = (int32_t)region.tempIndex1;
+ neighbors.pushBack(pair);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Returns false if 'plane' already (numerically) appears in 'planes' — that is,
+// some stored plane differs from it by less than CSG_EPS in squared length —
+// and true if the plane would add new information to the list.
+PX_INLINE bool
+planeIsNotRedundant(const physx::Array<Plane>& planes, const Plane& plane)
+{
+ const uint32_t planeCount = planes.size();
+ for (uint32_t planeNum = 0; planeNum < planeCount; ++planeNum)
+ {
+ const Real diff2 = (planes[planeNum] - plane).lengthSquared();
+ if (diff2 < CSG_EPS)
+ {
+ return false; // Effectively a duplicate of an existing plane
+ }
+ }
+ return true; // No near-duplicate found
+}
+
+// Accumulates into totalArea/totalVolume the surface area and volume of every
+// leaf on the requested side ('inside').  If m_combined, each leaf's side is
+// first reduced with the boolean op over its two stored side bits.  Each leaf's
+// area/volume is computed from its bounding plane set, using the cofactor of
+// the internal transform to map back to world measure.
+// \return false if root is NULL or a leaf computation fails (totals are then
+//         set to MAX_REAL); true on success.
+bool
+BSP::addLeafAreasAndVolumes(Real& totalArea, Real& totalVolume, const Node* root, bool inside, const BoolOp& op) const
+{
+ if (root == NULL)
+ {
+ return false;
+ }
+
+ // Build a list of essential planes
+ physx::Array<Plane> planes;
+
+#ifdef CULL_PLANES_LIST
+#undef CULL_PLANES_LIST
+#endif
+#define CULL_PLANES_LIST 0
+
+#if CULL_PLANES_LIST
+ HalfspaceIntersection test;
+#endif
+
+ const Mat4Real cofInternalTransform = CSGFromPx(m_internalTransform).cof34();
+
+ for (Node::It it(root); it.valid(); it.inc())
+ {
+ Node* node = it.node();
+ if (node->getType() == BSP::Node::Leaf)
+ {
+ // Found a leaf.
+
+ // See if it's on the correct side (possibly after combining)
+ uint32_t side = node->getLeafData()->side;
+ if (m_combined)
+ {
+ side = op(side & 1, (side >> 1) & 1);
+ }
+ if ((side != 0) != inside)
+ {
+ continue;
+ }
+
+ // Iterate up to root and collect planes
+ planes.resize(0);
+ for (SurfaceIt it(node); it.valid(); it.inc())
+ {
+ const Real sign = it.side() ? (Real)1 : -(Real)1;
+ planes.pushBack(sign * m_planes[it.surface()->planeIndex]);
+ }
+
+#if CULL_PLANES_LIST
+ // Now flip each plane to see if it's necessary
+ for (uint32_t planeIndex = planes.size(); planeIndex--;)
+ {
+ planes[planeIndex] *= -(Real)1; // Invert
+ test.setPlanes(planes.begin(), planes.size());
+ const bool necessary = test.intersect();
+ const bool testError = (test.state() & ApexCSG::GSA::GSA_State::Error_Flag) != 0;
+ planes[planeIndex] *= -(Real)1; // Restore
+ if (!necessary && !testError) // BUGFIX: was '!error', an undeclared identifier — would fail to compile whenever CULL_PLANES_LIST is set to 1 (cf. the working copy of this loop in findInsideLeafNeighbors)
+ {
+ planes.replaceWithLast(planeIndex); // Unnecessary; remove
+ }
+ }
+#endif
+
+ // Now use this culled plane list to find areas and volumes
+ if (planes.size() > 0)
+ {
+ Real area, volume;
+ if (!calculateLeafAreaAndVolume(area, volume, planes.begin(), planes.size(), cofInternalTransform))
+ {
+ totalArea = MAX_REAL;
+ totalVolume = MAX_REAL;
+ return false; // No need to add anymore
+ }
+ totalArea += area;
+ totalVolume += volume;
+ }
+ }
+ }
+
+ return true;
+}
+
+// Stack frame for BSP::clone: pairs the copy being written with the original
+// node it mirrors.
+struct CloneFrame
+{
+ BSP::Node* node; // Destination (copy) node
+ const BSP::Node* original; // Source node being cloned
+};
+
+// Deep-copies the tree under 'originalRoot' into 'root' (both roots are
+// assumed already allocated).  Leaf/branch payloads are copied by value and
+// new child nodes are drawn from m_memCache's node pool.  Iterative: child 1
+// of each node is deferred on an explicit stack while child 0 is followed
+// directly; a NULL child 0 triggers a stack pop (or termination).
+void
+BSP::clone(Node* root, const Node* originalRoot)
+{
+ physx::Array<CloneFrame> stack;
+
+ CloneFrame localFrame;
+ localFrame.node = root;
+ localFrame.original = originalRoot;
+
+ for (;;)
+ {
+ switch (localFrame.original->getType())
+ {
+ case Node::Leaf:
+ localFrame.node->setLeafData(*localFrame.original->getLeafData());
+ break;
+ case Node::Branch:
+ localFrame.node->setBranchData(*localFrame.original->getBranchData());
+ break;
+ }
+
+ const Node* originalChild;
+ Node* child;
+
+ // Push child 1
+ originalChild = localFrame.original->getChild(1);
+ if (originalChild != NULL)
+ {
+ child = m_memCache->m_nodePool.borrow();
+ localFrame.node->setChild(1, child);
+ CloneFrame& callFrame = stack.insert();
+ callFrame.node = child;
+ callFrame.original = originalChild;
+ }
+
+ // Process child 0
+ originalChild = localFrame.original->getChild(0);
+ if (originalChild != NULL)
+ {
+ child = m_memCache->m_nodePool.borrow();
+ localFrame.node->setChild(0, child);
+ localFrame.node = child;
+ localFrame.original = originalChild;
+ }
+ else
+ {
+ // No child 0: this subtree is done; resume a deferred child 1 (or finish)
+ if (stack.size() == 0)
+ {
+ break;
+ }
+ localFrame = stack.popBack();
+ }
+ }
+}
+
+// Stack frame for BSP::buildTree: the node to build, the range of surfaceStack
+// it should consume, and the stack size to restore when the frame is resumed.
+struct BuildTreeFrame
+{
+ BSP::Node* node; // Node to build (initially a leaf)
+ uint32_t surfaceStackReadStart; // First surface index for this node
+ uint32_t surfaceStackReadStop; // One past the last surface index for this node
+ uint32_t inputSurfaceStackSize; // surfaceStack size to restore when this frame is popped
+};
+
+// Iteratively builds the BSP below 'node' from the surfaces in
+// surfaceStack[stackReadStart, stackReadStop).  Each iteration removes
+// redundant surfaces; if none remain, the node is finalized as a leaf
+// (assignLeafSide).  Otherwise a branch surface is selected and the surface
+// range is split into per-child ranges (createBranchSurfaceAndSplitStack);
+// child 1 is deferred on an explicit stack while child 0 is processed in-line.
+// \param node            root of the subtree to build
+// \param surfaceStack    working surface array; grown/shrunk as children are processed
+// \param stackReadStart  start of this node's surface range
+// \param stackReadStop   end (exclusive) of this node's surface range
+// \param buildConstants  parameters forwarded to the split routine
+// \param quantityListener optional progress callback, forwarded to assignLeafSide
+// \param cancel          optional flag polled each iteration; aborts the build when set
+// \return false if cancelled, true on completion
+bool
+BSP::buildTree(Node* node, physx::Array<Surface>& surfaceStack, uint32_t stackReadStart, uint32_t stackReadStop,
+ const BuildConstants& buildConstants, QuantityProgressListener* quantityListener, volatile bool* cancel)
+{
+ physx::Array<BuildTreeFrame> stack;
+ stack.reserve(surfaceStack.size()); // To avoid reallocations
+
+ BuildTreeFrame localFrame;
+ localFrame.node = node;
+ localFrame.surfaceStackReadStart = stackReadStart;
+ localFrame.surfaceStackReadStop = stackReadStop;
+ localFrame.inputSurfaceStackSize = surfaceStack.size();
+
+ for (;;)
+ {
+ if (cancel && *cancel)
+ {
+ return false; // Build aborted by caller
+ }
+
+ localFrame.surfaceStackReadStop = removeRedundantSurfacesFromStack(surfaceStack, localFrame.surfaceStackReadStart, localFrame.surfaceStackReadStop, localFrame.node);
+ if (localFrame.surfaceStackReadStop == localFrame.surfaceStackReadStart)
+ {
+ // No surfaces left: this node is a finished leaf
+ assignLeafSide(localFrame.node, quantityListener);
+ if (stack.size() == 0)
+ {
+ break;
+ }
+ localFrame = stack.popBack();
+ surfaceStack.resize(localFrame.inputSurfaceStackSize); // Discard surfaces added for the subtree just completed
+ }
+ else
+ {
+ uint32_t childReadStart[2];
+ uint32_t childReadStop[2];
+ createBranchSurfaceAndSplitStack(childReadStart, childReadStop, localFrame.node, surfaceStack, localFrame.surfaceStackReadStart, localFrame.surfaceStackReadStop, buildConstants);
+
+ Node* child;
+
+ // Push child 1
+ child = m_memCache->m_nodePool.borrow();
+ child->getLeafData()->side = 1; // Provisional side = child index
+ localFrame.node->setChild(1, child);
+ BuildTreeFrame& callFrame = stack.insert();
+ callFrame.node = child;
+ callFrame.surfaceStackReadStart = childReadStart[1];
+ callFrame.surfaceStackReadStop = childReadStop[1];
+ callFrame.inputSurfaceStackSize = surfaceStack.size();
+
+ // Process child 0
+ child = m_memCache->m_nodePool.borrow();
+ child->getLeafData()->side = 0; // Provisional side = child index
+ localFrame.node->setChild(0, child);
+ localFrame.node = child;
+ localFrame.surfaceStackReadStart = childReadStart[0];
+ localFrame.surfaceStackReadStop = childReadStop[0];
+ localFrame.inputSurfaceStackSize = surfaceStack.size();
+ }
+ }
+
+ return true;
+}
+
+
+/* For GSA */
+
+// GSA support function: among the (skin-width-inflated) planes bounding this
+// region's leaf, writes into 'plane' the one with the greatest signed distance
+// from 'point', and returns that distance.  With no leaf or plane array set,
+// returns -MAX_REAL and the trivial halfspace (0,0,0,1).
+GSA::real
+BSP::RegionShape::farthest_halfspace(GSA::real plane[4], const GSA::real point[4])
+{
+ // Default output in case no surfaces are examined
+ plane[0] = plane[1] = plane[2] = 0.0f;
+ plane[3] = 1.0f;
+
+ Real maxS = -MAX_REAL;
+
+ if (m_leaf && m_planes)
+ {
+ // Walk every surface bounding the leaf's region, oriented by its side
+ for (SurfaceIt surfIt(m_leaf); surfIt.valid(); surfIt.inc())
+ {
+ const Real orientation = surfIt.side() ? (Real)1 : -(Real)1;
+ Plane candidate = orientation * m_planes[surfIt.surface()->planeIndex];
+ candidate[3] -= m_skinWidth; // Inflate by the skin width
+ const Real s = candidate[0]*point[0] + candidate[1]*point[1] + candidate[2]*point[2] + candidate[3]*point[3];
+ if (s > maxS)
+ {
+ // New farthest halfspace; record it
+ maxS = s;
+ plane[0] = (GSA::real)candidate[0];
+ plane[1] = (GSA::real)candidate[1];
+ plane[2] = (GSA::real)candidate[2];
+ plane[3] = (GSA::real)candidate[3];
+ }
+ }
+ }
+
+ // Return results
+ return (GSA::real)maxS;
+}
+
+
+/* BSPMemCache */
+
+// Constructor: pre-sizes the node pool to 10000 BSP nodes.
+BSPMemCache::BSPMemCache() :
+ m_nodePool(10000)
+{
+}
+
+// Drains the node pool (reporting any leak/over-free imbalance through the
+// APEX error callback) and then clears the temporary caches via clearTemp().
+void
+BSPMemCache::clearAll()
+{
+ // A positive remainder means nodes were never returned; negative means
+ // nodes were returned more than once.
+ const int32_t nodeImbalance = m_nodePool.empty();
+ if (nodeImbalance != 0)
+ {
+ char message[1000];
+ physx::shdfnd::snprintf(message, 1000, "BSPMemCache: %d nodes %sfreed ***", physx::PxAbs(nodeImbalance), nodeImbalance > 0 ? "un" : "over");
+ GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_INFO, message, __FILE__, __LINE__);
+ }
+
+ clearTemp();
+}
+
+// Resets the per-operation scratch state: surface flag bitmaps and the
+// linked-vertex pool.  Any pool imbalance (leaked or doubly-freed vertices)
+// is reported through the APEX error callback.
+void
+BSPMemCache::clearTemp()
+{
+ m_surfaceFlags.reset();
+ m_surfaceTestFlags.reset();
+
+ const int32_t vertexImbalance = m_linkedVertexPool.empty();
+ if (vertexImbalance != 0)
+ {
+ char message[1000];
+ physx::shdfnd::snprintf(message, 1000, "BSPMemCache: %d linked vertices %sfreed ***", physx::PxAbs(vertexImbalance), vertexImbalance > 0 ? "un" : "over");
+ GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_INFO, message, __FILE__, __LINE__);
+ }
+}
+
+// Releases this cache: drains/reports all pools via clearAll(), then
+// self-destructs (the cache must have been heap-allocated, e.g. by
+// createBSPMemCache).
+void
+BSPMemCache::release()
+{
+ clearAll();
+ delete this;
+}
+
+
+/* CSG Tools API */
+
+// Factory: allocates a new BSP memory cache.  Callers release it via
+// IApexBSPMemCache::release().
+IApexBSPMemCache*
+createBSPMemCache()
+{
+ return PX_NEW(BSPMemCache)();
+}
+
+// Factory: allocates a new BSP bound to the given memory cache and internal
+// transform, pre-configured with the default tolerances.
+IApexBSP*
+createBSP(IApexBSPMemCache* memCache, const physx::PxMat44& internalTransform)
+{
+ IApexBSP* bsp = PX_NEW(BSP)(memCache, internalTransform);
+
+ bsp->setTolerances(gDefaultTolerances);
+
+ return bsp;
+}
+
+}
+#endif
diff --git a/APEX_1.4/shared/internal/src/authoring/ApexCSGHull.cpp b/APEX_1.4/shared/internal/src/authoring/ApexCSGHull.cpp
new file mode 100644
index 00000000..f8b87cec
--- /dev/null
+++ b/APEX_1.4/shared/internal/src/authoring/ApexCSGHull.cpp
@@ -0,0 +1,1224 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#include "ApexUsingNamespace.h"
+#include "PxSimpleTypes.h"
+#include "PxErrorCallback.h"
+
+#include "authoring/ApexCSGHull.h"
+#include "authoring/ApexCSGSerialization.h"
+#include "ApexSharedSerialization.h"
+#include <stdio.h>
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+#define EPS ((Real)1.0e-6)
+
+#define SOFT_ASSERT(x) //PX_ASSERT(x)
+
+using namespace nvidia;
+
+
+/* Local utilities */
+
+// qsort-style comparator: orders Hull edges by their first face index
+// (m_indexF1), ascending.
+static int compareEdgeFirstFaceIndices(const void* a, const void* b)
+{
+ const ApexCSG::Hull::Edge* edgeA = (const ApexCSG::Hull::Edge*)a;
+ const ApexCSG::Hull::Edge* edgeB = (const ApexCSG::Hull::Edge*)b;
+ return (int)edgeA->m_indexF1 - (int)edgeB->m_indexF1;
+}
+
+
+/* Hull */
+
+void ApexCSG::Hull::intersect(const ApexCSG::Plane& plane, ApexCSG::Real distanceTol)
+{
+ if (isEmptySet())
+ {
+ return;
+ }
+
+ if (isAllSpace())
+ {
+ Plane& newFace = faces.insert();
+ newFace = plane;
+ allSpace = false;
+ return;
+ }
+
+ const Dir planeNormal = plane.normal();
+
+ if (edges.size() == 0)
+ {
+ PX_ASSERT(vectors.size() == 0);
+ PX_ASSERT(faces.size() == 1 || faces.size() == 2);
+
+ bool addFace = true;
+ const uint32_t newFaceIndex = faces.size();
+
+ // Nothing but a plane or two.
+ for (uint32_t i = 0; i < newFaceIndex; ++i)
+ {
+ Plane& faceI = faces[i];
+ const Dir normalI = faceI.normal();
+ Dir edgeDir = normalI ^ planeNormal;
+ const Real e2 = edgeDir | edgeDir; // = 1-cosAngle*cosAngle
+ const Real cosAngle = normalI | planeNormal;
+ if (e2 < EPS * EPS)
+ {
+ if (cosAngle > 0)
+ {
+ // Parallel case
+ if (plane.d() <= faceI.d())
+ {
+ SOFT_ASSERT(testConsistency(100 * distanceTol, (Real)1.0e-8));
+ return; // Plane is outside of an existing plane
+ }
+ faceI = plane; // Plane is inside an existing plane - replace
+ addFace = false;
+ }
+ else
+ {
+ // Antiparallel case
+ if (-plane.d() < faceI.d() + distanceTol)
+ {
+ setToEmptySet(); // Halfspaces are mutually exclusive
+ return;
+ }
+ }
+ }
+ else
+ {
+ // Intersecting case - add an edge
+ const Real recipE2 = (Real)1 / e2;
+ edgeDir *= physx::PxSqrt(recipE2);
+ const Pos orig = Pos((Real)0) + (recipE2 * (faceI.d() * cosAngle - plane.d())) * planeNormal + (recipE2 * (plane.d() * cosAngle - faceI.d())) * normalI;
+ if (vectors.size() == 0)
+ {
+ vectors.resize(newFaceIndex + 1);
+ vectors[newFaceIndex] = edgeDir;
+ }
+ vectors[i] = orig;
+ Edge& newEdge = edges.insert();
+ newEdge.m_indexV0 = i;
+ newEdge.m_indexV1 = newFaceIndex; // We will have as many edges as old faces. This will be the edge direction vector index.
+ if (i == 0)
+ {
+ newEdge.m_indexF1 = i;
+ newEdge.m_indexF2 = newFaceIndex;
+ }
+ else
+ {
+ // Keep the orientation and edge directions the same
+ newEdge.m_indexF1 = newFaceIndex;
+ newEdge.m_indexF2 = i;
+ }
+ }
+ }
+
+ if (addFace)
+ {
+ Plane& newFace = faces.insert();
+ newFace = plane;
+ }
+
+ SOFT_ASSERT(testConsistency(100 * distanceTol, (Real)1.0e-8));
+ return;
+ }
+
+ // Hull has edges. If they are lines, and parallel to the plane, this will continue to be the case.
+ if (vertexCount == 0)
+ {
+ PX_ASSERT(vectors.size() >= 2);
+ const Dir edgeDir = getDir(edges[0]); // All line edges will be in the same direction
+ PX_ASSERT(vectors[vectors.size() - 1][3] == (Real)0); // The last vector should be a direction (the edge direction);
+ const Real den = edgeDir | planeNormal;
+ if (physx::PxAbs(den) <= EPS)
+ {
+ // Lines are parallel to plane
+ // This is essentially a 2-d algorithm, edges->vertices, faces->edges
+ bool negClassEmpty = true;
+ bool posClassEmpty = true;
+ int8_t* edgeClass = (int8_t*)PxAlloca((edges.size() + 2) * sizeof(int8_t)); // Adding 2 for "edges at infinity"
+ struct FaceEdge
+ {
+ int32_t e1, e2;
+ };
+ FaceEdge* faceEdges = (FaceEdge*)PxAlloca(sizeof(FaceEdge) * faces.size() + 1);// Adding 1 for possible new face
+ uint32_t* faceIndices = (uint32_t*)PxAlloca(sizeof(uint32_t) * (faces.size() + 1)); // Adding 1 for possible new face
+ uint32_t* faceRemap = (uint32_t*)PxAlloca(sizeof(uint32_t) * (faces.size() + 1)); // Adding 1 for possible new face
+ for (uint32_t i = 0; i < faces.size(); ++i)
+ {
+ FaceEdge& faceEdge = faceEdges[i];
+ faceEdge.e2 = faceEdge.e1 = 0x80000000; // Indicates unset
+ faceIndices[i] = i;
+ faceRemap[i] = i;
+ }
+
+ // Classify the actual edges
+ for (uint32_t i = 0; i < edges.size(); ++i)
+ {
+ // The inequalities are set up so that when distanceTol = 0, class 0 is empty and classes -1 and 1 are disjoint
+ Edge& edge = edges[i];
+ PX_ASSERT(faceEdges[edge.m_indexF1].e2 == INT32_MIN);
+ faceEdges[edge.m_indexF1].e2 = (int32_t)i;
+ PX_ASSERT(faceEdges[edge.m_indexF2].e1 == INT32_MIN);
+ faceEdges[edge.m_indexF2].e1 = (int32_t)i;
+ const Real dist = plane.distance(getV0(edge));
+ if (dist < -distanceTol)
+ {
+ edgeClass[i] = -1;
+ negClassEmpty = false;
+ }
+ else if (dist < distanceTol)
+ {
+ edgeClass[i] = 0;
+ }
+ else
+ {
+ edgeClass[i] = 1;
+ posClassEmpty = false;
+ }
+ }
+
+ // Also classify fictitious "edges at infinity" for a complete description
+ uint32_t halfInfinitePlaneCount = 0;
+ for (uint32_t i = 0; i < faces.size(); ++i)
+ {
+ Plane& face = faces[i];
+ FaceEdge& faceEdge = faceEdges[i];
+ PX_ASSERT(faceEdge.e2 != INT32_MIN || faceEdge.e1 != INT32_MIN);
+ if (faceEdge.e2 == INT32_MIN || faceEdge.e1 == INT32_MIN)
+ {
+ PX_ASSERT(halfInfinitePlaneCount < 2);
+ if (halfInfinitePlaneCount < 2)
+ {
+ const int32_t classIndex = (int32_t)(edges.size() + halfInfinitePlaneCount++);
+
+ Real cosTheta = (edgeDir ^ face.normal()) | planeNormal;
+ uint32_t realEdgeIndex;
+ if (faceEdge.e2 == INT32_MIN)
+ {
+ faceEdge.e2 = -classIndex; // Negating => fictitious edge.
+ realEdgeIndex = (uint32_t)faceEdge.e1;
+ }
+ else
+ {
+ faceEdge.e1 = -classIndex; // Negating => fictitious edge.
+ cosTheta *= -1;
+ realEdgeIndex = (uint32_t)faceEdge.e2;
+ }
+
+ if (cosTheta < -EPS)
+ {
+ edgeClass[classIndex] = -1;
+ negClassEmpty = false;
+ }
+ else if (cosTheta < EPS)
+ {
+ const Real d = plane.distance(getV0(edges[realEdgeIndex]));
+ if (d < -distanceTol)
+ {
+ edgeClass[classIndex] = -1;
+ negClassEmpty = false;
+ }
+ else if (d < distanceTol)
+ {
+ edgeClass[classIndex] = 0;
+ }
+ else
+ {
+ edgeClass[classIndex] = 1;
+ posClassEmpty = false;
+ }
+ }
+ else
+ {
+ edgeClass[classIndex] = 1;
+ posClassEmpty = false;
+ }
+ }
+ }
+ }
+
+ if (negClassEmpty)
+ {
+ setToEmptySet();
+ return;
+ }
+ if (posClassEmpty)
+ {
+ SOFT_ASSERT(testConsistency(100 * distanceTol, (Real)1.0e-8));
+ return;
+ }
+
+ vectors.popBack(); // We will keep the line origins contiguous, and place the edge direction at the end of the array later
+
+ bool addFace = false;
+ FaceEdge newFaceEdge = { (int32_t)0x80000000, (int32_t)0x80000000 };
+ const uint32_t newFaceIndex = faces.size();
+
+ for (uint32_t i = faces.size(); i--;)
+ {
+ int32_t clippedEdgeIndex = (int32_t)edges.size();
+ Plane& face = faces[i];
+ FaceEdge& faceEdge = faceEdges[i];
+ PX_ASSERT(faceEdge.e2 != INT32_MIN && faceEdge.e1 != INT32_MIN);
+ uint32_t newEdgeIndex = 0xFFFFFFFF;
+ int32_t orig1Index = faceEdge.e2;
+ int32_t orig2Index = faceEdge.e1;
+ switch (edgeClass[physx::PxAbs(faceEdge.e2)])
+ {
+ case 1:
+ if (edgeClass[physx::PxAbs(faceEdge.e1)] == -1)
+ {
+ // Clip face
+ PX_ASSERT(((face.normal() ^ planeNormal) | edgeDir) > 0);
+ PX_ASSERT(newFaceEdge.e1 == INT32_MIN);
+ newFaceEdge.e1 = clippedEdgeIndex;
+ newEdgeIndex = edges.size();
+ Edge& newEdge = edges.insert();
+ newEdge.m_indexF1 = i;
+ newEdge.m_indexF2 = newFaceIndex;
+ faceEdge.e2 = clippedEdgeIndex;
+ addFace = true;
+ }
+ else
+ {
+ // Eliminate face
+ faces.replaceWithLast(i);
+ faceEdges[i] = faceEdges[faces.size()];
+ faceIndices[i] = faceIndices[faces.size()];
+ faceRemap[faceIndices[faces.size()]] = i;
+#ifdef _DEBUG // Should not be necessary, but helps with debugging
+ faceIndices[faces.size()] = 0xFFFFFFFF;
+ faceRemap[i] = 0xFFFFFFFF;
+#endif
+ }
+ break;
+ case 0:
+ if (edgeClass[physx::PxAbs(faceEdge.e1)] == -1)
+ {
+ // Keep this face, and use this edge on the new face
+ clippedEdgeIndex = faceEdge.e2;
+ PX_ASSERT(newFaceEdge.e1 == INT32_MIN);
+ newFaceEdge.e1 = faceEdge.e2;
+ edges[(uint32_t)faceEdge.e2].m_indexF2 = newFaceIndex;
+ addFace = true;
+ }
+ else
+ {
+ // Eliminate face
+ faces.replaceWithLast(i);
+ faceEdges[i] = faceEdges[faces.size()];
+ faceIndices[i] = faceIndices[faces.size()];
+ faceRemap[faceIndices[faces.size()]] = i;
+#ifdef _DEBUG // Should not be necessary, but helps with debugging
+ faceIndices[faces.size()] = 0xFFFFFFFF;
+ faceRemap[i] = 0xFFFFFFFF;
+#endif
+ }
+ break;
+ case -1:
+ switch (edgeClass[physx::PxAbs(faceEdge.e1)])
+ {
+ case 1: // Clip face
+ {
+ PX_ASSERT(((planeNormal ^ face.normal()) | edgeDir) > 0);
+ PX_ASSERT(newFaceEdge.e2 == INT32_MIN);
+ newFaceEdge.e2 = clippedEdgeIndex;
+ newEdgeIndex = edges.size();
+ Edge& newEdge = edges.insert();
+ newEdge.m_indexF1 = newFaceIndex;
+ newEdge.m_indexF2 = i;
+ faceEdge.e1 = clippedEdgeIndex;
+ addFace = true;
+ }
+ break;
+ case 0: // Keep this face, and use this edge on the new face
+ clippedEdgeIndex = faceEdge.e1;
+ PX_ASSERT(newFaceEdge.e2 == INT32_MIN);
+ newFaceEdge.e2 = faceEdge.e1;
+ edges[(uint32_t)faceEdge.e1].m_indexF1 = newFaceIndex;
+ addFace = true;
+ break;
+ }
+ }
+
+ if (newEdgeIndex < edges.size())
+ {
+ Edge& newEdge = edges[newEdgeIndex];
+ newEdge.m_indexV0 = vectors.size();
+ newEdge.m_indexV1 = 0xFFFFFFFF; // Will be replaced below
+ Pos& newOrig = *(Pos*)&vectors.insert();
+ if (orig1Index >= 0)
+ {
+ const Edge& e1 = edges[(uint32_t)orig1Index];
+ const Pos& o1 = getV0(e1);
+ const Real d1 = plane.distance(o1);
+ if (orig2Index >= 0)
+ {
+ const Edge& e2 = edges[(uint32_t)orig2Index];
+ const Pos& o2 = getV0(e2);
+ const Real d2 = plane.distance(o2);
+ newOrig = o1 + (d1 / (d1 - d2)) * (o2 - o1);
+
+ }
+ else
+ {
+ const Dir tangent = face.normal() ^ edgeDir;
+ const Real cosTheta = tangent | planeNormal;
+ PX_ASSERT(physx::PxAbs(cosTheta) > EPS);
+ newOrig = o1 - tangent * (d1 / cosTheta);
+ }
+ }
+ else
+ {
+ const Dir tangent = edgeDir ^ face.normal();
+ const Real cosTheta = tangent | planeNormal;
+ PX_ASSERT(physx::PxAbs(cosTheta) > EPS);
+ if (orig2Index >= 0)
+ {
+ const Edge& e2 = edges[(uint32_t)orig2Index];
+ const Pos& o2 = getV0(e2);
+ const Real d2 = plane.distance(o2);
+ newOrig = o2 - tangent * (d2 / cosTheta);
+ }
+ else
+ {
+ PX_ALWAYS_ASSERT(); // Should not have any full planes
+ }
+ }
+ }
+ }
+
+ if (addFace)
+ {
+ faceRemap[newFaceIndex] = faces.size();
+ faceIndices[faces.size()] = newFaceIndex;
+ faceEdges[faces.size()] = newFaceEdge;
+ Plane& newFace = faces.insert();
+ newFace = plane;
+ }
+
+ if (faces.size() == 0)
+ {
+ setToEmptySet();
+ SOFT_ASSERT(testConsistency(100 * distanceTol, (Real)1.0e-8));
+ return;
+ }
+
+ // Replacing edge direction, and re-indexing
+ const uint32_t edgeDirIndex = vectors.size();
+ vectors.pushBack(edgeDir);
+ for (uint32_t i = 0; i < edges.size(); ++i)
+ {
+ edges[i].m_indexV1 = edgeDirIndex;
+ }
+
+ // Eliminate unused edges (and remap face indices in remaining edges)
+ uint8_t* edgeMarks = (uint8_t*)PxAlloca(sizeof(uint8_t) * edges.size());
+ memset(edgeMarks, 0, sizeof(uint8_t)*edges.size());
+
+ for (uint32_t i = 0; i < faces.size(); ++i)
+ {
+ FaceEdge& faceEdge = faceEdges[i];
+ if (faceEdge.e2 >= 0)
+ {
+ edgeMarks[faceEdge.e2] = 1;
+ }
+ if (faceEdge.e1 >= 0)
+ {
+ edgeMarks[faceEdge.e1] = 1;
+ }
+ }
+
+ for (uint32_t i = edges.size(); i--;)
+ {
+ if (edgeMarks[i] == 0)
+ {
+ edges.replaceWithLast(i);
+ }
+ else
+ {
+ Edge& edge = edges[i];
+ PX_ASSERT(faceRemap[edge.m_indexF1] != 0xFFFFFFFF);
+ if (faceRemap[edge.m_indexF1] != 0xFFFFFFFF)
+ {
+ edge.m_indexF1 = faceRemap[edge.m_indexF1];
+ }
+ PX_ASSERT(faceRemap[edge.m_indexF2] != 0xFFFFFFFF);
+ if (faceRemap[edge.m_indexF2] != 0xFFFFFFFF)
+ {
+ edge.m_indexF2 = faceRemap[edge.m_indexF2];
+ }
+ }
+ }
+
+ // Eliminate unused vectors (and remap vertex indices in edges)
+ int32_t* vectorOffsets = (int32_t*)PxAlloca(sizeof(int32_t) * vectors.size());
+ memset(vectorOffsets, 0, sizeof(int32_t)*vectors.size());
+
+ for (uint32_t i = 0; i < edges.size(); ++i)
+ {
+ Edge& edge = edges[i];
+ ++vectorOffsets[edge.m_indexV0];
+ ++vectorOffsets[edge.m_indexV1];
+ }
+
+ uint32_t vectorCount = 0;
+ for (uint32_t i = 0; i < vectors.size(); ++i)
+ {
+ const bool copy = vectorOffsets[i] > 0;
+ vectorOffsets[i] = (int32_t)vectorCount - (int32_t)i;
+ if (copy)
+ {
+ vectors[vectorCount++] = vectors[i];
+ }
+ }
+ vectors.resize(vectorCount);
+
+ for (uint32_t i = 0; i < edges.size(); ++i)
+ {
+ Edge& edge = edges[i];
+ edge.m_indexV0 += vectorOffsets[edge.m_indexV0];
+ edge.m_indexV1 += vectorOffsets[edge.m_indexV1];
+ }
+
+ PX_ASSERT(vectors.size() == edges.size() + 1);
+
+ SOFT_ASSERT(testConsistency(100 * distanceTol, (Real)1.0e-8));
+
+ return;
+ }
+ }
+
+ // The hull will have vertices
+
+ // Compare vertex positions to input plane
+ bool negClassEmpty = true;
+ bool posClassEmpty = true;
+ int8_t* vertexClass = (int8_t*)PxAlloca(vertexCount * sizeof(int8_t));
+ for (uint32_t i = 0; i < vertexCount; ++i)
+ {
+ // The inequalities are set up so that when distanceTol = 0, class 0 is empty and classes -1 and 1 are disjoint
+ const Real dist = plane.distance(getVertex(i));
+ if (dist < -distanceTol)
+ {
+ vertexClass[i] = -1;
+ negClassEmpty = false;
+ }
+ else if (dist < distanceTol)
+ {
+ vertexClass[i] = 0;
+ }
+ else
+ {
+ vertexClass[i] = 1;
+ posClassEmpty = false;
+ }
+ }
+
+ if (vertexCount != 0)
+ {
+ // Also test "points at infinity" for better culling
+ for (uint32_t i = vertexCount; i < vectors.size(); ++i)
+ {
+ if (vectors[i][3] < (Real)0.5)
+ {
+ const Real cosTheta = plane | vectors[i];
+ if (cosTheta < -EPS)
+ {
+ negClassEmpty = false;
+ }
+ else if (cosTheta >= EPS)
+ {
+ posClassEmpty = false;
+ }
+ }
+ }
+
+ if (negClassEmpty)
+ {
+ setToEmptySet();
+ return;
+ }
+ if (posClassEmpty)
+ {
+ SOFT_ASSERT(testConsistency(100 * distanceTol, (Real)1.0e-8));
+ return;
+ }
+ }
+
+ // Intersect new plane against edges.
+ const uint32_t newFaceIndex = faces.size();
+
+ // Potential number of new edges is the number of faces
+ Edge* newEdges = (Edge*)PxAlloca((newFaceIndex + 1) * sizeof(Edge));
+ memset(newEdges, 0xFF, (newFaceIndex + 1)*sizeof(Edge));
+
+ bool addFace = false;
+
+ for (uint32_t i = edges.size(); i--;)
+ {
+ Edge& edge = edges[i];
+ uint32_t clippedVertexIndex = vectors.size();
+ bool createNewEdge = false;
+ switch (getType(edge))
+ {
+ case EdgeType::LineSegment:
+ switch (vertexClass[edge.m_indexV0])
+ {
+ case -1:
+ switch (vertexClass[edge.m_indexV1])
+ {
+ case 1:
+ {
+ // Clip this edge.
+ const Pos& v0 = getV0(edge);
+ const Real d0 = plane.distance(v0);
+ const Pos& v1 = getV1(edge);
+ const Real d1 = plane.distance(v1);
+ const Pos clipVertex = v0 + (d0 / (d0 - d1)) * (v1 - v0);
+ edge.m_indexV1 = clippedVertexIndex;
+ vectors.pushBack(clipVertex);
+ createNewEdge = true;
+ break;
+ }
+ case 0:
+ createNewEdge = true;
+ clippedVertexIndex = edge.m_indexV1;
+ break;
+ }
+ break;
+ case 0:
+ if (vertexClass[edge.m_indexV1] == -1)
+ {
+ createNewEdge = true;
+ clippedVertexIndex = edge.m_indexV0;
+ }
+ else
+ {
+ // Eliminate this edge
+ edges.replaceWithLast(i);
+ }
+ break;
+ case 1:
+ if (vertexClass[edge.m_indexV1] == -1)
+ {
+ // Clip this edge.
+ const Pos& v0 = getV0(edge);
+ const Real d0 = plane.distance(v0);
+ const Pos& v1 = getV1(edge);
+ const Real d1 = plane.distance(v1);
+ const Pos clipVertex = v1 + (d1 / (d1 - d0)) * (v0 - v1);
+ edge.m_indexV0 = clippedVertexIndex;
+ vectors.pushBack(clipVertex);
+ createNewEdge = true;
+ }
+ else
+ {
+ // Eliminate this edge
+ edges.replaceWithLast(i);
+ }
+ break;
+ }
+ break;
+ case EdgeType::Ray:
+ {
+ const Dir& edgeDir = getDir(edge);
+ const Real den = edgeDir | planeNormal;
+ switch (vertexClass[edge.m_indexV0])
+ {
+ case -1:
+ if (den > EPS)
+ {
+ // Clip this edge.
+ const Pos& v0 = getV0(edge);
+ const Real d0 = plane.distance(v0);
+ const Pos clipVertex = v0 - edgeDir * (d0 / den);
+ edge.m_indexV1 = clippedVertexIndex;
+ vectors.pushBack(clipVertex);
+ createNewEdge = true;
+ }
+ break;
+ case 0:
+ if (den < -EPS)
+ {
+ createNewEdge = true;
+ clippedVertexIndex = edge.m_indexV0;
+ }
+ else
+ {
+ // Eliminate this edge
+ edges.replaceWithLast(i);
+ }
+ break;
+ case 1:
+ if (den < -EPS)
+ {
+ // Clip this edge.
+ const Pos& v0 = getV0(edge);
+ const Real d0 = plane.distance(v0);
+ const Pos clipVertex = v0 - edgeDir * (d0 / den);
+ edge.m_indexV0 = clippedVertexIndex;
+ vectors.pushBack(clipVertex);
+ createNewEdge = true;
+ }
+ else
+ {
+ // Eliminate this edge
+ edges.replaceWithLast(i);
+ }
+ break;
+ }
+ }
+ break;
+ case EdgeType::Line:
+ {
+ const Pos& point = getV0(edge);
+ const Dir& edgeDir = getDir(edge);
+ const Real h = plane.distance(point);
+ const Real den = edgeDir | planeNormal;
+ PX_ASSERT(physx::PxAbs(den) >= EPS);
+ // Clip this edge
+ clippedVertexIndex = edge.m_indexV0; // Re-use this vector (will become a vertex)
+ vectors[clippedVertexIndex] = point - edgeDir * (h / den);
+ if (den > 0) // Make sure the ray points in the correct direction
+ {
+ vectors[edge.m_indexV1] *= -(Real)1;
+ nvidia::swap(edge.m_indexF1, edge.m_indexF2);
+ }
+ createNewEdge = true;
+ }
+ break;
+ }
+
+ if (createNewEdge)
+ {
+ if (newEdges[edge.m_indexF1].m_indexV0 == 0xFFFFFFFF)
+ {
+ newEdges[edge.m_indexF1].m_indexV0 = clippedVertexIndex;
+ PX_ASSERT(newEdges[edge.m_indexF1].m_indexV1 == 0xFFFFFFFF);
+ newEdges[edge.m_indexF1].m_indexV1 = vectors.size();
+ Dir newDir = planeNormal ^ faces[edge.m_indexF1].normal();
+ newDir.normalize();
+ if ((newDir | faces[edge.m_indexF2].normal()) > 0)
+ {
+ newDir *= -(Real)1;
+ newEdges[edge.m_indexF1].m_indexF1 = edge.m_indexF1;
+ newEdges[edge.m_indexF1].m_indexF2 = newFaceIndex;
+ }
+ else
+ {
+ newEdges[edge.m_indexF1].m_indexF1 = newFaceIndex;
+ newEdges[edge.m_indexF1].m_indexF2 = edge.m_indexF1;
+ }
+ vectors.pushBack(newDir);
+ addFace = true;
+ }
+ else
+ {
+ PX_ASSERT(newEdges[edge.m_indexF1].m_indexV1 != 0xFFFFFFFF && vectors[newEdges[edge.m_indexF1].m_indexV1][3] == (Real)0);
+ newEdges[edge.m_indexF1].m_indexV1 = clippedVertexIndex;
+ }
+ if (newEdges[edge.m_indexF2].m_indexV0 == 0xFFFFFFFF)
+ {
+ newEdges[edge.m_indexF2].m_indexV0 = clippedVertexIndex;
+ PX_ASSERT(newEdges[edge.m_indexF2].m_indexV1 == 0xFFFFFFFF);
+ newEdges[edge.m_indexF2].m_indexV1 = vectors.size();
+ Dir newDir = faces[edge.m_indexF2].normal() ^ planeNormal;
+ newDir.normalize();
+ if ((newDir | faces[edge.m_indexF1].normal()) > 0)
+ {
+ newDir *= -(Real)1;
+ newEdges[edge.m_indexF2].m_indexF1 = newFaceIndex;
+ newEdges[edge.m_indexF2].m_indexF2 = edge.m_indexF2;
+ }
+ else
+ {
+ newEdges[edge.m_indexF2].m_indexF1 = edge.m_indexF2;
+ newEdges[edge.m_indexF2].m_indexF2 = newFaceIndex;
+ }
+ vectors.pushBack(newDir);
+ addFace = true;
+ }
+ else
+ {
+ PX_ASSERT(newEdges[edge.m_indexF2].m_indexV1 != 0xFFFFFFFF && vectors[newEdges[edge.m_indexF2].m_indexV1][3] == (Real)0);
+ newEdges[edge.m_indexF2].m_indexV1 = clippedVertexIndex;
+ }
+ }
+ }
+
+ if (addFace)
+ {
+ Plane& newFace = faces.insert();
+ newFace = plane;
+ }
+
+ for (uint32_t i = 0; i < faces.size(); ++i)
+ {
+ Edge& newEdge = newEdges[i];
+ if (newEdge.m_indexV0 != 0xFFFFFFFF)
+ {
+ if (newEdge.m_indexV0 != newEdge.m_indexV1) // Skip split vertices
+ {
+ edges.pushBack(newEdge);
+ }
+ }
+ }
+
+ // Now eliminate unused faces and vectors
+ int32_t* vectorOffsets = (int32_t*)PxAlloca(sizeof(int32_t) * vectors.size());
+ int32_t* faceOffsets = (int32_t*)PxAlloca(sizeof(int32_t) * faces.size());
+ memset(vectorOffsets, 0, sizeof(int32_t)*vectors.size());
+ memset(faceOffsets, 0, sizeof(int32_t)*faces.size());
+
+ for (uint32_t i = 0; i < edges.size(); ++i)
+ {
+ Edge& edge = edges[i];
+ ++vectorOffsets[edge.m_indexV0];
+ ++vectorOffsets[edge.m_indexV1];
+ ++faceOffsets[edge.m_indexF1];
+ ++faceOffsets[edge.m_indexF2];
+ }
+
+ uint32_t vectorCount = 0;
+ for (uint32_t i = 0; i < vectors.size(); ++i)
+ {
+ const bool copy = vectorOffsets[i] > 0;
+ vectorOffsets[i] = (int32_t)vectorCount - (int32_t)i;
+ if (copy)
+ {
+ vectors[vectorCount++] = vectors[i];
+ }
+ }
+ vectors.resize(vectorCount);
+
+ uint32_t faceCount = 0;
+ for (uint32_t i = 0; i < faces.size(); ++i)
+ {
+ const bool copy = faceOffsets[i] > 0;
+ faceOffsets[i] = (int32_t)faceCount - (int32_t)i;
+ if (copy)
+ {
+ faces[faceCount++] = faces[i];
+ }
+ }
+ faces.resize(faceCount);
+
+ PX_ASSERT(faceCount != 1); // Single faces would have been handled above.
+
+ for (uint32_t i = 0; i < edges.size(); ++i)
+ {
+ Edge& edge = edges[i];
+ edge.m_indexV0 += vectorOffsets[edge.m_indexV0];
+ edge.m_indexV1 += vectorOffsets[edge.m_indexV1];
+ edge.m_indexF1 += faceOffsets[edge.m_indexF1];
+ edge.m_indexF2 += faceOffsets[edge.m_indexF2];
+ }
+
+ if (faces.size() == 0)
+ {
+ setToEmptySet();
+ }
+
+ // Now sort vectors, vertices before directions. We will need to keep track of the mapping to re-index the edges.
+ uint32_t* vectorIndices = (uint32_t*)PxAlloca(sizeof(uint32_t) * vectors.size());
+ uint32_t* vectorRemap = (uint32_t*)PxAlloca(sizeof(uint32_t) * vectors.size());
+ for (uint32_t i = 0; i < vectors.size(); ++i)
+ {
+ vectorIndices[i] = vectorRemap[i] = i;
+ }
+ int32_t firstDirIndex = -1;
+ int32_t lastPosIndex = (int32_t)vectors.size();
+ for (;;)
+ {
+ // Correct this
+ while (++firstDirIndex < (int32_t)vectors.size())
+ {
+ if (vectors[(uint32_t)firstDirIndex][3] < (Real)0.5) // Should be 0 or 1, but in case some f.p. inexactness creeps in...
+ {
+ break;
+ }
+ }
+ while (--lastPosIndex >= 0)
+ {
+ if (vectors[(uint32_t)lastPosIndex][3] >= (Real)0.5) // Should be 0 or 1, but in case some f.p. inexactness creeps in...
+ {
+ break;
+ }
+ }
+ if (firstDirIndex > lastPosIndex)
+ {
+ break; // All's good
+ }
+ // Fix this - swap vectors, and update map
+ nvidia::swap(vectors[(uint32_t)firstDirIndex], vectors[(uint32_t)lastPosIndex]);
+ nvidia::swap(vectorIndices[(uint32_t)firstDirIndex], vectorIndices[(uint32_t)lastPosIndex]);
+ vectorRemap[vectorIndices[(uint32_t)firstDirIndex]] = (uint32_t)firstDirIndex;
+ vectorRemap[vectorIndices[(uint32_t)lastPosIndex]] = (uint32_t)lastPosIndex;
+ }
+ vertexCount = (uint32_t)firstDirIndex; // Correct vertex count
+
+ // Correct edge indices
+ for (uint32_t i = 0; i < edges.size(); ++i)
+ {
+ Edge& edge = edges[i];
+ edge.m_indexV0 = vectorRemap[edge.m_indexV0];
+ edge.m_indexV1 = vectorRemap[edge.m_indexV1];
+ }
+
+ SOFT_ASSERT(testConsistency(100 * distanceTol, (Real)1.0e-8));
+}
+
+// Compute the volume enclosed by this hull.
+// Returns 0 for the empty set and MAX_REAL for an unbounded hull (no edges,
+// or direction vectors remaining beyond the vertex list).  For the bounded
+// case the volume is accumulated as triple products (6x tetrahedron volumes)
+// of each face's fan triangulation, taken relative to the vertex centroid
+// for better floating-point accuracy.
+ApexCSG::Real ApexCSG::Hull::calculateVolume() const
+{
+	if (isEmptySet())
+	{
+		return (Real)0;
+	}
+
+	// Unbounded: vectors past vertexCount are directions (rays/lines)
+	if (edges.size() == 0 || vectors.size() > vertexCount)
+	{
+		return MAX_REAL;
+	}
+
+	// Volume is finite
+	PX_ASSERT(vertexCount != 0);
+
+	// Work relative to an internal point, for better accuracy
+	Vec4Real centroid((Real)0);
+	for (uint32_t i = 0; i < vertexCount; ++i)
+	{
+		centroid += vectors[i];
+	}
+	centroid /= (Real)vertexCount;
+
+	// Create a doubled edge list (each edge appears once per adjacent face, oriented)
+	const uint32_t edgeCount = edges.size();
+	const uint32_t edgeListSize = 2 * edgeCount;
+	Edge* edgeList = (Edge*)PxAlloca(edgeListSize * sizeof(Edge));
+	for (uint32_t i = 0; i < edgeCount; ++i)
+	{
+		edgeList[i] = edges[i];
+		edgeList[i + edgeCount] = edges[i];
+		nvidia::swap(edgeList[i + edgeCount].m_indexF1, edgeList[i + edgeCount].m_indexF2);
+		nvidia::swap(edgeList[i + edgeCount].m_indexV0, edgeList[i + edgeCount].m_indexV1);
+	}
+
+	// Sort edgeList by first face index
+	qsort(edgeList, edgeListSize, sizeof(Edge), compareEdgeFirstFaceIndices);
+
+	// A scratchpad for edge vertex locations
+	uint32_t* vertex0Locations = (uint32_t*)PxAlloca(vertexCount * sizeof(uint32_t));
+	memset(vertex0Locations, 0xFF, vertexCount * sizeof(uint32_t));
+
+	Real volume = 0;
+
+	// Edges are grouped by first face index - each group describes the polygonal face.
+	uint32_t groupStop = 0;
+	do
+	{
+		const uint32_t groupStart = groupStop;
+		const uint32_t faceIndex = edgeList[groupStart].m_indexF1;
+		while (++groupStop < edgeListSize && edgeList[groupStop].m_indexF1 == faceIndex) {}
+		// Evaluate group
+		if (groupStop - groupStart >= 3)
+		{
+			// Mark first vertex locations within group
+			for (uint32_t i = groupStart; i < groupStop; ++i)
+			{
+				Edge& edge = edgeList[i];
+				SOFT_ASSERT(vertex0Locations[edge.m_indexV0] == 0xFFFFFFFF);
+				vertex0Locations[edge.m_indexV0] = i;
+			}
+			// Walk the face's edge loop via the vertex->edge lookup, fanning
+			// tetrahedra from the group's first vertex and the centroid.
+			const Dir d0 = vectors[edgeList[groupStart].m_indexV0] - centroid;
+			uint32_t i1 = edgeList[groupStart].m_indexV1;
+			Dir d1 = vectors[i1] - centroid;
+			const uint32_t tetCount = groupStop - groupStart - 2;
+			for (uint32_t i = 0; i < tetCount; ++i)
+			{
+				const uint32_t nextEdgeIndex = vertex0Locations[i1];
+				SOFT_ASSERT(nextEdgeIndex != 0xFFFFFFFF);
+				if (nextEdgeIndex == 0xFFFFFFFF)
+				{
+					break;
+				}
+				const uint32_t i2 = edgeList[nextEdgeIndex].m_indexV1;
+				const Dir d2 = vectors[i2] - centroid;
+				const Real tripleProduct = d0 | (d1 ^ d2);
+				SOFT_ASSERT(tripleProduct > -EPS_REAL);
+				volume += tripleProduct; // 6 times volume
+				i1 = i2;
+				d1 = d2;
+			}
+			// Unmark first vertex locations
+			for (uint32_t i = groupStart; i < groupStop; ++i)
+			{
+				Edge& edge = edgeList[i];
+				vertex0Locations[edge.m_indexV0] = 0xFFFFFFFF;
+			}
+		}
+	}
+	while (groupStop < edgeListSize);
+
+	volume *= (Real)0.166666666666666667;	// 1/6: triple products are 6x tetrahedron volumes
+
+	return volume;
+}
+
+// Write this hull to 'stream': face planes, edges, vectors, then the vertex
+// count and the all-space/empty-set flags.  The field order defines the
+// on-disk format and must mirror deserialize() exactly.
+void ApexCSG::Hull::serialize(physx::PxFileBuf& stream) const
+{
+	apex::serialize(stream, faces);
+	apex::serialize(stream, edges);
+	apex::serialize(stream, vectors);
+	stream << vertexCount;
+	stream << allSpace;
+	stream << emptySet;
+}
+
+// Read this hull from 'stream'.  Field order must mirror serialize().
+// 'version' is forwarded to the array deserializers to handle older formats.
+void ApexCSG::Hull::deserialize(physx::PxFileBuf& stream, uint32_t version)
+{
+	setToAllSpace();	// Reset to a known state before overwriting members
+
+	apex::deserialize(stream, version, faces);
+	apex::deserialize(stream, version, edges);
+	apex::deserialize(stream, version, vectors);
+	stream >> vertexCount;
+	stream >> allSpace;
+	stream >> emptySet;
+}
+
+// Validate the hull's combinatorial and geometric invariants, within the given
+// distance and angle tolerances:
+//   - no zero-length or self-adjacent (same face on both sides) edges,
+//   - every vertex is an endpoint of at least 3 edges,
+//   - every face borders at least one edge,
+//   - edge endpoints/origins lie on both adjacent faces, and ray/line
+//     directions are parallel to them,
+//   - Euler's formula F - E + V == 2 (== 1 with a vertex at infinity).
+// Reports the first violation through the APEX error callback and returns
+// false; returns true when all checks pass.
+bool ApexCSG::Hull::testConsistency(ApexCSG::Real distanceTol, ApexCSG::Real angleTol) const
+{
+	// Detect ray edges: finite origin (index < vertexCount), direction endpoint
+	bool halfInfiniteEdges = false;
+	for (uint32_t j = 0; j < edges.size(); ++j)
+	{
+		if (edges[j].m_indexV0 < vertexCount && edges[j].m_indexV1 >= vertexCount)
+		{
+			halfInfiniteEdges = true;
+			break;
+		}
+	}
+
+	// Vertex incidence checks
+	for (uint32_t i = 0; i < vertexCount; ++i)
+	{
+		uint32_t count = 0;
+		for (uint32_t j = 0; j < edges.size(); ++j)
+		{
+			if (edges[j].m_indexV0 == i)
+			{
+				++count;
+			}
+			if (edges[j].m_indexV1 == i)
+			{
+				++count;
+			}
+			// Zero-length (degenerate) edge check (note: independent of i)
+			if (edges[j].m_indexV1 < vertexCount && edges[j].m_indexV0 == edges[j].m_indexV1)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: 0-length edge found.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+		}
+		if (count < 3)
+		{
+			GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: vertex connected to fewer than 3 edges.", __FILE__, __LINE__);
+			PX_ALWAYS_ASSERT();
+			return false;
+		}
+	}
+
+	// Edgeless hulls: allowed only as all-space/half-space/slabs (< 3 faces) with no vertices
+	if (edges.size() == 0)
+	{
+		if (faces.size() >= 3)
+		{
+			GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: face count 3 or greater, but with no edges.", __FILE__, __LINE__);
+			PX_ALWAYS_ASSERT();
+			return false;
+		}
+		if (vertexCount > 0)
+		{
+			GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: hull has vertices but no edges.", __FILE__, __LINE__);
+			PX_ALWAYS_ASSERT();
+			return false;
+		}
+		return true;
+	}
+
+	// Face incidence checks; a face bordered by exactly one edge is half-infinite
+	bool halfInfiniteFaces = false;
+
+	for (uint32_t i = 0; i < faces.size(); ++i)
+	{
+		uint32_t count = 0;
+		for (uint32_t j = 0; j < edges.size(); ++j)
+		{
+			if (edges[j].m_indexF1 == i)
+			{
+				++count;
+			}
+			if (edges[j].m_indexF2 == i)
+			{
+				++count;
+			}
+			if (edges[j].m_indexF1 == edges[j].m_indexF2)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge connecting face to itself.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+		}
+		if (count == 0)
+		{
+			GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: face has no edges", __FILE__, __LINE__);
+			PX_ALWAYS_ASSERT();
+			return false;
+		}
+		if (count == 1)
+		{
+			halfInfiniteFaces = true;
+		}
+	}
+
+	// Geometric checks: each edge must lie on both of its adjacent faces
+	for (uint32_t i = 0; i < edges.size(); ++i)
+	{
+		Real d;
+		const Edge& edge = edges[i];
+		if (edge.m_indexV1 >= vertexCount)
+		{
+			// Edge is a line or ray
+			d = faces[edge.m_indexF1].distance(getV0(edge));
+			if (physx::PxAbs(d) > distanceTol)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge line/ray origin not on face.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+			d = faces[edge.m_indexF1].normal() | getDir(edge);
+			if (physx::PxAbs(d) > angleTol)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge line/ray direction not perpendicular to face.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+			d = faces[edge.m_indexF2].distance(getV0(edge));
+			if (physx::PxAbs(d) > distanceTol)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge line/ray origin not on face.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+			d = faces[edge.m_indexF2].normal() | getDir(edge);
+			if (physx::PxAbs(d) > angleTol)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge line/ray direction not perpendicular to face.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+		}
+		else
+		{
+			// Edge is a line segment
+			if (edge.m_indexV0 >= vertexCount)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge has a line/ray origin but a real destination point.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+			d = faces[edge.m_indexF1].distance(getV0(edge));
+			if (physx::PxAbs(d) > distanceTol)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge vertex 0 not on face.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+			d = faces[edge.m_indexF1].distance(getV1(edge));
+			if (physx::PxAbs(d) > distanceTol)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge vertex 1 not on face.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+			d = faces[edge.m_indexF2].distance(getV0(edge));
+			if (physx::PxAbs(d) > distanceTol)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge vertex 0 not on face.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+			d = faces[edge.m_indexF2].distance(getV1(edge));
+			if (physx::PxAbs(d) > distanceTol)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: edge vertex 1 not on face.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+		}
+	}
+
+	// Topology counts.  vertexCount == 0 with edges means a prism-like hull:
+	// all edges must be parallel lines.
+	if (vertexCount == 0)
+	{
+		if (!halfInfiniteFaces)
+		{
+			if (faces.size() != edges.size())
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: hull has edges but no vertices, with no half-infinite faces. Face count should equal edge count but does not.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+		}
+		else
+		{
+			if (faces.size() != edges.size() + 1)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: hull has edges but no vertices, with half-infinite faces. Face count should be one more than edge count but is not.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+		}
+		// All edge directions must be parallel (zero cross product with edge 0)
+		const Edge& edge0 = edges[0];
+		for (uint32_t i = 1; i < edges.size(); ++i)
+		{
+			const Edge& edge = edges[i];
+			Dir e = getDir(edge0) ^ getDir(edge);
+			if ((e | e) > (Real)EPS * EPS)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: prism edges not all facing the same direction.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+		}
+	}
+	else
+	{
+		// Euler characteristic: F - E + V
+		const uint32_t c = faces.size() - edges.size() + vertexCount;
+		if (!halfInfiniteEdges)
+		{
+			if (c != 2)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: bounded hull with faces, vertices and edges does not obey Euler's formula.", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+		}
+		else
+		{
+			if (c != 1)
+			{
+				GetApexSDK()->getErrorCallback()->reportError(physx::PxErrorCode::eDEBUG_WARNING, "Hull::testConsistency: unbounded hull with faces, vertices and edges does not obey Euler's formula (with a vertex at infinity).", __FILE__, __LINE__);
+				PX_ALWAYS_ASSERT();
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+#endif \ No newline at end of file
diff --git a/APEX_1.4/shared/internal/src/authoring/ApexCSGMeshCleaning.cpp b/APEX_1.4/shared/internal/src/authoring/ApexCSGMeshCleaning.cpp
new file mode 100644
index 00000000..1cd80993
--- /dev/null
+++ b/APEX_1.4/shared/internal/src/authoring/ApexCSGMeshCleaning.cpp
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#include "ApexUsingNamespace.h"
+#include "authoring/ApexCSGDefs.h"
+#include "PxErrorCallback.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+#pragma warning(disable:4505)
+
+namespace ApexCSG
+{
+
+// Returns true iff 'point' lies (within tolerance) on the ray starting at
+// 'lineOrig' with displacement 'lineDisp', and not behind the ray origin.
+// 'distanceTol2' is the squared distance tolerance.
+PX_INLINE bool
+pointOnRay2D(const Vec2Real& point, const Vec2Real& lineOrig, const Vec2Real& lineDisp, Real distanceTol2)
+{
+	const Vec2Real offset = point - lineOrig;
+	const Real rayLen2 = lineDisp | lineDisp;
+	const Real along = offset | lineDisp;
+	const Real scaledTol = distanceTol2 * rayLen2;
+	// First term: point must not lie behind the origin.  Second term:
+	// |offset|^2*|lineDisp|^2 - (offset.lineDisp)^2 is the squared cross
+	// product, i.e. the squared perpendicular distance scaled by rayLen2.
+	return along >= scaledTol && (offset | offset) * rayLen2 - square(along) <= scaledTol;
+}
+
+// Tests whether the diagonal from edge->v[0] to otherEdge->v[0] crosses any
+// edge of 'loop'.  Loop edges incident to either diagonal endpoint are
+// skipped.  Returns true if the diagonal is unobstructed within this loop.
+PX_INLINE bool
+diagonalIsValidInLoop(const LinkedEdge2D* edge, const LinkedEdge2D* otherEdge, const LinkedEdge2D* loop)
+{
+	const Vec2Real& diagonalStart = edge->v[0];
+	const Vec2Real& diagonalEnd = otherEdge->v[0];
+	const Vec2Real diagonalDisp = diagonalEnd - diagonalStart;
+
+	const LinkedEdge2D* loopEdge = loop;
+	LinkedEdge2D* loopEdgeNext;	// Assigned at the top of each iteration (was a redundant dead store before the loop)
+	do
+	{
+		loopEdgeNext = loopEdge->getAdj(1);
+		if (loopEdge == edge || loopEdgeNext == edge || loopEdge == otherEdge || loopEdgeNext == otherEdge)
+		{
+			continue;	// Skip loop edges touching a diagonal endpoint
+		}
+		const Vec2Real& loopEdgeStart = loopEdge->v[0];
+		const Vec2Real& loopEdgeEnd = loopEdge->v[1];
+		const Vec2Real loopEdgeDisp = loopEdgeEnd - loopEdgeStart;	// By value: the subtraction yields a temporary
+		// Segment-segment intersection: each segment's endpoints must straddle
+		// the other segment's supporting line (cross products of opposite sign)
+		if ((loopEdgeDisp ^(diagonalStart - loopEdgeStart)) * (loopEdgeDisp ^(diagonalEnd - loopEdgeStart)) <= 0 &&
+		        (diagonalDisp ^(loopEdgeStart - diagonalStart)) * (diagonalDisp ^(loopEdgeEnd - diagonalStart)) <= 0)
+		{
+			// Edge intersection
+			return false;
+		}
+	}
+	while ((loopEdge = loopEdgeNext) != loop);
+
+	return true;
+}
+
+// A diagonal is usable only when it is unobstructed in every loop.
+// Stops at the first loop that rejects it.
+PX_INLINE bool
+diagonalIsValid(const LinkedEdge2D* edge, const LinkedEdge2D* otherEdge, const physx::Array<LinkedEdge2D*>& loops)
+{
+	bool valid = true;
+	for (uint32_t loopNum = 0; valid && loopNum < loops.size(); ++loopNum)
+	{
+		valid = diagonalIsValidInLoop(edge, otherEdge, loops[loopNum]);
+	}
+	return valid;
+}
+
+// Returns a 3-bit mask: bit k is set iff 'point' lies on the non-negative
+// side of half-plane k, where half-plane k is defined by normal nk and
+// offset dk (i.e. (point|nk) + dk >= 0).
+PX_INLINE uint32_t
+pointCondition(const Vec2Real& point, const Vec2Real& n0, Real d0, const Vec2Real& n1, Real d1, const Vec2Real& n2, Real d2)
+{
+	const uint32_t bit0 = (uint32_t)((point|n0) + d0 >= (Real)0);
+	const uint32_t bit1 = (uint32_t)((point|n1) + d1 >= (Real)0);
+	const uint32_t bit2 = (uint32_t)((point|n2) + d2 >= (Real)0);
+	return bit0 | (bit1 << 1) | (bit2 << 2);
+}
+
+// Ear-clipping triangulation of the single edge loop 'loop'.  Emitted
+// triangles are appended to 'planeVertices' as vertex triples, and every loop
+// link is returned to 'linkedEdgePool'.  Returns false if the loop has more
+// than three vertices but no ear can be found (e.g. a badly wound or
+// self-intersecting loop); degenerate 1- and 2-vertex loops succeed trivially.
+PX_INLINE bool
+triangulate(LinkedEdge2D* loop, physx::Array<Vec2Real>& planeVertices, nvidia::Pool<LinkedEdge2D>& linkedEdgePool)
+{
+	// Trivial cases
+	LinkedEdge2D* edge0 = loop;
+	LinkedEdge2D* edge1 = edge0->getAdj(1);
+	if (edge1 == edge0)
+	{
+		// Single vertex !?
+		linkedEdgePool.replace(edge0);
+		return true;
+	}
+
+	LinkedEdge2D* edge2 = edge1->getAdj(1);
+	if (edge2 == edge0)
+	{
+		// Degenerate
+		linkedEdgePool.replace(edge0);
+		linkedEdgePool.replace(edge1);
+		return true;
+	}
+
+	for(;;)
+	{
+		LinkedEdge2D* edge3 = edge2->getAdj(1);
+		if (edge3 == edge0)
+		{
+			// Single triangle - we're done
+			planeVertices.pushBack(edge0->v[0]);
+			planeVertices.pushBack(edge1->v[0]);
+			planeVertices.pushBack(edge2->v[0]);
+			linkedEdgePool.replace(edge0);
+			linkedEdgePool.replace(edge1);
+			linkedEdgePool.replace(edge2);
+			break;
+		}
+
+		// Polygon has more than three vertices. Find an ear
+		bool earFound = false;
+		do
+		{
+			Vec2Real e01 = edge1->v[0] - edge0->v[0];
+			Vec2Real e12 = edge2->v[0] - edge1->v[0];
+			if ((e01 ^ e12) > (Real)0)
+			{
+				// Convex, see if this is an ear
+				// Build the three half-plane (normal, offset) pairs bounding
+				// the candidate ear triangle (edge0->v[0], edge1->v[0], edge2->v[0])
+				const Vec2Real n01 = e01.perp();
+				const Real d01 = -(n01 | edge0->v[0]);
+				const Vec2Real n12 = e12.perp();
+				const Real d12 = -(n12 | edge1->v[0]);
+				const Vec2Real n20 = Vec2Real(edge0->v[0] - edge2->v[0]).perp();
+				const Real d20 = -(n20 | edge2->v[0]);
+				LinkedEdge2D* eTest = edge3;
+				bool edgeIntersectsTriangle = false; // Until proven otherwise
+				do
+				{
+					// NOTE(review): any nonzero condition bit is treated as "inside
+					// the candidate ear"; this relies on the orientation convention
+					// of Vec2Real::perp() - confirm a strict all-bits test isn't intended.
+					if (pointCondition(eTest->v[0], n01, d01, n12, d12, n20, d20))
+					{
+						// Point is inside the triangle
+						edgeIntersectsTriangle = true;
+						break;
+					}
+				} while ((eTest = eTest->getAdj(1)) != edge0);
+				if (!edgeIntersectsTriangle)
+				{
+					// Ear found
+					planeVertices.pushBack(edge0->v[0]);
+					planeVertices.pushBack(edge1->v[0]);
+					planeVertices.pushBack(edge2->v[0]);
+					edge0->v[1] = edge0->v[0]; // So that the next and previous edges will join up to the correct location after remove() is called()
+					edge0->remove();
+					linkedEdgePool.replace(edge0);
+					if (edge0 == loop)
+					{
+						loop = edge1;
+					}
+					edge0 = edge1;
+					edge1 = edge2;
+					edge2 = edge3;
+					earFound = true;
+					break;
+				}
+			}
+
+			// Advance the sliding window of three consecutive vertices
+			edge0 = edge1;
+			edge1 = edge2;
+			edge2 = edge3;
+			edge3 = edge3->getAdj(1);
+		} while (edge0 != loop);
+
+		if (!earFound)
+		{
+			// Something went wrong
+			return false;
+		}
+	}
+
+	return true;
+}
+
+// Merge a coplanar triangle soup (vertex triples in 'planeVertices'):
+//   1) build a 3-edge linked loop per triangle,
+//   2) fuse loops along (near-)coincident anti-parallel edges,
+//   3) remove cancelling and collinear edge runs, labelling the surviving loops,
+//   4) join the remaining loops into one via shortest valid diagonals,
+//   5) re-triangulate the single loop back into 'planeVertices'.
+// Returns false if loop merging or triangulation fails (loops are cleaned up);
+// on success 'planeVertices' holds the new triangulation (possibly empty).
+static bool
+mergeTriangles2D(physx::Array<Vec2Real>& planeVertices, nvidia::Pool<LinkedEdge2D>& linkedEdgePool, Real distanceTol)
+{
+	// Create a set of linked edges for each triangle. The initial set will consist of nothing but three-edge loops (the triangles).
+	physx::Array<LinkedEdge2D*> edges;
+	edges.reserve(planeVertices.size());
+	PX_ASSERT((planeVertices.size() % 3) == 0);
+	for (uint32_t i = 0; i < planeVertices.size(); i += 3)
+	{
+		LinkedEdge2D* v0 = linkedEdgePool.borrow();
+		edges.pushBack(v0);
+		LinkedEdge2D* v1 = linkedEdgePool.borrow();
+		edges.pushBack(v1);
+		LinkedEdge2D* v2 = linkedEdgePool.borrow();
+		edges.pushBack(v2);
+		v0->setAdj(1, v1);
+		v1->setAdj(1, v2);
+		v0->v[0] = planeVertices[i];
+		v0->v[1] = planeVertices[i + 1];
+		v1->v[0] = planeVertices[i + 1];
+		v1->v[1] = planeVertices[i + 2];
+		v2->v[0] = planeVertices[i + 2];
+		v2->v[1] = planeVertices[i];
+	}
+
+	const Real distanceTol2 = distanceTol * distanceTol;
+
+	// Find all edge overlaps and merge loops
+	for (uint32_t i = 0; i < edges.size(); ++i)
+	{
+		LinkedEdge2D* edge = edges[i];
+		const Vec2Real edgeDisp = edge->v[1] - edge->v[0];
+		for (uint32_t j = i + 1; j < edges.size(); ++j)
+		{
+			LinkedEdge2D* otherEdge = edges[j];
+			const Vec2Real otherEdgeDisp = otherEdge->v[1] - otherEdge->v[0];
+			if ((otherEdgeDisp | edgeDisp) < 0)
+			{
+				// Anti-parallel: test (near-)coincidence with either edge as reference
+				if (pointOnRay2D(otherEdge->v[0], edge->v[0], edgeDisp, distanceTol2) && pointOnRay2D(otherEdge->v[1], edge->v[1], -edgeDisp, distanceTol2))
+				{
+					edge->setAdj(0, otherEdge->getAdj(0));
+				}
+				else
+				if (pointOnRay2D(edge->v[0], otherEdge->v[0], otherEdgeDisp, distanceTol2) && pointOnRay2D(edge->v[1], otherEdge->v[1], -otherEdgeDisp, distanceTol2))
+				{
+					otherEdge->setAdj(0, edge->getAdj(0));
+				}
+			}
+		}
+	}
+
+	// Clean further by removing adjacent collinear edges. Also label loops.
+	physx::Array<LinkedEdge2D*> loops;
+	int32_t loopID = 0;
+	for (uint32_t i = edges.size(); i--;)
+	{
+		LinkedEdge2D* edge = edges[i];
+		if (edge == NULL || edge->isSolitary())
+		{
+			// Detached by an earlier removal - return to the pool
+			if (edge != NULL)
+			{
+				linkedEdgePool.replace(edge);
+			}
+			edges.replaceWithLast(i);
+			continue;
+		}
+		if (edge->loopID < 0)
+		{
+			// Unlabelled: walk the loop, labelling and cleaning as we go
+			do
+			{
+				edge->loopID = loopID;
+				LinkedEdge2D* prev = edge->getAdj(0);
+				LinkedEdge2D* next = edge->getAdj(1);
+				const Vec2Real edgeDisp = edge->v[1] - edge->v[0];
+				const Vec2Real prevDisp = prev->v[1] - prev->v[0];
+				const Real sumDisp2 = (prevDisp + edgeDisp).lengthSquared();
+				if (sumDisp2 < distanceTol2)
+				{
+					// These two edges cancel. Eliminate both.
+					next = edge->getAdj(1);
+					prev->v[1] = prev->v[0];
+					edge->v[0] = prev->v[1];
+					prev->remove();
+					edge->remove();
+					if (next == prev)
+					{
+						// Loops is empty
+						edge = NULL;
+						break; // Done
+					}
+				}
+				else
+				{
+					// Squared perpendicular deviation of the shared vertex from the combined edge
+					const Real h2 = square(prevDisp ^ edgeDisp) / sumDisp2;
+					if (h2 < distanceTol2)
+					{
+						// Collinear, remove
+						prev->v[1] = edge->v[0] = edge->v[1];
+						edge->remove();
+					}
+				}
+				edge = next;
+			}
+			while (edge->loopID < 0);
+			if (edge != NULL)
+			{
+				++loopID;
+				loops.pushBack(edge);
+			}
+		}
+	}
+
+	if (loops.size() == 0)
+	{
+		// No loops, done
+		planeVertices.reset();
+		return true;
+	}
+
+	// The methods employed below are not optimal in time. But the majority of cases will be simple polygons
+	// with no holes and a small number of vertices. So an optimal algorithm probably isn't worth implementing.
+
+	// Merge all loops into one by finding diagonals to join them
+	while (loops.size() > 1)
+	{
+		LinkedEdge2D* loop = loops.back();
+		LinkedEdge2D* bestEdge = NULL;
+		LinkedEdge2D* bestOtherEdge = NULL;
+		uint32_t bestOtherLoopIndex = 0xFFFFFFFF;
+		Real minDist2 = MAX_REAL;
+		// Search every (vertex, vertex) pair between the last loop and each other loop
+		for (uint32_t i = loops.size() - 1; i--;)
+		{
+			LinkedEdge2D* otherLoop = loops[i];
+			LinkedEdge2D* otherEdge = otherLoop;
+			do
+			{
+				LinkedEdge2D* edge = loop;
+				do
+				{
+					if (diagonalIsValid(edge, otherEdge, loops))
+					{
+						const Real dist2 = (edge->v[0] - otherEdge->v[0]).lengthSquared();
+						if (dist2 < minDist2)
+						{
+							bestEdge = edge;
+							bestOtherEdge = otherEdge;
+							bestOtherLoopIndex = i;
+							minDist2 = dist2;
+						}
+					}
+				}
+				while ((edge = edge->getAdj(1)) != loop);
+			}
+			while ((otherEdge = otherEdge->getAdj(1)) != otherLoop);
+		}
+
+		if (bestOtherLoopIndex == 0xFFFFFFFF)
+		{
+			// No valid diagonal exists - clean up loops and fail
+			for (uint32_t i = 0; i < loops.size(); ++i)
+			{
+				LinkedEdge2D* edge = loops[i];
+				bool done = false;
+				do
+				{
+					done = edge->isSolitary();
+					LinkedEdge2D* next = edge->getAdj(1);
+					linkedEdgePool.replace(edge); // This also removes the link from the loop
+					edge = next;
+				}
+				while (!done);
+			}
+			return false;
+		}
+
+		// Create diagonal loop with correct endpoints
+		LinkedEdge2D* diagonal = linkedEdgePool.borrow();
+		LinkedEdge2D* reciprocal = linkedEdgePool.borrow();
+		diagonal->setAdj(1, reciprocal);
+		diagonal->v[1] = reciprocal->v[0] = bestEdge->v[0];
+		diagonal->v[0] = reciprocal->v[1] = bestOtherEdge->v[0];
+
+		// Insert diagonal loop, merging loops.back() with loops[i]
+		diagonal->setAdj(1, bestEdge);
+		reciprocal->setAdj(1, bestOtherEdge);
+		loops.popBack();
+	}
+
+	// Erase planeVertices, will reuse.
+	planeVertices.reset();
+
+	// We have one loop. Triangulate.
+	return triangulate(loops[0], planeVertices, linkedEdgePool);
+}
+
+// Merge and re-triangulate the clipped triangles lying on 'plane' (the group
+// info[0..triangleCount), sorted by original triangle index).  Triangles are
+// grouped by equal interpolation frames (and submesh), projected into a 2D
+// basis on the plane, merged via mergeTriangles2D, transformed back through
+// 'BSPToMeshTM' and appended to 'cleanedMesh'.  If merging fails or does not
+// reduce the triangle count, the original clipped triangles are appended
+// unchanged.
+static void
+mergeTriangles(physx::Array<nvidia::ExplicitRenderTriangle>& cleanedMesh, const Triangle* triangles, const Interpolator* frames, ClippedTriangleInfo* info, uint32_t triangleCount,
+               const physx::Array<Triangle>& originalTriangles, const Plane& plane, nvidia::Pool<LinkedEdge2D>& linkedEdgePool, Real distanceTol, const Mat4Real& BSPToMeshTM)
+{
+	if (triangleCount == 0)
+	{
+		return;
+	}
+
+	// Range of original-triangle indices covered by this plane group
+	const uint32_t originalTriangleIndexStart = info[0].originalTriangleIndex;
+	const uint32_t originalTriangleIndexCount = info[triangleCount - 1].originalTriangleIndex - originalTriangleIndexStart + 1;
+
+	physx::Array<uint32_t> originalTriangleGroupStarts;
+	nvidia::createIndexStartLookup(originalTriangleGroupStarts, (int32_t)originalTriangleIndexStart, originalTriangleIndexCount,
+	                               (int32_t*)&info->originalTriangleIndex, triangleCount, sizeof(ClippedTriangleInfo));
+
+	// Now group equal reference frames, and transform into 2D
+	physx::Array<Vec2Real> planeVertices;
+	nvidia::IndexBank<uint32_t> frameIndices;
+	frameIndices.reserve(originalTriangleIndexCount);
+	frameIndices.lockCapacity(true);
+	while (frameIndices.freeCount())
+	{
+		planeVertices.reset(); // Erase, we'll reuse this array
+		uint32_t seedFrameIndex = 0;
+		frameIndices.useNextFree(seedFrameIndex);
+		const Interpolator& seedFrame = frames[seedFrameIndex + originalTriangleIndexStart];
+		const Triangle& seedOriginalTri = originalTriangles[originalTriangleIndexStart + seedFrameIndex];
+//	const Plane plane(seedOriginalTri.normal, (Real)0.333333333333333333333 * (seedOriginalTri.vertices[0] + seedOriginalTri.vertices[1] + seedOriginalTri.vertices[2]));
+		// Build an orthonormal (xAxis, yAxis, zAxis) basis with zAxis = plane normal;
+		// the seed x direction avoids the normal's largest component for stability
+		const Dir& zAxis = plane.normal();
+		const uint32_t maxDir = physx::PxAbs(zAxis[0]) > physx::PxAbs(zAxis[1]) ?
+		                        (physx::PxAbs(zAxis[0]) > physx::PxAbs(zAxis[2]) ? 0u : 2u) :
+		                        (physx::PxAbs(zAxis[1]) > physx::PxAbs(zAxis[2]) ? 1u : 2u);
+		Dir xAxis((Real)0);
+		xAxis[(int32_t)(maxDir + 1) % 3] = (Real)1;
+		Dir yAxis = zAxis ^ xAxis;
+		yAxis.normalize();
+		xAxis = yAxis ^ zAxis;
+		Real signedArea = 0;
+		physx::Array<uint32_t> mergedFrameIndices;
+		mergedFrameIndices.pushBack(seedFrameIndex);
+		// Project the seed frame's clipped triangles into the plane, winding them CCW
+		for (uint32_t i = originalTriangleGroupStarts[seedFrameIndex]; i < originalTriangleGroupStarts[seedFrameIndex + 1]; ++i)
+		{
+			const Triangle& triangle = triangles[info[i].clippedTriangleIndex];
+			const uint32_t ccw = (uint32_t)((triangle.normal | zAxis) > 0);
+			const Real sign = ccw ? (Real)1 : -(Real)1;
+			signedArea += sign * physx::PxAbs(triangle.area);
+			const uint32_t i1 = 2 - ccw;
+			const uint32_t i2 = 1 + ccw;
+			planeVertices.pushBack(Vec2Real(xAxis | triangle.vertices[0], yAxis | triangle.vertices[0]));
+			planeVertices.pushBack(Vec2Real(xAxis | triangle.vertices[i1], yAxis | triangle.vertices[i1]));
+			planeVertices.pushBack(Vec2Real(xAxis | triangle.vertices[i2], yAxis | triangle.vertices[i2]));
+		}
+#if 1
+		// Absorb other coplanar original triangles whose interpolation frames match the seed
+		const uint32_t* freeIndexPtrStop = frameIndices.usedIndices() + frameIndices.capacity();
+		for (const uint32_t* nextFreeIndexPtr = frameIndices.freeIndices(); nextFreeIndexPtr < freeIndexPtrStop; ++nextFreeIndexPtr)
+		{
+			const uint32_t nextFreeIndex = *nextFreeIndexPtr;
+			const Triangle& nextOriginalTri = originalTriangles[originalTriangleIndexStart + nextFreeIndex];
+			if (nextOriginalTri.submeshIndex != seedOriginalTri.submeshIndex)
+			{
+				continue; // Different submesh, don't use
+			}
+			if (plane.distance(nextOriginalTri.vertices[0]) > distanceTol ||
+			        plane.distance(nextOriginalTri.vertices[1]) > distanceTol ||
+			        plane.distance(nextOriginalTri.vertices[2]) > distanceTol)
+			{
+				continue; // Not coplanar
+			}
+			const Interpolator& nextFreeFrame = frames[nextFreeIndex + originalTriangleIndexStart];
+			// BRG - Ouch, any way to set these tolerances in a little less of an ad hoc fashion?
+			if (!nextFreeFrame.equals(seedFrame, (Real)0.001, (Real)0.001, (Real)0.001, (Real)0.01, (Real)0.001))
+			{
+				continue; // Frames different, don't use
+			}
+			// We can use this frame
+			frameIndices.use(nextFreeIndex);
+			mergedFrameIndices.pushBack(nextFreeIndex);
+			for (uint32_t i = originalTriangleGroupStarts[nextFreeIndex]; i < originalTriangleGroupStarts[nextFreeIndex + 1]; ++i)
+			{
+				const Triangle& triangle = triangles[info[i].clippedTriangleIndex];
+				const uint32_t ccw = (uint32_t)((triangle.normal | zAxis) > 0);
+				const Real sign = ccw ? (Real)1 : -(Real)1;
+				signedArea += sign * physx::PxAbs(triangle.area);
+				const uint32_t i1 = 2 - ccw;
+				const uint32_t i2 = 1 + ccw;
+				planeVertices.pushBack(Vec2Real(xAxis | triangle.vertices[0], yAxis | triangle.vertices[0]));
+				planeVertices.pushBack(Vec2Real(xAxis | triangle.vertices[i1], yAxis | triangle.vertices[i1]));
+				planeVertices.pushBack(Vec2Real(xAxis | triangle.vertices[i2], yAxis | triangle.vertices[i2]));
+			}
+		}
+#endif
+
+		// We've collected all of the clipped triangles that fit within a single reference frame, and transformed them into the x,y plane.
+		// Now process this collection.
+		const uint32_t oldPlaneVertexCount = planeVertices.size();
+		const bool success = mergeTriangles2D(planeVertices, linkedEdgePool, distanceTol);
+		if (success && planeVertices.size() < oldPlaneVertexCount)
+		{
+			// Transform back into 3 space and append to cleanedMesh.
+			// vMap restores the winding implied by the accumulated signed area.
+			const uint32_t ccw = (uint32_t)(signedArea >= 0);
+			const Real sign = ccw ? (Real)1 : (Real) - 1;
+			const uint32_t vMap[3] = { 0, 2 - ccw, 1 + ccw };
+			const Pos planeOffset = Pos((Real)0) + (-plane.d()) * zAxis;
+			for (uint32_t i = 0; i < planeVertices.size(); i += 3)
+			{
+				nvidia::ExplicitRenderTriangle& cleanedTri = cleanedMesh.insert();
+				VertexData vertexData[3];
+				Triangle tri;
+				for (uint32_t v = 0; v < 3; ++v)
+				{
+					const Vec2Real& planeVertex = planeVertices[i + vMap[v]];
+					tri.vertices[v] = planeOffset + planeVertex[0] * xAxis + planeVertex[1] * yAxis;
+				}
+				tri.transform(BSPToMeshTM);
+				for (uint32_t v = 0; v < 3; ++v)
+				{
+					seedFrame.interpolateVertexData(vertexData[v], tri.vertices[v]);
+					vertexData[v].normal *= sign;
+				}
+				tri.toExplicitRenderTriangle(cleanedTri, vertexData);
+				cleanedTri.submeshIndex = seedOriginalTri.submeshIndex;
+				cleanedTri.smoothingMask = seedOriginalTri.smoothingMask;
+				cleanedTri.extraDataIndex = seedOriginalTri.extraDataIndex;
+			}
+		}
+		else
+		{
+			// An error occurred, or we increased the triangle count. Just use original triangles
+			for (uint32_t i = 0; i < mergedFrameIndices.size(); ++i)
+			{
+				for (uint32_t j = originalTriangleGroupStarts[mergedFrameIndices[i]]; j < originalTriangleGroupStarts[mergedFrameIndices[i] + 1]; ++j)
+				{
+					Triangle tri = triangles[info[j].clippedTriangleIndex];
+					tri.transform(BSPToMeshTM);
+					nvidia::ExplicitRenderTriangle& cleanedMeshTri = cleanedMesh.insert();
+					VertexData vertexData[3];
+					for (int v = 0; v < 3; ++v)
+					{
+						frames[info[j].originalTriangleIndex].interpolateVertexData(vertexData[v], tri.vertices[v]);
+						if (!info[j].ccw)
+						{
+							vertexData[v].normal *= -1.0;
+						}
+					}
+					tri.toExplicitRenderTriangle(cleanedMeshTri, vertexData);
+				}
+			}
+		}
+	}
+
+	return;
+}
+
+// Mesh cleaning entry point: sorts the clipped-triangle info records by
+// splitting plane (then by original triangle), builds a per-plane index
+// lookup, and merges each coplanar triangle group into 'cleanedMesh'.
+// NOTE(review): triangleInfo.begin() is dereferenced without an empty check -
+// confirm callers guarantee triangleInfo is non-empty.
+void
+cleanMesh(physx::Array<nvidia::ExplicitRenderTriangle>& cleanedMesh, const physx::Array<Triangle>& mesh, physx::Array<ClippedTriangleInfo>& triangleInfo, const physx::Array<Plane>& planes, const physx::Array<Triangle>& originalTriangles, const physx::Array<Interpolator>& frames, Real distanceTol, const Mat4Real& BSPToMeshTM)
+{
+	cleanedMesh.clear();
+
+	// Sort triangles into splitting plane groups, then original triangle groups
+	qsort(triangleInfo.begin(), triangleInfo.size(), sizeof(ClippedTriangleInfo), ClippedTriangleInfo::cmp);
+
+	physx::Array<uint32_t> planeGroupStarts;
+	nvidia::createIndexStartLookup(planeGroupStarts, 0, planes.size(), (int32_t*)&triangleInfo.begin()->planeIndex, triangleInfo.size(), sizeof(ClippedTriangleInfo));
+
+	// The linked-edge pool is shared across all plane groups to reuse allocations
+	nvidia::Pool<LinkedEdge2D> linkedEdgePool;
+	for (uint32_t i = 0; i < planes.size(); ++i)
+	{
+		mergeTriangles(cleanedMesh, mesh.begin(), frames.begin(), triangleInfo.begin() + planeGroupStarts[i], planeGroupStarts[i + 1] - planeGroupStarts[i], originalTriangles, planes[i], linkedEdgePool, distanceTol, BSPToMeshTM);
+	}
+}
+
+}
+#endif
diff --git a/APEX_1.4/shared/internal/src/authoring/Cutout.cpp b/APEX_1.4/shared/internal/src/authoring/Cutout.cpp
new file mode 100644
index 00000000..5d6ee916
--- /dev/null
+++ b/APEX_1.4/shared/internal/src/authoring/Cutout.cpp
@@ -0,0 +1,1908 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#ifdef _MANAGED
+#pragma managed(push, off)
+#endif
+
+#include "Apex.h"
+#include "PxMath.h"
+#include "ApexUsingNamespace.h"
+#include "ApexSharedUtils.h"
+#include "FractureTools.h"
+
+#include "authoring/Fracturing.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+#include "ApexSharedSerialization.h"
+
+#define CUTOUT_DISTANCE_THRESHOLD (0.7f)
+
+#define CUTOUT_DISTANCE_EPS (0.01f)
+
// Integer 2D point used for pixel-space traces of cutout outlines.
struct POINT2D
{
	POINT2D() {}	// members intentionally left uninitialized
	POINT2D(int32_t _x, int32_t _y) : x(_x), y(_y) {}

	int32_t x;
	int32_t y;
};
+
+// Unsigned modulus
+PX_INLINE uint32_t mod(int32_t n, uint32_t modulus)
+{
+ const int32_t d = n/(int32_t)modulus;
+ const int32_t m = n - d*(int32_t)modulus;
+ return m >= 0 ? (uint32_t)m : (uint32_t)m + modulus;
+}
+
// Convenience: x squared.
PX_INLINE float square(float x)
{
	return x * x;
}
+
// 2D dot product of the xy components (original comment mislabeled this "cross product")
PX_INLINE float dotXY(const physx::PxVec3& v, const physx::PxVec3& w)
{
	return v.x * w.x + v.y * w.y;
}
+
// Z-component of cross product (positive when w lies counterclockwise from v in the xy plane)
PX_INLINE float crossZ(const physx::PxVec3& v, const physx::PxVec3& w)
{
	return v.x * w.y - v.y * w.x;
}
+
// z coordinates may be used to store extra info - only deal with x and y
// Squared perpendicular distance (in the xy plane) from v1 to the line through
// v0 and v2, via |cross(base, leg)|^2 / |base|^2.  Returns 0 when the base
// v0->v2 is degenerate relative to the leg length, avoiding a near-zero divide.
PX_INLINE float perpendicularDistanceSquared(const physx::PxVec3& v0, const physx::PxVec3& v1, const physx::PxVec3& v2)
{
	const physx::PxVec3 base = v2 - v0;
	const physx::PxVec3 leg = v1 - v0;

	const float baseLen2 = dotXY(base, base);

	return baseLen2 > PX_EPS_F32 * dotXY(leg, leg) ? square(crossZ(base, leg)) / baseLen2 : 0.0f;
}
+
// z coordinates may be used to store extra info - only deal with x and y
// Squared perpendicular distance of cutout[index] from the line through its two
// ring neighbors (indices wrap around the closed polygon).
PX_INLINE float perpendicularDistanceSquared(const physx::Array< physx::PxVec3 >& cutout, uint32_t index)
{
	const uint32_t size = cutout.size();
	return perpendicularDistanceSquared(cutout[(index + size - 1) % size], cutout[index], cutout[(index + 1) % size]);
}
+
+
// Addresses a single vertex within a cutout set: (cutout index, vertex index).
struct CutoutVert
{
	int32_t cutoutIndex;
	int32_t vertIndex;

	void set(int32_t _cutoutIndex, int32_t _vertIndex)
	{
		cutoutIndex = _cutoutIndex;
		vertIndex = _vertIndex;
	}
};
+
// A vertex to be inserted into an existing cutout edge during T-junction
// splitting.
struct NewVertex
{
	CutoutVert vertex;	// identifies the edge (vertex.vertIndex -> vertIndex+1) of cutout vertex.cutoutIndex
	float edgeProj;		// parametric position along that edge where the new vertex goes
};
+
+static int compareNewVertices(const void* a, const void* b)
+{
+ const int32_t cutoutDiff = ((NewVertex*)a)->vertex.cutoutIndex - ((NewVertex*)b)->vertex.cutoutIndex;
+ if (cutoutDiff)
+ {
+ return cutoutDiff;
+ }
+ const int32_t vertDiff = ((NewVertex*)a)->vertex.vertIndex - ((NewVertex*)b)->vertex.vertIndex;
+ if (vertDiff)
+ {
+ return vertDiff;
+ }
+ const float projDiff = ((NewVertex*)a)->edgeProj - ((NewVertex*)b)->edgeProj;
+ return projDiff ? (projDiff < 0.0f ? -1 : 1) : 0;
+}
+
+template<typename T>
+class Map2d
+{
+public:
+ Map2d() : mMem(NULL) {}
+ Map2d(uint32_t width, uint32_t height) : mMem(NULL)
+ {
+ create_internal(width, height, NULL);
+ }
+ Map2d(uint32_t width, uint32_t height, T fillValue) : mMem(NULL)
+ {
+ create_internal(width, height, &fillValue);
+ }
+ Map2d(const Map2d& map)
+ {
+ *this = map;
+ }
+ ~Map2d()
+ {
+ delete [] mMem;
+ }
+
+ Map2d& operator = (const Map2d& map)
+ {
+ delete [] mMem;
+ mMem = NULL;
+ if (map.mMem)
+ {
+ create_internal(map.mWidth, map.mHeight, NULL);
+ memcpy(mMem, map.mMem, mWidth * mHeight);
+ }
+ return *this;
+ }
+
+ void create(uint32_t width, uint32_t height)
+ {
+ return create_internal(width, height, NULL);
+ }
+ void create(uint32_t width, uint32_t height, T fillValue)
+ {
+ create_internal(width, height, &fillValue);
+ }
+
+ void clear(const T value)
+ {
+ T* mem = mMem;
+ T* stop = mMem + mWidth * mHeight;
+ while (mem < stop)
+ {
+ *mem++ = value;
+ }
+ }
+
+ void setOrigin(uint32_t x, uint32_t y)
+ {
+ mOriginX = x;
+ mOriginY = y;
+ }
+
+ const T& operator()(int32_t x, int32_t y) const
+ {
+ x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
+ y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
+ return mMem[(uint32_t)(x + y * (int32_t)mWidth)];
+ }
+ T& operator()(int32_t x, int32_t y)
+ {
+ x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
+ y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
+ return mMem[(uint32_t)(x + y * (int32_t)mWidth)];
+ }
+
+private:
+
+ void create_internal(uint32_t width, uint32_t height, T* val)
+ {
+ delete [] mMem;
+ mWidth = width;
+ mHeight = height;
+ mMem = new T[mWidth * mHeight];
+ mOriginX = 0;
+ mOriginY = 0;
+ if (val)
+ {
+ clear(*val);
+ }
+ }
+
+ T* mMem;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ uint32_t mOriginX;
+ uint32_t mOriginY;
+};
+
+class BitMap
+{
+public:
+ BitMap() : mMem(NULL) {}
+ BitMap(uint32_t width, uint32_t height) : mMem(NULL)
+ {
+ create_internal(width, height, NULL);
+ }
+ BitMap(uint32_t width, uint32_t height, bool fillValue) : mMem(NULL)
+ {
+ create_internal(width, height, &fillValue);
+ }
+ BitMap(const BitMap& map)
+ {
+ *this = map;
+ }
+ ~BitMap()
+ {
+ delete [] mMem;
+ }
+
+ BitMap& operator = (const BitMap& map)
+ {
+ delete [] mMem;
+ mMem = NULL;
+ if (map.mMem)
+ {
+ create_internal(map.mWidth, map.mHeight, NULL);
+ memcpy(mMem, map.mMem, mHeight * mRowBytes);
+ }
+ return *this;
+ }
+
+ void create(uint32_t width, uint32_t height)
+ {
+ return create_internal(width, height, NULL);
+ }
+ void create(uint32_t width, uint32_t height, bool fillValue)
+ {
+ create_internal(width, height, &fillValue);
+ }
+
+ void clear(bool value)
+ {
+ memset(mMem, value ? 0xFF : 0x00, mRowBytes * mHeight);
+ }
+
+ void setOrigin(uint32_t x, uint32_t y)
+ {
+ mOriginX = x;
+ mOriginY = y;
+ }
+
+ bool read(int32_t x, int32_t y) const
+ {
+ x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
+ y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
+ return ((mMem[(x >> 3) + y * mRowBytes] >> (x & 7)) & 1) != 0;
+ }
+ void set(int32_t x, int32_t y)
+ {
+ x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
+ y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
+ mMem[(x >> 3) + y * mRowBytes] |= 1 << (x & 7);
+ }
+ void reset(int32_t x, int32_t y)
+ {
+ x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
+ y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
+ mMem[(x >> 3) + y * mRowBytes] &= ~(1 << (x & 7));
+ }
+
+private:
+
+ void create_internal(uint32_t width, uint32_t height, bool* val)
+ {
+ delete [] mMem;
+ mRowBytes = (width + 7) >> 3;
+ const uint32_t bytes = mRowBytes * height;
+ if (bytes == 0)
+ {
+ mWidth = mHeight = 0;
+ mMem = NULL;
+ return;
+ }
+ mWidth = width;
+ mHeight = height;
+ mMem = new uint8_t[bytes];
+ mOriginX = 0;
+ mOriginY = 0;
+ if (val)
+ {
+ clear(*val);
+ }
+ }
+
+ uint8_t* mMem;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ uint32_t mRowBytes;
+ uint32_t mOriginX;
+ uint32_t mOriginY;
+};
+
+
+PX_INLINE int32_t taxicabSine(int32_t i)
+{
+ // 0 1 1 1 0 -1 -1 -1
+ return (int32_t)((0x01A9 >> ((i & 7) << 1)) & 3) - 1;
+}
+
// Only looks at x and y components
// Tests whether direction d1 falls inside the counterclockwise angular sweep
// from d0 to d2: when the sweep d0->d2 is less than half a turn (ccw02) both
// sub-tests must hold, otherwise either one suffices.
// NOTE(review): exact boundary/orientation convention inferred from usage in the
// convex decomposition below - confirm before reusing elsewhere.
PX_INLINE bool directionsXYOrderedCCW(const physx::PxVec3& d0, const physx::PxVec3& d1, const physx::PxVec3& d2)
{
	const bool ccw02 = crossZ(d0, d2) > 0.0f;
	const bool ccw01 = crossZ(d0, d1) > 0.0f;
	const bool ccw21 = crossZ(d2, d1) > 0.0f;
	return ccw02 ? ccw01 && ccw21 : ccw01 || ccw21;
}
+
// Measures how well the trace points strictly between _start and _start+delta
// (indices wrap around the closed trace) fit the straight chord joining those
// two endpoints.  Returns the fraction of interior points lying farther than
// distThreshold from the chord (0 = perfect fit).  Spans touching the image
// border are special-cased: 0 if both endpoints lie on the same border edge,
// PX_MAX_F32 otherwise, so a chord never cuts across a border.
PX_INLINE float compareTraceSegmentToLineSegment(const physx::Array<POINT2D>& trace, int _start, int delta, float distThreshold, uint32_t width, uint32_t height, bool hasBorder)
{
	if (delta < 2)
	{
		return 0.0f;	// no interior points to test
	}

	const uint32_t size = trace.size();

	uint32_t start = (uint32_t)_start, end = (uint32_t)(_start + delta) % size;

	// Border points carry coordinates -1 or width/height (one pixel outside the image).
	const bool startIsOnBorder = hasBorder && (trace[start].x == -1 || trace[start].x == (int)width || trace[start].y == -1 || trace[start].y == (int)height);
	const bool endIsOnBorder = hasBorder && (trace[end].x == -1 || trace[end].x == (int)width || trace[end].y == -1 || trace[end].y == (int)height);

	if (startIsOnBorder || endIsOnBorder)
	{
		if ((trace[start].x == -1 && trace[end].x == -1) ||
		        (trace[start].y == -1 && trace[end].y == -1) ||
		        (trace[start].x == (int)width && trace[end].x == (int)width) ||
		        (trace[start].y == (int)height && trace[end].y == (int)height))
		{
			return 0.0f;	// both endpoints on the same border edge - perfect fit
		}
		return PX_MAX_F32;	// endpoints on different border edges - never acceptable
	}

	physx::PxVec3 orig((float)trace[start].x, (float)trace[start].y, 0);
	physx::PxVec3 dest((float)trace[end].x, (float)trace[end].y, 0);
	physx::PxVec3 dir = dest - orig;

	dir.normalize();

	float aveError = 0.0f;

	// Count interior points whose perpendicular distance from the chord
	// meets or exceeds distThreshold.
	for (;;)
	{
		if (++start >= size)
		{
			start = 0;
		}
		if (start == end)
		{
			break;
		}
		physx::PxVec3 testDisp((float)trace[start].x, (float)trace[start].y, 0);
		testDisp -= orig;
		aveError += (float)(physx::PxAbs(testDisp.x * dir.y - testDisp.y * dir.x) >= distThreshold);
	}

	aveError /= delta - 1;	// normalize by the number of interior points

	return aveError;
}
+
// Segment i starts at vi and ends at vi+ei
// Tests for overlap in segments' projection onto xy plane
// Returns distance between line segments. (Negative value indicates overlap.)
// Separating-axis style test: each segment's endpoints are measured against the
// line carrying the other segment; if the endpoints straddle that line, the
// smaller offset is negated so min(a,b) goes negative on that axis.
PX_INLINE float segmentsIntersectXY(const physx::PxVec3& v0, const physx::PxVec3& e0, const physx::PxVec3& v1, const physx::PxVec3& e1)
{
	const physx::PxVec3 dv = v1 - v0;

	physx::PxVec3 d0 = e0;
	d0.normalize();
	physx::PxVec3 d1 = e1;
	d1.normalize();

	// Signed offsets of segment 1's endpoints from the line through segment 0.
	const float c10 = crossZ(dv, d0);
	const float d10 = crossZ(e1, d0);

	float a1 = physx::PxAbs(c10);
	float b1 = physx::PxAbs(c10 + d10);

	if (c10 * (c10 + d10) < 0.0f)
	{
		// Endpoints on opposite sides: negate the nearer one.
		if (a1 < b1)
		{
			a1 = -a1;
		}
		else
		{
			b1 = -b1;
		}
	}

	// Same test with the roles of the two segments swapped.
	const float c01 = crossZ(d1, dv);
	const float d01 = crossZ(e0, d1);

	float a2 = physx::PxAbs(c01);
	float b2 = physx::PxAbs(c01 + d01);

	if (c01 * (c01 + d01) < 0.0f)
	{
		if (a2 < b2)
		{
			a2 = -a2;
		}
		else
		{
			b2 = -b2;
		}
	}

	// Positive if either axis separates the segments; negative if both straddle.
	return physx::PxMax(physx::PxMin(a1, b1), physx::PxMin(a2, b2));
}
+
// If point projects onto segment, returns true and proj is set to a
// value in the range [0,1], indicating where along the segment (from v0 to v1)
// the projection lies, and dist2 is set to the distance squared from point to
// the line segment. Otherwise, returns false.
// The projection must additionally land at least 'margin' away from both
// endpoints (measured along the segment), or false is returned.
// Note, if v1 = v0 and margin == 0, the function returns true with proj = 0
// (for margin > 0 the degenerate segment is rejected by the margin checks).
PX_INLINE bool projectOntoSegmentXY(float& proj, float& dist2, const physx::PxVec3& point, const physx::PxVec3& v0, const physx::PxVec3& v1, float margin)
{
	const physx::PxVec3 seg = v1 - v0;
	const physx::PxVec3 x = point - v0;
	const float seg2 = dotXY(seg, seg);
	const float d = dotXY(x, seg);

	if (d < 0.0f || d > seg2)
	{
		// Projection falls outside the [v0, v1] span
		return false;
	}

	const float margin2 = margin * margin;

	const float p = seg2 > 0.0f ? d / seg2 : 0.0f;
	const float lineDist2 = d * p;	// squared distance from v0 along the segment (= d^2/seg2)

	if (lineDist2 < margin2)
	{
		return false;	// too close to v0
	}

	// Mirror check from the v1 end.
	const float pPrime = 1.0f - p;
	const float dPrime = seg2 - d;
	const float lineDistPrime2 = dPrime * pPrime;

	if (lineDistPrime2 < margin2)
	{
		return false;	// too close to v1
	}

	proj = p;
	dist2 = dotXY(x, x) - lineDist2;	// Pythagoras: squared perpendicular distance
	return true;
}
+
// True if v lies within half a pixel of (or beyond) the edge of the
// [0, width) x [0, height) pixel domain, i.e. on the image border.
PX_INLINE bool isOnBorder(const physx::PxVec3& v, uint32_t width, uint32_t height)
{
	return v.x < -0.5f || v.x >= width - 0.5f || v.y < -0.5f || v.y >= height - 0.5f;
}
+
+static void createCutout(nvidia::Cutout& cutout, const physx::Array<POINT2D>& trace, float snapThreshold, uint32_t width, uint32_t height, bool hasBorder)
+{
+ cutout.vertices.reset();
+
+ const uint32_t traceSize = trace.size();
+
+ if (traceSize == 0)
+ {
+ return; // Nothing to do
+ }
+
+ uint32_t size = traceSize;
+
+ physx::Array<int> vertexIndices;
+
+ const float errorThreshold = 0.1f;
+
+ const float pixelCenterOffset = hasBorder ? 0.5f : 0.0f;
+
+ // Find best segment
+ uint32_t start = 0;
+ uint32_t delta = 0;
+ for (uint32_t iStart = 0; iStart < size; ++iStart)
+ {
+ uint32_t iDelta = (size >> 1) + (size & 1);
+ for (; iDelta > 1; --iDelta)
+ {
+ float fit = compareTraceSegmentToLineSegment(trace, (int32_t)iStart, (int32_t)iDelta, CUTOUT_DISTANCE_THRESHOLD, width, height, hasBorder);
+ if (fit < errorThreshold)
+ {
+ break;
+ }
+ }
+ if (iDelta > delta)
+ {
+ start = iStart;
+ delta = iDelta;
+ }
+ }
+ cutout.vertices.pushBack(physx::PxVec3((float)trace[start].x + pixelCenterOffset, (float)trace[start].y + pixelCenterOffset, 0));
+
+ // Now complete the loop
+ while ((size -= delta) > 0)
+ {
+ start = (start + delta) % traceSize;
+ cutout.vertices.pushBack(physx::PxVec3((float)trace[start].x + pixelCenterOffset, (float)trace[start].y + pixelCenterOffset, 0));
+ if (size == 1)
+ {
+ delta = 1;
+ break;
+ }
+ for (delta = size - 1; delta > 1; --delta)
+ {
+ float fit = compareTraceSegmentToLineSegment(trace, (int32_t)start, (int32_t)delta, CUTOUT_DISTANCE_THRESHOLD, width, height, hasBorder);
+ if (fit < errorThreshold)
+ {
+ break;
+ }
+ }
+ }
+
+ const float snapThresh2 = square(snapThreshold);
+
+ // Use the snapThreshold to clean up
+ while ((size = cutout.vertices.size()) >= 4)
+ {
+ bool reduced = false;
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ const uint32_t i1 = (i + 1) % size;
+ const uint32_t i2 = (i + 2) % size;
+ const uint32_t i3 = (i + 3) % size;
+ physx::PxVec3& v0 = cutout.vertices[i];
+ physx::PxVec3& v1 = cutout.vertices[i1];
+ physx::PxVec3& v2 = cutout.vertices[i2];
+ physx::PxVec3& v3 = cutout.vertices[i3];
+ const physx::PxVec3 d0 = v1 - v0;
+ const physx::PxVec3 d1 = v2 - v1;
+ const physx::PxVec3 d2 = v3 - v2;
+ const float den = crossZ(d0, d2);
+ if (den != 0)
+ {
+ const float recipDen = 1.0f / den;
+ const float s0 = crossZ(d1, d2) * recipDen;
+ const float s2 = crossZ(d0, d1) * recipDen;
+ if (s0 >= 0 || s2 >= 0)
+ {
+ if (d0.magnitudeSquared()*s0* s0 <= snapThresh2 && d2.magnitudeSquared()*s2* s2 <= snapThresh2)
+ {
+ v1 += d0 * s0;
+
+ uint32_t index = (uint32_t)(&v2 - cutout.vertices.begin());
+
+ cutout.vertices.remove(index);
+
+ reduced = true;
+ break;
+ }
+ }
+ }
+ }
+ if (!reduced)
+ {
+ break;
+ }
+ }
+}
+
// Splits T-junctions between different cutouts: wherever a vertex of one cutout
// lies within 'threshold' of an edge of another cutout, a matching vertex is
// inserted into that edge so both loops share a corner there.
static void splitTJunctions(nvidia::CutoutSetImpl& cutoutSet, float threshold)
{
	// Set bounds reps
	physx::Array<nvidia::BoundsRep> bounds;
	physx::Array<CutoutVert> cutoutMap; // maps bounds # -> ( cutout #, vertex # ).
	physx::Array<nvidia::IntPair> overlaps;

	const float distThreshold2 = threshold * threshold;

	// Split T-junctions
	uint32_t edgeCount = 0;
	for (uint32_t i = 0; i < cutoutSet.cutouts.size(); ++i)
	{
		edgeCount += cutoutSet.cutouts[i].vertices.size();
	}

	bounds.resize(edgeCount);
	cutoutMap.resize(edgeCount);

	// One AABB per edge (vertex j -> j+1), fattened by the threshold so the
	// broadphase below also reports near-misses.
	edgeCount = 0;
	for (uint32_t i = 0; i < cutoutSet.cutouts.size(); ++i)
	{
		nvidia::Cutout& cutout = cutoutSet.cutouts[i];
		const uint32_t cutoutSize = cutout.vertices.size();
		for (uint32_t j = 0; j < cutoutSize; ++j)
		{
			bounds[edgeCount].aabb.include(cutout.vertices[j]);
			bounds[edgeCount].aabb.include(cutout.vertices[(j + 1) % cutoutSize]);
			PX_ASSERT(!bounds[edgeCount].aabb.isEmpty());
			bounds[edgeCount].aabb.fattenFast(threshold);
			cutoutMap[edgeCount].set((int32_t)i, (int32_t)j);
			++edgeCount;
		}
	}

	// Find bounds overlaps
	if (bounds.size() > 0)
	{
		boundsCalculateOverlaps(overlaps, nvidia::Bounds3XY, &bounds[0], bounds.size(), sizeof(bounds[0]));
	}

	// For each overlapping edge pair (from different cutouts), project each edge's
	// start vertex onto the other edge; record a NewVertex where it lands within
	// the distance threshold.
	physx::Array<NewVertex> newVertices;
	for (uint32_t overlapIndex = 0; overlapIndex < overlaps.size(); ++overlapIndex)
	{
		const nvidia::IntPair& mapPair = overlaps[overlapIndex];
		const CutoutVert& seg0Map = cutoutMap[(uint32_t)mapPair.i0];
		const CutoutVert& seg1Map = cutoutMap[(uint32_t)mapPair.i1];

		if (seg0Map.cutoutIndex == seg1Map.cutoutIndex)
		{
			// Only split based on vertex/segment junctions from different cutouts
			continue;
		}

		NewVertex newVertex;
		float dist2 = 0;

		const nvidia::Cutout& cutout0 = cutoutSet.cutouts[(uint32_t)seg0Map.cutoutIndex];
		const uint32_t cutoutSize0 = cutout0.vertices.size();
		const nvidia::Cutout& cutout1 = cutoutSet.cutouts[(uint32_t)seg1Map.cutoutIndex];
		const uint32_t cutoutSize1 = cutout1.vertices.size();

		// Vertex of cutout0 against edge of cutout1 (0.25 margin keeps new points
		// away from existing edge endpoints).
		if (projectOntoSegmentXY(newVertex.edgeProj, dist2, cutout0.vertices[(uint32_t)seg0Map.vertIndex], cutout1.vertices[(uint32_t)seg1Map.vertIndex],
		                         cutout1.vertices[(uint32_t)(seg1Map.vertIndex + 1) % cutoutSize1], 0.25f))
		{
			if (dist2 <= distThreshold2)
			{
				newVertex.vertex = seg1Map;
				newVertices.pushBack(newVertex);
			}
		}

		// And the symmetric test: vertex of cutout1 against edge of cutout0.
		if (projectOntoSegmentXY(newVertex.edgeProj, dist2, cutout1.vertices[(uint32_t)seg1Map.vertIndex], cutout0.vertices[(uint32_t)seg0Map.vertIndex],
		                         cutout0.vertices[(uint32_t)(seg0Map.vertIndex + 1) % cutoutSize0], 0.25f))
		{
			if (dist2 <= distThreshold2)
			{
				newVertex.vertex = seg0Map;
				newVertices.pushBack(newVertex);
			}
		}
	}

	if (newVertices.size())
	{
		// Sort new vertices
		qsort(newVertices.begin(), newVertices.size(), sizeof(NewVertex), compareNewVertices);

		// Insert new vertices, walking the sorted list in reverse so insertions
		// never shift the indices of entries not yet processed.
		uint32_t lastCutoutIndex = 0xFFFFFFFF;
		uint32_t lastVertexIndex = 0xFFFFFFFF;
		float lastProj = 1.0f;
		for (uint32_t newVertexIndex = newVertices.size(); newVertexIndex--;)
		{
			const NewVertex& newVertex = newVertices[newVertexIndex];
			if (newVertex.vertex.cutoutIndex != (int32_t)lastCutoutIndex)
			{
				lastCutoutIndex = (uint32_t)newVertex.vertex.cutoutIndex;
				lastVertexIndex = 0xFFFFFFFF;
			}
			if (newVertex.vertex.vertIndex != (int32_t)lastVertexIndex)
			{
				lastVertexIndex = (uint32_t)newVertex.vertex.vertIndex;
				lastProj = 1.0f;
			}
			nvidia::Cutout& cutout = cutoutSet.cutouts[(uint32_t)newVertex.vertex.cutoutIndex];
			// Rescale the projection relative to the previously inserted point on this
			// edge, since each insertion shortens the remaining edge span.
			const float proj = lastProj > 0.0f ? newVertex.edgeProj / lastProj : 0.0f;
			const physx::PxVec3 pos = (1.0f - proj) * cutout.vertices[(uint32_t)newVertex.vertex.vertIndex]
			                          + proj * cutout.vertices[(uint32_t)(newVertex.vertex.vertIndex + 1) % cutout.vertices.size()];
			// Grow by one and shift the tail up, then write the new vertex after vertIndex.
			cutout.vertices.insert();
			for (uint32_t n = cutout.vertices.size(); --n > (uint32_t)newVertex.vertex.vertIndex + 1;)
			{
				cutout.vertices[n] = cutout.vertices[n - 1];
			}
			cutout.vertices[(uint32_t)newVertex.vertex.vertIndex + 1] = pos;
			lastProj = newVertex.edgeProj;
		}
	}
}
+
+#if 0
+static void mergeVertices(CutoutSetImpl& cutoutSet, float threshold, uint32_t width, uint32_t height)
+{
+ // Set bounds reps
+ physx::Array<BoundsRep> bounds;
+ physx::Array<CutoutVert> cutoutMap; // maps bounds # -> ( cutout #, vertex # ).
+ physx::Array<IntPair> overlaps;
+
+ const float threshold2 = threshold * threshold;
+
+ uint32_t vertexCount = 0;
+ for (uint32_t i = 0; i < cutoutSet.cutouts.size(); ++i)
+ {
+ vertexCount += cutoutSet.cutouts[i].vertices.size();
+ }
+
+ bounds.resize(vertexCount);
+ cutoutMap.resize(vertexCount);
+
+ vertexCount = 0;
+ for (uint32_t i = 0; i < cutoutSet.cutouts.size(); ++i)
+ {
+ Cutout& cutout = cutoutSet.cutouts[i];
+ for (uint32_t j = 0; j < cutout.vertices.size(); ++j)
+ {
+ physx::PxVec3& vertex = cutout.vertices[j];
+ physx::PxVec3 min(vertex.x - threshold, vertex.y - threshold, 0.0f);
+ physx::PxVec3 max(vertex.x + threshold, vertex.y + threshold, 0.0f);
+ bounds[vertexCount].aabb.set(min, max);
+ cutoutMap[vertexCount].set(i, j);
+ ++vertexCount;
+ }
+ }
+
+ // Find bounds overlaps
+ overlaps.reset();
+ if (bounds.size() > 0)
+ {
+ boundsCalculateOverlaps(overlaps, Bounds3XY, &bounds[0], bounds.size(), sizeof(bounds[0]));
+ }
+
+ const uint32_t overlapCount = overlaps.size();
+ if (overlapCount)
+ {
+ // Sort overlaps by index0 and index1
+ qsort(overlaps.begin(), overlaps.size(), sizeof(IntPair), IntPair::compare);
+
+ // Process overlaps: merge vertices
+ uint32_t groupStart = 0;
+ uint32_t groupStop;
+ do
+ {
+ const int32_t groupI0 = overlaps[groupStart].i0;
+ groupStop = groupStart;
+ while (++groupStop < overlapCount)
+ {
+ const int32_t i0 = overlaps[groupStop].i0;
+ if (i0 != groupI0)
+ {
+ break;
+ }
+ }
+ // Process group
+ physx::PxVec3 straightV(0.0f);
+ uint32_t straightCount = 0;
+ physx::PxVec3 borderV(0.0f);
+ uint32_t borderCount = 0;
+ physx::PxVec3 v(0.0f);
+ float weight = 0.0f;
+ // Include i0
+ const CutoutVert& vertexMap = cutoutMap[overlaps[groupStart].i0];
+ Cutout& cutout = cutoutSet.cutouts[vertexMap.cutoutIndex];
+ float dist2 = perpendicularDistanceSquared(cutout.vertices, vertexMap.vertIndex);
+ if (isOnBorder(cutout.vertices[vertexMap.vertIndex], width, height))
+ {
+ borderV += cutout.vertices[vertexMap.vertIndex];
+ ++borderCount;
+ }
+ else if (dist2 < threshold2)
+ {
+ straightV += cutout.vertices[vertexMap.vertIndex];
+ ++straightCount;
+ }
+ else
+ {
+ const float recipDist2 = 1.0f / dist2;
+ weight += recipDist2;
+ v += cutout.vertices[vertexMap.vertIndex] * recipDist2;
+ }
+ for (uint32_t i = groupStart; i < groupStop; ++i)
+ {
+ const CutoutVert& vertexMap = cutoutMap[overlaps[i].i1];
+ Cutout& cutout = cutoutSet.cutouts[vertexMap.cutoutIndex];
+ dist2 = perpendicularDistanceSquared(cutout.vertices, vertexMap.vertIndex);
+ if (isOnBorder(cutout.vertices[vertexMap.vertIndex], width, height))
+ {
+ borderV += cutout.vertices[vertexMap.vertIndex];
+ ++borderCount;
+ }
+ else if (dist2 < threshold2)
+ {
+ straightV += cutout.vertices[vertexMap.vertIndex];
+ ++straightCount;
+ }
+ else
+ {
+ const float recipDist2 = 1.0f / dist2;
+ weight += recipDist2;
+ v += cutout.vertices[vertexMap.vertIndex] * recipDist2;
+ }
+ }
+ if (borderCount)
+ {
+ // If we have any borderVertices, these will be the only ones considered
+ v = (1.0f / borderCount) * borderV;
+ }
+ else if (straightCount)
+ {
+ // Otherwise if we have any straight angles, these will be the only ones considered
+ v = (1.0f / straightCount) * straightV;
+ }
+ else
+ {
+ v *= 1.0f / weight;
+ }
+ // Now replace all group vertices by v
+ {
+ const CutoutVert& vertexMap = cutoutMap[overlaps[groupStart].i0];
+ cutoutSet.cutouts[vertexMap.cutoutIndex].vertices[vertexMap.vertIndex] = v;
+ for (uint32_t i = groupStart; i < groupStop; ++i)
+ {
+ const CutoutVert& vertexMap = cutoutMap[overlaps[i].i1];
+ cutoutSet.cutouts[vertexMap.cutoutIndex].vertices[vertexMap.vertIndex] = v;
+ }
+ }
+ }
+ while ((groupStart = groupStop) < overlapCount);
+ }
+}
+#else
// Merges nearby vertices across different cutouts so neighboring loops share
// corners exactly.  Pipeline:
//   1) broadphase: overlap threshold-sized boxes around every vertex,
//   2) build a symmetric candidate-pair list (different cutouts only, matching
//      border status, distance within threshold),
//   3) per vertex, keep only the closest neighbor from each other cutout,
//   4) snap each surviving pair to a common position, pinning moved vertices
//      so later pairs don't drag them again.
static void mergeVertices(nvidia::CutoutSetImpl& cutoutSet, float threshold, uint32_t width, uint32_t height)
{
	// Set bounds reps
	uint32_t vertexCount = 0;
	for (uint32_t i = 0; i < cutoutSet.cutouts.size(); ++i)
	{
		vertexCount += cutoutSet.cutouts[i].vertices.size();
	}

	physx::Array<nvidia::BoundsRep> bounds;
	physx::Array<CutoutVert> cutoutMap; // maps bounds # -> ( cutout #, vertex # ).
	bounds.resize(vertexCount);
	cutoutMap.resize(vertexCount);

	// One threshold-sized box per vertex.
	vertexCount = 0;
	for (uint32_t i = 0; i < cutoutSet.cutouts.size(); ++i)
	{
		nvidia::Cutout& cutout = cutoutSet.cutouts[i];
		for (uint32_t j = 0; j < cutout.vertices.size(); ++j)
		{
			physx::PxVec3& vertex = cutout.vertices[j];
			physx::PxVec3 min(vertex.x - threshold, vertex.y - threshold, 0.0f);
			physx::PxVec3 max(vertex.x + threshold, vertex.y + threshold, 0.0f);
			bounds[vertexCount].aabb = physx::PxBounds3(min, max);
			cutoutMap[vertexCount].set((int32_t)i, (int32_t)j);
			++vertexCount;
		}
	}

	// Find bounds overlaps
	physx::Array<nvidia::IntPair> overlaps;
	if (bounds.size() > 0)
	{
		boundsCalculateOverlaps(overlaps, nvidia::Bounds3XY, &bounds[0], bounds.size(), sizeof(bounds[0]));
	}
	uint32_t overlapCount = overlaps.size();

	if (overlapCount == 0)
	{
		return;
	}

	// Sort by first index
	qsort(overlaps.begin(), overlapCount, sizeof(nvidia::IntPair), nvidia::IntPair::compare);

	const float threshold2 = threshold * threshold;

	physx::Array<nvidia::IntPair> pairs;

	// Group by first index
	physx::Array<uint32_t> lookup;
	nvidia::createIndexStartLookup(lookup, 0, vertexCount, &overlaps.begin()->i0, overlapCount, sizeof(nvidia::IntPair));
	for (uint32_t i = 0; i < vertexCount; ++i)
	{
		const uint32_t start = lookup[i];
		const uint32_t stop = lookup[i + 1];
		if (start == stop)
		{
			continue;
		}
		const CutoutVert& cutoutVert0 = cutoutMap[(uint32_t)overlaps[start].i0];
		const physx::PxVec3& vert0 = cutoutSet.cutouts[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex];
		// Periodic sets have no border, so border status only matters otherwise.
		const bool isOnBorder0 = !cutoutSet.periodic && isOnBorder(vert0, width, height);
		for (uint32_t j = start; j < stop; ++j)
		{
			const CutoutVert& cutoutVert1 = cutoutMap[(uint32_t)overlaps[j].i1];
			if (cutoutVert0.cutoutIndex == cutoutVert1.cutoutIndex)
			{
				// No pairs from the same cutout
				continue;
			}
			const physx::PxVec3& vert1 = cutoutSet.cutouts[(uint32_t)cutoutVert1.cutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex];
			const bool isOnBorder1 = !cutoutSet.periodic && isOnBorder(vert1, width, height);
			if (isOnBorder0 != isOnBorder1)
			{
				// No border/non-border pairs
				continue;
			}
			if ((vert0 - vert1).magnitudeSquared() > threshold2)
			{
				// Distance outside threshold
				continue;
			}
			// A keeper. Keep a symmetric list
			nvidia::IntPair overlap = overlaps[j];
			pairs.pushBack(overlap);
			const int32_t i0 = overlap.i0;
			overlap.i0 = overlap.i1;
			overlap.i1 = i0;
			pairs.pushBack(overlap);
		}
	}

	// Sort by first index
	qsort(pairs.begin(), pairs.size(), sizeof(nvidia::IntPair), nvidia::IntPair::compare);

	// For every vertex, only keep closest neighbor from each cutout
	// NOTE(review): if all candidates were filtered out, pairs is empty and
	// pairs.begin() is dereferenced for its address here - confirm
	// createIndexStartLookup tolerates a zero count.
	nvidia::createIndexStartLookup(lookup, 0, vertexCount, &pairs.begin()->i0, pairs.size(), sizeof(nvidia::IntPair));
	for (uint32_t i = 0; i < vertexCount; ++i)
	{
		const uint32_t start = lookup[i];
		const uint32_t stop = lookup[i + 1];
		if (start == stop)
		{
			continue;
		}
		const CutoutVert& cutoutVert0 = cutoutMap[(uint32_t)pairs[start].i0];
		const physx::PxVec3& vert0 = cutoutSet.cutouts[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex];
		uint32_t groupStart = start;
		while (groupStart < stop)
		{
			// Entries with the same partner cutout are contiguous after the sort;
			// keep only the closest one, invalidating the rest.
			uint32_t next = groupStart;
			const CutoutVert& cutoutVert1 = cutoutMap[(uint32_t)pairs[next].i1];
			int32_t currentOtherCutoutIndex = cutoutVert1.cutoutIndex;
			const physx::PxVec3& vert1 = cutoutSet.cutouts[(uint32_t)currentOtherCutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex];
			uint32_t keep = groupStart;
			float minDist2 = (vert0 - vert1).magnitudeSquared();
			while (++next < stop)
			{
				const CutoutVert& cutoutVertNext = cutoutMap[(uint32_t)pairs[next].i1];
				if (currentOtherCutoutIndex != cutoutVertNext.cutoutIndex)
				{
					break;
				}
				const physx::PxVec3& vertNext = cutoutSet.cutouts[(uint32_t)cutoutVertNext.cutoutIndex].vertices[(uint32_t)cutoutVertNext.vertIndex];
				const float dist2 = (vert0 - vertNext).magnitudeSquared();
				if (dist2 < minDist2)
				{
					pairs[keep].set(-1, -1);	// Invalidate
					keep = next;
					minDist2 = dist2;
				}
				else
				{
					pairs[next].set(-1, -1);	// Invalidate
				}
			}
			groupStart = next;
		}
	}

	// Eliminate invalid pairs (compactify)
	uint32_t pairCount = 0;
	for (uint32_t i = 0; i < pairs.size(); ++i)
	{
		if (pairs[i].i0 >= 0 && pairs[i].i1 >= 0)
		{
			pairs[pairCount++] = pairs[i];
		}
	}
	pairs.resize(pairCount);

	// Snap points together
	physx::Array<bool> pinned;
	pinned.resize(vertexCount);
	memset(pinned.begin(), 0, pinned.size()*sizeof(bool));

	for (uint32_t i = 0; i < pairCount; ++i)
	{
		const uint32_t i0 = (uint32_t)pairs[i].i0;
		bool& pinned0 = pinned[i0];
		if (pinned0)
		{
			continue;
		}
		const CutoutVert& cutoutVert0 = cutoutMap[i0];
		physx::PxVec3& vert0 = cutoutSet.cutouts[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex];
		const uint32_t i1 = (uint32_t)pairs[i].i1;
		bool& pinned1 = pinned[i1];
		const CutoutVert& cutoutVert1 = cutoutMap[i1];
		physx::PxVec3& vert1 = cutoutSet.cutouts[(uint32_t)cutoutVert1.cutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex];
		const physx::PxVec3 disp = vert1 - vert0;
		// Move and pin
		pinned0 = true;
		if (pinned1)
		{
			// Partner already moved: snap this vertex onto it.
			vert0 = vert1;
		}
		else
		{
			// Neither moved yet: meet at the midpoint and pin both.
			vert0 += 0.5f * disp;
			vert1 = vert0;
			pinned1 = true;
		}
	}
}
+#endif
+
+static void eliminateStraightAngles(nvidia::CutoutSetImpl& cutoutSet)
+{
+ // Eliminate straight angles
+ for (uint32_t i = 0; i < cutoutSet.cutouts.size(); ++i)
+ {
+ nvidia::Cutout& cutout = cutoutSet.cutouts[i];
+ uint32_t oldSize;
+ do
+ {
+ oldSize = cutout.vertices.size();
+ for (uint32_t j = 0; j < cutout.vertices.size();)
+ {
+// if( isOnBorder( cutout.vertices[j], width, height ) )
+// { // Don't eliminate border vertices
+// ++j;
+// continue;
+// }
+ if (perpendicularDistanceSquared(cutout.vertices, j) < CUTOUT_DISTANCE_EPS * CUTOUT_DISTANCE_EPS)
+ {
+ cutout.vertices.remove(j);
+ }
+ else
+ {
+ ++j;
+ }
+ }
+ }
+ while (cutout.vertices.size() != oldSize);
+ }
+}
+
// Cleans up a traced cutout set: split T-junctions (with a fixed 1-pixel
// threshold), merge nearby vertices across cutouts, then drop collinear
// vertices.
static void simplifyCutoutSetImpl(nvidia::CutoutSetImpl& cutoutSet, float threshold, uint32_t width, uint32_t height)
{
	splitTJunctions(cutoutSet, 1.0f);
	mergeVertices(cutoutSet, threshold, width, height);
	eliminateStraightAngles(cutoutSet);
}
+
// Removes vertices of convex loop 'loopIndex' whose perpendicular distance from
// the line through their loop neighbors is within 'tolerance', repeating until
// a full pass removes nothing.
static void cleanCutout(nvidia::Cutout& cutout, uint32_t loopIndex, float tolerance)
{
	nvidia::ConvexLoop& loop = cutout.convexLoops[loopIndex];
	const float tolerance2 = tolerance * tolerance;
	uint32_t oldSize;
	do
	{
		oldSize = loop.polyVerts.size();
		uint32_t size = oldSize;
		for (uint32_t i = 0; i < size; ++i)
		{
			nvidia::PolyVert& v0 = loop.polyVerts[(i + size - 1) % size];
			nvidia::PolyVert& v1 = loop.polyVerts[i];
			nvidia::PolyVert& v2 = loop.polyVerts[(i + 1) % size];
			if (perpendicularDistanceSquared(cutout.vertices[v0.index], cutout.vertices[v1.index], cutout.vertices[v2.index]) <= tolerance2)
			{
				// v1 is (nearly) on the line v0-v2: drop it and re-test this index.
				// (--i on unsigned wraps at i==0, but the loop's ++i brings it back to 0.)
				loop.polyVerts.remove(i);
				--size;
				--i;
			}
		}
	}
	while (loop.polyVerts.size() != oldSize);
}
+
+static bool decomposeCutoutIntoConvexLoops(nvidia::Cutout& cutout, float cleanupTolerance = 0.0f)
+{
+ const uint32_t size = cutout.vertices.size();
+
+ if (size < 3)
+ {
+ return false;
+ }
+
+ // Initialize to one loop, which may not be convex
+ cutout.convexLoops.resize(1);
+ cutout.convexLoops[0].polyVerts.resize(size);
+
+ // See if the winding is ccw:
+
+ // Scale to normalized size to avoid overflows
+ physx::PxBounds3 bounds;
+ bounds.setEmpty();
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ bounds.include(cutout.vertices[i]);
+ }
+ physx::PxVec3 center = bounds.getCenter();
+ physx::PxVec3 extent = bounds.getExtents();
+ if (extent[0] < PX_EPS_F32 || extent[1] < PX_EPS_F32)
+ {
+ return false;
+ }
+ const physx::PxVec3 scale(1.0f / extent[0], 1.0f / extent[1], 0.0f);
+
+ // Find "area" (it will only be correct in sign!)
+ physx::PxVec3 prevV = (cutout.vertices[size - 1] - center).multiply(scale);
+ float area = 0.0f;
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ const physx::PxVec3 v = (cutout.vertices[i] - center).multiply(scale);
+ area += crossZ(prevV, v);
+ prevV = v;
+ }
+
+ if (physx::PxAbs(area) < PX_EPS_F32 * PX_EPS_F32)
+ {
+ return false;
+ }
+
+ const bool ccw = area > 0.0f;
+
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ nvidia::PolyVert& vert = cutout.convexLoops[0].polyVerts[i];
+ vert.index = (uint16_t)(ccw ? i : size - i - 1);
+ vert.flags = 0;
+ }
+
+ const float cleanupTolerance2 = square(cleanupTolerance);
+
+ // Find reflex vertices
+ for (uint32_t i = 0; i < cutout.convexLoops.size();)
+ {
+ nvidia::ConvexLoop& loop = cutout.convexLoops[i];
+ const uint32_t loopSize = loop.polyVerts.size();
+ if (loopSize <= 3)
+ {
+ ++i;
+ continue;
+ }
+ uint32_t j = 0;
+ for (; j < loopSize; ++j)
+ {
+ const physx::PxVec3& v0 = cutout.vertices[loop.polyVerts[(j + loopSize - 1) % loopSize].index];
+ const physx::PxVec3& v1 = cutout.vertices[loop.polyVerts[j].index];
+ const physx::PxVec3& v2 = cutout.vertices[loop.polyVerts[(j + 1) % loopSize].index];
+ const physx::PxVec3 e0 = v1 - v0;
+ if (crossZ(e0, v2 - v1) < 0.0f)
+ {
+ // reflex
+ break;
+ }
+ }
+ if (j < loopSize)
+ {
+ // Find a vertex
+ float minLen2 = PX_MAX_F32;
+ float maxMinDist = -PX_MAX_F32;
+ uint32_t kToUse = 0;
+ uint32_t mToUse = 2;
+ bool cleanSliceFound = false; // A transversal is parallel with an edge
+ for (uint32_t k = 0; k < loopSize; ++k)
+ {
+ const physx::PxVec3& vkPrev = cutout.vertices[loop.polyVerts[(k + loopSize - 1) % loopSize].index];
+ const physx::PxVec3& vk = cutout.vertices[loop.polyVerts[k].index];
+ const physx::PxVec3& vkNext = cutout.vertices[loop.polyVerts[(k + 1) % loopSize].index];
+ const uint32_t mStop = k ? loopSize : loopSize - 1;
+ for (uint32_t m = k + 2; m < mStop; ++m)
+ {
+ const physx::PxVec3& vmPrev = cutout.vertices[loop.polyVerts[(m + loopSize - 1) % loopSize].index];
+ const physx::PxVec3& vm = cutout.vertices[loop.polyVerts[m].index];
+ const physx::PxVec3& vmNext = cutout.vertices[loop.polyVerts[(m + 1) % loopSize].index];
+ const physx::PxVec3 newEdge = vm - vk;
+ if (!directionsXYOrderedCCW(vk - vkPrev, newEdge, vkNext - vk) ||
+ !directionsXYOrderedCCW(vm - vmPrev, -newEdge, vmNext - vm))
+ {
+ continue;
+ }
+ const float len2 = newEdge.magnitudeSquared();
+ float minDist = PX_MAX_F32;
+ for (uint32_t l = 0; l < loopSize; ++l)
+ {
+ const uint32_t l1 = (l + 1) % loopSize;
+ if (l == k || l1 == k || l == m || l1 == m)
+ {
+ continue;
+ }
+ const physx::PxVec3& vl = cutout.vertices[loop.polyVerts[l].index];
+ const physx::PxVec3& vl1 = cutout.vertices[loop.polyVerts[l1].index];
+ const float dist = segmentsIntersectXY(vl, vl1 - vl, vk, newEdge);
+ if (dist < minDist)
+ {
+ minDist = dist;
+ }
+ }
+ if (minDist <= 0.0f)
+ {
+ if (minDist > maxMinDist)
+ {
+ maxMinDist = minDist;
+ kToUse = k;
+ mToUse = m;
+ }
+ }
+ else
+ {
+ if (perpendicularDistanceSquared(vkPrev, vk, vm) <= cleanupTolerance2 ||
+ perpendicularDistanceSquared(vk, vm, vmNext) <= cleanupTolerance2)
+ {
+ if (!cleanSliceFound)
+ {
+ minLen2 = len2;
+ kToUse = k;
+ mToUse = m;
+ }
+ else
+ {
+ if (len2 < minLen2)
+ {
+ minLen2 = len2;
+ kToUse = k;
+ mToUse = m;
+ }
+ }
+ cleanSliceFound = true;
+ }
+ else if (!cleanSliceFound && len2 < minLen2)
+ {
+ minLen2 = len2;
+ kToUse = k;
+ mToUse = m;
+ }
+ }
+ }
+ }
+ nvidia::ConvexLoop& newLoop = cutout.convexLoops.insert();
+ nvidia::ConvexLoop& oldLoop = cutout.convexLoops[i];
+ newLoop.polyVerts.resize(mToUse - kToUse + 1);
+ for (uint32_t n = 0; n <= mToUse - kToUse; ++n)
+ {
+ newLoop.polyVerts[n] = oldLoop.polyVerts[kToUse + n];
+ }
+ newLoop.polyVerts[mToUse - kToUse].flags = 1; // Mark this vertex (and edge that follows) as a split edge
+ oldLoop.polyVerts[kToUse].flags = 1; // Mark this vertex (and edge that follows) as a split edge
+ oldLoop.polyVerts.removeRange(kToUse + 1, (mToUse - (kToUse + 1)));
+ if (cleanupTolerance > 0.0f)
+ {
+ cleanCutout(cutout, i, cleanupTolerance);
+ cleanCutout(cutout, cutout.convexLoops.size() - 1, cleanupTolerance);
+ }
+ }
+ else
+ {
+ if (cleanupTolerance > 0.0f)
+ {
+ cleanCutout(cutout, i, cleanupTolerance);
+ }
+ ++i;
+ }
+ }
+
+ return true;
+}
+
// Walk the boundary of one region in the region map and record it as a closed
// pixel path (Moore-style neighbor tracing over the 8 taxicab directions).
//   trace       - output array of boundary cells (reset before filling)
//   regions     - per-cell region index map; cells != regionIndex are "outside"
//   pathCounts  - per-cell count of traces passing through; incremented for
//                 every cell appended to 'trace'
//   regionIndex - the region whose boundary is being traced
//   startPoint  - a cell on the boundary (outside the region, adjacent to it)
static void traceRegion(physx::Array<POINT2D>& trace, Map2d<uint32_t>& regions, Map2d<uint8_t>& pathCounts, uint32_t regionIndex, const POINT2D& startPoint)
{
	POINT2D t = startPoint;
	trace.reset();
	trace.pushBack(t);
	++pathCounts(t.x, t.y); // Increment path count
	// Find initial path direction.  taxicabSine(dirN) / taxicabSine(dirN + 2)
	// act as the taxicab sine/cosine pair, so dirN enumerates the 8 neighbors;
	// stop at the first neighbor that is NOT part of the region.
	int32_t dirN;
	for (dirN = 1; dirN < 8; ++dirN)
	{
		const POINT2D t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN));
		if (regions(t1.x, t1.y) != regionIndex)
		{
			break;
		}
	}
	bool done = false;
	do
	{
		for (int32_t i = 1; i < 8; ++i)	// Skip direction we just came from
		{
			--dirN;
			const POINT2D t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN));
			if (regions(t1.x, t1.y) != regionIndex)
			{
				// Back at the starting cell: the loop is closed
				if (t1.x == trace[0].x && t1.y == trace[0].y)
				{
					done = true;
					break;
				}
				trace.pushBack(t1);
				t = t1;
				++pathCounts(t.x, t.y); // Increment path count
				dirN += 4;	// Resume the scan from the reverse of the direction just taken
				break;
			}
		}
	}
	while (!done);
}
+
// Build 'cutoutSet' from a raw RGB pixel buffer (3 bytes per pixel, row-major).
// Pipeline:
//   1) Threshold the image into a bitmap of "filled" pixels.
//   2) Flood-fill each unfilled region and trace its boundary (traceRegion).
//   3) Expand regions until adjacent boundary traces coincide, so neighboring
//      cutouts share border cells exactly.
//   4) Convert each trace into a polygonal Cutout, simplify the set, and
//      decompose each cutout into convex loops (cutouts that fail
//      decomposition are dropped).
// When 'periodic' is false, the image is surrounded by an artificial solid
// border so regions touching the image edge are closed.
static void createCutoutSet(nvidia::CutoutSetImpl& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight, float snapThreshold, bool periodic)
{
	cutoutSet.cutouts.reset();
	cutoutSet.periodic = periodic;
	cutoutSet.dimensions = physx::PxVec2((float)bufferWidth, (float)bufferHeight);

	if (!periodic)
	{
		// Non-periodic sets get one extra unit per axis to account for the border
		cutoutSet.dimensions[0] += 1.0f;
		cutoutSet.dimensions[1] += 1.0f;
	}

	if (pixelBuffer == NULL || bufferWidth == 0 || bufferHeight == 0)
	{
		return;
	}

	const int borderPad = periodic ? 0 : 2; // Padded for borders if not periodic
	const int originCoord = periodic ? 0 : 1;

	BitMap map(bufferWidth + borderPad, bufferHeight + borderPad, 0);
	map.setOrigin((uint32_t)originCoord, (uint32_t)originCoord);

	// Threshold pass.  The three weights sum to exactly 2^24, so 'pix' is an
	// 8.24 fixed-point luminance in [0, 255] using approximately the classic
	// 0.30/0.59/0.11 RGB weights; (pix >> 28) != 0 means luminance >= 16.
	for (uint32_t y = 0; y < bufferHeight; ++y)
	{
		for (uint32_t x = 0; x < bufferWidth; ++x)
		{
			const uint32_t pix = 5033165 * (uint32_t)pixelBuffer[0] + 9898557 * (uint32_t)pixelBuffer[1] + 1845494 * (uint32_t)pixelBuffer[2];
			pixelBuffer += 3;
			if ((pix >> 28) != 0)
			{
				map.set((int32_t)x, (int32_t)y);
			}
		}
	}

	// Add borders if not tiling
	if (!periodic)
	{
		for (int32_t x = -1; x <= (int32_t)bufferWidth; ++x)
		{
			map.set(x, -1);
			map.set(x, (int32_t)bufferHeight);
		}
		for (int32_t y = -1; y <= (int32_t)bufferHeight; ++y)
		{
			map.set(-1, y);
			map.set((int32_t)bufferWidth, y);
		}
	}

	// Now search for regions

	// Create a region map
	Map2d<uint32_t> regions(bufferWidth + borderPad, bufferHeight + borderPad, 0xFFFFFFFF); // Initially an invalid value
	regions.setOrigin((uint32_t)originCoord, (uint32_t)originCoord);

	// Create a path counting map
	Map2d<uint8_t> pathCounts(bufferWidth + borderPad, bufferHeight + borderPad, 0);
	pathCounts.setOrigin((uint32_t)originCoord, (uint32_t)originCoord);

	// Bump path counts on borders, so border cells are never treated as
	// exclusively owned by a single trace during expansion below
	if (!periodic)
	{
		for (int32_t x = -1; x <= (int32_t)bufferWidth; ++x)
		{
			pathCounts(x, -1) = 1;
			pathCounts(x, (int32_t)bufferHeight) = 1;
		}
		for (int32_t y = -1; y <= (int32_t)bufferHeight; ++y)
		{
			pathCounts(-1, y) = 1;
			pathCounts((int32_t)bufferWidth, y) = 1;
		}
	}

	physx::Array<POINT2D> stack;
	physx::Array<POINT2D> traceStarts;
	physx::Array< physx::Array<POINT2D>* > traces;

	// Initial fill of region maps and path maps
	for (int32_t y = 0; y < (int32_t)bufferHeight; ++y)
	{
		for (int32_t x = 0; x < (int32_t)bufferWidth; ++x)
		{
			if (map.read(x-1, y) && !map.read(x, y))
			{
				// Found an empty spot next to a filled spot
				POINT2D t(x - 1, y);
				const uint32_t regionIndex = traceStarts.size();
				traceStarts.pushBack(t); // Save off initial point
				traces.insert();	// This must be the same size as traceStarts
				// Traces are heap-allocated with PX_ALLOC + placement new and must
				// be destroyed with an explicit destructor call + PX_FREE (see below)
				traces.back() = (physx::Array<POINT2D>*)PX_ALLOC(sizeof(physx::Array<POINT2D>), PX_DEBUG_EXP("CutoutPoint2DSet"));
				new(traces.back()) physx::Array<POINT2D>;
				// Flood fill region map (4-connected)
				stack.pushBack(POINT2D(x, y));
				do
				{
					const POINT2D s = stack.back();
					stack.popBack();
					map.set(s.x, s.y);
					regions(s.x, s.y) = regionIndex;
					POINT2D n;
					for (int32_t i = 0; i < 4; ++i)
					{
						// (i0 - i1, i0 + i1 - 1) enumerates the four axis neighbors
						const int32_t i0 = i & 1;
						const int32_t i1 = (i >> 1) & 1;
						n.x = s.x + i0 - i1;
						n.y = s.y + i0 + i1 - 1;
						if (!map.read(n.x, n.y))
						{
							stack.pushBack(n);
						}
					}
				}
				while (stack.size());
				// Trace region
				PX_ASSERT(map.read(t.x, t.y));
				physx::Array<POINT2D>& trace = *traces[regionIndex];
				traceRegion(trace, regions, pathCounts, regionIndex, t);
			}
		}
	}

	uint32_t cutoutCount = traces.size();

	// Now expand regions until the paths completely overlap: a path cell with
	// count 1 is owned by a single trace only, so the region claims it and the
	// boundary is retraced; repeat until no trace changes (or we give up).
	bool somePathChanged;
	int sanityCounter = 1000;	// Hard iteration cap to guarantee termination
	bool abort = false;
	do
	{
		somePathChanged = false;
		for (uint32_t i = 0; i < cutoutCount; ++i)
		{
			bool pathChanged = false;
			physx::Array<POINT2D>& trace = *traces[i];
			for (uint32_t j = 0; j < trace.size(); ++j)
			{
				const POINT2D& t = trace[j];
				if (pathCounts(t.x, t.y) == 1)
				{
					regions(t.x, t.y) = i;
					pathChanged = true;
				}
			}
			if (pathChanged)
			{
				// Recalculate cutout
				// Decrement pathCounts
				for (uint32_t j = 0; j < trace.size(); ++j)
				{
					const POINT2D& t = trace[j];
					--pathCounts(t.x, t.y);
				}
				// Erase trace
				// Calculate new start point: walk left until we exit the region,
				// bounded by the map width so a corrupt region map cannot hang us
				POINT2D& t = traceStarts[i];
				int stop = (int)cutoutSet.dimensions.x;
				while (regions(t.x, t.y) == i)
				{
					--t.x;
					if(--stop < 0)
					{
						// There is an error; abort
						break;
					}
				}
				if(stop < 0)
				{
					// Release traces and abort
					abort = true;
					somePathChanged = false;
					break;
				}
				traceRegion(trace, regions, pathCounts, i, t);
				somePathChanged = true;
			}
		}
		if (--sanityCounter <= 0)
		{
			abort = true;
			break;
		}
	}
	while (somePathChanged);

	if (abort)
	{
		// Free all traces now; cutoutCount = 0 ensures the loops below do nothing
		for (uint32_t i = 0; i < cutoutCount; ++i)
		{
			traces[i]->~Array<POINT2D>();
			PX_FREE(traces[i]);
		}
		cutoutCount = 0;
	}

	// Create cutouts
	cutoutSet.cutouts.resize(cutoutCount);
	for (uint32_t i = 0; i < cutoutCount; ++i)
	{
		createCutout(cutoutSet.cutouts[i], *traces[i], snapThreshold, bufferWidth, bufferHeight, !cutoutSet.periodic);
	}

	simplifyCutoutSetImpl(cutoutSet, snapThreshold, bufferWidth, bufferHeight);

	// Release traces
	for (uint32_t i = 0; i < cutoutCount; ++i)
	{
		traces[i]->~Array<POINT2D>();
		PX_FREE(traces[i]);
	}

	// Decompose each cutout in the set into convex loops, compacting the array
	// so that cutouts which fail decomposition are dropped
	uint32_t cutoutSetSize = 0;
	for (uint32_t i = 0; i < cutoutSet.cutouts.size(); ++i)
	{
		bool success = decomposeCutoutIntoConvexLoops(cutoutSet.cutouts[i]);
		if (success)
		{
			if (cutoutSetSize != i)
			{
				cutoutSet.cutouts[cutoutSetSize] = cutoutSet.cutouts[i];
			}
			++cutoutSetSize;
		}
	}
	cutoutSet.cutouts.resize(cutoutSetSize);
}
+
+class Matrix22
+{
+public:
+ //! Default constructor
+ Matrix22()
+ {}
+
+ //! Construct from two base vectors
+ Matrix22(const physx::PxVec2& col0, const physx::PxVec2& col1)
+ : column0(col0), column1(col1)
+ {}
+
+ //! Construct from float[4]
+ explicit Matrix22(float values[]):
+ column0(values[0],values[1]),
+ column1(values[2],values[3])
+ {
+ }
+
+ //! Copy constructor
+ Matrix22(const Matrix22& other)
+ : column0(other.column0), column1(other.column1)
+ {}
+
+ //! Assignment operator
+ Matrix22& operator=(const Matrix22& other)
+ {
+ column0 = other.column0;
+ column1 = other.column1;
+ return *this;
+ }
+
+ //! Set to identity matrix
+ static Matrix22 createIdentity()
+ {
+ return Matrix22(physx::PxVec2(1,0), physx::PxVec2(0,1));
+ }
+
+ //! Set to zero matrix
+ static Matrix22 createZero()
+ {
+ return Matrix22(physx::PxVec2(0.0f), physx::PxVec2(0.0f));
+ }
+
+ //! Construct from diagonal, off-diagonals are zero.
+ static Matrix22 createDiagonal(const physx::PxVec2& d)
+ {
+ return Matrix22(physx::PxVec2(d.x,0.0f), physx::PxVec2(0.0f,d.y));
+ }
+
+
+ //! Get transposed matrix
+ Matrix22 getTranspose() const
+ {
+ const physx::PxVec2 v0(column0.x, column1.x);
+ const physx::PxVec2 v1(column0.y, column1.y);
+
+ return Matrix22(v0,v1);
+ }
+
+ //! Get the real inverse
+ Matrix22 getInverse() const
+ {
+ const float det = getDeterminant();
+ Matrix22 inverse;
+
+ if(det != 0)
+ {
+ const float invDet = 1.0f/det;
+
+ inverse.column0[0] = invDet * column1[1];
+ inverse.column0[1] = invDet * (-column0[1]);
+
+ inverse.column1[0] = invDet * (-column1[0]);
+ inverse.column1[1] = invDet * column0[0];
+
+ return inverse;
+ }
+ else
+ {
+ return createIdentity();
+ }
+ }
+
+ //! Get determinant
+ float getDeterminant() const
+ {
+ return column0[0] * column1[1] - column0[1] * column1[0];
+ }
+
+ //! Unary minus
+ Matrix22 operator-() const
+ {
+ return Matrix22(-column0, -column1);
+ }
+
+ //! Add
+ Matrix22 operator+(const Matrix22& other) const
+ {
+ return Matrix22( column0+other.column0,
+ column1+other.column1);
+ }
+
+ //! Subtract
+ Matrix22 operator-(const Matrix22& other) const
+ {
+ return Matrix22( column0-other.column0,
+ column1-other.column1);
+ }
+
+ //! Scalar multiplication
+ Matrix22 operator*(float scalar) const
+ {
+ return Matrix22(column0*scalar, column1*scalar);
+ }
+
+ //! Matrix vector multiplication (returns 'this->transform(vec)')
+ physx::PxVec2 operator*(const physx::PxVec2& vec) const
+ {
+ return transform(vec);
+ }
+
+ //! Matrix multiplication
+ Matrix22 operator*(const Matrix22& other) const
+ {
+ //Rows from this <dot> columns from other
+ //column0 = transform(other.column0) etc
+ return Matrix22(transform(other.column0), transform(other.column1));
+ }
+
+ // a <op>= b operators
+
+ //! Equals-add
+ Matrix22& operator+=(const Matrix22& other)
+ {
+ column0 += other.column0;
+ column1 += other.column1;
+ return *this;
+ }
+
+ //! Equals-sub
+ Matrix22& operator-=(const Matrix22& other)
+ {
+ column0 -= other.column0;
+ column1 -= other.column1;
+ return *this;
+ }
+
+ //! Equals scalar multiplication
+ Matrix22& operator*=(float scalar)
+ {
+ column0 *= scalar;
+ column1 *= scalar;
+ return *this;
+ }
+
+ //! Element access, mathematical way!
+ float operator()(unsigned int row, unsigned int col) const
+ {
+ return (*this)[col][(int)row];
+ }
+
+ //! Element access, mathematical way!
+ float& operator()(unsigned int row, unsigned int col)
+ {
+ return (*this)[col][(int)row];
+ }
+
+ // Transform etc
+
+ //! Transform vector by matrix, equal to v' = M*v
+ physx::PxVec2 transform(const physx::PxVec2& other) const
+ {
+ return column0*other.x + column1*other.y;
+ }
+
+ physx::PxVec2& operator[](unsigned int num) {return (&column0)[num];}
+ const physx::PxVec2& operator[](unsigned int num) const {return (&column0)[num];}
+
+ //Data, see above for format!
+
+ physx::PxVec2 column0, column1; //the two base vectors
+};
+
+PX_INLINE bool calculateUVMapping(const nvidia::ExplicitRenderTriangle& triangle, physx::PxMat33& theResultMapping)
+{
+ physx::PxMat33 rMat;
+ physx::PxMat33 uvMat;
+ for (unsigned col = 0; col < 3; ++col)
+ {
+ rMat[col] = triangle.vertices[col].position;
+ uvMat[col] = physx::PxVec3(triangle.vertices[col].uv[0][0], triangle.vertices[col].uv[0][1], 1.0f);
+ }
+
+ if (uvMat.getDeterminant() == 0.0f)
+ {
+ return false;
+ }
+
+ theResultMapping = rMat*uvMat.getInverse();
+
+ return true;
+}
+
// Find a triangle in the mesh facing 'theDir' and derive a position/UV mapping
// from it (see the triangle overload above).
// Selection rule: among triangles whose normal lies within 1 degree of theDir,
// pick the one with the greatest area; if no triangle is inside that cone,
// fall back to the triangle whose normal is most aligned with theDir.
static bool calculateUVMapping(nvidia::ExplicitHierarchicalMesh& theHMesh, const physx::PxVec3& theDir, physx::PxMat33& theResultMapping)
{
	physx::PxVec3 cutoutDir( theDir );
	cutoutDir.normalize( );

	const float cosineThreshold = physx::PxCos(3.141593f / 180); // 1 degree

	nvidia::ExplicitRenderTriangle* triangleToUse = NULL;
	float greatestCosine = -PX_MAX_F32;
	float greatestArea = 0.0f; // for normals within the threshold
	for ( uint32_t partIndex = 0; partIndex < theHMesh.partCount(); ++partIndex )
	{
		nvidia::ExplicitRenderTriangle* theTriangles = theHMesh.meshTriangles( partIndex );
		uint32_t triangleCount = theHMesh.meshTriangleCount( partIndex );
		for ( uint32_t tIndex = 0; tIndex < triangleCount; ++tIndex )
		{
			nvidia::ExplicitRenderTriangle& theTriangle = theTriangles[tIndex];
			physx::PxVec3 theEdge1 = theTriangle.vertices[1].position - theTriangle.vertices[0].position;
			physx::PxVec3 theEdge2 = theTriangle.vertices[2].position - theTriangle.vertices[0].position;
			physx::PxVec3 theNormal = theEdge1.cross( theEdge2 );
			float theArea = theNormal.normalize(); // twice the area, but that's ok

			// Skip degenerate triangles
			if (theArea == 0.0f)
			{
				continue;
			}

			const float cosine = cutoutDir.dot(theNormal);

			if (cosine < cosineThreshold)
			{
				// Outside the 1-degree cone: only track the best cosine while no
				// in-cone triangle has been found yet (greatestArea == 0.0f)
				if (cosine > greatestCosine && greatestArea == 0.0f)
				{
					greatestCosine = cosine;
					triangleToUse = &theTriangle;
				}
			}
			else
			{
				// Inside the cone: prefer the largest triangle
				if (theArea > greatestArea)
				{
					greatestArea = theArea;
					triangleToUse = &theTriangle;
				}
			}
		}
	}

	// Mesh had no non-degenerate triangles
	if (triangleToUse == NULL)
	{
		return false;
	}

	return calculateUVMapping(*triangleToUse, theResultMapping);
}
+
+namespace nvidia
+{
+namespace apex
+{
+
+PX_INLINE void serialize(physx::PxFileBuf& stream, const PolyVert& p)
+{
+ stream << p.index << p.flags;
+}
+
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, PolyVert& p)
+{
+ // original version
+ PX_UNUSED(version);
+ stream >> p.index >> p.flags;
+}
+
+PX_INLINE void serialize(physx::PxFileBuf& stream, const ConvexLoop& l)
+{
+ apex::serialize(stream, l.polyVerts);
+}
+
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, ConvexLoop& l)
+{
+ // original version
+ apex::deserialize(stream, version, l.polyVerts);
+}
+
+PX_INLINE void serialize(physx::PxFileBuf& stream, const Cutout& c)
+{
+ apex::serialize(stream, c.vertices);
+ apex::serialize(stream, c.convexLoops);
+}
+
+PX_INLINE void deserialize(physx::PxFileBuf& stream, uint32_t version, Cutout& c)
+{
+ // original version
+ apex::deserialize(stream, version, c.vertices);
+ apex::deserialize(stream, version, c.convexLoops);
+}
+
+void CutoutSetImpl::serialize(physx::PxFileBuf& stream) const
+{
+ stream << (uint32_t)Current;
+
+ apex::serialize(stream, cutouts);
+}
+
+void CutoutSetImpl::deserialize(physx::PxFileBuf& stream)
+{
+ const uint32_t version = stream.readDword();
+
+ apex::deserialize(stream, version, cutouts);
+}
+
+}
+} // end namespace nvidia::apex
+
+namespace FractureTools
+{
+CutoutSet* createCutoutSet()
+{
+ return new nvidia::CutoutSetImpl();
+}
+
+void buildCutoutSet(CutoutSet& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight, float snapThreshold, bool periodic)
+{
+ ::createCutoutSet(*(nvidia::CutoutSetImpl*)&cutoutSet, pixelBuffer, bufferWidth, bufferHeight, snapThreshold, periodic);
+}
+
+bool calculateCutoutUVMapping(nvidia::ExplicitHierarchicalMesh& hMesh, const physx::PxVec3& targetDirection, physx::PxMat33& theMapping)
+{
+ return ::calculateUVMapping(hMesh, targetDirection, theMapping);
+}
+
+bool calculateCutoutUVMapping(const nvidia::ExplicitRenderTriangle& targetDirection, physx::PxMat33& theMapping)
+{
+ return ::calculateUVMapping(targetDirection, theMapping);
+}
+} // namespace FractureTools
+
+#endif
+
+#ifdef _MANAGED
+#pragma managed(pop)
+#endif
diff --git a/APEX_1.4/shared/internal/src/authoring/Fracturing.cpp b/APEX_1.4/shared/internal/src/authoring/Fracturing.cpp
new file mode 100644
index 00000000..76a998da
--- /dev/null
+++ b/APEX_1.4/shared/internal/src/authoring/Fracturing.cpp
@@ -0,0 +1,7349 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+//#ifdef _MANAGED
+//#pragma managed(push, off)
+//#endif
+
+#include <stdarg.h>
+#include "Apex.h"
+#include "ApexSharedUtils.h"
+
+#include "authoring/Fracturing.h"
+#include "authoring/ApexGSA.h"
+#include "PsUserAllocated.h"
+#include "ApexRand.h"
+#include "PxErrorCallback.h"
+#include "PsString.h"
+#include "ApexUsingNamespace.h"
+#include "PxPlane.h"
+#include "PsMathUtils.h"
+#include "PsAllocator.h"
+#include "ConvexDecomposition.h"
+#include "Noise.h"
+#include "DestructibleAsset.h"
+#include "Link.h"
+#include "RenderDebugInterface.h"
+#include "PsSort.h"
+#include "PsInlineArray.h"
+#include "PsBitUtils.h"
+
+#define INCREMENTAL_GSA 0
+
+/////////////////////////////////////////////////////////////////////////////
+
+#include <stdio.h>
+
+#ifndef WITHOUT_APEX_AUTHORING
+#include "ApexSharedSerialization.h"
+
+
+using namespace nvidia; // !? Need to do this for PX_ALLOCA!?
+
+
+#define MAX_ALLOWED_ESTIMATED_CHUNK_TOTAL 10000
+
+#define CUTOUT_MAP_BOUNDS_TOLERANCE 0.0001f
+#define MESH_INSTANACE_TOLERANCE 0.025f
+
// Default BSP build parameters, used when the caller does not supply overrides
static ApexCSG::BSPBuildParameters
gDefaultBuildParameters;

// File-scope fracturing settings (values are consumed at their use sites in
// this file; setters are presumably the library's configuration entry points)
static bool gIslandGeneration = false;					// island generation toggle
static unsigned gMicrogridSize = 65536;					// micro-grid resolution for CSG operations
static BSPOpenMode::Enum gMeshMode = BSPOpenMode::Automatic;	// how open (non-watertight) meshes are handled
static int gVerbosity = 0;						// message filter level; see outputMessage()
+
+
+static CollisionVolumeDesc getVolumeDesc(const CollisionDesc& collisionDesc, unsigned depth)
+{
+ return collisionDesc.mDepthCount > 0 ? collisionDesc.mVolumeDescs[PxMin(depth, collisionDesc.mDepthCount-1)] : CollisionVolumeDesc();
+}
+
+PX_INLINE float extentDistance(float min0, float max0, float min1, float max1)
+{
+ return PxMax(min0 - max1, min1 - max0);
+}
+
+
// Build a rotation matrix whose z axis is the (normalized) input direction and
// whose roll about that axis is uniformly random in [-pi, pi).
static physx::PxMat44 randomRotationMatrix(physx::PxVec3 zAxis, nvidia::QDSRand& rnd)
{
	physx::PxMat44 rot;
	zAxis.normalize();
	// Index of zAxis's dominant component
	uint32_t maxDir = physx::PxAbs(zAxis.x) > physx::PxAbs(zAxis.y) ?
	                  (physx::PxAbs(zAxis.x) > physx::PxAbs(zAxis.z) ? 0u : 2u) :
	                  (physx::PxAbs(zAxis.y) > physx::PxAbs(zAxis.z) ? 1u : 2u);
	// Seed xAxis with a coordinate axis that is NOT the dominant one, so the
	// cross product below is well conditioned
	physx::PxVec3 xAxis = physx::PxMat33(physx::PxIdentity)[(maxDir + 1) % 3];
	physx::PxVec3 yAxis = zAxis.cross(xAxis);
	yAxis.normalize();
	xAxis = yAxis.cross(zAxis);	// {xAxis, yAxis, zAxis} is now an orthonormal right-handed frame

	// Random roll angle about the z axis
	const float angle = rnd.getScaled(-physx::PxPi, physx::PxPi);
	const float c = physx::PxCos(angle);
	const float s = physx::PxSin(angle);

	// Rotate the x/y basis vectors by 'angle' about z; no translation
	rot.column0 = physx::PxVec4(c*xAxis + s*yAxis, 0.0f);
	rot.column1 = physx::PxVec4(c*yAxis - s*xAxis, 0.0f);
	rot.column2 = physx::PxVec4(zAxis, 0.0f);
	rot.column3 = physx::PxVec4(0.0f, 0.0f, 0.0f, 1.0f);

	return rot;
}
+
+PX_INLINE physx::PxVec3 randomPositionInTriangle(const physx::PxVec3& v1, const physx::PxVec3& v2, const physx::PxVec3& v3, nvidia::QDSRand& rnd)
+{
+ const physx::PxVec3 d1 = v2 - v1;
+ const physx::PxVec3 d2 = v3 - v1;
+ float c1 = rnd.getUnit();
+ float c2 = rnd.getUnit();
+ const float d = 1.0f - (c1+c2);
+ if (d < 0.0f)
+ {
+ c1 += d;
+ c2 += d;
+ }
+ return v1 + c1*d1 + c2*d2;
+}
+
+
// Used by VoronoiCellPlaneIterator
// List node for the ordered site pair (index0, index1), carrying the bisector
// plane between the two sites.  Every pair (i, j) is associated with its
// reciprocal (j, i); removing either node also severs the reciprocal link
// (see remove()), keeping the symmetric structure consistent.  List mechanics
// come from the Link base class (Link.h).
class ReciprocalSitePairLink : public Link
{
public:
	ReciprocalSitePairLink() : Link(), m_recip(NULL)
	{
	}
	// Copies the pair data only -- deliberately does NOT copy list linkage or
	// the reciprocal pointer; the copy starts out unlinked and unpaired.
	ReciprocalSitePairLink(const ReciprocalSitePairLink& other) : Link()
	{
		index0 = other.index0;
		index1 = other.index1;
		plane = other.plane;
		m_recip = NULL;
	}
	~ReciprocalSitePairLink()
	{
		// Unlink and clear the reciprocal's back-pointer
		remove();
	}

	// Pair this node with its reciprocal; both nodes must be unpaired
	void setRecip(ReciprocalSitePairLink& recip)
	{
		PX_ASSERT(m_recip == NULL && recip.m_recip == NULL);
		m_recip = &recip;
		recip.m_recip = this;
	}

	ReciprocalSitePairLink* getRecip() const
	{
		return m_recip;
	}

	// Typed wrapper over Link::getAdj (which = list direction)
	ReciprocalSitePairLink* getAdj(uint32_t which) const
	{
		return static_cast<ReciprocalSitePairLink*>(Link::getAdj(which));
	}

	// Unlink from the list and break the reciprocal association on both sides
	void remove()
	{
		if (m_recip)
		{
			m_recip->m_recip = NULL;
			m_recip = NULL;
		}
		Link::remove();
	}

	uint32_t index0, index1;	// this node represents the ordered site pair (index0, index1)
	physx::PxPlane plane;		// bisector plane; set by VoronoiCellPlaneIterator with the normal pointing from site index0 toward site index1

private:
	ReciprocalSitePairLink* m_recip;	// the (index1, index0) node, or NULL if unpaired
};
+
+
// Construction data for SiteMidPlaneIterator: the [first, stop) range of
// site-pair links whose planes are to be iterated.
struct SiteMidPlaneIteratorInit
{
	SiteMidPlaneIteratorInit() : first(NULL), stop(NULL) {}

	ReciprocalSitePairLink* first;	// first node of the range
	ReciprocalSitePairLink* stop;	// one-past-the-end sentinel (not visited)
};
+
// Iterates the bisector planes of a [first, stop) run of site-pair links,
// presenting each as a normalized ApexCSG::Plane for consumption by GSA.
class SiteMidPlaneIterator
{
public:
	SiteMidPlaneIterator(const SiteMidPlaneIteratorInit& listBounds) : current(listBounds.first), stop(listBounds.stop) {}

	// True while the iterator has not reached the end sentinel
	bool valid() const
	{
		return current != stop;
	}

	// Advance to the next link (list direction 1)
	void inc()
	{
		current = current->getAdj(1);
	}

	// The current link's bisector plane, converted to GSA precision and normalized
	ApexCSG::Plane plane() const
	{
		const physx::PxPlane& midPlane = current->plane;
		ApexCSG::Plane plane(ApexCSG::Dir((ApexCSG::Real)midPlane.n.x, (ApexCSG::Real)midPlane.n.y,(ApexCSG::Real)midPlane.n.z), (ApexCSG::Real)midPlane.d);
		plane.normalize();
		return plane;
	}

private:
	ReciprocalSitePairLink* current;	// current position in the run
	ReciprocalSitePairLink* stop;		// end sentinel
};
+
// Convex polyhedron defined by the half-spaces of a run of site mid-planes.
// Passed to ApexCSG::GSA::vs3d_test to decide whether the intersection of the
// half-spaces is non-empty (used to test whether a candidate plane is
// essential to a Voronoi cell -- see VoronoiCellPlaneIterator::prepareOutput).
class SiteMidPlaneIntersection : public ApexCSG::GSA::StaticConvexPolyhedron<SiteMidPlaneIterator, SiteMidPlaneIteratorInit>
{
public:
	// Define the plane set as the [first, stop) range of links
	void setPlanes(ReciprocalSitePairLink* first, ReciprocalSitePairLink* stop)
	{
		m_initValues.first = first;
		m_initValues.stop = stop;
	}

	// Re-define the range.  When INCREMENTAL_GSA is enabled, also patch the
	// cached GSA support state m_S in place: any cached column exactly equal
	// to oldFlipPlane or newFlipPlane (the planes flipped since the previous
	// query) is negated, avoiding a full state rebuild.  With INCREMENTAL_GSA
	// disabled this behaves exactly like setPlanes().
	void replacePlanes(ReciprocalSitePairLink* first, ReciprocalSitePairLink* stop, const physx::PxPlane& oldFlipPlane, const physx::PxPlane& newFlipPlane)
	{
		m_initValues.first = first;
		m_initValues.stop = stop;
#if INCREMENTAL_GSA
		const ApexCSG::Plane oldFlipGSAPlane = ApexCSG::Plane(ApexCSG::Dir((ApexCSG::Real)oldFlipPlane.n.x, (ApexCSG::Real)oldFlipPlane.n.y, (ApexCSG::Real)oldFlipPlane.n.z), (ApexCSG::Real)oldFlipPlane.d);
		const ApexCSG::Plane newFlipGSAPlane = ApexCSG::Plane(ApexCSG::Dir((ApexCSG::Real)newFlipPlane.n.x, (ApexCSG::Real)newFlipPlane.n.y, (ApexCSG::Real)newFlipPlane.n.z), (ApexCSG::Real)newFlipPlane.d);
		for (int i = 0; i < 4; ++i)
		{
			if (m_S(0,i) == oldFlipGSAPlane(0) && m_S(1,i) == oldFlipGSAPlane(1) && m_S(2,i) == oldFlipGSAPlane(2) && m_S(3,i) == oldFlipGSAPlane(3))
			{
				m_S.setCol(i, -oldFlipGSAPlane);
			}
			if (m_S(0,i) == newFlipGSAPlane(0) && m_S(1,i) == newFlipGSAPlane(1) && m_S(2,i) == newFlipGSAPlane(2) && m_S(3,i) == newFlipGSAPlane(3))
			{
				m_S.setCol(i, -newFlipGSAPlane);
			}
		}
#else
		(void)oldFlipPlane;
		(void)newFlipPlane;
#endif
	}

	// Clear the plane range
	void resetPlanes()
	{
		m_initValues.first = NULL;
		m_initValues.stop = NULL;
	}
};
+
// Voronoi decomposition utility
// Iterates over the cells of the Voronoi diagram of a site set.  For each cell
// it exposes the cell's site index and the planes bounding the cell: any
// user-supplied bound planes first, followed by the essential site bisector
// planes.  Typical use:
//   for (VoronoiCellPlaneIterator it(sites, n); it.valid(); it.inc())
//       use(it.cellIndex(), it.cellPlanes(), it.cellPlaneCount());
class VoronoiCellPlaneIterator
{
public:
	VoronoiCellPlaneIterator(const physx::PxVec3* sites, uint32_t siteCount, const physx::PxPlane* boundPlanes = NULL, uint32_t boundPlaneCount = 0, uint32_t startSiteIndex = 0);

	// False once all cells have been produced (or if construction failed)
	bool valid() const
	{
		return m_valid;
	}

	// Site index of the current cell (0xFFFFFFFF when invalid)
	uint32_t cellIndex() const
	{
		return m_valid ? m_cellIndex : 0xFFFFFFFF;
	}

	// Planes bounding the current cell, or NULL when invalid
	const physx::PxPlane* cellPlanes() const
	{
		return m_valid ? m_cellPlanes.begin() : NULL;
	}

	// Number of planes returned by cellPlanes() (0 when invalid)
	uint32_t cellPlaneCount() const
	{
		return m_valid ? m_cellPlanes.size() : 0;
	}

	// Advance to the next cell; becomes invalid when the pair list is exhausted
	void inc()
	{
		if (m_valid)
		{
			if (m_startPair != &m_listRoot)
			{
				prepareOutput();
			}
			else
			{
				m_valid = false;
			}
		}
	}

private:
	// Computes the current cell's planes and advances m_startPair to the next cell
	void prepareOutput();

	// Input
	const physx::PxVec3* m_sites;
	uint32_t m_siteCount;
	uint32_t m_boundPlaneCount;

	// State and intermediate data
	physx::Array<ReciprocalSitePairLink> m_sitePairs; // A symmetric array of site pairs and their bisector planes, in order (i major, j minor), with the diagonal removed
	SiteMidPlaneIntersection m_test; // Used to see if a plane is necessary for a cell
	ReciprocalSitePairLink* m_startPair; // Current start site pair
	ReciprocalSitePairLink m_listRoot; // A stopping node
	
	// Output
	bool m_valid;
	uint32_t m_cellIndex;
	physx::Array<physx::PxPlane> m_cellPlanes;
};
+
// Construct the iterator.
//   sites/siteCount           - the Voronoi sites
//   boundPlanes/boundPlaneCount - optional planes prepended to every cell's plane list
//   startSiteIndex            - index of the first cell to produce
// Builds the full ordered-pair array (all (i, j), i != j, diagonal removed),
// links each pair with its reciprocal, chains all pairs into one list, then
// computes the first cell's planes.
VoronoiCellPlaneIterator::VoronoiCellPlaneIterator(const physx::PxVec3* sites, uint32_t siteCount, const physx::PxPlane* boundPlanes, uint32_t boundPlaneCount, uint32_t startSiteIndex)
{
	m_valid = false;

	if (sites == NULL || startSiteIndex >= siteCount)
	{
		return;
	}

	m_valid = true;

	m_sites = sites;
	m_siteCount = siteCount;
	m_cellIndex = startSiteIndex;
	m_boundPlaneCount = boundPlanes != NULL ? boundPlaneCount : 0;

	// Add the bound planes
	m_cellPlanes.reserve(m_boundPlaneCount);
	for (uint32_t boundPlaneNum = 0; boundPlaneNum < m_boundPlaneCount; ++boundPlaneNum)
	{
		m_cellPlanes.pushBack(boundPlanes[boundPlaneNum]);
	}

	// This should mean m_siteCount = 1. In this case, there are no planes (besides the bound planes)
	if (m_siteCount < 2)
	{
		m_startPair = &m_listRoot; // Causes termination after one iteration
		return;
	}

	// Fill in the pairs
	m_sitePairs.resize(m_siteCount*(m_siteCount-1));
	uint32_t pairIndex = 0;
	for (uint32_t i = 0; i < m_siteCount; ++i)
	{
		for (uint32_t j = 0; j < m_siteCount; ++j)
		{
			if (j == i)
			{
				continue;
			}
			ReciprocalSitePairLink& pair = m_sitePairs[pairIndex];
			if (j > i)
			{
				// In this lexicographic layout, the reciprocal pair (j, i) lies
				// exactly (j-i)*(m_siteCount-2)+1 slots after pair (i, j)
				pair.setRecip(m_sitePairs[pairIndex+(j-i)*(m_siteCount-2)+1]);
			}
			pair.index0 = i;
			pair.index1 = j;
			// Bisector plane through the midpoint; normal points from site i toward site j
			pair.plane = physx::PxPlane(0.5f*(m_sites[j] + m_sites[i]), (m_sites[j] - m_sites[i]).getNormalized());
			// Link together into a single loop
			if (pairIndex > 0)
			{
				m_sitePairs[pairIndex-1].setAdj(1, &pair);
			}

			++pairIndex;
		}
	}

	// Start with the first pair in the array
	m_startPair = &m_sitePairs[0];

	// Create a list root
	m_listRoot.setAdj(1, m_startPair);

	// Find first pair with the desired index0
	while (m_startPair->index0 != startSiteIndex && m_startPair != &m_listRoot)
	{
		m_startPair = m_startPair->getAdj(1);
	}

	// Produce the first cell's planes
	prepareOutput();
}
+
// Compute the bounding planes of the current cell (site m_startPair->index0)
// into m_cellPlanes (after the user bound planes), and advance m_startPair to
// the first pair of the next cell.
//
// Essentiality test: for each candidate bisector plane, temporarily flip it
// and run the GSA intersection test over the cell's plane set.  If the flipped
// set still has a non-empty intersection, the plane truly cuts the cell and is
// kept; otherwise it is redundant and the pair (with its reciprocal) is
// removed from the list, shrinking later cells' candidate sets as well.
void VoronoiCellPlaneIterator::prepareOutput()
{
	if (!m_valid)
	{
		return;
	}

	m_cellIndex = m_startPair->index0;

	// Find the first pair with a different first site index, which is our end-marker
	ReciprocalSitePairLink* stopPair = m_startPair->getAdj(1);
	while (stopPair != &m_listRoot && stopPair->index0 == m_cellIndex)
	{
		stopPair = stopPair->getAdj(1);
	}

	PX_ASSERT(stopPair == &m_listRoot || stopPair->index0 == m_startPair->index0+1);

	// Reset planes (keeping bound planes)
	m_cellPlanes.resize(m_boundPlaneCount);

	// Now iterate through this subset of the list, flipping one plane each time
	ReciprocalSitePairLink* testPlanePair = m_startPair;
	bool firstGSAUse = true;	// stays true unless INCREMENTAL_GSA is enabled (see below)
	physx::PxPlane lastPlane;
	do
	{
		ReciprocalSitePairLink* nextPlanePair = testPlanePair->getAdj(1);
		testPlanePair->plane = physx::PxPlane(-testPlanePair->plane.n, -testPlanePair->plane.d); // Flip
		if (firstGSAUse)
		{
			m_test.setPlanes(m_startPair, stopPair);
#if INCREMENTAL_GSA
			firstGSAUse = false;
#endif
		}
		else
		{
			// Incremental path: reuse GSA state, only swapping the two planes
			// whose orientation changed since the last query
			m_test.replacePlanes(m_startPair, stopPair, lastPlane, testPlanePair->plane);
		}
		lastPlane = testPlanePair->plane;
		const bool keep = (1 == ApexCSG::GSA::vs3d_test(m_test));
		testPlanePair->plane = physx::PxPlane(-testPlanePair->plane.n, -testPlanePair->plane.d); // Flip back
		if (keep)
		{
			// This is a bounding plane
			m_cellPlanes.pushBack(testPlanePair->plane);
		}
		else
		{
			// Flipping this plane results in an empty set intersection. It is non-essential, so remove it
			// And its reciprocal
			if (testPlanePair->getRecip() == stopPair)
			{
				// Don't let the end-marker dangle when the reciprocal is removed
				stopPair = stopPair->getAdj(1);
			}
			testPlanePair->getRecip()->remove();
			if (testPlanePair == m_startPair)
			{
				m_startPair = m_startPair->getAdj(1);
			}
			testPlanePair->remove();
		}
		testPlanePair = nextPlanePair;
	} while (testPlanePair != stopPair);

	// Next cell starts at the end-marker
	m_startPair = stopPair;
}
+
+
+PX_INLINE bool segmentOnBorder(const physx::PxVec3& v0, const physx::PxVec3& v1, float width, float height)
+{
+ return
+ (v0.x < -0.5f && v1.x < -0.5f) ||
+ (v0.y < -0.5f && v1.y < -0.5f) ||
+ (v0.x >= width - 0.5f && v1.x >= width - 0.5f) ||
+ (v0.y >= height - 0.5f && v1.y >= height - 0.5f);
+}
+
// Adapts the APEX quick-and-dirty RNG (QDSRand) to the ApexCSG::UserRandom
// interface.  'userRnd' is the file-wide shared instance.
class Random : public ApexCSG::UserRandom
{
public:
	// Next raw 32-bit random value
	uint32_t getInt()
	{
		return m_rnd.nextSeed();
	}
	// Uniform float scaled into [min, max]
	float getReal(float min, float max)
	{
		return m_rnd.getScaled(min, max);
	}

	nvidia::QDSRand m_rnd;	// underlying generator
} userRnd;
+
+
// Fixed-size float vector aliases used by the Perlin noise generators below
typedef ApexCSG::Vec<float,2> Vec2Float;
typedef ApexCSG::Vec<float,3> Vec3Float;

// TODO: Provide configurable octave parameter
// Shared 2D/3D Perlin noise sources driven by the global RNG, constructed with
// 2 octaves; the trailing arguments (1.5f, 2.5f) are presumed frequency/
// amplitude factors -- confirm against ApexCSG::PerlinNoise.
ApexCSG::PerlinNoise<float, 64, 2, Vec2Float > userPerlin2D(userRnd, 2, 1.5f, 2.5f);
ApexCSG::PerlinNoise<float, 64, 3, Vec3Float > userPerlin3D(userRnd, 2, 1.5f, 2.5f);
+
// Returns true when segment (pv10, pv11) lies along segment (pv00, pv01):
// same general direction, both endpoints within 'eps' of the first segment's
// line, and overlapping parameter ranges (within eps).  Serves as the
// primitive test for trianglesOverlap() below.
PX_INLINE bool edgesOverlap(const physx::PxVec3& pv00, const physx::PxVec3& pv01, const physx::PxVec3& pv10, const physx::PxVec3& pv11, float eps)
{
	physx::PxVec3 e0 = pv01 - pv00;
	physx::PxVec3 e1 = pv11 - pv10;

	// Opposing directions cannot overlap in the sense required here
	if (e0.dot(e1) < 0)
	{
		return false;
	}

	float l0 = e0.normalize();	// length of segment 0; e0 becomes its unit direction
	e1.normalize();

	const physx::PxVec3 disp0 = pv10 - pv00;
	const physx::PxVec3 disp1 = pv11 - pv00;

	// Projections of segment 1's endpoints onto segment 0's direction
	const float d10 = disp0.dot(e0);

	const float d11 = disp1.dot(e0);

	// Segment 1 ends before segment 0 begins
	if (d11 < -eps)
	{
		return false;
	}

	// Segment 1 begins after segment 0 ends
	if (d10 > l0 + eps)
	{
		return false;
	}

	// Perpendicular distance of pv10 from segment 0's line (by Pythagoras)
	// must be within eps
	const float disp02 = disp0.dot(disp0);
	if (disp02 - d10 * d10 > eps * eps)
	{
		return false;
	}

	// Likewise for pv11
	const float disp12 = disp1.dot(disp1);
	if (disp12 - d11 * d11 > eps * eps)
	{
		return false;
	}

	return true;
}
+
+PX_INLINE bool trianglesOverlap(const physx::PxVec3& pv00, const physx::PxVec3& pv01, const physx::PxVec3& pv02, const physx::PxVec3& pv10, const physx::PxVec3& pv11, const physx::PxVec3& pv12, float eps)
+{
+ return edgesOverlap(pv00, pv02, pv10, pv11, eps) || edgesOverlap(pv00, pv02, pv11, pv12, eps) || edgesOverlap(pv00, pv02, pv12, pv10, eps) ||
+ edgesOverlap(pv01, pv00, pv10, pv11, eps) || edgesOverlap(pv01, pv00, pv11, pv12, eps) || edgesOverlap(pv01, pv00, pv12, pv10, eps) ||
+ edgesOverlap(pv02, pv01, pv10, pv11, eps) || edgesOverlap(pv02, pv01, pv11, pv12, eps) || edgesOverlap(pv02, pv01, pv12, pv10, eps);
+}
+
+
// Returns a point uniformly distributed on the "polar cap" in +axisN direction, of azimuthal size range (in radians)
// cosTheta is drawn uniformly from [cos(range), 1], which yields a uniform
// area distribution over the cap; the azimuth phi is uniform in [-pi, pi).
// Uses the file-global RNG (userRnd).
PX_INLINE physx::PxVec3 randomNormal(uint32_t axisN, float range)
{
	physx::PxVec3 result;
	const float cosTheta = 1.0f - (1.0f - physx::PxCos(range)) * userRnd.getReal(0.0f, 1.0f);
	const float sinTheta = physx::PxSqrt(1.0f - cosTheta * cosTheta);
	float cosPhi, sinPhi;
	physx::shdfnd::sincos(userRnd.getReal(-physx::PxPi, physx::PxPi), sinPhi, cosPhi);
	// The cap axis gets cosTheta; the two axes cyclically following axisN span the cap plane
	result[axisN % 3] = cosTheta;
	result[(axisN + 1) % 3] = cosPhi * sinTheta;
	result[(axisN + 2) % 3] = sinPhi * sinTheta;
	return result;
}
+
+void calculatePartition(int partition[3], const unsigned requestedSplits[3], const physx::PxVec3& extent, const float* targetProportions)
+{
+ partition[0] = (int32_t)requestedSplits[0] + 1;
+ partition[1] = (int32_t)requestedSplits[1] + 1;
+ partition[2] = (int32_t)requestedSplits[2] + 1;
+
+ if (targetProportions != NULL)
+ {
+ physx::PxVec3 n(extent[0] / targetProportions[0], extent[1] / targetProportions[1], extent[2] / targetProportions[2]);
+ n *= physx::PxVec3((float)partition[0], (float)partition[1], (float)partition[2]).dot(n) / n.magnitudeSquared();
+ // n now contains the # of partitions per axis closest to the desired # of partitions
+ // which give the correct target proportions. However, the numbers will not (in general)
+ // be integers, so round:
+ partition[0] = PxMax(1, (int)(n[0] + 0.5f));
+ partition[1] = PxMax(1, (int)(n[1] + 0.5f));
+ partition[2] = PxMax(1, (int)(n[2] + 0.5f));
+ }
+}
+
+static void outputMessage(const char* message, physx::PxErrorCode::Enum errorCode = physx::PxErrorCode::eNO_ERROR, int verbosity = 0) // Lower # = higher priority
+{
+ if (verbosity > gVerbosity)
+ {
+ return;
+ }
+
+ physx::PxErrorCallback* outputStream = nvidia::GetApexSDK()->getErrorCallback();
+ if (outputStream)
+ {
+ outputStream->reportError(errorCode, message, __FILE__, __LINE__);
+ }
+}
+
+struct ChunkIndexer
+{
+ ExplicitHierarchicalMeshImpl::Chunk* chunk;
+ int32_t parentIndex;
+ int32_t index;
+
+ static int compareParentIndices(const void* A, const void* B)
+ {
+ const int diff = ((const ChunkIndexer*)A)->parentIndex - ((const ChunkIndexer*)B)->parentIndex;
+ if (diff)
+ {
+ return diff;
+ }
+ return ((const ChunkIndexer*)A)->index - ((const ChunkIndexer*)B)->index;
+ }
+};
+
+static physx::PxBounds3 boundTriangles(const physx::Array<nvidia::ExplicitRenderTriangle>& triangles, const PxMat44& interiorTM)
+{
+ physx::PxBounds3 bounds;
+ bounds.setEmpty();
+ for (uint32_t triangleN = 0; triangleN < triangles.size(); ++triangleN)
+ {
+ for (int v = 0; v < 3; ++v)
+ {
+ physx::PxVec3 localVert = interiorTM.inverseRT().transform(triangles[triangleN].vertices[v].position);
+ bounds.include(localVert);
+ }
+ }
+ return bounds;
+}
+
+PX_INLINE void generateSliceAxes(uint32_t sliceAxes[3], uint32_t sliceAxisNum)
+{
+ switch (sliceAxisNum)
+ {
+ case 0:
+ sliceAxes[1] = 2;
+ sliceAxes[0] = 1;
+ break;
+ case 1:
+ sliceAxes[1] = 2;
+ sliceAxes[0] = 0;
+ break;
+ default:
+ case 2:
+ sliceAxes[1] = 1;
+ sliceAxes[0] = 0;
+ }
+ sliceAxes[2] = sliceAxisNum;
+}
+
+PX_INLINE physx::PxVec3 createAxis(uint32_t axisNum)
+{
+ return physx::PxMat33(physx::PxIdentity)[axisNum];
+}
+
+PX_INLINE void getCutoutSliceAxisAndSign(uint32_t& sliceAxisNum, uint32_t& sliceSignNum, uint32_t sliceDirIndex)
+{
+ sliceAxisNum = sliceDirIndex >> 1;
+ sliceSignNum = sliceDirIndex & 1;
+}
+
+typedef float(*NoiseFn)(float x, float y, float z, float& xGrad, float& yGrad, float& zGrad);
+
+static float planeWave(float x, float y, float, float& xGrad, float& yGrad, float& zGrad)
+{
+ float c, s;
+ physx::shdfnd::sincos(x + y, s, c);
+ xGrad = c;
+ yGrad = 0.0f;
+ zGrad = 0.0f;
+ return s;
+}
+
// Samples the file-scope 2D Perlin noise field at (x, y); the z coordinate is ignored.
// Gradients are not yet implemented and are always reported as zero.
static float perlin2D(float x, float y, float, float& xGrad, float& yGrad, float& zGrad)
{
	const float xy[] = {x, y};
	float s = userPerlin2D.sample(Vec2Float(xy));
	// TODO: Implement gradient computation (currently reported as zero)
	xGrad = 0.0f;
	yGrad = 0.0f;
	zGrad = 0.0f;
	return s;
}
+
// Samples the file-scope 3D Perlin noise field at (x, y, z).
// Gradients are not yet implemented and are always reported as zero.
static float perlin3D(float x, float y, float z, float& xGrad, float& yGrad, float& zGrad)
{
	const float xyz[] = {x, y, z};
	float s = userPerlin3D.sample(Vec3Float(xyz));
	// TODO: Implement gradient computation (currently reported as zero)
	xGrad = 0.0f;
	yGrad = 0.0f;
	zGrad = 0.0f;
	return s;
}
+
// Noise function dispatch table, indexed by noise type (see buildNoise, which clamps the
// index into range).  NOTE(review): ordering appears to correspond to the
// FractureSliceDesc noise type values (NoiseWavePlane first) - confirm against the enum.
static NoiseFn noiseFns[] =
{
	planeWave,
	perlin2D,
	perlin3D
};

// Number of entries in noiseFns
static int noiseFnCount = sizeof(noiseFns) / sizeof(noiseFns[0]);
+
// Accumulates scalar noise (into f) and, optionally, noise gradients (into n) over a
// (numX+1) x (numY+1) grid of sample points, as a sum of randomly-phased modes.
// - pattern: for Equilateral grids, alternate rows are shifted by half an x-spacing
//   (must match the vertex layout produced in IntersectMesh::build)
// - noiseAmplitude / relativeFrequency: overall noise scale and spatial frequency
// - xPeriod / yPeriod: if nonzero, wavenumbers are snapped to integer multiples so the
//   noise tiles with that period
// - noiseType: index into noiseFns (clamped into range)
// - noiseDir: 0 = vary in x only, 1 = vary in y only, anything else = random direction
// Note: f and n are accumulated into (+=), not overwritten, when already correctly sized.
static void buildNoise(physx::Array<float>& f, physx::Array<physx::PxVec3>* n,
	nvidia::IntersectMesh::GridPattern pattern, float cornerX, float cornerY, float xSpacing, float ySpacing, uint32_t numX, uint32_t numY,
	float noiseAmplitude, float relativeFrequency, float xPeriod, float yPeriod,
	int noiseType, int noiseDir)
{
	const uint32_t gridSize = (numX + 1) * (numY + 1);

	// (Re)size the output arrays only when needed, zero-filling new entries
	if( f.size() != gridSize)
		f.resize(gridSize, 0.);

	if( n && n->size() != gridSize)
		n->resize(gridSize, physx::PxVec3(0,0,0));

	noiseType = physx::PxClamp(noiseType, 0 , noiseFnCount - 1);
	NoiseFn noiseFn = noiseFns[noiseType];

	// This differentiation between wave planes and perlin is rather arbitrary, but works alright
	const uint32_t numModes = noiseType == FractureSliceDesc::NoiseWavePlane ? 20u : 4u;
	const float amplitude = noiseAmplitude / physx::PxSqrt((float)numModes); // Scale by frequency?
	for (uint32_t i = 0; i < numModes; ++i)
	{
		// Random phase and a random octave-style frequency shift for this mode
		float phase = userRnd.getReal(-3.14159265f, 3.14159265f);
		float freqShift = userRnd.getReal(0.0f, 3.0f);
		float kx, ky;
		switch (noiseDir)
		{
		case 0:
			kx = physx::PxPow(2.0f, freqShift) * relativeFrequency / xSpacing;
			ky = 0.0f;
			break;
		case 1:
			kx = 0.0f;
			ky = physx::PxPow(2.0f, freqShift) * relativeFrequency / ySpacing;
			break;
		default:
			{
				// Random direction.  NOTE: this local 'f' shadows the output array parameter.
				const float f = physx::PxPow(2.0f, freqShift) * relativeFrequency;
				const float theta = userRnd.getReal(-3.14159265f, 3.14159265f);
				const float c = physx::PxCos(theta);
				const float s = physx::PxSin(theta);
				kx = c * f / xSpacing;
				ky = s * f / ySpacing;
			}
		}

		if (xPeriod != 0.0f)
		{
			// Make sure the wavenumbers are integers (so the noise tiles with period xPeriod)
			const float cx = (2.0f * 3.14159265f) / xPeriod;
			const int nx = (int)physx::PxSign(kx) * (int)(physx::PxAbs(kx) / cx + 0.5f); // round
			kx = nx * cx;
		}

		if (yPeriod != 0.0f)
		{
			// Make sure the wavenumbers are integers
			const float cy = (2.0f * 3.14159265f) / yPeriod;
			const int ny = (int)physx::PxSign(ky) * (int)(physx::PxAbs(ky) / cy + 0.5f); // round
			ky = ny * cy;
		}

		// Accumulate this mode's contribution at every grid point
		uint32_t pointN = 0;
		float y = cornerY;
		for (uint32_t iy = 0; iy <= numY; ++iy, y += ySpacing)
		{
			float x = cornerX;
			for (uint32_t ix = 0; ix <= numX; ++ix, x += xSpacing, ++pointN)
			{
				// Match the half-spacing row shift used for Equilateral grids
				if (pattern == nvidia::IntersectMesh::Equilateral && (((iy & 1) == 0 && ix == numX) || ((iy & 1) != 0 && ix == 1)))
				{
					x -= 0.5f * xSpacing;
				}
				float xGrad, yGrad, zGrad;
				// TODO: Find point in 3D space for use with NoisePerlin3D
				f[pointN] += amplitude * noiseFn(x * kx - phase, y * ky - phase, 0, xGrad, yGrad, zGrad);
				if (n) (*n)[pointN] += physx::PxVec3(-xGrad * kx * amplitude, -yGrad * ky * amplitude, 0.0f);
			}
		}
	}

}
+
// noiseDir = 0 => X
// noiseDir = 1 => Y
// noiseDir = -1 => userRnd
// Builds the intersection mesh for a slicing plane.  If there is no noise (and the grid
// is not forced), a single large triangle circumscribing the requested rectangle is
// created.  Otherwise a (numX x numY) grid of vertices is built in the plane's frame,
// displaced along the plane normal by buildNoise(), and triangulated two triangles per
// cell.  Vertex u,v data is interpolated via triangleFrame; normals, tangents and
// binormals are recomputed from the noise gradients.
void nvidia::IntersectMesh::build(GridPattern pattern, const physx::PxPlane& plane,
	float cornerX, float cornerY, float xSpacing, float ySpacing, uint32_t numX, uint32_t numY,
	const PxMat44& tm, float noiseAmplitude, float relativeFrequency, float xPeriod, float yPeriod,
	int noiseType, int noiseDir, uint32_t submeshIndex, uint32_t frameIndex, const nvidia::TriangleFrame& triangleFrame, bool forceGrid)
{
	// Record build parameters
	m_pattern = pattern;
	m_plane = plane;
	m_cornerX = cornerX;
	m_cornerY = cornerY;
	m_xSpacing = xSpacing;
	m_ySpacing = ySpacing;
	m_numX = numX;
	m_numY = numY;
	m_tm = tm;

	if (relativeFrequency == 0.0f)
	{
		// 0 frequency only provides a plane offset
		m_plane.d += userRnd.getReal(-noiseAmplitude, noiseAmplitude);
		noiseAmplitude = 0.0f;
	}

	if (!forceGrid && noiseAmplitude == 0.0f)
	{
		// Without noise, we only need one triangle
		m_pattern = Equilateral;
		m_vertices.resize(3);
		m_triangles.resize(1);

		const float rX = 0.5f * (xSpacing * numX);
		const float rY = 0.5f * (ySpacing * numY);
		const float centerX = cornerX + rX;
		const float centerY = cornerY + rY;

		// Circumscribe rectangle
		const float R = physx::PxSqrt(rX * rX + rY * rY);

		// Fit equilateral triangle around circle (sqrt(3) * R half-width)
		const float x = 1.73205081f * R;
		m_vertices[0].position = tm.transform(physx::PxVec3(centerX, centerY + 2 * R, 0));
		m_vertices[1].position = tm.transform(physx::PxVec3(centerX - x, centerY - R, 0));
		m_vertices[2].position = tm.transform(physx::PxVec3(centerX + x, centerY - R, 0));

		// Flat plane: normal/tangent/binormal come straight from the plane's frame
		for (uint32_t i = 0; i < 3; ++i)
		{
			m_vertices[i].normal = m_plane.n;
			m_vertices[i].tangent = tm.column0.getXYZ();
			m_vertices[i].binormal = tm.column1.getXYZ();
			m_vertices[i].color = ColorRGBA(255, 255, 255, 255);
		}

		ExplicitRenderTriangle& triangle = m_triangles[0];
		for (uint32_t v = 0; v < 3; ++v)
		{
			Vertex& gridVertex = m_vertices[v];
			triangle.vertices[v] = gridVertex;
			triangleFrame.interpolateVertexData(triangle.vertices[v]);
			// Only really needed to interpolate u,v... replace normals and tangents with proper ones
			triangle.vertices[v].normal = gridVertex.normal;
			triangle.vertices[v].tangent = gridVertex.tangent;
			triangle.vertices[v].binormal = gridVertex.binormal;
		}
		triangle.extraDataIndex = frameIndex;
		triangle.smoothingMask = 0;
		triangle.submeshIndex = (int32_t)submeshIndex;

		return;
	}

	///////////////////////////////////////////////////////////////////////////

	// World-space grid origin and step vectors
	physx::PxVec3 corner = m_tm.transform(physx::PxVec3(m_cornerX, m_cornerY, 0));
	const physx::PxVec3 localX = m_tm.column0.getXYZ() * m_xSpacing;
	const physx::PxVec3 localY = m_tm.column1.getXYZ() * m_ySpacing;
	const physx::PxVec3 localZ = m_tm.column2.getXYZ();

	// Vertices:
	m_vertices.resize((m_numX + 1) * (m_numY + 1));
	const physx::PxVec3 halfLocalX = 0.5f * localX;
	uint32_t pointN = 0;
	physx::PxVec3 side = corner;
	for (uint32_t iy = 0; iy <= m_numY; ++iy, side += localY)
	{
		physx::PxVec3 point = side;
		for (uint32_t ix = 0; ix <= m_numX; ++ix, point += localX)
		{
			// Half-spacing row shift for Equilateral grids (must match buildNoise)
			if (m_pattern == nvidia::IntersectMesh::Equilateral && (((iy & 1) == 0 && ix == m_numX) || ((iy & 1) != 0 && ix == 1)))
			{
				point -= halfLocalX;
			}
			Vertex& vertex = m_vertices[pointN++];
			vertex.position = point;
			vertex.normal = physx::PxVec3(0.0f);
		}
	}

	// Build noise
	physx::Array<float> f(m_vertices.size(), 0.);
	physx::Array<physx::PxVec3> n(m_vertices.size(), physx::PxVec3(0,0,0));
	buildNoise(f, &n, pattern, m_cornerX, m_cornerY, m_xSpacing, m_ySpacing, m_numX, m_numY,
	           noiseAmplitude, relativeFrequency, xPeriod, yPeriod, noiseType, noiseDir);
	// Displace each vertex along the plane normal and accumulate the noise gradients
	pointN = 0;
	for (uint32_t iy = 0; iy <= m_numY; ++iy)
	{
		for (uint32_t ix = 0; ix <= m_numX; ++ix, ++pointN)
		{
			Vertex& vertex = m_vertices[pointN];
			vertex.position += localZ * f[pointN];
			vertex.normal += n[pointN];
		}
	}

	// Normalize normals and put in correct frame
	for (pointN = 0; pointN < m_vertices.size(); pointN++)
	{
		Vertex& vertex = m_vertices[pointN];
		// Gradients were accumulated into x,y; set z = 1 before normalizing
		vertex.normal.z = 1.0f;
		vertex.normal.normalize();
		vertex.normal = m_tm.rotate(vertex.normal);
		vertex.tangent = m_tm.column1.getXYZ().cross(vertex.normal);
		vertex.tangent.normalize();
		vertex.color = ColorRGBA(255, 255, 255, 255);
		vertex.binormal = vertex.normal.cross(vertex.tangent);
	}

	// Triangulation: two triangles per cell; the diagonal flips with row parity
	// (tpattern holds the two index triples for even rows, then for odd rows)
	m_triangles.resize(2 * m_numX * m_numY);
	uint32_t triangleN = 0;
	uint32_t index = 0;
	const uint32_t tpattern[12] = { 0, m_numX + 2, m_numX + 1, 0, 1, m_numX + 2, 0, 1, m_numX + 1, 1, m_numX + 2, m_numX + 1 };
	for (uint32_t iy = 0; iy < m_numY; ++iy)
	{
		const uint32_t* yPattern = tpattern + (iy & 1) * 6;
		for (uint32_t ix = 0; ix < m_numX; ++ix, ++index)
		{
			const uint32_t* ytPattern = yPattern;
			for (uint32_t it = 0; it < 2; ++it, ytPattern += 3)
			{
				ExplicitRenderTriangle& triangle = m_triangles[triangleN++];
				for (uint32_t v = 0; v < 3; ++v)
				{
					Vertex& gridVertex = m_vertices[index + ytPattern[v]];
					triangle.vertices[v] = gridVertex;
					triangleFrame.interpolateVertexData(triangle.vertices[v]);
					// Only really needed to interpolate u,v... replace normals and tangents with proper ones
					triangle.vertices[v].normal = gridVertex.normal;
					triangle.vertices[v].tangent = gridVertex.tangent;
					triangle.vertices[v].binormal = gridVertex.binormal;
				}
				triangle.extraDataIndex = frameIndex;
				triangle.smoothingMask = 0;
				triangle.submeshIndex = (int32_t)submeshIndex;
			}
		}
		++index;	// skip the last vertex of the row
	}
}
+
// Axis orderings for slicing, indexed by SliceParameters::order (0-5); each row lists
// the three axes in the order they are sliced.  Order values >= 6 select "slice through"
// mode instead (see hierarchicallySplitChunkInternal, which then uses row 0).
static const int gSliceDirs[6][3] =
{
	{0, 1, 2}, // XYZ
	{1, 2, 0}, // YZX
	{2, 0, 1}, // ZXY
	{2, 1, 0}, // ZYX
	{1, 0, 2}, // YXZ
	{0, 2, 1}  // XZY
};
+
+struct GridParameters
+{
+ GridParameters() :
+ sizeScale(1.0f),
+ xPeriod(0.0f),
+ yPeriod(0.0f),
+ interiorSubmeshIndex(0xFFFFFFFF),
+ materialFrameIndex(0xFFFFFFFF),
+ forceGrid(false)
+ {
+ }
+
+ physx::Array< nvidia::ExplicitRenderTriangle >* level0Mesh;
+ float sizeScale;
+ nvidia::NoiseParameters noise;
+ float xPeriod;
+ float yPeriod;
+ uint32_t interiorSubmeshIndex;
+ uint32_t materialFrameIndex;
+ nvidia::TriangleFrame triangleFrame;
+ bool forceGrid;
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+static PX_INLINE uint32_t nearestPowerOf2(uint32_t v)
+{
+ v = v > 0 ? v - 1 : 0;
+
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ ++v;
+
+ return v;
+}
+
// Creates an empty displacement map volume; the dimensions are set later by init().
nvidia::DisplacementMapVolumeImpl::DisplacementMapVolumeImpl()
: width(0),
  height(0),
  depth(0)
{

}
+
+void nvidia::DisplacementMapVolumeImpl::init(const FractureSliceDesc& desc)
+{
+ PX_UNUSED(desc);
+
+ // Compute the number of slices for each plane
+ uint32_t slices[3];
+ slices[0] = slices[1] = slices[2] = 0;
+ uint32_t maxGridSize = 0;
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ for (uint32_t j = 1; j < desc.maxDepth; ++j)
+ {
+ if (slices[i] == 0)
+ slices[i] = desc.sliceParameters[j].splitsPerPass[i];
+ else
+ slices[i] *= desc.sliceParameters[j].splitsPerPass[i];
+ }
+ for (uint32_t j = 0; j < desc.maxDepth; ++j)
+ {
+ if (desc.sliceParameters[j].noise[i].gridSize > (int)maxGridSize)
+ maxGridSize = (uint32_t)desc.sliceParameters[j].noise[i].gridSize;
+ }
+ }
+
+ width = 4 * nearestPowerOf2(PxMax(maxGridSize, PxMax(slices[0], slices[1])));
+ height = width;
+ depth = 4 * nearestPowerOf2(PxMax(maxGridSize, slices[2]));
+}
+
+
// Returns the volume dimensions and raw texel data, lazily building the data on first
// request.  ppData must be non-NULL; the returned pointer refers to storage owned by
// this object and remains valid until the volume is rebuilt or destroyed.
void nvidia::DisplacementMapVolumeImpl::getData(uint32_t& w, uint32_t& h, uint32_t& d, uint32_t& size, unsigned char const** ppData) const
{
	PX_ASSERT(ppData);
	// Lazy initialization: build the displacement data on first use
	if(data.size() == 0)
		buildData();

	w = width;
	h = height;
	d = depth;
	size = data.size();
	*ppData = data.begin();
}
+
// Clamps a value to [-1, 1] and converts it to type U (generic case: a plain cast).
template<typename T, typename U>
class Conversion
{
public:
	static PX_INLINE U convert(T x)
	{
		return (U)physx::PxClamp(x, (T)-1, (T)1);
	}
};

// Specialization for unsigned char: clamps x to [-1, 1], then maps that range linearly
// onto [0, 255] (so -1 -> 0, 0 -> 127, +1 -> 255).
template<typename T>
class Conversion<T, unsigned char>
{
public:
	static PX_INLINE unsigned char convert(T x)
	{
		unsigned char value = (unsigned char)((physx::PxClamp(x, (T)-1, (T)1) + 1) * .5 * 255);
		return value;
	}
};
+
// Fills the volume texture with smooth pseudo-random displacement vectors: a
// width x height x depth array of 4-byte texels whose B, G, R channels hold z, y, x
// offsets derived from the file-scope 3D Perlin noise field.  Coordinates are
// normalized to [0, 1] across each dimension.
// NOTE(review): the 4th (alpha) channel is never written; it presumably stays at
// physx::Array's resize-default value - confirm.
void nvidia::DisplacementMapVolumeImpl::buildData(physx::PxVec3 scale) const
{
	// For now, we forgo use of the scaling parameter
	PX_UNUSED(scale);

	const uint32_t numChannels = 4; // ZYX -> BGRA
	const uint32_t channelSize = sizeof(unsigned char);
	const uint32_t stride = numChannels * channelSize;
	const uint32_t size = width * depth * height * stride;
	data.resize(size);

	// Normalized step per texel along each dimension
	const float dX = width > 1 ? 1.0f/(width - 1) : 0.0f;
	const float dY = height > 1 ? 1.0f/(height- 1) : 0.0f;
	const float dZ = depth > 1 ? 1.0f/(depth - 1) : 0.0f;

	uint32_t index = 0;
	float z = 0.0f;
	for (uint32_t i = 0; i < depth; ++i, z+=dZ)
	{
		float y = 0.0f;
		for (uint32_t j = 0; j < height; ++j, y+=dY)
		{
			float x = 0.0f;
			for (uint32_t k = 0; k < width; ++k, x+=dX, index+=stride)
			{
				const float xyz[] = {x, y ,z};
				const float yzx[] = {y, z, x};

				// Random offsets in x and y, with the z offset as a combination of the two
				// As long as we're consistent here in our ordering, noise will be a smooth vector function of position
				float xOffset = userPerlin3D.sample(Vec3Float(xyz));
				float yOffset = userPerlin3D.sample(Vec3Float(yzx));
				float zOffset = (xOffset + yOffset) * 0.5f;

				// ZXY -> RGB
				data[index] = Conversion<float, unsigned char>::convert(zOffset);
				data[index+1] = Conversion<float, unsigned char>::convert(yOffset);
				data[index+2] = Conversion<float, unsigned char>::convert(xOffset);
			}
		}
	}
}
+
+//////////////////////////////////////////////////////////////////////////////
+
// Builds the intersection mesh for a slicing plane.  Without gridParameters, a simple
// (single-triangle) plane mesh is built.  Otherwise a noise grid is constructed that
// covers the level-0 mesh's bounds projected into the material frame's coordinate
// system, with spacing derived from the noise grid size.
static void buildIntersectMesh(nvidia::IntersectMesh& mesh,
                               const physx::PxPlane& plane,
                               const nvidia::MaterialFrame& materialFrame,
                               int noiseType = FractureSliceDesc::NoiseWavePlane,
                               const GridParameters* gridParameters = NULL)
{
	if (!gridParameters)
	{
		// No grid/noise requested - a plain plane mesh suffices
		mesh.build(plane);
		return;
	}

	PxMat44 tm = materialFrame.mCoordinateSystem;

	// Bounds of the unfractured mesh in the plane's local frame
	physx::PxBounds3 localPlaneBounds = boundTriangles(*gridParameters->level0Mesh, tm);

	const physx::PxVec3 diameter = localPlaneBounds.maximum - localPlaneBounds.minimum;
	const float planeDiameter = PxMax(diameter.x, diameter.y);
	// No longer fattening - the BSP does not have side boundaries, so we will not shave off any of the mesh.
	// localPlaneBounds.fatten( 0.005f*planeDiameter ); // To ensure we get the whole mesh
	const float gridSpacing = planeDiameter / gridParameters->noise.gridSize;

	physx::PxVec3 center = localPlaneBounds.getCenter();
	physx::PxVec3 extent = localPlaneBounds.getExtents();

#if 0 // Equilateral
	const float offset = 0.5f;
	const float yRatio = 0.866025404f;
	const nvidia::IntersectMesh::GridPattern pattern = nvidia::IntersectMesh::Equilateral;
	const float xSpacing = gridSpacing;
	const float numX = physx::PxCeil(2 * extent.x / xSpacing + offset);
	const float cornerX = center.x - 0.5f * (numX - offset) * xSpacing;
	const float ySpacing = yRatio * gridSpacing;
	const float numY = physx::PxCeil(2 * extent.y / ySpacing);
	const float cornerY = center.y - 0.5f * numY * ySpacing;
#else // Right
	// Right-triangle grid covering the bounds; if the noise is periodic along an axis,
	// the cell count is pinned to the noise grid size so the period divides evenly
	const nvidia::IntersectMesh::GridPattern pattern = nvidia::IntersectMesh::Right;
	const float numX = gridParameters->xPeriod != 0.0f ? gridParameters->noise.gridSize : physx::PxCeil(2 * extent.x / gridSpacing);
	const float xSpacing = 2 * extent.x / numX;
	const float cornerX = center.x - extent.x;
	const float numY = gridParameters->yPeriod != 0.0f ? gridParameters->noise.gridSize : physx::PxCeil(2 * extent.y / gridSpacing);
	const float ySpacing = 2 * extent.y / numY;
	const float cornerY = center.y - extent.y;
#endif

	const float noiseAmplitude = gridParameters->sizeScale * gridParameters->noise.amplitude;

	mesh.build(pattern, plane, cornerX, cornerY, xSpacing, ySpacing, (uint32_t)numX, (uint32_t)numY, tm,
	           noiseAmplitude, gridParameters->noise.frequency, gridParameters->xPeriod, gridParameters->yPeriod, noiseType, -1,
	           gridParameters->interiorSubmeshIndex, gridParameters->materialFrameIndex, gridParameters->triangleFrame, gridParameters->forceGrid);
}
+
+PX_INLINE physx::PxPlane createSlicePlane(const physx::PxVec3& center, const physx::PxVec3& extent, int sliceDir, int sliceDirNum,
+ const float sliceWidths[3], const float linearNoise[3], const float angularNoise[3])
+{
+ // Orient the plane (+apply the angular noise) and compute the d parameter (+apply the linear noise)
+ physx::PxVec3 slicePoint = center;
+ slicePoint[(unsigned)sliceDir] += (sliceDirNum + 1) * sliceWidths[(unsigned)sliceDir] - extent[(unsigned)sliceDir];
+ const physx::PxVec3 normal = randomNormal((uint32_t)sliceDir, angularNoise[(unsigned)sliceDir]);
+ return physx::PxPlane(normal, -normal.dot(slicePoint) + sliceWidths[(unsigned)sliceDir] * linearNoise[(unsigned)sliceDir] * userRnd.getReal(-0.5f, 0.5f));
+}
+
// Builds a BSP for one slicing plane: registers a material frame for the cut surface,
// builds the (possibly noisy) intersection grid mesh for the plane, and converts that
// mesh into sliceBSP.  When displacement maps are in use, noise generation is deferred
// (an empty NoiseParameters is used so the grid is flat).
static void buildSliceBSP(ApexCSG::IApexBSP& sliceBSP, ExplicitHierarchicalMeshImpl& hMesh, const nvidia::NoiseParameters& noise,
                          const physx::PxVec3& extent, int sliceDir, int sliceDepth, const physx::PxPlane planes[3],
                          const nvidia::FractureMaterialDesc& materialDesc, int noiseType = FractureSliceDesc::NoiseWavePlane, bool useDisplacementMaps = false)
{
	// Build grid and slice BSP
	nvidia::IntersectMesh grid;
	GridParameters gridParameters;
	gridParameters.interiorSubmeshIndex = materialDesc.interiorSubmeshIndex;
	// Defer noise generation if we're using displacement maps
	gridParameters.noise = useDisplacementMaps ? nvidia::NoiseParameters() : noise;
	gridParameters.level0Mesh = &hMesh.mParts[0]->mMesh;
	gridParameters.sizeScale = extent[(unsigned)sliceDir];
	gridParameters.materialFrameIndex = hMesh.addMaterialFrame();
	// Record how this cut surface was created (fracture method, direction, depth)
	nvidia::MaterialFrame materialFrame = hMesh.getMaterialFrame(gridParameters.materialFrameIndex);
	materialFrame.buildCoordinateSystemFromMaterialDesc(materialDesc, planes[(unsigned)sliceDir]);
	materialFrame.mFractureMethod = nvidia::FractureMethod::Slice;
	materialFrame.mFractureIndex = sliceDir;
	materialFrame.mSliceDepth = (uint32_t)sliceDepth;
	hMesh.setMaterialFrame(gridParameters.materialFrameIndex, materialFrame);
	gridParameters.triangleFrame.setFlat(materialFrame.mCoordinateSystem, materialDesc.uvScale, materialDesc.uvOffset);
	buildIntersectMesh(grid, planes[(unsigned)sliceDir], materialFrame, noiseType, &gridParameters);

	// Tighter-than-default tolerances for the slice BSP
	ApexCSG::BSPTolerances bspTolerances = ApexCSG::gDefaultTolerances;
	bspTolerances.linear = 1.e-9f;
	bspTolerances.angular = 0.00001f;
	sliceBSP.setTolerances(bspTolerances);

	ApexCSG::BSPBuildParameters bspBuildParams = gDefaultBuildParameters;
	bspBuildParams.rnd = &userRnd;
	bspBuildParams.internalTransform = sliceBSP.getInternalTransform();

	if(useDisplacementMaps)
	{
		// Displacement map generation is deferred until the end of fracturing
		// This used to be where a slice would populate a displacement map with
		// offsets along the plane, but no longer
	}

	sliceBSP.fromMesh(&grid.m_triangles[0], grid.m_triangles.size(), bspBuildParams);
}
+
// Slices one piece off sourceBSP: creates a new slice plane (reusing sliceBSP if it was
// already built), intersects sourceBSP with the slice to form the returned chunk BSP,
// and leaves sourceBSP holding the remainder.  Returns NULL (and restores the old slice
// plane) when the resulting chunk is rejected as too small; childVolume is set to a
// nonzero value when the chunk should be kept.
PX_INLINE ApexCSG::IApexBSP* createFractureBSP(physx::PxPlane slicePlanes[3], ApexCSG::IApexBSP*& sliceBSP, ApexCSG::IApexBSP& sourceBSP,
        ExplicitHierarchicalMeshImpl& hMesh, float& childVolume, float minVolume,
        const physx::PxVec3& center, const physx::PxVec3& extent, int sliceDir, int sliceDirNum, int sliceDepth,
        const float sliceWidths[3], const float linearNoise[3], const float angularNoise[3],
        const nvidia::NoiseParameters& noise, const nvidia::FractureMaterialDesc& materialDesc, int noiseType, bool useDisplacementMaps)
{
	// Remember the old plane so it can be restored if this slice is rejected
	const physx::PxPlane oldSlicePlane = slicePlanes[sliceDir];
	slicePlanes[sliceDir] = createSlicePlane(center, extent, sliceDir, sliceDirNum, sliceWidths, linearNoise, angularNoise);
	if (sliceBSP == NULL)
	{
		// Build the slice BSP lazily; the caller may pass in a cached one
		sliceBSP = createBSP(hMesh.mBSPMemCache, sourceBSP.getInternalTransform());
		buildSliceBSP(*sliceBSP, hMesh, noise, extent, sliceDir, sliceDepth, slicePlanes, materialDesc, noiseType, useDisplacementMaps);
	}
	sourceBSP.combine(*sliceBSP);
	// bsp = source INTERSECT slice (the piece cut off)
	ApexCSG::IApexBSP* bsp = createBSP(hMesh.mBSPMemCache, sourceBSP.getInternalTransform());
	bsp->op(sourceBSP, ApexCSG::Operation::Intersection);
#if 1 // Eliminating volume calculation here, for performance. May introduce it later once the mesh is calculated.
	// source = source MINUS slice (the remainder)
	sourceBSP.op(sourceBSP, ApexCSG::Operation::A_Minus_B);
	if (minVolume <= 0 || (bsp->getType() != ApexCSG::BSPType::Empty_Set && sourceBSP.getType() != ApexCSG::BSPType::Empty_Set))
	{
		childVolume = 1.0f;
	}
	else
	{
		// We will ignore this slice
		if (sourceBSP.getType() != ApexCSG::BSPType::Empty_Set)
		{
			// chunk bsp volume too small
			// NOTE(review): unlike the #else path below, sourceBSP is not restored
			// (no Set_A) before rejecting the slice - confirm this is intended.
			slicePlanes[sliceDir] = oldSlicePlane;
			bsp->release();
			bsp = NULL;
			childVolume = 0.0f;
		}
		else
		{
			// remainder is too small. Terminate slicing along this direction
			childVolume = 1.0f;
		}
	}
#else
	float bspArea, bspVolume;
	bsp->getSurfaceAreaAndVolume(bspArea, bspVolume, true);
	float remainingBSPArea, remainingBSPVolume;
	sourceBSP.getSurfaceAreaAndVolume(remainingBSPArea, remainingBSPVolume, true, ApexCSG::Operation::A_Minus_B);
	if (minVolume <= 0 || (bspVolume >= minVolume && remainingBSPVolume >= minVolume))
	{
		sourceBSP.op(sourceBSP, ApexCSG::Operation::A_Minus_B);
		childVolume = bspVolume;
	}
	else
	{
		// We will ignore this slice
		if (remainingBSPVolume >= minVolume)
		{
			// chunk bsp volume too small
			slicePlanes[sliceDir] = oldSlicePlane;
			bsp->release();
			bsp = NULL;
			sourceBSP.op(sourceBSP, ApexCSG::Operation::Set_A);
			childVolume = 0.0f;
		}
		else
		{
			// remainder is too small. Terminate slicing along this direction
			bsp->op(sourceBSP, ApexCSG::Operation::Set_A);
			sourceBSP.op(sourceBSP, ApexCSG::Operation::Empty_Set);
			childVolume = bspVolume + remainingBSPVolume;
		}
	}
#endif
	return bsp;
}
+
// Forward declaration - createChunk() and hierarchicallySplitChunkInternal() are
// mutually recursive (each created chunk may be split further, up to desc.maxDepth).
static bool hierarchicallySplitChunkInternal
(
	ExplicitHierarchicalMeshImpl& hMesh,
	uint32_t chunkIndex,
	uint32_t relativeSliceDepth,
	physx::PxPlane chunkTrailingPlanes[3],
	physx::PxPlane chunkLeadingPlanes[3],
	const ApexCSG::IApexBSP& chunkBSP,
	float chunkVolume,
	const nvidia::FractureSliceDesc& desc,
	const CollisionDesc& collisionDesc,
	nvidia::IProgressListener& progressListener,
	volatile bool* cancel
	);
+
// Creates one child chunk from the intersection of the parent BSP and the fracture BSP.
// If island generation is on, the intersection may decompose into several islands, each
// becoming its own chunk.  Each chunk gets a mesh, bounds and collision geometry; chunks
// that are too small, or end up without a mesh or collision, are removed.  Surviving
// chunks are recursively split further until desc.maxDepth.  Returns false if canceled.
static bool createChunk
(
	ExplicitHierarchicalMeshImpl& hMesh,
	uint32_t chunkIndex,
	uint32_t relativeSliceDepth,
	physx::PxPlane trailingPlanes[3],
	physx::PxPlane leadingPlanes[3],
	float chunkVolume,
	const nvidia::FractureSliceDesc& desc,
	const ApexCSG::IApexBSP& parentBSP,
	const ApexCSG::IApexBSP& fractureBSP,
	const nvidia::SliceParameters& sliceParameters,
	const CollisionDesc& collisionDesc,
	nvidia::IProgressListener& progressListener,
	volatile bool* cancel
)
{
	bool canceled = false;
	// chunk = fracture INTERSECT parent (combine order swapped for performance, see #if)
	ApexCSG::IApexBSP* chunkBSP = createBSP(hMesh.mBSPMemCache);
#if 0
	chunkBSP->copy(parentBSP);
	chunkBSP->combine(fractureBSP);
#else
	chunkBSP->copy(fractureBSP);
	chunkBSP->combine(parentBSP);
#endif
	chunkBSP->op(*chunkBSP, ApexCSG::Operation::Intersection);

	if (chunkBSP->getType() == ApexCSG::BSPType::Empty_Set)
	{
		// Nothing to create
		// NOTE(review): chunkBSP is not released on this early return, unlike the loop
		// below - possible leak unless mBSPMemCache owns it; confirm.
		return true;
	}

	if (gIslandGeneration)
	{
		// May replace chunkBSP with a linked list of island BSPs (walked via getNext below)
		chunkBSP = chunkBSP->decomposeIntoIslands();
	}

	CollisionVolumeDesc volumeDesc = getVolumeDesc(collisionDesc, hMesh.depth(chunkIndex)+1);

	// Minimum acceptable chunk extents, as fractions of the root chunk's extents
	const physx::PxVec3 minimumExtents = hMesh.chunkBounds(0).getExtents().multiply(physx::PxVec3(desc.minimumChunkSize[0], desc.minimumChunkSize[1], desc.minimumChunkSize[2]));

	// Process chunkBSP and any islands chained off of it
	while (chunkBSP != NULL)
	{
		if (!canceled)
		{
			// Create a mesh with chunkBSP (or its islands)
			const uint32_t newPartIndex = hMesh.addPart();
			const uint32_t newChunkIndex = hMesh.addChunk();
			chunkBSP->toMesh(hMesh.mParts[newPartIndex]->mMesh);
			hMesh.buildMeshBounds(newPartIndex);
			hMesh.buildCollisionGeometryForPart(newPartIndex, volumeDesc);
			hMesh.mChunks[newChunkIndex]->mParentIndex = (int32_t)chunkIndex;
			hMesh.mChunks[newChunkIndex]->mPartIndex = (int32_t)newPartIndex;
			// Open meshes propagate their flag to their children
			if (hMesh.mParts[(uint32_t)hMesh.mChunks[chunkIndex]->mPartIndex]->mFlags & ExplicitHierarchicalMeshImpl::Part::MeshOpen)
			{
				hMesh.mParts[newPartIndex]->mFlags |= ExplicitHierarchicalMeshImpl::Part::MeshOpen;
			}
			// Trim hull in directions where splitting is noisy
			for (uint32_t i = 0; i < 3; ++i)
			{
				if ((sliceParameters.noise[i].amplitude != 0.0f || volumeDesc.mHullMethod != nvidia::ConvexHullMethod::WRAP_GRAPHICS_MESH) &&
				        volumeDesc.mHullMethod != nvidia::ConvexHullMethod::CONVEX_DECOMPOSITION)
				{
					for (uint32_t hullIndex = 0; hullIndex < hMesh.mParts[newPartIndex]->mCollision.size(); ++hullIndex)
					{
						PartConvexHullProxy& hull = *hMesh.mParts[newPartIndex]->mCollision[hullIndex];
						float min, max;
						// Clip against both the trailing and leading slice planes
						hull.impl.extent(min, max, trailingPlanes[i].n);
						if (max > min)
						{
							physx::PxPlane clipPlane = trailingPlanes[i];
							clipPlane.d = PxMin(clipPlane.d, -(0.8f * (max - min) + min)); // 20% clip bound
							hull.impl.intersectPlaneSide(clipPlane);
						}
						hull.impl.extent(min, max, leadingPlanes[i].n);
						if (max > min)
						{
							physx::PxPlane clipPlane = leadingPlanes[i];
							clipPlane.d = PxMin(clipPlane.d, -(0.8f * (max - min) + min)); // 20% clip bound
							hull.impl.intersectPlaneSide(clipPlane);
						}
					}
				}
			}
			if (hMesh.mParts[newPartIndex]->mMesh.size() > 0 && hMesh.mParts[newPartIndex]->mCollision.size() > 0 && // We have a mesh and collision hulls
			        (hMesh.chunkBounds(newChunkIndex).getExtents() - minimumExtents).minElement() >= 0.0f) // Chunk is large enough
			{
				// Proper chunk
				hMesh.mParts[newPartIndex]->mMeshBSP->copy(*chunkBSP);
				if (relativeSliceDepth < desc.maxDepth)
				{
					// Recurse
					canceled = !hierarchicallySplitChunkInternal(hMesh, newChunkIndex, relativeSliceDepth, trailingPlanes, leadingPlanes, *chunkBSP, chunkVolume, desc, collisionDesc, progressListener, cancel);
				}
			}
			else
			{
				// No mesh, no collision, or too small. Eliminate.
				hMesh.removeChunk(newChunkIndex);
				hMesh.removePart(newPartIndex);
			}
		}
		if (chunkBSP == &parentBSP)
		{
			// No islands were generated; break from loop
			break;
		}
		// Get next bsp in island decomposition
		ApexCSG::IApexBSP* nextBSP = chunkBSP->getNext();
		chunkBSP->release();
		chunkBSP = nextBSP;
	}

	return !canceled;
}
+
+static bool hierarchicallySplitChunkInternal
+(
+ ExplicitHierarchicalMeshImpl& hMesh,
+ uint32_t chunkIndex,
+ uint32_t relativeSliceDepth,
+ physx::PxPlane chunkTrailingPlanes[3],
+ physx::PxPlane chunkLeadingPlanes[3],
+ const ApexCSG::IApexBSP& chunkBSP,
+ float chunkVolume,
+ const nvidia::FractureSliceDesc& desc,
+ const CollisionDesc& collisionDesc,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel
+)
+{
+ if (relativeSliceDepth >= desc.maxDepth)
+ {
+ return true; // No slice parameters at this depth
+ }
+
+ const physx::PxBounds3 bounds = hMesh.chunkBounds(chunkIndex);
+
+ if (chunkIndex >= hMesh.chunkCount() || bounds.isEmpty())
+ {
+ return true; // Done, nothing in chunk
+ }
+
+ bool canceled = false; // our own copy of *cancel
+
+ physx::PxVec3 center = bounds.getCenter();
+ physx::PxVec3 extent = bounds.getExtents();
+
+ if (relativeSliceDepth == 0)
+ {
+ chunkTrailingPlanes[0] = physx::PxPlane(-1, 0, 0, bounds.minimum[0]);
+ chunkTrailingPlanes[1] = physx::PxPlane(0, -1, 0, bounds.minimum[1]);
+ chunkTrailingPlanes[2] = physx::PxPlane(0, 0, -1, bounds.minimum[2]);
+ chunkLeadingPlanes[0] = physx::PxPlane(1, 0, 0, -bounds.maximum[0]);
+ chunkLeadingPlanes[1] = physx::PxPlane(0, 1, 0, -bounds.maximum[1]);
+ chunkLeadingPlanes[2] = physx::PxPlane(0, 0, 1, -bounds.maximum[2]);
+ }
+
+ // Get parameters for this depth
+ const nvidia::SliceParameters& sliceParameters = desc.sliceParameters[relativeSliceDepth++];
+
+ // Determine slicing at this level
+ int partition[3];
+ calculatePartition(partition, sliceParameters.splitsPerPass, extent, desc.useTargetProportions ? desc.targetProportions : NULL);
+
+ // Slice volume rejection ratio, perhaps should be exposed
+ const float volumeRejectionRatio = 0.1f;
+ // Resulting slices must have at least this volume
+ const float minChildVolume = volumeRejectionRatio * chunkVolume / (partition[0] * partition[1] * partition[2]);
+
+ const bool slicingThrough = sliceParameters.order >= 6;
+
+ const uint32_t sliceDirOrder = slicingThrough ? 0u : (uint32_t)sliceParameters.order;
+ const uint32_t sliceDir0 = (uint32_t)gSliceDirs[sliceDirOrder][0];
+ const uint32_t sliceDir1 = (uint32_t)gSliceDirs[sliceDirOrder][1];
+ const uint32_t sliceDir2 = (uint32_t)gSliceDirs[sliceDirOrder][2];
+ const float sliceWidths[3] = { 2.0f * extent[0] / partition[0], 2.0f * extent[1] / partition[1], 2.0f * extent[2] / partition[2] };
+
+ nvidia::HierarchicalProgressListener localProgressListener(PxMax(partition[0]*partition[1]*partition[2], 1), &progressListener);
+
+ // If we are slicing through, then we need to cache the slice BSPs in the 2nd and 3rd directions
+ physx::Array<ApexCSG::IApexBSP*> sliceBSPs1;
+ physx::Array<ApexCSG::IApexBSP*> sliceBSPs2;
+ if (slicingThrough)
+ {
+ sliceBSPs1.resize((uint32_t)partition[(uint32_t)gSliceDirs[sliceDirOrder][1]] - 1, NULL);
+ sliceBSPs2.resize((uint32_t)partition[(uint32_t)gSliceDirs[sliceDirOrder][2]] - 1, NULL);
+ }
+
+	// If we are not slicing through, we can re-use this sliceBSP
+ ApexCSG::IApexBSP* reusedSliceBSP = NULL;
+
+ physx::PxPlane trailingPlanes[3];
+ physx::PxPlane leadingPlanes[3];
+
+ float childVolume = 0.0f;
+
+ ApexCSG::IApexBSP* fractureBSP0 = createBSP(hMesh.mBSPMemCache, chunkBSP.getInternalTransform());
+
+ const int sliceDepth = (int)hMesh.depth(chunkIndex) + 1;
+
+ trailingPlanes[sliceDir0] = chunkTrailingPlanes[sliceDir0];
+ leadingPlanes[sliceDir0] = physx::PxPlane(-trailingPlanes[sliceDir0].n, -trailingPlanes[sliceDir0].d);
+ for (int sliceDir0Num = 0; sliceDir0Num < partition[sliceDir0] && !canceled; ++sliceDir0Num)
+ {
+ ApexCSG::IApexBSP* fractureBSP1 = fractureBSP0; // This is the default; if there is a need to slice it will be replaced below.
+ if (sliceDir0Num + 1 < partition[sliceDir0])
+ {
+ // Slice off piece in the 0 direction
+ fractureBSP1 = createFractureBSP(leadingPlanes, reusedSliceBSP, *fractureBSP0, hMesh, childVolume, 0, center, extent, (int32_t)sliceDir0, sliceDir0Num, sliceDepth, sliceWidths,
+ sliceParameters.linearVariation, sliceParameters.angularVariation, sliceParameters.noise[sliceDir0],
+ desc.materialDesc[sliceDir0], (int32_t)desc.noiseMode, desc.useDisplacementMaps);
+ reusedSliceBSP->release();
+ reusedSliceBSP = NULL;
+ }
+ else
+ {
+ leadingPlanes[sliceDir0] = chunkLeadingPlanes[sliceDir0];
+ }
+ trailingPlanes[sliceDir1] = chunkTrailingPlanes[sliceDir1];
+ leadingPlanes[sliceDir1] = physx::PxPlane(-trailingPlanes[sliceDir1].n, -trailingPlanes[sliceDir1].d);
+ for (int sliceDir1Num = 0; sliceDir1Num < partition[sliceDir1] && !canceled; ++sliceDir1Num)
+ {
+ ApexCSG::IApexBSP* fractureBSP2 = fractureBSP1; // This is the default; if there is a need to slice it will be replaced below.
+ if (sliceDir1Num + 1 < partition[sliceDir1])
+ {
+ // Slice off piece in the 1 direction
+ ApexCSG::IApexBSP*& sliceBSP = !slicingThrough ? reusedSliceBSP : sliceBSPs1[(uint32_t)sliceDir1Num];
+ fractureBSP2 = createFractureBSP(leadingPlanes, sliceBSP, *fractureBSP1, hMesh, childVolume, 0, center, extent, (int32_t)sliceDir1, sliceDir1Num, sliceDepth,
+ sliceWidths, sliceParameters.linearVariation, sliceParameters.angularVariation, sliceParameters.noise[sliceDir1],
+ desc.materialDesc[sliceDir1], (int32_t)desc.noiseMode, desc.useDisplacementMaps);
+ if (sliceBSP == reusedSliceBSP)
+ {
+ reusedSliceBSP->release();
+ reusedSliceBSP = NULL;
+ }
+ }
+ else
+ {
+ leadingPlanes[sliceDir1] = chunkLeadingPlanes[sliceDir1];
+ }
+ trailingPlanes[sliceDir2] = chunkTrailingPlanes[sliceDir2];
+ leadingPlanes[sliceDir2] = physx::PxPlane(-trailingPlanes[sliceDir2].n, -trailingPlanes[sliceDir2].d);
+ for (int sliceDir2Num = 0; sliceDir2Num < partition[sliceDir2] && !canceled; ++sliceDir2Num)
+ {
+ ApexCSG::IApexBSP* fractureBSP3 = fractureBSP2; // This is the default; if there is a need to slice it will be replaced below.
+ if (sliceDir2Num + 1 < partition[sliceDir2])
+ {
+ // Slice off piece in the 2 direction
+ ApexCSG::IApexBSP*& sliceBSP = !slicingThrough ? reusedSliceBSP : sliceBSPs2[(uint32_t)sliceDir2Num];
+ fractureBSP3 = createFractureBSP(leadingPlanes, sliceBSP, *fractureBSP2, hMesh, childVolume, minChildVolume, center, extent, (int32_t)sliceDir2, sliceDir2Num, sliceDepth,
+ sliceWidths, sliceParameters.linearVariation, sliceParameters.angularVariation, sliceParameters.noise[sliceDir2],
+ desc.materialDesc[sliceDir2], (int32_t)desc.noiseMode, desc.useDisplacementMaps);
+ if (sliceBSP == reusedSliceBSP)
+ {
+ reusedSliceBSP->release();
+ reusedSliceBSP = NULL;
+ }
+ }
+ else
+ {
+ leadingPlanes[sliceDir2] = chunkLeadingPlanes[sliceDir2];
+ }
+ if (fractureBSP3 != NULL)
+ {
+ if (hMesh.mParts[(uint32_t)hMesh.mChunks[chunkIndex]->mPartIndex]->mFlags & ExplicitHierarchicalMeshImpl::Part::MeshOpen)
+ {
+ fractureBSP3->deleteTriangles(); // Don't use interior triangles on an open mesh
+ }
+ canceled = !createChunk(hMesh, chunkIndex, relativeSliceDepth, trailingPlanes, leadingPlanes, childVolume, desc, chunkBSP, *fractureBSP3, sliceParameters, collisionDesc, localProgressListener, cancel);
+ }
+ localProgressListener.completeSubtask();
+ // We no longer need fractureBSP3
+ if (fractureBSP3 != NULL && fractureBSP3 != fractureBSP2)
+ {
+ fractureBSP3->release();
+ fractureBSP3 = NULL;
+ }
+ trailingPlanes[sliceDir2] = physx::PxPlane(-leadingPlanes[sliceDir2].n, -leadingPlanes[sliceDir2].d);
+ // Check for cancellation
+ if (cancel != NULL && *cancel)
+ {
+ canceled = true;
+ }
+ }
+ // We no longer need fractureBSP2
+ if (fractureBSP2 != NULL && fractureBSP2 != fractureBSP1)
+ {
+ fractureBSP2->release();
+ fractureBSP2 = NULL;
+ }
+ trailingPlanes[sliceDir1] = physx::PxPlane(-leadingPlanes[sliceDir1].n, -leadingPlanes[sliceDir1].d);
+ // Check for cancellation
+ if (cancel != NULL && *cancel)
+ {
+ canceled = true;
+ }
+ }
+ // We no longer need fractureBSP1
+ if (fractureBSP1 != NULL && fractureBSP1 != fractureBSP0)
+ {
+ fractureBSP1->release();
+ fractureBSP1 = NULL;
+ }
+ trailingPlanes[sliceDir0] = physx::PxPlane(-leadingPlanes[sliceDir0].n, -leadingPlanes[sliceDir0].d);
+ // Check for cancellation
+ if (cancel != NULL && *cancel)
+ {
+ canceled = true;
+ }
+ }
+ fractureBSP0->release();
+
+ while (sliceBSPs2.size())
+ {
+ if (sliceBSPs2.back() != NULL)
+ {
+ sliceBSPs2.back()->release();
+ }
+ sliceBSPs2.popBack();
+ }
+ while (sliceBSPs1.size())
+ {
+ if (sliceBSPs1.back() != NULL)
+ {
+ sliceBSPs1.back()->release();
+ }
+ sliceBSPs1.popBack();
+ }
+
+ return !canceled;
+}
+
+
// Per-triangle bookkeeping used by MeshProcessor: records which vertices and edges of a
// process triangle may not be moved or split, and which original (pre-split) triangle
// this one descends from.
struct TriangleLockInfo
{
	// Default: nothing locked, no ancestor recorded (0xFFFFFFFF = unset)
	TriangleLockInfo() : lockedVertices(0), lockedEdges(0), originalTriangleIndex(0xFFFFFFFF) {}

	uint16_t lockedVertices;	// (lockedVertices>>N)&1 => vertex N is locked (may not be moved)
	uint16_t lockedEdges;	// (lockedEdges>>M)&1 => edge M is locked (may not be split); edge M runs from vertex M to vertex (M+1)%3
	uint32_t originalTriangleIndex;	// absolute mesh index of the ancestor triangle whose frame/neighborhood this triangle inherits
};
+
+PX_INLINE float square(float x)
+{
+ return x*x;
+}
+
// Returns edge of triangle, and position on edge (in pointOnEdge) if an edge is split, otherwise returns -1.
// If a valid edge index is returned, also returns distance squared from the point to the edge in perp2.
// Edge i runs from vertex i to vertex (i+1)%3.
// paraTol2 = squared tolerance along the edge: the point must be at least this far (squared) from both
//            edge endpoints, or -1 is returned (the point coincides with a vertex, no split needed).
// perpTol2 = squared tolerance perpendicular to the edge: the point must be within this squared distance
//            of the edge line to count as "on" it.
// NOTE(review): pointOnEdge is set to the query point itself, not to its projection onto the edge.
PX_INLINE int32_t pointOnAnEdge(physx::PxVec3& pointOnEdge, float& perp2, const physx::PxVec3& point, const nvidia::ExplicitRenderTriangle& triangle, float paraTol2, float perpTol2)
{
	// Best candidate so far, stored as an un-normalized pair (perp^2*|e|^2, |e|^2) so edges can be
	// compared without division; initial values accept the first candidate (0 < closestPerp2E2*e2).
	int32_t edgeIndex = -1;
	float closestPerp2E2 = 1.0f;
	float closestE2 = 0.0f;

	for (uint32_t i = 0; i < 3; ++i)
	{
		const physx::PxVec3& v0 = triangle.vertices[i].position;
		const physx::PxVec3& v1 = triangle.vertices[(i+1)%3].position;
		const physx::PxVec3 e = v1 - v0;
		const float e2 = e.magnitudeSquared();
		const float perpTol2e2 = perpTol2*e2;
		const physx::PxVec3 d0 = point - v0;
		const float d02 = d0.magnitudeSquared();
		if (d02 < paraTol2)
		{
			// Point coincides with vertex i (within tolerance) - not an edge split
			return -1;
		}
		if (e2 <= 4.0f*paraTol2)
		{
			// Degenerate (too-short) edge - skip
			continue;
		}
		const float d0e = d0.dot(e);
		if (d0e < 0.0f || d0e > e2)
		{
			continue; // point does not project down onto the edge
		}
		// |d0 x e|^2 = (perpendicular distance)^2 * |e|^2
		const float perp2e2 = d0.cross(e).magnitudeSquared();
		if (perp2e2 > perpTol2e2)
		{
			continue; // Point too far from edge
		}
		// Point is close to an edge. Consider it if it's the closest.
		// Cross-multiplied comparison: perp2e2/e2 < closestPerp2E2/closestE2, without dividing.
		if (perp2e2*closestE2 < closestPerp2E2*e2)
		{
			closestPerp2E2 = perp2e2;
			closestE2 = e2;
			edgeIndex = (int32_t)i;
		}
	}

	// Defensive: closestE2 should be positive whenever edgeIndex >= 0
	if (edgeIndex < 0 || closestE2 == 0.0f)
	{
		return -1;
	}

	const physx::PxVec3& v0 = triangle.vertices[edgeIndex].position;
	const physx::PxVec3& v1 = triangle.vertices[(edgeIndex+1)%3].position;

	// Re-check endpoint coincidence for the winning edge
	if ((point-v0).magnitudeSquared() < paraTol2 || (point-v1).magnitudeSquared() < paraTol2)
	{
		return -1;
	}

	pointOnEdge = point;
	perp2 = closestE2 > 0.0f ? closestPerp2E2/closestE2 : 0.0f;	// actual squared distance to the edge line

	return edgeIndex;
}
+
+// Returns shared edge of triangleA if an edge is shared, otherwise returns -1
+PX_INLINE int32_t trianglesShareEdge(const nvidia::ExplicitRenderTriangle& triangleA, const nvidia::ExplicitRenderTriangle& triangleB, float tol2)
+{
+ for (uint32_t i = 0; i < 3; ++i)
+ {
+ const physx::PxVec3 eA = triangleA.vertices[(i+1)%3].position - triangleA.vertices[i].position;
+ const float eA2 = eA.magnitudeSquared();
+ const float tol2eA2 = tol2*eA2;
+ // We will search for edges pointing in the opposite direction only
+ for (uint32_t j = 0; j < 3; ++j)
+ {
+ const physx::PxVec3 d0 = triangleB.vertices[j].position - triangleA.vertices[i].position;
+ const float d0A = d0.dot(eA);
+ if (d0A <= 0.0f)
+ {
+ continue; // edge on B starts before edge on A
+ }
+ const physx::PxVec3 d1 = triangleB.vertices[(j+1)%3].position - triangleA.vertices[i].position;
+ const float d1A = d1.dot(eA);
+ if (d1A >= eA2)
+ {
+ continue; // edge on B ends after edge on A
+ }
+ if (d0A <= d1A)
+ {
+ continue; // edges don't point in opposite directions
+ }
+ if (d0.cross(eA).magnitudeSquared() > tol2eA2)
+ {
+ continue; // one vertex on B is not close enough to the edge of A
+ }
+ if (d1.cross(eA).magnitudeSquared() > tol2eA2)
+ {
+ continue; // other vertex on B is not close enough to the edge of A
+ }
+ // These edges appear to have an overlap, to within tolerance
+ return (int32_t)i;
+ }
+ }
+
+ return -1;
+}
+
// Triangle-triangle interference (overlap) test in the style of a separating-axis test:
// each triangle's face normal and the nine edge-pair cross products are tried as candidate
// separating axes; the triangles interfere only if no axis separates their projected extents.
// extentDistance is defined elsewhere in this file; presumably it returns the signed gap
// between two scalar intervals (negative when they overlap) - verify against its definition.
// Positive tol means that interference will be registered even if the triangles are a small distance apart.
// Negative tol means that interference will not be registered even if the triangles have a small overlap.
PX_INLINE bool trianglesInterfere(const nvidia::ExplicitRenderTriangle& triangleA, const nvidia::ExplicitRenderTriangle& triangleB, float tol)
{
	// Check extent of B relative to plane of A (plane anchored at A's centroid)
	const physx::PxPlane planeA(0.333333333f*(triangleA.vertices[0].position + triangleA.vertices[1].position + triangleA.vertices[2].position), triangleA.calculateNormal().getNormalized());
	physx::PxVec3 dispB(planeA.distance(triangleB.vertices[0].position), planeA.distance(triangleB.vertices[1].position), planeA.distance(triangleB.vertices[2].position));
	if (extentDistance(dispB.minElement(), dispB.maxElement(), 0.0f, 0.0f) > tol)
	{
		return false;
	}

	// Check extent of A relative to plane of B
	const physx::PxPlane planeB(0.333333333f*(triangleB.vertices[0].position + triangleB.vertices[1].position + triangleB.vertices[2].position), triangleB.calculateNormal().getNormalized());
	physx::PxVec3 dispA(planeB.distance(triangleA.vertices[0].position), planeB.distance(triangleA.vertices[1].position), planeB.distance(triangleA.vertices[2].position));
	if (extentDistance(dispA.minElement(), dispA.maxElement(), 0.0f, 0.0f) > tol)
	{
		return false;
	}

	// Try each cross product of an edge of A with an edge of B as a separating axis
	for (uint32_t i = 0; i < 3; ++i)
	{
		physx::PxVec3 eA = triangleA.vertices[(i+1)%3].position - triangleA.vertices[i].position;
		eA.normalize();
		for (uint32_t j = 0; j < 3; ++j)
		{
			physx::PxVec3 eB = triangleB.vertices[(j+1)%3].position - triangleB.vertices[j].position;
			eB.normalize();
			physx::PxVec3 n = eA.cross(eB);
			if (n.normalize() > 0.00001f)	// skip nearly-parallel edge pairs - the axis is unreliable
			{
				dispA = physx::PxVec3(n.dot(triangleA.vertices[0].position), n.dot(triangleA.vertices[1].position), n.dot(triangleA.vertices[2].position));
				dispB = physx::PxVec3(n.dot(triangleB.vertices[0].position), n.dot(triangleB.vertices[1].position), n.dot(triangleB.vertices[2].position));
				if (extentDistance(dispA.minElement(), dispA.maxElement(), dispB.minElement(), dispB.maxElement()) > tol)
				{
					return false;
				}
			}
		}
	}

	// No separating axis found
	return true;
}
+
+PX_INLINE bool segmentIntersectsTriangle(const physx::PxVec3 orig, const physx::PxVec3 dest, const nvidia::ExplicitRenderTriangle& triangle, float tol)
+{
+ // Check extent of segment relative to plane of triangle
+ const physx::PxPlane plane(0.333333333f*(triangle.vertices[0].position + triangle.vertices[1].position + triangle.vertices[2].position), triangle.calculateNormal().getNormalized());
+ const float dist0 = plane.distance(orig);
+ const float dist1 = plane.distance(dest);
+ if (extentDistance(PxMin(dist0, dist1), PxMax(dist0, dist1), 0.0f, 0.0f) > tol)
+ {
+ return false;
+ }
+
+ // Test to see if the segment goes through the triangle
+ const float signDist0 = physx::PxSign(dist0);
+ const physx::PxVec3 disp = dest-orig;
+ const physx::PxVec3 relV[3] = {triangle.vertices[0].position - orig, triangle.vertices[1].position - orig, triangle.vertices[2].position - orig};
+ for (uint32_t v = 0; v < 3; ++v)
+ {
+ if (physx::PxSign(relV[v].cross(relV[(v+1)%3]).dot(disp)) == signDist0)
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
// Spatial representative for one vertex of a process triangle, used by MeshProcessor
// for neighbor lookup and for remembering a vertex's recorded position/normal.
struct VertexRep
{
	BoundsRep bounds;	// fattened AABB around the vertex, fed to NeighborLookup
	physx::PxVec3 position; // Not necessarily the center of bounds, after we snap vertices
	physx::PxVec3 normal;	// accumulated/averaged facet normal for this vertex
};
+
+class MeshProcessor
+{
+public:
	// Iterates over every unlocked ("free") vertex of the process triangles, i.e. mesh
	// triangles with index >= mTrianglePartition whose TriangleLockInfo does not lock
	// the vertex. Usage: for (FreeVertexIt it(proc); it.valid(); it.inc()) { ... }
	class FreeVertexIt
	{
	public:
		// Positions the iterator at the first free vertex (if any)
		FreeVertexIt(MeshProcessor& meshProcessor) : mMeshProcessor(meshProcessor), mVertexRep(NULL)
		{
			mTriangleIndex = meshProcessor.mTrianglePartition;
			mVertexIndex = 0;
			advanceToNextValidState();
		}

		// True while the iterator points at a vertex of some mesh triangle
		PX_INLINE bool valid() const
		{
			return mTriangleIndex < mMeshProcessor.mMesh->size();
		}

		// Advances to the next free vertex; no-op once exhausted
		PX_INLINE void inc()
		{
			if (valid())
			{
				++mVertexIndex;
				advanceToNextValidState();
			}
		}

		// Absolute mesh index of the current triangle
		PX_INLINE uint32_t triangleIndex() const
		{
			return mTriangleIndex;
		}

		// Vertex slot (0..2) within the current triangle
		PX_INLINE uint32_t vertexIndex() const
		{
			return mVertexIndex;
		}

		// Spatial representative of the current vertex; only valid while valid() is true
		PX_INLINE VertexRep& vertexRep() const
		{
			return *mVertexRep;
		}

	private:
		FreeVertexIt& operator=(const FreeVertexIt&);

		// Scans forward from the current (triangle, vertex) position until an unlocked
		// vertex is found or the mesh is exhausted
		PX_INLINE void advanceToNextValidState()
		{
			for (; valid(); ++mTriangleIndex, mVertexIndex = 0)
			{
				for (; mVertexIndex < 3; ++mVertexIndex)
				{
					const uint32_t relativeTriangleIndex = mTriangleIndex-mMeshProcessor.mTrianglePartition;
					if (!((mMeshProcessor.mTriangleInfo[relativeTriangleIndex].lockedVertices >> mVertexIndex)&1))
					{
						mVertexRep = &mMeshProcessor.mVertexBounds[3*relativeTriangleIndex + mVertexIndex];
						return;
					}
				}
			}
		}

		MeshProcessor& mMeshProcessor;
		uint32_t mTriangleIndex;	// absolute mesh index of current triangle
		uint32_t mVertexIndex;	// current vertex slot (0..2)
		VertexRep* mVertexRep;	// representative of the current vertex, or NULL before first advance
	};
+
	// Iterates over the unlocked ("free") vertices in the spatial neighborhood (per
	// mVertexNeighborhoods, built in snapVertices) of a given process-triangle vertex.
	class FreeNeighborVertexIt
	{
	public:
		// triangleIndex is an absolute mesh index (>= mTrianglePartition); vertexIndex is 0..2
		FreeNeighborVertexIt(MeshProcessor& meshProcessor, uint32_t triangleIndex, uint32_t vertexIndex)
			: mMeshProcessor(meshProcessor)
			, mTriangleIndex(triangleIndex)
			, mVertexIndex(vertexIndex)
			, mVertexRep(NULL)
		{
			// Flattened vertex-rep index: 3 per process triangle
			const uint32_t vertexRepIndex = 3*(triangleIndex - mMeshProcessor.mTrianglePartition) + vertexIndex;
			mNeighbors = meshProcessor.mVertexNeighborhoods.getNeighbors(vertexRepIndex);
			mNeighborStop = mNeighbors + meshProcessor.mVertexNeighborhoods.getNeighborCount(vertexRepIndex);
			advanceToNextValidState();
		}

		// True while there are neighbors left to visit
		PX_INLINE bool valid() const
		{
			return mNeighbors < mNeighborStop;
		}

		// Advances to the next free neighbor vertex; no-op once exhausted
		PX_INLINE void inc()
		{
			if (valid())
			{
				++mNeighbors;
				advanceToNextValidState();
			}
		}

		// Absolute mesh index of the current neighbor's triangle
		PX_INLINE uint32_t triangleIndex() const
		{
			return mTriangleIndex;
		}

		// Vertex slot (0..2) within the current neighbor's triangle
		PX_INLINE uint32_t vertexIndex() const
		{
			return mVertexIndex;
		}

		// Spatial representative of the current neighbor vertex; only valid while valid() is true
		PX_INLINE VertexRep& vertexRep() const
		{
			return *mVertexRep;
		}

	private:
		FreeNeighborVertexIt& operator=(const FreeNeighborVertexIt&);

		// Scans the neighbor list until an unlocked vertex is found. Note the members are
		// updated to each candidate before the lock test, so they always describe the
		// vertex the iterator stopped on.
		PX_INLINE void advanceToNextValidState()
		{
			for (; valid(); ++mNeighbors)
			{
				const uint32_t neighbor = *mNeighbors;
				// Decode the flat vertex-rep index into (triangle, vertex)
				const uint32_t relativeTriangleIndex = neighbor/3;
				mTriangleIndex = mMeshProcessor.mTrianglePartition + relativeTriangleIndex;
				mVertexIndex = neighbor - 3*relativeTriangleIndex;
				mVertexRep = &mMeshProcessor.mVertexBounds[neighbor];
				if (!((mMeshProcessor.mTriangleInfo[relativeTriangleIndex].lockedVertices >> mVertexIndex)&1))
				{
					return;
				}
			}
		}

		MeshProcessor& mMeshProcessor;
		const uint32_t* mNeighbors;	// cursor into the neighbor index list
		const uint32_t* mNeighborStop;	// one past the end of the neighbor list
		uint32_t mTriangleIndex;	// absolute mesh index of current neighbor's triangle
		uint32_t mVertexIndex;	// current neighbor's vertex slot (0..2)
		VertexRep* mVertexRep;	// representative of the current neighbor vertex
	};
+
	// Constructs an empty processor; call setMesh() to bind a mesh before using any other method.
	MeshProcessor()
	{
		reset();
	}
+
+ // Removes any triangles with a width less than minWidth
+ static void removeSlivers(physx::Array<nvidia::ExplicitRenderTriangle>& mesh, float minWidth)
+ {
+ const float minWidth2 = minWidth*minWidth;
+ for (uint32_t i = mesh.size(); i--;)
+ {
+ nvidia::ExplicitRenderTriangle& triangle = mesh[i];
+ for (uint32_t j = 0; j < 3; ++j)
+ {
+ const physx::PxVec3 edge = (triangle.vertices[(j+1)%3].position - triangle.vertices[j].position).getNormalized();
+ if ((triangle.vertices[(j+2)%3].position - triangle.vertices[j].position).cross(edge).magnitudeSquared() < minWidth2)
+ {
+ mesh.replaceWithLast(i);
+ break;
+ }
+ }
+ }
+ }
+
	// trianglePartition is a point in the part mesh where we want to start processing. We will assume that triangles
	// before this index are locked, and vertices in triangles after this index will be locked if they coincide with
	// locked triangles.
	// Binds the processor to 'mesh' (and optional 'parentMesh', used by resolveIntersections),
	// builds padded per-triangle bounds plus a spatial neighbor lookup, and initializes the
	// per-process-triangle info/frame/child-list arrays. tol is stored as an absolute value;
	// the bounds padding used throughout is twice tol.
	void setMesh(physx::Array<nvidia::ExplicitRenderTriangle>& mesh, physx::Array<nvidia::ExplicitRenderTriangle>* parentMesh, uint32_t trianglePartition, float tol)
	{
		reset();

		// Empty mesh: leave the processor in its reset state
		if (mesh.size() == 0)
		{
			return;
		}

		mMesh = &mesh;
		mTrianglePartition = PxMin(trianglePartition, mesh.size());	// clamp to a valid partition point
		mTol = physx::PxAbs(tol);
		mPadding = 2*mTol;

		mParentMesh = parentMesh;

		// Find part triangle neighborhoods, expanding triangle bounds by some padding factor
		mOriginalTriangleBounds.resize(mesh.size());
		for (uint32_t i = 0; i < mesh.size(); ++i)
		{
			nvidia::ExplicitRenderTriangle& triangle = mesh[i];
			mOriginalTriangleBounds[i].aabb.setEmpty();
			mOriginalTriangleBounds[i].aabb.include(triangle.vertices[0].position);
			mOriginalTriangleBounds[i].aabb.include(triangle.vertices[1].position);
			mOriginalTriangleBounds[i].aabb.include(triangle.vertices[2].position);
			mOriginalTriangleBounds[i].aabb.fattenFast(mPadding);
		}
		mOriginalTriangleNeighborhoods.setBounds(&mOriginalTriangleBounds[0], mOriginalTriangleBounds.size(), sizeof(mOriginalTriangleBounds[0]));

		// Create additional triangle info. This will parallel the mesh after the partition
		mTriangleInfo.resize(mesh.size()-mTrianglePartition);
		// Also create triangle interpolators from the original triangles
		mTriangleFrames.resize(mesh.size()-mTrianglePartition);
		// As well as child triangle info
		mTriangleChildLists.resize(mesh.size()-mTrianglePartition);
		for (uint32_t i = mTrianglePartition; i < mesh.size(); ++i)
		{
			mTriangleInfo[i-mTrianglePartition].originalTriangleIndex = i; // Use the original triangles' neighborhood info
			mTriangleFrames[i-mTrianglePartition].setFromTriangle(mesh[i]);
			mTriangleChildLists[i-mTrianglePartition].pushBack(i); // These are all of the triangles that use the corresponding triangle frame, and will be represented by the corresponding bounds for spatial lookup
		}
	}
+
	// Locks vertices/edges of process triangles (index >= mTrianglePartition) that lie on the
	// boundary with non-process triangles, so later passes (removeTJunctions, subdivide,
	// snapVertices, relaxation) will not move them and open cracks along the border.
	// Requires setMesh() to have been called (uses mOriginalTriangleNeighborhoods).
	void lockBorderVertices()
	{
		physx::Array<nvidia::ExplicitRenderTriangle>& mesh = *mMesh;

		const float tol2 = mTol*mTol;

		for (uint32_t i = mTrianglePartition; i < mesh.size(); ++i)
		{
			// Use neighbor info to find out if we should lock any of the vertices
			nvidia::ExplicitRenderTriangle& triangle = mesh[i];
			const uint32_t neighborCount = mOriginalTriangleNeighborhoods.getNeighborCount(i);
			const uint32_t* neighbors = mOriginalTriangleNeighborhoods.getNeighbors(i);
			for (uint32_t j = 0; j < neighborCount; ++j)
			{
				const uint32_t neighbor = *neighbors++;
				if (neighbor < mTrianglePartition)
				{
					// Neighbor is not a process triangle - if it shares an edge then lock vertices
					const int32_t sharedEdge = trianglesShareEdge(triangle, mesh[neighbor], tol2);
					if (sharedEdge >= 0)
					{
						// Lock both endpoints of the shared edge (edge k runs from vertex k to (k+1)%3)
						mTriangleInfo[i-mTrianglePartition].lockedVertices |= (1<<sharedEdge) | (1<<((sharedEdge+1)%3));
						mTriangleInfo[i-mTrianglePartition].lockedEdges |= 1<<sharedEdge;
					}
					// Check triangle vertices against neighbor's edges as well
					for (uint32_t v = 0; v < 3; ++v)
					{
						const physx::PxVec3& point = triangle.vertices[v].position;
						physx::PxVec3 pointOnEdge;
						float perp2;
						// paraTol2 == 0 so endpoint coincidence cannot reject; lock any vertex on a border edge
						if (0 <= pointOnAnEdge(pointOnEdge, perp2, point, mesh[neighbor], 0.0f, tol2))
						{
							mTriangleInfo[i-mTrianglePartition].lockedVertices |= 1<<v;
						}
					}
				}
			}
		}
	}
+
+ void removeTJunctions()
+ {
+ physx::Array<nvidia::ExplicitRenderTriangle>& mesh = *mMesh;
+
+ const float tol2 = mTol*mTol;
+
+ const uint32_t originalMeshSize = mesh.size();
+ for (uint32_t i = mTrianglePartition; i < originalMeshSize; ++i)
+ {
+ const uint32_t neighborCount = mOriginalTriangleNeighborhoods.getNeighborCount(i);
+ for (uint32_t v = 0; v < 3; ++v)
+ {
+ const physx::PxVec3& point = mesh[i].vertices[v].position;
+ int32_t neighborToSplit = -1;
+ int32_t edgeToSplit = -1;
+ float leastPerp2 = PX_MAX_F32;
+ const uint32_t* neighbors = mOriginalTriangleNeighborhoods.getNeighbors(i);
+ for (uint32_t j = 0; j < neighborCount; ++j)
+ {
+ const uint32_t originalNeighbor = *neighbors++;
+ if (originalNeighbor >= mTrianglePartition)
+ {
+ physx::Array<uint32_t>& triangleChildList = mTriangleChildLists[originalNeighbor - mTrianglePartition];
+ const uint32_t triangleChildListSize = triangleChildList.size();
+ for (uint32_t k = 0; k < triangleChildListSize; ++k)
+ {
+ const uint32_t neighbor = triangleChildList[k];
+ // Neighbor is a process triangle - split it at this triangle's vertices
+ const physx::PxVec3& point = mesh[i].vertices[v].position;
+ physx::PxVec3 pointOnEdge;
+ float perp2 = 0.0f;
+ const int32_t edge = pointOnAnEdge(pointOnEdge, perp2, point, mesh[neighbor], tol2, tol2);
+ if (edge >= 0 && !((mTriangleInfo[neighbor - mTrianglePartition].lockedEdges >> edge)&1))
+ {
+ if (perp2 < leastPerp2)
+ {
+ neighborToSplit = (int32_t)neighbor;
+ edgeToSplit = edge;
+ leastPerp2 = perp2;
+ }
+ }
+ }
+ }
+ }
+ if (neighborToSplit >= 0 && edgeToSplit >= 0)
+ {
+ splitTriangle((uint32_t)neighborToSplit, (uint32_t)edgeToSplit, point);
+ }
+ }
+ }
+ }
+
	// Repeatedly splits (at its midpoint) the longest edge of any process triangle whose
	// longest edge exceeds maxEdgeLength, and splits neighbors sharing that edge at the
	// same point so no T-junctions are introduced. Loops until a full pass makes no splits.
	void subdivide(float maxEdgeLength)
	{
		physx::Array<nvidia::ExplicitRenderTriangle>& mesh = *mMesh;
		const float maxEdgeLength2 = maxEdgeLength*maxEdgeLength;

		const float tol2 = mTol*mTol;

		// Pass through list and split edges that are too long
		bool splitDone;
		do
		{
			splitDone = false;
			for (uint32_t i = mTrianglePartition; i < mesh.size(); ++i) // this array (as well as info) might grow during the loop
			{
				nvidia::ExplicitRenderTriangle& triangle = mesh[i];
				// Find the longest edge of this triangle (edge k runs from vertex k to (k+1)%3)
				const float edgeLengthSquared[3] =
				{
					(triangle.vertices[1].position - triangle.vertices[0].position).magnitudeSquared(),
					(triangle.vertices[2].position - triangle.vertices[1].position).magnitudeSquared(),
					(triangle.vertices[0].position - triangle.vertices[2].position).magnitudeSquared()
				};
				const int longestEdge = edgeLengthSquared[1] > edgeLengthSquared[0] ? (edgeLengthSquared[2] > edgeLengthSquared[1] ? 2 : 1) : (edgeLengthSquared[2] > edgeLengthSquared[0] ? 2 : 0);
				if (edgeLengthSquared[longestEdge] > maxEdgeLength2)
				{
					// Split this edge
					const nvidia::ExplicitRenderTriangle oldTriangle = triangle; // Save off old triangle for neighbor edge check
					const physx::PxVec3 newVertexPosition = 0.5f*(triangle.vertices[longestEdge].position + triangle.vertices[(longestEdge + 1)%3].position);
					splitTriangle(i, (uint32_t)longestEdge, newVertexPosition);
					// NOTE(review): 'triangle' may dangle after splitTriangle (pushBack can
					// reallocate the mesh array); only the copies oldTriangle and
					// newVertexPosition are used below.
					// Now split neighbor edges
					const uint32_t neighborCount = mOriginalTriangleNeighborhoods.getNeighborCount(i);
					const uint32_t* neighbors = mOriginalTriangleNeighborhoods.getNeighbors(i);
					for (uint32_t j = 0; j < neighborCount; ++j)
					{
						const uint32_t originalNeighbor = *neighbors++;
						if (originalNeighbor >= mTrianglePartition)
						{
							// Consider every triangle split off from this original neighbor
							// (the child list may grow as we split within this loop)
							physx::Array<uint32_t>& triangleChildList = mTriangleChildLists[originalNeighbor - mTrianglePartition];
							for (uint32_t k = 0; k < triangleChildList.size(); ++k)
							{
								const uint32_t neighbor = triangleChildList[k];
								if (neighbor >= mTrianglePartition)
								{
									// Neighbor is a process triangle - split it too, if the neighbor shares an edge, and the split point is on the shared edge
									const int32_t sharedEdge = trianglesShareEdge(oldTriangle, mesh[neighbor], tol2);
									if (sharedEdge >= 0)
									{
										physx::PxVec3 pointOnEdge;
										float perp2;
										const int32_t edgeToSplit = pointOnAnEdge(pointOnEdge, perp2, newVertexPosition, mesh[neighbor], tol2, tol2);
										if (edgeToSplit == sharedEdge && !((mTriangleInfo[neighbor - mTrianglePartition].lockedEdges >> edgeToSplit)&1))
										{
											splitTriangle(neighbor, (uint32_t)edgeToSplit, pointOnEdge);
										}
									}
								}
							}
						}
					}
					splitDone = true;
				}
			}
		} while (splitDone);
	}
+
	// Welds nearby free vertices of the process triangles: builds a VertexRep (fattened
	// bounds + position + facet normal) for every process-triangle vertex, builds the
	// vertex neighbor lookup, then for each free vertex averages its position (and normal)
	// with all free neighbor vertices within snapTol and writes the averaged position back
	// to the vertex and all those neighbors. Must run before resolveIntersections, which
	// reads mVertexBounds. Note this pass is order-dependent: vertices are processed in
	// iterator order and already-snapped positions feed later iterations.
	void snapVertices(float snapTol)
	{
		physx::Array<nvidia::ExplicitRenderTriangle>& mesh = *mMesh;

		// Create a small bounding cube for each vertex
		const uint32_t vertexCount = 3*(mesh.size()-mTrianglePartition);
		mVertexBounds.resize(vertexCount);

		if (mVertexBounds.size() == 0)
		{
			return;
		}

		VertexRep* vertexRep = &mVertexBounds[0];
		for (uint32_t i = mTrianglePartition; i < mesh.size(); ++i)
		{
			const physx::PxVec3 normal = mesh[i].calculateNormal();
			for (uint32_t j = 0; j < 3; ++j, ++vertexRep)
			{
				// NOTE(review): aabb is include()d without an explicit setEmpty() here -
				// assumes BoundsRep's constructor leaves the aabb empty; confirm.
				vertexRep->bounds.aabb.include(mesh[i].vertices[j].position);
				vertexRep->bounds.aabb.fattenFast(snapTol);
				vertexRep->position = mesh[i].vertices[j].position;
				vertexRep->normal = normal;
			}
		}

		// Generate neighbor info
		mVertexNeighborhoods.setBounds(&mVertexBounds[0].bounds, vertexCount, sizeof(VertexRep));

		const float snapTol2 = snapTol*snapTol;

		// Run through all free vertices, look for neighbors that are free, and snap them together
		for (MeshProcessor::FreeVertexIt it(*this); it.valid(); it.inc())
		{
			Vertex& vertex = mesh[it.triangleIndex()].vertices[it.vertexIndex()];
			VertexRep& vertexRep = it.vertexRep();
			uint32_t N = 1;	// number of positions accumulated (this vertex plus close neighbors)
			const physx::PxVec3 oldVertexPosition = vertex.position;
			// First pass: accumulate positions/normals of all free neighbors within snapTol
			for (MeshProcessor::FreeNeighborVertexIt nIt(*this, it.triangleIndex(), it.vertexIndex()); nIt.valid(); nIt.inc())
			{
				Vertex& neighborVertex = mesh[nIt.triangleIndex()].vertices[nIt.vertexIndex()];
				if ((neighborVertex.position-oldVertexPosition).magnitudeSquared() < snapTol2)
				{
					vertex.position += neighborVertex.position;
					vertexRep.normal += nIt.vertexRep().normal;
					++N;
				}
			}
			// Average, then write the snapped position/normal back to this vertex's rep
			vertex.position *= 1.0f/N;
			vertexRep.position = vertex.position;
			vertexRep.normal.normalize();
			// Second pass: move the same neighbors onto the averaged position
			for (MeshProcessor::FreeNeighborVertexIt nIt(*this, it.triangleIndex(), it.vertexIndex()); nIt.valid(); nIt.inc())
			{
				Vertex& neighborVertex = mesh[nIt.triangleIndex()].vertices[nIt.vertexIndex()];
				if ((neighborVertex.position-oldVertexPosition).magnitudeSquared() < snapTol2)
				{
					neighborVertex.position = vertex.position;
					nIt.vertexRep().position = vertex.position;
					nIt.vertexRep().normal = vertexRep.normal;
				}
			}
		}
	}
+
	// Iteratively resolves interferences (overlaps) between process triangles and the rest
	// of the geometry (other part triangles and the parent/face mesh) introduced by earlier
	// vertex-moving passes. Offending free vertices are pulled back toward their recorded
	// positions (see relaxTriangleFreeVertices) by relaxationFactor per iteration, for up
	// to maxIterations iterations or until no interfering pairs remain.
	// Requires snapVertices() to have populated mVertexBounds; no-op without a parent mesh.
	void resolveIntersections(float relaxationFactor = 0.5f, uint32_t maxIterations = 10)
	{
		physx::Array<nvidia::ExplicitRenderTriangle>& mesh = *mMesh;

		if (mesh.size() == 0 || mParentMesh == NULL)
		{
			return;
		}

		physx::Array<nvidia::ExplicitRenderTriangle>& parentMesh = *mParentMesh;

		// Find neighborhoods for the new active triangles, the inactive triangles, and the face mesh triangles.
		// Representation ("rep") index space: [0, parentMeshSize) = parent (face) triangles,
		// [parentMeshSize, parentMeshSize + mesh.size()) = part-mesh triangles.
		const uint32_t parentMeshSize = parentMesh.size();
		physx::Array<BoundsRep> triangleBounds;
		triangleBounds.resize(parentMeshSize + mesh.size());
		// Triangles from the face mesh
		for (uint32_t i = 0; i < parentMeshSize; ++i)
		{
			nvidia::ExplicitRenderTriangle& triangle = parentMesh[i];
			BoundsRep& boundsRep = triangleBounds[i];
			boundsRep.aabb.setEmpty();
			for (uint32_t v = 0; v < 3; ++v)
			{
				boundsRep.aabb.include(triangle.vertices[v].position);
			}
			boundsRep.aabb.fattenFast(mPadding);
		}
		// Triangles from the part mesh
		for (uint32_t i = 0; i < mesh.size(); ++i)
		{
			nvidia::ExplicitRenderTriangle& triangle = mesh[i];
			BoundsRep& boundsRep = triangleBounds[i+parentMeshSize];
			boundsRep.aabb.setEmpty();
			for (uint32_t v = 0; v < 3; ++v)
			{
				boundsRep.aabb.include(triangle.vertices[v].position);
				// Also include the triangle's original vertices if it's a process triangle, so we can check for tunneling
				if (i >= mTrianglePartition)
				{
					VertexRep& vertexRep = mVertexBounds[3*(i-mTrianglePartition) + v];
					boundsRep.aabb.include(vertexRep.position);
				}
			}
			boundsRep.aabb.fattenFast(mPadding);
		}

		NeighborLookup triangleNeighborhoods;
		triangleNeighborhoods.setBounds(&triangleBounds[0], triangleBounds.size(), sizeof(triangleBounds[0]));

		const nvidia::ExplicitRenderTriangle* parentMeshStart = parentMeshSize ? &parentMesh[0] : NULL;

		// Find interfering pairs of triangles; only process triangles (rep index >=
		// mTrianglePartition + parentMeshSize) seed the search
		physx::Array<IntPair> interferingPairs;
		for (uint32_t repIndex = mTrianglePartition+parentMeshSize; repIndex < mesh.size()+parentMeshSize; ++repIndex)
		{
			const uint32_t neighborCount = triangleNeighborhoods.getNeighborCount(repIndex);
			const uint32_t* neighborRepIndices = triangleNeighborhoods.getNeighbors(repIndex);
			for (uint32_t j = 0; j < neighborCount; ++j)
			{
				const uint32_t neighborRepIndex = *neighborRepIndices++;
				if (repIndex > neighborRepIndex) // Only count each pair once
				{
					// Negative tolerance: small overlaps are forgiven (see trianglesInterfere)
					if (trianglesInterfere(repIndex, neighborRepIndex, parentMeshStart, parentMeshSize, -mTol))
					{
						IntPair& pair = interferingPairs.insert();
						pair.set((int32_t)repIndex, (int32_t)neighborRepIndex);
					}
				}
			}
		}

		// Now run through the interference list, pulling the vertices in the offending triangles back to
		// their original positions. Iterate until there are no more interfering triangles, or the maximum
		// number of iterations is reached.
		physx::Array<bool> handled;	// per process triangle: relaxed already this iteration?
		handled.resize(mesh.size() - mTrianglePartition, false);
		for (uint32_t iterN = 0; iterN < maxIterations && interferingPairs.size(); ++iterN)
		{
			for (uint32_t pairN = 0; pairN < interferingPairs.size(); ++pairN)
			{
				const IntPair& pair = interferingPairs[pairN];
				const uint32_t i0 = (uint32_t)pair.i0;
				const uint32_t i1 = (uint32_t)pair.i1;
				// Relax each process triangle in the pair at most once per iteration
				if (i0 >= mTrianglePartition + parentMeshSize && !handled[i0 - mTrianglePartition - parentMeshSize])
				{
					relaxTriangleFreeVertices(i0 - parentMeshSize, relaxationFactor);
					handled[i0 - mTrianglePartition - parentMeshSize] = true;
				}
				if (i1 >= mTrianglePartition + parentMeshSize && !handled[i1 - mTrianglePartition - parentMeshSize])
				{
					relaxTriangleFreeVertices(i1 - parentMeshSize, relaxationFactor);
					handled[i1 - mTrianglePartition - parentMeshSize] = true;
				}
			}
			// We've given the vertices a relaxation pass. Reset the handled list, and remove pairs that no longer interfere
			for (uint32_t pairN = interferingPairs.size(); pairN--;)
			{
				const IntPair& pair = interferingPairs[pairN];
				const uint32_t i0 = (uint32_t)pair.i0;
				const uint32_t i1 = (uint32_t)pair.i1;
				if (i0 >= mTrianglePartition + parentMeshSize)
				{
					handled[i0 - mTrianglePartition - parentMeshSize] = false;
				}
				if (i1 >= mTrianglePartition + parentMeshSize)
				{
					handled[i1 - mTrianglePartition - parentMeshSize] = false;
				}
				if (!trianglesInterfere(i0, i1, parentMeshStart, parentMeshSize, -mTol))
				{
					interferingPairs.replaceWithLast(pairN);
				}
			}
		}
	}
+
+private:
	// Returns all members to their default (empty) state; called by the constructor and at
	// the start of setMesh().
	void reset()
	{
		mMesh = NULL;
		mParentMesh = NULL;
		mTrianglePartition = 0;
		mOriginalTriangleBounds.resize(0);
		mTol = 0.0f;
		mPadding = 0.0f;
		mOriginalTriangleNeighborhoods.setBounds(NULL, 0, 0);
		mTriangleInfo.resize(0);
		mTriangleFrames.resize(0);
		mTriangleChildLists.resize(0);
		mVertexBounds.resize(0);
		mVertexNeighborhoods.setBounds(NULL, 0, 0);
	}
+
	// Splits the triangle at triangleIndex along its edge edgeIndex (edge k runs from
	// vertex k to vertex (k+1)%3) at newVertexPosition, appending one new triangle to the
	// mesh, the info array, and the ancestor's child list. Non-positional vertex data at
	// the split point is interpolated from the ancestor triangle's frame. Lock flags are
	// propagated: the newly created interior edge is unlocked in both halves, and the split
	// vertex is unlocked unless the split edge itself was locked.
	// NOTE(review): the pushBacks below may reallocate mesh/mTriangleInfo; the references
	// 'triangle' and 'info' are not used after those pushBacks occur.
	PX_INLINE void splitTriangle(uint32_t triangleIndex, uint32_t edgeIndex, const physx::PxVec3& newVertexPosition)
	{
		physx::Array<nvidia::ExplicitRenderTriangle>& mesh = *mMesh;
		nvidia::ExplicitRenderTriangle& triangle = mesh[triangleIndex];
		const unsigned nextEdge = (edgeIndex + 1)%3;
		nvidia::ExplicitRenderTriangle newTriangle = triangle;
		TriangleLockInfo& info = mTriangleInfo[triangleIndex-mTrianglePartition];
		TriangleLockInfo newInfo = info;
		const bool splitEdgeIsLocked = ((info.lockedEdges>>edgeIndex)&1) != 0;
		// Original triangle keeps (split point, v[e+1], v[e+2]); its edge (e+2)
		// (v[e+2] -> split point) is the new interior edge
		info.lockedEdges &= ~(uint16_t)(1<<((edgeIndex + 2)%3)); // New edge is not locked
		if (!splitEdgeIsLocked)
		{
			info.lockedVertices &= ~(uint16_t)(1<<edgeIndex); // New vertex is not locked if split edge is not locked
		}
		// New triangle keeps (v[e], split point, v[e+2]); its edge (e+1)
		// (split point -> v[e+2]) is the new interior edge
		newInfo.lockedEdges &= ~(uint16_t)(1<<nextEdge); // New edge is not locked
		if (!splitEdgeIsLocked)
		{
			newInfo.lockedVertices &= ~(uint16_t)(1<<nextEdge); // New vertex is not locked if split edge is not locked
		}
		// Reposition the shared vertex and interpolate its data from the ancestor's frame
		const nvidia::TriangleFrame& triangleFrame = mTriangleFrames[newInfo.originalTriangleIndex-mTrianglePartition];
		triangle.vertices[edgeIndex].position = newVertexPosition;
		triangleFrame.interpolateVertexData(triangle.vertices[edgeIndex]);
		newTriangle.vertices[nextEdge]= triangle.vertices[edgeIndex];
		const uint32_t newTriangleIndex = mesh.size();
		mesh.pushBack(newTriangle);
		mTriangleInfo.pushBack(newInfo);
		// Record the new triangle as a child of the same ancestor, for spatial lookup
		mTriangleChildLists[newInfo.originalTriangleIndex-mTrianglePartition].pushBack(newTriangleIndex);
	}
+
+	// Moves each unlocked (free) vertex of the given triangle a fraction
+	// relaxationFactor of the way toward its representative position stored in
+	// mVertexBounds.  Any coincident neighboring vertices (within mTol of the old
+	// position) are moved along with it, so welded vertices stay welded.
+	PX_INLINE void relaxTriangleFreeVertices(uint32_t triangleIndex, float relaxationFactor)
+	{
+		const float tol2 = mTol*mTol;	// Compare squared distances to avoid sqrt
+		physx::Array<nvidia::ExplicitRenderTriangle>& mesh = *mMesh;
+		const uint32_t relativeIndex = triangleIndex - mTrianglePartition;	// Index into per-active-triangle arrays
+		TriangleLockInfo& info = mTriangleInfo[relativeIndex];
+		for (uint32_t v = 0; v < 3; ++v)
+		{
+			if (!((info.lockedVertices >> v)&1))	// Only relax vertices that are not locked
+			{
+				Vertex& vertex = mesh[triangleIndex].vertices[v];
+				VertexRep& vertexRep = mVertexBounds[3*relativeIndex + v];
+				const physx::PxVec3 oldVertexPosition = vertex.position;
+				// Lerp toward the representative position
+				vertex.position = (1.0f - relaxationFactor)*vertex.position + relaxationFactor*vertexRep.position;
+				// Drag along any free neighbor vertices that were coincident with the old position
+				for (MeshProcessor::FreeNeighborVertexIt nIt(*this, triangleIndex, v); nIt.valid(); nIt.inc())
+				{
+					Vertex& neighborVertex = mesh[nIt.triangleIndex()].vertices[nIt.vertexIndex()];
+					if ((neighborVertex.position-oldVertexPosition).magnitudeSquared() < tol2)
+					{
+						neighborVertex.position = vertex.position;
+					}
+				}
+			}
+		}
+	}
+
+	// Returns true if the triangles at indexA/indexB interfere within tolerance tol.
+	// Index space: [0, parentMeshSize) refers into parentMeshStart; indices >=
+	// parentMeshSize refer into *mMesh (offset by parentMeshSize); indices >=
+	// mTrianglePartition + parentMeshSize are "active" triangles which also have
+	// per-vertex representative positions in mVertexBounds.
+	// Besides the static triangle-triangle test, active triangles are checked for
+	// a vertex having *swept* through the other triangle (segment from the
+	// representative position to the current position), which catches tunneling
+	// caused by vertex displacement.
+	PX_INLINE bool trianglesInterfere(uint32_t indexA, uint32_t indexB, const nvidia::ExplicitRenderTriangle* parentMeshStart, uint32_t parentMeshSize, float tol)
+	{
+		physx::Array<nvidia::ExplicitRenderTriangle>& mesh = *mMesh;
+		const nvidia::ExplicitRenderTriangle& triangleA = indexA >= parentMeshSize ? mesh[indexA - parentMeshSize] : parentMeshStart[indexA];
+		const nvidia::ExplicitRenderTriangle& triangleB = indexB >= parentMeshSize ? mesh[indexB - parentMeshSize] : parentMeshStart[indexB];
+
+		// Check for static interference
+		if (::trianglesInterfere(triangleA, triangleB, tol))	// Global-namespace triangle-triangle overlap test
+		{
+			return true;
+		}
+
+		// See if one of the vertices of A swept through B
+		if (indexA >= mTrianglePartition + parentMeshSize)	// Only active triangles have swept-vertex data
+		{
+			for (uint32_t v = 0; v < 3; ++v)
+			{
+				VertexRep& vertexRep = mVertexBounds[3*(indexA-mTrianglePartition-parentMeshSize) + v];
+				if (segmentIntersectsTriangle(vertexRep.position, triangleA.vertices[v].position, triangleB, tol))
+				{
+					return true;
+				}
+			}
+		}
+
+		// See if one of the vertices of B swept through A
+		if (indexB >= mTrianglePartition + parentMeshSize)
+		{
+			for (uint32_t v = 0; v < 3; ++v)
+			{
+				VertexRep& vertexRep = mVertexBounds[3*(indexB-mTrianglePartition-parentMeshSize) + v];
+				if (segmentIntersectsTriangle(vertexRep.position, triangleB.vertices[v].position, triangleA, tol))
+				{
+					return true;
+				}
+			}
+		}
+
+		// No interference found
+		return false;
+	}
+
+
+	physx::Array<nvidia::ExplicitRenderTriangle>* mMesh;	// Mesh being processed (not owned)
+	physx::Array<nvidia::ExplicitRenderTriangle>* mParentMesh;	// Optional parent chunk mesh, used for boundary/interference tests (not owned)
+	uint32_t mTrianglePartition;	// Triangles at index >= mTrianglePartition in *mMesh are the "active" set being modified
+	physx::Array<BoundsRep> mOriginalTriangleBounds;	// AABBs of the active triangles as originally supplied
+	float mTol;	// Welding/coincidence tolerance
+	float mPadding;	// Extra padding applied to bounds/interference tests
+	NeighborLookup mOriginalTriangleNeighborhoods;	// Triangle adjacency derived from mOriginalTriangleBounds
+	physx::Array<TriangleLockInfo> mTriangleInfo;	// Per-active-triangle lock flags and original-triangle index
+	physx::Array<nvidia::TriangleFrame> mTriangleFrames;	// Interpolation frames for the original active triangles
+	physx::Array< physx::Array<uint32_t> > mTriangleChildLists;	// For each original active triangle, indices of triangles created by splitting it
+	physx::Array<VertexRep> mVertexBounds;	// Per-vertex representative data (3 entries per active triangle)
+	NeighborLookup mVertexNeighborhoods;	// Vertex adjacency derived from mVertexBounds
+
+	friend class FreeVertexIt;
+	friend class FreeNeighborVertexIt;
+};
+
+
+// A triangle is part of the "active" set when it references a valid material
+// frame whose fracture index is -1.  Triangles with an out-of-range
+// extraDataIndex are never active.
+PX_INLINE bool triangleIsPartOfActiveSet(const nvidia::ExplicitRenderTriangle& triangle, const ExplicitHierarchicalMeshImpl& hMesh)
+{
+	const uint32_t frameIndex = triangle.extraDataIndex;
+	if (frameIndex < hMesh.mMaterialFrames.size())
+	{
+		return hMesh.mMaterialFrames[frameIndex].mFractureIndex == -1;
+	}
+	return false;	// No valid material frame => not active
+}
+
+
+// Applies procedural noise displacement to the interior (fracture) faces of a chunk's mesh.
+// Steps:
+//   1) Partition the part's triangles so the active (noise-receiving) set is at the back.
+//   2) Refine the active set with a MeshProcessor: lock boundary vertices, remove
+//      T-junctions, subdivide to the noise grid size, and snap nearby vertices.
+//   3) Displace each free vertex along its representative normal using a deterministic
+//      sum of random sinusoidal modes, correcting normals/tangents/binormals via the
+//      field gradient.
+//   4) Resolve any self-intersections the displacement introduced.
+// Parameters:
+//   hMesh           - hierarchical mesh that owns both parts
+//   parentPartIndex - part of the parent chunk (its mesh constrains boundary geometry)
+//   partIndex       - part whose mesh is displaced
+//   noise           - amplitude/frequency/gridSize controls (amplitude is relative to the level-0 mesh size)
+//   gridScale       - length scale from which the subdivision grid size is derived
+static void applyNoiseToChunk
+(
+	ExplicitHierarchicalMeshImpl& hMesh,
+	uint32_t parentPartIndex,
+	uint32_t partIndex,
+	const nvidia::NoiseParameters& noise,
+	float gridScale
+	)
+{
+	if (partIndex >= hMesh.mParts.size() || parentPartIndex >= hMesh.mParts.size())
+	{
+		return;	// Invalid part index; nothing to do
+	}
+
+	// Mesh and mesh size
+	float level0Size = hMesh.mParts[(uint32_t)hMesh.mChunks[0]->mPartIndex]->mBounds.getExtents().magnitude();	// Reference size of the whole (level-0) mesh
+	physx::Array<nvidia::ExplicitRenderTriangle>& partMesh = hMesh.mParts[partIndex]->mMesh;
+	physx::Array<nvidia::ExplicitRenderTriangle>& parentPartMesh = hMesh.mParts[parentPartIndex]->mMesh;
+
+	// Grid parameters (at least 2 cells across gridScale)
+	const float gridSize = physx::PxAbs(gridScale) / PxMax(2, noise.gridSize);
+	if (gridSize == 0.0f)
+	{
+		return;
+	}
+
+	const float tol = 0.0001f*gridSize;	// Welding/snap tolerance, proportional to grid resolution
+
+//	MeshProcessor::removeSlivers(partMesh, 0.5f*tol);
+
+	// Sort triangles based upon whether or not they are part of the active group.
+	// Put the active triangles last in the list, so we only need traverse them when splitting
+	uint32_t inactiveTriangleCount = 0;
+	uint32_t highMark = partMesh.size();
+	while (inactiveTriangleCount < highMark)
+	{
+		if (!triangleIsPartOfActiveSet(partMesh[inactiveTriangleCount], hMesh))
+		{
+			++inactiveTriangleCount;	// Already in the inactive (front) section
+		}
+		else
+		if (triangleIsPartOfActiveSet(partMesh[highMark-1], hMesh))
+		{
+			--highMark;	// Already in the active (back) section
+		}
+		else
+		{
+			nvidia::swap(partMesh[inactiveTriangleCount++], partMesh[--highMark]);	// Both misplaced; swap
+		}
+	}
+	PX_ASSERT(inactiveTriangleCount == highMark);
+
+	// Refine the active portion of the mesh so the noise can be represented
+	MeshProcessor chunkMeshProcessor;
+	chunkMeshProcessor.setMesh(partMesh, &parentPartMesh, inactiveTriangleCount, tol);
+	chunkMeshProcessor.lockBorderVertices();	// Shared boundary geometry must not move
+	chunkMeshProcessor.removeTJunctions();
+	chunkMeshProcessor.subdivide(gridSize);
+	chunkMeshProcessor.snapVertices(4*tol);
+
+	// Now create and apply noise field.  The global RNG's seed is saved and restored so
+	// noise generation is deterministic and has no side effects on other fracture code.
+	const uint32_t rndSeedSave = userRnd.m_rnd.seed();
+	userRnd.m_rnd.setSeed(0);
+	const float scaledAmplitude = noise.amplitude*level0Size;	// User amplitude is relative to level-0 size
+	const uint32_t numModes = 10;
+	const float amplitude = scaledAmplitude / physx::PxSqrt((float)numModes);	// Scale by frequency?
+	const float spatialFrequency = noise.frequency*(physx::PxTwoPi/gridSize);
+	float phase[numModes][3];
+	physx::PxVec3 k[numModes][3];	// Random wave vectors, one per mode and field component
+	for (uint32_t n = 0; n < numModes; ++n)
+	{
+		for (uint32_t i = 0; i < 3; ++i)
+		{
+			phase[n][i] = userRnd.getReal(-physx::PxPi, physx::PxPi);
+			k[n][i] = physx::PxVec3(userRnd.getReal(-1.0f, 1.0f), userRnd.getReal(-1.0f, 1.0f), userRnd.getReal(-1.0f, 1.0f));
+			k[n][i].normalize();	// Not a uniform spherical distribution, but it's ok
+			k[n][i] *= spatialFrequency;
+		}
+	}
+	userRnd.m_rnd.setSeed(rndSeedSave);
+
+	// Displace each free vertex and fix up its tangent frame
+	for (MeshProcessor::FreeVertexIt it(chunkMeshProcessor); it.valid(); it.inc())
+	{
+		physx::PxVec3& r = partMesh[it.triangleIndex()].vertices[it.vertexIndex()].position;
+		physx::PxVec3 field(0.0f);
+		physx::PxMat33 gradient(physx::PxVec3(0.0f), physx::PxVec3(0.0f), physx::PxVec3(0.0f));
+		for (uint32_t n = 0; n < numModes; ++n)
+		{
+			for (uint32_t i = 0; i < 3; ++i)
+			{
+				const float phi = k[n][i].dot(r) + phase[n][i];
+				field[i] += amplitude*physx::PxSin(phi);
+				for (uint32_t j = 0; j < 3; ++j)
+				{
+					gradient(i,j) += amplitude*k[n][i][j]*physx::PxCos(phi);	// d(field[i])/d(r[j])
+				}
+			}
+		}
+		r += field.dot(it.vertexRep().normal)*it.vertexRep().normal;	// Displace along the representative normal only
+		physx::PxVec3 g = gradient.transformTranspose(it.vertexRep().normal);
+		physx::PxVec3& n = partMesh[it.triangleIndex()].vertices[it.vertexIndex()].normal;
+		n += g.dot(n)*n - g;	// First-order normal correction from the field gradient
+		n.normalize();
+		physx::PxVec3& t = partMesh[it.triangleIndex()].vertices[it.vertexIndex()].tangent;
+		t -= t.dot(n)*n;	// Re-orthogonalize tangent against the corrected normal
+		t.normalize();
+		partMesh[it.triangleIndex()].vertices[it.vertexIndex()].binormal = n.cross(t);	// Keep the frame orthonormal
+	}
+
+	// Fix up any mesh intersections that may have resulted from the application of noise
+	chunkMeshProcessor.resolveIntersections();
+}
+
+
+// Splits the chunk at chunkIndex into child chunks, one per Voronoi cell built from
+// desc.sites (optionally filtered by desc.chunkIndices).  For each site, the cell is
+// constructed as the intersection of half-space BSPs (one per cell plane), intersected
+// with the chunk's own BSP, optionally decomposed into islands, and converted into new
+// parts/chunks.  Children that are empty, have no collision, or are smaller than
+// desc.minimumChunkSize are discarded; surviving children may receive face noise.
+// Returns false only if *cancel became true during processing.
+static bool voronoiSplitChunkInternal
+(
+	ExplicitHierarchicalMeshImpl& hMesh,
+	uint32_t chunkIndex,
+	const ApexCSG::IApexBSP& chunkBSP,
+	const nvidia::FractureVoronoiDesc& desc,
+	const CollisionDesc& collisionDesc,
+	nvidia::IProgressListener& progressListener,
+	volatile bool* cancel
+)
+{
+	bool canceled = false;
+
+	// Select the sites to use: either all of desc.sites, or only those assigned to this chunk
+	physx::Array<physx::PxVec3> sitesForChunk;
+	const physx::PxVec3* sites = desc.sites;
+	uint32_t siteCount = desc.siteCount;
+	if (desc.chunkIndices != NULL)
+	{
+		for (uint32_t siteN = 0; siteN < desc.siteCount; ++siteN)
+		{
+			if (desc.chunkIndices[siteN] == chunkIndex)
+			{
+				sitesForChunk.pushBack(desc.sites[siteN]);
+			}
+		}
+		siteCount = sitesForChunk.size();
+		sites = siteCount > 0 ? &sitesForChunk[0] : NULL;
+	}
+
+	if (siteCount < 2)
+	{
+		return !canceled;	// Don't want to generate a single child which is a duplicate of the parent, when siteCount == 1
+	}
+
+	HierarchicalProgressListener progress((int32_t)PxMax(siteCount, 1u), &progressListener);
+
+	// Minimum allowed child size (squared), relative to the level-0 chunk bounds
+	const float minimumRadius2 = hMesh.chunkBounds(0).getExtents().magnitudeSquared()*desc.minimumChunkSize*desc.minimumChunkSize;
+
+	for (VoronoiCellPlaneIterator i(sites, siteCount); i.valid(); i.inc())
+	{
+		// Create a voronoi cell for this site
+		ApexCSG::IApexBSP* cellBSP = createBSP(hMesh.mBSPMemCache, chunkBSP.getInternalTransform());	// BSPs start off representing all space
+		ApexCSG::IApexBSP* planeBSP = createBSP(hMesh.mBSPMemCache, chunkBSP.getInternalTransform());
+		const physx::PxPlane* planes = i.cellPlanes();
+		for (uint32_t planeN = 0; planeN < i.cellPlaneCount(); ++planeN)
+		{
+			const physx::PxPlane& plane = planes[planeN];
+
+			// Create single-plane slice BSP
+			nvidia::IntersectMesh grid;
+			GridParameters gridParameters;
+			gridParameters.interiorSubmeshIndex = desc.materialDesc.interiorSubmeshIndex;
+			// Defer noise generation if we're using displacement maps
+			gridParameters.noise = nvidia::NoiseParameters();
+			gridParameters.level0Mesh = &hMesh.mParts[0]->mMesh;
+			gridParameters.materialFrameIndex = hMesh.addMaterialFrame();
+			nvidia::MaterialFrame materialFrame = hMesh.getMaterialFrame(gridParameters.materialFrameIndex);
+			materialFrame.buildCoordinateSystemFromMaterialDesc(desc.materialDesc, plane);
+			materialFrame.mFractureMethod = nvidia::FractureMethod::Voronoi;
+			materialFrame.mSliceDepth = hMesh.depth(chunkIndex) + 1;
+			// NOTE(review): mFractureIndex (not mFractureMethod, which is set above) is left at
+			// its default of -1, since voronoi cutout faces are not associated with a direction index
+			hMesh.setMaterialFrame(gridParameters.materialFrameIndex, materialFrame);
+			gridParameters.triangleFrame.setFlat(materialFrame.mCoordinateSystem, desc.materialDesc.uvScale, desc.materialDesc.uvOffset);
+			buildIntersectMesh(grid, plane, materialFrame, (int32_t)desc.noiseMode, &gridParameters);
+
+			ApexCSG::BSPBuildParameters bspBuildParams = gDefaultBuildParameters;
+			bspBuildParams.internalTransform = chunkBSP.getInternalTransform();
+			bspBuildParams.rnd = &userRnd;
+
+			if(desc.useDisplacementMaps)
+			{
+				// Displacement map generation is deferred until the end of fracturing
+				// This used to be where a slice would populate a displacement map with
+				// offsets along the plane, but no longer
+			}
+
+			// Intersect the accumulating cell with this plane's half-space
+			planeBSP->fromMesh(&grid.m_triangles[0], grid.m_triangles.size(), bspBuildParams);
+			cellBSP->combine(*planeBSP);
+			cellBSP->op(*cellBSP, ApexCSG::Operation::Intersection);
+		}
+		planeBSP->release();
+
+		if (hMesh.mParts[(uint32_t)hMesh.mChunks[chunkIndex]->mPartIndex]->mFlags & ExplicitHierarchicalMeshImpl::Part::MeshOpen)
+		{
+			cellBSP->deleteTriangles();	// Don't use interior triangles on an open mesh
+		}
+
+		// Intersect the cell with the chunk itself
+		ApexCSG::IApexBSP* bsp = createBSP(hMesh.mBSPMemCache);
+		bsp->copy(*cellBSP);
+		bsp->combine(chunkBSP);
+		bsp->op(*bsp, ApexCSG::Operation::Intersection);
+		cellBSP->release();
+
+		if (gIslandGeneration)
+		{
+			bsp = bsp->decomposeIntoIslands();	// May produce a linked list of island BSPs
+		}
+
+		// Walk the island list (or the single BSP if island generation is off)
+		while (bsp != NULL)
+		{
+			if (cancel != NULL && *cancel)
+			{
+				canceled = true;
+			}
+
+			if (!canceled)
+			{
+				// Create a mesh with chunkBSP (or its islands)
+				const uint32_t newPartIndex = hMesh.addPart();
+				const uint32_t newChunkIndex = hMesh.addChunk();
+				bsp->toMesh(hMesh.mParts[newPartIndex]->mMesh);
+				hMesh.mParts[newPartIndex]->mMeshBSP->copy(*bsp);
+				hMesh.buildMeshBounds(newPartIndex);
+				hMesh.buildCollisionGeometryForPart(newPartIndex, getVolumeDesc(collisionDesc, hMesh.depth(chunkIndex)+1));
+				hMesh.mChunks[newChunkIndex]->mParentIndex = (int32_t)chunkIndex;
+				hMesh.mChunks[newChunkIndex]->mPartIndex = (int32_t)newPartIndex;
+				if (hMesh.mParts[(uint32_t)hMesh.mChunks[chunkIndex]->mPartIndex]->mFlags & ExplicitHierarchicalMeshImpl::Part::MeshOpen)
+				{
+					hMesh.mParts[newPartIndex]->mFlags |= ExplicitHierarchicalMeshImpl::Part::MeshOpen;	// Open-ness is inherited
+				}
+				if (hMesh.mParts[newPartIndex]->mMesh.size() == 0 || hMesh.mParts[newPartIndex]->mCollision.size() == 0 ||
+					hMesh.chunkBounds(newChunkIndex).getExtents().magnitudeSquared() < minimumRadius2)
+				{
+					// Either no mesh, no collision, or too small.  Eliminate.
+					hMesh.removeChunk(newChunkIndex);
+					hMesh.removePart(newPartIndex);
+				}
+				else
+				{
+					// Apply graphical noise to new part, if requested
+					if (desc.faceNoise.amplitude > 0.0f){
+						const uint32_t parentPartIndex = (uint32_t)*hMesh.partIndex(chunkIndex);
+						applyNoiseToChunk(hMesh, parentPartIndex, newPartIndex, desc.faceNoise, hMesh.meshBounds(newPartIndex).getExtents().magnitude());
+					}
+				}
+			}
+			// Get next bsp in island decomposition
+			ApexCSG::IApexBSP* nextBSP = bsp->getNext();
+			bsp->release();
+			bsp = nextBSP;
+		}
+
+		progress.completeSubtask();
+	}
+
+	return !canceled;
+}
+
+namespace FractureTools
+{
+
+// Overrides the global tolerance set (ApexCSG::gDefaultTolerances) used by all
+// subsequently-built BSPs.  Each argument maps one-to-one onto a tolerance field.
+void setBSPTolerances
+(
+	float linearTolerance,
+	float angularTolerance,
+	float baseTolerance,
+	float clipTolerance,
+	float cleaningTolerance
+)
+{
+	ApexCSG::gDefaultTolerances.cleaning = cleaningTolerance;	// Mesh-cleaning weld tolerance
+	ApexCSG::gDefaultTolerances.clip = clipTolerance;	// Polygon clipping tolerance
+	ApexCSG::gDefaultTolerances.base = baseTolerance;	// Base numeric tolerance
+	ApexCSG::gDefaultTolerances.angular = angularTolerance;	// Angular (plane direction) tolerance
+	ApexCSG::gDefaultTolerances.linear = linearTolerance;	// Linear (distance) tolerance
+}
+
+// Overrides the global BSP build heuristics (gDefaultBuildParameters) used by all
+// subsequently-built BSPs.  Each argument maps one-to-one onto a parameter field.
+void setBSPBuildParameters
+(
+	float logAreaSigmaThreshold,
+	uint32_t testSetSize,
+	float splitWeight,
+	float imbalanceWeight
+)
+{
+	gDefaultBuildParameters.imbalanceWeight = imbalanceWeight;	// Penalty for unbalanced splits
+	gDefaultBuildParameters.splitWeight = splitWeight;	// Penalty for triangle splitting
+	gDefaultBuildParameters.testSetSize = testSetSize;	// Number of candidate split planes tested
+	gDefaultBuildParameters.logAreaSigmaThreshold = logAreaSigmaThreshold;	// Area-distribution threshold for plane selection
+}
+
+// Trims the collision hulls of the given chunks against one another so overlapping
+// hulls are separated.  For every overlapping hull pair a separating plane is found and
+// recorded as a trim plane for both hulls (clip depth bounded by maxTrimFraction of the
+// hull's extent along the plane normal); all trim planes are then applied in a second
+// pass.  If trimming empties every hull of a part, a replacement hull is rebuilt from
+// the part's graphics mesh.  O(N^2) in the number of chunks.
+static void trimChunkHulls(ExplicitHierarchicalMeshImpl& hMesh, uint32_t* chunkIndexArray, uint32_t chunkIndexArraySize, float maxTrimFraction)
+{
+	// Outer array is indexed by chunk #, and is of size chunkIndexArraySize
+	// Middle array is indexed by hull # for chunkIndexArray[chunk #], is of the same size as the part mCollision array associated with the chunk
+	// Inner array is a list of trim planes to be applied to each hull
+	physx::Array< physx::Array< physx::Array<physx::PxPlane> > > chunkHullTrimPlanes;
+
+	// Initialize arrays
+	chunkHullTrimPlanes.resize(chunkIndexArraySize);
+	for (uint32_t chunkNum = 0; chunkNum < chunkIndexArraySize; ++chunkNum)
+	{
+		physx::Array< physx::Array<physx::PxPlane> >& hullTrimPlanes = chunkHullTrimPlanes[chunkNum];
+		const uint32_t chunkIndex = chunkIndexArray[chunkNum];
+		const uint32_t partIndex = (uint32_t)hMesh.mChunks[chunkIndex]->mPartIndex;
+		const uint32_t hullCount = hMesh.mParts[partIndex]->mCollision.size();
+		hullTrimPlanes.resize(hullCount);
+	}
+
+	const physx::PxVec3 identityScale(1.0f);
+
+	// Compare each chunk's hulls against other chunk hulls, building up list of trim planes.  O(N^2), but so far this is only used for multi-fbx level 1 chunks, so N shouldn't be too large.
+	for (uint32_t chunkNum0 = 0; chunkNum0 < chunkIndexArraySize; ++chunkNum0)
+	{
+		const uint32_t chunkIndex0 = chunkIndexArray[chunkNum0];
+		const uint32_t partIndex0 = (uint32_t)hMesh.mChunks[chunkIndex0]->mPartIndex;
+		const physx::PxTransform tm0(hMesh.mChunks[chunkIndex0]->mInstancedPositionOffset);	// Hulls are stored in part-local space; instancing offset positions them
+		const uint32_t hullCount0 = hMesh.mParts[partIndex0]->mCollision.size();
+		physx::Array< physx::Array<physx::PxPlane> >& hullTrimPlanes0 = chunkHullTrimPlanes[chunkNum0];
+		for (uint32_t hullIndex0 = 0; hullIndex0 < hullCount0; ++hullIndex0)
+		{
+			PartConvexHullProxy* hull0 = hMesh.mParts[partIndex0]->mCollision[hullIndex0];
+			physx::Array<physx::PxPlane>& trimPlanes0 = hullTrimPlanes0[hullIndex0];
+
+			// Inner set of loops for other chunks (chunkNum1 > chunkNum0, so each pair is visited once)
+			for (uint32_t chunkNum1 = chunkNum0+1; chunkNum1 < chunkIndexArraySize; ++chunkNum1)
+			{
+				const uint32_t chunkIndex1 = chunkIndexArray[chunkNum1];
+				const uint32_t partIndex1 = (uint32_t)hMesh.mChunks[chunkIndex1]->mPartIndex;
+				const physx::PxTransform tm1(hMesh.mChunks[chunkIndex1]->mInstancedPositionOffset);
+				const uint32_t hullCount1 = hMesh.mParts[partIndex1]->mCollision.size();
+				physx::Array< physx::Array<physx::PxPlane> >& hullTrimPlanes1 = chunkHullTrimPlanes[chunkNum1];
+				for (uint32_t hullIndex1 = 0; hullIndex1 < hullCount1; ++hullIndex1)
+				{
+					PartConvexHullProxy* hull1 = hMesh.mParts[partIndex1]->mCollision[hullIndex1];
+					physx::Array<physx::PxPlane>& trimPlanes1 = hullTrimPlanes1[hullIndex1];
+
+					// Test overlap
+					ConvexHullImpl::Separation separation;
+					if (ConvexHullImpl::hullsInProximity(hull0->impl, tm0, identityScale, hull1->impl, tm1, identityScale, 0.0f, &separation))
+					{
+						// Add trim planes if there's an overlap.  Each hull is clipped by the
+						// separating plane (opposite orientations for the two hulls).
+						physx::PxPlane& trimPlane0 = trimPlanes0.insert();
+						trimPlane0 = separation.plane;
+						trimPlane0.d = PxMin(trimPlane0.d, maxTrimFraction*(separation.max0-separation.min0) - separation.max0);	// Bound clip distance
+						trimPlane0.d += trimPlane0.n.dot(tm0.p);	// Transform back into part local space
+						physx::PxPlane& trimPlane1 = trimPlanes1.insert();
+						trimPlane1 = physx::PxPlane(-separation.plane.n, -separation.plane.d);	// Same plane, facing the other hull
+						trimPlane1.d = PxMin(trimPlane1.d, maxTrimFraction*(separation.max1-separation.min1) + separation.min1);	// Bound clip distance
+						trimPlane1.d += trimPlane1.n.dot(tm1.p);	// Transform back into part local space
+					}
+				}
+			}
+		}
+	}
+
+	// Now traverse trim plane list and apply it to the chunks's hulls
+	for (uint32_t chunkNum = 0; chunkNum < chunkIndexArraySize; ++chunkNum)
+	{
+		const uint32_t chunkIndex = chunkIndexArray[chunkNum];
+		const uint32_t partIndex = (uint32_t)hMesh.mChunks[chunkIndex]->mPartIndex;
+		const uint32_t hullCount = hMesh.mParts[partIndex]->mCollision.size();
+		physx::Array< physx::Array<physx::PxPlane> >& hullTrimPlanes = chunkHullTrimPlanes[chunkNum];
+		for (uint32_t hullIndex = hullCount; hullIndex--;)	// Traverse backwards, in case we need to delete empty hulls
+		{
+			PartConvexHullProxy* hull = hMesh.mParts[partIndex]->mCollision[hullIndex];
+			physx::Array<physx::PxPlane>& trimPlanes = hullTrimPlanes[hullIndex];
+			for (uint32_t planeIndex = 0; planeIndex < trimPlanes.size(); ++planeIndex)
+			{
+				hull->impl.intersectPlaneSide(trimPlanes[planeIndex]);
+				if (hull->impl.isEmpty())
+				{
+					// Hull was clipped away entirely; remove it
+					PX_DELETE(hMesh.mParts[partIndex]->mCollision[hullIndex]);
+					hMesh.mParts[partIndex]->mCollision.replaceWithLast(hullIndex);
+					break;
+				}
+			}
+		}
+		// Make sure we haven't deleted every collision hull
+		if (hMesh.mParts[partIndex]->mCollision.size() == 0)
+		{
+			CollisionVolumeDesc collisionVolumeDesc;
+			collisionVolumeDesc.mHullMethod = ConvexHullMethod::WRAP_GRAPHICS_MESH;	// Should we use something simpler, like a box?
+			hMesh.buildCollisionGeometryForPart(partIndex, collisionVolumeDesc);
+		}
+	}
+}
+
+// Builds an ExplicitHierarchicalMesh from a flat triangle list.
+//   meshTriangles/meshTriangleCount - all triangles, grouped by part
+//   submeshData/submeshCount        - submesh (material) descriptions
+//   meshPartition                   - end index (exclusive) of each part's triangle run;
+//                                     NULL or count <= 1 means a single part
+//   parentIndices/parentIndexCount  - optional per-chunk parent indices; if absent, a
+//                                     "flat depth one" hierarchy is built: chunk 0 holds
+//                                     the whole mesh and every partition becomes its child
+// All created chunks are flagged Root; childless ones are additionally flagged RootLeaf.
+// Returns true (always succeeds).
+bool buildExplicitHierarchicalMesh
+(
+	ExplicitHierarchicalMesh& iHMesh,
+	const nvidia::ExplicitRenderTriangle* meshTriangles,
+	uint32_t meshTriangleCount,
+	const nvidia::ExplicitSubmeshData* submeshData,
+	uint32_t submeshCount,
+	uint32_t* meshPartition,
+	uint32_t meshPartitionCount,
+	int32_t* parentIndices,
+	uint32_t parentIndexCount
+)
+{
+	bool flatDepthOne = parentIndexCount == 0;	// No explicit hierarchy given => flat two-level tree
+
+	const bool havePartition = meshPartition != NULL && meshPartitionCount > 1;
+
+	if (!havePartition)
+	{
+		flatDepthOne = true;	// This only makes sense if we have a partition
+	}
+
+	if (parentIndices == NULL)
+	{
+		parentIndexCount = 0;	// Guard against a non-zero count with a NULL array
+	}
+
+	ExplicitHierarchicalMeshImpl& hMesh = *(ExplicitHierarchicalMeshImpl*)&iHMesh;
+	hMesh.clear();
+	// Part/chunk 0: the root
+	hMesh.addPart();
+	hMesh.mParts[0]->mMesh.reset();
+	const uint32_t part0Size = !flatDepthOne ? meshPartition[0] : meshTriangleCount;	// Build level 0 part out of all of the triangles if flatDepthOne = true
+	hMesh.mParts[0]->mMesh.reserve(part0Size);
+	uint32_t nextTriangle = 0;
+	for (uint32_t i = 0; i < part0Size; ++i)
+	{
+		hMesh.mParts[0]->mMesh.pushBack(meshTriangles[nextTriangle++]);
+	}
+	hMesh.buildMeshBounds(0);
+	hMesh.addChunk();
+	hMesh.mChunks[0]->mParentIndex = -1;	// Root has no parent
+	hMesh.mChunks[0]->mPartIndex = 0;
+
+	if (flatDepthOne)
+	{
+		nextTriangle = 0;	// reset - the partitions will re-consume all triangles as level-1 parts
+	}
+
+	// hasChildren[i] records whether chunk i acquires any children below
+	physx::Array<bool> hasChildren(meshPartitionCount+1, false);
+
+	if (havePartition)
+	{
+		// We have a partition - build hierarchy
+		uint32_t partIndex = 1;
+		const uint32_t firstLevel1Part = !flatDepthOne ? 1u : 0u;	// In flat mode, partition 0 also becomes a child part
+		for (uint32_t i = firstLevel1Part; i < meshPartitionCount; ++i, ++partIndex)
+		{
+			hMesh.addPart();
+			hMesh.mParts[partIndex]->mMesh.reset();
+			hMesh.mParts[partIndex]->mMesh.reserve(meshPartition[i] - nextTriangle);
+			while (nextTriangle < meshPartition[i])	// meshPartition[i] is the exclusive end of this part's run
+			{
+				hMesh.mParts[partIndex]->mMesh.pushBack(meshTriangles[nextTriangle++]);
+			}
+			hMesh.buildMeshBounds(partIndex);
+			hMesh.addChunk();
+			hMesh.mChunks[partIndex]->mParentIndex = partIndex < parentIndexCount ? parentIndices[partIndex] : 0;	// partIndex = chunkIndex here
+			if (hMesh.mChunks[partIndex]->mParentIndex >= 0)
+			{
+				hasChildren[(uint32_t)hMesh.mChunks[partIndex]->mParentIndex] = true;
+			}
+			hMesh.mChunks[partIndex]->mPartIndex = (int32_t)partIndex;	// partIndex = chunkIndex here
+		}
+	}
+
+	// Submesh data
+	hMesh.mSubmeshData.reset();
+	hMesh.mSubmeshData.reserve(submeshCount);
+	for (uint32_t i = 0; i < submeshCount; ++i)
+	{
+		hMesh.mSubmeshData.pushBack(submeshData[i]);
+	}
+
+	// Flag every chunk as Root; childless chunks are also RootLeaf
+	for (uint32_t i = 0; i < hMesh.mChunks.size(); ++i)
+	{
+		hMesh.mChunks[i]->mPrivateFlags |= ExplicitHierarchicalMeshImpl::Chunk::Root;
+		if (!hasChildren[i])
+		{
+			hMesh.mChunks[i]->mPrivateFlags |= ExplicitHierarchicalMeshImpl::Chunk::RootLeaf;
+		}
+	}
+
+	hMesh.mRootSubmeshCount = submeshCount;	// All supplied submeshes belong to the root mesh
+
+	hMesh.sortChunks();
+
+	return true;
+}
+
+// If destructibleAsset == NULL, no hierarchy is assumed and we must have only one part in the render mesh.
+// Rebuilds an ExplicitHierarchicalMeshImpl from a cooked render mesh asset (and, optionally,
+// the destructible asset that supplies chunk hierarchy and collision hulls):
+//   1) Creates one part per render mesh part, copying bounds and convex hulls.
+//   2) For each submesh, extracts position/normal/tangent/binormal/color/UV vertex buffers
+//      (filling sane defaults for missing channels) and re-expands the indexed triangles
+//      into explicit render triangles.
+//   3) Creates chunks mirroring the destructible asset's hierarchy (chunks at depth
+//      <= maxRootDepth are flagged Root; childless roots also RootLeaf), or a single
+//      root chunk if no destructible asset is given.
+// Returns false if the render mesh has no parts, has multiple parts without a
+// destructible asset, or a submesh lacks a position buffer.
+static bool buildExplicitHierarchicalMeshFromApexAssetsInternal(ExplicitHierarchicalMeshImpl& hMesh, const nvidia::apex::RenderMeshAsset& renderMeshAsset,
+																const nvidia::apex::DestructibleAsset* destructibleAsset, uint32_t maxRootDepth = UINT32_MAX)
+{
+	if (renderMeshAsset.getPartCount() == 0)
+	{
+		return false;	// Nothing to build from
+	}
+
+	if (destructibleAsset == NULL && renderMeshAsset.getPartCount() != 1)
+	{
+		return false;	// Without a destructible asset we cannot deduce a hierarchy for multiple parts
+	}
+
+	hMesh.clear();
+
+	// Create parts
+	for (uint32_t partIndex = 0; partIndex < renderMeshAsset.getPartCount(); ++partIndex)
+	{
+		const uint32_t newPartIndex = hMesh.addPart();
+		PX_ASSERT(newPartIndex == partIndex);
+		ExplicitHierarchicalMeshImpl::Part* part = hMesh.mParts[newPartIndex];
+		// Fill in fields except for mesh (will be done in submesh loop below)
+		// part->mMeshBSP is NULL, that's OK
+		part->mBounds = renderMeshAsset.getBounds(partIndex);
+		if (destructibleAsset != NULL)
+		{
+			// Get collision data from destructible asset
+			part->mCollision.reserve(destructibleAsset->getPartConvexHullCount(partIndex));
+			for (uint32_t hullIndex = 0; hullIndex < destructibleAsset->getPartConvexHullCount(partIndex); ++hullIndex)
+			{
+				NvParameterized::Interface* hullParams = destructibleAsset->getPartConvexHullArray(partIndex)[hullIndex];
+				if (hullParams != NULL)
+				{
+					PartConvexHullProxy* newHull = PX_NEW(PartConvexHullProxy)();
+					part->mCollision.pushBack(newHull);
+					newHull->impl.mParams->copy(*hullParams);	// Deep-copy the serialized hull description
+				}
+			}
+		}
+	}
+
+	// Deduce root and interior submesh info
+	hMesh.mRootSubmeshCount = 0;	// Incremented below
+
+	// Fill in mesh and get submesh data
+	hMesh.mSubmeshData.reset();
+	hMesh.mSubmeshData.reserve(renderMeshAsset.getSubmeshCount());
+	for (uint32_t submeshIndex = 0; submeshIndex < renderMeshAsset.getSubmeshCount(); ++submeshIndex)
+	{
+		const nvidia::RenderSubmesh& submesh = renderMeshAsset.getSubmesh(submeshIndex);
+
+		// Submesh data
+		nvidia::ExplicitSubmeshData& submeshData = hMesh.mSubmeshData.pushBack(nvidia::ExplicitSubmeshData());
+		nvidia::strlcpy(submeshData.mMaterialName, nvidia::ExplicitSubmeshData::MaterialNameBufferSize, renderMeshAsset.getMaterialName(submeshIndex));
+		submeshData.mVertexFormat.mBonesPerVertex = 1;	// One bone per vertex: the part index (assigned below)
+
+		// Mesh
+		const nvidia::VertexBuffer& vb = submesh.getVertexBuffer();
+		const nvidia::VertexFormat& vbFormat = vb.getFormat();
+		const uint32_t submeshVertexCount = vb.getVertexCount();
+		if (submeshVertexCount == 0)
+		{
+			continue;	// Empty submesh; its ExplicitSubmeshData entry remains with defaults
+		}
+
+		// Get vb data:
+		physx::Array<physx::PxVec3> positions;
+		physx::Array<physx::PxVec3> normals;
+		physx::Array<physx::PxVec4> tangents;	// Handle vec4 tangents
+		physx::Array<physx::PxVec3> binormals;
+		physx::Array<nvidia::ColorRGBA> colors;
+		physx::Array<nvidia::VertexUV> uvs[VertexFormat::MAX_UV_COUNT];
+
+		// Positions (required channel)
+		const int32_t positionBufferIndex = vbFormat.getBufferIndexFromID(vbFormat.getSemanticID(RenderVertexSemantic::POSITION));
+		positions.resize(submeshVertexCount);
+		submeshData.mVertexFormat.mHasStaticPositions = vb.getBufferData(&positions[0], nvidia::RenderDataFormat::FLOAT3, sizeof(physx::PxVec3),
+			(uint32_t)positionBufferIndex, 0, submeshVertexCount);
+		if (!submeshData.mVertexFormat.mHasStaticPositions)
+		{
+			return false;	// Need a position buffer!
+		}
+
+		// Normals (optional; zero-filled if absent)
+		const int32_t normalBufferIndex = vbFormat.getBufferIndexFromID(vbFormat.getSemanticID(RenderVertexSemantic::NORMAL));
+		normals.resize(submeshVertexCount);
+		submeshData.mVertexFormat.mHasStaticNormals = vb.getBufferData(&normals[0], nvidia::RenderDataFormat::FLOAT3, sizeof(physx::PxVec3),
+			(uint32_t)normalBufferIndex, 0, submeshVertexCount);
+		if (!submeshData.mVertexFormat.mHasStaticNormals)
+		{
+			::memset(&normals[0], 0, submeshVertexCount*sizeof(physx::PxVec3));	// Fill with zeros
+		}
+
+		// Tangents (optional; may be stored as 3- or 4-component - read into PxVec4 with w defaulting to 1)
+		const int32_t tangentBufferIndex = vbFormat.getBufferIndexFromID(vbFormat.getSemanticID(RenderVertexSemantic::TANGENT));
+		tangents.resize(submeshVertexCount, physx::PxVec4(physx::PxVec3(0.0f), 1.0f));	// Fill with (0,0,0,1), in case we read 3-component tangents
+		switch (vbFormat.getBufferFormat((uint32_t)tangentBufferIndex))
+		{
+		case nvidia::RenderDataFormat::BYTE_SNORM3:
+		case nvidia::RenderDataFormat::SHORT_SNORM3:
+		case nvidia::RenderDataFormat::FLOAT3:
+			submeshData.mVertexFormat.mHasStaticTangents = vb.getBufferData(&tangents[0], nvidia::RenderDataFormat::FLOAT3, sizeof(physx::PxVec4), (uint32_t)tangentBufferIndex, 0, submeshVertexCount);
+			break;
+		case nvidia::RenderDataFormat::BYTE_SNORM4:
+		case nvidia::RenderDataFormat::SHORT_SNORM4:
+		case nvidia::RenderDataFormat::FLOAT4:
+			submeshData.mVertexFormat.mHasStaticTangents = vb.getBufferData(&tangents[0], nvidia::RenderDataFormat::FLOAT4, sizeof(physx::PxVec4), (uint32_t)tangentBufferIndex, 0, submeshVertexCount);
+			break;
+		default:
+			submeshData.mVertexFormat.mHasStaticTangents = false;
+			break;
+		}
+
+		// Binormals (optional; derived from normal x tangent if absent)
+		const int32_t binormalBufferIndex = vbFormat.getBufferIndexFromID(vbFormat.getSemanticID(RenderVertexSemantic::BINORMAL));
+		binormals.resize(submeshVertexCount);
+		submeshData.mVertexFormat.mHasStaticBinormals = vb.getBufferData(&binormals[0], nvidia::RenderDataFormat::FLOAT3, sizeof(physx::PxVec3),
+			(uint32_t)binormalBufferIndex, 0, submeshVertexCount);
+		if (!submeshData.mVertexFormat.mHasStaticBinormals)
+		{
+			submeshData.mVertexFormat.mHasStaticBinormals = submeshData.mVertexFormat.mHasStaticNormals && submeshData.mVertexFormat.mHasStaticTangents;
+			for (uint32_t i = 0; i < submeshVertexCount; ++i)
+			{
+				binormals[i] = physx::PxSign(tangents[i][3])*normals[i].cross(tangents[i].getXYZ());	// Build from normals and tangents.  If one of these doesn't exist we'll get (0,0,0)'s
+			}
+		}
+
+		// Colors (optional; filled with opaque white 0xFF bytes if absent)
+		const int32_t colorBufferIndex = vbFormat.getBufferIndexFromID(vbFormat.getSemanticID(RenderVertexSemantic::COLOR));
+		colors.resize(submeshVertexCount);
+		submeshData.mVertexFormat.mHasStaticColors = vb.getBufferData(&colors[0], nvidia::RenderDataFormat::B8G8R8A8, sizeof(nvidia::ColorRGBA),
+			(uint32_t)colorBufferIndex, 0, submeshVertexCount);
+		if (!submeshData.mVertexFormat.mHasStaticColors)
+		{
+			::memset(&colors[0], 0xFF, submeshVertexCount*sizeof(nvidia::ColorRGBA));	// Fill with 0xFF
+		}
+
+		// UVs: read consecutive TEXCOORD channels until one is missing; zero-fill the rest
+		submeshData.mVertexFormat.mUVCount = 0;
+		uint32_t uvNum = 0;
+		for (; uvNum < VertexFormat::MAX_UV_COUNT; ++uvNum)
+		{
+			uvs[uvNum].resize(submeshVertexCount);
+			const int32_t uvBufferIndex = vbFormat.getBufferIndexFromID(vbFormat.getSemanticID((RenderVertexSemantic::Enum)(RenderVertexSemantic::TEXCOORD0 + uvNum)));
+			if (!vb.getBufferData(&uvs[uvNum][0], nvidia::RenderDataFormat::FLOAT2, sizeof(nvidia::VertexUV),
+				(uint32_t)uvBufferIndex, 0, submeshVertexCount))
+			{
+				break;
+			}
+		}
+		submeshData.mVertexFormat.mUVCount = uvNum;
+		for (; uvNum < VertexFormat::MAX_UV_COUNT; ++uvNum)
+		{
+			uvs[uvNum].resize(submeshVertexCount);
+			::memset(&uvs[uvNum][0], 0, submeshVertexCount*sizeof(nvidia::VertexUV));	// Fill with zeros
+		}
+
+		// Now create triangles
+		bool rootChunkHasTrianglesWithThisSubmesh = false;
+		for (uint32_t partIndex = 0; partIndex < renderMeshAsset.getPartCount(); ++partIndex)
+		{
+			ExplicitHierarchicalMeshImpl::Part* part = hMesh.mParts[partIndex];
+			physx::Array<nvidia::ExplicitRenderTriangle>& triangles = part->mMesh;
+			const uint32_t* indexBuffer = submesh.getIndexBuffer(partIndex);
+			const uint32_t* smoothingGroups = submesh.getSmoothingGroups(partIndex);
+			const uint32_t indexCount = submesh.getIndexCount(partIndex);
+			PX_ASSERT((indexCount%3) == 0);
+			const uint32_t triangleCount = indexCount/3;
+			triangles.reserve(triangles.size() + triangleCount);
+			if (triangleCount > 0 && destructibleAsset != NULL)
+			{
+				for (uint32_t chunkIndex = 0; chunkIndex < destructibleAsset->getChunkCount(); ++chunkIndex)
+				{
+					if (destructibleAsset->getPartIndex(chunkIndex) == partIndex)
+					{
+						// This part is in a root chunk.  Make sure we've accounted for all of its submeshes
+						rootChunkHasTrianglesWithThisSubmesh = true;
+						break;
+					}
+				}
+			}
+			// Expand indexed triangles into explicit render triangles
+			for (uint32_t triangleNum = 0; triangleNum < triangleCount; ++triangleNum)
+			{
+				nvidia::ExplicitRenderTriangle& triangle = triangles.pushBack(nvidia::ExplicitRenderTriangle());
+				triangle.extraDataIndex = 0xFFFFFFFF;	// No material frame
+				triangle.smoothingMask = smoothingGroups != NULL ? smoothingGroups[triangleNum] : 0;
+				triangle.submeshIndex = (int32_t)submeshIndex;
+				for (unsigned v = 0; v < 3; ++v)
+				{
+					const uint32_t index = *indexBuffer++;
+					nvidia::Vertex& vertex = triangle.vertices[v];
+					vertex.position = positions[index];
+					vertex.normal = normals[index];
+					vertex.tangent = tangents[index].getXYZ();
+					vertex.binormal = binormals[index];
+					vertex.color = VertexColor(ColorRGBA(colors[index]));
+					for (uint32_t uvNum = 0; uvNum < VertexFormat::MAX_UV_COUNT; ++uvNum)
+					{
+						vertex.uv[uvNum] = uvs[uvNum][index];
+					}
+					vertex.boneIndices[0] = (uint16_t)partIndex;	// Single bone per vertex: the owning part
+				}
+			}
+		}
+
+		if (rootChunkHasTrianglesWithThisSubmesh)
+		{
+			hMesh.mRootSubmeshCount = submeshIndex+1;	// Root submeshes are a prefix of the submesh list
+		}
+	}
+
+	// Create chunks
+	if (destructibleAsset != NULL)
+	{
+		physx::Array<bool> hasRootChildren(destructibleAsset->getChunkCount(), false);
+		for (uint32_t chunkIndex = 0; chunkIndex < destructibleAsset->getChunkCount(); ++chunkIndex)
+		{
+			const uint32_t newChunkIndex = hMesh.addChunk();
+			PX_ASSERT(newChunkIndex == chunkIndex);
+			ExplicitHierarchicalMeshImpl::Chunk* chunk = hMesh.mChunks[newChunkIndex];
+			// Fill in fields of chunk
+			chunk->mParentIndex = destructibleAsset->getChunkParentIndex(chunkIndex);
+			chunk->mFlags = destructibleAsset->getChunkFlags(chunkIndex);
+			chunk->mPartIndex = (int32_t)destructibleAsset->getPartIndex(chunkIndex);
+			chunk->mInstancedPositionOffset = destructibleAsset->getChunkPositionOffset(chunkIndex);
+			chunk->mInstancedUVOffset = destructibleAsset->getChunkUVOffset(chunkIndex);
+			if (destructibleAsset->getChunkDepth(chunkIndex) <= maxRootDepth)
+			{
+				chunk->mPrivateFlags |= ExplicitHierarchicalMeshImpl::Chunk::Root;	// We will assume every chunk is a root chunk
+				if (chunk->mParentIndex >= 0 && chunk->mParentIndex < (physx::PxI32)destructibleAsset->getChunkCount())
+				{
+					hasRootChildren[(physx::PxU32)chunk->mParentIndex] = true;
+				}
+			}
+		}
+
+		// See which root chunks have no children; these are root leaves
+		for (uint32_t chunkIndex = 0; chunkIndex < destructibleAsset->getChunkCount(); ++chunkIndex)
+		{
+			ExplicitHierarchicalMeshImpl::Chunk* chunk = hMesh.mChunks[chunkIndex];
+			if (chunk->isRootChunk() && !hasRootChildren[chunkIndex])
+			{
+				chunk->mPrivateFlags |= ExplicitHierarchicalMeshImpl::Chunk::RootLeaf;
+			}
+		}
+	}
+	else
+	{
+		// No destructible asset, there's just one chunk
+		const uint32_t newChunkIndex = hMesh.addChunk();
+		PX_ASSERT(newChunkIndex == 0);
+		ExplicitHierarchicalMeshImpl::Chunk* chunk = hMesh.mChunks[newChunkIndex];
+		// Fill in fields of chunk
+		chunk->mParentIndex = -1;
+		chunk->mFlags = 0;	// Can't retrieve this
+		chunk->mPartIndex = 0;
+		chunk->mInstancedPositionOffset = physx::PxVec3(0.0f);
+		chunk->mInstancedUVOffset = physx::PxVec2(0.0f);
+		chunk->mPrivateFlags |= (ExplicitHierarchicalMeshImpl::Chunk::Root | ExplicitHierarchicalMeshImpl::Chunk::RootLeaf);
+	}
+
+	return true;
+}
+
+PX_INLINE bool trianglesTouch(const nvidia::ExplicitRenderTriangle& t1, const nvidia::ExplicitRenderTriangle& t2)
+{
+ PX_UNUSED(t1);
+ PX_UNUSED(t2);
+ return true; // For now, just keep AABB test. May want to do better.
+}
+
// Partitions the triangle array [mesh, mesh + meshTriangleCount) into connected
// components ("islands"), where two triangles are considered connected if their
// padding-fattened AABBs overlap (refined by trianglesTouch, currently a
// pass-through).  Triangles are reordered in place so each island is contiguous,
// and 'partition' receives the end index of each island, in order.
static void partitionMesh(physx::Array<uint32_t>& partition, nvidia::ExplicitRenderTriangle* mesh, uint32_t meshTriangleCount, float padding)
{
	// Find triangle neighbors
	physx::Array<nvidia::BoundsRep> triangleBounds;
	triangleBounds.reserve(meshTriangleCount);
	for (uint32_t i = 0; i < meshTriangleCount; ++i)
	{
		nvidia::ExplicitRenderTriangle& triangle = mesh[i];
		nvidia::BoundsRep& rep = triangleBounds.insert();
		for (int j = 0; j < 3; ++j)
		{
			rep.aabb.include(triangle.vertices[j].position);
		}
		// Fatten so nearby-but-not-quite-touching triangles still count as neighbors
		rep.aabb.fattenFast(padding);
	}

	NeighborLookup triangleNeighborhoods;
	triangleNeighborhoods.setBounds(&triangleBounds[0], triangleBounds.size(), sizeof(triangleBounds[0]));

	// Re-ordering the mesh in-place will make the neighborhoods invalid, so we re-map.
	// triangleRemap[pos] = original index of the triangle currently at 'pos';
	// triangleRemapInv is its inverse (original index -> current position).
	physx::Array<uint32_t> triangleRemap(meshTriangleCount);
	physx::Array<uint32_t> triangleRemapInv(meshTriangleCount);
	for (uint32_t i = 0; i < meshTriangleCount; ++i)
	{
		triangleRemap[i] = i;
		triangleRemapInv[i] = i;
	}

	// Flood-fill islands: expand each triangle of the current island in turn,
	// swapping touching neighbors up to position partitionStop.
	partition.resize(0);
	uint32_t nextTriangle = 0;
	while (nextTriangle < meshTriangleCount)
	{
		uint32_t partitionStop = nextTriangle+1;
		do
		{
			const uint32_t r = triangleRemap[nextTriangle];	// Original index of the triangle being expanded
			const uint32_t neighborCount = triangleNeighborhoods.getNeighborCount(r);
			const uint32_t* neighbors = triangleNeighborhoods.getNeighbors(r);	// Reported as original indices
			for (uint32_t n = 0; n < neighborCount; ++n)
			{
				const uint32_t s = triangleRemapInv[neighbors[n]];	// Current position of this neighbor
				// NOTE(review): '<=' also treats the triangle sitting at partitionStop itself
				// as already claimed; combined with the post-increment loop condition below
				// the island boundary behaves as if inclusive — confirm this is intended.
				if (s <= partitionStop || !trianglesTouch(mesh[nextTriangle], mesh[s]))
				{
					continue;
				}
				// Move the neighbor to the end of the island, keeping both maps in sync
				nvidia::swap(triangleRemapInv[triangleRemap[partitionStop]], triangleRemapInv[triangleRemap[s]]);
				nvidia::swap(triangleRemap[partitionStop], triangleRemap[s]);
				nvidia::swap(mesh[partitionStop], mesh[s]);
				++partitionStop;
			}
		} while(nextTriangle++ < partitionStop);
		// After the do-while exits, nextTriangle is one past the island's last triangle
		partition.pushBack(nextTriangle);
	}
}
+
+uint32_t partitionMeshByIslands
+(
+ nvidia::ExplicitRenderTriangle* mesh,
+ uint32_t meshTriangleCount,
+ uint32_t* meshPartition,
+ uint32_t meshPartitionMaxCount,
+ float padding
+)
+{
+ // Adjust padding for mesh size
+ physx::PxBounds3 bounds;
+ bounds.setEmpty();
+ for (uint32_t i = 0; i < meshTriangleCount; ++i)
+ {
+ for (int j = 0; j < 3; ++j)
+ {
+ bounds.include(mesh[i].vertices[j].position);
+ }
+ }
+ padding *= bounds.getExtents().magnitude();
+
+ physx::Array<uint32_t> partition;
+ partitionMesh(partition, mesh, meshTriangleCount, padding);
+
+ for (uint32_t i = 0; i < meshPartitionMaxCount && i < partition.size(); ++i)
+ {
+ meshPartition[i] = partition[i];
+ }
+
+ return partition.size();
+}
+
+bool buildExplicitHierarchicalMeshFromRenderMeshAsset(ExplicitHierarchicalMesh& iHMesh, const nvidia::apex::RenderMeshAsset& renderMeshAsset, uint32_t maxRootDepth)
+{
+ return buildExplicitHierarchicalMeshFromApexAssetsInternal(*(ExplicitHierarchicalMeshImpl*)&iHMesh, renderMeshAsset, NULL, maxRootDepth);
+}
+
+bool buildExplicitHierarchicalMeshFromDestructibleAsset(ExplicitHierarchicalMesh& iHMesh, const nvidia::apex::DestructibleAsset& destructibleAsset, uint32_t maxRootDepth)
+{
+ if (destructibleAsset.getChunkCount() == 0)
+ {
+ return false;
+ }
+
+ const nvidia::RenderMeshAsset* renderMeshAsset = destructibleAsset.getRenderMeshAsset();
+ if (renderMeshAsset == NULL)
+ {
+ return false;
+ }
+
+ return buildExplicitHierarchicalMeshFromApexAssetsInternal(*(ExplicitHierarchicalMeshImpl*)&iHMesh, *renderMeshAsset, &destructibleAsset, maxRootDepth);
+}
+
+
// Strategy interface used by splitMeshInternal/splitChunkInternal.  Concrete
// implementations (e.g. HierarchicalMeshSplitter below) define how a single
// chunk is fractured.
// NOTE(review): no virtual destructor; in this file instances are only passed
// by reference and never deleted through the base — confirm before deleting
// polymorphically.
class MeshSplitter
{
public:
	// Called before any processing; return false to abort (e.g. if the split
	// parameters would produce too many chunks).
	virtual bool validate(ExplicitHierarchicalMeshImpl& hMesh) = 0;

	// One-time setup on the mesh before splitting begins.
	virtual void initialize(ExplicitHierarchicalMeshImpl& hMesh) = 0;

	// Fracture the given chunk, whose solid geometry is described by chunkBSP.
	// Return false on failure or cancellation (via *cancel).
	virtual bool process
	(
		ExplicitHierarchicalMeshImpl& hMesh,
		uint32_t chunkIndex,
		const ApexCSG::IApexBSP& chunkBSP,
		const CollisionDesc& collisionDesc,
		nvidia::IProgressListener& progressListener,
		volatile bool* cancel
	) = 0;

	// Called after all chunks are processed; returns overall success.
	virtual bool finalize(ExplicitHierarchicalMeshImpl& hMesh) = 0;
};
+
// Core driver for whole-mesh fracturing.  Optionally exports and/or subtracts
// a "core" mesh, ensures BSPs exist for all root leaf chunks, then invokes the
// given MeshSplitter on each root leaf chunk.  Supports cancellation (with
// serialize/deserialize state restore), optional T-junction removal, chunk
// sorting, and trimming of collision hulls adjacent to the core.  Returns
// false on validation failure or cancellation.
static bool splitMeshInternal
(
	ExplicitHierarchicalMesh& iHMesh,			// Mesh to split (in/out)
	ExplicitHierarchicalMesh& iHMeshCore,		// Optional core mesh; partCount() == 0 means "no core"
	bool exportCoreMesh,						// If true, the core becomes an exported chunk
	int32_t coreMeshImprintSubmeshIndex,		// < 0: use core mesh materials; otherwise imprint with this submesh
	const MeshProcessingParameters& meshProcessingParams,
	MeshSplitter& splitter,						// Fracturing strategy callback
	const CollisionDesc& collisionDesc,
	uint32_t randomSeed,
	nvidia::IProgressListener& progressListener,
	volatile bool* cancel						// If non-NULL, may be set asynchronously to abort
)
{
	ExplicitHierarchicalMeshImpl& hMesh = *(ExplicitHierarchicalMeshImpl*)&iHMesh;
	ExplicitHierarchicalMeshImpl& hMeshCore = *(ExplicitHierarchicalMeshImpl*)&iHMeshCore;

	if (hMesh.partCount() == 0)
	{
		return false;
	}

	// "Root depth is zero" means chunk 0 is the one and only root chunk
	bool rootDepthIsZero = hMesh.mChunks[0]->isRootChunk(); // Until proven otherwise
	for (uint32_t chunkIndex = 1; rootDepthIsZero && chunkIndex < hMesh.chunkCount(); ++chunkIndex)
	{
		rootDepthIsZero = !hMesh.mChunks[chunkIndex]->isRootChunk();
	}

	// Core mesh export is only supported with a single-mesh root
	if (!rootDepthIsZero && hMeshCore.partCount() > 0 && exportCoreMesh)
	{
		char message[1000];
		sprintf(message, "Warning: cannot export core mesh with multiple-mesh root mesh. Will not export core.");
		outputMessage(message, physx::PxErrorCode::eDEBUG_WARNING);
		exportCoreMesh = false;
	}

	// Give the splitter a chance to reject the operation up front
	if (!splitter.validate(hMesh))
	{
		return false;
	}

	// Save state if cancel != NULL, so we can roll back on cancellation
	physx::PxFileBuf* save = NULL;
	class NullEmbedding : public ExplicitHierarchicalMesh::Embedding
	{
		void serialize(physx::PxFileBuf& stream, Embedding::DataType type) const
		{
			(void)stream;
			(void)type;
		}
		void deserialize(physx::PxFileBuf& stream, Embedding::DataType type, uint32_t version)
		{
			(void)stream;
			(void)type;
			(void)version;
		}
	} embedding;
	if (cancel != NULL)
	{
		save = nvidia::GetApexSDK()->createMemoryWriteStream();
		if (save != NULL)
		{
			hMesh.serialize(*save, embedding);
		}
	}
	bool canceled = false;

	hMesh.buildCollisionGeometryForPart(0, getVolumeDesc(collisionDesc, 0));

	userRnd.m_rnd.setSeed(randomSeed);

	// Call initialization callback
	splitter.initialize(hMesh);

	// Make sure we've got BSPs at root depth
	for (uint32_t i = 0; i < hMesh.chunkCount(); ++i)
	{
		if (!hMesh.mChunks[i]->isRootLeafChunk())
		{
			continue;
		}
		uint32_t partIndex = (uint32_t)*hMesh.partIndex(i);
		if (hMesh.mParts[partIndex]->mMeshBSP->getType() != ApexCSG::BSPType::Nontrivial)
		{
			outputMessage("Building mesh BSP...");
			progressListener.setProgress(0);
			if (hMesh.calculatePartBSP(partIndex, randomSeed, meshProcessingParams.microgridSize, meshProcessingParams.meshMode, &progressListener, cancel))
			{
				outputMessage("Mesh BSP completed.");
			}
			else
			{
				outputMessage("Mesh BSP failed.");
				canceled = true;
			}
			// Re-seed so the splitting RNG sequence is independent of BSP construction
			userRnd.m_rnd.setSeed(randomSeed);
		}
	}

#if 0 // Debugging aid - uses BSP mesh generation to replace level 0 mesh
	hMesh.mParts[*hMesh.partIndex(0)]->mMeshBSP->toMesh(hMesh.mParts[0]->mMesh);
#endif

	hMesh.clear(true);

	ExplicitHierarchicalMeshImpl tempCoreMesh;

	uint32_t coreChunkIndex = 0xFFFFFFFF;	// Remain 0xFFFFFFFF if no core is exported
	uint32_t corePartIndex = 0xFFFFFFFF;
	if (hMeshCore.partCount() > 0 && !canceled)
	{
		// We have a core mesh.
		tempCoreMesh.set(iHMeshCore);

		if (exportCoreMesh)
		{
			// Use it as our first split
			// Core starts as original mesh
			coreChunkIndex = hMesh.addChunk();
			corePartIndex = hMesh.addPart();
			hMesh.mChunks[coreChunkIndex]->mPartIndex = (int32_t)corePartIndex;
			hMesh.mParts[corePartIndex]->mMesh = hMeshCore.mParts[0]->mMesh;
			hMesh.buildMeshBounds(corePartIndex);
			hMesh.buildCollisionGeometryForPart(corePartIndex, getVolumeDesc(collisionDesc, 1));
			hMesh.mChunks[coreChunkIndex]->mParentIndex = 0;
		}

		// Add necessary submesh data to hMesh from core.
		// submeshMap[i] = index in hMesh.mSubmeshData matching core submesh i (by material name)
		physx::Array<uint32_t> submeshMap(tempCoreMesh.mSubmeshData.size());
		if (exportCoreMesh || coreMeshImprintSubmeshIndex < 0)
		{
			for (uint32_t i = 0; i < tempCoreMesh.mSubmeshData.size(); ++i)
			{
				nvidia::ExplicitSubmeshData& coreSubmeshData = tempCoreMesh.mSubmeshData[i];
				submeshMap[i] = hMesh.mSubmeshData.size();
				for (uint32_t j = 0; j < hMesh.mSubmeshData.size(); ++j)
				{
					nvidia::ExplicitSubmeshData& submeshData = hMesh.mSubmeshData[j];
					if (0 == nvidia::strcmp(submeshData.mMaterialName, coreSubmeshData.mMaterialName))
					{
						submeshMap[i] = j;
						break;
					}
				}
				if (submeshMap[i] == hMesh.mSubmeshData.size())
				{
					// No matching material found; append the core's submesh data
					hMesh.mSubmeshData.pushBack(coreSubmeshData);
				}
			}
		}

		// Clamp an out-of-range imprint submesh index to 0
		if (coreMeshImprintSubmeshIndex >= (int32_t)hMesh.mSubmeshData.size())
		{
			coreMeshImprintSubmeshIndex = 0;
		}

		for (uint32_t i = 0; i < tempCoreMesh.chunkCount(); ++i)
		{
			if (!tempCoreMesh.mChunks[i]->isRootChunk())
			{
				continue;
			}

			// Remap materials
			uint32_t partIndex = (uint32_t)*tempCoreMesh.partIndex(i);
			for (uint32_t j = 0; j < tempCoreMesh.mParts[partIndex]->mMesh.size(); ++j)
			{
				nvidia::ExplicitRenderTriangle& tri = tempCoreMesh.mParts[partIndex]->mMesh[j];
				if (tri.submeshIndex >= 0 && tri.submeshIndex < (int32_t)submeshMap.size())
				{
					tri.submeshIndex = coreMeshImprintSubmeshIndex < 0 ? (int32_t)submeshMap[(uint32_t)tri.submeshIndex] : coreMeshImprintSubmeshIndex;
					if (exportCoreMesh && i == 0)
					{
						hMesh.mParts[corePartIndex]->mMesh[j].submeshIndex = (int32_t)submeshMap[(uint32_t)hMesh.mParts[corePartIndex]->mMesh[j].submeshIndex];
					}
				}
				else
				{
					tri.submeshIndex = coreMeshImprintSubmeshIndex;
				}
			}

			// Make sure we've got BSPs up to hMesh.mRootDepth
			if (tempCoreMesh.mParts[partIndex]->mMeshBSP->getType() != ApexCSG::BSPType::Nontrivial)
			{
				outputMessage("Building core mesh BSP...");
				progressListener.setProgress(0);
				if(tempCoreMesh.calculatePartBSP(partIndex, randomSeed, meshProcessingParams.microgridSize, meshProcessingParams.meshMode, &progressListener, cancel))
				{
					outputMessage("Core mesh BSP completed.");
				}
				else
				{
					outputMessage("Core mesh BSP calculation failed.");
					canceled = true;
				}
				userRnd.m_rnd.setSeed(randomSeed);
			}
		}
	}

	// Publish processing parameters through the file-scope globals
	gIslandGeneration = meshProcessingParams.islandGeneration;
	gMicrogridSize = meshProcessingParams.microgridSize;
	gVerbosity = meshProcessingParams.verbosity;

	for (uint32_t chunkIndex = 0; chunkIndex < hMesh.mChunks.size() && !canceled; ++chunkIndex)
	{
		const uint32_t depth = hMesh.depth(chunkIndex);

		if (!hMesh.mChunks[chunkIndex]->isRootLeafChunk())
		{
			continue;	// Only process root leaf chunks
		}

		if (chunkIndex == coreChunkIndex)
		{
			continue;	// Ignore core chunk
		}

		uint32_t partIndex = (uint32_t)*hMesh.partIndex(chunkIndex);

		ApexCSG::IApexBSP* seedBSP = createBSP(hMesh.mBSPMemCache);
		seedBSP->copy(*hMesh.mParts[partIndex]->mMeshBSP);

		// Subtract out core
		bool partModified = false;
		for (uint32_t i = 0; i < tempCoreMesh.chunkCount(); ++i)
		{
			if (!tempCoreMesh.mChunks[i]->isRootLeafChunk())
			{
				continue;
			}
			uint32_t corePartIndex = (uint32_t)*tempCoreMesh.partIndex(i);
			if (tempCoreMesh.mParts[corePartIndex]->mMeshBSP != NULL)
			{
				// Bring the core BSP into this chunk's internal transform before subtracting
				ApexCSG::IApexBSP* rescaledCoreMeshBSP = createBSP(hMesh.mBSPMemCache);
				rescaledCoreMeshBSP->copy(*tempCoreMesh.mParts[corePartIndex]->mMeshBSP, physx::PxMat44(physx::PxIdentity), seedBSP->getInternalTransform());
				seedBSP->combine(*rescaledCoreMeshBSP);
				rescaledCoreMeshBSP->release();
				seedBSP->op(*seedBSP, ApexCSG::Operation::A_Minus_B);
				partModified = true;
			}
		}

		if (partModified && depth > 0)
		{
			// Create part from modified seedBSP (unless it's at level 0)
			seedBSP->toMesh(hMesh.mParts[partIndex]->mMesh);
			if (hMesh.mParts[partIndex]->mMesh.size() != 0)
			{
				hMesh.mParts[partIndex]->mMeshBSP->copy(*seedBSP);
				hMesh.buildCollisionGeometryForPart(partIndex, getVolumeDesc(collisionDesc, depth));
			}
		}

#if 0 // Should always have been true
		if (depth == hMesh.mRootDepth)
#endif
		{
			// At hMesh.mRootDepth - split
			outputMessage("Splitting...");
			progressListener.setProgress(0);
			canceled = !splitter.process(hMesh, chunkIndex, *seedBSP, collisionDesc, progressListener, cancel);
			outputMessage("splitting completed.");
		}

		seedBSP->release();
	}

	// Restore if canceled
	if (canceled && save != NULL)
	{
		uint32_t len;
		const void* mem = nvidia::GetApexSDK()->getMemoryWriteBuffer(*save, len);
		physx::PxFileBuf* load = nvidia::GetApexSDK()->createMemoryReadStream(mem, len);
		if (load != NULL)
		{
			hMesh.deserialize(*load, embedding);
			nvidia::GetApexSDK()->releaseMemoryReadStream(*load);
		}
	}

	if (save != NULL)
	{
		// NOTE(review): 'save' was created with createMemoryWriteStream; confirm that
		// releaseMemoryReadStream (rather than releaseMemoryWriteStream) is correct here.
		nvidia::GetApexSDK()->releaseMemoryReadStream(*save);
	}

	if (canceled)
	{
		return false;
	}

	// Weld T-junctions in all parts, if requested
	if (meshProcessingParams.removeTJunctions && hMesh.mParts.size())
	{
		MeshProcessor meshProcessor;
		const float size = hMesh.mParts[0]->mBounds.getExtents().magnitude();
		for (uint32_t i = 0; i < hMesh.partCount(); ++i)
		{
			meshProcessor.setMesh(hMesh.mParts[i]->mMesh, NULL, 0, 0.0001f*size);
			meshProcessor.removeTJunctions();
		}
	}

	// Restore breadth-first chunk ordering; remap records old->new chunk indices
	physx::Array<uint32_t> remap;
	hMesh.sortChunks(&remap);

	hMesh.createPartSurfaceNormals();

	if (corePartIndex < hMesh.partCount())	// True only when a core chunk was exported
	{
		// Create reasonable collision hulls when there is a core mesh
		coreChunkIndex = remap[coreChunkIndex];
		const PxTransform idTM(PxIdentity);
		const physx::PxVec3 idScale(1.0f);
		for (uint32_t coreHullIndex = 0; coreHullIndex < hMesh.mParts[corePartIndex]->mCollision.size(); ++coreHullIndex)
		{
			const PartConvexHullProxy& coreHull = *hMesh.mParts[corePartIndex]->mCollision[coreHullIndex];
			for (uint32_t i = 1; i < hMesh.partCount(); ++i)
			{
				// NOTE(review): 'i' iterates part indices but is compared against
				// coreChunkIndex (a chunk index) — confirm part and chunk indices
				// coincide here after sortChunks.
				if (i == coreChunkIndex)
				{
					continue;
				}
				for (uint32_t hullIndex = 0; hullIndex < hMesh.mParts[i]->mCollision.size(); ++hullIndex)
				{
					PartConvexHullProxy& hull = *hMesh.mParts[i]->mCollision[hullIndex];
					ConvexHullImpl::Separation separation;
					if (ConvexHullImpl::hullsInProximity(coreHull.impl, idTM, idScale, hull.impl, idTM, idScale, 0.0f, &separation))
					{
						const float hullWidth = separation.max1 - separation.min1;
						const float overlap = separation.max0 - separation.min1;
						if (overlap < 0.25f * hullWidth)
						{
							// Trim the hull
							hull.impl.intersectPlaneSide(physx::PxPlane(-separation.plane.n, -separation.max0));
						}
					}
				}
			}
		}
	}

	return splitter.finalize(hMesh);
}
+
// Note: chunks must be in breadth-first order
// Recursively deletes all descendants of the given chunk (and, when
// deleteChunk is true, the chunk itself).  Iterating from high to low indices
// means removals never disturb indices still to be visited; breadth-first
// ordering guarantees children have higher indices than their parents.  A
// deleted chunk's part is also removed if no remaining chunk references it.
static void deleteChunkChildren
(
	ExplicitHierarchicalMeshImpl& hMesh,
	uint32_t chunkIndex,
	bool deleteChunk = false	// If true, delete this chunk too after its children
	)
{
	for (uint32_t index = hMesh.chunkCount(); index-- > chunkIndex+1;)
	{
		if (hMesh.mChunks[index]->mParentIndex == (int32_t)chunkIndex)
		{
			deleteChunkChildren(hMesh, index, true);
		}
	}

	if (deleteChunk)
	{
		const int32_t partIndex = hMesh.mChunks[chunkIndex]->mPartIndex;
		hMesh.removeChunk(chunkIndex);
		// Keep the part if any surviving chunk still references it
		bool partIndexUsed = false;
		for (uint32_t index = 0; index < hMesh.chunkCount(); ++index)
		{
			if (hMesh.mChunks[index]->mPartIndex == partIndex)
			{
				partIndexUsed = true;
				break;
			}
		}
		if (!partIndexUsed)
		{
			hMesh.removePart((uint32_t)partIndex);
		}
	}
}
+
// Re-fractures a single chunk in place: deletes its existing children,
// ensures the chunk's part has a BSP, and invokes the given MeshSplitter on
// it.  Optionally reseeds the shared RNG and supports cancellation with
// serialize/deserialize state restore.  Returns false only when canceled.
static bool splitChunkInternal
(
	ExplicitHierarchicalMesh& iHMesh,
	uint32_t chunkIndex,
	const FractureTools::MeshProcessingParameters& meshProcessingParams,
	MeshSplitter& splitter,
	const CollisionDesc& collisionDesc,
	uint32_t* randomSeed,		// If non-NULL, reseed the shared RNG before fracturing
	IProgressListener& progressListener,
	volatile bool* cancel		// If non-NULL, may be set asynchronously to abort
)
{
	// A chunk with no part has nothing to split; treat as trivially successful
	const int32_t* partIndexPtr = iHMesh.partIndex(chunkIndex);
	if (partIndexPtr == NULL)
	{
		return true;
	}
	const uint32_t partIndex = (uint32_t)*partIndexPtr;

	// Publish processing parameters through the file-scope globals
	gIslandGeneration = meshProcessingParams.islandGeneration;
	gMicrogridSize = meshProcessingParams.microgridSize;
	gVerbosity = meshProcessingParams.verbosity;

	outputMessage("Splitting...");

	// Save state if cancel != NULL, so we can roll back on cancellation
	physx::PxFileBuf* save = NULL;
	class NullEmbedding : public ExplicitHierarchicalMesh::Embedding
	{
		void serialize(physx::PxFileBuf& stream, Embedding::DataType type) const
		{
			(void)stream;
			(void)type;
		}
		void deserialize(physx::PxFileBuf& stream, Embedding::DataType type, uint32_t version)
		{
			(void)stream;
			(void)type;
			(void)version;
		}
	} embedding;

	ExplicitHierarchicalMeshImpl& hMesh = *(ExplicitHierarchicalMeshImpl*)&iHMesh;

	if (cancel != NULL)
	{
		save = nvidia::GetApexSDK()->createMemoryWriteStream();
		if (save != NULL)
		{
			hMesh.serialize(*save, embedding);
		}
	}
	bool canceled = false;

	progressListener.setProgress(0);

	// Delete chunk children
	deleteChunkChildren(hMesh, chunkIndex);

	// Reseed if requested
	if (randomSeed != NULL)
	{
		userRnd.m_rnd.setSeed(*randomSeed);
	}
	const uint32_t seed = userRnd.m_rnd.seed();

	// Fracture chunk
	ApexCSG::IApexBSP* chunkMeshBSP = hMesh.mParts[partIndex]->mMeshBSP;

	// Make sure we've got a BSP. If this is a root chunk, it may not have been created yet.
	if (chunkMeshBSP->getType() != ApexCSG::BSPType::Nontrivial)
	{
		if (!hMesh.mChunks[chunkIndex]->isRootChunk())
		{
			outputMessage("Warning: Building a BSP for a non-root mesh. This should have been created by a splitting process.");
		}
		outputMessage("Building mesh BSP...");
		progressListener.setProgress(0);
		hMesh.calculatePartBSP(partIndex, seed, meshProcessingParams.microgridSize, meshProcessingParams.meshMode, &progressListener);
		outputMessage("Mesh BSP completed.");
		// Re-seed so the splitting RNG sequence is independent of BSP construction
		userRnd.m_rnd.setSeed(seed);
	}

	const uint32_t oldPartCount = hMesh.mParts.size();	// Parts appended beyond this are the new children

	canceled = !splitter.process(hMesh, chunkIndex, *chunkMeshBSP, collisionDesc, progressListener, cancel);

	// Restore if canceled
	if (canceled && save != NULL)
	{
		uint32_t len;
		const void* mem = nvidia::GetApexSDK()->getMemoryWriteBuffer(*save, len);
		physx::PxFileBuf* load = nvidia::GetApexSDK()->createMemoryReadStream(mem, len);
		if (load != NULL)
		{
			hMesh.deserialize(*load, embedding);
			nvidia::GetApexSDK()->releaseMemoryReadStream(*load);
		}
	}

	if (save != NULL)
	{
		// NOTE(review): 'save' was created with createMemoryWriteStream; confirm that
		// releaseMemoryReadStream (rather than releaseMemoryWriteStream) is correct here.
		nvidia::GetApexSDK()->releaseMemoryReadStream(*save);
	}

	if (canceled)
	{
		return false;
	}

	// Weld T-junctions in the newly created child parts, if requested
	if (meshProcessingParams.removeTJunctions)
	{
		MeshProcessor meshProcessor;
		const float size = hMesh.mParts[partIndex]->mBounds.getExtents().magnitude();
		for (uint32_t i = oldPartCount; i < hMesh.partCount(); ++i)
		{
			meshProcessor.setMesh(hMesh.mParts[i]->mMesh, NULL, 0, 0.0001f*size);
			meshProcessor.removeTJunctions();
		}
	}

	// Restore breadth-first chunk ordering
	hMesh.sortChunks();

	hMesh.createPartSurfaceNormals();

	return true;
}
+
+
+static uint32_t createVoronoiSitesInsideMeshInternal
+(
+ ExplicitHierarchicalMeshImpl& hMesh,
+ const uint32_t* chunkIndices,
+ uint32_t chunkCount,
+ physx::PxVec3* siteBuffer,
+ uint32_t* siteChunkIndices,
+ uint32_t siteCount,
+ uint32_t* randomSeed,
+ uint32_t* microgridSize,
+ BSPOpenMode::Enum meshMode,
+ nvidia::IProgressListener& progressListener
+)
+{
+ if (randomSeed != NULL)
+ {
+ userRnd.m_rnd.setSeed(*randomSeed);
+ }
+
+ const uint32_t microgridSizeToUse = microgridSize != NULL ? *microgridSize : gMicrogridSize;
+
+ // Make sure we've got BSPs for all chunks
+ for (uint32_t chunkNum = 0; chunkNum < chunkCount; ++chunkNum)
+ {
+ const uint32_t chunkIndex = chunkIndices[chunkNum];
+ uint32_t partIndex = (uint32_t)*hMesh.partIndex(chunkIndex);
+ if (hMesh.mParts[partIndex]->mMeshBSP->getType() != ApexCSG::BSPType::Nontrivial)
+ {
+ outputMessage("Building mesh BSP...");
+ progressListener.setProgress(0);
+ if (randomSeed == NULL)
+ {
+ outputMessage("Warning: no random seed given in createVoronoiSitesInsideMeshInternal but BSP must be built. Using seed = 0.", physx::PxErrorCode::eDEBUG_WARNING);
+ }
+ hMesh.calculatePartBSP(partIndex, (randomSeed != NULL ? *randomSeed : 0), microgridSizeToUse, meshMode, &progressListener);
+ outputMessage("Mesh BSP completed.");
+ if (randomSeed != NULL)
+ {
+ userRnd.m_rnd.setSeed(*randomSeed);
+ }
+ }
+ }
+
+ // Come up with distribution that is weighted by chunk volume, but also tries to ensure each chunk gets at least one site.
+ float totalVolume = 0.0f;
+ physx::Array<float> volumes(chunkCount);
+ physx::Array<uint32_t> siteCounts(chunkCount);
+ for (uint32_t chunkNum = 0; chunkNum < chunkCount; ++chunkNum)
+ {
+ const uint32_t chunkIndex = chunkIndices[chunkNum];
+ const physx::PxBounds3 bounds = hMesh.chunkBounds(chunkIndex);
+ const physx::PxVec3 extents = bounds.getExtents();
+ volumes[chunkNum] = extents.x*extents.y*extents.z;
+ totalVolume += volumes[chunkNum];
+ siteCounts[chunkNum] = 0;
+ }
+
+ // Now fill in site counts
+ if (totalVolume <= 0.0f)
+ {
+ totalVolume = 1.0f; // To avoid divide-by-zero
+ }
+
+ // Make site count proportional to volume, within error due to quantization. Ensure at least one site per chunk, even if "zero volume"
+ // is recorded (it might be an open-meshed chunk). We distinguish between zero and one sites per chunk, even though they have the same
+ // effect on a chunk, since using one site per chunk will reduce the number of sites available for other chunks. The aim is to have
+ // control over the number of chunks generated, so we will avoid using zero sites per chunk.
+ uint32_t totalSiteCount = 0;
+ for (uint32_t chunkNum = 0; chunkNum < chunkCount; ++chunkNum)
+ {
+ siteCounts[chunkNum] = PxMax(1U, (uint32_t)(siteCount*volumes[chunkNum]/totalVolume));
+ totalSiteCount += siteCounts[chunkNum];
+ }
+
+ // Add sites if we need to. This can happen due to the rounding.
+ while (totalSiteCount < siteCount)
+ {
+ uint32_t chunkToAddSite = 0;
+ float greatestDeficit = -PX_MAX_F32;
+ for (uint32_t chunkNum = 0; chunkNum < chunkCount; ++chunkNum)
+ {
+ const float defecit = siteCount*volumes[chunkNum]/totalVolume - (float)siteCounts[chunkNum];
+ if (defecit > greatestDeficit)
+ {
+ greatestDeficit = defecit;
+ chunkToAddSite = chunkNum;
+ }
+ }
+ ++siteCounts[chunkToAddSite];
+ ++totalSiteCount;
+ }
+
+ // Remove sites if necessary. This is much more likely.
+ while (totalSiteCount > siteCount)
+ {
+ uint32_t chunkToRemoveSite = 0;
+ float greatestSurplus = -PX_MAX_F32;
+ for (uint32_t chunkNum = 0; chunkNum < chunkCount; ++chunkNum)
+ {
+ const float surplus = (float)siteCounts[chunkNum] - siteCount*volumes[chunkNum]/totalVolume;
+ if (surplus > greatestSurplus)
+ {
+ greatestSurplus = surplus;
+ chunkToRemoveSite = chunkNum;
+ }
+ }
+ --siteCounts[chunkToRemoveSite];
+ --totalSiteCount;
+ }
+
+ // Now generate the actual sites
+ uint32_t totalSitesGenerated = 0;
+ for (uint32_t chunkNum = 0; chunkNum < chunkCount; ++chunkNum)
+ {
+ const uint32_t chunkIndex = chunkIndices[chunkNum];
+ uint32_t partIndex = (uint32_t)*hMesh.partIndex(chunkIndex);
+ ApexCSG::IApexBSP* meshBSP = hMesh.mParts[partIndex]->mMeshBSP;
+ const physx::PxBounds3 bounds = hMesh.chunkBounds(chunkIndex);
+ uint32_t sitesGenerated = 0;
+ uint32_t attemptsLeft = 100000;
+ while ( sitesGenerated < siteCounts[chunkNum])
+ {
+ const physx::PxVec3 site(userRnd.getReal(bounds.minimum.x, bounds.maximum.x), userRnd.getReal(bounds.minimum.y, bounds.maximum.y), userRnd.getReal(bounds.minimum.z, bounds.maximum.z));
+ if (!attemptsLeft || meshBSP->pointInside(site - *hMesh.instancedPositionOffset(chunkIndex)))
+ {
+ siteBuffer[totalSitesGenerated] = site;
+ if (siteChunkIndices != NULL)
+ {
+ siteChunkIndices[totalSitesGenerated] = chunkIndex;
+ }
+ ++sitesGenerated;
+ ++totalSitesGenerated;
+ }
+ if (attemptsLeft)
+ {
+ --attemptsLeft;
+ }
+ }
+ }
+
+ return totalSitesGenerated;
+}
+
// MeshSplitter implementation for hierarchical slicing: each root leaf chunk
// is recursively partitioned into a grid of slices per depth level, as
// described by a FractureSliceDesc.
class HierarchicalMeshSplitter : public MeshSplitter
{
private:
	HierarchicalMeshSplitter& operator=(const HierarchicalMeshSplitter&);	// Non-assignable (holds a reference)

public:
	HierarchicalMeshSplitter(const FractureSliceDesc& desc) : mDesc(desc)
	{
	}

	// Estimates the total chunk count the slice parameters would produce and
	// rejects the operation if it exceeds MAX_ALLOWED_ESTIMATED_CHUNK_TOTAL.
	bool validate(ExplicitHierarchicalMeshImpl& hMesh)
	{
		// Try to see if we're going to generate too many chunks
		uint32_t estimatedTotalChunkCount = 0;
		for (uint32_t chunkIndex = 0; chunkIndex < hMesh.chunkCount(); ++chunkIndex)
		{
			if (!hMesh.mChunks[chunkIndex]->isRootLeafChunk())
			{
				continue;
			}
			uint32_t partIndex = (uint32_t)*hMesh.partIndex(chunkIndex);
			uint32_t estimatedLevelChunkCount = 1;
			physx::PxVec3 estimatedExtent = hMesh.mParts[partIndex]->mBounds.getExtents();
			for (uint32_t chunkDepth = 0; chunkDepth < mDesc.maxDepth; ++chunkDepth)
			{
				// Get parameters for this depth
				const nvidia::SliceParameters& sliceParameters = mDesc.sliceParameters[chunkDepth];
				int partition[3];
				calculatePartition(partition, sliceParameters.splitsPerPass, estimatedExtent, mDesc.useTargetProportions ? mDesc.targetProportions : NULL);
				// Each chunk at this level splits into partition[0]*partition[1]*partition[2] children
				estimatedLevelChunkCount *= partition[0] * partition[1] * partition[2];
				estimatedTotalChunkCount += estimatedLevelChunkCount;
				if (estimatedTotalChunkCount > MAX_ALLOWED_ESTIMATED_CHUNK_TOTAL)
				{
					char message[1000];
					shdfnd::snprintf(message,1000, "Slicing chunk count is estimated to be %d chunks, exceeding the maximum allowed estimated total of %d chunks. Aborting. Try using fewer slices, or a smaller fracture depth.",
						estimatedTotalChunkCount, (int)MAX_ALLOWED_ESTIMATED_CHUNK_TOTAL);
					outputMessage(message, physx::PxErrorCode::eINTERNAL_ERROR);
					return false;
				}
				// Child extents shrink by the partition factors for the next level's estimate
				estimatedExtent[0] /= partition[0];
				estimatedExtent[1] /= partition[1];
				estimatedExtent[2] /= partition[2];
			}
		}

		return true;
	}

	// Prepares displacement-map volume (if used) and ensures the submesh array
	// can hold every interior submesh index referenced by the material descs.
	void initialize(ExplicitHierarchicalMeshImpl& hMesh)
	{
		if (mDesc.useDisplacementMaps)
		{
			hMesh.initializeDisplacementMapVolume(mDesc);
		}

		for (int i = 0; i < 3; ++i)
		{
			hMesh.mSubmeshData.resize(PxMax(hMesh.mRootSubmeshCount, mDesc.materialDesc[i].interiorSubmeshIndex + 1));
		}
	}

	// Slices one chunk hierarchically, starting at depth 0.
	bool process
	(
		ExplicitHierarchicalMeshImpl& hMesh,
		uint32_t chunkIndex,
		const ApexCSG::IApexBSP& chunkBSP,
		const CollisionDesc& collisionDesc,
		nvidia::IProgressListener& progressListener,
		volatile bool* cancel
	)
	{
		physx::PxPlane trailingPlanes[3];	// passing in depth = 0 will initialize these
		physx::PxPlane leadingPlanes[3];
#if 1	// Eliminating volume calculation here, for performance. May introduce it later once the mesh is calculated.
		const float chunkVolume = 1.0f;
#else
		float chunkArea, chunkVolume;
		chunkBSP.getSurfaceAreaAndVolume(chunkArea, chunkVolume, true);
#endif
		return hierarchicallySplitChunkInternal(hMesh, chunkIndex, 0, trailingPlanes, leadingPlanes, chunkBSP, chunkVolume, mDesc, collisionDesc, progressListener, cancel);
	}

	// Marks every chunk as instanced, if the descriptor requests instancing.
	bool finalize(ExplicitHierarchicalMeshImpl& hMesh)
	{
		if (mDesc.instanceChunks)
		{
			// NOTE(review): the loop bound is partCount() but the index is used on
			// mChunks — confirm part and chunk counts are interchangeable here.
			for (uint32_t i = 0; i < hMesh.partCount(); ++i)
			{
				hMesh.mChunks[i]->mFlags |= nvidia::apex::DestructibleAsset::ChunkIsInstanced;
			}
		}

		return true;
	}

protected:
	const FractureSliceDesc& mDesc;	// Slice parameters (held by reference; must outlive this object)
};
+
+bool createHierarchicallySplitMesh
+(
+ ExplicitHierarchicalMesh& iHMesh,
+ ExplicitHierarchicalMesh& iHMeshCore,
+ bool exportCoreMesh,
+ int32_t coreMeshImprintSubmeshIndex, // If this is < 0, use the core mesh materials (was applyCoreMeshMaterialToNeighborChunks). Otherwise, use the given submesh.
+ const MeshProcessingParameters& meshProcessingParams,
+ const FractureSliceDesc& desc,
+ const CollisionDesc& collisionDesc,
+ uint32_t randomSeed,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel
+)
+{
+ HierarchicalMeshSplitter splitter(desc);
+
+ return splitMeshInternal(
+ iHMesh,
+ iHMeshCore,
+ exportCoreMesh,
+ coreMeshImprintSubmeshIndex,
+ meshProcessingParams,
+ splitter,
+ collisionDesc,
+ randomSeed,
+ progressListener,
+ cancel);
+}
+
+bool hierarchicallySplitChunk
+(
+ ExplicitHierarchicalMesh& iHMesh,
+ uint32_t chunkIndex,
+ const FractureTools::MeshProcessingParameters& meshProcessingParams,
+ const FractureTools::FractureSliceDesc& desc,
+ const CollisionDesc& collisionDesc,
+ uint32_t* randomSeed,
+ IProgressListener& progressListener,
+ volatile bool* cancel
+)
+{
+ HierarchicalMeshSplitter splitter(desc);
+
+ return splitChunkInternal(iHMesh, chunkIndex, meshProcessingParams, splitter, collisionDesc, randomSeed, progressListener, cancel);
+}
+
// Builds the transform that maps the cutout map onto one face of the chunk's
// bounding box.  sliceAxes gives the (width, height, normal) axis assignment,
// sliceSignNum selects the +/- face along the normal axis, and desc supplies
// the per-direction scale/offset/invert controls.  invertX is set when the
// resulting frame mirrors the cutout map's X direction.
PX_INLINE PxMat44 createCutoutFrame(const physx::PxVec3& center, const physx::PxVec3& extents, uint32_t sliceAxes[3], uint32_t sliceSignNum, const FractureCutoutDesc& desc, bool& invertX)
{
	const uint32_t sliceDirIndex = sliceAxes[2] * 2 + sliceSignNum;	// Index into desc's per-direction arrays (axis*2 + sign)
	const float sliceSign = sliceSignNum ? -1.0f : 1.0f;
	physx::PxVec3 n = createAxis(sliceAxes[2]) * sliceSign;	// Face normal
	PxMat44 cutoutTM;
	cutoutTM.column2 = PxVec4(n, 0.f);
	// NOTE(review): the y (axis 1) case flips the sign, presumably to keep the
	// (column0, column1, normal) frame consistently handed — confirm.
	float applySign;
	switch (sliceAxes[2])
	{
	case 0:
		applySign = 1.0f;
		break;
	case 1:
		applySign = -1.0f;
		break;
	default:
	case 2:
		applySign = 1.0f;
	}
	const physx::PxVec3 p = createAxis(sliceAxes[1]);	// Cutout "height" direction
	const float cutoutPadding = desc.tileFractureMap ? 0.0f : 0.0001f;	// Slight enlargement unless the map tiles
	cutoutTM.column1 = PxVec4(p, 0.f);
	cutoutTM.column0 = PxVec4(p.cross(n), 0.f);
	// Map the cutout map's pixel dimensions to the face's world dimensions,
	// applying the per-direction scale and invert controls
	float cutoutWidth = 2 * extents[sliceAxes[0]] * (1.0f + cutoutPadding);
	float cutoutHeight = 2 * extents[sliceAxes[1]] * (1.0f + cutoutPadding);
	cutoutWidth *= (desc.cutoutWidthInvert[sliceDirIndex] ? -1.0f : 1.0f) * desc.cutoutWidthScale[sliceDirIndex];
	cutoutHeight *= (desc.cutoutHeightInvert[sliceDirIndex] ? -1.0f : 1.0f) * desc.cutoutHeightScale[sliceDirIndex];
	cutoutTM.scale(physx::PxVec4(cutoutWidth / desc.cutoutSizeX, cutoutHeight / desc.cutoutSizeY, 1.0f, 1.0f));
	cutoutTM.setPosition(physx::PxVec3(0.0f));
	float sign = applySign * sliceSign;
	invertX = sign < 0.0f;

	PxVec3 cutoutPosition(0.0, 0.0, 0.0);

	// Position the cutout on the face, applying the per-direction offsets
	cutoutPosition[sliceAxes[0]] = center[sliceAxes[0]] - sign * (0.5f * cutoutWidth + desc.cutoutWidthOffset[sliceDirIndex] * extents[sliceAxes[0]]);
	cutoutPosition[sliceAxes[1]] = center[sliceAxes[1]] - 0.5f * cutoutHeight + desc.cutoutHeightOffset[sliceDirIndex] * extents[sliceAxes[1]];

	cutoutTM.setPosition(cutoutPosition);
	return cutoutTM;
}
+
// Intersects cutoutBSP with sourceBSP to produce a new child chunk of
// sourceIndex.  On success the new part takes ownership of cutoutBSP; when
// the intersection produces no triangles the new part is removed again.
// Returns false only when *cancel has been set.
static bool createCutoutChunk(ExplicitHierarchicalMeshImpl& hMesh, ApexCSG::IApexBSP& cutoutBSP, /*IApexBSP& remainderBSP,*/
                              const ApexCSG::IApexBSP& sourceBSP, uint32_t sourceIndex,
                              const CollisionVolumeDesc& volumeDesc, volatile bool* cancel)
{
//	remainderBSP.combine( cutoutBSP );
//	remainderBSP.op( remainderBSP, Operation::A_Minus_B );
	// Clip the cutout volume against the source chunk's volume
	cutoutBSP.combine(sourceBSP);
	cutoutBSP.op(cutoutBSP, ApexCSG::Operation::Intersection);
	// BRG - should apply island generation here
//	if( gIslandGeneration )
//	{
//	}
	const uint32_t newPartIndex = hMesh.addPart();
	hMesh.mParts[newPartIndex]->mMeshBSP->release();
	hMesh.mParts[newPartIndex]->mMeshBSP = &cutoutBSP;	// Save off and own this
	cutoutBSP.toMesh(hMesh.mParts[newPartIndex]->mMesh);
	if (hMesh.mParts[newPartIndex]->mMesh.size() > 0)
	{
		// NOTE(review): mMeshBSP already points at cutoutBSP, so this copies the
		// BSP onto itself — confirm this self-copy is intended (likely a no-op).
		hMesh.mParts[newPartIndex]->mMeshBSP->copy(cutoutBSP);
		hMesh.buildMeshBounds(newPartIndex);
		hMesh.buildCollisionGeometryForPart(newPartIndex, volumeDesc);
		const uint32_t newChunkIndex = hMesh.addChunk();
		hMesh.mChunks[newChunkIndex]->mParentIndex = (int32_t)sourceIndex;
		hMesh.mChunks[newChunkIndex]->mPartIndex = (int32_t)newPartIndex;
	}
	else
	{
		// Empty intersection: discard the part we just added
		hMesh.removePart(newPartIndex);
	}

	return cancel == NULL || !(*cancel);
}
+
+static void addQuad(ExplicitHierarchicalMeshImpl& hMesh, physx::Array<nvidia::ExplicitRenderTriangle>& mesh, uint32_t sliceDepth, uint32_t submeshIndex,
+ const physx::PxVec2& interiorUVScale, const physx::PxVec3& v0, const physx::PxVec3& v1, const physx::PxVec3& v2, const physx::PxVec3& v3)
+{
+ // Create material frame TM
+ const uint32_t materialIndex = hMesh.addMaterialFrame();
+ nvidia::MaterialFrame materialFrame = hMesh.getMaterialFrame(materialIndex);
+
+ nvidia::FractureMaterialDesc materialDesc;
+
+ /* BRG: these should be obtained from an alternative set of material descs (one for each cutout direction), which describe the UV layout around the chunk cutout. */
+ materialDesc.uAngle = 0.0f;
+ materialDesc.uvOffset = physx::PxVec2(0.0f);
+ materialDesc.uvScale = interiorUVScale;
+
+ materialDesc.tangent = v1 - v0;
+ materialDesc.tangent.normalize();
+ physx::PxVec3 normal = materialDesc.tangent.cross(v3 - v0);
+ normal.normalize();
+ const physx::PxPlane plane(v0, normal);
+
+ materialFrame.buildCoordinateSystemFromMaterialDesc(materialDesc, plane);
+ materialFrame.mFractureMethod = nvidia::FractureMethod::Cutout;
+ materialFrame.mFractureIndex = -1; // Signifying that it's a cutout around the chunk, so we shouldn't make assumptions about the face direction
+ materialFrame.mSliceDepth = sliceDepth;
+
+ hMesh.setMaterialFrame(materialIndex, materialFrame);
+
+ // Create interpolator for triangle quantities
+
+ nvidia::TriangleFrame triangleFrame(materialFrame.mCoordinateSystem, interiorUVScale, physx::PxVec2(0.0f));
+
+ // Fill one triangle
+ nvidia::ExplicitRenderTriangle& tri0 = mesh.insert();
+ memset(&tri0, 0, sizeof(nvidia::ExplicitRenderTriangle));
+ tri0.submeshIndex = (int32_t)submeshIndex;
+ tri0.extraDataIndex = materialIndex;
+ tri0.smoothingMask = 0;
+ tri0.vertices[0].position = v0;
+ tri0.vertices[1].position = v1;
+ tri0.vertices[2].position = v2;
+ for (int i = 0; i < 3; ++i)
+ {
+ triangleFrame.interpolateVertexData(tri0.vertices[i]);
+ }
+
+ // ... and another
+ nvidia::ExplicitRenderTriangle& tri1 = mesh.insert();
+ memset(&tri1, 0, sizeof(nvidia::ExplicitRenderTriangle));
+ tri1.submeshIndex = (int32_t)submeshIndex;
+ tri1.extraDataIndex = materialIndex;
+ tri1.smoothingMask = 0;
+ tri1.vertices[0].position = v2;
+ tri1.vertices[1].position = v3;
+ tri1.vertices[2].position = v0;
+ for (int i = 0; i < 3; ++i)
+ {
+ triangleFrame.interpolateVertexData(tri1.vertices[i]);
+ }
+}
+
+float getDeterminant(const physx::PxMat44& mt)
+{
+ return mt.column0.getXYZ().dot(mt.column1.getXYZ().cross(mt.column2.getXYZ()));
+}
+
// Cuts a single cutout shape out of the face chunk (faceChunkIndex) of hMesh.
// For each convex loop of the cutout (or once for the whole outline when
// desc.splitNonconvexRegions is false), a "fence" of quads is built around the loop
// between minPlane and maxPlane, converted to a BSP, intersected with faceBSP, and
// added as a new child chunk (see createCutoutChunk).  Edge noise is then applied to
// the new parts and their collision hulls are trimmed by the fence planes.
// Returns false if the operation was canceled via *cancel.
static bool createCutout(
	ExplicitHierarchicalMeshImpl& hMesh,
	uint32_t faceChunkIndex,
	ApexCSG::IApexBSP& faceBSP, // May be modified
	const nvidia::Cutout& cutout,
	const PxMat44& cutoutTM,
	const nvidia::FractureCutoutDesc& desc,
	const nvidia::NoiseParameters& edgeNoise,
	float cutoutDepth,
	const nvidia::FractureMaterialDesc& materialDesc,
	const CollisionVolumeDesc& volumeDesc,
	const physx::PxPlane& minPlane,
	const physx::PxPlane& maxPlane,
	nvidia::HierarchicalProgressListener& localProgressListener,
	volatile bool* cancel)
{
	bool canceled = false;

	// Adjacent fence quads with normals within this angle of each other get averaged (smoothed) normals
	const float cosSmoothingThresholdAngle = physx::PxCos(desc.facetNormalMergeThresholdAngle * physx::PxPi / 180.0f);

	physx::Array<physx::PxPlane> trimPlanes;
	const uint32_t oldPartCount = hMesh.partCount();
	localProgressListener.setSubtaskWork(1);
	// One fence per convex loop if splitting nonconvex regions, otherwise a single fence for the whole outline
	const uint32_t loopCount = desc.splitNonconvexRegions ? cutout.convexLoops.size() : 1;

	// Handedness of the cutout transform determines the traversal direction needed for consistent winding
	const bool ccw = getDeterminant(cutoutTM) > (float)0;

	const uint32_t facePartIndex = (uint32_t)*hMesh.partIndex(faceChunkIndex);

	const uint32_t sliceDepth = hMesh.depth(faceChunkIndex) + 1;

	for (uint32_t j = 0; j < loopCount && !canceled; ++j)
	{
		const uint32_t loopSize = desc.splitNonconvexRegions ? cutout.convexLoops[j].polyVerts.size() : cutout.vertices.size();
		if (desc.splitNonconvexRegions)
		{
			// Trim planes are per-loop in this mode
			trimPlanes.reset();
		}
		// Build mesh which surrounds the cutout
		physx::Array<nvidia::ExplicitRenderTriangle> loopMesh;
		for (uint32_t k = 0; k < loopSize; ++k)
		{
			// Walk the loop forward (ccw) or backward, pairing each vertex with its successor
			uint32_t kPrime = ccw ? k : loopSize - 1 - k;
			uint32_t kPrimeNext = ccw ? ((kPrime + 1) % loopSize) : (kPrime == 0 ? (loopSize - 1) : (kPrime-1));
			const uint32_t vertexIndex0 = desc.splitNonconvexRegions ? cutout.convexLoops[j].polyVerts[kPrime].index : kPrime;
			const uint32_t vertexIndex1 = desc.splitNonconvexRegions ? cutout.convexLoops[j].polyVerts[kPrimeNext].index : kPrimeNext;
			const physx::PxVec3& v0 = cutout.vertices[vertexIndex0];
			const physx::PxVec3& v1 = cutout.vertices[vertexIndex1];
			const physx::PxVec3 v0World = cutoutTM.transform(v0);
			const physx::PxVec3 v1World = cutoutTM.transform(v1);
			// The fence quad for this edge spans from the min "sandwich" plane to the max one
			const physx::PxVec3 quad0 = minPlane.project(v0World);
			const physx::PxVec3 quad1 = minPlane.project(v1World);
			const physx::PxVec3 quad2 = maxPlane.project(v1World);
			const physx::PxVec3 quad3 = maxPlane.project(v0World);
			addQuad(hMesh, loopMesh, sliceDepth, materialDesc.interiorSubmeshIndex, materialDesc.uvScale, quad0, quad1, quad2, quad3);
			if (cutout.convexLoops.size() == 1 || desc.splitNonconvexRegions)
			{
				// Record the quad's plane (through its centroid) for trimming collision hulls later
				physx::PxVec3 planeNormal = (quad2 - quad0).cross(quad3 - quad1);
				planeNormal.normalize();
				trimPlanes.pushBack(physx::PxPlane(0.25f * (quad0 + quad1 + quad2 + quad3), planeNormal));
			}
		}
		// Smooth the mesh's normals and tangents
		PX_ASSERT(loopMesh.size() == 2 * loopSize);
		if (loopMesh.size() == 2 * loopSize)
		{
			// addQuad emits two triangles per edge, so triangles 2k, 2k+1 belong to edge k.
			// Average normals across adjacent quads whose normals are within the merge threshold.
			for (uint32_t k = 0; k < loopSize; ++k)
			{
				const uint32_t triIndex0 = 2 * k;
				const uint32_t frameIndex = loopMesh[triIndex0].extraDataIndex;
				PX_ASSERT(frameIndex == loopMesh[triIndex0 + 1].extraDataIndex);
				physx::PxMat44& frame = hMesh.mMaterialFrames[frameIndex].mCoordinateSystem;
				const uint32_t triIndex2 = 2 * ((k + 1) % loopSize);
				const uint32_t nextFrameIndex = loopMesh[triIndex2].extraDataIndex;
				PX_ASSERT(nextFrameIndex == loopMesh[triIndex2 + 1].extraDataIndex);
				physx::PxMat44& nextFrame = hMesh.mMaterialFrames[nextFrameIndex].mCoordinateSystem;
				const physx::PxVec3 normalK = frame.column2.getXYZ();
				const physx::PxVec3 normalK1 = nextFrame.column2.getXYZ();
				// NOTE(review): this skips smoothing when the normals are CLOSER than the
				// threshold (dot < cos), which looks inverted relative to the usual
				// "merge when within threshold" convention — confirm intent.
				if (normalK.dot(normalK1) < cosSmoothingThresholdAngle)
				{
					continue;
				}
				physx::PxVec3 normal = normalK + normalK1;
				normal.normalize();
				// The shared edge is vertices 1,2 of quad k's first tri / 0 of its second tri,
				// and vertex 0 / 1,2 of quad k+1's triangles
				loopMesh[triIndex0].vertices[1].normal = normal;
				loopMesh[triIndex0].vertices[2].normal = normal;
				loopMesh[triIndex0 + 1].vertices[0].normal = normal;
				loopMesh[triIndex2].vertices[0].normal = normal;
				loopMesh[triIndex2 + 1].vertices[1].normal = normal;
				loopMesh[triIndex2 + 1].vertices[2].normal = normal;
			}
			// Rebuild tangents from the (possibly smoothed) normals
			for (uint32_t k = 0; k < loopMesh.size(); ++k)
			{
				nvidia::ExplicitRenderTriangle& tri = loopMesh[k];
				for (uint32_t v = 0; v < 3; ++v)
				{
					nvidia::Vertex& vert = tri.vertices[v];
					vert.tangent = vert.binormal.cross(vert.normal);
				}
			}
		}
		// Create loop cutout BSP
		ApexCSG::IApexBSP* loopBSP = createBSP(hMesh.mBSPMemCache);
		ApexCSG::BSPBuildParameters bspBuildParams = gDefaultBuildParameters;
		bspBuildParams.rnd = &userRnd;
		bspBuildParams.internalTransform = faceBSP.getInternalTransform();
		loopBSP->fromMesh(&loopMesh[0], loopMesh.size(), bspBuildParams);
		const uint32_t oldSize = hMesh.partCount();
		// loopBSP will be modified and owned by the new chunk.
		canceled = !createCutoutChunk(hMesh, *loopBSP, faceBSP, /**cutoutSource,*/ faceChunkIndex, volumeDesc, cancel);
		for (uint32_t partN = oldSize; partN < hMesh.partCount(); ++partN)
		{
			// Apply graphical noise to new parts, if requested
			if (edgeNoise.amplitude > 0.0f)
			{
				applyNoiseToChunk(hMesh, facePartIndex, partN, edgeNoise, cutoutDepth);
			}
			// Trim new part collision hulls
			for (uint32_t trimN = 0; trimN < trimPlanes.size(); ++trimN)
			{
				for (uint32_t hullIndex = 0; hullIndex < hMesh.mParts[partN]->mCollision.size(); ++hullIndex)
				{
					hMesh.mParts[partN]->mCollision[hullIndex]->impl.intersectPlaneSide(trimPlanes[trimN]);
				}
			}
		}
		localProgressListener.completeSubtask();
	}
	// Trim hulls
	// When not splitting nonconvex regions, trimPlanes accumulated over the single outline
	// loop; apply them to all parts created by this call.
	if (!canceled)
	{
		for (uint32_t partN = oldPartCount; partN < hMesh.partCount(); ++partN)
		{
			for (uint32_t hullIndex = 0; hullIndex < hMesh.mParts[partN]->mCollision.size(); ++hullIndex)
			{
				ConvexHullImpl& hull = hMesh.mParts[partN]->mCollision[hullIndex]->impl;
				if (!desc.splitNonconvexRegions)
				{
					for (uint32_t trimN = 0; trimN < trimPlanes.size(); ++trimN)
					{
						hull.intersectPlaneSide(trimPlanes[trimN]);
					}
				}
//				hull.intersectHull(hMesh.mParts[faceChunkIndex]->mCollision.impl);	// Do we need this?
			}
		}
	}

	return !canceled;
}
+
// Recursively instances the chunk subtree below instancingChunkIndex, attaching the clones
// under instancedChunkIndex.  Each clone shares its source chunk's part (geometry), inherits
// the instanced parent's position/UV offsets, and is flagged ChunkIsInstanced.
// NOTE: hMesh.chunkCount() is re-evaluated every iteration while addChunk() grows the array.
// Newly added clones have mParentIndex == instancedChunkIndex, so (assuming
// instancedChunkIndex != instancingChunkIndex, true for the callers seen here) they fail the
// test below and the loop terminates.
static void instanceChildren(ExplicitHierarchicalMeshImpl& hMesh, uint32_t instancingChunkIndex, uint32_t instancedChunkIndex)
{
	for (uint32_t chunkIndex = 0; chunkIndex < hMesh.chunkCount(); ++chunkIndex)
	{
		if (hMesh.mChunks[chunkIndex]->mParentIndex == (int32_t)instancingChunkIndex)
		{
			// Found a child. Instance.
			const uint32_t instancedChildIndex = hMesh.addChunk();
			hMesh.mChunks[instancedChildIndex]->mFlags |= nvidia::apex::DestructibleAsset::ChunkIsInstanced;
			hMesh.mChunks[instancedChildIndex]->mParentIndex = (int32_t)instancedChunkIndex;
			hMesh.mChunks[instancedChildIndex]->mPartIndex = hMesh.mChunks[chunkIndex]->mPartIndex;	// Same part as instancing child
			hMesh.mChunks[instancedChildIndex]->mInstancedPositionOffset = hMesh.mChunks[instancedChunkIndex]->mInstancedPositionOffset;	// Same offset as parent
			hMesh.mChunks[instancedChildIndex]->mInstancedUVOffset = hMesh.mChunks[instancedChunkIndex]->mInstancedUVOffset;	// Same offset as parent
			instanceChildren(hMesh, chunkIndex, instancedChildIndex);	// Recurse
		}
	}
}
+
// Applies every cutout in cutoutSet to the face chunk faceChunkIndex of hMesh, tiling the
// cutout map across the face when desc.tileFractureMap is set.  Each cutout chunk may be
// further fractured (slicing or Voronoi, per desc.chunkFracturingMethod), and congruent
// chunks (identical up to a translation of the cutout map) are detected and instanced when
// desc.instancingMode allows.  Returns false if canceled via *cancel.
static bool createFaceCutouts
(
	ExplicitHierarchicalMeshImpl& hMesh,
	uint32_t faceChunkIndex,
	ApexCSG::IApexBSP& faceBSP, // May be modified
	const nvidia::FractureCutoutDesc& desc,
	const nvidia::NoiseParameters& edgeNoise,
	float cutoutDepth,
	const nvidia::FractureMaterialDesc& materialDesc,
	const nvidia::CutoutSetImpl& cutoutSet,
	const PxMat44& cutoutTM,
	const float mapXLow,
	const float mapYLow,
	const CollisionDesc& collisionDesc,
	const nvidia::FractureSliceDesc& sliceDesc,
	const nvidia::FractureVoronoiDesc& voronoiDesc,
	const physx::PxPlane& facePlane,
	const physx::PxVec3& localCenter,
	const physx::PxVec3& localExtents,
	nvidia::IProgressListener& progressListener,
	volatile bool* cancel
)
{
	// Working copy of the Voronoi desc, pointing at per-chunk site storage (only used
	// when the chunk fracturing method is Voronoi)
	nvidia::FractureVoronoiDesc cutoutVoronoiDesc;
	physx::Array<physx::PxVec3> perChunkSites;

	switch (desc.chunkFracturingMethod)
	{
	case FractureCutoutDesc::VoronoiFractureCutoutChunks:
		{
			cutoutVoronoiDesc = voronoiDesc;
			perChunkSites.resize(voronoiDesc.siteCount);
			cutoutVoronoiDesc.sites = voronoiDesc.siteCount > 0 ? &perChunkSites[0] : NULL;
			cutoutVoronoiDesc.siteCount = voronoiDesc.siteCount;
		}
		break;
	}

	// IApexBSP* cutoutSource = createBSP( hMesh.mBSPMemCache );
	// cutoutSource->copy( faceBSP );

	const physx::PxVec3 faceNormal = facePlane.n;

	// "Sandwich" planes - padded planes on either side of the face slab; cutout fence
	// quads span between these (see createCutout)
	const float centerDisp = -facePlane.d - localExtents[2];
	const float paddedExtent = 1.01f * localExtents[2];
	const physx::PxPlane maxPlane(faceNormal, -centerDisp - paddedExtent);
	const physx::PxPlane minPlane(faceNormal, -centerDisp + paddedExtent);

	bool canceled = false;

	// Tiling bounds - integer tile index ranges [ixStart,ixStop) x [iyStart,iyStop) that
	// cover the face bounds (a single tile when not tiling)
	const float xTol = CUTOUT_MAP_BOUNDS_TOLERANCE*localExtents[0];
	const float yTol = CUTOUT_MAP_BOUNDS_TOLERANCE*localExtents[1];
	const float mapWidth = cutoutTM.column0.magnitude()*cutoutSet.getDimensions()[0];
	const float mapHeight = cutoutTM.column1.magnitude()*cutoutSet.getDimensions()[1];
	const float boundsXLow = localCenter[0] - localExtents[0];
	const float boundsWidth = 2*localExtents[0];
	const float boundsYLow = localCenter[1] - localExtents[1];
	const float boundsHeight = 2*localExtents[1];
	int32_t ixStart = desc.tileFractureMap ? (int32_t)physx::PxFloor((boundsXLow - mapXLow)/mapWidth + xTol) : 0;
	int32_t ixStop = desc.tileFractureMap ? (int32_t)physx::PxCeil((boundsXLow - mapXLow + boundsWidth)/mapWidth - xTol) : 1;
	int32_t iyStart = desc.tileFractureMap ? (int32_t)physx::PxFloor((boundsYLow - mapYLow)/mapHeight + yTol) : 0;
	int32_t iyStop = desc.tileFractureMap ? (int32_t)physx::PxCeil((boundsYLow - mapYLow + boundsHeight)/mapHeight - yTol) : 1;

	// Find UV map

	const physx::PxVec3 xDir = cutoutTM.column0.getXYZ().getNormalized();
	const physx::PxVec3 yDir = cutoutTM.column1.getXYZ().getNormalized();

	// First find a good representative face triangle: best-aligned normal, ties (within
	// tolerance) broken by larger area
	const float faceDiffTolerance = 0.001f;
	uint32_t uvMapTriangleIndex = 0;
	float uvMapTriangleIndexFaceDiff = PX_MAX_F32;
	float uvMapTriangleIndexArea = 0.0f;
	const uint32_t facePartIndex = (uint32_t)*hMesh.partIndex(faceChunkIndex);
	const uint32_t facePartTriangleCount = hMesh.meshTriangleCount(facePartIndex);
	const nvidia::ExplicitRenderTriangle* facePartTriangles = hMesh.meshTriangles(facePartIndex);
	for (uint32_t triN = 0; triN < facePartTriangleCount; ++triN)
	{
		const nvidia::ExplicitRenderTriangle& tri = facePartTriangles[triN];
		physx::PxVec3 triNormal = (tri.vertices[1].position - tri.vertices[0].position).cross(tri.vertices[2].position - tri.vertices[0].position);
		const float triArea = triNormal.normalize();	// Actually twice the area, but it's OK
		const float triFaceDiff = (faceNormal-triNormal).magnitude();
		if (triFaceDiff < uvMapTriangleIndexFaceDiff - faceDiffTolerance || (triFaceDiff < uvMapTriangleIndexFaceDiff + faceDiffTolerance && triArea > uvMapTriangleIndexArea))
		{	// Significantly better normal, or normal is close and the area is bigger
			uvMapTriangleIndex = triN;
			uvMapTriangleIndexFaceDiff = triFaceDiff;
			uvMapTriangleIndexArea = triArea;
		}
	}

	// Set up interpolation for UV channel 0
	nvidia::TriangleFrame uvMapTriangleFrame(facePartTriangles[uvMapTriangleIndex], (uint64_t)1<<nvidia::TriangleFrame::UV0_u | (uint64_t)1<<nvidia::TriangleFrame::UV0_v);

	// Periodic maps need one extra tile on each side so the pattern wraps correctly
	if (cutoutSet.isPeriodic())
	{
		--ixStart;
		++ixStop;
		--iyStart;
		++iyStop;
	}

#define FORCE_INSTANCING 0
#if !FORCE_INSTANCING
	// Volume tolerance used when testing two chunks for congruence (XOR volume near zero)
	const float volumeTol = PxMax(localExtents[0]*localExtents[1]*localExtents[2]*MESH_INSTANACE_TOLERANCE*MESH_INSTANACE_TOLERANCE*MESH_INSTANACE_TOLERANCE, (float)1.0e-15);
//	const float areaTol = PxMax((localExtents[0]*localExtents[1]+localExtents[1]*localExtents[2]+localExtents[2]*localExtents[0])*MESH_INSTANACE_TOLERANCE*MESH_INSTANACE_TOLERANCE, (float)1.0e-10);
#endif

	const bool instanceCongruentChunks = desc.instancingMode == FractureCutoutDesc::InstanceCongruentChunks || desc.instancingMode == FractureCutoutDesc::InstanceAllChunks;

	// Estimate total work for progress
	uint32_t totalWork = 0;
	for (uint32_t i = 0; i < cutoutSet.cutouts.size(); ++i)
	{
		totalWork += cutoutSet.cutouts[i].convexLoops.size();
	}
	totalWork *= (ixStop-ixStart)*(iyStop-iyStart);
	nvidia::HierarchicalProgressListener localProgressListener(PxMax((int)totalWork, 1), &progressListener);

	physx::Array<uint32_t> unhandledChunks;

	if (cutoutDepth == 0.0f)
	{
		cutoutDepth = 2*localExtents[2];	// handle special case of all-the-way-through cutout. cutoutDepth is only used for noise grid calculations
	}

	const unsigned cutoutChunkDepth = hMesh.depth(faceChunkIndex) + 1;

	// Loop over cutouts on the outside loop. For each cutout, create all tiled (potential) clones
	for (uint32_t i = 0; i < cutoutSet.cutouts.size() && !canceled; ++i)
	{
		// Keep track of starting chunk count. We will process the newly created chunks below.
		const uint32_t oldChunkCount = hMesh.chunkCount();

		for (int32_t iy = iyStart; iy < iyStop && !canceled; ++iy)
		{
			for (int32_t ix = ixStart; ix < ixStop && !canceled; ++ix)
			{
				// World-space offset of this tile, and the corresponding UV offset obtained
				// by interpolating UV0 of the representative face triangle at that offset
				physx::PxVec3 offset = (float)ix*mapWidth*xDir + (float)iy*mapHeight*yDir;
				nvidia::Vertex interpolation;
				interpolation.position = offset;
				uvMapTriangleFrame.interpolateVertexData(interpolation);
				// BRG - note bizarre need to flip v...
				physx::PxVec2 uvOffset(interpolation.uv[0].u, -interpolation.uv[0].v);
				PxMat44 offsetCutoutTM(cutoutTM);
				offsetCutoutTM.setPosition(offsetCutoutTM.getPosition() + offset);
				const uint32_t newChunkIndex = hMesh.chunkCount();
				canceled = !createCutout(hMesh, faceChunkIndex, faceBSP, cutoutSet.cutouts[i], offsetCutoutTM, desc, edgeNoise, cutoutDepth, materialDesc, getVolumeDesc(collisionDesc, cutoutChunkDepth), minPlane, maxPlane, localProgressListener, cancel);
				if (!canceled && instanceCongruentChunks && newChunkIndex < hMesh.chunkCount())
				{
					// Temporarily record the tile offsets on the new chunk; they are
					// rebased/zeroed below once congruence is decided
					PX_ASSERT(newChunkIndex + 1 == hMesh.chunkCount());
					if (newChunkIndex + 1 == hMesh.chunkCount())
					{
						hMesh.mChunks[newChunkIndex]->mInstancedPositionOffset = offset;
						hMesh.mChunks[newChunkIndex]->mInstancedUVOffset = uvOffset;
					}
				}
			}
		}

		// Keep track of which chunks we've checked for congruence
		const uint32_t possibleCongruentChunkCount = hMesh.chunkCount()-oldChunkCount;

		// Fill unhandledChunks with the new chunk indices in descending order
		// NOTE(review): this inner 'i' shadows the outer cutout-loop index 'i'; harmless
		// but a rename would aid clarity.
		unhandledChunks.resize(possibleCongruentChunkCount);
		uint32_t index = hMesh.chunkCount();
		for (uint32_t i = 0; i < possibleCongruentChunkCount; ++i)
		{
			unhandledChunks[i] = --index;
		}

		uint32_t unhandledChunkCount = possibleCongruentChunkCount;
		while (unhandledChunkCount > 0)
		{
			// Have a fresh chunk to test for instancing.
			const uint32_t chunkIndex = unhandledChunks[--unhandledChunkCount];
			ExplicitHierarchicalMeshImpl::Chunk* chunk = hMesh.mChunks[chunkIndex];

			// Record its offset and rebase
			const physx::PxVec3 instancingBaseOffset = chunk->mInstancedPositionOffset;
			const physx::PxVec2 instancingBaseUVOffset = chunk->mInstancedUVOffset;
			chunk->mInstancedPositionOffset = physx::PxVec3(0.0f);
			chunk->mInstancedUVOffset = physx::PxVec2(0.0f);

			// If this option is selected, slice regions further
			switch (desc.chunkFracturingMethod)
			{
			case FractureCutoutDesc::SliceFractureCutoutChunks:
				{
					// Split hierarchically
					physx::PxPlane trailingPlanes[3];	// passing in depth = 0 will initialize these
					physx::PxPlane leadingPlanes[3];
#if 1	// Eliminating volume calculation here, for performance. May introduce it later once the mesh is calculated.
					const float bspVolume = 1.0f;
#else
					float bspArea, bspVolume;
					chunkBSP->getSurfaceAreaAndVolume(bspArea, bspVolume, true);
#endif
					canceled = !hierarchicallySplitChunkInternal(hMesh, chunkIndex, 0, trailingPlanes, leadingPlanes, *hMesh.mParts[(uint32_t)chunk->mPartIndex]->mMeshBSP, bspVolume, sliceDesc, collisionDesc, localProgressListener, cancel);
				}
				break;
			case FractureCutoutDesc::VoronoiFractureCutoutChunks:
				{
					// Voronoi split
					cutoutVoronoiDesc.siteCount = createVoronoiSitesInsideMeshInternal(hMesh, &chunkIndex, 1, voronoiDesc.siteCount > 0 ? &perChunkSites[0] : NULL, NULL, voronoiDesc.siteCount, NULL, &gMicrogridSize, gMeshMode, progressListener );
					canceled = !voronoiSplitChunkInternal(hMesh, chunkIndex, *hMesh.mParts[(uint32_t)chunk->mPartIndex]->mMeshBSP, cutoutVoronoiDesc, collisionDesc, localProgressListener, cancel);
				}
				break;
			}

			// Now see if we can instance this chunk
			if (unhandledChunkCount > 0)
			{
				bool congruentChunkFound = false;
				uint32_t testChunkCount = unhandledChunkCount;
				while (testChunkCount > 0)
				{
					const uint32_t testChunkIndex = unhandledChunks[--testChunkCount];
					ExplicitHierarchicalMeshImpl::Chunk* testChunk = hMesh.mChunks[testChunkIndex];
					const uint32_t testPartIndex = (uint32_t)testChunk->mPartIndex;
					ExplicitHierarchicalMeshImpl::Part* testPart = hMesh.mParts[testPartIndex];

					// Create a shifted BSP of the test chunk
					ApexCSG::IApexBSP* combinedBSP = createBSP(hMesh.mBSPMemCache);
					const physx::PxMat44 tm(physx::PxMat33(physx::PxIdentity), instancingBaseOffset-testChunk->mInstancedPositionOffset);
					combinedBSP->copy(*testPart->mMeshBSP, tm);
					combinedBSP->combine(*hMesh.mParts[(uint32_t)chunk->mPartIndex]->mMeshBSP);
					float xorArea, xorVolume;
					combinedBSP->getSurfaceAreaAndVolume(xorArea, xorVolume, true, ApexCSG::Operation::Exclusive_Or);
					combinedBSP->release();
					if (xorVolume <= volumeTol)
					{
						// XOR of the two volumes is nearly zero. Consider these chunks to be congruent, and instance.
						congruentChunkFound = true;
						testChunk->mInstancedPositionOffset -= instancingBaseOffset;	// Correct offset
						testChunk->mInstancedUVOffset -= instancingBaseUVOffset;	// Correct offset
						testChunk->mFlags |= nvidia::apex::DestructibleAsset::ChunkIsInstanced;	// Set instance flag
						hMesh.removePart((uint32_t)testChunk->mPartIndex);	// Remove part for this chunk, since we'll be instancing another part
						testChunk->mPartIndex = chunk->mPartIndex;
						instanceChildren(hMesh, chunkIndex, testChunkIndex);	// Recursive
						--unhandledChunkCount;	// This chunk is handled now
						nvidia::swap(unhandledChunks[unhandledChunkCount],unhandledChunks[testChunkCount]);	// Keep the unhandled chunk array packed
					}
				}

				// If the chunk is instanced, then mark it so
				if (congruentChunkFound)
				{
					chunk->mFlags |= nvidia::apex::DestructibleAsset::ChunkIsInstanced;
				}
			}
		}

		// Second pass at cutout chunks
		for (uint32_t j = 0; j < possibleCongruentChunkCount && !canceled; ++j)
		{
			ExplicitHierarchicalMeshImpl::Chunk* chunk = hMesh.mChunks[oldChunkCount+j];
			if ((chunk->mFlags & nvidia::apex::DestructibleAsset::ChunkIsInstanced) == 0)
			{
				// This chunk will not be instanced. Zero its offsets.
				chunk->mInstancedPositionOffset = physx::PxVec3(0.0f);
				chunk->mInstancedUVOffset = physx::PxVec2(0.0f);
			}
		}
	}

//	cutoutSource->release();

	return !canceled;
}
+
// Slices a "face" slab off the core chunk along facePlane.  When cutoutDepth > 0 a noisy
// backface grid is built at depth cutoutDepth, the core BSP is split into face and
// remainder, and the face becomes a new chunk; when cutoutDepth == 0 the cut goes all the
// way through (the core itself becomes the face, 'stop' is set, and coreChunkIndex may be
// decremented if the untouched core copy is removed).  The resulting face chunk is then
// fractured by createFaceCutouts().  Returns false if canceled via *cancel.
static bool cutoutFace
(
	ExplicitHierarchicalMeshImpl& hMesh,
	physx::Array<physx::PxPlane>& faceTrimPlanes,
	ApexCSG::IApexBSP* coreBSP,
	uint32_t& coreChunkIndex, // This may be changed if the original core chunk is sliced away completely
	const nvidia::FractureCutoutDesc& desc,
	const nvidia::NoiseParameters& backfaceNoise,
	const nvidia::NoiseParameters& edgeNoise,
	const nvidia::FractureMaterialDesc& materialDesc,
	const int32_t fractureIndex,
	const physx::PxPlane& facePlane,
	const nvidia::CutoutSet& iCutoutSetImpl,
	const PxMat44& cutoutTM,
	const float mapXLow,
	const float mapYLow,
	const physx::PxBounds3& localBounds,
	const float cutoutDepth,
	const nvidia::FractureSliceDesc& sliceDesc,
	const nvidia::FractureVoronoiDesc& voronoiDesc,
	const CollisionDesc& collisionDesc,
	nvidia::IProgressListener& progressListener,
	bool& stop,
	volatile bool* cancel
)
{
	nvidia::HierarchicalProgressListener localProgressListener(PxMax((int32_t)iCutoutSetImpl.getCutoutCount(), 1), &progressListener);

	const physx::PxVec3 localExtents = localBounds.getExtents();
	const physx::PxVec3 localCenter = localBounds.getCenter();

	// Largest extent, used to scale the backface noise grid
	const float sizeScale = PxMax(PxMax(localExtents.x, localExtents.y), localExtents.z);

	uint32_t corePartIndex = (uint32_t)hMesh.mChunks[coreChunkIndex]->mPartIndex;

	const uint32_t oldSize = hMesh.chunkCount();
	ApexCSG::IApexBSP* faceBSP = createBSP(hMesh.mBSPMemCache);	// face BSP defaults to all space
	uint32_t faceChunkIndex = 0xFFFFFFFF;
	if (cutoutDepth > 0.0f)	// (depth = 0) => slice all the way through
	{
		nvidia::IntersectMesh grid;

		const float mapWidth = cutoutTM.column0.magnitude()*iCutoutSetImpl.getDimensions()[0];
		const float mapHeight = cutoutTM.column1.magnitude()*iCutoutSetImpl.getDimensions()[1];

		// Create faceBSP from grid
		GridParameters gridParameters;
		gridParameters.interiorSubmeshIndex = materialDesc.interiorSubmeshIndex;
		gridParameters.noise = backfaceNoise;
		gridParameters.level0Mesh = &hMesh.mParts[0]->mMesh;	// must be set each time, since this can move with array resizing
		gridParameters.sizeScale = sizeScale;
		if (desc.instancingMode != FractureCutoutDesc::DoNotInstance)
		{
			// Noise must tile with the cutout map for instanced chunks to remain congruent
			gridParameters.xPeriod = mapWidth;
			gridParameters.yPeriod = mapHeight;
		}
		// Create the slicing plane, offset inward from the face by cutoutDepth
		physx::PxPlane slicePlane = facePlane;
		slicePlane.d += cutoutDepth;
		gridParameters.materialFrameIndex = hMesh.addMaterialFrame();
		nvidia::MaterialFrame materialFrame = hMesh.getMaterialFrame(gridParameters.materialFrameIndex);
		materialFrame.buildCoordinateSystemFromMaterialDesc(materialDesc, slicePlane);
		materialFrame.mFractureMethod = nvidia::FractureMethod::Cutout;
		materialFrame.mFractureIndex = fractureIndex;
		materialFrame.mSliceDepth = hMesh.depth(coreChunkIndex) + 1;
		hMesh.setMaterialFrame(gridParameters.materialFrameIndex, materialFrame);
		gridParameters.triangleFrame.setFlat(materialFrame.mCoordinateSystem, materialDesc.uvScale, materialDesc.uvOffset);
		buildIntersectMesh(grid, slicePlane, materialFrame, (int32_t)sliceDesc.noiseMode, &gridParameters);
		// Tighter-than-default tolerances for the face BSP build
		ApexCSG::BSPTolerances bspTolerances = ApexCSG::gDefaultTolerances;
		bspTolerances.linear = 0.00001f;
		bspTolerances.angular = 0.00001f;
		faceBSP->setTolerances(bspTolerances);
		ApexCSG::BSPBuildParameters bspBuildParams = gDefaultBuildParameters;
		bspBuildParams.rnd = &userRnd;
		bspBuildParams.internalTransform = coreBSP->getInternalTransform();
		faceBSP->fromMesh(&grid.m_triangles[0], grid.m_triangles.size(), bspBuildParams);
		// Split the core by the slice half-space: faceBSP = core minus slab, core = core intersect slab
		coreBSP->combine(*faceBSP);
		faceBSP->op(*coreBSP, ApexCSG::Operation::A_Minus_B);
		coreBSP->op(*coreBSP, ApexCSG::Operation::Intersection);
		uint32_t facePartIndex = hMesh.addPart();
		faceChunkIndex = hMesh.addChunk();
		hMesh.mChunks[faceChunkIndex]->mPartIndex = (int32_t)facePartIndex;
		faceBSP->toMesh(hMesh.mParts[facePartIndex]->mMesh);
		CollisionVolumeDesc volumeDesc = getVolumeDesc(collisionDesc, hMesh.depth(coreChunkIndex) + 1);
		if (hMesh.mParts[facePartIndex]->mMesh.size() != 0)
		{
			hMesh.mParts[facePartIndex]->mMeshBSP->copy(*faceBSP);
			hMesh.buildMeshBounds(facePartIndex);
			hMesh.buildCollisionGeometryForPart(facePartIndex, volumeDesc);
			if (desc.trimFaceCollisionHulls && (gridParameters.noise.amplitude != 0.0f || volumeDesc.mHullMethod != nvidia::ConvexHullMethod::WRAP_GRAPHICS_MESH))
			{
				// Trim backface
				for (uint32_t hullIndex = 0; hullIndex < hMesh.mParts[facePartIndex]->mCollision.size(); ++hullIndex)
				{
					ConvexHullImpl& hull = hMesh.mParts[facePartIndex]->mCollision[hullIndex]->impl;
					hull.intersectPlaneSide(physx::PxPlane(-slicePlane.n, -slicePlane.d));
					faceTrimPlanes.pushBack(slicePlane);
				}
			}
			hMesh.mChunks[faceChunkIndex]->mParentIndex = 0;
		}
		else
		{
			// Slice produced nothing; roll back the part/chunk we just added
			hMesh.removePart(facePartIndex);
			hMesh.removeChunk(faceChunkIndex);
			faceChunkIndex = 0xFFFFFFFF;
			facePartIndex = 0xFFFFFFFF;
		}
	}
	else
	{
		// Slicing goes all the way through
		faceBSP->copy(*coreBSP);
		if (oldSize == coreChunkIndex + 1)
		{
			// Core hasn't been split yet. We don't want a copy of the original mesh at level 1, so remove it.
			hMesh.removePart(corePartIndex--);
			hMesh.removeChunk(coreChunkIndex--);
		}
		faceChunkIndex = coreChunkIndex;
		// This will break us out of both loops (only want to slice all the way through once):
		stop = true;
	}

	localProgressListener.setSubtaskWork(1);

	bool canceled = false;

	if (faceChunkIndex < hMesh.chunkCount())
	{
		// We have a face chunk. Create cutouts
		canceled = !createFaceCutouts(hMesh, faceChunkIndex, *faceBSP, desc, edgeNoise, cutoutDepth, materialDesc, *(const nvidia::CutoutSetImpl*)&iCutoutSetImpl, cutoutTM, mapXLow, mapYLow, collisionDesc,
		                              sliceDesc, voronoiDesc, facePlane, localCenter, localExtents, localProgressListener, cancel);
		// If there is anything left in the face, attach it as unfracturable
		// Volume rejection ratio, perhaps should be exposed
#if 0	// BRG - to do : better treatment of face leftover
		const float volumeRejectionRatio = 0.0001f;
		if (faceBSP->getVolume() >= volumeRejectionRatio * faceVolumeEstimate)
		{
			const uint32_t newPartIndex = hMesh.addPart();
			faceBSP->toMesh(hMesh.mParts[newPartIndex]->mMesh);
			if (hMesh.mParts[newPartIndex]->mMesh.size() != 0)
			{
				hMesh.mParts[newPartIndex]->mMeshBSP->copy(*faceBSP);
				hMesh.buildMeshBounds(newPartIndex);
				hMesh.mParts[newPartIndex]->mCollision.setEmpty();	// BRG - to do : better treatment of face leftover
				hMesh.mParts[newPartIndex]->mParentIndex = faceChunkIndex;
				chunkFlags.resize(hMesh.partCount(), 0);
			}
			else
			{
				hMesh.removePart(newPartIndex);
			}
		}
#endif

		localProgressListener.completeSubtask();
	}
	faceBSP->release();

	return !canceled;
}
+
+bool createChippedMesh
+(
+ ExplicitHierarchicalMesh& iHMesh,
+ const nvidia::MeshProcessingParameters& meshProcessingParams,
+ const nvidia::FractureCutoutDesc& desc,
+ const nvidia::CutoutSet& iCutoutSetImpl,
+ const nvidia::FractureSliceDesc& sliceDesc,
+ const nvidia::FractureVoronoiDesc& voronoiDesc,
+ const CollisionDesc& collisionDesc,
+ uint32_t randomSeed,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel
+)
+{
+ ExplicitHierarchicalMeshImpl& hMesh = *(ExplicitHierarchicalMeshImpl*)&iHMesh;
+
+ if (hMesh.partCount() == 0)
+ {
+ return false;
+ }
+
+ outputMessage("Chipping...");
+ progressListener.setProgress(0);
+
+ // Save state if cancel != NULL
+ physx::PxFileBuf* save = NULL;
+ class NullEmbedding : public ExplicitHierarchicalMesh::Embedding
+ {
+ void serialize(physx::PxFileBuf& stream, Embedding::DataType type) const
+ {
+ (void)stream;
+ (void)type;
+ }
+ void deserialize(physx::PxFileBuf& stream, Embedding::DataType type, uint32_t version)
+ {
+ (void)stream;
+ (void)type;
+ (void)version;
+ }
+ } embedding;
+ if (cancel != NULL)
+ {
+ save = nvidia::GetApexSDK()->createMemoryWriteStream();
+ if (save != NULL)
+ {
+ hMesh.serialize(*save, embedding);
+ }
+ }
+
+ hMesh.buildCollisionGeometryForPart(0, getVolumeDesc(collisionDesc, 0));
+
+ userRnd.m_rnd.setSeed(randomSeed);
+
+ if (hMesh.mParts[0]->mMeshBSP->getType() != ApexCSG::BSPType::Nontrivial)
+ {
+ outputMessage("Building mesh BSP...");
+ progressListener.setProgress(0);
+ hMesh.calculateMeshBSP(randomSeed, &progressListener, &meshProcessingParams.microgridSize, meshProcessingParams.meshMode);
+ outputMessage("Mesh BSP completed.");
+ userRnd.m_rnd.setSeed(randomSeed);
+ }
+
+ gIslandGeneration = meshProcessingParams.islandGeneration;
+ gMicrogridSize = meshProcessingParams.microgridSize;
+ gVerbosity = meshProcessingParams.verbosity;
+
+ if (hMesh.mParts[0]->mBounds.isEmpty())
+ {
+ return false; // Done, nothing in mesh
+ }
+
+ hMesh.clear(true);
+
+ for (int i = 0; i < FractureCutoutDesc::DirectionCount; ++i)
+ {
+ if ((desc.directions >> i) & 1)
+ {
+ hMesh.mSubmeshData.resize(PxMax(hMesh.mRootSubmeshCount, desc.cutoutParameters[i].materialDesc.interiorSubmeshIndex + 1));
+ }
+ }
+ switch (desc.chunkFracturingMethod)
+ {
+ case FractureCutoutDesc::SliceFractureCutoutChunks:
+ for (int i = 0; i < 3; ++i)
+ {
+ hMesh.mSubmeshData.resize(PxMax(hMesh.mRootSubmeshCount, sliceDesc.materialDesc[i].interiorSubmeshIndex + 1));
+ }
+ break;
+ case FractureCutoutDesc::VoronoiFractureCutoutChunks:
+ hMesh.mSubmeshData.resize(PxMax(hMesh.mRootSubmeshCount, voronoiDesc.materialDesc.interiorSubmeshIndex + 1));
+ break;
+ }
+
+ // Count directions
+ uint32_t directionCount = 0;
+ uint32_t directions = desc.directions;
+ while (directions)
+ {
+ directions = (directions - 1)&directions;
+ ++directionCount;
+ }
+
+ if (directionCount == 0 && desc.userDefinedDirection.isZero()) // directions = 0 is the way we invoke the user-supplied normal "UV-based" cutout fracturing
+ {
+ return true; // Done, no split directions
+ }
+
+ // Validate direction ordering
+ bool dirUsed[FractureCutoutDesc::DirectionCount];
+ memset(dirUsed, 0, sizeof(dirUsed) / sizeof(dirUsed[0]));
+ for (uint32_t dirIndex = 0; dirIndex < FractureCutoutDesc::DirectionCount; ++dirIndex)
+ {
+ // The direction must be one found in FractureCutoutDesc::Directions
+ // and must not be used twice, if it is enabled
+ if ((directions & desc.directionOrder[dirIndex]) &&
+ (!shdfnd::isPowerOfTwo(desc.directionOrder[dirIndex]) ||
+ desc.directionOrder[dirIndex] <= 0 ||
+ desc.directionOrder[dirIndex] > FractureCutoutDesc::PositiveZ ||
+ dirUsed[lowestSetBit(desc.directionOrder[dirIndex])]))
+ {
+ outputMessage("Invalid direction ordering, each direction may be used just once, "
+ "and must correspond to a direction defined in FractureCutoutDesc::Directions.",
+ physx::PxErrorCode::eINTERNAL_ERROR);
+ return false;
+ }
+ dirUsed[dirIndex] = true;
+ }
+
+ nvidia::HierarchicalProgressListener localProgressListener(PxMax((int32_t)directionCount, 1), &progressListener);
+
+ // Core starts as original mesh
+ uint32_t corePartIndex = hMesh.addPart();
+ uint32_t coreChunkIndex = hMesh.addChunk();
+ hMesh.mParts[corePartIndex]->mMesh = hMesh.mParts[0]->mMesh;
+ hMesh.buildMeshBounds(0);
+ hMesh.mChunks[coreChunkIndex]->mParentIndex = 0;
+ hMesh.mChunks[coreChunkIndex]->mPartIndex = (int32_t)corePartIndex;
+
+ ApexCSG::IApexBSP* coreBSP = createBSP(hMesh.mBSPMemCache);
+ coreBSP->copy(*hMesh.mParts[0]->mMeshBSP);
+
+ physx::Array<physx::PxPlane> faceTrimPlanes;
+
+ const physx::PxBounds3& worldBounds = hMesh.mParts[0]->mBounds;
+ const physx::PxVec3& extents = worldBounds.getExtents();
+ const physx::PxVec3& center = worldBounds.getCenter();
+
+ SliceParameters* sliceParametersAtDepth = (SliceParameters*)PxAlloca(sizeof(SliceParameters) * sliceDesc.maxDepth);
+
+ bool canceled = false;
+ bool stop = false;
+ for (uint32_t dirNum = 0; dirNum < FractureCutoutDesc::DirectionCount && !stop && !canceled; ++dirNum)
+ {
+ const uint32_t sliceDirIndex = lowestSetBit(desc.directionOrder[dirNum]);
+ uint32_t sliceAxisNum, sliceSignNum;
+ getCutoutSliceAxisAndSign(sliceAxisNum, sliceSignNum, sliceDirIndex);
+ {
+ if ((desc.directions >> sliceDirIndex) & 1)
+ {
+ uint32_t sliceAxes[3];
+ generateSliceAxes(sliceAxes, sliceAxisNum);
+
+ localProgressListener.setSubtaskWork(1);
+
+ physx::PxPlane facePlane;
+ facePlane.n = physx::PxVec3(0, 0, 0);
+ facePlane.n[sliceAxisNum] = sliceSignNum ? -1.0f : 1.0f;
+ facePlane.d = -(facePlane.n[sliceAxisNum] * center[sliceAxisNum] + extents[sliceAxisNum]); // coincides with depth = 0
+
+ bool invertX;
+ const PxMat44 cutoutTM = createCutoutFrame(center, extents, sliceAxes, sliceSignNum, desc, invertX);
+
+ // Tiling bounds
+ const float mapWidth = cutoutTM.column0.magnitude()*iCutoutSetImpl.getDimensions()[0];
+ const float mapXLow = cutoutTM.getPosition()[sliceAxes[0]] - ((invertX != desc.cutoutWidthInvert[sliceDirIndex])? mapWidth : 0.0f);
+ const float mapHeight = cutoutTM.column1.magnitude()*iCutoutSetImpl.getDimensions()[1];
+ const float mapYLow = cutoutTM.getPosition()[sliceAxes[1]] - (desc.cutoutHeightInvert[sliceDirIndex] ? mapHeight : 0.0f);
+
+ physx::PxBounds3 localBounds;
+ for (unsigned i = 0; i < 3; ++i)
+ {
+ localBounds.minimum[i] = worldBounds.minimum[sliceAxes[i]];
+ localBounds.maximum[i] = worldBounds.maximum[sliceAxes[i]];
+ }
+
+ // Slice desc, if needed
+ nvidia::FractureSliceDesc cutoutSliceDesc;
+ // Create a sliceDesc based off of the GUI slice desc's X and Y components, applied to the
+ // two axes appropriate for this cutout direction.
+ cutoutSliceDesc = sliceDesc;
+ cutoutSliceDesc.sliceParameters = sliceParametersAtDepth;
+ for (unsigned depth = 0; depth < sliceDesc.maxDepth; ++depth)
+ {
+ cutoutSliceDesc.sliceParameters[depth] = sliceDesc.sliceParameters[depth];
+ }
+ for (uint32_t axisN = 0; axisN < 3; ++axisN)
+ {
+ cutoutSliceDesc.targetProportions[sliceAxes[axisN]] = sliceDesc.targetProportions[axisN];
+ for (uint32_t depth = 0; depth < sliceDesc.maxDepth; ++depth)
+ {
+ cutoutSliceDesc.sliceParameters[depth].splitsPerPass[sliceAxes[axisN]] = sliceDesc.sliceParameters[depth].splitsPerPass[axisN];
+ cutoutSliceDesc.sliceParameters[depth].linearVariation[sliceAxes[axisN]] = sliceDesc.sliceParameters[depth].linearVariation[axisN];
+ cutoutSliceDesc.sliceParameters[depth].angularVariation[sliceAxes[axisN]] = sliceDesc.sliceParameters[depth].angularVariation[axisN];
+ cutoutSliceDesc.sliceParameters[depth].noise[sliceAxes[axisN]] = sliceDesc.sliceParameters[depth].noise[axisN];
+ }
+ }
+
+ canceled = !cutoutFace(hMesh, faceTrimPlanes, coreBSP, coreChunkIndex, desc, desc.cutoutParameters[sliceDirIndex].backfaceNoise, desc.cutoutParameters[sliceDirIndex].edgeNoise,
+ desc.cutoutParameters[sliceDirIndex].materialDesc, (int32_t)sliceDirIndex, facePlane, iCutoutSetImpl, cutoutTM, mapXLow, mapYLow, localBounds,
+ desc.cutoutParameters[sliceDirIndex].depth, cutoutSliceDesc, voronoiDesc, collisionDesc, localProgressListener, stop, cancel);
+
+ localProgressListener.completeSubtask();
+ }
+ }
+ }
+
+ if (desc.directions == 0) // user-supplied normal "UV-based" cutout fracturing
+ {
+ localProgressListener.setSubtaskWork(1);
+
+ // Create cutout transform from user's supplied mapping and direction
+ const physx::PxVec3 userNormal = desc.userDefinedDirection.getNormalized();
+
+ PxMat44 cutoutTM;
+ cutoutTM.column0 = PxVec4(desc.userUVMapping.column0/(float)desc.cutoutSizeX, 0.f);
+ cutoutTM.column1 = PxVec4(desc.userUVMapping.column1/(float)desc.cutoutSizeY, 0.f);
+ cutoutTM.column2 = PxVec4(userNormal, 0.f);
+ cutoutTM.setPosition(desc.userUVMapping.column2);
+
+ // Also create a local frame to get the local bounds for the mesh
+ const physx::PxMat33 globalToLocal = physx::PxMat33(desc.userUVMapping.column0.getNormalized(), desc.userUVMapping.column1.getNormalized(), userNormal).getTranspose();
+
+ physx::Array<nvidia::ExplicitRenderTriangle>& mesh = hMesh.mParts[0]->mMesh;
+ physx::PxBounds3 localBounds;
+ localBounds.setEmpty();
+ for (uint32_t i = 0; i < mesh.size(); ++i)
+ {
+ nvidia::ExplicitRenderTriangle& tri = mesh[i];
+ for (int v = 0; v < 3; ++v)
+ {
+ localBounds.include(globalToLocal*tri.vertices[v].position);
+ }
+ }
+
+ physx::PxPlane facePlane;
+ facePlane.n = userNormal;
+ facePlane.d = -localBounds.maximum[2]; // coincides with depth = 0
+
+ // Tiling bounds
+ const physx::PxVec3 localOrigin = globalToLocal*cutoutTM.getPosition();
+ const float mapXLow = localOrigin[0];
+ const float mapYLow = localOrigin[1];
+
+ canceled = !cutoutFace(hMesh, faceTrimPlanes, coreBSP, coreChunkIndex, desc, desc.userDefinedCutoutParameters.backfaceNoise, desc.userDefinedCutoutParameters.edgeNoise,
+ desc.userDefinedCutoutParameters.materialDesc, 6, facePlane, iCutoutSetImpl, cutoutTM, mapXLow, mapYLow, localBounds, desc.userDefinedCutoutParameters.depth,
+ sliceDesc, voronoiDesc, collisionDesc, localProgressListener, stop, cancel);
+
+ localProgressListener.completeSubtask();
+ }
+
+ if (!canceled && coreChunkIndex != 0)
+ {
+ coreBSP->toMesh(hMesh.mParts[corePartIndex]->mMesh);
+ if (hMesh.mParts[corePartIndex]->mMesh.size() != 0)
+ {
+ hMesh.mParts[corePartIndex]->mMeshBSP->copy(*coreBSP);
+ hMesh.buildCollisionGeometryForPart(coreChunkIndex, getVolumeDesc(collisionDesc, hMesh.depth(coreChunkIndex)));
+ for (uint32_t i = 0; i < faceTrimPlanes.size(); ++i)
+ {
+ for (uint32_t hullIndex = 0; hullIndex < hMesh.mParts[corePartIndex]->mCollision.size(); ++hullIndex)
+ {
+ ConvexHullImpl& hull = hMesh.mParts[corePartIndex]->mCollision[hullIndex]->impl;
+ hull.intersectPlaneSide(faceTrimPlanes[i]);
+ }
+ }
+ }
+ else
+ {
+ // Remove core mesh and chunk
+ if (corePartIndex < hMesh.mParts.size())
+ {
+ hMesh.removePart(corePartIndex);
+ }
+ if (coreChunkIndex < hMesh.mChunks.size())
+ {
+ hMesh.removeChunk(coreChunkIndex);
+ }
+ coreChunkIndex = 0xFFFFFFFF;
+ corePartIndex = 0xFFFFFFFF;
+ }
+ }
+
+ coreBSP->release();
+
+ // Restore if canceled
+ if (canceled && save != NULL)
+ {
+ uint32_t len;
+ const void* mem = nvidia::GetApexSDK()->getMemoryWriteBuffer(*save, len);
+ physx::PxFileBuf* load = nvidia::GetApexSDK()->createMemoryReadStream(mem, len);
+ if (load != NULL)
+ {
+ hMesh.deserialize(*load, embedding);
+ nvidia::GetApexSDK()->releaseMemoryReadStream(*load);
+ }
+ }
+
+ if (save != NULL)
+ {
+ nvidia::GetApexSDK()->releaseMemoryReadStream(*save);
+ }
+
+ if (canceled)
+ {
+ return false;
+ }
+
+ if (meshProcessingParams.removeTJunctions)
+ {
+ MeshProcessor meshProcessor;
+ for (uint32_t i = 0; i < hMesh.partCount(); ++i)
+ {
+ meshProcessor.setMesh(hMesh.mParts[i]->mMesh, NULL, 0, 0.0001f*extents.magnitude());
+ meshProcessor.removeTJunctions();
+ }
+ }
+
+ hMesh.sortChunks();
+
+ hMesh.createPartSurfaceNormals();
+
+ if (desc.instancingMode == FractureCutoutDesc::InstanceAllChunks)
+ {
+ for (uint32_t i = 0; i < hMesh.chunkCount(); ++i)
+ {
+ hMesh.mChunks[i]->mFlags |= nvidia::apex::DestructibleAsset::ChunkIsInstanced;
+ }
+ }
+
+ outputMessage("chipping completed.");
+
+ return true;
+}
+
+class VoronoiMeshSplitter : public MeshSplitter
+{
+private:
+ VoronoiMeshSplitter& operator=(const VoronoiMeshSplitter&);
+
+public:
+ VoronoiMeshSplitter(const FractureVoronoiDesc& desc) : mDesc(desc)
+ {
+ }
+
+ bool validate(ExplicitHierarchicalMeshImpl& hMesh)
+ {
+ if (hMesh.chunkCount() == 0)
+ {
+ return false;
+ }
+
+ if (mDesc.siteCount == 0)
+ {
+ return false;
+ }
+
+ return true;
+ }
+
+ void initialize(ExplicitHierarchicalMeshImpl& hMesh)
+ {
+ hMesh.mSubmeshData.resize(PxMax(hMesh.mRootSubmeshCount, mDesc.materialDesc.interiorSubmeshIndex + 1));
+
+ // Need to split out DM parameters
+// if (mDesc.useDisplacementMaps)
+// {
+// hMesh.initializeDisplacementMapVolume(mDesc);
+// }
+ }
+
+ bool process
+ (
+ ExplicitHierarchicalMeshImpl& hMesh,
+ uint32_t chunkIndex,
+ const ApexCSG::IApexBSP& chunkBSP,
+ const CollisionDesc& collisionDesc,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel
+ )
+ {
+ return voronoiSplitChunkInternal(hMesh, chunkIndex, chunkBSP, mDesc, collisionDesc, progressListener, cancel);
+ }
+
+ bool finalize(ExplicitHierarchicalMeshImpl& hMesh)
+ {
+ if (mDesc.instanceChunks)
+ {
+ for (uint32_t i = 0; i < hMesh.partCount(); ++i)
+ {
+ hMesh.mChunks[i]->mFlags |= nvidia::apex::DestructibleAsset::ChunkIsInstanced;
+ }
+ }
+
+ return true;
+ }
+
+protected:
+ const FractureVoronoiDesc& mDesc;
+};
+
+bool createVoronoiSplitMesh
+(
+ ExplicitHierarchicalMesh& iHMesh,
+ ExplicitHierarchicalMesh& iHMeshCore,
+ bool exportCoreMesh,
+ int32_t coreMeshImprintSubmeshIndex, // If this is < 0, use the core mesh materials (was applyCoreMeshMaterialToNeighborChunks). Otherwise, use the given submesh.
+ const MeshProcessingParameters& meshProcessingParams,
+ const FractureVoronoiDesc& desc,
+ const CollisionDesc& collisionDesc,
+ uint32_t randomSeed,
+ nvidia::IProgressListener& progressListener,
+ volatile bool* cancel
+)
+{
+ VoronoiMeshSplitter splitter(desc);
+
+ return splitMeshInternal(
+ iHMesh,
+ iHMeshCore,
+ exportCoreMesh,
+ coreMeshImprintSubmeshIndex,
+ meshProcessingParams,
+ splitter,
+ collisionDesc,
+ randomSeed,
+ progressListener,
+ cancel);
+}
+
+bool voronoiSplitChunk
+(
+ ExplicitHierarchicalMesh& iHMesh,
+ uint32_t chunkIndex,
+ const FractureTools::MeshProcessingParameters& meshProcessingParams,
+ const FractureTools::FractureVoronoiDesc& desc,
+ const CollisionDesc& collisionDesc,
+ uint32_t* randomSeed,
+ IProgressListener& progressListener,
+ volatile bool* cancel
+)
+{
+ VoronoiMeshSplitter splitter(desc);
+
+ return splitChunkInternal(iHMesh, chunkIndex, meshProcessingParams, splitter, collisionDesc, randomSeed, progressListener, cancel);
+}
+
+uint32_t createVoronoiSitesInsideMesh
+(
+ ExplicitHierarchicalMesh& iHMesh,
+ physx::PxVec3* siteBuffer,
+ uint32_t* siteChunkIndices,
+ uint32_t siteCount,
+ uint32_t* randomSeed,
+ uint32_t* microgridSize,
+ BSPOpenMode::Enum meshMode,
+ IProgressListener& progressListener,
+ uint32_t chunkIndex
+)
+{
+ ExplicitHierarchicalMeshImpl& hMesh = *(ExplicitHierarchicalMeshImpl*)&iHMesh;
+
+ physx::Array<uint32_t> chunkList;
+
+ if (hMesh.mChunks.size() == 0)
+ {
+ return 0;
+ }
+
+ if (chunkIndex >= hMesh.chunkCount())
+ {
+ // Find root-depth chunks
+ for (uint32_t chunkIndex = 0; chunkIndex < hMesh.chunkCount(); ++chunkIndex)
+ {
+ if (hMesh.mChunks[chunkIndex]->isRootLeafChunk())
+ {
+ chunkList.pushBack(chunkIndex);
+ }
+ }
+
+ if (chunkList.size() > 0)
+ {
+ return createVoronoiSitesInsideMeshInternal(hMesh, &chunkList[0], chunkList.size(), siteBuffer, siteChunkIndices, siteCount, randomSeed, microgridSize, meshMode, progressListener);
+ }
+
+ return 0; // This means we didn't find a root leaf chunk
+ }
+
+ return createVoronoiSitesInsideMeshInternal(hMesh, &chunkIndex, 1, siteBuffer, siteChunkIndices, siteCount, randomSeed, microgridSize, meshMode, progressListener);
+}
+
+// Defining these structs here, so as not to offend gnu's sensibilities
// Entry in the area-weighted triangle sampling table built by createScatterMeshSites.
struct TriangleData
{
	uint16_t chunkIndex;			// Chunk which owns the triangle
	physx::PxVec3 triangleNormal;	// Unit normal of the triangle (normalized by the table builder, which uses the returned magnitude as the area weight)
	float summedAreaWeight;			// Cumulative weight; normalized to [0,1] so a uniform random value picks a triangle with probability proportional to its weight
	const nvidia::ExplicitRenderTriangle* triangle;	// The referenced interior triangle
};
+
// One accepted scatter-mesh placement, produced by createScatterMeshSites.
struct InstanceInfo
{
	uint8_t meshIndex;	// Index into the scatter mesh asset array (asset count is capped at 255)
	int32_t chunkIndex; // Using a int32_t so that the createIndexStartLookup can do its thing
	physx::PxMat44 relativeTransform;	// Instance placement transform

	// Orders instances by owning chunk, making per-chunk runs contiguous for the start-index lookup
	struct ChunkIndexLessThan
	{
		PX_INLINE bool operator()(const InstanceInfo& x, const InstanceInfo& y) const
		{
			return x.chunkIndex < y.chunkIndex;
		}
	};
};
+
/**
	Chooses placements for scatter meshes on the interior surfaces of the given chunks.

	For each scatter mesh asset, a random instance count in [minCount, maxCount] is chosen.
	Instances are placed at area-weighted random positions on interior triangles of the
	target chunks, with log-uniform random scale in [minScale, maxScale] and a random
	tilt of up to maxAngle (degrees) away from the triangle normal.  A placement is
	accepted only if the scatter mesh's convex hull lies entirely inside the parent
	chunk's mesh BSP (up to 1000 trials per instance).

	Output buffers (supplied by the caller):
	\param meshIndices						per-instance scatter mesh asset index
	\param relativeTransforms				per-instance placement transform
	\param chunkMeshStarts					per-chunk start index into the instance arrays; must hold chunkCount()+1 entries
	\param scatterMeshInstancesBufferSize	capacity of meshIndices/relativeTransforms

	Inputs:
	\param iHMesh				the fractured mesh
	\param targetChunkCount		number of entries in targetChunkIndices
	\param targetChunkIndices	chunks eligible to receive scatter meshes
	\param randomSeed			optional seed for deterministic output
	\param scatterMeshAssetCount	number of scatter mesh assets (capped at 255 so the index fits a byte)
	\param scatterMeshAssets	the scatter mesh render assets
	\param minCount,maxCount	per-asset instance count range
	\param minScales,maxScales	per-asset scale range (sampled log-uniformly)
	\param maxAngles			per-asset maximum tilt angle, in degrees

	\return the number of instances written
*/
uint32_t createScatterMeshSites
(
	uint8_t* meshIndices,
	physx::PxMat44* relativeTransforms,
	uint32_t* chunkMeshStarts,
	uint32_t scatterMeshInstancesBufferSize,
	ExplicitHierarchicalMesh& iHMesh,
	uint32_t targetChunkCount,
	const uint16_t* targetChunkIndices,
	uint32_t* randomSeed,
	uint32_t scatterMeshAssetCount,
	nvidia::RenderMeshAsset** scatterMeshAssets,
	const uint32_t* minCount,
	const uint32_t* maxCount,
	const float* minScales,
	const float* maxScales,
	const float* maxAngles
)
{
	ExplicitHierarchicalMeshImpl& hMesh = *(ExplicitHierarchicalMeshImpl*)&iHMesh;

	// Cap asset count to 1-byte range
	if (scatterMeshAssetCount > 255)
	{
		scatterMeshAssetCount = 255;
	}

	// Set random seed if requested
	if (randomSeed != NULL)
	{
		userRnd.m_rnd.setSeed(*randomSeed);
	}

	// Counts for each scatter mesh asset
	physx::Array<uint32_t> counts(scatterMeshAssetCount, 0);

	// Create convex hulls for each scatter mesh and add up valid weights
	physx::Array<physx::PxVec3> vertices; // Reusing this array for convex hull building
	physx::Array<PartConvexHullProxy> hulls(scatterMeshAssetCount);
	uint32_t scatterMeshInstancesRequested = 0;
	for (uint32_t scatterMeshAssetIndex = 0; scatterMeshAssetIndex < scatterMeshAssetCount; ++scatterMeshAssetIndex)
	{
		hulls[scatterMeshAssetIndex].impl.setEmpty();
		const nvidia::RenderMeshAsset* rma = scatterMeshAssets[scatterMeshAssetIndex];
		if (rma != NULL)
		{
			// Gather all vertex positions from every submesh of the asset
			vertices.resize(0);
			for (uint32_t submeshIndex = 0; submeshIndex < rma->getSubmeshCount(); ++submeshIndex)
			{
				const nvidia::RenderSubmesh& submesh = rma->getSubmesh(submeshIndex);
				const nvidia::VertexBuffer& vertexBuffer = submesh.getVertexBuffer();
				if (vertexBuffer.getVertexCount() > 0)
				{
					const nvidia::VertexFormat& vertexFormat = vertexBuffer.getFormat();
					const int32_t posBufferIndex = vertexFormat.getBufferIndexFromID(vertexFormat.getSemanticID(nvidia::RenderVertexSemantic::POSITION));
					const uint32_t oldVertexCount = vertices.size();
					vertices.resize(oldVertexCount + vertexBuffer.getVertexCount());
					if (!vertexBuffer.getBufferData(&vertices[oldVertexCount], nvidia::RenderDataFormat::FLOAT3, sizeof(physx::PxVec3), (uint32_t)posBufferIndex, 0, vertexBuffer.getVertexCount()))
					{
						vertices.resize(oldVertexCount); // Operation failed, revert vertex array size
					}
				}
			}
			if (vertices.size() > 0)
			{
				// Build a 6-DOP around the asset's vertices; used later for the containment test
				physx::Array<physx::PxVec3> directions;
				ConvexHullImpl::createKDOPDirections(directions, nvidia::ConvexHullMethod::USE_6_DOP);
				hulls[scatterMeshAssetIndex].impl.buildKDOP(&vertices[0], vertices.size(), sizeof(vertices[0]), &directions[0], directions.size());
				if (!hulls[scatterMeshAssetIndex].impl.isEmpty())
				{
					// Pick this asset's requested instance count in [minCount, maxCount]
					counts[scatterMeshAssetIndex] = (uint32_t)userRnd.m_rnd.getScaled((float)minCount[scatterMeshAssetIndex], (float)maxCount[scatterMeshAssetIndex] + 1.0f);
					scatterMeshInstancesRequested += counts[scatterMeshAssetIndex];
				}
			}
		}
	}

	// Cap at buffer size
	if (scatterMeshInstancesRequested > scatterMeshInstancesBufferSize)
	{
		scatterMeshInstancesRequested = scatterMeshInstancesBufferSize;
	}

	// Return if no instances requested
	if (scatterMeshInstancesRequested == 0)
	{
		return 0;
	}

	// Count the interior triangles in all of the target chunks, and add up their areas
	// Build an area-weighted lookup table for the various triangles (also reference the chunks)
	physx::Array<TriangleData> triangleTable;
	float summedAreaWeight = 0.0f;
	for (uint32_t chunkNum = 0; chunkNum < targetChunkCount; ++chunkNum)
	{
		const uint16_t chunkIndex = targetChunkIndices[chunkNum];
		if (chunkIndex >= hMesh.chunkCount())
		{
			continue;	// Ignore out-of-range chunk indices
		}
		const uint32_t partIndex = (uint32_t)*hMesh.partIndex(chunkIndex);
		const nvidia::ExplicitRenderTriangle* triangles = hMesh.meshTriangles(partIndex);
		const uint32_t triangleCount = hMesh.meshTriangleCount(partIndex);
		for (uint32_t triangleIndex = 0; triangleIndex < triangleCount; ++triangleIndex)
		{
			const ExplicitRenderTriangle& triangle = triangles[triangleIndex];
			if (triangle.extraDataIndex != 0xFFFFFFFF) // See if this is an interior triangle
			{

				TriangleData& triangleData = triangleTable.insert();
				triangleData.chunkIndex = chunkIndex;
				triangleData.triangleNormal = triangle.calculateNormal();
				// normalize() returns the pre-normalization magnitude, used as the area weight
				summedAreaWeight += triangleData.triangleNormal.normalize();
				triangleData.summedAreaWeight = summedAreaWeight;
				triangleData.triangle = &triangle;
			}
		}
	}

	// Normalize summed area table
	if (summedAreaWeight <= 0.0f)
	{
		return 0; // Non-normalizable (also implies the table is non-empty below)
	}
	const float recipSummedAreaWeight = 1.0f/summedAreaWeight;
	for (uint32_t triangleNum = 0; triangleNum < triangleTable.size()-1; ++triangleNum)
	{
		triangleTable[triangleNum].summedAreaWeight *= recipSummedAreaWeight;
	}
	triangleTable[triangleTable.size()-1].summedAreaWeight = 1.0f; // Just to be sure

	// Reserve instance info
	physx::Array<InstanceInfo> instanceInfo;
	instanceInfo.reserve(scatterMeshInstancesRequested);

	// Add scatter meshes
	ApexCSG::IApexBSP* hullBSP = createBSP(hMesh.mBSPMemCache);
	if (hullBSP == NULL)
	{
		return 0;
	}

	physx::Array<physx::PxPlane> planes; // Reusing this array for bsp building

	for (uint32_t scatterMeshAssetIndex = 0; scatterMeshAssetIndex < scatterMeshAssetCount && instanceInfo.size() < scatterMeshInstancesRequested; ++scatterMeshAssetIndex)
	{
		bool success = true;
		for (uint32_t count = 0; success && count < counts[scatterMeshAssetIndex] && instanceInfo.size() < scatterMeshInstancesRequested; ++count)
		{
			// Rejection sampling: try up to 1000 random placements before giving up on this asset
			success = false;
			for (uint32_t trial = 0; !success && trial < 1000; ++trial)
			{
				// Pick triangle (area-weighted, via the cumulative weight table)
				const TriangleData* triangleData = NULL;
				const float unitRndForTriangle = userRnd.m_rnd.getUnit();
				for (uint32_t triangleNum = 0; triangleNum < triangleTable.size(); ++triangleNum)
				{
					if (triangleTable[triangleNum].summedAreaWeight > unitRndForTriangle)
					{
						triangleData = &triangleTable[triangleNum];
						break;
					}
				}
				if (triangleData == NULL)
				{
					continue;
				}

				// pick scale, angle, and position and build transform
				// Scale is sampled log-uniformly; angle is converted from degrees to radians
				const float scale = physx::PxExp(userRnd.m_rnd.getScaled(physx::PxLog(minScales[scatterMeshAssetIndex]), physx::PxLog(maxScales[scatterMeshAssetIndex])));
				const float angle = (physx::PxPi/180.0f)*userRnd.m_rnd.getScaled(0.0f, maxAngles[scatterMeshAssetIndex]);
				// random position in triangle
				const Vertex* vertices = triangleData->triangle->vertices;
				const physx::PxVec3 position = randomPositionInTriangle(vertices[0].position, vertices[1].position, vertices[2].position, userRnd.m_rnd);
				physx::PxVec3 zAxis = triangleData->triangleNormal;
				// Rotate z axis into arbitrary vector in triangle plane
				physx::PxVec3 para = vertices[1].position - vertices[0].position;
				if (para.normalize() > 0.0f)
				{
					float cosPhi, sinPhi;
					physx::shdfnd::sincos(angle, sinPhi, cosPhi);
					zAxis = cosPhi*zAxis + sinPhi*para;
				}
				physx::PxMat44 tm = randomRotationMatrix(zAxis, userRnd.m_rnd);
				tm.setPosition(position);
				tm.scale(physx::PxVec4(physx::PxVec3(scale), 1.0f));

				const int32_t parentIndex = *hMesh.parentIndex(triangleData->chunkIndex);
				if (parentIndex >= 0)
				{
					const uint32_t parentPartIndex = (uint32_t)*hMesh.partIndex((uint32_t)parentIndex);
					ApexCSG::IApexBSP* parentPartBSP = hMesh.mParts[parentPartIndex]->mMeshBSP;
					if (parentPartBSP != NULL)
					{
						// Create BSP from hull and transform
						PartConvexHullProxy& hull = hulls[scatterMeshAssetIndex];
						planes.resize(hull.impl.getPlaneCount());
						for (uint32_t planeIndex = 0; planeIndex < hull.impl.getPlaneCount(); ++planeIndex)
						{
							planes[planeIndex] = hull.impl.getPlane(planeIndex);
						}
						hullBSP->fromConvexPolyhedron(&planes[0], planes.size(), parentPartBSP->getInternalTransform());
						hullBSP->copy(*hullBSP, tm);

						// Now combine with chunk parent bsp, and see if the mesh hull bsp lies within the parent bsp
						hullBSP->combine(*hMesh.mParts[parentPartIndex]->mMeshBSP);
						hullBSP->op(*hullBSP, ApexCSG::Operation::A_Minus_B);
						if (hullBSP->getType() == ApexCSG::BSPType::Empty_Set) // True if the hull lies entirely within the parent chunk
						{
							success = true;
							InstanceInfo& info = instanceInfo.insert();
							info.meshIndex = (uint8_t)scatterMeshAssetIndex;
							info.chunkIndex = (int32_t)triangleData->chunkIndex;
							info.relativeTransform = tm;
						}
					}
				}
			}
		}
	}

	hullBSP->release();

	// Now sort the instance info by chunk index
	if (instanceInfo.size() > 1)
	{
		nvidia::sort<InstanceInfo, InstanceInfo::ChunkIndexLessThan>(instanceInfo.begin(), instanceInfo.size(), InstanceInfo::ChunkIndexLessThan());
	}

	// Write the info to the output arrays
	for (uint32_t instanceNum = 0; instanceNum < instanceInfo.size() && instanceNum < scatterMeshInstancesBufferSize; ++instanceNum) // Second condition instanceNum < scatterMeshInstancesBufferSize should not be necessary
	{
		const InstanceInfo& info = instanceInfo[instanceNum];
		meshIndices[instanceNum] = info.meshIndex;
		relativeTransforms[instanceNum] = info.relativeTransform;
	}

	// Finally create an indexed lookup
	if (instanceInfo.size() > 0)
	{
		physx::Array<uint32_t> lookup;
		createIndexStartLookup(lookup, 0, hMesh.chunkCount(), &instanceInfo[0].chunkIndex, instanceInfo.size(), sizeof(InstanceInfo));

		// .. and copy it into the output lookup table
		for (uint32_t chunkLookup = 0; chunkLookup <= hMesh.chunkCount(); ++chunkLookup) // <= is intentional
		{
			chunkMeshStarts[chunkLookup] = lookup[chunkLookup];
		}
	}

	return instanceInfo.size();
}
+
+PX_INLINE bool intersectPlanes(physx::PxVec3& pos, physx::PxVec3& dir, const physx::PxPlane& plane0, const physx::PxPlane& plane1)
+{
+ dir = plane0.n.cross(plane1.n);
+
+ if(dir.normalize() < PX_EPS_F32)
+ {
+ return false;
+ }
+
+ pos = physx::PxVec3(0.0f);
+
+ for (int iter = 3; iter--;)
+ {
+ // Project onto plane0:
+ pos = plane0.project(pos);
+
+ // Raycast to plane1:
+ const physx::PxVec3 b = dir.cross(plane0.n);
+ pos -= (plane1.distance(pos)/(b.dot(plane1.n)))*b;
+ }
+
+ return true;
+}
+
+PX_INLINE void renderConvex(nvidia::RenderDebugInterface& debugRender, const physx::PxPlane* planes, uint32_t planeCount, uint32_t color, float tolerance)
+{
+ RENDER_DEBUG_IFACE(&debugRender)->setCurrentColor(color);
+
+ physx::Array<physx::PxVec3> endpoints;
+
+ const float tol2 = tolerance*tolerance;
+
+ for (uint32_t i = 0; i < planeCount; ++i)
+ {
+ // We'll be drawing polygons in this plane
+ const physx::PxPlane& plane_i = planes[i];
+ endpoints.resize(0);
+ for (uint32_t j = 0; j < planeCount; ++j)
+ {
+ if (j == i)
+ {
+ continue;
+ }
+ const physx::PxPlane& plane_j = planes[j];
+ // Find potential edge from intersection if plane_i and plane_j
+ physx::PxVec3 orig;
+ physx::PxVec3 edgeDir;
+ if (!intersectPlanes(orig, edgeDir, plane_i, plane_j))
+ {
+ continue;
+ }
+ float minS = -PX_MAX_F32;
+ float maxS = PX_MAX_F32;
+ bool intersectionFound = true;
+ // Clip to planes
+ for (uint32_t k = 0; k < planeCount; ++k)
+ {
+ if (k == i || i == j)
+ {
+ continue;
+ }
+ const physx::PxPlane& plane_k = planes[k];
+ const float num = -plane_k.distance(orig);
+ const float den = edgeDir.dot(plane_k.n);
+ if (physx::PxAbs(den) > 10*PX_EPS_F32)
+ {
+ const float s = num/den;
+ if (den > 0.0f)
+ {
+ maxS = PxMin(maxS, s);
+ }
+ else
+ {
+ minS = PxMax(minS, s);
+ }
+ if (maxS <= minS)
+ {
+ intersectionFound = false;
+ break;
+ }
+ }
+ else
+ if (num < -tolerance)
+ {
+ intersectionFound = false;
+ break;
+ }
+ }
+ if (intersectionFound)
+ {
+ endpoints.pushBack(orig + minS * edgeDir);
+ endpoints.pushBack(orig + maxS * edgeDir);
+ }
+ }
+ if (endpoints.size() > 2)
+ {
+ physx::Array<physx::PxVec3> verts;
+ verts.pushBack(endpoints[endpoints.size()-2]);
+ verts.pushBack(endpoints[endpoints.size()-1]);
+ endpoints.popBack();
+ endpoints.popBack();
+ while (endpoints.size())
+ {
+ uint32_t closestN = 0;
+ float closestDist2 = PX_MAX_F32;
+ for (uint32_t n = 0; n < endpoints.size(); ++n)
+ {
+ const float dist2 = (endpoints[n] - verts[verts.size()-1]).magnitudeSquared();
+ if (dist2 < closestDist2)
+ {
+ closestDist2 = dist2;
+ closestN = n;
+ }
+ }
+ if ((endpoints[closestN^1] - verts[0]).magnitudeSquared() < tol2)
+ {
+ break;
+ }
+ verts.pushBack(endpoints[closestN^1]);
+ endpoints.replaceWithLast(closestN^1);
+ endpoints.replaceWithLast(closestN);
+ }
+ if (verts.size() > 2)
+ {
+ if (((verts[1]-verts[0]).cross(verts[2]-verts[0])).dot(plane_i.n) < 0.0f)
+ {
+ for (uint32_t n = verts.size()/2; n--;)
+ {
+ nvidia::swap(verts[n], verts[verts.size()-1-n]);
+ }
+ }
+ RENDER_DEBUG_IFACE(&debugRender)->debugPolygon(verts.size(), &verts[0]);
+ }
+ }
+ }
+}
+
+void visualizeVoronoiCells
+(
+ nvidia::RenderDebugInterface& debugRender,
+ const physx::PxVec3* sites,
+ uint32_t siteCount,
+ const uint32_t* cellColors,
+ uint32_t cellColorCount,
+ const physx::PxBounds3& bounds,
+ uint32_t cellIndex /* = 0xFFFFFFFF */
+)
+{
+ // Rendering tolerance
+ const float tolerance = 1.0e-5f*bounds.getDimensions().magnitude();
+
+ // Whether or not to use cellColors
+ const bool useCellColors = cellColors != NULL && cellColorCount > 0;
+
+ // Whether to draw a single cell or all cells
+ const bool drawSingleCell = cellIndex < siteCount;
+
+ // Create bound planes
+ physx::Array<physx::PxPlane> boundPlanes;
+ boundPlanes.reserve(6);
+ boundPlanes.pushBack(physx::PxPlane(-1.0f, 0.0f, 0.0f, bounds.minimum.x));
+ boundPlanes.pushBack(physx::PxPlane(1.0f, 0.0f, 0.0f, -bounds.maximum.x));
+ boundPlanes.pushBack(physx::PxPlane(0.0f, -1.0f, 0.0f, bounds.minimum.y));
+ boundPlanes.pushBack(physx::PxPlane(0.0f, 1.0f, 0.0f, -bounds.maximum.y));
+ boundPlanes.pushBack(physx::PxPlane(0.0f, 0.0f, -1.0f, bounds.minimum.z));
+ boundPlanes.pushBack(physx::PxPlane(0.0f, 0.0f, 1.0f, -bounds.maximum.z));
+
+ // Iterate over cells
+ for (VoronoiCellPlaneIterator i(sites, siteCount, boundPlanes.begin(), boundPlanes.size(), drawSingleCell ? cellIndex : 0); i.valid(); i.inc())
+ {
+ const uint32_t cellColor = useCellColors ? cellColors[i.cellIndex()%cellColorCount] : 0xFFFFFFFF;
+ renderConvex(debugRender, i.cellPlanes(), i.cellPlaneCount(), cellColor, tolerance);
+ if (drawSingleCell)
+ {
+ break;
+ }
+ }
+}
+
+bool buildSliceMesh
+(
+ nvidia::IntersectMesh& intersectMesh,
+ ExplicitHierarchicalMesh& referenceMesh,
+ const physx::PxPlane& slicePlane,
+ const FractureTools::NoiseParameters& noiseParameters,
+ uint32_t randomSeed
+)
+{
+ if( referenceMesh.chunkCount() == 0 )
+ {
+ return false;
+ }
+
+ ExplicitHierarchicalMeshImpl& hMesh = *(ExplicitHierarchicalMeshImpl*)&referenceMesh;
+
+ GridParameters gridParameters;
+ gridParameters.interiorSubmeshIndex = 0;
+ gridParameters.noise = noiseParameters;
+ const uint32_t partIndex = (uint32_t)hMesh.mChunks[0]->mPartIndex;
+ gridParameters.level0Mesh = &hMesh.mParts[partIndex]->mMesh;
+ physx::PxVec3 extents = hMesh.mParts[partIndex]->mBounds.getExtents();
+ gridParameters.sizeScale = physx::PxAbs(extents.x*slicePlane.n.x) + physx::PxAbs(extents.y*slicePlane.n.y) + physx::PxAbs(extents.z*slicePlane.n.z);
+ gridParameters.materialFrameIndex = hMesh.addMaterialFrame();
+ nvidia::MaterialFrame materialFrame = hMesh.getMaterialFrame(gridParameters.materialFrameIndex );
+ nvidia::FractureMaterialDesc materialDesc;
+ materialFrame.buildCoordinateSystemFromMaterialDesc(materialDesc, slicePlane);
+ materialFrame.mFractureMethod = nvidia::FractureMethod::Unknown; // This is only a slice preview
+ hMesh.setMaterialFrame(gridParameters.materialFrameIndex, materialFrame);
+ gridParameters.triangleFrame.setFlat(materialFrame.mCoordinateSystem, physx::PxVec2(1.0f), physx::PxVec2(0.0f));
+ gridParameters.forceGrid = true;
+ userRnd.m_rnd.setSeed(randomSeed);
+ buildIntersectMesh(intersectMesh, slicePlane, materialFrame, 0, &gridParameters);
+
+ return true;
+}
+
+} // namespace FractureTools
+
+namespace nvidia
+{
+namespace apex
+{
+
/**
	Builds collision volumes (convex hulls) for a triangle mesh according to
	desc.mHullMethod, with a fallback chain when a method produces nothing:
	CONVEX_DECOMPOSITION -> WRAP_GRAPHICS_MESH -> USE_26_DOP (or the requested
	k-DOP method directly).  The resulting hulls are then reduced to the
	vertex/edge/face budgets given in the descriptor.

	\param volumes			(output) collision hull proxies; resized as needed
	\param desc				hull method and reduction budgets
	\param vertices			mesh vertex positions
	\param vertexCount		number of vertices
	\param vertexByteStride	byte stride between consecutive vertex positions
	\param indices			triangle indices (3 per triangle)
	\param indexCount		number of indices
*/
void buildCollisionGeometry(physx::Array<PartConvexHullProxy*>& volumes, const CollisionVolumeDesc& desc,
                            const physx::PxVec3* vertices, uint32_t vertexCount, uint32_t vertexByteStride,
                            const uint32_t* indices, uint32_t indexCount)
{
	ConvexHullMethod::Enum hullMethod = desc.mHullMethod;

	// do { ... } while(0) is used as a break-out scope for the fallback chain
	do
	{
		if (hullMethod == nvidia::ConvexHullMethod::CONVEX_DECOMPOSITION)
		{
			resizeCollision(volumes, 0);

			CONVEX_DECOMPOSITION::ConvexDecomposition* decomposer = CONVEX_DECOMPOSITION::createConvexDecomposition();
			if (decomposer != NULL)
			{
				CONVEX_DECOMPOSITION::DecompDesc decompDesc;
				decompDesc.mCpercent = desc.mConcavityPercent;
				//TODO:JWR decompDesc.mPpercent = desc.mMergeThreshold;
				decompDesc.mDepth = desc.mRecursionDepth;

				decompDesc.mVcount = vertexCount;
				decompDesc.mVertices = (float*)vertices;
				decompDesc.mTcount = indexCount / 3;
				decompDesc.mIndices = indices;

				// One hull per decomposition result
				uint32_t hullCount = decomposer->performConvexDecomposition(decompDesc);
				resizeCollision(volumes, hullCount);
				for (uint32_t hullIndex = 0; hullIndex < hullCount; ++hullIndex)
				{
					CONVEX_DECOMPOSITION::ConvexResult* result = decomposer->getConvexResult(hullIndex,false);
					volumes[hullIndex]->buildFromPoints(result->mHullVertices, result->mHullVcount, 3 * sizeof(float));
					if (volumes[hullIndex]->impl.isEmpty())
					{
						// fallback: wrap this result's vertices in a 26-DOP instead
						physx::Array<physx::PxVec3> directions;
						ConvexHullImpl::createKDOPDirections(directions, nvidia::ConvexHullMethod::USE_26_DOP);
						volumes[hullIndex]->impl.buildKDOP(result->mHullVertices, result->mHullVcount, 3 * sizeof(float), directions.begin(), directions.size());
					}
				}
				decomposer->release();
			}

			if(volumes.size() > 0)
			{
				break;	// Decomposition succeeded
			}

			// fallback
			hullMethod = nvidia::ConvexHullMethod::WRAP_GRAPHICS_MESH;
		}

		// All remaining methods produce exactly one hull
		resizeCollision(volumes, 1);

		if (hullMethod == nvidia::ConvexHullMethod::WRAP_GRAPHICS_MESH)
		{
			volumes[0]->buildFromPoints(vertices, vertexCount, vertexByteStride);
			if (!volumes[0]->impl.isEmpty())
			{
				break;	// Wrap succeeded
			}

			// fallback
			hullMethod = nvidia::ConvexHullMethod::USE_26_DOP;
		}

		// k-DOP path: handles the requested DOP method, or the fallback above
		physx::Array<physx::PxVec3> directions;
		ConvexHullImpl::createKDOPDirections(directions, hullMethod);
		volumes[0]->impl.buildKDOP(vertices, vertexCount, vertexByteStride, directions.begin(), directions.size());
	} while(0);

	// Reduce hulls
	for (uint32_t hullIndex = 0; hullIndex < volumes.size(); ++hullIndex)
	{
		// First try uninflated, then try with inflation. This may find a better reduction
		volumes[hullIndex]->reduceHull(desc.mMaxVertexCount, desc.mMaxEdgeCount, desc.mMaxFaceCount, false);
		volumes[hullIndex]->reduceHull(desc.mMaxVertexCount, desc.mMaxEdgeCount, desc.mMaxFaceCount, true);
	}
}
+
+
+// Serialization of ExplicitSubmeshData
+
+
/**
	Writes an ExplicitSubmeshData to the stream: the material name, followed by
	every vertex-format field.  The field order here defines the file format and
	must match the corresponding deserialize() below.
*/
void serialize(physx::PxFileBuf& stream, const ExplicitSubmeshData& d)
{
	ApexSimpleString materialName(d.mMaterialName);
	apex::serialize(stream, materialName);
	stream << d.mVertexFormat.mWinding;
	stream << d.mVertexFormat.mHasStaticPositions;
	stream << d.mVertexFormat.mHasStaticNormals;
	stream << d.mVertexFormat.mHasStaticTangents;
	stream << d.mVertexFormat.mHasStaticBinormals;
	stream << d.mVertexFormat.mHasStaticColors;
	stream << d.mVertexFormat.mHasStaticSeparateBoneBuffer;
	stream << d.mVertexFormat.mHasStaticDisplacements;
	stream << d.mVertexFormat.mHasDynamicPositions;
	stream << d.mVertexFormat.mHasDynamicNormals;
	stream << d.mVertexFormat.mHasDynamicTangents;
	stream << d.mVertexFormat.mHasDynamicBinormals;
	stream << d.mVertexFormat.mHasDynamicColors;
	stream << d.mVertexFormat.mHasDynamicSeparateBoneBuffer;
	stream << d.mVertexFormat.mHasDynamicDisplacements;
	stream << d.mVertexFormat.mUVCount;
	stream << d.mVertexFormat.mBonesPerVertex;
}
+
/**
	Reads an ExplicitSubmeshData from the stream.

	\param apexVersion	ApexStreamVersion of the stream, gates field layout
	\param meshVersion	ExplicitHierarchicalMeshImpl version, gates the
						displacement fields added later

	The read order must mirror serialize() above.  For streams older than
	ApexStreamVersion::CleanupOfApexRenderMesh only the material name is read;
	the conversion of the legacy vertex-format layout is unimplemented
	(the #if 0 block below), so d.mVertexFormat is left unmodified in that case.
*/
void deserialize(physx::PxFileBuf& stream, uint32_t apexVersion, uint32_t meshVersion, ExplicitSubmeshData& d)
{
	// Material name is length-prefixed via ApexSimpleString; copy into the
	// fixed-size buffer with truncation-safe strlcpy.
	ApexSimpleString materialName;
	apex::deserialize(stream, apexVersion, materialName);
	nvidia::strlcpy(d.mMaterialName, ExplicitSubmeshData::MaterialNameBufferSize, materialName.c_str());

	if (apexVersion >= ApexStreamVersion::CleanupOfApexRenderMesh)
	{
		stream >> d.mVertexFormat.mWinding;
		stream >> d.mVertexFormat.mHasStaticPositions;
		stream >> d.mVertexFormat.mHasStaticNormals;
		stream >> d.mVertexFormat.mHasStaticTangents;
		stream >> d.mVertexFormat.mHasStaticBinormals;
		stream >> d.mVertexFormat.mHasStaticColors;
		stream >> d.mVertexFormat.mHasStaticSeparateBoneBuffer;
		// Displacement channels only exist in newer mesh streams
		if (meshVersion >= ExplicitHierarchicalMeshImpl::DisplacementData)
			stream >> d.mVertexFormat.mHasStaticDisplacements;
		stream >> d.mVertexFormat.mHasDynamicPositions;
		stream >> d.mVertexFormat.mHasDynamicNormals;
		stream >> d.mVertexFormat.mHasDynamicTangents;
		stream >> d.mVertexFormat.mHasDynamicBinormals;
		stream >> d.mVertexFormat.mHasDynamicColors;
		stream >> d.mVertexFormat.mHasDynamicSeparateBoneBuffer;
		if (meshVersion >= ExplicitHierarchicalMeshImpl::DisplacementData)
			stream >> d.mVertexFormat.mHasDynamicDisplacements;
		stream >> d.mVertexFormat.mUVCount;
		if (apexVersion < ApexStreamVersion::RemovedTextureTypeInformationFromVertexFormat)
		{
			// Dead data
			// (Per-UV texture type tags were removed from the format; consume
			// and discard them to keep the stream position correct.)
			uint32_t textureTypes[VertexFormat::MAX_UV_COUNT];
			for (uint32_t i = 0; i < VertexFormat::MAX_UV_COUNT; ++i)
			{
				stream >> textureTypes[i];
			}
		}
		stream >> d.mVertexFormat.mBonesPerVertex;
	}
	else
	{
		// NOTE(review): legacy-format conversion never implemented; the dead
		// code below also references an undefined `version` (would need to be
		// `apexVersion`) if it is ever revived.
#if 0 // BRG - to do, implement conversion
		bool hasPosition;
		bool hasNormal;
		bool hasTangent;
		bool hasBinormal;
		bool hasColor;
		uint32_t numBonesPerVertex;
		uint32_t uvCount;
		RenderCullMode::Enum winding = RenderCullMode::CLOCKWISE;

		// PH: assuming position and normal as the default dynamic flags
		uint32_t dynamicFlags = VertexFormatFlag::POSITION | VertexFormatFlag::NORMAL;

		if (version >= ApexStreamVersion::AddedRenderCullModeToRenderMeshAsset)
		{
			//stream.readBuffer( &winding, sizeof(winding) );
			stream >> winding;
		}
		if (version >= ApexStreamVersion::AddedDynamicVertexBufferField)
		{
			stream >> dynamicFlags;
		}
		if (version >= ApexStreamVersion::AddingTextureTypeInformationToVertexFormat)
		{
			stream >> hasPosition;
			stream >> hasNormal;
			stream >> hasTangent;
			stream >> hasBinormal;
			stream >> hasColor;
			if (version >= ApexStreamVersion::RenderMeshAssetRedesign)
			{
				stream >> numBonesPerVertex;
			}
			else
			{
				bool hasBoneIndex;
				stream >> hasBoneIndex;
				numBonesPerVertex = hasBoneIndex ? 1 : 0;
			}
			stream >> uvCount;
			if (version < ApexStreamVersion::RemovedTextureTypeInformationFromVertexFormat)
			{
				// Dead data
				uint32_t textureTypes[VertexFormat::MAX_UV_COUNT];
				for (uint32_t i = 0; i < VertexFormat::MAX_UV_COUNT; ++i)
				{
					stream >> textureTypes[i];
				}
			}
		}
		else
		{
			uint32_t data;
			stream >> data;
			hasPosition = (data & (1 << 8)) != 0;
			hasNormal = (data & (1 << 9)) != 0;
			hasTangent = (data & (1 << 10)) != 0;
			hasBinormal = (data & (1 << 11)) != 0;
			hasColor = (data & (1 << 12)) != 0;
			numBonesPerVertex = (data & (1 << 13)) != 0 ? 1 : 0;
			uvCount = data & 0xFF;
		}

		d.mVertexFormat.mWinding = winding;
		d.mVertexFormat.mHasStaticPositions = hasPosition;
		d.mVertexFormat.mHasStaticNormals = hasNormal;
		d.mVertexFormat.mHasStaticTangents = hasTangent;
		d.mVertexFormat.mHasStaticBinormals = hasBinormal;
		d.mVertexFormat.mHasStaticColors = hasColor;
		d.mVertexFormat.mHasStaticSeparateBoneBuffer = false;
		d.mVertexFormat.mHasDynamicPositions = (dynamicFlags & VertexFormatFlag::POSITION) != 0;
		d.mVertexFormat.mHasDynamicNormals = (dynamicFlags & VertexFormatFlag::NORMAL) != 0;
		d.mVertexFormat.mHasDynamicTangents = (dynamicFlags & VertexFormatFlag::TANGENT) != 0;
		d.mVertexFormat.mHasDynamicBinormals = (dynamicFlags & VertexFormatFlag::BINORMAL) != 0;
		d.mVertexFormat.mHasDynamicColors = (dynamicFlags & VertexFormatFlag::COLOR) != 0;
		d.mVertexFormat.mHasDynamicSeparateBoneBuffer = (dynamicFlags & VertexFormatFlag::SEPARATE_BONE_BUFFER) != 0;
		d.mVertexFormat.mUVCount = uvCount;
		d.mVertexFormat.mBonesPerVertex = numBonesPerVertex;

		if (version >= ApexStreamVersion::RenderMeshAssetRedesign)
		{
			uint32_t customBufferCount;
			stream >> customBufferCount;
			for (uint32_t i = 0; i < customBufferCount; i++)
			{
				uint32_t stringLength;
				stream >> stringLength;
				PX_ASSERT(stringLength < 254);
				char buf[256];
				stream.read(buf, stringLength);
				buf[stringLength] = 0;
				uint32_t format;
				stream >> format;
			}
		}
#endif
	}
}
+
+
+// Serialization of nvidia::MaterialFrame
+
+
/**
	Writes a MaterialFrame to the stream.

	The coordinate system is stored as the upper-left 3x3 of the PxMat44 in
	(row, column) order followed by the position vector; the write order must
	match the read order in deserialize() below.
*/
void serialize(physx::PxFileBuf& stream, const nvidia::MaterialFrame& f)
{
	// f.mCoordinateSystem
	const PxMat44& m44 = f.mCoordinateSystem;
	stream << m44(0, 0) << m44(0, 1) << m44(0, 2)
	       << m44(1, 0) << m44(1, 1) << m44(1, 2)
	       << m44(2, 0) << m44(2, 1) << m44(2, 2) << m44.getPosition();

	// Other fields of f
	stream << f.mUVPlane << f.mUVScale << f.mUVOffset << f.mFractureMethod << f.mFractureIndex << f.mSliceDepth;
}
+
/**
	Reads a MaterialFrame from the stream.

	\param apexVersion	unused here; MaterialFrame layout is gated solely by
						meshVersion
	\param meshVersion	ExplicitHierarchicalMeshImpl stream version

	For streams older than ChangedMaterialFrameToIncludeFracturingMethodContext
	nothing is read and only mSliceDepth is defaulted; the rest of f is left
	unmodified (the struct did not exist in those streams).
*/
void deserialize(physx::PxFileBuf& stream, uint32_t apexVersion, uint32_t meshVersion, nvidia::MaterialFrame& f)
{
	PX_UNUSED(apexVersion);

	// Default for streams predating AddedSliceDepthToMaterialFrame
	f.mSliceDepth = 0;

	if (meshVersion >= ExplicitHierarchicalMeshImpl::ChangedMaterialFrameToIncludeFracturingMethodContext) // First version in which this struct exists
	{
		// f.mCoordinateSystem
		// Read the 3x3 rotation part, then the translation: the cast reads the
		// xyz of the PxVec4 translation column in place (w is left untouched).
		PxMat44 &m44 = f.mCoordinateSystem;
		stream >> m44(0, 0) >> m44(0, 1) >> m44(0, 2)
		       >> m44(1, 0) >> m44(1, 1) >> m44(1, 2)
		       >> m44(2, 0) >> m44(2, 1) >> m44(2, 2) >> *reinterpret_cast<PxVec3*>(&m44.column3);

		// Other fields of f
		stream >> f.mUVPlane >> f.mUVScale >> f.mUVOffset >> f.mFractureMethod >> f.mFractureIndex;

		if (meshVersion >= ExplicitHierarchicalMeshImpl::AddedSliceDepthToMaterialFrame)
		{
			stream >> f.mSliceDepth;
		}
	}
}
+
+
+// ExplicitHierarchicalMeshImpl
+
+ExplicitHierarchicalMeshImpl::ExplicitHierarchicalMeshImpl()
+{
+ mBSPMemCache = ApexCSG::createBSPMemCache();
+ mRootSubmeshCount = 0;
+}
+
/**
	Destructor.  clear() deletes all chunks and parts first (part BSPs were
	created from mBSPMemCache in addPart()), so the BSP memory cache must be
	released only after clear() has run.
*/
ExplicitHierarchicalMeshImpl::~ExplicitHierarchicalMeshImpl()
{
	clear();
	mBSPMemCache->release();
}
+
+uint32_t ExplicitHierarchicalMeshImpl::addPart()
+{
+ const uint32_t index = mParts.size();
+ mParts.insert();
+ Part* part = PX_NEW(Part);
+ part->mMeshBSP = createBSP(mBSPMemCache);
+ mParts.back() = part;
+ return index;
+}
+
+bool ExplicitHierarchicalMeshImpl::removePart(uint32_t index)
+{
+ if (index >= partCount())
+ {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < mChunks.size(); ++i)
+ {
+ if (mChunks[i]->mPartIndex == (int32_t)index)
+ {
+ mChunks[i]->mPartIndex = -1;
+ }
+ else if (mChunks[i]->mPartIndex > (int32_t)index)
+ {
+ --mChunks[i]->mPartIndex;
+ }
+ }
+
+ delete mParts[index];
+ mParts.remove(index);
+
+ return true;
+}
+
+uint32_t ExplicitHierarchicalMeshImpl::addChunk()
+{
+ const uint32_t index = mChunks.size();
+ mChunks.insert();
+ mChunks.back() = PX_NEW(Chunk);
+ return index;
+}
+
+bool ExplicitHierarchicalMeshImpl::removeChunk(uint32_t index)
+{
+ if (index >= chunkCount())
+ {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < mChunks.size(); ++i)
+ {
+ if (mChunks[i]->mParentIndex == (int32_t)index)
+ {
+ mChunks[i]->mParentIndex = -1;
+ }
+ else if (mChunks[i]->mParentIndex > (int32_t)index)
+ {
+ --mChunks[i]->mParentIndex;
+ }
+ }
+
+ delete mChunks[index];
+ mChunks.remove(index);
+
+ return true;
+}
+
/**
	Serializes the entire hierarchical mesh to the stream.

	Wire layout (must be mirrored by deserialize()):
	  mesh version, apex stream version,
	  part count, then per part: bounds, mesh triangles, collision hull count
	  (storeDword) + hulls, a 1/0 flag for BSP presence (+ BSP data), flags;
	  chunk count, then per chunk: parent index, flags, part index, instanced
	  position/UV offsets, private flags;
	  submesh data array, material frame array, the embedding's material
	  library, and finally the root submesh count.

	\param embedding	callback used to serialize data owned by the caller
						(the material library)
*/
void ExplicitHierarchicalMeshImpl::serialize(physx::PxFileBuf& stream, Embedding& embedding) const
{
	// Version stamps first, so deserialize() can gate every later field
	stream << (uint32_t)ExplicitHierarchicalMeshImpl::Current;
	stream << (uint32_t)ApexStreamVersion::Current;
	stream << mParts.size();
	for (uint32_t i = 0; i < mParts.size(); ++i)
	{
		stream << mParts[i]->mBounds;
		apex::serialize(stream, mParts[i]->mMesh);
		stream.storeDword(mParts[i]->mCollision.size());
		for (uint32_t j = 0; j < mParts[i]->mCollision.size(); ++j)
		{
			apex::serialize(stream, mParts[i]->mCollision[j]->impl);
		}
		// BSP is optional; a leading dword flags its presence
		if (mParts[i]->mMeshBSP != NULL)
		{
			stream << (uint32_t)1;
			mParts[i]->mMeshBSP->serialize(stream);
		}
		else
		{
			stream << (uint32_t)0;
		}
		stream << mParts[i]->mFlags;
	}
	stream << mChunks.size();
	for (uint32_t i = 0; i < mChunks.size(); ++i)
	{
		stream << mChunks[i]->mParentIndex;
		stream << mChunks[i]->mFlags;
		stream << mChunks[i]->mPartIndex;
		stream << mChunks[i]->mInstancedPositionOffset;
		stream << mChunks[i]->mInstancedUVOffset;
		stream << mChunks[i]->mPrivateFlags;
	}
	apex::serialize(stream, mSubmeshData);
	apex::serialize(stream, mMaterialFrames);
	embedding.serialize(stream, Embedding::MaterialLibrary);
	stream << mRootSubmeshCount;
}
+
/**
	Deserializes a hierarchical mesh written by serialize(), handling every
	historical stream layout.

	The function first reads the mesh stream version and the apex stream
	version, then branches on the mesh version:
	  - InstancingData and later: modern layout (parts and chunks are separate
	    arrays), matching serialize() above.
	  - UsingExplicitPartContainers .. InstancingData: one chunk per part,
	    stored interleaved.
	  - Older: three parallel arrays (parent indices, meshes, hulls).
	Trailing sections (submesh data, material frames, material library,
	private flags, root submesh count, flag translation) are each gated by
	their introducing version constant.

	Missing BSPs are rebuilt from the part mesh on the fly.

	\param embedding	callback used to deserialize caller-owned data
						(the material library)
*/
void ExplicitHierarchicalMeshImpl::deserialize(physx::PxFileBuf& stream, Embedding& embedding)
{
	// Start from an empty mesh; clear() also resets submesh/material state
	clear();

	uint32_t meshStreamVersion;
	stream >> meshStreamVersion;
	uint32_t apexStreamVersion;
	stream >> apexStreamVersion;

	if (meshStreamVersion < ExplicitHierarchicalMeshImpl::RemovedExplicitHMesh_mMaxDepth)
	{
		// Dead field: max depth used to be stored explicitly
		int32_t maxDepth;
		stream >> maxDepth;
	}

	if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::InstancingData)
	{
		// Modern layout: independent part and chunk arrays
		uint32_t partCount;
		stream >> partCount;
		mParts.resize(partCount);
		for (uint32_t i = 0; i < partCount; ++i)
		{
			mParts[i] = PX_NEW(Part);
			stream >> mParts[i]->mBounds;
			apex::deserialize(stream, apexStreamVersion, mParts[i]->mMesh);
			resizeCollision(mParts[i]->mCollision, stream.readDword());
			for (uint32_t hullNum = 0; hullNum < mParts[i]->mCollision.size(); ++hullNum)
			{
				apex::deserialize(stream, apexStreamVersion, mParts[i]->mCollision[hullNum]->impl);
			}
			mParts[i]->mMeshBSP = createBSP(mBSPMemCache);
			uint32_t createMeshBSP;
			stream >> createMeshBSP;
			if (createMeshBSP)
			{
				mParts[i]->mMeshBSP->deserialize(stream);
			}
			else
			{
				// No serialized BSP: rebuild it deterministically from the
				// part mesh (seeded RNG, identity internal transform)
				ApexCSG::BSPBuildParameters bspBuildParameters = gDefaultBuildParameters;
				bspBuildParameters.internalTransform = physx::PxMat44(physx::PxIdentity);
				bspBuildParameters.rnd = &userRnd;
				userRnd.m_rnd.setSeed(0);
				mParts[i]->mMeshBSP->fromMesh(&mParts[i]->mMesh[0], mParts[i]->mMesh.size(), bspBuildParameters);
			}
			if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::ReaddedFlagsToPart)
			{
				stream >> mParts[i]->mFlags;
			}
		}

		uint32_t chunkCount;
		stream >> chunkCount;
		mChunks.resize(chunkCount);
		for (uint32_t i = 0; i < chunkCount; ++i)
		{
			mChunks[i] = PX_NEW(Chunk);
			stream >> mChunks[i]->mParentIndex;
			stream >> mChunks[i]->mFlags;
			stream >> mChunks[i]->mPartIndex;
			stream >> mChunks[i]->mInstancedPositionOffset;
			if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::UVInstancingData)
			{
				stream >> mChunks[i]->mInstancedUVOffset;
			}
			if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::IntroducingChunkPrivateFlags)
			{
				stream >> mChunks[i]->mPrivateFlags;
			}
		}
	}
	else
	{
		if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::UsingExplicitPartContainers)
		{
			// Legacy layout: exactly one chunk per part, interleaved
			uint32_t partCount;
			stream >> partCount;
			mParts.resize(partCount);
			mChunks.resize(partCount);
			for (uint32_t i = 0; i < partCount; ++i)
			{
				mParts[i] = PX_NEW(Part);
				mChunks[i] = PX_NEW(Chunk);
				stream >> mChunks[i]->mParentIndex;
				if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::SerializingMeshBounds)
				{
					stream >> mParts[i]->mBounds;
				}
				apex::deserialize(stream, apexStreamVersion, mParts[i]->mMesh);
				if (meshStreamVersion < ExplicitHierarchicalMeshImpl::SerializingMeshBounds)
				{
					// Bounds were not stored; recompute from the mesh
					buildMeshBounds(i);
				}
				if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::MultipleConvexHullsPerChunk)
				{
					resizeCollision(mParts[i]->mCollision, stream.readDword());
				}
				else
				{
					resizeCollision(mParts[i]->mCollision, 1);
				}
				for (uint32_t hullNum = 0; hullNum < mParts[i]->mCollision.size(); ++hullNum)
				{
					apex::deserialize(stream, apexStreamVersion, mParts[i]->mCollision[hullNum]->impl);
				}
				if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::PerPartMeshBSPs)
				{
					mParts[i]->mMeshBSP = createBSP(mBSPMemCache);
					uint32_t createMeshBSP;
					stream >> createMeshBSP;
					if (createMeshBSP)
					{
						mParts[i]->mMeshBSP->deserialize(stream);
					}
					else
					{
						// NOTE(review): unlike the InstancingData branch above,
						// this rebuild does not set bspBuildParameters.rnd or
						// seed userRnd — confirm whether that asymmetry is
						// intentional for legacy streams.
						ApexCSG::BSPBuildParameters bspBuildParameters = gDefaultBuildParameters;
						bspBuildParameters.internalTransform = physx::PxMat44(physx::PxIdentity);
						mParts[i]->mMeshBSP->fromMesh(&mParts[i]->mMesh[0], mParts[i]->mMesh.size(), bspBuildParameters);
					}
				}
				if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::AddedFlagsFieldToPart)
				{
					stream >> mChunks[i]->mFlags;
				}
			}
		}
		else
		{
			// Oldest layout: three parallel arrays
			physx::Array<int32_t> parentIndices;
			physx::Array< physx::Array< ExplicitRenderTriangle > > meshes;
			physx::Array< ConvexHullImpl > meshHulls;
			apex::deserialize(stream, apexStreamVersion, parentIndices);
			apex::deserialize(stream, apexStreamVersion, meshes);
			apex::deserialize(stream, apexStreamVersion, meshHulls);
			PX_ASSERT(parentIndices.size() == meshes.size() && meshes.size() == meshHulls.size());
			uint32_t partCount = PxMin(parentIndices.size(), PxMin(meshes.size(), meshHulls.size()));
			mParts.resize(partCount);
			mChunks.resize(partCount);
			for (uint32_t i = 0; i < partCount; ++i)
			{
				mParts[i] = PX_NEW(Part);
				mChunks[i] = PX_NEW(Chunk);
				mChunks[i]->mParentIndex = parentIndices[i];
				mParts[i]->mMesh = meshes[i];
				resizeCollision(mParts[i]->mCollision, 1);
				mParts[i]->mCollision[0]->impl = meshHulls[i];
				buildMeshBounds(i);
			}
		}
		// Pre-InstancingData streams imply a 1:1 chunk-to-part mapping
		for (uint32_t i = 0; i < mChunks.size(); ++i)
		{
			mChunks[i]->mPartIndex = (int32_t)i;
		}
	}

	// Streams in [SerializingMeshBSP, PerPartMeshBSPs) stored a single BSP for
	// part 0 only.  (These versions always have at least one part here.)
	if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::SerializingMeshBSP && meshStreamVersion < ExplicitHierarchicalMeshImpl::PerPartMeshBSPs)
	{
		mParts[0]->mMeshBSP = createBSP(mBSPMemCache);
		uint32_t createMeshBSP;
		stream >> createMeshBSP;
		if (createMeshBSP)
		{
			mParts[0]->mMeshBSP->deserialize(stream);
		}
		else
		{
			ApexCSG::BSPBuildParameters bspBuildParameters = gDefaultBuildParameters;
			bspBuildParameters.internalTransform = physx::PxMat44(physx::PxIdentity);
			mParts[0]->mMeshBSP->fromMesh(&mParts[0]->mMesh[0], mParts[0]->mMesh.size(), bspBuildParameters);
		}
	}

	if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::IncludingVertexFormatInSubmeshData)
	{
		apex::deserialize(stream, apexStreamVersion, meshStreamVersion, mSubmeshData);
	}
	else
	{
		// Old streams stored only material names; synthesize a default vertex
		// format (all static channels, one UV set, one bone per vertex)
		physx::Array<ApexSimpleString> materialNames;
		apex::deserialize(stream, apexStreamVersion, materialNames);
		mSubmeshData.resize(0);	// Make sure the next resize calls constructors
		mSubmeshData.resize(materialNames.size());
		for (uint32_t i = 0; i < materialNames.size(); ++i)
		{
			nvidia::strlcpy(mSubmeshData[i].mMaterialName, ExplicitSubmeshData::MaterialNameBufferSize, materialNames[i].c_str());
			mSubmeshData[i].mVertexFormat.mHasStaticPositions = true;
			mSubmeshData[i].mVertexFormat.mHasStaticNormals = true;
			mSubmeshData[i].mVertexFormat.mHasStaticTangents = true;
			mSubmeshData[i].mVertexFormat.mHasStaticBinormals = true;
			mSubmeshData[i].mVertexFormat.mHasStaticColors = true;
			mSubmeshData[i].mVertexFormat.mHasStaticDisplacements = false;
			mSubmeshData[i].mVertexFormat.mUVCount = 1;
			mSubmeshData[i].mVertexFormat.mBonesPerVertex = 1;
		}
	}

	if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::AddedMaterialFramesToHMesh_and_NoiseType_and_GridSize_to_Cleavage)
	{
		if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::ChangedMaterialFrameToIncludeFracturingMethodContext)
		{
			apex::deserialize(stream, apexStreamVersion, meshStreamVersion, mMaterialFrames);
		}
		else
		{
			// Old material frames stored only the coordinate system; derive
			// the UV plane from it and default the remaining fields
			const uint32_t size = stream.readDword();
			mMaterialFrames.resize(size);
			for (uint32_t i = 0; i < size; ++i)
			{
				PxMat44 &m44 = mMaterialFrames[i].mCoordinateSystem;
				stream >> m44(0, 0) >> m44(0, 1) >> m44(0, 2)
				       >> m44(1, 0) >> m44(1, 1) >> m44(1, 2)
				       >> m44(2, 0) >> m44(2, 1) >> m44(2, 2) >> *reinterpret_cast<PxVec3*>(&m44.column3);
				mMaterialFrames[i].mUVPlane = physx::PxPlane(m44.getPosition(), m44.column2.getXYZ());
				mMaterialFrames[i].mUVScale = physx::PxVec2(1.0f);
				mMaterialFrames[i].mUVOffset = physx::PxVec2(0.0f);
				mMaterialFrames[i].mFractureMethod = nvidia::FractureMethod::Unknown;
				mMaterialFrames[i].mFractureIndex = -1;
			}
		}
	}
	else
	{
		mMaterialFrames.resize(0);
	}

	if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::AddedMaterialLibraryToMesh)
	{
		embedding.deserialize(stream, Embedding::MaterialLibrary, meshStreamVersion);
	}

	if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::AddedCacheChunkSurfaceTracesAndInteriorSubmeshIndex && meshStreamVersion < ExplicitHierarchicalMeshImpl::RemovedInteriorSubmeshIndex)
	{
		// Dead field: interior submesh index was later removed
		int32_t interiorSubmeshIndex;
		stream >> interiorSubmeshIndex;
	}


	if (meshStreamVersion < ExplicitHierarchicalMeshImpl::IntroducingChunkPrivateFlags)
	{
		// Private flags were not stored; reconstruct Root/RootLeaf from each
		// chunk's depth relative to the stored (or implied 0) root depth
		uint32_t rootDepth = 0;
		if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::PerPartMeshBSPs)
		{
			stream >> rootDepth;
		}

		for (uint32_t i = 0; i < mChunks.size(); ++i)
		{
			mChunks[i]->mPrivateFlags = 0;
			const uint32_t chunkDepth = depth(i);
			if (chunkDepth <= rootDepth)
			{
				mChunks[i]->mPrivateFlags |= Chunk::Root;
				if (chunkDepth == rootDepth)
				{
					mChunks[i]->mPrivateFlags |= Chunk::RootLeaf;
				}
			}
		}
	}

	if (meshStreamVersion >= ExplicitHierarchicalMeshImpl::StoringRootSubmeshCount)
	{
		stream >> mRootSubmeshCount;
	}
	else
	{
		mRootSubmeshCount = mSubmeshData.size();
	}

	if (meshStreamVersion < ExplicitHierarchicalMeshImpl::RemovedNxChunkAuthoringFlag)
	{
		/* Need to translate flags:
			IsCutoutFaceSplit = (1U << 0),
			IsCutoutLeftover = (1U << 1),
			Instance = (1U << 31)
		*/
		for (uint32_t chunkIndex = 0; chunkIndex < mChunks.size(); ++chunkIndex)
		{
			// IsCutoutFaceSplit and IsCutoutLeftover are no longer used.
			// Translate Instance flag:
			if (mChunks[chunkIndex]->mFlags & (1U << 31))
			{
				mChunks[chunkIndex]->mFlags = nvidia::apex::DestructibleAsset::ChunkIsInstanced;
			}
		}
	}
}
+
/**
	Returns the hierarchy depth, or -1 if there are no chunks.

	NOTE(review): this walks the parent chain starting from the LAST chunk
	only, so it yields the true maximum depth only when chunks are stored in
	depth order (as sortChunks() produces).  Confirm that all callers operate
	on depth-sorted meshes.
*/
int32_t ExplicitHierarchicalMeshImpl::maxDepth() const
{
	int32_t max = -1;
	int32_t index = (int32_t)chunkCount()-1;
	while (index >= 0)
	{
		index = mChunks[(uint32_t)index]->mParentIndex;
		++max;
	}
	return max;
}
+
// Returns the number of parts (shared geometry containers) in this mesh.
uint32_t ExplicitHierarchicalMeshImpl::partCount() const
{
	return mParts.size();
}
+
// Returns the number of chunks (hierarchy nodes) in this mesh.
uint32_t ExplicitHierarchicalMeshImpl::chunkCount() const
{
	return mChunks.size();
}
+
+int32_t* ExplicitHierarchicalMeshImpl::parentIndex(uint32_t chunkIndex)
+{
+ return chunkIndex < chunkCount() ? &mChunks[chunkIndex]->mParentIndex : NULL;
+}
+
+uint64_t ExplicitHierarchicalMeshImpl::chunkUniqueID(uint32_t chunkIndex)
+{
+ return chunkIndex < chunkCount() ? mChunks[chunkIndex]->getEUID() : (uint64_t)0;
+}
+
+int32_t* ExplicitHierarchicalMeshImpl::partIndex(uint32_t chunkIndex)
+{
+ return chunkIndex < chunkCount() ? &mChunks[chunkIndex]->mPartIndex : NULL;
+}
+
+physx::PxVec3* ExplicitHierarchicalMeshImpl::instancedPositionOffset(uint32_t chunkIndex)
+{
+ return chunkIndex < chunkCount() ? &mChunks[chunkIndex]->mInstancedPositionOffset : NULL;
+}
+
+physx::PxVec2* ExplicitHierarchicalMeshImpl::instancedUVOffset(uint32_t chunkIndex)
+{
+ return chunkIndex < chunkCount() ? &mChunks[chunkIndex]->mInstancedUVOffset : NULL;
+}
+
+uint32_t ExplicitHierarchicalMeshImpl::depth(uint32_t chunkIndex) const
+{
+ if (chunkIndex >= mChunks.size())
+ {
+ return 0;
+ }
+
+ uint32_t depth = 0;
+ int32_t index = (int32_t)chunkIndex;
+ while ((index = mChunks[(uint32_t)index]->mParentIndex) >= 0)
+ {
+ ++depth;
+ }
+
+ return depth;
+}
+
+uint32_t ExplicitHierarchicalMeshImpl::meshTriangleCount(uint32_t partIndex) const
+{
+ return partIndex < partCount() ? mParts[partIndex]->mMesh.size() : 0;
+}
+
+ExplicitRenderTriangle* ExplicitHierarchicalMeshImpl::meshTriangles(uint32_t partIndex)
+{
+ return partIndex < partCount() ? mParts[partIndex]->mMesh.begin() : NULL;
+}
+
+physx::PxBounds3 ExplicitHierarchicalMeshImpl::meshBounds(uint32_t partIndex) const
+{
+ physx::PxBounds3 bounds;
+ bounds.setEmpty();
+ if (partIndex < partCount())
+ {
+ bounds = mParts[partIndex]->mBounds;
+ }
+ return bounds;
+}
+
+physx::PxBounds3 ExplicitHierarchicalMeshImpl::chunkBounds(uint32_t chunkIndex) const
+{
+ physx::PxBounds3 bounds;
+ bounds.setEmpty();
+
+ if (chunkIndex < chunkCount())
+ {
+ bounds = mParts[(uint32_t)mChunks[chunkIndex]->mPartIndex]->mBounds;
+ bounds.minimum += mChunks[chunkIndex]->mInstancedPositionOffset;
+ bounds.maximum += mChunks[chunkIndex]->mInstancedPositionOffset;
+ }
+ return bounds;
+}
+
+uint32_t* ExplicitHierarchicalMeshImpl::chunkFlags(uint32_t chunkIndex) const
+{
+ if (chunkIndex < chunkCount())
+ {
+ return &mChunks[chunkIndex]->mFlags;
+ }
+ return NULL;
+}
+
+uint32_t ExplicitHierarchicalMeshImpl::convexHullCount(uint32_t partIndex) const
+{
+ if (partIndex < partCount())
+ {
+ return mParts[partIndex]->mCollision.size();
+ }
+ return 0;
+}
+
+const ExplicitHierarchicalMeshImpl::ConvexHull** ExplicitHierarchicalMeshImpl::convexHulls(uint32_t partIndex) const
+{
+ if (partIndex < partCount())
+ {
+ Part* part = mParts[partIndex];
+ return part->mCollision.size() > 0 ? (const ExplicitHierarchicalMeshImpl::ConvexHull**)&part->mCollision[0] : NULL;
+ }
+ return NULL;
+}
+
+physx::PxVec3* ExplicitHierarchicalMeshImpl::surfaceNormal(uint32_t partIndex)
+{
+ if (partIndex < partCount())
+ {
+ Part* part = mParts[partIndex];
+ return &part->mSurfaceNormal;
+ }
+ return NULL;
+}
+
// Returns the mesh's displacement map volume (const reference to the member).
const DisplacementMapVolume& ExplicitHierarchicalMeshImpl::displacementMapVolume() const
{
	return mDisplacementMapVolume;
}
+
// Returns the number of submesh descriptors in this mesh.
uint32_t ExplicitHierarchicalMeshImpl::submeshCount() const
{
	return mSubmeshData.size();
}
+
+ExplicitSubmeshData* ExplicitHierarchicalMeshImpl::submeshData(uint32_t submeshIndex)
+{
+ return submeshIndex < mSubmeshData.size() ? mSubmeshData.begin() + submeshIndex : NULL;
+}
+
+uint32_t ExplicitHierarchicalMeshImpl::addSubmesh(const ExplicitSubmeshData& submeshData)
+{
+ const uint32_t index = mSubmeshData.size();
+ mSubmeshData.pushBack(submeshData);
+ return index;
+}
+
// Returns the number of material frames in this mesh.
uint32_t ExplicitHierarchicalMeshImpl::getMaterialFrameCount() const
{
	return mMaterialFrames.size();
}
+
// Returns a copy of the material frame at 'index' (no range check; caller
// must pass index < getMaterialFrameCount()).
nvidia::MaterialFrame ExplicitHierarchicalMeshImpl::getMaterialFrame(uint32_t index) const
{
	return mMaterialFrames[index];
}
+
// Overwrites the material frame at 'index' (no range check; caller must pass
// index < getMaterialFrameCount()).
void ExplicitHierarchicalMeshImpl::setMaterialFrame(uint32_t index, const nvidia::MaterialFrame& materialFrame)
{
	mMaterialFrames[index] = materialFrame;
}
+
+uint32_t ExplicitHierarchicalMeshImpl::addMaterialFrame()
+{
+ mMaterialFrames.insert();
+ return mMaterialFrames.size()-1;
+}
+
/**
	Deletes the mesh contents.

	\param keepRoot	if true, chunks flagged as root (and the parts they
					reference) are retained; everything else is removed.
					If false, everything including submesh data, the BSP
					memory cache contents, and the root submesh count is reset.

	Chunks are removed in REVERSE index order because removeChunk() compacts
	the array and shifts later indices down.
*/
void ExplicitHierarchicalMeshImpl::clear(bool keepRoot)
{
	// Highest part index still referenced by a retained chunk, plus one
	uint32_t newPartCount = 0;
	uint32_t index = chunkCount();
	while (index-- > 0)
	{
		if (!keepRoot || !mChunks[index]->isRootChunk())
		{
			removeChunk(index);
		}
		else
		{
			newPartCount = PxMax(newPartCount, (uint32_t)(mChunks[index]->mPartIndex+1));
		}
	}

	// Drop all parts beyond those still referenced by retained chunks
	while (newPartCount < partCount())
	{
		removePart(partCount()-1);
	}

	mMaterialFrames.resize(0);

	if (!keepRoot)
	{
		mSubmeshData.reset();
		mBSPMemCache->clearAll();
		mRootSubmeshCount = 0;
	}
}
+
/**
	Reorders mChunks so that parents always precede children (breadth-first /
	depth order), and remaps all parent indices accordingly.

	\param indexRemap	optional out-array; on return, remap[oldIndex] gives
						the chunk's new index.

	Algorithm: sort chunks by parent index, build a start-offset lookup so all
	children of a given parent can be found in O(1), then emit chunks in BFS
	order by repeatedly appending the child range of the next emitted chunk.
*/
void ExplicitHierarchicalMeshImpl::sortChunks(physx::Array<uint32_t>* indexRemap)
{
	if (mChunks.size() <= 1)
	{
		return;
	}

	// Sort by original parent index
	physx::Array<ChunkIndexer> chunkIndices(mChunks.size());
	for (uint32_t i = 0; i < mChunks.size(); ++i)
	{
		chunkIndices[i].chunk = mChunks[i];
		chunkIndices[i].parentIndex = mChunks[i]->mParentIndex;
		chunkIndices[i].index = (int32_t)i;
	}
	qsort(chunkIndices.begin(), chunkIndices.size(), sizeof(ChunkIndexer), ChunkIndexer::compareParentIndices);

	// Now arrange in depth order
	// parentStarts[p+1] is the first slot in chunkIndices whose parent is p
	// (p = -1 holds the roots)
	physx::Array<uint32_t> parentStarts;
	createIndexStartLookup(parentStarts, -1, chunkIndices.size() + 1, &chunkIndices[0].parentIndex, chunkIndices.size(), sizeof(ChunkIndexer));

	// BFS: start with the root range (parentIndex == -1), then append each
	// emitted chunk's children in turn
	physx::Array<ChunkIndexer> newChunkIndices;
	newChunkIndices.reserve(mChunks.size());
	int32_t parentIndex = -1;
	uint32_t nextPart = 0;
	while (newChunkIndices.size() < mChunks.size())
	{
		const uint32_t start = parentStarts[(uint32_t)parentIndex + 1];
		const uint32_t stop = parentStarts[(uint32_t)parentIndex + 2];
		for (uint32_t index = start; index < stop; ++index)
		{
			newChunkIndices.pushBack(chunkIndices[index]);
		}
		parentIndex = newChunkIndices[nextPart++].index;
	}

	// Remap the parts and parent indices
	physx::Array<uint32_t> internalRemap;
	physx::Array<uint32_t>& remap = indexRemap != NULL ? *indexRemap : internalRemap;
	remap.resize(newChunkIndices.size());
	for (uint32_t i = 0; i < newChunkIndices.size(); ++i)
	{
		mChunks[i] = newChunkIndices[i].chunk;
		remap[(uint32_t)newChunkIndices[i].index] = i;
	}
	for (uint32_t i = 0; i < mChunks.size(); ++i)
	{
		if (mChunks[i]->mParentIndex >= 0)
		{
			mChunks[i]->mParentIndex = (int32_t)remap[(uint32_t)mChunks[i]->mParentIndex];
		}
	}
}
+
+void ExplicitHierarchicalMeshImpl::createPartSurfaceNormals()
+{
+ for (uint32_t partIndex = 0; partIndex < mParts.size(); ++partIndex)
+ {
+ Part* part = mParts[partIndex];
+ physx::Array<ExplicitRenderTriangle>& mesh = part->mMesh;
+ physx::PxVec3 normal(0.0f);
+ for (uint32_t triangleIndex = 0; triangleIndex < mesh.size(); ++triangleIndex)
+ {
+ ExplicitRenderTriangle& triangle = mesh[triangleIndex];
+ if (triangle.extraDataIndex == 0xFFFFFFFF)
+ {
+ normal += (triangle.vertices[1].position - triangle.vertices[0].position).cross(triangle.vertices[2].position - triangle.vertices[0].position);
+ }
+ }
+ part->mSurfaceNormal = normal.getNormalized();
+ }
+}
+
/**
	Deep-copies another hierarchical mesh into this one.

	Parts are created through addPart() (which also creates each part's BSP),
	then their bounds, meshes, BSP contents, collision hulls, and flags are
	copied.  Chunks, submesh data, material frames, and the root submesh count
	are copied directly.
*/
void ExplicitHierarchicalMeshImpl::set(const ExplicitHierarchicalMesh& mesh)
{
	const ExplicitHierarchicalMeshImpl& m = (const ExplicitHierarchicalMeshImpl&)mesh;
	// Start from an empty mesh before rebuilding the part array
	clear();
	mParts.resize(0);
	mParts.reserve(m.mParts.size());
	for (uint32_t i = 0; i < m.mParts.size(); ++i)
	{
		const uint32_t newPartIndex = addPart();
		PX_ASSERT(newPartIndex == i);
		mParts[newPartIndex]->mBounds = m.mParts[i]->mBounds;
		mParts[newPartIndex]->mMesh = m.mParts[i]->mMesh;
		PX_ASSERT(m.mParts[i]->mMeshBSP != NULL);
		mParts[newPartIndex]->mMeshBSP->copy(*m.mParts[i]->mMeshBSP);
		resizeCollision(mParts[newPartIndex]->mCollision, m.mParts[i]->mCollision.size());
		for (uint32_t j = 0; j < mParts[newPartIndex]->mCollision.size(); ++j)
		{
			mParts[newPartIndex]->mCollision[j]->impl = m.mParts[i]->mCollision[j]->impl;
		}
		mParts[newPartIndex]->mFlags = m.mParts[i]->mFlags;
	}
	mChunks.resize(m.mChunks.size());
	for (uint32_t i = 0; i < mChunks.size(); ++i)
	{
		mChunks[i] = PX_NEW(Chunk);
		mChunks[i]->mParentIndex = m.mChunks[i]->mParentIndex;
		mChunks[i]->mFlags = m.mChunks[i]->mFlags;
		mChunks[i]->mPartIndex = m.mChunks[i]->mPartIndex;
		mChunks[i]->mInstancedPositionOffset = m.mChunks[i]->mInstancedPositionOffset;
		mChunks[i]->mInstancedUVOffset = m.mChunks[i]->mInstancedUVOffset;
		mChunks[i]->mPrivateFlags = m.mChunks[i]->mPrivateFlags;
	}
	mSubmeshData = m.mSubmeshData;
	mMaterialFrames = m.mMaterialFrames;
	mRootSubmeshCount = m.mRootSubmeshCount;
}
+
+static void buildCollisionGeometryForPartInternal(physx::Array<PartConvexHullProxy*>& volumes, ExplicitHierarchicalMeshImpl::Part* part, const CollisionVolumeDesc& desc, float inflation = 0.0f)
+{
+ uint32_t vertexCount = part->mMesh.size() * 3;
+ if (inflation > 0.0f)
+ {
+ vertexCount *= 7; // Will add vertices
+ }
+ physx::Array<physx::PxVec3> vertices(vertexCount);
+ uint32_t vertexN = 0;
+ for (uint32_t i = 0; i < part->mMesh.size(); ++i)
+ {
+ nvidia::ExplicitRenderTriangle& triangle = part->mMesh[i];
+ for (uint32_t v = 0; v < 3; ++v)
+ {
+ const physx::PxVec3& position = triangle.vertices[v].position;
+ vertices[vertexN++] = position;
+ if (inflation > 0.0f)
+ {
+ for (uint32_t j = 0; j < 3; ++j)
+ {
+ physx::PxVec3 offset(0.0f);
+ offset[j] = inflation;
+ for (uint32_t k = 0; k < 2; ++k)
+ {
+ vertices[vertexN++] = position + offset;
+ offset[j] *= -1.0f;
+ }
+ }
+ }
+ }
+ }
+
+ // Identity index buffer
+ PX_ALLOCA(indices, uint32_t, vertices.size());
+ for (uint32_t i = 0; i < vertices.size(); ++i)
+ {
+ indices[i] = i;
+ }
+
+ buildCollisionGeometry(volumes, desc, vertices.begin(), vertices.size(), sizeof(physx::PxVec3), indices, vertices.size());
+}
+
/**
	(Re)builds the mesh BSP for one part.

	\param partIndex		part to process; returns false if out of range
	\param randomSeed		seed for the deterministic BSP-build RNG
	\param microgridSize	snap-grid resolution for the BSP build
	\param meshMode			Closed: trust the mesh; Open: always treat as open;
							Automatic: detect openness via surface area/volume
	\param progressListener	optional progress callback
	\param cancel			optional cancellation flag polled by fromMesh()
	\return false if the part index is invalid or the BSP build failed

	If the part belongs to a root chunk and is (or is forced) open, the mesh
	BSP is replaced by one built from a padded convex hull of the mesh, and
	the part is flagged Part::MeshOpen.
*/
bool ExplicitHierarchicalMeshImpl::calculatePartBSP(uint32_t partIndex, uint32_t randomSeed, uint32_t microgridSize, BSPOpenMode::Enum meshMode, IProgressListener* progressListener, volatile bool* cancel)
{
	if (partIndex >= mParts.size())
	{
		return false;
	}

	PX_ASSERT(mParts[partIndex]->mMeshBSP != NULL);

	ApexCSG::BSPBuildParameters bspBuildParameters = gDefaultBuildParameters;
	bspBuildParameters.snapGridSize = microgridSize;
	// NOTE(review): internalTransform is set to the ZERO matrix here, while
	// every other BSP build in this file uses PxIdentity.  Possibly a sentinel
	// meaning "compute internally" — confirm against fromMesh()'s handling.
	bspBuildParameters.internalTransform = physx::PxMat44(physx::PxZero);
	bspBuildParameters.rnd = &userRnd;
	userRnd.m_rnd.setSeed(randomSeed);
	bool ok = mParts[partIndex]->mMeshBSP->fromMesh(&mParts[partIndex]->mMesh[0], mParts[partIndex]->mMesh.size(), bspBuildParameters, progressListener, cancel);
	if (!ok)
	{
		return false;
	}

	// Check for open mesh
	if (meshMode == BSPOpenMode::Closed)
	{
		return true;
	}

	for (uint32_t chunkIndex = 0; chunkIndex < chunkCount(); ++chunkIndex)
	{
		// Find a chunk which uses this part
		if ((uint32_t)mChunks[chunkIndex]->mPartIndex == partIndex)
		{
			// If the chunk is a root chunk, test for openness
			if (mChunks[chunkIndex]->isRootChunk())
			{
				float area, volume;
				// Open if forced, or if a closed-surface area/volume cannot be computed
				if (meshMode == BSPOpenMode::Open || !mParts[partIndex]->mMeshBSP->getSurfaceAreaAndVolume(area, volume, true))
				{
					// Mark the mesh as open
					mParts[partIndex]->mFlags |= Part::MeshOpen;
					// Instead of using this mesh's BSP, use the convex hull
					physx::Array<PartConvexHullProxy*> volumes;
					CollisionVolumeDesc collisionDesc;
					collisionDesc.mHullMethod = nvidia::ConvexHullMethod::WRAP_GRAPHICS_MESH;
					// Inflate by 1% of the bounds extent so the hull strictly contains the mesh
					buildCollisionGeometryForPartInternal(volumes, mParts[partIndex], collisionDesc, mParts[partIndex]->mBounds.getExtents().magnitude()*0.01f);
					PX_ASSERT(volumes.size() == 1);
					if (volumes.size() > 0)
					{
						PartConvexHullProxy& hull = *volumes[0];
						// Push each hull plane out by a small padding before
						// building the BSP from the convex polyhedron
						physx::Array<physx::PxPlane> planes;
						planes.resize(hull.impl.getPlaneCount());
						const physx::PxVec3 extents = hull.impl.getBounds().getExtents();
						const float padding = 0.001f*extents.magnitude();
						for (uint32_t planeIndex = 0; planeIndex < hull.impl.getPlaneCount(); ++planeIndex)
						{
							planes[planeIndex] = hull.impl.getPlane(planeIndex);
							planes[planeIndex].d -= padding;
						}
						// Internal transform scales/centers the hull into a unit-extent frame
						physx::PxMat44 internalTransform = physx::PxMat44(physx::PxIdentity);
						const physx::PxVec3 scale(1.0f/extents[0], 1.0f/extents[1], 1.0f/extents[2]);
						internalTransform.scale(physx::PxVec4(scale, 1.0f));
						internalTransform.setPosition(-scale.multiply(hull.impl.getBounds().getCenter()));
						mParts[partIndex]->mMeshBSP->fromConvexPolyhedron(&planes[0], planes.size(), internalTransform, &mParts[partIndex]->mMesh[0], mParts[partIndex]->mMesh.size());
					}
				}
			}
			break;
		}
	}

	return true;
}
+
+void ExplicitHierarchicalMeshImpl::replaceInteriorSubmeshes(uint32_t partIndex, uint32_t frameCount, uint32_t* frameIndices, uint32_t submeshIndex)
+{
+ if (partIndex >= mParts.size())
+ {
+ return;
+ }
+
+ Part* part = mParts[partIndex];
+
+ // Replace render mesh submesh indices
+ for (uint32_t triangleIndex = 0; triangleIndex < part->mMesh.size(); ++triangleIndex)
+ {
+ ExplicitRenderTriangle& triangle = part->mMesh[triangleIndex];
+ for (uint32_t frameNum = 0; frameNum < frameCount; ++frameNum)
+ {
+ if (triangle.extraDataIndex == frameIndices[frameNum])
+ {
+ triangle.submeshIndex = (int32_t)submeshIndex;
+ }
+ }
+ }
+
+ // Replace BSP mesh submesh indices
+ part->mMeshBSP->replaceInteriorSubmeshes(frameCount, frameIndices, submeshIndex);
+}
+
+void ExplicitHierarchicalMeshImpl::calculateMeshBSP(uint32_t randomSeed, IProgressListener* progressListener, const uint32_t* microgridSize, BSPOpenMode::Enum meshMode)
+{
+ if (partCount() == 0)
+ {
+ outputMessage("No mesh, cannot calculate BSP.", physx::PxErrorCode::eDEBUG_WARNING);
+ return;
+ }
+
+ uint32_t bspCount = 0;
+ for (uint32_t chunkIndex = 0; chunkIndex < mChunks.size(); ++chunkIndex)
+ {
+ if (mChunks[chunkIndex]->isRootLeafChunk())
+ {
+ ++bspCount;
+ }
+ }
+
+ if (bspCount == 0)
+ {
+ outputMessage("No parts at root depth, no BSPs to calculate", physx::PxErrorCode::eDEBUG_WARNING);
+ return;
+ }
+
+ HierarchicalProgressListener progress(PxMax((int32_t)bspCount, 1), progressListener);
+
+ const uint32_t microgridSizeToUse = microgridSize != NULL ? *microgridSize : gMicrogridSize;
+
+ for (uint32_t chunkIndex = 0; chunkIndex < mChunks.size(); ++chunkIndex)
+ {
+ if (mChunks[chunkIndex]->isRootLeafChunk())
+ {
+ uint32_t chunkPartIndex = (uint32_t)*partIndex(chunkIndex);
+ calculatePartBSP(chunkPartIndex, randomSeed, microgridSizeToUse, meshMode, &progress);
+ progress.completeSubtask();
+ }
+ }
+}
+
+void ExplicitHierarchicalMeshImpl::visualize(RenderDebugInterface& debugRender, uint32_t flags, uint32_t index) const
+{
+#ifdef WITHOUT_DEBUG_VISUALIZE
+ PX_UNUSED(debugRender);
+ PX_UNUSED(flags);
+ PX_UNUSED(index);
+#else
+ uint32_t bspMeshFlags = 0;
+ if (flags & VisualizeMeshBSPInsideRegions)
+ {
+ bspMeshFlags |= ApexCSG::BSPVisualizationFlags::InsideRegions;
+ }
+ if (flags & VisualizeMeshBSPOutsideRegions)
+ {
+ bspMeshFlags |= ApexCSG::BSPVisualizationFlags::OutsideRegions;
+ }
+ if (flags & VisualizeMeshBSPSingleRegion)
+ {
+ bspMeshFlags |= ApexCSG::BSPVisualizationFlags::SingleRegion;
+ }
+ for (uint32_t partIndex = 0; partIndex < mParts.size(); ++partIndex)
+ {
+ if (mParts[partIndex]->mMeshBSP != NULL)
+ {
+ mParts[partIndex]->mMeshBSP->visualize(debugRender, bspMeshFlags, index);
+ }
+ }
+#endif
+}
+
void ExplicitHierarchicalMeshImpl::release()
{
	// Self-destruct; instances are heap-allocated via PX_NEW in
	// FractureTools::createExplicitHierarchicalMesh()
	delete this;
}
+
+void ExplicitHierarchicalMeshImpl::buildMeshBounds(uint32_t partIndex)
+{
+ if (partIndex < partCount())
+ {
+ physx::PxBounds3& bounds = mParts[partIndex]->mBounds;
+ bounds.setEmpty();
+ const physx::Array<ExplicitRenderTriangle>& mesh = mParts[partIndex]->mMesh;
+ for (uint32_t i = 0; i < mesh.size(); ++i)
+ {
+ bounds.include(mesh[i].vertices[0].position);
+ bounds.include(mesh[i].vertices[1].position);
+ bounds.include(mesh[i].vertices[2].position);
+ }
+ }
+}
+
+void ExplicitHierarchicalMeshImpl::buildCollisionGeometryForPart(uint32_t partIndex, const CollisionVolumeDesc& desc)
+{
+ if (partIndex < partCount())
+ {
+ Part* part = mParts[partIndex];
+ buildCollisionGeometryForPartInternal(part->mCollision, part, desc);
+ }
+}
+
// Recursively gathers the collision hulls of a chunk's root-flagged children
// into the chunk's own part, so a parent's collision volume is the union of
// its root children's hulls. Chunks with no root children are left unchanged.
void ExplicitHierarchicalMeshImpl::aggregateCollisionHullsFromRootChildren(uint32_t chunkIndex)
{
	// Collect the direct children of chunkIndex that are root chunks
	nvidia::InlineArray<uint32_t,16> rootChildren;
	for (uint32_t i = 0; i < mChunks.size(); ++i)
	{
		if (mChunks[i]->mParentIndex == (int32_t)chunkIndex && mChunks[i]->isRootChunk())
		{
			rootChildren.pushBack(i);
		}
	}

	if (rootChildren.size() != 0)
	{
		// First pass: recurse into each child (so its own hulls are aggregated
		// first), and total up how many hulls this chunk will hold
		uint32_t newHullCount = 0;
		for (uint32_t rootChildNum = 0; rootChildNum < rootChildren.size(); ++rootChildNum)
		{
			const uint32_t rootChild = rootChildren[rootChildNum];
			aggregateCollisionHullsFromRootChildren(rootChild);
			const uint32_t childPartIndex = (uint32_t)mChunks[rootChild]->mPartIndex;
			newHullCount += mParts[childPartIndex]->mCollision.size();
		}
		// Size this chunk's part to hold all child hulls before copying
		const uint32_t partIndex = (uint32_t)mChunks[chunkIndex]->mPartIndex;
		resizeCollision(mParts[partIndex]->mCollision, newHullCount);
		newHullCount = 0;
		// Second pass: copy every child hull into this part, in child order
		for (uint32_t rootChildNum = 0; rootChildNum < rootChildren.size(); ++rootChildNum)
		{
			const uint32_t rootChild = rootChildren[rootChildNum];
			const uint32_t childPartIndex = (uint32_t)mChunks[rootChild]->mPartIndex;
			for (uint32_t hullN = 0; hullN < mParts[childPartIndex]->mCollision.size(); ++hullN)
			{
				*mParts[partIndex]->mCollision[newHullCount++] = *mParts[childPartIndex]->mCollision[hullN];
			}
		}
		// Every slot allocated above must have been filled exactly once
		PX_ASSERT(newHullCount == mParts[partIndex]->mCollision.size());
	}
}
+
// Rebuilds collision geometry for the parts of all root chunks:
// clears existing hulls, builds fresh hulls at the leaves (or at every root
// chunk when aggregation is off), optionally aggregates leaf hulls up to
// depth 0, optionally trims overlapping hulls, then reduces hull complexity.
void ExplicitHierarchicalMeshImpl::buildCollisionGeometryForRootChunkParts(const CollisionDesc& desc, bool aggregateRootChunkParentCollision)
{
	// This helps keep the loops small if there are a lot of child chunks
	uint32_t rootChunkStop = 0;

	// Pass 1: clear existing collision on every root chunk's part, and record
	// the index just past the last root chunk so later loops can stop early
	for (uint32_t chunkIndex = 0; chunkIndex < chunkCount(); ++chunkIndex)
	{
		if (mChunks[chunkIndex]->isRootChunk())
		{
			rootChunkStop = chunkIndex+1;
			const uint32_t partIndex = (uint32_t)mChunks[chunkIndex]->mPartIndex;
			if (partIndex < mParts.size())
			{
				resizeCollision(mParts[partIndex]->mCollision, 0);
			}
		}
	}

	// Pass 2: build hulls for root-leaf chunks (always), and also for non-leaf
	// root chunks when their collision won't be aggregated from children.
	// The mCollision.size() == 0 test avoids rebuilding a shared part twice.
	for (uint32_t chunkIndex = 0; chunkIndex < rootChunkStop; ++chunkIndex)
	{
		if (mChunks[chunkIndex]->isRootLeafChunk() || (mChunks[chunkIndex]->isRootChunk() && !aggregateRootChunkParentCollision))
		{
			const uint32_t partIndex = (uint32_t)mChunks[chunkIndex]->mPartIndex;
			if (partIndex < mParts.size() && mParts[partIndex]->mCollision.size() == 0)
			{
				CollisionVolumeDesc volumeDesc = getVolumeDesc(desc, depth(chunkIndex));
				volumeDesc.mMaxVertexCount = volumeDesc.mMaxEdgeCount = volumeDesc.mMaxFaceCount = 0;	// Don't reduce hulls until the very end
				buildCollisionGeometryForPart(partIndex, volumeDesc);
			}
		}
	}

	if (aggregateRootChunkParentCollision)
	{
		// Aggregate collision volumes from root depth chunks to their parents, recursing to depth 0
		aggregateCollisionHullsFromRootChildren(0);
	}

	if (desc.mMaximumTrimming > 0.0f)
	{
		// Trim hulls up to root depth, one depth level at a time
		for (uint32_t processDepth = 1; (int32_t)processDepth <= maxDepth(); ++processDepth)
		{
			// Gather the root chunks at this depth and trim them together
			physx::Array<uint32_t> chunkIndexArray;
			for (uint32_t chunkIndex = 0; chunkIndex < rootChunkStop; ++chunkIndex)
			{
				if (mChunks[chunkIndex]->isRootChunk() && depth(chunkIndex) == processDepth)
				{
					chunkIndexArray.pushBack(chunkIndex);
				}
			}
			if (chunkIndexArray.size() > 0)
			{
				trimChunkHulls(*this, &chunkIndexArray[0], chunkIndexArray.size(), desc.mMaximumTrimming);
			}
		}
	}

	// Finally reduce the hulls
	reduceHulls(desc, true);
}
+
+void ExplicitHierarchicalMeshImpl::reduceHulls(const CollisionDesc& desc, bool inflated)
+{
+ physx::Array<bool> partReduced(mParts.size(), false);
+
+ for (uint32_t chunkIndex = 0; chunkIndex < mChunks.size(); ++chunkIndex)
+ {
+ uint32_t partIndex = (uint32_t)mChunks[chunkIndex]->mPartIndex;
+ if (partReduced[partIndex])
+ {
+ continue;
+ }
+ Part* part = mParts[partIndex];
+ CollisionVolumeDesc volumeDesc = getVolumeDesc(desc, depth(chunkIndex));
+ for (uint32_t hullIndex = 0; hullIndex < part->mCollision.size(); ++hullIndex)
+ {
+ // First try uninflated, then try with inflation (if requested). This may find a better reduction
+ part->mCollision[hullIndex]->reduceHull(volumeDesc.mMaxVertexCount, volumeDesc.mMaxEdgeCount, volumeDesc.mMaxFaceCount, false);
+ if (inflated)
+ {
+ part->mCollision[hullIndex]->reduceHull(volumeDesc.mMaxVertexCount, volumeDesc.mMaxEdgeCount, volumeDesc.mMaxFaceCount, true);
+ }
+ }
+ partReduced[partIndex] = true;
+ }
+}
+
void ExplicitHierarchicalMeshImpl::initializeDisplacementMapVolume(const nvidia::FractureSliceDesc& desc)
{
	// Forward the slice description to the displacement map volume's own setup
	mDisplacementMapVolume.init(desc);
}
+
+}
+} // namespace nvidia::apex
+
+namespace FractureTools
+{
// Factory for the internal hierarchical mesh implementation; the caller
// destroys it via ExplicitHierarchicalMesh::release() (which deletes the object)
ExplicitHierarchicalMesh* createExplicitHierarchicalMesh()
{
	return PX_NEW(ExplicitHierarchicalMeshImpl)();
}
+
// Factory for a part convex hull proxy, exposed through the
// implementation's ConvexHull interface type
ExplicitHierarchicalMeshImpl::ConvexHull* createExplicitHierarchicalMeshConvexHull()
{
	return PX_NEW(PartConvexHullProxy)();
}
+} // namespace FractureTools
+
+#endif // !defined(WITHOUT_APEX_AUTHORING)
+
+//#ifdef _MANAGED
+//#pragma managed(pop)
+//#endif
diff --git a/APEX_1.4/shared/internal/src/authoring/Noise.h b/APEX_1.4/shared/internal/src/authoring/Noise.h
new file mode 100644
index 00000000..df5865bb
--- /dev/null
+++ b/APEX_1.4/shared/internal/src/authoring/Noise.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef NOISE_H
+#define NOISE_H
+
+#include "authoring/ApexCSGMath.h"
+
+#include "NoiseUtils.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+
+class UserRandom;
+
+/**
+ Provides Perlin noise sampling across multiple dimensions and for different data types
+*/
template<typename T, int SampleSize = 1024, int D = 3, class VecType = Vec<T, D> >
class PerlinNoise
{
public:
	// rnd:       random source used to build the permutation and gradient tables (reference is kept, not owned)
	// octaves:   number of noise layers summed per sample
	// frequency: scale applied to the sample point before the first octave
	// amplitude: weight of the first octave
	// Tables are built lazily on the first sample() call (or by reset()).
	PerlinNoise(UserRandom& rnd, int octaves = 1, T frequency = 1., T amplitude = 1.)
		: mRnd(rnd),
		mOctaves(octaves),
		mFrequency(frequency),
		mAmplitude(amplitude),
		mbInit(false)
	{

	}

	// Change the noise parameters and (re)build the permutation/gradient tables.
	// Rebuilding consumes values from the random source given at construction.
	void reset(int octaves = 1, T frequency = (T)1., T amplitude = (T)1.)
	{
		mOctaves = octaves;
		mFrequency = frequency;
		mAmplitude = amplitude;
		init();
	}

	// Evaluate the multi-octave noise at the given point
	T sample(const VecType& point)
	{
		return perlinNoise(point);
	}

private:
	PerlinNoise& operator=(const PerlinNoise&);	// non-assignable (holds a reference member)

	// Sum mOctaves noise layers; each successive octave doubles the sample
	// frequency and halves the amplitude
	T perlinNoise(VecType point)
	{
		// Lazy table initialization on first use
		if (!mbInit)
			init();

		const int octaves = mOctaves;
		const T frequency = mFrequency;
		T amplitude = mAmplitude;
		T result = (T)0;

		point *= frequency;

		for (int i = 0; i < octaves; ++i)
		{
			result += noiseSample<T, SampleSize>(point, p, g) * amplitude;
			point *= (T)2.0;
			amplitude *= (T)0.5;
		}

		return result;
	}

	// Build the permutation table p (a shuffle of 0..SampleSize-1) and the
	// table g of normalized random gradient vectors, then mirror the first
	// SampleSize+2 entries of both tables past the end so that the lookups in
	// noiseSample() can index beyond SampleSize without explicit wrapping
	void init(void)
	{
		mbInit = true;

		unsigned i, j;
		int k;

		for (i = 0 ; i < (unsigned)SampleSize; i++)
		{
			p[i] = (int)i;
			// Random gradient components in [-1, 1), then normalized
			for (j = 0; j < D; ++j)
				g[i][j] = (T)((mRnd.getInt() % (SampleSize + SampleSize)) - SampleSize) / SampleSize;
			g[i].normalize();
		}

		// Shuffle the permutation table with random pairwise swaps
		while (--i)
		{
			k = p[i];
			j = (unsigned)mRnd.getInt() % SampleSize;
			p[i] = p[j];
			p[j] = k;
		}

		// Duplicate the table heads past index SampleSize
		for (i = 0 ; i < SampleSize + 2; ++i)
		{
			p [(unsigned)SampleSize + i] = p[i];
			for (j = 0; j < D; ++j)
				g[(unsigned)SampleSize + i][j] = g[i][j];
		}

	}

	UserRandom& mRnd;	// external random source (not owned)
	int mOctaves;		// octaves summed per sample
	T mFrequency;		// base frequency applied to input points
	T mAmplitude;		// amplitude of the first octave

	// Permutation vector
	int p[(unsigned)(SampleSize + SampleSize + 2)];
	// Gradient vector
	VecType g[(unsigned)(SampleSize + SampleSize + 2)];

	bool mbInit;		// true once p and g have been built
};
+
+}
+
+#endif /* #ifndef WITHOUT_APEX_AUTHORING */
+
+#endif /* #ifndef NOISE_H */
+
diff --git a/APEX_1.4/shared/internal/src/authoring/NoiseUtils.h b/APEX_1.4/shared/internal/src/authoring/NoiseUtils.h
new file mode 100644
index 00000000..e284b616
--- /dev/null
+++ b/APEX_1.4/shared/internal/src/authoring/NoiseUtils.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2008-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+
+#ifndef NOISE_UTILS_H
+#define NOISE_UTILS_H
+
+#include "authoring/ApexCSGMath.h"
+
+#ifndef WITHOUT_APEX_AUTHORING
+
+namespace ApexCSG
+{
+
+template<typename T>
+PX_INLINE T fade(T t) { return t * t * t * (t * (t * (T)6.0 - (T)15.0) + (T)10.0); }
+
+template<typename T>
+PX_INLINE T lerp(T t, T a, T b) { return a + t * (b - a); }
+
// Computes, for coordinate axis i of 'point', the two wrapped lattice indices
// (b0, b1) and the fractional offsets to each lattice point (r0, r1).
// t is scratch output holding the biased coordinate.
// NOTE(review): the 0x1000 bias makes the (int) truncation act like floor,
// which presumably assumes coordinates > -0x1000 — confirm against callers.
template<typename T, class VecT, int SampleSize>
PX_INLINE void setup(int i, VecT& point, T& t, int& b0, int& b1, T& r0, T& r1)
{
	t = point[i] + (0x1000);
	// Wrap into the table; requires SampleSize to be a power of two
	b0 = ((int)t) & (SampleSize-1);
	b1 = (b0+1) & (SampleSize-1);
	// Fractional position within the cell, and its offset from the next lattice point
	r0 = t - (int)t;
	r1 = r0 - 1.0f;
}
+
// Dot product of the offset (rx, ry) with the 2-D gradient q
template<typename T, class VecT>
PX_INLINE T at2(const T& rx, const T& ry, const VecT& q)
{
	return rx * q[0] + ry * q[1];
}
+
// Dot product of the offset (rx, ry, rz) with the 3-D gradient q
template<typename T, class VecT>
PX_INLINE T at3(const T& rx, const T& ry, const T& rz, const VecT& q)
{
	return rx * q[0] + ry * q[1] + rz * q[2];
}
+
+///////////////////////////////////////////////////////////////////////////
+
// 1-D gradient-noise sample. p is the permutation table and g the gradient
// table, both sized/duplicated as built by PerlinNoise::init().
template<typename T, int SampleSize>
T noiseSample(ApexCSG::Vec<T, 1> point, int* p, ApexCSG::Vec<T,1>* g)
{
	int bx0, bx1;
	T rx0, rx1, sx, t, u, v;

	// Lattice indices and fractional offsets along x (t is scratch)
	setup<T,Vec<T,1>,SampleSize>(0, point,t, bx0,bx1, rx0,rx1);

	// Smoothed interpolation weight
	sx = fade(rx0);

	// Gradient contributions at the two surrounding lattice points
	u = rx0 * g[ p[ bx0 ] ];
	v = rx1 * g[ p[ bx1 ] ];

	return lerp(sx, u, v);
}
+
// 2-D gradient-noise sample: hashes the four surrounding lattice corners
// through the permutation table p, dots each corner's gradient (from g) with
// the offset to the sample point, and bilinearly interpolates with faded weights.
template<typename T, int SampleSize>
T noiseSample(Vec<T,2> point, int* p, Vec<T,2>* g)
{
	int bx0, bx1, by0, by1, b00, b10, b01, b11;
	T rx0, rx1, ry0, ry1, sx, sy, a, b, t, u, v;
	Vec<T,2> q;
	int i, j;

	// Lattice indices and fractional offsets along each axis (t is scratch)
	setup<T,Vec<T,2>,SampleSize>(0, point,t, bx0,bx1, rx0,rx1);
	setup<T,Vec<T,2>,SampleSize>(1, point,t, by0,by1, ry0,ry1);

	// Hash the corner indices through the permutation table
	i = p[bx0];
	j = p[bx1];

	b00 = p[i + by0];
	b10 = p[j + by0];
	b01 = p[i + by1];
	b11 = p[j + by1];

	// Smoothed interpolation weights
	sx = fade(rx0);
	sy = fade(ry0);

	// Interpolate corner gradient contributions along x at y = by0...
	q = g[b00];
	u = at2(rx0,ry0,q);
	q = g[b10];
	v = at2(rx1,ry0,q);
	a = lerp(sx, u, v);

	// ...and at y = by1...
	q = g[b01];
	u = at2(rx0,ry1,q);
	q = g[b11];
	v = at2(rx1,ry1,q);
	b = lerp(sx, u, v);

	// ...then along y
	return lerp(sy, a, b);
}
+
// 3-D gradient-noise sample: hashes the eight surrounding lattice corners
// through the permutation table p, dots each corner's gradient (from g) with
// the offset to the sample point, and trilinearly interpolates with faded weights.
template<typename T, int SampleSize>
T noiseSample(Vec<T,3> point, int* p, Vec<T,3>* g)
{
	int bx0, bx1, by0, by1, bz0, bz1, b00, b10, b01, b11;
	T rx0, rx1, ry0, ry1, rz0, rz1, sy, sz, a, b, c, d, t, u, v;
	Vec<T,3> q;
	int i, j;

	// Lattice indices and fractional offsets along each axis (t is scratch here)
	setup<T,Vec<T,3>,SampleSize>(0, point,t, bx0,bx1, rx0,rx1);
	setup<T,Vec<T,3>,SampleSize>(1, point,t, by0,by1, ry0,ry1);
	setup<T,Vec<T,3>,SampleSize>(2, point,t, bz0,bz1, rz0,rz1);

	// Hash the corner indices through the permutation table
	i = p[ bx0 ];
	j = p[ bx1 ];

	b00 = p[ i + by0 ];
	b10 = p[ j + by0 ];
	b01 = p[ i + by1 ];
	b11 = p[ j + by1 ];

	// Smoothed interpolation weights; note t is reused as the x-axis weight
	t = fade(rx0);
	sy = fade(ry0);
	sz = fade(rz0);

	// Interpolate along x, then y, on the z = bz0 face...
	q = g[ b00 + bz0 ] ; u = at3(rx0,ry0,rz0,q);
	q = g[ b10 + bz0 ] ; v = at3(rx1,ry0,rz0,q);
	a = lerp(t, u, v);

	q = g[ b01 + bz0 ] ; u = at3(rx0,ry1,rz0,q);
	q = g[ b11 + bz0 ] ; v = at3(rx1,ry1,rz0,q);
	b = lerp(t, u, v);

	c = lerp(sy, a, b);

	// ...and on the z = bz1 face...
	q = g[ b00 + bz1 ] ; u = at3(rx0,ry0,rz1,q);
	q = g[ b10 + bz1 ] ; v = at3(rx1,ry0,rz1,q);
	a = lerp(t, u, v);

	q = g[ b01 + bz1 ] ; u = at3(rx0,ry1,rz1,q);
	q = g[ b11 + bz1 ] ; v = at3(rx1,ry1,rz1,q);
	b = lerp(t, u, v);

	d = lerp(sy, a, b);

	// ...then along z
	return lerp(sz, c, d);
}
+
+}
+
+#endif /* #ifndef WITHOUT_APEX_AUTHORING */
+
+#endif /* #ifndef NOISE_UTILS_H */ \ No newline at end of file