aboutsummaryrefslogtreecommitdiff
path: root/PhysX_3.4/Source/PhysXCooking/src/mesh
diff options
context:
space:
mode:
authorgit perforce import user <a@b>2016-10-25 12:29:14 -0600
committerSheikh Dawood Abdul Ajees <Sheikh Dawood Abdul Ajees>2016-10-25 18:56:37 -0500
commit3dfe2108cfab31ba3ee5527e217d0d8e99a51162 (patch)
treefa6485c169e50d7415a651bf838f5bcd0fd3bfbd /PhysX_3.4/Source/PhysXCooking/src/mesh
downloadphysx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.tar.xz
physx-3.4-3dfe2108cfab31ba3ee5527e217d0d8e99a51162.zip
Initial commit:
PhysX 3.4.0 Update @ 21294896 APEX 1.4.0 Update @ 21275617 [CL 21300167]
Diffstat (limited to 'PhysX_3.4/Source/PhysXCooking/src/mesh')
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.cpp29
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.h337
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.cpp84
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.h35
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/QuickSelect.h114
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.cpp893
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.h51
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.cpp1443
-rw-r--r--PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.h120
9 files changed, 3106 insertions, 0 deletions
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.cpp b/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.cpp
new file mode 100644
index 00000000..974e1faa
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.cpp
@@ -0,0 +1,29 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.h
new file mode 100644
index 00000000..a41889ed
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/GrbTriangleMeshCooking.h
@@ -0,0 +1,337 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_COLLISION_GRBTRIANGLEMESHCOOKING
+#define PX_COLLISION_GRBTRIANGLEMESHCOOKING
+
+#include "GuMeshData.h"
+#include "cooking/PxCooking.h"
+
+namespace physx
+{
+ namespace Gu
+ {
+
+ }
+
+// TODO avoroshilov: remove duplicate definitions
+static const PxU32 BOUNDARY = 0xffffffff;
+static const PxU32 NONCONVEX_FLAG = 0x80000000;
+
+// Maps one canonicalized vertex-index edge (edgeId0 <= edgeId1) to the
+// triangle that owns it. Three entries are emitted per triangle and then
+// sorted so all triangles sharing an edge become contiguous.
+struct EdgeTriLookup
+{
+	PxU32 edgeId0, edgeId1;	// edge endpoints, stored with edgeId0 <= edgeId1
+	PxU32 triId;			// owning triangle index
+
+	// Lexicographic (edgeId0, edgeId1) ordering; triId does not participate.
+	bool operator < (const EdgeTriLookup& edge1) const
+	{
+		if (edgeId0 != edge1.edgeId0)
+			return edgeId0 < edge1.edgeId0;
+		return edgeId1 < edge1.edgeId1;
+	}
+
+	// Non-strict variant used by the lower-biased binary search.
+	bool operator <=(const EdgeTriLookup& edge1) const
+	{
+		if (edgeId0 != edge1.edgeId0)
+			return edgeId0 < edge1.edgeId0;
+		return edgeId1 <= edge1.edgeId1;
+	}
+};
+
+
+// Lower-biased binary search over a sorted EdgeTriLookup array: returns the
+// index of the last element that is <= value (or 0 when value precedes the
+// whole array). The caller verifies the element at the returned index
+// actually matches before using it.
+static PxU32 binarySearch(const EdgeTriLookup* __restrict data, const PxU32 numElements, const EdgeTriLookup& value)
+{
+	PxU32 lo = 0;
+	PxU32 hi = numElements;
+
+	// Invariant: answer index is always in [lo, hi); stop when one candidate remains.
+	while (hi - lo > 1)
+	{
+		const PxU32 mid = (lo + hi) / 2;
+		if (value < data[mid])			// data[mid] > value: discard upper half
+			hi = mid;
+		else							// data[mid] <= value: keep lower bound
+			lo = mid;
+	}
+
+	return lo;
+}
+
+// slightly different behavior from collide2: boundary edges are filtered out
+
+// Finds the triangle adjacent to 'triangleIndex' across edge (i0, i1).
+// Returns:
+//   BOUNDARY                    - no other triangle shares the edge;
+//   NONCONVEX_FLAG | triId      - a sharing triangle has its third vertex on or
+//                                 above 'plane' (edge is non-convex);
+//   triId                       - otherwise, the sharing triangle whose normal
+//                                 is most aligned with 'plane' (largest cosine).
+// 'triLookups' must be the sorted per-edge lookup array built by buildAdjacencies.
+static PxU32 findAdjacent(const PxVec3* triVertices, const PxVec3* triNormals, const uint3* triIndices,
+	PxU32 nbTris, PxU32 i0, PxU32 i1, const PxPlane& plane,
+	EdgeTriLookup* triLookups, PxU32 triangleIndex)
+{
+	PxU32 result = BOUNDARY;
+	PxReal bestCos = -FLT_MAX;
+
+	// Canonicalize the query edge the same way the lookup table was built.
+	EdgeTriLookup lookup;
+	lookup.edgeId0 = PxMin(i0, i1);
+	lookup.edgeId1 = PxMax(i0, i1);
+
+	PxU32 startIndex = binarySearch(triLookups, nbTris * 3, lookup);
+
+	// binarySearch lands on the LAST matching entry; rewind to the first one.
+	for (PxU32 a = startIndex; a > 0; --a)
+	{
+		if (triLookups[a - 1].edgeId0 == lookup.edgeId0 && triLookups[a - 1].edgeId1 == lookup.edgeId1)
+			startIndex = a - 1;
+		else
+			break;
+	}
+
+	// Walk the contiguous run of entries sharing this edge.
+	for (PxU32 a = startIndex; a < nbTris * 3; ++a)
+	{
+		EdgeTriLookup& edgeTri = triLookups[a];
+
+		if (edgeTri.edgeId0 != lookup.edgeId0 || edgeTri.edgeId1 != lookup.edgeId1)
+			break;
+
+		// Skip the query triangle itself.
+		if (edgeTri.triId == triangleIndex)
+			continue;
+
+		const uint3& triIdx = triIndices[edgeTri.triId];
+		PxU32 vIdx0 = triIdx.x;
+		PxU32 vIdx1 = triIdx.y;
+		PxU32 vIdx2 = triIdx.z;
+
+		// The third vertex is the one not on the shared edge; recovered by index
+		// arithmetic (sum of all three minus the two edge endpoints).
+		PxU32 other = vIdx0 + vIdx1 + vIdx2 - (i0 + i1);
+
+		// Third vertex on/above the query plane => non-convex edge; stop early.
+		if (plane.distance(triVertices[other]) >= 0)
+			return NONCONVEX_FLAG | edgeTri.triId;
+
+		// Otherwise keep the most coplanar neighbor seen so far.
+		PxReal c = plane.n.dot(triNormals[edgeTri.triId]);
+		if (c>bestCos)
+		{
+			bestCos = c;
+			result = edgeTri.triId;
+		}
+
+	}
+
+	return result;
+}
+
+
+// For every triangle, computes the indices of the triangles adjacent across
+// its three edges (x = edge v0-v1, y = edge v1-v2, z = edge v2-v0; see
+// findAdjacent for the BOUNDARY / NONCONVEX_FLAG encoding; w is unused).
+// Also fills tempNormalsPerTri_prealloc with per-triangle unit normals.
+static void buildAdjacencies(uint4* triAdjacencies, PxVec3* tempNormalsPerTri_prealloc, const PxVec3* triVertices, const uint3* triIndices, PxU32 nbTris)
+{
+	// One lookup entry per triangle edge (3 per triangle).
+	EdgeTriLookup* edgeLookups = reinterpret_cast<EdgeTriLookup*>(PX_ALLOC(sizeof(EdgeTriLookup) * nbTris * 3, PX_DEBUG_EXP("edgeLookups")));
+
+	for (PxU32 t = 0; t < nbTris; t++)
+	{
+		const uint3& triIdx = triIndices[t];
+		const PxU32 v[3] = { triIdx.x, triIdx.y, triIdx.z };
+
+		// Unit face normal from the triangle's edge cross product.
+		tempNormalsPerTri_prealloc[t] = (triVertices[v[1]] - triVertices[v[0]]).cross(triVertices[v[2]] - triVertices[v[0]]).getNormalized();
+
+		// Emit the three edges in canonical (min, max) form.
+		for (PxU32 e = 0; e < 3; e++)
+		{
+			const PxU32 a = v[e];
+			const PxU32 b = v[(e + 1) % 3];
+			EdgeTriLookup& entry = edgeLookups[t * 3 + e];
+			entry.edgeId0 = PxMin(a, b);
+			entry.edgeId1 = PxMax(a, b);
+			entry.triId = t;
+		}
+	}
+
+	// Sort so that all triangles sharing an edge become contiguous.
+	Ps::sort<EdgeTriLookup>(edgeLookups, PxU32(nbTris * 3));
+
+	for (PxU32 t = 0; t < nbTris; t++)
+	{
+		const uint3& triIdx = triIndices[t];
+		const PxU32 v0 = triIdx.x;
+		const PxU32 v1 = triIdx.y;
+		const PxU32 v2 = triIdx.z;
+
+		const PxPlane triPlane(triVertices[v0], tempNormalsPerTri_prealloc[t]);
+
+		uint4 triAdjIdx;
+		triAdjIdx.x = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, nbTris, v0, v1, triPlane, edgeLookups, t);
+		triAdjIdx.y = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, nbTris, v1, v2, triPlane, edgeLookups, t);
+		triAdjIdx.z = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, nbTris, v2, v0, triPlane, edgeLookups, t);
+		triAdjIdx.w = 0;
+
+		triAdjacencies[t] = triAdjIdx;
+	}
+
+	PX_FREE(edgeLookups);
+}
+
+// An adjacency entry denotes a non-convex edge when it is a real neighbor
+// (not BOUNDARY) and carries the NONCONVEX_FLAG bit set by findAdjacent.
+static bool isEdgeNonconvex(PxU32 adjEdgeIndex)
+{
+	if (adjEdgeIndex == BOUNDARY)
+		return false;
+	return (adjEdgeIndex & NONCONVEX_FLAG) != 0;
+}
+
+// Pass 1 of vertex-connectivity building: computes a per-vertex valency upper
+// bound (each convex/boundary edge contributes both endpoints, once per
+// incident triangle), the exclusive-prefix-sum start offsets, and the total
+// number of adjacency slots required (tempNumAdjVertices). vertValency is
+// zeroed on exit so pass 2 can reuse it as a per-vertex write cursor.
+PX_INLINE void buildVertexConnection_p1(PxU32 * vertValency, PxU32 * vertNeighborStart, PxU32 & tempNumAdjVertices, const float4 * /*triVertices*/, const uint4 * triIndices, const uint4 * triAdjacencies, PxU32 nbVerts, PxU32 nbTris)
+{
+	tempNumAdjVertices = 0;
+	memset(vertValency, 0, nbVerts*sizeof(PxU32));
+
+	// Count valency contributions; edge e of a triangle connects v[e] and v[(e+1)%3].
+	for (PxU32 t = 0; t < nbTris; t++)
+	{
+		const uint4 triIdx = triIndices[t];
+		const PxU32 v[3] = { triIdx.x, triIdx.y, triIdx.z };
+
+		const uint4 triAdjIdx = triAdjacencies[t];
+		const PxU32 adj[3] = { triAdjIdx.x, triAdjIdx.y, triAdjIdx.z };
+
+		for (PxU32 e = 0; e < 3; e++)
+		{
+			// Non-convex edges are excluded from the connectivity graph.
+			if (!isEdgeNonconvex(adj[e]))
+			{
+				++vertValency[v[e]];
+				++vertValency[v[(e + 1) % 3]];
+				tempNumAdjVertices += 2;
+			}
+		}
+	}
+
+	// Exclusive prefix sum: start offset of every vertex's neighbor range.
+	PxU32 offset = 0;
+	for (PxU32 i = 0; i < nbVerts; i++)
+	{
+		vertNeighborStart[i] = offset;
+		offset += vertValency[i];
+	}
+
+	// Reset so pass 2 can count actual (deduplicated) neighbors.
+	memset(vertValency, 0, nbVerts*sizeof(PxU32));
+}
+
+// Pass 2: fills vertNeighboringPairs_prealloc with deduplicated (vertex,
+// neighbor) pairs, grouped per vertex inside the ranges computed by pass 1.
+// vertValency (zeroed by pass 1) is used as the per-vertex write cursor and
+// ends up holding the true (deduplicated) valency. Unused slots keep the
+// 0xffffffff fill so pass 3 can skip them. Returns the number of pairs added.
+PX_INLINE PxU32 buildVertexConnection_p2(PxU32 * vertValency, PxU32 * vertNeighborStart, PxU32 * vertNeighboringPairs_prealloc, PxU32 tempNumAdjVertices, const float4 * /*triVertices*/, const uint4 * triIndices, const uint4 * triAdjacencies, PxU32 /*nbVerts*/, PxU32 nbTris)
+{
+	// Mark every pair slot as empty (0xffffffff sentinel in both components).
+	memset(vertNeighboringPairs_prealloc, 0xff, tempNumAdjVertices*2*sizeof(PxU32));
+
+	PxU32 newAdjVertsNum = 0;
+	for (PxU32 i = 0; i < nbTris; i++)
+	{
+		uint4 triIdx = triIndices[i];
+		PxU32 vi[3] =
+		{
+			triIdx.x,
+			triIdx.y,
+			triIdx.z
+		};
+		uint4 triAdjIdx = triAdjacencies[i];
+		PxU32 ta[3] =
+		{
+			triAdjIdx.x,
+			triAdjIdx.y,
+			triAdjIdx.z
+		};
+
+		// Edge tvi connects vi[tvi] -> vi[(tvi+1)%3]; same mapping as pass 1.
+		for (int tvi = 0; tvi < 3; ++tvi)
+		{
+			PxU32 curIdx = vi[tvi];
+			PxU32 nextIdx = vi[(tvi+1)%3];
+			if (!isEdgeNonconvex(ta[tvi]))
+			{
+				// Linear scan of curIdx's already-written neighbors to avoid
+				// inserting the same edge twice (it is visited once per
+				// incident triangle).
+				bool matchFound = false;
+				for (PxU32 valIdx = vertNeighborStart[curIdx], valIdxEnd = vertNeighborStart[curIdx] + vertValency[curIdx]; valIdx < valIdxEnd; ++valIdx)
+				{
+					if (vertNeighboringPairs_prealloc[valIdx*2+1] == nextIdx)
+					{
+						matchFound = true;
+						break;
+					}
+				}
+
+				if (!matchFound)
+				{
+					PxU32 curPairIdx;
+
+					// Record the edge symmetrically: curIdx -> nextIdx ...
+					curPairIdx = vertNeighborStart[curIdx] + vertValency[curIdx];
+					vertNeighboringPairs_prealloc[curPairIdx*2+0] = curIdx;
+					vertNeighboringPairs_prealloc[curPairIdx*2+1] = nextIdx;
+					++vertValency[curIdx];
+
+					// ... and nextIdx -> curIdx.
+					curPairIdx = vertNeighborStart[nextIdx] + vertValency[nextIdx];
+					vertNeighboringPairs_prealloc[curPairIdx*2+0] = nextIdx;
+					vertNeighboringPairs_prealloc[curPairIdx*2+1] = curIdx;
+					++vertValency[nextIdx];
+
+					newAdjVertsNum += 2;
+				}
+			}
+		}
+	}
+
+	return newAdjVertsNum;
+}
+
+// Pass 3: compacts the (vertex, neighbor) pair array from pass 2 into the
+// final vertNeighbors list, dropping the 0xffffffff-filled empty slots, and
+// rewrites vertNeighborStart so each vertex points at the first entry of its
+// compacted neighbor run. Relies on pass 2 having grouped all pairs of a
+// vertex contiguously within that vertex's preallocated range.
+// NOTE(review): vertices with no neighbors keep their stale pass-1 start
+// offset - presumably callers pair it with the (now zero) valency; confirm.
+PX_INLINE void buildVertexConnection_p3(PxU32 * vertNeighbors, PxU32 * /*vertValency*/, PxU32 * vertNeighborStart, PxU32 * vertNeighboringPairs_prealloc, PxU32 tempNumAdjVertices, PxU32 newNumAdjVertices, const float4 * /*triVertices*/, const uint4 * /*triIndices*/, const uint4 * /*triAdjacencies*/, PxU32 /*nbVerts*/, PxU32 /*nbTris*/)
+{
+	PX_UNUSED(newNumAdjVertices);
+	PxU32 prevVertex = 0xFFffFFff;
+	PxU32 writingIndex = 0;
+	for (PxU32 i = 0; i < tempNumAdjVertices; ++i)
+	{
+		// First component 0xffffffff marks an unused slot left by pass 2.
+		PxU32 curPairIdx0 = vertNeighboringPairs_prealloc[i*2+0];
+		if (curPairIdx0 == 0xFFffFFff)
+		{
+			continue;
+		}
+
+		PxU32 curPairIdx1 = vertNeighboringPairs_prealloc[i*2+1];
+		vertNeighbors[writingIndex] = curPairIdx1;
+		// A new source vertex begins here: record its compacted start offset.
+		if (curPairIdx0 != prevVertex)
+		{
+			vertNeighborStart[curPairIdx0] = writingIndex;
+		}
+		prevVertex = curPairIdx0;
+
+		++writingIndex;
+	}
+
+	// Every pair counted by pass 2 must have been emitted exactly once.
+	PX_ASSERT(writingIndex == newNumAdjVertices);
+}
+
+} // namespace physx
+
+#endif
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.cpp b/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.cpp
new file mode 100644
index 00000000..7215b1c7
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.cpp
@@ -0,0 +1,84 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxIO.h"
+#include "GuHeightField.h"
+#include "GuSerialize.h"
+
+using namespace physx;
+using namespace Gu;
+
+namespace physx
+{
+
+// Serializes a cooked height field to 'stream': a 'HFHF' header with the
+// format version, the HeightFieldData metadata, the AABB, the HeightField
+// bookkeeping members, and finally the sample array. 'endian' requests
+// byte-swapping of multi-byte fields. Returns false only if the header write
+// fails. The field order here is the wire format - it must match the
+// corresponding load path exactly; do not reorder.
+bool saveHeightField(const HeightField& hf, PxOutputStream& stream, bool endian)
+{
+	// write header
+	if(!writeHeader('H', 'F', 'H', 'F', PX_HEIGHTFIELD_VERSION, endian, stream))
+		return false;
+
+	const Gu::HeightFieldData& hfData = hf.getData();
+
+	// write mData members
+	writeDword(hfData.rows, endian, stream);
+	writeDword(hfData.columns, endian, stream);
+	writeFloat(hfData.rowLimit, endian, stream);
+	writeFloat(hfData.colLimit, endian, stream);
+	// nbColumns is written as a float - presumably a PxReal mirror of
+	// 'columns' kept for fast collision-time math; confirm against GuHeightField.h.
+	writeFloat(hfData.nbColumns, endian, stream);
+	writeFloat(hfData.thickness, endian, stream);
+	writeFloat(hfData.convexEdgeThreshold, endian, stream);
+	writeWord(hfData.flags, endian, stream);
+	writeDword(hfData.format, endian, stream);
+
+	// bounds: min xyz then max xyz
+	writeFloat(hfData.mAABB.getMin(0), endian, stream);
+	writeFloat(hfData.mAABB.getMin(1), endian, stream);
+	writeFloat(hfData.mAABB.getMin(2), endian, stream);
+	writeFloat(hfData.mAABB.getMax(0), endian, stream);
+	writeFloat(hfData.mAABB.getMax(1), endian, stream);
+	writeFloat(hfData.mAABB.getMax(2), endian, stream);
+
+	// write this-> members
+	writeDword(hf.mSampleStride, endian, stream);
+	writeDword(hf.mNbSamples, endian, stream);
+	writeFloat(hf.mMinHeight, endian, stream);
+	writeFloat(hf.mMaxHeight, endian, stream);
+
+	// write samples: height is endian-swapped as a 16-bit word (the PxU16
+	// cast preserves the signed bit pattern); the two one-byte material
+	// indices are written raw since single bytes need no swapping.
+	for(PxU32 i = 0; i < hf.mNbSamples; i++)
+	{
+		const PxHeightFieldSample& s = hfData.samples[i];
+		writeWord(PxU16(s.height), endian, stream);
+		stream.write(&s.materialIndex0, sizeof(s.materialIndex0));
+		stream.write(&s.materialIndex1, sizeof(s.materialIndex1));
+	}
+
+	return true;
+}
+
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.h
new file mode 100644
index 00000000..9e1e2ce4
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/HeightFieldCooking.h
@@ -0,0 +1,35 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef PX_HEIGHTFIELD_COOKING
+#define PX_HEIGHTFIELD_COOKING
+
+#include "GuHeightField.h"
+
+namespace physx
+{
+	// Serializes the cooked height field (header, metadata, bounds and sample
+	// array) to 'stream', byte-swapping multi-byte fields when 'endianSwap' is
+	// set. Returns false if the stream header could not be written.
+	// Implemented in HeightFieldCooking.cpp.
+	bool saveHeightField(const Gu::HeightField& hf, PxOutputStream& stream, bool endianSwap);
+}
+
+#endif // PX_HEIGHTFIELD_COOKING
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/QuickSelect.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/QuickSelect.h
new file mode 100644
index 00000000..7dddd554
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/QuickSelect.h
@@ -0,0 +1,114 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#ifndef QUICKSELECT_H
+#define QUICKSELECT_H
+
+#include "foundation/PxSimpleTypes.h"
+
+// Google "wikipedia QuickSelect" for algorithm explanation
+namespace physx { namespace quickSelect {
+
+
+ #define SWAP32(x, y) { PxU32 tmp = y; y = x; x = tmp; }
+
+ // left is the index of the leftmost element of the subarray
+ // right is the index of the rightmost element of the subarray (inclusive)
+ // number of elements in subarray = right-left+1
+	// Hoare-style partition step (Lomuto scheme): moves a[pivotIndex] into its
+	// final sorted position within [left, right] and returns that position.
+	// After the call, every element left of the returned index satisfies
+	// cmpLtEq(elem, pivot) and every element right of it satisfies
+	// cmpLtEq(pivot, elem). The trailing debug loops verify exactly that
+	// invariant (PX_ASSERT compiles out in release builds).
+	template<typename LtEq>
+	PxU32 partition(PxU32* PX_RESTRICT a, PxU32 left, PxU32 right, PxU32 pivotIndex, const LtEq& cmpLtEq)
+	{
+		PX_ASSERT(pivotIndex >= left && pivotIndex <= right);
+		PxU32 pivotValue = a[pivotIndex];
+		SWAP32(a[pivotIndex], a[right]) // Move pivot to end
+		PxU32 storeIndex = left;
+		for (PxU32 i = left; i < right; i++)  // left <= i < right
+			if (cmpLtEq(a[i], pivotValue))
+			{
+				SWAP32(a[i], a[storeIndex]);
+				storeIndex++;
+			}
+		SWAP32(a[storeIndex], a[right]); // Move pivot to its final place
+		// Debug-only postcondition checks.
+		for (PxU32 i = left; i < storeIndex; i++)
+			PX_ASSERT(cmpLtEq(a[i], a[storeIndex]));
+		for (PxU32 i = storeIndex+1; i <= right; i++)
+			PX_ASSERT(cmpLtEq(a[storeIndex], a[i]));
+		return storeIndex;
+	}
+
+ // left is the index of the leftmost element of the subarray
+ // right is the index of the rightmost element of the subarray (inclusive)
+ // number of elements in subarray = right-left+1
+ // recursive version
+	// Recursive QuickSelect: rearranges a[left..right] (inclusive) so that the
+	// k smallest elements (by cmpLtEq) occupy a[left..left+k-1], in no
+	// particular order. Average O(n); does not fully sort.
+	template<typename LtEq>
+	void quickFindFirstK(PxU32* PX_RESTRICT a, PxU32 left, PxU32 right, PxU32 k, const LtEq& cmpLtEq)
+	{
+		PX_ASSERT(k <= right-left+1);
+		if (right > left)
+		{
+			// select pivotIndex between left and right
+			PxU32 pivotIndex = (left + right) >> 1;
+			PxU32 pivotNewIndex = partition(a, left, right, pivotIndex, cmpLtEq);
+			// now all elements to the left of pivotNewIndex are < old value of a[pivotIndex] (bottom half values)
+			// Pivot landed past the k-th slot: the answer lies wholly in the
+			// left partition.
+			if (pivotNewIndex > left + k) // new condition
+				quickFindFirstK(a, left, pivotNewIndex-1, k, cmpLtEq);
+			// Pivot landed before the k-th slot: the first (pivotNewIndex-left+1)
+			// elements are settled; recurse right for the remaining
+			// k - (pivotNewIndex-left+1) elements.
+			if (pivotNewIndex < left + k)
+				quickFindFirstK(a, pivotNewIndex+1, right, k+left-pivotNewIndex-1, cmpLtEq);
+		}
+	}
+
+ // non-recursive version
+ template<typename LtEq>
+ void quickSelectFirstK(PxU32* PX_RESTRICT a, PxU32 left, PxU32 right, PxU32 k, const LtEq& cmpLtEq)
+ {
+ PX_ASSERT(k <= right-left+1);
+ for (;;)
+ {
+ PxU32 pivotIndex = (left+right) >> 1;
+ PxU32 pivotNewIndex = partition(a, left, right, pivotIndex, cmpLtEq);
+ PxU32 pivotDist = pivotNewIndex - left + 1;
+ if (pivotDist == k)
+ return;
+ else if (k < pivotDist)
+ {
+ PX_ASSERT(pivotNewIndex > 0);
+ right = pivotNewIndex - 1;
+ }
+ else
+ {
+ k = k - pivotDist;
+ left = pivotNewIndex+1;
+ }
+ }
+ }
+
+} } // namespace quickSelect, physx
+
+#endif
+
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.cpp b/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.cpp
new file mode 100644
index 00000000..08ab1a1b
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.cpp
@@ -0,0 +1,893 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "foundation/PxBounds3.h"
+#include "CmPhysXCommon.h"
+#include "RTreeCooking.h"
+#include "PsSort.h"
+#include "PsMathUtils.h"
+#include "PsAllocator.h"
+#include "PsVecMath.h"
+#include "PxTolerancesScale.h"
+#include "QuickSelect.h"
+#include "PsInlineArray.h"
+#include "GuRTree.h"
+
+#define PRINT_RTREE_COOKING_STATS 0 // AP: keeping this frequently used macro for diagnostics/benchmarking
+
+#if PRINT_RTREE_COOKING_STATS
+#include <stdio.h>
+#endif
+
+using namespace physx::Gu;
+using namespace physx::shdfnd;
+using namespace physx::shdfnd::aos;
+
+namespace physx
+{
+
+// Intermediate non-quantized representation for RTree node in a page (final format is SIMD transposed page)
+struct RTreeNodeNQ
+{
+	PxBounds3 bounds; // scalar (non-SIMD) AABB of everything below this node
+	PxI32 childPageFirstNodeIndex; // relative to the beginning of all build tree nodes array
+	PxI32 leafCount; // -1 for empty nodes, 0 for non-terminal nodes, number of enclosed tris if non-zero (LeafTriangles), also means a terminal node
+
+	struct U {}; // selector struct for uninitialized constructor
+	RTreeNodeNQ(U) {} // uninitialized constructor (skips bounds init for performance)
+	// default constructor: empty bounds, no child page, non-terminal
+	RTreeNodeNQ() : bounds(PxBounds3::empty()), childPageFirstNodeIndex(-1), leafCount(0) {}
+};
+
+// SIMD version of bounds class
+struct PxBounds3V
+{
+	struct U {}; // selector struct for uninitialized constructor
+	Vec3V mn, mx; // SIMD min/max corners of the AABB
+	PxBounds3V(Vec3VArg mn_, Vec3VArg mx_) : mn(mn_), mx(mx_) {}
+	PxBounds3V(U) {} // uninitialized constructor
+
+	// box dimensions (mx - mn)
+	PX_FORCE_INLINE Vec3V getExtents() const { return V3Sub(mx, mn); }
+	// grow this box to also enclose 'other'
+	PX_FORCE_INLINE void include(const PxBounds3V& other) { mn = V3Min(mn, other.mn); mx = V3Max(mx, other.mx); }
+
+	// convert vector extents to PxVec3
+	PX_FORCE_INLINE const PxVec3 getMinVec3() const { PxVec3 ret; V3StoreU(mn, ret); return ret; }
+	PX_FORCE_INLINE const PxVec3 getMaxVec3() const { PxVec3 ret; V3StoreU(mx, ret); return ret; }
+};
+
+// forward declaration - the implementation is further down in this file
+static void buildFromBounds(
+	Gu::RTree& resultTree, const PxBounds3V* allBounds, PxU32 numBounds,
+	Array<PxU32>& resultPermute, RTreeCooker::RemapCallback* rc, Vec3VArg allMn, Vec3VArg allMx,
+	PxReal sizePerfTradeOff, PxMeshCookingHint::Enum hint);
+
+/////////////////////////////////////////////////////////////////////////
+// Computes one conservatively inflated AABB per input triangle (accepting either 16-bit or
+// 32-bit index streams), accumulates the overall mesh bound, and delegates the actual tree
+// construction to buildFromBounds.
+void RTreeCooker::buildFromTriangles(
+	Gu::RTree& result, const PxVec3* verts, PxU32 numVerts, const PxU16* tris16, const PxU32* tris32, PxU32 numTris,
+	Array<PxU32>& resultPermute, RTreeCooker::RemapCallback* rc, PxReal sizePerfTradeOff01, PxMeshCookingHint::Enum hint)
+{
+	PX_UNUSED(numVerts); // only referenced by the range assert below in checked builds
+	Array<PxBounds3V> allBounds;
+	allBounds.reserve(numTris);
+	Vec3V allMn = Vec3V_From_FloatV(FMax()), allMx = Vec3V_From_FloatV(FNegMax());
+	Vec3V eps = V3Splat(FLoad(5e-4f)); // AP scaffold: use PxTolerancesScale here?
+
+	// build RTree AABB bounds from triangles, conservative bound inflation is also performed here
+	for(PxU32 i = 0; i < numTris; i ++)
+	{
+		// fetch the triangle's three vertex indices from whichever index stream was provided
+		PxU32 i0, i1, i2;
+		PxU32 i3 = i*3;
+		if(tris16)
+		{
+			i0 = tris16[i3]; i1 = tris16[i3+1]; i2 = tris16[i3+2];
+		} else
+		{
+			i0 = tris32[i3]; i1 = tris32[i3+1]; i2 = tris32[i3+2];
+		}
+		PX_ASSERT_WITH_MESSAGE(i0 < numVerts && i1 < numVerts && i2 < numVerts ,"Input mesh triangle's vertex index exceeds specified numVerts.");
+		Vec3V v0 = V3LoadU(verts[i0]), v1 = V3LoadU(verts[i1]), v2 = V3LoadU(verts[i2]);
+		Vec3V mn = V3Sub(V3Min(V3Min(v0, v1), v2), eps); // min over 3 verts, subtract eps to inflate
+		Vec3V mx = V3Add(V3Max(V3Max(v0, v1), v2), eps); // max over 3 verts, add eps to inflate
+		allMn = V3Min(allMn, mn); allMx = V3Max(allMx, mx); // accumulate whole-mesh bound
+		allBounds.pushBack(PxBounds3V(mn, mx));
+	}
+
+	// delegate the tree build to the bounds-based builder; rc is forwarded unchanged
+	buildFromBounds(result, allBounds.begin(), numTris, resultPermute, rc, allMn, allMx, sizePerfTradeOff01, hint);
+}
+
+/////////////////////////////////////////////////////////////////////////
+// Fast but lower quality 4-way split sorting using repeated application of quickselect
+
+// comparator template struct for sorting bounds centers given a coordinate index (x,y,z=0,1,2)
+struct BoundsLTE
+{
+	PxU32 coordIndex; // which coordinate to compare on (x,y,z = 0,1,2)
+	const PxVec3* PX_RESTRICT boundCenters; // AP: precomputed centers are faster than recomputing the centers
+	BoundsLTE(PxU32 coordIndex_, const PxVec3* boundCenters_)
+		: coordIndex(coordIndex_), boundCenters(boundCenters_)
+	{}
+
+	// non-strict (<=) comparison - matches the cmpLtEq contract expected by quickSelect
+	PX_FORCE_INLINE bool operator()(const PxU32 & idx1, const PxU32 & idx2) const
+	{
+		PxF32 center1 = boundCenters[idx1][coordIndex];
+		PxF32 center2 = boundCenters[idx2][coordIndex];
+		return (center1 <= center2);
+	}
+};
+
+// ======================================================================
+// Quick sorting method
+// recursive sorting procedure:
+// 1. find min and max extent along each axis for the current cluster
+// 2. split input cluster into two 3 times using quickselect, splitting off a quarter of the initial cluster size each time
+// 3. the axis is potentialy different for each split using the following
+// approximate splitting heuristic - reduce max length by some estimated factor to encourage split along other axis
+// since we cut off between a quarter to a half of elements in this direction per split
+// the reduction for first split should be *0.75f but we use 0.8
+// to account for some node overlap. This is somewhat of an arbitrary choice and there's room for improvement.
+// 4. recurse on new clusters (goto step 1)
+//
+struct SubSortQuick
+{
+	static const PxReal reductionFactors[RTREE_N-1]; // per-split axis-length reduction factors (see heuristic note above)
+
+	enum { NTRADEOFF = 9 };
+	static const PxU32 stopAtTrisPerLeaf1[NTRADEOFF]; // presets for PxCookingParams::meshSizePerformanceTradeoff implementation
+
+	const PxU32* permuteEnd;   // one past the end of the permutation array (for bounds asserts)
+	const PxU32* permuteStart; // beginning of the permutation array (leaf indices are stored relative to it)
+	const PxBounds3V* allBounds;
+	Array<PxVec3> boundCenters; // per-bound (min+max), i.e. twice the center - constant scale, so ordering is unaffected
+	PxU32 maxBoundsPerLeafPage; // leaf termination threshold selected from stopAtTrisPerLeaf1
+
+	// initialize the context for the sorting routine
+	SubSortQuick(PxU32* permute, const PxBounds3V* allBounds_, PxU32 allBoundsSize, PxReal sizePerfTradeOff01)
+		: allBounds(allBounds_)
+	{
+		permuteEnd = permute + allBoundsSize;
+		permuteStart = permute;
+		PxU32 boundsCount = allBoundsSize;
+		boundCenters.reserve(boundsCount); // AP - measured that precomputing centers helps with perf significantly (~20% on 1k verts)
+		for(PxU32 i = 0; i < boundsCount; i++)
+			boundCenters.pushBack( allBounds[i].getMinVec3() + allBounds[i].getMaxVec3() );
+		// map sizePerfTradeOff01 from [0,1] onto a preset index, clamped to the table
+		PxU32 iTradeOff = PxMin<PxU32>( PxU32(PxMax<PxReal>(0.0f, sizePerfTradeOff01)*NTRADEOFF), NTRADEOFF-1 );
+		maxBoundsPerLeafPage = stopAtTrisPerLeaf1[iTradeOff];
+	}
+
+	// implements the sorting/splitting procedure
+	// Recursively partitions the permute[0..clusterSize) index range into RTREE_N children
+	// using repeated quickselect along the (heuristically reduced) longest axis; appends
+	// RTREE_N nodes to resultTree per recursion level and returns the subtree bound.
+	void sort4(
+		PxU32* PX_RESTRICT permute, const PxU32 clusterSize, // beginning and size of current recursively processed cluster
+		Array<RTreeNodeNQ>& resultTree, PxU32& maxLevels,
+		PxBounds3V& subTreeBound, PxU32 level = 0)
+	{
+		if(level == 0)
+			maxLevels = 1;
+		else
+			maxLevels = PxMax(maxLevels, level+1);
+
+		PX_ASSERT(permute + clusterSize <= permuteEnd);
+		PX_ASSERT(maxBoundsPerLeafPage >= RTREE_N-1);
+
+		const PxU32 cluster4 = PxMax<PxU32>(clusterSize/RTREE_N, 1); // target size of each of the first RTREE_N-1 sub-clusters
+
+		PX_ASSERT(clusterSize > 0);
+		// find min and max world bound for current cluster
+		Vec3V mx = allBounds[permute[0]].mx, mn = allBounds[permute[0]].mn; PX_ASSERT(permute[0] < boundCenters.size());
+		for(PxU32 i = 1; i < clusterSize; i ++)
+		{
+			PX_ASSERT(permute[i] < boundCenters.size());
+			mx = V3Max(mx, allBounds[permute[i]].mx);
+			mn = V3Min(mn, allBounds[permute[i]].mn);
+		}
+		PX_ALIGN_PREFIX(16) PxReal maxElem[4] PX_ALIGN_SUFFIX(16);
+		V3StoreA(V3Sub(mx, mn), *reinterpret_cast<PxVec3*>(maxElem)); // compute the dimensions and store into a scalar maxElem array
+
+		// split along the longest axis
+		const PxU32 maxDiagElement = PxU32(maxElem[0] > maxElem[1] && maxElem[0] > maxElem[2] ? 0 : (maxElem[1] > maxElem[2] ? 1 : 2));
+		BoundsLTE cmpLte(maxDiagElement, boundCenters.begin());
+
+		const PxU32 startNodeIndex = resultTree.size();
+		resultTree.resizeUninitialized(startNodeIndex+RTREE_N); // at each recursion level we add 4 nodes to the tree
+
+		PxBounds3V childBound( (PxBounds3V::U()) ); // start off uninitialized for performance
+		const PxI32 leftover = PxMax<PxI32>(PxI32(clusterSize - cluster4*(RTREE_N-1)), 0); // remainder for the last sub-cluster
+		PxU32 totalCount = 0;
+		for(PxU32 i = 0; i < RTREE_N; i++)
+		{
+			// split off cluster4 count nodes out of the entire cluster for each i
+			const PxU32 clusterOffset = cluster4*i;
+			PxU32 count1; // cluster4 or leftover depending on whether it's the last cluster
+			if(i < RTREE_N-1)
+			{
+				// only need to do quickSelect for the first pagesize-1 clusters
+				if(clusterOffset <= clusterSize-1)
+				{
+					quickSelect::quickSelectFirstK(permute, clusterOffset, clusterSize-1, cluster4, cmpLte);
+					// approximate heuristic - reduce max length by some estimated factor to encourage split along other axis
+					// since we cut off a quarter of elements in this direction the reduction should be *0.75f but we use 0.8
+					// to account for some node overlap. This is somewhat of an arbitrary choice though
+					maxElem[cmpLte.coordIndex] *= reductionFactors[i];
+					// recompute cmpLte.coordIndex from updated maxElements
+					cmpLte.coordIndex = PxU32(maxElem[0] > maxElem[1] && maxElem[0] > maxElem[2] ? 0 : (maxElem[1] > maxElem[2] ? 1 : 2));
+				}
+				count1 = cluster4;
+			} else
+			{
+				count1 = PxU32(leftover);
+				// verify that leftover + sum of previous clusters adds up to clusterSize or leftover is 0
+				// leftover can be 0 if clusterSize<RTREE_N, this is generally rare, can happen for meshes with < RTREE_N tris
+				PX_ASSERT(leftover == 0 || cluster4*i + count1 == clusterSize);
+			}
+
+			RTreeNodeNQ& curNode = resultTree[startNodeIndex+i];
+
+			totalCount += count1; // accumulate total node count
+			if(count1 <= maxBoundsPerLeafPage) // terminal page according to specified maxBoundsPerLeafPage
+			{
+				if(count1 && totalCount <= clusterSize)
+				{
+					// this will be true most of the time except when the total number of triangles in the mesh is < PAGESIZE
+					curNode.leafCount = PxI32(count1);
+					curNode.childPageFirstNodeIndex = PxI32(clusterOffset + PxU32(permute-permuteStart)); // leaf start relative to permuteStart
+					childBound = allBounds[permute[clusterOffset+0]];
+					for(PxU32 i1 = 1; i1 < count1; i1++)
+					{
+						const PxBounds3V& bnd = allBounds[permute[clusterOffset+i1]];
+						childBound.include(bnd);
+					}
+				} else
+				{
+					// since we are required to have PAGESIZE nodes per page for simd, we fill any leftover with empty nodes
+					// we should only hit this if the total number of triangles in the mesh is < PAGESIZE
+					childBound.mn = childBound.mx = V3Zero(); // shouldn't be necessary but setting just in case
+					curNode.bounds.setEmpty();
+					curNode.leafCount = -1;
+					curNode.childPageFirstNodeIndex = -1; // using -1 for empty node
+				}
+			} else // not a terminal page, recurse on count1 nodes cluster
+			{
+				curNode.childPageFirstNodeIndex = PxI32(resultTree.size());
+				curNode.leafCount = 0;
+				sort4(permute+cluster4*i, count1, resultTree, maxLevels, childBound, level+1);
+			}
+			if(i == 0)
+				subTreeBound = childBound; // initialize subTreeBound with first childBound
+			else
+				subTreeBound.include(childBound); // expand subTreeBound with current childBound
+
+			// cannot reuse curNode since the reference may change due to resizing in the recursive call, need to recompute the pointer
+			RTreeNodeNQ& curNode1 = resultTree[startNodeIndex+i];
+			curNode1.bounds.minimum = childBound.getMinVec3(); // update node bounds using recursively computed childBound
+			curNode1.bounds.maximum = childBound.getMaxVec3();
+		}
+	}
+};
+
+// heuristic size reduction factors for splitting heuristic (see how it's used above)
+const PxReal SubSortQuick::reductionFactors[RTREE_N-1] = {0.8f, 0.7f, 0.6f}; // 3 entries - assumes RTREE_N == 4; TODO confirm if RTREE_N ever changes
+
+// sizePerf trade-off presets for sorting routines; index 0 = smallest size, last = best perf
+const PxU32 SubSortQuick::stopAtTrisPerLeaf1[SubSortQuick::NTRADEOFF] = {16, 14, 12, 10, 8, 7, 6, 5, 4};
+
+/////////////////////////////////////////////////////////////////////////
+// SAH sorting method
+//
+// Preset table: lower index=better size -> higher index = better perf
+static const PxU32 NTRADEOFF = 15; // number of SAH presets in the two tables below
+	// % -24 -23 -17 -15 -10  -8  -5  -3   0  +3  +3  +5  +7  +8  +9 - % raycast MeshSurface*Random benchmark perf
+	// K 717 734 752 777 793 811 824 866 903 939 971 1030 1087 1139 1266 - testzone size in K
+	// #   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14 - preset number
+static const PxU32 stopAtTrisPerPage[NTRADEOFF] = { 64, 60, 56, 48, 46, 44, 40, 36, 32, 28, 24, 20, 16, 12, 12};
+static const PxU32 stopAtTrisPerLeaf[NTRADEOFF] = { 16, 14, 12, 10,  9,  8,  8,  6,  5,  5,  5,  4,  4,  4,  2}; // capped at 2 anyway
+
+/////////////////////////////////////////////////////////////////////////
+// comparator struct for sorting the bounds along a specified coordIndex (coordIndex=0,1,2 for X,Y,Z)
+struct SortBoundsPredicate
+{
+	PxU32 coordIndex;
+	const PxBounds3V* allBounds;
+	SortBoundsPredicate(PxU32 coordIndex_, const PxBounds3V* allBounds_) : coordIndex(coordIndex_), allBounds(allBounds_)
+	{}
+
+	// strict less-than ordering of two bound indices by box center along coordIndex
+	bool operator()(const PxU32 & idx1, const PxU32 & idx2) const
+	{
+		const PxBounds3V& b1 = allBounds[idx1];
+		const PxBounds3V& b2 = allBounds[idx2];
+		// min+max is twice the center; the constant factor does not change the ordering
+		const PxF32 c1 = V3ReadXYZ(b1.mn)[coordIndex] + V3ReadXYZ(b1.mx)[coordIndex];
+		const PxF32 c2 = V3ReadXYZ(b2.mn)[coordIndex] + V3ReadXYZ(b2.mx)[coordIndex];
+		return c1 < c2;
+	}
+};
+
+
+/////////////////////////////////////////////////////////////////////////
+// auxiliary class for SAH build (SAH = surface area heuristic)
+struct Interval
+{
+	PxU32 start, count; // index range [start, start+count) into the permute array
+	Interval(PxU32 s, PxU32 c) : start(s), count(c) {}
+};
+
+// SAH function - returns surface area for given AABB extents
+// computes half the box surface area (x*y + y*z + x*z) - constant factor is irrelevant for SAH comparisons
+static PX_FORCE_INLINE void PxSAH(const Vec3VArg v, PxF32& sah)
+{
+	FStore(V3Dot(v, V3PermZXY(v)), &sah); // v.x*v.y + v.y*v.z + v.x*v.z;
+}
+
+// Auxiliary class implementing the SAH (surface area heuristic) top-down build path.
+// Holds precomputed per-axis sort orders/ranks and scratch buffers reused across the recursion.
+struct SubSortSAH
+{
+	PxU32* PX_RESTRICT permuteStart, *PX_RESTRICT tempPermute; // permutation array start and scratch permutation
+	const PxBounds3V* PX_RESTRICT allBounds;
+	PxF32* PX_RESTRICT metricL; // scratch: left-sweep SAH metric per candidate split
+	PxF32* PX_RESTRICT metricR; // scratch: right-sweep SAH metric per candidate split
+	const PxU32* PX_RESTRICT xOrder, *PX_RESTRICT yOrder, *PX_RESTRICT zOrder; // bound indices sorted by center along each axis
+	const PxU32* PX_RESTRICT xRanks, *PX_RESTRICT yRanks, *PX_RESTRICT zRanks; // inverse of the order arrays (index -> rank)
+	PxU32* PX_RESTRICT tempRanks; // scratch rank buffer
+	PxU32 nbTotalBounds;
+	PxU32 iTradeOff; // preset index into stopAtTrisPerPage/stopAtTrisPerLeaf
+
+	// precompute various values used during sort
+	// NOTE(review): raw new[]/delete[] pairs - scoped arrays would be safer, but behavior is unchanged here
+	SubSortSAH(
+		PxU32* permute, const PxBounds3V* allBounds_, PxU32 numBounds,
+		const PxU32* xOrder_, const PxU32* yOrder_, const PxU32* zOrder_,
+		const PxU32* xRanks_, const PxU32* yRanks_, const PxU32* zRanks_, PxReal sizePerfTradeOff01)
+		: permuteStart(permute), allBounds(allBounds_),
+		xOrder(xOrder_), yOrder(yOrder_), zOrder(zOrder_),
+		xRanks(xRanks_), yRanks(yRanks_), zRanks(zRanks_), nbTotalBounds(numBounds)
+	{
+		metricL = new PxF32[numBounds];
+		metricR = new PxF32[numBounds];
+		tempPermute = new PxU32[numBounds*2+1]; // presumably sized *2+1 for headroom - TODO confirm why the extra space is needed
+		tempRanks = new PxU32[numBounds];
+		// map sizePerfTradeOff01 from [0,1] onto a preset index, clamped to the table
+		iTradeOff = PxMin<PxU32>( PxU32(PxMax<PxReal>(0.0f, sizePerfTradeOff01)*NTRADEOFF), NTRADEOFF-1 );
+	}
+
+	~SubSortSAH() // release temporarily used memory
+	{
+		delete [] metricL; metricL = NULL;
+		delete [] metricR; metricR = NULL;
+		delete [] tempPermute; tempPermute = NULL;
+		delete [] tempRanks; tempRanks = NULL;
+	}
+
+	////////////////////////////////////////////////////////////////////
+	// returns split position for second array start relative to permute ptr
+	// (picks the axis and position minimizing countL*SAH(L) + countR*SAH(R))
+	PxU32 split(PxU32* permute, PxU32 clusterSize)
+	{
+		if(clusterSize <= 1)
+			return 0;
+		if(clusterSize == 2)
+			return 1;
+
+		// candidate split positions keep at least minCount elements on each side
+		PxI32 minCount = clusterSize >= 4 ? 2 : 1;
+		PxI32 splitStartL = minCount; // range=[startL->endL)
+		PxI32 splitEndL = PxI32(clusterSize-minCount);
+		PxI32 splitStartR = PxI32(clusterSize-splitStartL); // range=(endR<-startR], startR > endR
+		PxI32 splitEndR = PxI32(clusterSize-splitEndL);
+		PX_ASSERT(splitEndL-splitStartL == splitStartR-splitEndR);
+		PX_ASSERT(splitStartL <= splitEndL);
+		PX_ASSERT(splitStartR >= splitEndR);
+		PX_ASSERT(splitEndR >= 1);
+		PX_ASSERT(splitEndL < PxI32(clusterSize));
+
+		// pick the best axis with some splitting metric
+		// axis index is X=0, Y=1, Z=2
+		PxF32 minMetric[3];
+		PxU32 minMetricSplit[3];
+		const PxU32* ranks3[3] = { xRanks, yRanks, zRanks };
+		const PxU32* orders3[3] = { xOrder, yOrder, zOrder };
+		for(PxU32 coordIndex = 0; coordIndex <= 2; coordIndex++)
+		{
+			SortBoundsPredicate sortPredicateLR(coordIndex, allBounds);
+
+			const PxU32* rank = ranks3[coordIndex];
+			const PxU32* order = orders3[coordIndex];
+
+			// build ranks in tempPermute
+			if(clusterSize == nbTotalBounds) // AP: about 4% perf gain from this optimization
+			{
+				// if this is a full cluster sort, we already have it done
+				for(PxU32 i = 0; i < clusterSize; i ++)
+					tempPermute[i] = order[i];
+			} else
+			{
+				// sort the tempRanks (sorting precomputed ranks is equivalent to sorting by center along this axis)
+				for(PxU32 i = 0; i < clusterSize; i ++)
+					tempRanks[i] = rank[permute[i]];
+				Ps::sort(tempRanks, clusterSize);
+				for(PxU32 i = 0; i < clusterSize; i ++) // convert back from ranks to indices
+					tempPermute[i] = order[tempRanks[i]];
+			}
+
+			// we consider overlapping intervals for minimum sum of metrics
+			// left interval is from splitStartL up to splitEndL
+			// right interval is from splitStartR down to splitEndR
+
+
+			// first compute the array metricL
+			Vec3V boundsLmn = allBounds[tempPermute[0]].mn; // init with 0th bound
+			Vec3V boundsLmx = allBounds[tempPermute[0]].mx; // init with 0th bound
+			PxI32 ii;
+			for(ii = 1; ii < splitStartL; ii++) // sweep right to include all bounds up to splitStartL-1
+			{
+				boundsLmn = V3Min(boundsLmn, allBounds[tempPermute[ii]].mn);
+				boundsLmx = V3Max(boundsLmx, allBounds[tempPermute[ii]].mx);
+			}
+
+			PxU32 countL0 = 0;
+			for(ii = splitStartL; ii <= splitEndL; ii++) // compute metric for inclusive bounds from splitStartL to splitEndL
+			{
+				boundsLmn = V3Min(boundsLmn, allBounds[tempPermute[ii]].mn);
+				boundsLmx = V3Max(boundsLmx, allBounds[tempPermute[ii]].mx);
+				PxSAH(V3Sub(boundsLmx, boundsLmn), metricL[countL0++]);
+			}
+			// now we have metricL
+
+			// now compute the array metricR
+			Vec3V boundsRmn = allBounds[tempPermute[clusterSize-1]].mn; // init with last bound
+			Vec3V boundsRmx = allBounds[tempPermute[clusterSize-1]].mx; // init with last bound
+			for(ii = PxI32(clusterSize-2); ii > splitStartR; ii--) // include bounds to the left of splitEndR down to splitStartR
+			{
+				boundsRmn = V3Min(boundsRmn, allBounds[tempPermute[ii]].mn);
+				boundsRmx = V3Max(boundsRmx, allBounds[tempPermute[ii]].mx);
+			}
+
+			PxU32 countR0 = 0;
+			for(ii = splitStartR; ii >= splitEndR; ii--) // continue sweeping left, including bounds and recomputing the metric
+			{
+				boundsRmn = V3Min(boundsRmn, allBounds[tempPermute[ii]].mn);
+				boundsRmx = V3Max(boundsRmx, allBounds[tempPermute[ii]].mx);
+				PxSAH(V3Sub(boundsRmx, boundsRmn), metricR[countR0++]);
+			}
+
+			PX_ASSERT((countL0 == countR0) && (countL0 == PxU32(splitEndL-splitStartL+1)));
+
+			// now iterate over splitRange and compute the minimum sum of SAHLeft*countLeft + SAHRight*countRight
+			PxU32 minMetricSplitPosition = 0;
+			PxF32 minMetricLocal = PX_MAX_REAL;
+			const PxI32 hsI32 = PxI32(clusterSize/2);
+			const PxI32 splitRange = (splitEndL-splitStartL+1);
+			for(ii = 0; ii < splitRange; ii++)
+			{
+				PxF32 countL = PxF32(ii+minCount); // need to add minCount since ii iterates over splitRange
+				PxF32 countR = PxF32(splitRange-ii-1+minCount);
+				PX_ASSERT(PxU32(countL + countR) == clusterSize);
+
+				const PxF32 metric = (countL*metricL[ii] + countR*metricR[splitRange-ii-1]);
+				const PxU32 splitPos = PxU32(ii+splitStartL);
+				// prefer strictly better metric; on a tie prefer the split closest to the middle
+				if(metric < minMetricLocal ||
+					(metric <= minMetricLocal && // same metric but more even split
+					PxAbs(PxI32(splitPos)-hsI32) < PxAbs(PxI32(minMetricSplitPosition)-hsI32)))
+				{
+					minMetricLocal = metric;
+					minMetricSplitPosition = splitPos;
+				}
+			}
+
+			minMetric[coordIndex] = minMetricLocal;
+			minMetricSplit[coordIndex] = minMetricSplitPosition;
+
+			// sum of axis lengths for both left and right AABBs
+		}
+
+		// pick the winning axis (lowest metric; ties favor X then Y)
+		PxU32 winIndex = 2;
+		if(minMetric[0] <= minMetric[1] && minMetric[0] <= minMetric[2])
+			winIndex = 0;
+		else if(minMetric[1] <= minMetric[2])
+			winIndex = 1;
+
+		// reorder permute along the winning axis (same scheme as the per-axis loop above)
+		const PxU32* rank = ranks3[winIndex];
+		const PxU32* order = orders3[winIndex];
+		if(clusterSize == nbTotalBounds) // AP: about 4% gain from this special case optimization
+		{
+			// if this is a full cluster sort, we already have it done
+			for(PxU32 i = 0; i < clusterSize; i ++)
+				permute[i] = order[i];
+		} else
+		{
+			// sort the tempRanks
+			for(PxU32 i = 0; i < clusterSize; i ++)
+				tempRanks[i] = rank[permute[i]];
+			Ps::sort(tempRanks, clusterSize);
+			for(PxU32 i = 0; i < clusterSize; i ++)
+				permute[i] = order[tempRanks[i]];
+		}
+
+		PxU32 splitPoint = minMetricSplit[winIndex];
+		if(clusterSize == 3 && splitPoint == 0)
+			splitPoint = 1; // special case due to rounding
+		return splitPoint;
+	}
+
+	// compute surface area for a given split
+	PxF32 computeSA(const PxU32* permute, const Interval& split) // both permute and i are relative
+	{
+		PX_ASSERT(split.count >= 1);
+		Vec3V bmn = allBounds[permute[split.start]].mn;
+		Vec3V bmx = allBounds[permute[split.start]].mx;
+		for(PxU32 i = 1; i < split.count; i++)
+		{
+			const PxBounds3V& b1 = allBounds[permute[split.start+i]];
+			bmn = V3Min(bmn, b1.mn); bmx = V3Max(bmx, b1.mx);
+		}
+
+		PxF32 ret; PxSAH(V3Sub(bmx, bmn), ret);
+		return ret;
+	}
+
+	////////////////////////////////////////////////////////////////////
+	// main SAH sort routine
+	// Recursively splits permute[0..clusterSize) into RTREE_N intervals (repeatedly splitting
+	// the interval with the largest area*count), emits RTREE_N nodes per level into resultTree,
+	// then recurses on non-terminal intervals.
+	void sort4(PxU32* permute, PxU32 clusterSize,
+		Array<RTreeNodeNQ>& resultTree, PxU32& maxLevels, PxU32 level = 0, RTreeNodeNQ* parentNode = NULL)
+	{
+		PX_UNUSED(parentNode);
+
+		if(level == 0)
+			maxLevels = 1;
+		else
+			maxLevels = PxMax(maxLevels, level+1);
+
+		PxU32 splitPos[RTREE_N];
+		for(PxU32 j = 0; j < RTREE_N; j++)
+			splitPos[j] = j+1;
+
+		if(clusterSize >= RTREE_N)
+		{
+			// split into RTREE_N number of regions via RTREE_N-1 subsequent splits
+			// each split is represented as a current interval
+			// we iterate over currently active intervals and compute it's surface area
+			// then we split the interval with maximum surface area
+			// AP scaffold: possible optimization - seems like computeSA can be cached for unchanged intervals
+			InlineArray<Interval, 4> splits;
+			splits.pushBack(Interval(0, clusterSize));
+			for(PxU32 iSplit = 0; iSplit < RTREE_N-1; iSplit++)
+			{
+				PxF32 maxSAH = -FLT_MAX;
+				PxU32 maxSplit = 0xFFFFffff;
+				for(PxU32 i = 0; i < splits.size(); i++)
+				{
+					if(splits[i].count == 1)
+						continue;
+					PxF32 SAH = computeSA(permute, splits[i])*splits[i].count;
+					if(SAH > maxSAH)
+					{
+						maxSAH = SAH;
+						maxSplit = i;
+					}
+				}
+				PX_ASSERT(maxSplit != 0xFFFFffff);
+
+				// maxSplit is now the index of the interval in splits array with maximum surface area
+				// we now split it into 2 using the split() function
+				Interval old = splits[maxSplit];
+				PX_ASSERT(old.count > 1);
+				PxU32 splitLocal = split(permute+old.start, old.count); // relative split pos
+
+				PX_ASSERT(splitLocal >= 1);
+				PX_ASSERT(old.count-splitLocal >= 1);
+				splits.pushBack(Interval(old.start, splitLocal));
+				splits.pushBack(Interval(old.start+splitLocal, old.count-splitLocal));
+				splits.replaceWithLast(maxSplit);
+				splitPos[iSplit] = old.start+splitLocal;
+			}
+
+			// verification code, make sure split counts add up to clusterSize
+			PX_ASSERT(splits.size() == RTREE_N);
+			PxU32 sum = 0;
+			for(PxU32 j = 0; j < RTREE_N; j++)
+				sum += splits[j].count;
+			PX_ASSERT(sum == clusterSize);
+		}
+		else // clusterSize < RTREE_N
+		{
+			// make it so splitCounts based on splitPos add up correctly for small cluster sizes
+			for(PxU32 i = clusterSize; i < RTREE_N-1; i++)
+				splitPos[i] = clusterSize;
+		}
+
+		// sort splitPos index array using quicksort (just a few values)
+		Ps::sort(splitPos, RTREE_N-1);
+		splitPos[RTREE_N-1] = clusterSize; // splitCount[n] is computed as splitPos[n+1]-splitPos[n], so we need to add this last value
+
+		// now compute splitStarts and splitCounts from splitPos[] array. Also perform a bunch of correctness verification
+		PxU32 splitStarts[RTREE_N];
+		PxU32 splitCounts[RTREE_N];
+		splitStarts[0] = 0;
+		splitCounts[0] = splitPos[0];
+		PxU32 sumCounts = splitCounts[0];
+		for(PxU32 j = 1; j < RTREE_N; j++)
+		{
+			splitStarts[j] = splitPos[j-1];
+			PX_ASSERT(splitStarts[j-1]<=splitStarts[j]);
+			splitCounts[j] = splitPos[j]-splitPos[j-1];
+			PX_ASSERT(splitCounts[j] > 0 || clusterSize < RTREE_N);
+			sumCounts += splitCounts[j];
+			PX_ASSERT(splitStarts[j-1]+splitCounts[j-1]<=splitStarts[j]);
+		}
+		PX_ASSERT(sumCounts == clusterSize);
+		PX_ASSERT(splitStarts[RTREE_N-1]+splitCounts[RTREE_N-1]<=clusterSize);
+
+		// mark this cluster as terminal based on clusterSize <= stopAtTrisPerPage parameter for current iTradeOff user specified preset
+		bool terminalClusterByTotalCount = (clusterSize <= stopAtTrisPerPage[iTradeOff]);
+		// iterate over splitCounts for the current cluster, if any of counts exceed 16 (which is the maximum supported by LeafTriangles)
+		// we cannot mark this cluster as terminal (has to be split more)
+		for(PxU32 s = 0; s < RTREE_N; s++)
+			if(splitCounts[s] > 16) // LeafTriangles doesn't support > 16 tris
+				terminalClusterByTotalCount = false;
+
+		// iterate over all the splits
+		for(PxU32 s = 0; s < RTREE_N; s++)
+		{
+			RTreeNodeNQ rtn;
+			PxU32 splitCount = splitCounts[s];
+			if(splitCount > 0) // splits shouldn't be empty generally
+			{
+				// sweep left to right and compute min and max SAH for each individual bound in current split
+				PxBounds3V b = allBounds[permute[splitStarts[s]]];
+				PxF32 sahMin; PxSAH(b.getExtents(), sahMin);
+				PxF32 sahMax = sahMin;
+				// AP scaffold - looks like this could be optimized (we are recomputing bounds top down)
+				for(PxU32 i = 1; i < splitCount; i++)
+				{
+					PxU32 localIndex = i + splitStarts[s];
+					const PxBounds3V& b1 = allBounds[permute[localIndex]];
+					PxF32 sah1; PxSAH(b1.getExtents(), sah1);
+					sahMin = PxMin(sahMin, sah1);
+					sahMax = PxMax(sahMax, sah1);
+					b.include(b1);
+				}
+
+				rtn.bounds.minimum = V3ReadXYZ(b.mn);
+				rtn.bounds.maximum = V3ReadXYZ(b.mx);
+
+				// if bounds differ widely (according to some heuristic preset), we continue splitting
+				// this is important for a mixed cluster with large and small triangles
+				bool okSAH = (sahMax/sahMin < 40.0f);
+				if(!okSAH)
+					terminalClusterByTotalCount = false; // force splitting this cluster
+
+				bool stopSplitting = // compute the final splitting criterion
+					splitCount <= 2 || (okSAH && splitCount <= 3) // stop splitting at 2 nodes or if SAH ratio is OK and splitCount <= 3
+					|| terminalClusterByTotalCount || splitCount <= stopAtTrisPerLeaf[iTradeOff];
+				if(stopSplitting)
+				{
+					// this is a terminal page then, mark as such
+					// first node index is relative to the top level input array beginning
+					rtn.childPageFirstNodeIndex = PxI32(splitStarts[s]+(permute-permuteStart));
+					rtn.leafCount = PxI32(splitCount);
+					PX_ASSERT(splitCount <= 16); // LeafTriangles doesn't support more
+				}
+				else
+				{
+					// this is not a terminal page, we will recompute this later, after we recurse on subpages (label ZZZ)
+					rtn.childPageFirstNodeIndex = -1;
+					rtn.leafCount = 0;
+				}
+			}
+			else // splitCount == 0 at this point, this is an empty padding node (with current presets it's very rare)
+			{
+				PX_ASSERT(splitCount == 0);
+				rtn.bounds.setEmpty();
+				rtn.childPageFirstNodeIndex = -1;
+				rtn.leafCount = -1;
+			}
+			resultTree.pushBack(rtn); // push the new node into the resultTree array
+		}
+
+		if(terminalClusterByTotalCount) // abort recursion if terminal cluster
+			return;
+
+		// recurse on subpages
+		PxU32 parentIndex = resultTree.size() - RTREE_N; // save the parentIndex as specified (array can be resized during recursion)
+		for(PxU32 s = 0; s<RTREE_N; s++)
+		{
+			RTreeNodeNQ* sParent = &resultTree[parentIndex+s]; // array can be resized and relocated during recursion
+			if(sParent->leafCount == 0) // only split pages that were marked as non-terminal during splitting (see "label ZZZ" above)
+			{
+				// all child nodes will be pushed inside of this recursive call,
+				// so we set the child pointer for parent node to resultTree.size()
+				sParent->childPageFirstNodeIndex = PxI32(resultTree.size());
+				sort4(permute+splitStarts[s], splitCounts[s], resultTree, maxLevels, level+1, sParent);
+			}
+		}
+	}
+};
+
+
+
+
+/////////////////////////////////////////////////////////////////////////
+// initializes the input permute array with identity permutation
+// and shuffles it so that new sorted index, newIndex = resultPermute[oldIndex]
+static void buildFromBounds(
+ Gu::RTree& result, const PxBounds3V* allBounds, PxU32 numBounds,
+ Array<PxU32>& permute, RTreeCooker::RemapCallback* rc, Vec3VArg allMn, Vec3VArg allMx,
+ PxReal sizePerfTradeOff01, PxMeshCookingHint::Enum hint)
+{
+ PX_UNUSED(sizePerfTradeOff01);
+ PxBounds3V treeBounds(allMn, allMx);
+
+ // start off with an identity permutation
+ permute.resize(0);
+ permute.reserve(numBounds+1);
+ for(PxU32 j = 0; j < numBounds; j ++)
+ permute.pushBack(j);
+ const PxU32 sentinel = 0xABCDEF01;
+ permute.pushBack(sentinel);
+
+ // load sorted nodes into an RTreeNodeNQ tree representation
+ // build the tree structure from sorted nodes
+ const PxU32 pageSize = RTREE_N;
+ Array<RTreeNodeNQ> resultTree;
+ resultTree.reserve(numBounds*2);
+
+ PxU32 maxLevels = 0;
+ if(hint == PxMeshCookingHint::eSIM_PERFORMANCE) // use high quality SAH build
+ {
+ Array<PxU32> xRanks(numBounds), yRanks(numBounds), zRanks(numBounds), xOrder(numBounds), yOrder(numBounds), zOrder(numBounds);
+ memcpy(xOrder.begin(), permute.begin(), sizeof(xOrder[0])*numBounds);
+ memcpy(yOrder.begin(), permute.begin(), sizeof(yOrder[0])*numBounds);
+ memcpy(zOrder.begin(), permute.begin(), sizeof(zOrder[0])*numBounds);
+ // sort by shuffling the permutation, precompute sorted ranks for x,y,z-orders
+ Ps::sort(xOrder.begin(), xOrder.size(), SortBoundsPredicate(0, allBounds));
+ for(PxU32 i = 0; i < numBounds; i++) xRanks[xOrder[i]] = i;
+ Ps::sort(yOrder.begin(), yOrder.size(), SortBoundsPredicate(1, allBounds));
+ for(PxU32 i = 0; i < numBounds; i++) yRanks[yOrder[i]] = i;
+ Ps::sort(zOrder.begin(), zOrder.size(), SortBoundsPredicate(2, allBounds));
+ for(PxU32 i = 0; i < numBounds; i++) zRanks[zOrder[i]] = i;
+
+ SubSortSAH ss(permute.begin(), allBounds, numBounds,
+ xOrder.begin(), yOrder.begin(), zOrder.begin(), xRanks.begin(), yRanks.begin(), zRanks.begin(), sizePerfTradeOff01);
+ ss.sort4(permute.begin(), numBounds, resultTree, maxLevels);
+ } else
+ { // use fast cooking path
+ PX_ASSERT(hint == PxMeshCookingHint::eCOOKING_PERFORMANCE);
+ SubSortQuick ss(permute.begin(), allBounds, numBounds, sizePerfTradeOff01);
+ PxBounds3V discard((PxBounds3V::U()));
+ ss.sort4(permute.begin(), permute.size()-1, resultTree, maxLevels, discard); // AP scaffold: need to implement build speed/runtime perf slider
+ }
+
+ PX_ASSERT(permute[numBounds] == sentinel); // verify we didn't write past the array
+ permute.popBack(); // discard the sentinel value
+
+ #if PRINT_RTREE_COOKING_STATS // stats code
+ PxU32 totalLeafTris = 0;
+ PxU32 numLeaves = 0;
+ PxI32 maxLeafTris = 0;
+ PxU32 numEmpty = 0;
+ for(PxU32 i = 0; i < resultTree.size(); i++)
+ {
+ PxI32 leafCount = resultTree[i].leafCount;
+ numEmpty += (resultTree[i].bounds.isEmpty());
+ if(leafCount > 0)
+ {
+ numLeaves++;
+ totalLeafTris += leafCount;
+ if(leafCount > maxLeafTris)
+ maxLeafTris = leafCount;
+ }
+ }
+
+ printf("AABBs total/empty=%d/%d\n", resultTree.size(), numEmpty);
+ printf("numTris=%d, numLeafAABBs=%d, avgTrisPerLeaf=%.2f, maxTrisPerLeaf = %d\n",
+ numBounds, numLeaves, PxF32(totalLeafTris)/numLeaves, maxLeafTris);
+ #endif
+
+ PX_ASSERT(RTREE_N*sizeof(RTreeNodeQ) == sizeof(RTreePage)); // needed for nodePtrMultiplier computation to be correct
+ const int nodePtrMultiplier = sizeof(RTreeNodeQ); // convert offset as count in qnodes to page ptr
+
+ // Quantize the tree. AP scaffold - might be possible to merge this phase with the page pass below this loop
+ Array<RTreeNodeQ> qtreeNodes;
+ PxU32 firstEmptyIndex = PxU32(-1);
+ PxU32 resultCount = resultTree.size();
+ qtreeNodes.reserve(resultCount);
+
+ for(PxU32 i = 0; i < resultCount; i++) // AP scaffold - eliminate this pass
+ {
+ RTreeNodeNQ & u = resultTree[i];
+ RTreeNodeQ q;
+ q.setLeaf(u.leafCount > 0); // set the leaf flag
+ if(u.childPageFirstNodeIndex == -1) // empty node?
+ {
+ if(firstEmptyIndex == PxU32(-1))
+ firstEmptyIndex = qtreeNodes.size();
+ q.minx = q.miny = q.minz = FLT_MAX; // AP scaffold improvement - use empty 1e30 bounds instead and reference a valid leaf
+ q.maxx = q.maxy = q.maxz = -FLT_MAX; // that will allow to remove the empty node test from the runtime
+
+ q.ptr = firstEmptyIndex*nodePtrMultiplier; PX_ASSERT((q.ptr & 1) == 0);
+ q.setLeaf(true); // label empty node as leaf node
+ } else
+ {
+ // non-leaf node
+ q.minx = u.bounds.minimum.x;
+ q.miny = u.bounds.minimum.y;
+ q.minz = u.bounds.minimum.z;
+ q.maxx = u.bounds.maximum.x;
+ q.maxy = u.bounds.maximum.y;
+ q.maxz = u.bounds.maximum.z;
+ if(u.leafCount > 0)
+ {
+ q.ptr = PxU32(u.childPageFirstNodeIndex);
+ rc->remap(&q.ptr, q.ptr, PxU32(u.leafCount));
+ PX_ASSERT(q.isLeaf()); // remap is expected to set the isLeaf bit
+ }
+ else
+ {
+ // verify that all children bounds are included in the parent bounds
+ for(PxU32 s = 0; s < RTREE_N; s++)
+ {
+ const RTreeNodeNQ& child = resultTree[u.childPageFirstNodeIndex+s];
+ PX_UNUSED(child);
+ // is a sentinel node or is inside parent's bounds
+ PX_ASSERT(child.leafCount == -1 || child.bounds.isInside(u.bounds));
+ }
+
+ q.ptr = PxU32(u.childPageFirstNodeIndex * nodePtrMultiplier);
+ PX_ASSERT(q.ptr % RTREE_N == 0);
+ q.setLeaf(false);
+ }
+ }
+ qtreeNodes.pushBack(q);
+ }
+
+ // build the final rtree image
+ result.mInvDiagonal = PxVec4(1.0f);
+ PX_ASSERT(qtreeNodes.size() % RTREE_N == 0);
+ result.mTotalNodes = qtreeNodes.size();
+ result.mTotalPages = result.mTotalNodes / pageSize;
+ result.mPages = static_cast<RTreePage*>(
+ Ps::AlignedAllocator<128>().allocate(sizeof(RTreePage)*result.mTotalPages, __FILE__, __LINE__));
+ result.mBoundsMin = PxVec4(V3ReadXYZ(treeBounds.mn), 0.0f);
+ result.mBoundsMax = PxVec4(V3ReadXYZ(treeBounds.mx), 0.0f);
+ result.mDiagonalScaler = (result.mBoundsMax - result.mBoundsMin) / 65535.0f;
+ result.mPageSize = pageSize;
+ result.mNumLevels = maxLevels;
+ PX_ASSERT(result.mTotalNodes % pageSize == 0);
+ result.mNumRootPages = 1;
+
+ for(PxU32 j = 0; j < result.mTotalPages; j++)
+ {
+ RTreePage& page = result.mPages[j];
+ for(PxU32 k = 0; k < RTREE_N; k ++)
+ {
+ const RTreeNodeQ& n = qtreeNodes[j*RTREE_N+k];
+ page.maxx[k] = n.maxx;
+ page.maxy[k] = n.maxy;
+ page.maxz[k] = n.maxz;
+ page.minx[k] = n.minx;
+ page.miny[k] = n.miny;
+ page.minz[k] = n.minz;
+ page.ptrs[k] = n.ptr;
+ }
+ }
+
+ //printf("Tree size=%d\n", result.mTotalPages*sizeof(RTreePage));
+#if PX_DEBUG
+ result.validate(); // make sure the child bounds are included in the parent and other validation
+#endif
+}
+
+} // namespace physx
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.h
new file mode 100644
index 00000000..04a0b15c
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/RTreeCooking.h
@@ -0,0 +1,51 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "CmPhysXCommon.h"
+#include "GuMeshData.h"
+#include "PxCooking.h"
+#include "PsArray.h"
+#include "GuRTree.h"
+
+namespace physx
+{
+	// Cooker entry point for building the RTree midphase acceleration structure of a triangle mesh.
+	struct RTreeCooker
+	{
+		struct RemapCallback // a callback to convert indices from triangle to LeafTriangles or other uses
+		{
+			virtual ~RemapCallback() {}
+			// rtreePtr: destination slot inside the quantized node; start: first triangle of the
+			// leaf; leafCount: number of triangles in the leaf. Implementations are expected to
+			// encode the leaf and set the node's isLeaf bit (asserted in RTreeCooking.cpp).
+			virtual void remap(PxU32* rtreePtr, PxU32 start, PxU32 leafCount) = 0;
+		};
+
+		// triangles will be remapped so that newIndex = resultPermute[oldIndex]
+		// sizePerfTradeOff01 is a [0..1] size/performance slider. hint selects the build path:
+		// eSIM_PERFORMANCE uses the high-quality SAH build, eCOOKING_PERFORMANCE the fast build.
+		static void buildFromTriangles(
+			Gu::RTree& resultTree, const PxVec3* verts, PxU32 numVerts, const PxU16* tris16, const PxU32* tris32, PxU32 numTris,
+			Ps::Array<PxU32>& resultPermute, RemapCallback* rc, PxReal sizePerfTradeOff01, PxMeshCookingHint::Enum hint);
+	};
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.cpp b/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.cpp
new file mode 100644
index 00000000..877d752e
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.cpp
@@ -0,0 +1,1443 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+#include "RTreeCooking.h"
+#include "TriangleMeshBuilder.h"
+#include "EdgeList.h"
+#include "MeshCleaner.h"
+#include "GuConvexEdgeFlags.h"
+#include "PxTriangleMeshDesc.h"
+#include "GuSerialize.h"
+#include "Cooking.h"
+#include "GuMeshData.h"
+#include "GuTriangle32.h"
+#include "GuRTree.h"
+#include "GuInternal.h"
+#include "GuBV4Build.h"
+#include "GuBV32Build.h"
+#include "PsFoundation.h"
+#include "PsHashMap.h"
+#include "PsSort.h"
+
+namespace physx {
+
+// POD 3-component integer vector used by the GRB cooking helpers.
+struct int3
+{
+	int x, y, z;
+};
+
+// POD 3-component unsigned vector; used to view GRB triangle index buffers (see createGRBData).
+struct uint3
+{
+	unsigned int x, y, z;
+};
+
+// 16-byte aligned POD 4-component unsigned vector; used to view GRB triangle adjacency buffers.
+PX_ALIGN_PREFIX(16)
+struct uint4
+{
+	unsigned int x, y, z, w;
+}
+PX_ALIGN_SUFFIX(16);
+
+// 16-byte aligned POD 4-component float vector for GRB data layouts.
+PX_ALIGN_PREFIX(16)
+struct float4
+{
+	float x, y, z, w;
+}
+PX_ALIGN_SUFFIX(16);
+
+}
+
+#include "GrbTriangleMeshCooking.h"
+
+using namespace physx;
+using namespace Gu;
+using namespace Ps;
+
+namespace physx {
+
+// Wraps an externally owned TriangleMeshData; 'params' controls all cooking behavior.
+// The edge list is built lazily (createEdgeList) and released in the destructor.
+TriangleMeshBuilder::TriangleMeshBuilder(TriangleMeshData& m, const PxCookingParams& params) :
+	edgeList	(NULL),
+	mParams		(params),
+	mMeshData	(m)
+{
+}
+
+TriangleMeshBuilder::~TriangleMeshBuilder()
+{
+	releaseEdgeList();	// the edge list is the only resource owned directly by the builder
+}
+
+// Reorders all per-triangle arrays (topology, material indices, face remap table) so that
+// triangle i of the new layout is triangle order[i] of the old layout (i.e. 'order' maps
+// new index -> old index). Typically called after the midphase build permutes triangles.
+void TriangleMeshBuilder::remapTopology(const PxU32* order)
+{
+	if(!mMeshData.mNbTriangles)
+		return;
+
+	// Remap one array at a time to limit memory usage
+
+	Gu::TriangleT<PxU32>* newTopo = reinterpret_cast<Gu::TriangleT<PxU32>*>(PX_ALLOC(mMeshData.mNbTriangles * sizeof(Gu::TriangleT<PxU32>), "Gu::TriangleT<PxU32>"));
+	for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+		newTopo[i] = reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.mTriangles)[order[i]];
+	PX_FREE_AND_RESET(mMeshData.mTriangles);
+	mMeshData.mTriangles = newTopo;
+
+	if(mMeshData.mMaterialIndices)
+	{
+		PxMaterialTableIndex* newMat = PX_NEW(PxMaterialTableIndex)[mMeshData.mNbTriangles];
+		for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+			newMat[i] = mMeshData.mMaterialIndices[order[i]];
+		PX_DELETE_POD(mMeshData.mMaterialIndices);
+		mMeshData.mMaterialIndices = newMat;
+	}
+
+	// The remap table is kept when requested, or when GPU data is built (the GRB remap
+	// table is derived from it later in createGRBMidPhaseAndData). If no remap table
+	// existed yet, 'order' itself becomes the initial remap.
+	if(!mParams.suppressTriangleMeshRemapTable || mParams.buildGPUData)
+	{
+		PxU32* newMap = PX_NEW(PxU32)[mMeshData.mNbTriangles];
+		for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+			newMap[i] = mMeshData.mFaceRemap ? mMeshData.mFaceRemap[order[i]] : order[i];
+		PX_DELETE_POD(mMeshData.mFaceRemap);
+		mMeshData.mFaceRemap = newMap;
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Runs the MeshCleaner over the input mesh (optionally welding vertices when
+// eWELD_VERTICES is set with a non-zero meshWeldTolerance), then rebuilds the
+// vertex/triangle buffers, material indices and face remap table from the
+// cleaner's output. When 'validate' is true, the mesh is only checked: any
+// removal of vertices or triangles counts as failure. Returns false if the
+// cleaned mesh has no triangles or validation failed. May set *condition to
+// eLARGE_TRIANGLE as a warning (the function still returns true in that case).
+bool TriangleMeshBuilder::cleanMesh(bool validate, PxTriangleMeshCookingResult::Enum* condition)
+{
+	PX_ASSERT(mMeshData.mFaceRemap == NULL);
+
+	PxF32 meshWeldTolerance = 0.0f;
+	if(mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eWELD_VERTICES)
+	{
+		if(mParams.meshWeldTolerance == 0.f)
+		{
+			Ps::getFoundation().error(PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__, "TriangleMesh: Enable mesh welding with 0 weld tolerance!");
+		}
+		else
+		{
+			meshWeldTolerance = mParams.meshWeldTolerance;
+		}
+	}
+	MeshCleaner cleaner(mMeshData.mNbVertices, mMeshData.mVertices, mMeshData.mNbTriangles, reinterpret_cast<const PxU32*>(mMeshData.mTriangles), meshWeldTolerance);
+	if(!cleaner.mNbTris)
+		return false;
+
+	if(validate)
+	{
+		// if we do only validate, we check if cleaning did not remove any verts or triangles.
+		// such a mesh can be then directly used for cooking without clean flag
+		if((cleaner.mNbVerts != mMeshData.mNbVertices) || (cleaner.mNbTris != mMeshData.mNbTriangles))
+		{
+			return false;
+		}
+	}
+
+	// PT: deal with the remap table
+	{
+		// PT: TODO: optimize this
+		if(cleaner.mRemap)
+		{
+			const PxU32 newNbTris = cleaner.mNbTris;
+
+			// Remap material array
+			if(mMeshData.mMaterialIndices)
+			{
+				PxMaterialTableIndex* tmp = PX_NEW(PxMaterialTableIndex)[newNbTris];
+				for(PxU32 i=0;i<newNbTris;i++)
+					tmp[i] = mMeshData.mMaterialIndices[cleaner.mRemap[i]];
+
+				PX_DELETE_POD(mMeshData.mMaterialIndices);
+				mMeshData.mMaterialIndices = tmp;
+			}
+
+			// keep the remap table when requested, or when GPU data needs it later
+			if (!mParams.suppressTriangleMeshRemapTable || mParams.buildGPUData)
+			{
+				mMeshData.mFaceRemap = PX_NEW(PxU32)[newNbTris];
+				PxMemCopy(mMeshData.mFaceRemap, cleaner.mRemap, newNbTris*sizeof(PxU32));
+			}
+		}
+	}
+
+	// PT: deal with geometry
+	{
+		if(mMeshData.mNbVertices!=cleaner.mNbVerts)
+		{
+			PX_FREE_AND_RESET(mMeshData.mVertices);
+			mMeshData.allocateVertices(cleaner.mNbVerts);
+		}
+		PxMemCopy(mMeshData.mVertices, cleaner.mVerts, mMeshData.mNbVertices*sizeof(PxVec3));
+	}
+
+	// PT: deal with topology
+	{
+		PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+		if(mMeshData.mNbTriangles!=cleaner.mNbTris)
+		{
+			PX_FREE_AND_RESET(mMeshData.mTriangles);
+			mMeshData.allocateTriangles(cleaner.mNbTris, true);
+		}
+
+		// squared edge-length threshold (500 length units, scaled by mParams.scale.length)
+		// used only to emit the "large triangle" warning below
+		const float testLength = 500.0f*500.0f*mParams.scale.length*mParams.scale.length;
+		bool bigTriangle = false;
+		const PxVec3* v = mMeshData.mVertices;
+		for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+		{
+			const PxU32 vref0 = cleaner.mIndices[i*3+0];
+			const PxU32 vref1 = cleaner.mIndices[i*3+1];
+			const PxU32 vref2 = cleaner.mIndices[i*3+2];
+			PX_ASSERT(vref0!=vref1 && vref0!=vref2 && vref1!=vref2);
+
+			reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.mTriangles)[i].v[0] = vref0;
+			reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.mTriangles)[i].v[1] = vref1;
+			reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.mTriangles)[i].v[2] = vref2;
+
+			if( (v[vref0] - v[vref1]).magnitudeSquared() >= testLength
+				|| (v[vref1] - v[vref2]).magnitudeSquared() >= testLength
+				|| (v[vref2] - v[vref0]).magnitudeSquared() >= testLength
+				)
+				bigTriangle = true;
+		}
+		if(bigTriangle)
+		{
+			if(condition)
+				*condition = PxTriangleMeshCookingResult::eLARGE_TRIANGLE;
+			Ps::getFoundation().error(PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__, "TriangleMesh: triangles are too big, reduce their size to increase simulation stability!");
+		}
+	}
+
+	return true;
+}
+
+// Builds the per-triangle convex-edge flag array (mExtraTrigData, one PxU8 of
+// ETD_CONVEX_EDGE_* bits per triangle) and, when buildAdjacencies is set, the
+// per-triangle adjacency table (mAdjacencies, 3 neighbor indices per triangle,
+// 0xFFFFFFFF meaning "no neighbor"). Both are derived from the edge list.
+// No-op when neither output is requested.
+void TriangleMeshBuilder::createSharedEdgeData(bool buildAdjacencies, bool buildActiveEdges)
+{
+	if(buildAdjacencies) // building edges is required if buildAdjacencies is requested
+		buildActiveEdges = true;
+
+	PX_ASSERT(mMeshData.mExtraTrigData == NULL);
+	PX_ASSERT(mMeshData.mAdjacencies == NULL);
+
+	if(!buildActiveEdges)
+		return;
+
+	const PxU32 nTrigs = mMeshData.mNbTriangles;
+
+	mMeshData.mExtraTrigData = PX_NEW(PxU8)[nTrigs];
+	memset(mMeshData.mExtraTrigData, 0, sizeof(PxU8)*nTrigs);
+
+	const Gu::TriangleT<PxU32>* trigs = reinterpret_cast<const Gu::TriangleT<PxU32>*>(mMeshData.mTriangles);
+	if(0x40000000 <= nTrigs)
+	{
+		//mesh is too big for this algo, need to be able to express trig indices in 30 bits, and still have an index reserved for "unused":
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "TriangleMesh: mesh is too big for this algo!");
+		return;
+	}
+
+	createEdgeList();
+	if(edgeList)
+	{
+		PX_ASSERT(edgeList->getNbFaces()==mMeshData.mNbTriangles);
+		if(edgeList->getNbFaces()==mMeshData.mNbTriangles)
+		{
+			for(PxU32 i=0;i<edgeList->getNbFaces();i++)
+			{
+				const Gu::EdgeTriangleData& ET = edgeList->getEdgeTriangle(i);
+				// Replicate flags
+				if(Gu::EdgeTriangleAC::HasActiveEdge01(ET)) mMeshData.mExtraTrigData[i] |= Gu::ETD_CONVEX_EDGE_01;
+				if(Gu::EdgeTriangleAC::HasActiveEdge12(ET)) mMeshData.mExtraTrigData[i] |= Gu::ETD_CONVEX_EDGE_12;
+				if(Gu::EdgeTriangleAC::HasActiveEdge20(ET)) mMeshData.mExtraTrigData[i] |= Gu::ETD_CONVEX_EDGE_20;
+			}
+		}
+	}
+
+	// fill the adjacencies
+	if(buildAdjacencies)
+	{
+		mMeshData.mAdjacencies = PX_NEW(PxU32)[nTrigs*3];
+		memset(mMeshData.mAdjacencies, 0xFFFFffff, sizeof(PxU32)*nTrigs*3);
+
+		PxU32 NbEdges = edgeList->getNbEdges();
+		const Gu::EdgeDescData* ED = edgeList->getEdgeToTriangles();
+		const Gu::EdgeData* Edges = edgeList->getEdges();
+		const PxU32* FBE = edgeList->getFacesByEdges();
+
+		// walk every edge; edges shared by two faces link the faces in both directions
+		while(NbEdges--)
+		{
+			// Get number of triangles sharing current edge
+			PxU32 Count = ED->Count;
+
+			if(Count > 1)
+			{
+				PxU32 FaceIndex0 = FBE[ED->Offset+0];
+				PxU32 FaceIndex1 = FBE[ED->Offset+1];
+
+				const Gu::EdgeData& edgeData = *Edges;
+				const Gu::TriangleT<PxU32>& T0 = trigs[FaceIndex0];
+				const Gu::TriangleT<PxU32>& T1 = trigs[FaceIndex1];
+
+				// offset identifies which of the triangle's 3 edge slots the neighbor occupies
+				PxU32 offset0 = T0.findEdgeCCW(edgeData.Ref0,edgeData.Ref1);
+				PxU32 offset1 = T1.findEdgeCCW(edgeData.Ref0,edgeData.Ref1);
+
+				mMeshData.setTriangleAdjacency(FaceIndex0, FaceIndex1, offset0);
+				mMeshData.setTriangleAdjacency(FaceIndex1, FaceIndex0, offset1);
+			}
+			ED++;
+			Edges++;
+		}
+	}
+
+#if PX_DEBUG
+	// debug-only cross-check: the flag array must agree with the edge list
+	for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+	{
+		const Gu::TriangleT<PxU32>& T = trigs[i];
+		PX_UNUSED(T);
+		const Gu::EdgeTriangleData& ET = edgeList->getEdgeTriangle(i);
+		PX_ASSERT((Gu::EdgeTriangleAC::HasActiveEdge01(ET) && (mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_01)) || (!Gu::EdgeTriangleAC::HasActiveEdge01(ET) && !(mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_01)));
+		PX_ASSERT((Gu::EdgeTriangleAC::HasActiveEdge12(ET) && (mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_12)) || (!Gu::EdgeTriangleAC::HasActiveEdge12(ET) && !(mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_12)));
+		PX_ASSERT((Gu::EdgeTriangleAC::HasActiveEdge20(ET) && (mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_20)) || (!Gu::EdgeTriangleAC::HasActiveEdge20(ET) && !(mMeshData.mExtraTrigData[i] & Gu::ETD_CONVEX_EDGE_20)));
+	}
+#endif
+	return;
+}
+
+namespace GrbTrimeshCookerHelper
+{
+
+// A directed edge endpoint pair (v -> a) collected for every convex or boundary edge;
+// sorting groups all neighbors of a vertex together for the per-vertex range build.
+struct SortedNeighbor
+{
+	PxU32 v, a; // vertex and adjacent vertex
+	bool boundary;
+
+	SortedNeighbor(PxU32 v_, PxU32 a_, bool b_): v(v_), a(a_), boundary(b_) {}
+
+	// sort boundary edges to the front so that they are kept when duplicates are removed
+	// (lexicographic on (v, a), with boundary < non-boundary as the final tie-breaker)
+	bool operator<(const SortedNeighbor& b) const
+	{
+		return (v<b.v || (v == b.v && a<b.a) || (v == b.v && a == b.a && boundary && !b.boundary));
+	}
+};
+
+// Range [start, start+length) into the sorted pair list holding the convex/boundary
+// neighbors of one "sharp" vertex; the default (length==0) marks a non-sharp vertex.
+struct SharpEdgeRange
+{
+	PxU32 start, length;
+	SharpEdgeRange(): start(0), length(0) {}
+	SharpEdgeRange(PxU32 s, PxU32 l): start(s), length(l) {}
+};
+
+// Bidirectional mapping between mesh-global vertex indices and a compact local
+// index space, assigned in insertion order. NOTE(review): only referenced by the
+// compiled-out (#if 0) buildVertexConnectionNew_p1/p2 variants below.
+class LocalIndexer
+{
+public:
+	bool insert(PxU32 meshIndex) // returns true if this is a new index
+	{
+		// the local index of a newly seen mesh index is the current map size
+		bool isNew = mMeshToLocal.insert(meshIndex, mLocalToMesh.size());
+		if(isNew)
+			mLocalToMesh.pushBack(meshIndex);
+		return isNew;
+	}
+
+	// local -> mesh index (localIndex must have been assigned by insert)
+	PxU32 meshIndex(PxU32 localIndex)
+	{
+		PX_ASSERT(localIndex<mLocalToMesh.size());
+		return mLocalToMesh[localIndex];
+	}
+
+	// mesh -> local index (meshIndex must be present)
+	PxU32 localIndex(PxU32 meshIndex)
+	{
+		PX_ASSERT(mMeshToLocal.find(meshIndex));
+		return mMeshToLocal[meshIndex];
+	}
+
+	bool contains(PxU32 meshIndex)
+	{
+		return mMeshToLocal.find(meshIndex) != 0;
+	}
+
+	// number of distinct mesh indices inserted so far
+	PxU32 size()
+	{
+		return mLocalToMesh.size();
+	}
+
+private:
+	Ps::Array<PxU32> mLocalToMesh;
+	Ps::HashMap<PxU32, PxU32> mMeshToLocal;
+};
+
+#include <stdio.h>
+
+
+// Collects every convex or boundary edge of the mesh as two directed (v, a) pairs,
+// sorts and de-duplicates them, and fills edgeRanges (indexed by vertex) with the
+// range of neighbors for every "sharp" vertex — one that has a boundary edge or at
+// least three convex-edge neighbors. Non-sharp vertices keep the default empty range.
+// triAdjacencies encodes per-edge adjacency with BOUNDARY / nonconvex flags
+// (see isEdgeNonconvex in GrbTriangleMeshCooking.h).
+void findSharpVertices(
+	Ps::Array<SortedNeighbor>& pairList,
+	Ps::Array<SharpEdgeRange>& edgeRanges,
+	/*const Ps::Array<Triangle>& triangles,*/
+	const uint3* triIndices,
+	const uint4* triAdjacencies,
+	PxU32 nbTris,
+	PxU32 nbVerts
+	)
+{
+	// sort the edges which are sharp or boundary
+	for(PxU32 i=0;i<nbTris;i++)
+	{
+		const uint4& triAdj = triAdjacencies[i];
+		const uint3& triIdx = triIndices[i];
+
+		if (!isEdgeNonconvex(triAdj.x))
+		{
+			pairList.pushBack(SortedNeighbor(triIdx.x, triIdx.y, triAdj.x == BOUNDARY));
+			pairList.pushBack(SortedNeighbor(triIdx.y, triIdx.x, triAdj.x == BOUNDARY));
+		}
+
+		if (!isEdgeNonconvex(triAdj.y))
+		{
+			pairList.pushBack(SortedNeighbor(triIdx.y, triIdx.z, triAdj.y == BOUNDARY));
+			pairList.pushBack(SortedNeighbor(triIdx.z, triIdx.y, triAdj.y == BOUNDARY));
+		}
+
+		if (!isEdgeNonconvex(triAdj.z))
+		{
+			pairList.pushBack(SortedNeighbor(triIdx.z, triIdx.x, triAdj.z == BOUNDARY));
+			pairList.pushBack(SortedNeighbor(triIdx.x, triIdx.z, triAdj.z == BOUNDARY));
+		}
+	}
+
+	Ps::sort(pairList.begin(), pairList.size());
+
+	// remove duplicates - note that boundary edges are sorted earlier, so we keep them.
+	// Guarded for the all-nonconvex case: with an empty pairList the old code grew the
+	// array to one uninitialized element, which the range loop below then read.
+	if(pairList.size())
+	{
+		PxU32 unique = 1;
+		for(PxU32 i=1;i<pairList.size();i++)
+		{
+			if(pairList[i].v != pairList[i-1].v || pairList[i].a != pairList[i-1].a)
+				pairList[unique++] = pairList[i];
+		}
+		pairList.resizeUninitialized(unique);
+	}
+
+	// a vertex is marked for sharp vertex processing if it has a boundary edge or at least three convex edges
+	edgeRanges.resize(nbVerts);
+	for(PxU32 p = 0, u ; p<pairList.size(); p = u)
+	{
+		bool boundary = false;
+		for(u=p+1; u<pairList.size() && pairList[u].v == pairList[p].v; u++)
+			boundary |= pairList[u].boundary;
+		if(boundary || u-p>=3)
+			edgeRanges[pairList[p].v] = SharpEdgeRange(p, u-p);
+	}
+}
+
+#if 0
+// NOTE(review): this variant is compiled out by the enclosing #if 0 and superseded by
+// the simpler implementation in the #else branch; it also passes uint4* triIndices to
+// findSharpVertices, which now takes uint3* — it would need updating before re-enabling.
+PxU32 buildVertexConnectionNew_p1(
+	Ps::Array<SortedNeighbor> & pairList,
+	Ps::Array<SharpEdgeRange> & edgeRanges,
+	LocalIndexer & vertexMap,
+
+	const uint4 * triIndices,
+	const uint4 * triAdjacencies,
+
+	PxU32 nbTris,
+	PxU32 nbVerts
+	)
+{
+	findSharpVertices(
+		pairList,
+		edgeRanges,
+		triIndices,
+		triAdjacencies,
+		nbTris,
+		nbVerts
+		);
+
+	// add all the original triangles and vertices and record how big the core is
+	for(PxU32 i=0; i<nbTris; i++)
+	{
+		const uint4 & triIdx = triIndices[i];
+		vertexMap.insert(triIdx.x);
+		vertexMap.insert(triIdx.y);
+		vertexMap.insert(triIdx.z);
+	}
+	PxU32 nbCoreVerts = vertexMap.size();
+
+	PX_ASSERT(nbCoreVerts == nbVerts);
+
+	// add adjacent triangles
+	for(PxU32 i=0;i<nbTris;i++)
+	{
+		const uint4 & triAdj = triAdjacencies[i];
+
+#define IS_TRI(triAdjIdx) (( (triAdjIdx) != BOUNDARY ) && ( !((triAdjIdx) & NONCONVEX_FLAG) ))
+
+		if(IS_TRI(triAdj.x))
+		{
+			const uint4 & triIdx = triIndices[triAdj.x];
+			vertexMap.insert(triIdx.x);
+			vertexMap.insert(triIdx.y);
+			vertexMap.insert(triIdx.z);
+		}
+
+		if(IS_TRI(triAdj.y))
+		{
+			const uint4 & triIdx = triIndices[triAdj.y];
+			vertexMap.insert(triIdx.x);
+			vertexMap.insert(triIdx.y);
+			vertexMap.insert(triIdx.z);
+		}
+
+		if(IS_TRI(triAdj.z))
+		{
+			const uint4 & triIdx = triIndices[triAdj.z];
+			vertexMap.insert(triIdx.x);
+			vertexMap.insert(triIdx.y);
+			vertexMap.insert(triIdx.z);
+
+		}
+
+#undef IS_TRI
+	}
+
+	// add the neighbors of the sharp vertices
+	PxU32 nbNeighbors = 0;
+	for(PxU32 i=0;i<nbCoreVerts;i++)
+	{
+		PxU32 meshIndex = vertexMap.meshIndex(i);
+		const SharpEdgeRange& er = edgeRanges[meshIndex];
+		for(PxU32 j = 0;j<er.length;j++)
+		{
+			PX_ASSERT(pairList[er.start+j].v == meshIndex);
+			vertexMap.insert(pairList[er.start + j].a);
+		}
+		nbNeighbors += er.length;
+	}
+
+	return nbNeighbors;
+}
+
+// NOTE(review): compiled out by the enclosing #if 0; see the active variant in the #else branch.
+void buildVertexConnectionNew_p2(
+	PxU32 * adjVertStart,
+	PxU32 * vertValency,
+	PxU32 * adjVertices,
+
+	Ps::Array<SortedNeighbor>& pairList,
+	Ps::Array<SharpEdgeRange>& edgeRanges,
+	LocalIndexer & vertexMap,
+
+	const uint4 * /*triIndices*/,
+	const uint4 * /*triAdjacencies*/,
+
+	PxU32 /*nbTris*/,
+	PxU32 nbVerts,
+	PxU32 /*nbNeighbors*/
+	)
+{
+	PxU32 n = 0;
+	for(PxU32 i=0;i<nbVerts;i++)
+	{
+		PxU32 meshIdx = vertexMap.meshIndex(i);
+		const SharpEdgeRange& er = edgeRanges[vertexMap.meshIndex(i)];
+		adjVertStart[meshIdx] = n;
+		vertValency[meshIdx] = er.length;
+		for(PxU32 j = 0;j<er.length;j++)
+			adjVertices[n++] = pairList[er.start+j].a;
+	}
+}
+#else
+
+
+// Pass 1 of the sharp-vertex adjacency build: runs findSharpVertices and returns the
+// total number of neighbor entries, i.e. the size the caller must allocate for the
+// flat adjVertices array consumed by buildVertexConnectionNew_p2.
+PxU32 buildVertexConnectionNew_p1(
+	Ps::Array<SortedNeighbor> & pairList,
+	Ps::Array<SharpEdgeRange> & edgeRanges,
+
+	const uint3* triIndices,
+	const uint4 * triAdjacencies,
+
+	PxU32 nbTris,
+	PxU32 nbVerts
+	)
+{
+	findSharpVertices(
+		pairList,
+		edgeRanges,
+		triIndices,
+		triAdjacencies,
+		nbTris,
+		nbVerts
+		);
+
+
+	// add the neighbors of the sharp vertices
+	PxU32 nbNeighbors = 0;
+	for (PxU32 i = 0; i<nbVerts; i++)
+	{
+		const SharpEdgeRange& er = edgeRanges[i];
+		nbNeighbors += er.length;
+	}
+
+	return nbNeighbors;
+}
+
+// Pass 2: flattens the per-vertex neighbor ranges into CSR-style output arrays.
+// adjVertStart[i] is vertex i's offset into adjVertices, vertValency[i] its neighbor
+// count; adjVertices must hold the total returned by buildVertexConnectionNew_p1.
+void buildVertexConnectionNew_p2(
+	PxU32 * adjVertStart,
+	PxU32 * vertValency,
+	PxU32 * adjVertices,
+
+	Ps::Array<SortedNeighbor>& pairList,
+	Ps::Array<SharpEdgeRange>& edgeRanges,
+	PxU32 nbVerts
+	)
+{
+	PxU32 n = 0;
+	for (PxU32 i = 0; i<nbVerts; i++)
+	{
+		const SharpEdgeRange& er = edgeRanges[i];
+		adjVertStart[i] = n;
+		vertValency[i] = er.length;
+		for (PxU32 j = 0; j<er.length; j++)
+			adjVertices[n++] = pairList[er.start + j].a;
+	}
+}
+#endif
+
+} // namespace GrbTrimeshCookerHelper
+
+// When GPU data is requested, snapshots the (cleaned) 32-bit triangle indices into the
+// GRB triangle buffer and duplicates the CPU face remap table for the GPU side.
+void TriangleMeshBuilder::recordTriangleIndices()
+{
+	if (mParams.buildGPUData)
+	{
+		PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+		PX_ASSERT(mMeshData.mGRB_triIndices);
+
+		// copy the original triangle indices into the GRB triangle index buffer
+		PxMemCopy(mMeshData.mGRB_triIndices, mMeshData.mTriangles, sizeof(IndTri32) *mMeshData.mNbTriangles);
+
+
+		if (mMeshData.mFaceRemap)
+		{
+			// Cleaning produced a remap table: keep a GRB copy; it is rewritten in terms
+			// of the GPU triangle ordering later, in createGRBMidPhaseAndData.
+			mMeshData.mGRB_faceRemap = PX_NEW(PxU32)[mMeshData.mNbTriangles];
+			PxMemCopy(mMeshData.mGRB_faceRemap, mMeshData.mFaceRemap, sizeof(PxU32)*mMeshData.mNbTriangles);
+		}
+
+	}
+}
+
+// Builds the GPU (GRB) companion data for the mesh: per-triangle adjacency (with
+// boundary/nonconvex flags) and CSR-style per-vertex adjacency lists for the
+// sharp vertices (mGRB_adjVertStart / mGRB_vertValency / mGRB_adjVertices).
+void TriangleMeshBuilder::createGRBData()
+{
+
+	const PxU32 & numTris = mMeshData.mNbTriangles;
+	const PxU32 & numVerts = mMeshData.mNbVertices;
+
+	PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+
+
+	// Core: Mesh data
+	///////////////////////////////////////////////////////////////////////////////////
+
+	// (by using adjacency info generated by physx cooker)
+	// scratch buffer for per-triangle normals; only needed while building adjacencies
+	PxVec3 * tempNormalsPerTri_prealloc = reinterpret_cast<PxVec3 *>(PX_ALLOC(numTris * sizeof(PxVec3), PX_DEBUG_EXP("tempNormalsPerTri_prealloc")));
+
+	mMeshData.mGRB_triAdjacencies = PX_ALLOC(sizeof(uint4)*numTris, PX_DEBUG_EXP("GRB_triAdjacencies"));
+
+
+	buildAdjacencies(
+		reinterpret_cast<uint4 *>(mMeshData.mGRB_triAdjacencies),
+		tempNormalsPerTri_prealloc,
+		mMeshData.mVertices,
+		reinterpret_cast<uint3*>(mMeshData.mGRB_triIndices),
+		numTris
+		);
+
+
+	PX_FREE(tempNormalsPerTri_prealloc);
+
+	mMeshData.mGRB_vertValency = PX_NEW(PxU32)[numVerts];
+	mMeshData.mGRB_adjVertStart = PX_NEW(PxU32)[numVerts];
+
+
+	Ps::Array<GrbTrimeshCookerHelper::SortedNeighbor> pairsList;
+	Ps::Array<GrbTrimeshCookerHelper::SharpEdgeRange> edgeRanges;
+
+
+	// pass 1 computes the total neighbor count so the flat array can be sized exactly
+	mMeshData.mGRB_meshAdjVerticiesTotal = GrbTrimeshCookerHelper::buildVertexConnectionNew_p1(
+		pairsList,
+		edgeRanges,
+
+		reinterpret_cast<uint3*>(mMeshData.mGRB_triIndices),
+		reinterpret_cast<uint4 *>(mMeshData.mGRB_triAdjacencies),
+		numTris,
+		numVerts
+		);
+
+
+
+	// pass 2 fills the CSR arrays
+	mMeshData.mGRB_adjVertices = PX_NEW(PxU32)[mMeshData.mGRB_meshAdjVerticiesTotal];
+	GrbTrimeshCookerHelper::buildVertexConnectionNew_p2(
+		mMeshData.mGRB_adjVertStart,
+		mMeshData.mGRB_vertValency,
+		mMeshData.mGRB_adjVertices,
+		pairsList,
+		edgeRanges,
+		numVerts
+		);
+
+
+}
+
+// When GPU data is requested: builds the BV32 midphase tree, the GRB adjacency data,
+// and converts mGRB_faceRemap from "GPU index -> original index" into
+// "GPU index -> CPU (cleaned) index" via an inverse of the CPU face remap table.
+// originalTriangleCount is the triangle count of the input descriptor, before cleaning.
+void TriangleMeshBuilder::createGRBMidPhaseAndData(const PxU32 originalTriangleCount)
+{
+	if (mParams.buildGPUData)
+	{
+
+		PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+
+		BV32Tree* bv32Tree = PX_NEW(BV32Tree);
+		mMeshData.mGRB_BV32Tree = bv32Tree;
+
+		BV32TriangleMeshBuilder::createMidPhaseStructure(mParams, mMeshData, *bv32Tree);
+
+		createGRBData();
+
+		//create a remap table from GPU to CPU remap table
+		PxU32* orignalToRemap = PX_NEW(PxU32)[originalTriangleCount];
+
+		PX_ASSERT(mMeshData.mFaceRemap);
+
+
+		// invert the CPU remap: original triangle index -> cleaned/CPU triangle index
+		for (PxU32 i = 0; i < mMeshData.mNbTriangles; ++i)
+		{
+			const PxU32 index = mMeshData.mFaceRemap[i];
+			PX_ASSERT(index < originalTriangleCount);
+			orignalToRemap[index] = i;
+		}
+
+
+		//map CPU remap triangle index to GPU remap triangle index
+		for (PxU32 i = 0; i < mMeshData.mNbTriangles; ++i)
+		{
+			const PxU32 index = mMeshData.mGRB_faceRemap[i];
+			mMeshData.mGRB_faceRemap[i] = orignalToRemap[index];
+		}
+
+#if BV32_VALIDATE
+		IndTri32* grbTriIndices = reinterpret_cast<IndTri32*>(mMeshData.mGRB_triIndices);
+		IndTri32* cpuTriIndices = reinterpret_cast<IndTri32*>(mMeshData.mTriangles);
+		//map CPU remap triangle index to GPU remap triangle index
+		for (PxU32 i = 0; i < mMeshData.mNbTriangles; ++i)
+		{
+			PX_ASSERT(grbTriIndices[i].mRef[0] == cpuTriIndices[mMeshData.mGRB_faceRemap[i]].mRef[0]);
+			PX_ASSERT(grbTriIndices[i].mRef[1] == cpuTriIndices[mMeshData.mGRB_faceRemap[i]].mRef[1]);
+			PX_ASSERT(grbTriIndices[i].mRef[2] == cpuTriIndices[mMeshData.mGRB_faceRemap[i]].mRef[2]);
+		}
+#endif
+
+		// NOTE(review): the null check is redundant — PX_NEW above cannot return NULL here
+		if (orignalToRemap)
+			PX_DELETE_POD(orignalToRemap);
+
+	}
+}
+
+// Builds the shared edge list (faces-to-edges and edges-to-faces) used by
+// createSharedEdgeData, handling both 16- and 32-bit triangle indices.
+// On failure, 'edgeList' is left NULL and callers must tolerate that.
+void TriangleMeshBuilder::createEdgeList()
+{
+	Gu::EDGELISTCREATE create;
+	create.NbFaces = mMeshData.mNbTriangles;
+	if(mMeshData.has16BitIndices())
+	{
+		create.DFaces = NULL;
+		create.WFaces = reinterpret_cast<PxU16*>(mMeshData.mTriangles);
+	}
+	else
+	{
+		create.DFaces = reinterpret_cast<PxU32*>(mMeshData.mTriangles);
+		create.WFaces = NULL;
+	}
+	create.FacesToEdges = true;
+	create.EdgesToFaces = true;
+	create.Verts = mMeshData.mVertices;
+	//create.Epsilon = 0.1f;
+	// create.Epsilon = convexEdgeThreshold;
+	edgeList = PX_NEW(Gu::EdgeListBuilder);
+	if(!edgeList->init(create))
+	{
+		PX_DELETE(edgeList);
+		edgeList = 0;
+	}
+}
+
+// Frees the edge list (safe to call when it was never built or already released).
+void TriangleMeshBuilder::releaseEdgeList()
+{
+	PX_DELETE_AND_RESET(edgeList);
+}
+
+//
+// When suppressTriangleMeshRemapTable is true, the face remap table is not created. This saves a significant amount of memory,
+// but the SDK will not be able to provide information about which mesh triangle is hit in collisions, sweeps or raycast hits.
+//
+// The sequence is as follows: import and clean the input mesh, record the GRB triangle
+// indices, build the midphase structure, compute the local bounds, create the shared
+// edge data, and finally build the GRB midphase and adjacency data.
+
+// Main cooking entry point: validates the descriptor and cooking params, imports and
+// cleans the mesh (creating implicit topology for non-indexed input), then builds the
+// midphase, local bounds, shared edge data and (optionally) the GPU (GRB) data.
+// Returns false on invalid input or failed import.
+bool TriangleMeshBuilder::loadFromDesc(const PxTriangleMeshDesc& _desc, PxTriangleMeshCookingResult::Enum* condition, bool validateMesh)
+{
+	const PxU32 originalTriangleCount = _desc.triangles.count;
+	if(!_desc.isValid())
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "TriangleMesh::loadFromDesc: desc.isValid() failed!");
+		return false;
+	}
+
+	// verify the mesh params
+	if(!mParams.midphaseDesc.isValid())
+	{
+		Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "TriangleMesh::loadFromDesc: mParams.midphaseDesc.isValid() failed!");
+		return false;
+	}
+
+	// Create a local copy that we can modify
+	PxTriangleMeshDesc desc = _desc;
+
+	// Save simple params
+	{
+		// Handle implicit topology
+		PxU32* topology = NULL;
+		if(!desc.triangles.data)
+		{
+			// We'll create 32-bit indices
+			desc.flags &= ~PxMeshFlag::e16_BIT_INDICES;
+			desc.triangles.stride = sizeof(PxU32)*3;
+
+			{
+				// Non-indexed mesh => create implicit topology
+				desc.triangles.count = desc.points.count/3;
+				// Create default implicit topology
+				topology = PX_NEW_TEMP(PxU32)[desc.points.count];
+				for(PxU32 i=0;i<desc.points.count;i++)
+					topology[i] = i;
+				desc.triangles.data = topology;
+			}
+		}
+		// Continue as usual using our new descriptor
+
+		// Convert and clean the input mesh
+		if (!importMesh(desc, mParams, condition, validateMesh))
+		{
+			PX_DELETE_POD(topology);	// fix: the implicit topology was leaked on this failure path
+			return false;
+		}
+
+		// Cleanup if needed
+		PX_DELETE_POD(topology);
+	}
+
+
+	//copy the original triangle indices to grb triangle indices if buildGRBData is true
+	recordTriangleIndices();
+
+	createMidPhaseStructure();
+
+	// Compute local bounds
+	computeLocalBounds(); // AP scaffold: local bounds are already computed in builder.createRTree efficiently with SIMD
+
+	createSharedEdgeData(mParams.buildTriangleAdjacencies, !(mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eDISABLE_ACTIVE_EDGES_PRECOMPUTE));
+
+	createGRBMidPhaseAndData(originalTriangleCount);
+
+
+	return true;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Serializes the cooked mesh to the stream: header, midphase ID, serial flags,
+// vertex/index buffers (indices stored with the smallest width that fits),
+// optional per-triangle data, midphase structure, bounds and — when
+// params.buildGPUData is set — the GRB (GPU) mirror data.
+// "platformMismatch" requests byte-swapping for a target of opposite endianness.
+bool TriangleMeshBuilder::save(PxOutputStream& stream, bool platformMismatch, const PxCookingParams& params) const
+{
+	// Export header
+	if(!writeHeader('M', 'E', 'S', 'H', PX_MESH_VERSION, platformMismatch, stream))
+		return false;
+
+	// Export midphase ID
+	writeDword(getMidphaseID(), platformMismatch, stream);
+
+	// Export serialization flags: record which optional blocks follow
+	PxU32 serialFlags = 0;
+	if(mMeshData.mMaterialIndices) serialFlags |= Gu::IMSF_MATERIALS;
+	if(mMeshData.mFaceRemap) serialFlags |= Gu::IMSF_FACE_REMAP;
+	if(mMeshData.mAdjacencies) serialFlags |= Gu::IMSF_ADJACENCIES;
+	if (params.buildGPUData) serialFlags |= Gu::IMSF_GRB_DATA;
+	// Compute serialization flags for indices: find the largest referenced vertex
+	// index to decide whether indices can be stored as 8-bit or 16-bit values.
+	PxU32 maxIndex=0;
+	const Gu::TriangleT<PxU32>* tris = reinterpret_cast<const Gu::TriangleT<PxU32>*>(mMeshData.mTriangles);
+	for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+	{
+		if(tris[i].v[0]>maxIndex) maxIndex = tris[i].v[0];
+		if(tris[i].v[1]>maxIndex) maxIndex = tris[i].v[1];
+		if(tris[i].v[2]>maxIndex) maxIndex = tris[i].v[2];
+	}
+
+	// eFORCE_32BIT_INDICES disables the index compression below.
+	bool force32 = (params.meshPreprocessParams & PxMeshPreprocessingFlag::eFORCE_32BIT_INDICES);
+	if (maxIndex <= 0xFFFF && !force32)
+		serialFlags |= (maxIndex <= 0xFF ? Gu::IMSF_8BIT_INDICES : Gu::IMSF_16BIT_INDICES);
+	writeDword(serialFlags, platformMismatch, stream);
+
+	// Export mesh
+	writeDword(mMeshData.mNbVertices, platformMismatch, stream);
+	writeDword(mMeshData.mNbTriangles, platformMismatch, stream);
+	writeFloatBuffer(&mMeshData.mVertices->x, mMeshData.mNbVertices*3, platformMismatch, stream);
+	if(serialFlags & Gu::IMSF_8BIT_INDICES)
+	{
+		// One byte per index — no endianness concerns.
+		const PxU32* indices = tris->v;
+		for(PxU32 i=0;i<mMeshData.mNbTriangles*3;i++)
+		{
+			PxI8 data = PxI8(indices[i]);
+			stream.write(&data, sizeof(PxU8));
+		}
+	}
+	else if(serialFlags & Gu::IMSF_16BIT_INDICES)
+	{
+		const PxU32* indices = tris->v;
+		for(PxU32 i=0;i<mMeshData.mNbTriangles*3;i++)
+			writeWord(Ps::to16(indices[i]), platformMismatch, stream);
+	}
+	else
+		writeIntBuffer(tris->v, mMeshData.mNbTriangles*3, platformMismatch, stream);
+
+	// Optional per-triangle material indices
+	if(mMeshData.mMaterialIndices)
+		writeWordBuffer(mMeshData.mMaterialIndices, mMeshData.mNbTriangles, platformMismatch, stream);
+
+	// Optional face remap table (cooked triangle -> original triangle index),
+	// stored with the smallest index width that fits its max value.
+	if(mMeshData.mFaceRemap)
+	{
+		PxU32 maxId = computeMaxIndex(mMeshData.mFaceRemap, mMeshData.mNbTriangles);
+		writeDword(maxId, platformMismatch, stream);
+		storeIndices(maxId, mMeshData.mNbTriangles, mMeshData.mFaceRemap, stream, platformMismatch);
+//		writeIntBuffer(mMeshData.mFaceRemap, mMeshData.mNbTriangles, platformMismatch, stream);
+	}
+
+	// Optional adjacency information (3 entries per triangle)
+	if(mMeshData.mAdjacencies)
+		writeIntBuffer(mMeshData.mAdjacencies, mMeshData.mNbTriangles*3, platformMismatch, stream);
+
+	// Export midphase structure
+	saveMidPhaseStructure(stream);
+
+
+	// Export local bounds
+	writeFloat(mMeshData.mGeomEpsilon, platformMismatch, stream);
+
+	writeFloat(mMeshData.mAABB.minimum.x, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.minimum.y, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.minimum.z, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.maximum.x, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.maximum.y, platformMismatch, stream);
+	writeFloat(mMeshData.mAABB.maximum.z, platformMismatch, stream);
+
+	// Optional per-triangle byte data (presumably active-edge flags — see
+	// createSharedEdgeData). A zero count marks the block as absent.
+	if(mMeshData.mExtraTrigData)
+	{
+		writeDword(mMeshData.mNbTriangles, platformMismatch, stream);
+		// No need to convert those bytes
+		stream.write(mMeshData.mExtraTrigData, mMeshData.mNbTriangles*sizeof(PxU8));
+	}
+	else
+		writeDword(0, platformMismatch, stream);
+
+	// GRB write -----------------------------------------------------------------
+	if (params.buildGPUData)
+	{
+		writeDword(mMeshData.mGRB_meshAdjVerticiesTotal, platformMismatch, stream);
+
+		// GRB triangle indices are compressed with the same width chosen above.
+		const PxU32* indices = reinterpret_cast<PxU32*>(mMeshData.mGRB_triIndices);
+		if (serialFlags & Gu::IMSF_8BIT_INDICES)
+		{
+			for (PxU32 i = 0; i<mMeshData.mNbTriangles * 3; i++)
+			{
+				PxI8 data = PxI8(indices[i]);
+				stream.write(&data, sizeof(PxU8));
+			}
+		}
+		else if (serialFlags & Gu::IMSF_16BIT_INDICES)
+		{
+			for (PxU32 i = 0; i<mMeshData.mNbTriangles * 3; i++)
+				writeWord(Ps::to16(indices[i]), platformMismatch, stream);
+		}
+		else
+			writeIntBuffer(indices, mMeshData.mNbTriangles * 3, platformMismatch, stream);
+
+
+		//writeIntBuffer(reinterpret_cast<PxU32*>(mMeshData.mGRB_triIndices), , mMeshData.mNbTriangles*3, platformMismatch, stream);
+
+		//writeIntBuffer(reinterpret_cast<PxU32 *>(mMeshData.mGRB_triIndices), mMeshData.mNbTriangles*4, platformMismatch, stream);
+
+		// Adjacency uses 4 entries per triangle in the GRB layout.
+		writeIntBuffer(reinterpret_cast<PxU32 *>(mMeshData.mGRB_triAdjacencies), mMeshData.mNbTriangles*4, platformMismatch, stream);
+		writeIntBuffer(mMeshData.mGRB_vertValency, mMeshData.mNbVertices, platformMismatch, stream);
+		writeIntBuffer(mMeshData.mGRB_adjVertStart, mMeshData.mNbVertices, platformMismatch, stream);
+		writeIntBuffer(mMeshData.mGRB_adjVertices, mMeshData.mGRB_meshAdjVerticiesTotal, platformMismatch, stream);
+		writeIntBuffer(mMeshData.mGRB_faceRemap, mMeshData.mNbTriangles, platformMismatch, stream);
+
+		//Export GPU midphase structure
+		BV32Tree* bv32Tree = reinterpret_cast<BV32Tree*>(mMeshData.mGRB_BV32Tree);
+		BV32TriangleMeshBuilder::saveMidPhaseStructure(bv32Tree, stream);
+	}
+
+	// End of GRB write ----------------------------------------------------------
+
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if PX_VC
+#pragma warning(push)
+#pragma warning(disable:4996) // permitting use of gatherStrided until we have a replacement.
+#endif
+
+// Copies the user-provided mesh (strided vertices/indices/materials) into the
+// builder's own storage, normalizing index width to 32 bits and optionally
+// flipping triangle winding, then cleans or validates the mesh.
+// \return false on corrupted vertices (checked builds) or failed cleaning/validation
+bool TriangleMeshBuilder::importMesh(const PxTriangleMeshDesc& desc,const PxCookingParams& params,PxTriangleMeshCookingResult::Enum* condition, bool validate)
+{
+	//convert and clean the input mesh
+	//this is where the mesh data gets copied from user mem to our mem
+
+	PxVec3* verts = mMeshData.allocateVertices(desc.points.count);
+	Gu::TriangleT<PxU32>* tris = reinterpret_cast<Gu::TriangleT<PxU32>*>(mMeshData.allocateTriangles(desc.triangles.count, true, PxU32(params.buildGPUData)));
+
+	//copy, and compact to get rid of strides:
+	Cooking::gatherStrided(desc.points.data, verts, mMeshData.mNbVertices, sizeof(PxVec3), desc.points.stride);
+
+#if PX_CHECKED
+	// PT: check all input vertices are valid
+	for(PxU32 i=0;i<desc.points.count;i++)
+	{
+		const PxVec3& p = verts[i];
+		if(!PxIsFinite(p.x) || !PxIsFinite(p.y) || !PxIsFinite(p.z))
+		{
+			Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "input mesh contains corrupted vertex data");
+			return false;
+		}
+	}
+#endif
+
+	//for trigs index stride conversion and eventual reordering is also needed, I don't think flexicopy can do that for us.
+
+	Gu::TriangleT<PxU32>* dest = tris;
+	const Gu::TriangleT<PxU32>* pastLastDest = tris + mMeshData.mNbTriangles;
+	const PxU8* source = reinterpret_cast<const PxU8*>(desc.triangles.data);
+
+	//4 combos of 16 vs 32 and flip vs no flip
+	// c==1 swaps the second and third index (slots 1+c / 2-c), reversing the
+	// winding and therefore the triangle normals.
+	PxU32 c = (desc.flags & PxMeshFlag::eFLIPNORMALS)?PxU32(1):0;
+	if (desc.flags & PxMeshFlag::e16_BIT_INDICES)
+	{
+		//index stride conversion is also needed, I don't think flexicopy can do that for us.
+		while (dest < pastLastDest)
+		{
+			const PxU16 * trig16 = reinterpret_cast<const PxU16*>(source);
+			dest->v[0] = trig16[0];
+			dest->v[1] = trig16[1+c];
+			dest->v[2] = trig16[2-c];
+			dest ++;
+			source += desc.triangles.stride;
+		}
+	}
+	else
+	{
+		while (dest < pastLastDest)
+		{
+			const PxU32 * trig32 = reinterpret_cast<const PxU32*>(source);
+			dest->v[0] = trig32[0];
+			dest->v[1] = trig32[1+c];
+			dest->v[2] = trig32[2-c];
+			dest ++;
+			source += desc.triangles.stride;
+		}
+	}
+
+	//copy the material index list if any:
+	if(desc.materialIndices.data)
+	{
+		PxMaterialTableIndex* materials = mMeshData.allocateMaterials();
+		Cooking::gatherStrided(desc.materialIndices.data, materials, mMeshData.mNbTriangles, sizeof(PxMaterialTableIndex), desc.materialIndices.stride);
+
+		// Check material indices: 0xffff is treated as an invalid material slot
+		for(PxU32 i=0;i<mMeshData.mNbTriangles;i++) PX_ASSERT(materials[i]!=0xffff);
+	}
+
+	// Clean the mesh using ICE's MeshBuilder
+	// This fixes the bug in ConvexTest06 where the inertia tensor computation fails for a mesh => it works with a clean mesh
+
+	// Cleaning also runs when only validation is requested (validate==true).
+	if (!(params.meshPreprocessParams & PxMeshPreprocessingFlag::eDISABLE_CLEAN_MESH) || validate)
+	{
+		if(!cleanMesh(validate, condition))
+		{
+			if(!validate)
+				Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "cleaning the mesh failed");
+			return false;
+		}
+	}
+	else
+	{
+		// we need to fill the remap table if no cleaning was done
+		// (identity mapping: cooked triangle i == original triangle i)
+		if(params.suppressTriangleMeshRemapTable == false)
+		{
+			PX_ASSERT(mMeshData.mFaceRemap == NULL);
+			mMeshData.mFaceRemap = PX_NEW(PxU32)[mMeshData.mNbTriangles];
+			for (PxU32 i = 0; i < mMeshData.mNbTriangles; i++)
+				mMeshData.mFaceRemap[i] = i;
+		}
+	}
+	return true;
+}
+
+#if PX_VC
+#pragma warning(pop)
+#endif
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+//#define PROFILE_BOUNDS
+#ifdef PROFILE_BOUNDS
+ #include <windows.h>
+ #pragma comment(lib, "winmm.lib")
+#endif
+
+// Computes the mesh's local-space AABB and derives a geometric epsilon from it.
+// Float round-off error scales with coordinate magnitude (~2^-24 * W for a
+// 24-bit mantissa); 2^-22 * W leaves headroom for coordinate transforms
+// (rationale per Charles Bloom's notes on collision epsilon selection).
+// PT: TODO: runtime checkings for this
+void TriangleMeshBuilder::computeLocalBounds()
+{
+#ifdef PROFILE_BOUNDS
+	int time = timeGetTime();
+#endif
+
+	// Tight AABB around all vertices.
+	PxBounds3& localBounds = mMeshData.mAABB;
+	computeBoundsAroundVertices(localBounds, mMeshData.mNbVertices, mMeshData.mVertices);
+
+	// Largest absolute coordinate of the bounds, over all three axes. We must do
+	// this before bounds extrusion for heightfields.
+	PxReal maxAbsCoord = 0.0f;
+	for (PxU32 axis = 0; axis < 3; axis++)
+	{
+		const PxReal axisExtent = PxMax(PxAbs(localBounds.maximum[axis]), PxAbs(localBounds.minimum[axis]));
+		maxAbsCoord = PxMax(maxAbsCoord, axisExtent);
+	}
+
+	// Scale down to the epsilon: 2^-22 * W.
+	mMeshData.mGeomEpsilon = maxAbsCoord * powf(2.0f, -22.0f);
+
+#ifdef PROFILE_BOUNDS
+	int deltaTime = timeGetTime() - time;
+	printf("Bounds time: %f\n", float(deltaTime)*0.001f);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Shrinks the triangle index buffer from 32-bit to 16-bit indices when every
+// vertex index fits in 16 bits, then lets the midphase refresh its pointers
+// via onMeshIndexFormatChange().
+void TriangleMeshBuilder::checkMeshIndicesSize()
+{
+	Gu::TriangleMeshData& m = mMeshData;
+
+	// check if we can change indices from 32bits to 16bits
+	if(m.mNbVertices <= 0xffff && !m.has16BitIndices())
+	{
+		const PxU32 numTriangles = m.mNbTriangles;
+		// Keep the old 32-bit buffer alive while the new one is filled.
+		PxU32* PX_RESTRICT indices32 = reinterpret_cast<PxU32*> (m.mTriangles);
+
+		m.mTriangles = 0; // force a realloc
+		m.allocateTriangles(numTriangles, false);
+		PX_ASSERT(m.has16BitIndices()); // realloc'ing without the force32bit flag changed it.
+
+		// Narrow each index into the freshly allocated 16-bit buffer.
+		PxU16* PX_RESTRICT indices16 = reinterpret_cast<PxU16*> (m.mTriangles);
+		for (PxU32 i = 0; i < numTriangles*3; i++)
+			indices16[i] = Ps::to16(indices32[i]);
+
+		PX_FREE(indices32);
+
+		// Notify derived builders so the midphase points at the new buffer.
+		onMeshIndexFormatChange();
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// BV4 (BVH34) builder: routes the shared base-class logic to the member BV4 mesh data.
+BV4TriangleMeshBuilder::BV4TriangleMeshBuilder(const PxCookingParams& params) : TriangleMeshBuilder(mData, params)
+{
+}
+
+BV4TriangleMeshBuilder::~BV4TriangleMeshBuilder()
+{
+}
+
+// The triangle buffer has been re-allocated with a different index width:
+// re-point the midphase mesh interface at the proper 16- or 32-bit array
+// (the unused one is passed as NULL).
+void BV4TriangleMeshBuilder::onMeshIndexFormatChange()
+{
+	const bool has16BitIndices = (mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES) != 0;
+	IndTri16* triangles16 = has16BitIndices ? reinterpret_cast<IndTri16*>(mMeshData.mTriangles) : NULL;
+	IndTri32* triangles32 = has16BitIndices ? NULL : reinterpret_cast<IndTri32*>(mMeshData.mTriangles);
+	mData.mMeshInterface.setPointers(triangles32, triangles16, mMeshData.mVertices);
+}
+
+// Builds the BV4 (BVH34) midphase tree. The builder reorders triangles; the
+// permutation ("order") is then applied to the per-triangle material indices
+// and composed into the face remap table.
+void BV4TriangleMeshBuilder::createMidPhaseStructure()
+{
+	const float gBoxEpsilon = 2e-4f;
+//	const float gBoxEpsilon = 0.1f;
+	mData.mMeshInterface.initRemap();
+	mData.mMeshInterface.setNbVertices(mMeshData.mNbVertices);
+	mData.mMeshInterface.setNbTriangles(mMeshData.mNbTriangles);
+
+	// Feed the mesh interface the currently active index format.
+	IndTri32* triangles32 = NULL;
+	IndTri16* triangles16 = NULL;
+	if (mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES)
+	{
+		triangles16 = reinterpret_cast<IndTri16*>(mMeshData.mTriangles);
+	}
+	else
+	{
+		triangles32 = reinterpret_cast<IndTri32*>(mMeshData.mTriangles);
+	}
+
+	mData.mMeshInterface.setPointers(triangles32, triangles16, mMeshData.mVertices);
+
+	// Leaf size comes from the BVH34 descriptor when one is provided.
+	const PxU32 nbTrisPerLeaf = (mParams.midphaseDesc.getType() == PxMeshMidPhase::eBVH34) ? mParams.midphaseDesc.mBVH34Desc.numTrisPerLeaf : 4;
+
+	if(!BuildBV4Ex(mData.mBV4Tree, mData.mMeshInterface, gBoxEpsilon, nbTrisPerLeaf))
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "BV4 tree failed to build.");
+		return;
+	}
+
+//	remapTopology(mData.mMeshInterface);
+
+	// Permute per-triangle material indices to the new triangle order.
+	const PxU32* order = mData.mMeshInterface.getRemap();
+	if(mMeshData.mMaterialIndices)
+	{
+		PxMaterialTableIndex* newMat = PX_NEW(PxMaterialTableIndex)[mMeshData.mNbTriangles];
+		for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+			newMat[i] = mMeshData.mMaterialIndices[order[i]];
+		PX_DELETE_POD(mMeshData.mMaterialIndices);
+		mMeshData.mMaterialIndices = newMat;
+	}
+
+	// Compose the tree permutation into the face remap table. The table is also
+	// required (even if suppressed by the user) when GPU data is built.
+	if (!mParams.suppressTriangleMeshRemapTable || mParams.buildGPUData)
+	{
+		PxU32* newMap = PX_NEW(PxU32)[mMeshData.mNbTriangles];
+		for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
+			newMap[i] = mMeshData.mFaceRemap ? mMeshData.mFaceRemap[order[i]] : order[i];
+		PX_DELETE_POD(mMeshData.mFaceRemap);
+		mMeshData.mFaceRemap = newMap;
+	}
+	mData.mMeshInterface.releaseRemap();
+}
+
+// Serializes the BV4 tree: 'BV4 ' chunk, version, local bounds, init data,
+// optional quantization coefficients, then the packed nodes. Note the mismatch
+// flag here is derived from the host endianness (swap on little-endian hosts)
+// rather than the caller's platformMismatch — consumed by the BV4 load path.
+void BV4TriangleMeshBuilder::saveMidPhaseStructure(PxOutputStream& stream) const
+{
+	const PxU32 version = 1;
+
+	const bool mismatch = (littleEndian() == 1);
+	writeChunk('B', 'V', '4', ' ', stream);
+	writeDword(version, mismatch, stream);
+
+	writeFloat(mData.mBV4Tree.mLocalBounds.mCenter.x, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mLocalBounds.mCenter.y, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mLocalBounds.mCenter.z, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mLocalBounds.mExtentsMagnitude, mismatch, stream);
+
+	writeDword(mData.mBV4Tree.mInitData, mismatch, stream);
+#ifdef GU_BV4_QUANTIZED_TREE
+	// Quantized trees additionally store the dequantization coefficients.
+	writeFloat(mData.mBV4Tree.mCenterOrMinCoeff.x, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mCenterOrMinCoeff.y, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mCenterOrMinCoeff.z, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mExtentsOrMaxCoeff.x, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mExtentsOrMaxCoeff.y, mismatch, stream);
+	writeFloat(mData.mBV4Tree.mExtentsOrMaxCoeff.z, mismatch, stream);
+#endif
+	writeDword(mData.mBV4Tree.mNbNodes, mismatch, stream);
+	for(PxU32 i=0;i<mData.mBV4Tree.mNbNodes;i++)
+	{
+		const BVDataPacked& node = mData.mBV4Tree.mNodes[i];
+#ifdef GU_BV4_QUANTIZED_TREE
+		// Quantized node AABB: 6 16-bit values.
+		writeWordBuffer(&node.mAABB.mData[0].mExtents, 6, mismatch, stream);
+#else
+		// Full-precision node AABB: 6 floats (center + extents).
+		writeFloatBuffer(&node.mAABB.mCenter.x, 6, mismatch, stream);
+#endif
+		writeDword(node.mData, mismatch, stream);
+	}
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Builds the GPU (GRB) BV32 midphase tree over the GRB triangle index buffer
+// (always 32-bit). The builder's triangle permutation is composed into the
+// GRB face remap table.
+void BV32TriangleMeshBuilder::createMidPhaseStructure(const PxCookingParams& params, Gu::TriangleMeshData& meshData, Gu::BV32Tree& bv32Tree)
+{
+	const float gBoxEpsilon = 2e-4f;
+
+	Gu::SourceMesh meshInterface;
+	// const float gBoxEpsilon = 0.1f;
+	meshInterface.initRemap();
+	meshInterface.setNbVertices(meshData.mNbVertices);
+	meshInterface.setNbTriangles(meshData.mNbTriangles);
+
+	// GRB indices are never 16-bit.
+	PX_ASSERT(!(meshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
+
+	IndTri32* triangles32 = reinterpret_cast<IndTri32*>(meshData.mGRB_triIndices);
+
+	meshInterface.setPointers(triangles32, NULL, meshData.mVertices);
+
+	// Wide leaves: up to 32 triangles per BV32 leaf.
+	PxU32 nbTrisPerLeaf = 32;
+
+	if (!BuildBV32Ex(bv32Tree, meshInterface, gBoxEpsilon, nbTrisPerLeaf))
+	{
+		Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "BV32 tree failed to build.");
+		return;
+	}
+
+	const PxU32* order = meshInterface.getRemap();
+
+	// Compose the tree permutation into the GRB face remap table (kept even when
+	// the user suppressed the CPU remap table, as GPU data requires it).
+	if (!params.suppressTriangleMeshRemapTable || params.buildGPUData)
+	{
+		PxU32* newMap = PX_NEW(PxU32)[meshData.mNbTriangles];
+		for (PxU32 i = 0; i<meshData.mNbTriangles; i++)
+			newMap[i] = meshData.mGRB_faceRemap ? meshData.mGRB_faceRemap[order[i]] : order[i];
+		PX_DELETE_POD(meshData.mGRB_faceRemap);
+		meshData.mGRB_faceRemap = newMap;
+	}
+
+	meshInterface.releaseRemap();
+
+}
+
+// Serializes the BV32 tree: 'BV32' chunk, version, local bounds, init data,
+// then the packed nodes. As with the BV4 writer, the mismatch flag is derived
+// from the host endianness (swap on little-endian hosts).
+void BV32TriangleMeshBuilder::saveMidPhaseStructure(Gu::BV32Tree* bv32Tree, PxOutputStream& stream)
+{
+	const PxU32 version = 1;
+
+	const bool mismatch = (littleEndian() == 1);
+	writeChunk('B', 'V', '3', '2', stream);
+	writeDword(version, mismatch, stream);
+
+	writeFloat(bv32Tree->mLocalBounds.mCenter.x, mismatch, stream);
+	writeFloat(bv32Tree->mLocalBounds.mCenter.y, mismatch, stream);
+	writeFloat(bv32Tree->mLocalBounds.mCenter.z, mismatch, stream);
+	writeFloat(bv32Tree->mLocalBounds.mExtentsMagnitude, mismatch, stream);
+
+	writeDword(bv32Tree->mInitData, mismatch, stream);
+
+	writeDword(bv32Tree->mNbPackedNodes, mismatch, stream);
+
+	PX_ASSERT(bv32Tree->mNbPackedNodes > 0);
+	for (PxU32 i = 0; i < bv32Tree->mNbPackedNodes; ++i)
+	{
+		BV32DataPacked& node = bv32Tree->mPackedNodes[i];
+
+		// Each packed node stores mNbNodes children: child data words followed by
+		// 4 floats per child for centers and 4 per child for extents.
+		const PxU32 nbElements = node.mNbNodes * 4;
+		writeDword(node.mNbNodes, mismatch, stream);
+		WriteDwordBuffer(node.mData, node.mNbNodes, mismatch, stream);
+		writeFloatBuffer(&node.mCenter[0].x, nbElements, mismatch, stream);
+		writeFloatBuffer(&node.mExtents[0].x, nbElements, mismatch, stream);
+
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// RTree (BVH33) builder: routes the shared base-class logic to the member RTree mesh data.
+RTreeTriangleMeshBuilder::RTreeTriangleMeshBuilder(const PxCookingParams& params) : TriangleMeshBuilder(mData, params)
+{
+}
+
+RTreeTriangleMeshBuilder::~RTreeTriangleMeshBuilder()
+{
+}
+
+// Leaf callback for the RTree cooker: packs each leaf's (triangle count, start
+// index) pair into the single PxU32 stored in the tree node.
+struct RTreeCookerRemap : RTreeCooker::RemapCallback
+{
+	PxU32 mNbTris;	// total triangle count; used only to sanity-check callback arguments
+	RTreeCookerRemap(PxU32 numTris) : mNbTris(numTris)
+	{
+	}
+
+	// \param val       [out] node payload to fill
+	// \param start     first triangle of the leaf (in post-remap order)
+	// \param leafCount number of triangles in the leaf (1..16)
+	virtual void remap(PxU32* val, PxU32 start, PxU32 leafCount)
+	{
+		PX_ASSERT(leafCount > 0);
+		PX_ASSERT(leafCount <= 16); // sanity check
+		PX_ASSERT(start < mNbTris);
+		PX_ASSERT(start+leafCount <= mNbTris);
+		PX_ASSERT(val);
+		LeafTriangles lt;
+		// here we remap from ordered leaf index in the rtree to index in post-remap in triangles
+		// this post-remap will happen later
+		lt.SetData(leafCount, start);
+		*val = lt.Data;
+	}
+};
+
+// Builds the BVH33 RTree midphase. Cooking parameters come either from the
+// legacy top-level fields or, when a BVH33 midphase descriptor was supplied,
+// from that descriptor. The builder's triangle permutation is then applied to
+// the whole mesh via remapTopology().
+void RTreeTriangleMeshBuilder::createMidPhaseStructure()
+{
+	const PxReal meshSizePerformanceTradeOff = (mParams.midphaseDesc.getType() == PxMeshMidPhase::eINVALID) ?
+		mParams.meshSizePerformanceTradeOff : mParams.midphaseDesc.mBVH33Desc.meshSizePerformanceTradeOff;
+	const PxMeshCookingHint::Enum meshCookingHint = (mParams.midphaseDesc.getType() == PxMeshMidPhase::eINVALID) ?
+		mParams.meshCookingHint : mParams.midphaseDesc.mBVH33Desc.meshCookingHint;
+
+	// Only one of the two index pointers is non-NULL, depending on index width.
+	Array<PxU32> resultPermute;
+	RTreeCookerRemap rc(mMeshData.mNbTriangles);
+	RTreeCooker::buildFromTriangles(
+		mData.mRTree,
+		mMeshData.mVertices, mMeshData.mNbVertices,
+		(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES) ? reinterpret_cast<PxU16*>(mMeshData.mTriangles) : NULL,
+		!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES) ? reinterpret_cast<PxU32*>(mMeshData.mTriangles) : NULL,
+		mMeshData.mNbTriangles, resultPermute, &rc, meshSizePerformanceTradeOff, meshCookingHint);
+
+	PX_ASSERT(resultPermute.size() == mMeshData.mNbTriangles);
+
+	// Reorder indices, materials, remap table, etc. to the tree's triangle order.
+	remapTopology(resultPermute.begin());
+}
+
+// Serializes an RTree: 'RTRE' chunk + version + root structure, immediately
+// followed by all RTreePage pages (SoA min/max per axis + child pointers).
+// The mismatch flag is derived from the host endianness (swap on little-endian
+// hosts), matching the RTree load path.
+static void saveRTreeData(PxOutputStream& stream, const RTree& d)
+{
+	// save the RTree root structure followed immediately by RTreePage pages to an output stream
+	const bool mismatch = (littleEndian() == 1);
+	writeChunk('R', 'T', 'R', 'E', stream);
+	writeDword(RTREE_COOK_VERSION, mismatch, stream);
+	writeFloatBuffer(&d.mBoundsMin.x, 4, mismatch, stream);
+	writeFloatBuffer(&d.mBoundsMax.x, 4, mismatch, stream);
+	writeFloatBuffer(&d.mInvDiagonal.x, 4, mismatch, stream);
+	writeFloatBuffer(&d.mDiagonalScaler.x, 4, mismatch, stream);
+	writeDword(d.mPageSize, mismatch, stream);
+	writeDword(d.mNumRootPages, mismatch, stream);
+	writeDword(d.mNumLevels, mismatch, stream);
+	writeDword(d.mTotalNodes, mismatch, stream);
+	writeDword(d.mTotalPages, mismatch, stream);
+	PxU32 unused = 0; writeDword(unused, mismatch, stream); // backwards compatibility
+	for (PxU32 j = 0; j < d.mTotalPages; j++)
+	{
+		// Each page stores RTREE_N nodes in struct-of-arrays layout.
+		writeFloatBuffer(d.mPages[j].minx, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].miny, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].minz, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].maxx, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].maxy, RTREE_N, mismatch, stream);
+		writeFloatBuffer(d.mPages[j].maxz, RTREE_N, mismatch, stream);
+		WriteDwordBuffer(d.mPages[j].ptrs, RTREE_N, mismatch, stream);
+	}
+}
+
+// Midphase serialization hook for the BVH33 builder: delegates to saveRTreeData.
+void RTreeTriangleMeshBuilder::saveMidPhaseStructure(PxOutputStream& stream) const
+{
+	// Export RTree
+	saveRTreeData(stream, mData.mRTree);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+}
diff --git a/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.h b/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.h
new file mode 100644
index 00000000..639d0a12
--- /dev/null
+++ b/PhysX_3.4/Source/PhysXCooking/src/mesh/TriangleMeshBuilder.h
@@ -0,0 +1,120 @@
+// This code contains NVIDIA Confidential Information and is disclosed to you
+// under a form of NVIDIA software license agreement provided separately to you.
+//
+// Notice
+// NVIDIA Corporation and its licensors retain all intellectual property and
+// proprietary rights in and to this software and related documentation and
+// any modifications thereto. Any use, reproduction, disclosure, or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA Corporation is strictly prohibited.
+//
+// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
+// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
+// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
+// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// Information and code furnished is believed to be accurate and reliable.
+// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
+// information or for any infringement of patents or other rights of third parties that may
+// result from its use. No license is granted by implication or otherwise under any patent
+// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
+// This code supersedes and replaces all information previously supplied.
+// NVIDIA Corporation products are not authorized for use as critical
+// components in life support devices or systems without express written approval of
+// NVIDIA Corporation.
+//
+// Copyright (c) 2008-2016 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
+
+
+#ifndef PX_COLLISION_TriangleMeshBUILDER
+#define PX_COLLISION_TriangleMeshBUILDER
+
+#include "GuMeshData.h"
+#include "cooking/PxCooking.h"
+
+namespace physx
+{
+ namespace Gu
+ {
+ class EdgeListBuilder;
+ }
+
+	// Base class of the triangle-mesh cooking pipeline: imports and cleans the
+	// mesh described by a PxTriangleMeshDesc into Gu::TriangleMeshData, asks the
+	// derived class to build a midphase acceleration structure, and serializes
+	// the result. Derived classes supply one midphase flavor (BVH33 or BVH34).
+	class TriangleMeshBuilder
+	{
+	public:
+		TriangleMeshBuilder(Gu::TriangleMeshData& mesh, const PxCookingParams& params);
+		virtual ~TriangleMeshBuilder();
+
+		// Midphase format identifier written into the serialized stream header.
+		virtual PxMeshMidPhase::Enum getMidphaseID() const = 0;
+		// Called by base code when midphase structure should be built
+		virtual void createMidPhaseStructure() = 0;
+		// Called by base code when midphase structure should be saved
+		virtual void saveMidPhaseStructure(PxOutputStream& stream) const = 0;
+		// Called by base code when mesh index format has changed and the change should be reflected in midphase structure
+		virtual void onMeshIndexFormatChange() {}
+
+		// Welds/validates the imported mesh; fills "condition" with the result code.
+		bool cleanMesh(bool validate, PxTriangleMeshCookingResult::Enum* condition);
+		// Applies a triangle permutation ("order") to all per-triangle mesh data.
+		void remapTopology(const PxU32* order);
+
+		// Builds edge/adjacency data (and active-edge flags when requested).
+		void createSharedEdgeData(bool buildAdjacencies, bool buildActiveEdges);
+
+		// Mirrors the original triangle indices into the GRB (GPU) buffers.
+		void recordTriangleIndices();
+		void createGRBMidPhaseAndData(const PxU32 originalTriangleCount);
+		void createGRBData();
+
+		// Main entry point: import + clean + build midphase/GRB data.
+		bool loadFromDesc(const PxTriangleMeshDesc&, PxTriangleMeshCookingResult::Enum* condition ,bool validate = false);
+		// Serializes the cooked mesh; platformMismatch requests byte-swapping.
+		bool save(PxOutputStream& stream, bool platformMismatch, const PxCookingParams& params) const;
+		// Shrinks 32-bit indices to 16-bit when all vertex indices fit.
+		void checkMeshIndicesSize();
+		PX_FORCE_INLINE Gu::TriangleMeshData& getMeshData() { return mMeshData; }
+	protected:
+		void computeLocalBounds();
+		bool importMesh(const PxTriangleMeshDesc& desc, const PxCookingParams& params, PxTriangleMeshCookingResult::Enum* condition ,bool validate = false);
+
+		TriangleMeshBuilder& operator=(const TriangleMeshBuilder&);	// non-assignable
+		Gu::EdgeListBuilder* edgeList;			// lazily-created edge list (see createEdgeList)
+		const PxCookingParams& mParams;			// cooking parameters, owned by the caller
+		Gu::TriangleMeshData& mMeshData;		// destination mesh data, owned by the derived class
+
+		void releaseEdgeList();
+		void createEdgeList();
+	};
+
+	// Cooking builder producing the BVH33 (RTree) midphase.
+	class RTreeTriangleMeshBuilder : public TriangleMeshBuilder
+	{
+	public:
+		RTreeTriangleMeshBuilder(const PxCookingParams& params);
+		virtual ~RTreeTriangleMeshBuilder();
+
+		virtual PxMeshMidPhase::Enum getMidphaseID() const { return PxMeshMidPhase::eBVH33; }
+		virtual void createMidPhaseStructure();
+		virtual void saveMidPhaseStructure(PxOutputStream& stream) const;
+
+		Gu::RTreeTriangleData mData;	// mesh data + RTree, referenced by the base class
+	};
+
+	// Cooking builder producing the BVH34 (BV4) midphase.
+	class BV4TriangleMeshBuilder : public TriangleMeshBuilder
+	{
+	public:
+		BV4TriangleMeshBuilder(const PxCookingParams& params);
+		virtual ~BV4TriangleMeshBuilder();
+
+		virtual PxMeshMidPhase::Enum getMidphaseID() const { return PxMeshMidPhase::eBVH34; }
+		virtual void createMidPhaseStructure();
+		virtual void saveMidPhaseStructure(PxOutputStream& stream) const;
+		virtual void onMeshIndexFormatChange();
+
+		Gu::BV4TriangleData mData;	// mesh data + BV4 tree, referenced by the base class
+	};
+
+	// Stateless helpers for the GPU (GRB) BV32 midphase; invoked from the base
+	// builder when PxCookingParams::buildGPUData is set.
+	class BV32TriangleMeshBuilder
+	{
+	public:
+		static void createMidPhaseStructure(const PxCookingParams& params, Gu::TriangleMeshData& meshData, Gu::BV32Tree& bv32Tree);
+		static void saveMidPhaseStructure(Gu::BV32Tree* tree, PxOutputStream& stream);
+	};
+
+}
+
+#endif